crypto: qce - dma_map_sg can handle chained SG
author    LABBE Corentin <clabbe.montjoie@gmail.com>
Fri, 2 Oct 2015 06:01:02 +0000 (08:01 +0200)
committer Herbert Xu <herbert@gondor.apana.org.au>
Thu, 8 Oct 2015 13:42:19 +0000 (21:42 +0800)
The qce driver uses two dma_map_sg paths depending on whether the SG
list is chained or not.
Since dma_map_sg can handle both cases, clean up the code by removing
all references to chained SGs.

This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.
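
For context, a minimal sketch of the mapping pattern the converted call
sites follow, assuming a standalone helper (example_map_src is
illustrative and not part of the driver): count the entries covering the
request length with sg_nents_for_len(), then hand the whole list to
dma_map_sg(), which already walks chained entries via sg_next()
internally, so no per-entry mapping loop is needed.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative helper only; error handling sketched for clarity. */
static int example_map_src(struct device *dev, struct scatterlist *src,
			   unsigned int nbytes, enum dma_data_direction dir)
{
	int nents, mapped;

	/* sg_nents_for_len() counts entries covering nbytes, chained or not */
	nents = sg_nents_for_len(src, nbytes);
	if (nents < 0)
		return nents;

	/* dma_map_sg() returns the number of mapped entries, or 0 on failure */
	mapped = dma_map_sg(dev, src, nents, dir);
	if (!mapped)
		return -EFAULT;

	return mapped;
}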

Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/qce/ablkcipher.c
drivers/crypto/qce/cipher.h
drivers/crypto/qce/dma.c
drivers/crypto/qce/dma.h
drivers/crypto/qce/sha.c
drivers/crypto/qce/sha.h

index ad592de475a4295be62c0c59f2af9e4bb2d8e529..2c0d63d48747dcc6fdd9d44a5182144032c2db4f 100644 (file)
@@ -44,10 +44,8 @@ static void qce_ablkcipher_done(void *data)
                        error);
 
        if (diff_dst)
-               qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
-                           rctx->dst_chained);
-       qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
-                   rctx->dst_chained);
+               dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+       dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
 
        sg_free_table(&rctx->dst_tbl);
 
@@ -80,15 +78,11 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
 
-       rctx->src_nents = qce_countsg(req->src, req->nbytes,
-                                     &rctx->src_chained);
-       if (diff_dst) {
-               rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
-                                             &rctx->dst_chained);
-       } else {
+       rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+       if (diff_dst)
+               rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+       else
                rctx->dst_nents = rctx->src_nents;
-               rctx->dst_chained = rctx->src_chained;
-       }
 
        rctx->dst_nents += 1;
 
@@ -116,14 +110,12 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
        sg_mark_end(sg);
        rctx->dst_sg = rctx->dst_tbl.sgl;
 
-       ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
-                       rctx->dst_chained);
+       ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
        if (ret < 0)
                goto error_free;
 
        if (diff_dst) {
-               ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
-                               rctx->src_chained);
+               ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
                if (ret < 0)
                        goto error_unmap_dst;
                rctx->src_sg = req->src;
@@ -149,11 +141,9 @@ error_terminate:
        qce_dma_terminate_all(&qce->dma);
 error_unmap_src:
        if (diff_dst)
-               qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
-                           rctx->src_chained);
+               dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
 error_unmap_dst:
-       qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
-                   rctx->dst_chained);
+       dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
 error_free:
        sg_free_table(&rctx->dst_tbl);
        return ret;
index d5757cfcda2de1a9c8b3da309ff723d9fedc817b..5c6a5f8633e5d39d0f20340617a1df3f8a4137d1 100644 (file)
@@ -32,8 +32,6 @@ struct qce_cipher_ctx {
  * @ivsize: IV size
  * @src_nents: source entries
  * @dst_nents: destination entries
- * @src_chained: is source chained
- * @dst_chained: is destination chained
  * @result_sg: scatterlist used for result buffer
  * @dst_tbl: destination sg table
  * @dst_sg: destination sg pointer table beginning
@@ -47,8 +45,6 @@ struct qce_cipher_reqctx {
        unsigned int ivsize;
        int src_nents;
        int dst_nents;
-       bool src_chained;
-       bool dst_chained;
        struct scatterlist result_sg;
        struct sg_table dst_tbl;
        struct scatterlist *dst_sg;
index 378cb768647f6f7306c94485cc4a5c5347ffe173..4797e795c9b9ccabb665e2a75a8790c2bc304cf3 100644 (file)
@@ -54,58 +54,6 @@ void qce_dma_release(struct qce_dma_data *dma)
        kfree(dma->result_buf);
 }
 
-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
-             enum dma_data_direction dir, bool chained)
-{
-       int err;
-
-       if (chained) {
-               while (sg) {
-                       err = dma_map_sg(dev, sg, 1, dir);
-                       if (!err)
-                               return -EFAULT;
-                       sg = sg_next(sg);
-               }
-       } else {
-               err = dma_map_sg(dev, sg, nents, dir);
-               if (!err)
-                       return -EFAULT;
-       }
-
-       return nents;
-}
-
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
-                enum dma_data_direction dir, bool chained)
-{
-       if (chained)
-               while (sg) {
-                       dma_unmap_sg(dev, sg, 1, dir);
-                       sg = sg_next(sg);
-               }
-       else
-               dma_unmap_sg(dev, sg, nents, dir);
-}
-
-int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
-{
-       struct scatterlist *sg = sglist;
-       int nents = 0;
-
-       if (chained)
-               *chained = false;
-
-       while (nbytes > 0 && sg) {
-               nents++;
-               nbytes -= sg->length;
-               if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
-                       *chained = true;
-               sg = sg_next(sg);
-       }
-
-       return nents;
-}
-
 struct scatterlist *
 qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
 {
index 65bedb81de0b5f19071fff20a2ff9ee2db4fa18d..130235d17bb4e0b60ea6c78986de0a5bb2320619 100644 (file)
@@ -49,11 +49,6 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
                     dma_async_tx_callback cb, void *cb_param);
 void qce_dma_issue_pending(struct qce_dma_data *dma);
 int qce_dma_terminate_all(struct qce_dma_data *dma);
-int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
-                enum dma_data_direction dir, bool chained);
-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
-             enum dma_data_direction dir, bool chained);
 struct scatterlist *
 qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
 
index be2f5049256af5e846c49a66da40f420f5e6a6ae..0c9973ec80ebde80513152b7f16af5d50da8daae 100644 (file)
@@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
        if (error)
                dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
 
-       qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
-                   rctx->src_chained);
-       qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+       dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+       dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
 
        memcpy(rctx->digest, result->auth_iv, digestsize);
        if (req->result)
@@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
                rctx->authklen = AES_KEYSIZE_128;
        }
 
-       rctx->src_nents = qce_countsg(req->src, req->nbytes,
-                                     &rctx->src_chained);
-       ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
-                       rctx->src_chained);
+       rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+       ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
        if (ret < 0)
                return ret;
 
        sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
 
-       ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+       ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
        if (ret < 0)
                goto error_unmap_src;
 
@@ -121,10 +118,9 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 error_terminate:
        qce_dma_terminate_all(&qce->dma);
 error_unmap_dst:
-       qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+       dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
 error_unmap_src:
-       qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
-                   rctx->src_chained);
+       dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
        return ret;
 }
 
index 286f0d5397f359c6662014499f7462887c5e1290..236bb5e9ae7537792ad4b3b925b46f86a053b507 100644 (file)
@@ -36,7 +36,6 @@ struct qce_sha_ctx {
  * @flags: operation flags
  * @src_orig: original request sg list
  * @nbytes_orig: original request number of bytes
- * @src_chained: is source scatterlist chained
  * @src_nents: source number of entries
  * @byte_count: byte count
  * @count: save count in states during update, import and export
@@ -55,7 +54,6 @@ struct qce_sha_reqctx {
        unsigned long flags;
        struct scatterlist *src_orig;
        unsigned int nbytes_orig;
-       bool src_chained;
        int src_nents;
        __be32 byte_count[2];
        u64 count;