Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qce - dma_map_sg can handle chained SG

The qce driver uses two dma_map_sg paths depending on whether the SGs
are chained or not.
Since dma_map_sg can handle both cases, clean up the code by removing
all references to chained SGs.

Thus the qce_mapsg, qce_unmapsg and qce_countsg functions are removed.

Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

LABBE Corentin and committed by
Herbert Xu
fea40451 166db195

+17 -94
+10 -20
drivers/crypto/qce/ablkcipher.c
··· 44 44 error); 45 45 46 46 if (diff_dst) 47 - qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src, 48 - rctx->dst_chained); 49 - qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, 50 - rctx->dst_chained); 47 + dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src); 48 + dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 51 49 52 50 sg_free_table(&rctx->dst_tbl); 53 51 ··· 78 80 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; 79 81 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; 80 82 81 - rctx->src_nents = qce_countsg(req->src, req->nbytes, 82 - &rctx->src_chained); 83 - if (diff_dst) { 84 - rctx->dst_nents = qce_countsg(req->dst, req->nbytes, 85 - &rctx->dst_chained); 86 - } else { 83 + rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); 84 + if (diff_dst) 85 + rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); 86 + else 87 87 rctx->dst_nents = rctx->src_nents; 88 - rctx->dst_chained = rctx->src_chained; 89 - } 90 88 91 89 rctx->dst_nents += 1; 92 90 ··· 110 116 sg_mark_end(sg); 111 117 rctx->dst_sg = rctx->dst_tbl.sgl; 112 118 113 - ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, 114 - rctx->dst_chained); 119 + ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 115 120 if (ret < 0) 116 121 goto error_free; 117 122 118 123 if (diff_dst) { 119 - ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, 120 - rctx->src_chained); 124 + ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src); 121 125 if (ret < 0) 122 126 goto error_unmap_dst; 123 127 rctx->src_sg = req->src; ··· 141 149 qce_dma_terminate_all(&qce->dma); 142 150 error_unmap_src: 143 151 if (diff_dst) 144 - qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, 145 - rctx->src_chained); 152 + dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src); 146 153 error_unmap_dst: 147 - qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst, 148 - rctx->dst_chained); 154 + 
dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 149 155 error_free: 150 156 sg_free_table(&rctx->dst_tbl); 151 157 return ret;
-4
drivers/crypto/qce/cipher.h
··· 32 32 * @ivsize: IV size 33 33 * @src_nents: source entries 34 34 * @dst_nents: destination entries 35 - * @src_chained: is source chained 36 - * @dst_chained: is destination chained 37 35 * @result_sg: scatterlist used for result buffer 38 36 * @dst_tbl: destination sg table 39 37 * @dst_sg: destination sg pointer table beginning ··· 45 47 unsigned int ivsize; 46 48 int src_nents; 47 49 int dst_nents; 48 - bool src_chained; 49 - bool dst_chained; 50 50 struct scatterlist result_sg; 51 51 struct sg_table dst_tbl; 52 52 struct scatterlist *dst_sg;
-52
drivers/crypto/qce/dma.c
··· 54 54 kfree(dma->result_buf); 55 55 } 56 56 57 - int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, 58 - enum dma_data_direction dir, bool chained) 59 - { 60 - int err; 61 - 62 - if (chained) { 63 - while (sg) { 64 - err = dma_map_sg(dev, sg, 1, dir); 65 - if (!err) 66 - return -EFAULT; 67 - sg = sg_next(sg); 68 - } 69 - } else { 70 - err = dma_map_sg(dev, sg, nents, dir); 71 - if (!err) 72 - return -EFAULT; 73 - } 74 - 75 - return nents; 76 - } 77 - 78 - void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, 79 - enum dma_data_direction dir, bool chained) 80 - { 81 - if (chained) 82 - while (sg) { 83 - dma_unmap_sg(dev, sg, 1, dir); 84 - sg = sg_next(sg); 85 - } 86 - else 87 - dma_unmap_sg(dev, sg, nents, dir); 88 - } 89 - 90 - int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained) 91 - { 92 - struct scatterlist *sg = sglist; 93 - int nents = 0; 94 - 95 - if (chained) 96 - *chained = false; 97 - 98 - while (nbytes > 0 && sg) { 99 - nents++; 100 - nbytes -= sg->length; 101 - if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) 102 - *chained = true; 103 - sg = sg_next(sg); 104 - } 105 - 106 - return nents; 107 - } 108 - 109 57 struct scatterlist * 110 58 qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) 111 59 {
-5
drivers/crypto/qce/dma.h
··· 49 49 dma_async_tx_callback cb, void *cb_param); 50 50 void qce_dma_issue_pending(struct qce_dma_data *dma); 51 51 int qce_dma_terminate_all(struct qce_dma_data *dma); 52 - int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained); 53 - void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, 54 - enum dma_data_direction dir, bool chained); 55 - int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, 56 - enum dma_data_direction dir, bool chained); 57 52 struct scatterlist * 58 53 qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); 59 54
+7 -11
drivers/crypto/qce/sha.c
··· 51 51 if (error) 52 52 dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); 53 53 54 - qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, 55 - rctx->src_chained); 56 - qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); 54 + dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); 55 + dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); 57 56 58 57 memcpy(rctx->digest, result->auth_iv, digestsize); 59 58 if (req->result) ··· 91 92 rctx->authklen = AES_KEYSIZE_128; 92 93 } 93 94 94 - rctx->src_nents = qce_countsg(req->src, req->nbytes, 95 - &rctx->src_chained); 96 - ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, 97 - rctx->src_chained); 95 + rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); 96 + ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); 98 97 if (ret < 0) 99 98 return ret; 100 99 101 100 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); 102 101 103 - ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); 102 + ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); 104 103 if (ret < 0) 105 104 goto error_unmap_src; 106 105 ··· 118 121 error_terminate: 119 122 qce_dma_terminate_all(&qce->dma); 120 123 error_unmap_dst: 121 - qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0); 124 + dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); 122 125 error_unmap_src: 123 - qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, 124 - rctx->src_chained); 126 + dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); 125 127 return ret; 126 128 } 127 129
-2
drivers/crypto/qce/sha.h
··· 36 36 * @flags: operation flags 37 37 * @src_orig: original request sg list 38 38 * @nbytes_orig: original request number of bytes 39 - * @src_chained: is source scatterlist chained 40 39 * @src_nents: source number of entries 41 40 * @byte_count: byte count 42 41 * @count: save count in states during update, import and export ··· 54 55 unsigned long flags; 55 56 struct scatterlist *src_orig; 56 57 unsigned int nbytes_orig; 57 - bool src_chained; 58 58 int src_nents; 59 59 __be32 byte_count[2]; 60 60 u64 count;