Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: caam - Remove GFP_DMA and add DMA alignment padding

GFP_DMA does not guarantee that the returned memory is aligned
for DMA. It should be removed where it is superfluous.

However, kmalloc may start returning DMA-unaligned memory in the future,
so fix this by adding the alignment by hand.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+111 -71
+1 -1
drivers/crypto/caam/blob_gen.c
··· 83 83 output_len = info->input_len - CAAM_BLOB_OVERHEAD; 84 84 } 85 85 86 - desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL | GFP_DMA); 86 + desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL); 87 87 if (!desc) 88 88 return -ENOMEM; 89 89
+10 -6
drivers/crypto/caam/caamalg.c
··· 59 59 #include <crypto/engine.h> 60 60 #include <crypto/xts.h> 61 61 #include <asm/unaligned.h> 62 + #include <linux/dma-mapping.h> 63 + #include <linux/kernel.h> 62 64 63 65 /* 64 66 * crypto alg ··· 1381 1379 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 1382 1380 1383 1381 /* allocate space for base edesc and hw desc commands, link tables */ 1384 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, 1385 - GFP_DMA | flags); 1382 + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags); 1386 1383 if (!edesc) { 1387 1384 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1388 1385 0, 0, 0); ··· 1609 1608 u8 *iv; 1610 1609 int ivsize = crypto_skcipher_ivsize(skcipher); 1611 1610 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; 1611 + unsigned int aligned_size; 1612 1612 1613 1613 src_nents = sg_nents_for_len(req->src, req->cryptlen); 1614 1614 if (unlikely(src_nents < 0)) { ··· 1683 1681 /* 1684 1682 * allocate space for base edesc and hw desc commands, link tables, IV 1685 1683 */ 1686 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, 1687 - GFP_DMA | flags); 1688 - if (!edesc) { 1684 + aligned_size = ALIGN(ivsize, __alignof__(*edesc)); 1685 + aligned_size += sizeof(*edesc) + desc_bytes + sec4_sg_bytes; 1686 + aligned_size = ALIGN(aligned_size, dma_get_cache_alignment()); 1687 + iv = kzalloc(aligned_size, flags); 1688 + if (!iv) { 1689 1689 dev_err(jrdev, "could not allocate extended descriptor\n"); 1690 1690 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1691 1691 0, 0, 0); 1692 1692 return ERR_PTR(-ENOMEM); 1693 1693 } 1694 1694 1695 + edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc))); 1695 1696 edesc->src_nents = src_nents; 1696 1697 edesc->dst_nents = dst_nents; 1697 1698 edesc->mapped_src_nents = mapped_src_nents; ··· 1706 1701 1707 1702 /* Make sure IV is located in a DMAable area */ 1708 1703 if (ivsize) { 1709 - iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes; 1710 1704 
memcpy(iv, req->iv, ivsize); 1711 1705 1712 1706 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
+10 -6
drivers/crypto/caam/caamalg_qi.c
··· 20 20 #include "caamalg_desc.h" 21 21 #include <crypto/xts.h> 22 22 #include <asm/unaligned.h> 23 + #include <linux/dma-mapping.h> 24 + #include <linux/kernel.h> 23 25 24 26 /* 25 27 * crypto alg ··· 961 959 return (struct aead_edesc *)drv_ctx; 962 960 963 961 /* allocate space for base edesc and hw desc commands, link tables */ 964 - edesc = qi_cache_alloc(GFP_DMA | flags); 962 + edesc = qi_cache_alloc(flags); 965 963 if (unlikely(!edesc)) { 966 964 dev_err(qidev, "could not allocate extended descriptor\n"); 967 965 return ERR_PTR(-ENOMEM); ··· 1319 1317 qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); 1320 1318 1321 1319 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); 1322 - if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1323 - ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1320 + if (unlikely(ALIGN(ivsize, __alignof__(*edesc)) + 1321 + offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes > 1322 + CAAM_QI_MEMCACHE_SIZE)) { 1324 1323 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1325 1324 qm_sg_ents, ivsize); 1326 1325 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, ··· 1330 1327 } 1331 1328 1332 1329 /* allocate space for base edesc, link tables and IV */ 1333 - edesc = qi_cache_alloc(GFP_DMA | flags); 1334 - if (unlikely(!edesc)) { 1330 + iv = qi_cache_alloc(flags); 1331 + if (unlikely(!iv)) { 1335 1332 dev_err(qidev, "could not allocate extended descriptor\n"); 1336 1333 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1337 1334 0, DMA_NONE, 0, 0); 1338 1335 return ERR_PTR(-ENOMEM); 1339 1336 } 1340 1337 1338 + edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc))); 1339 + 1341 1340 /* Make sure IV is located in a DMAable area */ 1342 1341 sg_table = &edesc->sgt[0]; 1343 - iv = (u8 *)(sg_table + qm_sg_ents); 1344 1342 memcpy(iv, req->iv, ivsize); 1345 1343 1346 1344 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
+29 -19
drivers/crypto/caam/caamalg_qi2.c
··· 16 16 #include "caamalg_desc.h" 17 17 #include "caamhash_desc.h" 18 18 #include "dpseci-debugfs.h" 19 + #include <linux/dma-mapping.h> 19 20 #include <linux/fsl/mc.h> 21 + #include <linux/kernel.h> 20 22 #include <soc/fsl/dpaa2-io.h> 21 23 #include <soc/fsl/dpaa2-fd.h> 22 24 #include <crypto/xts.h> ··· 372 370 struct dpaa2_sg_entry *sg_table; 373 371 374 372 /* allocate space for base edesc, link tables and IV */ 375 - edesc = qi_cache_zalloc(GFP_DMA | flags); 373 + edesc = qi_cache_zalloc(flags); 376 374 if (unlikely(!edesc)) { 377 375 dev_err(dev, "could not allocate extended descriptor\n"); 378 376 return ERR_PTR(-ENOMEM); ··· 1191 1189 } 1192 1190 1193 1191 /* allocate space for base edesc, link tables and IV */ 1194 - edesc = qi_cache_zalloc(GFP_DMA | flags); 1192 + edesc = qi_cache_zalloc(flags); 1195 1193 if (unlikely(!edesc)) { 1196 1194 dev_err(dev, "could not allocate extended descriptor\n"); 1197 1195 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, ··· 3222 3220 int ret = -ENOMEM; 3223 3221 struct dpaa2_fl_entry *in_fle, *out_fle; 3224 3222 3225 - req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); 3223 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL); 3226 3224 if (!req_ctx) 3227 3225 return -ENOMEM; 3228 3226 3229 3227 in_fle = &req_ctx->fd_flt[1]; 3230 3228 out_fle = &req_ctx->fd_flt[0]; 3231 3229 3232 - flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); 3230 + flc = kzalloc(sizeof(*flc), GFP_KERNEL); 3233 3231 if (!flc) 3234 3232 goto err_flc; 3235 3233 ··· 3318 3316 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); 3319 3317 3320 3318 if (keylen > blocksize) { 3321 - hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); 3319 + unsigned int aligned_len = 3320 + ALIGN(keylen, dma_get_cache_alignment()); 3321 + 3322 + if (aligned_len < keylen) 3323 + return -EOVERFLOW; 3324 + 3325 + hashed_key = kmemdup(key, aligned_len, GFP_KERNEL); 3322 3326 if (!hashed_key) 3323 3327 return -ENOMEM; 3324 3328 ret = 
hash_digest_key(ctx, &keylen, hashed_key, digestsize); ··· 3568 3560 } 3569 3561 3570 3562 /* allocate space for base edesc and link tables */ 3571 - edesc = qi_cache_zalloc(GFP_DMA | flags); 3563 + edesc = qi_cache_zalloc(flags); 3572 3564 if (!edesc) { 3573 3565 dma_unmap_sg(ctx->dev, req->src, src_nents, 3574 3566 DMA_TO_DEVICE); ··· 3662 3654 int ret; 3663 3655 3664 3656 /* allocate space for base edesc and link tables */ 3665 - edesc = qi_cache_zalloc(GFP_DMA | flags); 3657 + edesc = qi_cache_zalloc(flags); 3666 3658 if (!edesc) 3667 3659 return -ENOMEM; 3668 3660 ··· 3751 3743 } 3752 3744 3753 3745 /* allocate space for base edesc and link tables */ 3754 - edesc = qi_cache_zalloc(GFP_DMA | flags); 3746 + edesc = qi_cache_zalloc(flags); 3755 3747 if (!edesc) { 3756 3748 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); 3757 3749 return -ENOMEM; ··· 3844 3836 } 3845 3837 3846 3838 /* allocate space for base edesc and link tables */ 3847 - edesc = qi_cache_zalloc(GFP_DMA | flags); 3839 + edesc = qi_cache_zalloc(flags); 3848 3840 if (!edesc) { 3849 3841 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); 3850 3842 return ret; ··· 3921 3913 int ret = -ENOMEM; 3922 3914 3923 3915 /* allocate space for base edesc and link tables */ 3924 - edesc = qi_cache_zalloc(GFP_DMA | flags); 3916 + edesc = qi_cache_zalloc(flags); 3925 3917 if (!edesc) 3926 3918 return ret; 3927 3919 ··· 4020 4012 } 4021 4013 4022 4014 /* allocate space for base edesc and link tables */ 4023 - edesc = qi_cache_zalloc(GFP_DMA | flags); 4015 + edesc = qi_cache_zalloc(flags); 4024 4016 if (!edesc) { 4025 4017 dma_unmap_sg(ctx->dev, req->src, src_nents, 4026 4018 DMA_TO_DEVICE); ··· 4133 4125 } 4134 4126 4135 4127 /* allocate space for base edesc and link tables */ 4136 - edesc = qi_cache_zalloc(GFP_DMA | flags); 4128 + edesc = qi_cache_zalloc(flags); 4137 4129 if (!edesc) { 4138 4130 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); 4139 4131 return ret; ··· 4238 4230 } 
4239 4231 4240 4232 /* allocate space for base edesc and link tables */ 4241 - edesc = qi_cache_zalloc(GFP_DMA | flags); 4233 + edesc = qi_cache_zalloc(flags); 4242 4234 if (!edesc) { 4243 4235 dma_unmap_sg(ctx->dev, req->src, src_nents, 4244 4236 DMA_TO_DEVICE); ··· 4934 4926 { 4935 4927 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 }; 4936 4928 struct device *dev = priv->dev; 4929 + unsigned int alignmask; 4937 4930 int err; 4938 4931 4939 4932 /* ··· 4945 4936 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) 4946 4937 return 0; 4947 4938 4948 - priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, 4949 - GFP_KERNEL | GFP_DMA); 4939 + alignmask = DPAA2_CSCN_ALIGN - 1; 4940 + alignmask |= dma_get_cache_alignment() - 1; 4941 + priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1), 4942 + GFP_KERNEL); 4950 4943 if (!priv->cscn_mem) 4951 4944 return -ENOMEM; 4952 4945 4953 - priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN); 4954 - priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned, 4946 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, 4955 4947 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); 4956 4948 if (dma_mapping_error(dev, priv->cscn_dma)) { 4957 4949 dev_err(dev, "Error mapping CSCN memory area\n"); ··· 5184 5174 priv->domain = iommu_get_domain_for_dev(dev); 5185 5175 5186 5176 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE, 5187 - 0, SLAB_CACHE_DMA, NULL); 5177 + 0, 0, NULL); 5188 5178 if (!qi_cache) { 5189 5179 dev_err(dev, "Can't allocate SEC cache\n"); 5190 5180 return -ENOMEM; ··· 5461 5451 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, 5462 5452 DPAA2_CSCN_SIZE, 5463 5453 DMA_FROM_DEVICE); 5464 - if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) { 5454 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { 5465 5455 dev_dbg_ratelimited(dev, "Dropping request\n"); 5466 5456 return -EBUSY; 5467 5457 }
+4 -6
drivers/crypto/caam/caamalg_qi2.h
··· 7 7 #ifndef _CAAMALG_QI2_H_ 8 8 #define _CAAMALG_QI2_H_ 9 9 10 + #include <crypto/internal/skcipher.h> 11 + #include <linux/compiler_attributes.h> 10 12 #include <soc/fsl/dpaa2-io.h> 11 13 #include <soc/fsl/dpaa2-fd.h> 12 14 #include <linux/threads.h> 13 15 #include <linux/netdevice.h> 14 16 #include "dpseci.h" 15 17 #include "desc_constr.h" 16 - #include <crypto/skcipher.h> 17 18 18 19 #define DPAA2_CAAM_STORE_SIZE 16 19 20 /* NAPI weight *must* be a multiple of the store size. */ ··· 37 36 * @tx_queue_attr: array of Tx queue attributes 38 37 * @cscn_mem: pointer to memory region containing the congestion SCN 39 38 * it's size is larger than to accommodate alignment 40 - * @cscn_mem_aligned: pointer to congestion SCN; it is computed as 41 - * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN) 42 39 * @cscn_dma: dma address used by the QMAN to write CSCN messages 43 40 * @dev: device associated with the DPSECI object 44 41 * @mc_io: pointer to MC portal's I/O object ··· 57 58 58 59 /* congestion */ 59 60 void *cscn_mem; 60 - void *cscn_mem_aligned; 61 61 dma_addr_t cscn_dma; 62 62 63 63 struct device *dev; ··· 156 158 struct caam_flc { 157 159 u32 flc[16]; 158 160 u32 sh_desc[MAX_SDLEN]; 159 - } ____cacheline_aligned; 161 + } __aligned(CRYPTO_DMA_ALIGN); 160 162 161 163 enum optype { 162 164 ENCRYPT = 0, ··· 178 180 * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc 179 181 */ 180 182 struct caam_request { 181 - struct dpaa2_fl_entry fd_flt[2]; 183 + struct dpaa2_fl_entry fd_flt[2] __aligned(CRYPTO_DMA_ALIGN); 182 184 dma_addr_t fd_flt_dma; 183 185 struct caam_flc *flc; 184 186 dma_addr_t flc_dma;
+11 -3
drivers/crypto/caam/caamhash.c
··· 66 66 #include "key_gen.h" 67 67 #include "caamhash_desc.h" 68 68 #include <crypto/engine.h> 69 + #include <linux/dma-mapping.h> 70 + #include <linux/kernel.h> 69 71 70 72 #define CAAM_CRA_PRIORITY 3000 71 73 ··· 367 365 dma_addr_t key_dma; 368 366 int ret; 369 367 370 - desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 368 + desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL); 371 369 if (!desc) { 372 370 dev_err(jrdev, "unable to allocate key input memory\n"); 373 371 return -ENOMEM; ··· 434 432 dev_dbg(jrdev, "keylen %d\n", keylen); 435 433 436 434 if (keylen > blocksize) { 437 - hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); 435 + unsigned int aligned_len = 436 + ALIGN(keylen, dma_get_cache_alignment()); 437 + 438 + if (aligned_len < keylen) 439 + return -EOVERFLOW; 440 + 441 + hashed_key = kmemdup(key, keylen, GFP_KERNEL); 438 442 if (!hashed_key) 439 443 return -ENOMEM; 440 444 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); ··· 710 702 struct ahash_edesc *edesc; 711 703 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 712 704 713 - edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 705 + edesc = kzalloc(sizeof(*edesc) + sg_size, flags); 714 706 if (!edesc) { 715 707 dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 716 708 return NULL;
+17 -14
drivers/crypto/caam/caampkc.c
··· 16 16 #include "desc_constr.h" 17 17 #include "sg_sw_sec4.h" 18 18 #include "caampkc.h" 19 + #include <linux/dma-mapping.h> 20 + #include <linux/kernel.h> 19 21 20 22 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) 21 23 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ ··· 312 310 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 313 311 314 312 /* allocate space for base edesc, hw desc commands and link tables */ 315 - edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, 316 - GFP_DMA | flags); 313 + edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags); 317 314 if (!edesc) 318 315 goto dst_fail; 319 316 ··· 899 898 if (!nbytes) 900 899 return NULL; 901 900 902 - dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL); 901 + dst = kzalloc(dstlen, GFP_KERNEL); 903 902 if (!dst) 904 903 return NULL; 905 904 ··· 911 910 /** 912 911 * caam_read_raw_data - Read a raw byte stream as a positive integer. 913 912 * The function skips buffer's leading zeros, copies the remained data 914 - * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns 913 + * to a buffer allocated in the GFP_KERNEL zone and returns 915 914 * the address of the new buffer. 916 915 * 917 916 * @buf : The data to read ··· 924 923 if (!*nbytes) 925 924 return NULL; 926 925 927 - return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL); 926 + return kmemdup(buf, *nbytes, GFP_KERNEL); 928 927 } 929 928 930 929 static int caam_rsa_check_key_length(unsigned int len) ··· 950 949 return ret; 951 950 952 951 /* Copy key in DMA zone */ 953 - rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); 952 + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL); 954 953 if (!rsa_key->e) 955 954 goto err; 956 955 957 956 /* 958 957 * Skip leading zeros and copy the positive integer to a buffer 959 - * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor 958 + * allocated in the GFP_KERNEL zone. 
The decryption descriptor 960 959 * expects a positive integer for the RSA modulus and uses its length as 961 960 * decryption output length. 962 961 */ ··· 984 983 struct caam_rsa_key *rsa_key = &ctx->key; 985 984 size_t p_sz = raw_key->p_sz; 986 985 size_t q_sz = raw_key->q_sz; 986 + unsigned aligned_size; 987 987 988 988 rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz); 989 989 if (!rsa_key->p) ··· 996 994 goto free_p; 997 995 rsa_key->q_sz = q_sz; 998 996 999 - rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL); 997 + aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment()); 998 + rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL); 1000 999 if (!rsa_key->tmp1) 1001 1000 goto free_q; 1002 1001 1003 - rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL); 1002 + aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment()); 1003 + rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL); 1004 1004 if (!rsa_key->tmp2) 1005 1005 goto free_tmp1; 1006 1006 ··· 1055 1051 return ret; 1056 1052 1057 1053 /* Copy key in DMA zone */ 1058 - rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL); 1054 + rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL); 1059 1055 if (!rsa_key->d) 1060 1056 goto err; 1061 1057 1062 - rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); 1058 + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL); 1063 1059 if (!rsa_key->e) 1064 1060 goto err; 1065 1061 1066 1062 /* 1067 1063 * Skip leading zeros and copy the positive integer to a buffer 1068 - * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor 1064 + * allocated in the GFP_KERNEL zone. The decryption descriptor 1069 1065 * expects a positive integer for the RSA modulus and uses its length as 1070 1066 * decryption output length. 
1071 1067 */ ··· 1189 1185 return 0; 1190 1186 1191 1187 /* allocate zero buffer, used for padding input */ 1192 - zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | 1193 - GFP_KERNEL); 1188 + zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL); 1194 1189 if (!zero_buffer) 1195 1190 return -ENOMEM; 1196 1191
+9 -3
drivers/crypto/caam/caamprng.c
··· 8 8 9 9 #include <linux/completion.h> 10 10 #include <crypto/internal/rng.h> 11 + #include <linux/dma-mapping.h> 12 + #include <linux/kernel.h> 11 13 #include "compat.h" 12 14 #include "regs.h" 13 15 #include "intern.h" ··· 77 75 const u8 *src, unsigned int slen, 78 76 u8 *dst, unsigned int dlen) 79 77 { 78 + unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment()); 80 79 struct caam_prng_ctx ctx; 81 80 struct device *jrdev; 82 81 dma_addr_t dst_dma; ··· 85 82 u8 *buf; 86 83 int ret; 87 84 88 - buf = kzalloc(dlen, GFP_KERNEL); 85 + if (aligned_dlen < dlen) 86 + return -EOVERFLOW; 87 + 88 + buf = kzalloc(aligned_dlen, GFP_KERNEL); 89 89 if (!buf) 90 90 return -ENOMEM; 91 91 ··· 100 94 return ret; 101 95 } 102 96 103 - desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA); 97 + desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL); 104 98 if (!desc) { 105 99 ret = -ENOMEM; 106 100 goto out1; ··· 162 156 return ret; 163 157 } 164 158 165 - desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA); 159 + desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL); 166 160 if (!desc) { 167 161 caam_jr_free(jrdev); 168 162 return -ENOMEM;
+7 -4
drivers/crypto/caam/caamrng.c
··· 12 12 #include <linux/hw_random.h> 13 13 #include <linux/completion.h> 14 14 #include <linux/atomic.h> 15 + #include <linux/dma-mapping.h> 16 + #include <linux/kernel.h> 15 17 #include <linux/kfifo.h> 16 18 17 19 #include "compat.h" ··· 178 176 int err; 179 177 180 178 ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, 181 - GFP_DMA | GFP_KERNEL); 179 + GFP_KERNEL); 182 180 if (!ctx->desc_sync) 183 181 return -ENOMEM; 184 182 185 183 ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, 186 - GFP_DMA | GFP_KERNEL); 184 + GFP_KERNEL); 187 185 if (!ctx->desc_async) 188 186 return -ENOMEM; 189 187 190 - if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE, 191 - GFP_DMA | GFP_KERNEL)) 188 + if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE, 189 + dma_get_cache_alignment()), 190 + GFP_KERNEL)) 192 191 return -ENOMEM; 193 192 194 193 INIT_WORK(&ctx->worker, caam_rng_worker);
+2 -2
drivers/crypto/caam/ctrl.c
··· 199 199 u32 *desc, status; 200 200 int sh_idx, ret = 0; 201 201 202 - desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA); 202 + desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); 203 203 if (!desc) 204 204 return -ENOMEM; 205 205 ··· 276 276 int ret = 0, sh_idx; 277 277 278 278 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; 279 - desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA); 279 + desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); 280 280 if (!desc) 281 281 return -ENOMEM; 282 282
+1 -1
drivers/crypto/caam/key_gen.c
··· 64 64 if (local_max > max_keylen) 65 65 return -EINVAL; 66 66 67 - desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 67 + desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL); 68 68 if (!desc) { 69 69 dev_err(jrdev, "unable to allocate key input memory\n"); 70 70 return ret;
+2 -2
drivers/crypto/caam/qi.c
··· 614 614 struct qman_fq *fq; 615 615 int ret; 616 616 617 - fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA); 617 + fq = kzalloc(sizeof(*fq), GFP_KERNEL); 618 618 if (!fq) 619 619 return -ENOMEM; 620 620 ··· 756 756 } 757 757 758 758 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, 759 - SLAB_CACHE_DMA, NULL); 759 + 0, NULL); 760 760 if (!qi_cache) { 761 761 dev_err(qidev, "Can't allocate CAAM cache\n"); 762 762 free_rsp_fqs();
+8 -4
drivers/crypto/caam/qi.h
··· 9 9 #ifndef __QI_H__ 10 10 #define __QI_H__ 11 11 12 + #include <crypto/algapi.h> 13 + #include <linux/compiler_attributes.h> 12 14 #include <soc/fsl/qman.h> 13 15 #include "compat.h" 14 16 #include "desc.h" ··· 60 58 * @qidev: device pointer for CAAM/QI backend 61 59 */ 62 60 struct caam_drv_ctx { 63 - u32 prehdr[2]; 64 - u32 sh_desc[MAX_SDLEN]; 61 + struct { 62 + u32 prehdr[2]; 63 + u32 sh_desc[MAX_SDLEN]; 64 + } __aligned(CRYPTO_DMA_ALIGN); 65 65 dma_addr_t context_a; 66 66 struct qman_fq *req_fq; 67 67 struct qman_fq *rsp_fq; ··· 71 67 int cpu; 72 68 enum optype op_type; 73 69 struct device *qidev; 74 - } ____cacheline_aligned; 70 + }; 75 71 76 72 /** 77 73 * caam_drv_req - The request structure the driver application should fill while ··· 92 88 struct caam_drv_ctx *drv_ctx; 93 89 caam_qi_cbk cbk; 94 90 void *app_ctx; 95 - } ____cacheline_aligned; 91 + } __aligned(CRYPTO_DMA_ALIGN); 96 92 97 93 /** 98 94 * caam_drv_ctx_init - Initialise a CAAM/QI driver context