Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'v7.0-p4' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

- Add missing async markers to tegra

- Fix long hmac key DMA handling in caam

- Fix spurious ENOSPC errors in deflate

- Fix SG chaining in af_alg

- Do not use in-place processing in algif_aead

- Fix out-of-place destination overflow in authencesn

* tag 'v7.0-p4' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
crypto: authencesn - Do not place hiseq at end of dst for out-of-place decryption
crypto: algif_aead - Revert to operating out-of-place
crypto: af-alg - fix NULL pointer dereference in scatterwalk
crypto: deflate - fix spurious -ENOSPC
crypto: caam - fix overflow on long hmac keys
crypto: caam - fix DMA corruption on long hmac keys
crypto: tegra - Add missing CRYPTO_ALG_ASYNC

+102 -170
+13 -40
crypto/af_alg.c
···
 	sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
 	sgl->cur = 0;

-	if (sg)
+	if (sg) {
+		sg_unmark_end(sg + MAX_SGL_ENTS - 1);
 		sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+	}

 	list_add_tail(&sgl->list, &ctx->tsgl_list);
 }
···
 /**
  * af_alg_count_tsgl - Count number of TX SG entries
  *
- * The counting starts from the beginning of the SGL to @bytes. If
- * an @offset is provided, the counting of the SG entries starts at the @offset.
+ * The counting starts from the beginning of the SGL to @bytes.
  *
  * @sk: socket of connection to user space
  * @bytes: Count the number of SG entries holding given number of bytes.
- * @offset: Start the counting of SG entries from the given offset.
  * Return: Number of TX SG entries found given the constraints
  */
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
 {
 	const struct alg_sock *ask = alg_sk(sk);
 	const struct af_alg_ctx *ctx = ask->private;
···
 		const struct scatterlist *sg = sgl->sg;

 		for (i = 0; i < sgl->cur; i++) {
-			size_t bytes_count;
-
-			/* Skip offset */
-			if (offset >= sg[i].length) {
-				offset -= sg[i].length;
-				bytes -= sg[i].length;
-				continue;
-			}
-
-			bytes_count = sg[i].length - offset;
-
-			offset = 0;
 			sgl_count++;
-
-			/* If we have seen requested number of bytes, stop */
-			if (bytes_count >= bytes)
+			if (sg[i].length >= bytes)
 				return sgl_count;

-			bytes -= bytes_count;
+			bytes -= sg[i].length;
 		}
 	}
···
  * af_alg_pull_tsgl - Release the specified buffers from TX SGL
  *
  * If @dst is non-null, reassign the pages to @dst. The caller must release
- * the pages. If @dst_offset is given only reassign the pages to @dst starting
- * at the @dst_offset (byte). The caller must ensure that @dst is large
- * enough (e.g. by using af_alg_count_tsgl with the same offset).
+ * the pages.
  *
  * @sk: socket of connection to user space
  * @used: Number of bytes to pull from TX SGL
  * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
  *	 caller must release the buffers in dst.
- * @dst_offset: Reassign the TX SGL from given offset. All buffers before
- *		reaching the offset is released.
  */
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
-		      size_t dst_offset)
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct af_alg_ctx *ctx = ask->private;
···
 			 * SG entries in dst.
 			 */
 			if (dst) {
-				if (dst_offset >= plen) {
-					/* discard page before offset */
-					dst_offset -= plen;
-				} else {
-					/* reassign page to dst after offset */
-					get_page(page);
-					sg_set_page(dst + j, page,
-						    plen - dst_offset,
-						    sg[i].offset + dst_offset);
-					dst_offset = 0;
-					j++;
-				}
+				/* reassign page to dst after offset */
+				get_page(page);
+				sg_set_page(dst + j, page, plen, sg[i].offset);
+				j++;
 			}

 			sg[i].length -= plen;
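The af_alg hunk above is the "fix NULL pointer dereference in scatterwalk" entry from the shortlog. A minimal kernel-context sketch of the rule it enforces (NENTS and chain_tables are illustrative, not from the patch): sg_next() returns NULL at an entry whose end bit is set, so a stale end marker on the last data slot hides the chain entry that follows it, and a walker expecting more data dereferences NULL.

#include <linux/scatterlist.h>

#define NENTS 4	/* stand-in for MAX_SGL_ENTS + 1 */

/* Link two SG tables; entries 0..NENTS-2 hold data and entry NENTS-1 is
 * the reserved chain slot, as in af_alg's TX SGL. */
static void chain_tables(struct scatterlist *prev, struct scatterlist *next)
{
	/* Clear a possibly stale end marker on the last data entry, or
	 * walkers stop there and never reach the chain slot. */
	sg_unmark_end(&prev[NENTS - 2]);
	sg_chain(prev, NENTS, next);
}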
+19 -81
crypto/algif_aead.c
···
 #include <crypto/internal/aead.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/if_alg.h>
-#include <crypto/skcipher.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
···
 	struct alg_sock *pask = alg_sk(psk);
 	struct af_alg_ctx *ctx = ask->private;
 	struct crypto_aead *tfm = pask->private;
-	unsigned int i, as = crypto_aead_authsize(tfm);
+	unsigned int as = crypto_aead_authsize(tfm);
 	struct af_alg_async_req *areq;
-	struct af_alg_tsgl *tsgl, *tmp;
 	struct scatterlist *rsgl_src, *tsgl_src = NULL;
 	int err = 0;
 	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
···
 		outlen -= less;
 	}

+	/*
+	 * Create a per request TX SGL for this request which tracks the
+	 * SG entries from the global TX SGL.
+	 */
 	processed = used + ctx->aead_assoclen;
-	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
-		for (i = 0; i < tsgl->cur; i++) {
-			struct scatterlist *process_sg = tsgl->sg + i;
-
-			if (!(process_sg->length) || !sg_page(process_sg))
-				continue;
-			tsgl_src = process_sg;
-			break;
-		}
-		if (tsgl_src)
-			break;
-	}
-	if (processed && !tsgl_src) {
-		err = -EFAULT;
+	areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
+	if (!areq->tsgl_entries)
+		areq->tsgl_entries = 1;
+	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+						 areq->tsgl_entries),
+				  GFP_KERNEL);
+	if (!areq->tsgl) {
+		err = -ENOMEM;
 		goto free;
 	}
+	sg_init_table(areq->tsgl, areq->tsgl_entries);
+	af_alg_pull_tsgl(sk, processed, areq->tsgl);
+	tsgl_src = areq->tsgl;

 	/*
 	 * Copy of AAD from source to destination
···
 	 * when user space uses an in-place cipher operation, the kernel
 	 * will copy the data as it does not see whether such in-place operation
 	 * is initiated.
-	 *
-	 * To ensure efficiency, the following implementation ensure that the
-	 * ciphers are invoked to perform a crypto operation in-place. This
-	 * is achieved by memory management specified as follows.
 	 */

 	/* Use the RX SGL as source (and destination) for crypto op. */
 	rsgl_src = areq->first_rsgl.sgl.sgt.sgl;

-	if (ctx->enc) {
-		/*
-		 * Encryption operation - The in-place cipher operation is
-		 * achieved by the following operation:
-		 *
-		 * TX SGL: AAD || PT
-		 *          |      |
-		 *          | copy |
-		 *          v      v
-		 * RX SGL: AAD || PT || Tag
-		 */
-		memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
-			      processed);
-		af_alg_pull_tsgl(sk, processed, NULL, 0);
-	} else {
-		/*
-		 * Decryption operation - To achieve an in-place cipher
-		 * operation, the following SGL structure is used:
-		 *
-		 * TX SGL: AAD || CT || Tag
-		 *          |      |     ^
-		 *          | copy |     | Create SGL link.
-		 *          v      v     |
-		 * RX SGL: AAD || CT ----+
-		 */
-
-		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
-		memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
-
-		/* Create TX SGL for tag and chain it to RX SGL. */
-		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
-						       processed - as);
-		if (!areq->tsgl_entries)
-			areq->tsgl_entries = 1;
-		areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
-							 areq->tsgl_entries),
-					  GFP_KERNEL);
-		if (!areq->tsgl) {
-			err = -ENOMEM;
-			goto free;
-		}
-		sg_init_table(areq->tsgl, areq->tsgl_entries);
-
-		/* Release TX SGL, except for tag data and reassign tag data. */
-		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
-
-		/* chain the areq TX SGL holding the tag with RX SGL */
-		if (usedpages) {
-			/* RX SGL present */
-			struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
-			struct scatterlist *sg = sgl_prev->sgt.sgl;
-
-			sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
-			sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
-		} else
-			/* no RX SGL present (e.g. authentication only) */
-			rsgl_src = areq->tsgl;
-	}
+	memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);

 	/* Initialize the crypto operation */
-	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
+	aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
 			       areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
 	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
 	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
···
 	struct crypto_aead *tfm = pask->private;
 	unsigned int ivlen = crypto_aead_ivsize(tfm);

-	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+	af_alg_pull_tsgl(sk, ctx->used, NULL);
 	sock_kzfree_s(sk, ctx->iv, ivlen);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
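For orientation, a sketch of the resulting data flow in the style of the diagrams the patch removes (illustrative only, not part of the commit): the cipher now always reads from the per-request TX SGL and writes to the RX SGL, with only the AAD copied across beforehand.

/*
 * Out-of-place operation after this patch (sketch):
 *
 * Encryption                      Decryption
 *
 * TX SGL: AAD || PT               TX SGL: AAD || CT || Tag
 *          |                               |
 *          | copy AAD                      | copy AAD
 *          v                               v
 * RX SGL: AAD || CT || Tag        RX SGL: AAD || PT
 */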
+3 -3
crypto/algif_skcipher.c
···
 	 * Create a per request TX SGL for this request which tracks the
 	 * SG entries from the global TX SGL.
 	 */
-	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
+	areq->tsgl_entries = af_alg_count_tsgl(sk, len);
 	if (!areq->tsgl_entries)
 		areq->tsgl_entries = 1;
 	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
···
 		goto free;
 	}
 	sg_init_table(areq->tsgl, areq->tsgl_entries);
-	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
+	af_alg_pull_tsgl(sk, len, areq->tsgl);

 	/* Initialize the crypto operation */
 	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
···
 	struct alg_sock *pask = alg_sk(psk);
 	struct crypto_skcipher *tfm = pask->private;

-	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+	af_alg_pull_tsgl(sk, ctx->used, NULL);
 	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
 	if (ctx->state)
 		sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
+30 -20
crypto/authencesn.c
···
 	u8 *ohash = areq_ctx->tail;
 	unsigned int cryptlen = req->cryptlen - authsize;
 	unsigned int assoclen = req->assoclen;
+	struct scatterlist *src = req->src;
 	struct scatterlist *dst = req->dst;
 	u8 *ihash = ohash + crypto_ahash_digestsize(auth);
 	u32 tmp[2];
···
 	if (!authsize)
 		goto decrypt;

-	/* Move high-order bits of sequence number back. */
-	scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
-	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
-	scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
+	if (src == dst) {
+		/* Move high-order bits of sequence number back. */
+		scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+		scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+		scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
+	} else
+		memcpy_sglist(dst, src, assoclen);

 	if (crypto_memneq(ihash, ohash, authsize))
 		return -EBADMSG;

 decrypt:

-	sg_init_table(areq_ctx->dst, 2);
+	if (src != dst)
+		src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
 	dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);

 	skcipher_request_set_tfm(skreq, ctx->enc);
 	skcipher_request_set_callback(skreq, flags,
 				      req->base.complete, req->base.data);
-	skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
+	skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);

 	return crypto_skcipher_decrypt(skreq);
 }
···
 	unsigned int assoclen = req->assoclen;
 	unsigned int cryptlen = req->cryptlen;
 	u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+	struct scatterlist *src = req->src;
 	struct scatterlist *dst = req->dst;
 	u32 tmp[2];
 	int err;
···
 	if (assoclen < 8)
 		return -EINVAL;

-	cryptlen -= authsize;
-
-	if (req->src != dst)
-		memcpy_sglist(dst, req->src, assoclen + cryptlen);
-
-	scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
-				 authsize, 0);
-
 	if (!authsize)
 		goto tail;

-	/* Move high-order bits of sequence number to the end. */
-	scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
-	scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
-	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
+	cryptlen -= authsize;
+	scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
+				 authsize, 0);

-	sg_init_table(areq_ctx->dst, 2);
-	dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
+	/* Move high-order bits of sequence number to the end. */
+	scatterwalk_map_and_copy(tmp, src, 0, 8, 0);
+	if (src == dst) {
+		scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+		scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
+		dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
+	} else {
+		scatterwalk_map_and_copy(tmp, dst, 0, 4, 1);
+		scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen - 4, 4, 1);
+
+		src = scatterwalk_ffwd(areq_ctx->src, src, 8);
+		dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
+		memcpy_sglist(dst, src, assoclen + cryptlen - 8);
+		dst = req->dst;
+	}

 	ahash_request_set_tfm(ahreq, auth);
 	ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
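Both authencesn hunks are built from two scatterwalk helpers. As a hedged reminder of their semantics (the signatures are those of <crypto/scatterwalk.h>; move_bytes below is hypothetical): scatterwalk_map_and_copy(buf, sg, start, nbytes, out) copies nbytes between a linear buffer and an SG list at byte offset start, reading from the list when out is 0 and writing to it when out is 1, while scatterwalk_ffwd(tmp, sg, len) returns an SG list fast-forwarded len bytes into sg, using the two-entry tmp array when the offset lands inside an entry.

#include <crypto/scatterwalk.h>

/* Hypothetical helper showing the read-then-write pattern the hunks use
 * to relocate the high-order ESN bytes within an SG list (n <= 8). */
static void move_bytes(struct scatterlist *sg, unsigned int from,
		       unsigned int to, unsigned int n)
{
	u8 buf[8];	/* large enough for the 4- and 8-byte moves above */

	scatterwalk_map_and_copy(buf, sg, from, n, 0);	/* SG -> buf */
	scatterwalk_map_and_copy(buf, sg, to, n, 1);	/* buf -> SG */
}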
+7 -4
crypto/deflate.c
···

 	do {
 		unsigned int dcur;
+		unsigned long avail_in;

 		dcur = acomp_walk_next_dst(&walk);
-		if (!dcur) {
-			out_of_space = true;
-			break;
-		}

 		stream->avail_out = dcur;
 		stream->next_out = walk.dst.virt.addr;
+		avail_in = stream->avail_in;

 		ret = zlib_inflate(stream, Z_NO_FLUSH);
+
+		if (!dcur && avail_in == stream->avail_in) {
+			out_of_space = true;
+			break;
+		}

 		dcur -= stream->avail_out;
 		acomp_walk_done_dst(&walk, dcur);
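The kernel loop above now records avail_in before calling zlib_inflate() and only reports -ENOSPC when a zero-sized destination step coincides with zero input progress. The same rule can be reproduced with userspace zlib (a standalone sketch, not the kernel code; compile with -lz): the decompressor can still make progress on headers and internal state while avail_out is 0.

#include <assert.h>
#include <stdio.h>
#include <zlib.h>

int main(void)
{
	unsigned char plain[] = "hello deflate";
	unsigned char comp[64], out[64];
	uLongf clen = sizeof(comp);
	z_stream s = { 0 };
	int first = 1, ret;

	assert(compress(comp, &clen, plain, sizeof(plain)) == Z_OK);
	assert(inflateInit(&s) == Z_OK);
	s.next_in = comp;
	s.avail_in = clen;

	do {
		/* Hand inflate a zero-sized output step once, as the acomp
		 * walk may do when a destination segment is exhausted. */
		unsigned int dcur = first ? 0 : sizeof(out);
		uInt before = s.avail_in;

		first = 0;
		s.next_out = out;
		s.avail_out = dcur;

		ret = inflate(&s, Z_NO_FLUSH);

		/* The fixed condition: out of space only when there was no
		 * room AND no input byte was consumed. */
		if (!dcur && before == s.avail_in) {
			fprintf(stderr, "really out of space\n");
			return 1;
		}
	} while (ret == Z_OK);

	printf("ret=%d out=%s\n", ret, out);
	return !(ret == Z_STREAM_END) || inflateEnd(&s) != Z_OK;
}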
+2 -1
drivers/crypto/caam/caamalg_qi2.c
···
 	if (aligned_len < keylen)
 		return -EOVERFLOW;

-	hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
+	hashed_key = kmalloc(aligned_len, GFP_KERNEL);
 	if (!hashed_key)
 		return -ENOMEM;
+	memcpy(hashed_key, key, keylen);
 	ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
 	if (ret)
 		goto bad_free_key;
+2 -1
drivers/crypto/caam/caamhash.c
···
 	if (aligned_len < keylen)
 		return -EOVERFLOW;

-	hashed_key = kmemdup(key, keylen, GFP_KERNEL);
+	hashed_key = kmalloc(aligned_len, GFP_KERNEL);
 	if (!hashed_key)
 		return -ENOMEM;
+	memcpy(hashed_key, key, keylen);
 	ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
 	if (ret)
 		goto bad_free_key;
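Both caam hunks converge on the same pattern, which kmemdup() cannot express with either length: the qi2 variant read aligned_len bytes from a keylen-sized caller buffer (the overflow in the shortlog), while caamhash allocated only keylen bytes and later let the device DMA past the end (the DMA corruption). A standalone sketch of the corrected pattern (dup_key_for_dma is a hypothetical name; plain malloc stands in for kmalloc):

#include <stdlib.h>
#include <string.h>

void *dup_key_for_dma(const void *key, size_t keylen, size_t aligned_len)
{
	unsigned char *buf;

	if (aligned_len < keylen)	/* mirrors the -EOVERFLOW check */
		return NULL;

	buf = malloc(aligned_len);	/* full DMA-padded size... */
	if (!buf)
		return NULL;

	memcpy(buf, key, keylen);	/* ...but read only the caller's bytes */
	/* Not in the kernel patch: zero the tail so no stale heap bytes
	 * reach the device. */
	memset(buf + keylen, 0, aligned_len - keylen);
	return buf;
}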
+7 -4
drivers/crypto/tegra/tegra-se-aes.c
···
 		.cra_name = "cbc(aes)",
 		.cra_driver_name = "cbc-aes-tegra",
 		.cra_priority = 500,
-		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
 		.cra_alignmask = 0xf,
···
 		.cra_name = "ecb(aes)",
 		.cra_driver_name = "ecb-aes-tegra",
 		.cra_priority = 500,
-		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
 		.cra_alignmask = 0xf,
···
 		.cra_name = "ctr(aes)",
 		.cra_driver_name = "ctr-aes-tegra",
 		.cra_priority = 500,
-		.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
 		.cra_alignmask = 0xf,
···
 		.cra_name = "xts(aes)",
 		.cra_driver_name = "xts-aes-tegra",
 		.cra_priority = 500,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
 		.cra_alignmask = (__alignof__(u64) - 1),
···
 		.cra_name = "gcm(aes)",
 		.cra_driver_name = "gcm-aes-tegra",
 		.cra_priority = 500,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct tegra_aead_ctx),
 		.cra_alignmask = 0xf,
···
 		.cra_name = "ccm(aes)",
 		.cra_driver_name = "ccm-aes-tegra",
 		.cra_priority = 500,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct tegra_aead_ctx),
 		.cra_alignmask = 0xf,
···
 		.cra_name = "cmac(aes)",
 		.cra_driver_name = "tegra-se-cmac",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
 		.cra_alignmask = 0,
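The missing flag matters because callers that cannot handle asynchronous completion exclude async implementations at allocation time. A kernel-context sketch of such a caller (illustrative, not from the patch): with CRYPTO_ALG_ASYNC absent from cra_flags, tegra-se would wrongly match this request even though it completes requests asynchronously.

#include <crypto/skcipher.h>

static struct crypto_skcipher *get_sync_cbc_aes(void)
{
	/* type = 0, mask = CRYPTO_ALG_ASYNC: only implementations with
	 * the async bit clear may be returned. */
	return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}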
+17 -13
drivers/crypto/tegra/tegra-se-hash.c
···
 		.cra_name = "sha1",
 		.cra_driver_name = "tegra-se-sha1",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA1_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha224",
 		.cra_driver_name = "tegra-se-sha224",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA224_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha256",
 		.cra_driver_name = "tegra-se-sha256",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA256_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha384",
 		.cra_driver_name = "tegra-se-sha384",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA384_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha512",
 		.cra_driver_name = "tegra-se-sha512",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA512_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha3-224",
 		.cra_driver_name = "tegra-se-sha3-224",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA3_224_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha3-256",
 		.cra_driver_name = "tegra-se-sha3-256",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA3_256_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha3-384",
 		.cra_driver_name = "tegra-se-sha3-384",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA3_384_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "sha3-512",
 		.cra_driver_name = "tegra-se-sha3-512",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+		.cra_flags = CRYPTO_ALG_ASYNC,
 		.cra_blocksize = SHA3_512_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "hmac(sha224)",
 		.cra_driver_name = "tegra-se-hmac-sha224",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize = SHA224_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "hmac(sha256)",
 		.cra_driver_name = "tegra-se-hmac-sha256",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize = SHA256_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "hmac(sha384)",
 		.cra_driver_name = "tegra-se-hmac-sha384",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize = SHA384_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
···
 		.cra_name = "hmac(sha512)",
 		.cra_driver_name = "tegra-se-hmac-sha512",
 		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags = CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize = SHA512_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct tegra_sha_ctx),
 		.cra_alignmask = 0,
+2 -3
include/crypto/if_alg.h
···
 	return PAGE_SIZE <= af_alg_rcvbuf(sk);
 }

-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
-		      size_t dst_offset);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
 void af_alg_wmem_wakeup(struct sock *sk);
 int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
 int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
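With the offset parameters gone, the calling convention reduces to count-then-pull, as the algif_skcipher hunk shows. A condensed kernel-context sketch of that contract (detach_tx_pages is illustrative, not from the patch):

#include <crypto/if_alg.h>

static struct scatterlist *detach_tx_pages(struct sock *sk, size_t len)
{
	unsigned int n = af_alg_count_tsgl(sk, len) ?: 1;
	struct scatterlist *tsgl;

	tsgl = sock_kmalloc(sk, array_size(sizeof(*tsgl), n), GFP_KERNEL);
	if (!tsgl)
		return NULL;

	sg_init_table(tsgl, n);
	af_alg_pull_tsgl(sk, len, tsgl);	/* caller now owns the pages */
	return tsgl;
}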