Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sunrpc: Use skcipher and ahash/shash

This patch replaces uses of blkcipher with skcipher and the long
obsolete hash interface with either shash (for non-SG users) or
ahash.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+312 -229
+16 -16
include/linux/sunrpc/gss_krb5.h
··· 36 36 * 37 37 */ 38 38 39 - #include <linux/crypto.h> 39 + #include <crypto/skcipher.h> 40 40 #include <linux/sunrpc/auth_gss.h> 41 41 #include <linux/sunrpc/gss_err.h> 42 42 #include <linux/sunrpc/gss_asn1.h> ··· 71 71 const u32 keyed_cksum; /* is it a keyed cksum? */ 72 72 const u32 keybytes; /* raw key len, in bytes */ 73 73 const u32 keylength; /* final key len, in bytes */ 74 - u32 (*encrypt) (struct crypto_blkcipher *tfm, 74 + u32 (*encrypt) (struct crypto_skcipher *tfm, 75 75 void *iv, void *in, void *out, 76 76 int length); /* encryption function */ 77 - u32 (*decrypt) (struct crypto_blkcipher *tfm, 77 + u32 (*decrypt) (struct crypto_skcipher *tfm, 78 78 void *iv, void *in, void *out, 79 79 int length); /* decryption function */ 80 80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, ··· 98 98 u32 enctype; 99 99 u32 flags; 100 100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ 101 - struct crypto_blkcipher *enc; 102 - struct crypto_blkcipher *seq; 103 - struct crypto_blkcipher *acceptor_enc; 104 - struct crypto_blkcipher *initiator_enc; 105 - struct crypto_blkcipher *acceptor_enc_aux; 106 - struct crypto_blkcipher *initiator_enc_aux; 101 + struct crypto_skcipher *enc; 102 + struct crypto_skcipher *seq; 103 + struct crypto_skcipher *acceptor_enc; 104 + struct crypto_skcipher *initiator_enc; 105 + struct crypto_skcipher *acceptor_enc_aux; 106 + struct crypto_skcipher *initiator_enc_aux; 107 107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ 108 108 u8 cksum[GSS_KRB5_MAX_KEYLEN]; 109 109 s32 endtime; ··· 262 262 263 263 264 264 u32 265 - krb5_encrypt(struct crypto_blkcipher *key, 265 + krb5_encrypt(struct crypto_skcipher *key, 266 266 void *iv, void *in, void *out, int length); 267 267 268 268 u32 269 - krb5_decrypt(struct crypto_blkcipher *key, 269 + krb5_decrypt(struct crypto_skcipher *key, 270 270 void *iv, void *in, void *out, int length); 271 271 272 272 int 273 - gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf 
*outbuf, 273 + gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf, 274 274 int offset, struct page **pages); 275 275 276 276 int 277 - gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf, 277 + gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf, 278 278 int offset); 279 279 280 280 s32 281 281 krb5_make_seq_num(struct krb5_ctx *kctx, 282 - struct crypto_blkcipher *key, 282 + struct crypto_skcipher *key, 283 283 int direction, 284 284 u32 seqnum, unsigned char *cksum, unsigned char *buf); 285 285 ··· 320 320 321 321 int 322 322 krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, 323 - struct crypto_blkcipher *cipher, 323 + struct crypto_skcipher *cipher, 324 324 unsigned char *cksum); 325 325 326 326 int 327 327 krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, 328 - struct crypto_blkcipher *cipher, 328 + struct crypto_skcipher *cipher, 329 329 s32 seqnum); 330 330 void 331 331 gss_krb5_make_confounder(char *p, u32 conflen);
+221 -141
net/sunrpc/auth_gss/gss_krb5_crypto.c
··· 34 34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 35 35 */ 36 36 37 + #include <crypto/hash.h> 38 + #include <crypto/skcipher.h> 37 39 #include <linux/err.h> 38 40 #include <linux/types.h> 39 41 #include <linux/mm.h> 40 42 #include <linux/scatterlist.h> 41 - #include <linux/crypto.h> 42 43 #include <linux/highmem.h> 43 44 #include <linux/pagemap.h> 44 45 #include <linux/random.h> ··· 52 51 53 52 u32 54 53 krb5_encrypt( 55 - struct crypto_blkcipher *tfm, 54 + struct crypto_skcipher *tfm, 56 55 void * iv, 57 56 void * in, 58 57 void * out, ··· 61 60 u32 ret = -EINVAL; 62 61 struct scatterlist sg[1]; 63 62 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; 64 - struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; 63 + SKCIPHER_REQUEST_ON_STACK(req, tfm); 65 64 66 - if (length % crypto_blkcipher_blocksize(tfm) != 0) 65 + if (length % crypto_skcipher_blocksize(tfm) != 0) 67 66 goto out; 68 67 69 - if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 68 + if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 70 69 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n", 71 - crypto_blkcipher_ivsize(tfm)); 70 + crypto_skcipher_ivsize(tfm)); 72 71 goto out; 73 72 } 74 73 75 74 if (iv) 76 - memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); 75 + memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm)); 77 76 78 77 memcpy(out, in, length); 79 78 sg_init_one(sg, out, length); 80 79 81 - ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); 80 + skcipher_request_set_callback(req, 0, NULL, NULL); 81 + skcipher_request_set_crypt(req, sg, sg, length, local_iv); 82 + 83 + ret = crypto_skcipher_encrypt(req); 84 + skcipher_request_zero(req); 82 85 out: 83 86 dprintk("RPC: krb5_encrypt returns %d\n", ret); 84 87 return ret; ··· 90 85 91 86 u32 92 87 krb5_decrypt( 93 - struct crypto_blkcipher *tfm, 88 + struct crypto_skcipher *tfm, 94 89 void * iv, 95 90 void * in, 96 91 void * out, ··· 99 94 u32 ret = -EINVAL; 100 95 struct scatterlist 
sg[1]; 101 96 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; 102 - struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; 97 + SKCIPHER_REQUEST_ON_STACK(req, tfm); 103 98 104 - if (length % crypto_blkcipher_blocksize(tfm) != 0) 99 + if (length % crypto_skcipher_blocksize(tfm) != 0) 105 100 goto out; 106 101 107 - if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 102 + if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 108 103 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n", 109 - crypto_blkcipher_ivsize(tfm)); 104 + crypto_skcipher_ivsize(tfm)); 110 105 goto out; 111 106 } 112 107 if (iv) 113 - memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); 108 + memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm)); 114 109 115 110 memcpy(out, in, length); 116 111 sg_init_one(sg, out, length); 117 112 118 - ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); 113 + skcipher_request_set_callback(req, 0, NULL, NULL); 114 + skcipher_request_set_crypt(req, sg, sg, length, local_iv); 115 + 116 + ret = crypto_skcipher_decrypt(req); 117 + skcipher_request_zero(req); 119 118 out: 120 119 dprintk("RPC: gss_k5decrypt returns %d\n",ret); 121 120 return ret; ··· 128 119 static int 129 120 checksummer(struct scatterlist *sg, void *data) 130 121 { 131 - struct hash_desc *desc = data; 122 + struct ahash_request *req = data; 132 123 133 - return crypto_hash_update(desc, sg, sg->length); 124 + ahash_request_set_crypt(req, sg, NULL, sg->length); 125 + 126 + return crypto_ahash_update(req); 134 127 } 135 128 136 129 static int ··· 163 152 struct xdr_buf *body, int body_offset, u8 *cksumkey, 164 153 unsigned int usage, struct xdr_netobj *cksumout) 165 154 { 166 - struct hash_desc desc; 167 155 struct scatterlist sg[1]; 168 156 int err; 169 157 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 170 158 u8 rc4salt[4]; 171 - struct crypto_hash *md5; 172 - struct crypto_hash *hmac_md5; 159 + struct crypto_ahash *md5; 160 + struct crypto_ahash *hmac_md5; 161 + struct 
ahash_request *req; 173 162 174 163 if (cksumkey == NULL) 175 164 return GSS_S_FAILURE; ··· 185 174 return GSS_S_FAILURE; 186 175 } 187 176 188 - md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 177 + md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 189 178 if (IS_ERR(md5)) 190 179 return GSS_S_FAILURE; 191 180 192 - hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, 193 - CRYPTO_ALG_ASYNC); 181 + hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, 182 + CRYPTO_ALG_ASYNC); 194 183 if (IS_ERR(hmac_md5)) { 195 - crypto_free_hash(md5); 184 + crypto_free_ahash(md5); 196 185 return GSS_S_FAILURE; 197 186 } 198 187 199 - desc.tfm = md5; 200 - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 188 + req = ahash_request_alloc(md5, GFP_KERNEL); 189 + if (!req) { 190 + crypto_free_ahash(hmac_md5); 191 + crypto_free_ahash(md5); 192 + return GSS_S_FAILURE; 193 + } 201 194 202 - err = crypto_hash_init(&desc); 195 + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 196 + 197 + err = crypto_ahash_init(req); 203 198 if (err) 204 199 goto out; 205 200 sg_init_one(sg, rc4salt, 4); 206 - err = crypto_hash_update(&desc, sg, 4); 201 + ahash_request_set_crypt(req, sg, NULL, 4); 202 + err = crypto_ahash_update(req); 207 203 if (err) 208 204 goto out; 209 205 210 206 sg_init_one(sg, header, hdrlen); 211 - err = crypto_hash_update(&desc, sg, hdrlen); 207 + ahash_request_set_crypt(req, sg, NULL, hdrlen); 208 + err = crypto_ahash_update(req); 212 209 if (err) 213 210 goto out; 214 211 err = xdr_process_buf(body, body_offset, body->len - body_offset, 215 - checksummer, &desc); 212 + checksummer, req); 216 213 if (err) 217 214 goto out; 218 - err = crypto_hash_final(&desc, checksumdata); 219 - if (err) 220 - goto out; 221 - 222 - desc.tfm = hmac_md5; 223 - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 224 - 225 - err = crypto_hash_init(&desc); 226 - if (err) 227 - goto out; 228 - err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength); 215 + 
ahash_request_set_crypt(req, NULL, checksumdata, 0); 216 + err = crypto_ahash_final(req); 229 217 if (err) 230 218 goto out; 231 219 232 - sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5)); 233 - err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5), 234 - checksumdata); 220 + ahash_request_free(req); 221 + req = ahash_request_alloc(hmac_md5, GFP_KERNEL); 222 + if (!req) { 223 + crypto_free_ahash(hmac_md5); 224 + crypto_free_ahash(md5); 225 + return GSS_S_FAILURE; 226 + } 227 + 228 + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 229 + 230 + err = crypto_ahash_init(req); 231 + if (err) 232 + goto out; 233 + err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength); 234 + if (err) 235 + goto out; 236 + 237 + sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5)); 238 + ahash_request_set_crypt(req, sg, checksumdata, 239 + crypto_ahash_digestsize(md5)); 240 + err = crypto_ahash_digest(req); 235 241 if (err) 236 242 goto out; 237 243 238 244 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); 239 245 cksumout->len = kctx->gk5e->cksumlength; 240 246 out: 241 - crypto_free_hash(md5); 242 - crypto_free_hash(hmac_md5); 247 + ahash_request_free(req); 248 + crypto_free_ahash(md5); 249 + crypto_free_ahash(hmac_md5); 243 250 return err ? 
GSS_S_FAILURE : 0; 244 251 } 245 252 ··· 271 242 struct xdr_buf *body, int body_offset, u8 *cksumkey, 272 243 unsigned int usage, struct xdr_netobj *cksumout) 273 244 { 274 - struct hash_desc desc; 245 + struct crypto_ahash *tfm; 246 + struct ahash_request *req; 275 247 struct scatterlist sg[1]; 276 248 int err; 277 249 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; ··· 289 259 return GSS_S_FAILURE; 290 260 } 291 261 292 - desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 293 - if (IS_ERR(desc.tfm)) 262 + tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 263 + if (IS_ERR(tfm)) 294 264 return GSS_S_FAILURE; 295 - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 296 265 297 - checksumlen = crypto_hash_digestsize(desc.tfm); 266 + req = ahash_request_alloc(tfm, GFP_KERNEL); 267 + if (!req) { 268 + crypto_free_ahash(tfm); 269 + return GSS_S_FAILURE; 270 + } 271 + 272 + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 273 + 274 + checksumlen = crypto_ahash_digestsize(tfm); 298 275 299 276 if (cksumkey != NULL) { 300 - err = crypto_hash_setkey(desc.tfm, cksumkey, 301 - kctx->gk5e->keylength); 277 + err = crypto_ahash_setkey(tfm, cksumkey, 278 + kctx->gk5e->keylength); 302 279 if (err) 303 280 goto out; 304 281 } 305 282 306 - err = crypto_hash_init(&desc); 283 + err = crypto_ahash_init(req); 307 284 if (err) 308 285 goto out; 309 286 sg_init_one(sg, header, hdrlen); 310 - err = crypto_hash_update(&desc, sg, hdrlen); 287 + ahash_request_set_crypt(req, sg, NULL, hdrlen); 288 + err = crypto_ahash_update(req); 311 289 if (err) 312 290 goto out; 313 291 err = xdr_process_buf(body, body_offset, body->len - body_offset, 314 - checksummer, &desc); 292 + checksummer, req); 315 293 if (err) 316 294 goto out; 317 - err = crypto_hash_final(&desc, checksumdata); 295 + ahash_request_set_crypt(req, NULL, checksumdata, 0); 296 + err = crypto_ahash_final(req); 318 297 if (err) 319 298 goto out; 320 299 ··· 346 307 } 347 308 cksumout->len 
= kctx->gk5e->cksumlength; 348 309 out: 349 - crypto_free_hash(desc.tfm); 310 + ahash_request_free(req); 311 + crypto_free_ahash(tfm); 350 312 return err ? GSS_S_FAILURE : 0; 351 313 } 352 314 ··· 363 323 struct xdr_buf *body, int body_offset, u8 *cksumkey, 364 324 unsigned int usage, struct xdr_netobj *cksumout) 365 325 { 366 - struct hash_desc desc; 326 + struct crypto_ahash *tfm; 327 + struct ahash_request *req; 367 328 struct scatterlist sg[1]; 368 329 int err; 369 330 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; ··· 381 340 return GSS_S_FAILURE; 382 341 } 383 342 384 - desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, 385 - CRYPTO_ALG_ASYNC); 386 - if (IS_ERR(desc.tfm)) 343 + tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 344 + if (IS_ERR(tfm)) 387 345 return GSS_S_FAILURE; 388 - checksumlen = crypto_hash_digestsize(desc.tfm); 389 - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 346 + checksumlen = crypto_ahash_digestsize(tfm); 390 347 391 - err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength); 348 + req = ahash_request_alloc(tfm, GFP_KERNEL); 349 + if (!req) { 350 + crypto_free_ahash(tfm); 351 + return GSS_S_FAILURE; 352 + } 353 + 354 + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 355 + 356 + err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength); 392 357 if (err) 393 358 goto out; 394 359 395 - err = crypto_hash_init(&desc); 360 + err = crypto_ahash_init(req); 396 361 if (err) 397 362 goto out; 398 363 err = xdr_process_buf(body, body_offset, body->len - body_offset, 399 - checksummer, &desc); 364 + checksummer, req); 400 365 if (err) 401 366 goto out; 402 367 if (header != NULL) { 403 368 sg_init_one(sg, header, hdrlen); 404 - err = crypto_hash_update(&desc, sg, hdrlen); 369 + ahash_request_set_crypt(req, sg, NULL, hdrlen); 370 + err = crypto_ahash_update(req); 405 371 if (err) 406 372 goto out; 407 373 } 408 - err = crypto_hash_final(&desc, checksumdata); 374 + ahash_request_set_crypt(req, 
NULL, checksumdata, 0); 375 + err = crypto_ahash_final(req); 409 376 if (err) 410 377 goto out; 411 378 ··· 430 381 break; 431 382 } 432 383 out: 433 - crypto_free_hash(desc.tfm); 384 + ahash_request_free(req); 385 + crypto_free_ahash(tfm); 434 386 return err ? GSS_S_FAILURE : 0; 435 387 } 436 388 437 389 struct encryptor_desc { 438 390 u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; 439 - struct blkcipher_desc desc; 391 + struct skcipher_request *req; 440 392 int pos; 441 393 struct xdr_buf *outbuf; 442 394 struct page **pages; ··· 452 402 { 453 403 struct encryptor_desc *desc = data; 454 404 struct xdr_buf *outbuf = desc->outbuf; 405 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req); 455 406 struct page *in_page; 456 407 int thislen = desc->fraglen + sg->length; 457 408 int fraglen, ret; ··· 478 427 desc->fraglen += sg->length; 479 428 desc->pos += sg->length; 480 429 481 - fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); 430 + fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1); 482 431 thislen -= fraglen; 483 432 484 433 if (thislen == 0) ··· 487 436 sg_mark_end(&desc->infrags[desc->fragno - 1]); 488 437 sg_mark_end(&desc->outfrags[desc->fragno - 1]); 489 438 490 - ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, 491 - desc->infrags, thislen); 439 + skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags, 440 + thislen, desc->iv); 441 + 442 + ret = crypto_skcipher_encrypt(desc->req); 492 443 if (ret) 493 444 return ret; 494 445 ··· 512 459 } 513 460 514 461 int 515 - gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, 462 + gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf, 516 463 int offset, struct page **pages) 517 464 { 518 465 int ret; 519 466 struct encryptor_desc desc; 467 + SKCIPHER_REQUEST_ON_STACK(req, tfm); 520 468 521 - BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0); 469 + BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0); 470 
+ 471 + skcipher_request_set_tfm(req, tfm); 472 + skcipher_request_set_callback(req, 0, NULL, NULL); 522 473 523 474 memset(desc.iv, 0, sizeof(desc.iv)); 524 - desc.desc.tfm = tfm; 525 - desc.desc.info = desc.iv; 526 - desc.desc.flags = 0; 475 + desc.req = req; 527 476 desc.pos = offset; 528 477 desc.outbuf = buf; 529 478 desc.pages = pages; ··· 536 481 sg_init_table(desc.outfrags, 4); 537 482 538 483 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); 484 + skcipher_request_zero(req); 539 485 return ret; 540 486 } 541 487 542 488 struct decryptor_desc { 543 489 u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; 544 - struct blkcipher_desc desc; 490 + struct skcipher_request *req; 545 491 struct scatterlist frags[4]; 546 492 int fragno; 547 493 int fraglen; ··· 553 497 { 554 498 struct decryptor_desc *desc = data; 555 499 int thislen = desc->fraglen + sg->length; 500 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req); 556 501 int fraglen, ret; 557 502 558 503 /* Worst case is 4 fragments: head, end of page 1, start ··· 564 507 desc->fragno++; 565 508 desc->fraglen += sg->length; 566 509 567 - fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); 510 + fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1); 568 511 thislen -= fraglen; 569 512 570 513 if (thislen == 0) ··· 572 515 573 516 sg_mark_end(&desc->frags[desc->fragno - 1]); 574 517 575 - ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, 576 - desc->frags, thislen); 518 + skcipher_request_set_crypt(desc->req, desc->frags, desc->frags, 519 + thislen, desc->iv); 520 + 521 + ret = crypto_skcipher_decrypt(desc->req); 577 522 if (ret) 578 523 return ret; 579 524 ··· 594 535 } 595 536 596 537 int 597 - gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, 538 + gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf, 598 539 int offset) 599 540 { 541 + int ret; 600 542 struct decryptor_desc desc; 543 + SKCIPHER_REQUEST_ON_STACK(req, tfm); 601 
544 602 545 /* XXXJBF: */ 603 - BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0); 546 + BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0); 547 + 548 + skcipher_request_set_tfm(req, tfm); 549 + skcipher_request_set_callback(req, 0, NULL, NULL); 604 550 605 551 memset(desc.iv, 0, sizeof(desc.iv)); 606 - desc.desc.tfm = tfm; 607 - desc.desc.info = desc.iv; 608 - desc.desc.flags = 0; 552 + desc.req = req; 609 553 desc.fragno = 0; 610 554 desc.fraglen = 0; 611 555 612 556 sg_init_table(desc.frags, 4); 613 557 614 - return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); 558 + ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); 559 + skcipher_request_zero(req); 560 + return ret; 615 561 } 616 562 617 563 /* ··· 658 594 } 659 595 660 596 static u32 661 - gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf, 597 + gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf, 662 598 u32 offset, u8 *iv, struct page **pages, int encrypt) 663 599 { 664 600 u32 ret; 665 601 struct scatterlist sg[1]; 666 - struct blkcipher_desc desc = { .tfm = cipher, .info = iv }; 602 + SKCIPHER_REQUEST_ON_STACK(req, cipher); 667 603 u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2]; 668 604 struct page **save_pages; 669 605 u32 len = buf->len - offset; ··· 689 625 690 626 sg_init_one(sg, data, len); 691 627 628 + skcipher_request_set_tfm(req, cipher); 629 + skcipher_request_set_callback(req, 0, NULL, NULL); 630 + skcipher_request_set_crypt(req, sg, sg, len, iv); 631 + 692 632 if (encrypt) 693 - ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); 633 + ret = crypto_skcipher_encrypt(req); 694 634 else 695 - ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len); 635 + ret = crypto_skcipher_decrypt(req); 636 + 637 + skcipher_request_zero(req); 696 638 697 639 if (ret) 698 640 goto out; ··· 717 647 struct xdr_netobj hmac; 718 648 u8 *cksumkey; 719 649 u8 *ecptr; 720 - struct crypto_blkcipher *cipher, 
*aux_cipher; 650 + struct crypto_skcipher *cipher, *aux_cipher; 721 651 int blocksize; 722 652 struct page **save_pages; 723 653 int nblocks, nbytes; ··· 736 666 cksumkey = kctx->acceptor_integ; 737 667 usage = KG_USAGE_ACCEPTOR_SEAL; 738 668 } 739 - blocksize = crypto_blkcipher_blocksize(cipher); 669 + blocksize = crypto_skcipher_blocksize(cipher); 740 670 741 671 /* hide the gss token header and insert the confounder */ 742 672 offset += GSS_KRB5_TOK_HDR_LEN; ··· 789 719 memset(desc.iv, 0, sizeof(desc.iv)); 790 720 791 721 if (cbcbytes) { 722 + SKCIPHER_REQUEST_ON_STACK(req, aux_cipher); 723 + 792 724 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; 793 725 desc.fragno = 0; 794 726 desc.fraglen = 0; 795 727 desc.pages = pages; 796 728 desc.outbuf = buf; 797 - desc.desc.info = desc.iv; 798 - desc.desc.flags = 0; 799 - desc.desc.tfm = aux_cipher; 729 + desc.req = req; 730 + 731 + skcipher_request_set_tfm(req, aux_cipher); 732 + skcipher_request_set_callback(req, 0, NULL, NULL); 800 733 801 734 sg_init_table(desc.infrags, 4); 802 735 sg_init_table(desc.outfrags, 4); 803 736 804 737 err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN, 805 738 cbcbytes, encryptor, &desc); 739 + skcipher_request_zero(req); 806 740 if (err) 807 741 goto out_err; 808 742 } ··· 837 763 struct xdr_buf subbuf; 838 764 u32 ret = 0; 839 765 u8 *cksum_key; 840 - struct crypto_blkcipher *cipher, *aux_cipher; 766 + struct crypto_skcipher *cipher, *aux_cipher; 841 767 struct xdr_netobj our_hmac_obj; 842 768 u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN]; 843 769 u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; ··· 856 782 cksum_key = kctx->initiator_integ; 857 783 usage = KG_USAGE_INITIATOR_SEAL; 858 784 } 859 - blocksize = crypto_blkcipher_blocksize(cipher); 785 + blocksize = crypto_skcipher_blocksize(cipher); 860 786 861 787 862 788 /* create a segment skipping the header and leaving out the checksum */ ··· 873 799 memset(desc.iv, 0, sizeof(desc.iv)); 874 800 875 801 if (cbcbytes) { 802 + SKCIPHER_REQUEST_ON_STACK(req, 
aux_cipher); 803 + 876 804 desc.fragno = 0; 877 805 desc.fraglen = 0; 878 - desc.desc.info = desc.iv; 879 - desc.desc.flags = 0; 880 - desc.desc.tfm = aux_cipher; 806 + desc.req = req; 807 + 808 + skcipher_request_set_tfm(req, aux_cipher); 809 + skcipher_request_set_callback(req, 0, NULL, NULL); 881 810 882 811 sg_init_table(desc.frags, 4); 883 812 884 813 ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); 814 + skcipher_request_zero(req); 885 815 if (ret) 886 816 goto out_err; 887 817 } ··· 928 850 * Set the key of the given cipher. 929 851 */ 930 852 int 931 - krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, 853 + krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher, 932 854 unsigned char *cksum) 933 855 { 934 - struct crypto_hash *hmac; 935 - struct hash_desc desc; 936 - struct scatterlist sg[1]; 856 + struct crypto_shash *hmac; 857 + struct shash_desc *desc; 937 858 u8 Kseq[GSS_KRB5_MAX_KEYLEN]; 938 859 u32 zeroconstant = 0; 939 860 int err; 940 861 941 862 dprintk("%s: entered\n", __func__); 942 863 943 - hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 864 + hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0); 944 865 if (IS_ERR(hmac)) { 945 866 dprintk("%s: error %ld, allocating hash '%s'\n", 946 867 __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); 947 868 return PTR_ERR(hmac); 948 869 } 949 870 950 - desc.tfm = hmac; 951 - desc.flags = 0; 871 + desc = kmalloc(sizeof(*desc), GFP_KERNEL); 872 + if (!desc) { 873 + dprintk("%s: failed to allocate shash descriptor for '%s'\n", 874 + __func__, kctx->gk5e->cksum_name); 875 + crypto_free_shash(hmac); 876 + return -ENOMEM; 877 + } 952 878 953 - err = crypto_hash_init(&desc); 954 - if (err) 955 - goto out_err; 879 + desc->tfm = hmac; 880 + desc->flags = 0; 956 881 957 882 /* Compute intermediate Kseq from session key */ 958 - err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength); 883 + err = 
crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength); 959 884 if (err) 960 885 goto out_err; 961 886 962 - sg_init_one(sg, &zeroconstant, 4); 963 - err = crypto_hash_digest(&desc, sg, 4, Kseq); 887 + err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq); 964 888 if (err) 965 889 goto out_err; 966 890 967 891 /* Compute final Kseq from the checksum and intermediate Kseq */ 968 - err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength); 892 + err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength); 969 893 if (err) 970 894 goto out_err; 971 895 972 - sg_set_buf(sg, cksum, 8); 973 - 974 - err = crypto_hash_digest(&desc, sg, 8, Kseq); 896 + err = crypto_shash_digest(desc, cksum, 8, Kseq); 975 897 if (err) 976 898 goto out_err; 977 899 978 - err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); 900 + err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); 979 901 if (err) 980 902 goto out_err; 981 903 982 904 err = 0; 983 905 984 906 out_err: 985 - crypto_free_hash(hmac); 907 + kzfree(desc); 908 + crypto_free_shash(hmac); 986 909 dprintk("%s: returning %d\n", __func__, err); 987 910 return err; 988 911 } ··· 993 914 * Set the key of cipher kctx->enc. 
994 915 */ 995 916 int 996 - krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, 917 + krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher, 997 918 s32 seqnum) 998 919 { 999 - struct crypto_hash *hmac; 1000 - struct hash_desc desc; 1001 - struct scatterlist sg[1]; 920 + struct crypto_shash *hmac; 921 + struct shash_desc *desc; 1002 922 u8 Kcrypt[GSS_KRB5_MAX_KEYLEN]; 1003 923 u8 zeroconstant[4] = {0}; 1004 924 u8 seqnumarray[4]; ··· 1005 927 1006 928 dprintk("%s: entered, seqnum %u\n", __func__, seqnum); 1007 929 1008 - hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 930 + hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0); 1009 931 if (IS_ERR(hmac)) { 1010 932 dprintk("%s: error %ld, allocating hash '%s'\n", 1011 933 __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); 1012 934 return PTR_ERR(hmac); 1013 935 } 1014 936 1015 - desc.tfm = hmac; 1016 - desc.flags = 0; 937 + desc = kmalloc(sizeof(*desc), GFP_KERNEL); 938 + if (!desc) { 939 + dprintk("%s: failed to allocate shash descriptor for '%s'\n", 940 + __func__, kctx->gk5e->cksum_name); 941 + crypto_free_shash(hmac); 942 + return -ENOMEM; 943 + } 1017 944 1018 - err = crypto_hash_init(&desc); 1019 - if (err) 1020 - goto out_err; 945 + desc->tfm = hmac; 946 + desc->flags = 0; 1021 947 1022 948 /* Compute intermediate Kcrypt from session key */ 1023 949 for (i = 0; i < kctx->gk5e->keylength; i++) 1024 950 Kcrypt[i] = kctx->Ksess[i] ^ 0xf0; 1025 951 1026 - err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); 952 + err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); 1027 953 if (err) 1028 954 goto out_err; 1029 955 1030 - sg_init_one(sg, zeroconstant, 4); 1031 - err = crypto_hash_digest(&desc, sg, 4, Kcrypt); 956 + err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt); 1032 957 if (err) 1033 958 goto out_err; 1034 959 1035 960 /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */ 1036 - err = 
crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); 961 + err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); 1037 962 if (err) 1038 963 goto out_err; 1039 964 ··· 1045 964 seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff); 1046 965 seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff); 1047 966 1048 - sg_set_buf(sg, seqnumarray, 4); 1049 - 1050 - err = crypto_hash_digest(&desc, sg, 4, Kcrypt); 967 + err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt); 1051 968 if (err) 1052 969 goto out_err; 1053 970 1054 - err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength); 971 + err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength); 1055 972 if (err) 1056 973 goto out_err; 1057 974 1058 975 err = 0; 1059 976 1060 977 out_err: 1061 - crypto_free_hash(hmac); 978 + kzfree(desc); 979 + crypto_free_shash(hmac); 1062 980 dprintk("%s: returning %d\n", __func__, err); 1063 981 return err; 1064 982 }
+6 -6
net/sunrpc/auth_gss/gss_krb5_keys.c
··· 54 54 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 55 55 */ 56 56 57 + #include <crypto/skcipher.h> 57 58 #include <linux/err.h> 58 59 #include <linux/types.h> 59 - #include <linux/crypto.h> 60 60 #include <linux/sunrpc/gss_krb5.h> 61 61 #include <linux/sunrpc/xdr.h> 62 62 #include <linux/lcm.h> ··· 147 147 size_t blocksize, keybytes, keylength, n; 148 148 unsigned char *inblockdata, *outblockdata, *rawkey; 149 149 struct xdr_netobj inblock, outblock; 150 - struct crypto_blkcipher *cipher; 150 + struct crypto_skcipher *cipher; 151 151 u32 ret = EINVAL; 152 152 153 153 blocksize = gk5e->blocksize; ··· 157 157 if ((inkey->len != keylength) || (outkey->len != keylength)) 158 158 goto err_return; 159 159 160 - cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0, 161 - CRYPTO_ALG_ASYNC); 160 + cipher = crypto_alloc_skcipher(gk5e->encrypt_name, 0, 161 + CRYPTO_ALG_ASYNC); 162 162 if (IS_ERR(cipher)) 163 163 goto err_return; 164 - if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len)) 164 + if (crypto_skcipher_setkey(cipher, inkey->data, inkey->len)) 165 165 goto err_return; 166 166 167 167 /* allocate and set up buffers */ ··· 238 238 memset(inblockdata, 0, blocksize); 239 239 kfree(inblockdata); 240 240 err_free_cipher: 241 - crypto_free_blkcipher(cipher); 241 + crypto_free_skcipher(cipher); 242 242 err_return: 243 243 return ret; 244 244 }
+46 -43
net/sunrpc/auth_gss/gss_krb5_mech.c
··· 34 34 * 35 35 */ 36 36 37 + #include <crypto/hash.h> 38 + #include <crypto/skcipher.h> 37 39 #include <linux/err.h> 38 40 #include <linux/module.h> 39 41 #include <linux/init.h> ··· 44 42 #include <linux/sunrpc/auth.h> 45 43 #include <linux/sunrpc/gss_krb5.h> 46 44 #include <linux/sunrpc/xdr.h> 47 - #include <linux/crypto.h> 48 45 #include <linux/sunrpc/gss_krb5_enctypes.h> 49 46 50 47 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) ··· 218 217 219 218 static inline const void * 220 219 get_key(const void *p, const void *end, 221 - struct krb5_ctx *ctx, struct crypto_blkcipher **res) 220 + struct krb5_ctx *ctx, struct crypto_skcipher **res) 222 221 { 223 222 struct xdr_netobj key; 224 223 int alg; ··· 246 245 if (IS_ERR(p)) 247 246 goto out_err; 248 247 249 - *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, 248 + *res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0, 250 249 CRYPTO_ALG_ASYNC); 251 250 if (IS_ERR(*res)) { 252 251 printk(KERN_WARNING "gss_kerberos_mech: unable to initialize " ··· 254 253 *res = NULL; 255 254 goto out_err_free_key; 256 255 } 257 - if (crypto_blkcipher_setkey(*res, key.data, key.len)) { 256 + if (crypto_skcipher_setkey(*res, key.data, key.len)) { 258 257 printk(KERN_WARNING "gss_kerberos_mech: error setting key for " 259 258 "crypto algorithm %s\n", ctx->gk5e->encrypt_name); 260 259 goto out_err_free_tfm; ··· 264 263 return p; 265 264 266 265 out_err_free_tfm: 267 - crypto_free_blkcipher(*res); 266 + crypto_free_skcipher(*res); 268 267 out_err_free_key: 269 268 kfree(key.data); 270 269 p = ERR_PTR(-EINVAL); ··· 336 335 return 0; 337 336 338 337 out_err_free_key2: 339 - crypto_free_blkcipher(ctx->seq); 338 + crypto_free_skcipher(ctx->seq); 340 339 out_err_free_key1: 341 - crypto_free_blkcipher(ctx->enc); 340 + crypto_free_skcipher(ctx->enc); 342 341 out_err_free_mech: 343 342 kfree(ctx->mech_used.data); 344 343 out_err: 345 344 return PTR_ERR(p); 346 345 } 347 346 348 - static struct crypto_blkcipher * 347 + static struct 
crypto_skcipher * 349 348 context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) 350 349 { 351 - struct crypto_blkcipher *cp; 350 + struct crypto_skcipher *cp; 352 351 353 - cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC); 352 + cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC); 354 353 if (IS_ERR(cp)) { 355 354 dprintk("gss_kerberos_mech: unable to initialize " 356 355 "crypto algorithm %s\n", cname); 357 356 return NULL; 358 357 } 359 - if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) { 358 + if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) { 360 359 dprintk("gss_kerberos_mech: error setting key for " 361 360 "crypto algorithm %s\n", cname); 362 - crypto_free_blkcipher(cp); 361 + crypto_free_skcipher(cp); 363 362 return NULL; 364 363 } 365 364 return cp; ··· 413 412 return 0; 414 413 415 414 out_free_enc: 416 - crypto_free_blkcipher(ctx->enc); 415 + crypto_free_skcipher(ctx->enc); 417 416 out_free_seq: 418 - crypto_free_blkcipher(ctx->seq); 417 + crypto_free_skcipher(ctx->seq); 419 418 out_err: 420 419 return -EINVAL; 421 420 } ··· 428 427 static int 429 428 context_derive_keys_rc4(struct krb5_ctx *ctx) 430 429 { 431 - struct crypto_hash *hmac; 430 + struct crypto_shash *hmac; 432 431 char sigkeyconstant[] = "signaturekey"; 433 432 int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ 434 - struct hash_desc desc; 435 - struct scatterlist sg[1]; 433 + struct shash_desc *desc; 436 434 int err; 437 435 438 436 dprintk("RPC: %s: entered\n", __func__); 439 437 /* 440 438 * derive cksum (aka Ksign) key 441 439 */ 442 - hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 440 + hmac = crypto_alloc_shash(ctx->gk5e->cksum_name, 0, 0); 443 441 if (IS_ERR(hmac)) { 444 442 dprintk("%s: error %ld allocating hash '%s'\n", 445 443 __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name); ··· 446 446 goto out_err; 447 447 } 448 448 449 - err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength); 
449 + err = crypto_shash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength); 450 450 if (err) 451 451 goto out_err_free_hmac; 452 452 453 - sg_init_table(sg, 1); 454 - sg_set_buf(sg, sigkeyconstant, slen); 455 453 456 - desc.tfm = hmac; 457 - desc.flags = 0; 458 - 459 - err = crypto_hash_init(&desc); 460 - if (err) 454 + desc = kmalloc(sizeof(*desc), GFP_KERNEL); 455 + if (!desc) { 456 + dprintk("%s: failed to allocate hash descriptor for '%s'\n", 457 + __func__, ctx->gk5e->cksum_name); 458 + err = -ENOMEM; 461 459 goto out_err_free_hmac; 460 + } 462 461 463 - err = crypto_hash_digest(&desc, sg, slen, ctx->cksum); 462 + desc->tfm = hmac; 463 + desc->flags = 0; 464 + 465 + err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum); 466 + kzfree(desc); 464 467 if (err) 465 468 goto out_err_free_hmac; 466 469 /* 467 - * allocate hash, and blkciphers for data and seqnum encryption 470 + * allocate hash, and skciphers for data and seqnum encryption 468 471 */ 469 - ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, 470 - CRYPTO_ALG_ASYNC); 472 + ctx->enc = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0, 473 + CRYPTO_ALG_ASYNC); 471 474 if (IS_ERR(ctx->enc)) { 472 475 err = PTR_ERR(ctx->enc); 473 476 goto out_err_free_hmac; 474 477 } 475 478 476 - ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, 477 - CRYPTO_ALG_ASYNC); 479 + ctx->seq = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0, 480 + CRYPTO_ALG_ASYNC); 478 481 if (IS_ERR(ctx->seq)) { 479 - crypto_free_blkcipher(ctx->enc); 482 + crypto_free_skcipher(ctx->enc); 480 483 err = PTR_ERR(ctx->seq); 481 484 goto out_err_free_hmac; 482 485 } ··· 489 486 err = 0; 490 487 491 488 out_err_free_hmac: 492 - crypto_free_hash(hmac); 489 + crypto_free_shash(hmac); 493 490 out_err: 494 491 dprintk("RPC: %s: returning %d\n", __func__, err); 495 492 return err; ··· 591 588 context_v2_alloc_cipher(ctx, "cbc(aes)", 592 589 ctx->acceptor_seal); 593 590 if (ctx->acceptor_enc_aux == NULL) { 594 - 
crypto_free_blkcipher(ctx->initiator_enc_aux); 591 + crypto_free_skcipher(ctx->initiator_enc_aux); 595 592 goto out_free_acceptor_enc; 596 593 } 597 594 } ··· 599 596 return 0; 600 597 601 598 out_free_acceptor_enc: 602 - crypto_free_blkcipher(ctx->acceptor_enc); 599 + crypto_free_skcipher(ctx->acceptor_enc); 603 600 out_free_initiator_enc: 604 - crypto_free_blkcipher(ctx->initiator_enc); 601 + crypto_free_skcipher(ctx->initiator_enc); 605 602 out_err: 606 603 return -EINVAL; 607 604 } ··· 713 710 gss_delete_sec_context_kerberos(void *internal_ctx) { 714 711 struct krb5_ctx *kctx = internal_ctx; 715 712 716 - crypto_free_blkcipher(kctx->seq); 717 - crypto_free_blkcipher(kctx->enc); 718 - crypto_free_blkcipher(kctx->acceptor_enc); 719 - crypto_free_blkcipher(kctx->initiator_enc); 720 - crypto_free_blkcipher(kctx->acceptor_enc_aux); 721 - crypto_free_blkcipher(kctx->initiator_enc_aux); 713 + crypto_free_skcipher(kctx->seq); 714 + crypto_free_skcipher(kctx->enc); 715 + crypto_free_skcipher(kctx->acceptor_enc); 716 + crypto_free_skcipher(kctx->initiator_enc); 717 + crypto_free_skcipher(kctx->acceptor_enc_aux); 718 + crypto_free_skcipher(kctx->initiator_enc_aux); 722 719 kfree(kctx->mech_used.data); 723 720 kfree(kctx); 724 721 }
+11 -11
net/sunrpc/auth_gss/gss_krb5_seqnum.c
··· 31 31 * PERFORMANCE OF THIS SOFTWARE. 32 32 */ 33 33 34 + #include <crypto/skcipher.h> 34 35 #include <linux/types.h> 35 36 #include <linux/sunrpc/gss_krb5.h> 36 - #include <linux/crypto.h> 37 37 38 38 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 39 39 # define RPCDBG_FACILITY RPCDBG_AUTH ··· 43 43 krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, 44 44 unsigned char *cksum, unsigned char *buf) 45 45 { 46 - struct crypto_blkcipher *cipher; 46 + struct crypto_skcipher *cipher; 47 47 unsigned char plain[8]; 48 48 s32 code; 49 49 50 50 dprintk("RPC: %s:\n", __func__); 51 - cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, 52 - CRYPTO_ALG_ASYNC); 51 + cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 52 + CRYPTO_ALG_ASYNC); 53 53 if (IS_ERR(cipher)) 54 54 return PTR_ERR(cipher); 55 55 ··· 68 68 69 69 code = krb5_encrypt(cipher, cksum, plain, buf, 8); 70 70 out: 71 - crypto_free_blkcipher(cipher); 71 + crypto_free_skcipher(cipher); 72 72 return code; 73 73 } 74 74 s32 75 75 krb5_make_seq_num(struct krb5_ctx *kctx, 76 - struct crypto_blkcipher *key, 76 + struct crypto_skcipher *key, 77 77 int direction, 78 78 u32 seqnum, 79 79 unsigned char *cksum, unsigned char *buf) ··· 101 101 krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, 102 102 unsigned char *buf, int *direction, s32 *seqnum) 103 103 { 104 - struct crypto_blkcipher *cipher; 104 + struct crypto_skcipher *cipher; 105 105 unsigned char plain[8]; 106 106 s32 code; 107 107 108 108 dprintk("RPC: %s:\n", __func__); 109 - cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, 110 - CRYPTO_ALG_ASYNC); 109 + cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 110 + CRYPTO_ALG_ASYNC); 111 111 if (IS_ERR(cipher)) 112 112 return PTR_ERR(cipher); 113 113 ··· 130 130 *seqnum = ((plain[0] << 24) | (plain[1] << 16) | 131 131 (plain[2] << 8) | (plain[3])); 132 132 out: 133 - crypto_free_blkcipher(cipher); 133 + crypto_free_skcipher(cipher); 134 134 return code; 
135 135 } 136 136 ··· 142 142 { 143 143 s32 code; 144 144 unsigned char plain[8]; 145 - struct crypto_blkcipher *key = kctx->seq; 145 + struct crypto_skcipher *key = kctx->seq; 146 146 147 147 dprintk("RPC: krb5_get_seq_num:\n"); 148 148
+12 -12
net/sunrpc/auth_gss/gss_krb5_wrap.c
··· 28 28 * SUCH DAMAGES. 29 29 */ 30 30 31 + #include <crypto/skcipher.h> 31 32 #include <linux/types.h> 32 33 #include <linux/jiffies.h> 33 34 #include <linux/sunrpc/gss_krb5.h> 34 35 #include <linux/random.h> 35 36 #include <linux/pagemap.h> 36 - #include <linux/crypto.h> 37 37 38 38 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 39 39 # define RPCDBG_FACILITY RPCDBG_AUTH ··· 174 174 175 175 now = get_seconds(); 176 176 177 - blocksize = crypto_blkcipher_blocksize(kctx->enc); 177 + blocksize = crypto_skcipher_blocksize(kctx->enc); 178 178 gss_krb5_add_padding(buf, offset, blocksize); 179 179 BUG_ON((buf->len - offset) % blocksize); 180 180 plainlen = conflen + buf->len - offset; ··· 239 239 return GSS_S_FAILURE; 240 240 241 241 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { 242 - struct crypto_blkcipher *cipher; 242 + struct crypto_skcipher *cipher; 243 243 int err; 244 - cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, 245 - CRYPTO_ALG_ASYNC); 244 + cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 245 + CRYPTO_ALG_ASYNC); 246 246 if (IS_ERR(cipher)) 247 247 return GSS_S_FAILURE; 248 248 ··· 250 250 251 251 err = gss_encrypt_xdr_buf(cipher, buf, 252 252 offset + headlen - conflen, pages); 253 - crypto_free_blkcipher(cipher); 253 + crypto_free_skcipher(cipher); 254 254 if (err) 255 255 return GSS_S_FAILURE; 256 256 } else { ··· 327 327 return GSS_S_BAD_SIG; 328 328 329 329 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { 330 - struct crypto_blkcipher *cipher; 330 + struct crypto_skcipher *cipher; 331 331 int err; 332 332 333 - cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, 334 - CRYPTO_ALG_ASYNC); 333 + cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 334 + CRYPTO_ALG_ASYNC); 335 335 if (IS_ERR(cipher)) 336 336 return GSS_S_FAILURE; 337 337 338 338 krb5_rc4_setup_enc_key(kctx, cipher, seqnum); 339 339 340 340 err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset); 341 - crypto_free_blkcipher(cipher); 341 + 
crypto_free_skcipher(cipher); 342 342 if (err) 343 343 return GSS_S_DEFECTIVE_TOKEN; 344 344 } else { ··· 371 371 /* Copy the data back to the right position. XXX: Would probably be 372 372 * better to copy and encrypt at the same time. */ 373 373 374 - blocksize = crypto_blkcipher_blocksize(kctx->enc); 374 + blocksize = crypto_skcipher_blocksize(kctx->enc); 375 375 data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) + 376 376 conflen; 377 377 orig_start = buf->head[0].iov_base + offset; ··· 473 473 *ptr++ = 0xff; 474 474 be16ptr = (__be16 *)ptr; 475 475 476 - blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc); 476 + blocksize = crypto_skcipher_blocksize(kctx->acceptor_enc); 477 477 *be16ptr++ = 0; 478 478 /* "inner" token header always uses 0 for RRC */ 479 479 *be16ptr++ = 0;