n2_crypto: Plumb fallback ahash requests properly.

Do this by putting the async fallback request at the end of an
n2-specific ahash request context, then properly adjusting the request
private size in our ahash ->cra_init().

We also need to put the writable state bits into the n2 request
private area instead of the n2 cra_ctx.
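
For illustration, the general shape of this pattern is sketched below.
This is a rough, hypothetical example only; the example_* names are made
up for this note and are not part of the patch, which uses the n2_*
structures shown in the diff.

#include <linux/err.h>
#include <crypto/internal/hash.h>

struct example_tfm_ctx {
	struct crypto_ahash	*fallback_tfm;	/* read-only after ->cra_init() */
};

struct example_req_ctx {
	/* Writable per-request hash state belongs here rather than in the
	 * tfm context, so concurrent requests on one tfm don't stomp on
	 * each other.
	 */
	struct ahash_request	fallback_req;	/* must stay the last member */
};

static int example_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct example_tfm_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback;

	fallback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
				      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback))
		return PTR_ERR(fallback);

	/* Reserve our request context plus whatever the fallback needs,
	 * so the ahash_request tacked onto the end of our context has
	 * room for the fallback's own private state.
	 */
	crypto_ahash_set_reqsize(ahash, sizeof(struct example_req_ctx) +
					crypto_ahash_reqsize(fallback));
	ctx->fallback_tfm = fallback;
	return 0;
}

static int example_async_init(struct ahash_request *req)
{
	struct example_req_ctx *rctx = ahash_request_ctx(req);
	struct example_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	/* Point the embedded request at the fallback tfm and forward. */
	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
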

With help from Herbert Xu.

Signed-off-by: David S. Miller <davem@davemloft.net>

+49 -48
drivers/crypto/n2_core.c
···
 struct n2_hash_ctx {
 	struct n2_base_ctx		base;
 
-	struct crypto_ahash		*fallback;
+	struct crypto_ahash		*fallback_tfm;
+};
 
-	/* These next three members must match the layout created by
-	 * crypto_init_shash_ops_async.  This allows us to properly
-	 * plumb requests we can't do in hardware down to the fallback
-	 * operation, providing all of the data structures and layouts
-	 * expected by those paths.
-	 */
-	struct ahash_request		fallback_req;
-	struct shash_desc		fallback_desc;
+struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
 		struct sha1_state	sha1;
···
 
 	unsigned char			hash_key[64];
 	unsigned char			keyed_zero_hash[32];
+
+	struct ahash_request		fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	return crypto_ahash_init(&ctx->fallback_req);
+	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
 
-	return crypto_ahash_update(&ctx->fallback_req);
+	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_final(&ctx->fallback_req);
+	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_finup(&ctx->fallback_req);
+	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
···
 		goto out;
 	}
 
-	ctx->fallback = fallback_tfm;
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
 	return 0;
 
 out:
···
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	crypto_free_ahash(ctx->fallback);
+	crypto_free_ahash(ctx->fallback_tfm);
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
···
 	 * exceed 2^16.
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
-		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-		ctx->fallback_req.base.flags =
-			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-		ctx->fallback_req.nbytes = req->nbytes;
-		ctx->fallback_req.src = req->src;
-		ctx->fallback_req.result = req->result;
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 
-		return crypto_ahash_digest(&ctx->fallback_req);
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
+
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
 	n2_base_ctx_init(&ctx->base);
···
 
 static int n2_md5_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct md5_state *m = &ctx->u.md5;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct md5_state *m = &rctx->u.md5;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char md5_zero[MD5_DIGEST_SIZE] = {
···
 
 static int n2_sha1_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha1_state *s = &ctx->u.sha1;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *s = &rctx->u.sha1;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
···
 
 static int n2_sha256_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
···
 
 static int n2_sha224_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha224_zero[SHA224_DIGEST_SIZE] = {