n2_crypto: Plumb fallback ahash requests properly.

Do this by putting the async fallback request at the end of an
n2-specific ahash request context, then adjusting the request
private size accordingly in our ahash ->cra_init().

We also need to move the writable hash state out of the n2 cra_ctx
and into the n2 request private area, since the cra_ctx is shared by
every request on the same tfm.
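
In outline, the pattern looks like the condensed sketch below (not the
full driver: error handling, the keyed-hash fields and the remaining
fallback entry points are elided, and the fallback lookup is
simplified; the actual change is in the diff that follows):

#include <linux/err.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

/* Read-only state shared by all requests on a tfm. */
struct n2_hash_ctx {
	struct crypto_ahash	*fallback_tfm;
};

/* Writable per-request state.  fallback_req must stay last so that
 * the fallback tfm's own request context can live directly behind it
 * in the same allocation.
 */
struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request	fallback_req;
};

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;

	fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm))
		return PTR_ERR(fallback_tfm);

	/* Our reqsize is our private state plus whatever the fallback needs. */
	crypto_ahash_set_reqsize(ahash, sizeof(struct n2_hash_req_ctx) +
					crypto_ahash_reqsize(fallback_tfm));

	ctx->fallback_tfm = fallback_tfm;
	return 0;
}

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

The other fallback paths (update/final/finup and the oversized-request
case in ->digest()) fill in rctx->fallback_req the same way, as the
diff below shows.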

With help from Herbert Xu.

Signed-off-by: David S. Miller <davem@davemloft.net>

 drivers/crypto/n2_core.c | 97 +++++++++++++++++++++++++------------------------
 1 file changed, 49 insertions(+), 48 deletions(-)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -251,16 +251,10 @@
 struct n2_hash_ctx {
 	struct n2_base_ctx	base;
 
-	struct crypto_ahash	*fallback;
+	struct crypto_ahash	*fallback_tfm;
+};
 
-	/* These next three members must match the layout created by
-	 * crypto_init_shash_ops_async.  This allows us to properly
-	 * plumb requests we can't do in hardware down to the fallback
-	 * operation, providing all of the data structures and layouts
-	 * expected by those paths.
-	 */
-	struct ahash_request	fallback_req;
-	struct shash_desc	fallback_desc;
+struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
 		struct sha1_state	sha1;
@@ -269,56 +263,62 @@
 
 	unsigned char		hash_key[64];
 	unsigned char		keyed_zero_hash[32];
+
+	struct ahash_request	fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	return crypto_ahash_init(&ctx->fallback_req);
+	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
 
-	return crypto_ahash_update(&ctx->fallback_req);
+	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_final(&ctx->fallback_req);
+	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_finup(&ctx->fallback_req);
+	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
@@ -338,7 +338,10 @@
 		goto out;
 	}
 
-	ctx->fallback = fallback_tfm;
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
 	return 0;
 
 out:
@@ -350,7 +353,7 @@
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	crypto_free_ahash(ctx->fallback);
+	crypto_free_ahash(ctx->fallback_tfm);
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
@@ -399,14 +402,16 @@
 	 * exceed 2^16.
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
-		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-		ctx->fallback_req.base.flags =
-			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-		ctx->fallback_req.nbytes = req->nbytes;
-		ctx->fallback_req.src = req->src;
-		ctx->fallback_req.result = req->result;
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 
-		return crypto_ahash_digest(&ctx->fallback_req);
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
+
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
 	n2_base_ctx_init(&ctx->base);
@@ -472,9 +477,8 @@
 
 static int n2_md5_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct md5_state *m = &ctx->u.md5;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct md5_state *m = &rctx->u.md5;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char md5_zero[MD5_DIGEST_SIZE] = {
@@ -497,9 +501,8 @@
 
 static int n2_sha1_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha1_state *s = &ctx->u.sha1;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *s = &rctx->u.sha1;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
@@ -524,9 +527,8 @@
 
 static int n2_sha256_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
@@ -555,9 +557,8 @@
 
 static int n2_sha224_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha224_zero[SHA224_DIGEST_SIZE] = {