Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccp - permit asynchronous skcipher as fallback

Even though the ccp driver implements an asynchronous version of xts(aes),
the fallback it allocates is required to be synchronous. Given that SIMD
based software implementations are usually asynchronous as well, even
though they rarely complete asynchronously (this typically only happens
in cases where the request was made from softirq context, while SIMD was
already in use in the task context that it interrupted), these
implementations are disregarded, and either the generic C version or
another table based version implemented in assembler is selected instead.

Since falling back to synchronous AES is not only a performance issue, but
potentially a security issue as well (due to the fact that table based AES
is not time invariant), let's fix this, by allocating an ordinary skcipher
as the fallback, and invoke it with the completion routine that was given
to the outer request.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: John Allen <john.allen@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ard Biesheuvel; committed by Herbert Xu.
413b61ce 44b59175

+19 -18
+16 -17
drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -98,7 +98,7 @@
 	ctx->u.aes.key_len = key_len / 2;
 	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
 
-	return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+	return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
 }
 
 static int ccp_aes_xts_crypt(struct skcipher_request *req,
@@ -145,20 +145,19 @@
 	    (ctx->u.aes.key_len != AES_KEYSIZE_256))
 		fallback = 1;
 	if (fallback) {
-		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
-					       ctx->u.aes.tfm_skcipher);
-
 		/* Use the fallback to process the request for any
 		 * unsupported unit sizes or key sizes
 		 */
-		skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
-		skcipher_request_set_callback(subreq, req->base.flags,
-					      NULL, NULL);
-		skcipher_request_set_crypt(subreq, req->src, req->dst,
-					   req->cryptlen, req->iv);
-		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
-				crypto_skcipher_decrypt(subreq);
-		skcipher_request_zero(subreq);
+		skcipher_request_set_tfm(&rctx->fallback_req,
+					 ctx->u.aes.tfm_skcipher);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+				crypto_skcipher_decrypt(&rctx->fallback_req);
 		return ret;
 	}
@@ -197,14 +198,15 @@
 static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
 {
 	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_sync_skcipher *fallback_tfm;
+	struct crypto_skcipher *fallback_tfm;
 
 	ctx->complete = ccp_aes_xts_complete;
 	ctx->u.aes.key_len = 0;
 
-	fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
-						  CRYPTO_ALG_ASYNC |
-						  CRYPTO_ALG_NEED_FALLBACK);
+	fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+					     CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback_tfm)) {
 		pr_warn("could not load fallback driver xts(aes)\n");
@@ -210,7 +212,8 @@
 	}
 	ctx->u.aes.tfm_skcipher = fallback_tfm;
 
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
+					 crypto_skcipher_reqsize(fallback_tfm));
 
 	return 0;
 }
@@ -220,5 +221,5 @@
 {
 	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
+	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
 }
 
 static int ccp_register_aes_xts_alg(struct list_head *head,
+3 -1
drivers/crypto/ccp/ccp-crypto.h
@@ -89,7 +89,7 @@
 /***** AES related defines *****/
 struct ccp_aes_ctx {
 	/* Fallback cipher for XTS with unsupported unit sizes */
-	struct crypto_sync_skcipher *tfm_skcipher;
+	struct crypto_skcipher *tfm_skcipher;
 
 	enum ccp_engine engine;
 	enum ccp_aes_type type;
@@ -121,6 +121,8 @@
 	u8 rfc3686_iv[AES_BLOCK_SIZE];
 
 	struct ccp_cmd cmd;
+
+	struct skcipher_request fallback_req;	// keep at the end
 };
 
 struct ccp_aes_cmac_req_ctx {