Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: geode-aes - convert to skcipher API and make thread-safe

The geode AES driver is heavily broken because it stores per-request
state in the transform context. So it will crash or produce the wrong
result if used by any of the many places in the kernel that issue
concurrent requests for the same transform object.

This driver is also implemented using the deprecated blkcipher API,
which makes it difficult to fix, and puts it among the drivers
preventing that API from being removed.

Convert this driver to use the skcipher API, and change it to not store
per-request state in the transform context.

Fixes: 9fe757b0cfce ("[PATCH] crypto: Add support for the Geode LX AES hardware")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Eric Biggers; committed by Herbert Xu.
4549f7e5 e53619c8

+152 -309
+150 -296
drivers/crypto/geode-aes.c
··· 10 10 #include <linux/spinlock.h> 11 11 #include <crypto/algapi.h> 12 12 #include <crypto/aes.h> 13 - #include <crypto/skcipher.h> 13 + #include <crypto/internal/skcipher.h> 14 14 15 15 #include <linux/io.h> 16 16 #include <linux/delay.h> ··· 24 24 25 25 /* Write a 128 bit field (either a writable key or IV) */ 26 26 static inline void 27 - _writefield(u32 offset, void *value) 27 + _writefield(u32 offset, const void *value) 28 28 { 29 29 int i; 30 30 31 31 for (i = 0; i < 4; i++) 32 - iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); 32 + iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4)); 33 33 } 34 34 35 35 /* Read a 128 bit field (either a writable key or IV) */ ··· 43 43 } 44 44 45 45 static int 46 - do_crypt(void *src, void *dst, int len, u32 flags) 46 + do_crypt(const void *src, void *dst, u32 len, u32 flags) 47 47 { 48 48 u32 status; 49 49 u32 counter = AES_OP_TIMEOUT; 50 50 51 - iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG); 51 + iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG); 52 52 iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG); 53 53 iowrite32(len, _iobase + AES_LENA_REG); 54 54 ··· 65 65 return counter ? 
0 : 1; 66 66 } 67 67 68 - static unsigned int 69 - geode_aes_crypt(struct geode_aes_op *op) 68 + static void 69 + geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src, 70 + void *dst, u32 len, u8 *iv, int mode, int dir) 70 71 { 71 72 u32 flags = 0; 72 73 unsigned long iflags; 73 74 int ret; 74 - 75 - if (op->len == 0) 76 - return 0; 77 75 78 76 /* If the source and destination is the same, then 79 77 * we need to turn on the coherent flags, otherwise ··· 80 82 81 83 flags |= (AES_CTRL_DCA | AES_CTRL_SCA); 82 84 83 - if (op->dir == AES_DIR_ENCRYPT) 85 + if (dir == AES_DIR_ENCRYPT) 84 86 flags |= AES_CTRL_ENCRYPT; 85 87 86 88 /* Start the critical section */ 87 89 88 90 spin_lock_irqsave(&lock, iflags); 89 91 90 - if (op->mode == AES_MODE_CBC) { 92 + if (mode == AES_MODE_CBC) { 91 93 flags |= AES_CTRL_CBC; 92 - _writefield(AES_WRITEIV0_REG, op->iv); 94 + _writefield(AES_WRITEIV0_REG, iv); 93 95 } 94 96 95 - if (!(op->flags & AES_FLAGS_HIDDENKEY)) { 96 - flags |= AES_CTRL_WRKEY; 97 - _writefield(AES_WRITEKEY0_REG, op->key); 98 - } 97 + flags |= AES_CTRL_WRKEY; 98 + _writefield(AES_WRITEKEY0_REG, tctx->key); 99 99 100 - ret = do_crypt(op->src, op->dst, op->len, flags); 100 + ret = do_crypt(src, dst, len, flags); 101 101 BUG_ON(ret); 102 102 103 - if (op->mode == AES_MODE_CBC) 104 - _readfield(AES_WRITEIV0_REG, op->iv); 103 + if (mode == AES_MODE_CBC) 104 + _readfield(AES_WRITEIV0_REG, iv); 105 105 106 106 spin_unlock_irqrestore(&lock, iflags); 107 - 108 - return op->len; 109 107 } 110 108 111 109 /* CRYPTO-API Functions */ ··· 109 115 static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, 110 116 unsigned int len) 111 117 { 112 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 118 + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); 113 119 unsigned int ret; 114 120 115 - op->keylen = len; 121 + tctx->keylen = len; 116 122 117 123 if (len == AES_KEYSIZE_128) { 118 - memcpy(op->key, key, len); 124 + memcpy(tctx->key, key, len); 119 125 
return 0; 120 126 } 121 127 ··· 128 134 /* 129 135 * The requested key size is not supported by HW, do a fallback 130 136 */ 131 - op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 132 - op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); 137 + tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 138 + tctx->fallback.cip->base.crt_flags |= 139 + (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); 133 140 134 - ret = crypto_cipher_setkey(op->fallback.cip, key, len); 141 + ret = crypto_cipher_setkey(tctx->fallback.cip, key, len); 135 142 if (ret) { 136 143 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 137 - tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK); 144 + tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags & 145 + CRYPTO_TFM_RES_MASK); 138 146 } 139 147 return ret; 140 148 } 141 149 142 - static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key, 143 - unsigned int len) 150 + static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, 151 + unsigned int len) 144 152 { 145 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 153 + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); 146 154 unsigned int ret; 147 155 148 - op->keylen = len; 156 + tctx->keylen = len; 149 157 150 158 if (len == AES_KEYSIZE_128) { 151 - memcpy(op->key, key, len); 159 + memcpy(tctx->key, key, len); 152 160 return 0; 153 161 } 154 162 155 163 if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { 156 164 /* not supported at all */ 157 - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 165 + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 158 166 return -EINVAL; 159 167 } 160 168 161 169 /* 162 170 * The requested key size is not supported by HW, do a fallback 163 171 */ 164 - crypto_sync_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK); 165 - crypto_sync_skcipher_set_flags(op->fallback.blk, 166 - tfm->crt_flags & CRYPTO_TFM_REQ_MASK); 167 - 168 - ret = 
crypto_sync_skcipher_setkey(op->fallback.blk, key, len); 169 - if (ret) { 170 - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 171 - tfm->crt_flags |= crypto_sync_skcipher_get_flags(op->fallback.blk) & 172 - CRYPTO_TFM_RES_MASK; 173 - } 172 + crypto_skcipher_clear_flags(tctx->fallback.skcipher, 173 + CRYPTO_TFM_REQ_MASK); 174 + crypto_skcipher_set_flags(tctx->fallback.skcipher, 175 + crypto_skcipher_get_flags(tfm) & 176 + CRYPTO_TFM_REQ_MASK); 177 + ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len); 178 + crypto_skcipher_set_flags(tfm, 179 + crypto_skcipher_get_flags(tctx->fallback.skcipher) & 180 + CRYPTO_TFM_RES_MASK); 174 181 return ret; 175 - } 176 - 177 - static int fallback_blk_dec(struct blkcipher_desc *desc, 178 - struct scatterlist *dst, struct scatterlist *src, 179 - unsigned int nbytes) 180 - { 181 - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); 182 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk); 183 - 184 - skcipher_request_set_sync_tfm(req, op->fallback.blk); 185 - skcipher_request_set_callback(req, 0, NULL, NULL); 186 - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 187 - 188 - return crypto_skcipher_decrypt(req); 189 - } 190 - 191 - static int fallback_blk_enc(struct blkcipher_desc *desc, 192 - struct scatterlist *dst, struct scatterlist *src, 193 - unsigned int nbytes) 194 - { 195 - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); 196 - SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk); 197 - 198 - skcipher_request_set_sync_tfm(req, op->fallback.blk); 199 - skcipher_request_set_callback(req, 0, NULL, NULL); 200 - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 201 - 202 - return crypto_skcipher_encrypt(req); 203 182 } 204 183 205 184 static void 206 185 geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 207 186 { 208 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 187 + const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); 209 188 210 - if (unlikely(op->keylen 
!= AES_KEYSIZE_128)) { 211 - crypto_cipher_encrypt_one(op->fallback.cip, out, in); 189 + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { 190 + crypto_cipher_encrypt_one(tctx->fallback.cip, out, in); 212 191 return; 213 192 } 214 193 215 - op->src = (void *) in; 216 - op->dst = (void *) out; 217 - op->mode = AES_MODE_ECB; 218 - op->flags = 0; 219 - op->len = AES_BLOCK_SIZE; 220 - op->dir = AES_DIR_ENCRYPT; 221 - 222 - geode_aes_crypt(op); 194 + geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL, 195 + AES_MODE_ECB, AES_DIR_ENCRYPT); 223 196 } 224 197 225 198 226 199 static void 227 200 geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 228 201 { 229 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 202 + const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); 230 203 231 - if (unlikely(op->keylen != AES_KEYSIZE_128)) { 232 - crypto_cipher_decrypt_one(op->fallback.cip, out, in); 204 + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { 205 + crypto_cipher_decrypt_one(tctx->fallback.cip, out, in); 233 206 return; 234 207 } 235 208 236 - op->src = (void *) in; 237 - op->dst = (void *) out; 238 - op->mode = AES_MODE_ECB; 239 - op->flags = 0; 240 - op->len = AES_BLOCK_SIZE; 241 - op->dir = AES_DIR_DECRYPT; 242 - 243 - geode_aes_crypt(op); 209 + geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL, 210 + AES_MODE_ECB, AES_DIR_DECRYPT); 244 211 } 245 212 246 213 static int fallback_init_cip(struct crypto_tfm *tfm) 247 214 { 248 215 const char *name = crypto_tfm_alg_name(tfm); 249 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 216 + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); 250 217 251 - op->fallback.cip = crypto_alloc_cipher(name, 0, 252 - CRYPTO_ALG_NEED_FALLBACK); 218 + tctx->fallback.cip = crypto_alloc_cipher(name, 0, 219 + CRYPTO_ALG_NEED_FALLBACK); 253 220 254 - if (IS_ERR(op->fallback.cip)) { 221 + if (IS_ERR(tctx->fallback.cip)) { 255 222 printk(KERN_ERR "Error allocating fallback algo %s\n", name); 256 - return PTR_ERR(op->fallback.cip); 
223 + return PTR_ERR(tctx->fallback.cip); 257 224 } 258 225 259 226 return 0; ··· 222 267 223 268 static void fallback_exit_cip(struct crypto_tfm *tfm) 224 269 { 225 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 270 + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); 226 271 227 - crypto_free_cipher(op->fallback.cip); 228 - op->fallback.cip = NULL; 272 + crypto_free_cipher(tctx->fallback.cip); 229 273 } 230 274 231 275 static struct crypto_alg geode_alg = { ··· 237 283 .cra_init = fallback_init_cip, 238 284 .cra_exit = fallback_exit_cip, 239 285 .cra_blocksize = AES_BLOCK_SIZE, 240 - .cra_ctxsize = sizeof(struct geode_aes_op), 286 + .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), 241 287 .cra_module = THIS_MODULE, 242 288 .cra_u = { 243 289 .cipher = { ··· 250 296 } 251 297 }; 252 298 253 - static int 254 - geode_cbc_decrypt(struct blkcipher_desc *desc, 255 - struct scatterlist *dst, struct scatterlist *src, 256 - unsigned int nbytes) 299 + static int geode_init_skcipher(struct crypto_skcipher *tfm) 257 300 { 258 - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); 259 - struct blkcipher_walk walk; 260 - int err, ret; 301 + const char *name = crypto_tfm_alg_name(&tfm->base); 302 + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); 261 303 262 - if (nbytes % AES_BLOCK_SIZE) 263 - return -EINVAL; 264 - 265 - if (unlikely(op->keylen != AES_KEYSIZE_128)) 266 - return fallback_blk_dec(desc, dst, src, nbytes); 267 - 268 - blkcipher_walk_init(&walk, dst, src, nbytes); 269 - err = blkcipher_walk_virt(desc, &walk); 270 - op->iv = walk.iv; 271 - 272 - while ((nbytes = walk.nbytes)) { 273 - op->src = walk.src.virt.addr, 274 - op->dst = walk.dst.virt.addr; 275 - op->mode = AES_MODE_CBC; 276 - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); 277 - op->dir = AES_DIR_DECRYPT; 278 - 279 - ret = geode_aes_crypt(op); 280 - 281 - nbytes -= ret; 282 - err = blkcipher_walk_done(desc, &walk, nbytes); 283 - } 284 - 285 - return err; 286 - } 287 - 288 - static int 289 
- geode_cbc_encrypt(struct blkcipher_desc *desc, 290 - struct scatterlist *dst, struct scatterlist *src, 291 - unsigned int nbytes) 292 - { 293 - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); 294 - struct blkcipher_walk walk; 295 - int err, ret; 296 - 297 - if (nbytes % AES_BLOCK_SIZE) 298 - return -EINVAL; 299 - 300 - if (unlikely(op->keylen != AES_KEYSIZE_128)) 301 - return fallback_blk_enc(desc, dst, src, nbytes); 302 - 303 - blkcipher_walk_init(&walk, dst, src, nbytes); 304 - err = blkcipher_walk_virt(desc, &walk); 305 - op->iv = walk.iv; 306 - 307 - while ((nbytes = walk.nbytes)) { 308 - op->src = walk.src.virt.addr, 309 - op->dst = walk.dst.virt.addr; 310 - op->mode = AES_MODE_CBC; 311 - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); 312 - op->dir = AES_DIR_ENCRYPT; 313 - 314 - ret = geode_aes_crypt(op); 315 - nbytes -= ret; 316 - err = blkcipher_walk_done(desc, &walk, nbytes); 317 - } 318 - 319 - return err; 320 - } 321 - 322 - static int fallback_init_blk(struct crypto_tfm *tfm) 323 - { 324 - const char *name = crypto_tfm_alg_name(tfm); 325 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 326 - 327 - op->fallback.blk = crypto_alloc_sync_skcipher(name, 0, 328 - CRYPTO_ALG_NEED_FALLBACK); 329 - if (IS_ERR(op->fallback.blk)) { 304 + tctx->fallback.skcipher = 305 + crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | 306 + CRYPTO_ALG_ASYNC); 307 + if (IS_ERR(tctx->fallback.skcipher)) { 330 308 printk(KERN_ERR "Error allocating fallback algo %s\n", name); 331 - return PTR_ERR(op->fallback.blk); 309 + return PTR_ERR(tctx->fallback.skcipher); 332 310 } 333 311 312 + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + 313 + crypto_skcipher_reqsize(tctx->fallback.skcipher)); 334 314 return 0; 335 315 } 336 316 337 - static void fallback_exit_blk(struct crypto_tfm *tfm) 317 + static void geode_exit_skcipher(struct crypto_skcipher *tfm) 338 318 { 339 - struct geode_aes_op *op = crypto_tfm_ctx(tfm); 319 + struct geode_aes_tfm_ctx *tctx = 
crypto_skcipher_ctx(tfm); 340 320 341 - crypto_free_sync_skcipher(op->fallback.blk); 342 - op->fallback.blk = NULL; 321 + crypto_free_skcipher(tctx->fallback.skcipher); 343 322 } 344 323 345 - static struct crypto_alg geode_cbc_alg = { 346 - .cra_name = "cbc(aes)", 347 - .cra_driver_name = "cbc-aes-geode", 348 - .cra_priority = 400, 349 - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 350 - CRYPTO_ALG_KERN_DRIVER_ONLY | 351 - CRYPTO_ALG_NEED_FALLBACK, 352 - .cra_init = fallback_init_blk, 353 - .cra_exit = fallback_exit_blk, 354 - .cra_blocksize = AES_BLOCK_SIZE, 355 - .cra_ctxsize = sizeof(struct geode_aes_op), 356 - .cra_alignmask = 15, 357 - .cra_type = &crypto_blkcipher_type, 358 - .cra_module = THIS_MODULE, 359 - .cra_u = { 360 - .blkcipher = { 361 - .min_keysize = AES_MIN_KEY_SIZE, 362 - .max_keysize = AES_MAX_KEY_SIZE, 363 - .setkey = geode_setkey_blk, 364 - .encrypt = geode_cbc_encrypt, 365 - .decrypt = geode_cbc_decrypt, 366 - .ivsize = AES_BLOCK_SIZE, 367 - } 368 - } 369 - }; 370 - 371 - static int 372 - geode_ecb_decrypt(struct blkcipher_desc *desc, 373 - struct scatterlist *dst, struct scatterlist *src, 374 - unsigned int nbytes) 324 + static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir) 375 325 { 376 - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); 377 - struct blkcipher_walk walk; 378 - int err, ret; 326 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 327 + const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); 328 + struct skcipher_walk walk; 329 + unsigned int nbytes; 330 + int err; 379 331 380 - if (nbytes % AES_BLOCK_SIZE) 381 - return -EINVAL; 332 + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { 333 + struct skcipher_request *subreq = skcipher_request_ctx(req); 382 334 383 - if (unlikely(op->keylen != AES_KEYSIZE_128)) 384 - return fallback_blk_dec(desc, dst, src, nbytes); 335 + *subreq = *req; 336 + skcipher_request_set_tfm(subreq, tctx->fallback.skcipher); 337 + if (dir == 
AES_DIR_DECRYPT) 338 + return crypto_skcipher_decrypt(subreq); 339 + else 340 + return crypto_skcipher_encrypt(subreq); 341 + } 385 342 386 - blkcipher_walk_init(&walk, dst, src, nbytes); 387 - err = blkcipher_walk_virt(desc, &walk); 343 + err = skcipher_walk_virt(&walk, req, false); 388 344 389 - while ((nbytes = walk.nbytes)) { 390 - op->src = walk.src.virt.addr, 391 - op->dst = walk.dst.virt.addr; 392 - op->mode = AES_MODE_ECB; 393 - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); 394 - op->dir = AES_DIR_DECRYPT; 395 - 396 - ret = geode_aes_crypt(op); 397 - nbytes -= ret; 398 - err = blkcipher_walk_done(desc, &walk, nbytes); 345 + while ((nbytes = walk.nbytes) != 0) { 346 + geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr, 347 + round_down(nbytes, AES_BLOCK_SIZE), 348 + walk.iv, mode, dir); 349 + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); 399 350 } 400 351 401 352 return err; 402 353 } 403 354 404 - static int 405 - geode_ecb_encrypt(struct blkcipher_desc *desc, 406 - struct scatterlist *dst, struct scatterlist *src, 407 - unsigned int nbytes) 355 + static int geode_cbc_encrypt(struct skcipher_request *req) 408 356 { 409 - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); 410 - struct blkcipher_walk walk; 411 - int err, ret; 412 - 413 - if (nbytes % AES_BLOCK_SIZE) 414 - return -EINVAL; 415 - 416 - if (unlikely(op->keylen != AES_KEYSIZE_128)) 417 - return fallback_blk_enc(desc, dst, src, nbytes); 418 - 419 - blkcipher_walk_init(&walk, dst, src, nbytes); 420 - err = blkcipher_walk_virt(desc, &walk); 421 - 422 - while ((nbytes = walk.nbytes)) { 423 - op->src = walk.src.virt.addr, 424 - op->dst = walk.dst.virt.addr; 425 - op->mode = AES_MODE_ECB; 426 - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); 427 - op->dir = AES_DIR_ENCRYPT; 428 - 429 - ret = geode_aes_crypt(op); 430 - nbytes -= ret; 431 - ret = blkcipher_walk_done(desc, &walk, nbytes); 432 - } 433 - 434 - return err; 357 + return geode_skcipher_crypt(req, AES_MODE_CBC, 
AES_DIR_ENCRYPT); 435 358 } 436 359 437 - static struct crypto_alg geode_ecb_alg = { 438 - .cra_name = "ecb(aes)", 439 - .cra_driver_name = "ecb-aes-geode", 440 - .cra_priority = 400, 441 - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 442 - CRYPTO_ALG_KERN_DRIVER_ONLY | 443 - CRYPTO_ALG_NEED_FALLBACK, 444 - .cra_init = fallback_init_blk, 445 - .cra_exit = fallback_exit_blk, 446 - .cra_blocksize = AES_BLOCK_SIZE, 447 - .cra_ctxsize = sizeof(struct geode_aes_op), 448 - .cra_alignmask = 15, 449 - .cra_type = &crypto_blkcipher_type, 450 - .cra_module = THIS_MODULE, 451 - .cra_u = { 452 - .blkcipher = { 453 - .min_keysize = AES_MIN_KEY_SIZE, 454 - .max_keysize = AES_MAX_KEY_SIZE, 455 - .setkey = geode_setkey_blk, 456 - .encrypt = geode_ecb_encrypt, 457 - .decrypt = geode_ecb_decrypt, 458 - } 459 - } 360 + static int geode_cbc_decrypt(struct skcipher_request *req) 361 + { 362 + return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT); 363 + } 364 + 365 + static int geode_ecb_encrypt(struct skcipher_request *req) 366 + { 367 + return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT); 368 + } 369 + 370 + static int geode_ecb_decrypt(struct skcipher_request *req) 371 + { 372 + return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT); 373 + } 374 + 375 + static struct skcipher_alg geode_skcipher_algs[] = { 376 + { 377 + .base.cra_name = "cbc(aes)", 378 + .base.cra_driver_name = "cbc-aes-geode", 379 + .base.cra_priority = 400, 380 + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 381 + CRYPTO_ALG_NEED_FALLBACK, 382 + .base.cra_blocksize = AES_BLOCK_SIZE, 383 + .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), 384 + .base.cra_alignmask = 15, 385 + .base.cra_module = THIS_MODULE, 386 + .init = geode_init_skcipher, 387 + .exit = geode_exit_skcipher, 388 + .setkey = geode_setkey_skcipher, 389 + .encrypt = geode_cbc_encrypt, 390 + .decrypt = geode_cbc_decrypt, 391 + .min_keysize = AES_MIN_KEY_SIZE, 392 + .max_keysize = AES_MAX_KEY_SIZE, 393 + .ivsize = 
AES_BLOCK_SIZE, 394 + }, { 395 + .base.cra_name = "ecb(aes)", 396 + .base.cra_driver_name = "ecb-aes-geode", 397 + .base.cra_priority = 400, 398 + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 399 + CRYPTO_ALG_NEED_FALLBACK, 400 + .base.cra_blocksize = AES_BLOCK_SIZE, 401 + .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), 402 + .base.cra_alignmask = 15, 403 + .base.cra_module = THIS_MODULE, 404 + .init = geode_init_skcipher, 405 + .exit = geode_exit_skcipher, 406 + .setkey = geode_setkey_skcipher, 407 + .encrypt = geode_ecb_encrypt, 408 + .decrypt = geode_ecb_decrypt, 409 + .min_keysize = AES_MIN_KEY_SIZE, 410 + .max_keysize = AES_MAX_KEY_SIZE, 411 + }, 460 412 }; 461 413 462 414 static void geode_aes_remove(struct pci_dev *dev) 463 415 { 464 416 crypto_unregister_alg(&geode_alg); 465 - crypto_unregister_alg(&geode_ecb_alg); 466 - crypto_unregister_alg(&geode_cbc_alg); 417 + crypto_unregister_skciphers(geode_skcipher_algs, 418 + ARRAY_SIZE(geode_skcipher_algs)); 467 419 468 420 pci_iounmap(dev, _iobase); 469 421 _iobase = NULL; ··· 407 547 if (ret) 408 548 goto eiomap; 409 549 410 - ret = crypto_register_alg(&geode_ecb_alg); 550 + ret = crypto_register_skciphers(geode_skcipher_algs, 551 + ARRAY_SIZE(geode_skcipher_algs)); 411 552 if (ret) 412 553 goto ealg; 413 554 414 - ret = crypto_register_alg(&geode_cbc_alg); 415 - if (ret) 416 - goto eecb; 417 - 418 555 dev_notice(&dev->dev, "GEODE AES engine enabled.\n"); 419 556 return 0; 420 - 421 - eecb: 422 - crypto_unregister_alg(&geode_ecb_alg); 423 557 424 558 ealg: 425 559 crypto_unregister_alg(&geode_alg);
+2 -13
drivers/crypto/geode-aes.h
··· 46 46 47 47 #define AES_OP_TIMEOUT 0x50000 48 48 49 - struct geode_aes_op { 50 - 51 - void *src; 52 - void *dst; 53 - 54 - u32 mode; 55 - u32 dir; 56 - u32 flags; 57 - int len; 58 - 49 + struct geode_aes_tfm_ctx { 59 50 u8 key[AES_KEYSIZE_128]; 60 - u8 *iv; 61 - 62 51 union { 63 - struct crypto_sync_skcipher *blk; 52 + struct crypto_skcipher *skcipher; 64 53 struct crypto_cipher *cip; 65 54 } fallback; 66 55 u32 keylen;