Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: cbc - Convert to skcipher

This patch converts cbc over to the skcipher interface. It also
rearranges the code to allow it to be reused by drivers.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+138 -104
crypto/cbc.c
··· 10 10 * 11 11 */ 12 12 13 - #include <crypto/algapi.h> 13 + #include <crypto/internal/skcipher.h> 14 14 #include <linux/err.h> 15 15 #include <linux/init.h> 16 16 #include <linux/kernel.h> 17 17 #include <linux/log2.h> 18 18 #include <linux/module.h> 19 - #include <linux/scatterlist.h> 20 19 #include <linux/slab.h> 21 20 22 21 struct crypto_cbc_ctx { 23 22 struct crypto_cipher *child; 24 23 }; 25 24 26 - static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, 25 + static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key, 27 26 unsigned int keylen) 28 27 { 29 - struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent); 28 + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent); 30 29 struct crypto_cipher *child = ctx->child; 31 30 int err; 32 31 33 32 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 34 - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & 33 + crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & 35 34 CRYPTO_TFM_REQ_MASK); 36 35 err = crypto_cipher_setkey(child, key, keylen); 37 - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & 38 - CRYPTO_TFM_RES_MASK); 36 + crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & 37 + CRYPTO_TFM_RES_MASK); 39 38 return err; 40 39 } 41 40 42 - static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, 43 - struct blkcipher_walk *walk, 44 - struct crypto_cipher *tfm) 41 + static inline int crypto_cbc_encrypt_segment( 42 + struct skcipher_walk *walk, struct crypto_skcipher *tfm, 43 + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) 45 44 { 46 - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 47 - crypto_cipher_alg(tfm)->cia_encrypt; 48 - int bsize = crypto_cipher_blocksize(tfm); 45 + unsigned int bsize = crypto_skcipher_blocksize(tfm); 49 46 unsigned int nbytes = walk->nbytes; 50 47 u8 *src = walk->src.virt.addr; 51 48 u8 *dst = walk->dst.virt.addr; ··· 50 53 51 54 do { 52 55 crypto_xor(iv, src, bsize);
53 - fn(crypto_cipher_tfm(tfm), dst, iv); 56 + fn(tfm, iv, dst); 54 57 memcpy(iv, dst, bsize); 55 58 56 59 src += bsize; ··· 60 63 return nbytes; 61 64 } 62 65 63 - static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, 64 - struct blkcipher_walk *walk, 65 - struct crypto_cipher *tfm) 66 + static inline int crypto_cbc_encrypt_inplace( 67 + struct skcipher_walk *walk, struct crypto_skcipher *tfm, 68 + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) 66 69 { 67 - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 68 - crypto_cipher_alg(tfm)->cia_encrypt; 69 - int bsize = crypto_cipher_blocksize(tfm); 70 + unsigned int bsize = crypto_skcipher_blocksize(tfm); 70 71 unsigned int nbytes = walk->nbytes; 71 72 u8 *src = walk->src.virt.addr; 72 73 u8 *iv = walk->iv; 73 74 74 75 do { 75 76 crypto_xor(src, iv, bsize); 76 - fn(crypto_cipher_tfm(tfm), src, src); 77 + fn(tfm, src, src); 77 78 iv = src; 78 79 79 80 src += bsize; ··· 82 87 return nbytes; 83 88 } 84 89 85 - static int crypto_cbc_encrypt(struct blkcipher_desc *desc, 86 - struct scatterlist *dst, struct scatterlist *src, 87 - unsigned int nbytes) 90 + static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req, 91 + void (*fn)(struct crypto_skcipher *, 92 + const u8 *, u8 *)) 88 93 { 89 - struct blkcipher_walk walk; 90 - struct crypto_blkcipher *tfm = desc->tfm; 91 - struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); 92 - struct crypto_cipher *child = ctx->child; 94 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 95 + struct skcipher_walk walk; 93 96 int err; 94 97 95 - blkcipher_walk_init(&walk, dst, src, nbytes); 96 - err = blkcipher_walk_virt(desc, &walk); 98 + err = skcipher_walk_virt(&walk, req, false); 97 99 98 - while ((nbytes = walk.nbytes)) { 100 + while (walk.nbytes) { 99 101 if (walk.src.virt.addr == walk.dst.virt.addr) 100 - nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child); 102 + err = crypto_cbc_encrypt_inplace(&walk, tfm, fn); 101 103 else
102 - nbytes = crypto_cbc_encrypt_segment(desc, &walk, child); 103 - err = blkcipher_walk_done(desc, &walk, nbytes); 104 + err = crypto_cbc_encrypt_segment(&walk, tfm, fn); 105 + err = skcipher_walk_done(&walk, err); 104 106 } 105 107 106 108 return err; 107 109 } 108 110 109 - static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, 110 - struct blkcipher_walk *walk, 111 - struct crypto_cipher *tfm) 111 + static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm, 112 + const u8 *src, u8 *dst) 112 113 { 113 - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 114 - crypto_cipher_alg(tfm)->cia_decrypt; 115 - int bsize = crypto_cipher_blocksize(tfm); 114 + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 115 + 116 + crypto_cipher_encrypt_one(ctx->child, dst, src); 117 + } 118 + 119 + static int crypto_cbc_encrypt(struct skcipher_request *req) 120 + { 121 + return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one); 122 + } 123 + 124 + static inline int crypto_cbc_decrypt_segment( 125 + struct skcipher_walk *walk, struct crypto_skcipher *tfm, 126 + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) 127 + { 128 + unsigned int bsize = crypto_skcipher_blocksize(tfm); 116 129 unsigned int nbytes = walk->nbytes; 117 130 u8 *src = walk->src.virt.addr; 118 131 u8 *dst = walk->dst.virt.addr; 119 132 u8 *iv = walk->iv; 120 133 121 134 do { 122 - fn(crypto_cipher_tfm(tfm), dst, src); 135 + fn(tfm, src, dst); 123 136 crypto_xor(dst, iv, bsize); 124 137 iv = src; 125 138 ··· 140 137 return nbytes; 141 138 } 142 139 143 - static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, 144 - struct blkcipher_walk *walk, 145 - struct crypto_cipher *tfm) 140 + static inline int crypto_cbc_decrypt_inplace( 141 + struct skcipher_walk *walk, struct crypto_skcipher *tfm, 142 + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) 146 143 { 147 - void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = 148 - crypto_cipher_alg(tfm)->cia_decrypt;
149 - int bsize = crypto_cipher_blocksize(tfm); 144 + unsigned int bsize = crypto_skcipher_blocksize(tfm); 150 145 unsigned int nbytes = walk->nbytes; 151 146 u8 *src = walk->src.virt.addr; 152 147 u8 last_iv[bsize]; ··· 154 153 memcpy(last_iv, src, bsize); 155 154 156 155 for (;;) { 157 - fn(crypto_cipher_tfm(tfm), src, src); 156 + fn(tfm, src, src); 158 157 if ((nbytes -= bsize) < bsize) 159 158 break; 160 159 crypto_xor(src, src - bsize, bsize); ··· 167 166 return nbytes; 168 167 } 169 168 170 - static int crypto_cbc_decrypt(struct blkcipher_desc *desc, 171 - struct scatterlist *dst, struct scatterlist *src, 172 - unsigned int nbytes) 169 + static inline int crypto_cbc_decrypt_blocks( 170 + struct skcipher_walk *walk, struct crypto_skcipher *tfm, 171 + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) 173 172 { 174 - struct blkcipher_walk walk; 175 - struct crypto_blkcipher *tfm = desc->tfm; 176 - struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); 177 - struct crypto_cipher *child = ctx->child; 173 + if (walk->src.virt.addr == walk->dst.virt.addr) 174 + return crypto_cbc_decrypt_inplace(walk, tfm, fn); 175 + else 176 + return crypto_cbc_decrypt_segment(walk, tfm, fn); 177 + } 178 + 179 + static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm, 180 + const u8 *src, u8 *dst) 181 + { 182 + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 183 + 184 + crypto_cipher_decrypt_one(ctx->child, dst, src); 185 + } 186 + 187 + static int crypto_cbc_decrypt(struct skcipher_request *req) 188 + { 189 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 190 + struct skcipher_walk walk; 178 191 int err; 179 192 180 - blkcipher_walk_init(&walk, dst, src, nbytes); 181 - err = blkcipher_walk_virt(desc, &walk); 193 + err = skcipher_walk_virt(&walk, req, false); 182 194 183 - while ((nbytes = walk.nbytes)) { 184 - if (walk.src.virt.addr == walk.dst.virt.addr) 185 - nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child); 186 - else
187 - nbytes = crypto_cbc_decrypt_segment(desc, &walk, child); 188 - err = blkcipher_walk_done(desc, &walk, nbytes); 195 + while (walk.nbytes) { 196 + err = crypto_cbc_decrypt_blocks(&walk, tfm, 197 + crypto_cbc_decrypt_one); 198 + err = skcipher_walk_done(&walk, err); 189 199 } 190 200 191 201 return err; 192 202 } 193 203 194 - static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) 204 + static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm) 195 205 { 196 - struct crypto_instance *inst = (void *)tfm->__crt_alg; 197 - struct crypto_spawn *spawn = crypto_instance_ctx(inst); 198 - struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 206 + struct skcipher_instance *inst = skcipher_alg_instance(tfm); 207 + struct crypto_spawn *spawn = skcipher_instance_ctx(inst); 208 + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 199 209 struct crypto_cipher *cipher; 200 210 201 211 cipher = crypto_spawn_cipher(spawn); ··· 217 205 return 0; 218 206 } 219 207 220 - static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) 208 + static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm) 221 209 { 222 - struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 210 + struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 211 + 223 212 crypto_free_cipher(ctx->child); 224 213 } 225 214 226 - static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) 215 + static void crypto_cbc_free(struct skcipher_instance *inst) 227 216 { 228 - struct crypto_instance *inst; 217 + crypto_drop_skcipher(skcipher_instance_ctx(inst)); 218 + kfree(inst); 219 + } 220 + 221 + static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) 222 + { 223 + struct skcipher_instance *inst; 224 + struct crypto_spawn *spawn; 229 225 struct crypto_alg *alg; 230 226 int err; 231 227 232 - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); 228 + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); 233 229 if (err) 234 - return ERR_PTR(err); 230 + return err; 231 +
232 + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); 233 + if (!inst) 234 + return -ENOMEM; 235 235 236 236 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, 237 237 CRYPTO_ALG_TYPE_MASK); 238 + err = PTR_ERR(alg); 238 239 if (IS_ERR(alg)) 239 - return ERR_CAST(alg); 240 + goto err_free_inst; 240 241 241 - inst = ERR_PTR(-EINVAL); 242 + spawn = skcipher_instance_ctx(inst); 243 + err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), 244 + CRYPTO_ALG_TYPE_MASK); 245 + crypto_mod_put(alg); 246 + if (err) 247 + goto err_free_inst; 248 + 249 + err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); 250 + if (err) 251 + goto err_drop_spawn; 252 + 253 + err = -EINVAL; 242 254 if (!is_power_of_2(alg->cra_blocksize)) 243 - goto out_put_alg; 255 + goto err_drop_spawn; 244 256 245 - inst = crypto_alloc_instance("cbc", alg); 246 - if (IS_ERR(inst)) 247 - goto out_put_alg; 248 - 249 - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; 250 - inst->alg.cra_priority = alg->cra_priority; 251 - inst->alg.cra_blocksize = alg->cra_blocksize; 252 - inst->alg.cra_alignmask = alg->cra_alignmask; 253 - inst->alg.cra_type = &crypto_blkcipher_type; 257 + inst->alg.base.cra_priority = alg->cra_priority; 258 + inst->alg.base.cra_blocksize = alg->cra_blocksize; 259 + inst->alg.base.cra_alignmask = alg->cra_alignmask; 254 260
255 261 /* We access the data as u32s when xoring. */ 256 - inst->alg.cra_alignmask |= __alignof__(u32) - 1; 262 + inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; 257 263 258 - inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; 259 - inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; 260 - inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; 264 + inst->alg.ivsize = alg->cra_blocksize; 265 + inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; 266 + inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; 261 267 262 - inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx); 268 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx); 263 269 264 - inst->alg.cra_init = crypto_cbc_init_tfm; 265 - inst->alg.cra_exit = crypto_cbc_exit_tfm; 270 + inst->alg.init = crypto_cbc_init_tfm; 271 + inst->alg.exit = crypto_cbc_exit_tfm; 266 272 267 - inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey; 268 - inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt; 269 - inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt; 273 + inst->alg.setkey = crypto_cbc_setkey; 274 + inst->alg.encrypt = crypto_cbc_encrypt; 275 + inst->alg.decrypt = crypto_cbc_decrypt; 270 276 271 - out_put_alg: 272 - crypto_mod_put(alg); 273 - return inst; 274 - } 277 + inst->free = crypto_cbc_free; 275 278 276 - static void crypto_cbc_free(struct crypto_instance *inst) 277 - { 278 - crypto_drop_spawn(crypto_instance_ctx(inst)); 279 + err = skcipher_register_instance(tmpl, inst); 280 + if (err) 281 + goto err_drop_spawn; 282 + 283 + out: 284 + return err; 285 + 286 + err_drop_spawn: 287 + crypto_drop_spawn(spawn); 288 + err_free_inst: 279 289 kfree(inst); 290 + goto out; 280 291 } 281 292 282 293 static struct crypto_template crypto_cbc_tmpl = { 283 294 .name = "cbc", 284 - .alloc = crypto_cbc_alloc, 285 - .free = crypto_cbc_free, 295 + .create = crypto_cbc_create, 286 296 .module = THIS_MODULE, 287 297 }; 288 298