Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[CRYPTO] padlock: Convert padlock-sha to use crypto_hash

This patch converts padlock-sha to use crypto_hash for its fallback.
It also changes the fallback selection to select by type instead of by
name. This is done through the new CRYPTO_ALG_NEED_FALLBACK bit,
which is set if and only if an algorithm needs a fallback of the same
type.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+40 -55
+34 -55
drivers/crypto/padlock-sha.c
··· 12 12 * 13 13 */ 14 14 15 + #include <crypto/algapi.h> 16 + #include <linux/err.h> 15 17 #include <linux/module.h> 16 18 #include <linux/init.h> 17 19 #include <linux/errno.h> 18 - #include <linux/crypto.h> 19 20 #include <linux/cryptohash.h> 20 21 #include <linux/interrupt.h> 21 22 #include <linux/kernel.h> ··· 31 30 #define SHA256_DIGEST_SIZE 32 32 31 #define SHA256_HMAC_BLOCK_SIZE 64 33 32 34 - static char *sha1_fallback = SHA1_DEFAULT_FALLBACK; 35 - static char *sha256_fallback = SHA256_DEFAULT_FALLBACK; 36 - 37 - module_param(sha1_fallback, charp, 0644); 38 - module_param(sha256_fallback, charp, 0644); 39 - 40 - MODULE_PARM_DESC(sha1_fallback, "Fallback driver for SHA1. Default is " 41 - SHA1_DEFAULT_FALLBACK); 42 - MODULE_PARM_DESC(sha256_fallback, "Fallback driver for SHA256. Default is " 43 - SHA256_DEFAULT_FALLBACK); 44 - 45 33 struct padlock_sha_ctx { 46 34 char *data; 47 35 size_t used; 48 36 int bypass; 49 37 void (*f_sha_padlock)(const char *in, char *out, int count); 50 - struct crypto_tfm *fallback_tfm; 38 + struct hash_desc fallback; 51 39 }; 52 40 53 41 static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) 54 42 { 55 - return (struct padlock_sha_ctx *)(crypto_tfm_ctx(tfm)); 43 + return crypto_tfm_ctx(tfm); 56 44 } 57 45 58 46 /* We'll need aligned address on the stack */ ··· 55 65 if (ctx(tfm)->bypass) 56 66 return; 57 67 58 - BUG_ON(!ctx(tfm)->fallback_tfm); 59 - 60 - crypto_digest_init(ctx(tfm)->fallback_tfm); 68 + crypto_hash_init(&ctx(tfm)->fallback); 61 69 if (ctx(tfm)->data && ctx(tfm)->used) { 62 70 struct scatterlist sg; 63 71 64 72 sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used); 65 - crypto_digest_update(ctx(tfm)->fallback_tfm, &sg, 1); 73 + crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); 66 74 } 67 75 68 76 ctx(tfm)->used = 0; ··· 83 95 84 96 if (unlikely(ctx(tfm)->bypass)) { 85 97 struct scatterlist sg; 86 - BUG_ON(!ctx(tfm)->fallback_tfm); 87 98 sg_set_buf(&sg, (uint8_t *)data, length); 88 - 
crypto_digest_update(ctx(tfm)->fallback_tfm, &sg, 1); 99 + crypto_hash_update(&ctx(tfm)->fallback, &sg, length); 89 100 return; 90 101 } 91 102 ··· 147 160 static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) 148 161 { 149 162 if (unlikely(ctx(tfm)->bypass)) { 150 - BUG_ON(!ctx(tfm)->fallback_tfm); 151 - crypto_digest_final(ctx(tfm)->fallback_tfm, out); 163 + crypto_hash_final(&ctx(tfm)->fallback, out); 152 164 ctx(tfm)->bypass = 0; 153 165 return; 154 166 } ··· 158 172 ctx(tfm)->used = 0; 159 173 } 160 174 161 - static int padlock_cra_init(struct crypto_tfm *tfm, const char *fallback_driver_name) 175 + static int padlock_cra_init(struct crypto_tfm *tfm) 162 176 { 177 + const char *fallback_driver_name = tfm->__crt_alg->cra_name; 178 + struct crypto_hash *fallback_tfm; 179 + 163 180 /* For now we'll allocate one page. This 164 181 * could eventually be configurable one day. */ 165 182 ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); ··· 170 181 return -ENOMEM; 171 182 172 183 /* Allocate a fallback and abort if it failed. 
*/ 173 - ctx(tfm)->fallback_tfm = crypto_alloc_tfm(fallback_driver_name, 0); 174 - if (!ctx(tfm)->fallback_tfm) { 184 + fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, 185 + CRYPTO_ALG_ASYNC | 186 + CRYPTO_ALG_NEED_FALLBACK); 187 + if (IS_ERR(fallback_tfm)) { 175 188 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", 176 189 fallback_driver_name); 177 190 free_page((unsigned long)(ctx(tfm)->data)); 178 - return -ENOENT; 191 + return PTR_ERR(fallback_tfm); 179 192 } 180 193 194 + ctx(tfm)->fallback.tfm = fallback_tfm; 181 195 return 0; 182 196 } 183 197 ··· 188 196 { 189 197 ctx(tfm)->f_sha_padlock = padlock_do_sha1; 190 198 191 - return padlock_cra_init(tfm, sha1_fallback); 199 + return padlock_cra_init(tfm); 192 200 } 193 201 194 202 static int padlock_sha256_cra_init(struct crypto_tfm *tfm) 195 203 { 196 204 ctx(tfm)->f_sha_padlock = padlock_do_sha256; 197 205 198 - return padlock_cra_init(tfm, sha256_fallback); 206 + return padlock_cra_init(tfm); 199 207 } 200 208 201 209 static void padlock_cra_exit(struct crypto_tfm *tfm) ··· 205 213 ctx(tfm)->data = NULL; 206 214 } 207 215 208 - BUG_ON(!ctx(tfm)->fallback_tfm); 209 - crypto_free_tfm(ctx(tfm)->fallback_tfm); 210 - ctx(tfm)->fallback_tfm = NULL; 216 + crypto_free_hash(ctx(tfm)->fallback.tfm); 217 + ctx(tfm)->fallback.tfm = NULL; 211 218 } 212 219 213 220 static struct crypto_alg sha1_alg = { 214 221 .cra_name = "sha1", 215 222 .cra_driver_name = "sha1-padlock", 216 223 .cra_priority = PADLOCK_CRA_PRIORITY, 217 - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 224 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 225 + CRYPTO_ALG_NEED_FALLBACK, 218 226 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, 219 227 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 220 228 .cra_module = THIS_MODULE, ··· 235 243 .cra_name = "sha256", 236 244 .cra_driver_name = "sha256-padlock", 237 245 .cra_priority = PADLOCK_CRA_PRIORITY, 238 - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 246 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 247 + 
CRYPTO_ALG_NEED_FALLBACK, 239 248 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, 240 249 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 241 250 .cra_module = THIS_MODULE, ··· 255 262 256 263 static void __init padlock_sha_check_fallbacks(void) 257 264 { 258 - struct crypto_tfm *tfm; 265 + if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC | 266 + CRYPTO_ALG_NEED_FALLBACK)) 267 + printk(KERN_WARNING PFX 268 + "Couldn't load fallback module for sha1.\n"); 259 269 260 - /* We'll try to allocate one TFM for each fallback 261 - * to test that the modules are available. */ 262 - tfm = crypto_alloc_tfm(sha1_fallback, 0); 263 - if (!tfm) { 264 - printk(KERN_WARNING PFX "Couldn't load fallback module for '%s'. Tried '%s'.\n", 265 - sha1_alg.cra_name, sha1_fallback); 266 - } else { 267 - printk(KERN_NOTICE PFX "Fallback for '%s' is driver '%s' (prio=%d)\n", sha1_alg.cra_name, 268 - crypto_tfm_alg_driver_name(tfm), crypto_tfm_alg_priority(tfm)); 269 - crypto_free_tfm(tfm); 270 - } 271 - 272 - tfm = crypto_alloc_tfm(sha256_fallback, 0); 273 - if (!tfm) { 274 - printk(KERN_WARNING PFX "Couldn't load fallback module for '%s'. Tried '%s'.\n", 275 - sha256_alg.cra_name, sha256_fallback); 276 - } else { 277 - printk(KERN_NOTICE PFX "Fallback for '%s' is driver '%s' (prio=%d)\n", sha256_alg.cra_name, 278 - crypto_tfm_alg_driver_name(tfm), crypto_tfm_alg_priority(tfm)); 279 - crypto_free_tfm(tfm); 280 - } 270 + if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC | 271 + CRYPTO_ALG_NEED_FALLBACK)) 272 + printk(KERN_WARNING PFX 273 + "Couldn't load fallback module for sha256.\n"); 281 274 } 282 275 283 276 static int __init padlock_init(void)
+6
include/linux/crypto.h
··· 43 43 #define CRYPTO_ALG_ASYNC 0x00000080 44 44 45 45 /* 46 + * Set this bit if and only if the algorithm requires another algorithm of 47 + * the same type to handle corner cases. 48 + */ 49 + #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 50 + 51 + /* 46 52 * Transform masks and values (for crt_flags). 47 53 */ 48 54 #define CRYPTO_TFM_MODE_MASK 0x000000ff