Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[CRYPTO] users: Use crypto_comp and crypto_has_*

This patch converts all users to use the new crypto_comp type and the
crypto_has_* functions.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+60 -45
+4 -4
crypto/tcrypt.c
··· 749 749 { 750 750 unsigned int i; 751 751 char result[COMP_BUF_SIZE]; 752 - struct crypto_tfm *tfm; 752 + struct crypto_comp *tfm; 753 753 struct comp_testvec *tv; 754 754 unsigned int tsize; 755 755 ··· 821 821 ilen, dlen); 822 822 } 823 823 out: 824 - crypto_free_tfm(tfm); 824 + crypto_free_comp(tfm); 825 825 } 826 826 827 827 static void test_available(void) ··· 830 830 831 831 while (*name) { 832 832 printk("alg %s ", *name); 833 - printk((crypto_alg_available(*name, 0)) ? 834 - "found\n" : "not found\n"); 833 + printk(crypto_has_alg(*name, 0, CRYPTO_ALG_ASYNC) ? 834 + "found\n" : "not found\n"); 835 835 name++; 836 836 } 837 837 }
+3 -3
drivers/crypto/padlock.c
··· 26 26 { 27 27 int success = 0; 28 28 29 - if (crypto_alg_available("aes-padlock", 0)) 29 + if (crypto_has_cipher("aes-padlock", 0, 0)) 30 30 success++; 31 31 32 - if (crypto_alg_available("sha1-padlock", 0)) 32 + if (crypto_has_hash("sha1-padlock", 0, 0)) 33 33 success++; 34 34 35 - if (crypto_alg_available("sha256-padlock", 0)) 35 + if (crypto_has_hash("sha256-padlock", 0, 0)) 36 36 success++; 37 37 38 38 if (!success) {
+2 -2
drivers/net/ppp_mppe.c
··· 710 710 static int __init ppp_mppe_init(void) 711 711 { 712 712 int answer; 713 - if (!(crypto_alg_available("ecb(arc4)", 0) && 714 - crypto_alg_available("sha1", 0))) 713 + if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && 714 + crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) 715 715 return -ENODEV; 716 716 717 717 sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
+5
include/linux/crypto.h
··· 928 928 return crypto_has_alg(alg_name, type, mask); 929 929 } 930 930 931 + static inline const char *crypto_comp_name(struct crypto_comp *tfm) 932 + { 933 + return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); 934 + } 935 + 931 936 static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) 932 937 { 933 938 return &crypto_comp_tfm(tfm)->crt_compress;
+2 -3
include/net/ipcomp.h
··· 1 1 #ifndef _NET_IPCOMP_H 2 2 #define _NET_IPCOMP_H 3 3 4 + #include <linux/crypto.h> 4 5 #include <linux/types.h> 5 6 6 7 #define IPCOMP_SCRATCH_SIZE 65400 7 8 8 - struct crypto_tfm; 9 - 10 9 struct ipcomp_data { 11 10 u16 threshold; 12 - struct crypto_tfm **tfms; 11 + struct crypto_comp **tfms; 13 12 }; 14 13 15 14 #endif
+13 -12
net/ipv4/ipcomp.c
··· 32 32 33 33 struct ipcomp_tfms { 34 34 struct list_head list; 35 - struct crypto_tfm **tfms; 35 + struct crypto_comp **tfms; 36 36 int users; 37 37 }; 38 38 ··· 46 46 int err, plen, dlen; 47 47 struct ipcomp_data *ipcd = x->data; 48 48 u8 *start, *scratch; 49 - struct crypto_tfm *tfm; 49 + struct crypto_comp *tfm; 50 50 int cpu; 51 51 52 52 plen = skb->len; ··· 107 107 struct iphdr *iph = skb->nh.iph; 108 108 struct ipcomp_data *ipcd = x->data; 109 109 u8 *start, *scratch; 110 - struct crypto_tfm *tfm; 110 + struct crypto_comp *tfm; 111 111 int cpu; 112 112 113 113 ihlen = iph->ihl * 4; ··· 302 302 return scratches; 303 303 } 304 304 305 - static void ipcomp_free_tfms(struct crypto_tfm **tfms) 305 + static void ipcomp_free_tfms(struct crypto_comp **tfms) 306 306 { 307 307 struct ipcomp_tfms *pos; 308 308 int cpu; ··· 324 324 return; 325 325 326 326 for_each_possible_cpu(cpu) { 327 - struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 328 - crypto_free_tfm(tfm); 327 + struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 328 + crypto_free_comp(tfm); 329 329 } 330 330 free_percpu(tfms); 331 331 } 332 332 333 - static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name) 333 + static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) 334 334 { 335 335 struct ipcomp_tfms *pos; 336 - struct crypto_tfm **tfms; 336 + struct crypto_comp **tfms; 337 337 int cpu; 338 338 339 339 /* This can be any valid CPU ID so we don't need locking. */ 340 340 cpu = raw_smp_processor_id(); 341 341 342 342 list_for_each_entry(pos, &ipcomp_tfms_list, list) { 343 - struct crypto_tfm *tfm; 343 + struct crypto_comp *tfm; 344 344 345 345 tfms = pos->tfms; 346 346 tfm = *per_cpu_ptr(tfms, cpu); 347 347 348 - if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) { 348 + if (!strcmp(crypto_comp_name(tfm), alg_name)) { 349 349 pos->users++; 350 350 return tfms; 351 351 } ··· 359 359 INIT_LIST_HEAD(&pos->list); 360 360 list_add(&pos->list, &ipcomp_tfms_list); 361 361 362 - pos->tfms = tfms = alloc_percpu(struct crypto_tfm *); 362 + pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 363 363 if (!tfms) 364 364 goto error; 365 365 366 366 for_each_possible_cpu(cpu) { 367 - struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 367 + struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 368 + CRYPTO_ALG_ASYNC); 368 369 if (!tfm) 369 370 goto error; 370 371 *per_cpu_ptr(tfms, cpu) = tfm
+13 -12
net/ipv6/ipcomp6.c
··· 53 53 54 54 struct ipcomp6_tfms { 55 55 struct list_head list; 56 - struct crypto_tfm **tfms; 56 + struct crypto_comp **tfms; 57 57 int users; 58 58 }; 59 59 ··· 70 70 int plen, dlen; 71 71 struct ipcomp_data *ipcd = x->data; 72 72 u8 *start, *scratch; 73 - struct crypto_tfm *tfm; 73 + struct crypto_comp *tfm; 74 74 int cpu; 75 75 76 76 if (skb_linearize_cow(skb)) ··· 129 129 struct ipcomp_data *ipcd = x->data; 130 130 int plen, dlen; 131 131 u8 *start, *scratch; 132 - struct crypto_tfm *tfm; 132 + struct crypto_comp *tfm; 133 133 int cpu; 134 134 135 135 hdr_len = skb->h.raw - skb->data; ··· 301 301 return scratches; 302 302 } 303 303 304 - static void ipcomp6_free_tfms(struct crypto_tfm **tfms) 304 + static void ipcomp6_free_tfms(struct crypto_comp **tfms) 305 305 { 306 306 struct ipcomp6_tfms *pos; 307 307 int cpu; ··· 323 323 return; 324 324 325 325 for_each_possible_cpu(cpu) { 326 - struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 327 - crypto_free_tfm(tfm); 326 + struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 327 + crypto_free_comp(tfm); 328 328 } 329 329 free_percpu(tfms); 330 330 } 331 331 332 - static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name) 332 + static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name) 333 333 { 334 334 struct ipcomp6_tfms *pos; 335 - struct crypto_tfm **tfms; 335 + struct crypto_comp **tfms; 336 336 int cpu; 337 337 338 338 /* This can be any valid CPU ID so we don't need locking. */ 339 339 cpu = raw_smp_processor_id(); 340 340 341 341 list_for_each_entry(pos, &ipcomp6_tfms_list, list) { 342 - struct crypto_tfm *tfm; 342 + struct crypto_comp *tfm; 343 343 344 344 tfms = pos->tfms; 345 345 tfm = *per_cpu_ptr(tfms, cpu); 346 346 347 - if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) { 347 + if (!strcmp(crypto_comp_name(tfm), alg_name)) { 348 348 pos->users++; 349 349 return tfms; 350 350 } ··· 358 358 INIT_LIST_HEAD(&pos->list); 359 359 list_add(&pos->list, &ipcomp6_tfms_list); 360 360 361 - pos->tfms = tfms = alloc_percpu(struct crypto_tfm *); 361 + pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 362 362 if (!tfms) 363 363 goto error; 364 364 365 365 for_each_possible_cpu(cpu) { 366 - struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 366 + struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 367 + CRYPTO_ALG_ASYNC); 367 368 if (!tfm) 368 369 goto error; 369 370 *per_cpu_ptr(tfms, cpu) = tfm
+18 -9
net/xfrm/xfrm_algo.c
··· 363 363 EXPORT_SYMBOL_GPL(xfrm_calg_get_byid); 364 364 365 365 static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, 366 - int entries, char *name, 367 - int probe) 366 + int entries, u32 type, u32 mask, 367 + char *name, int probe) 368 368 { 369 369 int i, status; 370 370 ··· 382 382 if (!probe) 383 383 break; 384 384 385 - status = crypto_alg_available(name, 0); 385 + status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC); 386 386 if (!status) 387 387 break; 388 388 ··· 394 394 395 395 struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe) 396 396 { 397 - return xfrm_get_byname(aalg_list, aalg_entries(), name, probe); 397 + return xfrm_get_byname(aalg_list, aalg_entries(), 398 + CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK, 399 + name, probe); 398 400 } 399 401 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname); 400 402 401 403 struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe) 402 404 { 403 - return xfrm_get_byname(ealg_list, ealg_entries(), name, probe); 405 + return xfrm_get_byname(ealg_list, ealg_entries(), 406 + CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK, 407 + name, probe); 404 408 } 405 409 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname); 406 410 407 411 struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe) 408 412 { 409 - return xfrm_get_byname(calg_list, calg_entries(), name, probe); 413 + return xfrm_get_byname(calg_list, calg_entries(), 414 + CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK, 415 + name, probe); 410 416 } 411 417 EXPORT_SYMBOL_GPL(xfrm_calg_get_byname); 412 418 ··· 447 441 BUG_ON(in_softirq()); 448 442 449 443 for (i = 0; i < aalg_entries(); i++) { 450 - status = crypto_alg_available(aalg_list[i].name, 0); 444 + status = crypto_has_hash(aalg_list[i].name, 0, 445 + CRYPTO_ALG_ASYNC); 451 446 if (aalg_list[i].available != status) 452 447 aalg_list[i].available = status; 453 448 } 454 449 455 450 for (i = 0; i < ealg_entries(); i++) { 456 - status = crypto_alg_available(ealg_list[i].name, 0); 451 + status = crypto_has_blkcipher(ealg_list[i].name, 0, 452 + CRYPTO_ALG_ASYNC); 457 453 if (ealg_list[i].available != status) 458 454 ealg_list[i].available = status; 459 455 } 460 456 461 457 for (i = 0; i < calg_entries(); i++) { 462 - status = crypto_alg_available(calg_list[i].name, 0); 458 + status = crypto_has_comp(calg_list[i].name, 0, 459 + CRYPTO_ALG_ASYNC); 463 460 if (calg_list[i].available != status) 464 461 calg_list[i].available = status; 465 462 }