Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[IPSEC]: Use HMAC template and hash interface

This patch converts IPsec to use the new HMAC template. The names of
existing simple digest algorithms may still be used to refer to their
HMAC composites.

The same structure can be used by other MACs such as AES-XCBC-MAC.

This patch also switches from the digest interface to hash.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>

+157 -96
+18 -11
include/net/ah.h
··· 15 15 int icv_full_len; 16 16 int icv_trunc_len; 17 17 18 - void (*icv)(struct ah_data*, 19 - struct sk_buff *skb, u8 *icv); 20 - 21 - struct crypto_tfm *tfm; 18 + struct crypto_hash *tfm; 22 19 }; 23 20 24 - static inline void 25 - ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data) 21 + static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb, 22 + u8 *auth_data) 26 23 { 27 - struct crypto_tfm *tfm = ahp->tfm; 24 + struct hash_desc desc; 25 + int err; 26 + 27 + desc.tfm = ahp->tfm; 28 + desc.flags = 0; 28 29 29 30 memset(auth_data, 0, ahp->icv_trunc_len); 30 - crypto_hmac_init(tfm, ahp->key, &ahp->key_len); 31 - skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update); 32 - crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv); 33 - memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len); 31 + err = crypto_hash_init(&desc); 32 + if (unlikely(err)) 33 + goto out; 34 + err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update); 35 + if (unlikely(err)) 36 + goto out; 37 + err = crypto_hash_final(&desc, ahp->work_icv); 38 + 39 + out: 40 + return err; 34 41 } 35 42 36 43 #endif
+15 -11
include/net/esp.h
··· 35 35 void (*icv)(struct esp_data*, 36 36 struct sk_buff *skb, 37 37 int offset, int len, u8 *icv); 38 - struct crypto_tfm *tfm; 38 + struct crypto_hash *tfm; 39 39 } auth; 40 40 }; 41 41 ··· 43 43 extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 44 44 extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); 45 45 46 - static inline void 47 - esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset, 48 - int len, u8 *auth_data) 46 + static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb, 47 + int offset, int len) 49 48 { 50 - struct crypto_tfm *tfm = esp->auth.tfm; 51 - char *icv = esp->auth.work_icv; 49 + struct hash_desc desc; 50 + int err; 52 51 53 - memset(auth_data, 0, esp->auth.icv_trunc_len); 54 - crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len); 55 - skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update); 56 - crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv); 57 - memcpy(auth_data, icv, esp->auth.icv_trunc_len); 52 + desc.tfm = esp->auth.tfm; 53 + desc.flags = 0; 54 + 55 + err = crypto_hash_init(&desc); 56 + if (unlikely(err)) 57 + return err; 58 + err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update); 59 + if (unlikely(err)) 60 + return err; 61 + return crypto_hash_final(&desc, esp->auth.work_icv); 58 62 } 59 63 60 64 #endif
+5 -4
include/net/xfrm.h
··· 984 984 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe); 985 985 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe); 986 986 987 - struct crypto_tfm; 987 + struct hash_desc; 988 988 struct scatterlist; 989 - typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int); 989 + typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *, 990 + unsigned int); 990 991 991 - extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, 992 - int offset, int len, icv_update_fn_t icv_update); 992 + extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm, 993 + int offset, int len, icv_update_fn_t icv_update); 993 994 994 995 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b, 995 996 int family)
+24 -12
net/ipv4/ah4.c
··· 1 + #include <linux/err.h> 1 2 #include <linux/module.h> 2 3 #include <net/ip.h> 3 4 #include <net/xfrm.h> ··· 98 97 ah->spi = x->id.spi; 99 98 ah->seq_no = htonl(++x->replay.oseq); 100 99 xfrm_aevent_doreplay(x); 101 - ahp->icv(ahp, skb, ah->auth_data); 100 + err = ah_mac_digest(ahp, skb, ah->auth_data); 101 + if (err) 102 + goto error; 103 + memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); 102 104 103 105 top_iph->tos = iph->tos; 104 106 top_iph->ttl = iph->ttl; ··· 123 119 { 124 120 int ah_hlen; 125 121 int ihl; 122 + int err = -EINVAL; 126 123 struct iphdr *iph; 127 124 struct ip_auth_hdr *ah; 128 125 struct ah_data *ahp; ··· 171 166 172 167 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 173 168 skb_push(skb, ihl); 174 - ahp->icv(ahp, skb, ah->auth_data); 175 - if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { 169 + err = ah_mac_digest(ahp, skb, ah->auth_data); 170 + if (err) 171 + goto out; 172 + err = -EINVAL; 173 + if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) { 176 174 x->stats.integrity_failed++; 177 175 goto out; 178 176 } ··· 187 179 return 0; 188 180 189 181 out: 190 - return -EINVAL; 182 + return err; 191 183 } 192 184 193 185 static void ah4_err(struct sk_buff *skb, u32 info) ··· 212 204 { 213 205 struct ah_data *ahp = NULL; 214 206 struct xfrm_algo_desc *aalg_desc; 207 + struct crypto_hash *tfm; 215 208 216 209 if (!x->aalg) 217 210 goto error; ··· 230 221 231 222 ahp->key = x->aalg->alg_key; 232 223 ahp->key_len = (x->aalg->alg_key_len+7)/8; 233 - ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 234 - if (!ahp->tfm) 224 + tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); 225 + if (IS_ERR(tfm)) 235 226 goto error; 236 - ahp->icv = ah_hmac_digest; 227 + 228 + ahp->tfm = tfm; 229 + if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) 230 + goto error; 237 231 238 232 /* 239 233 * Lookup the algorithm description maintained by xfrm_algo, 240 234 * verify crypto transform properties, and store information 241 235 * we need for AH processing. This lookup cannot fail here 242 - * after a successful crypto_alloc_tfm(). 236 + * after a successful crypto_alloc_hash(). 243 237 */ 244 238 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 245 239 BUG_ON(!aalg_desc); 246 240 247 241 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 248 - crypto_tfm_alg_digestsize(ahp->tfm)) { 242 + crypto_hash_digestsize(tfm)) { 249 243 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 250 - x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), 244 + x->aalg->alg_name, crypto_hash_digestsize(tfm), 251 245 aalg_desc->uinfo.auth.icv_fullbits/8); 252 246 goto error; 253 247 } ··· 274 262 error: 275 263 if (ahp) { 276 264 kfree(ahp->work_icv); 277 - crypto_free_tfm(ahp->tfm); 265 + crypto_free_hash(ahp->tfm); 278 266 kfree(ahp); 279 267 } 280 268 return -EINVAL; ··· 289 277 290 278 kfree(ahp->work_icv); 291 279 ahp->work_icv = NULL; 292 - crypto_free_tfm(ahp->tfm); 280 + crypto_free_hash(ahp->tfm); 293 281 ahp->tfm = NULL; 294 282 kfree(ahp); 295 283 }
+21 -15
net/ipv4/esp4.c
··· 121 121 } 122 122 123 123 if (esp->auth.icv_full_len) { 124 - esp->auth.icv(esp, skb, (u8*)esph-skb->data, 125 - sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); 126 - pskb_put(skb, trailer, alen); 124 + err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, 125 + sizeof(*esph) + esp->conf.ivlen + clen); 126 + memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); 127 127 } 128 128 129 129 ip_send_check(top_iph); ··· 163 163 164 164 /* If integrity check is required, do this. */ 165 165 if (esp->auth.icv_full_len) { 166 - u8 sum[esp->auth.icv_full_len]; 167 - u8 sum1[alen]; 168 - 169 - esp->auth.icv(esp, skb, 0, skb->len-alen, sum); 166 + u8 sum[alen]; 170 167 171 - if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) 168 + err = esp_mac_digest(esp, skb, 0, skb->len - alen); 169 + if (err) 170 + goto out; 171 + 172 + if (skb_copy_bits(skb, skb->len - alen, sum, alen)) 172 173 BUG(); 173 174 174 - if (unlikely(memcmp(sum, sum1, alen))) { 175 + if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) { 175 176 x->stats.integrity_failed++; 176 177 goto out; 177 178 } ··· 308 307 esp->conf.tfm = NULL; 309 308 kfree(esp->conf.ivec); 310 309 esp->conf.ivec = NULL; 311 - crypto_free_tfm(esp->auth.tfm); 310 + crypto_free_hash(esp->auth.tfm); 312 311 esp->auth.tfm = NULL; 313 312 kfree(esp->auth.work_icv); 314 313 esp->auth.work_icv = NULL; ··· 334 333 335 334 if (x->aalg) { 336 335 struct xfrm_algo_desc *aalg_desc; 336 + struct crypto_hash *hash; 337 337 338 338 esp->auth.key = x->aalg->alg_key; 339 339 esp->auth.key_len = (x->aalg->alg_key_len+7)/8; 340 - esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 341 - if (esp->auth.tfm == NULL) 340 + hash = crypto_alloc_hash(x->aalg->alg_name, 0, 341 + CRYPTO_ALG_ASYNC); 342 + if (IS_ERR(hash)) 342 343 goto error; 343 - esp->auth.icv = esp_hmac_digest; 344 + 345 + esp->auth.tfm = hash; 346 + if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) 347 + goto error; 344 348 345 349 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 346 350 BUG_ON(!aalg_desc); 347 351 348 352 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 349 - crypto_tfm_alg_digestsize(esp->auth.tfm)) { 353 + crypto_hash_digestsize(hash)) { 350 354 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 351 355 x->aalg->alg_name, 352 - crypto_tfm_alg_digestsize(esp->auth.tfm), 356 + crypto_hash_digestsize(hash), 353 357 aalg_desc->uinfo.auth.icv_fullbits/8); 354 358 goto error; 355 359 }
+23 -12
net/ipv6/ah6.c
··· 213 213 ah->spi = x->id.spi; 214 214 ah->seq_no = htonl(++x->replay.oseq); 215 215 xfrm_aevent_doreplay(x); 216 - ahp->icv(ahp, skb, ah->auth_data); 216 + err = ah_mac_digest(ahp, skb, ah->auth_data); 217 + if (err) 218 + goto error_free_iph; 219 + memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); 217 220 218 221 err = 0; 219 222 ··· 254 251 u16 hdr_len; 255 252 u16 ah_hlen; 256 253 int nexthdr; 254 + int err = -EINVAL; 257 255 258 256 if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) 259 257 goto out; ··· 296 292 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 297 293 memset(ah->auth_data, 0, ahp->icv_trunc_len); 298 294 skb_push(skb, hdr_len); 299 - ahp->icv(ahp, skb, ah->auth_data); 300 - if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { 295 + err = ah_mac_digest(ahp, skb, ah->auth_data); 296 + if (err) 297 + goto free_out; 298 + err = -EINVAL; 299 + if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) { 301 300 LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n"); 302 301 x->stats.integrity_failed++; 303 302 goto free_out; ··· 317 310 free_out: 318 311 kfree(tmp_hdr); 319 312 out: 320 - return -EINVAL; 313 + return err; 321 314 } 322 315 323 316 static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ··· 345 338 { 346 339 struct ah_data *ahp = NULL; 347 340 struct xfrm_algo_desc *aalg_desc; 341 + struct crypto_hash *tfm; 348 342 349 343 if (!x->aalg) 350 344 goto error; ··· 363 355 364 356 ahp->key = x->aalg->alg_key; 365 357 ahp->key_len = (x->aalg->alg_key_len+7)/8; 366 - ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 367 - if (!ahp->tfm) 358 + tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); 359 + if (IS_ERR(tfm)) 368 360 goto error; 369 - ahp->icv = ah_hmac_digest; 361 + 362 + ahp->tfm = tfm; 363 + if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) 364 + goto error; 370 365 371 366 /* 372 367 * Lookup the algorithm description maintained by xfrm_algo, 373 368 * verify crypto transform properties, and store information 374 369 * we need for AH processing. This lookup cannot fail here 375 - * after a successful crypto_alloc_tfm(). 370 + * after a successful crypto_alloc_hash(). 376 371 */ 377 372 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 378 373 BUG_ON(!aalg_desc); 379 374 380 375 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 381 - crypto_tfm_alg_digestsize(ahp->tfm)) { 376 + crypto_hash_digestsize(tfm)) { 382 377 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 383 - x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), 378 + x->aalg->alg_name, crypto_hash_digestsize(tfm), 384 379 aalg_desc->uinfo.auth.icv_fullbits/8); 385 380 goto error; 386 381 } ··· 407 396 error: 408 397 if (ahp) { 409 398 kfree(ahp->work_icv); 410 - crypto_free_tfm(ahp->tfm); 399 + crypto_free_hash(ahp->tfm); 411 400 kfree(ahp); 412 401 } 413 402 return -EINVAL; ··· 422 411 423 412 kfree(ahp->work_icv); 424 413 ahp->work_icv = NULL; 425 - crypto_free_tfm(ahp->tfm); 414 + crypto_free_hash(ahp->tfm); 426 415 ahp->tfm = NULL; 427 416 kfree(ahp); 428 417 }
+24 -18
net/ipv6/esp6.c
··· 125 125 } 126 126 127 127 if (esp->auth.icv_full_len) { 128 - esp->auth.icv(esp, skb, (u8*)esph-skb->data, 129 - sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); 130 - pskb_put(skb, trailer, alen); 128 + err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, 129 + sizeof(*esph) + esp->conf.ivlen + clen); 130 + memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); 131 131 } 132 132 133 133 error: ··· 162 162 163 163 /* If integrity check is required, do this. */ 164 164 if (esp->auth.icv_full_len) { 165 - u8 sum[esp->auth.icv_full_len]; 166 - u8 sum1[alen]; 165 + u8 sum[alen]; 167 166 168 - esp->auth.icv(esp, skb, 0, skb->len-alen, sum); 167 + ret = esp_mac_digest(esp, skb, 0, skb->len - alen); 168 + if (ret) 169 + goto out; 169 170 170 - if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) 171 + if (skb_copy_bits(skb, skb->len - alen, sum, alen)) 171 172 BUG(); 172 173 173 - if (unlikely(memcmp(sum, sum1, alen))) { 174 + if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) { 174 175 x->stats.integrity_failed++; 175 176 ret = -EINVAL; 176 177 goto out; ··· 280 279 esp->conf.tfm = NULL; 281 280 kfree(esp->conf.ivec); 282 281 esp->conf.ivec = NULL; 283 - crypto_free_tfm(esp->auth.tfm); 282 + crypto_free_hash(esp->auth.tfm); 284 283 esp->auth.tfm = NULL; 285 284 kfree(esp->auth.work_icv); 286 285 esp->auth.work_icv = NULL; ··· 309 308 310 309 if (x->aalg) { 311 310 struct xfrm_algo_desc *aalg_desc; 311 + struct crypto_hash *hash; 312 312 313 313 esp->auth.key = x->aalg->alg_key; 314 314 esp->auth.key_len = (x->aalg->alg_key_len+7)/8; 315 - esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); 316 - if (esp->auth.tfm == NULL) 315 + hash = crypto_alloc_hash(x->aalg->alg_name, 0, 316 + CRYPTO_ALG_ASYNC); 317 + if (IS_ERR(hash)) 317 318 goto error; 318 - esp->auth.icv = esp_hmac_digest; 319 + 320 + esp->auth.tfm = hash; 321 + if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) 322 + goto error; 319 323 320 324 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 321 325 BUG_ON(!aalg_desc); 322 326 323 327 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 324 - crypto_tfm_alg_digestsize(esp->auth.tfm)) { 325 - printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", 326 - x->aalg->alg_name, 327 - crypto_tfm_alg_digestsize(esp->auth.tfm), 328 - aalg_desc->uinfo.auth.icv_fullbits/8); 329 - goto error; 328 + crypto_hash_digestsize(hash)) { 329 + NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 330 + x->aalg->alg_name, 331 + crypto_hash_digestsize(hash), 332 + aalg_desc->uinfo.auth.icv_fullbits/8); 333 + goto error; 330 334 } 331 335 332 336 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
+27 -13
net/xfrm/xfrm_algo.c
··· 30 30 */ 31 31 static struct xfrm_algo_desc aalg_list[] = { 32 32 { 33 - .name = "digest_null", 33 + .name = "hmac(digest_null)", 34 + .compat = "digest_null", 34 35 35 36 .uinfo = { 36 37 .auth = { ··· 48 47 } 49 48 }, 50 49 { 51 - .name = "md5", 50 + .name = "hmac(md5)", 51 + .compat = "md5", 52 52 53 53 .uinfo = { 54 54 .auth = { ··· 66 64 } 67 65 }, 68 66 { 69 - .name = "sha1", 67 + .name = "hmac(sha1)", 68 + .compat = "sha1", 70 69 71 70 .uinfo = { 72 71 .auth = { ··· 84 81 } 85 82 }, 86 83 { 87 - .name = "sha256", 84 + .name = "hmac(sha256)", 85 + .compat = "sha256", 88 86 89 87 .uinfo = { 90 88 .auth = { ··· 102 98 } 103 99 }, 104 100 { 105 - .name = "ripemd160", 101 + .name = "hmac(ripemd160)", 102 + .compat = "ripemd160", 106 103 107 104 .uinfo = { 108 105 .auth = { ··· 485 480 486 481 /* Move to common area: it is shared with AH. */ 487 482 488 - void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, 489 - int offset, int len, icv_update_fn_t icv_update) 483 + int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, 484 + int offset, int len, icv_update_fn_t icv_update) 490 485 { 491 486 int start = skb_headlen(skb); 492 487 int i, copy = start - offset; 488 + int err; 493 489 struct scatterlist sg; 494 490 495 491 /* Checksum header. */ ··· 502 496 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; 503 497 sg.length = copy; 504 498 505 - icv_update(tfm, &sg, 1); 499 + err = icv_update(desc, &sg, copy); 500 + if (unlikely(err)) 501 + return err; 506 502 507 503 if ((len -= copy) == 0) 508 - return; 504 + return 0; 509 505 offset += copy; 510 506 } 511 507 ··· 527 519 sg.offset = frag->page_offset + offset-start; 528 520 sg.length = copy; 529 521 530 - icv_update(tfm, &sg, 1); 522 + err = icv_update(desc, &sg, copy); 523 + if (unlikely(err)) 524 + return err; 531 525 532 526 if (!(len -= copy)) 533 - return; 527 + return 0; 534 528 offset += copy; 535 529 } 536 530 start = end; ··· 550 540 if ((copy = end - offset) > 0) { 551 541 if (copy > len) 552 542 copy = len; 553 - skb_icv_walk(list, tfm, offset-start, copy, icv_update); 543 + err = skb_icv_walk(list, desc, offset-start, 544 + copy, icv_update); 545 + if (unlikely(err)) 546 + return err; 554 547 if ((len -= copy) == 0) 555 - return; 548 + return 0; 556 549 offset += copy; 557 550 } 558 551 start = end; 559 552 } 560 553 } 561 554 BUG_ON(len); 555 + return 0; 562 556 } 563 557 EXPORT_SYMBOL_GPL(skb_icv_walk); 564 558