Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2014-09-25

1) Remove useless hash_resize_mutex in xfrm_hash_resize().
This mutex is used only there, but xfrm_hash_resize()
can't be called concurrently at all. From Ying Xue.

2) Extend policy hashing to prefixed policies based on
prefix length thresholds. From Christophe Gouault.

3) Make the policy hash table thresholds configurable
via netlink. From Christophe Gouault.

4) Remove the maximum authentication length for AH.
This was needed to limit stack usage. We switched
already to allocate space, so no need to keep the
limit. From Herbert Xu.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+303 -38
-3
include/net/ah.h
··· 3 3 4 4 #include <linux/skbuff.h> 5 5 6 - /* This is the maximum truncated ICV length that we know of. */ 7 - #define MAX_AH_AUTH_LEN 64 8 - 9 6 struct crypto_ahash; 10 7 11 8 struct ah_data {
+14
include/net/netns/xfrm.h
··· 13 13 struct xfrm_policy_hash { 14 14 struct hlist_head *table; 15 15 unsigned int hmask; 16 + u8 dbits4; 17 + u8 sbits4; 18 + u8 dbits6; 19 + u8 sbits6; 20 + }; 21 + 22 + struct xfrm_policy_hthresh { 23 + struct work_struct work; 24 + seqlock_t lock; 25 + u8 lbits4; 26 + u8 rbits4; 27 + u8 lbits6; 28 + u8 rbits6; 16 29 }; 17 30 18 31 struct netns_xfrm { ··· 54 41 struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; 55 42 unsigned int policy_count[XFRM_POLICY_MAX * 2]; 56 43 struct work_struct policy_hash_work; 44 + struct xfrm_policy_hthresh policy_hthresh; 57 45 58 46 59 47 struct sock *nlsk;
+1
include/net/xfrm.h
··· 1591 1591 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, 1592 1592 u32 id, int delete, int *err); 1593 1593 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); 1594 + void xfrm_policy_hash_rebuild(struct net *net); 1594 1595 u32 xfrm_get_acqseq(void); 1595 1596 int verify_spi_info(u8 proto, u32 min, u32 max); 1596 1597 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+7
include/uapi/linux/xfrm.h
··· 328 328 XFRMA_SPD_UNSPEC, 329 329 XFRMA_SPD_INFO, 330 330 XFRMA_SPD_HINFO, 331 + XFRMA_SPD_IPV4_HTHRESH, 332 + XFRMA_SPD_IPV6_HTHRESH, 331 333 __XFRMA_SPD_MAX 332 334 333 335 #define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1) ··· 347 345 struct xfrmu_spdhinfo { 348 346 __u32 spdhcnt; 349 347 __u32 spdhmcnt; 348 + }; 349 + 350 + struct xfrmu_spdhthresh { 351 + __u8 lbits; 352 + __u8 rbits; 350 353 }; 351 354 352 355 struct xfrm_usersa_info {
-2
net/ipv4/ah4.c
··· 505 505 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 506 506 ahp->icv_trunc_len = x->aalg->alg_trunc_len/8; 507 507 508 - BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); 509 - 510 508 if (x->props.flags & XFRM_STATE_ALIGN4) 511 509 x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) + 512 510 ahp->icv_trunc_len);
-2
net/ipv6/ah6.c
··· 713 713 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 714 714 ahp->icv_trunc_len = x->aalg->alg_trunc_len/8; 715 715 716 - BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); 717 - 718 716 x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + 719 717 ahp->icv_trunc_len); 720 718 switch (x->props.mode) {
+66 -10
net/xfrm/xfrm_hash.h
··· 3 3 4 4 #include <linux/xfrm.h> 5 5 #include <linux/socket.h> 6 + #include <linux/jhash.h> 6 7 7 8 static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr) 8 9 { ··· 27 26 { 28 27 return ntohl(daddr->a6[2] ^ daddr->a6[3] ^ 29 28 saddr->a6[2] ^ saddr->a6[3]); 29 + } 30 + 31 + static inline u32 __bits2mask32(__u8 bits) 32 + { 33 + u32 mask32 = 0xffffffff; 34 + 35 + if (bits == 0) 36 + mask32 = 0; 37 + else if (bits < 32) 38 + mask32 <<= (32 - bits); 39 + 40 + return mask32; 41 + } 42 + 43 + static inline unsigned int __xfrm4_dpref_spref_hash(const xfrm_address_t *daddr, 44 + const xfrm_address_t *saddr, 45 + __u8 dbits, 46 + __u8 sbits) 47 + { 48 + return jhash_2words(ntohl(daddr->a4) & __bits2mask32(dbits), 49 + ntohl(saddr->a4) & __bits2mask32(sbits), 50 + 0); 51 + } 52 + 53 + static inline unsigned int __xfrm6_pref_hash(const xfrm_address_t *addr, 54 + __u8 prefixlen) 55 + { 56 + int pdw; 57 + int pbi; 58 + u32 initval = 0; 59 + 60 + pdw = prefixlen >> 5; /* num of whole u32 in prefix */ 61 + pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */ 62 + 63 + if (pbi) { 64 + __be32 mask; 65 + 66 + mask = htonl((0xffffffff) << (32 - pbi)); 67 + 68 + initval = (__force u32)(addr->a6[pdw] & mask); 69 + } 70 + 71 + return jhash2((__force u32 *)addr->a6, pdw, initval); 72 + } 73 + 74 + static inline unsigned int __xfrm6_dpref_spref_hash(const xfrm_address_t *daddr, 75 + const xfrm_address_t *saddr, 76 + __u8 dbits, 77 + __u8 sbits) 78 + { 79 + return __xfrm6_pref_hash(daddr, dbits) ^ 80 + __xfrm6_pref_hash(saddr, sbits); 30 81 } 31 82 32 83 static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr, ··· 137 84 } 138 85 139 86 static inline unsigned int __sel_hash(const struct xfrm_selector *sel, 140 - unsigned short family, unsigned int hmask) 87 + unsigned short family, unsigned int hmask, 88 + u8 dbits, u8 sbits) 141 89 { 142 90 const xfrm_address_t *daddr = &sel->daddr; 143 91 const xfrm_address_t *saddr = &sel->saddr; ··· 
146 92 147 93 switch (family) { 148 94 case AF_INET: 149 - if (sel->prefixlen_d != 32 || 150 - sel->prefixlen_s != 32) 95 + if (sel->prefixlen_d < dbits || 96 + sel->prefixlen_s < sbits) 151 97 return hmask + 1; 152 98 153 - h = __xfrm4_daddr_saddr_hash(daddr, saddr); 99 + h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits); 154 100 break; 155 101 156 102 case AF_INET6: 157 - if (sel->prefixlen_d != 128 || 158 - sel->prefixlen_s != 128) 103 + if (sel->prefixlen_d < dbits || 104 + sel->prefixlen_s < sbits) 159 105 return hmask + 1; 160 106 161 - h = __xfrm6_daddr_saddr_hash(daddr, saddr); 107 + h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits); 162 108 break; 163 109 } 164 110 h ^= (h >> 16); ··· 167 113 168 114 static inline unsigned int __addr_hash(const xfrm_address_t *daddr, 169 115 const xfrm_address_t *saddr, 170 - unsigned short family, unsigned int hmask) 116 + unsigned short family, 117 + unsigned int hmask, 118 + u8 dbits, u8 sbits) 171 119 { 172 120 unsigned int h = 0; 173 121 174 122 switch (family) { 175 123 case AF_INET: 176 - h = __xfrm4_daddr_saddr_hash(daddr, saddr); 124 + h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits); 177 125 break; 178 126 179 127 case AF_INET6: 180 - h = __xfrm6_daddr_saddr_hash(daddr, saddr); 128 + h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits); 181 129 break; 182 130 } 183 131 h ^= (h >> 16);
+134 -6
net/xfrm/xfrm_policy.c
··· 349 349 return __idx_hash(index, net->xfrm.policy_idx_hmask); 350 350 } 351 351 352 + /* calculate policy hash thresholds */ 353 + static void __get_hash_thresh(struct net *net, 354 + unsigned short family, int dir, 355 + u8 *dbits, u8 *sbits) 356 + { 357 + switch (family) { 358 + case AF_INET: 359 + *dbits = net->xfrm.policy_bydst[dir].dbits4; 360 + *sbits = net->xfrm.policy_bydst[dir].sbits4; 361 + break; 362 + 363 + case AF_INET6: 364 + *dbits = net->xfrm.policy_bydst[dir].dbits6; 365 + *sbits = net->xfrm.policy_bydst[dir].sbits6; 366 + break; 367 + 368 + default: 369 + *dbits = 0; 370 + *sbits = 0; 371 + } 372 + } 373 + 352 374 static struct hlist_head *policy_hash_bysel(struct net *net, 353 375 const struct xfrm_selector *sel, 354 376 unsigned short family, int dir) 355 377 { 356 378 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 357 - unsigned int hash = __sel_hash(sel, family, hmask); 379 + unsigned int hash; 380 + u8 dbits; 381 + u8 sbits; 382 + 383 + __get_hash_thresh(net, family, dir, &dbits, &sbits); 384 + hash = __sel_hash(sel, family, hmask, dbits, sbits); 358 385 359 386 return (hash == hmask + 1 ? 
360 387 &net->xfrm.policy_inexact[dir] : ··· 394 367 unsigned short family, int dir) 395 368 { 396 369 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 397 - unsigned int hash = __addr_hash(daddr, saddr, family, hmask); 370 + unsigned int hash; 371 + u8 dbits; 372 + u8 sbits; 373 + 374 + __get_hash_thresh(net, family, dir, &dbits, &sbits); 375 + hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits); 398 376 399 377 return net->xfrm.policy_bydst[dir].table + hash; 400 378 } 401 379 402 - static void xfrm_dst_hash_transfer(struct hlist_head *list, 380 + static void xfrm_dst_hash_transfer(struct net *net, 381 + struct hlist_head *list, 403 382 struct hlist_head *ndsttable, 404 - unsigned int nhashmask) 383 + unsigned int nhashmask, 384 + int dir) 405 385 { 406 386 struct hlist_node *tmp, *entry0 = NULL; 407 387 struct xfrm_policy *pol; 408 388 unsigned int h0 = 0; 389 + u8 dbits; 390 + u8 sbits; 409 391 410 392 redo: 411 393 hlist_for_each_entry_safe(pol, tmp, list, bydst) { 412 394 unsigned int h; 413 395 396 + __get_hash_thresh(net, pol->family, dir, &dbits, &sbits); 414 397 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, 415 - pol->family, nhashmask); 398 + pol->family, nhashmask, dbits, sbits); 416 399 if (!entry0) { 417 400 hlist_del(&pol->bydst); 418 401 hlist_add_head(&pol->bydst, ndsttable+h); ··· 476 439 write_lock_bh(&net->xfrm.xfrm_policy_lock); 477 440 478 441 for (i = hmask; i >= 0; i--) 479 - xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); 442 + xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir); 480 443 481 444 net->xfrm.policy_bydst[dir].table = ndst; 482 445 net->xfrm.policy_bydst[dir].hmask = nhashmask; ··· 570 533 571 534 mutex_unlock(&hash_resize_mutex); 572 535 } 536 + 537 + static void xfrm_hash_rebuild(struct work_struct *work) 538 + { 539 + struct net *net = container_of(work, struct net, 540 + xfrm.policy_hthresh.work); 541 + unsigned int hmask; 542 + struct xfrm_policy *pol; 543 + struct xfrm_policy 
*policy; 544 + struct hlist_head *chain; 545 + struct hlist_head *odst; 546 + struct hlist_node *newpos; 547 + int i; 548 + int dir; 549 + unsigned seq; 550 + u8 lbits4, rbits4, lbits6, rbits6; 551 + 552 + mutex_lock(&hash_resize_mutex); 553 + 554 + /* read selector prefixlen thresholds */ 555 + do { 556 + seq = read_seqbegin(&net->xfrm.policy_hthresh.lock); 557 + 558 + lbits4 = net->xfrm.policy_hthresh.lbits4; 559 + rbits4 = net->xfrm.policy_hthresh.rbits4; 560 + lbits6 = net->xfrm.policy_hthresh.lbits6; 561 + rbits6 = net->xfrm.policy_hthresh.rbits6; 562 + } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); 563 + 564 + write_lock_bh(&net->xfrm.xfrm_policy_lock); 565 + 566 + /* reset the bydst and inexact table in all directions */ 567 + for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 568 + INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 569 + hmask = net->xfrm.policy_bydst[dir].hmask; 570 + odst = net->xfrm.policy_bydst[dir].table; 571 + for (i = hmask; i >= 0; i--) 572 + INIT_HLIST_HEAD(odst + i); 573 + if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) { 574 + /* dir out => dst = remote, src = local */ 575 + net->xfrm.policy_bydst[dir].dbits4 = rbits4; 576 + net->xfrm.policy_bydst[dir].sbits4 = lbits4; 577 + net->xfrm.policy_bydst[dir].dbits6 = rbits6; 578 + net->xfrm.policy_bydst[dir].sbits6 = lbits6; 579 + } else { 580 + /* dir in/fwd => dst = local, src = remote */ 581 + net->xfrm.policy_bydst[dir].dbits4 = lbits4; 582 + net->xfrm.policy_bydst[dir].sbits4 = rbits4; 583 + net->xfrm.policy_bydst[dir].dbits6 = lbits6; 584 + net->xfrm.policy_bydst[dir].sbits6 = rbits6; 585 + } 586 + } 587 + 588 + /* re-insert all policies by order of creation */ 589 + list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 590 + newpos = NULL; 591 + chain = policy_hash_bysel(net, &policy->selector, 592 + policy->family, 593 + xfrm_policy_id2dir(policy->index)); 594 + hlist_for_each_entry(pol, chain, bydst) { 595 + if (policy->priority >= pol->priority) 
596 + newpos = &pol->bydst; 597 + else 598 + break; 599 + } 600 + if (newpos) 601 + hlist_add_behind(&policy->bydst, newpos); 602 + else 603 + hlist_add_head(&policy->bydst, chain); 604 + } 605 + 606 + write_unlock_bh(&net->xfrm.xfrm_policy_lock); 607 + 608 + mutex_unlock(&hash_resize_mutex); 609 + } 610 + 611 + void xfrm_policy_hash_rebuild(struct net *net) 612 + { 613 + schedule_work(&net->xfrm.policy_hthresh.work); 614 + } 615 + EXPORT_SYMBOL(xfrm_policy_hash_rebuild); 573 616 574 617 /* Generate new index... KAME seems to generate them ordered by cost 575 618 * of an absolute inpredictability of ordering of rules. This will not pass. */ ··· 2979 2862 if (!htab->table) 2980 2863 goto out_bydst; 2981 2864 htab->hmask = hmask; 2865 + htab->dbits4 = 32; 2866 + htab->sbits4 = 32; 2867 + htab->dbits6 = 128; 2868 + htab->sbits6 = 128; 2982 2869 } 2870 + net->xfrm.policy_hthresh.lbits4 = 32; 2871 + net->xfrm.policy_hthresh.rbits4 = 32; 2872 + net->xfrm.policy_hthresh.lbits6 = 128; 2873 + net->xfrm.policy_hthresh.rbits6 = 128; 2874 + 2875 + seqlock_init(&net->xfrm.policy_hthresh.lock); 2983 2876 2984 2877 INIT_LIST_HEAD(&net->xfrm.policy_all); 2985 2878 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 2879 + INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); 2986 2880 if (net_eq(net, &init_net)) 2987 2881 register_netdevice_notifier(&xfrm_dev_notifier); 2988 2882 return 0;
+3 -10
net/xfrm/xfrm_state.c
··· 97 97 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head); 98 98 } 99 99 100 - static DEFINE_MUTEX(hash_resize_mutex); 101 - 102 100 static void xfrm_hash_resize(struct work_struct *work) 103 101 { 104 102 struct net *net = container_of(work, struct net, xfrm.state_hash_work); ··· 105 107 unsigned int nhashmask, ohashmask; 106 108 int i; 107 109 108 - mutex_lock(&hash_resize_mutex); 109 - 110 110 nsize = xfrm_hash_new_size(net->xfrm.state_hmask); 111 111 ndst = xfrm_hash_alloc(nsize); 112 112 if (!ndst) 113 - goto out_unlock; 113 + return; 114 114 nsrc = xfrm_hash_alloc(nsize); 115 115 if (!nsrc) { 116 116 xfrm_hash_free(ndst, nsize); 117 - goto out_unlock; 117 + return; 118 118 } 119 119 nspi = xfrm_hash_alloc(nsize); 120 120 if (!nspi) { 121 121 xfrm_hash_free(ndst, nsize); 122 122 xfrm_hash_free(nsrc, nsize); 123 - goto out_unlock; 123 + return; 124 124 } 125 125 126 126 spin_lock_bh(&net->xfrm.xfrm_state_lock); ··· 144 148 xfrm_hash_free(odst, osize); 145 149 xfrm_hash_free(osrc, osize); 146 150 xfrm_hash_free(ospi, osize); 147 - 148 - out_unlock: 149 - mutex_unlock(&hash_resize_mutex); 150 151 } 151 152 152 153 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
+78 -5
net/xfrm/xfrm_user.c
··· 333 333 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 334 334 if (!algo) 335 335 return -ENOSYS; 336 - if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || 337 - ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) 336 + if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) 338 337 return -EINVAL; 339 338 *props = algo->desc.sadb_alg_id; 340 339 ··· 963 964 { 964 965 return NLMSG_ALIGN(4) 965 966 + nla_total_size(sizeof(struct xfrmu_spdinfo)) 966 - + nla_total_size(sizeof(struct xfrmu_spdhinfo)); 967 + + nla_total_size(sizeof(struct xfrmu_spdhinfo)) 968 + + nla_total_size(sizeof(struct xfrmu_spdhthresh)) 969 + + nla_total_size(sizeof(struct xfrmu_spdhthresh)); 967 970 } 968 971 969 972 static int build_spdinfo(struct sk_buff *skb, struct net *net, ··· 974 973 struct xfrmk_spdinfo si; 975 974 struct xfrmu_spdinfo spc; 976 975 struct xfrmu_spdhinfo sph; 976 + struct xfrmu_spdhthresh spt4, spt6; 977 977 struct nlmsghdr *nlh; 978 978 int err; 979 979 u32 *f; 980 + unsigned lseq; 980 981 981 982 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); 982 983 if (nlh == NULL) /* shouldn't really happen ... 
*/ ··· 996 993 sph.spdhcnt = si.spdhcnt; 997 994 sph.spdhmcnt = si.spdhmcnt; 998 995 996 + do { 997 + lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock); 998 + 999 + spt4.lbits = net->xfrm.policy_hthresh.lbits4; 1000 + spt4.rbits = net->xfrm.policy_hthresh.rbits4; 1001 + spt6.lbits = net->xfrm.policy_hthresh.lbits6; 1002 + spt6.rbits = net->xfrm.policy_hthresh.rbits6; 1003 + } while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq)); 1004 + 999 1005 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); 1000 1006 if (!err) 1001 1007 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); 1008 + if (!err) 1009 + err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4); 1010 + if (!err) 1011 + err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6); 1002 1012 if (err) { 1003 1013 nlmsg_cancel(skb, nlh); 1004 1014 return err; 1005 1015 } 1006 1016 1007 1017 return nlmsg_end(skb, nlh); 1018 + } 1019 + 1020 + static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 1021 + struct nlattr **attrs) 1022 + { 1023 + struct net *net = sock_net(skb->sk); 1024 + struct xfrmu_spdhthresh *thresh4 = NULL; 1025 + struct xfrmu_spdhthresh *thresh6 = NULL; 1026 + 1027 + /* selector prefixlen thresholds to hash policies */ 1028 + if (attrs[XFRMA_SPD_IPV4_HTHRESH]) { 1029 + struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH]; 1030 + 1031 + if (nla_len(rta) < sizeof(*thresh4)) 1032 + return -EINVAL; 1033 + thresh4 = nla_data(rta); 1034 + if (thresh4->lbits > 32 || thresh4->rbits > 32) 1035 + return -EINVAL; 1036 + } 1037 + if (attrs[XFRMA_SPD_IPV6_HTHRESH]) { 1038 + struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH]; 1039 + 1040 + if (nla_len(rta) < sizeof(*thresh6)) 1041 + return -EINVAL; 1042 + thresh6 = nla_data(rta); 1043 + if (thresh6->lbits > 128 || thresh6->rbits > 128) 1044 + return -EINVAL; 1045 + } 1046 + 1047 + if (thresh4 || thresh6) { 1048 + write_seqlock(&net->xfrm.policy_hthresh.lock); 1049 + if (thresh4) { 1050 + 
net->xfrm.policy_hthresh.lbits4 = thresh4->lbits; 1051 + net->xfrm.policy_hthresh.rbits4 = thresh4->rbits; 1052 + } 1053 + if (thresh6) { 1054 + net->xfrm.policy_hthresh.lbits6 = thresh6->lbits; 1055 + net->xfrm.policy_hthresh.rbits6 = thresh6->rbits; 1056 + } 1057 + write_sequnlock(&net->xfrm.policy_hthresh.lock); 1058 + 1059 + xfrm_policy_hash_rebuild(net); 1060 + } 1061 + 1062 + return 0; 1008 1063 } 1009 1064 1010 1065 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, ··· 2335 2274 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), 2336 2275 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 2337 2276 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), 2277 + [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), 2338 2278 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), 2339 2279 }; 2340 2280 ··· 2370 2308 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, 2371 2309 }; 2372 2310 2311 + static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { 2312 + [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, 2313 + [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, 2314 + }; 2315 + 2373 2316 static const struct xfrm_link { 2374 2317 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); 2375 2318 int (*dump)(struct sk_buff *, struct netlink_callback *); 2376 2319 int (*done)(struct netlink_callback *); 2320 + const struct nla_policy *nla_pol; 2321 + int nla_max; 2377 2322 } xfrm_dispatch[XFRM_NR_MSGTYPES] = { 2378 2323 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 2379 2324 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, ··· 2404 2335 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, 2405 2336 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, 2406 2337 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, 2338 + [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = 
xfrm_set_spdinfo, 2339 + .nla_pol = xfrma_spd_policy, 2340 + .nla_max = XFRMA_SPD_MAX }, 2407 2341 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, 2408 2342 }; 2409 2343 ··· 2443 2371 } 2444 2372 } 2445 2373 2446 - err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, 2447 - xfrma_policy); 2374 + err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, 2375 + link->nla_max ? : XFRMA_MAX, 2376 + link->nla_pol ? : xfrma_policy); 2448 2377 if (err < 0) 2449 2378 return err; 2450 2379