Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2021-06-28

1) Remove an unneeded error assignment in esp4_gro_receive().
From Yang Li.

2) Add a new byseq state hashtable to find acquire states faster.
From Sabrina Dubroca.

3) Remove some unnecessary variables in pfkey_create().
From zuoqilin.

4) Remove the unused description from xfrm_type struct.
From Florian Westphal.

5) Fix a spelling mistake in the comment of xfrm_state_ok().
From gushengxian.

6) Replace hdr_off indirections by a small helper function.
From Florian Westphal.

7) Remove xfrm4_output_finish and xfrm6_output_finish declarations,
they are not used anymore. From Antony Antony.

8) Remove xfrm replay indirections.
From Florian Westphal.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+264 -234
+1
include/net/netns/xfrm.h
··· 42 42 struct hlist_head __rcu *state_bydst; 43 43 struct hlist_head __rcu *state_bysrc; 44 44 struct hlist_head __rcu *state_byspi; 45 + struct hlist_head __rcu *state_byseq; 45 46 unsigned int state_hmask; 46 47 unsigned int state_num; 47 48 struct work_struct state_hash_work;
+15 -22
include/net/xfrm.h
··· 145 145 XFRM_MODE_FLAG_TUNNEL = 1, 146 146 }; 147 147 148 + enum xfrm_replay_mode { 149 + XFRM_REPLAY_MODE_LEGACY, 150 + XFRM_REPLAY_MODE_BMP, 151 + XFRM_REPLAY_MODE_ESN, 152 + }; 153 + 148 154 /* Full description of state of transformer. */ 149 155 struct xfrm_state { 150 156 possible_net_t xs_net; ··· 160 154 }; 161 155 struct hlist_node bysrc; 162 156 struct hlist_node byspi; 157 + struct hlist_node byseq; 163 158 164 159 refcount_t refcnt; 165 160 spinlock_t lock; ··· 221 214 struct xfrm_replay_state preplay; 222 215 struct xfrm_replay_state_esn *preplay_esn; 223 216 224 - /* The functions for replay detection. */ 225 - const struct xfrm_replay *repl; 226 - 217 + /* replay detection mode */ 218 + enum xfrm_replay_mode repl_mode; 227 219 /* internal flag that only holds state for delayed aevent at the 228 220 * moment 229 221 */ ··· 300 294 u32 portid; 301 295 u32 event; 302 296 struct net *net; 303 - }; 304 - 305 - struct xfrm_replay { 306 - void (*advance)(struct xfrm_state *x, __be32 net_seq); 307 - int (*check)(struct xfrm_state *x, 308 - struct sk_buff *skb, 309 - __be32 net_seq); 310 - int (*recheck)(struct xfrm_state *x, 311 - struct sk_buff *skb, 312 - __be32 net_seq); 313 - void (*notify)(struct xfrm_state *x, int event); 314 - int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); 315 297 }; 316 298 317 299 struct xfrm_if_cb { ··· 381 387 void xfrm_state_delete_tunnel(struct xfrm_state *x); 382 388 383 389 struct xfrm_type { 384 - char *description; 385 390 struct module *owner; 386 391 u8 proto; 387 392 u8 flags; ··· 395 402 int (*output)(struct xfrm_state *, struct sk_buff *pskb); 396 403 int (*reject)(struct xfrm_state *, struct sk_buff *, 397 404 const struct flowi *); 398 - int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); 399 405 }; 400 406 401 407 int xfrm_register_type(const struct xfrm_type *type, unsigned short family); 402 408 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); 403 409 404 
410 struct xfrm_type_offload { 405 - char *description; 406 411 struct module *owner; 407 412 u8 proto; 408 413 void (*encap)(struct xfrm_state *, struct sk_buff *pskb); ··· 1573 1582 } 1574 1583 1575 1584 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb); 1576 - int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); 1577 1585 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); 1578 1586 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); 1579 1587 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); ··· 1596 1606 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); 1597 1607 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); 1598 1608 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); 1599 - int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); 1600 - int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1601 - u8 **prevhdr); 1602 1609 1603 1610 #ifdef CONFIG_XFRM 1604 1611 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); ··· 1709 1722 } 1710 1723 1711 1724 #ifdef CONFIG_XFRM 1725 + void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq); 1726 + int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); 1727 + void xfrm_replay_notify(struct xfrm_state *x, int event); 1728 + int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb); 1729 + int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); 1730 + 1712 1731 static inline int xfrm_aevent_is_on(struct net *net) 1713 1732 { 1714 1733 struct sock *nlsk;
-1
net/ipv4/ah4.c
··· 554 554 555 555 static const struct xfrm_type ah_type = 556 556 { 557 - .description = "AH4", 558 557 .owner = THIS_MODULE, 559 558 .proto = IPPROTO_AH, 560 559 .flags = XFRM_TYPE_REPLAY_PROT,
-1
net/ipv4/esp4.c
··· 1198 1198 1199 1199 static const struct xfrm_type esp_type = 1200 1200 { 1201 - .description = "ESP4", 1202 1201 .owner = THIS_MODULE, 1203 1202 .proto = IPPROTO_ESP, 1204 1203 .flags = XFRM_TYPE_REPLAY_PROT,
+1 -3
net/ipv4/esp4_offload.c
··· 33 33 struct xfrm_state *x; 34 34 __be32 seq; 35 35 __be32 spi; 36 - int err; 37 36 38 37 if (!pskb_pull(skb, offset)) 39 38 return NULL; 40 39 41 - if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) 40 + if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0) 42 41 goto out; 43 42 44 43 xo = xfrm_offload(skb); ··· 342 343 }; 343 344 344 345 static const struct xfrm_type_offload esp_type_offload = { 345 - .description = "ESP4 OFFLOAD", 346 346 .owner = THIS_MODULE, 347 347 .proto = IPPROTO_ESP, 348 348 .input_tail = esp_input_tail,
-1
net/ipv4/ipcomp.c
··· 152 152 } 153 153 154 154 static const struct xfrm_type ipcomp_type = { 155 - .description = "IPCOMP4", 156 155 .owner = THIS_MODULE, 157 156 .proto = IPPROTO_COMP, 158 157 .init_state = ipcomp4_init_state,
-1
net/ipv4/xfrm4_tunnel.c
··· 42 42 } 43 43 44 44 static const struct xfrm_type ipip_type = { 45 - .description = "IPIP", 46 45 .owner = THIS_MODULE, 47 46 .proto = IPPROTO_IPIP, 48 47 .init_state = ipip_init_state,
-2
net/ipv6/ah6.c
··· 755 755 } 756 756 757 757 static const struct xfrm_type ah6_type = { 758 - .description = "AH6", 759 758 .owner = THIS_MODULE, 760 759 .proto = IPPROTO_AH, 761 760 .flags = XFRM_TYPE_REPLAY_PROT, ··· 762 763 .destructor = ah6_destroy, 763 764 .input = ah6_input, 764 765 .output = ah6_output, 765 - .hdr_offset = xfrm6_find_1stfragopt, 766 766 }; 767 767 768 768 static struct xfrm6_protocol ah6_protocol = {
-2
net/ipv6/esp6.c
··· 1243 1243 } 1244 1244 1245 1245 static const struct xfrm_type esp6_type = { 1246 - .description = "ESP6", 1247 1246 .owner = THIS_MODULE, 1248 1247 .proto = IPPROTO_ESP, 1249 1248 .flags = XFRM_TYPE_REPLAY_PROT, ··· 1250 1251 .destructor = esp6_destroy, 1251 1252 .input = esp6_input, 1252 1253 .output = esp6_output, 1253 - .hdr_offset = xfrm6_find_1stfragopt, 1254 1254 }; 1255 1255 1256 1256 static struct xfrm6_protocol esp6_protocol = {
-1
net/ipv6/esp6_offload.c
··· 377 377 }; 378 378 379 379 static const struct xfrm_type_offload esp6_type_offload = { 380 - .description = "ESP6 OFFLOAD", 381 380 .owner = THIS_MODULE, 382 381 .proto = IPPROTO_ESP, 383 382 .input_tail = esp6_input_tail,
-2
net/ipv6/ipcomp6.c
··· 172 172 } 173 173 174 174 static const struct xfrm_type ipcomp6_type = { 175 - .description = "IPCOMP6", 176 175 .owner = THIS_MODULE, 177 176 .proto = IPPROTO_COMP, 178 177 .init_state = ipcomp6_init_state, 179 178 .destructor = ipcomp_destroy, 180 179 .input = ipcomp_input, 181 180 .output = ipcomp_output, 182 - .hdr_offset = xfrm6_find_1stfragopt, 183 181 }; 184 182 185 183 static struct xfrm6_protocol ipcomp6_protocol = {
-99
net/ipv6/mip6.c
··· 247 247 return err; 248 248 } 249 249 250 - static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, 251 - u8 **nexthdr) 252 - { 253 - u16 offset = sizeof(struct ipv6hdr); 254 - struct ipv6_opt_hdr *exthdr = 255 - (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); 256 - const unsigned char *nh = skb_network_header(skb); 257 - unsigned int packet_len = skb_tail_pointer(skb) - 258 - skb_network_header(skb); 259 - int found_rhdr = 0; 260 - 261 - *nexthdr = &ipv6_hdr(skb)->nexthdr; 262 - 263 - while (offset + 1 <= packet_len) { 264 - 265 - switch (**nexthdr) { 266 - case NEXTHDR_HOP: 267 - break; 268 - case NEXTHDR_ROUTING: 269 - found_rhdr = 1; 270 - break; 271 - case NEXTHDR_DEST: 272 - /* 273 - * HAO MUST NOT appear more than once. 274 - * XXX: It is better to try to find by the end of 275 - * XXX: packet if HAO exists. 276 - */ 277 - if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) { 278 - net_dbg_ratelimited("mip6: hao exists already, override\n"); 279 - return offset; 280 - } 281 - 282 - if (found_rhdr) 283 - return offset; 284 - 285 - break; 286 - default: 287 - return offset; 288 - } 289 - 290 - offset += ipv6_optlen(exthdr); 291 - *nexthdr = &exthdr->nexthdr; 292 - exthdr = (struct ipv6_opt_hdr *)(nh + offset); 293 - } 294 - 295 - return offset; 296 - } 297 - 298 250 static int mip6_destopt_init_state(struct xfrm_state *x) 299 251 { 300 252 if (x->id.spi) { ··· 276 324 } 277 325 278 326 static const struct xfrm_type mip6_destopt_type = { 279 - .description = "MIP6DESTOPT", 280 327 .owner = THIS_MODULE, 281 328 .proto = IPPROTO_DSTOPTS, 282 329 .flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR, ··· 284 333 .input = mip6_destopt_input, 285 334 .output = mip6_destopt_output, 286 335 .reject = mip6_destopt_reject, 287 - .hdr_offset = mip6_destopt_offset, 288 336 }; 289 337 290 338 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb) ··· 333 383 return 0; 334 384 } 335 385 336 - static int mip6_rthdr_offset(struct xfrm_state 
*x, struct sk_buff *skb, 337 - u8 **nexthdr) 338 - { 339 - u16 offset = sizeof(struct ipv6hdr); 340 - struct ipv6_opt_hdr *exthdr = 341 - (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); 342 - const unsigned char *nh = skb_network_header(skb); 343 - unsigned int packet_len = skb_tail_pointer(skb) - 344 - skb_network_header(skb); 345 - int found_rhdr = 0; 346 - 347 - *nexthdr = &ipv6_hdr(skb)->nexthdr; 348 - 349 - while (offset + 1 <= packet_len) { 350 - 351 - switch (**nexthdr) { 352 - case NEXTHDR_HOP: 353 - break; 354 - case NEXTHDR_ROUTING: 355 - if (offset + 3 <= packet_len) { 356 - struct ipv6_rt_hdr *rt; 357 - rt = (struct ipv6_rt_hdr *)(nh + offset); 358 - if (rt->type != 0) 359 - return offset; 360 - } 361 - found_rhdr = 1; 362 - break; 363 - case NEXTHDR_DEST: 364 - if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) 365 - return offset; 366 - 367 - if (found_rhdr) 368 - return offset; 369 - 370 - break; 371 - default: 372 - return offset; 373 - } 374 - 375 - offset += ipv6_optlen(exthdr); 376 - *nexthdr = &exthdr->nexthdr; 377 - exthdr = (struct ipv6_opt_hdr *)(nh + offset); 378 - } 379 - 380 - return offset; 381 - } 382 - 383 386 static int mip6_rthdr_init_state(struct xfrm_state *x) 384 387 { 385 388 if (x->id.spi) { ··· 359 456 } 360 457 361 458 static const struct xfrm_type mip6_rthdr_type = { 362 - .description = "MIP6RT", 363 459 .owner = THIS_MODULE, 364 460 .proto = IPPROTO_ROUTING, 365 461 .flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR, ··· 366 464 .destructor = mip6_rthdr_destroy, 367 465 .input = mip6_rthdr_input, 368 466 .output = mip6_rthdr_output, 369 - .hdr_offset = mip6_rthdr_offset, 370 467 }; 371 468 372 469 static int __init mip6_init(void)
-7
net/ipv6/xfrm6_output.c
··· 16 16 #include <net/ip6_route.h> 17 17 #include <net/xfrm.h> 18 18 19 - int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 20 - u8 **prevhdr) 21 - { 22 - return ip6_find_1stfragopt(skb, prevhdr); 23 - } 24 - EXPORT_SYMBOL(xfrm6_find_1stfragopt); 25 - 26 19 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) 27 20 { 28 21 struct flowi6 fl6;
-1
net/ipv6/xfrm6_tunnel.c
··· 291 291 } 292 292 293 293 static const struct xfrm_type xfrm6_tunnel_type = { 294 - .description = "IP6IP6", 295 294 .owner = THIS_MODULE, 296 295 .proto = IPPROTO_IPV6, 297 296 .init_state = xfrm6_tunnel_init_state,
+1 -5
net/key/af_key.c
··· 141 141 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 142 142 struct sock *sk; 143 143 struct pfkey_sock *pfk; 144 - int err; 145 144 146 145 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 147 146 return -EPERM; ··· 149 150 if (protocol != PF_KEY_V2) 150 151 return -EPROTONOSUPPORT; 151 152 152 - err = -ENOMEM; 153 153 sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern); 154 154 if (sk == NULL) 155 - goto out; 155 + return -ENOMEM; 156 156 157 157 pfk = pfkey_sk(sk); 158 158 mutex_init(&pfk->dump_lock); ··· 167 169 pfkey_insert(sk); 168 170 169 171 return 0; 170 - out: 171 - return err; 172 172 } 173 173 174 174 static int pfkey_release(struct socket *sock)
+7
net/xfrm/xfrm_hash.h
··· 131 131 return (h ^ (h >> 10) ^ (h >> 20)) & hmask; 132 132 } 133 133 134 + static inline unsigned int 135 + __xfrm_seq_hash(u32 seq, unsigned int hmask) 136 + { 137 + unsigned int h = seq; 138 + return (h ^ (h >> 10) ^ (h >> 20)) & hmask; 139 + } 140 + 134 141 static inline unsigned int __idx_hash(u32 index, unsigned int hmask) 135 142 { 136 143 return (index ^ (index >> 8)) & hmask;
+3 -3
net/xfrm/xfrm_input.c
··· 612 612 goto drop_unlock; 613 613 } 614 614 615 - if (x->repl->check(x, skb, seq)) { 615 + if (xfrm_replay_check(x, skb, seq)) { 616 616 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); 617 617 goto drop_unlock; 618 618 } ··· 660 660 /* only the first xfrm gets the encap type */ 661 661 encap_type = 0; 662 662 663 - if (x->repl->recheck(x, skb, seq)) { 663 + if (xfrm_replay_recheck(x, skb, seq)) { 664 664 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); 665 665 goto drop_unlock; 666 666 } 667 667 668 - x->repl->advance(x, seq); 668 + xfrm_replay_advance(x, seq); 669 669 670 670 x->curlft.bytes += skb->len; 671 671 x->curlft.packets++;
+80 -3
net/xfrm/xfrm_output.c
··· 77 77 return 0; 78 78 } 79 79 80 + #if IS_ENABLED(CONFIG_IPV6_MIP6) 81 + static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type) 82 + { 83 + const unsigned char *nh = skb_network_header(skb); 84 + unsigned int offset = sizeof(struct ipv6hdr); 85 + unsigned int packet_len; 86 + int found_rhdr = 0; 87 + 88 + packet_len = skb_tail_pointer(skb) - nh; 89 + *nexthdr = &ipv6_hdr(skb)->nexthdr; 90 + 91 + while (offset <= packet_len) { 92 + struct ipv6_opt_hdr *exthdr; 93 + 94 + switch (**nexthdr) { 95 + case NEXTHDR_HOP: 96 + break; 97 + case NEXTHDR_ROUTING: 98 + if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) { 99 + struct ipv6_rt_hdr *rt; 100 + 101 + rt = (struct ipv6_rt_hdr *)(nh + offset); 102 + if (rt->type != 0) 103 + return offset; 104 + } 105 + found_rhdr = 1; 106 + break; 107 + case NEXTHDR_DEST: 108 + /* HAO MUST NOT appear more than once. 109 + * XXX: It is better to try to find by the end of 110 + * XXX: packet if HAO exists. 111 + */ 112 + if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) { 113 + net_dbg_ratelimited("mip6: hao exists already, override\n"); 114 + return offset; 115 + } 116 + 117 + if (found_rhdr) 118 + return offset; 119 + 120 + break; 121 + default: 122 + return offset; 123 + } 124 + 125 + if (offset + sizeof(struct ipv6_opt_hdr) > packet_len) 126 + return -EINVAL; 127 + 128 + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + 129 + offset); 130 + offset += ipv6_optlen(exthdr); 131 + if (offset > IPV6_MAXPLEN) 132 + return -EINVAL; 133 + *nexthdr = &exthdr->nexthdr; 134 + } 135 + 136 + return -EINVAL; 137 + } 138 + #endif 139 + 140 + #if IS_ENABLED(CONFIG_IPV6) 141 + static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr) 142 + { 143 + switch (x->type->proto) { 144 + #if IS_ENABLED(CONFIG_IPV6_MIP6) 145 + case IPPROTO_DSTOPTS: 146 + case IPPROTO_ROUTING: 147 + return mip6_rthdr_offset(skb, prevhdr, x->type->proto); 148 + #endif 149 + default: 150 + break; 151 + } 152 + 153 + 
return ip6_find_1stfragopt(skb, prevhdr); 154 + } 155 + #endif 156 + 80 157 /* Add encapsulation header. 81 158 * 82 159 * The IP header and mutable extension headers will be moved forward to make ··· 169 92 iph = ipv6_hdr(skb); 170 93 skb_set_inner_transport_header(skb, skb_transport_offset(skb)); 171 94 172 - hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 95 + hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr); 173 96 if (hdr_len < 0) 174 97 return hdr_len; 175 98 skb_set_mac_header(skb, ··· 199 122 200 123 iph = ipv6_hdr(skb); 201 124 202 - hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 125 + hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr); 203 126 if (hdr_len < 0) 204 127 return hdr_len; 205 128 skb_set_mac_header(skb, ··· 525 448 goto error; 526 449 } 527 450 528 - err = x->repl->overflow(x, skb); 451 + err = xfrm_replay_overflow(x, skb); 529 452 if (err) { 530 453 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR); 531 454 goto error;
+1 -1
net/xfrm/xfrm_policy.c
··· 3247 3247 3248 3248 /* 3249 3249 * 0 or more than 0 is returned when validation is succeeded (either bypass 3250 - * because of optional transport mode, or next index of the mathced secpath 3250 + * because of optional transport mode, or next index of the matched secpath 3251 3251 * state with the template. 3252 3252 * -1 is returned when no matching template is found. 3253 3253 * Otherwise "-2 - errored_index" is returned.
+102 -65
net/xfrm/xfrm_replay.c
··· 34 34 return seq_hi; 35 35 } 36 36 EXPORT_SYMBOL(xfrm_replay_seqhi); 37 - ; 38 - static void xfrm_replay_notify(struct xfrm_state *x, int event) 37 + 38 + static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event); 39 + static void xfrm_replay_notify_esn(struct xfrm_state *x, int event); 40 + 41 + void xfrm_replay_notify(struct xfrm_state *x, int event) 39 42 { 40 43 struct km_event c; 41 44 /* we send notify messages in case ··· 50 47 * 51 48 * The state structure must be locked! 52 49 */ 50 + 51 + switch (x->repl_mode) { 52 + case XFRM_REPLAY_MODE_LEGACY: 53 + break; 54 + case XFRM_REPLAY_MODE_BMP: 55 + xfrm_replay_notify_bmp(x, event); 56 + return; 57 + case XFRM_REPLAY_MODE_ESN: 58 + xfrm_replay_notify_esn(x, event); 59 + return; 60 + } 53 61 54 62 switch (event) { 55 63 case XFRM_REPLAY_UPDATE: ··· 95 81 x->xflags &= ~XFRM_TIME_DEFER; 96 82 } 97 83 98 - static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) 84 + static int __xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) 99 85 { 100 86 int err = 0; 101 87 struct net *net = xs_net(x); ··· 112 98 return err; 113 99 } 114 100 if (xfrm_aevent_is_on(net)) 115 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 101 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 116 102 } 117 103 118 104 return err; 119 105 } 120 106 121 - static int xfrm_replay_check(struct xfrm_state *x, 122 - struct sk_buff *skb, __be32 net_seq) 107 + static int xfrm_replay_check_legacy(struct xfrm_state *x, 108 + struct sk_buff *skb, __be32 net_seq) 123 109 { 124 110 u32 diff; 125 111 u32 seq = ntohl(net_seq); ··· 150 136 return -EINVAL; 151 137 } 152 138 153 - static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq) 139 + static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq); 140 + static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq); 141 + 142 + void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq) 154 143 { 155 - u32 diff; 156 - u32 seq = 
ntohl(net_seq); 144 + u32 diff, seq; 145 + 146 + switch (x->repl_mode) { 147 + case XFRM_REPLAY_MODE_LEGACY: 148 + break; 149 + case XFRM_REPLAY_MODE_BMP: 150 + return xfrm_replay_advance_bmp(x, net_seq); 151 + case XFRM_REPLAY_MODE_ESN: 152 + return xfrm_replay_advance_esn(x, net_seq); 153 + } 157 154 158 155 if (!x->props.replay_window) 159 156 return; 160 157 158 + seq = ntohl(net_seq); 161 159 if (seq > x->replay.seq) { 162 160 diff = seq - x->replay.seq; 163 161 if (diff < x->props.replay_window) ··· 183 157 } 184 158 185 159 if (xfrm_aevent_is_on(xs_net(x))) 186 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 160 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 187 161 } 188 162 189 163 static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) ··· 204 178 return err; 205 179 } 206 180 if (xfrm_aevent_is_on(net)) 207 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 181 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 208 182 } 209 183 210 184 return err; ··· 299 273 replay_esn->bmp[nr] |= (1U << bitnr); 300 274 301 275 if (xfrm_aevent_is_on(xs_net(x))) 302 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 276 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 303 277 } 304 278 305 279 static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) ··· 442 416 } 443 417 } 444 418 if (xfrm_aevent_is_on(net)) 445 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 419 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 446 420 } 447 421 448 422 return err; ··· 507 481 return -EINVAL; 508 482 } 509 483 484 + int xfrm_replay_check(struct xfrm_state *x, 485 + struct sk_buff *skb, __be32 net_seq) 486 + { 487 + switch (x->repl_mode) { 488 + case XFRM_REPLAY_MODE_LEGACY: 489 + break; 490 + case XFRM_REPLAY_MODE_BMP: 491 + return xfrm_replay_check_bmp(x, skb, net_seq); 492 + case XFRM_REPLAY_MODE_ESN: 493 + return xfrm_replay_check_esn(x, skb, net_seq); 494 + } 495 + 496 + return xfrm_replay_check_legacy(x, skb, net_seq); 497 + } 498 + 510 499 static int xfrm_replay_recheck_esn(struct xfrm_state 
*x, 511 500 struct sk_buff *skb, __be32 net_seq) 512 501 { ··· 532 491 } 533 492 534 493 return xfrm_replay_check_esn(x, skb, net_seq); 494 + } 495 + 496 + int xfrm_replay_recheck(struct xfrm_state *x, 497 + struct sk_buff *skb, __be32 net_seq) 498 + { 499 + switch (x->repl_mode) { 500 + case XFRM_REPLAY_MODE_LEGACY: 501 + break; 502 + case XFRM_REPLAY_MODE_BMP: 503 + /* no special recheck treatment */ 504 + return xfrm_replay_check_bmp(x, skb, net_seq); 505 + case XFRM_REPLAY_MODE_ESN: 506 + return xfrm_replay_recheck_esn(x, skb, net_seq); 507 + } 508 + 509 + return xfrm_replay_check_legacy(x, skb, net_seq); 535 510 } 536 511 537 512 static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) ··· 605 548 replay_esn->bmp[nr] |= (1U << bitnr); 606 549 607 550 if (xfrm_aevent_is_on(xs_net(x))) 608 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 551 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 609 552 } 610 553 611 554 #ifdef CONFIG_XFRM_OFFLOAD ··· 617 560 __u32 oseq = x->replay.oseq; 618 561 619 562 if (!xo) 620 - return xfrm_replay_overflow(x, skb); 563 + return __xfrm_replay_overflow(x, skb); 621 564 622 565 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 623 566 if (!skb_is_gso(skb)) { ··· 642 585 x->replay.oseq = oseq; 643 586 644 587 if (xfrm_aevent_is_on(net)) 645 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 588 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 646 589 } 647 590 648 591 return err; ··· 682 625 } 683 626 684 627 if (xfrm_aevent_is_on(net)) 685 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 628 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 686 629 } 687 630 688 631 return err; ··· 731 674 replay_esn->oseq = oseq; 732 675 733 676 if (xfrm_aevent_is_on(net)) 734 - x->repl->notify(x, XFRM_REPLAY_UPDATE); 677 + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 735 678 } 736 679 737 680 return err; 738 681 } 739 682 740 - static const struct xfrm_replay xfrm_replay_legacy = { 741 - .advance = xfrm_replay_advance, 742 - .check = xfrm_replay_check, 743 - .recheck = 
xfrm_replay_check, 744 - .notify = xfrm_replay_notify, 745 - .overflow = xfrm_replay_overflow_offload, 746 - }; 683 + int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) 684 + { 685 + switch (x->repl_mode) { 686 + case XFRM_REPLAY_MODE_LEGACY: 687 + break; 688 + case XFRM_REPLAY_MODE_BMP: 689 + return xfrm_replay_overflow_offload_bmp(x, skb); 690 + case XFRM_REPLAY_MODE_ESN: 691 + return xfrm_replay_overflow_offload_esn(x, skb); 692 + } 747 693 748 - static const struct xfrm_replay xfrm_replay_bmp = { 749 - .advance = xfrm_replay_advance_bmp, 750 - .check = xfrm_replay_check_bmp, 751 - .recheck = xfrm_replay_check_bmp, 752 - .notify = xfrm_replay_notify_bmp, 753 - .overflow = xfrm_replay_overflow_offload_bmp, 754 - }; 755 - 756 - static const struct xfrm_replay xfrm_replay_esn = { 757 - .advance = xfrm_replay_advance_esn, 758 - .check = xfrm_replay_check_esn, 759 - .recheck = xfrm_replay_recheck_esn, 760 - .notify = xfrm_replay_notify_esn, 761 - .overflow = xfrm_replay_overflow_offload_esn, 762 - }; 694 + return xfrm_replay_overflow_offload(x, skb); 695 + } 763 696 #else 764 - static const struct xfrm_replay xfrm_replay_legacy = { 765 - .advance = xfrm_replay_advance, 766 - .check = xfrm_replay_check, 767 - .recheck = xfrm_replay_check, 768 - .notify = xfrm_replay_notify, 769 - .overflow = xfrm_replay_overflow, 770 - }; 697 + int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) 698 + { 699 + switch (x->repl_mode) { 700 + case XFRM_REPLAY_MODE_LEGACY: 701 + break; 702 + case XFRM_REPLAY_MODE_BMP: 703 + return xfrm_replay_overflow_bmp(x, skb); 704 + case XFRM_REPLAY_MODE_ESN: 705 + return xfrm_replay_overflow_esn(x, skb); 706 + } 771 707 772 - static const struct xfrm_replay xfrm_replay_bmp = { 773 - .advance = xfrm_replay_advance_bmp, 774 - .check = xfrm_replay_check_bmp, 775 - .recheck = xfrm_replay_check_bmp, 776 - .notify = xfrm_replay_notify_bmp, 777 - .overflow = xfrm_replay_overflow_bmp, 778 - }; 779 - 780 - static const struct 
xfrm_replay xfrm_replay_esn = { 781 - .advance = xfrm_replay_advance_esn, 782 - .check = xfrm_replay_check_esn, 783 - .recheck = xfrm_replay_recheck_esn, 784 - .notify = xfrm_replay_notify_esn, 785 - .overflow = xfrm_replay_overflow_esn, 786 - }; 708 + return __xfrm_replay_overflow(x, skb); 709 + } 787 710 #endif 788 711 789 712 int xfrm_init_replay(struct xfrm_state *x) ··· 778 741 if (x->props.flags & XFRM_STATE_ESN) { 779 742 if (replay_esn->replay_window == 0) 780 743 return -EINVAL; 781 - x->repl = &xfrm_replay_esn; 744 + x->repl_mode = XFRM_REPLAY_MODE_ESN; 782 745 } else { 783 - x->repl = &xfrm_replay_bmp; 746 + x->repl_mode = XFRM_REPLAY_MODE_BMP; 784 747 } 785 748 } else { 786 - x->repl = &xfrm_replay_legacy; 749 + x->repl_mode = XFRM_REPLAY_MODE_LEGACY; 787 750 } 788 751 789 752 return 0;
+53 -14
net/xfrm/xfrm_state.c
··· 78 78 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask); 79 79 } 80 80 81 + static unsigned int xfrm_seq_hash(struct net *net, u32 seq) 82 + { 83 + return __xfrm_seq_hash(seq, net->xfrm.state_hmask); 84 + } 85 + 81 86 static void xfrm_hash_transfer(struct hlist_head *list, 82 87 struct hlist_head *ndsttable, 83 88 struct hlist_head *nsrctable, 84 89 struct hlist_head *nspitable, 90 + struct hlist_head *nseqtable, 85 91 unsigned int nhashmask) 86 92 { 87 93 struct hlist_node *tmp; ··· 112 106 nhashmask); 113 107 hlist_add_head_rcu(&x->byspi, nspitable + h); 114 108 } 109 + 110 + if (x->km.seq) { 111 + h = __xfrm_seq_hash(x->km.seq, nhashmask); 112 + hlist_add_head_rcu(&x->byseq, nseqtable + h); 113 + } 115 114 } 116 115 } 117 116 ··· 128 117 static void xfrm_hash_resize(struct work_struct *work) 129 118 { 130 119 struct net *net = container_of(work, struct net, xfrm.state_hash_work); 131 - struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; 120 + struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq; 132 121 unsigned long nsize, osize; 133 122 unsigned int nhashmask, ohashmask; 134 123 int i; ··· 148 137 xfrm_hash_free(nsrc, nsize); 149 138 return; 150 139 } 140 + nseq = xfrm_hash_alloc(nsize); 141 + if (!nseq) { 142 + xfrm_hash_free(ndst, nsize); 143 + xfrm_hash_free(nsrc, nsize); 144 + xfrm_hash_free(nspi, nsize); 145 + return; 146 + } 151 147 152 148 spin_lock_bh(&net->xfrm.xfrm_state_lock); 153 149 write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); ··· 162 144 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; 163 145 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net); 164 146 for (i = net->xfrm.state_hmask; i >= 0; i--) 165 - xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask); 147 + xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask); 166 148 167 149 osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net); 168 150 ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, 
net); 151 + oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net); 169 152 ohashmask = net->xfrm.state_hmask; 170 153 171 154 rcu_assign_pointer(net->xfrm.state_bydst, ndst); 172 155 rcu_assign_pointer(net->xfrm.state_bysrc, nsrc); 173 156 rcu_assign_pointer(net->xfrm.state_byspi, nspi); 157 + rcu_assign_pointer(net->xfrm.state_byseq, nseq); 174 158 net->xfrm.state_hmask = nhashmask; 175 159 176 160 write_seqcount_end(&net->xfrm.xfrm_state_hash_generation); ··· 185 165 xfrm_hash_free(odst, osize); 186 166 xfrm_hash_free(osrc, osize); 187 167 xfrm_hash_free(ospi, osize); 168 + xfrm_hash_free(oseq, osize); 188 169 } 189 170 190 171 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock); ··· 642 621 INIT_HLIST_NODE(&x->bydst); 643 622 INIT_HLIST_NODE(&x->bysrc); 644 623 INIT_HLIST_NODE(&x->byspi); 624 + INIT_HLIST_NODE(&x->byseq); 645 625 hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT); 646 626 x->mtimer.function = xfrm_timer_handler; 647 627 timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0); ··· 686 664 list_del(&x->km.all); 687 665 hlist_del_rcu(&x->bydst); 688 666 hlist_del_rcu(&x->bysrc); 667 + if (x->km.seq) 668 + hlist_del_rcu(&x->byseq); 689 669 if (x->id.spi) 690 670 hlist_del_rcu(&x->byspi); 691 671 net->xfrm.state_num--; ··· 1172 1148 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); 1173 1149 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); 1174 1150 } 1151 + if (x->km.seq) { 1152 + h = xfrm_seq_hash(net, x->km.seq); 1153 + hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h); 1154 + } 1175 1155 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 1176 1156 hrtimer_start(&x->mtimer, 1177 1157 ktime_set(net->xfrm.sysctl_acq_expires, 0), ··· 1289 1261 x->props.family); 1290 1262 1291 1263 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); 1264 + } 1265 + 1266 + if (x->km.seq) { 1267 + h = xfrm_seq_hash(net, x->km.seq); 1268 + 1269 + hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + 
h); 1292 1270 } 1293 1271 1294 1272 hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT); ··· 1966 1932 1967 1933 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq) 1968 1934 { 1969 - int i; 1935 + unsigned int h = xfrm_seq_hash(net, seq); 1936 + struct xfrm_state *x; 1970 1937 1971 - for (i = 0; i <= net->xfrm.state_hmask; i++) { 1972 - struct xfrm_state *x; 1973 - 1974 - hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 1975 - if (x->km.seq == seq && 1976 - (mark & x->mark.m) == x->mark.v && 1977 - x->km.state == XFRM_STATE_ACQ) { 1978 - xfrm_state_hold(x); 1979 - return x; 1980 - } 1938 + hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) { 1939 + if (x->km.seq == seq && 1940 + (mark & x->mark.m) == x->mark.v && 1941 + x->km.state == XFRM_STATE_ACQ) { 1942 + xfrm_state_hold(x); 1943 + return x; 1981 1944 } 1982 1945 } 1946 + 1983 1947 return NULL; 1984 1948 } 1985 1949 ··· 2177 2145 2178 2146 if (x->km.state == XFRM_STATE_VALID) { 2179 2147 if (xfrm_aevent_is_on(xs_net(x))) 2180 - x->repl->notify(x, XFRM_REPLAY_TIMEOUT); 2148 + xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT); 2181 2149 else 2182 2150 x->xflags |= XFRM_TIME_DEFER; 2183 2151 } ··· 2692 2660 net->xfrm.state_byspi = xfrm_hash_alloc(sz); 2693 2661 if (!net->xfrm.state_byspi) 2694 2662 goto out_byspi; 2663 + net->xfrm.state_byseq = xfrm_hash_alloc(sz); 2664 + if (!net->xfrm.state_byseq) 2665 + goto out_byseq; 2695 2666 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1); 2696 2667 2697 2668 net->xfrm.state_num = 0; ··· 2704 2669 &net->xfrm.xfrm_state_lock); 2705 2670 return 0; 2706 2671 2672 + out_byseq: 2673 + xfrm_hash_free(net->xfrm.state_byspi, sz); 2707 2674 out_byspi: 2708 2675 xfrm_hash_free(net->xfrm.state_bysrc, sz); 2709 2676 out_bysrc: ··· 2725 2688 WARN_ON(!list_empty(&net->xfrm.state_all)); 2726 2689 2727 2690 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head); 2691 + 
WARN_ON(!hlist_empty(net->xfrm.state_byseq)); 2692 + xfrm_hash_free(net->xfrm.state_byseq, sz); 2728 2693 WARN_ON(!hlist_empty(net->xfrm.state_byspi)); 2729 2694 xfrm_hash_free(net->xfrm.state_byspi, sz); 2730 2695 WARN_ON(!hlist_empty(net->xfrm.state_bysrc));