Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'net-next-6.7-followup' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next

Pull more networking updates from Jakub Kicinski:

- Support GRO decapsulation for IPsec ESP in UDP

- Add a handful of MODULE_DESCRIPTION()s

- Drop questionable alignment check in TCP AO to avoid a
build issue after changes in the crypto tree

* tag 'net-next-6.7-followup' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next:
net: tcp: remove call to obsolete crypto_ahash_alignmask()
net: fill in MODULE_DESCRIPTION()s under drivers/net/
net: fill in MODULE_DESCRIPTION()s under net/802*
net: fill in MODULE_DESCRIPTION()s under net/core
net: fill in MODULE_DESCRIPTION()s in kuba@'s modules
xfrm: policy: fix layer 4 flowi decoding
xfrm: Fix use after free in __xfrm6_udp_encap_rcv.
xfrm: policy: replace session decode with flow dissector
xfrm: move mark and oif flowi decode into common code
xfrm: pass struct net to xfrm_decode_session wrappers
xfrm: Support GRO for IPv6 ESP in UDP encapsulation
xfrm: Support GRO for IPv4 ESP in UDP encapsulation
xfrm: Use the XFRM_GRO to indicate a GRO call on input
xfrm: Annotate struct xfrm_sec_ctx with __counted_by
xfrm: Remove unused function declarations

+362 -248
+1
drivers/net/amt.c
··· 3449 3449 module_exit(amt_fini); 3450 3450 3451 3451 MODULE_LICENSE("GPL"); 3452 + MODULE_DESCRIPTION("Driver for Automatic Multicast Tunneling (AMT)"); 3452 3453 MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>"); 3453 3454 MODULE_ALIAS_RTNL_LINK("amt");
+1
drivers/net/dummy.c
··· 202 202 module_init(dummy_init_module); 203 203 module_exit(dummy_cleanup_module); 204 204 MODULE_LICENSE("GPL"); 205 + MODULE_DESCRIPTION("Dummy netdevice driver which discards all packets sent to it"); 205 206 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+1
drivers/net/eql.c
··· 607 607 608 608 module_init(eql_init_module); 609 609 module_exit(eql_cleanup_module); 610 + MODULE_DESCRIPTION("Equalizer Load-balancer for serial network interfaces"); 610 611 MODULE_LICENSE("GPL");
+1
drivers/net/ifb.c
··· 454 454 module_init(ifb_init_module); 455 455 module_exit(ifb_cleanup_module); 456 456 MODULE_LICENSE("GPL"); 457 + MODULE_DESCRIPTION("Intermediate Functional Block (ifb) netdevice driver for sharing of resources and ingress packet queuing"); 457 458 MODULE_AUTHOR("Jamal Hadi Salim"); 458 459 MODULE_ALIAS_RTNL_LINK("ifb");
+1
drivers/net/macvtap.c
··· 250 250 module_exit(macvtap_exit); 251 251 252 252 MODULE_ALIAS_RTNL_LINK("macvtap"); 253 + MODULE_DESCRIPTION("MAC-VLAN based tap driver"); 253 254 MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>"); 254 255 MODULE_LICENSE("GPL");
+1
drivers/net/netdevsim/netdev.c
··· 470 470 module_init(nsim_module_init); 471 471 module_exit(nsim_module_exit); 472 472 MODULE_LICENSE("GPL"); 473 + MODULE_DESCRIPTION("Simulated networking device for testing"); 473 474 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+1
drivers/net/sungem_phy.c
··· 1194 1194 } 1195 1195 1196 1196 EXPORT_SYMBOL(sungem_phy_probe); 1197 + MODULE_DESCRIPTION("PHY drivers for the sungem Ethernet MAC driver"); 1197 1198 MODULE_LICENSE("GPL");
+1
drivers/net/tap.c
··· 1399 1399 } 1400 1400 EXPORT_SYMBOL_GPL(tap_destroy_cdev); 1401 1401 1402 + MODULE_DESCRIPTION("Common library for drivers implementing the TAP interface"); 1402 1403 MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>"); 1403 1404 MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>"); 1404 1405 MODULE_LICENSE("GPL");
+1
drivers/net/wireless/mediatek/mt7601u/usb.c
··· 365 365 366 366 MODULE_DEVICE_TABLE(usb, mt7601u_device_table); 367 367 MODULE_FIRMWARE(MT7601U_FIRMWARE); 368 + MODULE_DESCRIPTION("MediaTek MT7601U USB Wireless LAN driver"); 368 369 MODULE_LICENSE("GPL"); 369 370 370 371 static struct usb_driver mt7601u_driver = {
+1 -1
include/net/gro.h
··· 41 41 /* Number of segments aggregated. */ 42 42 u16 count; 43 43 44 - /* Used in ipv6_gro_receive() and foo-over-udp */ 44 + /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */ 45 45 u16 proto; 46 46 47 47 /* Used in napi_gro_cb::free */
+3
include/net/ipv6_stubs.h
··· 60 60 #if IS_ENABLED(CONFIG_XFRM) 61 61 void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu); 62 62 int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb); 63 + struct sk_buff *(*xfrm6_gro_udp_encap_rcv)(struct sock *sk, 64 + struct list_head *head, 65 + struct sk_buff *skb); 63 66 int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi, 64 67 int encap_type); 65 68 #endif
+10 -8
include/net/xfrm.h
··· 1207 1207 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1); 1208 1208 } 1209 1209 1210 - int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 1210 + int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, 1211 1211 unsigned int family, int reverse); 1212 1212 1213 - static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 1213 + static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, 1214 1214 unsigned int family) 1215 1215 { 1216 - return __xfrm_decode_session(skb, fl, family, 0); 1216 + return __xfrm_decode_session(net, skb, fl, family, 0); 1217 1217 } 1218 1218 1219 - static inline int xfrm_decode_session_reverse(struct sk_buff *skb, 1219 + static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, 1220 1220 struct flowi *fl, 1221 1221 unsigned int family) 1222 1222 { 1223 - return __xfrm_decode_session(skb, fl, family, 1); 1223 + return __xfrm_decode_session(net, skb, fl, family, 1); 1224 1224 } 1225 1225 1226 1226 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family); ··· 1296 1296 { 1297 1297 return 1; 1298 1298 } 1299 - static inline int xfrm_decode_session_reverse(struct sk_buff *skb, 1299 + static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, 1300 1300 struct flowi *fl, 1301 1301 unsigned int family) 1302 1302 { ··· 1669 1669 #endif 1670 1670 1671 1671 void xfrm_local_error(struct sk_buff *skb, int mtu); 1672 - int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1673 1672 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, 1674 1673 int encap_type); 1675 1674 int xfrm4_transport_finish(struct sk_buff *skb, int async); ··· 1688 1689 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); 1689 1690 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); 1690 1691 void xfrm4_local_error(struct sk_buff *skb, 
u32 mtu); 1691 - int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1692 1692 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, 1693 1693 struct ip6_tnl *t); 1694 1694 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, ··· 1710 1712 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); 1711 1713 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); 1712 1714 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); 1715 + struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, 1716 + struct sk_buff *skb); 1717 + struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, 1718 + struct sk_buff *skb); 1713 1719 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, 1714 1720 int optlen); 1715 1721 #else
+2 -1
include/uapi/linux/xfrm.h
··· 4 4 5 5 #include <linux/in6.h> 6 6 #include <linux/types.h> 7 + #include <linux/stddef.h> 7 8 8 9 /* All of the structures in this file may not change size as they are 9 10 * passed into the kernel from userspace via netlink sockets. ··· 34 33 __u8 ctx_alg; 35 34 __u16 ctx_len; 36 35 __u32 ctx_sid; 37 - char ctx_str[]; 36 + char ctx_str[] __counted_by(ctx_len); 38 37 }; 39 38 40 39 /* Security Context Domains of Interpretation */
+1
net/802/fddi.c
··· 175 175 } 176 176 EXPORT_SYMBOL(alloc_fddidev); 177 177 178 + MODULE_DESCRIPTION("Core routines for FDDI network devices"); 178 179 MODULE_LICENSE("GPL");
+1
net/802/garp.c
··· 21 21 static unsigned int garp_join_time __read_mostly = 200; 22 22 module_param(garp_join_time, uint, 0644); 23 23 MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)"); 24 + MODULE_DESCRIPTION("IEEE 802.1D Generic Attribute Registration Protocol (GARP)"); 24 25 MODULE_LICENSE("GPL"); 25 26 26 27 static const struct garp_state_trans {
+1
net/802/mrp.c
··· 26 26 module_param(mrp_periodic_time, uint, 0644); 27 27 MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)"); 28 28 29 + MODULE_DESCRIPTION("IEEE 802.1Q Multiple Registration Protocol (MRP)"); 29 30 MODULE_LICENSE("GPL"); 30 31 31 32 static const u8
+1
net/802/p8022.c
··· 60 60 EXPORT_SYMBOL(register_8022_client); 61 61 EXPORT_SYMBOL(unregister_8022_client); 62 62 63 + MODULE_DESCRIPTION("Support for 802.2 demultiplexing off Ethernet"); 63 64 MODULE_LICENSE("GPL");
+1
net/802/psnap.c
··· 160 160 kfree(proto); 161 161 } 162 162 163 + MODULE_DESCRIPTION("SNAP data link layer. Derived from 802.2"); 163 164 MODULE_LICENSE("GPL");
+1
net/802/stp.c
··· 98 98 } 99 99 EXPORT_SYMBOL_GPL(stp_proto_unregister); 100 100 101 + MODULE_DESCRIPTION("SAP demux for IEEE 802.1D Spanning Tree Protocol (STP)"); 101 102 MODULE_LICENSE("GPL");
+1
net/8021q/vlan.c
··· 738 738 module_init(vlan_proto_init); 739 739 module_exit(vlan_cleanup_module); 740 740 741 + MODULE_DESCRIPTION("802.1Q/802.1ad VLAN Protocol"); 741 742 MODULE_LICENSE("GPL"); 742 743 MODULE_VERSION(DRV_VERSION);
+1
net/core/dev_addr_lists_test.c
··· 233 233 }; 234 234 kunit_test_suite(dev_addr_test_suite); 235 235 236 + MODULE_DESCRIPTION("KUnit tests for struct netdev_hw_addr_list"); 236 237 MODULE_LICENSE("GPL");
+1
net/core/selftests.c
··· 405 405 } 406 406 EXPORT_SYMBOL_GPL(net_selftest_get_strings); 407 407 408 + MODULE_DESCRIPTION("Common library for generic PHY ethtool selftests"); 408 409 MODULE_LICENSE("GPL v2"); 409 410 MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+5 -1
net/ipv4/esp4_offload.c
··· 33 33 int offset = skb_gro_offset(skb); 34 34 struct xfrm_offload *xo; 35 35 struct xfrm_state *x; 36 + int encap_type = 0; 36 37 __be32 seq; 37 38 __be32 spi; 38 39 ··· 71 70 72 71 xo->flags |= XFRM_GRO; 73 72 73 + if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP) 74 + encap_type = UDP_ENCAP_ESPINUDP; 75 + 74 76 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; 75 77 XFRM_SPI_SKB_CB(skb)->family = AF_INET; 76 78 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); ··· 81 77 82 78 /* We don't need to handle errors from xfrm_input, it does all 83 79 * the error handling and frees the resources on error. */ 84 - xfrm_input(skb, IPPROTO_ESP, spi, -2); 80 + xfrm_input(skb, IPPROTO_ESP, spi, encap_type); 85 81 86 82 return ERR_PTR(-EINPROGRESS); 87 83 out_reset:
+1 -1
net/ipv4/icmp.c
··· 517 517 } else 518 518 return rt; 519 519 520 - err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET); 520 + err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET); 521 521 if (err) 522 522 goto relookup_failed; 523 523
+2 -2
net/ipv4/ip_vti.c
··· 288 288 switch (skb->protocol) { 289 289 case htons(ETH_P_IP): 290 290 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 291 - xfrm_decode_session(skb, &fl, AF_INET); 291 + xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET); 292 292 break; 293 293 case htons(ETH_P_IPV6): 294 294 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); 295 - xfrm_decode_session(skb, &fl, AF_INET6); 295 + xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET6); 296 296 break; 297 297 default: 298 298 goto tx_err;
+1 -1
net/ipv4/netfilter.c
··· 62 62 63 63 #ifdef CONFIG_XFRM 64 64 if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && 65 - xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { 65 + xfrm_decode_session(net, skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { 66 66 struct dst_entry *dst = skb_dst(skb); 67 67 skb_dst_set(skb, NULL); 68 68 dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
-6
net/ipv4/tcp_ao.c
··· 1533 1533 goto err_free_pool; 1534 1534 1535 1535 tfm = crypto_ahash_reqtfm(hp.req); 1536 - if (crypto_ahash_alignmask(tfm) > TCP_AO_KEY_ALIGN) { 1537 - err = -EOPNOTSUPP; 1538 - goto err_pool_end; 1539 - } 1540 1536 digest_size = crypto_ahash_digestsize(tfm); 1541 1537 tcp_sigpool_end(&hp); 1542 1538 ··· 1547 1551 key->digest_size = digest_size; 1548 1552 return key; 1549 1553 1550 - err_pool_end: 1551 - tcp_sigpool_end(&hp); 1552 1554 err_free_pool: 1553 1555 tcp_sigpool_release(pool_id); 1554 1556 return ERR_PTR(err);
+16
net/ipv4/udp.c
··· 2630 2630 } 2631 2631 } 2632 2632 2633 + static void set_xfrm_gro_udp_encap_rcv(__u16 encap_type, unsigned short family, 2634 + struct sock *sk) 2635 + { 2636 + #ifdef CONFIG_XFRM 2637 + if (udp_test_bit(GRO_ENABLED, sk) && encap_type == UDP_ENCAP_ESPINUDP) { 2638 + if (family == AF_INET) 2639 + WRITE_ONCE(udp_sk(sk)->gro_receive, xfrm4_gro_udp_encap_rcv); 2640 + else if (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) 2641 + WRITE_ONCE(udp_sk(sk)->gro_receive, ipv6_stub->xfrm6_gro_udp_encap_rcv); 2642 + } 2643 + #endif 2644 + } 2645 + 2633 2646 /* 2634 2647 * Socket option code for UDP 2635 2648 */ ··· 2692 2679 case 0: 2693 2680 #ifdef CONFIG_XFRM 2694 2681 case UDP_ENCAP_ESPINUDP: 2682 + set_xfrm_gro_udp_encap_rcv(val, sk->sk_family, sk); 2683 + fallthrough; 2695 2684 case UDP_ENCAP_ESPINUDP_NON_IKE: 2696 2685 #if IS_ENABLED(CONFIG_IPV6) 2697 2686 if (sk->sk_family == AF_INET6) ··· 2736 2721 udp_tunnel_encap_enable(sk); 2737 2722 udp_assign_bit(GRO_ENABLED, sk, valbool); 2738 2723 udp_assign_bit(ACCEPT_L4, sk, valbool); 2724 + set_xfrm_gro_udp_encap_rcv(up->encap_type, sk->sk_family, sk); 2739 2725 break; 2740 2726 2741 2727 /*
+77 -18
net/ipv4/xfrm4_input.c
··· 17 17 #include <linux/netfilter_ipv4.h> 18 18 #include <net/ip.h> 19 19 #include <net/xfrm.h> 20 + #include <net/protocol.h> 21 + #include <net/gro.h> 20 22 21 23 static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk, 22 24 struct sk_buff *skb) ··· 74 72 return 0; 75 73 } 76 74 77 - /* If it's a keepalive packet, then just eat it. 78 - * If it's an encapsulated packet, then pass it to the 79 - * IPsec xfrm input. 80 - * Returns 0 if skb passed to xfrm or was dropped. 81 - * Returns >0 if skb should be passed to UDP. 82 - * Returns <0 if skb should be resubmitted (-ret is protocol) 83 - */ 84 - int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) 75 + static int __xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb, bool pull) 85 76 { 86 77 struct udp_sock *up = udp_sk(sk); 87 78 struct udphdr *uh; ··· 105 110 case UDP_ENCAP_ESPINUDP: 106 111 /* Check if this is a keepalive packet. If so, eat it. */ 107 112 if (len == 1 && udpdata[0] == 0xff) { 108 - goto drop; 113 + return -EINVAL; 109 114 } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) { 110 115 /* ESP Packet without Non-ESP header */ 111 116 len = sizeof(struct udphdr); ··· 116 121 case UDP_ENCAP_ESPINUDP_NON_IKE: 117 122 /* Check if this is a keepalive packet. If so, eat it. */ 118 123 if (len == 1 && udpdata[0] == 0xff) { 119 - goto drop; 124 + return -EINVAL; 120 125 } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && 121 126 udpdata32[0] == 0 && udpdata32[1] == 0) { 122 127 ··· 134 139 * protocol to ESP, and then call into the transform receiver. 135 140 */ 136 141 if (skb_unclone(skb, GFP_ATOMIC)) 137 - goto drop; 142 + return -EINVAL; 138 143 139 144 /* Now we can update and verify the packet length... */ 140 145 iph = ip_hdr(skb); ··· 142 147 iph->tot_len = htons(ntohs(iph->tot_len) - len); 143 148 if (skb->len < iphlen + len) { 144 149 /* packet is too small!?! 
*/ 145 - goto drop; 150 + return -EINVAL; 146 151 } 147 152 148 153 /* pull the data buffer up to the ESP header and set the 149 154 * transport header to point to ESP. Keep UDP on the stack 150 155 * for later. 151 156 */ 152 - __skb_pull(skb, len); 153 - skb_reset_transport_header(skb); 157 + if (pull) { 158 + __skb_pull(skb, len); 159 + skb_reset_transport_header(skb); 160 + } else { 161 + skb_set_transport_header(skb, len); 162 + } 154 163 155 164 /* process ESP */ 156 - return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type); 157 - 158 - drop: 159 - kfree_skb(skb); 160 165 return 0; 161 166 } 167 + 168 + /* If it's a keepalive packet, then just eat it. 169 + * If it's an encapsulated packet, then pass it to the 170 + * IPsec xfrm input. 171 + * Returns 0 if skb passed to xfrm or was dropped. 172 + * Returns >0 if skb should be passed to UDP. 173 + * Returns <0 if skb should be resubmitted (-ret is protocol) 174 + */ 175 + int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) 176 + { 177 + int ret; 178 + 179 + ret = __xfrm4_udp_encap_rcv(sk, skb, true); 180 + if (!ret) 181 + return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, 182 + udp_sk(sk)->encap_type); 183 + 184 + if (ret < 0) { 185 + kfree_skb(skb); 186 + return 0; 187 + } 188 + 189 + return ret; 190 + } 162 191 EXPORT_SYMBOL(xfrm4_udp_encap_rcv); 192 + 193 + struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, 194 + struct sk_buff *skb) 195 + { 196 + int offset = skb_gro_offset(skb); 197 + const struct net_offload *ops; 198 + struct sk_buff *pp = NULL; 199 + int ret; 200 + 201 + offset = offset - sizeof(struct udphdr); 202 + 203 + if (!pskb_pull(skb, offset)) 204 + return NULL; 205 + 206 + rcu_read_lock(); 207 + ops = rcu_dereference(inet_offloads[IPPROTO_ESP]); 208 + if (!ops || !ops->callbacks.gro_receive) 209 + goto out; 210 + 211 + ret = __xfrm4_udp_encap_rcv(sk, skb, false); 212 + if (ret) 213 + goto out; 214 + 215 + skb_push(skb, offset); 216 + NAPI_GRO_CB(skb)->proto = 
IPPROTO_UDP; 217 + 218 + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); 219 + rcu_read_unlock(); 220 + 221 + return pp; 222 + 223 + out: 224 + rcu_read_unlock(); 225 + skb_push(skb, offset); 226 + NAPI_GRO_CB(skb)->same_flow = 0; 227 + NAPI_GRO_CB(skb)->flush = 1; 228 + 229 + return NULL; 230 + } 231 + EXPORT_SYMBOL(xfrm4_gro_udp_encap_rcv); 163 232 164 233 int xfrm4_rcv(struct sk_buff *skb) 165 234 {
+1
net/ipv6/af_inet6.c
··· 1050 1050 #if IS_ENABLED(CONFIG_XFRM) 1051 1051 .xfrm6_local_rxpmtu = xfrm6_local_rxpmtu, 1052 1052 .xfrm6_udp_encap_rcv = xfrm6_udp_encap_rcv, 1053 + .xfrm6_gro_udp_encap_rcv = xfrm6_gro_udp_encap_rcv, 1053 1054 .xfrm6_rcv_encap = xfrm6_rcv_encap, 1054 1055 #endif 1055 1056 .nd_tbl = &nd_tbl,
+8 -2
net/ipv6/esp6_offload.c
··· 34 34 int off = sizeof(struct ipv6hdr); 35 35 struct ipv6_opt_hdr *exthdr; 36 36 37 - if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP)) 37 + /* ESP or ESPINUDP */ 38 + if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP || 39 + ipv6_hdr->nexthdr == NEXTHDR_UDP)) 38 40 return offsetof(struct ipv6hdr, nexthdr); 39 41 40 42 while (off < nhlen) { ··· 56 54 int offset = skb_gro_offset(skb); 57 55 struct xfrm_offload *xo; 58 56 struct xfrm_state *x; 57 + int encap_type = 0; 59 58 __be32 seq; 60 59 __be32 spi; 61 60 int nhoff; 61 + 62 + if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP) 63 + encap_type = UDP_ENCAP_ESPINUDP; 62 64 63 65 if (!pskb_pull(skb, offset)) 64 66 return NULL; ··· 110 104 111 105 /* We don't need to handle errors from xfrm_input, it does all 112 106 * the error handling and frees the resources on error. */ 113 - xfrm_input(skb, IPPROTO_ESP, spi, -2); 107 + xfrm_input(skb, IPPROTO_ESP, spi, encap_type); 114 108 115 109 return ERR_PTR(-EINPROGRESS); 116 110 out_reset:
+1 -1
net/ipv6/icmp.c
··· 385 385 return dst; 386 386 } 387 387 388 - err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6); 388 + err = xfrm_decode_session_reverse(net, skb, flowi6_to_flowi(&fl2), AF_INET6); 389 389 if (err) 390 390 goto relookup_failed; 391 391
+2 -2
net/ipv6/ip6_vti.c
··· 569 569 goto tx_err; 570 570 571 571 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); 572 - xfrm_decode_session(skb, &fl, AF_INET6); 572 + xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET6); 573 573 break; 574 574 case htons(ETH_P_IP): 575 575 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 576 - xfrm_decode_session(skb, &fl, AF_INET); 576 + xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET); 577 577 break; 578 578 default: 579 579 goto tx_err;
+1 -1
net/ipv6/netfilter.c
··· 61 61 62 62 #ifdef CONFIG_XFRM 63 63 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 64 - xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { 64 + xfrm_decode_session(net, skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { 65 65 skb_dst_set(skb, NULL); 66 66 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); 67 67 if (IS_ERR(dst))
+82 -21
net/ipv6/xfrm6_input.c
··· 16 16 #include <linux/netfilter_ipv6.h> 17 17 #include <net/ipv6.h> 18 18 #include <net/xfrm.h> 19 + #include <net/protocol.h> 20 + #include <net/gro.h> 19 21 20 22 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, 21 23 struct ip6_tnl *t) ··· 69 67 return 0; 70 68 } 71 69 72 - /* If it's a keepalive packet, then just eat it. 73 - * If it's an encapsulated packet, then pass it to the 74 - * IPsec xfrm input. 75 - * Returns 0 if skb passed to xfrm or was dropped. 76 - * Returns >0 if skb should be passed to UDP. 77 - * Returns <0 if skb should be resubmitted (-ret is protocol) 78 - */ 79 - int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) 70 + static int __xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb, bool pull) 80 71 { 81 72 struct udp_sock *up = udp_sk(sk); 82 73 struct udphdr *uh; ··· 79 84 __u8 *udpdata; 80 85 __be32 *udpdata32; 81 86 u16 encap_type; 82 - 83 - if (skb->protocol == htons(ETH_P_IP)) 84 - return xfrm4_udp_encap_rcv(sk, skb); 85 87 86 88 encap_type = READ_ONCE(up->encap_type); 87 89 /* if this is not encapsulated socket, then just return now */ ··· 101 109 case UDP_ENCAP_ESPINUDP: 102 110 /* Check if this is a keepalive packet. If so, eat it. */ 103 111 if (len == 1 && udpdata[0] == 0xff) { 104 - goto drop; 112 + return -EINVAL; 105 113 } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) { 106 114 /* ESP Packet without Non-ESP header */ 107 115 len = sizeof(struct udphdr); ··· 112 120 case UDP_ENCAP_ESPINUDP_NON_IKE: 113 121 /* Check if this is a keepalive packet. If so, eat it. */ 114 122 if (len == 1 && udpdata[0] == 0xff) { 115 - goto drop; 123 + return -EINVAL; 116 124 } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && 117 125 udpdata32[0] == 0 && udpdata32[1] == 0) { 118 126 ··· 130 138 * protocol to ESP, and then call into the transform receiver. 
131 139 */ 132 140 if (skb_unclone(skb, GFP_ATOMIC)) 133 - goto drop; 141 + return -EINVAL; 134 142 135 143 /* Now we can update and verify the packet length... */ 136 144 ip6h = ipv6_hdr(skb); 137 145 ip6h->payload_len = htons(ntohs(ip6h->payload_len) - len); 138 146 if (skb->len < ip6hlen + len) { 139 147 /* packet is too small!?! */ 140 - goto drop; 148 + return -EINVAL; 141 149 } 142 150 143 151 /* pull the data buffer up to the ESP header and set the 144 152 * transport header to point to ESP. Keep UDP on the stack 145 153 * for later. 146 154 */ 147 - __skb_pull(skb, len); 148 - skb_reset_transport_header(skb); 155 + if (pull) { 156 + __skb_pull(skb, len); 157 + skb_reset_transport_header(skb); 158 + } else { 159 + skb_set_transport_header(skb, len); 160 + } 149 161 150 162 /* process ESP */ 151 - return xfrm6_rcv_encap(skb, IPPROTO_ESP, 0, encap_type); 152 - 153 - drop: 154 - kfree_skb(skb); 155 163 return 0; 164 + } 165 + 166 + /* If it's a keepalive packet, then just eat it. 167 + * If it's an encapsulated packet, then pass it to the 168 + * IPsec xfrm input. 169 + * Returns 0 if skb passed to xfrm or was dropped. 170 + * Returns >0 if skb should be passed to UDP. 
171 + * Returns <0 if skb should be resubmitted (-ret is protocol) 172 + */ 173 + int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) 174 + { 175 + int ret; 176 + 177 + if (skb->protocol == htons(ETH_P_IP)) 178 + return xfrm4_udp_encap_rcv(sk, skb); 179 + 180 + ret = __xfrm6_udp_encap_rcv(sk, skb, true); 181 + if (!ret) 182 + return xfrm6_rcv_encap(skb, IPPROTO_ESP, 0, 183 + udp_sk(sk)->encap_type); 184 + 185 + if (ret < 0) { 186 + kfree_skb(skb); 187 + return 0; 188 + } 189 + 190 + return ret; 191 + } 192 + 193 + struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, 194 + struct sk_buff *skb) 195 + { 196 + int offset = skb_gro_offset(skb); 197 + const struct net_offload *ops; 198 + struct sk_buff *pp = NULL; 199 + int ret; 200 + 201 + if (skb->protocol == htons(ETH_P_IP)) 202 + return xfrm4_gro_udp_encap_rcv(sk, head, skb); 203 + 204 + offset = offset - sizeof(struct udphdr); 205 + 206 + if (!pskb_pull(skb, offset)) 207 + return NULL; 208 + 209 + rcu_read_lock(); 210 + ops = rcu_dereference(inet6_offloads[IPPROTO_ESP]); 211 + if (!ops || !ops->callbacks.gro_receive) 212 + goto out; 213 + 214 + ret = __xfrm6_udp_encap_rcv(sk, skb, false); 215 + if (ret) 216 + goto out; 217 + 218 + skb_push(skb, offset); 219 + NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; 220 + 221 + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); 222 + rcu_read_unlock(); 223 + 224 + return pp; 225 + 226 + out: 227 + rcu_read_unlock(); 228 + skb_push(skb, offset); 229 + NAPI_GRO_CB(skb)->same_flow = 0; 230 + NAPI_GRO_CB(skb)->flush = 1; 231 + 232 + return NULL; 156 233 } 157 234 158 235 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
+1 -1
net/netfilter/nf_nat_proto.c
··· 668 668 struct flowi fl; 669 669 int err; 670 670 671 - err = xfrm_decode_session(skb, &fl, family); 671 + err = xfrm_decode_session(net, skb, &fl, family); 672 672 if (err < 0) 673 673 return err; 674 674
+2 -4
net/xfrm/xfrm_input.c
··· 462 462 struct xfrm_offload *xo = xfrm_offload(skb); 463 463 struct sec_path *sp; 464 464 465 - if (encap_type < 0) { 465 + if (encap_type < 0 || (xo && xo->flags & XFRM_GRO)) { 466 466 x = xfrm_input_state(skb); 467 467 468 468 if (unlikely(x->km.state != XFRM_STATE_VALID)) { ··· 485 485 seq = XFRM_SKB_CB(skb)->seq.input.low; 486 486 goto resume; 487 487 } 488 - 489 - /* encap_type < -1 indicates a GRO call. */ 490 - encap_type = 0; 488 + /* GRO call */ 491 489 seq = XFRM_SPI_SKB_CB(skb)->seq; 492 490 493 491 if (xo && (xo->flags & CRYPTO_DONE)) {
+2 -2
net/xfrm/xfrm_interface_core.c
··· 536 536 switch (skb->protocol) { 537 537 case htons(ETH_P_IPV6): 538 538 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); 539 - xfrm_decode_session(skb, &fl, AF_INET6); 539 + xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET6); 540 540 if (!dst) { 541 541 fl.u.ip6.flowi6_oif = dev->ifindex; 542 542 fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; ··· 551 551 break; 552 552 case htons(ETH_P_IP): 553 553 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 554 - xfrm_decode_session(skb, &fl, AF_INET); 554 + xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET); 555 555 if (!dst) { 556 556 struct rtable *rt; 557 557
+126 -175
net/xfrm/xfrm_policy.c
··· 149 149 struct hlist_head *res[XFRM_POL_CAND_MAX]; 150 150 }; 151 151 152 + struct xfrm_flow_keys { 153 + struct flow_dissector_key_basic basic; 154 + struct flow_dissector_key_control control; 155 + union { 156 + struct flow_dissector_key_ipv4_addrs ipv4; 157 + struct flow_dissector_key_ipv6_addrs ipv6; 158 + } addrs; 159 + struct flow_dissector_key_ip ip; 160 + struct flow_dissector_key_icmp icmp; 161 + struct flow_dissector_key_ports ports; 162 + struct flow_dissector_key_keyid gre; 163 + }; 164 + 165 + static struct flow_dissector xfrm_session_dissector __ro_after_init; 166 + 152 167 static DEFINE_SPINLOCK(xfrm_if_cb_lock); 153 168 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly; 154 169 ··· 2873 2858 /* Fixup the mark to support VTI. */ 2874 2859 skb_mark = skb->mark; 2875 2860 skb->mark = pol->mark.v; 2876 - xfrm_decode_session(skb, &fl, dst->ops->family); 2861 + xfrm_decode_session(net, skb, &fl, dst->ops->family); 2877 2862 skb->mark = skb_mark; 2878 2863 spin_unlock(&pq->hold_queue.lock); 2879 2864 ··· 2909 2894 /* Fixup the mark to support VTI. 
*/ 2910 2895 skb_mark = skb->mark; 2911 2896 skb->mark = pol->mark.v; 2912 - xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family); 2897 + xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family); 2913 2898 skb->mark = skb_mark; 2914 2899 2915 2900 dst_hold(xfrm_dst_path(skb_dst(skb))); ··· 3387 3372 } 3388 3373 3389 3374 static void 3390 - decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse) 3375 + decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse) 3391 3376 { 3392 - const struct iphdr *iph = ip_hdr(skb); 3393 - int ihl = iph->ihl; 3394 - u8 *xprth = skb_network_header(skb) + ihl * 4; 3395 3377 struct flowi4 *fl4 = &fl->u.ip4; 3396 - int oif = 0; 3397 - 3398 - if (skb_dst(skb) && skb_dst(skb)->dev) 3399 - oif = skb_dst(skb)->dev->ifindex; 3400 3378 3401 3379 memset(fl4, 0, sizeof(struct flowi4)); 3402 - fl4->flowi4_mark = skb->mark; 3403 - fl4->flowi4_oif = reverse ? skb->skb_iif : oif; 3404 3380 3405 - fl4->flowi4_proto = iph->protocol; 3406 - fl4->daddr = reverse ? iph->saddr : iph->daddr; 3407 - fl4->saddr = reverse ? 
iph->daddr : iph->saddr; 3408 - fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK; 3409 - 3410 - if (!ip_is_fragment(iph)) { 3411 - switch (iph->protocol) { 3412 - case IPPROTO_UDP: 3413 - case IPPROTO_UDPLITE: 3414 - case IPPROTO_TCP: 3415 - case IPPROTO_SCTP: 3416 - case IPPROTO_DCCP: 3417 - if (xprth + 4 < skb->data || 3418 - pskb_may_pull(skb, xprth + 4 - skb->data)) { 3419 - __be16 *ports; 3420 - 3421 - xprth = skb_network_header(skb) + ihl * 4; 3422 - ports = (__be16 *)xprth; 3423 - 3424 - fl4->fl4_sport = ports[!!reverse]; 3425 - fl4->fl4_dport = ports[!reverse]; 3426 - } 3427 - break; 3428 - case IPPROTO_ICMP: 3429 - if (xprth + 2 < skb->data || 3430 - pskb_may_pull(skb, xprth + 2 - skb->data)) { 3431 - u8 *icmp; 3432 - 3433 - xprth = skb_network_header(skb) + ihl * 4; 3434 - icmp = xprth; 3435 - 3436 - fl4->fl4_icmp_type = icmp[0]; 3437 - fl4->fl4_icmp_code = icmp[1]; 3438 - } 3439 - break; 3440 - case IPPROTO_GRE: 3441 - if (xprth + 12 < skb->data || 3442 - pskb_may_pull(skb, xprth + 12 - skb->data)) { 3443 - __be16 *greflags; 3444 - __be32 *gre_hdr; 3445 - 3446 - xprth = skb_network_header(skb) + ihl * 4; 3447 - greflags = (__be16 *)xprth; 3448 - gre_hdr = (__be32 *)xprth; 3449 - 3450 - if (greflags[0] & GRE_KEY) { 3451 - if (greflags[0] & GRE_CSUM) 3452 - gre_hdr++; 3453 - fl4->fl4_gre_key = gre_hdr[1]; 3454 - } 3455 - } 3456 - break; 3457 - default: 3458 - break; 3459 - } 3381 + if (reverse) { 3382 + fl4->saddr = flkeys->addrs.ipv4.dst; 3383 + fl4->daddr = flkeys->addrs.ipv4.src; 3384 + fl4->fl4_sport = flkeys->ports.dst; 3385 + fl4->fl4_dport = flkeys->ports.src; 3386 + } else { 3387 + fl4->saddr = flkeys->addrs.ipv4.src; 3388 + fl4->daddr = flkeys->addrs.ipv4.dst; 3389 + fl4->fl4_sport = flkeys->ports.src; 3390 + fl4->fl4_dport = flkeys->ports.dst; 3460 3391 } 3392 + 3393 + switch (flkeys->basic.ip_proto) { 3394 + case IPPROTO_GRE: 3395 + fl4->fl4_gre_key = flkeys->gre.keyid; 3396 + break; 3397 + case IPPROTO_ICMP: 3398 + fl4->fl4_icmp_type = 
flkeys->icmp.type; 3399 + fl4->fl4_icmp_code = flkeys->icmp.code; 3400 + break; 3401 + } 3402 + 3403 + fl4->flowi4_proto = flkeys->basic.ip_proto; 3404 + fl4->flowi4_tos = flkeys->ip.tos; 3461 3405 } 3462 3406 3463 3407 #if IS_ENABLED(CONFIG_IPV6) 3464 3408 static void 3465 - decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) 3409 + decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse) 3466 3410 { 3467 3411 struct flowi6 *fl6 = &fl->u.ip6; 3468 - int onlyproto = 0; 3469 - const struct ipv6hdr *hdr = ipv6_hdr(skb); 3470 - u32 offset = sizeof(*hdr); 3471 - struct ipv6_opt_hdr *exthdr; 3472 - const unsigned char *nh = skb_network_header(skb); 3473 - u16 nhoff = IP6CB(skb)->nhoff; 3474 - int oif = 0; 3475 - u8 nexthdr; 3476 - 3477 - if (!nhoff) 3478 - nhoff = offsetof(struct ipv6hdr, nexthdr); 3479 - 3480 - nexthdr = nh[nhoff]; 3481 - 3482 - if (skb_dst(skb) && skb_dst(skb)->dev) 3483 - oif = skb_dst(skb)->dev->ifindex; 3484 3412 3485 3413 memset(fl6, 0, sizeof(struct flowi6)); 3486 - fl6->flowi6_mark = skb->mark; 3487 - fl6->flowi6_oif = reverse ? skb->skb_iif : oif; 3488 3414 3489 - fl6->daddr = reverse ? hdr->saddr : hdr->daddr; 3490 - fl6->saddr = reverse ? 
hdr->daddr : hdr->saddr; 3491 - 3492 - while (nh + offset + sizeof(*exthdr) < skb->data || 3493 - pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) { 3494 - nh = skb_network_header(skb); 3495 - exthdr = (struct ipv6_opt_hdr *)(nh + offset); 3496 - 3497 - switch (nexthdr) { 3498 - case NEXTHDR_FRAGMENT: 3499 - onlyproto = 1; 3500 - fallthrough; 3501 - case NEXTHDR_ROUTING: 3502 - case NEXTHDR_HOP: 3503 - case NEXTHDR_DEST: 3504 - offset += ipv6_optlen(exthdr); 3505 - nexthdr = exthdr->nexthdr; 3506 - break; 3507 - case IPPROTO_UDP: 3508 - case IPPROTO_UDPLITE: 3509 - case IPPROTO_TCP: 3510 - case IPPROTO_SCTP: 3511 - case IPPROTO_DCCP: 3512 - if (!onlyproto && (nh + offset + 4 < skb->data || 3513 - pskb_may_pull(skb, nh + offset + 4 - skb->data))) { 3514 - __be16 *ports; 3515 - 3516 - nh = skb_network_header(skb); 3517 - ports = (__be16 *)(nh + offset); 3518 - fl6->fl6_sport = ports[!!reverse]; 3519 - fl6->fl6_dport = ports[!reverse]; 3520 - } 3521 - fl6->flowi6_proto = nexthdr; 3522 - return; 3523 - case IPPROTO_ICMPV6: 3524 - if (!onlyproto && (nh + offset + 2 < skb->data || 3525 - pskb_may_pull(skb, nh + offset + 2 - skb->data))) { 3526 - u8 *icmp; 3527 - 3528 - nh = skb_network_header(skb); 3529 - icmp = (u8 *)(nh + offset); 3530 - fl6->fl6_icmp_type = icmp[0]; 3531 - fl6->fl6_icmp_code = icmp[1]; 3532 - } 3533 - fl6->flowi6_proto = nexthdr; 3534 - return; 3535 - case IPPROTO_GRE: 3536 - if (!onlyproto && 3537 - (nh + offset + 12 < skb->data || 3538 - pskb_may_pull(skb, nh + offset + 12 - skb->data))) { 3539 - struct gre_base_hdr *gre_hdr; 3540 - __be32 *gre_key; 3541 - 3542 - nh = skb_network_header(skb); 3543 - gre_hdr = (struct gre_base_hdr *)(nh + offset); 3544 - gre_key = (__be32 *)(gre_hdr + 1); 3545 - 3546 - if (gre_hdr->flags & GRE_KEY) { 3547 - if (gre_hdr->flags & GRE_CSUM) 3548 - gre_key++; 3549 - fl6->fl6_gre_key = *gre_key; 3550 - } 3551 - } 3552 - fl6->flowi6_proto = nexthdr; 3553 - return; 3554 - 3555 - #if 
IS_ENABLED(CONFIG_IPV6_MIP6) 3556 - case IPPROTO_MH: 3557 - offset += ipv6_optlen(exthdr); 3558 - if (!onlyproto && (nh + offset + 3 < skb->data || 3559 - pskb_may_pull(skb, nh + offset + 3 - skb->data))) { 3560 - struct ip6_mh *mh; 3561 - 3562 - nh = skb_network_header(skb); 3563 - mh = (struct ip6_mh *)(nh + offset); 3564 - fl6->fl6_mh_type = mh->ip6mh_type; 3565 - } 3566 - fl6->flowi6_proto = nexthdr; 3567 - return; 3568 - #endif 3569 - default: 3570 - fl6->flowi6_proto = nexthdr; 3571 - return; 3572 - } 3415 + if (reverse) { 3416 + fl6->saddr = flkeys->addrs.ipv6.dst; 3417 + fl6->daddr = flkeys->addrs.ipv6.src; 3418 + fl6->fl6_sport = flkeys->ports.dst; 3419 + fl6->fl6_dport = flkeys->ports.src; 3420 + } else { 3421 + fl6->saddr = flkeys->addrs.ipv6.src; 3422 + fl6->daddr = flkeys->addrs.ipv6.dst; 3423 + fl6->fl6_sport = flkeys->ports.src; 3424 + fl6->fl6_dport = flkeys->ports.dst; 3573 3425 } 3426 + 3427 + switch (flkeys->basic.ip_proto) { 3428 + case IPPROTO_GRE: 3429 + fl6->fl6_gre_key = flkeys->gre.keyid; 3430 + break; 3431 + case IPPROTO_ICMPV6: 3432 + fl6->fl6_icmp_type = flkeys->icmp.type; 3433 + fl6->fl6_icmp_code = flkeys->icmp.code; 3434 + break; 3435 + } 3436 + 3437 + fl6->flowi6_proto = flkeys->basic.ip_proto; 3574 3438 } 3575 3439 #endif 3576 3440 3577 - int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 3441 + int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, 3578 3442 unsigned int family, int reverse) 3579 3443 { 3444 + struct xfrm_flow_keys flkeys; 3445 + 3446 + memset(&flkeys, 0, sizeof(flkeys)); 3447 + __skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys, 3448 + NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP); 3449 + 3580 3450 switch (family) { 3581 3451 case AF_INET: 3582 - decode_session4(skb, fl, reverse); 3452 + decode_session4(&flkeys, fl, reverse); 3583 3453 break; 3584 3454 #if IS_ENABLED(CONFIG_IPV6) 3585 3455 case AF_INET6: 3586 - decode_session6(skb, fl, reverse); 3456 + 
decode_session6(&flkeys, fl, reverse); 3587 3457 break; 3588 3458 #endif 3589 3459 default: 3590 3460 return -EAFNOSUPPORT; 3461 + } 3462 + 3463 + fl->flowi_mark = skb->mark; 3464 + if (reverse) { 3465 + fl->flowi_oif = skb->skb_iif; 3466 + } else { 3467 + int oif = 0; 3468 + 3469 + if (skb_dst(skb) && skb_dst(skb)->dev) 3470 + oif = skb_dst(skb)->dev->ifindex; 3471 + 3472 + fl->flowi_oif = oif; 3591 3473 } 3592 3474 3593 3475 return security_xfrm_decode_session(skb, &fl->flowi_secid); ··· 3535 3623 reverse = dir & ~XFRM_POLICY_MASK; 3536 3624 dir &= XFRM_POLICY_MASK; 3537 3625 3538 - if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 3626 + if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) { 3539 3627 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 3540 3628 return 0; 3541 3629 } ··· 3691 3779 struct dst_entry *dst; 3692 3780 int res = 1; 3693 3781 3694 - if (xfrm_decode_session(skb, &fl, family) < 0) { 3782 + if (xfrm_decode_session(net, skb, &fl, family) < 0) { 3695 3783 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3696 3784 return 0; 3697 3785 } ··· 4170 4258 .exit = xfrm_net_exit, 4171 4259 }; 4172 4260 4261 + static const struct flow_dissector_key xfrm_flow_dissector_keys[] = { 4262 + { 4263 + .key_id = FLOW_DISSECTOR_KEY_CONTROL, 4264 + .offset = offsetof(struct xfrm_flow_keys, control), 4265 + }, 4266 + { 4267 + .key_id = FLOW_DISSECTOR_KEY_BASIC, 4268 + .offset = offsetof(struct xfrm_flow_keys, basic), 4269 + }, 4270 + { 4271 + .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, 4272 + .offset = offsetof(struct xfrm_flow_keys, addrs.ipv4), 4273 + }, 4274 + { 4275 + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, 4276 + .offset = offsetof(struct xfrm_flow_keys, addrs.ipv6), 4277 + }, 4278 + { 4279 + .key_id = FLOW_DISSECTOR_KEY_PORTS, 4280 + .offset = offsetof(struct xfrm_flow_keys, ports), 4281 + }, 4282 + { 4283 + .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID, 4284 + .offset = offsetof(struct xfrm_flow_keys, gre), 4285 + }, 4286 + { 4287 + .key_id = 
FLOW_DISSECTOR_KEY_IP, 4288 + .offset = offsetof(struct xfrm_flow_keys, ip), 4289 + }, 4290 + { 4291 + .key_id = FLOW_DISSECTOR_KEY_ICMP, 4292 + .offset = offsetof(struct xfrm_flow_keys, icmp), 4293 + }, 4294 + }; 4295 + 4173 4296 void __init xfrm_init(void) 4174 4297 { 4298 + skb_flow_dissector_init(&xfrm_session_dissector, 4299 + xfrm_flow_dissector_keys, 4300 + ARRAY_SIZE(xfrm_flow_dissector_keys)); 4301 + 4175 4302 register_pernet_subsys(&xfrm_net_ops); 4176 4303 xfrm_dev_init(); 4177 4304 xfrm_input_init();