Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains Netfilter/IPVS updates for your net-next
tree:

1) Missing NFTA_RULE_POSITION_ID netlink attribute validation,
from Phil Sutter.

2) Restrict matching on tunnel metadata to rx/tx path, from wenxu.

3) Avoid indirect calls for IPV6=y, from Florian Westphal.

4) Add two indirections to prepare merger of IPV4 and IPV6 nat
modules, from Florian Westphal.

5) Broken indentation in ctnetlink, from Colin Ian King.

6) Patches to use struct_size() from netfilter and IPVS,
from Gustavo A. R. Silva.

7) Display kernel splat only once in case of racing to confirm
conntrack from bridge plus nfqueue setups, from Chieh-Min Wang.

8) Skip checksum validation for layer 4 protocols that don't need it,
patch from Alin Nastac.

9) Sparse warning due to symbol that should be static in CLUSTERIP,
from Wei Yongjun.

10) Add new toggle to disable SDP payload translation when media
endpoint is reachable through the same interface as the signalling
peer, from Alin Nastac.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+244 -63
+52 -8
include/linux/netfilter_ipv6.h
··· 25 25 * if IPv6 is a module. 26 26 */ 27 27 struct nf_ipv6_ops { 28 + #if IS_MODULE(CONFIG_IPV6) 28 29 int (*chk_addr)(struct net *net, const struct in6_addr *addr, 29 30 const struct net_device *dev, int strict); 31 + int (*route_me_harder)(struct net *net, struct sk_buff *skb); 32 + int (*dev_get_saddr)(struct net *net, const struct net_device *dev, 33 + const struct in6_addr *daddr, unsigned int srcprefs, 34 + struct in6_addr *saddr); 35 + int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, 36 + bool strict); 37 + #endif 30 38 void (*route_input)(struct sk_buff *skb); 31 39 int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, 32 40 int (*output)(struct net *, struct sock *, struct sk_buff *)); 33 - int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, 34 - bool strict); 35 41 int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); 36 42 }; 37 43 38 44 #ifdef CONFIG_NETFILTER 39 - int ip6_route_me_harder(struct net *net, struct sk_buff *skb); 40 - __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, 41 - unsigned int dataoff, u_int8_t protocol); 42 - 43 - int ipv6_netfilter_init(void); 44 - void ipv6_netfilter_fini(void); 45 + #include <net/addrconf.h> 45 46 46 47 extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; 47 48 static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) 48 49 { 49 50 return rcu_dereference(nf_ipv6_ops); 50 51 } 52 + 53 + static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, 54 + const struct net_device *dev, int strict) 55 + { 56 + #if IS_MODULE(CONFIG_IPV6) 57 + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); 58 + 59 + if (!v6_ops) 60 + return 1; 61 + 62 + return v6_ops->chk_addr(net, addr, dev, strict); 63 + #else 64 + return ipv6_chk_addr(net, addr, dev, strict); 65 + #endif 66 + } 67 + 68 + int __nf_ip6_route(struct net *net, struct dst_entry **dst, 69 + struct flowi *fl, bool strict); 70 + 71 + static inline 
int nf_ip6_route(struct net *net, struct dst_entry **dst, 72 + struct flowi *fl, bool strict) 73 + { 74 + #if IS_MODULE(CONFIG_IPV6) 75 + const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); 76 + 77 + if (v6ops) 78 + return v6ops->route(net, dst, fl, strict); 79 + 80 + return -EHOSTUNREACH; 81 + #endif 82 + #if IS_BUILTIN(CONFIG_IPV6) 83 + return __nf_ip6_route(net, dst, fl, strict); 84 + #else 85 + return -EHOSTUNREACH; 86 + #endif 87 + } 88 + 89 + int ip6_route_me_harder(struct net *net, struct sk_buff *skb); 90 + __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, 91 + unsigned int dataoff, u_int8_t protocol); 92 + 93 + int ipv6_netfilter_init(void); 94 + void ipv6_netfilter_fini(void); 51 95 52 96 #else /* CONFIG_NETFILTER */ 53 97 static inline int ipv6_netfilter_init(void) { return 0; }
+1
include/net/netfilter/ipv4/nf_reject.h
··· 5 5 #include <linux/skbuff.h> 6 6 #include <net/ip.h> 7 7 #include <net/icmp.h> 8 + #include <net/netfilter/nf_reject.h> 8 9 9 10 void nf_send_unreach(struct sk_buff *skb_in, int code, int hook); 10 11 void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
+1
include/net/netfilter/ipv6/nf_reject.h
··· 3 3 #define _IPV6_NF_REJECT_H 4 4 5 5 #include <linux/icmpv6.h> 6 + #include <net/netfilter/nf_reject.h> 6 7 7 8 void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, 8 9 unsigned int hooknum);
+27
include/net/netfilter/nf_reject.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _NF_REJECT_H 3 + #define _NF_REJECT_H 4 + 5 + static inline bool nf_reject_verify_csum(__u8 proto) 6 + { 7 + /* Skip protocols that don't use 16-bit one's complement checksum 8 + * of the entire payload. 9 + */ 10 + switch (proto) { 11 + /* Protocols with other integrity checks. */ 12 + case IPPROTO_AH: 13 + case IPPROTO_ESP: 14 + case IPPROTO_SCTP: 15 + 16 + /* Protocols with partial checksums. */ 17 + case IPPROTO_UDPLITE: 18 + case IPPROTO_DCCP: 19 + 20 + /* Protocols with optional checksums. */ 21 + case IPPROTO_GRE: 22 + return false; 23 + } 24 + return true; 25 + } 26 + 27 + #endif /* _NF_REJECT_H */
+9
include/uapi/linux/netfilter/nf_tables.h
··· 1727 1727 }; 1728 1728 #define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1) 1729 1729 1730 + enum nft_tunnel_mode { 1731 + NFT_TUNNEL_MODE_NONE, 1732 + NFT_TUNNEL_MODE_RX, 1733 + NFT_TUNNEL_MODE_TX, 1734 + __NFT_TUNNEL_MODE_MAX 1735 + }; 1736 + #define NFT_TUNNEL_MODE_MAX (__NFT_TUNNEL_MODE_MAX - 1) 1737 + 1730 1738 enum nft_tunnel_attributes { 1731 1739 NFTA_TUNNEL_UNSPEC, 1732 1740 NFTA_TUNNEL_KEY, 1733 1741 NFTA_TUNNEL_DREG, 1742 + NFTA_TUNNEL_MODE, 1734 1743 __NFTA_TUNNEL_MAX 1735 1744 }; 1736 1745 #define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1)
+5 -5
net/bridge/netfilter/nft_reject_bridge.c
··· 125 125 if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len))) 126 126 return; 127 127 128 - if (ip_hdr(oldskb)->protocol == IPPROTO_TCP || 129 - ip_hdr(oldskb)->protocol == IPPROTO_UDP) 130 - proto = ip_hdr(oldskb)->protocol; 131 - else 132 - proto = 0; 128 + proto = ip_hdr(oldskb)->protocol; 133 129 134 130 if (!skb_csum_unnecessary(oldskb) && 131 + nf_reject_verify_csum(proto) && 135 132 nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto)) 136 133 return; 137 134 ··· 230 233 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); 231 234 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) 232 235 return false; 236 + 237 + if (!nf_reject_verify_csum(proto)) 238 + return true; 233 239 234 240 return nf_ip6_checksum(skb, hook, thoff, proto) == 0; 235 241 }
+1 -1
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 864 864 .size = sizeof(struct clusterip_net), 865 865 }; 866 866 867 - struct notifier_block cip_netdev_notifier = { 867 + static struct notifier_block cip_netdev_notifier = { 868 868 .notifier_call = clusterip_netdev_event 869 869 }; 870 870
+2 -7
net/ipv4/netfilter/nf_reject_ipv4.c
··· 173 173 void nf_send_unreach(struct sk_buff *skb_in, int code, int hook) 174 174 { 175 175 struct iphdr *iph = ip_hdr(skb_in); 176 - u8 proto; 176 + u8 proto = iph->protocol; 177 177 178 178 if (iph->frag_off & htons(IP_OFFSET)) 179 179 return; 180 180 181 - if (skb_csum_unnecessary(skb_in)) { 181 + if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) { 182 182 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); 183 183 return; 184 184 } 185 - 186 - if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP) 187 - proto = iph->protocol; 188 - else 189 - proto = 0; 190 185 191 186 if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0) 192 187 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+9 -4
net/ipv6/netfilter.c
··· 86 86 return 0; 87 87 } 88 88 89 - static int nf_ip6_route(struct net *net, struct dst_entry **dst, 90 - struct flowi *fl, bool strict) 89 + int __nf_ip6_route(struct net *net, struct dst_entry **dst, 90 + struct flowi *fl, bool strict) 91 91 { 92 92 static const struct ipv6_pinfo fake_pinfo; 93 93 static const struct inet_sock fake_sk = { ··· 107 107 *dst = result; 108 108 return err; 109 109 } 110 + EXPORT_SYMBOL_GPL(__nf_ip6_route); 110 111 111 112 static const struct nf_ipv6_ops ipv6ops = { 113 + #if IS_MODULE(CONFIG_IPV6) 112 114 .chk_addr = ipv6_chk_addr, 113 - .route_input = ip6_route_input, 115 + .route_me_harder = ip6_route_me_harder, 116 + .dev_get_saddr = ipv6_dev_get_saddr, 117 + .route = __nf_ip6_route, 118 + #endif 119 + .route_input = ip6_route_input, 114 120 .fragment = ip6_fragment, 115 - .route = nf_ip6_route, 116 121 .reroute = nf_ip6_reroute, 117 122 }; 118 123
+16 -1
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
··· 17 17 #include <net/checksum.h> 18 18 #include <net/ip6_checksum.h> 19 19 #include <net/ip6_route.h> 20 + #include <net/xfrm.h> 20 21 #include <net/ipv6.h> 21 22 22 23 #include <net/netfilter/nf_conntrack_core.h> ··· 319 318 return ret; 320 319 } 321 320 321 + static int nat_route_me_harder(struct net *net, struct sk_buff *skb) 322 + { 323 + #ifdef CONFIG_IPV6_MODULE 324 + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); 325 + 326 + if (!v6_ops) 327 + return -EHOSTUNREACH; 328 + 329 + return v6_ops->route_me_harder(net, skb); 330 + #else 331 + return ip6_route_me_harder(net, skb); 332 + #endif 333 + } 334 + 322 335 static unsigned int 323 336 nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb, 324 337 const struct nf_hook_state *state) ··· 349 334 350 335 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, 351 336 &ct->tuplehash[!dir].tuple.src.u3)) { 352 - err = ip6_route_me_harder(state->net, skb); 337 + err = nat_route_me_harder(state->net, skb); 353 338 if (err < 0) 354 339 ret = NF_DROP_ERR(err); 355 340 }
+19 -2
net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
··· 24 24 25 25 static atomic_t v6_worker_count; 26 26 27 + static int 28 + nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, 29 + const struct in6_addr *daddr, unsigned int srcprefs, 30 + struct in6_addr *saddr) 31 + { 32 + #ifdef CONFIG_IPV6_MODULE 33 + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); 34 + 35 + if (!v6_ops) 36 + return -EHOSTUNREACH; 37 + 38 + return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr); 39 + #else 40 + return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr); 41 + #endif 42 + } 43 + 27 44 unsigned int 28 45 nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, 29 46 const struct net_device *out) ··· 55 38 WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 56 39 ctinfo == IP_CT_RELATED_REPLY))); 57 40 58 - if (ipv6_dev_get_saddr(nf_ct_net(ct), out, 59 - &ipv6_hdr(skb)->daddr, 0, &src) < 0) 41 + if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out, 42 + &ipv6_hdr(skb)->daddr, 0, &src) < 0) 60 43 return NF_DROP; 61 44 62 45 nat = nf_ct_nat_ext_add(ct);
+3
net/ipv6/netfilter/nf_reject_ipv6.c
··· 233 233 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) 234 234 return false; 235 235 236 + if (!nf_reject_verify_csum(proto)) 237 + return true; 238 + 236 239 return nf_ip6_checksum(skb, hook, thoff, proto) == 0; 237 240 } 238 241
+2 -7
net/ipv6/netfilter/nft_fib_ipv6.c
··· 59 59 struct ipv6hdr *iph) 60 60 { 61 61 const struct net_device *dev = NULL; 62 - const struct nf_ipv6_ops *v6ops; 63 62 int route_err, addrtype; 64 63 struct rt6_info *rt; 65 64 struct flowi6 fl6 = { ··· 67 68 }; 68 69 u32 ret = 0; 69 70 70 - v6ops = nf_get_ipv6_ops(); 71 - if (!v6ops) 72 - return RTN_UNREACHABLE; 73 - 74 71 if (priv->flags & NFTA_FIB_F_IIF) 75 72 dev = nft_in(pkt); 76 73 else if (priv->flags & NFTA_FIB_F_OIF) ··· 74 79 75 80 nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph); 76 81 77 - if (dev && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true)) 82 + if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true)) 78 83 ret = RTN_LOCAL; 79 84 80 - route_err = v6ops->route(nft_net(pkt), (struct dst_entry **)&rt, 85 + route_err = nf_ip6_route(nft_net(pkt), (struct dst_entry **)&rt, 81 86 flowi6_to_flowi(&fl6), false); 82 87 if (route_err) 83 88 goto err;
+2 -4
net/netfilter/ipvs/ip_vs_ctl.c
··· 2744 2744 int size; 2745 2745 2746 2746 get = (struct ip_vs_get_services *)arg; 2747 - size = sizeof(*get) + 2748 - sizeof(struct ip_vs_service_entry) * get->num_services; 2747 + size = struct_size(get, entrytable, get->num_services); 2749 2748 if (*len != size) { 2750 2749 pr_err("length: %u != %u\n", *len, size); 2751 2750 ret = -EINVAL; ··· 2785 2786 int size; 2786 2787 2787 2788 get = (struct ip_vs_get_dests *)arg; 2788 - size = sizeof(*get) + 2789 - sizeof(struct ip_vs_dest_entry) * get->num_dests; 2789 + size = struct_size(get, entrytable, get->num_dests); 2790 2790 if (*len != size) { 2791 2791 pr_err("length: %u != %u\n", *len, size); 2792 2792 ret = -EINVAL;
+11 -3
net/netfilter/nf_conntrack_core.c
··· 936 936 * REJECT will give spurious warnings here. 937 937 */ 938 938 939 - /* No external references means no one else could have 940 - * confirmed us. 939 + /* Another skb with the same unconfirmed conntrack may 940 + * win the race. This may happen for bridge(br_flood) 941 + * or broadcast/multicast packets do skb_clone with 942 + * unconfirmed conntrack. 941 943 */ 942 - WARN_ON(nf_ct_is_confirmed(ct)); 944 + if (unlikely(nf_ct_is_confirmed(ct))) { 945 + WARN_ON_ONCE(1); 946 + nf_conntrack_double_unlock(hash, reply_hash); 947 + local_bh_enable(); 948 + return NF_DROP; 949 + } 950 + 943 951 pr_debug("Confirming conntrack %p\n", ct); 944 952 /* We have to check the DYING flag after unlink to prevent 945 953 * a race against nf_ct_get_next_corpse() possibly called from
+1 -1
net/netfilter/nf_conntrack_netlink.c
··· 2675 2675 ret = ctnetlink_dump_tuples_ip(skb, &m); 2676 2676 if (ret >= 0) { 2677 2677 l4proto = nf_ct_l4proto_find(tuple->dst.protonum); 2678 - ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 2678 + ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 2679 2679 } 2680 2680 rcu_read_unlock(); 2681 2681
+42
net/netfilter/nf_conntrack_sip.c
··· 21 21 #include <linux/tcp.h> 22 22 #include <linux/netfilter.h> 23 23 24 + #include <net/route.h> 25 + #include <net/ip6_route.h> 24 26 #include <net/netfilter/nf_conntrack.h> 25 27 #include <net/netfilter/nf_conntrack_core.h> 26 28 #include <net/netfilter/nf_conntrack_expect.h> ··· 55 53 module_param(sip_direct_media, int, 0600); 56 54 MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " 57 55 "endpoints only (default 1)"); 56 + 57 + static int sip_external_media __read_mostly = 0; 58 + module_param(sip_external_media, int, 0600); 59 + MODULE_PARM_DESC(sip_external_media, "Expect Media streams between external " 60 + "endpoints (default 0)"); 58 61 59 62 const struct nf_nat_sip_hooks *nf_nat_sip_hooks; 60 63 EXPORT_SYMBOL_GPL(nf_nat_sip_hooks); ··· 868 861 if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3)) 869 862 return NF_ACCEPT; 870 863 saddr = &ct->tuplehash[!dir].tuple.src.u3; 864 + } else if (sip_external_media) { 865 + struct net_device *dev = skb_dst(skb)->dev; 866 + struct net *net = dev_net(dev); 867 + struct rtable *rt; 868 + struct flowi4 fl4 = {}; 869 + #if IS_ENABLED(CONFIG_IPV6) 870 + struct flowi6 fl6 = {}; 871 + #endif 872 + struct dst_entry *dst = NULL; 873 + 874 + switch (nf_ct_l3num(ct)) { 875 + case NFPROTO_IPV4: 876 + fl4.daddr = daddr->ip; 877 + rt = ip_route_output_key(net, &fl4); 878 + if (!IS_ERR(rt)) 879 + dst = &rt->dst; 880 + break; 881 + 882 + #if IS_ENABLED(CONFIG_IPV6) 883 + case NFPROTO_IPV6: 884 + fl6.daddr = daddr->in6; 885 + dst = ip6_route_output(net, NULL, &fl6); 886 + if (dst->error) { 887 + dst_release(dst); 888 + dst = NULL; 889 + } 890 + break; 891 + #endif 892 + } 893 + 894 + /* Don't predict any conntracks when media endpoint is reachable 895 + * through the same interface as the signalling peer. 896 + */ 897 + if (dst && dst->dev == dev) 898 + return NF_ACCEPT; 871 899 } 872 900 873 901 /* We need to check whether the registration exists before attempting
+1
net/netfilter/nf_tables_api.c
··· 2238 2238 [NFTA_RULE_USERDATA] = { .type = NLA_BINARY, 2239 2239 .len = NFT_USERDATA_MAXLEN }, 2240 2240 [NFTA_RULE_ID] = { .type = NLA_U32 }, 2241 + [NFTA_RULE_POSITION_ID] = { .type = NLA_U32 }, 2241 2242 }; 2242 2243 2243 2244 static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
+32 -2
net/netfilter/nft_tunnel.c
··· 15 15 struct nft_tunnel { 16 16 enum nft_tunnel_keys key:8; 17 17 enum nft_registers dreg:8; 18 + enum nft_tunnel_mode mode:8; 18 19 }; 19 20 20 21 static void nft_tunnel_get_eval(const struct nft_expr *expr, ··· 30 29 31 30 switch (priv->key) { 32 31 case NFT_TUNNEL_PATH: 33 - nft_reg_store8(dest, !!tun_info); 32 + if (!tun_info) { 33 + nft_reg_store8(dest, false); 34 + return; 35 + } 36 + if (priv->mode == NFT_TUNNEL_MODE_NONE || 37 + (priv->mode == NFT_TUNNEL_MODE_RX && 38 + !(tun_info->mode & IP_TUNNEL_INFO_TX)) || 39 + (priv->mode == NFT_TUNNEL_MODE_TX && 40 + (tun_info->mode & IP_TUNNEL_INFO_TX))) 41 + nft_reg_store8(dest, true); 42 + else 43 + nft_reg_store8(dest, false); 34 44 break; 35 45 case NFT_TUNNEL_ID: 36 46 if (!tun_info) { 37 47 regs->verdict.code = NFT_BREAK; 38 48 return; 39 49 } 40 - *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id)); 50 + if (priv->mode == NFT_TUNNEL_MODE_NONE || 51 + (priv->mode == NFT_TUNNEL_MODE_RX && 52 + !(tun_info->mode & IP_TUNNEL_INFO_TX)) || 53 + (priv->mode == NFT_TUNNEL_MODE_TX && 54 + (tun_info->mode & IP_TUNNEL_INFO_TX))) 55 + *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id)); 56 + else 57 + regs->verdict.code = NFT_BREAK; 41 58 break; 42 59 default: 43 60 WARN_ON(1); ··· 66 47 static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = { 67 48 [NFTA_TUNNEL_KEY] = { .type = NLA_U32 }, 68 49 [NFTA_TUNNEL_DREG] = { .type = NLA_U32 }, 50 + [NFTA_TUNNEL_MODE] = { .type = NLA_U32 }, 69 51 }; 70 52 71 53 static int nft_tunnel_get_init(const struct nft_ctx *ctx, ··· 94 74 95 75 priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]); 96 76 77 + if (tb[NFTA_TUNNEL_MODE]) { 78 + priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE])); 79 + if (priv->mode > NFT_TUNNEL_MODE_MAX) 80 + return -EOPNOTSUPP; 81 + } else { 82 + priv->mode = NFT_TUNNEL_MODE_NONE; 83 + } 84 + 97 85 return nft_validate_register_store(ctx, priv->dreg, NULL, 98 86 NFT_DATA_VALUE, len); 99 87 } ··· 114 86 if (nla_put_be32(skb, 
NFTA_TUNNEL_KEY, htonl(priv->key))) 115 87 goto nla_put_failure; 116 88 if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg)) 89 + goto nla_put_failure; 90 + if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode))) 117 91 goto nla_put_failure; 118 92 return 0; 119 93
+2 -4
net/netfilter/utils.c
··· 162 162 int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, 163 163 bool strict, unsigned short family) 164 164 { 165 - const struct nf_ipv6_ops *v6ops; 165 + const struct nf_ipv6_ops *v6ops __maybe_unused; 166 166 int ret = 0; 167 167 168 168 switch (family) { ··· 170 170 ret = nf_ip_route(net, dst, fl, strict); 171 171 break; 172 172 case AF_INET6: 173 - v6ops = rcu_dereference(nf_ipv6_ops); 174 - if (v6ops) 175 - ret = v6ops->route(net, dst, fl, strict); 173 + ret = nf_ip6_route(net, dst, fl, strict); 176 174 break; 177 175 } 178 176
+5 -11
net/netfilter/xt_addrtype.c
··· 36 36 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, 37 37 const struct in6_addr *addr, u16 mask) 38 38 { 39 - const struct nf_ipv6_ops *v6ops; 40 39 struct flowi6 flow; 41 40 struct rt6_info *rt; 42 41 u32 ret = 0; ··· 46 47 if (dev) 47 48 flow.flowi6_oif = dev->ifindex; 48 49 49 - v6ops = nf_get_ipv6_ops(); 50 - if (v6ops) { 51 - if (dev && (mask & XT_ADDRTYPE_LOCAL)) { 52 - if (v6ops->chk_addr(net, addr, dev, true)) 53 - ret = XT_ADDRTYPE_LOCAL; 54 - } 55 - route_err = v6ops->route(net, (struct dst_entry **)&rt, 56 - flowi6_to_flowi(&flow), false); 57 - } else { 58 - route_err = 1; 50 + if (dev && (mask & XT_ADDRTYPE_LOCAL)) { 51 + if (nf_ipv6_chk_addr(net, addr, dev, true)) 52 + ret = XT_ADDRTYPE_LOCAL; 59 53 } 60 54 55 + route_err = nf_ip6_route(net, (struct dst_entry **)&rt, 56 + flowi6_to_flowi(&flow), false); 61 57 if (route_err) 62 58 return XT_ADDRTYPE_UNREACHABLE; 63 59
+1 -3
net/netfilter/xt_recent.c
··· 337 337 unsigned int nstamp_mask; 338 338 unsigned int i; 339 339 int ret = -EINVAL; 340 - size_t sz; 341 340 342 341 net_get_random_once(&hash_rnd, sizeof(hash_rnd)); 343 342 ··· 386 387 goto out; 387 388 } 388 389 389 - sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size; 390 - t = kvzalloc(sz, GFP_KERNEL); 390 + t = kvzalloc(struct_size(t, iphash, ip_list_hash_size), GFP_KERNEL); 391 391 if (t == NULL) { 392 392 ret = -ENOMEM; 393 393 goto out;