Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains a large batch with Netfilter fixes for
your net tree, they are:

1) Two patches to solve conntrack garbage collector cpu hogging, one to
remove GC_MAX_EVICTS and another to look at the ratio (scanned entries
vs. evicted entries) to make a decision on whether to reduce or not
the scanning interval. From Florian Westphal.

2) Two patches to fix incorrect set element counting if NLM_F_EXCL
is not set. Moreover, don't decrement set->nelems from abort path
if -ENFILE which leaks a spare slot in the set. This includes a
patch to deconstify the set walk callback to update set->ndeact.

3) Two fixes for the fwmark_reflect sysctl feature: Propagate mark to
reply packets both from nf_reject and local stack, from Pau Espin Pedrol.

4) Fix incorrect handling of loopback traffic in rpfilter and nf_tables
fib expression, from Liping Zhang.

5) Fix oops on stateful objects netlink dump, when no filter is specified.
Also from Liping Zhang.

6) Fix a build error if proc is not available in ipt_CLUSTERIP, related
to a fix that was applied in the previous batch for net. From Arnd Bergmann.

7) Fix lack of string validation in table, chain, set and stateful
object names in nf_tables, from Liping Zhang. Moreover, restrict
maximum log prefix length to 127 bytes, otherwise explicitly bail
out.

8) Two patches to fix spelling and typos in nf_tables uapi header file
and Kconfig, patches from Alexander Alemayhu and William Breathitt Gray.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+116 -93
+3 -3
include/net/netfilter/nf_tables.h
··· 207 207 unsigned int skip; 208 208 int err; 209 209 int (*fn)(const struct nft_ctx *ctx, 210 - const struct nft_set *set, 210 + struct nft_set *set, 211 211 const struct nft_set_iter *iter, 212 - const struct nft_set_elem *elem); 212 + struct nft_set_elem *elem); 213 213 }; 214 214 215 215 /** ··· 301 301 void (*remove)(const struct nft_set *set, 302 302 const struct nft_set_elem *elem); 303 303 void (*walk)(const struct nft_ctx *ctx, 304 - const struct nft_set *set, 304 + struct nft_set *set, 305 305 struct nft_set_iter *iter); 306 306 307 307 unsigned int (*privsize)(const struct nlattr * const nla[]);
+6
include/net/netfilter/nft_fib.h
··· 9 9 10 10 extern const struct nla_policy nft_fib_policy[]; 11 11 12 + static inline bool 13 + nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in) 14 + { 15 + return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; 16 + } 17 + 12 18 int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr); 13 19 int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 14 20 const struct nlattr * const tb[]);
+2
include/uapi/linux/netfilter/nf_log.h
··· 9 9 #define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ 10 10 #define NF_LOG_MASK 0x2f 11 11 12 + #define NF_LOG_PREFIXLEN 128 13 + 12 14 #endif /* _NETFILTER_NF_LOG_H */
+2 -2
include/uapi/linux/netfilter/nf_tables.h
··· 235 235 /** 236 236 * enum nft_rule_compat_attributes - nf_tables rule compat attributes 237 237 * 238 - * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32) 238 + * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32) 239 239 * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32) 240 240 */ 241 241 enum nft_rule_compat_attributes { ··· 499 499 * enum nft_byteorder_ops - nf_tables byteorder operators 500 500 * 501 501 * @NFT_BYTEORDER_NTOH: network to host operator 502 - * @NFT_BYTEORDER_HTON: host to network opertaor 502 + * @NFT_BYTEORDER_HTON: host to network operator 503 503 */ 504 504 enum nft_byteorder_ops { 505 505 NFT_BYTEORDER_NTOH,
+1
net/ipv4/ip_output.c
··· 1629 1629 sk->sk_protocol = ip_hdr(skb)->protocol; 1630 1630 sk->sk_bound_dev_if = arg->bound_dev_if; 1631 1631 sk->sk_sndbuf = sysctl_wmem_default; 1632 + sk->sk_mark = fl4.flowi4_mark; 1632 1633 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1633 1634 len, 0, &ipc, &rt, MSG_DONTWAIT); 1634 1635 if (unlikely(err)) {
+6 -1
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 144 144 rcu_read_lock_bh(); 145 145 c = __clusterip_config_find(net, clusterip); 146 146 if (c) { 147 - if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount))) 147 + #ifdef CONFIG_PROC_FS 148 + if (!c->pde) 149 + c = NULL; 150 + else 151 + #endif 152 + if (unlikely(!atomic_inc_not_zero(&c->refcount))) 148 153 c = NULL; 149 154 else if (entry) 150 155 atomic_inc(&c->entries);
+4 -4
net/ipv4/netfilter/ipt_rpfilter.c
··· 63 63 return dev_match || flags & XT_RPFILTER_LOOSE; 64 64 } 65 65 66 - static bool rpfilter_is_local(const struct sk_buff *skb) 66 + static bool 67 + rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in) 67 68 { 68 - const struct rtable *rt = skb_rtable(skb); 69 - return rt && (rt->rt_flags & RTCF_LOCAL); 69 + return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; 70 70 } 71 71 72 72 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) ··· 79 79 info = par->matchinfo; 80 80 invert = info->flags & XT_RPFILTER_INVERT; 81 81 82 - if (rpfilter_is_local(skb)) 82 + if (rpfilter_is_loopback(skb, xt_in(par))) 83 83 return true ^ invert; 84 84 85 85 iph = ip_hdr(skb);
+2
net/ipv4/netfilter/nf_reject_ipv4.c
··· 126 126 /* ip_route_me_harder expects skb->dst to be set */ 127 127 skb_dst_set_noref(nskb, skb_dst(oldskb)); 128 128 129 + nskb->mark = IP4_REPLY_MARK(net, oldskb->mark); 130 + 129 131 skb_reserve(nskb, LL_MAX_HEADER); 130 132 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, 131 133 ip4_dst_hoplimit(skb_dst(nskb)));
+5 -10
net/ipv4/netfilter/nft_fib_ipv4.c
··· 26 26 return addr; 27 27 } 28 28 29 - static bool fib4_is_local(const struct sk_buff *skb) 30 - { 31 - const struct rtable *rt = skb_rtable(skb); 32 - 33 - return rt && (rt->rt_flags & RTCF_LOCAL); 34 - } 35 - 36 29 #define DSCP_BITS 0xfc 37 30 38 31 void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, ··· 88 95 else 89 96 oif = NULL; 90 97 91 - if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) { 92 - nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); 98 + if (nft_hook(pkt) == NF_INET_PRE_ROUTING && 99 + nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { 100 + nft_fib_store_result(dest, priv->result, pkt, 101 + nft_in(pkt)->ifindex); 93 102 return; 94 103 } 95 104 ··· 126 131 switch (res.type) { 127 132 case RTN_UNICAST: 128 133 break; 129 - case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */ 134 + case RTN_LOCAL: /* Should not see RTN_LOCAL here */ 130 135 return; 131 136 default: 132 137 break;
+4 -4
net/ipv6/netfilter/ip6t_rpfilter.c
··· 72 72 return ret; 73 73 } 74 74 75 - static bool rpfilter_is_local(const struct sk_buff *skb) 75 + static bool 76 + rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in) 76 77 { 77 - const struct rt6_info *rt = (const void *) skb_dst(skb); 78 - return rt && (rt->rt6i_flags & RTF_LOCAL); 78 + return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; 79 79 } 80 80 81 81 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) ··· 85 85 struct ipv6hdr *iph; 86 86 bool invert = info->flags & XT_RPFILTER_INVERT; 87 87 88 - if (rpfilter_is_local(skb)) 88 + if (rpfilter_is_loopback(skb, xt_in(par))) 89 89 return true ^ invert; 90 90 91 91 iph = ipv6_hdr(skb);
+3
net/ipv6/netfilter/nf_reject_ipv6.c
··· 157 157 fl6.fl6_sport = otcph->dest; 158 158 fl6.fl6_dport = otcph->source; 159 159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); 160 + fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark); 160 161 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 161 162 dst = ip6_route_output(net, NULL, &fl6); 162 163 if (dst->error) { ··· 180 179 } 181 180 182 181 skb_dst_set(nskb, dst); 182 + 183 + nskb->mark = fl6.flowi6_mark; 183 184 184 185 skb_reserve(nskb, hh_len + dst->header_len); 185 186 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+4 -9
net/ipv6/netfilter/nft_fib_ipv6.c
··· 18 18 #include <net/ip6_fib.h> 19 19 #include <net/ip6_route.h> 20 20 21 - static bool fib6_is_local(const struct sk_buff *skb) 22 - { 23 - const struct rt6_info *rt = (const void *)skb_dst(skb); 24 - 25 - return rt && (rt->rt6i_flags & RTF_LOCAL); 26 - } 27 - 28 21 static int get_ifindex(const struct net_device *dev) 29 22 { 30 23 return dev ? dev->ifindex : 0; ··· 157 164 158 165 lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif); 159 166 160 - if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) { 161 - nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); 167 + if (nft_hook(pkt) == NF_INET_PRE_ROUTING && 168 + nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { 169 + nft_fib_store_result(dest, priv->result, pkt, 170 + nft_in(pkt)->ifindex); 162 171 return; 163 172 } 164 173
+1
net/ipv6/tcp_ipv6.c
··· 840 840 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 841 841 if (!IS_ERR(dst)) { 842 842 skb_dst_set(buff, dst); 843 + ctl_sk->sk_mark = fl6.flowi6_mark; 843 844 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); 844 845 TCP_INC_STATS(net, TCP_MIB_OUTSEGS); 845 846 if (rst)
+1 -1
net/netfilter/Kconfig
··· 494 494 depends on NF_CONNTRACK 495 495 tristate "Netfilter nf_tables conntrack module" 496 496 help 497 - This option adds the "meta" expression that you can use to match 497 + This option adds the "ct" expression that you can use to match 498 498 connection tracking information such as the flow state. 499 499 500 500 config NFT_SET_RBTREE
+21 -23
net/netfilter/nf_conntrack_core.c
··· 85 85 static __read_mostly bool nf_conntrack_locks_all; 86 86 87 87 /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ 88 - #define GC_MAX_BUCKETS_DIV 64u 89 - /* upper bound of scan intervals */ 90 - #define GC_INTERVAL_MAX (2 * HZ) 91 - /* maximum conntracks to evict per gc run */ 92 - #define GC_MAX_EVICTS 256u 88 + #define GC_MAX_BUCKETS_DIV 128u 89 + /* upper bound of full table scan */ 90 + #define GC_MAX_SCAN_JIFFIES (16u * HZ) 91 + /* desired ratio of entries found to be expired */ 92 + #define GC_EVICT_RATIO 50u 93 93 94 94 static struct conntrack_gc_work conntrack_gc_work; 95 95 ··· 938 938 939 939 static void gc_worker(struct work_struct *work) 940 940 { 941 + unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); 941 942 unsigned int i, goal, buckets = 0, expired_count = 0; 942 943 struct conntrack_gc_work *gc_work; 943 944 unsigned int ratio, scanned = 0; ··· 980 979 */ 981 980 rcu_read_unlock(); 982 981 cond_resched_rcu_qs(); 983 - } while (++buckets < goal && 984 - expired_count < GC_MAX_EVICTS); 982 + } while (++buckets < goal); 985 983 986 984 if (gc_work->exiting) 987 985 return; ··· 997 997 * 1. Minimize time until we notice a stale entry 998 998 * 2. Maximize scan intervals to not waste cycles 999 999 * 1000 - * Normally, expired_count will be 0, this increases the next_run time 1001 - * to priorize 2) above. 1000 + * Normally, expire ratio will be close to 0. 1002 1001 * 1003 - * As soon as a timed-out entry is found, move towards 1) and increase 1004 - * the scan frequency. 1005 - * In case we have lots of evictions next scan is done immediately. 1002 + * As soon as a sizeable fraction of the entries have expired 1003 + * increase scan frequency. 1006 1004 */ 1007 1005 ratio = scanned ? 
expired_count * 100 / scanned : 0; 1008 - if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { 1009 - gc_work->next_gc_run = 0; 1010 - next_run = 0; 1011 - } else if (expired_count) { 1012 - gc_work->next_gc_run /= 2U; 1013 - next_run = msecs_to_jiffies(1); 1006 + if (ratio > GC_EVICT_RATIO) { 1007 + gc_work->next_gc_run = min_interval; 1014 1008 } else { 1015 - if (gc_work->next_gc_run < GC_INTERVAL_MAX) 1016 - gc_work->next_gc_run += msecs_to_jiffies(1); 1009 + unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV; 1017 1010 1018 - next_run = gc_work->next_gc_run; 1011 + BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0); 1012 + 1013 + gc_work->next_gc_run += min_interval; 1014 + if (gc_work->next_gc_run > max) 1015 + gc_work->next_gc_run = max; 1019 1016 } 1020 1017 1018 + next_run = gc_work->next_gc_run; 1021 1019 gc_work->last_bucket = i; 1022 1020 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); 1023 1021 } ··· 1023 1025 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) 1024 1026 { 1025 1027 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); 1026 - gc_work->next_gc_run = GC_INTERVAL_MAX; 1028 + gc_work->next_gc_run = HZ; 1027 1029 gc_work->exiting = false; 1028 1030 } 1029 1031 ··· 1915 1917 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1916 1918 1917 1919 conntrack_gc_work_init(&conntrack_gc_work); 1918 - queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); 1920 + queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ); 1919 1921 1920 1922 return 0; 1921 1923
-1
net/netfilter/nf_log.c
··· 13 13 /* Internal logging interface, which relies on the real 14 14 LOG target modules */ 15 15 16 - #define NF_LOG_PREFIXLEN 128 17 16 #define NFLOGGER_NAME_LEN 64 18 17 19 18 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
+39 -28
net/netfilter/nf_tables_api.c
··· 928 928 } 929 929 930 930 static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { 931 - [NFTA_CHAIN_TABLE] = { .type = NLA_STRING }, 931 + [NFTA_CHAIN_TABLE] = { .type = NLA_STRING, 932 + .len = NFT_TABLE_MAXNAMELEN - 1 }, 932 933 [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 }, 933 934 [NFTA_CHAIN_NAME] = { .type = NLA_STRING, 934 935 .len = NFT_CHAIN_MAXNAMELEN - 1 }, ··· 1855 1854 } 1856 1855 1857 1856 static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { 1858 - [NFTA_RULE_TABLE] = { .type = NLA_STRING }, 1857 + [NFTA_RULE_TABLE] = { .type = NLA_STRING, 1858 + .len = NFT_TABLE_MAXNAMELEN - 1 }, 1859 1859 [NFTA_RULE_CHAIN] = { .type = NLA_STRING, 1860 1860 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 1861 1861 [NFTA_RULE_HANDLE] = { .type = NLA_U64 }, ··· 2445 2443 } 2446 2444 2447 2445 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { 2448 - [NFTA_SET_TABLE] = { .type = NLA_STRING }, 2446 + [NFTA_SET_TABLE] = { .type = NLA_STRING, 2447 + .len = NFT_TABLE_MAXNAMELEN - 1 }, 2449 2448 [NFTA_SET_NAME] = { .type = NLA_STRING, 2450 2449 .len = NFT_SET_MAXNAMELEN - 1 }, 2451 2450 [NFTA_SET_FLAGS] = { .type = NLA_U32 }, ··· 3087 3084 } 3088 3085 3089 3086 static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, 3090 - const struct nft_set *set, 3087 + struct nft_set *set, 3091 3088 const struct nft_set_iter *iter, 3092 - const struct nft_set_elem *elem) 3089 + struct nft_set_elem *elem) 3093 3090 { 3094 3091 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 3095 3092 enum nft_registers dreg; ··· 3195 3192 }; 3196 3193 3197 3194 static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { 3198 - [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING }, 3199 - [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING }, 3195 + [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING, 3196 + .len = NFT_TABLE_MAXNAMELEN - 1 }, 3197 + [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING, 3198 + .len = 
NFT_SET_MAXNAMELEN - 1 }, 3200 3199 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, 3201 3200 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, 3202 3201 }; ··· 3308 3303 }; 3309 3304 3310 3305 static int nf_tables_dump_setelem(const struct nft_ctx *ctx, 3311 - const struct nft_set *set, 3306 + struct nft_set *set, 3312 3307 const struct nft_set_iter *iter, 3313 - const struct nft_set_elem *elem) 3308 + struct nft_set_elem *elem) 3314 3309 { 3315 3310 struct nft_set_dump_args *args; 3316 3311 ··· 3322 3317 { 3323 3318 struct net *net = sock_net(skb->sk); 3324 3319 u8 genmask = nft_genmask_cur(net); 3325 - const struct nft_set *set; 3320 + struct nft_set *set; 3326 3321 struct nft_set_dump_args args; 3327 3322 struct nft_ctx ctx; 3328 3323 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1]; ··· 3745 3740 goto err5; 3746 3741 } 3747 3742 3743 + if (set->size && 3744 + !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) { 3745 + err = -ENFILE; 3746 + goto err6; 3747 + } 3748 + 3748 3749 nft_trans_elem(trans) = elem; 3749 3750 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3750 3751 return 0; 3751 3752 3753 + err6: 3754 + set->ops->remove(set, &elem); 3752 3755 err5: 3753 3756 kfree(trans); 3754 3757 err4: ··· 3803 3790 return -EBUSY; 3804 3791 3805 3792 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3806 - if (set->size && 3807 - !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) 3808 - return -ENFILE; 3809 - 3810 3793 err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); 3811 - if (err < 0) { 3812 - atomic_dec(&set->nelems); 3794 + if (err < 0) 3813 3795 break; 3814 - } 3815 3796 } 3816 3797 return err; 3817 3798 } ··· 3890 3883 } 3891 3884 3892 3885 static int nft_flush_set(const struct nft_ctx *ctx, 3893 - const struct nft_set *set, 3886 + struct nft_set *set, 3894 3887 const struct nft_set_iter *iter, 3895 - const struct nft_set_elem *elem) 3888 + struct nft_set_elem *elem) 3896 3889 { 3897 3890 struct 
nft_trans *trans; 3898 3891 int err; ··· 3906 3899 err = -ENOENT; 3907 3900 goto err1; 3908 3901 } 3902 + set->ndeact++; 3909 3903 3910 - nft_trans_elem_set(trans) = (struct nft_set *)set; 3911 - nft_trans_elem(trans) = *((struct nft_set_elem *)elem); 3904 + nft_trans_elem_set(trans) = set; 3905 + nft_trans_elem(trans) = *elem; 3912 3906 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3913 3907 3914 3908 return 0; ··· 4040 4032 EXPORT_SYMBOL_GPL(nf_tables_obj_lookup); 4041 4033 4042 4034 static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = { 4043 - [NFTA_OBJ_TABLE] = { .type = NLA_STRING }, 4044 - [NFTA_OBJ_NAME] = { .type = NLA_STRING }, 4035 + [NFTA_OBJ_TABLE] = { .type = NLA_STRING, 4036 + .len = NFT_TABLE_MAXNAMELEN - 1 }, 4037 + [NFTA_OBJ_NAME] = { .type = NLA_STRING, 4038 + .len = NFT_OBJ_MAXNAMELEN - 1 }, 4045 4039 [NFTA_OBJ_TYPE] = { .type = NLA_U32 }, 4046 4040 [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, 4047 4041 }; ··· 4272 4262 if (idx > s_idx) 4273 4263 memset(&cb->args[1], 0, 4274 4264 sizeof(cb->args) - sizeof(cb->args[0])); 4275 - if (filter->table[0] && 4265 + if (filter && filter->table[0] && 4276 4266 strcmp(filter->table, table->name)) 4277 4267 goto cont; 4278 - if (filter->type != NFT_OBJECT_UNSPEC && 4268 + if (filter && 4269 + filter->type != NFT_OBJECT_UNSPEC && 4279 4270 obj->type->type != filter->type) 4280 4271 goto cont; 4281 4272 ··· 5020 5009 const struct nft_chain *chain); 5021 5010 5022 5011 static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, 5023 - const struct nft_set *set, 5012 + struct nft_set *set, 5024 5013 const struct nft_set_iter *iter, 5025 - const struct nft_set_elem *elem) 5014 + struct nft_set_elem *elem) 5026 5015 { 5027 5016 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 5028 5017 const struct nft_data *data; ··· 5046 5035 { 5047 5036 const struct nft_rule *rule; 5048 5037 const struct nft_expr *expr, *last; 5049 - const struct nft_set *set; 5038 + struct nft_set 
*set; 5050 5039 struct nft_set_binding *binding; 5051 5040 struct nft_set_iter iter; 5052 5041
+2 -1
net/netfilter/nft_dynset.c
··· 98 98 } 99 99 100 100 static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = { 101 - [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING }, 101 + [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING, 102 + .len = NFT_SET_MAXNAMELEN - 1 }, 102 103 [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 }, 103 104 [NFTA_DYNSET_OP] = { .type = NLA_U32 }, 104 105 [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
+2 -1
net/netfilter/nft_log.c
··· 39 39 40 40 static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { 41 41 [NFTA_LOG_GROUP] = { .type = NLA_U16 }, 42 - [NFTA_LOG_PREFIX] = { .type = NLA_STRING }, 42 + [NFTA_LOG_PREFIX] = { .type = NLA_STRING, 43 + .len = NF_LOG_PREFIXLEN - 1 }, 43 44 [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, 44 45 [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, 45 46 [NFTA_LOG_LEVEL] = { .type = NLA_U32 },
+2 -1
net/netfilter/nft_lookup.c
··· 49 49 } 50 50 51 51 static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { 52 - [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, 52 + [NFTA_LOOKUP_SET] = { .type = NLA_STRING, 53 + .len = NFT_SET_MAXNAMELEN - 1 }, 53 54 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, 54 55 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, 55 56 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
+4 -2
net/netfilter/nft_objref.c
··· 193 193 } 194 194 195 195 static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = { 196 - [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING }, 196 + [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING, 197 + .len = NFT_OBJ_MAXNAMELEN - 1 }, 197 198 [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 }, 198 199 [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 }, 199 - [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING }, 200 + [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING, 201 + .len = NFT_SET_MAXNAMELEN - 1 }, 200 202 [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 }, 201 203 }; 202 204
+1 -1
net/netfilter/nft_set_hash.c
··· 212 212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params); 213 213 } 214 214 215 - static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, 215 + static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set, 216 216 struct nft_set_iter *iter) 217 217 { 218 218 struct nft_hash *priv = nft_set_priv(set);
+1 -1
net/netfilter/nft_set_rbtree.c
··· 221 221 } 222 222 223 223 static void nft_rbtree_walk(const struct nft_ctx *ctx, 224 - const struct nft_set *set, 224 + struct nft_set *set, 225 225 struct nft_set_iter *iter) 226 226 { 227 227 const struct nft_rbtree *priv = nft_set_priv(set);