Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: nf_conntrack: add support for "conntrack zones"

Normally, each connection needs a unique identity. Conntrack zones allow
one to specify a numerical zone using the CT target; connections in
different zones can then use the same identity.

Example:

iptables -t raw -A PREROUTING -i veth0 -j CT --zone 1
iptables -t raw -A OUTPUT -o veth1 -j CT --zone 1

Signed-off-by: Patrick McHardy <kaber@trash.net>

+235 -84
+1 -1
include/linux/netfilter/xt_CT.h
··· 5 5 6 6 struct xt_ct_target_info { 7 7 u_int16_t flags; 8 - u_int16_t __unused; 8 + u_int16_t zone; 9 9 u_int32_t ct_events; 10 10 u_int32_t exp_events; 11 11 char helper[16];
+3
include/net/ip.h
··· 352 352 IP_DEFRAG_LOCAL_DELIVER, 353 353 IP_DEFRAG_CALL_RA_CHAIN, 354 354 IP_DEFRAG_CONNTRACK_IN, 355 + __IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHORT_MAX, 355 356 IP_DEFRAG_CONNTRACK_OUT, 357 + __IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHORT_MAX, 356 358 IP_DEFRAG_CONNTRACK_BRIDGE_IN, 359 + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX, 357 360 IP_DEFRAG_VS_IN, 358 361 IP_DEFRAG_VS_OUT, 359 362 IP_DEFRAG_VS_FWD
+3
include/net/ipv6.h
··· 355 355 enum ip6_defrag_users { 356 356 IP6_DEFRAG_LOCAL_DELIVER, 357 357 IP6_DEFRAG_CONNTRACK_IN, 358 + __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHORT_MAX, 358 359 IP6_DEFRAG_CONNTRACK_OUT, 360 + __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHORT_MAX, 359 361 IP6_DEFRAG_CONNTRACK_BRIDGE_IN, 362 + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX, 360 363 }; 361 364 362 365 struct ip6_create_arg {
+3 -2
include/net/netfilter/nf_conntrack.h
··· 198 198 extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size); 199 199 200 200 extern struct nf_conntrack_tuple_hash * 201 - __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple); 201 + __nf_conntrack_find(struct net *net, u16 zone, 202 + const struct nf_conntrack_tuple *tuple); 202 203 203 204 extern void nf_conntrack_hash_insert(struct nf_conn *ct); 204 205 extern void nf_ct_delete_from_lists(struct nf_conn *ct); ··· 268 267 nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data); 269 268 extern void nf_conntrack_free(struct nf_conn *ct); 270 269 extern struct nf_conn * 271 - nf_conntrack_alloc(struct net *net, 270 + nf_conntrack_alloc(struct net *net, u16 zone, 272 271 const struct nf_conntrack_tuple *orig, 273 272 const struct nf_conntrack_tuple *repl, 274 273 gfp_t gfp);
+2 -1
include/net/netfilter/nf_conntrack_core.h
··· 49 49 50 50 /* Find a connection corresponding to a tuple. */ 51 51 extern struct nf_conntrack_tuple_hash * 52 - nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple); 52 + nf_conntrack_find_get(struct net *net, u16 zone, 53 + const struct nf_conntrack_tuple *tuple); 53 54 54 55 extern int __nf_conntrack_confirm(struct sk_buff *skb); 55 56
+6 -3
include/net/netfilter/nf_conntrack_expect.h
··· 74 74 void nf_conntrack_expect_fini(struct net *net); 75 75 76 76 struct nf_conntrack_expect * 77 - __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple); 77 + __nf_ct_expect_find(struct net *net, u16 zone, 78 + const struct nf_conntrack_tuple *tuple); 78 79 79 80 struct nf_conntrack_expect * 80 - nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple); 81 + nf_ct_expect_find_get(struct net *net, u16 zone, 82 + const struct nf_conntrack_tuple *tuple); 81 83 82 84 struct nf_conntrack_expect * 83 - nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple); 85 + nf_ct_find_expectation(struct net *net, u16 zone, 86 + const struct nf_conntrack_tuple *tuple); 84 87 85 88 void nf_ct_unlink_expect(struct nf_conntrack_expect *exp); 86 89 void nf_ct_remove_expectations(struct nf_conn *ct);
+2
include/net/netfilter/nf_conntrack_extend.h
··· 8 8 NF_CT_EXT_NAT, 9 9 NF_CT_EXT_ACCT, 10 10 NF_CT_EXT_ECACHE, 11 + NF_CT_EXT_ZONE, 11 12 NF_CT_EXT_NUM, 12 13 }; 13 14 ··· 16 15 #define NF_CT_EXT_NAT_TYPE struct nf_conn_nat 17 16 #define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter 18 17 #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache 18 + #define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone 19 19 20 20 /* Extensions: optional stuff which isn't permanently in struct. */ 21 21 struct nf_ct_ext {
+23
include/net/netfilter/nf_conntrack_zones.h
··· 1 + #ifndef _NF_CONNTRACK_ZONES_H 2 + #define _NF_CONNTRACK_ZONES_H 3 + 4 + #include <net/netfilter/nf_conntrack_extend.h> 5 + 6 + #define NF_CT_DEFAULT_ZONE 0 7 + 8 + struct nf_conntrack_zone { 9 + u16 id; 10 + }; 11 + 12 + static inline u16 nf_ct_zone(const struct nf_conn *ct) 13 + { 14 + #ifdef CONFIG_NF_CONNTRACK_ZONES 15 + struct nf_conntrack_zone *nf_ct_zone; 16 + nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE); 17 + if (nf_ct_zone) 18 + return nf_ct_zone->id; 19 + #endif 20 + return NF_CT_DEFAULT_ZONE; 21 + } 22 + 23 + #endif /* _NF_CONNTRACK_ZONES_H */
+2 -1
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
··· 22 22 #include <net/netfilter/nf_conntrack_helper.h> 23 23 #include <net/netfilter/nf_conntrack_l4proto.h> 24 24 #include <net/netfilter/nf_conntrack_l3proto.h> 25 + #include <net/netfilter/nf_conntrack_zones.h> 25 26 #include <net/netfilter/nf_conntrack_core.h> 26 27 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 27 28 #include <net/netfilter/nf_nat_helper.h> ··· 267 266 return -EINVAL; 268 267 } 269 268 270 - h = nf_conntrack_find_get(sock_net(sk), &tuple); 269 + h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple); 271 270 if (h) { 272 271 struct sockaddr_in sin; 273 272 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+5 -3
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
··· 18 18 #include <net/netfilter/nf_conntrack_tuple.h> 19 19 #include <net/netfilter/nf_conntrack_l4proto.h> 20 20 #include <net/netfilter/nf_conntrack_core.h> 21 + #include <net/netfilter/nf_conntrack_zones.h> 21 22 #include <net/netfilter/nf_log.h> 22 23 23 24 static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; ··· 115 114 116 115 /* Returns conntrack if it dealt with ICMP, and filled in skb fields */ 117 116 static int 118 - icmp_error_message(struct net *net, struct sk_buff *skb, 117 + icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, 119 118 enum ip_conntrack_info *ctinfo, 120 119 unsigned int hooknum) 121 120 { 122 121 struct nf_conntrack_tuple innertuple, origtuple; 123 122 const struct nf_conntrack_l4proto *innerproto; 124 123 const struct nf_conntrack_tuple_hash *h; 124 + u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; 125 125 126 126 NF_CT_ASSERT(skb->nfct == NULL); 127 127 ··· 148 146 149 147 *ctinfo = IP_CT_RELATED; 150 148 151 - h = nf_conntrack_find_get(net, &innertuple); 149 + h = nf_conntrack_find_get(net, zone, &innertuple); 152 150 if (!h) { 153 151 pr_debug("icmp_error_message: no match\n"); 154 152 return -NF_ACCEPT; ··· 211 209 icmph->type != ICMP_REDIRECT) 212 210 return NF_ACCEPT; 213 211 214 - return icmp_error_message(net, skb, ctinfo, hooknum); 212 + return icmp_error_message(net, tmpl, skb, ctinfo, hooknum); 215 213 } 216 214 217 215 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+9 -3
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 16 16 17 17 #include <linux/netfilter_bridge.h> 18 18 #include <linux/netfilter_ipv4.h> 19 + #include <net/netfilter/nf_conntrack_zones.h> 19 20 #include <net/netfilter/ipv4/nf_defrag_ipv4.h> 20 21 #include <net/netfilter/nf_conntrack.h> 21 22 ··· 40 39 static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, 41 40 struct sk_buff *skb) 42 41 { 42 + u16 zone = NF_CT_DEFAULT_ZONE; 43 + 44 + if (skb->nfct) 45 + zone = nf_ct_zone((struct nf_conn *)skb->nfct); 46 + 43 47 #ifdef CONFIG_BRIDGE_NETFILTER 44 48 if (skb->nf_bridge && 45 49 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 46 - return IP_DEFRAG_CONNTRACK_BRIDGE_IN; 50 + return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone; 47 51 #endif 48 52 if (hooknum == NF_INET_PRE_ROUTING) 49 - return IP_DEFRAG_CONNTRACK_IN; 53 + return IP_DEFRAG_CONNTRACK_IN + zone; 50 54 else 51 - return IP_DEFRAG_CONNTRACK_OUT; 55 + return IP_DEFRAG_CONNTRACK_OUT + zone; 52 56 } 53 57 54 58 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+14 -10
net/ipv4/netfilter/nf_nat_core.c
··· 30 30 #include <net/netfilter/nf_conntrack_helper.h> 31 31 #include <net/netfilter/nf_conntrack_l3proto.h> 32 32 #include <net/netfilter/nf_conntrack_l4proto.h> 33 + #include <net/netfilter/nf_conntrack_zones.h> 33 34 34 35 static DEFINE_SPINLOCK(nf_nat_lock); 35 36 ··· 70 69 71 70 /* We keep an extra hash for each conntrack, for fast searching. */ 72 71 static inline unsigned int 73 - hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) 72 + hash_by_src(const struct net *net, u16 zone, 73 + const struct nf_conntrack_tuple *tuple) 74 74 { 75 75 unsigned int hash; 76 76 77 77 /* Original src, to ensure we map it consistently if poss. */ 78 78 hash = jhash_3words((__force u32)tuple->src.u3.ip, 79 - (__force u32)tuple->src.u.all, 79 + (__force u32)tuple->src.u.all ^ zone, 80 80 tuple->dst.protonum, 0); 81 81 return ((u64)hash * net->ipv4.nat_htable_size) >> 32; 82 82 } ··· 141 139 142 140 /* Only called for SRC manip */ 143 141 static int 144 - find_appropriate_src(struct net *net, 142 + find_appropriate_src(struct net *net, u16 zone, 145 143 const struct nf_conntrack_tuple *tuple, 146 144 struct nf_conntrack_tuple *result, 147 145 const struct nf_nat_range *range) 148 146 { 149 - unsigned int h = hash_by_src(net, tuple); 147 + unsigned int h = hash_by_src(net, zone, tuple); 150 148 const struct nf_conn_nat *nat; 151 149 const struct nf_conn *ct; 152 150 const struct hlist_node *n; ··· 154 152 rcu_read_lock(); 155 153 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) { 156 154 ct = nat->ct; 157 - if (same_src(ct, tuple)) { 155 + if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) { 158 156 /* Copy source part from reply tuple. */ 159 157 nf_ct_invert_tuplepr(result, 160 158 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ··· 177 175 the ip with the lowest src-ip/dst-ip/proto usage. 
178 176 */ 179 177 static void 180 - find_best_ips_proto(struct nf_conntrack_tuple *tuple, 178 + find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, 181 179 const struct nf_nat_range *range, 182 180 const struct nf_conn *ct, 183 181 enum nf_nat_manip_type maniptype) ··· 211 209 maxip = ntohl(range->max_ip); 212 210 j = jhash_2words((__force u32)tuple->src.u3.ip, 213 211 range->flags & IP_NAT_RANGE_PERSISTENT ? 214 - 0 : (__force u32)tuple->dst.u3.ip, 0); 212 + 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0); 215 213 j = ((u64)j * (maxip - minip + 1)) >> 32; 216 214 *var_ipp = htonl(minip + j); 217 215 } ··· 231 229 { 232 230 struct net *net = nf_ct_net(ct); 233 231 const struct nf_nat_protocol *proto; 232 + u16 zone = nf_ct_zone(ct); 234 233 235 234 /* 1) If this srcip/proto/src-proto-part is currently mapped, 236 235 and that same mapping gives a unique tuple within the given ··· 242 239 manips not an issue. */ 243 240 if (maniptype == IP_NAT_MANIP_SRC && 244 241 !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { 245 - if (find_appropriate_src(net, orig_tuple, tuple, range)) { 242 + if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) { 246 243 pr_debug("get_unique_tuple: Found current src map\n"); 247 244 if (!nf_nat_used_tuple(tuple, ct)) 248 245 return; ··· 252 249 /* 2) Select the least-used IP/proto combination in the given 253 250 range. */ 254 251 *tuple = *orig_tuple; 255 - find_best_ips_proto(tuple, range, ct, maniptype); 252 + find_best_ips_proto(zone, tuple, range, ct, maniptype); 256 253 257 254 /* 3) The per-protocol part of the manip is made to map into 258 255 the range to make a unique tuple. 
*/ ··· 330 327 if (have_to_hash) { 331 328 unsigned int srchash; 332 329 333 - srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 330 + srchash = hash_by_src(net, nf_ct_zone(ct), 331 + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 334 332 spin_lock_bh(&nf_nat_lock); 335 333 /* nf_conntrack_alter_reply might re-allocate exntension aera */ 336 334 nat = nfct_nat(ct);
+2 -1
net/ipv4/netfilter/nf_nat_pptp.c
··· 25 25 #include <net/netfilter/nf_nat_rule.h> 26 26 #include <net/netfilter/nf_conntrack_helper.h> 27 27 #include <net/netfilter/nf_conntrack_expect.h> 28 + #include <net/netfilter/nf_conntrack_zones.h> 28 29 #include <linux/netfilter/nf_conntrack_proto_gre.h> 29 30 #include <linux/netfilter/nf_conntrack_pptp.h> 30 31 ··· 75 74 76 75 pr_debug("trying to unexpect other dir: "); 77 76 nf_ct_dump_tuple_ip(&t); 78 - other_exp = nf_ct_expect_find_get(net, &t); 77 + other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t); 79 78 if (other_exp) { 80 79 nf_ct_unexpect_related(other_exp); 81 80 nf_ct_expect_put(other_exp);
+9 -3
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
··· 27 27 #include <net/netfilter/nf_conntrack_l4proto.h> 28 28 #include <net/netfilter/nf_conntrack_l3proto.h> 29 29 #include <net/netfilter/nf_conntrack_core.h> 30 + #include <net/netfilter/nf_conntrack_zones.h> 30 31 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 31 32 #include <net/netfilter/nf_log.h> 32 33 ··· 192 191 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, 193 192 struct sk_buff *skb) 194 193 { 194 + u16 zone = NF_CT_DEFAULT_ZONE; 195 + 196 + if (skb->nfct) 197 + zone = nf_ct_zone((struct nf_conn *)skb->nfct); 198 + 195 199 #ifdef CONFIG_BRIDGE_NETFILTER 196 200 if (skb->nf_bridge && 197 201 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 198 - return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; 202 + return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone; 199 203 #endif 200 204 if (hooknum == NF_INET_PRE_ROUTING) 201 - return IP6_DEFRAG_CONNTRACK_IN; 205 + return IP6_DEFRAG_CONNTRACK_IN + zone; 202 206 else 203 - return IP6_DEFRAG_CONNTRACK_OUT; 207 + return IP6_DEFRAG_CONNTRACK_OUT + zone; 204 208 205 209 } 206 210
+5 -3
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
··· 23 23 #include <net/netfilter/nf_conntrack_tuple.h> 24 24 #include <net/netfilter/nf_conntrack_l4proto.h> 25 25 #include <net/netfilter/nf_conntrack_core.h> 26 + #include <net/netfilter/nf_conntrack_zones.h> 26 27 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> 27 28 #include <net/netfilter/nf_log.h> 28 29 ··· 129 128 } 130 129 131 130 static int 132 - icmpv6_error_message(struct net *net, 131 + icmpv6_error_message(struct net *net, struct nf_conn *tmpl, 133 132 struct sk_buff *skb, 134 133 unsigned int icmp6off, 135 134 enum ip_conntrack_info *ctinfo, ··· 138 137 struct nf_conntrack_tuple intuple, origtuple; 139 138 const struct nf_conntrack_tuple_hash *h; 140 139 const struct nf_conntrack_l4proto *inproto; 140 + u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; 141 141 142 142 NF_CT_ASSERT(skb->nfct == NULL); 143 143 ··· 165 163 166 164 *ctinfo = IP_CT_RELATED; 167 165 168 - h = nf_conntrack_find_get(net, &intuple); 166 + h = nf_conntrack_find_get(net, zone, &intuple); 169 167 if (!h) { 170 168 pr_debug("icmpv6_error: no match\n"); 171 169 return -NF_ACCEPT; ··· 218 216 if (icmp6h->icmp6_type >= 128) 219 217 return NF_ACCEPT; 220 218 221 - return icmpv6_error_message(net, skb, dataoff, ctinfo, hooknum); 219 + return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum); 222 220 } 223 221 224 222 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+13
net/netfilter/Kconfig
··· 83 83 84 84 If unsure, say 'N'. 85 85 86 + config NF_CONNTRACK_ZONES 87 + bool 'Connection tracking zones' 88 + depends on NETFILTER_ADVANCED 89 + depends on NETFILTER_XT_TARGET_CT 90 + help 91 + This option enables support for connection tracking zones. 92 + Normally, each connection needs to have a unique system wide 93 + identity. Connection tracking zones allow to have multiple 94 + connections using the same identity, as long as they are 95 + contained in different zones. 96 + 97 + If unsure, say `N'. 98 + 86 99 config NF_CONNTRACK_EVENTS 87 100 bool "Connection tracking events" 88 101 depends on NETFILTER_ADVANCED
+81 -26
net/netfilter/nf_conntrack_core.c
··· 42 42 #include <net/netfilter/nf_conntrack_extend.h> 43 43 #include <net/netfilter/nf_conntrack_acct.h> 44 44 #include <net/netfilter/nf_conntrack_ecache.h> 45 + #include <net/netfilter/nf_conntrack_zones.h> 45 46 #include <net/netfilter/nf_nat.h> 46 47 #include <net/netfilter/nf_nat_core.h> 47 48 ··· 69 68 static unsigned int nf_conntrack_hash_rnd; 70 69 71 70 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, 72 - unsigned int size, unsigned int rnd) 71 + u16 zone, unsigned int size, unsigned int rnd) 73 72 { 74 73 unsigned int n; 75 74 u_int32_t h; ··· 80 79 */ 81 80 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); 82 81 h = jhash2((u32 *)tuple, n, 83 - rnd ^ (((__force __u16)tuple->dst.u.all << 16) | 84 - tuple->dst.protonum)); 82 + zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) | 83 + tuple->dst.protonum)); 85 84 86 85 return ((u64)h * size) >> 32; 87 86 } 88 87 89 - static inline u_int32_t hash_conntrack(const struct net *net, 88 + static inline u_int32_t hash_conntrack(const struct net *net, u16 zone, 90 89 const struct nf_conntrack_tuple *tuple) 91 90 { 92 - return __hash_conntrack(tuple, net->ct.htable_size, 91 + return __hash_conntrack(tuple, zone, net->ct.htable_size, 93 92 nf_conntrack_hash_rnd); 94 93 } 95 94 ··· 293 292 * - Caller must lock nf_conntrack_lock before calling this function 294 293 */ 295 294 struct nf_conntrack_tuple_hash * 296 - __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) 295 + __nf_conntrack_find(struct net *net, u16 zone, 296 + const struct nf_conntrack_tuple *tuple) 297 297 { 298 298 struct nf_conntrack_tuple_hash *h; 299 299 struct hlist_nulls_node *n; 300 - unsigned int hash = hash_conntrack(net, tuple); 300 + unsigned int hash = hash_conntrack(net, zone, tuple); 301 301 302 302 /* Disable BHs the entire time since we normally need to disable them 303 303 * at least once for the stats anyway. 
··· 306 304 local_bh_disable(); 307 305 begin: 308 306 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 309 - if (nf_ct_tuple_equal(tuple, &h->tuple)) { 307 + if (nf_ct_tuple_equal(tuple, &h->tuple) && 308 + nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { 310 309 NF_CT_STAT_INC(net, found); 311 310 local_bh_enable(); 312 311 return h; ··· 329 326 330 327 /* Find a connection corresponding to a tuple. */ 331 328 struct nf_conntrack_tuple_hash * 332 - nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 329 + nf_conntrack_find_get(struct net *net, u16 zone, 330 + const struct nf_conntrack_tuple *tuple) 333 331 { 334 332 struct nf_conntrack_tuple_hash *h; 335 333 struct nf_conn *ct; 336 334 337 335 rcu_read_lock(); 338 336 begin: 339 - h = __nf_conntrack_find(net, tuple); 337 + h = __nf_conntrack_find(net, zone, tuple); 340 338 if (h) { 341 339 ct = nf_ct_tuplehash_to_ctrack(h); 342 340 if (unlikely(nf_ct_is_dying(ct) || 343 341 !atomic_inc_not_zero(&ct->ct_general.use))) 344 342 h = NULL; 345 343 else { 346 - if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) { 344 + if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || 345 + nf_ct_zone(ct) != zone)) { 347 346 nf_ct_put(ct); 348 347 goto begin; 349 348 } ··· 373 368 { 374 369 struct net *net = nf_ct_net(ct); 375 370 unsigned int hash, repl_hash; 371 + u16 zone; 376 372 377 - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 378 - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 373 + zone = nf_ct_zone(ct); 374 + hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 375 + repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 379 376 380 377 __nf_conntrack_hash_insert(ct, hash, repl_hash); 381 378 } ··· 394 387 struct hlist_nulls_node *n; 395 388 enum ip_conntrack_info ctinfo; 396 389 struct net *net; 390 + u16 zone; 397 391 398 392 ct = nf_ct_get(skb, &ctinfo); 399 393 net = 
nf_ct_net(ct); ··· 406 398 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 407 399 return NF_ACCEPT; 408 400 409 - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 410 - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 401 + zone = nf_ct_zone(ct); 402 + hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 403 + repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 411 404 412 405 /* We're not in hash table, and we refuse to set up related 413 406 connections for unconfirmed conns. But packet copies and ··· 427 418 not in the hash. If there is, we lost race. */ 428 419 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) 429 420 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 430 - &h->tuple)) 421 + &h->tuple) && 422 + zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) 431 423 goto out; 432 424 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) 433 425 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 434 - &h->tuple)) 426 + &h->tuple) && 427 + zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) 435 428 goto out; 436 429 437 430 /* Remove from unconfirmed list */ ··· 480 469 struct net *net = nf_ct_net(ignored_conntrack); 481 470 struct nf_conntrack_tuple_hash *h; 482 471 struct hlist_nulls_node *n; 483 - unsigned int hash = hash_conntrack(net, tuple); 472 + struct nf_conn *ct; 473 + u16 zone = nf_ct_zone(ignored_conntrack); 474 + unsigned int hash = hash_conntrack(net, zone, tuple); 484 475 485 476 /* Disable BHs the entire time since we need to disable them at 486 477 * least once for the stats anyway. 
487 478 */ 488 479 rcu_read_lock_bh(); 489 480 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 490 - if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 491 - nf_ct_tuple_equal(tuple, &h->tuple)) { 481 + ct = nf_ct_tuplehash_to_ctrack(h); 482 + if (ct != ignored_conntrack && 483 + nf_ct_tuple_equal(tuple, &h->tuple) && 484 + nf_ct_zone(ct) == zone) { 492 485 NF_CT_STAT_INC(net, found); 493 486 rcu_read_unlock_bh(); 494 487 return 1; ··· 555 540 return dropped; 556 541 } 557 542 558 - struct nf_conn *nf_conntrack_alloc(struct net *net, 543 + struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, 559 544 const struct nf_conntrack_tuple *orig, 560 545 const struct nf_conntrack_tuple *repl, 561 546 gfp_t gfp) ··· 573 558 574 559 if (nf_conntrack_max && 575 560 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 576 - unsigned int hash = hash_conntrack(net, orig); 561 + unsigned int hash = hash_conntrack(net, zone, orig); 577 562 if (!early_drop(net, hash)) { 578 563 atomic_dec(&net->ct.count); 579 564 if (net_ratelimit()) ··· 610 595 #ifdef CONFIG_NET_NS 611 596 ct->ct_net = net; 612 597 #endif 598 + #ifdef CONFIG_NF_CONNTRACK_ZONES 599 + if (zone) { 600 + struct nf_conntrack_zone *nf_ct_zone; 613 601 602 + nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC); 603 + if (!nf_ct_zone) 604 + goto out_free; 605 + nf_ct_zone->id = zone; 606 + } 607 + #endif 614 608 /* 615 609 * changes to lookup keys must be done before setting refcnt to 1 616 610 */ 617 611 smp_wmb(); 618 612 atomic_set(&ct->ct_general.use, 1); 619 613 return ct; 614 + 615 + #ifdef CONFIG_NF_CONNTRACK_ZONES 616 + out_free: 617 + kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 618 + return ERR_PTR(-ENOMEM); 619 + #endif 620 620 } 621 621 EXPORT_SYMBOL_GPL(nf_conntrack_alloc); 622 622 ··· 661 631 struct nf_conntrack_tuple repl_tuple; 662 632 struct nf_conntrack_ecache *ecache; 663 633 struct nf_conntrack_expect *exp; 634 + u16 zone = tmpl ? 
nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; 664 635 665 636 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { 666 637 pr_debug("Can't invert tuple.\n"); 667 638 return NULL; 668 639 } 669 640 670 - ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC); 641 + ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC); 671 642 if (IS_ERR(ct)) { 672 643 pr_debug("Can't allocate conntrack.\n"); 673 644 return (struct nf_conntrack_tuple_hash *)ct; ··· 688 657 GFP_ATOMIC); 689 658 690 659 spin_lock_bh(&nf_conntrack_lock); 691 - exp = nf_ct_find_expectation(net, tuple); 660 + exp = nf_ct_find_expectation(net, zone, tuple); 692 661 if (exp) { 693 662 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", 694 663 ct, exp); ··· 744 713 struct nf_conntrack_tuple tuple; 745 714 struct nf_conntrack_tuple_hash *h; 746 715 struct nf_conn *ct; 716 + u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; 747 717 748 718 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 749 719 dataoff, l3num, protonum, &tuple, l3proto, ··· 754 722 } 755 723 756 724 /* look for tuple match */ 757 - h = nf_conntrack_find_get(net, &tuple); 725 + h = nf_conntrack_find_get(net, zone, &tuple); 758 726 if (!h) { 759 727 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, 760 728 skb, dataoff); ··· 990 958 } 991 959 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); 992 960 961 + #ifdef CONFIG_NF_CONNTRACK_ZONES 962 + static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { 963 + .len = sizeof(struct nf_conntrack_zone), 964 + .align = __alignof__(struct nf_conntrack_zone), 965 + .id = NF_CT_EXT_ZONE, 966 + }; 967 + #endif 968 + 993 969 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 994 970 995 971 #include <linux/netfilter/nfnetlink.h> ··· 1179 1139 1180 1140 nf_conntrack_helper_fini(); 1181 1141 nf_conntrack_proto_fini(); 1142 + #ifdef CONFIG_NF_CONNTRACK_ZONES 1143 + nf_ct_extend_unregister(&nf_ct_zone_extend); 1144 + #endif 1182 1145 } 1183 1146 
1184 1147 static void nf_conntrack_cleanup_net(struct net *net) ··· 1257 1214 unsigned int hashsize, old_size; 1258 1215 struct hlist_nulls_head *hash, *old_hash; 1259 1216 struct nf_conntrack_tuple_hash *h; 1217 + struct nf_conn *ct; 1260 1218 1261 1219 if (current->nsproxy->net_ns != &init_net) 1262 1220 return -EOPNOTSUPP; ··· 1284 1240 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1285 1241 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1286 1242 struct nf_conntrack_tuple_hash, hnnode); 1243 + ct = nf_ct_tuplehash_to_ctrack(h); 1287 1244 hlist_nulls_del_rcu(&h->hnnode); 1288 - bucket = __hash_conntrack(&h->tuple, hashsize, 1245 + bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct), 1246 + hashsize, 1289 1247 nf_conntrack_hash_rnd); 1290 1248 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1291 1249 } ··· 1345 1299 if (ret < 0) 1346 1300 goto err_helper; 1347 1301 1302 + #ifdef CONFIG_NF_CONNTRACK_ZONES 1303 + ret = nf_ct_extend_register(&nf_ct_zone_extend); 1304 + if (ret < 0) 1305 + goto err_extend; 1306 + #endif 1348 1307 /* Set up fake conntrack: to never be deleted, not in any hashes */ 1349 1308 #ifdef CONFIG_NET_NS 1350 1309 nf_conntrack_untracked.ct_net = &init_net; ··· 1360 1309 1361 1310 return 0; 1362 1311 1312 + #ifdef CONFIG_NF_CONNTRACK_ZONES 1313 + err_extend: 1314 + nf_conntrack_helper_fini(); 1315 + #endif 1363 1316 err_helper: 1364 1317 nf_conntrack_proto_fini(); 1365 1318 err_proto:
+14 -7
net/netfilter/nf_conntrack_expect.c
··· 27 27 #include <net/netfilter/nf_conntrack_expect.h> 28 28 #include <net/netfilter/nf_conntrack_helper.h> 29 29 #include <net/netfilter/nf_conntrack_tuple.h> 30 + #include <net/netfilter/nf_conntrack_zones.h> 30 31 31 32 unsigned int nf_ct_expect_hsize __read_mostly; 32 33 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); ··· 85 84 } 86 85 87 86 struct nf_conntrack_expect * 88 - __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple) 87 + __nf_ct_expect_find(struct net *net, u16 zone, 88 + const struct nf_conntrack_tuple *tuple) 89 89 { 90 90 struct nf_conntrack_expect *i; 91 91 struct hlist_node *n; ··· 97 95 98 96 h = nf_ct_expect_dst_hash(tuple); 99 97 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { 100 - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) 98 + if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && 99 + nf_ct_zone(i->master) == zone) 101 100 return i; 102 101 } 103 102 return NULL; ··· 107 104 108 105 /* Just find a expectation corresponding to a tuple. */ 109 106 struct nf_conntrack_expect * 110 - nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 107 + nf_ct_expect_find_get(struct net *net, u16 zone, 108 + const struct nf_conntrack_tuple *tuple) 111 109 { 112 110 struct nf_conntrack_expect *i; 113 111 114 112 rcu_read_lock(); 115 - i = __nf_ct_expect_find(net, tuple); 113 + i = __nf_ct_expect_find(net, zone, tuple); 116 114 if (i && !atomic_inc_not_zero(&i->use)) 117 115 i = NULL; 118 116 rcu_read_unlock(); ··· 125 121 /* If an expectation for this connection is found, it gets delete from 126 122 * global list then returned. 
*/ 127 123 struct nf_conntrack_expect * 128 - nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple) 124 + nf_ct_find_expectation(struct net *net, u16 zone, 125 + const struct nf_conntrack_tuple *tuple) 129 126 { 130 127 struct nf_conntrack_expect *i, *exp = NULL; 131 128 struct hlist_node *n; ··· 138 133 h = nf_ct_expect_dst_hash(tuple); 139 134 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { 140 135 if (!(i->flags & NF_CT_EXPECT_INACTIVE) && 141 - nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { 136 + nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && 137 + nf_ct_zone(i->master) == zone) { 142 138 exp = i; 143 139 break; 144 140 } ··· 210 204 { 211 205 return a->master == b->master && a->class == b->class && 212 206 nf_ct_tuple_equal(&a->tuple, &b->tuple) && 213 - nf_ct_tuple_mask_equal(&a->mask, &b->mask); 207 + nf_ct_tuple_mask_equal(&a->mask, &b->mask) && 208 + nf_ct_zone(a->master) == nf_ct_zone(b->master); 214 209 } 215 210 216 211 /* Generally a bad idea to call this: could have matched already. */
+2 -1
net/netfilter/nf_conntrack_h323_main.c
··· 29 29 #include <net/netfilter/nf_conntrack_expect.h> 30 30 #include <net/netfilter/nf_conntrack_ecache.h> 31 31 #include <net/netfilter/nf_conntrack_helper.h> 32 + #include <net/netfilter/nf_conntrack_zones.h> 32 33 #include <linux/netfilter/nf_conntrack_h323.h> 33 34 34 35 /* Parameters */ ··· 1217 1216 tuple.dst.u.tcp.port = port; 1218 1217 tuple.dst.protonum = IPPROTO_TCP; 1219 1218 1220 - exp = __nf_ct_expect_find(net, &tuple); 1219 + exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); 1221 1220 if (exp && exp->master == ct) 1222 1221 return exp; 1223 1222 return NULL;
+10 -10
net/netfilter/nf_conntrack_netlink.c
··· 811 811 if (err < 0) 812 812 return err; 813 813 814 - h = nf_conntrack_find_get(net, &tuple); 814 + h = nf_conntrack_find_get(net, 0, &tuple); 815 815 if (!h) 816 816 return -ENOENT; 817 817 ··· 872 872 if (err < 0) 873 873 return err; 874 874 875 - h = nf_conntrack_find_get(net, &tuple); 875 + h = nf_conntrack_find_get(net, 0, &tuple); 876 876 if (!h) 877 877 return -ENOENT; 878 878 ··· 1221 1221 int err = -EINVAL; 1222 1222 struct nf_conntrack_helper *helper; 1223 1223 1224 - ct = nf_conntrack_alloc(net, otuple, rtuple, GFP_ATOMIC); 1224 + ct = nf_conntrack_alloc(net, 0, otuple, rtuple, GFP_ATOMIC); 1225 1225 if (IS_ERR(ct)) 1226 1226 return ERR_PTR(-ENOMEM); 1227 1227 ··· 1325 1325 if (err < 0) 1326 1326 goto err2; 1327 1327 1328 - master_h = nf_conntrack_find_get(net, &master); 1328 + master_h = nf_conntrack_find_get(net, 0, &master); 1329 1329 if (master_h == NULL) { 1330 1330 err = -ENOENT; 1331 1331 goto err2; ··· 1374 1374 1375 1375 spin_lock_bh(&nf_conntrack_lock); 1376 1376 if (cda[CTA_TUPLE_ORIG]) 1377 - h = __nf_conntrack_find(net, &otuple); 1377 + h = __nf_conntrack_find(net, 0, &otuple); 1378 1378 else if (cda[CTA_TUPLE_REPLY]) 1379 - h = __nf_conntrack_find(net, &rtuple); 1379 + h = __nf_conntrack_find(net, 0, &rtuple); 1380 1380 1381 1381 if (h == NULL) { 1382 1382 err = -ENOENT; ··· 1714 1714 if (err < 0) 1715 1715 return err; 1716 1716 1717 - exp = nf_ct_expect_find_get(net, &tuple); 1717 + exp = nf_ct_expect_find_get(net, 0, &tuple); 1718 1718 if (!exp) 1719 1719 return -ENOENT; 1720 1720 ··· 1770 1770 return err; 1771 1771 1772 1772 /* bump usage count to 2 */ 1773 - exp = nf_ct_expect_find_get(net, &tuple); 1773 + exp = nf_ct_expect_find_get(net, 0, &tuple); 1774 1774 if (!exp) 1775 1775 return -ENOENT; 1776 1776 ··· 1855 1855 return err; 1856 1856 1857 1857 /* Look for master conntrack of this expectation */ 1858 - h = nf_conntrack_find_get(net, &master_tuple); 1858 + h = nf_conntrack_find_get(net, 0, &master_tuple); 1859 1859 if (!h) 
1860 1860 return -ENOENT; 1861 1861 ct = nf_ct_tuplehash_to_ctrack(h); ··· 1912 1912 return err; 1913 1913 1914 1914 spin_lock_bh(&nf_conntrack_lock); 1915 - exp = __nf_ct_expect_find(net, &tuple); 1915 + exp = __nf_ct_expect_find(net, 0, &tuple); 1916 1916 1917 1917 if (!exp) { 1918 1918 spin_unlock_bh(&nf_conntrack_lock);
+8 -6
net/netfilter/nf_conntrack_pptp.c
··· 28 28 #include <net/netfilter/nf_conntrack.h> 29 29 #include <net/netfilter/nf_conntrack_core.h> 30 30 #include <net/netfilter/nf_conntrack_helper.h> 31 + #include <net/netfilter/nf_conntrack_zones.h> 31 32 #include <linux/netfilter/nf_conntrack_proto_gre.h> 32 33 #include <linux/netfilter/nf_conntrack_pptp.h> 33 34 ··· 124 123 pr_debug("trying to unexpect other dir: "); 125 124 nf_ct_dump_tuple(&inv_t); 126 125 127 - exp_other = nf_ct_expect_find_get(net, &inv_t); 126 + exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t); 128 127 if (exp_other) { 129 128 /* delete other expectation. */ 130 129 pr_debug("found\n"); ··· 137 136 rcu_read_unlock(); 138 137 } 139 138 140 - static int destroy_sibling_or_exp(struct net *net, 139 + static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct, 141 140 const struct nf_conntrack_tuple *t) 142 141 { 143 142 const struct nf_conntrack_tuple_hash *h; 144 143 struct nf_conntrack_expect *exp; 145 144 struct nf_conn *sibling; 145 + u16 zone = nf_ct_zone(ct); 146 146 147 147 pr_debug("trying to timeout ct or exp for tuple "); 148 148 nf_ct_dump_tuple(t); 149 149 150 - h = nf_conntrack_find_get(net, t); 150 + h = nf_conntrack_find_get(net, zone, t); 151 151 if (h) { 152 152 sibling = nf_ct_tuplehash_to_ctrack(h); 153 153 pr_debug("setting timeout of conntrack %p to 0\n", sibling); ··· 159 157 nf_ct_put(sibling); 160 158 return 1; 161 159 } else { 162 - exp = nf_ct_expect_find_get(net, t); 160 + exp = nf_ct_expect_find_get(net, zone, t); 163 161 if (exp) { 164 162 pr_debug("unexpect_related of expect %p\n", exp); 165 163 nf_ct_unexpect_related(exp); ··· 184 182 t.dst.protonum = IPPROTO_GRE; 185 183 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; 186 184 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; 187 - if (!destroy_sibling_or_exp(net, &t)) 185 + if (!destroy_sibling_or_exp(net, ct, &t)) 188 186 pr_debug("failed to timeout original pns->pac ct/exp\n"); 189 187 190 188 /* try reply (pac->pns) tuple */
··· 192 190 t.dst.protonum = IPPROTO_GRE; 193 191 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; 194 192 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; 195 - if (!destroy_sibling_or_exp(net, &t)) 193 + if (!destroy_sibling_or_exp(net, ct, &t)) 196 194 pr_debug("failed to timeout reply pac->pns ct/exp\n"); 197 195 } 198 196
+2 -1
net/netfilter/nf_conntrack_sip.c
··· 23 23 #include <net/netfilter/nf_conntrack_core.h> 24 24 #include <net/netfilter/nf_conntrack_expect.h> 25 25 #include <net/netfilter/nf_conntrack_helper.h> 26 + #include <net/netfilter/nf_conntrack_zones.h> 26 27 #include <linux/netfilter/nf_conntrack_sip.h> 27 28 28 29 MODULE_LICENSE("GPL"); ··· 837 836 838 837 rcu_read_lock(); 839 838 do { 840 - exp = __nf_ct_expect_find(net, &tuple); 839 + exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); 841 840 842 841 if (!exp || exp->master == ct || 843 842 nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
+6
net/netfilter/nf_conntrack_standalone.c
··· 26 26 #include <net/netfilter/nf_conntrack_expect.h> 27 27 #include <net/netfilter/nf_conntrack_helper.h> 28 28 #include <net/netfilter/nf_conntrack_acct.h> 29 + #include <net/netfilter/nf_conntrack_zones.h> 29 30 30 31 MODULE_LICENSE("GPL"); 31 32 ··· 169 168 170 169 #ifdef CONFIG_NF_CONNTRACK_SECMARK 171 170 if (seq_printf(s, "secmark=%u ", ct->secmark)) 171 + goto release; 172 + #endif 173 + 174 + #ifdef CONFIG_NF_CONNTRACK_ZONES 175 + if (seq_printf(s, "zone=%u ", nf_ct_zone(ct))) 172 176 goto release; 173 177 #endif 174 178
+7 -1
net/netfilter/xt_CT.c
··· 16 16 #include <net/netfilter/nf_conntrack.h> 17 17 #include <net/netfilter/nf_conntrack_helper.h> 18 18 #include <net/netfilter/nf_conntrack_ecache.h> 19 + #include <net/netfilter/nf_conntrack_zones.h> 19 20 20 21 static unsigned int xt_ct_target(struct sk_buff *skb, 21 22 const struct xt_target_param *par) ··· 70 69 goto out; 71 70 } 72 71 72 + #ifndef CONFIG_NF_CONNTRACK_ZONES 73 + if (info->zone) 74 + goto err1; 75 + #endif 76 + 73 77 if (nf_ct_l3proto_try_module_get(par->family) < 0) 74 78 goto err1; 75 79 76 80 memset(&t, 0, sizeof(t)); 77 - ct = nf_conntrack_alloc(par->net, &t, &t, GFP_KERNEL); 81 + ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); 78 82 if (IS_ERR(ct)) 79 83 goto err2; 80 84
+3 -1
net/netfilter/xt_connlimit.c
··· 28 28 #include <net/netfilter/nf_conntrack.h> 29 29 #include <net/netfilter/nf_conntrack_core.h> 30 30 #include <net/netfilter/nf_conntrack_tuple.h> 31 + #include <net/netfilter/nf_conntrack_zones.h> 31 32 32 33 /* we will save the tuples of all connections we care about */ 33 34 struct xt_connlimit_conn { ··· 115 114 116 115 /* check the saved connections */ 117 116 list_for_each_entry_safe(conn, tmp, hash, list) { 118 - found = nf_conntrack_find_get(net, &conn->tuple); 117 + found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, 118 + &conn->tuple); 119 119 found_ct = NULL; 120 120 121 121 if (found != NULL)