Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains the first batch of Netfilter updates for
your net-next tree.

1) Define pr_fmt() in nf_conntrack, from Weongyo Jeong.

2) Define and register netfilter's afinfo for the bridge family;
this comes in preparation for native nfqueue bridge support in nft,
from Stephane Bryant.

3) Add new attributes to store layer 2 and VLAN headers to nfqueue,
also from Stephane Bryant.

4) Parse new NFQA_VLAN and NFQA_L2HDR nfqueue netlink attributes
coming from userspace, from Stephane Bryant.

5) Use net->ipv6.devconf_all->hop_limit instead of hardcoded hop_limit
in IPv6 SYNPROXY, from Liping Zhang.

6) Remove unnecessary check for dst == NULL in nf_reject_ipv6,
from Haishuang Yan.

7) Deinline ctnetlink event report functions, from Florian Westphal.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+298 -129
+15 -93
include/net/netfilter/nf_conntrack_ecache.h
··· 73 73 struct nf_ct_event_notifier *nb); 74 74 75 75 void nf_ct_deliver_cached_events(struct nf_conn *ct); 76 + int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, 77 + u32 portid, int report); 76 78 77 79 static inline void 78 80 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) ··· 93 91 } 94 92 95 93 static inline int 96 - nf_conntrack_eventmask_report(unsigned int eventmask, 97 - struct nf_conn *ct, 98 - u32 portid, 99 - int report) 100 - { 101 - int ret = 0; 102 - struct net *net = nf_ct_net(ct); 103 - struct nf_ct_event_notifier *notify; 104 - struct nf_conntrack_ecache *e; 105 - 106 - rcu_read_lock(); 107 - notify = rcu_dereference(net->ct.nf_conntrack_event_cb); 108 - if (notify == NULL) 109 - goto out_unlock; 110 - 111 - e = nf_ct_ecache_find(ct); 112 - if (e == NULL) 113 - goto out_unlock; 114 - 115 - if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) { 116 - struct nf_ct_event item = { 117 - .ct = ct, 118 - .portid = e->portid ? e->portid : portid, 119 - .report = report 120 - }; 121 - /* This is a resent of a destroy event? If so, skip missed */ 122 - unsigned long missed = e->portid ? 0 : e->missed; 123 - 124 - if (!((eventmask | missed) & e->ctmask)) 125 - goto out_unlock; 126 - 127 - ret = notify->fcn(eventmask | missed, &item); 128 - if (unlikely(ret < 0 || missed)) { 129 - spin_lock_bh(&ct->lock); 130 - if (ret < 0) { 131 - /* This is a destroy event that has been 132 - * triggered by a process, we store the PORTID 133 - * to include it in the retransmission. 
*/ 134 - if (eventmask & (1 << IPCT_DESTROY) && 135 - e->portid == 0 && portid != 0) 136 - e->portid = portid; 137 - else 138 - e->missed |= eventmask; 139 - } else 140 - e->missed &= ~missed; 141 - spin_unlock_bh(&ct->lock); 142 - } 143 - } 144 - out_unlock: 145 - rcu_read_unlock(); 146 - return ret; 147 - } 148 - 149 - static inline int 150 94 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, 151 95 u32 portid, int report) 152 96 { 97 + const struct net *net = nf_ct_net(ct); 98 + 99 + if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) 100 + return 0; 101 + 153 102 return nf_conntrack_eventmask_report(1 << event, ct, portid, report); 154 103 } 155 104 156 105 static inline int 157 106 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) 158 107 { 108 + const struct net *net = nf_ct_net(ct); 109 + 110 + if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) 111 + return 0; 112 + 159 113 return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); 160 114 } 161 115 ··· 130 172 void nf_ct_expect_unregister_notifier(struct net *net, 131 173 struct nf_exp_event_notifier *nb); 132 174 133 - static inline void 134 - nf_ct_expect_event_report(enum ip_conntrack_expect_events event, 135 - struct nf_conntrack_expect *exp, 136 - u32 portid, 137 - int report) 138 - { 139 - struct net *net = nf_ct_exp_net(exp); 140 - struct nf_exp_event_notifier *notify; 141 - struct nf_conntrack_ecache *e; 142 - 143 - rcu_read_lock(); 144 - notify = rcu_dereference(net->ct.nf_expect_event_cb); 145 - if (notify == NULL) 146 - goto out_unlock; 147 - 148 - e = nf_ct_ecache_find(exp->master); 149 - if (e == NULL) 150 - goto out_unlock; 151 - 152 - if (e->expmask & (1 << event)) { 153 - struct nf_exp_event item = { 154 - .exp = exp, 155 - .portid = portid, 156 - .report = report 157 - }; 158 - notify->fcn(1 << event, &item); 159 - } 160 - out_unlock: 161 - rcu_read_unlock(); 162 - } 163 - 164 - static inline void 165 - nf_ct_expect_event(enum 
ip_conntrack_expect_events event, 166 - struct nf_conntrack_expect *exp) 167 - { 168 - nf_ct_expect_event_report(event, exp, 0, 0); 169 - } 175 + void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, 176 + struct nf_conntrack_expect *exp, 177 + u32 portid, int report); 170 178 171 179 int nf_conntrack_ecache_pernet_init(struct net *net); 172 180 void nf_conntrack_ecache_pernet_fini(struct net *net); ··· 169 245 u32 portid, 170 246 int report) { return 0; } 171 247 static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} 172 - static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, 173 - struct nf_conntrack_expect *exp) {} 174 248 static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, 175 249 struct nf_conntrack_expect *exp, 176 250 u32 portid,
+45 -2
net/bridge/netfilter/nf_tables_bridge.c
··· 162 162 (1 << NF_BR_POST_ROUTING), 163 163 }; 164 164 165 + static void nf_br_saveroute(const struct sk_buff *skb, 166 + struct nf_queue_entry *entry) 167 + { 168 + } 169 + 170 + static int nf_br_reroute(struct net *net, struct sk_buff *skb, 171 + const struct nf_queue_entry *entry) 172 + { 173 + return 0; 174 + } 175 + 176 + static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook, 177 + unsigned int dataoff, u_int8_t protocol) 178 + { 179 + return 0; 180 + } 181 + 182 + static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook, 183 + unsigned int dataoff, unsigned int len, 184 + u_int8_t protocol) 185 + { 186 + return 0; 187 + } 188 + 189 + static int nf_br_route(struct net *net, struct dst_entry **dst, 190 + struct flowi *fl, bool strict __always_unused) 191 + { 192 + return 0; 193 + } 194 + 195 + static const struct nf_afinfo nf_br_afinfo = { 196 + .family = AF_BRIDGE, 197 + .checksum = nf_br_checksum, 198 + .checksum_partial = nf_br_checksum_partial, 199 + .route = nf_br_route, 200 + .saveroute = nf_br_saveroute, 201 + .reroute = nf_br_reroute, 202 + .route_key_size = 0, 203 + }; 204 + 165 205 static int __init nf_tables_bridge_init(void) 166 206 { 167 207 int ret; 168 208 209 + nf_register_afinfo(&nf_br_afinfo); 169 210 nft_register_chain_type(&filter_bridge); 170 211 ret = register_pernet_subsys(&nf_tables_bridge_net_ops); 171 - if (ret < 0) 212 + if (ret < 0) { 172 213 nft_unregister_chain_type(&filter_bridge); 173 - 214 + nf_unregister_afinfo(&nf_br_afinfo); 215 + } 174 216 return ret; 175 217 } 176 218 ··· 220 178 { 221 179 unregister_pernet_subsys(&nf_tables_bridge_net_ops); 222 180 nft_unregister_chain_type(&filter_bridge); 181 + nf_unregister_afinfo(&nf_br_afinfo); 223 182 } 224 183 225 184 module_init(nf_tables_bridge_init);
+30 -26
net/ipv6/netfilter/ip6t_SYNPROXY.c
··· 20 20 #include <net/netfilter/nf_conntrack_synproxy.h> 21 21 22 22 static struct ipv6hdr * 23 - synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, 24 - const struct in6_addr *daddr) 23 + synproxy_build_ip(struct net *net, struct sk_buff *skb, 24 + const struct in6_addr *saddr, 25 + const struct in6_addr *daddr) 25 26 { 26 27 struct ipv6hdr *iph; 27 28 28 29 skb_reset_network_header(skb); 29 30 iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph)); 30 31 ip6_flow_hdr(iph, 0, 0); 31 - iph->hop_limit = 64; //XXX 32 + iph->hop_limit = net->ipv6.devconf_all->hop_limit; 32 33 iph->nexthdr = IPPROTO_TCP; 33 34 iph->saddr = *saddr; 34 35 iph->daddr = *daddr; ··· 38 37 } 39 38 40 39 static void 41 - synproxy_send_tcp(const struct synproxy_net *snet, 40 + synproxy_send_tcp(struct net *net, 42 41 const struct sk_buff *skb, struct sk_buff *nskb, 43 42 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, 44 43 struct ipv6hdr *niph, struct tcphdr *nth, 45 44 unsigned int tcp_hdr_size) 46 45 { 47 - struct net *net = nf_ct_net(snet->tmpl); 48 46 struct dst_entry *dst; 49 47 struct flowi6 fl6; 50 48 ··· 84 84 } 85 85 86 86 static void 87 - synproxy_send_client_synack(const struct synproxy_net *snet, 87 + synproxy_send_client_synack(struct net *net, 88 88 const struct sk_buff *skb, const struct tcphdr *th, 89 89 const struct synproxy_options *opts) 90 90 { ··· 103 103 return; 104 104 skb_reserve(nskb, MAX_TCP_HEADER); 105 105 106 - niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr); 106 + niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr); 107 107 108 108 skb_reset_transport_header(nskb); 109 109 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); ··· 121 121 122 122 synproxy_build_options(nth, opts); 123 123 124 - synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 124 + synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 125 125 niph, nth, tcp_hdr_size); 126 126 } 127 127 128 128 static void 129 - 
synproxy_send_server_syn(const struct synproxy_net *snet, 129 + synproxy_send_server_syn(struct net *net, 130 130 const struct sk_buff *skb, const struct tcphdr *th, 131 131 const struct synproxy_options *opts, u32 recv_seq) 132 132 { 133 + struct synproxy_net *snet = synproxy_pernet(net); 133 134 struct sk_buff *nskb; 134 135 struct ipv6hdr *iph, *niph; 135 136 struct tcphdr *nth; ··· 145 144 return; 146 145 skb_reserve(nskb, MAX_TCP_HEADER); 147 146 148 - niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr); 147 + niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr); 149 148 150 149 skb_reset_transport_header(nskb); 151 150 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); ··· 166 165 167 166 synproxy_build_options(nth, opts); 168 167 169 - synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, 168 + synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, 170 169 niph, nth, tcp_hdr_size); 171 170 } 172 171 173 172 static void 174 - synproxy_send_server_ack(const struct synproxy_net *snet, 173 + synproxy_send_server_ack(struct net *net, 175 174 const struct ip_ct_tcp *state, 176 175 const struct sk_buff *skb, const struct tcphdr *th, 177 176 const struct synproxy_options *opts) ··· 190 189 return; 191 190 skb_reserve(nskb, MAX_TCP_HEADER); 192 191 193 - niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr); 192 + niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr); 194 193 195 194 skb_reset_transport_header(nskb); 196 195 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); ··· 206 205 207 206 synproxy_build_options(nth, opts); 208 207 209 - synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 208 + synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 210 209 } 211 210 212 211 static void 213 - synproxy_send_client_ack(const struct synproxy_net *snet, 212 + synproxy_send_client_ack(struct net *net, 214 213 const struct sk_buff *skb, const struct tcphdr *th, 215 214 const 
struct synproxy_options *opts) 216 215 { ··· 228 227 return; 229 228 skb_reserve(nskb, MAX_TCP_HEADER); 230 229 231 - niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr); 230 + niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr); 232 231 233 232 skb_reset_transport_header(nskb); 234 233 nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); ··· 244 243 245 244 synproxy_build_options(nth, opts); 246 245 247 - synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 246 + synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 248 247 niph, nth, tcp_hdr_size); 249 248 } 250 249 251 250 static bool 252 - synproxy_recv_client_ack(const struct synproxy_net *snet, 251 + synproxy_recv_client_ack(struct net *net, 253 252 const struct sk_buff *skb, const struct tcphdr *th, 254 253 struct synproxy_options *opts, u32 recv_seq) 255 254 { 255 + struct synproxy_net *snet = synproxy_pernet(net); 256 256 int mss; 257 257 258 258 mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1); ··· 269 267 if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) 270 268 synproxy_check_timestamp_cookie(opts); 271 269 272 - synproxy_send_server_syn(snet, skb, th, opts, recv_seq); 270 + synproxy_send_server_syn(net, skb, th, opts, recv_seq); 273 271 return true; 274 272 } 275 273 ··· 277 275 synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) 278 276 { 279 277 const struct xt_synproxy_info *info = par->targinfo; 280 - struct synproxy_net *snet = synproxy_pernet(par->net); 278 + struct net *net = par->net; 279 + struct synproxy_net *snet = synproxy_pernet(net); 281 280 struct synproxy_options opts = {}; 282 281 struct tcphdr *th, _th; 283 282 ··· 307 304 XT_SYNPROXY_OPT_SACK_PERM | 308 305 XT_SYNPROXY_OPT_ECN); 309 306 310 - synproxy_send_client_synack(snet, skb, th, &opts); 307 + synproxy_send_client_synack(net, skb, th, &opts); 311 308 return NF_DROP; 312 309 313 310 } else if (th->ack && !(th->fin || th->rst || th->syn)) { 314 311 /* 
ACK from client */ 315 - synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); 312 + synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq)); 316 313 return NF_DROP; 317 314 } 318 315 ··· 323 320 struct sk_buff *skb, 324 321 const struct nf_hook_state *nhs) 325 322 { 326 - struct synproxy_net *snet = synproxy_pernet(nhs->net); 323 + struct net *net = nhs->net; 324 + struct synproxy_net *snet = synproxy_pernet(net); 327 325 enum ip_conntrack_info ctinfo; 328 326 struct nf_conn *ct; 329 327 struct nf_conn_synproxy *synproxy; ··· 388 384 * therefore we need to add 1 to make the SYN sequence 389 385 * number match the one of first SYN. 390 386 */ 391 - if (synproxy_recv_client_ack(snet, skb, th, &opts, 387 + if (synproxy_recv_client_ack(net, skb, th, &opts, 392 388 ntohl(th->seq) + 1)) 393 389 this_cpu_inc(snet->stats->cookie_retrans); 394 390 ··· 414 410 XT_SYNPROXY_OPT_SACK_PERM); 415 411 416 412 swap(opts.tsval, opts.tsecr); 417 - synproxy_send_server_ack(snet, state, skb, th, &opts); 413 + synproxy_send_server_ack(net, state, skb, th, &opts); 418 414 419 415 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); 420 416 421 417 swap(opts.tsval, opts.tsecr); 422 - synproxy_send_client_ack(snet, skb, th, &opts); 418 + synproxy_send_client_ack(net, skb, th, &opts); 423 419 424 420 consume_skb(skb); 425 421 return NF_STOLEN;
+1 -1
net/ipv6/netfilter/nf_reject_ipv6.c
··· 158 158 fl6.fl6_dport = otcph->source; 159 159 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 160 160 dst = ip6_route_output(net, NULL, &fl6); 161 - if (dst == NULL || dst->error) { 161 + if (dst->error) { 162 162 dst_release(dst); 163 163 return; 164 164 }
+8 -7
net/netfilter/nf_conntrack_core.c
··· 12 12 * published by the Free Software Foundation. 13 13 */ 14 14 15 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 + 15 17 #include <linux/types.h> 16 18 #include <linux/netfilter.h> 17 19 #include <linux/module.h> ··· 968 966 969 967 if (!l4proto->new(ct, skb, dataoff, timeouts)) { 970 968 nf_conntrack_free(ct); 971 - pr_debug("init conntrack: can't track with proto module\n"); 969 + pr_debug("can't track with proto module\n"); 972 970 return NULL; 973 971 } 974 972 ··· 990 988 spin_lock(&nf_conntrack_expect_lock); 991 989 exp = nf_ct_find_expectation(net, zone, tuple); 992 990 if (exp) { 993 - pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", 991 + pr_debug("expectation arrives ct=%p exp=%p\n", 994 992 ct, exp); 995 993 /* Welcome, Mr. Bond. We've been expecting you... */ 996 994 __set_bit(IPS_EXPECTED_BIT, &ct->status); ··· 1055 1053 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 1056 1054 dataoff, l3num, protonum, net, &tuple, l3proto, 1057 1055 l4proto)) { 1058 - pr_debug("resolve_normal_ct: Can't get tuple\n"); 1056 + pr_debug("Can't get tuple\n"); 1059 1057 return NULL; 1060 1058 } 1061 1059 ··· 1081 1079 } else { 1082 1080 /* Once we've had two way comms, always ESTABLISHED. */ 1083 1081 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 1084 - pr_debug("nf_conntrack_in: normal packet for %p\n", ct); 1082 + pr_debug("normal packet for %p\n", ct); 1085 1083 *ctinfo = IP_CT_ESTABLISHED; 1086 1084 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { 1087 - pr_debug("nf_conntrack_in: related packet for %p\n", 1088 - ct); 1085 + pr_debug("related packet for %p\n", ct); 1089 1086 *ctinfo = IP_CT_RELATED; 1090 1087 } else { 1091 - pr_debug("nf_conntrack_in: new packet for %p\n", ct); 1088 + pr_debug("new packet for %p\n", ct); 1092 1089 *ctinfo = IP_CT_NEW; 1093 1090 } 1094 1091 *set_reply = 0;
+84
net/netfilter/nf_conntrack_ecache.c
··· 113 113 schedule_delayed_work(&ctnet->ecache_dwork, delay); 114 114 } 115 115 116 + int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, 117 + u32 portid, int report) 118 + { 119 + int ret = 0; 120 + struct net *net = nf_ct_net(ct); 121 + struct nf_ct_event_notifier *notify; 122 + struct nf_conntrack_ecache *e; 123 + 124 + rcu_read_lock(); 125 + notify = rcu_dereference(net->ct.nf_conntrack_event_cb); 126 + if (!notify) 127 + goto out_unlock; 128 + 129 + e = nf_ct_ecache_find(ct); 130 + if (!e) 131 + goto out_unlock; 132 + 133 + if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) { 134 + struct nf_ct_event item = { 135 + .ct = ct, 136 + .portid = e->portid ? e->portid : portid, 137 + .report = report 138 + }; 139 + /* This is a resent of a destroy event? If so, skip missed */ 140 + unsigned long missed = e->portid ? 0 : e->missed; 141 + 142 + if (!((eventmask | missed) & e->ctmask)) 143 + goto out_unlock; 144 + 145 + ret = notify->fcn(eventmask | missed, &item); 146 + if (unlikely(ret < 0 || missed)) { 147 + spin_lock_bh(&ct->lock); 148 + if (ret < 0) { 149 + /* This is a destroy event that has been 150 + * triggered by a process, we store the PORTID 151 + * to include it in the retransmission. 
152 + */ 153 + if (eventmask & (1 << IPCT_DESTROY) && 154 + e->portid == 0 && portid != 0) 155 + e->portid = portid; 156 + else 157 + e->missed |= eventmask; 158 + } else { 159 + e->missed &= ~missed; 160 + } 161 + spin_unlock_bh(&ct->lock); 162 + } 163 + } 164 + out_unlock: 165 + rcu_read_unlock(); 166 + return ret; 167 + } 168 + EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report); 169 + 116 170 /* deliver cached events and clear cache entry - must be called with locally 117 171 * disabled softirqs */ 118 172 void nf_ct_deliver_cached_events(struct nf_conn *ct) ··· 220 166 rcu_read_unlock(); 221 167 } 222 168 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); 169 + 170 + void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, 171 + struct nf_conntrack_expect *exp, 172 + u32 portid, int report) 173 + 174 + { 175 + struct net *net = nf_ct_exp_net(exp); 176 + struct nf_exp_event_notifier *notify; 177 + struct nf_conntrack_ecache *e; 178 + 179 + rcu_read_lock(); 180 + notify = rcu_dereference(net->ct.nf_expect_event_cb); 181 + if (!notify) 182 + goto out_unlock; 183 + 184 + e = nf_ct_ecache_find(exp->master); 185 + if (!e) 186 + goto out_unlock; 187 + 188 + if (e->expmask & (1 << event)) { 189 + struct nf_exp_event item = { 190 + .exp = exp, 191 + .portid = portid, 192 + .report = report 193 + }; 194 + notify->fcn(1 << event, &item); 195 + } 196 + out_unlock: 197 + rcu_read_unlock(); 198 + } 223 199 224 200 int nf_conntrack_register_notifier(struct net *net, 225 201 struct nf_ct_event_notifier *new)