Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NETFILTER]: nf_conntrack: split out the event cache

This patch splits out the event cache into its own file,
nf_conntrack_ecache.c.

Signed-off-by: Martin Josefsson <gandalf@wlug.westbo.se>
Signed-off-by: Patrick McHardy <kaber@trash.net>

Authored by Martin Josefsson and committed by David S. Miller.
f6180121 7e5d03bb

+199 -150
-81
include/net/netfilter/nf_conntrack.h
··· 244 244 245 245 #define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++) 246 246 247 - #ifdef CONFIG_NF_CONNTRACK_EVENTS 248 - #include <linux/notifier.h> 249 - #include <linux/interrupt.h> 250 - #include <net/netfilter/nf_conntrack_expect.h> 251 - 252 - struct nf_conntrack_ecache { 253 - struct nf_conn *ct; 254 - unsigned int events; 255 - }; 256 - DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); 257 - 258 - #define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) 259 - 260 - extern struct atomic_notifier_head nf_conntrack_chain; 261 - extern struct atomic_notifier_head nf_conntrack_expect_chain; 262 - 263 - static inline int nf_conntrack_register_notifier(struct notifier_block *nb) 264 - { 265 - return atomic_notifier_chain_register(&nf_conntrack_chain, nb); 266 - } 267 - 268 - static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb) 269 - { 270 - return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb); 271 - } 272 - 273 - static inline int 274 - nf_conntrack_expect_register_notifier(struct notifier_block *nb) 275 - { 276 - return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb); 277 - } 278 - 279 - static inline int 280 - nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) 281 - { 282 - return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, 283 - nb); 284 - } 285 - 286 - extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); 287 - extern void __nf_ct_event_cache_init(struct nf_conn *ct); 288 - 289 - static inline void 290 - nf_conntrack_event_cache(enum ip_conntrack_events event, 291 - const struct sk_buff *skb) 292 - { 293 - struct nf_conn *ct = (struct nf_conn *)skb->nfct; 294 - struct nf_conntrack_ecache *ecache; 295 - 296 - local_bh_disable(); 297 - ecache = &__get_cpu_var(nf_conntrack_ecache); 298 - if (ct != ecache->ct) 299 - __nf_ct_event_cache_init(ct); 300 - ecache->events |= event; 301 - local_bh_enable(); 302 - } 303 - 304 
- static inline void nf_conntrack_event(enum ip_conntrack_events event, 305 - struct nf_conn *ct) 306 - { 307 - if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) 308 - atomic_notifier_call_chain(&nf_conntrack_chain, event, ct); 309 - } 310 - 311 - static inline void 312 - nf_conntrack_expect_event(enum ip_conntrack_expect_events event, 313 - struct nf_conntrack_expect *exp) 314 - { 315 - atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp); 316 - } 317 - #else /* CONFIG_NF_CONNTRACK_EVENTS */ 318 - static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, 319 - const struct sk_buff *skb) {} 320 - static inline void nf_conntrack_event(enum ip_conntrack_events event, 321 - struct nf_conn *ct) {} 322 - static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} 323 - static inline void 324 - nf_conntrack_expect_event(enum ip_conntrack_expect_events event, 325 - struct nf_conntrack_expect *exp) {} 326 - #endif /* CONFIG_NF_CONNTRACK_EVENTS */ 327 - 328 247 /* no helper, no nat */ 329 248 #define NF_CT_F_BASIC 0 330 249 /* for helper */
+1
include/net/netfilter/nf_conntrack_core.h
··· 15 15 #include <linux/netfilter.h> 16 16 #include <net/netfilter/nf_conntrack_l3proto.h> 17 17 #include <net/netfilter/nf_conntrack_protocol.h> 18 + #include <net/netfilter/nf_conntrack_ecache.h> 18 19 19 20 /* This header is used to share core functionality between the 20 21 standalone connection tracking module, and the compatibility layer's use
+95
include/net/netfilter/nf_conntrack_ecache.h
··· 1 + /* 2 + * connection tracking event cache. 3 + */ 4 + 5 + #ifndef _NF_CONNTRACK_ECACHE_H 6 + #define _NF_CONNTRACK_ECACHE_H 7 + #include <net/netfilter/nf_conntrack.h> 8 + 9 + #include <linux/notifier.h> 10 + #include <linux/interrupt.h> 11 + #include <net/netfilter/nf_conntrack_expect.h> 12 + 13 + #ifdef CONFIG_NF_CONNTRACK_EVENTS 14 + struct nf_conntrack_ecache { 15 + struct nf_conn *ct; 16 + unsigned int events; 17 + }; 18 + DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); 19 + 20 + #define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) 21 + 22 + extern struct atomic_notifier_head nf_conntrack_chain; 23 + extern struct atomic_notifier_head nf_conntrack_expect_chain; 24 + 25 + static inline int nf_conntrack_register_notifier(struct notifier_block *nb) 26 + { 27 + return atomic_notifier_chain_register(&nf_conntrack_chain, nb); 28 + } 29 + 30 + static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb) 31 + { 32 + return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb); 33 + } 34 + 35 + static inline int 36 + nf_conntrack_expect_register_notifier(struct notifier_block *nb) 37 + { 38 + return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb); 39 + } 40 + 41 + static inline int 42 + nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) 43 + { 44 + return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, 45 + nb); 46 + } 47 + 48 + extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); 49 + extern void __nf_ct_event_cache_init(struct nf_conn *ct); 50 + extern void nf_ct_event_cache_flush(void); 51 + 52 + static inline void 53 + nf_conntrack_event_cache(enum ip_conntrack_events event, 54 + const struct sk_buff *skb) 55 + { 56 + struct nf_conn *ct = (struct nf_conn *)skb->nfct; 57 + struct nf_conntrack_ecache *ecache; 58 + 59 + local_bh_disable(); 60 + ecache = &__get_cpu_var(nf_conntrack_ecache); 61 + if (ct != ecache->ct) 62 + __nf_ct_event_cache_init(ct); 
63 + ecache->events |= event; 64 + local_bh_enable(); 65 + } 66 + 67 + static inline void nf_conntrack_event(enum ip_conntrack_events event, 68 + struct nf_conn *ct) 69 + { 70 + if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) 71 + atomic_notifier_call_chain(&nf_conntrack_chain, event, ct); 72 + } 73 + 74 + static inline void 75 + nf_conntrack_expect_event(enum ip_conntrack_expect_events event, 76 + struct nf_conntrack_expect *exp) 77 + { 78 + atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp); 79 + } 80 + 81 + #else /* CONFIG_NF_CONNTRACK_EVENTS */ 82 + 83 + static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, 84 + const struct sk_buff *skb) {} 85 + static inline void nf_conntrack_event(enum ip_conntrack_events event, 86 + struct nf_conn *ct) {} 87 + static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} 88 + static inline void 89 + nf_conntrack_expect_event(enum ip_conntrack_expect_events event, 90 + struct nf_conntrack_expect *exp) {} 91 + static inline void nf_ct_event_cache_flush(void) {} 92 + #endif /* CONFIG_NF_CONNTRACK_EVENTS */ 93 + 94 + #endif /*_NF_CONNTRACK_ECACHE_H*/ 95 +
+3 -1
net/netfilter/Makefile
··· 1 1 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o 2 - nf_conntrack-objs := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o 2 + 3 + nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o 4 + nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o 3 5 4 6 obj-$(CONFIG_NETFILTER) = netfilter.o 5 7
-67
net/netfilter/nf_conntrack_core.c
··· 85 85 86 86 static unsigned int nf_conntrack_next_id; 87 87 88 - #ifdef CONFIG_NF_CONNTRACK_EVENTS 89 - ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain); 90 - ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain); 91 - 92 - DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); 93 - 94 - /* deliver cached events and clear cache entry - must be called with locally 95 - * disabled softirqs */ 96 - static inline void 97 - __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache) 98 - { 99 - DEBUGP("ecache: delivering events for %p\n", ecache->ct); 100 - if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct) 101 - && ecache->events) 102 - atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events, 103 - ecache->ct); 104 - 105 - ecache->events = 0; 106 - nf_ct_put(ecache->ct); 107 - ecache->ct = NULL; 108 - } 109 - 110 - /* Deliver all cached events for a particular conntrack. This is called 111 - * by code prior to async packet handling for freeing the skb */ 112 - void nf_ct_deliver_cached_events(const struct nf_conn *ct) 113 - { 114 - struct nf_conntrack_ecache *ecache; 115 - 116 - local_bh_disable(); 117 - ecache = &__get_cpu_var(nf_conntrack_ecache); 118 - if (ecache->ct == ct) 119 - __nf_ct_deliver_cached_events(ecache); 120 - local_bh_enable(); 121 - } 122 - 123 - /* Deliver cached events for old pending events, if current conntrack != old */ 124 - void __nf_ct_event_cache_init(struct nf_conn *ct) 125 - { 126 - struct nf_conntrack_ecache *ecache; 127 - 128 - /* take care of delivering potentially old events */ 129 - ecache = &__get_cpu_var(nf_conntrack_ecache); 130 - BUG_ON(ecache->ct == ct); 131 - if (ecache->ct) 132 - __nf_ct_deliver_cached_events(ecache); 133 - /* initialize for this conntrack/packet */ 134 - ecache->ct = ct; 135 - nf_conntrack_get(&ct->ct_general); 136 - } 137 - 138 - /* flush the event cache - touches other CPU's data and must not be called 139 - * while packets are still passing through the code */ 140 - static void 
nf_ct_event_cache_flush(void) 141 - { 142 - struct nf_conntrack_ecache *ecache; 143 - int cpu; 144 - 145 - for_each_possible_cpu(cpu) { 146 - ecache = &per_cpu(nf_conntrack_ecache, cpu); 147 - if (ecache->ct) 148 - nf_ct_put(ecache->ct); 149 - } 150 - } 151 - #else 152 - static inline void nf_ct_event_cache_flush(void) {} 153 - #endif /* CONFIG_NF_CONNTRACK_EVENTS */ 154 - 155 88 DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); 156 89 EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); 157 90
+91
net/netfilter/nf_conntrack_ecache.c
··· 1 + /* Event cache for netfilter. */ 2 + 3 + /* (C) 1999-2001 Paul `Rusty' Russell 4 + * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> 5 + * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/types.h> 13 + #include <linux/netfilter.h> 14 + #include <linux/skbuff.h> 15 + #include <linux/vmalloc.h> 16 + #include <linux/stddef.h> 17 + #include <linux/err.h> 18 + #include <linux/percpu.h> 19 + #include <linux/notifier.h> 20 + #include <linux/kernel.h> 21 + #include <linux/netdevice.h> 22 + 23 + #include <net/netfilter/nf_conntrack.h> 24 + #include <net/netfilter/nf_conntrack_l3proto.h> 25 + #include <net/netfilter/nf_conntrack_protocol.h> 26 + #include <net/netfilter/nf_conntrack_expect.h> 27 + #include <net/netfilter/nf_conntrack_helper.h> 28 + #include <net/netfilter/nf_conntrack_core.h> 29 + 30 + ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain); 31 + ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain); 32 + 33 + DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); 34 + 35 + /* deliver cached events and clear cache entry - must be called with locally 36 + * disabled softirqs */ 37 + static inline void 38 + __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache) 39 + { 40 + if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct) 41 + && ecache->events) 42 + atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events, 43 + ecache->ct); 44 + 45 + ecache->events = 0; 46 + nf_ct_put(ecache->ct); 47 + ecache->ct = NULL; 48 + } 49 + 50 + /* Deliver all cached events for a particular conntrack. 
This is called 51 + * by code prior to async packet handling for freeing the skb */ 52 + void nf_ct_deliver_cached_events(const struct nf_conn *ct) 53 + { 54 + struct nf_conntrack_ecache *ecache; 55 + 56 + local_bh_disable(); 57 + ecache = &__get_cpu_var(nf_conntrack_ecache); 58 + if (ecache->ct == ct) 59 + __nf_ct_deliver_cached_events(ecache); 60 + local_bh_enable(); 61 + } 62 + 63 + /* Deliver cached events for old pending events, if current conntrack != old */ 64 + void __nf_ct_event_cache_init(struct nf_conn *ct) 65 + { 66 + struct nf_conntrack_ecache *ecache; 67 + 68 + /* take care of delivering potentially old events */ 69 + ecache = &__get_cpu_var(nf_conntrack_ecache); 70 + BUG_ON(ecache->ct == ct); 71 + if (ecache->ct) 72 + __nf_ct_deliver_cached_events(ecache); 73 + /* initialize for this conntrack/packet */ 74 + ecache->ct = ct; 75 + nf_conntrack_get(&ct->ct_general); 76 + } 77 + 78 + /* flush the event cache - touches other CPU's data and must not be called 79 + * while packets are still passing through the code */ 80 + void nf_ct_event_cache_flush(void) 81 + { 82 + struct nf_conntrack_ecache *ecache; 83 + int cpu; 84 + 85 + for_each_possible_cpu(cpu) { 86 + ecache = &per_cpu(nf_conntrack_ecache, cpu); 87 + if (ecache->ct) 88 + nf_ct_put(ecache->ct); 89 + } 90 + } 91 +
+1
net/netfilter/nf_conntrack_ftp.c
··· 27 27 28 28 #include <net/netfilter/nf_conntrack.h> 29 29 #include <net/netfilter/nf_conntrack_expect.h> 30 + #include <net/netfilter/nf_conntrack_ecache.h> 30 31 #include <net/netfilter/nf_conntrack_helper.h> 31 32 #include <linux/netfilter/nf_conntrack_ftp.h> 32 33
+1
net/netfilter/nf_conntrack_proto_sctp.c
··· 33 33 34 34 #include <net/netfilter/nf_conntrack.h> 35 35 #include <net/netfilter/nf_conntrack_protocol.h> 36 + #include <net/netfilter/nf_conntrack_ecache.h> 36 37 37 38 #if 0 38 39 #define DEBUGP(format, ...) printk(format, ## __VA_ARGS__)
+1
net/netfilter/nf_conntrack_proto_tcp.c
··· 43 43 #include <linux/netfilter_ipv6.h> 44 44 #include <net/netfilter/nf_conntrack.h> 45 45 #include <net/netfilter/nf_conntrack_protocol.h> 46 + #include <net/netfilter/nf_conntrack_ecache.h> 46 47 47 48 #if 0 48 49 #define DEBUGP printk
+2
net/netfilter/nf_conntrack_proto_udp.c
··· 22 22 #include <linux/ipv6.h> 23 23 #include <net/ip6_checksum.h> 24 24 #include <net/checksum.h> 25 + 25 26 #include <linux/netfilter.h> 26 27 #include <linux/netfilter_ipv4.h> 27 28 #include <linux/netfilter_ipv6.h> 28 29 #include <net/netfilter/nf_conntrack_protocol.h> 30 + #include <net/netfilter/nf_conntrack_ecache.h> 29 31 30 32 unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; 31 33 unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
+1 -1
net/netfilter/nf_conntrack_standalone.c
··· 33 33 #define ASSERT_WRITE_LOCK(x) 34 34 35 35 #include <net/netfilter/nf_conntrack.h> 36 + #include <net/netfilter/nf_conntrack_core.h> 36 37 #include <net/netfilter/nf_conntrack_l3proto.h> 37 38 #include <net/netfilter/nf_conntrack_protocol.h> 38 - #include <net/netfilter/nf_conntrack_core.h> 39 39 #include <net/netfilter/nf_conntrack_expect.h> 40 40 #include <net/netfilter/nf_conntrack_helper.h> 41 41
+3
net/netfilter/xt_CONNMARK.c
··· 31 31 #include <linux/netfilter/x_tables.h> 32 32 #include <linux/netfilter/xt_CONNMARK.h> 33 33 #include <net/netfilter/nf_conntrack_compat.h> 34 + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 35 + #include <net/netfilter/nf_conntrack_ecache.h> 36 + #endif 34 37 35 38 static unsigned int 36 39 target(struct sk_buff **pskb,