Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6

+1627 -785
+3
drivers/leds/Kconfig
··· 223 223 This allows LEDs to be initialised in the ON state. 224 224 If unsure, say Y. 225 225 226 + comment "iptables trigger is under Netfilter config (LED target)" 227 + depends on LEDS_TRIGGERS 228 + 226 229 endif # NEW_LEDS
+8
include/linux/netfilter/Kbuild
··· 7 7 header-y += xt_CONNMARK.h 8 8 header-y += xt_CONNSECMARK.h 9 9 header-y += xt_DSCP.h 10 + header-y += xt_LED.h 10 11 header-y += xt_MARK.h 11 12 header-y += xt_NFLOG.h 12 13 header-y += xt_NFQUEUE.h 13 14 header-y += xt_RATEEST.h 14 15 header-y += xt_SECMARK.h 15 16 header-y += xt_TCPMSS.h 17 + header-y += xt_TCPOPTSTRIP.h 18 + header-y += xt_TPROXY.h 16 19 header-y += xt_comment.h 17 20 header-y += xt_connbytes.h 21 + header-y += xt_connlimit.h 18 22 header-y += xt_connmark.h 19 23 header-y += xt_conntrack.h 24 + header-y += xt_cluster.h 20 25 header-y += xt_dccp.h 21 26 header-y += xt_dscp.h 22 27 header-y += xt_esp.h ··· 35 30 header-y += xt_multiport.h 36 31 header-y += xt_owner.h 37 32 header-y += xt_pkttype.h 33 + header-y += xt_quota.h 38 34 header-y += xt_rateest.h 39 35 header-y += xt_realm.h 40 36 header-y += xt_recent.h ··· 45 39 header-y += xt_string.h 46 40 header-y += xt_tcpmss.h 47 41 header-y += xt_tcpudp.h 42 + header-y += xt_time.h 43 + header-y += xt_u32.h 48 44 49 45 unifdef-y += nf_conntrack_common.h 50 46 unifdef-y += nf_conntrack_ftp.h
+1
include/linux/netfilter/nfnetlink.h
··· 76 76 extern int nfnetlink_has_listeners(unsigned int group); 77 77 extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, 78 78 int echo); 79 + extern void nfnetlink_set_err(u32 pid, u32 group, int error); 79 80 extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags); 80 81 81 82 extern void nfnl_lock(void);
+8 -7
include/linux/netfilter/x_tables.h
··· 349 349 { 350 350 struct list_head list; 351 351 352 - /* A unique name... */ 353 - const char name[XT_TABLE_MAXNAMELEN]; 354 - 355 352 /* What hooks you will enter on */ 356 353 unsigned int valid_hooks; 357 354 358 355 /* Lock for the curtain */ 359 - rwlock_t lock; 356 + struct mutex lock; 360 357 361 358 /* Man behind the curtain... */ 362 - //struct ip6t_table_info *private; 363 - void *private; 359 + struct xt_table_info *private; 364 360 365 361 /* Set this to THIS_MODULE if you are a module, otherwise NULL */ 366 362 struct module *me; 367 363 368 364 u_int8_t af; /* address/protocol family */ 365 + 366 + /* A unique name... */ 367 + const char name[XT_TABLE_MAXNAMELEN]; 369 368 }; 370 369 371 370 #include <linux/netfilter_ipv4.h> ··· 385 386 386 387 /* ipt_entry tables: one per CPU */ 387 388 /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ 388 - char *entries[1]; 389 + void *entries[1]; 389 390 }; 390 391 391 392 #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ ··· 432 433 433 434 extern struct xt_table_info *xt_alloc_table_info(unsigned int size); 434 435 extern void xt_free_table_info(struct xt_table_info *info); 436 + extern void xt_table_entry_swap_rcu(struct xt_table_info *old, 437 + struct xt_table_info *new); 435 438 436 439 #ifdef CONFIG_COMPAT 437 440 #include <net/compat.h>
+13
include/linux/netfilter/xt_LED.h
··· 1 + #ifndef _XT_LED_H 2 + #define _XT_LED_H 3 + 4 + struct xt_led_info { 5 + char id[27]; /* Unique ID for this trigger in the LED class */ 6 + __u8 always_blink; /* Blink even if the LED is already on */ 7 + __u32 delay; /* Delay until LED is switched off after trigger */ 8 + 9 + /* Kernel data used in the module */ 10 + void *internal_data __attribute__((aligned(8))); 11 + }; 12 + 13 + #endif /* _XT_LED_H */
+15
include/linux/netfilter/xt_cluster.h
··· 1 + #ifndef _XT_CLUSTER_MATCH_H 2 + #define _XT_CLUSTER_MATCH_H 3 + 4 + enum xt_cluster_flags { 5 + XT_CLUSTER_F_INV = (1 << 0) 6 + }; 7 + 8 + struct xt_cluster_match_info { 9 + u_int32_t total_nodes; 10 + u_int32_t node_mask; 11 + u_int32_t hash_seed; 12 + u_int32_t flags; 13 + }; 14 + 15 + #endif /* _XT_CLUSTER_MATCH_H */
+5 -4
include/linux/netfilter/xt_limit.h
··· 4 4 /* timings are in milliseconds. */ 5 5 #define XT_LIMIT_SCALE 10000 6 6 7 + struct xt_limit_priv; 8 + 7 9 /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 8 10 seconds, or one every 59 hours. */ 9 11 struct xt_rateinfo { ··· 13 11 u_int32_t burst; /* Period multiplier for upper limit. */ 14 12 15 13 /* Used internally by the kernel */ 16 - unsigned long prev; 17 - u_int32_t credit; 14 + unsigned long prev; /* moved to xt_limit_priv */ 15 + u_int32_t credit; /* moved to xt_limit_priv */ 18 16 u_int32_t credit_cap, cost; 19 17 20 - /* Ugly, ugly fucker. */ 21 - struct xt_rateinfo *master; 18 + struct xt_limit_priv *master; 22 19 }; 23 20 #endif /*_XT_RATE_H*/
+3 -1
include/linux/netfilter/xt_quota.h
··· 6 6 }; 7 7 #define XT_QUOTA_MASK 0x1 8 8 9 + struct xt_quota_priv; 10 + 9 11 struct xt_quota_info { 10 12 u_int32_t flags; 11 13 u_int32_t pad; 12 14 13 15 /* Used internally by the kernel */ 14 16 aligned_u64 quota; 15 - struct xt_quota_info *master; 17 + struct xt_quota_priv *master; 16 18 }; 17 19 18 20 #endif /* _XT_QUOTA_H */
+4 -3
include/linux/netfilter/xt_statistic.h
··· 13 13 }; 14 14 #define XT_STATISTIC_MASK 0x1 15 15 16 + struct xt_statistic_priv; 17 + 16 18 struct xt_statistic_info { 17 19 u_int16_t mode; 18 20 u_int16_t flags; ··· 25 23 struct { 26 24 u_int32_t every; 27 25 u_int32_t packet; 28 - /* Used internally by the kernel */ 29 - u_int32_t count; 26 + u_int32_t count; /* unused */ 30 27 } nth; 31 28 } u; 32 - struct xt_statistic_info *master __attribute__((aligned(8))); 29 + struct xt_statistic_priv *master __attribute__((aligned(8))); 33 30 }; 34 31 35 32 #endif /* _XT_STATISTIC_H */
+1
include/linux/netfilter_ipv6/Kbuild
··· 11 11 header-y += ip6t_limit.h 12 12 header-y += ip6t_mac.h 13 13 header-y += ip6t_mark.h 14 + header-y += ip6t_mh.h 14 15 header-y += ip6t_multiport.h 15 16 header-y += ip6t_opts.h 16 17 header-y += ip6t_owner.h
+1 -1
include/net/netfilter/nf_conntrack.h
··· 287 287 288 288 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 289 289 extern unsigned int nf_conntrack_htable_size; 290 - extern int nf_conntrack_max; 290 + extern unsigned int nf_conntrack_max; 291 291 292 292 #define NF_CT_STAT_INC(net, count) \ 293 293 (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+1 -9
include/net/netfilter/nf_conntrack_l4proto.h
··· 90 90 struct module *me; 91 91 }; 92 92 93 - /* Existing built-in protocols */ 94 - extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; 95 - extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; 96 - extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; 93 + /* Existing built-in generic protocol */ 97 94 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; 98 95 99 96 #define MAX_NF_CT_PROTO 256 100 97 101 98 extern struct nf_conntrack_l4proto * 102 99 __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto); 103 - 104 - extern struct nf_conntrack_l4proto * 105 - nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t protocol); 106 - 107 - extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p); 108 100 109 101 /* Protocol registration. */ 110 102 extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto);
+8 -3
include/net/netfilter/nf_log.h
··· 1 1 #ifndef _NF_LOG_H 2 2 #define _NF_LOG_H 3 3 4 + #include <linux/netfilter.h> 5 + 4 6 /* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will 5 7 * disappear once iptables is replaced with pkttables. Please DO NOT use them 6 8 * for any new code! */ ··· 42 40 struct module *me; 43 41 nf_logfn *logfn; 44 42 char *name; 43 + struct list_head list[NFPROTO_NUMPROTO]; 45 44 }; 46 45 47 46 /* Function to register/unregister log function. */ 48 - int nf_log_register(u_int8_t pf, const struct nf_logger *logger); 49 - void nf_log_unregister(const struct nf_logger *logger); 50 - void nf_log_unregister_pf(u_int8_t pf); 47 + int nf_log_register(u_int8_t pf, struct nf_logger *logger); 48 + void nf_log_unregister(struct nf_logger *logger); 49 + 50 + int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger); 51 + void nf_log_unbind_pf(u_int8_t pf); 51 52 52 53 /* Calls the registered backend logging function */ 53 54 void nf_log_packet(u_int8_t pf,
-1
net/bridge/netfilter/ebtable_broute.c
··· 46 46 .name = "broute", 47 47 .table = &initial_table, 48 48 .valid_hooks = 1 << NF_BR_BROUTING, 49 - .lock = __RW_LOCK_UNLOCKED(broute_table.lock), 50 49 .check = check, 51 50 .me = THIS_MODULE, 52 51 };
-1
net/bridge/netfilter/ebtable_filter.c
··· 55 55 .name = "filter", 56 56 .table = &initial_table, 57 57 .valid_hooks = FILTER_VALID_HOOKS, 58 - .lock = __RW_LOCK_UNLOCKED(frame_filter.lock), 59 58 .check = check, 60 59 .me = THIS_MODULE, 61 60 };
-1
net/bridge/netfilter/ebtable_nat.c
··· 55 55 .name = "nat", 56 56 .table = &initial_table, 57 57 .valid_hooks = NAT_VALID_HOOKS, 58 - .lock = __RW_LOCK_UNLOCKED(frame_nat.lock), 59 58 .check = check, 60 59 .me = THIS_MODULE, 61 60 };
+12 -18
net/ipv4/netfilter/Kconfig
··· 31 31 default y 32 32 help 33 33 This option enables /proc and sysctl compatibility with the old 34 - layer 3 dependant connection tracking. This is needed to keep 34 + layer 3 dependent connection tracking. This is needed to keep 35 35 old programs that have not been adapted to the new names working. 36 36 37 37 If unsure, say Y. ··· 95 95 config IP_NF_MATCH_TTL 96 96 tristate '"ttl" match support' 97 97 depends on NETFILTER_ADVANCED 98 - help 99 - This adds CONFIG_IP_NF_MATCH_TTL option, which enabled the user 100 - to match packets by their TTL value. 101 - 102 - To compile it as a module, choose M here. If unsure, say N. 98 + select NETFILTER_XT_MATCH_HL 99 + ---help--- 100 + This is a backwards-compat option for the user's convenience 101 + (e.g. when running oldconfig). It selects 102 + CONFIG_NETFILTER_XT_MATCH_HL. 103 103 104 104 # `filter', generic and specific targets 105 105 config IP_NF_FILTER ··· 323 323 To compile it as a module, choose M here. If unsure, say N. 324 324 325 325 config IP_NF_TARGET_TTL 326 - tristate 'TTL target support' 327 - depends on IP_NF_MANGLE 326 + tristate '"TTL" target support' 328 327 depends on NETFILTER_ADVANCED 329 - help 330 - This option adds a `TTL' target, which enables the user to modify 331 - the TTL value of the IP header. 332 - 333 - While it is safe to decrement/lower the TTL, this target also enables 334 - functionality to increment and set the TTL value of the IP header to 335 - arbitrary values. This is EXTREMELY DANGEROUS since you can easily 336 - create immortal packets that loop forever on the network. 337 - 338 - To compile it as a module, choose M here. If unsure, say N. 328 + select NETFILTER_XT_TARGET_HL 329 + ---help--- 330 + This is a backwards-compat option for the user's convenience 331 + (e.g. when running oldconfig). It selects 332 + CONFIG_NETFILTER_XT_TARGET_HL. 339 333 340 334 # raw + specific targets 341 335 config IP_NF_RAW
-2
net/ipv4/netfilter/Makefile
··· 51 51 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o 52 52 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o 53 53 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o 54 - obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o 55 54 56 55 # targets 57 56 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o ··· 60 61 obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o 61 62 obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o 62 63 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o 63 - obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o 64 64 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o 65 65 66 66 # generic ARP tables
+122 -37
net/ipv4/netfilter/arp_tables.c
··· 73 73 return (ret != 0); 74 74 } 75 75 76 + /* 77 + * Unfortunatly, _b and _mask are not aligned to an int (or long int) 78 + * Some arches dont care, unrolling the loop is a win on them. 79 + */ 80 + static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) 81 + { 82 + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 83 + const unsigned long *a = (const unsigned long *)_a; 84 + const unsigned long *b = (const unsigned long *)_b; 85 + const unsigned long *mask = (const unsigned long *)_mask; 86 + unsigned long ret; 87 + 88 + ret = (a[0] ^ b[0]) & mask[0]; 89 + if (IFNAMSIZ > sizeof(unsigned long)) 90 + ret |= (a[1] ^ b[1]) & mask[1]; 91 + if (IFNAMSIZ > 2 * sizeof(unsigned long)) 92 + ret |= (a[2] ^ b[2]) & mask[2]; 93 + if (IFNAMSIZ > 3 * sizeof(unsigned long)) 94 + ret |= (a[3] ^ b[3]) & mask[3]; 95 + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); 96 + #else 97 + unsigned long ret = 0; 98 + int i; 99 + 100 + for (i = 0; i < IFNAMSIZ; i++) 101 + ret |= (_a[i] ^ _b[i]) & _mask[i]; 102 + #endif 103 + return ret; 104 + } 105 + 76 106 /* Returns whether packet matches rule or not. */ 77 107 static inline int arp_packet_match(const struct arphdr *arphdr, 78 108 struct net_device *dev, ··· 113 83 const char *arpptr = (char *)(arphdr + 1); 114 84 const char *src_devaddr, *tgt_devaddr; 115 85 __be32 src_ipaddr, tgt_ipaddr; 116 - int i, ret; 86 + long ret; 117 87 118 88 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) 119 89 ··· 186 156 } 187 157 188 158 /* Look for ifname matches. 
*/ 189 - for (i = 0, ret = 0; i < IFNAMSIZ; i++) { 190 - ret |= (indev[i] ^ arpinfo->iniface[i]) 191 - & arpinfo->iniface_mask[i]; 192 - } 159 + ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); 193 160 194 161 if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { 195 162 dprintf("VIA in mismatch (%s vs %s).%s\n", ··· 195 168 return 0; 196 169 } 197 170 198 - for (i = 0, ret = 0; i < IFNAMSIZ; i++) { 199 - ret |= (outdev[i] ^ arpinfo->outiface[i]) 200 - & arpinfo->outiface_mask[i]; 201 - } 171 + ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); 202 172 203 173 if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { 204 174 dprintf("VIA out mismatch (%s vs %s).%s\n", ··· 245 221 const struct net_device *out, 246 222 struct xt_table *table) 247 223 { 248 - static const char nulldevname[IFNAMSIZ]; 224 + static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 249 225 unsigned int verdict = NF_DROP; 250 226 const struct arphdr *arp; 251 227 bool hotdrop = false; ··· 261 237 indev = in ? in->name : nulldevname; 262 238 outdev = out ? out->name : nulldevname; 263 239 264 - read_lock_bh(&table->lock); 265 - private = table->private; 266 - table_base = (void *)private->entries[smp_processor_id()]; 240 + rcu_read_lock(); 241 + private = rcu_dereference(table->private); 242 + table_base = rcu_dereference(private->entries[smp_processor_id()]); 243 + 267 244 e = get_entry(table_base, private->hook_entry[hook]); 268 245 back = get_entry(table_base, private->underflow[hook]); 269 246 ··· 336 311 e = (void *)e + e->next_offset; 337 312 } 338 313 } while (!hotdrop); 339 - read_unlock_bh(&table->lock); 314 + 315 + rcu_read_unlock(); 340 316 341 317 if (hotdrop) 342 318 return NF_DROP; ··· 740 714 } 741 715 } 742 716 743 - static inline struct xt_counters *alloc_counters(struct xt_table *table) 717 + 718 + /* We're lazy, and add to the first CPU; overflow works its fey magic 719 + * and everything is OK. 
*/ 720 + static int 721 + add_counter_to_entry(struct arpt_entry *e, 722 + const struct xt_counters addme[], 723 + unsigned int *i) 724 + { 725 + ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 726 + 727 + (*i)++; 728 + return 0; 729 + } 730 + 731 + /* Take values from counters and add them back onto the current cpu */ 732 + static void put_counters(struct xt_table_info *t, 733 + const struct xt_counters counters[]) 734 + { 735 + unsigned int i, cpu; 736 + 737 + local_bh_disable(); 738 + cpu = smp_processor_id(); 739 + i = 0; 740 + ARPT_ENTRY_ITERATE(t->entries[cpu], 741 + t->size, 742 + add_counter_to_entry, 743 + counters, 744 + &i); 745 + local_bh_enable(); 746 + } 747 + 748 + static inline int 749 + zero_entry_counter(struct arpt_entry *e, void *arg) 750 + { 751 + e->counters.bcnt = 0; 752 + e->counters.pcnt = 0; 753 + return 0; 754 + } 755 + 756 + static void 757 + clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) 758 + { 759 + unsigned int cpu; 760 + const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; 761 + 762 + memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 763 + for_each_possible_cpu(cpu) { 764 + memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); 765 + ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, 766 + zero_entry_counter, NULL); 767 + } 768 + } 769 + 770 + static struct xt_counters *alloc_counters(struct xt_table *table) 744 771 { 745 772 unsigned int countersize; 746 773 struct xt_counters *counters; 747 - const struct xt_table_info *private = table->private; 774 + struct xt_table_info *private = table->private; 775 + struct xt_table_info *info; 748 776 749 777 /* We need atomic snapshot of counters: rest doesn't change 750 778 * (other than comefrom, which userspace doesn't care ··· 808 728 counters = vmalloc_node(countersize, numa_node_id()); 809 729 810 730 if (counters == NULL) 811 - return ERR_PTR(-ENOMEM); 731 + goto nomem; 812 732 813 - /* First, sum 
counters... */ 814 - write_lock_bh(&table->lock); 815 - get_counters(private, counters); 816 - write_unlock_bh(&table->lock); 733 + info = xt_alloc_table_info(private->size); 734 + if (!info) 735 + goto free_counters; 736 + 737 + clone_counters(info, private); 738 + 739 + mutex_lock(&table->lock); 740 + xt_table_entry_swap_rcu(private, info); 741 + synchronize_net(); /* Wait until smoke has cleared */ 742 + 743 + get_counters(info, counters); 744 + put_counters(private, counters); 745 + mutex_unlock(&table->lock); 746 + 747 + xt_free_table_info(info); 817 748 818 749 return counters; 750 + 751 + free_counters: 752 + vfree(counters); 753 + nomem: 754 + return ERR_PTR(-ENOMEM); 819 755 } 820 756 821 757 static int copy_entries_to_user(unsigned int total_size, ··· 1171 1075 return ret; 1172 1076 } 1173 1077 1174 - /* We're lazy, and add to the first CPU; overflow works its fey magic 1175 - * and everything is OK. 1176 - */ 1177 - static inline int add_counter_to_entry(struct arpt_entry *e, 1178 - const struct xt_counters addme[], 1179 - unsigned int *i) 1180 - { 1181 - 1182 - ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 1183 - 1184 - (*i)++; 1185 - return 0; 1186 - } 1187 - 1188 1078 static int do_add_counters(struct net *net, void __user *user, unsigned int len, 1189 1079 int compat) 1190 1080 { ··· 1230 1148 goto free; 1231 1149 } 1232 1150 1233 - write_lock_bh(&t->lock); 1151 + mutex_lock(&t->lock); 1234 1152 private = t->private; 1235 1153 if (private->number != num_counters) { 1236 1154 ret = -EINVAL; 1237 1155 goto unlock_up_free; 1238 1156 } 1239 1157 1158 + preempt_disable(); 1240 1159 i = 0; 1241 1160 /* Choose the copy that is on our node */ 1242 1161 loc_cpu_entry = private->entries[smp_processor_id()]; ··· 1246 1163 add_counter_to_entry, 1247 1164 paddc, 1248 1165 &i); 1166 + preempt_enable(); 1249 1167 unlock_up_free: 1250 - write_unlock_bh(&t->lock); 1168 + mutex_unlock(&t->lock); 1169 + 1251 1170 xt_table_unlock(t); 1252 1171 
module_put(t->me); 1253 1172 free:
-2
net/ipv4/netfilter/arptable_filter.c
··· 48 48 static struct xt_table packet_filter = { 49 49 .name = "filter", 50 50 .valid_hooks = FILTER_VALID_HOOKS, 51 - .lock = __RW_LOCK_UNLOCKED(packet_filter.lock), 52 - .private = NULL, 53 51 .me = THIS_MODULE, 54 52 .af = NFPROTO_ARP, 55 53 };
+2
net/ipv4/netfilter/ip_queue.c
··· 24 24 #include <linux/proc_fs.h> 25 25 #include <linux/seq_file.h> 26 26 #include <linux/security.h> 27 + #include <linux/net.h> 27 28 #include <linux/mutex.h> 28 29 #include <net/net_namespace.h> 29 30 #include <net/sock.h> ··· 641 640 MODULE_DESCRIPTION("IPv4 packet queue handler"); 642 641 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); 643 642 MODULE_LICENSE("GPL"); 643 + MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL); 644 644 645 645 module_init(ip_queue_init); 646 646 module_exit(ip_queue_fini);
+108 -45
net/ipv4/netfilter/ip_tables.c
··· 74 74 75 75 Hence the start of any table is given by get_table() below. */ 76 76 77 + static unsigned long ifname_compare(const char *_a, const char *_b, 78 + const unsigned char *_mask) 79 + { 80 + const unsigned long *a = (const unsigned long *)_a; 81 + const unsigned long *b = (const unsigned long *)_b; 82 + const unsigned long *mask = (const unsigned long *)_mask; 83 + unsigned long ret; 84 + 85 + ret = (a[0] ^ b[0]) & mask[0]; 86 + if (IFNAMSIZ > sizeof(unsigned long)) 87 + ret |= (a[1] ^ b[1]) & mask[1]; 88 + if (IFNAMSIZ > 2 * sizeof(unsigned long)) 89 + ret |= (a[2] ^ b[2]) & mask[2]; 90 + if (IFNAMSIZ > 3 * sizeof(unsigned long)) 91 + ret |= (a[3] ^ b[3]) & mask[3]; 92 + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); 93 + return ret; 94 + } 95 + 77 96 /* Returns whether matches rule or not. */ 78 97 /* Performance critical - called for every packet */ 79 98 static inline bool ··· 102 83 const struct ipt_ip *ipinfo, 103 84 int isfrag) 104 85 { 105 - size_t i; 106 86 unsigned long ret; 107 87 108 88 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) ··· 121 103 return false; 122 104 } 123 105 124 - /* Look for ifname matches; this should unroll nicely. 
*/ 125 - for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) { 126 - ret |= (((const unsigned long *)indev)[i] 127 - ^ ((const unsigned long *)ipinfo->iniface)[i]) 128 - & ((const unsigned long *)ipinfo->iniface_mask)[i]; 129 - } 106 + ret = ifname_compare(indev, ipinfo->iniface, ipinfo->iniface_mask); 130 107 131 108 if (FWINV(ret != 0, IPT_INV_VIA_IN)) { 132 109 dprintf("VIA in mismatch (%s vs %s).%s\n", ··· 130 117 return false; 131 118 } 132 119 133 - for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) { 134 - ret |= (((const unsigned long *)outdev)[i] 135 - ^ ((const unsigned long *)ipinfo->outiface)[i]) 136 - & ((const unsigned long *)ipinfo->outiface_mask)[i]; 137 - } 120 + ret = ifname_compare(outdev, ipinfo->outiface, ipinfo->outiface_mask); 138 121 139 122 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { 140 123 dprintf("VIA out mismatch (%s vs %s).%s\n", ··· 356 347 mtpar.family = tgpar.family = NFPROTO_IPV4; 357 348 tgpar.hooknum = hook; 358 349 359 - read_lock_bh(&table->lock); 360 350 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 361 - private = table->private; 362 - table_base = (void *)private->entries[smp_processor_id()]; 351 + 352 + rcu_read_lock(); 353 + private = rcu_dereference(table->private); 354 + table_base = rcu_dereference(private->entries[smp_processor_id()]); 355 + 363 356 e = get_entry(table_base, private->hook_entry[hook]); 364 357 365 358 /* For return from builtin chain */ ··· 456 445 } 457 446 } while (!hotdrop); 458 447 459 - read_unlock_bh(&table->lock); 448 + rcu_read_unlock(); 460 449 461 450 #ifdef DEBUG_ALLOW_ALL 462 451 return NF_ACCEPT; ··· 935 924 counters, 936 925 &i); 937 926 } 927 + 928 + } 929 + 930 + /* We're lazy, and add to the first CPU; overflow works its fey magic 931 + * and everything is OK. 
*/ 932 + static int 933 + add_counter_to_entry(struct ipt_entry *e, 934 + const struct xt_counters addme[], 935 + unsigned int *i) 936 + { 937 + ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 938 + 939 + (*i)++; 940 + return 0; 941 + } 942 + 943 + /* Take values from counters and add them back onto the current cpu */ 944 + static void put_counters(struct xt_table_info *t, 945 + const struct xt_counters counters[]) 946 + { 947 + unsigned int i, cpu; 948 + 949 + local_bh_disable(); 950 + cpu = smp_processor_id(); 951 + i = 0; 952 + IPT_ENTRY_ITERATE(t->entries[cpu], 953 + t->size, 954 + add_counter_to_entry, 955 + counters, 956 + &i); 957 + local_bh_enable(); 958 + } 959 + 960 + 961 + static inline int 962 + zero_entry_counter(struct ipt_entry *e, void *arg) 963 + { 964 + e->counters.bcnt = 0; 965 + e->counters.pcnt = 0; 966 + return 0; 967 + } 968 + 969 + static void 970 + clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) 971 + { 972 + unsigned int cpu; 973 + const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; 974 + 975 + memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 976 + for_each_possible_cpu(cpu) { 977 + memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); 978 + IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, 979 + zero_entry_counter, NULL); 980 + } 938 981 } 939 982 940 983 static struct xt_counters * alloc_counters(struct xt_table *table) 941 984 { 942 985 unsigned int countersize; 943 986 struct xt_counters *counters; 944 - const struct xt_table_info *private = table->private; 987 + struct xt_table_info *private = table->private; 988 + struct xt_table_info *info; 945 989 946 990 /* We need atomic snapshot of counters: rest doesn't change 947 991 (other than comefrom, which userspace doesn't care ··· 1005 939 counters = vmalloc_node(countersize, numa_node_id()); 1006 940 1007 941 if (counters == NULL) 1008 - return ERR_PTR(-ENOMEM); 942 + goto nomem; 1009 943 1010 - /* First, 
sum counters... */ 1011 - write_lock_bh(&table->lock); 1012 - get_counters(private, counters); 1013 - write_unlock_bh(&table->lock); 944 + info = xt_alloc_table_info(private->size); 945 + if (!info) 946 + goto free_counters; 947 + 948 + clone_counters(info, private); 949 + 950 + mutex_lock(&table->lock); 951 + xt_table_entry_swap_rcu(private, info); 952 + synchronize_net(); /* Wait until smoke has cleared */ 953 + 954 + get_counters(info, counters); 955 + put_counters(private, counters); 956 + mutex_unlock(&table->lock); 957 + 958 + xt_free_table_info(info); 1014 959 1015 960 return counters; 961 + 962 + free_counters: 963 + vfree(counters); 964 + nomem: 965 + return ERR_PTR(-ENOMEM); 1016 966 } 1017 967 1018 968 static int ··· 1394 1312 return ret; 1395 1313 } 1396 1314 1397 - /* We're lazy, and add to the first CPU; overflow works its fey magic 1398 - * and everything is OK. */ 1399 - static int 1400 - add_counter_to_entry(struct ipt_entry *e, 1401 - const struct xt_counters addme[], 1402 - unsigned int *i) 1403 - { 1404 - #if 0 1405 - duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n", 1406 - *i, 1407 - (long unsigned int)e->counters.pcnt, 1408 - (long unsigned int)e->counters.bcnt, 1409 - (long unsigned int)addme[*i].pcnt, 1410 - (long unsigned int)addme[*i].bcnt); 1411 - #endif 1412 - 1413 - ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 1414 - 1415 - (*i)++; 1416 - return 0; 1417 - } 1418 1315 1419 1316 static int 1420 1317 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) ··· 1454 1393 goto free; 1455 1394 } 1456 1395 1457 - write_lock_bh(&t->lock); 1396 + mutex_lock(&t->lock); 1458 1397 private = t->private; 1459 1398 if (private->number != num_counters) { 1460 1399 ret = -EINVAL; 1461 1400 goto unlock_up_free; 1462 1401 } 1463 1402 1403 + preempt_disable(); 1464 1404 i = 0; 1465 1405 /* Choose the copy that is on our node */ 1466 1406 loc_cpu_entry = private->entries[raw_smp_processor_id()]; ··· 1470 1408 
add_counter_to_entry, 1471 1409 paddc, 1472 1410 &i); 1411 + preempt_enable(); 1473 1412 unlock_up_free: 1474 - write_unlock_bh(&t->lock); 1413 + mutex_unlock(&t->lock); 1475 1414 xt_table_unlock(t); 1476 1415 module_put(t->me); 1477 1416 free:
+1 -1
net/ipv4/netfilter/ipt_LOG.c
··· 464 464 .me = THIS_MODULE, 465 465 }; 466 466 467 - static const struct nf_logger ipt_log_logger ={ 467 + static struct nf_logger ipt_log_logger __read_mostly = { 468 468 .name = "ipt_LOG", 469 469 .logfn = &ipt_log_packet, 470 470 .me = THIS_MODULE,
-97
net/ipv4/netfilter/ipt_TTL.c
··· 1 - /* TTL modification target for IP tables 2 - * (C) 2000,2005 by Harald Welte <laforge@netfilter.org> 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License version 2 as 6 - * published by the Free Software Foundation. 7 - * 8 - */ 9 - 10 - #include <linux/module.h> 11 - #include <linux/skbuff.h> 12 - #include <linux/ip.h> 13 - #include <net/checksum.h> 14 - 15 - #include <linux/netfilter/x_tables.h> 16 - #include <linux/netfilter_ipv4/ipt_TTL.h> 17 - 18 - MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 19 - MODULE_DESCRIPTION("Xtables: IPv4 TTL field modification target"); 20 - MODULE_LICENSE("GPL"); 21 - 22 - static unsigned int 23 - ttl_tg(struct sk_buff *skb, const struct xt_target_param *par) 24 - { 25 - struct iphdr *iph; 26 - const struct ipt_TTL_info *info = par->targinfo; 27 - int new_ttl; 28 - 29 - if (!skb_make_writable(skb, skb->len)) 30 - return NF_DROP; 31 - 32 - iph = ip_hdr(skb); 33 - 34 - switch (info->mode) { 35 - case IPT_TTL_SET: 36 - new_ttl = info->ttl; 37 - break; 38 - case IPT_TTL_INC: 39 - new_ttl = iph->ttl + info->ttl; 40 - if (new_ttl > 255) 41 - new_ttl = 255; 42 - break; 43 - case IPT_TTL_DEC: 44 - new_ttl = iph->ttl - info->ttl; 45 - if (new_ttl < 0) 46 - new_ttl = 0; 47 - break; 48 - default: 49 - new_ttl = iph->ttl; 50 - break; 51 - } 52 - 53 - if (new_ttl != iph->ttl) { 54 - csum_replace2(&iph->check, htons(iph->ttl << 8), 55 - htons(new_ttl << 8)); 56 - iph->ttl = new_ttl; 57 - } 58 - 59 - return XT_CONTINUE; 60 - } 61 - 62 - static bool ttl_tg_check(const struct xt_tgchk_param *par) 63 - { 64 - const struct ipt_TTL_info *info = par->targinfo; 65 - 66 - if (info->mode > IPT_TTL_MAXMODE) { 67 - printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", 68 - info->mode); 69 - return false; 70 - } 71 - if (info->mode != IPT_TTL_SET && info->ttl == 0) 72 - return false; 73 - return true; 74 - } 75 - 76 - static struct xt_target 
ttl_tg_reg __read_mostly = { 77 - .name = "TTL", 78 - .family = NFPROTO_IPV4, 79 - .target = ttl_tg, 80 - .targetsize = sizeof(struct ipt_TTL_info), 81 - .table = "mangle", 82 - .checkentry = ttl_tg_check, 83 - .me = THIS_MODULE, 84 - }; 85 - 86 - static int __init ttl_tg_init(void) 87 - { 88 - return xt_register_target(&ttl_tg_reg); 89 - } 90 - 91 - static void __exit ttl_tg_exit(void) 92 - { 93 - xt_unregister_target(&ttl_tg_reg); 94 - } 95 - 96 - module_init(ttl_tg_init); 97 - module_exit(ttl_tg_exit);
+1 -1
net/ipv4/netfilter/ipt_ULOG.c
··· 379 379 .me = THIS_MODULE, 380 380 }; 381 381 382 - static struct nf_logger ipt_ulog_logger = { 382 + static struct nf_logger ipt_ulog_logger __read_mostly = { 383 383 .name = "ipt_ULOG", 384 384 .logfn = ipt_logfn, 385 385 .me = THIS_MODULE,
-63
net/ipv4/netfilter/ipt_ttl.c
··· 1 - /* IP tables module for matching the value of the TTL 2 - * 3 - * (C) 2000,2001 by Harald Welte <laforge@netfilter.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify 6 - * it under the terms of the GNU General Public License version 2 as 7 - * published by the Free Software Foundation. 8 - */ 9 - 10 - #include <linux/ip.h> 11 - #include <linux/module.h> 12 - #include <linux/skbuff.h> 13 - 14 - #include <linux/netfilter_ipv4/ipt_ttl.h> 15 - #include <linux/netfilter/x_tables.h> 16 - 17 - MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 18 - MODULE_DESCRIPTION("Xtables: IPv4 TTL field match"); 19 - MODULE_LICENSE("GPL"); 20 - 21 - static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par) 22 - { 23 - const struct ipt_ttl_info *info = par->matchinfo; 24 - const u8 ttl = ip_hdr(skb)->ttl; 25 - 26 - switch (info->mode) { 27 - case IPT_TTL_EQ: 28 - return ttl == info->ttl; 29 - case IPT_TTL_NE: 30 - return ttl != info->ttl; 31 - case IPT_TTL_LT: 32 - return ttl < info->ttl; 33 - case IPT_TTL_GT: 34 - return ttl > info->ttl; 35 - default: 36 - printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", 37 - info->mode); 38 - return false; 39 - } 40 - 41 - return false; 42 - } 43 - 44 - static struct xt_match ttl_mt_reg __read_mostly = { 45 - .name = "ttl", 46 - .family = NFPROTO_IPV4, 47 - .match = ttl_mt, 48 - .matchsize = sizeof(struct ipt_ttl_info), 49 - .me = THIS_MODULE, 50 - }; 51 - 52 - static int __init ttl_mt_init(void) 53 - { 54 - return xt_register_match(&ttl_mt_reg); 55 - } 56 - 57 - static void __exit ttl_mt_exit(void) 58 - { 59 - xt_unregister_match(&ttl_mt_reg); 60 - } 61 - 62 - module_init(ttl_mt_init); 63 - module_exit(ttl_mt_exit);
-1
net/ipv4/netfilter/iptable_filter.c
··· 56 56 static struct xt_table packet_filter = { 57 57 .name = "filter", 58 58 .valid_hooks = FILTER_VALID_HOOKS, 59 - .lock = __RW_LOCK_UNLOCKED(packet_filter.lock), 60 59 .me = THIS_MODULE, 61 60 .af = AF_INET, 62 61 };
-1
net/ipv4/netfilter/iptable_mangle.c
··· 67 67 static struct xt_table packet_mangler = { 68 68 .name = "mangle", 69 69 .valid_hooks = MANGLE_VALID_HOOKS, 70 - .lock = __RW_LOCK_UNLOCKED(packet_mangler.lock), 71 70 .me = THIS_MODULE, 72 71 .af = AF_INET, 73 72 };
-1
net/ipv4/netfilter/iptable_raw.c
··· 39 39 static struct xt_table packet_raw = { 40 40 .name = "raw", 41 41 .valid_hooks = RAW_VALID_HOOKS, 42 - .lock = __RW_LOCK_UNLOCKED(packet_raw.lock), 43 42 .me = THIS_MODULE, 44 43 .af = AF_INET, 45 44 };
-1
net/ipv4/netfilter/iptable_security.c
··· 60 60 static struct xt_table security_table = { 61 61 .name = "security", 62 62 .valid_hooks = SECURITY_VALID_HOOKS, 63 - .lock = __RW_LOCK_UNLOCKED(security_table.lock), 64 63 .me = THIS_MODULE, 65 64 .af = AF_INET, 66 65 };
+3 -1
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
··· 120 120 typeof(nf_nat_seq_adjust_hook) seq_adjust; 121 121 122 122 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); 123 - if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) 123 + if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) { 124 + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 124 125 return NF_DROP; 126 + } 125 127 } 126 128 out: 127 129 /* We've seen it coming out the other side: confirm it */
-1
net/ipv4/netfilter/nf_nat_rule.c
··· 61 61 static struct xt_table nat_table = { 62 62 .name = "nat", 63 63 .valid_hooks = NAT_VALID_HOOKS, 64 - .lock = __RW_LOCK_UNLOCKED(nat_table.lock), 65 64 .me = THIS_MODULE, 66 65 .af = AF_INET, 67 66 };
+15 -23
net/ipv6/netfilter/Kconfig
··· 95 95 To compile it as a module, choose M here. If unsure, say N. 96 96 97 97 config IP6_NF_MATCH_HL 98 - tristate '"hl" match support' 98 + tristate '"hl" hoplimit match support' 99 99 depends on NETFILTER_ADVANCED 100 - help 101 - HL matching allows you to match packets based on the hop 102 - limit of the packet. 103 - 104 - To compile it as a module, choose M here. If unsure, say N. 100 + select NETFILTER_XT_MATCH_HL 101 + ---help--- 102 + This is a backwards-compat option for the user's convenience 103 + (e.g. when running oldconfig). It selects 104 + CONFIG_NETFILTER_XT_MATCH_HL. 105 105 106 106 config IP6_NF_MATCH_IPV6HEADER 107 107 tristate '"ipv6header" IPv6 Extension Headers Match' ··· 130 130 To compile it as a module, choose M here. If unsure, say N. 131 131 132 132 # The targets 133 + config IP6_NF_TARGET_HL 134 + tristate '"HL" hoplimit target support' 135 + depends on NETFILTER_ADVANCED 136 + select NETFILTER_XT_TARGET_HL 137 + ---help--- 138 + This is a backwards-compat option for the user's convenience 139 + (e.g. when running oldconfig). It selects 140 + CONFIG_NETFILTER_XT_TARGET_HL. 141 + 133 142 config IP6_NF_TARGET_LOG 134 143 tristate "LOG target support" 135 144 default m if NETFILTER_ADVANCED=n ··· 176 167 This option adds a `mangle' table to iptables: see the man page for 177 168 iptables(8). This table is used for various packet alterations 178 169 which can effect how the packet is routed. 179 - 180 - To compile it as a module, choose M here. If unsure, say N. 181 - 182 - config IP6_NF_TARGET_HL 183 - tristate 'HL (hoplimit) target support' 184 - depends on IP6_NF_MANGLE 185 - depends on NETFILTER_ADVANCED 186 - help 187 - This option adds a `HL' target, which enables the user to decrement 188 - the hoplimit value of the IPv6 header or set it to a given (lower) 189 - value. 
190 - 191 - While it is safe to decrement the hoplimit value, this option also 192 - enables functionality to increment and set the hoplimit value of the 193 - IPv6 header to arbitrary values. This is EXTREMELY DANGEROUS since 194 - you can easily create immortal packets that loop forever on the 195 - network. 196 170 197 171 To compile it as a module, choose M here. If unsure, say N. 198 172
-2
net/ipv6/netfilter/Makefile
··· 20 20 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o 21 21 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o 22 22 obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o 23 - obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o 24 23 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o 25 24 obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o 26 25 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o 27 26 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o 28 27 29 28 # targets 30 - obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o 31 29 obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o 32 30 obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+1
net/ipv6/netfilter/ip6_queue.c
··· 643 643 644 644 MODULE_DESCRIPTION("IPv6 packet queue handler"); 645 645 MODULE_LICENSE("GPL"); 646 + MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW); 646 647 647 648 module_init(ip6_queue_init); 648 649 module_exit(ip6_queue_fini);
+105 -47
net/ipv6/netfilter/ip6_tables.c
··· 89 89 (nexthdr == IPPROTO_DSTOPTS) ); 90 90 } 91 91 92 + static unsigned long ifname_compare(const char *_a, const char *_b, 93 + const unsigned char *_mask) 94 + { 95 + const unsigned long *a = (const unsigned long *)_a; 96 + const unsigned long *b = (const unsigned long *)_b; 97 + const unsigned long *mask = (const unsigned long *)_mask; 98 + unsigned long ret; 99 + 100 + ret = (a[0] ^ b[0]) & mask[0]; 101 + if (IFNAMSIZ > sizeof(unsigned long)) 102 + ret |= (a[1] ^ b[1]) & mask[1]; 103 + if (IFNAMSIZ > 2 * sizeof(unsigned long)) 104 + ret |= (a[2] ^ b[2]) & mask[2]; 105 + if (IFNAMSIZ > 3 * sizeof(unsigned long)) 106 + ret |= (a[3] ^ b[3]) & mask[3]; 107 + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); 108 + return ret; 109 + } 110 + 92 111 /* Returns whether matches rule or not. */ 93 112 /* Performance critical - called for every packet */ 94 113 static inline bool ··· 118 99 unsigned int *protoff, 119 100 int *fragoff, bool *hotdrop) 120 101 { 121 - size_t i; 122 102 unsigned long ret; 123 103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb); 124 104 ··· 138 120 return false; 139 121 } 140 122 141 - /* Look for ifname matches; this should unroll nicely. 
*/ 142 - for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) { 143 - ret |= (((const unsigned long *)indev)[i] 144 - ^ ((const unsigned long *)ip6info->iniface)[i]) 145 - & ((const unsigned long *)ip6info->iniface_mask)[i]; 146 - } 123 + ret = ifname_compare(indev, ip6info->iniface, ip6info->iniface_mask); 147 124 148 125 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { 149 126 dprintf("VIA in mismatch (%s vs %s).%s\n", ··· 147 134 return false; 148 135 } 149 136 150 - for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) { 151 - ret |= (((const unsigned long *)outdev)[i] 152 - ^ ((const unsigned long *)ip6info->outiface)[i]) 153 - & ((const unsigned long *)ip6info->outiface_mask)[i]; 154 - } 137 + ret = ifname_compare(outdev, ip6info->outiface, ip6info->outiface_mask); 155 138 156 139 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { 157 140 dprintf("VIA out mismatch (%s vs %s).%s\n", ··· 382 373 mtpar.family = tgpar.family = NFPROTO_IPV6; 383 374 tgpar.hooknum = hook; 384 375 385 - read_lock_bh(&table->lock); 386 376 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 387 - private = table->private; 388 - table_base = (void *)private->entries[smp_processor_id()]; 377 + 378 + rcu_read_lock(); 379 + private = rcu_dereference(table->private); 380 + table_base = rcu_dereference(private->entries[smp_processor_id()]); 381 + 389 382 e = get_entry(table_base, private->hook_entry[hook]); 390 383 391 384 /* For return from builtin chain */ ··· 485 474 #ifdef CONFIG_NETFILTER_DEBUG 486 475 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; 487 476 #endif 488 - read_unlock_bh(&table->lock); 477 + rcu_read_unlock(); 489 478 490 479 #ifdef DEBUG_ALLOW_ALL 491 480 return NF_ACCEPT; ··· 966 955 } 967 956 } 968 957 958 + /* We're lazy, and add to the first CPU; overflow works its fey magic 959 + * and everything is OK. 
*/ 960 + static int 961 + add_counter_to_entry(struct ip6t_entry *e, 962 + const struct xt_counters addme[], 963 + unsigned int *i) 964 + { 965 + ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 966 + 967 + (*i)++; 968 + return 0; 969 + } 970 + 971 + /* Take values from counters and add them back onto the current cpu */ 972 + static void put_counters(struct xt_table_info *t, 973 + const struct xt_counters counters[]) 974 + { 975 + unsigned int i, cpu; 976 + 977 + local_bh_disable(); 978 + cpu = smp_processor_id(); 979 + i = 0; 980 + IP6T_ENTRY_ITERATE(t->entries[cpu], 981 + t->size, 982 + add_counter_to_entry, 983 + counters, 984 + &i); 985 + local_bh_enable(); 986 + } 987 + 988 + static inline int 989 + zero_entry_counter(struct ip6t_entry *e, void *arg) 990 + { 991 + e->counters.bcnt = 0; 992 + e->counters.pcnt = 0; 993 + return 0; 994 + } 995 + 996 + static void 997 + clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) 998 + { 999 + unsigned int cpu; 1000 + const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1001 + 1002 + memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1003 + for_each_possible_cpu(cpu) { 1004 + memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); 1005 + IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, 1006 + zero_entry_counter, NULL); 1007 + } 1008 + } 1009 + 969 1010 static struct xt_counters *alloc_counters(struct xt_table *table) 970 1011 { 971 1012 unsigned int countersize; 972 1013 struct xt_counters *counters; 973 - const struct xt_table_info *private = table->private; 1014 + struct xt_table_info *private = table->private; 1015 + struct xt_table_info *info; 974 1016 975 1017 /* We need atomic snapshot of counters: rest doesn't change 976 1018 (other than comefrom, which userspace doesn't care ··· 1032 968 counters = vmalloc_node(countersize, numa_node_id()); 1033 969 1034 970 if (counters == NULL) 1035 - return ERR_PTR(-ENOMEM); 971 + goto nomem; 1036 972 1037 
- /* First, sum counters... */ 1038 - write_lock_bh(&table->lock); 1039 - get_counters(private, counters); 1040 - write_unlock_bh(&table->lock); 973 + info = xt_alloc_table_info(private->size); 974 + if (!info) 975 + goto free_counters; 1041 976 1042 - return counters; 977 + clone_counters(info, private); 978 + 979 + mutex_lock(&table->lock); 980 + xt_table_entry_swap_rcu(private, info); 981 + synchronize_net(); /* Wait until smoke has cleared */ 982 + 983 + get_counters(info, counters); 984 + put_counters(private, counters); 985 + mutex_unlock(&table->lock); 986 + 987 + xt_free_table_info(info); 988 + 989 + free_counters: 990 + vfree(counters); 991 + nomem: 992 + return ERR_PTR(-ENOMEM); 1043 993 } 1044 994 1045 995 static int ··· 1420 1342 return ret; 1421 1343 } 1422 1344 1423 - /* We're lazy, and add to the first CPU; overflow works its fey magic 1424 - * and everything is OK. */ 1425 - static inline int 1426 - add_counter_to_entry(struct ip6t_entry *e, 1427 - const struct xt_counters addme[], 1428 - unsigned int *i) 1429 - { 1430 - #if 0 1431 - duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n", 1432 - *i, 1433 - (long unsigned int)e->counters.pcnt, 1434 - (long unsigned int)e->counters.bcnt, 1435 - (long unsigned int)addme[*i].pcnt, 1436 - (long unsigned int)addme[*i].bcnt); 1437 - #endif 1438 - 1439 - ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); 1440 - 1441 - (*i)++; 1442 - return 0; 1443 - } 1444 - 1445 1345 static int 1446 1346 do_add_counters(struct net *net, void __user *user, unsigned int len, 1447 1347 int compat) ··· 1480 1424 goto free; 1481 1425 } 1482 1426 1483 - write_lock_bh(&t->lock); 1427 + mutex_lock(&t->lock); 1484 1428 private = t->private; 1485 1429 if (private->number != num_counters) { 1486 1430 ret = -EINVAL; 1487 1431 goto unlock_up_free; 1488 1432 } 1489 1433 1434 + preempt_disable(); 1490 1435 i = 0; 1491 1436 /* Choose the copy that is on our node */ 1492 1437 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 
··· 1496 1439 add_counter_to_entry, 1497 1440 paddc, 1498 1441 &i); 1442 + preempt_enable(); 1499 1443 unlock_up_free: 1500 - write_unlock_bh(&t->lock); 1444 + mutex_unlock(&t->lock); 1501 1445 xt_table_unlock(t); 1502 1446 module_put(t->me); 1503 1447 free:
-95
net/ipv6/netfilter/ip6t_HL.c
··· 1 - /* 2 - * Hop Limit modification target for ip6tables 3 - * Maciej Soltysiak <solt@dns.toxicfilms.tv> 4 - * Based on HW's TTL module 5 - * 6 - * This software is distributed under the terms of GNU GPL 7 - */ 8 - 9 - #include <linux/module.h> 10 - #include <linux/skbuff.h> 11 - #include <linux/ip.h> 12 - #include <linux/ipv6.h> 13 - 14 - #include <linux/netfilter/x_tables.h> 15 - #include <linux/netfilter_ipv6/ip6t_HL.h> 16 - 17 - MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 18 - MODULE_DESCRIPTION("Xtables: IPv6 Hop Limit field modification target"); 19 - MODULE_LICENSE("GPL"); 20 - 21 - static unsigned int 22 - hl_tg6(struct sk_buff *skb, const struct xt_target_param *par) 23 - { 24 - struct ipv6hdr *ip6h; 25 - const struct ip6t_HL_info *info = par->targinfo; 26 - int new_hl; 27 - 28 - if (!skb_make_writable(skb, skb->len)) 29 - return NF_DROP; 30 - 31 - ip6h = ipv6_hdr(skb); 32 - 33 - switch (info->mode) { 34 - case IP6T_HL_SET: 35 - new_hl = info->hop_limit; 36 - break; 37 - case IP6T_HL_INC: 38 - new_hl = ip6h->hop_limit + info->hop_limit; 39 - if (new_hl > 255) 40 - new_hl = 255; 41 - break; 42 - case IP6T_HL_DEC: 43 - new_hl = ip6h->hop_limit - info->hop_limit; 44 - if (new_hl < 0) 45 - new_hl = 0; 46 - break; 47 - default: 48 - new_hl = ip6h->hop_limit; 49 - break; 50 - } 51 - 52 - ip6h->hop_limit = new_hl; 53 - 54 - return XT_CONTINUE; 55 - } 56 - 57 - static bool hl_tg6_check(const struct xt_tgchk_param *par) 58 - { 59 - const struct ip6t_HL_info *info = par->targinfo; 60 - 61 - if (info->mode > IP6T_HL_MAXMODE) { 62 - printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n", 63 - info->mode); 64 - return false; 65 - } 66 - if (info->mode != IP6T_HL_SET && info->hop_limit == 0) { 67 - printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't " 68 - "make sense with value 0\n"); 69 - return false; 70 - } 71 - return true; 72 - } 73 - 74 - static struct xt_target hl_tg6_reg __read_mostly = { 75 - .name = "HL", 76 - .family = 
NFPROTO_IPV6, 77 - .target = hl_tg6, 78 - .targetsize = sizeof(struct ip6t_HL_info), 79 - .table = "mangle", 80 - .checkentry = hl_tg6_check, 81 - .me = THIS_MODULE 82 - }; 83 - 84 - static int __init hl_tg6_init(void) 85 - { 86 - return xt_register_target(&hl_tg6_reg); 87 - } 88 - 89 - static void __exit hl_tg6_exit(void) 90 - { 91 - xt_unregister_target(&hl_tg6_reg); 92 - } 93 - 94 - module_init(hl_tg6_init); 95 - module_exit(hl_tg6_exit);
+1 -1
net/ipv6/netfilter/ip6t_LOG.c
··· 477 477 .me = THIS_MODULE, 478 478 }; 479 479 480 - static const struct nf_logger ip6t_logger = { 480 + static struct nf_logger ip6t_logger __read_mostly = { 481 481 .name = "ip6t_LOG", 482 482 .logfn = &ip6t_log_packet, 483 483 .me = THIS_MODULE,
-68
net/ipv6/netfilter/ip6t_hl.c
··· 1 - /* Hop Limit matching module */ 2 - 3 - /* (C) 2001-2002 Maciej Soltysiak <solt@dns.toxicfilms.tv> 4 - * Based on HW's ttl module 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - 11 - #include <linux/ipv6.h> 12 - #include <linux/module.h> 13 - #include <linux/skbuff.h> 14 - 15 - #include <linux/netfilter_ipv6/ip6t_hl.h> 16 - #include <linux/netfilter/x_tables.h> 17 - 18 - MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 19 - MODULE_DESCRIPTION("Xtables: IPv6 Hop Limit field match"); 20 - MODULE_LICENSE("GPL"); 21 - 22 - static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par) 23 - { 24 - const struct ip6t_hl_info *info = par->matchinfo; 25 - const struct ipv6hdr *ip6h = ipv6_hdr(skb); 26 - 27 - switch (info->mode) { 28 - case IP6T_HL_EQ: 29 - return ip6h->hop_limit == info->hop_limit; 30 - break; 31 - case IP6T_HL_NE: 32 - return ip6h->hop_limit != info->hop_limit; 33 - break; 34 - case IP6T_HL_LT: 35 - return ip6h->hop_limit < info->hop_limit; 36 - break; 37 - case IP6T_HL_GT: 38 - return ip6h->hop_limit > info->hop_limit; 39 - break; 40 - default: 41 - printk(KERN_WARNING "ip6t_hl: unknown mode %d\n", 42 - info->mode); 43 - return false; 44 - } 45 - 46 - return false; 47 - } 48 - 49 - static struct xt_match hl_mt6_reg __read_mostly = { 50 - .name = "hl", 51 - .family = NFPROTO_IPV6, 52 - .match = hl_mt6, 53 - .matchsize = sizeof(struct ip6t_hl_info), 54 - .me = THIS_MODULE, 55 - }; 56 - 57 - static int __init hl_mt6_init(void) 58 - { 59 - return xt_register_match(&hl_mt6_reg); 60 - } 61 - 62 - static void __exit hl_mt6_exit(void) 63 - { 64 - xt_unregister_match(&hl_mt6_reg); 65 - } 66 - 67 - module_init(hl_mt6_init); 68 - module_exit(hl_mt6_exit);
-1
net/ipv6/netfilter/ip6table_filter.c
··· 54 54 static struct xt_table packet_filter = { 55 55 .name = "filter", 56 56 .valid_hooks = FILTER_VALID_HOOKS, 57 - .lock = __RW_LOCK_UNLOCKED(packet_filter.lock), 58 57 .me = THIS_MODULE, 59 58 .af = AF_INET6, 60 59 };
-1
net/ipv6/netfilter/ip6table_mangle.c
··· 60 60 static struct xt_table packet_mangler = { 61 61 .name = "mangle", 62 62 .valid_hooks = MANGLE_VALID_HOOKS, 63 - .lock = __RW_LOCK_UNLOCKED(packet_mangler.lock), 64 63 .me = THIS_MODULE, 65 64 .af = AF_INET6, 66 65 };
-1
net/ipv6/netfilter/ip6table_raw.c
··· 38 38 static struct xt_table packet_raw = { 39 39 .name = "raw", 40 40 .valid_hooks = RAW_VALID_HOOKS, 41 - .lock = __RW_LOCK_UNLOCKED(packet_raw.lock), 42 41 .me = THIS_MODULE, 43 42 .af = AF_INET6, 44 43 };
-1
net/ipv6/netfilter/ip6table_security.c
··· 59 59 static struct xt_table security_table = { 60 60 .name = "security", 61 61 .valid_hooks = SECURITY_VALID_HOOKS, 62 - .lock = __RW_LOCK_UNLOCKED(security_table.lock), 63 62 .me = THIS_MODULE, 64 63 .af = AF_INET6, 65 64 };
+1
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
··· 26 26 #include <net/netfilter/nf_conntrack_l4proto.h> 27 27 #include <net/netfilter/nf_conntrack_l3proto.h> 28 28 #include <net/netfilter/nf_conntrack_core.h> 29 + #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 29 30 30 31 static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 31 32 struct nf_conntrack_tuple *tuple)
+4
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
··· 126 126 pr_debug("icmpv6: can't create new conn with type %u\n", 127 127 type + 128); 128 128 nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple); 129 + if (LOG_INVALID(nf_ct_net(ct), IPPROTO_ICMPV6)) 130 + nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, 131 + "nf_ct_icmpv6: invalid new with type %d ", 132 + type + 128); 129 133 return false; 130 134 } 131 135 atomic_set(&ct->proto.icmp.count, 0);
+63
net/netfilter/Kconfig
··· 357 357 358 358 To compile it as a module, choose M here. If unsure, say N. 359 359 360 + config NETFILTER_XT_TARGET_HL 361 + tristate '"HL" hoplimit target support' 362 + depends on IP_NF_MANGLE || IP6_NF_MANGLE 363 + depends on NETFILTER_ADVANCED 364 + ---help--- 365 + This option adds the "HL" (for IPv6) and "TTL" (for IPv4) 366 + targets, which enable the user to change the 367 + hoplimit/time-to-live value of the IP header. 368 + 369 + While it is safe to decrement the hoplimit/TTL value, the 370 + modules also allow to increment and set the hoplimit value of 371 + the header to arbitrary values. This is EXTREMELY DANGEROUS 372 + since you can easily create immortal packets that loop 373 + forever on the network. 374 + 375 + config NETFILTER_XT_TARGET_LED 376 + tristate '"LED" target support' 377 + depends on LEDS_CLASS 378 + depends on NETFILTER_ADVANCED 379 + help 380 + This option adds a `LED' target, which allows you to blink LEDs in 381 + response to particular packets passing through your machine. 382 + 383 + This can be used to turn a spare LED into a network activity LED, 384 + which only flashes in response to FTP transfers, for example. Or 385 + you could have an LED which lights up for a minute or two every time 386 + somebody connects to your machine via SSH. 387 + 388 + You will need support for the "led" class to make this work. 
389 + 390 + To create an LED trigger for incoming SSH traffic: 391 + iptables -A INPUT -p tcp --dport 22 -j LED --led-trigger-id ssh --led-delay 1000 392 + 393 + Then attach the new trigger to an LED on your system: 394 + echo netfilter-ssh > /sys/class/leds/<ledname>/trigger 395 + 396 + For more information on the LEDs available on your system, see 397 + Documentation/leds-class.txt 398 + 360 399 config NETFILTER_XT_TARGET_MARK 361 400 tristate '"MARK" target support' 362 401 default m if NETFILTER_ADVANCED=n ··· 527 488 This option adds a "TCPOPTSTRIP" target, which allows you to strip 528 489 TCP options from TCP packets. 529 490 491 + config NETFILTER_XT_MATCH_CLUSTER 492 + tristate '"cluster" match support' 493 + depends on NF_CONNTRACK 494 + depends on NETFILTER_ADVANCED 495 + ---help--- 496 + This option allows you to build work-load-sharing clusters of 497 + network servers/stateful firewalls without having a dedicated 498 + load-balancing router/server/switch. Basically, this match returns 499 + true when the packet must be handled by this cluster node. Thus, 500 + all nodes see all packets and this match decides which node handles 501 + what packets. The work-load sharing algorithm is based on source 502 + address hashing. 503 + 504 + If you say Y or M here, try `iptables -m cluster --help` for 505 + more information. 506 + 530 507 config NETFILTER_XT_MATCH_COMMENT 531 508 tristate '"comment" match support' 532 509 depends on NETFILTER_ADVANCED ··· 659 604 tracked by a conntrack-helper, ie. ip_conntrack_ftp 660 605 661 606 To compile it as a module, choose M here. If unsure, say Y. 607 + 608 + config NETFILTER_XT_MATCH_HL 609 + tristate '"hl" hoplimit/TTL match support' 610 + depends on NETFILTER_ADVANCED 611 + ---help--- 612 + HL matching allows you to match packets based on the hoplimit 613 + in the IPv6 header, or the time-to-live field in the IPv4 614 + header of the packet. 
662 615 663 616 config NETFILTER_XT_MATCH_IPRANGE 664 617 tristate '"iprange" address range match support'
+4
net/netfilter/Makefile
··· 45 45 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o 46 46 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 47 47 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 48 + obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o 49 + obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o 48 50 obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o 49 51 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o 50 52 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o ··· 59 57 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o 60 58 61 59 # matches 60 + obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o 62 61 obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o 63 62 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o 64 63 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o ··· 70 67 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o 71 68 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o 72 69 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o 70 + obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o 73 71 obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o 74 72 obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o 75 73 obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
-2
net/netfilter/core.c
··· 174 174 outdev, &elem, okfn, hook_thresh); 175 175 if (verdict == NF_ACCEPT || verdict == NF_STOP) { 176 176 ret = 1; 177 - goto unlock; 178 177 } else if (verdict == NF_DROP) { 179 178 kfree_skb(skb); 180 179 ret = -EPERM; ··· 182 183 verdict >> NF_VERDICT_BITS)) 183 184 goto next_hook; 184 185 } 185 - unlock: 186 186 rcu_read_unlock(); 187 187 return ret; 188 188 }
+9 -5
net/netfilter/nf_conntrack_core.c
··· 54 54 unsigned int nf_conntrack_htable_size __read_mostly; 55 55 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); 56 56 57 - int nf_conntrack_max __read_mostly; 57 + unsigned int nf_conntrack_max __read_mostly; 58 58 EXPORT_SYMBOL_GPL(nf_conntrack_max); 59 59 60 60 struct nf_conn nf_conntrack_untracked __read_mostly; ··· 472 472 struct nf_conn *ct; 473 473 474 474 if (unlikely(!nf_conntrack_hash_rnd_initted)) { 475 - get_random_bytes(&nf_conntrack_hash_rnd, 4); 475 + get_random_bytes(&nf_conntrack_hash_rnd, 476 + sizeof(nf_conntrack_hash_rnd)); 476 477 nf_conntrack_hash_rnd_initted = 1; 477 478 } 478 479 ··· 517 516 static void nf_conntrack_free_rcu(struct rcu_head *head) 518 517 { 519 518 struct nf_conn *ct = container_of(head, struct nf_conn, rcu); 520 - struct net *net = nf_ct_net(ct); 521 519 522 520 nf_ct_ext_free(ct); 523 521 kmem_cache_free(nf_conntrack_cachep, ct); 524 - atomic_dec(&net->ct.count); 525 522 } 526 523 527 524 void nf_conntrack_free(struct nf_conn *ct) 528 525 { 526 + struct net *net = nf_ct_net(ct); 527 + 529 528 nf_ct_ext_destroy(ct); 529 + atomic_dec(&net->ct.count); 530 530 call_rcu(&ct->rcu, nf_conntrack_free_rcu); 531 531 } 532 532 EXPORT_SYMBOL_GPL(nf_conntrack_free); ··· 735 733 nf_conntrack_put(skb->nfct); 736 734 skb->nfct = NULL; 737 735 NF_CT_STAT_INC_ATOMIC(net, invalid); 736 + if (ret == -NF_DROP) 737 + NF_CT_STAT_INC_ATOMIC(net, drop); 738 738 return -ret; 739 739 } 740 740 ··· 1107 1103 1108 1104 /* We have to rehahs for the new table anyway, so we also can 1109 1105 * use a newrandom seed */ 1110 - get_random_bytes(&rnd, 4); 1106 + get_random_bytes(&rnd, sizeof(rnd)); 1111 1107 1112 1108 /* Lookups in the old hash might happen in parallel, which means we 1113 1109 * might get false negatives during connection lookup. New connections
+2 -1
net/netfilter/nf_conntrack_expect.c
··· 72 72 unsigned int hash; 73 73 74 74 if (unlikely(!nf_ct_expect_hash_rnd_initted)) { 75 - get_random_bytes(&nf_ct_expect_hash_rnd, 4); 75 + get_random_bytes(&nf_ct_expect_hash_rnd, 76 + sizeof(nf_ct_expect_hash_rnd)); 76 77 nf_ct_expect_hash_rnd_initted = 1; 77 78 } 78 79
+72 -89
net/netfilter/nf_conntrack_netlink.c
··· 518 518 nla_put_failure: 519 519 rcu_read_unlock(); 520 520 nlmsg_failure: 521 + nfnetlink_set_err(0, group, -ENOBUFS); 521 522 kfree_skb(skb); 522 523 return NOTIFY_DONE; 523 524 } ··· 600 599 601 600 nla_parse_nested(tb, CTA_IP_MAX, attr, NULL); 602 601 603 - l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); 602 + rcu_read_lock(); 603 + l3proto = __nf_ct_l3proto_find(tuple->src.l3num); 604 604 605 605 if (likely(l3proto->nlattr_to_tuple)) { 606 606 ret = nla_validate_nested(attr, CTA_IP_MAX, ··· 610 608 ret = l3proto->nlattr_to_tuple(tb, tuple); 611 609 } 612 610 613 - nf_ct_l3proto_put(l3proto); 611 + rcu_read_unlock(); 614 612 615 613 return ret; 616 614 } ··· 635 633 return -EINVAL; 636 634 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]); 637 635 638 - l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); 636 + rcu_read_lock(); 637 + l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); 639 638 640 639 if (likely(l4proto->nlattr_to_tuple)) { 641 640 ret = nla_validate_nested(attr, CTA_PROTO_MAX, ··· 645 642 ret = l4proto->nlattr_to_tuple(tb, tuple); 646 643 } 647 644 648 - nf_ct_l4proto_put(l4proto); 645 + rcu_read_unlock(); 649 646 650 647 return ret; 651 648 } ··· 992 989 993 990 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); 994 991 995 - l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct)); 992 + rcu_read_lock(); 993 + l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 996 994 if (l4proto->from_nlattr) 997 995 err = l4proto->from_nlattr(tb, ct); 998 - nf_ct_l4proto_put(l4proto); 996 + rcu_read_unlock(); 999 997 1000 998 return err; 1001 999 } ··· 1066 1062 { 1067 1063 int err; 1068 1064 1065 + /* only allow NAT changes and master assignation for new conntracks */ 1066 + if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER]) 1067 + return -EOPNOTSUPP; 1068 + 1069 1069 if (cda[CTA_HELP]) { 1070 1070 err = ctnetlink_change_helper(ct, cda); 1071 1071 if (err < 0) 
··· 1132 1124 report); 1133 1125 } 1134 1126 1135 - static int 1127 + static struct nf_conn * 1136 1128 ctnetlink_create_conntrack(struct nlattr *cda[], 1137 1129 struct nf_conntrack_tuple *otuple, 1138 1130 struct nf_conntrack_tuple *rtuple, 1139 - struct nf_conn *master_ct, 1140 - u32 pid, 1141 - int report) 1131 + u8 u3) 1142 1132 { 1143 1133 struct nf_conn *ct; 1144 1134 int err = -EINVAL; ··· 1144 1138 1145 1139 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); 1146 1140 if (IS_ERR(ct)) 1147 - return -ENOMEM; 1141 + return ERR_PTR(-ENOMEM); 1148 1142 1149 1143 if (!cda[CTA_TIMEOUT]) 1150 - goto err; 1144 + goto err1; 1151 1145 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1152 1146 1153 1147 ct->timeout.expires = jiffies + ct->timeout.expires * HZ; ··· 1158 1152 char *helpname; 1159 1153 1160 1154 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 1161 - if (err < 0) { 1162 - rcu_read_unlock(); 1163 - goto err; 1164 - } 1155 + if (err < 0) 1156 + goto err2; 1165 1157 1166 1158 helper = __nf_conntrack_helper_find_byname(helpname); 1167 1159 if (helper == NULL) { ··· 1167 1163 #ifdef CONFIG_MODULES 1168 1164 if (request_module("nfct-helper-%s", helpname) < 0) { 1169 1165 err = -EOPNOTSUPP; 1170 - goto err; 1166 + goto err1; 1171 1167 } 1172 1168 1173 1169 rcu_read_lock(); 1174 1170 helper = __nf_conntrack_helper_find_byname(helpname); 1175 1171 if (helper) { 1176 - rcu_read_unlock(); 1177 1172 err = -EAGAIN; 1178 - goto err; 1173 + goto err2; 1179 1174 } 1180 1175 rcu_read_unlock(); 1181 1176 #endif 1182 1177 err = -EOPNOTSUPP; 1183 - goto err; 1178 + goto err1; 1184 1179 } else { 1185 1180 struct nf_conn_help *help; 1186 1181 1187 1182 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 1188 1183 if (help == NULL) { 1189 - rcu_read_unlock(); 1190 1184 err = -ENOMEM; 1191 - goto err; 1185 + goto err2; 1192 1186 } 1193 1187 1194 1188 /* not in hash table yet so not strictly necessary */ ··· 1195 1193 } else { 1196 1194 /* try an implicit 
helper assignation */ 1197 1195 err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 1198 - if (err < 0) { 1199 - rcu_read_unlock(); 1200 - goto err; 1201 - } 1196 + if (err < 0) 1197 + goto err2; 1202 1198 } 1203 1199 1204 1200 if (cda[CTA_STATUS]) { 1205 1201 err = ctnetlink_change_status(ct, cda); 1206 - if (err < 0) { 1207 - rcu_read_unlock(); 1208 - goto err; 1209 - } 1202 + if (err < 0) 1203 + goto err2; 1210 1204 } 1211 1205 1212 1206 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1213 1207 err = ctnetlink_change_nat(ct, cda); 1214 - if (err < 0) { 1215 - rcu_read_unlock(); 1216 - goto err; 1217 - } 1208 + if (err < 0) 1209 + goto err2; 1218 1210 } 1219 1211 1220 1212 #ifdef CONFIG_NF_NAT_NEEDED 1221 1213 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) { 1222 1214 err = ctnetlink_change_nat_seq_adj(ct, cda); 1223 - if (err < 0) { 1224 - rcu_read_unlock(); 1225 - goto err; 1226 - } 1215 + if (err < 0) 1216 + goto err2; 1227 1217 } 1228 1218 #endif 1229 1219 1230 1220 if (cda[CTA_PROTOINFO]) { 1231 1221 err = ctnetlink_change_protoinfo(ct, cda); 1232 - if (err < 0) { 1233 - rcu_read_unlock(); 1234 - goto err; 1235 - } 1222 + if (err < 0) 1223 + goto err2; 1236 1224 } 1237 1225 1238 1226 nf_ct_acct_ext_add(ct, GFP_ATOMIC); ··· 1233 1241 #endif 1234 1242 1235 1243 /* setup master conntrack: this is a confirmed expectation */ 1236 - if (master_ct) { 1244 + if (cda[CTA_TUPLE_MASTER]) { 1245 + struct nf_conntrack_tuple master; 1246 + struct nf_conntrack_tuple_hash *master_h; 1247 + struct nf_conn *master_ct; 1248 + 1249 + err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3); 1250 + if (err < 0) 1251 + goto err2; 1252 + 1253 + master_h = __nf_conntrack_find(&init_net, &master); 1254 + if (master_h == NULL) { 1255 + err = -ENOENT; 1256 + goto err2; 1257 + } 1258 + master_ct = nf_ct_tuplehash_to_ctrack(master_h); 1259 + nf_conntrack_get(&master_ct->ct_general); 1237 1260 __set_bit(IPS_EXPECTED_BIT, &ct->status); 1238 1261 ct->master = master_ct; 1239 
1262 } 1240 1263 1241 - nf_conntrack_get(&ct->ct_general); 1242 1264 add_timer(&ct->timeout); 1243 1265 nf_conntrack_hash_insert(ct); 1244 1266 rcu_read_unlock(); 1245 - ctnetlink_event_report(ct, pid, report); 1246 - nf_ct_put(ct); 1247 1267 1248 - return 0; 1268 + return ct; 1249 1269 1250 - err: 1270 + err2: 1271 + rcu_read_unlock(); 1272 + err1: 1251 1273 nf_conntrack_free(ct); 1252 - return err; 1274 + return ERR_PTR(err); 1253 1275 } 1254 1276 1255 1277 static int ··· 1295 1289 h = __nf_conntrack_find(&init_net, &rtuple); 1296 1290 1297 1291 if (h == NULL) { 1298 - struct nf_conntrack_tuple master; 1299 - struct nf_conntrack_tuple_hash *master_h = NULL; 1300 - struct nf_conn *master_ct = NULL; 1292 + err = -ENOENT; 1293 + if (nlh->nlmsg_flags & NLM_F_CREATE) { 1294 + struct nf_conn *ct; 1301 1295 1302 - if (cda[CTA_TUPLE_MASTER]) { 1303 - err = ctnetlink_parse_tuple(cda, 1304 - &master, 1305 - CTA_TUPLE_MASTER, 1306 - u3); 1307 - if (err < 0) 1308 - goto out_unlock; 1309 - 1310 - master_h = __nf_conntrack_find(&init_net, &master); 1311 - if (master_h == NULL) { 1312 - err = -ENOENT; 1296 + ct = ctnetlink_create_conntrack(cda, &otuple, 1297 + &rtuple, u3); 1298 + if (IS_ERR(ct)) { 1299 + err = PTR_ERR(ct); 1313 1300 goto out_unlock; 1314 1301 } 1315 - master_ct = nf_ct_tuplehash_to_ctrack(master_h); 1316 - nf_conntrack_get(&master_ct->ct_general); 1317 - } 1318 - 1319 - err = -ENOENT; 1320 - if (nlh->nlmsg_flags & NLM_F_CREATE) 1321 - err = ctnetlink_create_conntrack(cda, 1322 - &otuple, 1323 - &rtuple, 1324 - master_ct, 1325 - NETLINK_CB(skb).pid, 1326 - nlmsg_report(nlh)); 1327 - spin_unlock_bh(&nf_conntrack_lock); 1328 - if (err < 0 && master_ct) 1329 - nf_ct_put(master_ct); 1302 + err = 0; 1303 + nf_conntrack_get(&ct->ct_general); 1304 + spin_unlock_bh(&nf_conntrack_lock); 1305 + ctnetlink_event_report(ct, 1306 + NETLINK_CB(skb).pid, 1307 + nlmsg_report(nlh)); 1308 + nf_ct_put(ct); 1309 + } else 1310 + spin_unlock_bh(&nf_conntrack_lock); 1330 1311 1331 
1312 return err; 1332 1313 } ··· 1324 1331 err = -EEXIST; 1325 1332 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { 1326 1333 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 1327 - 1328 - /* we only allow nat config for new conntracks */ 1329 - if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1330 - err = -EOPNOTSUPP; 1331 - goto out_unlock; 1332 - } 1333 - /* can't link an existing conntrack to a master */ 1334 - if (cda[CTA_TUPLE_MASTER]) { 1335 - err = -EOPNOTSUPP; 1336 - goto out_unlock; 1337 - } 1338 1334 1339 1335 err = ctnetlink_change_conntrack(ct, cda); 1340 1336 if (err == 0) { ··· 1515 1533 nla_put_failure: 1516 1534 rcu_read_unlock(); 1517 1535 nlmsg_failure: 1536 + nfnetlink_set_err(0, 0, -ENOBUFS); 1518 1537 kfree_skb(skb); 1519 1538 return NOTIFY_DONE; 1520 1539 }
-21
net/netfilter/nf_conntrack_proto.c
··· 74 74 75 75 /* this is guaranteed to always return a valid protocol helper, since 76 76 * it falls back to generic_protocol */ 77 - struct nf_conntrack_l4proto * 78 - nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto) 79 - { 80 - struct nf_conntrack_l4proto *p; 81 - 82 - rcu_read_lock(); 83 - p = __nf_ct_l4proto_find(l3proto, l4proto); 84 - if (!try_module_get(p->me)) 85 - p = &nf_conntrack_l4proto_generic; 86 - rcu_read_unlock(); 87 - 88 - return p; 89 - } 90 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get); 91 - 92 - void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p) 93 - { 94 - module_put(p->me); 95 - } 96 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_put); 97 - 98 77 struct nf_conntrack_l3proto * 99 78 nf_ct_l3proto_find_get(u_int16_t l3proto) 100 79 {
+108 -37
net/netfilter/nf_conntrack_proto_dccp.c
··· 16 16 #include <linux/skbuff.h> 17 17 #include <linux/dccp.h> 18 18 19 + #include <net/net_namespace.h> 20 + #include <net/netns/generic.h> 21 + 19 22 #include <linux/netfilter/nfnetlink_conntrack.h> 20 23 #include <net/netfilter/nf_conntrack.h> 21 24 #include <net/netfilter/nf_conntrack_l4proto.h> 22 25 #include <net/netfilter/nf_log.h> 23 26 24 27 static DEFINE_RWLOCK(dccp_lock); 25 - 26 - static int nf_ct_dccp_loose __read_mostly = 1; 27 28 28 29 /* Timeouts are based on values from RFC4340: 29 30 * ··· 72 71 */ 73 72 74 73 #define DCCP_MSL (2 * 60 * HZ) 75 - 76 - static unsigned int dccp_timeout[CT_DCCP_MAX + 1] __read_mostly = { 77 - [CT_DCCP_REQUEST] = 2 * DCCP_MSL, 78 - [CT_DCCP_RESPOND] = 4 * DCCP_MSL, 79 - [CT_DCCP_PARTOPEN] = 4 * DCCP_MSL, 80 - [CT_DCCP_OPEN] = 12 * 3600 * HZ, 81 - [CT_DCCP_CLOSEREQ] = 64 * HZ, 82 - [CT_DCCP_CLOSING] = 64 * HZ, 83 - [CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL, 84 - }; 85 74 86 75 static const char * const dccp_state_names[] = { 87 76 [CT_DCCP_NONE] = "NONE", ··· 384 393 }, 385 394 }; 386 395 396 + /* this module per-net specifics */ 397 + static int dccp_net_id; 398 + struct dccp_net { 399 + int dccp_loose; 400 + unsigned int dccp_timeout[CT_DCCP_MAX + 1]; 401 + #ifdef CONFIG_SYSCTL 402 + struct ctl_table_header *sysctl_header; 403 + struct ctl_table *sysctl_table; 404 + #endif 405 + }; 406 + 407 + static inline struct dccp_net *dccp_pernet(struct net *net) 408 + { 409 + return net_generic(net, dccp_net_id); 410 + } 411 + 387 412 static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 388 413 struct nf_conntrack_tuple *tuple) 389 414 { ··· 426 419 unsigned int dataoff) 427 420 { 428 421 struct net *net = nf_ct_net(ct); 422 + struct dccp_net *dn; 429 423 struct dccp_hdr _dh, *dh; 430 424 const char *msg; 431 425 u_int8_t state; ··· 437 429 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE]; 438 430 switch (state) { 439 431 default: 440 - if (nf_ct_dccp_loose == 0) { 432 + dn = 
dccp_pernet(net); 433 + if (dn->dccp_loose == 0) { 441 434 msg = "nf_ct_dccp: not picking up existing connection "; 442 435 goto out_invalid; 443 436 } ··· 474 465 u_int8_t pf, unsigned int hooknum) 475 466 { 476 467 struct net *net = nf_ct_net(ct); 468 + struct dccp_net *dn; 477 469 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 478 470 struct dccp_hdr _dh, *dh; 479 471 u_int8_t type, old_state, new_state; ··· 552 542 ct->proto.dccp.last_pkt = type; 553 543 ct->proto.dccp.state = new_state; 554 544 write_unlock_bh(&dccp_lock); 555 - nf_ct_refresh_acct(ct, ctinfo, skb, dccp_timeout[new_state]); 545 + 546 + dn = dccp_pernet(net); 547 + nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]); 556 548 557 549 return NF_ACCEPT; 558 550 } ··· 672 660 #endif 673 661 674 662 #ifdef CONFIG_SYSCTL 675 - static unsigned int dccp_sysctl_table_users; 676 - static struct ctl_table_header *dccp_sysctl_header; 677 - static ctl_table dccp_sysctl_table[] = { 663 + /* template, data assigned later */ 664 + static struct ctl_table dccp_sysctl_table[] = { 678 665 { 679 666 .ctl_name = CTL_UNNUMBERED, 680 667 .procname = "nf_conntrack_dccp_timeout_request", 681 - .data = &dccp_timeout[CT_DCCP_REQUEST], 682 668 .maxlen = sizeof(unsigned int), 683 669 .mode = 0644, 684 670 .proc_handler = proc_dointvec_jiffies, ··· 684 674 { 685 675 .ctl_name = CTL_UNNUMBERED, 686 676 .procname = "nf_conntrack_dccp_timeout_respond", 687 - .data = &dccp_timeout[CT_DCCP_RESPOND], 688 677 .maxlen = sizeof(unsigned int), 689 678 .mode = 0644, 690 679 .proc_handler = proc_dointvec_jiffies, ··· 691 682 { 692 683 .ctl_name = CTL_UNNUMBERED, 693 684 .procname = "nf_conntrack_dccp_timeout_partopen", 694 - .data = &dccp_timeout[CT_DCCP_PARTOPEN], 695 685 .maxlen = sizeof(unsigned int), 696 686 .mode = 0644, 697 687 .proc_handler = proc_dointvec_jiffies, ··· 698 690 { 699 691 .ctl_name = CTL_UNNUMBERED, 700 692 .procname = "nf_conntrack_dccp_timeout_open", 701 - .data = &dccp_timeout[CT_DCCP_OPEN], 702 
693 .maxlen = sizeof(unsigned int), 703 694 .mode = 0644, 704 695 .proc_handler = proc_dointvec_jiffies, ··· 705 698 { 706 699 .ctl_name = CTL_UNNUMBERED, 707 700 .procname = "nf_conntrack_dccp_timeout_closereq", 708 - .data = &dccp_timeout[CT_DCCP_CLOSEREQ], 709 701 .maxlen = sizeof(unsigned int), 710 702 .mode = 0644, 711 703 .proc_handler = proc_dointvec_jiffies, ··· 712 706 { 713 707 .ctl_name = CTL_UNNUMBERED, 714 708 .procname = "nf_conntrack_dccp_timeout_closing", 715 - .data = &dccp_timeout[CT_DCCP_CLOSING], 716 709 .maxlen = sizeof(unsigned int), 717 710 .mode = 0644, 718 711 .proc_handler = proc_dointvec_jiffies, ··· 719 714 { 720 715 .ctl_name = CTL_UNNUMBERED, 721 716 .procname = "nf_conntrack_dccp_timeout_timewait", 722 - .data = &dccp_timeout[CT_DCCP_TIMEWAIT], 723 717 .maxlen = sizeof(unsigned int), 724 718 .mode = 0644, 725 719 .proc_handler = proc_dointvec_jiffies, ··· 726 722 { 727 723 .ctl_name = CTL_UNNUMBERED, 728 724 .procname = "nf_conntrack_dccp_loose", 729 - .data = &nf_ct_dccp_loose, 730 - .maxlen = sizeof(nf_ct_dccp_loose), 725 + .maxlen = sizeof(int), 731 726 .mode = 0644, 732 727 .proc_handler = proc_dointvec, 733 728 }, ··· 754 751 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 755 752 .nla_policy = nf_ct_port_nla_policy, 756 753 #endif 757 - #ifdef CONFIG_SYSCTL 758 - .ctl_table_users = &dccp_sysctl_table_users, 759 - .ctl_table_header = &dccp_sysctl_header, 760 - .ctl_table = dccp_sysctl_table, 761 - #endif 762 754 }; 763 755 764 756 static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { ··· 774 776 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 775 777 .nla_policy = nf_ct_port_nla_policy, 776 778 #endif 779 + }; 780 + 781 + static __net_init int dccp_net_init(struct net *net) 782 + { 783 + struct dccp_net *dn; 784 + int err; 785 + 786 + dn = kmalloc(sizeof(*dn), GFP_KERNEL); 787 + if (!dn) 788 + return -ENOMEM; 789 + 790 + /* default values */ 791 + dn->dccp_loose = 1; 792 + dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * 
DCCP_MSL; 793 + dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL; 794 + dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL; 795 + dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ; 796 + dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; 797 + dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 798 + dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 799 + 800 + err = net_assign_generic(net, dccp_net_id, dn); 801 + if (err) 802 + goto out; 803 + 777 804 #ifdef CONFIG_SYSCTL 778 - .ctl_table_users = &dccp_sysctl_table_users, 779 - .ctl_table_header = &dccp_sysctl_header, 780 - .ctl_table = dccp_sysctl_table, 805 + err = -ENOMEM; 806 + dn->sysctl_table = kmemdup(dccp_sysctl_table, 807 + sizeof(dccp_sysctl_table), GFP_KERNEL); 808 + if (!dn->sysctl_table) 809 + goto out; 810 + 811 + dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST]; 812 + dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND]; 813 + dn->sysctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN]; 814 + dn->sysctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN]; 815 + dn->sysctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ]; 816 + dn->sysctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING]; 817 + dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; 818 + dn->sysctl_table[7].data = &dn->dccp_loose; 819 + 820 + dn->sysctl_header = register_net_sysctl_table(net, 821 + nf_net_netfilter_sysctl_path, dn->sysctl_table); 822 + if (!dn->sysctl_header) { 823 + kfree(dn->sysctl_table); 824 + goto out; 825 + } 781 826 #endif 827 + 828 + return 0; 829 + 830 + out: 831 + kfree(dn); 832 + return err; 833 + } 834 + 835 + static __net_exit void dccp_net_exit(struct net *net) 836 + { 837 + struct dccp_net *dn = dccp_pernet(net); 838 + #ifdef CONFIG_SYSCTL 839 + unregister_net_sysctl_table(dn->sysctl_header); 840 + kfree(dn->sysctl_table); 841 + #endif 842 + kfree(dn); 843 + 844 + net_assign_generic(net, dccp_net_id, NULL); 845 + } 846 + 847 + static struct pernet_operations dccp_net_ops = { 848 + 
.init = dccp_net_init, 849 + .exit = dccp_net_exit, 782 850 }; 783 851 784 852 static int __init nf_conntrack_proto_dccp_init(void) 785 853 { 786 854 int err; 787 855 788 - err = nf_conntrack_l4proto_register(&dccp_proto4); 856 + err = register_pernet_gen_subsys(&dccp_net_id, &dccp_net_ops); 789 857 if (err < 0) 790 858 goto err1; 791 859 792 - err = nf_conntrack_l4proto_register(&dccp_proto6); 860 + err = nf_conntrack_l4proto_register(&dccp_proto4); 793 861 if (err < 0) 794 862 goto err2; 863 + 864 + err = nf_conntrack_l4proto_register(&dccp_proto6); 865 + if (err < 0) 866 + goto err3; 795 867 return 0; 796 868 797 - err2: 869 + err3: 798 870 nf_conntrack_l4proto_unregister(&dccp_proto4); 871 + err2: 872 + unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops); 799 873 err1: 800 874 return err; 801 875 } 802 876 803 877 static void __exit nf_conntrack_proto_dccp_fini(void) 804 878 { 879 + unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops); 805 880 nf_conntrack_l4proto_unregister(&dccp_proto6); 806 881 nf_conntrack_l4proto_unregister(&dccp_proto4); 807 882 }
+1 -1
net/netfilter/nf_conntrack_proto_generic.c
··· 92 92 struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly = 93 93 { 94 94 .l3proto = PF_UNSPEC, 95 - .l4proto = 0, 95 + .l4proto = 255, 96 96 .name = "unknown", 97 97 .pkt_to_tuple = generic_pkt_to_tuple, 98 98 .invert_tuple = generic_invert_tuple,
+2
net/netfilter/nf_conntrack_proto_tcp.c
··· 25 25 #include <net/netfilter/nf_conntrack_l4proto.h> 26 26 #include <net/netfilter/nf_conntrack_ecache.h> 27 27 #include <net/netfilter/nf_log.h> 28 + #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 29 + #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 28 30 29 31 /* Protects ct->proto.tcp */ 30 32 static DEFINE_RWLOCK(tcp_lock);
+2
net/netfilter/nf_conntrack_proto_udp.c
··· 22 22 #include <net/netfilter/nf_conntrack_l4proto.h> 23 23 #include <net/netfilter/nf_conntrack_ecache.h> 24 24 #include <net/netfilter/nf_log.h> 25 + #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 26 + #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 25 27 26 28 static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; 27 29 static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
+170 -33
net/netfilter/nf_log.c
··· 14 14 LOG target modules */ 15 15 16 16 #define NF_LOG_PREFIXLEN 128 17 + #define NFLOGGER_NAME_LEN 64 17 18 18 19 static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly; 20 + static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly; 19 21 static DEFINE_MUTEX(nf_log_mutex); 20 22 21 - /* return EBUSY if somebody else is registered, EEXIST if the same logger 22 - * is registred, 0 on success. */ 23 - int nf_log_register(u_int8_t pf, const struct nf_logger *logger) 23 + static struct nf_logger *__find_logger(int pf, const char *str_logger) 24 24 { 25 - int ret; 25 + struct nf_logger *t; 26 + 27 + list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) { 28 + if (!strnicmp(str_logger, t->name, strlen(t->name))) 29 + return t; 30 + } 31 + 32 + return NULL; 33 + } 34 + 35 + /* return EEXIST if the same logger is registred, 0 on success. */ 36 + int nf_log_register(u_int8_t pf, struct nf_logger *logger) 37 + { 38 + const struct nf_logger *llog; 26 39 27 40 if (pf >= ARRAY_SIZE(nf_loggers)) 28 41 return -EINVAL; 29 42 30 - /* Any setup of logging members must be done before 31 - * substituting pointer. 
*/ 32 - ret = mutex_lock_interruptible(&nf_log_mutex); 33 - if (ret < 0) 34 - return ret; 43 + mutex_lock(&nf_log_mutex); 35 44 36 - if (!nf_loggers[pf]) 37 - rcu_assign_pointer(nf_loggers[pf], logger); 38 - else if (nf_loggers[pf] == logger) 39 - ret = -EEXIST; 40 - else 41 - ret = -EBUSY; 45 + if (pf == NFPROTO_UNSPEC) { 46 + int i; 47 + for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) 48 + list_add_tail(&(logger->list[i]), &(nf_loggers_l[i])); 49 + } else { 50 + /* register at end of list to honor first register win */ 51 + list_add_tail(&logger->list[pf], &nf_loggers_l[pf]); 52 + llog = rcu_dereference(nf_loggers[pf]); 53 + if (llog == NULL) 54 + rcu_assign_pointer(nf_loggers[pf], logger); 55 + } 42 56 43 57 mutex_unlock(&nf_log_mutex); 44 - return ret; 58 + 59 + return 0; 45 60 } 46 61 EXPORT_SYMBOL(nf_log_register); 47 62 48 - void nf_log_unregister_pf(u_int8_t pf) 63 + void nf_log_unregister(struct nf_logger *logger) 49 64 { 50 - if (pf >= ARRAY_SIZE(nf_loggers)) 51 - return; 52 - mutex_lock(&nf_log_mutex); 53 - rcu_assign_pointer(nf_loggers[pf], NULL); 54 - mutex_unlock(&nf_log_mutex); 55 - 56 - /* Give time to concurrent readers. 
*/ 57 - synchronize_rcu(); 58 - } 59 - EXPORT_SYMBOL(nf_log_unregister_pf); 60 - 61 - void nf_log_unregister(const struct nf_logger *logger) 62 - { 65 + const struct nf_logger *c_logger; 63 66 int i; 64 67 65 68 mutex_lock(&nf_log_mutex); 66 69 for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) { 67 - if (nf_loggers[i] == logger) 70 + c_logger = rcu_dereference(nf_loggers[i]); 71 + if (c_logger == logger) 68 72 rcu_assign_pointer(nf_loggers[i], NULL); 73 + list_del(&logger->list[i]); 69 74 } 70 75 mutex_unlock(&nf_log_mutex); 71 76 72 77 synchronize_rcu(); 73 78 } 74 79 EXPORT_SYMBOL(nf_log_unregister); 80 + 81 + int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 82 + { 83 + mutex_lock(&nf_log_mutex); 84 + if (__find_logger(pf, logger->name) == NULL) { 85 + mutex_unlock(&nf_log_mutex); 86 + return -ENOENT; 87 + } 88 + rcu_assign_pointer(nf_loggers[pf], logger); 89 + mutex_unlock(&nf_log_mutex); 90 + return 0; 91 + } 92 + EXPORT_SYMBOL(nf_log_bind_pf); 93 + 94 + void nf_log_unbind_pf(u_int8_t pf) 95 + { 96 + mutex_lock(&nf_log_mutex); 97 + rcu_assign_pointer(nf_loggers[pf], NULL); 98 + mutex_unlock(&nf_log_mutex); 99 + } 100 + EXPORT_SYMBOL(nf_log_unbind_pf); 75 101 76 102 void nf_log_packet(u_int8_t pf, 77 103 unsigned int hooknum, ··· 155 129 { 156 130 loff_t *pos = v; 157 131 const struct nf_logger *logger; 132 + struct nf_logger *t; 133 + int ret; 158 134 159 135 logger = rcu_dereference(nf_loggers[*pos]); 160 136 161 137 if (!logger) 162 - return seq_printf(s, "%2lld NONE\n", *pos); 138 + ret = seq_printf(s, "%2lld NONE (", *pos); 139 + else 140 + ret = seq_printf(s, "%2lld %s (", *pos, logger->name); 163 141 164 - return seq_printf(s, "%2lld %s\n", *pos, logger->name); 142 + if (ret < 0) 143 + return ret; 144 + 145 + mutex_lock(&nf_log_mutex); 146 + list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) { 147 + ret = seq_printf(s, "%s", t->name); 148 + if (ret < 0) { 149 + mutex_unlock(&nf_log_mutex); 150 + return ret; 151 + } 152 + if (&t->list[*pos] 
!= nf_loggers_l[*pos].prev) { 153 + ret = seq_printf(s, ","); 154 + if (ret < 0) { 155 + mutex_unlock(&nf_log_mutex); 156 + return ret; 157 + } 158 + } 159 + } 160 + mutex_unlock(&nf_log_mutex); 161 + 162 + return seq_printf(s, ")\n"); 165 163 } 166 164 167 165 static const struct seq_operations nflog_seq_ops = { ··· 208 158 .release = seq_release, 209 159 }; 210 160 161 + 211 162 #endif /* PROC_FS */ 212 163 164 + #ifdef CONFIG_SYSCTL 165 + struct ctl_path nf_log_sysctl_path[] = { 166 + { .procname = "net", .ctl_name = CTL_NET, }, 167 + { .procname = "netfilter", .ctl_name = NET_NETFILTER, }, 168 + { .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, }, 169 + { } 170 + }; 171 + 172 + static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; 173 + static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; 174 + static struct ctl_table_header *nf_log_dir_header; 175 + 176 + static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp, 177 + void *buffer, size_t *lenp, loff_t *ppos) 178 + { 179 + const struct nf_logger *logger; 180 + int r = 0; 181 + int tindex = (unsigned long)table->extra1; 182 + 183 + if (write) { 184 + if (!strcmp(buffer, "NONE")) { 185 + nf_log_unbind_pf(tindex); 186 + return 0; 187 + } 188 + mutex_lock(&nf_log_mutex); 189 + logger = __find_logger(tindex, buffer); 190 + if (logger == NULL) { 191 + mutex_unlock(&nf_log_mutex); 192 + return -ENOENT; 193 + } 194 + rcu_assign_pointer(nf_loggers[tindex], logger); 195 + mutex_unlock(&nf_log_mutex); 196 + } else { 197 + rcu_read_lock(); 198 + logger = rcu_dereference(nf_loggers[tindex]); 199 + if (!logger) 200 + table->data = "NONE"; 201 + else 202 + table->data = logger->name; 203 + r = proc_dostring(table, write, filp, buffer, lenp, ppos); 204 + rcu_read_unlock(); 205 + } 206 + 207 + return r; 208 + } 209 + 210 + static __init int netfilter_log_sysctl_init(void) 211 + { 212 + int i; 213 + 214 + for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { 215 + 
snprintf(nf_log_sysctl_fnames[i-NFPROTO_UNSPEC], 3, "%d", i); 216 + nf_log_sysctl_table[i].ctl_name = CTL_UNNUMBERED; 217 + nf_log_sysctl_table[i].procname = 218 + nf_log_sysctl_fnames[i-NFPROTO_UNSPEC]; 219 + nf_log_sysctl_table[i].data = NULL; 220 + nf_log_sysctl_table[i].maxlen = 221 + NFLOGGER_NAME_LEN * sizeof(char); 222 + nf_log_sysctl_table[i].mode = 0644; 223 + nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; 224 + nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; 225 + } 226 + 227 + nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path, 228 + nf_log_sysctl_table); 229 + if (!nf_log_dir_header) 230 + return -ENOMEM; 231 + 232 + return 0; 233 + } 234 + #else 235 + static __init int netfilter_log_sysctl_init(void) 236 + { 237 + return 0; 238 + } 239 + #endif /* CONFIG_SYSCTL */ 213 240 214 241 int __init netfilter_log_init(void) 215 242 { 243 + int i, r; 216 244 #ifdef CONFIG_PROC_FS 217 245 if (!proc_create("nf_log", S_IRUGO, 218 246 proc_net_netfilter, &nflog_file_ops)) 219 247 return -1; 220 248 #endif 249 + 250 + /* Errors will trigger panic, unroll on error is unnecessary. */ 251 + r = netfilter_log_sysctl_init(); 252 + if (r < 0) 253 + return r; 254 + 255 + for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) 256 + INIT_LIST_HEAD(&(nf_loggers_l[i])); 257 + 221 258 return 0; 222 259 }
+6
net/netfilter/nfnetlink.c
··· 113 113 } 114 114 EXPORT_SYMBOL_GPL(nfnetlink_send); 115 115 116 + void nfnetlink_set_err(u32 pid, u32 group, int error) 117 + { 118 + netlink_set_err(nfnl, pid, group, error); 119 + } 120 + EXPORT_SYMBOL_GPL(nfnetlink_set_err); 121 + 116 122 int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags) 117 123 { 118 124 return netlink_unicast(nfnl, skb, pid, flags);
+21 -5
net/netfilter/x_tables.c
··· 625 625 } 626 626 EXPORT_SYMBOL(xt_free_table_info); 627 627 628 + void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo, 629 + struct xt_table_info *newinfo) 630 + { 631 + unsigned int cpu; 632 + 633 + for_each_possible_cpu(cpu) { 634 + void *p = oldinfo->entries[cpu]; 635 + rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]); 636 + newinfo->entries[cpu] = p; 637 + } 638 + 639 + } 640 + EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu); 641 + 628 642 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ 629 643 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, 630 644 const char *name) ··· 685 671 struct xt_table_info *oldinfo, *private; 686 672 687 673 /* Do the substitution. */ 688 - write_lock_bh(&table->lock); 674 + mutex_lock(&table->lock); 689 675 private = table->private; 690 676 /* Check inside lock: is the old number correct? */ 691 677 if (num_counters != private->number) { 692 678 duprintf("num_counters != table->private->number (%u/%u)\n", 693 679 num_counters, private->number); 694 - write_unlock_bh(&table->lock); 680 + mutex_unlock(&table->lock); 695 681 *error = -EAGAIN; 696 682 return NULL; 697 683 } 698 684 oldinfo = private; 699 - table->private = newinfo; 685 + rcu_assign_pointer(table->private, newinfo); 700 686 newinfo->initial_entries = oldinfo->initial_entries; 701 - write_unlock_bh(&table->lock); 687 + mutex_unlock(&table->lock); 702 688 689 + synchronize_net(); 703 690 return oldinfo; 704 691 } 705 692 EXPORT_SYMBOL_GPL(xt_replace_table); ··· 734 719 735 720 /* Simplifies replace_table code. */ 736 721 table->private = bootstrap; 737 - rwlock_init(&table->lock); 722 + mutex_init(&table->lock); 723 + 738 724 if (!xt_replace_table(table, 0, newinfo, &ret)) 739 725 goto unlock; 740 726
+171
net/netfilter/xt_HL.c
··· 1 + /* 2 + * TTL modification target for IP tables 3 + * (C) 2000,2005 by Harald Welte <laforge@netfilter.org> 4 + * 5 + * Hop Limit modification target for ip6tables 6 + * Maciej Soltysiak <solt@dns.toxicfilms.tv> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/module.h> 14 + #include <linux/skbuff.h> 15 + #include <linux/ip.h> 16 + #include <linux/ipv6.h> 17 + #include <net/checksum.h> 18 + 19 + #include <linux/netfilter/x_tables.h> 20 + #include <linux/netfilter_ipv4/ipt_TTL.h> 21 + #include <linux/netfilter_ipv6/ip6t_HL.h> 22 + 23 + MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 24 + MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 25 + MODULE_DESCRIPTION("Xtables: Hoplimit/TTL Limit field modification target"); 26 + MODULE_LICENSE("GPL"); 27 + 28 + static unsigned int 29 + ttl_tg(struct sk_buff *skb, const struct xt_target_param *par) 30 + { 31 + struct iphdr *iph; 32 + const struct ipt_TTL_info *info = par->targinfo; 33 + int new_ttl; 34 + 35 + if (!skb_make_writable(skb, skb->len)) 36 + return NF_DROP; 37 + 38 + iph = ip_hdr(skb); 39 + 40 + switch (info->mode) { 41 + case IPT_TTL_SET: 42 + new_ttl = info->ttl; 43 + break; 44 + case IPT_TTL_INC: 45 + new_ttl = iph->ttl + info->ttl; 46 + if (new_ttl > 255) 47 + new_ttl = 255; 48 + break; 49 + case IPT_TTL_DEC: 50 + new_ttl = iph->ttl - info->ttl; 51 + if (new_ttl < 0) 52 + new_ttl = 0; 53 + break; 54 + default: 55 + new_ttl = iph->ttl; 56 + break; 57 + } 58 + 59 + if (new_ttl != iph->ttl) { 60 + csum_replace2(&iph->check, htons(iph->ttl << 8), 61 + htons(new_ttl << 8)); 62 + iph->ttl = new_ttl; 63 + } 64 + 65 + return XT_CONTINUE; 66 + } 67 + 68 + static unsigned int 69 + hl_tg6(struct sk_buff *skb, const struct xt_target_param *par) 70 + { 71 + struct ipv6hdr *ip6h; 72 + const struct ip6t_HL_info 
*info = par->targinfo; 73 + int new_hl; 74 + 75 + if (!skb_make_writable(skb, skb->len)) 76 + return NF_DROP; 77 + 78 + ip6h = ipv6_hdr(skb); 79 + 80 + switch (info->mode) { 81 + case IP6T_HL_SET: 82 + new_hl = info->hop_limit; 83 + break; 84 + case IP6T_HL_INC: 85 + new_hl = ip6h->hop_limit + info->hop_limit; 86 + if (new_hl > 255) 87 + new_hl = 255; 88 + break; 89 + case IP6T_HL_DEC: 90 + new_hl = ip6h->hop_limit - info->hop_limit; 91 + if (new_hl < 0) 92 + new_hl = 0; 93 + break; 94 + default: 95 + new_hl = ip6h->hop_limit; 96 + break; 97 + } 98 + 99 + ip6h->hop_limit = new_hl; 100 + 101 + return XT_CONTINUE; 102 + } 103 + 104 + static bool ttl_tg_check(const struct xt_tgchk_param *par) 105 + { 106 + const struct ipt_TTL_info *info = par->targinfo; 107 + 108 + if (info->mode > IPT_TTL_MAXMODE) { 109 + printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", 110 + info->mode); 111 + return false; 112 + } 113 + if (info->mode != IPT_TTL_SET && info->ttl == 0) 114 + return false; 115 + return true; 116 + } 117 + 118 + static bool hl_tg6_check(const struct xt_tgchk_param *par) 119 + { 120 + const struct ip6t_HL_info *info = par->targinfo; 121 + 122 + if (info->mode > IP6T_HL_MAXMODE) { 123 + printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n", 124 + info->mode); 125 + return false; 126 + } 127 + if (info->mode != IP6T_HL_SET && info->hop_limit == 0) { 128 + printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't " 129 + "make sense with value 0\n"); 130 + return false; 131 + } 132 + return true; 133 + } 134 + 135 + static struct xt_target hl_tg_reg[] __read_mostly = { 136 + { 137 + .name = "TTL", 138 + .revision = 0, 139 + .family = NFPROTO_IPV4, 140 + .target = ttl_tg, 141 + .targetsize = sizeof(struct ipt_TTL_info), 142 + .table = "mangle", 143 + .checkentry = ttl_tg_check, 144 + .me = THIS_MODULE, 145 + }, 146 + { 147 + .name = "HL", 148 + .revision = 0, 149 + .family = NFPROTO_IPV6, 150 + .target = hl_tg6, 151 + .targetsize = sizeof(struct 
ip6t_HL_info), 152 + .table = "mangle", 153 + .checkentry = hl_tg6_check, 154 + .me = THIS_MODULE, 155 + }, 156 + }; 157 + 158 + static int __init hl_tg_init(void) 159 + { 160 + return xt_register_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg)); 161 + } 162 + 163 + static void __exit hl_tg_exit(void) 164 + { 165 + xt_unregister_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg)); 166 + } 167 + 168 + module_init(hl_tg_init); 169 + module_exit(hl_tg_exit); 170 + MODULE_ALIAS("ipt_TTL"); 171 + MODULE_ALIAS("ip6t_HL");
+161
net/netfilter/xt_LED.c
··· 1 + /* 2 + * xt_LED.c - netfilter target to make LEDs blink upon packet matches 3 + * 4 + * Copyright (C) 2008 Adam Nielsen <a.nielsen@shikadi.net> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; version 2 of the License. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 + * 02110-1301 USA. 19 + * 20 + */ 21 + 22 + #include <linux/module.h> 23 + #include <linux/skbuff.h> 24 + #include <linux/netfilter/x_tables.h> 25 + #include <linux/leds.h> 26 + #include <linux/mutex.h> 27 + 28 + #include <linux/netfilter/xt_LED.h> 29 + 30 + MODULE_LICENSE("GPL"); 31 + MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>"); 32 + MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match"); 33 + 34 + /* 35 + * This is declared in here (the kernel module) only, to avoid having these 36 + * dependencies in userspace code. This is what xt_led_info.internal_data 37 + * points to. 38 + */ 39 + struct xt_led_info_internal { 40 + struct led_trigger netfilter_led_trigger; 41 + struct timer_list timer; 42 + }; 43 + 44 + static unsigned int 45 + led_tg(struct sk_buff *skb, const struct xt_target_param *par) 46 + { 47 + const struct xt_led_info *ledinfo = par->targinfo; 48 + struct xt_led_info_internal *ledinternal = ledinfo->internal_data; 49 + 50 + /* 51 + * If "always blink" is enabled, and there's still some time until the 52 + * LED will switch off, briefly switch it off now. 
53 + */ 54 + if ((ledinfo->delay > 0) && ledinfo->always_blink && 55 + timer_pending(&ledinternal->timer)) 56 + led_trigger_event(&ledinternal->netfilter_led_trigger,LED_OFF); 57 + 58 + led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL); 59 + 60 + /* If there's a positive delay, start/update the timer */ 61 + if (ledinfo->delay > 0) { 62 + mod_timer(&ledinternal->timer, 63 + jiffies + msecs_to_jiffies(ledinfo->delay)); 64 + 65 + /* Otherwise if there was no delay given, blink as fast as possible */ 66 + } else if (ledinfo->delay == 0) { 67 + led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); 68 + } 69 + 70 + /* else the delay is negative, which means switch on and stay on */ 71 + 72 + return XT_CONTINUE; 73 + } 74 + 75 + static void led_timeout_callback(unsigned long data) 76 + { 77 + struct xt_led_info *ledinfo = (struct xt_led_info *)data; 78 + struct xt_led_info_internal *ledinternal = ledinfo->internal_data; 79 + 80 + led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); 81 + } 82 + 83 + static bool led_tg_check(const struct xt_tgchk_param *par) 84 + { 85 + struct xt_led_info *ledinfo = par->targinfo; 86 + struct xt_led_info_internal *ledinternal; 87 + int err; 88 + 89 + if (ledinfo->id[0] == '\0') { 90 + printk(KERN_ERR KBUILD_MODNAME ": No 'id' parameter given.\n"); 91 + return false; 92 + } 93 + 94 + ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL); 95 + if (!ledinternal) { 96 + printk(KERN_CRIT KBUILD_MODNAME ": out of memory\n"); 97 + return false; 98 + } 99 + 100 + ledinternal->netfilter_led_trigger.name = ledinfo->id; 101 + 102 + err = led_trigger_register(&ledinternal->netfilter_led_trigger); 103 + if (err) { 104 + printk(KERN_CRIT KBUILD_MODNAME 105 + ": led_trigger_register() failed\n"); 106 + if (err == -EEXIST) 107 + printk(KERN_ERR KBUILD_MODNAME 108 + ": Trigger name is already in use.\n"); 109 + goto exit_alloc; 110 + } 111 + 112 + /* See if we need to set up a timer */ 113 + if 
(ledinfo->delay > 0) 114 + setup_timer(&ledinternal->timer, led_timeout_callback, 115 + (unsigned long)ledinfo); 116 + 117 + ledinfo->internal_data = ledinternal; 118 + 119 + return true; 120 + 121 + exit_alloc: 122 + kfree(ledinternal); 123 + 124 + return false; 125 + } 126 + 127 + static void led_tg_destroy(const struct xt_tgdtor_param *par) 128 + { 129 + const struct xt_led_info *ledinfo = par->targinfo; 130 + struct xt_led_info_internal *ledinternal = ledinfo->internal_data; 131 + 132 + if (ledinfo->delay > 0) 133 + del_timer_sync(&ledinternal->timer); 134 + 135 + led_trigger_unregister(&ledinternal->netfilter_led_trigger); 136 + kfree(ledinternal); 137 + } 138 + 139 + static struct xt_target led_tg_reg __read_mostly = { 140 + .name = "LED", 141 + .revision = 0, 142 + .family = NFPROTO_UNSPEC, 143 + .target = led_tg, 144 + .targetsize = XT_ALIGN(sizeof(struct xt_led_info)), 145 + .checkentry = led_tg_check, 146 + .destroy = led_tg_destroy, 147 + .me = THIS_MODULE, 148 + }; 149 + 150 + static int __init led_tg_init(void) 151 + { 152 + return xt_register_target(&led_tg_reg); 153 + } 154 + 155 + static void __exit led_tg_exit(void) 156 + { 157 + xt_unregister_target(&led_tg_reg); 158 + } 159 + 160 + module_init(led_tg_init); 161 + module_exit(led_tg_exit);
+164
net/netfilter/xt_cluster.c
··· 1 + /* 2 + * (C) 2008-2009 Pablo Neira Ayuso <pablo@netfilter.org> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + #include <linux/module.h> 9 + #include <linux/skbuff.h> 10 + #include <linux/jhash.h> 11 + #include <linux/ip.h> 12 + #include <net/ipv6.h> 13 + 14 + #include <linux/netfilter/x_tables.h> 15 + #include <net/netfilter/nf_conntrack.h> 16 + #include <linux/netfilter/xt_cluster.h> 17 + 18 + static inline u_int32_t nf_ct_orig_ipv4_src(const struct nf_conn *ct) 19 + { 20 + return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; 21 + } 22 + 23 + static inline const void *nf_ct_orig_ipv6_src(const struct nf_conn *ct) 24 + { 25 + return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6; 26 + } 27 + 28 + static inline u_int32_t 29 + xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info) 30 + { 31 + return jhash_1word(ip, info->hash_seed); 32 + } 33 + 34 + static inline u_int32_t 35 + xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info) 36 + { 37 + return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed); 38 + } 39 + 40 + static inline u_int32_t 41 + xt_cluster_hash(const struct nf_conn *ct, 42 + const struct xt_cluster_match_info *info) 43 + { 44 + u_int32_t hash = 0; 45 + 46 + switch(nf_ct_l3num(ct)) { 47 + case AF_INET: 48 + hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info); 49 + break; 50 + case AF_INET6: 51 + hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info); 52 + break; 53 + default: 54 + WARN_ON(1); 55 + break; 56 + } 57 + return (((u64)hash * info->total_nodes) >> 32); 58 + } 59 + 60 + static inline bool 61 + xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family) 62 + { 63 + bool is_multicast = false; 64 + 65 + switch(family) { 66 + case NFPROTO_IPV4: 67 + is_multicast = 
ipv4_is_multicast(ip_hdr(skb)->daddr); 68 + break; 69 + case NFPROTO_IPV6: 70 + is_multicast = ipv6_addr_type(&ipv6_hdr(skb)->daddr) & 71 + IPV6_ADDR_MULTICAST; 72 + break; 73 + default: 74 + WARN_ON(1); 75 + break; 76 + } 77 + return is_multicast; 78 + } 79 + 80 + static bool 81 + xt_cluster_mt(const struct sk_buff *skb, const struct xt_match_param *par) 82 + { 83 + struct sk_buff *pskb = (struct sk_buff *)skb; 84 + const struct xt_cluster_match_info *info = par->matchinfo; 85 + const struct nf_conn *ct; 86 + enum ip_conntrack_info ctinfo; 87 + unsigned long hash; 88 + 89 + /* This match assumes that all nodes see the same packets. This can be 90 + * achieved if the switch that connects the cluster nodes support some 91 + * sort of 'port mirroring'. However, if your switch does not support 92 + * this, your cluster nodes can reply ARP request using a multicast MAC 93 + * address. Thus, your switch will flood the same packets to the 94 + * cluster nodes with the same multicast MAC address. Using a multicast 95 + * link address is a RFC 1812 (section 3.3.2) violation, but this works 96 + * fine in practise. 97 + * 98 + * Unfortunately, if you use the multicast MAC address, the link layer 99 + * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted 100 + * by TCP and others for packets coming to this node. For that reason, 101 + * this match mangles skbuff's pkt_type if it detects a packet 102 + * addressed to a unicast address but using PACKET_MULTICAST. Yes, I 103 + * know, matches should not alter packets, but we are doing this here 104 + * because we would need to add a PKTTYPE target for this sole purpose. 
105 + */ 106 + if (!xt_cluster_is_multicast_addr(skb, par->family) && 107 + skb->pkt_type == PACKET_MULTICAST) { 108 + pskb->pkt_type = PACKET_HOST; 109 + } 110 + 111 + ct = nf_ct_get(skb, &ctinfo); 112 + if (ct == NULL) 113 + return false; 114 + 115 + if (ct == &nf_conntrack_untracked) 116 + return false; 117 + 118 + if (ct->master) 119 + hash = xt_cluster_hash(ct->master, info); 120 + else 121 + hash = xt_cluster_hash(ct, info); 122 + 123 + return !!((1 << hash) & info->node_mask) ^ 124 + !!(info->flags & XT_CLUSTER_F_INV); 125 + } 126 + 127 + static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) 128 + { 129 + struct xt_cluster_match_info *info = par->matchinfo; 130 + 131 + if (info->node_mask >= (1 << info->total_nodes)) { 132 + printk(KERN_ERR "xt_cluster: this node mask cannot be " 133 + "higher than the total number of nodes\n"); 134 + return false; 135 + } 136 + return true; 137 + } 138 + 139 + static struct xt_match xt_cluster_match __read_mostly = { 140 + .name = "cluster", 141 + .family = NFPROTO_UNSPEC, 142 + .match = xt_cluster_mt, 143 + .checkentry = xt_cluster_mt_checkentry, 144 + .matchsize = sizeof(struct xt_cluster_match_info), 145 + .me = THIS_MODULE, 146 + }; 147 + 148 + static int __init xt_cluster_mt_init(void) 149 + { 150 + return xt_register_match(&xt_cluster_match); 151 + } 152 + 153 + static void __exit xt_cluster_mt_fini(void) 154 + { 155 + xt_unregister_match(&xt_cluster_match); 156 + } 157 + 158 + MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); 159 + MODULE_LICENSE("GPL"); 160 + MODULE_DESCRIPTION("Xtables: hash-based cluster match"); 161 + MODULE_ALIAS("ipt_cluster"); 162 + MODULE_ALIAS("ip6t_cluster"); 163 + module_init(xt_cluster_mt_init); 164 + module_exit(xt_cluster_mt_fini);
+2 -5
net/netfilter/xt_hashlimit.c
··· 149 149 /* initialize hash with random val at the time we allocate 150 150 * the first hashtable entry */ 151 151 if (!ht->rnd_initialized) { 152 - get_random_bytes(&ht->rnd, 4); 152 + get_random_bytes(&ht->rnd, sizeof(ht->rnd)); 153 153 ht->rnd_initialized = 1; 154 154 } 155 155 ··· 565 565 static bool 566 566 hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) 567 567 { 568 - const struct xt_hashlimit_info *r = 569 - ((const struct xt_hashlimit_info *)par->matchinfo)->u.master; 568 + const struct xt_hashlimit_info *r = par->matchinfo; 570 569 struct xt_hashlimit_htable *hinfo = r->hinfo; 571 570 unsigned long now = jiffies; 572 571 struct dsthash_ent *dh; ··· 701 702 } 702 703 mutex_unlock(&hlimit_mutex); 703 704 704 - /* Ugly hack: For SMP, we only want to use one set */ 705 - r->u.master = r; 706 705 return true; 707 706 } 708 707
+108
net/netfilter/xt_hl.c
··· 1 + /* 2 + * IP tables module for matching the value of the TTL 3 + * (C) 2000,2001 by Harald Welte <laforge@netfilter.org> 4 + * 5 + * Hop Limit matching module 6 + * (C) 2001-2002 Maciej Soltysiak <solt@dns.toxicfilms.tv> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/ip.h> 14 + #include <linux/ipv6.h> 15 + #include <linux/module.h> 16 + #include <linux/skbuff.h> 17 + 18 + #include <linux/netfilter/x_tables.h> 19 + #include <linux/netfilter_ipv4/ipt_ttl.h> 20 + #include <linux/netfilter_ipv6/ip6t_hl.h> 21 + 22 + MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 23 + MODULE_DESCRIPTION("Xtables: Hoplimit/TTL field match"); 24 + MODULE_LICENSE("GPL"); 25 + MODULE_ALIAS("ipt_ttl"); 26 + MODULE_ALIAS("ip6t_hl"); 27 + 28 + static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par) 29 + { 30 + const struct ipt_ttl_info *info = par->matchinfo; 31 + const u8 ttl = ip_hdr(skb)->ttl; 32 + 33 + switch (info->mode) { 34 + case IPT_TTL_EQ: 35 + return ttl == info->ttl; 36 + case IPT_TTL_NE: 37 + return ttl != info->ttl; 38 + case IPT_TTL_LT: 39 + return ttl < info->ttl; 40 + case IPT_TTL_GT: 41 + return ttl > info->ttl; 42 + default: 43 + printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", 44 + info->mode); 45 + return false; 46 + } 47 + 48 + return false; 49 + } 50 + 51 + static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par) 52 + { 53 + const struct ip6t_hl_info *info = par->matchinfo; 54 + const struct ipv6hdr *ip6h = ipv6_hdr(skb); 55 + 56 + switch (info->mode) { 57 + case IP6T_HL_EQ: 58 + return ip6h->hop_limit == info->hop_limit; 59 + break; 60 + case IP6T_HL_NE: 61 + return ip6h->hop_limit != info->hop_limit; 62 + break; 63 + case IP6T_HL_LT: 64 + return ip6h->hop_limit < info->hop_limit; 65 + break; 66 + case IP6T_HL_GT: 67 
+ return ip6h->hop_limit > info->hop_limit; 68 + break; 69 + default: 70 + printk(KERN_WARNING "ip6t_hl: unknown mode %d\n", 71 + info->mode); 72 + return false; 73 + } 74 + 75 + return false; 76 + } 77 + 78 + static struct xt_match hl_mt_reg[] __read_mostly = { 79 + { 80 + .name = "ttl", 81 + .revision = 0, 82 + .family = NFPROTO_IPV4, 83 + .match = ttl_mt, 84 + .matchsize = sizeof(struct ipt_ttl_info), 85 + .me = THIS_MODULE, 86 + }, 87 + { 88 + .name = "hl", 89 + .revision = 0, 90 + .family = NFPROTO_IPV6, 91 + .match = hl_mt6, 92 + .matchsize = sizeof(struct ip6t_hl_info), 93 + .me = THIS_MODULE, 94 + }, 95 + }; 96 + 97 + static int __init hl_mt_init(void) 98 + { 99 + return xt_register_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg)); 100 + } 101 + 102 + static void __exit hl_mt_exit(void) 103 + { 104 + xt_unregister_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg)); 105 + } 106 + 107 + module_init(hl_mt_init); 108 + module_exit(hl_mt_exit);
+29 -11
net/netfilter/xt_limit.c
···
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_limit.h>
 
+/* Kernel-private run-time state shared by all CPUs for one rule; kept out
+ * of the userspace-visible xt_rateinfo so its ABI stays unchanged. */
+struct xt_limit_priv {
+	unsigned long prev;
+	uint32_t credit;
+};
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>");
 MODULE_DESCRIPTION("Xtables: rate-limit match");
···
 static bool
 limit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 {
-	struct xt_rateinfo *r =
-		((const struct xt_rateinfo *)par->matchinfo)->master;
+	const struct xt_rateinfo *r = par->matchinfo;
+	struct xt_limit_priv *priv = r->master;
 	unsigned long now = jiffies;
 
 	spin_lock_bh(&limit_lock);
-	r->credit += (now - xchg(&r->prev, now)) * CREDITS_PER_JIFFY;
-	if (r->credit > r->credit_cap)
-		r->credit = r->credit_cap;
+	priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
+	if (priv->credit > r->credit_cap)
+		priv->credit = r->credit_cap;
 
-	if (r->credit >= r->cost) {
+	if (priv->credit >= r->cost) {
 		/* We're not limited. */
-		r->credit -= r->cost;
+		priv->credit -= r->cost;
 		spin_unlock_bh(&limit_lock);
 		return true;
 	}
···
 static bool limit_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_rateinfo *r = par->matchinfo;
+	struct xt_limit_priv *priv;
 
 	/* Check for overflow. */
 	if (r->burst == 0
···
 		return false;
 	}
 
-	/* For SMP, we only want to use one set of counters. */
-	r->master = r;
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL)
+		/* checkentry returns bool here: "return -ENOMEM" would read
+		 * as true (success) and leave r->master NULL, crashing on the
+		 * first matched packet. */
+		return false;
+
+	/* For SMP, we only want to use one set of state. */
+	r->master = priv;
+	/* Seed the state unconditionally: rules re-added by
+	 * iptables-restore arrive with r->cost already non-zero, and
+	 * kmalloc() memory is uninitialised. */
+	priv->prev = jiffies;
+	priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
 	if (r->cost == 0) {
 		/* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
 		   128. */
-		r->prev = jiffies;
-		r->credit = user2credits(r->avg * r->burst); /* Credits full. */
 		r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
 		r->cost = user2credits(r->avg);
 	}
 	return true;
+}
+
+/* Release the kernel-private state allocated in limit_mt_check(). */
+static void limit_mt_destroy(const struct xt_mtdtor_param *par)
+{
+	const struct xt_rateinfo *info = par->matchinfo;
+
+	kfree(info->master);
 }
 
 #ifdef CONFIG_COMPAT
···
 	.family           = NFPROTO_UNSPEC,
 	.match            = limit_mt,
 	.checkentry       = limit_mt_check,
+	.destroy          = limit_mt_destroy,
 	.matchsize        = sizeof(struct xt_rateinfo),
 #ifdef CONFIG_COMPAT
 	.compatsize       = sizeof(struct compat_xt_rateinfo),
+23 -14
net/netfilter/xt_physdev.c
··· 20 20 MODULE_ALIAS("ipt_physdev"); 21 21 MODULE_ALIAS("ip6t_physdev"); 22 22 23 + static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) 24 + { 25 + const unsigned long *a = (const unsigned long *)_a; 26 + const unsigned long *b = (const unsigned long *)_b; 27 + const unsigned long *mask = (const unsigned long *)_mask; 28 + unsigned long ret; 29 + 30 + ret = (a[0] ^ b[0]) & mask[0]; 31 + if (IFNAMSIZ > sizeof(unsigned long)) 32 + ret |= (a[1] ^ b[1]) & mask[1]; 33 + if (IFNAMSIZ > 2 * sizeof(unsigned long)) 34 + ret |= (a[2] ^ b[2]) & mask[2]; 35 + if (IFNAMSIZ > 3 * sizeof(unsigned long)) 36 + ret |= (a[3] ^ b[3]) & mask[3]; 37 + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); 38 + return ret; 39 + } 40 + 23 41 static bool 24 42 physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par) 25 43 { 26 - int i; 27 - static const char nulldevname[IFNAMSIZ]; 44 + static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 28 45 const struct xt_physdev_info *info = par->matchinfo; 29 - bool ret; 46 + unsigned long ret; 30 47 const char *indev, *outdev; 31 48 const struct nf_bridge_info *nf_bridge; 32 49 ··· 85 68 if (!(info->bitmask & XT_PHYSDEV_OP_IN)) 86 69 goto match_outdev; 87 70 indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname; 88 - for (i = 0, ret = false; i < IFNAMSIZ/sizeof(unsigned int); i++) { 89 - ret |= (((const unsigned int *)indev)[i] 90 - ^ ((const unsigned int *)info->physindev)[i]) 91 - & ((const unsigned int *)info->in_mask)[i]; 92 - } 71 + ret = ifname_compare(indev, info->physindev, info->in_mask); 93 72 94 73 if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN)) 95 74 return false; ··· 95 82 return true; 96 83 outdev = nf_bridge->physoutdev ? 
97 84 nf_bridge->physoutdev->name : nulldevname; 98 - for (i = 0, ret = false; i < IFNAMSIZ/sizeof(unsigned int); i++) { 99 - ret |= (((const unsigned int *)outdev)[i] 100 - ^ ((const unsigned int *)info->physoutdev)[i]) 101 - & ((const unsigned int *)info->out_mask)[i]; 102 - } 85 + ret = ifname_compare(outdev, info->physoutdev, info->out_mask); 103 86 104 - return ret ^ !(info->invert & XT_PHYSDEV_OP_OUT); 87 + return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT)); 105 88 } 106 89 107 90 static bool physdev_mt_check(const struct xt_mtchk_param *par)
+24 -7
net/netfilter/xt_quota.c
···
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_quota.h>
 
+/* Kernel-private running counter; the userspace-visible xt_quota_info
+ * layout is left untouched. */
+struct xt_quota_priv {
+	uint64_t quota;
+};
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
 MODULE_DESCRIPTION("Xtables: countdown quota match");
···
 static bool
 quota_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 {
-	struct xt_quota_info *q =
-		((const struct xt_quota_info *)par->matchinfo)->master;
+	struct xt_quota_info *q = (void *)par->matchinfo;
+	struct xt_quota_priv *priv = q->master;
 	bool ret = q->flags & XT_QUOTA_INVERT;
 
 	spin_lock_bh(&quota_lock);
-	if (q->quota >= skb->len) {
-		q->quota -= skb->len;
+	if (priv->quota >= skb->len) {
+		priv->quota -= skb->len;
 		ret = !ret;
 	} else {
 		/* we do not allow even small packets from now on */
-		q->quota = 0;
+		priv->quota = 0;
 	}
+	/* Copy quota back to matchinfo so that iptables can display it */
+	q->quota = priv->quota;
 	spin_unlock_bh(&quota_lock);
 
 	return ret;
···
 	if (q->flags & ~XT_QUOTA_MASK)
 		return false;
-	/* For SMP, we only want to use one set of counters. */
-	q->master = q;
+
+	q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
+	if (q->master == NULL)
+		/* checkentry returns bool here: "return -ENOMEM" would read
+		 * as true (success) and leave q->master NULL, crashing on the
+		 * first matched packet. */
+		return false;
+
+	/* kmalloc() memory is uninitialised: seed the private counter with
+	 * the quota configured from userspace. */
+	q->master->quota = q->quota;
+
 	return true;
+}
+
+/* Release the private counter allocated in quota_mt_check(). */
+static void quota_mt_destroy(const struct xt_mtdtor_param *par)
+{
+	const struct xt_quota_info *q = par->matchinfo;
+
+	kfree(q->master);
 }
 
 static struct xt_match quota_mt_reg __read_mostly = {
···
 	.family     = NFPROTO_UNSPEC,
 	.match      = quota_mt,
 	.checkentry = quota_mt_check,
+	.destroy    = quota_mt_destroy,
 	.matchsize  = sizeof(struct xt_quota_info),
 	.me         = THIS_MODULE,
 };
+23 -5
net/netfilter/xt_statistic.c
··· 16 16 #include <linux/netfilter/xt_statistic.h> 17 17 #include <linux/netfilter/x_tables.h> 18 18 19 + struct xt_statistic_priv { 20 + uint32_t count; 21 + }; 22 + 19 23 MODULE_LICENSE("GPL"); 20 24 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 21 25 MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)"); ··· 31 27 static bool 32 28 statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par) 33 29 { 34 - struct xt_statistic_info *info = (void *)par->matchinfo; 30 + const struct xt_statistic_info *info = par->matchinfo; 35 31 bool ret = info->flags & XT_STATISTIC_INVERT; 36 32 37 33 switch (info->mode) { ··· 40 36 ret = !ret; 41 37 break; 42 38 case XT_STATISTIC_MODE_NTH: 43 - info = info->master; 44 39 spin_lock_bh(&nth_lock); 45 - if (info->u.nth.count++ == info->u.nth.every) { 46 - info->u.nth.count = 0; 40 + if (info->master->count++ == info->u.nth.every) { 41 + info->master->count = 0; 47 42 ret = !ret; 48 43 } 49 44 spin_unlock_bh(&nth_lock); ··· 59 56 if (info->mode > XT_STATISTIC_MODE_MAX || 60 57 info->flags & ~XT_STATISTIC_MASK) 61 58 return false; 62 - info->master = info; 59 + 60 + info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); 61 + if (info->master == NULL) { 62 + printk(KERN_ERR KBUILD_MODNAME ": Out of memory\n"); 63 + return false; 64 + } 65 + info->master->count = info->u.nth.count; 66 + 63 67 return true; 68 + } 69 + 70 + static void statistic_mt_destroy(const struct xt_mtdtor_param *par) 71 + { 72 + const struct xt_statistic_info *info = par->matchinfo; 73 + 74 + kfree(info->master); 64 75 } 65 76 66 77 static struct xt_match xt_statistic_mt_reg __read_mostly = { ··· 83 66 .family = NFPROTO_UNSPEC, 84 67 .match = statistic_mt, 85 68 .checkentry = statistic_mt_check, 69 + .destroy = statistic_mt_destroy, 86 70 .matchsize = sizeof(struct xt_statistic_info), 87 71 .me = THIS_MODULE, 88 72 };
+1
net/netlink/af_netlink.c
··· 1117 1117 1118 1118 read_unlock(&nl_table_lock); 1119 1119 } 1120 + EXPORT_SYMBOL(netlink_set_err); 1120 1121 1121 1122 /* must be called with netlink table grabbed */ 1122 1123 static void netlink_update_socket_mc(struct netlink_sock *nlk,
+1 -1
net/sysctl_net.c
··· 61 61 static int net_ctl_ro_header_perms(struct ctl_table_root *root, 62 62 struct nsproxy *namespaces, struct ctl_table *table) 63 63 { 64 - if (namespaces->net_ns == &init_net) 64 + if (net_eq(namespaces->net_ns, &init_net)) 65 65 return table->mode; 66 66 else 67 67 return table->mode & ~0222;