@@ -103,7 +103,7 @@
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
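The point of dropping the `(*)` is that `br_should_route_hook_t` now names the function type itself, so in the unchanged declaration `br_should_route_hook_t __rcu *br_should_route_hook` the `__rcu` annotation sits on a plain function pointer rather than on a pointer to a function pointer. A minimal user-space sketch of the two typedef styles (all names here are demo stand-ins, not kernel code):

```c
#include <stdio.h>

/* Old style: the typedef is itself a pointer type. */
typedef int (*old_hook_t)(int);		/* old_hook_t   == int (*)(int) */
/* New style: the typedef names the function type; add '*' at use. */
typedef int new_hook_t(int);		/* new_hook_t * == int (*)(int) */

static int demo_hook(int x)
{
	return x + 1;
}

/* Both variables have the same type, but only the second form lets a
 * qualifier such as the kernel's __rcu annotate the pointer itself:
 *
 *	extern new_hook_t __rcu *hook;	// an RCU-tagged function pointer
 *
 * whereas "old_hook_t __rcu *hook" would declare a pointer to a function
 * pointer -- one level of indirection too many. */
static old_hook_t hook_a = demo_hook;
static new_hook_t *hook_b = demo_hook;

int main(void)
{
	printf("%d %d\n", hook_a(1), hook_b(2));
	return 0;
}
```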
include/linux/netfilter/x_tables.h (+5, -5)
@@ -472,7 +472,7 @@
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
@@ -516,12 +516,12 @@
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
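Two things happen in this header: the per-CPU lock becomes a seqlock, so counter dumps on other CPUs can snapshot values with a retry loop instead of blocking the packet path (see the table diffs below), and the existing `readers` count is kept so that re-entrant table traversal on one CPU takes the seqlock only at the outermost level. A user-space sketch of that depth-counted acquire (all names are demo stand-ins; the `seq` field just models the sequence bump that write_seqlock()/write_sequnlock() perform):

```c
#include <stdio.h>

struct xt_info_lock_demo {
	unsigned int seq;	/* models the seqlock_t sequence        */
	unsigned char readers;	/* traversal nesting depth on this CPU  */
};

static struct xt_info_lock_demo lock;	/* one per CPU in the kernel */

static void info_rdlock(void)		/* xt_info_rdlock_bh() analogue */
{
	if (!lock.readers++)
		lock.seq++;		/* outermost entry: seq goes odd */
}

static void info_rdunlock(void)		/* xt_info_rdunlock_bh() analogue */
{
	if (!--lock.readers)
		lock.seq++;		/* outermost exit: seq even again */
}

static void do_table(int depth)		/* ipt_do_table() analogue */
{
	info_rdlock();
	printf("depth %d: readers=%u seq=%u\n", depth, lock.readers, lock.seq);
	if (depth < 2)			/* nested traversal on the same CPU */
		do_table(depth + 1);
	info_rdunlock();
}

int main(void)
{
	do_table(0);
	printf("after: readers=%u seq=%u (even: no write in progress)\n",
	       lock.readers, lock.seq);
	return 0;
}
```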
net/ipv4/netfilter/arp_tables.c (+14, -31)
@@ -710,42 +710,25 @@
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
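get_counters() no longer disables bottom halves or grabs every CPU's lock; it snapshots each entry's 64-bit counters inside a read_seqbegin()/read_seqretry() loop and accumulates the result. A simplified user-space analogue of that retry pattern (C11 atomics stand in for the kernel's seqlock_t; single writer assumed, memory fences elided, so this is a sketch rather than a correct concurrent seqlock):

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;		/* even: stable, odd: write in progress */
static uint64_t bcnt, pcnt;	/* protected data (byte/packet counts)  */

static void write_counters(uint64_t bytes)	/* packet path analogue */
{
	atomic_fetch_add(&seq, 1);	/* sequence becomes odd  */
	bcnt += bytes;
	pcnt += 1;
	atomic_fetch_add(&seq, 1);	/* sequence becomes even */
}

static void read_counters(uint64_t *b, uint64_t *p)	/* dump analogue */
{
	unsigned int start;

	do {
		/* spin while a write is in flight, then snapshot */
		while ((start = atomic_load(&seq)) & 1)
			;
		*b = bcnt;
		*p = pcnt;
		/* retry if a writer sneaked in during the snapshot */
	} while (atomic_load(&seq) != start);
}

int main(void)
{
	uint64_t b, p;

	write_counters(1500);
	read_counters(&b, &p);
	printf("bcnt=%llu pcnt=%llu\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}
```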
net/ipv4/netfilter/ip_tables.c (+14, -31)
@@ -884,42 +884,25 @@
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
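The retry loop exists because `bcnt` and `pcnt` are u64 while the dump may run on a 32-bit CPU: there a u64 update takes two stores, and an unsynchronized reader can observe a half-updated ("torn") value. A small single-threaded demo of the window the seqlock retry closes (little-endian word layout assumed):

```c
#include <stdint.h>
#include <stdio.h>

union split64 {
	uint64_t whole;
	uint32_t half[2];	/* half[0] is the low word on little-endian */
};

int main(void)
{
	union split64 c = { .whole = 0x00000000ffffffffULL };

	/* A 32-bit CPU increments this u64 with two separate stores.
	 * Simulate the instant between them: */
	c.half[0] += 1;		/* low word wraps to 0 */
	printf("torn read here : %#llx\n", (unsigned long long)c.whole);
	c.half[1] += 1;		/* high word catches up */
	printf("consistent read: %#llx\n", (unsigned long long)c.whole);
	return 0;
}
```

The torn value (0) is neither the old count (0xffffffff) nor the new one (0x100000000), which is exactly what read_seqbegin()/read_seqretry() prevents the dump from reporting.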
net/ipv6/netfilter/ip6_tables.c (+14, -31)
@@ -897,42 +897,25 @@
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
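The vmalloc() to vzalloc() switch in all three tables follows directly from the new get_counters(): the old code seeded the array with SET_COUNTER() for the current CPU and then added the rest, while the new code only ever accumulates with ADD_COUNTER(), so the buffer must start zeroed. A user-space sketch of that requirement (calloc() stands in for vzalloc(); the struct and macro are stand-ins for xt_counters/ADD_COUNTER):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_counters { uint64_t pcnt, bcnt; };	/* xt_counters stand-in */

#define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

int main(void)
{
	unsigned int ncpus = 2, nentries = 4, cpu, i;
	/* calloc() plays vzalloc()'s role: allocate and zero in one call,
	 * so pure accumulation below starts from a known state. */
	struct demo_counters *counters = calloc(nentries, sizeof(*counters));

	if (!counters)
		return 1;
	for (cpu = 0; cpu < ncpus; cpu++)	/* for_each_possible_cpu() */
		for (i = 0; i < nentries; i++)
			ADD_COUNTER(counters[i], 1500, 1);
	printf("entry 0: bytes=%llu packets=%llu\n",
	       (unsigned long long)counters[0].bcnt,
	       (unsigned long long)counters[0].pcnt);
	free(counters);
	return 0;
}
```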
net/netfilter/nf_conntrack_netlink.c (+5, -9)
@@ -645,25 +645,23 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
 					   hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-		nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -690,7 +686,7 @@
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
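Holding nf_conntrack_lock across the dump means entries cannot be freed mid-walk, so the per-entry atomic_inc_not_zero()/nf_ct_put() pinning (and the releasect: label it required) can go; only `last` still holds a reference between dump invocations. A hypothetical user-space sketch of the two visiting styles (`entry`/`use` stand in for nf_conn and ct_general.use; not the kernel implementation):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	atomic_int use;		/* refcount; 0 means the entry is dying */
	int data;
};

/* RCU-style visit: the walker must pin each entry before touching it,
 * skipping entries whose refcount already dropped to zero. */
static bool visit_rcu_style(struct entry *e)
{
	int old = atomic_load(&e->use);

	do {
		if (old == 0)
			return false;	/* raced with free: skip it */
	} while (!atomic_compare_exchange_weak(&e->use, &old, old + 1));
	printf("dump %d\n", e->data);
	atomic_fetch_sub(&e->use, 1);	/* nf_ct_put() analogue */
	return true;
}

/* Lock-held visit: the table lock excludes freeing, so just read. */
static void visit_locked_style(const struct entry *e)
{
	printf("dump %d\n", e->data);
}

int main(void)
{
	struct entry e = { .data = 42 };

	atomic_store(&e.use, 1);
	visit_rcu_style(&e);
	visit_locked_style(&e);
	return 0;
}
```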