···
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
+5 -5
include/linux/netfilter/x_tables.h
···
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
···
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
···
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
···
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
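For context (not part of the patch): converting the per-cpu xt_info_lock to a seqlock_t means the packet-processing side still takes the lock exactly as before, while code that sums the 64-bit counters can sample them locklessly and simply retry if a writer raced with the read. A minimal sketch of that pattern follows, using the kernel seqlock API but with hypothetical names (sample_counter, sample_update and sample_read are illustrative only); the reader side mirrors the retry loop added to get_counters() in the hunks below.

/* Illustrative sketch only (hypothetical names): how a seqlock_t lets a
 * reader sample two related 64-bit counters without blocking the writer.
 */
#include <linux/seqlock.h>
#include <linux/types.h>

struct sample_counter {
	seqlock_t lock;
	u64 bcnt;
	u64 pcnt;
};

static void sample_update(struct sample_counter *c, u64 bytes)
{
	write_seqlock(&c->lock);	/* takes the spinlock and bumps the sequence */
	c->bcnt += bytes;
	c->pcnt++;
	write_sequnlock(&c->lock);
}

static void sample_read(struct sample_counter *c, u64 *bcnt, u64 *pcnt)
{
	unsigned int start;

	do {				/* retry if a writer raced with the read */
		start = read_seqbegin(&c->lock);
		*bcnt = c->bcnt;
		*pcnt = c->pcnt;
	} while (read_seqretry(&c->lock, start));
}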
+14 -31
net/ipv4/netfilter/arp_tables.c
···
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
···
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
···
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
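A note on the vmalloc() to vzalloc() switch above (the same reasoning applies to the ip_tables.c and ip6_tables.c hunks below): the old get_counters() primed the array with SET_COUNTER() for the current CPU before adding the other CPUs' values, so the buffer did not need to start out zeroed. The new loop only ever ADD_COUNTER()s, so the allocation must be pre-cleared. Roughly, vzalloc(countersize) stands in for the following sketch (illustrative, not the patch's code):

/* Sketch: the zeroed allocation that vzalloc() provides.  The cleared
 * start matters because get_counters() now accumulates with ADD_COUNTER()
 * from the first CPU onward instead of SET_COUNTER()ing initial values.
 */
counters = vmalloc(countersize);
if (counters != NULL)
	memset(counters, 0, countersize);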
+14 -31
net/ipv4/netfilter/ip_tables.c
···
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
···
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
···
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
+14 -31
net/ipv6/netfilter/ip6_tables.c
···
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
···
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
···
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
+5 -9
net/netfilter/nf_conntrack_netlink.c
···
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
 					 hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
···
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-			nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
···
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
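For reference (illustrative, not part of the patch): the two chain-walking idioms swapped by this hunk differ in how an entry is kept alive. Under rcu_read_lock() the walker must pin each conntrack with atomic_inc_not_zero() before using it and drop that reference afterwards, whereas holding nf_conntrack_lock with BH disabled keeps every entry in the chain alive for the whole walk, so no per-entry refcounting is needed. A rough sketch of the locked variant follows; walk_bucket_locked is a made-up name used only for illustration.

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>

/* Illustrative sketch only: walking one conntrack hash bucket while
 * holding nf_conntrack_lock.  Entries cannot be freed while the lock
 * is held, so they may be dereferenced directly.
 */
static void walk_bucket_locked(struct net *net, unsigned int bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[bucket], hnnode) {
		if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
			continue;
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* ... inspect ct ... */
	}
	spin_unlock_bh(&nf_conntrack_lock);
}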