Merge branch 'master' of git://1984.lsi.us.es/net-2.6

+55 -109
+1 -1
include/linux/if_bridge.h
@@ -103,7 +103,7 @@
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
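Because the typedef now names a function type rather than a pointer type, the __rcu annotation on br_should_route_hook applies to the pointer variable itself, which is what sparse needs to check RCU accesses. A minimal sketch of how a caller dereferences the hook under RCU; the fragment is illustrative, not the exact bridge code:

/* Illustrative caller of the hook (assumes rcu_read_lock() is held). */
br_should_route_hook_t *rhook;

rhook = rcu_dereference(br_should_route_hook);
if (rhook && rhook(skb))
        return;         /* nonzero: route the frame instead of bridging it */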
+5 -5
include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-        spinlock_t lock;
+        seqlock_t lock;
         unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@
         local_bh_disable();
         lock = &__get_cpu_var(xt_info_locks);
         if (likely(!lock->readers++))
-                spin_lock(&lock->lock);
+                write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@
         struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
         if (likely(!--lock->readers))
-                spin_unlock(&lock->lock);
+                write_sequnlock(&lock->lock);
         local_bh_enable();
 }
 
@@ -516,12 +516,12 @@
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-        spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+        write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-        spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+        write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
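With xt_info_lock built on a seqlock_t, the packet-processing path still takes the write side per CPU, but a reader that only needs a consistent snapshot of a 64-bit byte/packet pair can retry on the sequence counter instead of blocking that CPU. A minimal sketch of the read-side pattern used by the get_counters() rewrites below; the helper name is illustrative:

/* Snapshot one xt_counters pair against the owning CPU's seqlock
 * without stopping packet processing on that CPU. */
static void xt_counter_snapshot(const struct xt_counters *c,
                                seqlock_t *lock, u64 *bcnt, u64 *pcnt)
{
        unsigned int start;

        do {
                start = read_seqbegin(lock);
                *bcnt = c->bcnt;
                *pcnt = c->pcnt;
        } while (read_seqretry(lock, start));   /* retry if a writer raced us */
}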
+14 -31
net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@
         struct arpt_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */
 
         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@
          * about).
          */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
 
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@
         struct arpt_entry *iter;
 
         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
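Since vzalloc() returns zeroed memory (unlike vmalloc()), the old first pass that seeded the array with SET_COUNTER() from the local CPU is gone; every CPU's counters are simply accumulated with ADD_COUNTER() starting from zero. The same conversion is repeated for ip_tables.c and ip6_tables.c below. Roughly, vzalloc() behaves like the following sketch (a statement of its semantics, not this tree's implementation):

/* Sketch: vzalloc() is vmalloc() plus a guaranteed zero-filled buffer. */
static void *vzalloc_like(unsigned long size)
{
        void *p = vmalloc(size);

        if (p)
                memset(p, 0, size);
        return p;
}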
+14 -31
net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@
         struct ipt_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU.
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */
 
         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i; /* macro does multi eval of i */
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
 
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@
         struct ipt_entry *iter;
 
         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
+14 -31
net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@
         struct ip6t_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */
 
         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
 
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@
         struct ip6t_entry *iter;
 
         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
+5 -9
net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@
         struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
         u_int8_t l3proto = nfmsg->nfgen_family;
 
-        rcu_read_lock();
+        spin_lock_bh(&nf_conntrack_lock);
         last = (struct nf_conn *)cb->args[1];
         for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+                hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
                                          hnnode) {
                         if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                 continue;
                         ct = nf_ct_tuplehash_to_ctrack(h);
-                        if (!atomic_inc_not_zero(&ct->ct_general.use))
-                                continue;
                         /* Dump entries of a given L3 protocol number.
                          * If it is not specified, ie. l3proto == 0,
                          * then dump everything. */
                         if (l3proto && nf_ct_l3num(ct) != l3proto)
-                                goto releasect;
+                                continue;
                         if (cb->args[1]) {
                                 if (ct != last)
-                                        goto releasect;
+                                        continue;
                                 cb->args[1] = 0;
                         }
                         if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@
                                 if (acct)
                                         memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
                         }
-releasect:
-                nf_ct_put(ct);
                 }
                 if (cb->args[1]) {
                         cb->args[1] = 0;
@@ -690,7 +686,7 @@
                 }
         }
 out:
-        rcu_read_unlock();
+        spin_unlock_bh(&nf_conntrack_lock);
         if (last)
                 nf_ct_put(last);
 
+2 -1
net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@
 
         for_each_possible_cpu(i) {
                 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-                spin_lock_init(&lock->lock);
+
+                seqlock_init(&lock->lock);
                 lock->readers = 0;
         }
 