Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

[NETFILTER]: Fix OOPSes on machines with discontiguous cpu numbering.

Original patch by Harald Welte, with feedback from Herbert Xu
and testing by Sébastien Bernard.

EBTABLES, ARP tables, and IP/IP6 tables all assume that cpus
are numbered linearly. That is not necessarily true.

This patch fixes that up by calculating the largest possible
cpu number, and allocating enough per-cpu structure space given
that.

Signed-off-by: David S. Miller <davem@davemloft.net>

+65 -26
+2
arch/cris/arch-v32/kernel/smp.c
··· 15 15 #include <linux/kernel.h> 16 16 #include <linux/cpumask.h> 17 17 #include <linux/interrupt.h> 18 + #include <linux/module.h> 18 19 19 20 #define IPI_SCHEDULE 1 20 21 #define IPI_CALL 2 ··· 29 28 /* CPU masks */ 30 29 cpumask_t cpu_online_map = CPU_MASK_NONE; 31 30 cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 31 + EXPORT_SYMBOL(phys_cpu_present_map); 32 32 33 33 /* Variables used during SMP boot */ 34 34 volatile int cpu_now_booting = 0;
+3
arch/sh/kernel/smp.c
··· 22 22 #include <linux/time.h> 23 23 #include <linux/timex.h> 24 24 #include <linux/sched.h> 25 + #include <linux/module.h> 25 26 26 27 #include <asm/atomic.h> 27 28 #include <asm/processor.h> ··· 40 39 extern void per_cpu_trap_init(void); 41 40 42 41 cpumask_t cpu_possible_map; 42 + EXPORT_SYMBOL(cpu_possible_map); 43 + 43 44 cpumask_t cpu_online_map; 44 45 static atomic_t cpus_booted = ATOMIC_INIT(0); 45 46
+12
include/linux/cpumask.h
··· 392 392 #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) 393 393 #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) 394 394 395 + /* Find the highest possible smp_processor_id() */ 396 + static inline unsigned int highest_possible_processor_id(void) 397 + { 398 + unsigned int cpu, highest = 0; 399 + 400 + for_each_cpu_mask(cpu, cpu_possible_map) 401 + highest = cpu; 402 + 403 + return highest; 404 + } 405 + 406 + 395 407 #endif /* __LINUX_CPUMASK_H */
+17 -10
net/bridge/netfilter/ebtables.c
··· 26 26 #include <linux/spinlock.h> 27 27 #include <asm/uaccess.h> 28 28 #include <linux/smp.h> 29 + #include <linux/cpumask.h> 29 30 #include <net/sock.h> 30 31 /* needed for logical [in,out]-dev filtering */ 31 32 #include "../br_private.h" ··· 824 823 /* this will get free'd in do_replace()/ebt_register_table() 825 824 if an error occurs */ 826 825 newinfo->chainstack = (struct ebt_chainstack **) 827 - vmalloc(num_possible_cpus() * sizeof(struct ebt_chainstack)); 826 + vmalloc((highest_possible_processor_id()+1) 827 + * sizeof(struct ebt_chainstack)); 828 828 if (!newinfo->chainstack) 829 829 return -ENOMEM; 830 - for (i = 0; i < num_possible_cpus(); i++) { 830 + for_each_cpu(i) { 831 831 newinfo->chainstack[i] = 832 832 vmalloc(udc_cnt * sizeof(struct ebt_chainstack)); 833 833 if (!newinfo->chainstack[i]) { ··· 897 895 898 896 /* counters of cpu 0 */ 899 897 memcpy(counters, oldcounters, 900 - sizeof(struct ebt_counter) * nentries); 898 + sizeof(struct ebt_counter) * nentries); 899 + 901 900 /* add other counters to those of cpu 0 */ 902 - for (cpu = 1; cpu < num_possible_cpus(); cpu++) { 901 + for_each_cpu(cpu) { 902 + if (cpu == 0) 903 + continue; 903 904 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 904 905 for (i = 0; i < nentries; i++) { 905 906 counters[i].pcnt += counter_base[i].pcnt; ··· 934 929 BUGPRINT("Entries_size never zero\n"); 935 930 return -EINVAL; 936 931 } 937 - countersize = COUNTER_OFFSET(tmp.nentries) * num_possible_cpus(); 932 + countersize = COUNTER_OFFSET(tmp.nentries) * 933 + (highest_possible_processor_id()+1); 938 934 newinfo = (struct ebt_table_info *) 939 935 vmalloc(sizeof(struct ebt_table_info) + countersize); 940 936 if (!newinfo) ··· 1028 1022 1029 1023 vfree(table->entries); 1030 1024 if (table->chainstack) { 1031 - for (i = 0; i < num_possible_cpus(); i++) 1025 + for_each_cpu(i) 1032 1026 vfree(table->chainstack[i]); 1033 1027 vfree(table->chainstack); 1034 1028 } ··· 1046 1040 vfree(counterstmp); 1047 1041 /* 
can be initialized in translate_table() */ 1048 1042 if (newinfo->chainstack) { 1049 - for (i = 0; i < num_possible_cpus(); i++) 1043 + for_each_cpu(i) 1050 1044 vfree(newinfo->chainstack[i]); 1051 1045 vfree(newinfo->chainstack); 1052 1046 } ··· 1138 1132 return -EINVAL; 1139 1133 } 1140 1134 1141 - countersize = COUNTER_OFFSET(table->table->nentries) * num_possible_cpus(); 1135 + countersize = COUNTER_OFFSET(table->table->nentries) * 1136 + (highest_possible_processor_id()+1); 1142 1137 newinfo = (struct ebt_table_info *) 1143 1138 vmalloc(sizeof(struct ebt_table_info) + countersize); 1144 1139 ret = -ENOMEM; ··· 1193 1186 up(&ebt_mutex); 1194 1187 free_chainstack: 1195 1188 if (newinfo->chainstack) { 1196 - for (i = 0; i < num_possible_cpus(); i++) 1189 + for_each_cpu(i) 1197 1190 vfree(newinfo->chainstack[i]); 1198 1191 vfree(newinfo->chainstack); 1199 1192 } ··· 1216 1209 up(&ebt_mutex); 1217 1210 vfree(table->private->entries); 1218 1211 if (table->private->chainstack) { 1219 - for (i = 0; i < num_possible_cpus(); i++) 1212 + for_each_cpu(i) 1220 1213 vfree(table->private->chainstack[i]); 1221 1214 vfree(table->private->chainstack); 1222 1215 }
+9 -5
net/ipv4/netfilter/arp_tables.c
··· 716 716 } 717 717 718 718 /* And one copy for every other CPU */ 719 - for (i = 1; i < num_possible_cpus(); i++) { 720 - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, 719 + for_each_cpu(i) { 720 + if (i == 0) 721 + continue; 722 + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, 721 723 newinfo->entries, 722 724 SMP_ALIGN(newinfo->size)); 723 725 } ··· 769 767 unsigned int cpu; 770 768 unsigned int i; 771 769 772 - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { 770 + for_each_cpu(cpu) { 773 771 i = 0; 774 772 ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), 775 773 t->size, ··· 887 885 return -ENOMEM; 888 886 889 887 newinfo = vmalloc(sizeof(struct arpt_table_info) 890 - + SMP_ALIGN(tmp.size) * num_possible_cpus()); 888 + + SMP_ALIGN(tmp.size) * 889 + (highest_possible_processor_id()+1)); 891 890 if (!newinfo) 892 891 return -ENOMEM; 893 892 ··· 1161 1158 = { 0, 0, 0, { 0 }, { 0 }, { } }; 1162 1159 1163 1160 newinfo = vmalloc(sizeof(struct arpt_table_info) 1164 - + SMP_ALIGN(repl->size) * num_possible_cpus()); 1161 + + SMP_ALIGN(repl->size) * 1162 + (highest_possible_processor_id()+1)); 1165 1163 if (!newinfo) { 1166 1164 ret = -ENOMEM; 1167 1165 return ret;
+11 -6
net/ipv4/netfilter/ip_tables.c
··· 27 27 #include <asm/semaphore.h> 28 28 #include <linux/proc_fs.h> 29 29 #include <linux/err.h> 30 + #include <linux/cpumask.h> 30 31 31 32 #include <linux/netfilter_ipv4/ip_tables.h> 32 33 ··· 922 921 } 923 922 924 923 /* And one copy for every other CPU */ 925 - for (i = 1; i < num_possible_cpus(); i++) { 926 - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, 924 + for_each_cpu(i) { 925 + if (i == 0) 926 + continue; 927 + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, 927 928 newinfo->entries, 928 929 SMP_ALIGN(newinfo->size)); 929 930 } ··· 946 943 struct ipt_entry *table_base; 947 944 unsigned int i; 948 945 949 - for (i = 0; i < num_possible_cpus(); i++) { 946 + for_each_cpu(i) { 950 947 table_base = 951 948 (void *)newinfo->entries 952 949 + TABLE_OFFSET(newinfo, i); ··· 993 990 unsigned int cpu; 994 991 unsigned int i; 995 992 996 - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { 993 + for_each_cpu(cpu) { 997 994 i = 0; 998 995 IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), 999 996 t->size, ··· 1131 1128 return -ENOMEM; 1132 1129 1133 1130 newinfo = vmalloc(sizeof(struct ipt_table_info) 1134 - + SMP_ALIGN(tmp.size) * num_possible_cpus()); 1131 + + SMP_ALIGN(tmp.size) * 1132 + (highest_possible_processor_id()+1)); 1135 1133 if (!newinfo) 1136 1134 return -ENOMEM; 1137 1135 ··· 1462 1458 = { 0, 0, 0, { 0 }, { 0 }, { } }; 1463 1459 1464 1460 newinfo = vmalloc(sizeof(struct ipt_table_info) 1465 - + SMP_ALIGN(repl->size) * num_possible_cpus()); 1461 + + SMP_ALIGN(repl->size) * 1462 + (highest_possible_processor_id()+1)); 1466 1463 if (!newinfo) 1467 1464 return -ENOMEM; 1468 1465
+11 -5
net/ipv6/netfilter/ip6_tables.c
··· 28 28 #include <asm/uaccess.h> 29 29 #include <asm/semaphore.h> 30 30 #include <linux/proc_fs.h> 31 + #include <linux/cpumask.h> 31 32 32 33 #include <linux/netfilter_ipv6/ip6_tables.h> 33 34 ··· 951 950 } 952 951 953 952 /* And one copy for every other CPU */ 954 - for (i = 1; i < num_possible_cpus(); i++) { 955 - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, 953 + for_each_cpu(i) { 954 + if (i == 0) 955 + continue; 956 + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, 956 957 newinfo->entries, 957 958 SMP_ALIGN(newinfo->size)); 958 959 } ··· 976 973 unsigned int i; 977 974 978 975 for (i = 0; i < num_possible_cpus(); i++) { 976 + for_each_cpu(i) { 979 977 table_base = 980 978 (void *)newinfo->entries 981 979 + TABLE_OFFSET(newinfo, i); ··· 1023 1019 unsigned int cpu; 1024 1020 unsigned int i; 1025 1021 1026 - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { 1022 + for_each_cpu(cpu) { 1027 1023 i = 0; 1028 1024 IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), 1029 1025 t->size, ··· 1157 1153 return -ENOMEM; 1158 1154 1159 1155 newinfo = vmalloc(sizeof(struct ip6t_table_info) 1160 - + SMP_ALIGN(tmp.size) * num_possible_cpus()); 1156 + + SMP_ALIGN(tmp.size) * 1157 + (highest_possible_processor_id()+1)); 1161 1158 if (!newinfo) 1162 1159 return -ENOMEM; 1163 1160 ··· 1472 1467 = { 0, 0, 0, { 0 }, { 0 }, { } }; 1473 1468 1474 1469 newinfo = vmalloc(sizeof(struct ip6t_table_info) 1475 - + SMP_ALIGN(repl->size) * num_possible_cpus()); 1470 + + SMP_ALIGN(repl->size) * 1471 + (highest_possible_processor_id()+1)); 1476 1472 if (!newinfo) 1477 1473 return -ENOMEM; 1478 1474