Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

percpu: add __percpu sparse annotations to net

Add __percpu sparse annotations to net.

These annotations are to make sparse consider percpu variables to be
in a different address space and warn if accessed without going
through percpu accessors. This patch doesn't affect normal builds.

The macro and type tricks around snmp stats make things a bit
interesting. DEFINE/DECLARE_SNMP_STAT() macros mark the target field
as __percpu and SNMP_UPD_PO_STATS() macro is updated accordingly. All
snmp_mib_*() users which used to cast the argument to (void **) are
updated to cast it to (void __percpu **).

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Vlad Yasevich <vladislav.yasevich@hp.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Tejun Heo and committed by David S. Miller.
7d720c3e 2bb4646f

+127 -114
+3 -3
include/net/ip.h
··· 174 174 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) 175 175 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd) 176 176 177 - extern unsigned long snmp_fold_field(void *mib[], int offt); 178 - extern int snmp_mib_init(void *ptr[2], size_t mibsize); 179 - extern void snmp_mib_free(void *ptr[2]); 177 + extern unsigned long snmp_fold_field(void __percpu *mib[], int offt); 178 + extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize); 179 + extern void snmp_mib_free(void __percpu *ptr[2]); 180 180 181 181 extern struct local_ports { 182 182 seqlock_t lock;
+1 -1
include/net/ipcomp.h
··· 9 9 10 10 struct ipcomp_data { 11 11 u16 threshold; 12 - struct crypto_comp **tfms; 12 + struct crypto_comp * __percpu *tfms; 13 13 }; 14 14 15 15 struct ip_comp_hdr;
+1 -1
include/net/neighbour.h
··· 164 164 rwlock_t lock; 165 165 unsigned long last_rand; 166 166 struct kmem_cache *kmem_cachep; 167 - struct neigh_statistics *stats; 167 + struct neigh_statistics __percpu *stats; 168 168 struct neighbour **hash_buckets; 169 169 unsigned int hash_mask; 170 170 __u32 hash_rnd;
+1 -1
include/net/netns/conntrack.h
··· 17 17 struct hlist_head *expect_hash; 18 18 struct hlist_nulls_head unconfirmed; 19 19 struct hlist_nulls_head dying; 20 - struct ip_conntrack_stat *stat; 20 + struct ip_conntrack_stat __percpu *stat; 21 21 int sysctl_events; 22 22 unsigned int sysctl_events_retry_timeout; 23 23 int sysctl_acct;
+1 -1
include/net/netns/core.h
··· 10 10 11 11 int sysctl_somaxconn; 12 12 13 - struct prot_inuse *inuse; 13 + struct prot_inuse __percpu *inuse; 14 14 }; 15 15 16 16 #endif
+1 -1
include/net/route.h
··· 101 101 unsigned int out_hlist_search; 102 102 }; 103 103 104 - extern struct ip_rt_acct *ip_rt_acct; 104 + extern struct ip_rt_acct __percpu *ip_rt_acct; 105 105 106 106 struct in_device; 107 107 extern int ip_rt_init(void);
+8 -4
include/net/snmp.h
··· 129 129 * nonlocked_atomic_inc() primitives -AK 130 130 */ 131 131 #define DEFINE_SNMP_STAT(type, name) \ 132 - __typeof__(type) *name[2] 132 + __typeof__(type) __percpu *name[2] 133 133 #define DECLARE_SNMP_STAT(type, name) \ 134 - extern __typeof__(type) *name[2] 134 + extern __typeof__(type) __percpu *name[2] 135 135 136 136 #define SNMP_STAT_BHPTR(name) (name[0]) 137 137 #define SNMP_STAT_USRPTR(name) (name[1]) ··· 148 148 __this_cpu_add(mib[0]->mibs[field], addend) 149 149 #define SNMP_ADD_STATS_USER(mib, field, addend) \ 150 150 this_cpu_add(mib[1]->mibs[field], addend) 151 + /* 152 + * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr" 153 + * to make @ptr a non-percpu pointer. 154 + */ 151 155 #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ 152 156 do { \ 153 - __typeof__(mib[0]) ptr; \ 157 + __typeof__(*mib[0]) *ptr; \ 154 158 preempt_disable(); \ 155 159 ptr = this_cpu_ptr((mib)[!in_softirq()]); \ 156 160 ptr->mibs[basefield##PKTS]++; \ ··· 163 159 } while (0) 164 160 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ 165 161 do { \ 166 - __typeof__(mib[0]) ptr = \ 162 + __typeof__(*mib[0]) *ptr = \ 167 163 __this_cpu_ptr((mib)[!in_softirq()]); \ 168 164 ptr->mibs[basefield##PKTS]++; \ 169 165 ptr->mibs[basefield##OCTETS] += addend;\
+1 -1
include/net/tcp.h
··· 1189 1189 #define tcp_twsk_md5_key(twsk) NULL 1190 1190 #endif 1191 1191 1192 - extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *); 1192 + extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); 1193 1193 extern void tcp_free_md5sig_pool(void); 1194 1194 1195 1195 extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
+1 -1
net/8021q/vlan.h
··· 61 61 struct proc_dir_entry *dent; 62 62 unsigned long cnt_inc_headroom_on_tx; 63 63 unsigned long cnt_encap_on_xmit; 64 - struct vlan_rx_stats *vlan_rx_stats; 64 + struct vlan_rx_stats __percpu *vlan_rx_stats; 65 65 }; 66 66 67 67 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+3 -2
net/dccp/proto.c
··· 1005 1005 1006 1006 static inline int dccp_mib_init(void) 1007 1007 { 1008 - return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib)); 1008 + return snmp_mib_init((void __percpu **)dccp_statistics, 1009 + sizeof(struct dccp_mib)); 1009 1010 } 1010 1011 1011 1012 static inline void dccp_mib_exit(void) 1012 1013 { 1013 - snmp_mib_free((void**)dccp_statistics); 1014 + snmp_mib_free((void __percpu **)dccp_statistics); 1014 1015 } 1015 1016 1016 1017 static int thash_entries;
+23 -23
net/ipv4/af_inet.c
··· 1385 1385 } 1386 1386 EXPORT_SYMBOL_GPL(inet_ctl_sock_create); 1387 1387 1388 - unsigned long snmp_fold_field(void *mib[], int offt) 1388 + unsigned long snmp_fold_field(void __percpu *mib[], int offt) 1389 1389 { 1390 1390 unsigned long res = 0; 1391 1391 int i; ··· 1398 1398 } 1399 1399 EXPORT_SYMBOL_GPL(snmp_fold_field); 1400 1400 1401 - int snmp_mib_init(void *ptr[2], size_t mibsize) 1401 + int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) 1402 1402 { 1403 1403 BUG_ON(ptr == NULL); 1404 1404 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); ··· 1416 1416 } 1417 1417 EXPORT_SYMBOL_GPL(snmp_mib_init); 1418 1418 1419 - void snmp_mib_free(void *ptr[2]) 1419 + void snmp_mib_free(void __percpu *ptr[2]) 1420 1420 { 1421 1421 BUG_ON(ptr == NULL); 1422 1422 free_percpu(ptr[0]); ··· 1460 1460 1461 1461 static __net_init int ipv4_mib_init_net(struct net *net) 1462 1462 { 1463 - if (snmp_mib_init((void **)net->mib.tcp_statistics, 1463 + if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics, 1464 1464 sizeof(struct tcp_mib)) < 0) 1465 1465 goto err_tcp_mib; 1466 - if (snmp_mib_init((void **)net->mib.ip_statistics, 1466 + if (snmp_mib_init((void __percpu **)net->mib.ip_statistics, 1467 1467 sizeof(struct ipstats_mib)) < 0) 1468 1468 goto err_ip_mib; 1469 - if (snmp_mib_init((void **)net->mib.net_statistics, 1469 + if (snmp_mib_init((void __percpu **)net->mib.net_statistics, 1470 1470 sizeof(struct linux_mib)) < 0) 1471 1471 goto err_net_mib; 1472 - if (snmp_mib_init((void **)net->mib.udp_statistics, 1472 + if (snmp_mib_init((void __percpu **)net->mib.udp_statistics, 1473 1473 sizeof(struct udp_mib)) < 0) 1474 1474 goto err_udp_mib; 1475 - if (snmp_mib_init((void **)net->mib.udplite_statistics, 1475 + if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics, 1476 1476 sizeof(struct udp_mib)) < 0) 1477 1477 goto err_udplite_mib; 1478 - if (snmp_mib_init((void **)net->mib.icmp_statistics, 1478 + if (snmp_mib_init((void __percpu 
**)net->mib.icmp_statistics, 1479 1479 sizeof(struct icmp_mib)) < 0) 1480 1480 goto err_icmp_mib; 1481 - if (snmp_mib_init((void **)net->mib.icmpmsg_statistics, 1481 + if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics, 1482 1482 sizeof(struct icmpmsg_mib)) < 0) 1483 1483 goto err_icmpmsg_mib; 1484 1484 ··· 1486 1486 return 0; 1487 1487 1488 1488 err_icmpmsg_mib: 1489 - snmp_mib_free((void **)net->mib.icmp_statistics); 1489 + snmp_mib_free((void __percpu **)net->mib.icmp_statistics); 1490 1490 err_icmp_mib: 1491 - snmp_mib_free((void **)net->mib.udplite_statistics); 1491 + snmp_mib_free((void __percpu **)net->mib.udplite_statistics); 1492 1492 err_udplite_mib: 1493 - snmp_mib_free((void **)net->mib.udp_statistics); 1493 + snmp_mib_free((void __percpu **)net->mib.udp_statistics); 1494 1494 err_udp_mib: 1495 - snmp_mib_free((void **)net->mib.net_statistics); 1495 + snmp_mib_free((void __percpu **)net->mib.net_statistics); 1496 1496 err_net_mib: 1497 - snmp_mib_free((void **)net->mib.ip_statistics); 1497 + snmp_mib_free((void __percpu **)net->mib.ip_statistics); 1498 1498 err_ip_mib: 1499 - snmp_mib_free((void **)net->mib.tcp_statistics); 1499 + snmp_mib_free((void __percpu **)net->mib.tcp_statistics); 1500 1500 err_tcp_mib: 1501 1501 return -ENOMEM; 1502 1502 } 1503 1503 1504 1504 static __net_exit void ipv4_mib_exit_net(struct net *net) 1505 1505 { 1506 - snmp_mib_free((void **)net->mib.icmpmsg_statistics); 1507 - snmp_mib_free((void **)net->mib.icmp_statistics); 1508 - snmp_mib_free((void **)net->mib.udplite_statistics); 1509 - snmp_mib_free((void **)net->mib.udp_statistics); 1510 - snmp_mib_free((void **)net->mib.net_statistics); 1511 - snmp_mib_free((void **)net->mib.ip_statistics); 1512 - snmp_mib_free((void **)net->mib.tcp_statistics); 1506 + snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics); 1507 + snmp_mib_free((void __percpu **)net->mib.icmp_statistics); 1508 + snmp_mib_free((void __percpu **)net->mib.udplite_statistics); 1509 + 
snmp_mib_free((void __percpu **)net->mib.udp_statistics); 1510 + snmp_mib_free((void __percpu **)net->mib.net_statistics); 1511 + snmp_mib_free((void __percpu **)net->mib.ip_statistics); 1512 + snmp_mib_free((void __percpu **)net->mib.tcp_statistics); 1513 1513 } 1514 1514 1515 1515 static __net_initdata struct pernet_operations ipv4_mib_ops = {
+14 -14
net/ipv4/proc.c
··· 280 280 281 281 count = 0; 282 282 for (i = 0; i < ICMPMSG_MIB_MAX; i++) { 283 - val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i); 283 + val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i); 284 284 if (val) { 285 285 type[count] = i; 286 286 vals[count++] = val; ··· 307 307 for (i=0; icmpmibmap[i].name != NULL; i++) 308 308 seq_printf(seq, " Out%s", icmpmibmap[i].name); 309 309 seq_printf(seq, "\nIcmp: %lu %lu", 310 - snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INMSGS), 311 - snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); 310 + snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS), 311 + snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); 312 312 for (i=0; icmpmibmap[i].name != NULL; i++) 313 313 seq_printf(seq, " %lu", 314 - snmp_fold_field((void **) net->mib.icmpmsg_statistics, 314 + snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, 315 315 icmpmibmap[i].index)); 316 316 seq_printf(seq, " %lu %lu", 317 - snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), 318 - snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); 317 + snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), 318 + snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); 319 319 for (i=0; icmpmibmap[i].name != NULL; i++) 320 320 seq_printf(seq, " %lu", 321 - snmp_fold_field((void **) net->mib.icmpmsg_statistics, 321 + snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, 322 322 icmpmibmap[i].index | 0x100)); 323 323 } 324 324 ··· 341 341 342 342 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) 343 343 seq_printf(seq, " %lu", 344 - snmp_fold_field((void **)net->mib.ip_statistics, 344 + snmp_fold_field((void __percpu **)net->mib.ip_statistics, 345 345 snmp4_ipstats_list[i].entry)); 346 346 347 347 icmp_put(seq); /* RFC 2011 compatibility */ ··· 356 
356 /* MaxConn field is signed, RFC 2012 */ 357 357 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) 358 358 seq_printf(seq, " %ld", 359 - snmp_fold_field((void **)net->mib.tcp_statistics, 359 + snmp_fold_field((void __percpu **)net->mib.tcp_statistics, 360 360 snmp4_tcp_list[i].entry)); 361 361 else 362 362 seq_printf(seq, " %lu", 363 - snmp_fold_field((void **)net->mib.tcp_statistics, 363 + snmp_fold_field((void __percpu **)net->mib.tcp_statistics, 364 364 snmp4_tcp_list[i].entry)); 365 365 } 366 366 ··· 371 371 seq_puts(seq, "\nUdp:"); 372 372 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 373 373 seq_printf(seq, " %lu", 374 - snmp_fold_field((void **)net->mib.udp_statistics, 374 + snmp_fold_field((void __percpu **)net->mib.udp_statistics, 375 375 snmp4_udp_list[i].entry)); 376 376 377 377 /* the UDP and UDP-Lite MIBs are the same */ ··· 382 382 seq_puts(seq, "\nUdpLite:"); 383 383 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 384 384 seq_printf(seq, " %lu", 385 - snmp_fold_field((void **)net->mib.udplite_statistics, 385 + snmp_fold_field((void __percpu **)net->mib.udplite_statistics, 386 386 snmp4_udp_list[i].entry)); 387 387 388 388 seq_putc(seq, '\n'); ··· 419 419 seq_puts(seq, "\nTcpExt:"); 420 420 for (i = 0; snmp4_net_list[i].name != NULL; i++) 421 421 seq_printf(seq, " %lu", 422 - snmp_fold_field((void **)net->mib.net_statistics, 422 + snmp_fold_field((void __percpu **)net->mib.net_statistics, 423 423 snmp4_net_list[i].entry)); 424 424 425 425 seq_puts(seq, "\nIpExt:"); ··· 429 429 seq_puts(seq, "\nIpExt:"); 430 430 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) 431 431 seq_printf(seq, " %lu", 432 - snmp_fold_field((void **)net->mib.ip_statistics, 432 + snmp_fold_field((void __percpu **)net->mib.ip_statistics, 433 433 snmp4_ipextstats_list[i].entry)); 434 434 435 435 seq_putc(seq, '\n');
+1 -1
net/ipv4/route.c
··· 3334 3334 3335 3335 3336 3336 #ifdef CONFIG_NET_CLS_ROUTE 3337 - struct ip_rt_acct *ip_rt_acct __read_mostly; 3337 + struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; 3338 3338 #endif /* CONFIG_NET_CLS_ROUTE */ 3339 3339 3340 3340 static __initdata unsigned long rhash_entries;
+12 -9
net/ipv4/tcp.c
··· 2788 2788 2789 2789 #ifdef CONFIG_TCP_MD5SIG 2790 2790 static unsigned long tcp_md5sig_users; 2791 - static struct tcp_md5sig_pool **tcp_md5sig_pool; 2791 + static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool; 2792 2792 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2793 2793 2794 - static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) 2794 + static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) 2795 2795 { 2796 2796 int cpu; 2797 2797 for_each_possible_cpu(cpu) { ··· 2808 2808 2809 2809 void tcp_free_md5sig_pool(void) 2810 2810 { 2811 - struct tcp_md5sig_pool **pool = NULL; 2811 + struct tcp_md5sig_pool * __percpu *pool = NULL; 2812 2812 2813 2813 spin_lock_bh(&tcp_md5sig_pool_lock); 2814 2814 if (--tcp_md5sig_users == 0) { ··· 2822 2822 2823 2823 EXPORT_SYMBOL(tcp_free_md5sig_pool); 2824 2824 2825 - static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk) 2825 + static struct tcp_md5sig_pool * __percpu * 2826 + __tcp_alloc_md5sig_pool(struct sock *sk) 2826 2827 { 2827 2828 int cpu; 2828 - struct tcp_md5sig_pool **pool; 2829 + struct tcp_md5sig_pool * __percpu *pool; 2829 2830 2830 2831 pool = alloc_percpu(struct tcp_md5sig_pool *); 2831 2832 if (!pool) ··· 2853 2852 return NULL; 2854 2853 } 2855 2854 2856 - struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk) 2855 + struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk) 2857 2856 { 2858 - struct tcp_md5sig_pool **pool; 2857 + struct tcp_md5sig_pool * __percpu *pool; 2859 2858 int alloc = 0; 2860 2859 2861 2860 retry: ··· 2874 2873 2875 2874 if (alloc) { 2876 2875 /* we cannot hold spinlock here because this may sleep. 
*/ 2877 - struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk); 2876 + struct tcp_md5sig_pool * __percpu *p; 2877 + 2878 + p = __tcp_alloc_md5sig_pool(sk); 2878 2879 spin_lock_bh(&tcp_md5sig_pool_lock); 2879 2880 if (!p) { 2880 2881 tcp_md5sig_users--; ··· 2900 2897 2901 2898 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) 2902 2899 { 2903 - struct tcp_md5sig_pool **p; 2900 + struct tcp_md5sig_pool * __percpu *p; 2904 2901 spin_lock_bh(&tcp_md5sig_pool_lock); 2905 2902 p = tcp_md5sig_pool; 2906 2903 if (p)
+12 -12
net/ipv6/addrconf.c
··· 278 278 279 279 static int snmp6_alloc_dev(struct inet6_dev *idev) 280 280 { 281 - if (snmp_mib_init((void **)idev->stats.ipv6, 281 + if (snmp_mib_init((void __percpu **)idev->stats.ipv6, 282 282 sizeof(struct ipstats_mib)) < 0) 283 283 goto err_ip; 284 - if (snmp_mib_init((void **)idev->stats.icmpv6, 284 + if (snmp_mib_init((void __percpu **)idev->stats.icmpv6, 285 285 sizeof(struct icmpv6_mib)) < 0) 286 286 goto err_icmp; 287 - if (snmp_mib_init((void **)idev->stats.icmpv6msg, 287 + if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg, 288 288 sizeof(struct icmpv6msg_mib)) < 0) 289 289 goto err_icmpmsg; 290 290 291 291 return 0; 292 292 293 293 err_icmpmsg: 294 - snmp_mib_free((void **)idev->stats.icmpv6); 294 + snmp_mib_free((void __percpu **)idev->stats.icmpv6); 295 295 err_icmp: 296 - snmp_mib_free((void **)idev->stats.ipv6); 296 + snmp_mib_free((void __percpu **)idev->stats.ipv6); 297 297 err_ip: 298 298 return -ENOMEM; 299 299 } 300 300 301 301 static void snmp6_free_dev(struct inet6_dev *idev) 302 302 { 303 - snmp_mib_free((void **)idev->stats.icmpv6msg); 304 - snmp_mib_free((void **)idev->stats.icmpv6); 305 - snmp_mib_free((void **)idev->stats.ipv6); 303 + snmp_mib_free((void __percpu **)idev->stats.icmpv6msg); 304 + snmp_mib_free((void __percpu **)idev->stats.icmpv6); 305 + snmp_mib_free((void __percpu **)idev->stats.ipv6); 306 306 } 307 307 308 308 /* Nobody refers to this device, we may destroy it. 
*/ ··· 3766 3766 ); 3767 3767 } 3768 3768 3769 - static inline void __snmp6_fill_stats(u64 *stats, void **mib, int items, 3770 - int bytes) 3769 + static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, 3770 + int items, int bytes) 3771 3771 { 3772 3772 int i; 3773 3773 int pad = bytes - sizeof(u64) * items; ··· 3786 3786 { 3787 3787 switch(attrtype) { 3788 3788 case IFLA_INET6_STATS: 3789 - __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3789 + __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3790 3790 break; 3791 3791 case IFLA_INET6_ICMP6STATS: 3792 - __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3792 + __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3793 3793 break; 3794 3794 } 3795 3795 }
+14 -14
net/ipv6/af_inet6.c
··· 971 971 972 972 static int __net_init ipv6_init_mibs(struct net *net) 973 973 { 974 - if (snmp_mib_init((void **)net->mib.udp_stats_in6, 974 + if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6, 975 975 sizeof (struct udp_mib)) < 0) 976 976 return -ENOMEM; 977 - if (snmp_mib_init((void **)net->mib.udplite_stats_in6, 977 + if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6, 978 978 sizeof (struct udp_mib)) < 0) 979 979 goto err_udplite_mib; 980 - if (snmp_mib_init((void **)net->mib.ipv6_statistics, 980 + if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics, 981 981 sizeof(struct ipstats_mib)) < 0) 982 982 goto err_ip_mib; 983 - if (snmp_mib_init((void **)net->mib.icmpv6_statistics, 983 + if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics, 984 984 sizeof(struct icmpv6_mib)) < 0) 985 985 goto err_icmp_mib; 986 - if (snmp_mib_init((void **)net->mib.icmpv6msg_statistics, 986 + if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics, 987 987 sizeof(struct icmpv6msg_mib)) < 0) 988 988 goto err_icmpmsg_mib; 989 989 return 0; 990 990 991 991 err_icmpmsg_mib: 992 - snmp_mib_free((void **)net->mib.icmpv6_statistics); 992 + snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); 993 993 err_icmp_mib: 994 - snmp_mib_free((void **)net->mib.ipv6_statistics); 994 + snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); 995 995 err_ip_mib: 996 - snmp_mib_free((void **)net->mib.udplite_stats_in6); 996 + snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); 997 997 err_udplite_mib: 998 - snmp_mib_free((void **)net->mib.udp_stats_in6); 998 + snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); 999 999 return -ENOMEM; 1000 1000 } 1001 1001 1002 1002 static void ipv6_cleanup_mibs(struct net *net) 1003 1003 { 1004 - snmp_mib_free((void **)net->mib.udp_stats_in6); 1005 - snmp_mib_free((void **)net->mib.udplite_stats_in6); 1006 - snmp_mib_free((void **)net->mib.ipv6_statistics); 1007 - snmp_mib_free((void 
**)net->mib.icmpv6_statistics); 1008 - snmp_mib_free((void **)net->mib.icmpv6msg_statistics); 1004 + snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); 1005 + snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); 1006 + snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); 1007 + snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); 1008 + snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics); 1009 1009 } 1010 1010 1011 1011 static int __net_init inet6_net_init(struct net *net)
+13 -10
net/ipv6/proc.c
··· 136 136 SNMP_MIB_SENTINEL 137 137 }; 138 138 139 - static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) 139 + static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib) 140 140 { 141 141 char name[32]; 142 142 int i; ··· 170 170 return; 171 171 } 172 172 173 - static void snmp6_seq_show_item(struct seq_file *seq, void **mib, 173 + static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, 174 174 const struct snmp_mib *itemlist) 175 175 { 176 176 int i; ··· 183 183 { 184 184 struct net *net = (struct net *)seq->private; 185 185 186 - snmp6_seq_show_item(seq, (void **)net->mib.ipv6_statistics, 186 + snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics, 187 187 snmp6_ipstats_list); 188 - snmp6_seq_show_item(seq, (void **)net->mib.icmpv6_statistics, 188 + snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, 189 189 snmp6_icmp6_list); 190 - snmp6_seq_show_icmpv6msg(seq, (void **)net->mib.icmpv6msg_statistics); 191 - snmp6_seq_show_item(seq, (void **)net->mib.udp_stats_in6, 190 + snmp6_seq_show_icmpv6msg(seq, 191 + (void __percpu **)net->mib.icmpv6msg_statistics); 192 + snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6, 192 193 snmp6_udp6_list); 193 - snmp6_seq_show_item(seq, (void **)net->mib.udplite_stats_in6, 194 + snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6, 194 195 snmp6_udplite6_list); 195 196 return 0; 196 197 } ··· 214 213 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 215 214 216 215 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 217 - snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list); 218 - snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); 219 - snmp6_seq_show_icmpv6msg(seq, (void **)idev->stats.icmpv6msg); 216 + snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6, 217 + snmp6_ipstats_list); 218 + snmp6_seq_show_item(seq, (void __percpu 
**)idev->stats.icmpv6, 219 + snmp6_icmp6_list); 220 + snmp6_seq_show_icmpv6msg(seq, (void __percpu **)idev->stats.icmpv6msg); 220 221 return 0; 221 222 } 222 223
+1 -1
net/sctp/proc.c
··· 83 83 84 84 for (i = 0; sctp_snmp_list[i].name != NULL; i++) 85 85 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, 86 - snmp_fold_field((void **)sctp_statistics, 86 + snmp_fold_field((void __percpu **)sctp_statistics, 87 87 sctp_snmp_list[i].entry)); 88 88 89 89 return 0;
+3 -2
net/sctp/protocol.c
··· 996 996 997 997 static inline int init_sctp_mibs(void) 998 998 { 999 - return snmp_mib_init((void**)sctp_statistics, sizeof(struct sctp_mib)); 999 + return snmp_mib_init((void __percpu **)sctp_statistics, 1000 + sizeof(struct sctp_mib)); 1000 1001 } 1001 1002 1002 1003 static inline void cleanup_sctp_mibs(void) 1003 1004 { 1004 - snmp_mib_free((void**)sctp_statistics); 1005 + snmp_mib_free((void __percpu **)sctp_statistics); 1005 1006 } 1006 1007 1007 1008 static void sctp_v4_pf_init(void)
+8 -8
net/xfrm/xfrm_ipcomp.c
··· 30 30 31 31 struct ipcomp_tfms { 32 32 struct list_head list; 33 - struct crypto_comp **tfms; 33 + struct crypto_comp * __percpu *tfms; 34 34 int users; 35 35 }; 36 36 37 37 static DEFINE_MUTEX(ipcomp_resource_mutex); 38 - static void **ipcomp_scratches; 38 + static void * __percpu *ipcomp_scratches; 39 39 static int ipcomp_scratch_users; 40 40 static LIST_HEAD(ipcomp_tfms_list); 41 41 ··· 200 200 static void ipcomp_free_scratches(void) 201 201 { 202 202 int i; 203 - void **scratches; 203 + void * __percpu *scratches; 204 204 205 205 if (--ipcomp_scratch_users) 206 206 return; ··· 215 215 free_percpu(scratches); 216 216 } 217 217 218 - static void **ipcomp_alloc_scratches(void) 218 + static void * __percpu *ipcomp_alloc_scratches(void) 219 219 { 220 220 int i; 221 - void **scratches; 221 + void * __percpu *scratches; 222 222 223 223 if (ipcomp_scratch_users++) 224 224 return ipcomp_scratches; ··· 239 239 return scratches; 240 240 } 241 241 242 - static void ipcomp_free_tfms(struct crypto_comp **tfms) 242 + static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms) 243 243 { 244 244 struct ipcomp_tfms *pos; 245 245 int cpu; ··· 267 267 free_percpu(tfms); 268 268 } 269 269 270 - static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) 270 + static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name) 271 271 { 272 272 struct ipcomp_tfms *pos; 273 - struct crypto_comp **tfms; 273 + struct crypto_comp * __percpu *tfms; 274 274 int cpu; 275 275 276 276 /* This can be any valid CPU ID so we don't need locking. */
+3 -3
net/xfrm/xfrm_policy.c
··· 2428 2428 { 2429 2429 int rv; 2430 2430 2431 - if (snmp_mib_init((void **)net->mib.xfrm_statistics, 2431 + if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, 2432 2432 sizeof(struct linux_xfrm_mib)) < 0) 2433 2433 return -ENOMEM; 2434 2434 rv = xfrm_proc_init(net); 2435 2435 if (rv < 0) 2436 - snmp_mib_free((void **)net->mib.xfrm_statistics); 2436 + snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2437 2437 return rv; 2438 2438 } 2439 2439 2440 2440 static void xfrm_statistics_fini(struct net *net) 2441 2441 { 2442 2442 xfrm_proc_fini(net); 2443 - snmp_mib_free((void **)net->mib.xfrm_statistics); 2443 + snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2444 2444 } 2445 2445 #else 2446 2446 static int __net_init xfrm_statistics_init(struct net *net)
+2 -1
net/xfrm/xfrm_proc.c
··· 50 50 int i; 51 51 for (i=0; xfrm_mib_list[i].name; i++) 52 52 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, 53 - snmp_fold_field((void **)net->mib.xfrm_statistics, 53 + snmp_fold_field((void __percpu **) 54 + net->mib.xfrm_statistics, 54 55 xfrm_mib_list[i].entry)); 55 56 return 0; 56 57 }