Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: Replace get_cpu_var with this_cpu_ptr

Replace uses of get_cpu_var for address calculation with this_cpu_ptr.

Cc: netdev@vger.kernel.org
Cc: Eric Dumazet <edumazet@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

authored by

Christoph Lameter and committed by
Tejun Heo
903ceff7 f7f66b05

+20 -20
+1 -1
include/net/netfilter/nf_conntrack.h
··· 242 242 DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); 243 243 static inline struct nf_conn *nf_ct_untracked_get(void) 244 244 { 245 - return &__raw_get_cpu_var(nf_conntrack_untracked); 245 + return raw_cpu_ptr(&nf_conntrack_untracked); 246 246 } 247 247 void nf_ct_untracked_status_or(unsigned long bits); 248 248
+3 -3
include/net/snmp.h
··· 168 168 169 169 #define SNMP_ADD_STATS64_BH(mib, field, addend) \ 170 170 do { \ 171 - __typeof__(*mib) *ptr = __this_cpu_ptr(mib); \ 171 + __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \ 172 172 u64_stats_update_begin(&ptr->syncp); \ 173 173 ptr->mibs[field] += addend; \ 174 174 u64_stats_update_end(&ptr->syncp); \ ··· 189 189 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) 190 190 #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ 191 191 do { \ 192 - __typeof__(*mib) *ptr; \ 193 - ptr = __this_cpu_ptr(mib); \ 192 + __typeof__(*mib) *ptr; \ 193 + ptr = raw_cpu_ptr((mib)); \ 194 194 u64_stats_update_begin(&ptr->syncp); \ 195 195 ptr->mibs[basefield##PKTS]++; \ 196 196 ptr->mibs[basefield##OCTETS] += addend; \
+7 -7
net/core/dev.c
··· 2153 2153 unsigned long flags; 2154 2154 2155 2155 local_irq_save(flags); 2156 - sd = &__get_cpu_var(softnet_data); 2156 + sd = this_cpu_ptr(&softnet_data); 2157 2157 q->next_sched = NULL; 2158 2158 *sd->output_queue_tailp = q; 2159 2159 sd->output_queue_tailp = &q->next_sched; ··· 3195 3195 static int rps_ipi_queued(struct softnet_data *sd) 3196 3196 { 3197 3197 #ifdef CONFIG_RPS 3198 - struct softnet_data *mysd = &__get_cpu_var(softnet_data); 3198 + struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 3199 3199 3200 3200 if (sd != mysd) { 3201 3201 sd->rps_ipi_next = mysd->rps_ipi_list; ··· 3222 3222 if (qlen < (netdev_max_backlog >> 1)) 3223 3223 return false; 3224 3224 3225 - sd = &__get_cpu_var(softnet_data); 3225 + sd = this_cpu_ptr(&softnet_data); 3226 3226 3227 3227 rcu_read_lock(); 3228 3228 fl = rcu_dereference(sd->flow_limit); ··· 3369 3369 3370 3370 static void net_tx_action(struct softirq_action *h) 3371 3371 { 3372 - struct softnet_data *sd = &__get_cpu_var(softnet_data); 3372 + struct softnet_data *sd = this_cpu_ptr(&softnet_data); 3373 3373 3374 3374 if (sd->completion_queue) { 3375 3375 struct sk_buff *clist; ··· 3794 3794 static void flush_backlog(void *arg) 3795 3795 { 3796 3796 struct net_device *dev = arg; 3797 - struct softnet_data *sd = &__get_cpu_var(softnet_data); 3797 + struct softnet_data *sd = this_cpu_ptr(&softnet_data); 3798 3798 struct sk_buff *skb, *tmp; 3799 3799 3800 3800 rps_lock(sd); ··· 4301 4301 unsigned long flags; 4302 4302 4303 4303 local_irq_save(flags); 4304 - ____napi_schedule(&__get_cpu_var(softnet_data), n); 4304 + ____napi_schedule(this_cpu_ptr(&softnet_data), n); 4305 4305 local_irq_restore(flags); 4306 4306 } 4307 4307 EXPORT_SYMBOL(__napi_schedule); ··· 4422 4422 4423 4423 static void net_rx_action(struct softirq_action *h) 4424 4424 { 4425 - struct softnet_data *sd = &__get_cpu_var(softnet_data); 4425 + struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4426 4426 unsigned long time_limit = jiffies + 2; 
4427 4427 int budget = netdev_budget; 4428 4428 void *have;
+1 -1
net/core/drop_monitor.c
··· 146 146 unsigned long flags; 147 147 148 148 local_irq_save(flags); 149 - data = &__get_cpu_var(dm_cpu_data); 149 + data = this_cpu_ptr(&dm_cpu_data); 150 150 spin_lock(&data->lock); 151 151 dskb = data->skb; 152 152
+1 -1
net/core/skbuff.c
··· 345 345 unsigned long flags; 346 346 347 347 local_irq_save(flags); 348 - nc = &__get_cpu_var(netdev_alloc_cache); 348 + nc = this_cpu_ptr(&netdev_alloc_cache); 349 349 if (unlikely(!nc->frag.page)) { 350 350 refill: 351 351 for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
+2 -2
net/ipv4/route.c
··· 1311 1311 if (rt_is_input_route(rt)) { 1312 1312 p = (struct rtable **)&nh->nh_rth_input; 1313 1313 } else { 1314 - p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output); 1314 + p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output); 1315 1315 } 1316 1316 orig = *p; 1317 1317 ··· 1939 1939 do_cache = false; 1940 1940 goto add; 1941 1941 } 1942 - prth = __this_cpu_ptr(nh->nh_pcpu_rth_output); 1942 + prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); 1943 1943 } 1944 1944 rth = rcu_dereference(*prth); 1945 1945 if (rt_cache_valid(rth)) {
+1 -1
net/ipv4/syncookies.c
··· 40 40 41 41 net_get_random_once(syncookie_secret, sizeof(syncookie_secret)); 42 42 43 - tmp = __get_cpu_var(ipv4_cookie_scratch); 43 + tmp = this_cpu_ptr(ipv4_cookie_scratch); 44 44 memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c])); 45 45 tmp[0] = (__force u32)saddr; 46 46 tmp[1] = (__force u32)daddr;
+1 -1
net/ipv4/tcp.c
··· 3058 3058 local_bh_disable(); 3059 3059 p = ACCESS_ONCE(tcp_md5sig_pool); 3060 3060 if (p) 3061 - return __this_cpu_ptr(p); 3061 + return raw_cpu_ptr(p); 3062 3062 3063 3063 local_bh_enable(); 3064 3064 return NULL;
+1 -1
net/ipv4/tcp_output.c
··· 842 842 843 843 /* queue this socket to tasklet queue */ 844 844 local_irq_save(flags); 845 - tsq = &__get_cpu_var(tsq_tasklet); 845 + tsq = this_cpu_ptr(&tsq_tasklet); 846 846 list_add(&tp->tsq_node, &tsq->head); 847 847 tasklet_schedule(&tsq->tasklet); 848 848 local_irq_restore(flags);
+1 -1
net/ipv6/syncookies.c
··· 67 67 68 68 net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret)); 69 69 70 - tmp = __get_cpu_var(ipv6_cookie_scratch); 70 + tmp = this_cpu_ptr(ipv6_cookie_scratch); 71 71 72 72 /* 73 73 * we have 320 bits of information to hash, copy in the remaining
+1 -1
net/rds/ib_rdma.c
··· 267 267 unsigned long *flag; 268 268 269 269 preempt_disable(); 270 - flag = &__get_cpu_var(clean_list_grace); 270 + flag = this_cpu_ptr(&clean_list_grace); 271 271 set_bit(CLEAN_LIST_BUSY_BIT, flag); 272 272 ret = llist_del_first(&pool->clean_list); 273 273 if (ret)