x86, UV: Shorten access to BAU statistics structure

Use a pointer from the per-cpu BAU control structure to the
per-cpu BAU statistics structure.
We nearly always know the first before needing the second, so
following the cached pointer is cheaper than recomputing the
per-cpu address each time (the pattern is sketched below).

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004aB-2k@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Cliff Wickman, committed by Ingo Molnar (712157aa, 50fb55ac)
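For illustration only, not part of the commit: a minimal user-space C sketch of the pattern the message describes, caching a pointer to the matching per-cpu statistics structure inside the per-cpu control structure so hot paths follow one stored pointer instead of recomputing a per-cpu address. The two struct names loosely mirror the patch; NCPUS, setup_controls(), record_retry() and main() are invented for this sketch.

/*
 * Sketch of the pointer-caching pattern (user-space model, not kernel
 * code).  The arrays stand in for the kernel's per-cpu areas.
 */
#include <stdio.h>

#define NCPUS 4			/* invented for the sketch */

struct ptc_stats {
	unsigned long d_retries;	/* destination-side retry count */
};

struct bau_control {
	int cpu;
	struct ptc_stats *statp;	/* cached pointer, set once at init */
};

static struct ptc_stats ptcstats[NCPUS];
static struct bau_control bau_control[NCPUS];

/* analogous to the patch's init loop: wire each control to its stats */
static void setup_controls(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		bau_control[cpu].cpu = cpu;
		bau_control[cpu].statp = &ptcstats[cpu];
	}
}

/* hot path: one load through bcp instead of an indexed per-cpu lookup */
static void record_retry(struct bau_control *bcp)
{
	struct ptc_stats *stat = bcp->statp; /* was &per_cpu(ptcstats, bcp->cpu) */

	stat->d_retries++;
}

int main(void)
{
	setup_controls();
	record_retry(&bau_control[2]);
	printf("cpu 2 retries: %lu\n", ptcstats[2].d_retries);
	return 0;
}

In the patch itself the wiring happens once, in the for_each_present_cpu() init loop (the hunk at line 1636 below), after which every hot-path per_cpu(ptcstats, ...) lookup collapses to bcp->statp.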

 arch/x86/include/asm/uv/uv_bau.h |  1 +
 arch/x86/kernel/tlb_uv.c         | 16 ++++++++--------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -332,6 +332,7 @@
 	struct bau_payload_queue_entry *bau_msg_head;
 	struct bau_control *uvhub_master;
 	struct bau_control *socket_master;
+	struct ptc_stats *statp;
 	unsigned long timeout_interval;
 	unsigned long set_bau_on_time;
 	atomic_t active_descriptor_count;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -153,7 +153,7 @@
 	struct ptc_stats *stat;
 
 	msg = mdp->msg;
-	stat = &per_cpu(ptcstats, bcp->cpu);
+	stat = bcp->statp;
 	stat->d_retries++;
 	/*
 	 * cancel any message from msg+1 to the retry itself
@@ -217,7 +217,7 @@
 	 * This must be a normal message, or retry of a normal message
 	 */
 	msg = mdp->msg;
-	stat = &per_cpu(ptcstats, bcp->cpu);
+	stat = bcp->statp;
 	if (msg->address == TLB_FLUSH_ALL) {
 		local_flush_tlb();
 		stat->d_alltlb++;
@@ -301,7 +301,7 @@
 
 	bcp = &per_cpu(bau_control, smp_processor_id());
 	rap = (struct reset_args *)ptr;
-	stat = &per_cpu(ptcstats, bcp->cpu);
+	stat = bcp->statp;
 	stat->d_resets++;
 
 	/*
@@ -419,7 +419,7 @@
 	unsigned long mask;
 	cycles_t ttime;
 	cycles_t timeout_time;
-	struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
+	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster;
 
 	hmaster = bcp->uvhub_master;
@@ -583,7 +583,7 @@
 	cycles_t time1;
 	cycles_t time2;
 	cycles_t elapsed;
-	struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
+	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *smaster = bcp->socket_master;
 	struct bau_control *hmaster = bcp->uvhub_master;
 
@@ -794,7 +794,7 @@
 		return cpumask;
 
 	bcp = &per_cpu(bau_control, cpu);
-	stat = &per_cpu(ptcstats, cpu);
+	stat = bcp->statp;
 
 	/* bau was disabled due to slow response */
 	if (bcp->baudisabled) {
@@ -903,7 +903,7 @@
 
 	time_start = get_cycles();
 	bcp = &per_cpu(bau_control, smp_processor_id());
-	stat = &per_cpu(ptcstats, smp_processor_id());
+	stat = bcp->statp;
 	msgdesc.va_queue_first = bcp->va_queue_first;
 	msgdesc.va_queue_last = bcp->va_queue_last;
 	msg = bcp->bau_msg_head;
@@ -1636,6 +1636,7 @@
 	for_each_present_cpu(cpu) {
 		bcp = &per_cpu(bau_control, cpu);
 		bcp->baudisabled = 0;
+		bcp->statp = &per_cpu(ptcstats, cpu);
 		/* time interval to catch a hardware stay-busy bug */
 		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
 		bcp->max_bau_concurrent = max_bau_concurrent;
@@ -1674,7 +1673,6 @@
 		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
 				       GFP_KERNEL, cpu_to_node(cur_cpu));
 
-	max_bau_concurrent = MAX_BAU_CONCURRENT;
 	uv_nshift = uv_hub_info->m_val;
 	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
 	nuvhubs = uv_num_possible_blades();