Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm/tsc: Rename native_read_tsc() to rdtsc()

Now that there is no paravirt TSC, the "native" prefix is
inappropriate. The function does RDTSC, so give it the obvious
name: rdtsc().

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/fd43e16281991f096c1e4d21574d9e1402c62d39.1434501121.git.luto@kernel.org
[ Ported it to v4.2-rc1. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Andy Lutomirski and committed by
Ingo Molnar
4ea1636b fe47ae6e

+59 -50
+1 -1
arch/x86/boot/compressed/aslr.c
··· 82 82 83 83 if (has_cpuflag(X86_FEATURE_TSC)) { 84 84 debug_putstr(" RDTSC"); 85 - raw = native_read_tsc(); 85 + raw = rdtsc(); 86 86 87 87 random ^= raw; 88 88 use_i8254 = false;
+1 -1
arch/x86/entry/vdso/vclock_gettime.c
··· 186 186 * but no one has ever seen it happen. 187 187 */ 188 188 rdtsc_barrier(); 189 - ret = (cycle_t)native_read_tsc(); 189 + ret = (cycle_t)rdtsc(); 190 190 191 191 last = gtod->cycle_last; 192 192
+10 -1
arch/x86/include/asm/msr.h
··· 109 109 extern int rdmsr_safe_regs(u32 regs[8]); 110 110 extern int wrmsr_safe_regs(u32 regs[8]); 111 111 112 - static __always_inline unsigned long long native_read_tsc(void) 112 + /** 113 + * rdtsc() - returns the current TSC without ordering constraints 114 + * 115 + * rdtsc() returns the result of RDTSC as a 64-bit integer. The 116 + * only ordering constraint it supplies is the ordering implied by 117 + * "asm volatile": it will put the RDTSC in the place you expect. The 118 + * CPU can and will speculatively execute that RDTSC, though, so the 119 + * results can be non-monotonic if compared on different CPUs. 120 + */ 121 + static __always_inline unsigned long long rdtsc(void) 113 122 { 114 123 DECLARE_ARGS(val, low, high); 115 124
+1 -1
arch/x86/include/asm/pvclock.h
··· 62 62 static __always_inline 63 63 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src) 64 64 { 65 - u64 delta = native_read_tsc() - src->tsc_timestamp; 65 + u64 delta = rdtsc() - src->tsc_timestamp; 66 66 return pvclock_scale_delta(delta, src->tsc_to_system_mul, 67 67 src->tsc_shift); 68 68 }
+1 -1
arch/x86/include/asm/stackprotector.h
··· 72 72 * on during the bootup the random pool has true entropy too. 73 73 */ 74 74 get_random_bytes(&canary, sizeof(canary)); 75 - tsc = native_read_tsc(); 75 + tsc = rdtsc(); 76 76 canary += tsc + (tsc << 32UL); 77 77 78 78 current->stack_canary = canary;
+1 -1
arch/x86/include/asm/tsc.h
··· 26 26 return 0; 27 27 #endif 28 28 29 - return native_read_tsc(); 29 + return rdtsc(); 30 30 } 31 31 32 32 extern void tsc_init(void);
+4 -4
arch/x86/kernel/apb_timer.c
··· 263 263 264 264 /* Verify whether apbt counter works */ 265 265 t1 = dw_apb_clocksource_read(clocksource_apbt); 266 - start = native_read_tsc(); 266 + start = rdtsc(); 267 267 268 268 /* 269 269 * We don't know the TSC frequency yet, but waiting for ··· 273 273 */ 274 274 do { 275 275 rep_nop(); 276 - now = native_read_tsc(); 276 + now = rdtsc(); 277 277 } while ((now - start) < 200000UL); 278 278 279 279 /* APBT is the only always on clocksource, it has to work! */ ··· 390 390 old = dw_apb_clocksource_read(clocksource_apbt); 391 391 old += loop; 392 392 393 - t1 = native_read_tsc(); 393 + t1 = rdtsc(); 394 394 395 395 do { 396 396 new = dw_apb_clocksource_read(clocksource_apbt); 397 397 } while (new < old); 398 398 399 - t2 = native_read_tsc(); 399 + t2 = rdtsc(); 400 400 401 401 shift = 5; 402 402 if (unlikely(loop >> shift == 0)) {
+4 -4
arch/x86/kernel/apic/apic.c
··· 457 457 { 458 458 u64 tsc; 459 459 460 - tsc = native_read_tsc(); 460 + tsc = rdtsc(); 461 461 wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); 462 462 return 0; 463 463 } ··· 592 592 unsigned long pm = acpi_pm_read_early(); 593 593 594 594 if (cpu_has_tsc) 595 - tsc = native_read_tsc(); 595 + tsc = rdtsc(); 596 596 597 597 switch (lapic_cal_loops++) { 598 598 case 0: ··· 1209 1209 long long max_loops = cpu_khz ? cpu_khz : 1000000; 1210 1210 1211 1211 if (cpu_has_tsc) 1212 - tsc = native_read_tsc(); 1212 + tsc = rdtsc(); 1213 1213 1214 1214 if (disable_apic) { 1215 1215 disable_ioapic_support(); ··· 1293 1293 } 1294 1294 if (queued) { 1295 1295 if (cpu_has_tsc && cpu_khz) { 1296 - ntsc = native_read_tsc(); 1296 + ntsc = rdtsc(); 1297 1297 max_loops = (cpu_khz << 10) - (ntsc - tsc); 1298 1298 } else 1299 1299 max_loops--;
+2 -2
arch/x86/kernel/cpu/amd.c
··· 125 125 126 126 n = K6_BUG_LOOP; 127 127 f_vide = vide; 128 - d = native_read_tsc(); 128 + d = rdtsc(); 129 129 while (n--) 130 130 f_vide(); 131 - d2 = native_read_tsc(); 131 + d2 = rdtsc(); 132 132 d = d2-d; 133 133 134 134 if (d > 20*K6_BUG_LOOP)
+2 -2
arch/x86/kernel/cpu/mcheck/mce.c
··· 125 125 { 126 126 memset(m, 0, sizeof(struct mce)); 127 127 m->cpu = m->extcpu = smp_processor_id(); 128 - m->tsc = native_read_tsc(); 128 + m->tsc = rdtsc(); 129 129 /* We hope get_seconds stays lockless */ 130 130 m->time = get_seconds(); 131 131 m->cpuvendor = boot_cpu_data.x86_vendor; ··· 1784 1784 { 1785 1785 unsigned long *cpu_tsc = (unsigned long *)data; 1786 1786 1787 - cpu_tsc[smp_processor_id()] = native_read_tsc(); 1787 + cpu_tsc[smp_processor_id()] = rdtsc(); 1788 1788 } 1789 1789 1790 1790 static int mce_apei_read_done;
+1 -1
arch/x86/kernel/espfix_64.c
··· 110 110 */ 111 111 if (!arch_get_random_long(&rand)) { 112 112 /* The constant is an arbitrary large prime */ 113 - rand = native_read_tsc(); 113 + rand = rdtsc(); 114 114 rand *= 0xc345c6b72fd16123UL; 115 115 } 116 116
+2 -2
arch/x86/kernel/hpet.c
··· 735 735 736 736 /* Verify whether hpet counter works */ 737 737 t1 = hpet_readl(HPET_COUNTER); 738 - start = native_read_tsc(); 738 + start = rdtsc(); 739 739 740 740 /* 741 741 * We don't know the TSC frequency yet, but waiting for ··· 745 745 */ 746 746 do { 747 747 rep_nop(); 748 - now = native_read_tsc(); 748 + now = rdtsc(); 749 749 } while ((now - start) < 200000UL); 750 750 751 751 if (t1 == hpet_readl(HPET_COUNTER)) {
+1 -1
arch/x86/kernel/trace_clock.c
··· 15 15 u64 ret; 16 16 17 17 rdtsc_barrier(); 18 - ret = native_read_tsc(); 18 + ret = rdtsc(); 19 19 20 20 return ret; 21 21 }
+2 -2
arch/x86/kernel/tsc.c
··· 248 248 249 249 data = cyc2ns_write_begin(cpu); 250 250 251 - tsc_now = native_read_tsc(); 251 + tsc_now = rdtsc(); 252 252 ns_now = cycles_2_ns(tsc_now); 253 253 254 254 /* ··· 290 290 } 291 291 292 292 /* read the Time Stamp Counter: */ 293 - tsc_now = native_read_tsc(); 293 + tsc_now = rdtsc(); 294 294 295 295 /* return the value in ns */ 296 296 return cycles_2_ns(tsc_now);
+2 -2
arch/x86/kvm/lapic.c
··· 1172 1172 1173 1173 tsc_deadline = apic->lapic_timer.expired_tscdeadline; 1174 1174 apic->lapic_timer.expired_tscdeadline = 0; 1175 - guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); 1175 + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc()); 1176 1176 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline); 1177 1177 1178 1178 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */ ··· 1240 1240 local_irq_save(flags); 1241 1241 1242 1242 now = apic->lapic_timer.timer.base->get_time(); 1243 - guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); 1243 + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc()); 1244 1244 if (likely(tscdeadline > guest_tsc)) { 1245 1245 ns = (tscdeadline - guest_tsc) * 1000000ULL; 1246 1246 do_div(ns, this_tsc_khz);
+2 -2
arch/x86/kvm/svm.c
··· 1080 1080 { 1081 1081 u64 tsc; 1082 1082 1083 - tsc = svm_scale_tsc(vcpu, native_read_tsc()); 1083 + tsc = svm_scale_tsc(vcpu, rdtsc()); 1084 1084 1085 1085 return target_tsc - tsc; 1086 1086 } ··· 3079 3079 switch (msr_info->index) { 3080 3080 case MSR_IA32_TSC: { 3081 3081 msr_info->data = svm->vmcb->control.tsc_offset + 3082 - svm_scale_tsc(vcpu, native_read_tsc()); 3082 + svm_scale_tsc(vcpu, rdtsc()); 3083 3083 3084 3084 break; 3085 3085 }
+2 -2
arch/x86/kvm/vmx.c
··· 2236 2236 { 2237 2237 u64 host_tsc, tsc_offset; 2238 2238 2239 - host_tsc = native_read_tsc(); 2239 + host_tsc = rdtsc(); 2240 2240 tsc_offset = vmcs_read64(TSC_OFFSET); 2241 2241 return host_tsc + tsc_offset; 2242 2242 } ··· 2317 2317 2318 2318 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2319 2319 { 2320 - return target_tsc - native_read_tsc(); 2320 + return target_tsc - rdtsc(); 2321 2321 } 2322 2322 2323 2323 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
+6 -6
arch/x86/kvm/x86.c
··· 1455 1455 * but no one has ever seen it happen. 1456 1456 */ 1457 1457 rdtsc_barrier(); 1458 - ret = (cycle_t)native_read_tsc(); 1458 + ret = (cycle_t)rdtsc(); 1459 1459 1460 1460 last = pvclock_gtod_data.clock.cycle_last; 1461 1461 ··· 1646 1646 return 1; 1647 1647 } 1648 1648 if (!use_master_clock) { 1649 - host_tsc = native_read_tsc(); 1649 + host_tsc = rdtsc(); 1650 1650 kernel_ns = get_kernel_ns(); 1651 1651 } 1652 1652 ··· 2810 2810 2811 2811 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { 2812 2812 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : 2813 - native_read_tsc() - vcpu->arch.last_host_tsc; 2813 + rdtsc() - vcpu->arch.last_host_tsc; 2814 2814 if (tsc_delta < 0) 2815 2815 mark_tsc_unstable("KVM discovered backwards TSC"); 2816 2816 if (check_tsc_unstable()) { ··· 2838 2838 { 2839 2839 kvm_x86_ops->vcpu_put(vcpu); 2840 2840 kvm_put_guest_fpu(vcpu); 2841 - vcpu->arch.last_host_tsc = native_read_tsc(); 2841 + vcpu->arch.last_host_tsc = rdtsc(); 2842 2842 } 2843 2843 2844 2844 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, ··· 6623 6623 hw_breakpoint_restore(); 6624 6624 6625 6625 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, 6626 - native_read_tsc()); 6626 + rdtsc()); 6627 6627 6628 6628 vcpu->mode = OUTSIDE_GUEST_MODE; 6629 6629 smp_wmb(); ··· 7437 7437 if (ret != 0) 7438 7438 return ret; 7439 7439 7440 - local_tsc = native_read_tsc(); 7440 + local_tsc = rdtsc(); 7441 7441 stable = !check_tsc_unstable(); 7442 7442 list_for_each_entry(kvm, &vm_list, vm_list) { 7443 7443 kvm_for_each_vcpu(i, vcpu, kvm) {
+4 -4
arch/x86/lib/delay.c
··· 55 55 preempt_disable(); 56 56 cpu = smp_processor_id(); 57 57 rdtsc_barrier(); 58 - bclock = native_read_tsc(); 58 + bclock = rdtsc(); 59 59 for (;;) { 60 60 rdtsc_barrier(); 61 - now = native_read_tsc(); 61 + now = rdtsc(); 62 62 if ((now - bclock) >= loops) 63 63 break; 64 64 ··· 80 80 loops -= (now - bclock); 81 81 cpu = smp_processor_id(); 82 82 rdtsc_barrier(); 83 - bclock = native_read_tsc(); 83 + bclock = rdtsc(); 84 84 } 85 85 } 86 86 preempt_enable(); ··· 100 100 int read_current_timer(unsigned long *timer_val) 101 101 { 102 102 if (delay_fn == delay_tsc) { 103 - *timer_val = native_read_tsc(); 103 + *timer_val = rdtsc(); 104 104 return 0; 105 105 } 106 106 return -1;
+1 -1
drivers/cpufreq/intel_pstate.c
··· 765 765 local_irq_save(flags); 766 766 rdmsrl(MSR_IA32_APERF, aperf); 767 767 rdmsrl(MSR_IA32_MPERF, mperf); 768 - tsc = native_read_tsc(); 768 + tsc = rdtsc(); 769 769 local_irq_restore(flags); 770 770 771 771 cpu->last_sample_time = cpu->sample.time;
+2 -2
drivers/input/gameport/gameport.c
··· 149 149 150 150 for(i = 0; i < 50; i++) { 151 151 local_irq_save(flags); 152 - t1 = native_read_tsc(); 152 + t1 = rdtsc(); 153 153 for (t = 0; t < 50; t++) gameport_read(gameport); 154 - t2 = native_read_tsc(); 154 + t2 = rdtsc(); 155 155 local_irq_restore(flags); 156 156 udelay(i * 10); 157 157 if (t2 - t1 < tx) tx = t2 - t1;
+2 -2
drivers/input/joystick/analog.c
··· 143 143 144 144 #include <linux/i8253.h> 145 145 146 - #define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)native_read_tsc(); else x = get_time_pit(); } while (0) 146 + #define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0) 147 147 #define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0))) 148 148 #define TIME_NAME (cpu_has_tsc?"TSC":"PIT") 149 149 static unsigned int get_time_pit(void) ··· 160 160 return count; 161 161 } 162 162 #elif defined(__x86_64__) 163 - #define GET_TIME(x) do { x = (unsigned int)native_read_tsc(); } while (0) 163 + #define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0) 164 164 #define DELTA(x,y) ((y)-(x)) 165 165 #define TIME_NAME "TSC" 166 166 #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
+1 -1
drivers/net/hamradio/baycom_epp.c
··· 638 638 #define GETTICK(x) \ 639 639 ({ \ 640 640 if (cpu_has_tsc) \ 641 - x = (unsigned int)native_read_tsc(); \ 641 + x = (unsigned int)rdtsc(); \ 642 642 }) 643 643 #else /* __i386__ */ 644 644 #define GETTICK(x)
+2 -2
drivers/thermal/intel_powerclamp.c
··· 340 340 341 341 /* check result for the last window */ 342 342 msr_now = pkg_state_counter(); 343 - tsc_now = native_read_tsc(); 343 + tsc_now = rdtsc(); 344 344 345 345 /* calculate pkg cstate vs tsc ratio */ 346 346 if (!msr_last || !tsc_last) ··· 482 482 u64 val64; 483 483 484 484 msr_now = pkg_state_counter(); 485 - tsc_now = native_read_tsc(); 485 + tsc_now = rdtsc(); 486 486 jiffies_now = jiffies; 487 487 488 488 /* calculate pkg cstate vs tsc ratio */
+2 -2
tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
··· 81 81 82 82 printk(KERN_DEBUG "start--> \n"); 83 83 then = read_pmtmr(); 84 - then_tsc = native_read_tsc(); 84 + then_tsc = rdtsc(); 85 85 for (i=0;i<20;i++) { 86 86 mdelay(100); 87 87 now = read_pmtmr(); 88 - now_tsc = native_read_tsc(); 88 + now_tsc = rdtsc(); 89 89 diff = (now - then) & 0xFFFFFF; 90 90 diff_tsc = now_tsc - then_tsc; 91 91 printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);