Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/tsc: Provide ART base clock information for TSC

The core code provides a new mechanism to allow conversion between ART and
TSC. This makes it possible to replace the x86 specific ART/TSC conversion functions.

Prepare for removal by filling in the base clock conversion information for
ART and associating the base clock to the TSC clocksource.

The existing conversion functions will be removed once the usage sites are
converted over to the new model.

[ tglx: Massaged change log ]

Co-developed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Co-developed-by: Christopher S. Hall <christopher.s.hall@intel.com>
Signed-off-by: Christopher S. Hall <christopher.s.hall@intel.com>
Signed-off-by: Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240513103813.5666-3-lakshmi.sowjanya.d@intel.com

Authored by Lakshmi Sowjanya D and committed by Thomas Gleixner.
3a52886c 6b2e2997

+25 -18
+24 -18
arch/x86/kernel/tsc.c
··· 50 50 51 51 static int __read_mostly tsc_force_recalibrate; 52 52 53 - static u32 art_to_tsc_numerator; 54 - static u32 art_to_tsc_denominator; 55 - static u64 art_to_tsc_offset; 53 + static struct clocksource_base art_base_clk = { 54 + .id = CSID_X86_ART, 55 + }; 56 56 static bool have_art; 57 57 58 58 struct cyc2ns { ··· 1074 1074 */ 1075 1075 static void __init detect_art(void) 1076 1076 { 1077 - unsigned int unused[2]; 1077 + unsigned int unused; 1078 1078 1079 1079 if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF) 1080 1080 return; ··· 1089 1089 tsc_async_resets) 1090 1090 return; 1091 1091 1092 - cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator, 1093 - &art_to_tsc_numerator, unused, unused+1); 1092 + cpuid(ART_CPUID_LEAF, &art_base_clk.denominator, 1093 + &art_base_clk.numerator, &art_base_clk.freq_khz, &unused); 1094 1094 1095 - if (art_to_tsc_denominator < ART_MIN_DENOMINATOR) 1095 + art_base_clk.freq_khz /= KHZ; 1096 + if (art_base_clk.denominator < ART_MIN_DENOMINATOR) 1096 1097 return; 1097 1098 1098 - rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset); 1099 + rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset); 1099 1100 1100 1101 /* Make this sticky over multiple CPU init calls */ 1101 1102 setup_force_cpu_cap(X86_FEATURE_ART); ··· 1304 1303 { 1305 1304 u64 tmp, res, rem; 1306 1305 1307 - rem = do_div(art, art_to_tsc_denominator); 1306 + rem = do_div(art, art_base_clk.denominator); 1308 1307 1309 - res = art * art_to_tsc_numerator; 1310 - tmp = rem * art_to_tsc_numerator; 1308 + res = art * art_base_clk.numerator; 1309 + tmp = rem * art_base_clk.numerator; 1311 1310 1312 - do_div(tmp, art_to_tsc_denominator); 1313 - res += tmp + art_to_tsc_offset; 1311 + do_div(tmp, art_base_clk.denominator); 1312 + res += tmp + art_base_clk.offset; 1314 1313 1315 1314 return (struct system_counterval_t) { 1316 1315 .cs_id = have_art ? 
CSID_X86_TSC : CSID_GENERIC, ··· 1356 1355 }; 1357 1356 } 1358 1357 EXPORT_SYMBOL(convert_art_ns_to_tsc); 1359 - 1360 1358 1361 1359 static void tsc_refine_calibration_work(struct work_struct *work); 1362 1360 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); ··· 1458 1458 if (tsc_unstable) 1459 1459 goto unreg; 1460 1460 1461 - if (boot_cpu_has(X86_FEATURE_ART)) 1461 + if (boot_cpu_has(X86_FEATURE_ART)) { 1462 1462 have_art = true; 1463 + clocksource_tsc.base = &art_base_clk; 1464 + } 1463 1465 clocksource_register_khz(&clocksource_tsc, tsc_khz); 1464 1466 unreg: 1465 1467 clocksource_unregister(&clocksource_tsc_early); ··· 1486 1484 * the refined calibration and directly register it as a clocksource. 1487 1485 */ 1488 1486 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { 1489 - if (boot_cpu_has(X86_FEATURE_ART)) 1487 + if (boot_cpu_has(X86_FEATURE_ART)) { 1490 1488 have_art = true; 1489 + clocksource_tsc.base = &art_base_clk; 1490 + } 1491 1491 clocksource_register_khz(&clocksource_tsc, tsc_khz); 1492 1492 clocksource_unregister(&clocksource_tsc_early); 1493 1493 ··· 1513 1509 1514 1510 if (early) { 1515 1511 cpu_khz = x86_platform.calibrate_cpu(); 1516 - if (tsc_early_khz) 1512 + if (tsc_early_khz) { 1517 1513 tsc_khz = tsc_early_khz; 1518 - else 1514 + } else { 1519 1515 tsc_khz = x86_platform.calibrate_tsc(); 1516 + clocksource_tsc.freq_khz = tsc_khz; 1517 + } 1520 1518 } else { 1521 1519 /* We should not be here with non-native cpu calibration */ 1522 1520 WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
+1
include/linux/clocksource_ids.h
··· 9 9 CSID_X86_TSC_EARLY, 10 10 CSID_X86_TSC, 11 11 CSID_X86_KVM_CLK, 12 + CSID_X86_ART, 12 13 CSID_MAX, 13 14 }; 14 15