Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

x86/msr: Rename 'wrmsrl()' to 'wrmsrq()'

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
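
The change is a mechanical rename: every call keeps the same (MSR, 64-bit value) arguments, only the 'l' suffix becomes 'q', matching the rdmsrq() reads already visible in the hunks below. A minimal before/after sketch, reusing the MSR and expression from the apic.c hunk further down purely for illustration:

        /* old spelling */
        wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));

        /* new spelling, same arguments and behaviour */
        wrmsrq(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));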

Ingo Molnar 78255eb2 c435e608

+436 -436
+2 -2
arch/x86/events/amd/brs.c
··· 187 187 /* 188 188 * Mark first entry as poisoned 189 189 */ 190 - wrmsrl(brs_to(0), BRS_POISON); 190 + wrmsrq(brs_to(0), BRS_POISON); 191 191 } 192 192 193 193 int __init amd_brs_init(void) ··· 371 371 idx = amd_brs_get_tos(&cfg); 372 372 373 373 /* Poison target of entry */ 374 - wrmsrl(brs_to(idx), BRS_POISON); 374 + wrmsrq(brs_to(idx), BRS_POISON); 375 375 } 376 376 377 377 /*
+4 -4
arch/x86/events/amd/core.c
··· 563 563 return; 564 564 565 565 /* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */ 566 - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0); 566 + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0); 567 567 568 568 /* 569 569 * Clear freeze and overflow bits i.e. PerfCntrGLobalStatus.LbrFreeze 570 570 * and PerfCntrGLobalStatus.PerfCntrOvfl 571 571 */ 572 - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, 572 + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, 573 573 GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask); 574 574 } 575 575 ··· 651 651 652 652 static __always_inline void amd_pmu_set_global_ctl(u64 ctl) 653 653 { 654 - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl); 654 + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl); 655 655 } 656 656 657 657 static inline u64 amd_pmu_get_global_status(void) ··· 672 672 * clears the same bit in PerfCntrGlobalStatus 673 673 */ 674 674 675 - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status); 675 + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status); 676 676 } 677 677 678 678 static bool amd_pmu_test_overflow_topbit(int idx)
+4 -4
arch/x86/events/amd/ibs.c
··· 435 435 u64 tmp = hwc->config | config; 436 436 437 437 if (perf_ibs->fetch_count_reset_broken) 438 - wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask); 438 + wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask); 439 439 440 - wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask); 440 + wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask); 441 441 } 442 442 443 443 /* ··· 452 452 { 453 453 config &= ~perf_ibs->cnt_mask; 454 454 if (boot_cpu_data.x86 == 0x10) 455 - wrmsrl(hwc->config_base, config); 455 + wrmsrq(hwc->config_base, config); 456 456 config &= ~perf_ibs->enable_mask; 457 - wrmsrl(hwc->config_base, config); 457 + wrmsrq(hwc->config_base, config); 458 458 } 459 459 460 460 /*
+6 -6
arch/x86/events/amd/lbr.c
··· 61 61 62 62 static __always_inline void amd_pmu_lbr_set_from(unsigned int idx, u64 val) 63 63 { 64 - wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val); 64 + wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val); 65 65 } 66 66 67 67 static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val) 68 68 { 69 - wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); 69 + wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); 70 70 } 71 71 72 72 static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx) ··· 333 333 334 334 cpuc->last_task_ctx = NULL; 335 335 cpuc->last_log_id = 0; 336 - wrmsrl(MSR_AMD64_LBR_SELECT, 0); 336 + wrmsrq(MSR_AMD64_LBR_SELECT, 0); 337 337 } 338 338 339 339 void amd_pmu_lbr_add(struct perf_event *event) ··· 396 396 /* Set hardware branch filter */ 397 397 if (cpuc->lbr_select) { 398 398 lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK; 399 - wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select); 399 + wrmsrq(MSR_AMD64_LBR_SELECT, lbr_select); 400 400 } 401 401 402 402 if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { 403 403 rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl); 404 - wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 404 + wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 405 405 } 406 406 407 407 rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); 408 - wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN); 408 + wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN); 409 409 } 410 410 411 411 void amd_pmu_lbr_disable_all(void)
+5 -5
arch/x86/events/amd/uncore.c
··· 121 121 struct hw_perf_event *hwc = &event->hw; 122 122 123 123 if (flags & PERF_EF_RELOAD) 124 - wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count)); 124 + wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count)); 125 125 126 126 hwc->state = 0; 127 - wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE)); 127 + wrmsrq(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE)); 128 128 perf_event_update_userpage(event); 129 129 } 130 130 ··· 132 132 { 133 133 struct hw_perf_event *hwc = &event->hw; 134 134 135 - wrmsrl(hwc->config_base, hwc->config); 135 + wrmsrq(hwc->config_base, hwc->config); 136 136 hwc->state |= PERF_HES_STOPPED; 137 137 138 138 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { ··· 883 883 struct hw_perf_event *hwc = &event->hw; 884 884 885 885 if (flags & PERF_EF_RELOAD) 886 - wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count)); 886 + wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count)); 887 887 888 888 hwc->state = 0; 889 - wrmsrl(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC)); 889 + wrmsrq(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC)); 890 890 perf_event_update_userpage(event); 891 891 } 892 892
+6 -6
arch/x86/events/core.c
··· 697 697 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) 698 698 continue; 699 699 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 700 - wrmsrl(x86_pmu_config_addr(idx), val); 700 + wrmsrq(x86_pmu_config_addr(idx), val); 701 701 if (is_counter_pair(hwc)) 702 - wrmsrl(x86_pmu_config_addr(idx + 1), 0); 702 + wrmsrq(x86_pmu_config_addr(idx + 1), 0); 703 703 } 704 704 } 705 705 ··· 1420 1420 */ 1421 1421 local64_set(&hwc->prev_count, (u64)-left); 1422 1422 1423 - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); 1423 + wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); 1424 1424 1425 1425 /* 1426 1426 * Sign extend the Merge event counter's upper 16 bits since 1427 1427 * we currently declare a 48-bit counter width 1428 1428 */ 1429 1429 if (is_counter_pair(hwc)) 1430 - wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff); 1430 + wrmsrq(x86_pmu_event_addr(idx + 1), 0xffff); 1431 1431 1432 1432 perf_event_update_userpage(event); 1433 1433 ··· 2496 2496 if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask))) 2497 2497 continue; 2498 2498 2499 - wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0); 2499 + wrmsrq(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0); 2500 2500 } else { 2501 - wrmsrl(x86_pmu_event_addr(i), 0); 2501 + wrmsrq(x86_pmu_event_addr(i), 0); 2502 2502 } 2503 2503 } 2504 2504
+22 -22
arch/x86/events/intel/core.c
··· 2285 2285 { 2286 2286 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2287 2287 2288 - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2288 + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2289 2289 2290 2290 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 2291 2291 intel_pmu_disable_bts(); ··· 2306 2306 intel_pmu_lbr_enable_all(pmi); 2307 2307 2308 2308 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { 2309 - wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); 2309 + wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); 2310 2310 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; 2311 2311 } 2312 2312 2313 - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 2313 + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 2314 2314 intel_ctrl & ~cpuc->intel_ctrl_guest_mask); 2315 2315 2316 2316 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { ··· 2426 2426 } 2427 2427 2428 2428 for (i = 0; i < 4; i++) { 2429 - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); 2430 - wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); 2429 + wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); 2430 + wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); 2431 2431 } 2432 2432 2433 - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); 2434 - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 2433 + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); 2434 + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 2435 2435 2436 2436 for (i = 0; i < 4; i++) { 2437 2437 event = cpuc->events[i]; ··· 2441 2441 __x86_pmu_enable_event(&event->hw, 2442 2442 ARCH_PERFMON_EVENTSEL_ENABLE); 2443 2443 } else 2444 - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); 2444 + wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); 2445 2445 } 2446 2446 } 2447 2447 ··· 2458 2458 2459 2459 if (cpuc->tfa_shadow != val) { 2460 2460 cpuc->tfa_shadow = val; 2461 - wrmsrl(MSR_TSX_FORCE_ABORT, val); 2461 + wrmsrq(MSR_TSX_FORCE_ABORT, val); 2462 2462 } 2463 2463 } 2464 2464 ··· 2496 2496 2497 2497 static inline void intel_pmu_ack_status(u64 ack) 2498 2498 { 2499 - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 2499 + wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 2500 2500 } 2501 2501 2502 2502 static inline bool event_is_checkpointed(struct perf_event *event) ··· 2619 2619 * Don't need to clear them again. 2620 2620 */ 2621 2621 if (left == x86_pmu.max_period) { 2622 - wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2623 - wrmsrl(MSR_PERF_METRICS, 0); 2622 + wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); 2623 + wrmsrq(MSR_PERF_METRICS, 0); 2624 2624 hwc->saved_slots = 0; 2625 2625 hwc->saved_metric = 0; 2626 2626 } 2627 2627 2628 2628 if ((hwc->saved_slots) && is_slots_event(event)) { 2629 - wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); 2630 - wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); 2629 + wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); 2630 + wrmsrq(MSR_PERF_METRICS, hwc->saved_metric); 2631 2631 } 2632 2632 2633 2633 perf_event_update_userpage(event); ··· 2773 2773 2774 2774 if (reset) { 2775 2775 /* The fixed counter 3 has to be written before the PERF_METRICS. 
*/ 2776 - wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2777 - wrmsrl(MSR_PERF_METRICS, 0); 2776 + wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); 2777 + wrmsrq(MSR_PERF_METRICS, 0); 2778 2778 if (event) 2779 2779 update_saved_topdown_regs(event, 0, 0, metric_end); 2780 2780 } ··· 2937 2937 */ 2938 2938 if (unlikely(event_is_checkpointed(event))) { 2939 2939 /* No race with NMIs because the counter should not be armed */ 2940 - wrmsrl(event->hw.event_base, 0); 2940 + wrmsrq(event->hw.event_base, 0); 2941 2941 local64_set(&event->hw.prev_count, 0); 2942 2942 } 2943 2943 return static_call(x86_pmu_set_period)(event); ··· 2991 2991 /* Ack all overflows and disable fixed counters */ 2992 2992 if (x86_pmu.version >= 2) { 2993 2993 intel_pmu_ack_status(intel_pmu_get_status()); 2994 - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2994 + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2995 2995 } 2996 2996 2997 2997 /* Reset LBRs and LBR freezing */ ··· 3103 3103 * Update the MSR if pebs_enabled is changed. 3104 3104 */ 3105 3105 if (pebs_enabled != cpuc->pebs_enabled) 3106 - wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 3106 + wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 3107 3107 } 3108 3108 3109 3109 /* ··· 5614 5614 return false; 5615 5615 5616 5616 /* 5617 - * Only change the bits which can be updated by wrmsrl. 5617 + * Only change the bits which can be updated by wrmsrq. 5618 5618 */ 5619 5619 val_tmp = val_old ^ mask; 5620 5620 ··· 5626 5626 return false; 5627 5627 5628 5628 /* 5629 - * Quirk only affects validation in wrmsr(), so wrmsrl()'s value 5629 + * Quirk only affects validation in wrmsr(), so wrmsrq()'s value 5630 5630 * should equal rdmsrq()'s even with the quirk. 5631 5631 */ 5632 5632 if (val_new != val_tmp) ··· 5638 5638 /* Here it's sure that the MSR can be safely accessed. 5639 5639 * Restore the old value and return. 5640 5640 */ 5641 - wrmsrl(msr, val_old); 5641 + wrmsrq(msr, val_old); 5642 5642 5643 5643 return true; 5644 5644 }
+5 -5
arch/x86/events/intel/ds.c
··· 1515 1515 else 1516 1516 value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx]; 1517 1517 } 1518 - wrmsrl(base + idx, value); 1518 + wrmsrq(base + idx, value); 1519 1519 } 1520 1520 1521 1521 static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) ··· 1552 1552 */ 1553 1553 intel_pmu_drain_pebs_buffer(); 1554 1554 adaptive_pebs_record_size_update(); 1555 - wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg); 1555 + wrmsrq(MSR_PEBS_DATA_CFG, pebs_data_cfg); 1556 1556 cpuc->active_pebs_data_cfg = pebs_data_cfg; 1557 1557 } 1558 1558 } ··· 1615 1615 intel_pmu_pebs_via_pt_disable(event); 1616 1616 1617 1617 if (cpuc->enabled) 1618 - wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 1618 + wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 1619 1619 1620 1620 hwc->config |= ARCH_PERFMON_EVENTSEL_INT; 1621 1621 } ··· 1625 1625 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1626 1626 1627 1627 if (cpuc->pebs_enabled) 1628 - wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 1628 + wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 1629 1629 } 1630 1630 1631 1631 void intel_pmu_pebs_disable_all(void) ··· 2771 2771 if (!x86_pmu.bts && !x86_pmu.pebs) 2772 2772 return; 2773 2773 2774 - wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); 2774 + wrmsrq(MSR_IA32_DS_AREA, (unsigned long)ds); 2775 2775 }
+3 -3
arch/x86/events/intel/knc.c
··· 161 161 162 162 rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 163 163 val &= ~(KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1); 164 - wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 164 + wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 165 165 } 166 166 167 167 static void knc_pmu_enable_all(int added) ··· 170 170 171 171 rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 172 172 val |= (KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1); 173 - wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 173 + wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 174 174 } 175 175 176 176 static inline void ··· 207 207 208 208 static inline void knc_pmu_ack_status(u64 ack) 209 209 { 210 - wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack); 210 + wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack); 211 211 } 212 212 213 213 static int knc_pmu_handle_irq(struct pt_regs *regs)
+14 -14
arch/x86/events/intel/lbr.c
··· 137 137 if (cpuc->lbr_sel) 138 138 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask; 139 139 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel) 140 - wrmsrl(MSR_LBR_SELECT, lbr_select); 140 + wrmsrq(MSR_LBR_SELECT, lbr_select); 141 141 142 142 rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 143 143 orig_debugctl = debugctl; ··· 155 155 debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI; 156 156 157 157 if (orig_debugctl != debugctl) 158 - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 158 + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 159 159 160 160 if (static_cpu_has(X86_FEATURE_ARCH_LBR)) 161 - wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN); 161 + wrmsrq(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN); 162 162 } 163 163 164 164 void intel_pmu_lbr_reset_32(void) ··· 166 166 int i; 167 167 168 168 for (i = 0; i < x86_pmu.lbr_nr; i++) 169 - wrmsrl(x86_pmu.lbr_from + i, 0); 169 + wrmsrq(x86_pmu.lbr_from + i, 0); 170 170 } 171 171 172 172 void intel_pmu_lbr_reset_64(void) ··· 174 174 int i; 175 175 176 176 for (i = 0; i < x86_pmu.lbr_nr; i++) { 177 - wrmsrl(x86_pmu.lbr_from + i, 0); 178 - wrmsrl(x86_pmu.lbr_to + i, 0); 177 + wrmsrq(x86_pmu.lbr_from + i, 0); 178 + wrmsrq(x86_pmu.lbr_to + i, 0); 179 179 if (x86_pmu.lbr_has_info) 180 - wrmsrl(x86_pmu.lbr_info + i, 0); 180 + wrmsrq(x86_pmu.lbr_info + i, 0); 181 181 } 182 182 } 183 183 184 184 static void intel_pmu_arch_lbr_reset(void) 185 185 { 186 186 /* Write to ARCH_LBR_DEPTH MSR, all LBR entries are reset to 0 */ 187 - wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr); 187 + wrmsrq(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr); 188 188 } 189 189 190 190 void intel_pmu_lbr_reset(void) ··· 199 199 cpuc->last_task_ctx = NULL; 200 200 cpuc->last_log_id = 0; 201 201 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select) 202 - wrmsrl(MSR_LBR_SELECT, 0); 202 + wrmsrq(MSR_LBR_SELECT, 0); 203 203 } 204 204 205 205 /* ··· 282 282 static __always_inline void wrlbr_from(unsigned int idx, u64 val) 283 283 { 284 284 val = lbr_from_signext_quirk_wr(val); 285 - wrmsrl(x86_pmu.lbr_from + idx, val); 285 + wrmsrq(x86_pmu.lbr_from + idx, val); 286 286 } 287 287 288 288 static __always_inline void wrlbr_to(unsigned int idx, u64 val) 289 289 { 290 - wrmsrl(x86_pmu.lbr_to + idx, val); 290 + wrmsrq(x86_pmu.lbr_to + idx, val); 291 291 } 292 292 293 293 static __always_inline void wrlbr_info(unsigned int idx, u64 val) 294 294 { 295 - wrmsrl(x86_pmu.lbr_info + idx, val); 295 + wrmsrq(x86_pmu.lbr_info + idx, val); 296 296 } 297 297 298 298 static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr) ··· 380 380 wrlbr_info(lbr_idx, 0); 381 381 } 382 382 383 - wrmsrl(x86_pmu.lbr_tos, tos); 383 + wrmsrq(x86_pmu.lbr_tos, tos); 384 384 385 385 if (cpuc->lbr_select) 386 - wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); 386 + wrmsrq(MSR_LBR_SELECT, task_ctx->lbr_sel); 387 387 } 388 388 389 389 static void intel_pmu_arch_lbr_restore(void *ctx)
+2 -2
arch/x86/events/intel/p4.c
··· 861 861 /* an official way for overflow indication */ 862 862 rdmsrq(hwc->config_base, v); 863 863 if (v & P4_CCCR_OVF) { 864 - wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF); 864 + wrmsrq(hwc->config_base, v & ~P4_CCCR_OVF); 865 865 return 1; 866 866 } 867 867 ··· 1024 1024 * 1025 1025 * the former idea is taken from OProfile code 1026 1026 */ 1027 - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); 1027 + wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); 1028 1028 } 1029 1029 1030 1030 return ret;
+2 -2
arch/x86/events/intel/p6.c
··· 144 144 /* p6 only has one enable register */ 145 145 rdmsrq(MSR_P6_EVNTSEL0, val); 146 146 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 147 - wrmsrl(MSR_P6_EVNTSEL0, val); 147 + wrmsrq(MSR_P6_EVNTSEL0, val); 148 148 } 149 149 150 150 static void p6_pmu_enable_all(int added) ··· 154 154 /* p6 only has one enable register */ 155 155 rdmsrq(MSR_P6_EVNTSEL0, val); 156 156 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 157 - wrmsrl(MSR_P6_EVNTSEL0, val); 157 + wrmsrq(MSR_P6_EVNTSEL0, val); 158 158 } 159 159 160 160 static inline void
+9 -9
arch/x86/events/intel/pt.c
··· 426 426 if (READ_ONCE(pt->vmx_on)) 427 427 perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL); 428 428 else 429 - wrmsrl(MSR_IA32_RTIT_CTL, ctl); 429 + wrmsrq(MSR_IA32_RTIT_CTL, ctl); 430 430 431 431 WRITE_ONCE(event->hw.aux_config, ctl); 432 432 } ··· 485 485 486 486 /* avoid redundant msr writes */ 487 487 if (pt->filters.filter[range].msr_a != filter->msr_a) { 488 - wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a); 488 + wrmsrq(pt_address_ranges[range].msr_a, filter->msr_a); 489 489 pt->filters.filter[range].msr_a = filter->msr_a; 490 490 } 491 491 492 492 if (pt->filters.filter[range].msr_b != filter->msr_b) { 493 - wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b); 493 + wrmsrq(pt_address_ranges[range].msr_b, filter->msr_b); 494 494 pt->filters.filter[range].msr_b = filter->msr_b; 495 495 } 496 496 ··· 509 509 /* First round: clear STATUS, in particular the PSB byte counter. */ 510 510 if (!event->hw.aux_config) { 511 511 perf_event_itrace_started(event); 512 - wrmsrl(MSR_IA32_RTIT_STATUS, 0); 512 + wrmsrq(MSR_IA32_RTIT_STATUS, 0); 513 513 } 514 514 515 515 reg = pt_config_filters(event); ··· 569 569 570 570 ctl &= ~RTIT_CTL_TRACEEN; 571 571 if (!READ_ONCE(pt->vmx_on)) 572 - wrmsrl(MSR_IA32_RTIT_CTL, ctl); 572 + wrmsrq(MSR_IA32_RTIT_CTL, ctl); 573 573 574 574 WRITE_ONCE(event->hw.aux_config, ctl); 575 575 ··· 658 658 reg = virt_to_phys(base); 659 659 if (pt->output_base != reg) { 660 660 pt->output_base = reg; 661 - wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg); 661 + wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, reg); 662 662 } 663 663 664 664 reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32); 665 665 if (pt->output_mask != reg) { 666 666 pt->output_mask = reg; 667 - wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg); 667 + wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, reg); 668 668 } 669 669 } 670 670 ··· 970 970 if (advance) 971 971 pt_buffer_advance(buf); 972 972 973 - wrmsrl(MSR_IA32_RTIT_STATUS, status); 973 + wrmsrq(MSR_IA32_RTIT_STATUS, status); 974 974 } 975 975 976 976 /** ··· 1585 1585 1586 1586 /* Turn PTs back on */ 1587 1587 if (!on && event) 1588 - wrmsrl(MSR_IA32_RTIT_CTL, event->hw.aux_config); 1588 + wrmsrq(MSR_IA32_RTIT_CTL, event->hw.aux_config); 1589 1589 1590 1590 local_irq_restore(flags); 1591 1591 }
+5 -5
arch/x86/events/intel/uncore_discovery.c
··· 441 441 442 442 void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box) 443 443 { 444 - wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); 444 + wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); 445 445 } 446 446 447 447 void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box) 448 448 { 449 - wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); 449 + wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); 450 450 } 451 451 452 452 void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box) 453 453 { 454 - wrmsrl(intel_generic_uncore_box_ctl(box), 0); 454 + wrmsrq(intel_generic_uncore_box_ctl(box), 0); 455 455 } 456 456 457 457 static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box, ··· 459 459 { 460 460 struct hw_perf_event *hwc = &event->hw; 461 461 462 - wrmsrl(hwc->config_base, hwc->config); 462 + wrmsrq(hwc->config_base, hwc->config); 463 463 } 464 464 465 465 static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box, ··· 467 467 { 468 468 struct hw_perf_event *hwc = &event->hw; 469 469 470 - wrmsrl(hwc->config_base, 0); 470 + wrmsrq(hwc->config_base, 0); 471 471 } 472 472 473 473 static struct intel_uncore_ops generic_uncore_msr_ops = {
+33 -33
arch/x86/events/intel/uncore_nhmex.c
··· 200 200 201 201 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) 202 202 { 203 - wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); 203 + wrmsrq(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); 204 204 } 205 205 206 206 static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box) 207 207 { 208 - wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0); 208 + wrmsrq(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0); 209 209 } 210 210 211 211 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) ··· 219 219 /* WBox has a fixed counter */ 220 220 if (uncore_msr_fixed_ctl(box)) 221 221 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; 222 - wrmsrl(msr, config); 222 + wrmsrq(msr, config); 223 223 } 224 224 } 225 225 ··· 234 234 /* WBox has a fixed counter */ 235 235 if (uncore_msr_fixed_ctl(box)) 236 236 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; 237 - wrmsrl(msr, config); 237 + wrmsrq(msr, config); 238 238 } 239 239 } 240 240 241 241 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) 242 242 { 243 - wrmsrl(event->hw.config_base, 0); 243 + wrmsrq(event->hw.config_base, 0); 244 244 } 245 245 246 246 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) ··· 248 248 struct hw_perf_event *hwc = &event->hw; 249 249 250 250 if (hwc->idx == UNCORE_PMC_IDX_FIXED) 251 - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); 251 + wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); 252 252 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) 253 - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); 253 + wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); 254 254 else 255 - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); 255 + wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); 256 256 } 257 257 258 258 #define NHMEX_UNCORE_OPS_COMMON_INIT() \ ··· 382 382 struct hw_perf_event_extra *reg2 = &hwc->branch_reg; 383 383 384 384 if (reg1->idx != EXTRA_REG_NONE) { 385 - wrmsrl(reg1->reg, reg1->config); 386 - wrmsrl(reg1->reg + 1, reg2->config); 385 + wrmsrq(reg1->reg, reg1->config); 386 + wrmsrq(reg1->reg + 1, reg2->config); 387 387 } 388 - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | 388 + wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | 389 389 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); 390 390 } 391 391 ··· 467 467 struct hw_perf_event_extra *reg2 = &hwc->branch_reg; 468 468 469 469 if (reg1->idx != EXTRA_REG_NONE) { 470 - wrmsrl(reg1->reg, 0); 471 - wrmsrl(reg1->reg + 1, reg1->config); 472 - wrmsrl(reg1->reg + 2, reg2->config); 473 - wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); 470 + wrmsrq(reg1->reg, 0); 471 + wrmsrq(reg1->reg + 1, reg1->config); 472 + wrmsrq(reg1->reg + 2, reg2->config); 473 + wrmsrq(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); 474 474 } 475 - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); 475 + wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); 476 476 } 477 477 478 478 static struct attribute *nhmex_uncore_sbox_formats_attr[] = { ··· 842 842 843 843 idx = __BITS_VALUE(reg1->idx, 0, 8); 844 844 if (idx != 0xff) 845 - wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), 845 + wrmsrq(__BITS_VALUE(reg1->reg, 0, 16), 846 846 nhmex_mbox_shared_reg_config(box, idx)); 847 847 idx = __BITS_VALUE(reg1->idx, 1, 8); 848 848 if (idx != 0xff) 849 - wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), 849 + wrmsrq(__BITS_VALUE(reg1->reg, 1, 16), 850 850 nhmex_mbox_shared_reg_config(box, idx)); 851 851 852 852 if (reg2->idx != 
EXTRA_REG_NONE) { 853 - wrmsrl(reg2->reg, 0); 853 + wrmsrq(reg2->reg, 0); 854 854 if (reg2->config != ~0ULL) { 855 - wrmsrl(reg2->reg + 1, 855 + wrmsrq(reg2->reg + 1, 856 856 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); 857 - wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & 857 + wrmsrq(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & 858 858 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); 859 - wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); 859 + wrmsrq(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); 860 860 } 861 861 } 862 862 863 - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); 863 + wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); 864 864 } 865 865 866 866 DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); ··· 1121 1121 1122 1122 switch (idx % 6) { 1123 1123 case 0: 1124 - wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); 1124 + wrmsrq(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); 1125 1125 break; 1126 1126 case 1: 1127 - wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); 1127 + wrmsrq(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); 1128 1128 break; 1129 1129 case 2: 1130 1130 case 3: 1131 - wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), 1131 + wrmsrq(NHMEX_R_MSR_PORTN_QLX_CFG(port), 1132 1132 uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); 1133 1133 break; 1134 1134 case 4: 1135 - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), 1135 + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), 1136 1136 hwc->config >> 32); 1137 - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); 1138 - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); 1137 + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); 1138 + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); 1139 1139 break; 1140 1140 case 5: 1141 - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), 1141 + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), 1142 1142 hwc->config >> 32); 1143 - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); 1144 - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); 1143 + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); 1144 + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); 1145 1145 break; 1146 1146 } 1147 1147 1148 - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | 1148 + wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | 1149 1149 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); 1150 1150 } 1151 1151
+20 -20
arch/x86/events/intel/uncore_snb.c
··· 260 260 struct hw_perf_event *hwc = &event->hw; 261 261 262 262 if (hwc->idx < UNCORE_PMC_IDX_FIXED) 263 - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); 263 + wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); 264 264 else 265 - wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); 265 + wrmsrq(hwc->config_base, SNB_UNC_CTL_EN); 266 266 } 267 267 268 268 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) 269 269 { 270 - wrmsrl(event->hw.config_base, 0); 270 + wrmsrq(event->hw.config_base, 0); 271 271 } 272 272 273 273 static void snb_uncore_msr_init_box(struct intel_uncore_box *box) 274 274 { 275 275 if (box->pmu->pmu_idx == 0) { 276 - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 276 + wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 277 277 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); 278 278 } 279 279 } 280 280 281 281 static void snb_uncore_msr_enable_box(struct intel_uncore_box *box) 282 282 { 283 - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 283 + wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 284 284 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); 285 285 } 286 286 287 287 static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) 288 288 { 289 289 if (box->pmu->pmu_idx == 0) 290 - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0); 290 + wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 0); 291 291 } 292 292 293 293 static struct uncore_event_desc snb_uncore_events[] = { ··· 372 372 static void skl_uncore_msr_init_box(struct intel_uncore_box *box) 373 373 { 374 374 if (box->pmu->pmu_idx == 0) { 375 - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 375 + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 376 376 SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); 377 377 } 378 378 ··· 383 383 384 384 static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) 385 385 { 386 - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 386 + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 387 387 SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); 388 388 } 389 389 390 390 static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) 391 391 { 392 392 if (box->pmu->pmu_idx == 0) 393 - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0); 393 + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 0); 394 394 } 395 395 396 396 static struct intel_uncore_ops skl_uncore_msr_ops = { ··· 525 525 static void rkl_uncore_msr_init_box(struct intel_uncore_box *box) 526 526 { 527 527 if (box->pmu->pmu_idx == 0) 528 - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 528 + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 529 529 } 530 530 531 531 void tgl_uncore_cpu_init(void) ··· 541 541 static void adl_uncore_msr_init_box(struct intel_uncore_box *box) 542 542 { 543 543 if (box->pmu->pmu_idx == 0) 544 - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 544 + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 545 545 } 546 546 547 547 static void adl_uncore_msr_enable_box(struct intel_uncore_box *box) 548 548 { 549 - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 549 + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 550 550 } 551 551 552 552 static void adl_uncore_msr_disable_box(struct intel_uncore_box *box) 553 553 { 554 554 if (box->pmu->pmu_idx == 0) 555 - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0); 555 + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0); 556 556 } 557 557 558 558 static void adl_uncore_msr_exit_box(struct intel_uncore_box *box) 559 559 { 560 560 if (box->pmu->pmu_idx == 0) 561 - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0); 561 + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0); 562 562 } 563 563 564 564 static struct intel_uncore_ops adl_uncore_msr_ops = { ··· 691 691 692 692 static void 
mtl_uncore_msr_init_box(struct intel_uncore_box *box) 693 693 { 694 - wrmsrl(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN); 694 + wrmsrq(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN); 695 695 } 696 696 697 697 static struct intel_uncore_ops mtl_uncore_msr_ops = { ··· 758 758 static void lnl_uncore_msr_init_box(struct intel_uncore_box *box) 759 759 { 760 760 if (box->pmu->pmu_idx == 0) 761 - wrmsrl(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 761 + wrmsrq(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); 762 762 } 763 763 764 764 static struct intel_uncore_ops lnl_uncore_msr_ops = { ··· 1306 1306 /* Nehalem uncore support */ 1307 1307 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) 1308 1308 { 1309 - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); 1309 + wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, 0); 1310 1310 } 1311 1311 1312 1312 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) 1313 1313 { 1314 - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); 1314 + wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); 1315 1315 } 1316 1316 1317 1317 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) ··· 1319 1319 struct hw_perf_event *hwc = &event->hw; 1320 1320 1321 1321 if (hwc->idx < UNCORE_PMC_IDX_FIXED) 1322 - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); 1322 + wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); 1323 1323 else 1324 - wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); 1324 + wrmsrq(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); 1325 1325 } 1326 1326 1327 1327 static struct attribute *nhm_uncore_formats_attr[] = {
+21 -21
arch/x86/events/intel/uncore_snbep.c
··· 620 620 if (msr) { 621 621 rdmsrq(msr, config); 622 622 config |= SNBEP_PMON_BOX_CTL_FRZ; 623 - wrmsrl(msr, config); 623 + wrmsrq(msr, config); 624 624 } 625 625 } 626 626 ··· 633 633 if (msr) { 634 634 rdmsrq(msr, config); 635 635 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 636 - wrmsrl(msr, config); 636 + wrmsrq(msr, config); 637 637 } 638 638 } 639 639 ··· 643 643 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 644 644 645 645 if (reg1->idx != EXTRA_REG_NONE) 646 - wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); 646 + wrmsrq(reg1->reg, uncore_shared_reg_config(box, 0)); 647 647 648 - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 648 + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 649 649 } 650 650 651 651 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, ··· 653 653 { 654 654 struct hw_perf_event *hwc = &event->hw; 655 655 656 - wrmsrl(hwc->config_base, hwc->config); 656 + wrmsrq(hwc->config_base, hwc->config); 657 657 } 658 658 659 659 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) ··· 661 661 unsigned msr = uncore_msr_box_ctl(box); 662 662 663 663 if (msr) 664 - wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); 664 + wrmsrq(msr, SNBEP_PMON_BOX_CTL_INT); 665 665 } 666 666 667 667 static struct attribute *snbep_uncore_formats_attr[] = { ··· 1532 1532 { 1533 1533 unsigned msr = uncore_msr_box_ctl(box); 1534 1534 if (msr) 1535 - wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT); 1535 + wrmsrq(msr, IVBEP_PMON_BOX_CTL_INT); 1536 1536 } 1537 1537 1538 1538 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box) ··· 1783 1783 1784 1784 if (reg1->idx != EXTRA_REG_NONE) { 1785 1785 u64 filter = uncore_shared_reg_config(box, 0); 1786 - wrmsrl(reg1->reg, filter & 0xffffffff); 1787 - wrmsrl(reg1->reg + 6, filter >> 32); 1786 + wrmsrq(reg1->reg, filter & 0xffffffff); 1787 + wrmsrq(reg1->reg + 6, filter >> 32); 1788 1788 } 1789 1789 1790 - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 1790 + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 1791 1791 } 1792 1792 1793 1793 static struct intel_uncore_ops ivbep_uncore_cbox_ops = { ··· 2767 2767 2768 2768 if (reg1->idx != EXTRA_REG_NONE) { 2769 2769 u64 filter = uncore_shared_reg_config(box, 0); 2770 - wrmsrl(reg1->reg, filter & 0xffffffff); 2771 - wrmsrl(reg1->reg + 1, filter >> 32); 2770 + wrmsrq(reg1->reg, filter & 0xffffffff); 2771 + wrmsrq(reg1->reg + 1, filter >> 32); 2772 2772 } 2773 2773 2774 - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 2774 + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 2775 2775 } 2776 2776 2777 2777 static struct intel_uncore_ops hswep_uncore_cbox_ops = { ··· 2816 2816 2817 2817 for_each_set_bit(i, (unsigned long *)&init, 64) { 2818 2818 flags |= (1ULL << i); 2819 - wrmsrl(msr, flags); 2819 + wrmsrq(msr, flags); 2820 2820 } 2821 2821 } 2822 2822 } ··· 3708 3708 { 3709 3709 struct hw_perf_event *hwc = &event->hw; 3710 3710 3711 - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 3711 + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 3712 3712 } 3713 3713 3714 3714 static struct intel_uncore_ops skx_uncore_iio_ops = { ··· 4655 4655 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 4656 4656 4657 4657 if (reg1->idx != EXTRA_REG_NONE) 4658 - wrmsrl(reg1->reg, reg1->config); 4658 + wrmsrq(reg1->reg, reg1->config); 4659 4659 4660 - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 4660 + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 4661 4661 } 4662 4662 4663 4663 static struct 
intel_uncore_ops snr_uncore_chabox_ops = { ··· 5913 5913 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 5914 5914 5915 5915 if (reg1->idx != EXTRA_REG_NONE) 5916 - wrmsrl(reg1->reg, reg1->config); 5916 + wrmsrq(reg1->reg, reg1->config); 5917 5917 5918 - wrmsrl(hwc->config_base, hwc->config); 5918 + wrmsrq(hwc->config_base, hwc->config); 5919 5919 } 5920 5920 5921 5921 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box, ··· 5925 5925 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 5926 5926 5927 5927 if (reg1->idx != EXTRA_REG_NONE) 5928 - wrmsrl(reg1->reg, 0); 5928 + wrmsrq(reg1->reg, 0); 5929 5929 5930 - wrmsrl(hwc->config_base, 0); 5930 + wrmsrq(hwc->config_base, 0); 5931 5931 } 5932 5932 5933 5933 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+10 -10
arch/x86/events/perf_event.h
··· 1198 1198 u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); 1199 1199 1200 1200 if (hwc->extra_reg.reg) 1201 - wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); 1201 + wrmsrq(hwc->extra_reg.reg, hwc->extra_reg.config); 1202 1202 1203 1203 /* 1204 1204 * Add enabled Merge event on next counter 1205 1205 * if large increment event being enabled on this counter 1206 1206 */ 1207 1207 if (is_counter_pair(hwc)) 1208 - wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en); 1208 + wrmsrq(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en); 1209 1209 1210 - wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); 1210 + wrmsrq(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); 1211 1211 } 1212 1212 1213 1213 void x86_pmu_enable_all(int added); ··· 1223 1223 u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); 1224 1224 struct hw_perf_event *hwc = &event->hw; 1225 1225 1226 - wrmsrl(hwc->config_base, hwc->config & ~disable_mask); 1226 + wrmsrq(hwc->config_base, hwc->config & ~disable_mask); 1227 1227 1228 1228 if (is_counter_pair(hwc)) 1229 - wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0); 1229 + wrmsrq(x86_pmu_config_addr(hwc->idx + 1), 0); 1230 1230 } 1231 1231 1232 1232 void x86_pmu_enable_event(struct perf_event *event); ··· 1395 1395 u64 dbg_ctl, dbg_extn_cfg; 1396 1396 1397 1397 rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); 1398 - wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN); 1398 + wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN); 1399 1399 1400 1400 if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { 1401 1401 rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl); 1402 - wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 1402 + wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 1403 1403 } 1404 1404 } 1405 1405 ··· 1531 1531 1532 1532 static __always_inline void __intel_pmu_pebs_disable_all(void) 1533 1533 { 1534 - wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 1534 + wrmsrq(MSR_IA32_PEBS_ENABLE, 0); 1535 1535 } 1536 1536 1537 1537 static __always_inline void __intel_pmu_arch_lbr_disable(void) 1538 1538 { 1539 - wrmsrl(MSR_ARCH_LBR_CTL, 0); 1539 + wrmsrq(MSR_ARCH_LBR_CTL, 0); 1540 1540 } 1541 1541 1542 1542 static __always_inline void __intel_pmu_lbr_disable(void) ··· 1545 1545 1546 1546 rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 1547 1547 debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 1548 - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1548 + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 1549 1549 } 1550 1550 1551 1551 int intel_pmu_save_and_restart(struct perf_event *event);
+5 -5
arch/x86/events/zhaoxin/core.c
··· 254 254 255 255 static void zhaoxin_pmu_disable_all(void) 256 256 { 257 - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 257 + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); 258 258 } 259 259 260 260 static void zhaoxin_pmu_enable_all(int added) 261 261 { 262 - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 262 + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 263 263 } 264 264 265 265 static inline u64 zhaoxin_pmu_get_status(void) ··· 273 273 274 274 static inline void zhaoxin_pmu_ack_status(u64 ack) 275 275 { 276 - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 276 + wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 277 277 } 278 278 279 279 static inline void zxc_pmu_ack_status(u64 ack) ··· 295 295 296 296 rdmsrq(hwc->config_base, ctrl_val); 297 297 ctrl_val &= ~mask; 298 - wrmsrl(hwc->config_base, ctrl_val); 298 + wrmsrq(hwc->config_base, ctrl_val); 299 299 } 300 300 301 301 static void zhaoxin_pmu_disable_event(struct perf_event *event) ··· 332 332 rdmsrq(hwc->config_base, ctrl_val); 333 333 ctrl_val &= ~mask; 334 334 ctrl_val |= bits; 335 - wrmsrl(hwc->config_base, ctrl_val); 335 + wrmsrq(hwc->config_base, ctrl_val); 336 336 } 337 337 338 338 static void zhaoxin_pmu_enable_event(struct perf_event *event)
+1 -1
arch/x86/hyperv/hv_apic.c
··· 49 49 reg_val = reg_val << 32; 50 50 reg_val |= low; 51 51 52 - wrmsrl(HV_X64_MSR_ICR, reg_val); 52 + wrmsrq(HV_X64_MSR_ICR, reg_val); 53 53 } 54 54 55 55 static u32 hv_apic_read(u32 reg)
+20 -20
arch/x86/hyperv/hv_init.c
··· 128 128 } 129 129 if (!WARN_ON(!(*hvp))) { 130 130 msr.enable = 1; 131 - wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 131 + wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 132 132 } 133 133 134 134 return hyperv_init_ghcb(); ··· 155 155 156 156 rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 157 157 emu_status.inprogress = 0; 158 - wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 158 + wrmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 159 159 160 160 rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq); 161 161 tsc_khz = div64_u64(freq, 1000); ··· 203 203 204 204 re_ctrl.target_vp = hv_vp_index[get_cpu()]; 205 205 206 - wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); 207 - wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl)); 206 + wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); 207 + wrmsrq(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl)); 208 208 209 209 put_cpu(); 210 210 } ··· 219 219 220 220 rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); 221 221 re_ctrl.enabled = 0; 222 - wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); 222 + wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); 223 223 224 224 hv_reenlightenment_cb = NULL; 225 225 } ··· 254 254 rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 255 255 msr.enable = 0; 256 256 } 257 - wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 257 + wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 258 258 } 259 259 260 260 if (hv_reenlightenment_cb == NULL) ··· 274 274 else 275 275 re_ctrl.enabled = 0; 276 276 277 - wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); 277 + wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); 278 278 } 279 279 280 280 return 0; ··· 333 333 /* Disable the hypercall page in the hypervisor */ 334 334 rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 335 335 hypercall_msr.enable = 0; 336 - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 336 + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 337 337 338 338 ret = hv_cpu_die(0); 339 339 return ret; ··· 352 352 hypercall_msr.enable = 1; 353 353 hypercall_msr.guest_physical_address = 354 354 vmalloc_to_pfn(hv_hypercall_pg_saved); 355 - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 355 + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 356 356 357 357 hv_hypercall_pg = hv_hypercall_pg_saved; 358 358 hv_hypercall_pg_saved = NULL; ··· 499 499 * in such a VM and is only used in such a VM. 500 500 */ 501 501 guest_id = hv_generate_guest_id(LINUX_VERSION_CODE); 502 - wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id); 502 + wrmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id); 503 503 504 504 /* With the paravisor, the VM must also write the ID via GHCB/GHCI */ 505 505 hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id); ··· 532 532 * so it is populated with code, then copy the code to an 533 533 * executable page. 
534 534 */ 535 - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 535 + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 536 536 537 537 pg = vmalloc_to_page(hv_hypercall_pg); 538 538 src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE, ··· 544 544 hv_remap_tsc_clocksource(); 545 545 } else { 546 546 hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); 547 - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 547 + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 548 548 } 549 549 550 550 skip_hypercall_pg_init: ··· 608 608 return; 609 609 610 610 clean_guest_os_id: 611 - wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); 611 + wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0); 612 612 hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0); 613 613 cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE); 614 614 free_ghcb_page: ··· 629 629 union hv_reference_tsc_msr tsc_msr; 630 630 631 631 /* Reset our OS id */ 632 - wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); 632 + wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0); 633 633 hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0); 634 634 635 635 /* ··· 669 669 670 670 rdmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id); 671 671 672 - wrmsrl(HV_X64_MSR_CRASH_P0, err); 673 - wrmsrl(HV_X64_MSR_CRASH_P1, guest_id); 674 - wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip); 675 - wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax); 676 - wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp); 672 + wrmsrq(HV_X64_MSR_CRASH_P0, err); 673 + wrmsrq(HV_X64_MSR_CRASH_P1, guest_id); 674 + wrmsrq(HV_X64_MSR_CRASH_P2, regs->ip); 675 + wrmsrq(HV_X64_MSR_CRASH_P3, regs->ax); 676 + wrmsrq(HV_X64_MSR_CRASH_P4, regs->sp); 677 677 678 678 /* 679 679 * Let Hyper-V know there is crash data available 680 680 */ 681 - wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY); 681 + wrmsrq(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY); 682 682 } 683 683 EXPORT_SYMBOL_GPL(hyperv_report_panic); 684 684
+1 -1
arch/x86/include/asm/apic.h
··· 230 230 231 231 static inline void native_x2apic_icr_write(u32 low, u32 id) 232 232 { 233 - wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); 233 + wrmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); 234 234 } 235 235 236 236 static inline u64 native_x2apic_icr_read(void)
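
In x2APIC mode the ICR is a single 64-bit MSR, so the helper above packs the destination id into bits 63:32 and the low command word into bits 31:0 before one wrmsrq(). A worked instance with purely illustrative values (id = 3, low = 0x000c4500; neither value comes from the source):

        u64 icr = ((u64) 3) << 32 | 0x000c4500;         /* = 0x00000003000c4500 */
        wrmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), icr);   /* single 64-bit ICR write */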
+1 -1
arch/x86/include/asm/debugreg.h
··· 180 180 if (boot_cpu_data.x86 < 6) 181 181 return; 182 182 #endif 183 - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 183 + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 184 184 } 185 185 186 186 #endif /* _ASM_X86_DEBUGREG_H */
+1 -1
arch/x86/include/asm/fsgsbase.h
··· 70 70 if (boot_cpu_has(X86_FEATURE_FSGSBASE)) 71 71 wrfsbase(fsbase); 72 72 else 73 - wrmsrl(MSR_FS_BASE, fsbase); 73 + wrmsrq(MSR_FS_BASE, fsbase); 74 74 } 75 75 76 76 extern unsigned long x86_gsbase_read_cpu_inactive(void);
+2 -2
arch/x86/include/asm/msr.h
··· 258 258 #define rdmsrq(msr, val) \ 259 259 ((val) = native_read_msr((msr))) 260 260 261 - static inline void wrmsrl(u32 msr, u64 val) 261 + static inline void wrmsrq(u32 msr, u64 val) 262 262 { 263 263 native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32)); 264 264 } ··· 357 357 } 358 358 static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) 359 359 { 360 - wrmsrl(msr_no, q); 360 + wrmsrq(msr_no, q); 361 361 return 0; 362 362 } 363 363 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
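
Because rdmsrq() fills a u64 and wrmsrq() now takes one, the read-modify-write idiom that dominates the hunks above stays symmetric around a single 64-bit variable. A sketch of that idiom, reusing the register and flag names from the p6.c hunk (illustrative fragment, not a complete kernel function):

        u64 val;

        rdmsrq(MSR_P6_EVNTSEL0, val);           /* read the current 64-bit value */
        val |= ARCH_PERFMON_EVENTSEL_ENABLE;    /* set the enable bit            */
        wrmsrq(MSR_P6_EVNTSEL0, val);           /* write the whole u64 back      */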
+1 -1
arch/x86/include/asm/paravirt.h
··· 214 214 val = paravirt_read_msr(msr); \ 215 215 } while (0) 216 216 217 - static inline void wrmsrl(unsigned msr, u64 val) 217 + static inline void wrmsrq(unsigned msr, u64 val) 218 218 { 219 219 wrmsr(msr, (u32)val, (u32)(val>>32)); 220 220 }
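
Both the native msr.h definition and this paravirt one keep the same calling convention: the caller passes one u64 and the inline splits it into the low/high 32-bit halves that the underlying WRMSR interface expects (low word in EAX, high word in EDX). A small standalone sketch of that split, outside any kernel header (split_u64_for_wrmsr is a hypothetical name used only here):

        #include <stdint.h>

        static void split_u64_for_wrmsr(uint64_t val, uint32_t *lo, uint32_t *hi)
        {
                *lo = (uint32_t)(val & 0xffffffffULL);  /* low 32 bits  -> EAX */
                *hi = (uint32_t)(val >> 32);            /* high 32 bits -> EDX */
        }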
+5 -5
arch/x86/kernel/apic/apic.c
··· 425 425 weak_wrmsr_fence(); 426 426 427 427 tsc = rdtsc(); 428 - wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); 428 + wrmsrq(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); 429 429 return 0; 430 430 } 431 431 ··· 449 449 * the timer _and_ zero the counter registers: 450 450 */ 451 451 if (v & APIC_LVT_TIMER_TSCDEADLINE) 452 - wrmsrl(MSR_IA32_TSC_DEADLINE, 0); 452 + wrmsrq(MSR_IA32_TSC_DEADLINE, 0); 453 453 else 454 454 apic_write(APIC_TMICT, 0); 455 455 ··· 1711 1711 if (!(msr & X2APIC_ENABLE)) 1712 1712 return; 1713 1713 /* Disable xapic and x2apic first and then reenable xapic mode */ 1714 - wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE)); 1715 - wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE); 1714 + wrmsrq(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE)); 1715 + wrmsrq(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE); 1716 1716 printk_once(KERN_INFO "x2apic disabled\n"); 1717 1717 } 1718 1718 ··· 1723 1723 rdmsrq(MSR_IA32_APICBASE, msr); 1724 1724 if (msr & X2APIC_ENABLE) 1725 1725 return; 1726 - wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); 1726 + wrmsrq(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); 1727 1727 printk_once(KERN_INFO "x2apic enabled\n"); 1728 1728 } 1729 1729
+6 -6
arch/x86/kernel/cpu/bugs.c
··· 71 71 static void update_spec_ctrl(u64 val) 72 72 { 73 73 this_cpu_write(x86_spec_ctrl_current, val); 74 - wrmsrl(MSR_IA32_SPEC_CTRL, val); 74 + wrmsrq(MSR_IA32_SPEC_CTRL, val); 75 75 } 76 76 77 77 /* ··· 90 90 * forced the update can be delayed until that time. 91 91 */ 92 92 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) 93 - wrmsrl(MSR_IA32_SPEC_CTRL, val); 93 + wrmsrq(MSR_IA32_SPEC_CTRL, val); 94 94 } 95 95 96 96 noinstr u64 spec_ctrl_current(void) ··· 228 228 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; 229 229 230 230 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 231 - wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 231 + wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 232 232 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 233 - wrmsrl(MSR_AMD64_LS_CFG, msrval); 233 + wrmsrq(MSR_AMD64_LS_CFG, msrval); 234 234 } 235 235 236 236 #undef pr_fmt ··· 670 670 break; 671 671 } 672 672 673 - wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 673 + wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 674 674 } 675 675 676 676 static void __init srbds_select_mitigation(void) ··· 795 795 return; 796 796 } 797 797 798 - wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 798 + wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 799 799 800 800 /* 801 801 * Check to make sure that the WRMSR value was not ignored. Writes to
+3 -3
arch/x86/kernel/cpu/bus_lock.c
··· 145 145 } 146 146 147 147 /* Restore the MSR to its cached value. */ 148 - wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); 148 + wrmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache); 149 149 150 150 setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); 151 151 } ··· 162 162 if (on) 163 163 test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; 164 164 165 - wrmsrl(MSR_TEST_CTRL, test_ctrl_val); 165 + wrmsrq(MSR_TEST_CTRL, test_ctrl_val); 166 166 } 167 167 168 168 void split_lock_init(void) ··· 311 311 val |= DEBUGCTLMSR_BUS_LOCK_DETECT; 312 312 } 313 313 314 - wrmsrl(MSR_IA32_DEBUGCTLMSR, val); 314 + wrmsrq(MSR_IA32_DEBUGCTLMSR, val); 315 315 } 316 316 317 317 bool handle_user_split_lock(struct pt_regs *regs, long error_code)
+17 -17
arch/x86/kernel/cpu/common.c
··· 564 564 if (cpu_feature_enabled(X86_FEATURE_IBT)) { 565 565 rdmsrq(MSR_IA32_S_CET, msr); 566 566 if (disable) 567 - wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN); 567 + wrmsrq(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN); 568 568 } 569 569 570 570 return msr; ··· 578 578 rdmsrq(MSR_IA32_S_CET, msr); 579 579 msr &= ~CET_ENDBR_EN; 580 580 msr |= (save & CET_ENDBR_EN); 581 - wrmsrl(MSR_IA32_S_CET, msr); 581 + wrmsrq(MSR_IA32_S_CET, msr); 582 582 } 583 583 } 584 584 ··· 602 602 set_cpu_cap(c, X86_FEATURE_USER_SHSTK); 603 603 604 604 if (kernel_ibt) 605 - wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN); 605 + wrmsrq(MSR_IA32_S_CET, CET_ENDBR_EN); 606 606 else 607 - wrmsrl(MSR_IA32_S_CET, 0); 607 + wrmsrq(MSR_IA32_S_CET, 0); 608 608 609 609 cr4_set_bits(X86_CR4_CET); 610 610 611 611 if (kernel_ibt && ibt_selftest()) { 612 612 pr_err("IBT selftest: Failed!\n"); 613 - wrmsrl(MSR_IA32_S_CET, 0); 613 + wrmsrq(MSR_IA32_S_CET, 0); 614 614 setup_clear_cpu_cap(X86_FEATURE_IBT); 615 615 } 616 616 } ··· 621 621 cpu_feature_enabled(X86_FEATURE_SHSTK))) 622 622 return; 623 623 624 - wrmsrl(MSR_IA32_S_CET, 0); 625 - wrmsrl(MSR_IA32_U_CET, 0); 624 + wrmsrq(MSR_IA32_S_CET, 0); 625 + wrmsrq(MSR_IA32_U_CET, 0); 626 626 } 627 627 628 628 /* ··· 751 751 * No need to load %gs. It is already correct. 752 752 * 753 753 * Writing %gs on 64bit would zero GSBASE which would make any per 754 - * CPU operation up to the point of the wrmsrl() fault. 754 + * CPU operation up to the point of the wrmsrq() fault. 755 755 * 756 - * Set GSBASE to the new offset. Until the wrmsrl() happens the 756 + * Set GSBASE to the new offset. Until the wrmsrq() happens the 757 757 * early mapping is still valid. That means the GSBASE update will 758 758 * lose any prior per CPU data which was not copied over in 759 759 * setup_per_cpu_areas(). ··· 761 761 * This works even with stackprotector enabled because the 762 762 * per CPU stack canary is 0 in both per CPU areas. 763 763 */ 764 - wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); 764 + wrmsrq(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); 765 765 #else 766 766 /* 767 767 * %fs is already set to __KERNEL_PERCPU, but after switching GDT ··· 1750 1750 1751 1751 unsigned long old_base, tmp; 1752 1752 rdmsrq(MSR_FS_BASE, old_base); 1753 - wrmsrl(MSR_FS_BASE, 1); 1753 + wrmsrq(MSR_FS_BASE, 1); 1754 1754 loadsegment(fs, 0); 1755 1755 rdmsrq(MSR_FS_BASE, tmp); 1756 - wrmsrl(MSR_FS_BASE, old_base); 1756 + wrmsrq(MSR_FS_BASE, old_base); 1757 1757 return tmp == 0; 1758 1758 } 1759 1759 ··· 2099 2099 * guest. Avoid the pointless write on all Intel CPUs. 2100 2100 */ 2101 2101 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2102 - wrmsrl(MSR_CSTAR, val); 2102 + wrmsrq(MSR_CSTAR, val); 2103 2103 } 2104 2104 2105 2105 static inline void idt_syscall_init(void) 2106 2106 { 2107 - wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); 2107 + wrmsrq(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); 2108 2108 2109 2109 if (ia32_enabled()) { 2110 2110 wrmsrl_cstar((unsigned long)entry_SYSCALL_compat); ··· 2129 2129 * Flags to clear on syscall; clear as much as possible 2130 2130 * to minimize user space-kernel interference. 
2131 2131 */ 2132 - wrmsrl(MSR_SYSCALL_MASK, 2132 + wrmsrq(MSR_SYSCALL_MASK, 2133 2133 X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF| 2134 2134 X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF| 2135 2135 X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF| ··· 2313 2313 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 2314 2314 syscall_init(); 2315 2315 2316 - wrmsrl(MSR_FS_BASE, 0); 2317 - wrmsrl(MSR_KERNEL_GS_BASE, 0); 2316 + wrmsrq(MSR_FS_BASE, 0); 2317 + wrmsrq(MSR_KERNEL_GS_BASE, 0); 2318 2318 barrier(); 2319 2319 2320 2320 x2apic_setup();
+1 -1
arch/x86/kernel/cpu/feat_ctl.c
··· 165 165 msr |= FEAT_CTL_SGX_LC_ENABLED; 166 166 } 167 167 168 - wrmsrl(MSR_IA32_FEAT_CTL, msr); 168 + wrmsrq(MSR_IA32_FEAT_CTL, msr); 169 169 170 170 update_caps: 171 171 set_cpu_cap(c, X86_FEATURE_MSR_IA32_FEAT_CTL);
+1 -1
arch/x86/kernel/cpu/intel.c
··· 509 509 probe_xeon_phi_r3mwait(c); 510 510 511 511 msr = this_cpu_read(msr_misc_features_shadow); 512 - wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); 512 + wrmsrq(MSR_MISC_FEATURES_ENABLES, msr); 513 513 } 514 514 515 515 /*
+1 -1
arch/x86/kernel/cpu/intel_epb.c
··· 111 111 pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); 112 112 } 113 113 } 114 - wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); 114 + wrmsrq(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); 115 115 } 116 116 117 117 static struct syscore_ops intel_epb_syscore_ops = {
+4 -4
arch/x86/kernel/cpu/mce/amd.c
··· 667 667 /* McStatusWrEn has to be set */ 668 668 need_toggle = !(hwcr & BIT(18)); 669 669 if (need_toggle) 670 - wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); 670 + wrmsrq(MSR_K7_HWCR, hwcr | BIT(18)); 671 671 672 672 /* Clear CntP bit safely */ 673 673 for (i = 0; i < num_msrs; i++) ··· 675 675 676 676 /* restore old settings */ 677 677 if (need_toggle) 678 - wrmsrl(MSR_K7_HWCR, hwcr); 678 + wrmsrq(MSR_K7_HWCR, hwcr); 679 679 } 680 680 681 681 /* cpu init entry point, called from mce.c with preempt off */ ··· 843 843 844 844 __log_error(bank, status, addr, misc); 845 845 846 - wrmsrl(msr_stat, 0); 846 + wrmsrq(msr_stat, 0); 847 847 848 848 return status & MCI_STATUS_DEFERRED; 849 849 } ··· 862 862 return true; 863 863 864 864 /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */ 865 - wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); 865 + wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); 866 866 return true; 867 867 } 868 868
+4 -4
arch/x86/kernel/cpu/mce/core.c
··· 1878 1878 1879 1879 if (!b->init) 1880 1880 continue; 1881 - wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); 1882 - wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); 1881 + wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl); 1882 + wrmsrq(mca_msr_reg(i, MCA_STATUS), 0); 1883 1883 } 1884 1884 } 1885 1885 ··· 2436 2436 struct mce_bank *b = &mce_banks[i]; 2437 2437 2438 2438 if (b->init) 2439 - wrmsrl(mca_msr_reg(i, MCA_CTL), 0); 2439 + wrmsrq(mca_msr_reg(i, MCA_CTL), 0); 2440 2440 } 2441 2441 return; 2442 2442 } ··· 2786 2786 struct mce_bank *b = &mce_banks[i]; 2787 2787 2788 2788 if (b->init) 2789 - wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); 2789 + wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl); 2790 2790 } 2791 2791 } 2792 2792
+10 -10
arch/x86/kernel/cpu/mce/inject.c
··· 475 475 struct mce m = *(struct mce *)info; 476 476 u8 b = m.bank; 477 477 478 - wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); 478 + wrmsrq(MSR_IA32_MCG_STATUS, m.mcgstatus); 479 479 480 480 if (boot_cpu_has(X86_FEATURE_SMCA)) { 481 481 if (m.inject_flags == DFR_INT_INJ) { 482 - wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status); 483 - wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr); 482 + wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status); 483 + wrmsrq(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr); 484 484 } else { 485 - wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status); 486 - wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr); 485 + wrmsrq(MSR_AMD64_SMCA_MCx_STATUS(b), m.status); 486 + wrmsrq(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr); 487 487 } 488 488 489 - wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd); 489 + wrmsrq(MSR_AMD64_SMCA_MCx_SYND(b), m.synd); 490 490 491 491 if (m.misc) 492 - wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc); 492 + wrmsrq(MSR_AMD64_SMCA_MCx_MISC(b), m.misc); 493 493 } else { 494 - wrmsrl(MSR_IA32_MCx_STATUS(b), m.status); 495 - wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr); 494 + wrmsrq(MSR_IA32_MCx_STATUS(b), m.status); 495 + wrmsrq(MSR_IA32_MCx_ADDR(b), m.addr); 496 496 497 497 if (m.misc) 498 - wrmsrl(MSR_IA32_MCx_MISC(b), m.misc); 498 + wrmsrq(MSR_IA32_MCx_MISC(b), m.misc); 499 499 } 500 500 } 501 501
+5 -5
arch/x86/kernel/cpu/mce/intel.c
··· 143 143 raw_spin_lock_irqsave(&cmci_discover_lock, flags); 144 144 rdmsrq(MSR_IA32_MCx_CTL2(bank), val); 145 145 val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; 146 - wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh); 146 + wrmsrq(MSR_IA32_MCx_CTL2(bank), val | thresh); 147 147 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 148 148 } 149 149 ··· 232 232 struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc); 233 233 234 234 val |= MCI_CTL2_CMCI_EN; 235 - wrmsrl(MSR_IA32_MCx_CTL2(bank), val); 235 + wrmsrq(MSR_IA32_MCx_CTL2(bank), val); 236 236 rdmsrq(MSR_IA32_MCx_CTL2(bank), val); 237 237 238 238 /* If the enable bit did not stick, this bank should be polled. */ ··· 326 326 return; 327 327 rdmsrq(MSR_IA32_MCx_CTL2(bank), val); 328 328 val &= ~MCI_CTL2_CMCI_EN; 329 - wrmsrl(MSR_IA32_MCx_CTL2(bank), val); 329 + wrmsrq(MSR_IA32_MCx_CTL2(bank), val); 330 330 __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); 331 331 332 332 if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD) ··· 433 433 rdmsrq(MSR_IA32_MCG_EXT_CTL, val); 434 434 435 435 if (!(val & MCG_EXT_CTL_LMCE_EN)) 436 - wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); 436 + wrmsrq(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); 437 437 } 438 438 439 439 void intel_clear_lmce(void) ··· 445 445 446 446 rdmsrq(MSR_IA32_MCG_EXT_CTL, val); 447 447 val &= ~MCG_EXT_CTL_LMCE_EN; 448 - wrmsrl(MSR_IA32_MCG_EXT_CTL, val); 448 + wrmsrq(MSR_IA32_MCG_EXT_CTL, val); 449 449 } 450 450 451 451 /*
+3 -3
arch/x86/kernel/cpu/mshyperv.c
··· 82 82 83 83 /* Write proxy bit via wrmsl instruction */ 84 84 if (hv_is_sint_msr(reg)) 85 - wrmsrl(reg, value | 1 << 20); 85 + wrmsrq(reg, value | 1 << 20); 86 86 } else { 87 - wrmsrl(reg, value); 87 + wrmsrq(reg, value); 88 88 } 89 89 } 90 90 EXPORT_SYMBOL_GPL(hv_set_non_nested_msr); ··· 574 574 * setting of this MSR bit should happen before init_intel() 575 575 * is called. 576 576 */ 577 - wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC); 577 + wrmsrq(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC); 578 578 setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); 579 579 } 580 580
+3 -3
arch/x86/kernel/cpu/resctrl/core.c
··· 309 309 unsigned int i; 310 310 311 311 for (i = m->low; i < m->high; i++) 312 - wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); 312 + wrmsrq(hw_res->msr_base + i, hw_dom->ctrl_val[i]); 313 313 } 314 314 315 315 /* ··· 334 334 335 335 /* Write the delay values for mba. */ 336 336 for (i = m->low; i < m->high; i++) 337 - wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res)); 337 + wrmsrq(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res)); 338 338 } 339 339 340 340 static void cat_wrmsr(struct msr_param *m) ··· 344 344 unsigned int i; 345 345 346 346 for (i = m->low; i < m->high; i++) 347 - wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); 347 + wrmsrq(hw_res->msr_base + i, hw_dom->ctrl_val[i]); 348 348 } 349 349 350 350 u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
+1 -1
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
··· 534 534 __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p); 535 535 536 536 /* Re-enable the hardware prefetcher(s) */ 537 - wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr); 537 + wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr); 538 538 local_irq_enable(); 539 539 540 540 plr->thread_done = 1;
+2 -2
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 2326 2326 { 2327 2327 bool *enable = arg; 2328 2328 2329 - wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 2329 + wrmsrq(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 2330 2330 } 2331 2331 2332 2332 static void l2_qos_cfg_update(void *arg) 2333 2333 { 2334 2334 bool *enable = arg; 2335 2335 2336 - wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); 2336 + wrmsrq(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); 2337 2337 } 2338 2338 2339 2339 static inline bool is_mba_linear(void)
+1 -1
arch/x86/kernel/cpu/sgx/main.c
··· 871 871 WARN_ON_ONCE(preemptible()); 872 872 873 873 for (i = 0; i < 4; i++) 874 - wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]); 874 + wrmsrq(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]); 875 875 } 876 876 877 877 const struct file_operations sgx_provision_fops = {
+5 -5
arch/x86/kernel/cpu/tsx.c
··· 37 37 */ 38 38 tsx |= TSX_CTRL_CPUID_CLEAR; 39 39 40 - wrmsrl(MSR_IA32_TSX_CTRL, tsx); 40 + wrmsrq(MSR_IA32_TSX_CTRL, tsx); 41 41 } 42 42 43 43 static void tsx_enable(void) ··· 56 56 */ 57 57 tsx &= ~TSX_CTRL_CPUID_CLEAR; 58 58 59 - wrmsrl(MSR_IA32_TSX_CTRL, tsx); 59 + wrmsrq(MSR_IA32_TSX_CTRL, tsx); 60 60 } 61 61 62 62 static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) ··· 117 117 boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { 118 118 rdmsrq(MSR_TSX_FORCE_ABORT, msr); 119 119 msr |= MSR_TFA_TSX_CPUID_CLEAR; 120 - wrmsrl(MSR_TSX_FORCE_ABORT, msr); 120 + wrmsrq(MSR_TSX_FORCE_ABORT, msr); 121 121 } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) { 122 122 rdmsrq(MSR_IA32_TSX_CTRL, msr); 123 123 msr |= TSX_CTRL_CPUID_CLEAR; 124 - wrmsrl(MSR_IA32_TSX_CTRL, msr); 124 + wrmsrq(MSR_IA32_TSX_CTRL, msr); 125 125 } 126 126 } 127 127 ··· 150 150 151 151 if (mcu_opt_ctrl & RTM_ALLOW) { 152 152 mcu_opt_ctrl &= ~RTM_ALLOW; 153 - wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); 153 + wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); 154 154 setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT); 155 155 } 156 156 }
+5 -5
arch/x86/kernel/fpu/xstate.c
··· 199 199 * MSR_IA32_XSS sets supervisor states managed by XSAVES. 200 200 */ 201 201 if (boot_cpu_has(X86_FEATURE_XSAVES)) { 202 - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | 202 + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | 203 203 xfeatures_mask_independent()); 204 204 } 205 205 } ··· 639 639 return get_compacted_size(); 640 640 641 641 /* Disable independent features. */ 642 - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor()); 642 + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor()); 643 643 644 644 /* 645 645 * Ask the hardware what size is required of the buffer. ··· 648 648 size = get_compacted_size(); 649 649 650 650 /* Re-enable independent features so XSAVES will work on them again. */ 651 - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask); 651 + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask); 652 652 653 653 return size; 654 654 } ··· 904 904 * of XSAVES and MSR_IA32_XSS. 905 905 */ 906 906 if (cpu_feature_enabled(X86_FEATURE_XSAVES)) { 907 - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | 907 + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | 908 908 xfeatures_mask_independent()); 909 909 } 910 910 911 911 if (fpu_state_size_dynamic()) 912 - wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd); 912 + wrmsrq(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd); 913 913 } 914 914 915 915 /*
+1 -1
arch/x86/kernel/fpu/xstate.h
··· 171 171 #ifdef CONFIG_X86_64 172 172 static inline void xfd_set_state(u64 xfd) 173 173 { 174 - wrmsrl(MSR_IA32_XFD, xfd); 174 + wrmsrq(MSR_IA32_XFD, xfd); 175 175 __this_cpu_write(xfd_state, xfd); 176 176 } 177 177
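xfd_set_state() above pairs the wrmsrq() with __this_cpu_write() so that later reads can consult a per-CPU shadow instead of the MSR; the FRED RSP0 and msr_misc_features_shadow hunks elsewhere in this commit use the same idea. A hedged sketch of that write-through pattern; MSR_EXAMPLE and example_msr_shadow are placeholder names for illustration, not real kernel identifiers:

	static DEFINE_PER_CPU(u64, example_msr_shadow);	/* hypothetical per-CPU shadow */

	static void example_msr_set(u64 val)
	{
		wrmsrq(MSR_EXAMPLE, val);			/* MSR_EXAMPLE is a placeholder */
		__this_cpu_write(example_msr_shadow, val);	/* keep the cheap-to-read copy in sync */
	}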
+10 -10
arch/x86/kernel/fred.c
··· 43 43 */ 44 44 loadsegment(ss, __KERNEL_DS); 45 45 46 - wrmsrl(MSR_IA32_FRED_CONFIG, 46 + wrmsrq(MSR_IA32_FRED_CONFIG, 47 47 /* Reserve for CALL emulation */ 48 48 FRED_CONFIG_REDZONE | 49 49 FRED_CONFIG_INT_STKLVL(0) | 50 50 FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user)); 51 51 52 - wrmsrl(MSR_IA32_FRED_STKLVLS, 0); 52 + wrmsrq(MSR_IA32_FRED_STKLVLS, 0); 53 53 54 54 /* 55 55 * Ater a CPU offline/online cycle, the FRED RSP0 MSR should be 56 56 * resynchronized with its per-CPU cache. 57 57 */ 58 - wrmsrl(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0)); 58 + wrmsrq(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0)); 59 59 60 - wrmsrl(MSR_IA32_FRED_RSP1, 0); 61 - wrmsrl(MSR_IA32_FRED_RSP2, 0); 62 - wrmsrl(MSR_IA32_FRED_RSP3, 0); 60 + wrmsrq(MSR_IA32_FRED_RSP1, 0); 61 + wrmsrq(MSR_IA32_FRED_RSP2, 0); 62 + wrmsrq(MSR_IA32_FRED_RSP3, 0); 63 63 64 64 /* Enable FRED */ 65 65 cr4_set_bits(X86_CR4_FRED); ··· 79 79 * (remember that user space faults are always taken on stack level 0) 80 80 * is to avoid overflowing the kernel stack. 81 81 */ 82 - wrmsrl(MSR_IA32_FRED_STKLVLS, 82 + wrmsrq(MSR_IA32_FRED_STKLVLS, 83 83 FRED_STKLVL(X86_TRAP_DB, FRED_DB_STACK_LEVEL) | 84 84 FRED_STKLVL(X86_TRAP_NMI, FRED_NMI_STACK_LEVEL) | 85 85 FRED_STKLVL(X86_TRAP_MC, FRED_MC_STACK_LEVEL) | 86 86 FRED_STKLVL(X86_TRAP_DF, FRED_DF_STACK_LEVEL)); 87 87 88 88 /* The FRED equivalents to IST stacks... */ 89 - wrmsrl(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB)); 90 - wrmsrl(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI)); 91 - wrmsrl(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF)); 89 + wrmsrq(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB)); 90 + wrmsrq(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI)); 91 + wrmsrq(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF)); 92 92 }
+13 -13
arch/x86/kernel/kvm.c
··· 301 301 token = __this_cpu_read(apf_reason.token); 302 302 kvm_async_pf_task_wake(token); 303 303 __this_cpu_write(apf_reason.token, 0); 304 - wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1); 304 + wrmsrq(MSR_KVM_ASYNC_PF_ACK, 1); 305 305 } 306 306 307 307 set_irq_regs(old_regs); ··· 327 327 if (!has_steal_clock) 328 328 return; 329 329 330 - wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); 330 + wrmsrq(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); 331 331 pr_debug("stealtime: cpu %d, msr %llx\n", cpu, 332 332 (unsigned long long) slow_virt_to_phys(st)); 333 333 } ··· 361 361 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) 362 362 pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; 363 363 364 - wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR); 364 + wrmsrq(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR); 365 365 366 - wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); 366 + wrmsrq(MSR_KVM_ASYNC_PF_EN, pa); 367 367 __this_cpu_write(async_pf_enabled, true); 368 368 pr_debug("setup async PF for cpu %d\n", smp_processor_id()); 369 369 } ··· 376 376 __this_cpu_write(kvm_apic_eoi, 0); 377 377 pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) 378 378 | KVM_MSR_ENABLED; 379 - wrmsrl(MSR_KVM_PV_EOI_EN, pa); 379 + wrmsrq(MSR_KVM_PV_EOI_EN, pa); 380 380 } 381 381 382 382 if (has_steal_clock) ··· 388 388 if (!__this_cpu_read(async_pf_enabled)) 389 389 return; 390 390 391 - wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); 391 + wrmsrq(MSR_KVM_ASYNC_PF_EN, 0); 392 392 __this_cpu_write(async_pf_enabled, false); 393 393 394 394 pr_debug("disable async PF for cpu %d\n", smp_processor_id()); ··· 451 451 { 452 452 kvm_disable_steal_time(); 453 453 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) 454 - wrmsrl(MSR_KVM_PV_EOI_EN, 0); 454 + wrmsrq(MSR_KVM_PV_EOI_EN, 0); 455 455 if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) 456 - wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0); 456 + wrmsrq(MSR_KVM_MIGRATION_CONTROL, 0); 457 457 kvm_pv_disable_apf(); 458 458 if (!shutdown) 459 459 apf_task_wake_all(); ··· 615 615 } 616 616 617 617 pr_info("%s : live migration enabled in EFI\n", __func__); 618 - wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); 618 + wrmsrq(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); 619 619 620 620 return 1; 621 621 } ··· 740 740 741 741 #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL 742 742 if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll) 743 - wrmsrl(MSR_KVM_POLL_CONTROL, 0); 743 + wrmsrq(MSR_KVM_POLL_CONTROL, 0); 744 744 #endif 745 745 } 746 746 ··· 975 975 * If not booted using EFI, enable Live migration support. 976 976 */ 977 977 if (!efi_enabled(EFI_BOOT)) 978 - wrmsrl(MSR_KVM_MIGRATION_CONTROL, 978 + wrmsrq(MSR_KVM_MIGRATION_CONTROL, 979 979 KVM_MIGRATION_READY); 980 980 } 981 981 kvmclock_init(); ··· 1124 1124 1125 1125 static void kvm_disable_host_haltpoll(void *i) 1126 1126 { 1127 - wrmsrl(MSR_KVM_POLL_CONTROL, 0); 1127 + wrmsrq(MSR_KVM_POLL_CONTROL, 0); 1128 1128 } 1129 1129 1130 1130 static void kvm_enable_host_haltpoll(void *i) 1131 1131 { 1132 - wrmsrl(MSR_KVM_POLL_CONTROL, 1); 1132 + wrmsrq(MSR_KVM_POLL_CONTROL, 1); 1133 1133 } 1134 1134 1135 1135 void arch_haltpoll_enable(unsigned int cpu)
+2 -2
arch/x86/kernel/kvmclock.c
··· 60 60 */ 61 61 static void kvm_get_wallclock(struct timespec64 *now) 62 62 { 63 - wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock)); 63 + wrmsrq(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock)); 64 64 preempt_disable(); 65 65 pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now); 66 66 preempt_enable(); ··· 173 173 return; 174 174 175 175 pa = slow_virt_to_phys(&src->pvti) | 0x01ULL; 176 - wrmsrl(msr_kvm_system_time, pa); 176 + wrmsrq(msr_kvm_system_time, pa); 177 177 pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt); 178 178 } 179 179
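The kvm.c and kvmclock.c hunks above register shared memory with the hypervisor by writing its physical address into a paravirtual MSR, with the low bit serving as the enable flag. A small sketch of that registration step, reusing the steal-time identifiers shown above; the static instance is hypothetical and only for illustration:

	static struct kvm_steal_time shared_area __aligned(64);	/* hypothetical instance */

	static void register_shared_area_sketch(void)
	{
		u64 pa = slow_virt_to_phys(&shared_area) | KVM_MSR_ENABLED;	/* low bit = enabled */

		wrmsrq(MSR_KVM_STEAL_TIME, pa);	/* hand the physical address to the hypervisor */
	}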
+1 -1
arch/x86/kernel/mmconf-fam10h_64.c
··· 212 212 (FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT)); 213 213 val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) | 214 214 FAM10H_MMIO_CONF_ENABLE; 215 - wrmsrl(address, val); 215 + wrmsrq(address, val); 216 216 } 217 217 218 218 static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+7 -7
arch/x86/kernel/process.c
··· 344 344 msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT; 345 345 msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT); 346 346 this_cpu_write(msr_misc_features_shadow, msrval); 347 - wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval); 347 + wrmsrq(MSR_MISC_FEATURES_ENABLES, msrval); 348 348 } 349 349 350 350 static void disable_cpuid(void) ··· 561 561 562 562 if (!static_cpu_has(X86_FEATURE_ZEN)) { 563 563 msr |= ssbd_tif_to_amd_ls_cfg(tifn); 564 - wrmsrl(MSR_AMD64_LS_CFG, msr); 564 + wrmsrq(MSR_AMD64_LS_CFG, msr); 565 565 return; 566 566 } 567 567 ··· 578 578 raw_spin_lock(&st->shared_state->lock); 579 579 /* First sibling enables SSBD: */ 580 580 if (!st->shared_state->disable_state) 581 - wrmsrl(MSR_AMD64_LS_CFG, msr); 581 + wrmsrq(MSR_AMD64_LS_CFG, msr); 582 582 st->shared_state->disable_state++; 583 583 raw_spin_unlock(&st->shared_state->lock); 584 584 } else { ··· 588 588 raw_spin_lock(&st->shared_state->lock); 589 589 st->shared_state->disable_state--; 590 590 if (!st->shared_state->disable_state) 591 - wrmsrl(MSR_AMD64_LS_CFG, msr); 591 + wrmsrq(MSR_AMD64_LS_CFG, msr); 592 592 raw_spin_unlock(&st->shared_state->lock); 593 593 } 594 594 } ··· 597 597 { 598 598 u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); 599 599 600 - wrmsrl(MSR_AMD64_LS_CFG, msr); 600 + wrmsrq(MSR_AMD64_LS_CFG, msr); 601 601 } 602 602 #endif 603 603 ··· 607 607 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, 608 608 * so ssbd_tif_to_spec_ctrl() just works. 609 609 */ 610 - wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); 610 + wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); 611 611 } 612 612 613 613 /* ··· 714 714 debugctl &= ~DEBUGCTLMSR_BTF; 715 715 msk = tifn & _TIF_BLOCKSTEP; 716 716 debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT; 717 - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 717 + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 718 718 } 719 719 720 720 if ((tifp ^ tifn) & _TIF_NOTSC)
+3 -3
arch/x86/kernel/process_64.c
··· 221 221 native_swapgs(); 222 222 } else { 223 223 instrumentation_begin(); 224 - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); 224 + wrmsrq(MSR_KERNEL_GS_BASE, gsbase); 225 225 instrumentation_end(); 226 226 } 227 227 } ··· 353 353 } else { 354 354 if (prev_index != next_index) 355 355 loadseg(which, next_index); 356 - wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, 356 + wrmsrq(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, 357 357 next_base); 358 358 } 359 359 } else { ··· 478 478 __wrgsbase_inactive(gsbase); 479 479 local_irq_restore(flags); 480 480 } else { 481 - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); 481 + wrmsrq(MSR_KERNEL_GS_BASE, gsbase); 482 482 } 483 483 } 484 484
+1 -1
arch/x86/kernel/reboot_fixups_32.c
··· 27 27 static void cs5536_warm_reset(struct pci_dev *dev) 28 28 { 29 29 /* writing 1 to the LSB of this MSR causes a hard reset */ 30 - wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL); 30 + wrmsrq(MSR_DIVIL_SOFT_RESET, 1ULL); 31 31 udelay(50); /* shouldn't get here but be safe and spin a while */ 32 32 } 33 33
+7 -7
arch/x86/kernel/shstk.c
··· 173 173 return PTR_ERR((void *)addr); 174 174 175 175 fpregs_lock_and_load(); 176 - wrmsrl(MSR_IA32_PL3_SSP, addr + size); 177 - wrmsrl(MSR_IA32_U_CET, CET_SHSTK_EN); 176 + wrmsrq(MSR_IA32_PL3_SSP, addr + size); 177 + wrmsrq(MSR_IA32_U_CET, CET_SHSTK_EN); 178 178 fpregs_unlock(); 179 179 180 180 shstk->base = addr; ··· 372 372 return -EFAULT; 373 373 374 374 fpregs_lock_and_load(); 375 - wrmsrl(MSR_IA32_PL3_SSP, ssp); 375 + wrmsrq(MSR_IA32_PL3_SSP, ssp); 376 376 fpregs_unlock(); 377 377 378 378 return 0; ··· 396 396 return err; 397 397 398 398 fpregs_lock_and_load(); 399 - wrmsrl(MSR_IA32_PL3_SSP, ssp); 399 + wrmsrq(MSR_IA32_PL3_SSP, ssp); 400 400 fpregs_unlock(); 401 401 402 402 return 0; ··· 473 473 msrval &= ~CET_WRSS_EN; 474 474 } 475 475 476 - wrmsrl(MSR_IA32_U_CET, msrval); 476 + wrmsrq(MSR_IA32_U_CET, msrval); 477 477 478 478 unlock: 479 479 fpregs_unlock(); ··· 492 492 493 493 fpregs_lock_and_load(); 494 494 /* Disable WRSS too when disabling shadow stack */ 495 - wrmsrl(MSR_IA32_U_CET, 0); 496 - wrmsrl(MSR_IA32_PL3_SSP, 0); 495 + wrmsrq(MSR_IA32_U_CET, 0); 496 + wrmsrq(MSR_IA32_PL3_SSP, 0); 497 497 fpregs_unlock(); 498 498 499 499 shstk_free(current);
+3 -3
arch/x86/kernel/traps.c
··· 749 749 if (current->pasid_activated) 750 750 return false; 751 751 752 - wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID); 752 + wrmsrq(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID); 753 753 current->pasid_activated = 1; 754 754 755 755 return true; ··· 1122 1122 1123 1123 rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 1124 1124 debugctl |= DEBUGCTLMSR_BTF; 1125 - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1125 + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 1126 1126 } 1127 1127 1128 1128 /* ··· 1390 1390 if (!xfd_err) 1391 1391 return false; 1392 1392 1393 - wrmsrl(MSR_IA32_XFD_ERR, 0); 1393 + wrmsrq(MSR_IA32_XFD_ERR, 0); 1394 1394 1395 1395 /* Die if that happens in kernel space */ 1396 1396 if (WARN_ON(!user_mode(regs)))
+4 -4
arch/x86/kernel/tsc_sync.c
··· 70 70 return; 71 71 72 72 /* Restore the original value */ 73 - wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted); 73 + wrmsrq(MSR_IA32_TSC_ADJUST, adj->adjusted); 74 74 75 75 if (!adj->warned || resume) { 76 76 pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n", ··· 142 142 if (likely(!tsc_async_resets)) { 143 143 pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", 144 144 cpu, bootval); 145 - wrmsrl(MSR_IA32_TSC_ADJUST, 0); 145 + wrmsrq(MSR_IA32_TSC_ADJUST, 0); 146 146 bootval = 0; 147 147 } else { 148 148 pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n", ··· 229 229 */ 230 230 if (bootval != ref->adjusted) { 231 231 cur->adjusted = ref->adjusted; 232 - wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted); 232 + wrmsrq(MSR_IA32_TSC_ADJUST, ref->adjusted); 233 233 } 234 234 /* 235 235 * We have the TSCs forced to be in sync on this package. Skip sync ··· 518 518 pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n", 519 519 cpu, cur_max_warp, cur->adjusted); 520 520 521 - wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted); 521 + wrmsrq(MSR_IA32_TSC_ADJUST, cur->adjusted); 522 522 goto retry; 523 523 524 524 }
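The tsc_sync.c hunks above write MSR_IA32_TSC_ADJUST back to the value the kernel last programmed whenever firmware or SMM has moved it. A compressed sketch of that detect-and-restore step, with 'expected' standing in for the per-CPU adj->adjusted value used above:

	static void tsc_adjust_restore_sketch(s64 expected)
	{
		u64 cur;

		rdmsrq(MSR_IA32_TSC_ADJUST, cur);
		if ((s64)cur != expected)
			wrmsrq(MSR_IA32_TSC_ADJUST, expected);	/* restore the expected value */
	}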
+1 -1
arch/x86/kvm/svm/avic.c
··· 330 330 int cpu = READ_ONCE(vcpu->cpu); 331 331 332 332 if (cpu != get_cpu()) { 333 - wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu)); 333 + wrmsrq(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu)); 334 334 trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu)); 335 335 } 336 336 put_cpu();
+5 -5
arch/x86/kvm/svm/svm.c
··· 566 566 if (multiplier == __this_cpu_read(current_tsc_ratio)) 567 567 return; 568 568 569 - wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); 569 + wrmsrq(MSR_AMD64_TSC_RATIO, multiplier); 570 570 __this_cpu_write(current_tsc_ratio, multiplier); 571 571 } 572 572 ··· 579 579 { 580 580 uint64_t efer; 581 581 582 - wrmsrl(MSR_VM_HSAVE_PA, 0); 582 + wrmsrq(MSR_VM_HSAVE_PA, 0); 583 583 rdmsrq(MSR_EFER, efer); 584 584 if (efer & EFER_SVME) { 585 585 /* ··· 587 587 * NMI aren't blocked. 588 588 */ 589 589 stgi(); 590 - wrmsrl(MSR_EFER, efer & ~EFER_SVME); 590 + wrmsrq(MSR_EFER, efer & ~EFER_SVME); 591 591 } 592 592 } 593 593 ··· 629 629 sd->next_asid = sd->max_asid + 1; 630 630 sd->min_asid = max_sev_asid + 1; 631 631 632 - wrmsrl(MSR_EFER, efer | EFER_SVME); 632 + wrmsrq(MSR_EFER, efer | EFER_SVME); 633 633 634 - wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa); 634 + wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa); 635 635 636 636 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { 637 637 /*
+1 -1
arch/x86/kvm/vmx/pmu_intel.c
··· 281 281 if (read) 282 282 rdmsrq(index, msr_info->data); 283 283 else 284 - wrmsrl(index, msr_info->data); 284 + wrmsrq(index, msr_info->data); 285 285 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); 286 286 local_irq_enable(); 287 287 return true;
+12 -12
arch/x86/kvm/vmx/vmx.c
··· 1063 1063 * provide that period, so a CPU could write host's record into 1064 1064 * guest's memory. 1065 1065 */ 1066 - wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 1066 + wrmsrq(MSR_IA32_PEBS_ENABLE, 0); 1067 1067 } 1068 1068 1069 1069 i = vmx_find_loadstore_msr_slot(&m->guest, msr); ··· 1192 1192 { 1193 1193 u32 i; 1194 1194 1195 - wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 1196 - wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1197 - wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1198 - wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1195 + wrmsrq(MSR_IA32_RTIT_STATUS, ctx->status); 1196 + wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1197 + wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1198 + wrmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1199 1199 for (i = 0; i < addr_range; i++) { 1200 - wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1201 - wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1200 + wrmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1201 + wrmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1202 1202 } 1203 1203 } 1204 1204 ··· 1227 1227 */ 1228 1228 rdmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1229 1229 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1230 - wrmsrl(MSR_IA32_RTIT_CTL, 0); 1230 + wrmsrq(MSR_IA32_RTIT_CTL, 0); 1231 1231 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); 1232 1232 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); 1233 1233 } ··· 1248 1248 * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary. 1249 1249 */ 1250 1250 if (vmx->pt_desc.host.ctl) 1251 - wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1251 + wrmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1252 1252 } 1253 1253 1254 1254 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, ··· 1338 1338 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); 1339 1339 } 1340 1340 1341 - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1341 + wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1342 1342 #else 1343 1343 savesegment(fs, fs_sel); 1344 1344 savesegment(gs, gs_sel); ··· 1382 1382 #endif 1383 1383 invalidate_tss_limit(); 1384 1384 #ifdef CONFIG_X86_64 1385 - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1385 + wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1386 1386 #endif 1387 1387 load_fixmap_gdt(raw_smp_processor_id()); 1388 1388 vmx->guest_state_loaded = false; ··· 1403 1403 { 1404 1404 preempt_disable(); 1405 1405 if (vmx->guest_state_loaded) 1406 - wrmsrl(MSR_KERNEL_GS_BASE, data); 1406 + wrmsrq(MSR_KERNEL_GS_BASE, data); 1407 1407 preempt_enable(); 1408 1408 vmx->msr_guest_kernel_gs_base = data; 1409 1409 }
+8 -8
arch/x86/kvm/x86.c
··· 578 578 for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) { 579 579 values = &msrs->values[slot]; 580 580 if (values->host != values->curr) { 581 - wrmsrl(kvm_uret_msrs_list[slot], values->host); 581 + wrmsrq(kvm_uret_msrs_list[slot], values->host); 582 582 values->curr = values->host; 583 583 } 584 584 } ··· 1174 1174 1175 1175 if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) && 1176 1176 vcpu->arch.ia32_xss != kvm_host.xss) 1177 - wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); 1177 + wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss); 1178 1178 } 1179 1179 1180 1180 if (cpu_feature_enabled(X86_FEATURE_PKU) && ··· 1205 1205 1206 1206 if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) && 1207 1207 vcpu->arch.ia32_xss != kvm_host.xss) 1208 - wrmsrl(MSR_IA32_XSS, kvm_host.xss); 1208 + wrmsrq(MSR_IA32_XSS, kvm_host.xss); 1209 1209 } 1210 1210 1211 1211 } ··· 3827 3827 if (!data) 3828 3828 break; 3829 3829 3830 - wrmsrl(MSR_IA32_PRED_CMD, data); 3830 + wrmsrq(MSR_IA32_PRED_CMD, data); 3831 3831 break; 3832 3832 } 3833 3833 case MSR_IA32_FLUSH_CMD: ··· 3840 3840 if (!data) 3841 3841 break; 3842 3842 3843 - wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 3843 + wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 3844 3844 break; 3845 3845 case MSR_EFER: 3846 3846 return set_efer(vcpu, msr_info); ··· 10974 10974 switch_fpu_return(); 10975 10975 10976 10976 if (vcpu->arch.guest_fpu.xfd_err) 10977 - wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 10977 + wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 10978 10978 10979 10979 if (unlikely(vcpu->arch.switch_db_regs)) { 10980 10980 set_debugreg(0, 7); ··· 11060 11060 kvm_x86_call(handle_exit_irqoff)(vcpu); 11061 11061 11062 11062 if (vcpu->arch.guest_fpu.xfd_err) 11063 - wrmsrl(MSR_IA32_XFD_ERR, 0); 11063 + wrmsrq(MSR_IA32_XFD_ERR, 0); 11064 11064 11065 11065 /* 11066 11066 * Consume any pending interrupts, including the possible source of ··· 13657 13657 else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) 13658 13658 ret = 1; 13659 13659 else 13660 - wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); 13660 + wrmsrq(MSR_IA32_SPEC_CTRL, saved_value); 13661 13661 13662 13662 local_irq_restore(flags); 13663 13663
+1 -1
arch/x86/mm/pat/memtype.c
··· 232 232 panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n"); 233 233 } 234 234 235 - wrmsrl(MSR_IA32_CR_PAT, pat_msr_val); 235 + wrmsrq(MSR_IA32_CR_PAT, pat_msr_val); 236 236 237 237 __flush_tlb_all(); 238 238 }
+1 -1
arch/x86/mm/tlb.c
··· 623 623 { 624 624 /* Flush L1D if the outgoing task requests it */ 625 625 if (prev_mm & LAST_USER_MM_L1D_FLUSH) 626 - wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 626 + wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 627 627 628 628 /* Check whether the incoming task opted in for L1D flush */ 629 629 if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH)))
+1 -1
arch/x86/pci/amd_bus.c
··· 344 344 rdmsrq(MSR_AMD64_NB_CFG, reg); 345 345 if (!(reg & ENABLE_CF8_EXT_CFG)) { 346 346 reg |= ENABLE_CF8_EXT_CFG; 347 - wrmsrl(MSR_AMD64_NB_CFG, reg); 347 + wrmsrq(MSR_AMD64_NB_CFG, reg); 348 348 } 349 349 return 0; 350 350 }
+1 -1
arch/x86/platform/olpc/olpc-xo1-sci.c
··· 325 325 dev_info(&pdev->dev, "SCI unmapped. Mapping to IRQ 3\n"); 326 326 sci_irq = 3; 327 327 lo |= 0x00300000; 328 - wrmsrl(0x51400020, lo); 328 + wrmsrq(0x51400020, lo); 329 329 } 330 330 331 331 /* Select level triggered in PIC */
+6 -6
arch/x86/power/cpu.c
··· 56 56 57 57 while (msr < end) { 58 58 if (msr->valid) 59 - wrmsrl(msr->info.msr_no, msr->info.reg.q); 59 + wrmsrq(msr->info.msr_no, msr->info.reg.q); 60 60 msr++; 61 61 } 62 62 } ··· 198 198 struct cpuinfo_x86 *c; 199 199 200 200 if (ctxt->misc_enable_saved) 201 - wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); 201 + wrmsrq(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); 202 202 /* 203 203 * control registers 204 204 */ ··· 208 208 __write_cr4(ctxt->cr4); 209 209 #else 210 210 /* CONFIG X86_64 */ 211 - wrmsrl(MSR_EFER, ctxt->efer); 211 + wrmsrq(MSR_EFER, ctxt->efer); 212 212 __write_cr4(ctxt->cr4); 213 213 #endif 214 214 write_cr3(ctxt->cr3); ··· 231 231 * handlers or in complicated helpers like load_gs_index(). 232 232 */ 233 233 #ifdef CONFIG_X86_64 234 - wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); 234 + wrmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base); 235 235 236 236 /* 237 237 * Reinitialize FRED to ensure the FRED MSRs contain the same values ··· 267 267 * restoring the selectors clobbers the bases. Keep in mind 268 268 * that MSR_KERNEL_GS_BASE is horribly misnamed. 269 269 */ 270 - wrmsrl(MSR_FS_BASE, ctxt->fs_base); 271 - wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); 270 + wrmsrq(MSR_FS_BASE, ctxt->fs_base); 271 + wrmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); 272 272 #else 273 273 loadsegment(gs, ctxt->gs); 274 274 #endif
+2 -2
arch/x86/virt/svm/sev.c
··· 140 140 141 141 val |= MSR_AMD64_SYSCFG_MFDM; 142 142 143 - wrmsrl(MSR_AMD64_SYSCFG, val); 143 + wrmsrq(MSR_AMD64_SYSCFG, val); 144 144 145 145 return 0; 146 146 } ··· 162 162 val |= MSR_AMD64_SYSCFG_SNP_EN; 163 163 val |= MSR_AMD64_SYSCFG_SNP_VMPL_EN; 164 164 165 - wrmsrl(MSR_AMD64_SYSCFG, val); 165 + wrmsrq(MSR_AMD64_SYSCFG, val); 166 166 167 167 return 0; 168 168 }
+2 -2
arch/x86/xen/suspend.c
··· 39 39 static void xen_vcpu_notify_restore(void *data) 40 40 { 41 41 if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) 42 - wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); 42 + wrmsrq(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); 43 43 44 44 /* Boot processor notified via generic timekeeping_resume() */ 45 45 if (smp_processor_id() == 0) ··· 57 57 if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { 58 58 rdmsrq(MSR_IA32_SPEC_CTRL, tmp); 59 59 this_cpu_write(spec_ctrl, tmp); 60 - wrmsrl(MSR_IA32_SPEC_CTRL, 0); 60 + wrmsrq(MSR_IA32_SPEC_CTRL, 0); 61 61 } 62 62 } 63 63
+1 -1
drivers/cpufreq/acpi-cpufreq.c
··· 117 117 else 118 118 val |= msr_mask; 119 119 120 - wrmsrl(msr_addr, val); 120 + wrmsrq(msr_addr, val); 121 121 return 0; 122 122 } 123 123
+1 -1
drivers/cpufreq/amd-pstate.c
··· 258 258 return 0; 259 259 260 260 if (fast_switch) { 261 - wrmsrl(MSR_AMD_CPPC_REQ, value); 261 + wrmsrq(MSR_AMD_CPPC_REQ, value); 262 262 return 0; 263 263 } else { 264 264 int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+1 -1
drivers/cpufreq/e_powersaver.c
··· 228 228 rdmsrq(MSR_IA32_MISC_ENABLE, val); 229 229 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { 230 230 val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; 231 - wrmsrl(MSR_IA32_MISC_ENABLE, val); 231 + wrmsrq(MSR_IA32_MISC_ENABLE, val); 232 232 /* Can be locked at 0 */ 233 233 rdmsrq(MSR_IA32_MISC_ENABLE, val); 234 234 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+6 -6
drivers/cpufreq/intel_pstate.c
··· 1293 1293 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1294 1294 power_ctl_ee_state = POWER_CTL_EE_DISABLE; 1295 1295 } 1296 - wrmsrl(MSR_IA32_POWER_CTL, power_ctl); 1296 + wrmsrq(MSR_IA32_POWER_CTL, power_ctl); 1297 1297 mutex_unlock(&intel_pstate_driver_lock); 1298 1298 } 1299 1299 ··· 2351 2351 return; 2352 2352 2353 2353 hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; 2354 - wrmsrl(MSR_HWP_REQUEST, hwp_req); 2354 + wrmsrq(MSR_HWP_REQUEST, hwp_req); 2355 2355 cpu->last_update = cpu->sample.time; 2356 2356 } 2357 2357 ··· 2364 2364 expired = time_after64(cpu->sample.time, cpu->last_update + 2365 2365 hwp_boost_hold_time_ns); 2366 2366 if (expired) { 2367 - wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); 2367 + wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached); 2368 2368 cpu->hwp_boost_min = 0; 2369 2369 } 2370 2370 } ··· 2520 2520 return; 2521 2521 2522 2522 cpu->pstate.current_pstate = pstate; 2523 - wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 2523 + wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 2524 2524 } 2525 2525 2526 2526 static void intel_pstate_adjust_pstate(struct cpudata *cpu) ··· 3100 3100 3101 3101 WRITE_ONCE(cpu->hwp_req_cached, value); 3102 3102 if (fast_switch) 3103 - wrmsrl(MSR_HWP_REQUEST, value); 3103 + wrmsrq(MSR_HWP_REQUEST, value); 3104 3104 else 3105 3105 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 3106 3106 } ··· 3109 3109 u32 target_pstate, bool fast_switch) 3110 3110 { 3111 3111 if (fast_switch) 3112 - wrmsrl(MSR_IA32_PERF_CTL, 3112 + wrmsrq(MSR_IA32_PERF_CTL, 3113 3113 pstate_funcs.get_val(cpu, target_pstate)); 3114 3114 else 3115 3115 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+8 -8
drivers/cpufreq/longhaul.c
··· 144 144 /* Sync to timer tick */ 145 145 safe_halt(); 146 146 /* Change frequency on next halt or sleep */ 147 - wrmsrl(MSR_VIA_BCR2, bcr2.val); 147 + wrmsrq(MSR_VIA_BCR2, bcr2.val); 148 148 /* Invoke transition */ 149 149 ACPI_FLUSH_CPU_CACHE(); 150 150 halt(); ··· 153 153 local_irq_disable(); 154 154 rdmsrq(MSR_VIA_BCR2, bcr2.val); 155 155 bcr2.bits.ESOFTBF = 0; 156 - wrmsrl(MSR_VIA_BCR2, bcr2.val); 156 + wrmsrq(MSR_VIA_BCR2, bcr2.val); 157 157 } 158 158 159 159 /* For processor with Longhaul MSR */ ··· 180 180 /* Raise voltage if necessary */ 181 181 if (can_scale_voltage && dir) { 182 182 longhaul.bits.EnableSoftVID = 1; 183 - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 183 + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); 184 184 /* Change voltage */ 185 185 if (!cx_address) { 186 186 ACPI_FLUSH_CPU_CACHE(); ··· 194 194 t = inl(acpi_gbl_FADT.xpm_timer_block.address); 195 195 } 196 196 longhaul.bits.EnableSoftVID = 0; 197 - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 197 + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); 198 198 } 199 199 200 200 /* Change frequency on next halt or sleep */ 201 201 longhaul.bits.EnableSoftBusRatio = 1; 202 - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 202 + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); 203 203 if (!cx_address) { 204 204 ACPI_FLUSH_CPU_CACHE(); 205 205 halt(); ··· 212 212 } 213 213 /* Disable bus ratio bit */ 214 214 longhaul.bits.EnableSoftBusRatio = 0; 215 - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 215 + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); 216 216 217 217 /* Reduce voltage if necessary */ 218 218 if (can_scale_voltage && !dir) { 219 219 longhaul.bits.EnableSoftVID = 1; 220 - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 220 + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); 221 221 /* Change voltage */ 222 222 if (!cx_address) { 223 223 ACPI_FLUSH_CPU_CACHE(); ··· 231 231 t = inl(acpi_gbl_FADT.xpm_timer_block.address); 232 232 } 233 233 longhaul.bits.EnableSoftVID = 0; 234 - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 234 + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); 235 235 } 236 236 } 237 237
+2 -2
drivers/cpufreq/powernow-k7.c
··· 225 225 fidvidctl.bits.FID = fid; 226 226 fidvidctl.bits.VIDC = 0; 227 227 fidvidctl.bits.FIDC = 1; 228 - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); 228 + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); 229 229 } 230 230 } 231 231 ··· 240 240 fidvidctl.bits.VID = vid; 241 241 fidvidctl.bits.FIDC = 0; 242 242 fidvidctl.bits.VIDC = 1; 243 - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); 243 + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); 244 244 } 245 245 } 246 246
+1 -1
drivers/crypto/ccp/sev-dev.c
··· 1060 1060 1061 1061 static void snp_set_hsave_pa(void *arg) 1062 1062 { 1063 - wrmsrl(MSR_VM_HSAVE_PA, 0); 1063 + wrmsrq(MSR_VM_HSAVE_PA, 0); 1064 1064 } 1065 1065 1066 1066 static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
+5 -5
drivers/idle/intel_idle.c
··· 2082 2082 */ 2083 2083 static void __init byt_cht_auto_demotion_disable(void) 2084 2084 { 2085 - wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); 2086 - wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); 2085 + wrmsrq(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); 2086 + wrmsrq(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); 2087 2087 } 2088 2088 2089 2089 static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) ··· 2243 2243 2244 2244 rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2245 2245 msr_bits &= ~auto_demotion_disable_flags; 2246 - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2246 + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2247 2247 } 2248 2248 2249 2249 static void c1e_promotion_enable(void) ··· 2252 2252 2253 2253 rdmsrq(MSR_IA32_POWER_CTL, msr_bits); 2254 2254 msr_bits |= 0x2; 2255 - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); 2255 + wrmsrq(MSR_IA32_POWER_CTL, msr_bits); 2256 2256 } 2257 2257 2258 2258 static void c1e_promotion_disable(void) ··· 2261 2261 2262 2262 rdmsrq(MSR_IA32_POWER_CTL, msr_bits); 2263 2263 msr_bits &= ~0x2; 2264 - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); 2264 + wrmsrq(MSR_IA32_POWER_CTL, msr_bits); 2265 2265 } 2266 2266 2267 2267 /**
+5 -5
drivers/platform/x86/intel/ifs/load.c
··· 127 127 ifsd = ifs_get_data(dev); 128 128 msrs = ifs_get_test_msrs(dev); 129 129 /* run scan hash copy */ 130 - wrmsrl(msrs->copy_hashes, ifs_hash_ptr); 130 + wrmsrq(msrs->copy_hashes, ifs_hash_ptr); 131 131 rdmsrq(msrs->copy_hashes_status, hashes_status.data); 132 132 133 133 /* enumerate the scan image information */ ··· 149 149 linear_addr = base + i * chunk_size; 150 150 linear_addr |= i; 151 151 152 - wrmsrl(msrs->copy_chunks, linear_addr); 152 + wrmsrq(msrs->copy_chunks, linear_addr); 153 153 rdmsrq(msrs->copy_chunks_status, chunk_status.data); 154 154 155 155 ifsd->valid_chunks = chunk_status.valid_chunks; ··· 195 195 msrs = ifs_get_test_msrs(dev); 196 196 197 197 if (need_copy_scan_hashes(ifsd)) { 198 - wrmsrl(msrs->copy_hashes, ifs_hash_ptr); 198 + wrmsrq(msrs->copy_hashes, ifs_hash_ptr); 199 199 rdmsrq(msrs->copy_hashes_status, hashes_status.data); 200 200 201 201 /* enumerate the scan image information */ ··· 216 216 } 217 217 218 218 if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { 219 - wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); 219 + wrmsrq(msrs->test_ctrl, INVALIDATE_STRIDE); 220 220 rdmsrq(msrs->copy_chunks_status, chunk_status.data); 221 221 if (chunk_status.valid_chunks != 0) { 222 222 dev_err(dev, "Couldn't invalidate installed stride - %d\n", ··· 238 238 chunk_table[1] = linear_addr; 239 239 do { 240 240 local_irq_disable(); 241 - wrmsrl(msrs->copy_chunks, (u64)chunk_table); 241 + wrmsrq(msrs->copy_chunks, (u64)chunk_table); 242 242 local_irq_enable(); 243 243 rdmsrq(msrs->copy_chunks_status, chunk_status.data); 244 244 err_code = chunk_status.error_code;
+4 -4
drivers/platform/x86/intel/ifs/runtest.c
··· 209 209 * take up to 200 milliseconds (in the case where all chunks 210 210 * are processed in a single pass) before it retires. 211 211 */ 212 - wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data); 212 + wrmsrq(MSR_ACTIVATE_SCAN, params->activate->data); 213 213 rdmsrq(MSR_SCAN_STATUS, status.data); 214 214 215 215 trace_ifs_status(ifsd->cur_batch, start, stop, status.data); ··· 321 321 first = cpumask_first(cpu_smt_mask(cpu)); 322 322 323 323 if (cpu == first) { 324 - wrmsrl(MSR_ARRAY_BIST, command->data); 324 + wrmsrq(MSR_ARRAY_BIST, command->data); 325 325 /* Pass back the result of the test */ 326 326 rdmsrq(MSR_ARRAY_BIST, command->data); 327 327 } ··· 374 374 first = cpumask_first(cpu_smt_mask(cpu)); 375 375 376 376 if (cpu == first) { 377 - wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); 377 + wrmsrq(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); 378 378 rdmsrq(MSR_ARRAY_STATUS, *((u64 *)status)); 379 379 } 380 380 ··· 526 526 * starts scan of each requested bundle. The core test happens 527 527 * during the "execution" of the WRMSR. 528 528 */ 529 - wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); 529 + wrmsrq(MSR_ACTIVATE_SBAF, run_params->activate->data); 530 530 rdmsrq(MSR_SBAF_STATUS, status.data); 531 531 trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); 532 532
+2 -2
drivers/platform/x86/intel/pmc/cnp.c
··· 230 230 rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); 231 231 per_cpu(pkg_cst_config, cpunum) = val; 232 232 val &= ~NHM_C1_AUTO_DEMOTE; 233 - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, val); 233 + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); 234 234 235 235 pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, val); 236 236 } ··· 239 239 { 240 240 int cpunum = smp_processor_id(); 241 241 242 - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum)); 242 + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum)); 243 243 244 244 pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, 245 245 per_cpu(pkg_cst_config, cpunum));
+2 -2
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
··· 52 52 return ret; 53 53 54 54 /* Write DATA register */ 55 - wrmsrl(MSR_OS_MAILBOX_DATA, command_data); 55 + wrmsrq(MSR_OS_MAILBOX_DATA, command_data); 56 56 57 57 /* Write command register */ 58 58 data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) | 59 59 (parameter & GENMASK_ULL(13, 0)) << 16 | 60 60 (sub_command << 8) | 61 61 command; 62 - wrmsrl(MSR_OS_MAILBOX_INTERFACE, data); 62 + wrmsrq(MSR_OS_MAILBOX_INTERFACE, data); 63 63 64 64 /* Poll for rb bit == 0 */ 65 65 retries = OS_MAILBOX_RETRY_COUNT;
+8 -8
drivers/platform/x86/intel_ips.c
··· 382 382 thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); 383 383 384 384 turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; 385 - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 385 + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 386 386 387 387 turbo_override &= ~TURBO_TDP_MASK; 388 388 turbo_override |= new_tdp_limit; 389 389 390 - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 390 + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 391 391 } 392 392 393 393 /** ··· 417 417 thm_writew(THM_MPCPC, (new_limit * 10) / 8); 418 418 419 419 turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; 420 - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 420 + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 421 421 422 422 turbo_override &= ~TURBO_TDP_MASK; 423 423 turbo_override |= new_limit; 424 424 425 - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 425 + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 426 426 } 427 427 428 428 /** ··· 440 440 rdmsrq(IA32_PERF_CTL, perf_ctl); 441 441 if (perf_ctl & IA32_PERF_TURBO_DIS) { 442 442 perf_ctl &= ~IA32_PERF_TURBO_DIS; 443 - wrmsrl(IA32_PERF_CTL, perf_ctl); 443 + wrmsrq(IA32_PERF_CTL, perf_ctl); 444 444 } 445 445 } 446 446 ··· 478 478 rdmsrq(IA32_PERF_CTL, perf_ctl); 479 479 if (!(perf_ctl & IA32_PERF_TURBO_DIS)) { 480 480 perf_ctl |= IA32_PERF_TURBO_DIS; 481 - wrmsrl(IA32_PERF_CTL, perf_ctl); 481 + wrmsrq(IA32_PERF_CTL, perf_ctl); 482 482 } 483 483 } 484 484 ··· 1598 1598 1599 1599 rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1600 1600 turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN); 1601 - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1602 - wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); 1601 + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1602 + wrmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); 1603 1603 1604 1604 free_irq(ips->irq, ips); 1605 1605 pci_free_irq_vectors(dev);
+3 -3
drivers/thermal/intel/intel_hfi.c
··· 358 358 359 359 rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 360 360 msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; 361 - wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 361 + wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 362 362 } 363 363 364 364 static void hfi_set_hw_table(struct hfi_instance *hfi_instance) ··· 368 368 369 369 hw_table_pa = virt_to_phys(hfi_instance->hw_table); 370 370 msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT; 371 - wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val); 371 + wrmsrq(MSR_IA32_HW_FEEDBACK_PTR, msr_val); 372 372 } 373 373 374 374 /* Caller must hold hfi_instance_lock. */ ··· 379 379 380 380 rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 381 381 msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; 382 - wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 382 + wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 383 383 384 384 /* 385 385 * Wait for hardware to acknowledge the disabling of HFI. Some
+1 -1
drivers/thermal/intel/therm_throt.c
··· 273 273 } 274 274 275 275 msr_val &= ~bit_mask; 276 - wrmsrl(msr, msr_val); 276 + wrmsrq(msr, msr_val); 277 277 } 278 278 EXPORT_SYMBOL_GPL(thermal_clear_package_intr_status); 279 279
+5 -5
drivers/video/fbdev/geode/lxfb_ops.c
··· 371 371 } else 372 372 msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_CRT; 373 373 374 - wrmsrl(MSR_LX_GLD_MSR_CONFIG, msrval); 374 + wrmsrq(MSR_LX_GLD_MSR_CONFIG, msrval); 375 375 376 376 /* Clear the various buffers */ 377 377 /* FIXME: Adjust for panning here */ ··· 427 427 | MSR_LX_SPARE_MSR_WM_LPEN_OVRD); 428 428 msrval |= MSR_LX_SPARE_MSR_DIS_VIFO_WM | 429 429 MSR_LX_SPARE_MSR_DIS_INIT_V_PRI; 430 - wrmsrl(MSR_LX_SPARE_MSR, msrval); 430 + wrmsrq(MSR_LX_SPARE_MSR, msrval); 431 431 432 432 gcfg = DC_GENERAL_CFG_DFLE; /* Display fifo enable */ 433 433 gcfg |= (0x6 << DC_GENERAL_CFG_DFHPSL_SHIFT) | /* default priority */ ··· 664 664 uint32_t filt; 665 665 int i; 666 666 667 - wrmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare); 667 + wrmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare); 668 668 669 669 for (i = 0; i < ARRAY_SIZE(par->dc); i++) { 670 670 switch (i) { ··· 729 729 { 730 730 int i; 731 731 732 - wrmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); 733 - wrmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel); 732 + wrmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); 733 + wrmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel); 734 734 735 735 for (i = 0; i < ARRAY_SIZE(par->vp); i++) { 736 736 switch (i) {
+1 -1
drivers/video/fbdev/geode/suspend_gx.c
··· 133 133 { 134 134 int i; 135 135 136 - wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel); 136 + wrmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel); 137 137 138 138 for (i = 0; i < ARRAY_SIZE(par->vp); i++) { 139 139 switch (i) {
+4 -4
drivers/video/fbdev/geode/video_gx.c
··· 151 151 dotpll |= MSR_GLCP_DOTPLL_DOTRESET; 152 152 dotpll &= ~MSR_GLCP_DOTPLL_BYPASS; 153 153 154 - wrmsrl(MSR_GLCP_DOTPLL, dotpll); 154 + wrmsrq(MSR_GLCP_DOTPLL, dotpll); 155 155 156 156 /* Program dividers. */ 157 157 sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 ··· 159 159 | MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 ); 160 160 sys_rstpll |= pll_table[best_i].sys_rstpll_bits; 161 161 162 - wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); 162 + wrmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll); 163 163 164 164 /* Clear reset bit to start PLL. */ 165 165 dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET); 166 - wrmsrl(MSR_GLCP_DOTPLL, dotpll); 166 + wrmsrq(MSR_GLCP_DOTPLL, dotpll); 167 167 168 168 /* Wait for LOCK bit. */ 169 169 do { ··· 183 183 rdmsrq(MSR_GX_MSR_PADSEL, val); 184 184 val &= ~MSR_GX_MSR_PADSEL_MASK; 185 185 val |= MSR_GX_MSR_PADSEL_TFT; 186 - wrmsrl(MSR_GX_MSR_PADSEL, val); 186 + wrmsrq(MSR_GX_MSR_PADSEL, val); 187 187 188 188 /* Turn off the panel */ 189 189
+1 -1
include/hyperv/hvgdk_mini.h
··· 1013 1013 1014 1014 /* 1015 1015 * To support arch-generic code calling hv_set/get_register: 1016 - * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrl 1016 + * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrq 1017 1017 * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall 1018 1018 */ 1019 1019 #define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)