Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

x86/msr: Rename 'rdmsrl()' to 'rdmsrq()'

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>

Ingo Molnar c435e608 d58c04cf
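
For reference, a minimal before/after sketch of the rename at a typical call site (a standalone illustrative snippet, not taken from this diff); the macro's semantics are unchanged and it still reads a 64-bit MSR value into a u64, only the suffix moves from 'l' (long) to 'q' (quadword):

	u64 debugctl;

	/* old name, prior to this commit */
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);

	/* new name, after this commit */
	rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);

Note that the write-side wrmsrl() calls are not renamed by this commit; in the hunks below, wrmsrl() calls adjacent to converted rdmsrq() calls keep their old name.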

+340 -340
+1 -1
arch/x86/coco/sev/core.c
··· 3278 3278 return; 3279 3279 3280 3280 setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); 3281 - rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz); 3281 + rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz); 3282 3282 snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000); 3283 3283 3284 3284 x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
+2 -2
arch/x86/events/amd/brs.c
··· 325 325 u32 brs_idx = tos - i; 326 326 u64 from, to; 327 327 328 - rdmsrl(brs_to(brs_idx), to); 328 + rdmsrq(brs_to(brs_idx), to); 329 329 330 330 /* Entry does not belong to us (as marked by kernel) */ 331 331 if (to == BRS_POISON) ··· 341 341 if (!amd_brs_match_plm(event, to)) 342 342 continue; 343 343 344 - rdmsrl(brs_from(brs_idx), from); 344 + rdmsrq(brs_from(brs_idx), from); 345 345 346 346 perf_clear_branch_entry_bitfields(br+nr); 347 347
+2 -2
arch/x86/events/amd/core.c
··· 659 659 u64 status; 660 660 661 661 /* PerfCntrGlobalStatus is read-only */ 662 - rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status); 662 + rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status); 663 663 664 664 return status; 665 665 } ··· 679 679 { 680 680 u64 counter; 681 681 682 - rdmsrl(x86_pmu_event_addr(idx), counter); 682 + rdmsrq(x86_pmu_event_addr(idx), counter); 683 683 684 684 return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1)); 685 685 }
+9 -9
arch/x86/events/amd/ibs.c
··· 424 424 * prev count manually on overflow. 425 425 */ 426 426 while (!perf_event_try_update(event, count, 64)) { 427 - rdmsrl(event->hw.config_base, *config); 427 + rdmsrq(event->hw.config_base, *config); 428 428 count = perf_ibs->get_count(*config); 429 429 } 430 430 } ··· 513 513 if (!stopping && (hwc->state & PERF_HES_UPTODATE)) 514 514 return; 515 515 516 - rdmsrl(hwc->config_base, config); 516 + rdmsrq(hwc->config_base, config); 517 517 518 518 if (stopping) { 519 519 /* ··· 1256 1256 hwc = &event->hw; 1257 1257 msr = hwc->config_base; 1258 1258 buf = ibs_data.regs; 1259 - rdmsrl(msr, *buf); 1259 + rdmsrq(msr, *buf); 1260 1260 if (!(*buf++ & perf_ibs->valid_mask)) 1261 1261 goto fail; 1262 1262 ··· 1274 1274 offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip); 1275 1275 1276 1276 do { 1277 - rdmsrl(msr + offset, *buf++); 1277 + rdmsrq(msr + offset, *buf++); 1278 1278 size++; 1279 1279 offset = find_next_bit(perf_ibs->offset_mask, 1280 1280 perf_ibs->offset_max, ··· 1304 1304 if (event->attr.sample_type & PERF_SAMPLE_RAW) { 1305 1305 if (perf_ibs == &perf_ibs_op) { 1306 1306 if (ibs_caps & IBS_CAPS_BRNTRGT) { 1307 - rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); 1307 + rdmsrq(MSR_AMD64_IBSBRTARGET, *buf++); 1308 1308 br_target_idx = size; 1309 1309 size++; 1310 1310 } 1311 1311 if (ibs_caps & IBS_CAPS_OPDATA4) { 1312 - rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); 1312 + rdmsrq(MSR_AMD64_IBSOPDATA4, *buf++); 1313 1313 size++; 1314 1314 } 1315 1315 } 1316 1316 if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) { 1317 - rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++); 1317 + rdmsrq(MSR_AMD64_ICIBSEXTDCTL, *buf++); 1318 1318 size++; 1319 1319 } 1320 1320 } ··· 1565 1565 1566 1566 preempt_disable(); 1567 1567 1568 - rdmsrl(MSR_AMD64_IBSCTL, val); 1568 + rdmsrq(MSR_AMD64_IBSCTL, val); 1569 1569 offset = val & IBSCTL_LVT_OFFSET_MASK; 1570 1570 1571 1571 if (!(val & IBSCTL_LVT_OFFSET_VALID)) { ··· 1680 1680 { 1681 1681 u64 val; 1682 1682 1683 - rdmsrl(MSR_AMD64_IBSCTL, val); 1683 + rdmsrq(MSR_AMD64_IBSCTL, val); 1684 1684 if (!(val & IBSCTL_LVT_OFFSET_VALID)) 1685 1685 return -EINVAL; 1686 1686
+4 -4
arch/x86/events/amd/lbr.c
··· 73 73 { 74 74 u64 val; 75 75 76 - rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val); 76 + rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val); 77 77 78 78 return val; 79 79 } ··· 82 82 { 83 83 u64 val; 84 84 85 - rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); 85 + rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); 86 86 87 87 return val; 88 88 } ··· 400 400 } 401 401 402 402 if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { 403 - rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); 403 + rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl); 404 404 wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 405 405 } 406 406 407 - rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); 407 + rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); 408 408 wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN); 409 409 } 410 410
+4 -4
arch/x86/events/amd/power.c
··· 48 48 49 49 prev_pwr_acc = hwc->pwr_acc; 50 50 prev_ptsc = hwc->ptsc; 51 - rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc); 52 - rdmsrl(MSR_F15H_PTSC, new_ptsc); 51 + rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc); 52 + rdmsrq(MSR_F15H_PTSC, new_ptsc); 53 53 54 54 /* 55 55 * Calculate the CU power consumption over a time period, the unit of ··· 75 75 76 76 event->hw.state = 0; 77 77 78 - rdmsrl(MSR_F15H_PTSC, event->hw.ptsc); 79 - rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc); 78 + rdmsrq(MSR_F15H_PTSC, event->hw.ptsc); 79 + rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc); 80 80 } 81 81 82 82 static void pmu_event_start(struct perf_event *event, int mode)
+1 -1
arch/x86/events/amd/uncore.c
··· 106 106 * read counts directly from the corresponding PERF_CTR. 107 107 */ 108 108 if (hwc->event_base_rdpmc < 0) 109 - rdmsrl(hwc->event_base, new); 109 + rdmsrq(hwc->event_base, new); 110 110 else 111 111 rdpmcl(hwc->event_base_rdpmc, new); 112 112
+10 -10
arch/x86/events/core.c
··· 693 693 694 694 if (!test_bit(idx, cpuc->active_mask)) 695 695 continue; 696 - rdmsrl(x86_pmu_config_addr(idx), val); 696 + rdmsrq(x86_pmu_config_addr(idx), val); 697 697 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) 698 698 continue; 699 699 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; ··· 1550 1550 return; 1551 1551 1552 1552 if (x86_pmu.version >= 2) { 1553 - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); 1554 - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 1555 - rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); 1556 - rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); 1553 + rdmsrq(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); 1554 + rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); 1555 + rdmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); 1556 + rdmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); 1557 1557 1558 1558 pr_info("\n"); 1559 1559 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); ··· 1561 1561 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); 1562 1562 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); 1563 1563 if (pebs_constraints) { 1564 - rdmsrl(MSR_IA32_PEBS_ENABLE, pebs); 1564 + rdmsrq(MSR_IA32_PEBS_ENABLE, pebs); 1565 1565 pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs); 1566 1566 } 1567 1567 if (x86_pmu.lbr_nr) { 1568 - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1568 + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 1569 1569 pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl); 1570 1570 } 1571 1571 } 1572 1572 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); 1573 1573 1574 1574 for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) { 1575 - rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); 1576 - rdmsrl(x86_pmu_event_addr(idx), pmc_count); 1575 + rdmsrq(x86_pmu_config_addr(idx), pmc_ctrl); 1576 + rdmsrq(x86_pmu_event_addr(idx), pmc_count); 1577 1577 1578 1578 prev_left = per_cpu(pmc_prev_left[idx], cpu); 1579 1579 ··· 1587 1587 for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) { 1588 1588 if (fixed_counter_disabled(idx, cpuc->pmu)) 1589 1589 continue; 1590 - rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count); 1590 + rdmsrq(x86_pmu_fixed_ctr_addr(idx), pmc_count); 1591 1591 1592 1592 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", 1593 1593 cpu, idx, pmc_count);
+5 -5
arch/x86/events/intel/core.c
··· 2489 2489 { 2490 2490 u64 status; 2491 2491 2492 - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 2492 + rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); 2493 2493 2494 2494 return status; 2495 2495 } ··· 5054 5054 5055 5055 if (!intel_pmu_broken_perf_cap()) { 5056 5056 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ 5057 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); 5057 + rdmsrq(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); 5058 5058 } 5059 5059 } 5060 5060 ··· 5202 5202 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { 5203 5203 union perf_capabilities perf_cap; 5204 5204 5205 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); 5205 + rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); 5206 5206 if (!perf_cap.perf_metrics) { 5207 5207 x86_pmu.intel_cap.perf_metrics = 0; 5208 5208 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); ··· 5627 5627 5628 5628 /* 5629 5629 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value 5630 - * should equal rdmsrl()'s even with the quirk. 5630 + * should equal rdmsrq()'s even with the quirk. 5631 5631 */ 5632 5632 if (val_new != val_tmp) 5633 5633 return false; ··· 6642 6642 if (boot_cpu_has(X86_FEATURE_PDCM)) { 6643 6643 u64 capabilities; 6644 6644 6645 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); 6645 + rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities); 6646 6646 x86_pmu.intel_cap.capabilities = capabilities; 6647 6647 } 6648 6648
+1 -1
arch/x86/events/intel/cstate.c
··· 320 320 { 321 321 u64 val; 322 322 323 - rdmsrl(event->hw.event_base, val); 323 + rdmsrq(event->hw.event_base, val); 324 324 return val; 325 325 } 326 326
+3 -3
arch/x86/events/intel/knc.c
··· 159 159 { 160 160 u64 val; 161 161 162 - rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 162 + rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 163 163 val &= ~(KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1); 164 164 wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 165 165 } ··· 168 168 { 169 169 u64 val; 170 170 171 - rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 171 + rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 172 172 val |= (KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1); 173 173 wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); 174 174 } ··· 200 200 { 201 201 u64 status; 202 202 203 - rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status); 203 + rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status); 204 204 205 205 return status; 206 206 }
+7 -7
arch/x86/events/intel/lbr.c
··· 139 139 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel) 140 140 wrmsrl(MSR_LBR_SELECT, lbr_select); 141 141 142 - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 142 + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 143 143 orig_debugctl = debugctl; 144 144 145 145 if (!static_cpu_has(X86_FEATURE_ARCH_LBR)) ··· 209 209 { 210 210 u64 tos; 211 211 212 - rdmsrl(x86_pmu.lbr_tos, tos); 212 + rdmsrq(x86_pmu.lbr_tos, tos); 213 213 return tos; 214 214 } 215 215 ··· 302 302 if (lbr) 303 303 return lbr->from; 304 304 305 - rdmsrl(x86_pmu.lbr_from + idx, val); 305 + rdmsrq(x86_pmu.lbr_from + idx, val); 306 306 307 307 return lbr_from_signext_quirk_rd(val); 308 308 } ··· 314 314 if (lbr) 315 315 return lbr->to; 316 316 317 - rdmsrl(x86_pmu.lbr_to + idx, val); 317 + rdmsrq(x86_pmu.lbr_to + idx, val); 318 318 319 319 return val; 320 320 } ··· 326 326 if (lbr) 327 327 return lbr->info; 328 328 329 - rdmsrl(x86_pmu.lbr_info + idx, val); 329 + rdmsrq(x86_pmu.lbr_info + idx, val); 330 330 331 331 return val; 332 332 } ··· 475 475 task_ctx->tos = tos; 476 476 477 477 if (cpuc->lbr_select) 478 - rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); 478 + rdmsrq(MSR_LBR_SELECT, task_ctx->lbr_sel); 479 479 } 480 480 481 481 static void intel_pmu_arch_lbr_save(void *ctx) ··· 752 752 u64 lbr; 753 753 } msr_lastbranch; 754 754 755 - rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); 755 + rdmsrq(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); 756 756 757 757 perf_clear_branch_entry_bitfields(br); 758 758
+2 -2
arch/x86/events/intel/p4.c
··· 859 859 u64 v; 860 860 861 861 /* an official way for overflow indication */ 862 - rdmsrl(hwc->config_base, v); 862 + rdmsrq(hwc->config_base, v); 863 863 if (v & P4_CCCR_OVF) { 864 864 wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF); 865 865 return 1; ··· 872 872 * the counter has reached zero value and continued counting before 873 873 * real NMI signal was received: 874 874 */ 875 - rdmsrl(hwc->event_base, v); 875 + rdmsrq(hwc->event_base, v); 876 876 if (!(v & ARCH_P4_UNFLAGGED_BIT)) 877 877 return 1; 878 878
+2 -2
arch/x86/events/intel/p6.c
··· 142 142 u64 val; 143 143 144 144 /* p6 only has one enable register */ 145 - rdmsrl(MSR_P6_EVNTSEL0, val); 145 + rdmsrq(MSR_P6_EVNTSEL0, val); 146 146 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 147 147 wrmsrl(MSR_P6_EVNTSEL0, val); 148 148 } ··· 152 152 unsigned long val; 153 153 154 154 /* p6 only has one enable register */ 155 - rdmsrl(MSR_P6_EVNTSEL0, val); 155 + rdmsrq(MSR_P6_EVNTSEL0, val); 156 156 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 157 157 wrmsrl(MSR_P6_EVNTSEL0, val); 158 158 }
+6 -6
arch/x86/events/intel/pt.c
··· 194 194 int ret; 195 195 long i; 196 196 197 - rdmsrl(MSR_PLATFORM_INFO, reg); 197 + rdmsrq(MSR_PLATFORM_INFO, reg); 198 198 pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8; 199 199 200 200 /* ··· 230 230 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace 231 231 * post-VMXON. 232 232 */ 233 - rdmsrl(MSR_IA32_VMX_MISC, reg); 233 + rdmsrq(MSR_IA32_VMX_MISC, reg); 234 234 if (reg & BIT(14)) 235 235 pt_pmu.vmx = true; 236 236 } ··· 926 926 int advance = 0; 927 927 u64 status; 928 928 929 - rdmsrl(MSR_IA32_RTIT_STATUS, status); 929 + rdmsrq(MSR_IA32_RTIT_STATUS, status); 930 930 931 931 if (status & RTIT_STATUS_ERROR) { 932 932 pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n"); ··· 985 985 struct topa_page *tp; 986 986 987 987 if (!buf->single) { 988 - rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base); 988 + rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base); 989 989 tp = phys_to_virt(pt->output_base); 990 990 buf->cur = &tp->topa; 991 991 } 992 992 993 - rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask); 993 + rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask); 994 994 /* offset within current output region */ 995 995 buf->output_off = pt->output_mask >> 32; 996 996 /* index of current output region within this table */ ··· 1611 1611 * PMI might have just cleared these, so resume_allowed 1612 1612 * must be checked again also. 1613 1613 */ 1614 - rdmsrl(MSR_IA32_RTIT_STATUS, status); 1614 + rdmsrq(MSR_IA32_RTIT_STATUS, status); 1615 1615 if (!(status & (RTIT_STATUS_TRIGGEREN | 1616 1616 RTIT_STATUS_ERROR | 1617 1617 RTIT_STATUS_STOPPED)) &&
+1 -1
arch/x86/events/intel/uncore.c
··· 150 150 { 151 151 u64 count; 152 152 153 - rdmsrl(event->hw.event_base, count); 153 + rdmsrq(event->hw.event_base, count); 154 154 155 155 return count; 156 156 }
+2 -2
arch/x86/events/intel/uncore_nhmex.c
··· 214 214 u64 config; 215 215 216 216 if (msr) { 217 - rdmsrl(msr, config); 217 + rdmsrq(msr, config); 218 218 config &= ~((1ULL << uncore_num_counters(box)) - 1); 219 219 /* WBox has a fixed counter */ 220 220 if (uncore_msr_fixed_ctl(box)) ··· 229 229 u64 config; 230 230 231 231 if (msr) { 232 - rdmsrl(msr, config); 232 + rdmsrq(msr, config); 233 233 config |= (1ULL << uncore_num_counters(box)) - 1; 234 234 /* WBox has a fixed counter */ 235 235 if (uncore_msr_fixed_ctl(box))
+1 -1
arch/x86/events/intel/uncore_snb.c
··· 504 504 { 505 505 u64 num_boxes; 506 506 507 - rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes); 507 + rdmsrq(ICL_UNC_CBO_CONFIG, num_boxes); 508 508 509 509 return num_boxes & ICL_UNC_NUM_CBO_MASK; 510 510 }
+3 -3
arch/x86/events/intel/uncore_snbep.c
··· 618 618 619 619 msr = uncore_msr_box_ctl(box); 620 620 if (msr) { 621 - rdmsrl(msr, config); 621 + rdmsrq(msr, config); 622 622 config |= SNBEP_PMON_BOX_CTL_FRZ; 623 623 wrmsrl(msr, config); 624 624 } ··· 631 631 632 632 msr = uncore_msr_box_ctl(box); 633 633 if (msr) { 634 - rdmsrl(msr, config); 634 + rdmsrq(msr, config); 635 635 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 636 636 wrmsrl(msr, config); 637 637 } ··· 6572 6572 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a 6573 6573 * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it. 6574 6574 */ 6575 - rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo); 6575 + rdmsrq(SPR_MSR_UNC_CBO_CONFIG, num_cbo); 6576 6576 /* 6577 6577 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact 6578 6578 * the EMR XCC. Don't let the value from the MSR replace the existing value.
+1 -1
arch/x86/events/msr.c
··· 231 231 u64 now; 232 232 233 233 if (event->hw.event_base) 234 - rdmsrl(event->hw.event_base, now); 234 + rdmsrq(event->hw.event_base, now); 235 235 else 236 236 now = rdtsc_ordered(); 237 237
+3 -3
arch/x86/events/perf_event.h
··· 1394 1394 { 1395 1395 u64 dbg_ctl, dbg_extn_cfg; 1396 1396 1397 - rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); 1397 + rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); 1398 1398 wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN); 1399 1399 1400 1400 if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { 1401 - rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); 1401 + rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl); 1402 1402 wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 1403 1403 } 1404 1404 } ··· 1543 1543 { 1544 1544 u64 debugctl; 1545 1545 1546 - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1546 + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 1547 1547 debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 1548 1548 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1549 1549 }
+3 -3
arch/x86/events/rapl.c
··· 192 192 static inline u64 rapl_read_counter(struct perf_event *event) 193 193 { 194 194 u64 raw; 195 - rdmsrl(event->hw.event_base, raw); 195 + rdmsrq(event->hw.event_base, raw); 196 196 return raw; 197 197 } 198 198 ··· 221 221 222 222 prev_raw_count = local64_read(&hwc->prev_count); 223 223 do { 224 - rdmsrl(event->hw.event_base, new_raw_count); 224 + rdmsrq(event->hw.event_base, new_raw_count); 225 225 } while (!local64_try_cmpxchg(&hwc->prev_count, 226 226 &prev_raw_count, new_raw_count)); 227 227 ··· 610 610 u64 msr_rapl_power_unit_bits; 611 611 int i; 612 612 613 - /* protect rdmsrl() to handle virtualization */ 613 + /* protect rdmsrq() to handle virtualization */ 614 614 if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits)) 615 615 return -1; 616 616 for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++)
+3 -3
arch/x86/events/zhaoxin/core.c
··· 266 266 { 267 267 u64 status; 268 268 269 - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 269 + rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); 270 270 271 271 return status; 272 272 } ··· 293 293 294 294 mask = 0xfULL << (idx * 4); 295 295 296 - rdmsrl(hwc->config_base, ctrl_val); 296 + rdmsrq(hwc->config_base, ctrl_val); 297 297 ctrl_val &= ~mask; 298 298 wrmsrl(hwc->config_base, ctrl_val); 299 299 } ··· 329 329 bits <<= (idx * 4); 330 330 mask = 0xfULL << (idx * 4); 331 331 332 - rdmsrl(hwc->config_base, ctrl_val); 332 + rdmsrq(hwc->config_base, ctrl_val); 333 333 ctrl_val &= ~mask; 334 334 ctrl_val |= bits; 335 335 wrmsrl(hwc->config_base, ctrl_val);
+1 -1
arch/x86/hyperv/hv_apic.c
··· 37 37 { 38 38 u64 reg_val; 39 39 40 - rdmsrl(HV_X64_MSR_ICR, reg_val); 40 + rdmsrq(HV_X64_MSR_ICR, reg_val); 41 41 return reg_val; 42 42 } 43 43
+13 -13
arch/x86/hyperv/hv_init.c
··· 62 62 * returned by MSR_AMD64_SEV_ES_GHCB is above shared 63 63 * memory boundary and map it here. 64 64 */ 65 - rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa); 65 + rdmsrq(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa); 66 66 67 67 /* Mask out vTOM bit. ioremap_cache() maps decrypted */ 68 68 ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary; ··· 95 95 * For root partition we get the hypervisor provided VP assist 96 96 * page, instead of allocating a new page. 97 97 */ 98 - rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 98 + rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 99 99 *hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT, 100 100 PAGE_SIZE, MEMREMAP_WB); 101 101 } else { ··· 140 140 { 141 141 struct hv_tsc_emulation_status emu_status; 142 142 143 - rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 143 + rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 144 144 145 145 /* Don't issue the callback if TSC accesses are not emulated */ 146 146 if (hv_reenlightenment_cb && emu_status.inprogress) ··· 153 153 u64 freq; 154 154 struct hv_tsc_emulation_status emu_status; 155 155 156 - rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 156 + rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 157 157 emu_status.inprogress = 0; 158 158 wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); 159 159 160 - rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq); 160 + rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq); 161 161 tsc_khz = div64_u64(freq, 1000); 162 162 } 163 163 EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation); ··· 217 217 if (!hv_reenlightenment_available()) 218 218 return; 219 219 220 - rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); 220 + rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); 221 221 re_ctrl.enabled = 0; 222 222 wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); 223 223 ··· 251 251 */ 252 252 memunmap(hv_vp_assist_page[cpu]); 253 253 hv_vp_assist_page[cpu] = NULL; 254 - rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 254 + rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); 255 255 msr.enable = 0; 256 256 } 257 257 wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); ··· 260 260 if (hv_reenlightenment_cb == NULL) 261 261 return 0; 262 262 263 - rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); 263 + rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); 264 264 if (re_ctrl.target_vp == hv_vp_index[cpu]) { 265 265 /* 266 266 * Reassign reenlightenment notifications to some other online ··· 331 331 hv_hypercall_pg = NULL; 332 332 333 333 /* Disable the hypercall page in the hypervisor */ 334 - rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 334 + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 335 335 hypercall_msr.enable = 0; 336 336 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 337 337 ··· 348 348 WARN_ON(ret); 349 349 350 350 /* Re-enable the hypercall page */ 351 - rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 351 + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 352 352 hypercall_msr.enable = 1; 353 353 hypercall_msr.guest_physical_address = 354 354 vmalloc_to_pfn(hv_hypercall_pg_saved); ··· 515 515 if (hv_hypercall_pg == NULL) 516 516 goto clean_guest_os_id; 517 517 518 - rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 518 + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 519 519 hypercall_msr.enable = 1; 520 520 521 521 if (hv_root_partition()) { ··· 667 667 return; 668 668 panic_reported = true; 669 669 670 - rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id); 670 + rdmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id); 671 671 672 672 wrmsrl(HV_X64_MSR_CRASH_P0, err); 673 673 wrmsrl(HV_X64_MSR_CRASH_P1, guest_id); ··· 701 701 * that the hypercall page is setup 702 702 */ 703 703 hypercall_msr.as_uint64 = 0; 704 - rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 704 + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 705 705 706 706 return hypercall_msr.enable; 707 707 }
+3 -3
arch/x86/hyperv/hv_spinlock.c
··· 39 39 * To prevent a race against the unlock path it is required to 40 40 * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE 41 41 * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between 42 - * the lock value check and the rdmsrl() then the vCPU might be put 42 + * the lock value check and the rdmsrq() then the vCPU might be put 43 43 * into 'idle' state by the hypervisor and kept in that state for 44 44 * an unspecified amount of time. 45 45 */ 46 46 local_irq_save(flags); 47 47 /* 48 - * Only issue the rdmsrl() when the lock state has not changed. 48 + * Only issue the rdmsrq() when the lock state has not changed. 49 49 */ 50 50 if (READ_ONCE(*byte) == val) { 51 51 unsigned long msr_val; 52 52 53 - rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val); 53 + rdmsrq(HV_X64_MSR_GUEST_IDLE, msr_val); 54 54 55 55 (void)msr_val; 56 56 }
+2 -2
arch/x86/include/asm/apic.h
··· 224 224 if (reg == APIC_DFR) 225 225 return -1; 226 226 227 - rdmsrl(APIC_BASE_MSR + (reg >> 4), msr); 227 + rdmsrq(APIC_BASE_MSR + (reg >> 4), msr); 228 228 return (u32)msr; 229 229 } 230 230 ··· 237 237 { 238 238 unsigned long val; 239 239 240 - rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); 240 + rdmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), val); 241 241 return val; 242 242 } 243 243
+1 -1
arch/x86/include/asm/debugreg.h
··· 169 169 if (boot_cpu_data.x86 < 6) 170 170 return 0; 171 171 #endif 172 - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 172 + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 173 173 174 174 return debugctlmsr; 175 175 }
+1 -1
arch/x86/include/asm/fsgsbase.h
··· 60 60 if (boot_cpu_has(X86_FEATURE_FSGSBASE)) 61 61 fsbase = rdfsbase(); 62 62 else 63 - rdmsrl(MSR_FS_BASE, fsbase); 63 + rdmsrq(MSR_FS_BASE, fsbase); 64 64 65 65 return fsbase; 66 66 }
+1 -1
arch/x86/include/asm/kvm_host.h
··· 2272 2272 { 2273 2273 u64 value; 2274 2274 2275 - rdmsrl(msr, value); 2275 + rdmsrq(msr, value); 2276 2276 return value; 2277 2277 } 2278 2278 #endif
+2 -2
arch/x86/include/asm/msr.h
··· 255 255 native_write_msr(msr, low, high); 256 256 } 257 257 258 - #define rdmsrl(msr, val) \ 258 + #define rdmsrq(msr, val) \ 259 259 ((val) = native_read_msr((msr))) 260 260 261 261 static inline void wrmsrl(u32 msr, u64 val) ··· 352 352 } 353 353 static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) 354 354 { 355 - rdmsrl(msr_no, *q); 355 + rdmsrq(msr_no, *q); 356 356 return 0; 357 357 } 358 358 static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+1 -1
arch/x86/include/asm/paravirt.h
··· 209 209 paravirt_write_msr(msr, val1, val2); \ 210 210 } while (0) 211 211 212 - #define rdmsrl(msr, val) \ 212 + #define rdmsrq(msr, val) \ 213 213 do { \ 214 214 val = paravirt_read_msr(msr); \ 215 215 } while (0)
+3 -3
arch/x86/kernel/apic/apic.c
··· 1694 1694 1695 1695 x86_arch_cap_msr = x86_read_arch_cap_msr(); 1696 1696 if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) { 1697 - rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr); 1697 + rdmsrq(MSR_IA32_XAPIC_DISABLE_STATUS, msr); 1698 1698 return (msr & LEGACY_XAPIC_DISABLED); 1699 1699 } 1700 1700 return false; ··· 1707 1707 if (!boot_cpu_has(X86_FEATURE_APIC)) 1708 1708 return; 1709 1709 1710 - rdmsrl(MSR_IA32_APICBASE, msr); 1710 + rdmsrq(MSR_IA32_APICBASE, msr); 1711 1711 if (!(msr & X2APIC_ENABLE)) 1712 1712 return; 1713 1713 /* Disable xapic and x2apic first and then reenable xapic mode */ ··· 1720 1720 { 1721 1721 u64 msr; 1722 1722 1723 - rdmsrl(MSR_IA32_APICBASE, msr); 1723 + rdmsrq(MSR_IA32_APICBASE, msr); 1724 1724 if (msr & X2APIC_ENABLE) 1725 1725 return; 1726 1726 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
+3 -3
arch/x86/kernel/apic/apic_numachip.c
··· 31 31 unsigned int id = (x >> 24) & 0xff; 32 32 33 33 if (static_cpu_has(X86_FEATURE_NODEID_MSR)) { 34 - rdmsrl(MSR_FAM10H_NODE_ID, value); 34 + rdmsrq(MSR_FAM10H_NODE_ID, value); 35 35 id |= (value << 2) & 0xff00; 36 36 } 37 37 ··· 42 42 { 43 43 u64 mcfg; 44 44 45 - rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, mcfg); 45 + rdmsrq(MSR_FAM10H_MMIO_CONF_BASE, mcfg); 46 46 return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24); 47 47 } 48 48 ··· 150 150 151 151 /* Account for nodes per socket in multi-core-module processors */ 152 152 if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { 153 - rdmsrl(MSR_FAM10H_NODE_ID, val); 153 + rdmsrq(MSR_FAM10H_NODE_ID, val); 154 154 nodes = ((val >> 3) & 7) + 1; 155 155 } 156 156
+1 -1
arch/x86/kernel/cet.c
··· 55 55 * will be whatever is live in userspace. So read the SSP before enabling 56 56 * interrupts so locking the fpregs to do it later is not required. 57 57 */ 58 - rdmsrl(MSR_IA32_PL3_SSP, ssp); 58 + rdmsrq(MSR_IA32_PL3_SSP, ssp); 59 59 60 60 cond_local_irq_enable(regs); 61 61
+4 -4
arch/x86/kernel/cpu/amd.c
··· 383 383 (c->x86 == 0x10 && c->x86_model >= 0x2)) { 384 384 u64 val; 385 385 386 - rdmsrl(MSR_K7_HWCR, val); 386 + rdmsrq(MSR_K7_HWCR, val); 387 387 if (!(val & BIT(24))) 388 388 pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); 389 389 } ··· 508 508 */ 509 509 if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) { 510 510 /* Check if memory encryption is enabled */ 511 - rdmsrl(MSR_AMD64_SYSCFG, msr); 511 + rdmsrq(MSR_AMD64_SYSCFG, msr); 512 512 if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) 513 513 goto clear_all; 514 514 ··· 525 525 if (!sme_me_mask) 526 526 setup_clear_cpu_cap(X86_FEATURE_SME); 527 527 528 - rdmsrl(MSR_K7_HWCR, msr); 528 + rdmsrq(MSR_K7_HWCR, msr); 529 529 if (!(msr & MSR_K7_HWCR_SMMLOCK)) 530 530 goto clear_sev; 531 531 ··· 1014 1014 init_amd_cacheinfo(c); 1015 1015 1016 1016 if (cpu_has(c, X86_FEATURE_SVM)) { 1017 - rdmsrl(MSR_VM_CR, vm_cr); 1017 + rdmsrq(MSR_VM_CR, vm_cr); 1018 1018 if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) { 1019 1019 pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n"); 1020 1020 clear_cpu_cap(c, X86_FEATURE_SVM);
+4 -4
arch/x86/kernel/cpu/aperfmperf.c
··· 40 40 { 41 41 u64 aperf, mperf; 42 42 43 - rdmsrl(MSR_IA32_APERF, aperf); 44 - rdmsrl(MSR_IA32_MPERF, mperf); 43 + rdmsrq(MSR_IA32_APERF, aperf); 44 + rdmsrq(MSR_IA32_MPERF, mperf); 45 45 46 46 this_cpu_write(cpu_samples.aperf, aperf); 47 47 this_cpu_write(cpu_samples.mperf, mperf); ··· 474 474 if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF)) 475 475 return; 476 476 477 - rdmsrl(MSR_IA32_APERF, aperf); 478 - rdmsrl(MSR_IA32_MPERF, mperf); 477 + rdmsrq(MSR_IA32_APERF, aperf); 478 + rdmsrq(MSR_IA32_MPERF, mperf); 479 479 acnt = aperf - s->aperf; 480 480 mcnt = mperf - s->mperf; 481 481
+6 -6
arch/x86/kernel/cpu/bugs.c
··· 140 140 * init code as it is not enumerated and depends on the family. 141 141 */ 142 142 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { 143 - rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 143 + rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 144 144 145 145 /* 146 146 * Previously running kernel (kexec), may have some controls ··· 656 656 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 657 657 return; 658 658 659 - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 659 + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 660 660 661 661 switch (srbds_mitigation) { 662 662 case SRBDS_MITIGATION_OFF: ··· 776 776 777 777 switch (gds_mitigation) { 778 778 case GDS_MITIGATION_OFF: 779 - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 779 + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 780 780 mcu_ctrl |= GDS_MITG_DIS; 781 781 break; 782 782 case GDS_MITIGATION_FULL_LOCKED: ··· 786 786 * CPUs. 787 787 */ 788 788 case GDS_MITIGATION_FULL: 789 - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 789 + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 790 790 mcu_ctrl &= ~GDS_MITG_DIS; 791 791 break; 792 792 case GDS_MITIGATION_FORCE: ··· 802 802 * GDS_MITG_DIS will be ignored if this processor is locked but the boot 803 803 * processor was not. 804 804 */ 805 - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); 805 + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); 806 806 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); 807 807 } 808 808 ··· 841 841 if (gds_mitigation == GDS_MITIGATION_FORCE) 842 842 gds_mitigation = GDS_MITIGATION_FULL; 843 843 844 - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 844 + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 845 845 if (mcu_ctrl & GDS_MITG_LOCKED) { 846 846 if (gds_mitigation == GDS_MITIGATION_OFF) 847 847 pr_warn("Mitigation locked. Disable failed.\n");
+4 -4
arch/x86/kernel/cpu/bus_lock.c
··· 103 103 ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT; 104 104 if (wrmsrl_safe(MSR_TEST_CTRL, ctrl)) 105 105 return false; 106 - rdmsrl(MSR_TEST_CTRL, tmp); 106 + rdmsrq(MSR_TEST_CTRL, tmp); 107 107 return ctrl == tmp; 108 108 } 109 109 ··· 137 137 return; 138 138 } 139 139 140 - rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); 140 + rdmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache); 141 141 142 142 if (!split_lock_verify_msr(true)) { 143 143 pr_info("MSR access failed: Disabled\n"); ··· 297 297 if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) 298 298 return; 299 299 300 - rdmsrl(MSR_IA32_DEBUGCTLMSR, val); 300 + rdmsrq(MSR_IA32_DEBUGCTLMSR, val); 301 301 302 302 if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && 303 303 (sld_state == sld_warn || sld_state == sld_fatal)) || ··· 375 375 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set 376 376 * it have split lock detection. 377 377 */ 378 - rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); 378 + rdmsrq(MSR_IA32_CORE_CAPS, ia32_core_caps); 379 379 if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT) 380 380 goto supported; 381 381
+5 -5
arch/x86/kernel/cpu/common.c
··· 562 562 u64 msr = 0; 563 563 564 564 if (cpu_feature_enabled(X86_FEATURE_IBT)) { 565 - rdmsrl(MSR_IA32_S_CET, msr); 565 + rdmsrq(MSR_IA32_S_CET, msr); 566 566 if (disable) 567 567 wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN); 568 568 } ··· 575 575 u64 msr; 576 576 577 577 if (cpu_feature_enabled(X86_FEATURE_IBT)) { 578 - rdmsrl(MSR_IA32_S_CET, msr); 578 + rdmsrq(MSR_IA32_S_CET, msr); 579 579 msr &= ~CET_ENDBR_EN; 580 580 msr |= (save & CET_ENDBR_EN); 581 581 wrmsrl(MSR_IA32_S_CET, msr); ··· 1288 1288 u64 x86_arch_cap_msr = 0; 1289 1289 1290 1290 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) 1291 - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr); 1291 + rdmsrq(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr); 1292 1292 1293 1293 return x86_arch_cap_msr; 1294 1294 } ··· 1749 1749 */ 1750 1750 1751 1751 unsigned long old_base, tmp; 1752 - rdmsrl(MSR_FS_BASE, old_base); 1752 + rdmsrq(MSR_FS_BASE, old_base); 1753 1753 wrmsrl(MSR_FS_BASE, 1); 1754 1754 loadsegment(fs, 0); 1755 - rdmsrl(MSR_FS_BASE, tmp); 1755 + rdmsrq(MSR_FS_BASE, tmp); 1756 1756 wrmsrl(MSR_FS_BASE, old_base); 1757 1757 return tmp == 0; 1758 1758 }
+2 -2
arch/x86/kernel/cpu/hygon.c
··· 96 96 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { 97 97 u64 val; 98 98 99 - rdmsrl(MSR_K7_HWCR, val); 99 + rdmsrq(MSR_K7_HWCR, val); 100 100 if (!(val & BIT(24))) 101 101 pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); 102 102 } ··· 194 194 init_hygon_cacheinfo(c); 195 195 196 196 if (cpu_has(c, X86_FEATURE_SVM)) { 197 - rdmsrl(MSR_VM_CR, vm_cr); 197 + rdmsrq(MSR_VM_CR, vm_cr); 198 198 if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) { 199 199 pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n"); 200 200 clear_cpu_cap(c, X86_FEATURE_SVM);
+2 -2
arch/x86/kernel/cpu/intel.c
··· 157 157 u64 tme_activate; 158 158 int keyid_bits; 159 159 160 - rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); 160 + rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate); 161 161 162 162 if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { 163 163 pr_info_once("x86/tme: not enabled by BIOS\n"); ··· 299 299 * string flag and enhanced fast string capabilities accordingly. 300 300 */ 301 301 if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) { 302 - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 302 + rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable); 303 303 if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { 304 304 /* X86_FEATURE_ERMS is set based on CPUID */ 305 305 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+2 -2
arch/x86/kernel/cpu/intel_epb.c
··· 79 79 { 80 80 u64 epb; 81 81 82 - rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); 82 + rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb); 83 83 /* 84 84 * Ensure that saved_epb will always be nonzero after this write even if 85 85 * the EPB value read from the MSR is 0. ··· 94 94 u64 val = this_cpu_read(saved_epb); 95 95 u64 epb; 96 96 97 - rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); 97 + rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb); 98 98 if (val) { 99 99 val &= EPB_MASK; 100 100 } else {
+7 -7
arch/x86/kernel/cpu/mce/amd.c
··· 662 662 return; 663 663 } 664 664 665 - rdmsrl(MSR_K7_HWCR, hwcr); 665 + rdmsrq(MSR_K7_HWCR, hwcr); 666 666 667 667 /* McStatusWrEn has to be set */ 668 668 need_toggle = !(hwcr & BIT(18)); ··· 805 805 } 806 806 807 807 if (mce_flags.smca) { 808 - rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid); 808 + rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid); 809 809 810 810 if (m->status & MCI_STATUS_SYNDV) { 811 - rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd); 812 - rdmsrl(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1); 813 - rdmsrl(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2); 811 + rdmsrq(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd); 812 + rdmsrq(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1); 813 + rdmsrq(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2); 814 814 } 815 815 } 816 816 ··· 834 834 { 835 835 u64 status, addr = 0; 836 836 837 - rdmsrl(msr_stat, status); 837 + rdmsrq(msr_stat, status); 838 838 if (!(status & MCI_STATUS_VAL)) 839 839 return false; 840 840 841 841 if (status & MCI_STATUS_ADDRV) 842 - rdmsrl(msr_addr, addr); 842 + rdmsrq(msr_addr, addr); 843 843 844 844 __log_error(bank, status, addr, misc); 845 845
+3 -3
arch/x86/kernel/cpu/mce/core.c
··· 1822 1822 u64 cap; 1823 1823 u8 b; 1824 1824 1825 - rdmsrl(MSR_IA32_MCG_CAP, cap); 1825 + rdmsrq(MSR_IA32_MCG_CAP, cap); 1826 1826 1827 1827 b = cap & MCG_BANKCNT_MASK; 1828 1828 ··· 1863 1863 1864 1864 cr4_set_bits(X86_CR4_MCE); 1865 1865 1866 - rdmsrl(MSR_IA32_MCG_CAP, cap); 1866 + rdmsrq(MSR_IA32_MCG_CAP, cap); 1867 1867 if (cap & MCG_CTL_P) 1868 1868 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 1869 1869 } ··· 1905 1905 if (!b->init) 1906 1906 continue; 1907 1907 1908 - rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); 1908 + rdmsrq(mca_msr_reg(i, MCA_CTL), msrval); 1909 1909 b->init = !!msrval; 1910 1910 } 1911 1911 }
+1 -1
arch/x86/kernel/cpu/mce/inject.c
··· 741 741 u64 status = MCI_STATUS_VAL, ipid; 742 742 743 743 /* Check whether bank is populated */ 744 - rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), ipid); 744 + rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), ipid); 745 745 if (!ipid) 746 746 continue; 747 747
+9 -9
arch/x86/kernel/cpu/mce/intel.c
··· 94 94 if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) 95 95 return false; 96 96 97 - rdmsrl(MSR_IA32_MCG_CAP, cap); 97 + rdmsrq(MSR_IA32_MCG_CAP, cap); 98 98 *banks = min_t(unsigned, MAX_NR_BANKS, cap & MCG_BANKCNT_MASK); 99 99 return !!(cap & MCG_CMCI_P); 100 100 } ··· 106 106 if (mca_cfg.lmce_disabled) 107 107 return false; 108 108 109 - rdmsrl(MSR_IA32_MCG_CAP, tmp); 109 + rdmsrq(MSR_IA32_MCG_CAP, tmp); 110 110 111 111 /* 112 112 * LMCE depends on recovery support in the processor. Hence both ··· 123 123 * WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally 124 124 * locks the MSR in the event that it wasn't already locked by BIOS. 125 125 */ 126 - rdmsrl(MSR_IA32_FEAT_CTL, tmp); 126 + rdmsrq(MSR_IA32_FEAT_CTL, tmp); 127 127 if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED))) 128 128 return false; 129 129 ··· 141 141 u64 val; 142 142 143 143 raw_spin_lock_irqsave(&cmci_discover_lock, flags); 144 - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); 144 + rdmsrq(MSR_IA32_MCx_CTL2(bank), val); 145 145 val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; 146 146 wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh); 147 147 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); ··· 184 184 if (test_bit(bank, mce_banks_ce_disabled)) 185 185 return true; 186 186 187 - rdmsrl(MSR_IA32_MCx_CTL2(bank), *val); 187 + rdmsrq(MSR_IA32_MCx_CTL2(bank), *val); 188 188 189 189 /* Already owned by someone else? */ 190 190 if (*val & MCI_CTL2_CMCI_EN) { ··· 233 233 234 234 val |= MCI_CTL2_CMCI_EN; 235 235 wrmsrl(MSR_IA32_MCx_CTL2(bank), val); 236 - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); 236 + rdmsrq(MSR_IA32_MCx_CTL2(bank), val); 237 237 238 238 /* If the enable bit did not stick, this bank should be polled. */ 239 239 if (!(val & MCI_CTL2_CMCI_EN)) { ··· 324 324 325 325 if (!test_bit(bank, this_cpu_ptr(mce_banks_owned))) 326 326 return; 327 - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); 327 + rdmsrq(MSR_IA32_MCx_CTL2(bank), val); 328 328 val &= ~MCI_CTL2_CMCI_EN; 329 329 wrmsrl(MSR_IA32_MCx_CTL2(bank), val); 330 330 __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); ··· 430 430 if (!lmce_supported()) 431 431 return; 432 432 433 - rdmsrl(MSR_IA32_MCG_EXT_CTL, val); 433 + rdmsrq(MSR_IA32_MCG_EXT_CTL, val); 434 434 435 435 if (!(val & MCG_EXT_CTL_LMCE_EN)) 436 436 wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); ··· 443 443 if (!lmce_supported()) 444 444 return; 445 445 446 - rdmsrl(MSR_IA32_MCG_EXT_CTL, val); 446 + rdmsrq(MSR_IA32_MCG_EXT_CTL, val); 447 447 val &= ~MCG_EXT_CTL_LMCE_EN; 448 448 wrmsrl(MSR_IA32_MCG_EXT_CTL, val); 449 449 }
+3 -3
arch/x86/kernel/cpu/mshyperv.c
··· 70 70 if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present) 71 71 hv_ivm_msr_read(reg, &value); 72 72 else 73 - rdmsrl(reg, value); 73 + rdmsrq(reg, value); 74 74 return value; 75 75 } 76 76 EXPORT_SYMBOL_GPL(hv_get_non_nested_msr); ··· 345 345 { 346 346 unsigned long freq; 347 347 348 - rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq); 348 + rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq); 349 349 350 350 return freq / 1000; 351 351 } ··· 541 541 */ 542 542 u64 hv_lapic_frequency; 543 543 544 - rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); 544 + rdmsrq(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); 545 545 hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ); 546 546 lapic_timer_period = hv_lapic_frequency; 547 547 pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
+1 -1
arch/x86/kernel/cpu/resctrl/core.c
··· 148 148 if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm)) 149 149 return; 150 150 151 - rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0); 151 + rdmsrq(MSR_IA32_L3_CBM_BASE, l3_cbm_0); 152 152 153 153 /* If all the bits were set in MSR, return success */ 154 154 if (l3_cbm_0 != max_cbm)
+1 -1
arch/x86/kernel/cpu/resctrl/monitor.c
··· 238 238 * are error bits. 239 239 */ 240 240 wrmsr(MSR_IA32_QM_EVTSEL, eventid, prmid); 241 - rdmsrl(MSR_IA32_QM_CTR, msr_val); 241 + rdmsrq(MSR_IA32_QM_CTR, msr_val); 242 242 243 243 if (msr_val & RMID_VAL_ERROR) 244 244 return -EIO;
+1 -1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 1635 1635 pr_warn_once("Invalid event id %d\n", config_info->evtid); 1636 1636 return; 1637 1637 } 1638 - rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); 1638 + rdmsrq(MSR_IA32_EVT_CFG_BASE + index, msrval); 1639 1639 1640 1640 /* Report only the valid event configuration bits */ 1641 1641 config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
+1 -1
arch/x86/kernel/cpu/topology.c
··· 154 154 * kernel must rely on the firmware enumeration order. 155 155 */ 156 156 if (has_apic_base) { 157 - rdmsrl(MSR_IA32_APICBASE, msr); 157 + rdmsrq(MSR_IA32_APICBASE, msr); 158 158 is_bsp = !!(msr & MSR_IA32_APICBASE_BSP); 159 159 } 160 160
+2 -2
arch/x86/kernel/cpu/topology_amd.c
··· 133 133 if (!boot_cpu_has(X86_FEATURE_NODEID_MSR)) 134 134 return; 135 135 136 - rdmsrl(MSR_FAM10H_NODE_ID, nid.msr); 136 + rdmsrq(MSR_FAM10H_NODE_ID, nid.msr); 137 137 store_node(tscan, nid.nodes_per_pkg + 1, nid.node_id); 138 138 tscan->c->topo.llc_id = nid.node_id; 139 139 } ··· 160 160 if (msr_set_bit(0xc0011005, 54) <= 0) 161 161 return; 162 162 163 - rdmsrl(0xc0011005, msrval); 163 + rdmsrq(0xc0011005, msrval); 164 164 if (msrval & BIT_64(54)) { 165 165 set_cpu_cap(c, X86_FEATURE_TOPOEXT); 166 166 pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+5 -5
arch/x86/kernel/cpu/tsx.c
··· 24 24 { 25 25 u64 tsx; 26 26 27 - rdmsrl(MSR_IA32_TSX_CTRL, tsx); 27 + rdmsrq(MSR_IA32_TSX_CTRL, tsx); 28 28 29 29 /* Force all transactions to immediately abort */ 30 30 tsx |= TSX_CTRL_RTM_DISABLE; ··· 44 44 { 45 45 u64 tsx; 46 46 47 - rdmsrl(MSR_IA32_TSX_CTRL, tsx); 47 + rdmsrq(MSR_IA32_TSX_CTRL, tsx); 48 48 49 49 /* Enable the RTM feature in the cpu */ 50 50 tsx &= ~TSX_CTRL_RTM_DISABLE; ··· 115 115 */ 116 116 if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) && 117 117 boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { 118 - rdmsrl(MSR_TSX_FORCE_ABORT, msr); 118 + rdmsrq(MSR_TSX_FORCE_ABORT, msr); 119 119 msr |= MSR_TFA_TSX_CPUID_CLEAR; 120 120 wrmsrl(MSR_TSX_FORCE_ABORT, msr); 121 121 } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) { 122 - rdmsrl(MSR_IA32_TSX_CTRL, msr); 122 + rdmsrq(MSR_IA32_TSX_CTRL, msr); 123 123 msr |= TSX_CTRL_CPUID_CLEAR; 124 124 wrmsrl(MSR_IA32_TSX_CTRL, msr); 125 125 } ··· 146 146 !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL)) 147 147 return; 148 148 149 - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); 149 + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); 150 150 151 151 if (mcu_opt_ctrl & RTM_ALLOW) { 152 152 mcu_opt_ctrl &= ~RTM_ALLOW;
+1 -1
arch/x86/kernel/cpu/umwait.c
··· 214 214 * changed. This is the only place where orig_umwait_control_cached 215 215 * is modified. 216 216 */ 217 - rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); 217 + rdmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); 218 218 219 219 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online", 220 220 umwait_cpu_online, umwait_cpu_offline);
+1 -1
arch/x86/kernel/fpu/core.c
··· 327 327 328 328 lockdep_assert_irqs_disabled(); 329 329 if (fpu_state_size_dynamic()) { 330 - rdmsrl(MSR_IA32_XFD, fps->xfd); 330 + rdmsrq(MSR_IA32_XFD, fps->xfd); 331 331 __this_cpu_write(xfd_state, fps->xfd); 332 332 } 333 333 }
+1 -1
arch/x86/kernel/hpet.c
··· 970 970 return false; 971 971 972 972 /* Check whether PC10 is enabled in PKG C-state limit */ 973 - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg); 973 + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, pcfg); 974 974 if ((pcfg & 0xF) < 8) 975 975 return false; 976 976
+1 -1
arch/x86/kernel/kvm.c
··· 728 728 729 729 #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL 730 730 if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) 731 - rdmsrl(MSR_KVM_POLL_CONTROL, val); 731 + rdmsrq(MSR_KVM_POLL_CONTROL, val); 732 732 has_guest_poll = !(val & 1); 733 733 #endif 734 734 return 0;
+3 -3
arch/x86/kernel/mmconf-fam10h_64.c
··· 97 97 98 98 /* SYS_CFG */ 99 99 address = MSR_AMD64_SYSCFG; 100 - rdmsrl(address, val); 100 + rdmsrq(address, val); 101 101 102 102 /* TOP_MEM2 is not enabled? */ 103 103 if (!(val & (1<<21))) { ··· 105 105 } else { 106 106 /* TOP_MEM2 */ 107 107 address = MSR_K8_TOP_MEM2; 108 - rdmsrl(address, val); 108 + rdmsrq(address, val); 109 109 tom2 = max(val & 0xffffff800000ULL, 1ULL << 32); 110 110 } 111 111 ··· 177 177 return; 178 178 179 179 address = MSR_FAM10H_MMIO_CONF_BASE; 180 - rdmsrl(address, val); 180 + rdmsrq(address, val); 181 181 182 182 /* try to make sure that AP's setting is identical to BSP setting */ 183 183 if (val & FAM10H_MMIO_CONF_ENABLE) {
+1 -1
arch/x86/kernel/process.c
··· 710 710 arch_has_block_step()) { 711 711 unsigned long debugctl, msk; 712 712 713 - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 713 + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 714 714 debugctl &= ~DEBUGCTLMSR_BTF; 715 715 msk = tifn & _TIF_BLOCKSTEP; 716 716 debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
+7 -7
arch/x86/kernel/process_64.c
··· 95 95 return; 96 96 97 97 if (mode == SHOW_REGS_USER) { 98 - rdmsrl(MSR_FS_BASE, fs); 99 - rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 98 + rdmsrq(MSR_FS_BASE, fs); 99 + rdmsrq(MSR_KERNEL_GS_BASE, shadowgs); 100 100 printk("%sFS: %016lx GS: %016lx\n", 101 101 log_lvl, fs, shadowgs); 102 102 return; ··· 107 107 asm("movl %%fs,%0" : "=r" (fsindex)); 108 108 asm("movl %%gs,%0" : "=r" (gsindex)); 109 109 110 - rdmsrl(MSR_FS_BASE, fs); 111 - rdmsrl(MSR_GS_BASE, gs); 112 - rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 110 + rdmsrq(MSR_FS_BASE, fs); 111 + rdmsrq(MSR_GS_BASE, gs); 112 + rdmsrq(MSR_KERNEL_GS_BASE, shadowgs); 113 113 114 114 cr0 = read_cr0(); 115 115 cr2 = read_cr2(); ··· 195 195 native_swapgs(); 196 196 } else { 197 197 instrumentation_begin(); 198 - rdmsrl(MSR_KERNEL_GS_BASE, gsbase); 198 + rdmsrq(MSR_KERNEL_GS_BASE, gsbase); 199 199 instrumentation_end(); 200 200 } 201 201 ··· 463 463 gsbase = __rdgsbase_inactive(); 464 464 local_irq_restore(flags); 465 465 } else { 466 - rdmsrl(MSR_KERNEL_GS_BASE, gsbase); 466 + rdmsrq(MSR_KERNEL_GS_BASE, gsbase); 467 467 } 468 468 469 469 return gsbase;
+2 -2
arch/x86/kernel/shstk.c
··· 239 239 240 240 fpregs_lock_and_load(); 241 241 242 - rdmsrl(MSR_IA32_PL3_SSP, ssp); 242 + rdmsrq(MSR_IA32_PL3_SSP, ssp); 243 243 244 244 fpregs_unlock(); 245 245 ··· 460 460 return 0; 461 461 462 462 fpregs_lock_and_load(); 463 - rdmsrl(MSR_IA32_U_CET, msrval); 463 + rdmsrq(MSR_IA32_U_CET, msrval); 464 464 465 465 if (enable) { 466 466 features_set(ARCH_SHSTK_WRSS);
+2 -2
arch/x86/kernel/traps.c
··· 1120 1120 */ 1121 1121 unsigned long debugctl; 1122 1122 1123 - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1123 + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); 1124 1124 debugctl |= DEBUGCTLMSR_BTF; 1125 1125 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1126 1126 } ··· 1386 1386 if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD)) 1387 1387 return false; 1388 1388 1389 - rdmsrl(MSR_IA32_XFD_ERR, xfd_err); 1389 + rdmsrq(MSR_IA32_XFD_ERR, xfd_err); 1390 1390 if (!xfd_err) 1391 1391 return false; 1392 1392
+1 -1
arch/x86/kernel/tsc.c
··· 1098 1098 if (art_base_clk.denominator < ART_MIN_DENOMINATOR) 1099 1099 return; 1100 1100 1101 - rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset); 1101 + rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset); 1102 1102 1103 1103 /* Make this sticky over multiple CPU init calls */ 1104 1104 setup_force_cpu_cap(X86_FEATURE_ART);
+3 -3
arch/x86/kernel/tsc_sync.c
··· 65 65 66 66 adj->nextcheck = jiffies + HZ; 67 67 68 - rdmsrl(MSR_IA32_TSC_ADJUST, curval); 68 + rdmsrq(MSR_IA32_TSC_ADJUST, curval); 69 69 if (adj->adjusted == curval) 70 70 return; 71 71 ··· 165 165 if (check_tsc_unstable()) 166 166 return false; 167 167 168 - rdmsrl(MSR_IA32_TSC_ADJUST, bootval); 168 + rdmsrq(MSR_IA32_TSC_ADJUST, bootval); 169 169 cur->bootval = bootval; 170 170 cur->nextcheck = jiffies + HZ; 171 171 tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu); ··· 187 187 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) 188 188 return false; 189 189 190 - rdmsrl(MSR_IA32_TSC_ADJUST, bootval); 190 + rdmsrq(MSR_IA32_TSC_ADJUST, bootval); 191 191 cur->bootval = bootval; 192 192 cur->nextcheck = jiffies + HZ; 193 193 cur->warned = false;
+3 -3
arch/x86/kvm/svm/svm.c
··· 580 580 uint64_t efer; 581 581 582 582 wrmsrl(MSR_VM_HSAVE_PA, 0); 583 - rdmsrl(MSR_EFER, efer); 583 + rdmsrq(MSR_EFER, efer); 584 584 if (efer & EFER_SVME) { 585 585 /* 586 586 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and ··· 619 619 uint64_t efer; 620 620 int me = raw_smp_processor_id(); 621 621 622 - rdmsrl(MSR_EFER, efer); 622 + rdmsrq(MSR_EFER, efer); 623 623 if (efer & EFER_SVME) 624 624 return -EBUSY; 625 625 ··· 5232 5232 return; 5233 5233 5234 5234 /* If memory encryption is not enabled, use existing mask */ 5235 - rdmsrl(MSR_AMD64_SYSCFG, msr); 5235 + rdmsrq(MSR_AMD64_SYSCFG, msr); 5236 5236 if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) 5237 5237 return; 5238 5238
+2 -2
arch/x86/kvm/vmx/nested.c
··· 7202 7202 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 7203 7203 7204 7204 /* These MSRs specify bits which the guest must keep fixed off. */ 7205 - rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 7206 - rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 7205 + rdmsrq(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 7206 + rdmsrq(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 7207 7207 7208 7208 if (vmx_umip_emulated()) 7209 7209 msrs->cr4_fixed1 |= X86_CR4_UMIP;
+1 -1
arch/x86/kvm/vmx/pmu_intel.c
··· 279 279 local_irq_disable(); 280 280 if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) { 281 281 if (read) 282 - rdmsrl(index, msr_info->data); 282 + rdmsrq(index, msr_info->data); 283 283 else 284 284 wrmsrl(index, msr_info->data); 285 285 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
+3 -3
arch/x86/kvm/vmx/sgx.c
··· 418 418 sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL; 419 419 } else { 420 420 /* MSR_IA32_SGXLEPUBKEYHASH0 is read above */ 421 - rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]); 422 - rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]); 423 - rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]); 421 + rdmsrq(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]); 422 + rdmsrq(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]); 423 + rdmsrq(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]); 424 424 } 425 425 } 426 426
+16 -16
arch/x86/kvm/vmx/vmx.c
··· 1206 1206 { 1207 1207 u32 i; 1208 1208 1209 - rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 1210 - rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1211 - rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1212 - rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1209 + rdmsrq(MSR_IA32_RTIT_STATUS, ctx->status); 1210 + rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1211 + rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1212 + rdmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1213 1213 for (i = 0; i < addr_range; i++) { 1214 - rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1215 - rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1214 + rdmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1215 + rdmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1216 1216 } 1217 1217 } 1218 1218 ··· 1225 1225 * GUEST_IA32_RTIT_CTL is already set in the VMCS. 1226 1226 * Save host state before VM entry. 1227 1227 */ 1228 - rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1228 + rdmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1229 1229 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1230 1230 wrmsrl(MSR_IA32_RTIT_CTL, 0); 1231 1231 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); ··· 1362 1362 ++vmx->vcpu.stat.host_state_reload; 1363 1363 1364 1364 #ifdef CONFIG_X86_64 1365 - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1365 + rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1366 1366 #endif 1367 1367 if (host_state->ldt_sel || (host_state->gs_sel & 7)) { 1368 1368 kvm_load_ldt(host_state->ldt_sel); ··· 1394 1394 { 1395 1395 preempt_disable(); 1396 1396 if (vmx->guest_state_loaded) 1397 - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1397 + rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1398 1398 preempt_enable(); 1399 1399 return vmx->msr_guest_kernel_gs_base; 1400 1400 } ··· 2574 2574 { 2575 2575 u64 allowed; 2576 2576 2577 - rdmsrl(msr, allowed); 2577 + rdmsrq(msr, allowed); 2578 2578 2579 2579 return ctl_opt & allowed; 2580 2580 } ··· 2746 2746 break; 2747 2747 } 2748 2748 2749 - rdmsrl(MSR_IA32_VMX_BASIC, basic_msr); 2749 + rdmsrq(MSR_IA32_VMX_BASIC, basic_msr); 2750 2750 2751 2751 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ 2752 2752 if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE) ··· 2766 2766 if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB) 2767 2767 return -EIO; 2768 2768 2769 - rdmsrl(MSR_IA32_VMX_MISC, misc_msr); 2769 + rdmsrq(MSR_IA32_VMX_MISC, misc_msr); 2770 2770 2771 2771 vmcs_conf->basic = basic_msr; 2772 2772 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; ··· 4391 4391 if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32)) 4392 4392 vmcs_writel(HOST_IA32_SYSENTER_ESP, 0); 4393 4393 4394 - rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); 4394 + rdmsrq(MSR_IA32_SYSENTER_EIP, tmpl); 4395 4395 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ 4396 4396 4397 4397 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { ··· 7052 7052 * the #NM exception. 7053 7053 */ 7054 7054 if (is_xfd_nm_fault(vcpu)) 7055 - rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 7055 + rdmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 7056 7056 } 7057 7057 7058 7058 static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info) ··· 7959 7959 return 0; 7960 7960 7961 7961 if (boot_cpu_has(X86_FEATURE_PDCM)) 7962 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); 7962 + rdmsrq(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); 7963 7963 7964 7964 if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) { 7965 7965 x86_perf_get_lbr(&vmx_lbr_caps); ··· 8508 8508 kvm_enable_efer_bits(EFER_NX); 8509 8509 8510 8510 if (boot_cpu_has(X86_FEATURE_MPX)) { 8511 - rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); 8511 + rdmsrq(MSR_IA32_BNDCFGS, host_bndcfgs); 8512 8512 WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost"); 8513 8513 } 8514 8514
+2 -2
arch/x86/kvm/x86.c
··· 9773 9773 rdmsrl_safe(MSR_EFER, &kvm_host.efer); 9774 9774 9775 9775 if (boot_cpu_has(X86_FEATURE_XSAVES)) 9776 - rdmsrl(MSR_IA32_XSS, kvm_host.xss); 9776 + rdmsrq(MSR_IA32_XSS, kvm_host.xss); 9777 9777 9778 9778 kvm_init_pmu_capability(ops->pmu_ops); 9779 9779 9780 9780 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) 9781 - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities); 9781 + rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities); 9782 9782 9783 9783 r = ops->hardware_setup(); 9784 9784 if (r != 0)
+3 -3
arch/x86/lib/insn-eval.c
··· 702 702 unsigned long base; 703 703 704 704 if (seg_reg_idx == INAT_SEG_REG_FS) { 705 - rdmsrl(MSR_FS_BASE, base); 705 + rdmsrq(MSR_FS_BASE, base); 706 706 } else if (seg_reg_idx == INAT_SEG_REG_GS) { 707 707 /* 708 708 * swapgs was called at the kernel entry point. Thus, 709 709 * MSR_KERNEL_GS_BASE will have the user-space GS base. 710 710 */ 711 711 if (user_mode(regs)) 712 - rdmsrl(MSR_KERNEL_GS_BASE, base); 712 + rdmsrq(MSR_KERNEL_GS_BASE, base); 713 713 else 714 - rdmsrl(MSR_GS_BASE, base); 714 + rdmsrq(MSR_GS_BASE, base); 715 715 } else { 716 716 base = 0; 717 717 }
+1 -1
arch/x86/mm/pat/memtype.c
··· 256 256 if (!cpu_feature_enabled(X86_FEATURE_PAT)) 257 257 pat_disable("PAT not supported by the CPU."); 258 258 else 259 - rdmsrl(MSR_IA32_CR_PAT, pat_msr_val); 259 + rdmsrq(MSR_IA32_CR_PAT, pat_msr_val); 260 260 261 261 if (!pat_msr_val) { 262 262 pat_disable("PAT support disabled by the firmware.");
+4 -4
arch/x86/pci/amd_bus.c
··· 202 202 203 203 /* need to take out [0, TOM) for RAM*/ 204 204 address = MSR_K8_TOP_MEM1; 205 - rdmsrl(address, val); 205 + rdmsrq(address, val); 206 206 end = (val & 0xffffff800000ULL); 207 207 printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20); 208 208 if (end < (1ULL<<32)) ··· 293 293 /* need to take out [4G, TOM2) for RAM*/ 294 294 /* SYS_CFG */ 295 295 address = MSR_AMD64_SYSCFG; 296 - rdmsrl(address, val); 296 + rdmsrq(address, val); 297 297 /* TOP_MEM2 is enabled? */ 298 298 if (val & (1<<21)) { 299 299 /* TOP_MEM2 */ 300 300 address = MSR_K8_TOP_MEM2; 301 - rdmsrl(address, val); 301 + rdmsrq(address, val); 302 302 end = (val & 0xffffff800000ULL); 303 303 printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20); 304 304 subtract_range(range, RANGE_NUM, 1ULL<<32, end); ··· 341 341 { 342 342 u64 reg; 343 343 344 - rdmsrl(MSR_AMD64_NB_CFG, reg); 344 + rdmsrq(MSR_AMD64_NB_CFG, reg); 345 345 if (!(reg & ENABLE_CF8_EXT_CFG)) { 346 346 reg |= ENABLE_CF8_EXT_CFG; 347 347 wrmsrl(MSR_AMD64_NB_CFG, reg);
+3 -3
arch/x86/platform/olpc/olpc-xo1-rtc.c
··· 64 64 of_node_put(node); 65 65 66 66 pr_info("olpc-xo1-rtc: Initializing OLPC XO-1 RTC\n"); 67 - rdmsrl(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm); 68 - rdmsrl(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm); 69 - rdmsrl(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century); 67 + rdmsrq(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm); 68 + rdmsrq(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm); 69 + rdmsrq(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century); 70 70 71 71 r = platform_device_register(&xo1_rtc_device); 72 72 if (r)
+5 -5
arch/x86/power/cpu.c
··· 44 44 45 45 while (msr < end) { 46 46 if (msr->valid) 47 - rdmsrl(msr->info.msr_no, msr->info.reg.q); 47 + rdmsrq(msr->info.msr_no, msr->info.reg.q); 48 48 msr++; 49 49 } 50 50 } ··· 110 110 savesegment(ds, ctxt->ds); 111 111 savesegment(es, ctxt->es); 112 112 113 - rdmsrl(MSR_FS_BASE, ctxt->fs_base); 114 - rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); 115 - rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); 113 + rdmsrq(MSR_FS_BASE, ctxt->fs_base); 114 + rdmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base); 115 + rdmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); 116 116 mtrr_save_fixed_ranges(NULL); 117 117 118 - rdmsrl(MSR_EFER, ctxt->efer); 118 + rdmsrq(MSR_EFER, ctxt->efer); 119 119 #endif 120 120 121 121 /*
+1 -1
arch/x86/realmode/init.c
··· 145 145 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR 146 146 * so we need to mask it out. 147 147 */ 148 - rdmsrl(MSR_EFER, efer); 148 + rdmsrq(MSR_EFER, efer); 149 149 trampoline_header->efer = efer & ~EFER_LMA; 150 150 151 151 trampoline_header->start = (u64) secondary_startup_64;
+8 -8
arch/x86/virt/svm/sev.c
··· 136 136 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) 137 137 return 0; 138 138 139 - rdmsrl(MSR_AMD64_SYSCFG, val); 139 + rdmsrq(MSR_AMD64_SYSCFG, val); 140 140 141 141 val |= MSR_AMD64_SYSCFG_MFDM; 142 142 ··· 157 157 if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) 158 158 return 0; 159 159 160 - rdmsrl(MSR_AMD64_SYSCFG, val); 160 + rdmsrq(MSR_AMD64_SYSCFG, val); 161 161 162 162 val |= MSR_AMD64_SYSCFG_SNP_EN; 163 163 val |= MSR_AMD64_SYSCFG_SNP_VMPL_EN; ··· 522 522 * Check if SEV-SNP is already enabled, this can happen in case of 523 523 * kexec boot. 524 524 */ 525 - rdmsrl(MSR_AMD64_SYSCFG, val); 525 + rdmsrq(MSR_AMD64_SYSCFG, val); 526 526 if (val & MSR_AMD64_SYSCFG_SNP_EN) 527 527 goto skip_enable; 528 528 ··· 576 576 { 577 577 u64 rmp_sz, rmp_base, rmp_end; 578 578 579 - rdmsrl(MSR_AMD64_RMP_BASE, rmp_base); 580 - rdmsrl(MSR_AMD64_RMP_END, rmp_end); 579 + rdmsrq(MSR_AMD64_RMP_BASE, rmp_base); 580 + rdmsrq(MSR_AMD64_RMP_END, rmp_end); 581 581 582 582 if (!(rmp_base & RMP_ADDR_MASK) || !(rmp_end & RMP_ADDR_MASK)) { 583 583 pr_err("Memory for the RMP table has not been reserved by BIOS\n"); ··· 610 610 unsigned int eax, ebx, segment_shift, segment_shift_min, segment_shift_max; 611 611 u64 rmp_base, rmp_end; 612 612 613 - rdmsrl(MSR_AMD64_RMP_BASE, rmp_base); 613 + rdmsrq(MSR_AMD64_RMP_BASE, rmp_base); 614 614 if (!(rmp_base & RMP_ADDR_MASK)) { 615 615 pr_err("Memory for the RMP table has not been reserved by BIOS\n"); 616 616 return false; 617 617 } 618 618 619 - rdmsrl(MSR_AMD64_RMP_END, rmp_end); 619 + rdmsrq(MSR_AMD64_RMP_END, rmp_end); 620 620 WARN_ONCE(rmp_end & RMP_ADDR_MASK, 621 621 "Segmented RMP enabled but RMP_END MSR is non-zero\n"); 622 622 ··· 652 652 bool snp_probe_rmptable_info(void) 653 653 { 654 654 if (cpu_feature_enabled(X86_FEATURE_SEGMENTED_RMP)) 655 - rdmsrl(MSR_AMD64_RMP_CFG, rmp_cfg); 655 + rdmsrq(MSR_AMD64_RMP_CFG, rmp_cfg); 656 656 657 657 if (rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED) 658 658 return probe_segmented_rmptable_info();
+1 -1
arch/x86/xen/suspend.c
··· 55 55 tick_suspend_local(); 56 56 57 57 if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { 58 - rdmsrl(MSR_IA32_SPEC_CTRL, tmp); 58 + rdmsrq(MSR_IA32_SPEC_CTRL, tmp); 59 59 this_cpu_write(spec_ctrl, tmp); 60 60 wrmsrl(MSR_IA32_SPEC_CTRL, 0); 61 61 }
+1 -1
drivers/cpufreq/acpi-cpufreq.c
··· 110 110 return -EINVAL; 111 111 } 112 112 113 - rdmsrl(msr_addr, val); 113 + rdmsrq(msr_addr, val); 114 114 115 115 if (enable) 116 116 val &= ~msr_mask;
+2 -2
drivers/cpufreq/amd-pstate.c
··· 518 518 unsigned long flags; 519 519 520 520 local_irq_save(flags); 521 - rdmsrl(MSR_IA32_APERF, aperf); 522 - rdmsrl(MSR_IA32_MPERF, mperf); 521 + rdmsrq(MSR_IA32_APERF, aperf); 522 + rdmsrq(MSR_IA32_MPERF, mperf); 523 523 tsc = rdtsc(); 524 524 525 525 if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
+2 -2
drivers/cpufreq/e_powersaver.c
··· 225 225 return -ENODEV; 226 226 } 227 227 /* Enable Enhanced PowerSaver */ 228 - rdmsrl(MSR_IA32_MISC_ENABLE, val); 228 + rdmsrq(MSR_IA32_MISC_ENABLE, val); 229 229 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { 230 230 val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; 231 231 wrmsrl(MSR_IA32_MISC_ENABLE, val); 232 232 /* Can be locked at 0 */ 233 - rdmsrl(MSR_IA32_MISC_ENABLE, val); 233 + rdmsrq(MSR_IA32_MISC_ENABLE, val); 234 234 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { 235 235 pr_info("Can't enable Enhanced PowerSaver\n"); 236 236 return -ENODEV;
+14 -14
drivers/cpufreq/intel_pstate.c
··· 598 598 { 599 599 u64 misc_en; 600 600 601 - rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 601 + rdmsrq(MSR_IA32_MISC_ENABLE, misc_en); 602 602 603 603 return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); 604 604 } ··· 1285 1285 u64 power_ctl; 1286 1286 1287 1287 mutex_lock(&intel_pstate_driver_lock); 1288 - rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1288 + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); 1289 1289 if (input) { 1290 1290 power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); 1291 1291 power_ctl_ee_state = POWER_CTL_EE_ENABLE; ··· 1703 1703 u64 power_ctl; 1704 1704 int enable; 1705 1705 1706 - rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1706 + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); 1707 1707 enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); 1708 1708 return sprintf(buf, "%d\n", !enable); 1709 1709 } ··· 1990 1990 { 1991 1991 u64 value; 1992 1992 1993 - rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1993 + rdmsrq(MSR_ATOM_CORE_RATIOS, value); 1994 1994 return (value >> 8) & 0x7F; 1995 1995 } 1996 1996 ··· 1998 1998 { 1999 1999 u64 value; 2000 2000 2001 - rdmsrl(MSR_ATOM_CORE_RATIOS, value); 2001 + rdmsrq(MSR_ATOM_CORE_RATIOS, value); 2002 2002 return (value >> 16) & 0x7F; 2003 2003 } 2004 2004 ··· 2006 2006 { 2007 2007 u64 value; 2008 2008 2009 - rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 2009 + rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value); 2010 2010 return value & 0x7F; 2011 2011 } 2012 2012 ··· 2041 2041 static int silvermont_freq_table[] = { 2042 2042 83300, 100000, 133300, 116700, 80000}; 2043 2043 2044 - rdmsrl(MSR_FSB_FREQ, value); 2044 + rdmsrq(MSR_FSB_FREQ, value); 2045 2045 i = value & 0x7; 2046 2046 WARN_ON(i > 4); 2047 2047 ··· 2057 2057 83300, 100000, 133300, 116700, 80000, 2058 2058 93300, 90000, 88900, 87500}; 2059 2059 2060 - rdmsrl(MSR_FSB_FREQ, value); 2060 + rdmsrq(MSR_FSB_FREQ, value); 2061 2061 i = value & 0xF; 2062 2062 WARN_ON(i > 8); 2063 2063 ··· 2068 2068 { 2069 2069 u64 value; 2070 2070 2071 - rdmsrl(MSR_ATOM_CORE_VIDS, value); 2071 + rdmsrq(MSR_ATOM_CORE_VIDS, value); 2072 2072 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 2073 2073 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 2074 2074 cpudata->vid.ratio = div_fp( ··· 2076 2076 int_tofp(cpudata->pstate.max_pstate - 2077 2077 cpudata->pstate.min_pstate)); 2078 2078 2079 - rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 2079 + rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value); 2080 2080 cpudata->vid.turbo = value & 0x7f; 2081 2081 } 2082 2082 ··· 2425 2425 u64 tsc; 2426 2426 2427 2427 local_irq_save(flags); 2428 - rdmsrl(MSR_IA32_APERF, aperf); 2429 - rdmsrl(MSR_IA32_MPERF, mperf); 2428 + rdmsrq(MSR_IA32_APERF, aperf); 2429 + rdmsrq(MSR_IA32_MPERF, mperf); 2430 2430 tsc = rdtsc(); 2431 2431 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 2432 2432 local_irq_restore(flags); ··· 3573 3573 3574 3574 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 3575 3575 if (id) { 3576 - rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 3576 + rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr); 3577 3577 if (misc_pwr & BITMASK_OOB) { 3578 3578 pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); 3579 3579 pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); ··· 3629 3629 { 3630 3630 u64 value; 3631 3631 3632 - rdmsrl(MSR_PM_ENABLE, value); 3632 + rdmsrq(MSR_PM_ENABLE, value); 3633 3633 return !!(value & 0x1); 3634 3634 } 3635 3635
+4 -4
drivers/cpufreq/longhaul.c
··· 136 136 { 137 137 union msr_bcr2 bcr2; 138 138 139 - rdmsrl(MSR_VIA_BCR2, bcr2.val); 139 + rdmsrq(MSR_VIA_BCR2, bcr2.val); 140 140 /* Enable software clock multiplier */ 141 141 bcr2.bits.ESOFTBF = 1; 142 142 bcr2.bits.CLOCKMUL = mults_index & 0xff; ··· 151 151 152 152 /* Disable software clock multiplier */ 153 153 local_irq_disable(); 154 - rdmsrl(MSR_VIA_BCR2, bcr2.val); 154 + rdmsrq(MSR_VIA_BCR2, bcr2.val); 155 155 bcr2.bits.ESOFTBF = 0; 156 156 wrmsrl(MSR_VIA_BCR2, bcr2.val); 157 157 } ··· 164 164 union msr_longhaul longhaul; 165 165 u32 t; 166 166 167 - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); 167 + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); 168 168 /* Setup new frequency */ 169 169 if (!revid_errata) 170 170 longhaul.bits.RevisionKey = longhaul.bits.RevisionID; ··· 534 534 unsigned int j, speed, pos, kHz_step, numvscales; 535 535 int min_vid_speed; 536 536 537 - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); 537 + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); 538 538 if (!(longhaul.bits.RevisionID & 1)) { 539 539 pr_info("Voltage scaling not supported by CPU\n"); 540 540 return;
+5 -5
drivers/cpufreq/powernow-k7.c
··· 219 219 { 220 220 union msr_fidvidctl fidvidctl; 221 221 222 - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); 222 + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); 223 223 if (fidvidctl.bits.FID != fid) { 224 224 fidvidctl.bits.SGTC = latency; 225 225 fidvidctl.bits.FID = fid; ··· 234 234 { 235 235 union msr_fidvidctl fidvidctl; 236 236 237 - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); 237 + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); 238 238 if (fidvidctl.bits.VID != vid) { 239 239 fidvidctl.bits.SGTC = latency; 240 240 fidvidctl.bits.VID = vid; ··· 260 260 fid = powernow_table[index].driver_data & 0xFF; 261 261 vid = (powernow_table[index].driver_data & 0xFF00) >> 8; 262 262 263 - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 263 + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 264 264 cfid = fidvidstatus.bits.CFID; 265 265 freqs.old = fsb * fid_codes[cfid] / 10; 266 266 ··· 557 557 558 558 if (cpu) 559 559 return 0; 560 - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 560 + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 561 561 cfid = fidvidstatus.bits.CFID; 562 562 563 563 return fsb * fid_codes[cfid] / 10; ··· 598 598 if (policy->cpu != 0) 599 599 return -ENODEV; 600 600 601 - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 601 + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 602 602 603 603 recalibrate_cpu_khz(); 604 604
+3 -3
drivers/edac/amd64_edac.c
··· 2942 2942 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since 2943 2943 * those are Read-As-Zero. 2944 2944 */ 2945 - rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); 2945 + rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem); 2946 2946 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); 2947 2947 2948 2948 /* Check first whether TOP_MEM2 is enabled: */ 2949 - rdmsrl(MSR_AMD64_SYSCFG, msr_val); 2949 + rdmsrq(MSR_AMD64_SYSCFG, msr_val); 2950 2950 if (msr_val & BIT(21)) { 2951 - rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); 2951 + rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2); 2952 2952 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); 2953 2953 } else { 2954 2954 edac_dbg(0, " TOP_MEM2 disabled\n");
+12 -12
drivers/idle/intel_idle.c
··· 1928 1928 unsigned long long msr; 1929 1929 unsigned int usec; 1930 1930 1931 - rdmsrl(MSR_PKGC6_IRTL, msr); 1931 + rdmsrq(MSR_PKGC6_IRTL, msr); 1932 1932 usec = irtl_2_usec(msr); 1933 1933 if (usec) { 1934 1934 bxt_cstates[2].exit_latency = usec; 1935 1935 bxt_cstates[2].target_residency = usec; 1936 1936 } 1937 1937 1938 - rdmsrl(MSR_PKGC7_IRTL, msr); 1938 + rdmsrq(MSR_PKGC7_IRTL, msr); 1939 1939 usec = irtl_2_usec(msr); 1940 1940 if (usec) { 1941 1941 bxt_cstates[3].exit_latency = usec; 1942 1942 bxt_cstates[3].target_residency = usec; 1943 1943 } 1944 1944 1945 - rdmsrl(MSR_PKGC8_IRTL, msr); 1945 + rdmsrq(MSR_PKGC8_IRTL, msr); 1946 1946 usec = irtl_2_usec(msr); 1947 1947 if (usec) { 1948 1948 bxt_cstates[4].exit_latency = usec; 1949 1949 bxt_cstates[4].target_residency = usec; 1950 1950 } 1951 1951 1952 - rdmsrl(MSR_PKGC9_IRTL, msr); 1952 + rdmsrq(MSR_PKGC9_IRTL, msr); 1953 1953 usec = irtl_2_usec(msr); 1954 1954 if (usec) { 1955 1955 bxt_cstates[5].exit_latency = usec; 1956 1956 bxt_cstates[5].target_residency = usec; 1957 1957 } 1958 1958 1959 - rdmsrl(MSR_PKGC10_IRTL, msr); 1959 + rdmsrq(MSR_PKGC10_IRTL, msr); 1960 1960 usec = irtl_2_usec(msr); 1961 1961 if (usec) { 1962 1962 bxt_cstates[6].exit_latency = usec; ··· 1984 1984 if ((mwait_substates & (0xF << 28)) == 0) 1985 1985 return; 1986 1986 1987 - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); 1987 + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); 1988 1988 1989 1989 /* PC10 is not enabled in PKG C-state limit */ 1990 1990 if ((msr & 0xF) != 8) ··· 1996 1996 /* if SGX is present */ 1997 1997 if (ebx & (1 << 2)) { 1998 1998 1999 - rdmsrl(MSR_IA32_FEAT_CTL, msr); 1999 + rdmsrq(MSR_IA32_FEAT_CTL, msr); 2000 2000 2001 2001 /* if SGX is enabled */ 2002 2002 if (msr & (1 << 18)) ··· 2015 2015 { 2016 2016 unsigned long long msr; 2017 2017 2018 - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); 2018 + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); 2019 2019 2020 2020 /* 2021 2021 * 000b: C0/C1 (no package C-state support) ··· 2068 2068 * C6. However, if PC6 is disabled, we update the numbers to match 2069 2069 * core C6. 2070 2070 */ 2071 - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); 2071 + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); 2072 2072 2073 2073 /* Limit value 2 and above allow for PC6. */ 2074 2074 if ((msr & 0x7) < 2) { ··· 2241 2241 { 2242 2242 unsigned long long msr_bits; 2243 2243 2244 - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2244 + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2245 2245 msr_bits &= ~auto_demotion_disable_flags; 2246 2246 wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2247 2247 } ··· 2250 2250 { 2251 2251 unsigned long long msr_bits; 2252 2252 2253 - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); 2253 + rdmsrq(MSR_IA32_POWER_CTL, msr_bits); 2254 2254 msr_bits |= 0x2; 2255 2255 wrmsrl(MSR_IA32_POWER_CTL, msr_bits); 2256 2256 } ··· 2259 2259 { 2260 2260 unsigned long long msr_bits; 2261 2261 2262 - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); 2262 + rdmsrq(MSR_IA32_POWER_CTL, msr_bits); 2263 2263 msr_bits &= ~0x2; 2264 2264 wrmsrl(MSR_IA32_POWER_CTL, msr_bits); 2265 2265 }
+3 -3
drivers/mtd/nand/raw/cs553x_nand.c
··· 351 351 return -ENXIO; 352 352 353 353 /* If it doesn't have the CS553[56], abort */ 354 - rdmsrl(MSR_DIVIL_GLD_CAP, val); 354 + rdmsrq(MSR_DIVIL_GLD_CAP, val); 355 355 val &= ~0xFFULL; 356 356 if (val != CAP_CS5535 && val != CAP_CS5536) 357 357 return -ENXIO; 358 358 359 359 /* If it doesn't have the NAND controller enabled, abort */ 360 - rdmsrl(MSR_DIVIL_BALL_OPTS, val); 360 + rdmsrq(MSR_DIVIL_BALL_OPTS, val); 361 361 if (val & PIN_OPT_IDE) { 362 362 pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n"); 363 363 return -ENXIO; 364 364 } 365 365 366 366 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { 367 - rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val); 367 + rdmsrq(MSR_DIVIL_LBAR_FLSH0 + i, val); 368 368 369 369 if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND)) 370 370 err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
+5 -5
drivers/platform/x86/intel/ifs/load.c
··· 128 128 msrs = ifs_get_test_msrs(dev); 129 129 /* run scan hash copy */ 130 130 wrmsrl(msrs->copy_hashes, ifs_hash_ptr); 131 - rdmsrl(msrs->copy_hashes_status, hashes_status.data); 131 + rdmsrq(msrs->copy_hashes_status, hashes_status.data); 132 132 133 133 /* enumerate the scan image information */ 134 134 num_chunks = hashes_status.num_chunks; ··· 150 150 linear_addr |= i; 151 151 152 152 wrmsrl(msrs->copy_chunks, linear_addr); 153 - rdmsrl(msrs->copy_chunks_status, chunk_status.data); 153 + rdmsrq(msrs->copy_chunks_status, chunk_status.data); 154 154 155 155 ifsd->valid_chunks = chunk_status.valid_chunks; 156 156 err_code = chunk_status.error_code; ··· 196 196 197 197 if (need_copy_scan_hashes(ifsd)) { 198 198 wrmsrl(msrs->copy_hashes, ifs_hash_ptr); 199 - rdmsrl(msrs->copy_hashes_status, hashes_status.data); 199 + rdmsrq(msrs->copy_hashes_status, hashes_status.data); 200 200 201 201 /* enumerate the scan image information */ 202 202 chunk_size = hashes_status.chunk_size * SZ_1K; ··· 217 217 218 218 if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { 219 219 wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); 220 - rdmsrl(msrs->copy_chunks_status, chunk_status.data); 220 + rdmsrq(msrs->copy_chunks_status, chunk_status.data); 221 221 if (chunk_status.valid_chunks != 0) { 222 222 dev_err(dev, "Couldn't invalidate installed stride - %d\n", 223 223 chunk_status.valid_chunks); ··· 240 240 local_irq_disable(); 241 241 wrmsrl(msrs->copy_chunks, (u64)chunk_table); 242 242 local_irq_enable(); 243 - rdmsrl(msrs->copy_chunks_status, chunk_status.data); 243 + rdmsrq(msrs->copy_chunks_status, chunk_status.data); 244 244 err_code = chunk_status.error_code; 245 245 } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); 246 246
+4 -4
drivers/platform/x86/intel/ifs/runtest.c
··· 210 210 * are processed in a single pass) before it retires. 211 211 */ 212 212 wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data); 213 - rdmsrl(MSR_SCAN_STATUS, status.data); 213 + rdmsrq(MSR_SCAN_STATUS, status.data); 214 214 215 215 trace_ifs_status(ifsd->cur_batch, start, stop, status.data); 216 216 ··· 323 323 if (cpu == first) { 324 324 wrmsrl(MSR_ARRAY_BIST, command->data); 325 325 /* Pass back the result of the test */ 326 - rdmsrl(MSR_ARRAY_BIST, command->data); 326 + rdmsrq(MSR_ARRAY_BIST, command->data); 327 327 } 328 328 329 329 return 0; ··· 375 375 376 376 if (cpu == first) { 377 377 wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); 378 - rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status)); 378 + rdmsrq(MSR_ARRAY_STATUS, *((u64 *)status)); 379 379 } 380 380 381 381 return 0; ··· 527 527 * during the "execution" of the WRMSR. 528 528 */ 529 529 wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); 530 - rdmsrl(MSR_SBAF_STATUS, status.data); 530 + rdmsrq(MSR_SBAF_STATUS, status.data); 531 531 trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); 532 532 533 533 /* Pass back the result of the test */
+1 -1
drivers/platform/x86/intel/pmc/cnp.c
··· 227 227 int cpunum = smp_processor_id(); 228 228 u64 val; 229 229 230 - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, val); 230 + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); 231 231 per_cpu(pkg_cst_config, cpunum) = val; 232 232 val &= ~NHM_C1_AUTO_DEMOTE; 233 233 wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, val);
+3 -3
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
··· 39 39 /* Poll for rb bit == 0 */ 40 40 retries = OS_MAILBOX_RETRY_COUNT; 41 41 do { 42 - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); 42 + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); 43 43 if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { 44 44 ret = -EBUSY; 45 45 continue; ··· 64 64 /* Poll for rb bit == 0 */ 65 65 retries = OS_MAILBOX_RETRY_COUNT; 66 66 do { 67 - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); 67 + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); 68 68 if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { 69 69 ret = -EBUSY; 70 70 continue; ··· 74 74 return -ENXIO; 75 75 76 76 if (response_data) { 77 - rdmsrl(MSR_OS_MAILBOX_DATA, data); 77 + rdmsrq(MSR_OS_MAILBOX_DATA, data); 78 78 *response_data = data; 79 79 } 80 80 ret = 0;
+1 -1
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
··· 556 556 { 557 557 u64 value; 558 558 559 - rdmsrl(MSR_PM_ENABLE, value); 559 + rdmsrq(MSR_PM_ENABLE, value); 560 560 return !(value & 0x1); 561 561 } 562 562
+10 -10
drivers/platform/x86/intel_ips.c
··· 370 370 if (!ips->cpu_turbo_enabled) 371 371 return; 372 372 373 - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 373 + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 374 374 375 375 cur_tdp_limit = turbo_override & TURBO_TDP_MASK; 376 376 new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */ ··· 405 405 u64 turbo_override; 406 406 u16 cur_limit, new_limit; 407 407 408 - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 408 + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 409 409 410 410 cur_limit = turbo_override & TURBO_TDP_MASK; 411 411 new_limit = cur_limit - 8; /* 1W decrease */ ··· 437 437 { 438 438 u64 perf_ctl; 439 439 440 - rdmsrl(IA32_PERF_CTL, perf_ctl); 440 + rdmsrq(IA32_PERF_CTL, perf_ctl); 441 441 if (perf_ctl & IA32_PERF_TURBO_DIS) { 442 442 perf_ctl &= ~IA32_PERF_TURBO_DIS; 443 443 wrmsrl(IA32_PERF_CTL, perf_ctl); ··· 475 475 { 476 476 u64 perf_ctl; 477 477 478 - rdmsrl(IA32_PERF_CTL, perf_ctl); 478 + rdmsrq(IA32_PERF_CTL, perf_ctl); 479 479 if (!(perf_ctl & IA32_PERF_TURBO_DIS)) { 480 480 perf_ctl |= IA32_PERF_TURBO_DIS; 481 481 wrmsrl(IA32_PERF_CTL, perf_ctl); ··· 1215 1215 u64 turbo_override; 1216 1216 int tdp, tdc; 1217 1217 1218 - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1218 + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1219 1219 1220 1220 tdp = (int)(turbo_override & TURBO_TDP_MASK); 1221 1221 tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT); ··· 1290 1290 return NULL; 1291 1291 } 1292 1292 1293 - rdmsrl(IA32_MISC_ENABLE, misc_en); 1293 + rdmsrq(IA32_MISC_ENABLE, misc_en); 1294 1294 /* 1295 1295 * If the turbo enable bit isn't set, we shouldn't try to enable/disable 1296 1296 * turbo manually or we'll get an illegal MSR access, even though ··· 1312 1312 return NULL; 1313 1313 } 1314 1314 1315 - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); 1315 + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_power); 1316 1316 tdp = turbo_power & TURBO_TDP_MASK; 1317 1317 1318 1318 /* Sanity check TDP against CPU */ ··· 1496 1496 * Check PLATFORM_INFO MSR to make sure this chip is 1497 1497 * turbo capable. 1498 1498 */ 1499 - rdmsrl(PLATFORM_INFO, platform_info); 1499 + rdmsrq(PLATFORM_INFO, platform_info); 1500 1500 if (!(platform_info & PLATFORM_TDP)) { 1501 1501 dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n"); 1502 1502 return -ENODEV; ··· 1529 1529 ips->mgta_val = thm_readw(THM_MGTA); 1530 1530 1531 1531 /* Save turbo limits & ratios */ 1532 - rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); 1532 + rdmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); 1533 1533 1534 1534 ips_disable_cpu_turbo(ips); 1535 1535 ips->cpu_turbo_enabled = false; ··· 1596 1596 if (ips->gpu_turbo_disable) 1597 1597 symbol_put(i915_gpu_turbo_disable); 1598 1598 1599 - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1599 + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1600 1600 turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN); 1601 1601 wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 1602 1602 wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
+4 -4
drivers/thermal/intel/intel_hfi.c
··· 284 284 if (!raw_spin_trylock(&hfi_instance->event_lock)) 285 285 return; 286 286 287 - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr); 287 + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr); 288 288 hfi = msr & PACKAGE_THERM_STATUS_HFI_UPDATED; 289 289 if (!hfi) { 290 290 raw_spin_unlock(&hfi_instance->event_lock); ··· 356 356 { 357 357 u64 msr_val; 358 358 359 - rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 359 + rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 360 360 msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; 361 361 wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 362 362 } ··· 377 377 u64 msr_val; 378 378 int i; 379 379 380 - rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 380 + rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 381 381 msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; 382 382 wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); 383 383 ··· 388 388 * memory. 389 389 */ 390 390 for (i = 0; i < 2000; i++) { 391 - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); 391 + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); 392 392 if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED) 393 393 break; 394 394
+3 -3
drivers/thermal/intel/therm_throt.c
··· 287 287 else 288 288 msr = MSR_IA32_PACKAGE_THERM_STATUS; 289 289 290 - rdmsrl(msr, msr_val); 290 + rdmsrq(msr, msr_val); 291 291 if (msr_val & THERM_STATUS_PROCHOT_LOG) 292 292 *proc_hot = true; 293 293 else ··· 654 654 if (static_cpu_has(X86_FEATURE_HWP)) 655 655 notify_hwp_interrupt(); 656 656 657 - rdmsrl(MSR_IA32_THERM_STATUS, msr_val); 657 + rdmsrq(MSR_IA32_THERM_STATUS, msr_val); 658 658 659 659 /* Check for violation of core thermal thresholds*/ 660 660 notify_thresholds(msr_val); ··· 669 669 CORE_LEVEL); 670 670 671 671 if (this_cpu_has(X86_FEATURE_PTS)) { 672 - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); 672 + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); 673 673 /* check violations of package thermal thresholds */ 674 674 notify_package_thresholds(msr_val); 675 675 therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+1 -1
drivers/video/fbdev/geode/gxfb_core.c
··· 377 377 378 378 /* Figure out if this is a TFT or CRT part */ 379 379 380 - rdmsrl(MSR_GX_GLD_MSR_CONFIG, val); 380 + rdmsrq(MSR_GX_GLD_MSR_CONFIG, val); 381 381 382 382 if ((val & MSR_GX_GLD_MSR_CONFIG_FP) == MSR_GX_GLD_MSR_CONFIG_FP) 383 383 par->enable_crt = 0;
+6 -6
drivers/video/fbdev/geode/lxfb_ops.c
··· 358 358 359 359 /* Set output mode */ 360 360 361 - rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval); 361 + rdmsrq(MSR_LX_GLD_MSR_CONFIG, msrval); 362 362 msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT; 363 363 364 364 if (par->output & OUTPUT_PANEL) { ··· 419 419 420 420 /* Set default watermark values */ 421 421 422 - rdmsrl(MSR_LX_SPARE_MSR, msrval); 422 + rdmsrq(MSR_LX_SPARE_MSR, msrval); 423 423 424 424 msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO 425 425 | MSR_LX_SPARE_MSR_VFIFO_ARB_SEL ··· 591 591 } while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE)); 592 592 593 593 /* save MSRs */ 594 - rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel); 595 - rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll); 596 - rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); 597 - rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare); 594 + rdmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel); 595 + rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll); 596 + rdmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); 597 + rdmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare); 598 598 599 599 write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK); 600 600
+4 -4
drivers/video/fbdev/geode/suspend_gx.c
··· 21 21 } while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY)); 22 22 23 23 /* save MSRs */ 24 - rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel); 25 - rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll); 24 + rdmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel); 25 + rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll); 26 26 27 27 write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK); 28 28 ··· 43 43 uint32_t dotpll_lo; 44 44 int i; 45 45 46 - rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo); 46 + rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo); 47 47 dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET; 48 48 dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS; 49 49 wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi); 50 50 51 51 /* wait for the PLL to lock */ 52 52 for (i = 0; i < 200; i++) { 53 - rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo); 53 + rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo); 54 54 if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK) 55 55 break; 56 56 udelay(1);
+4 -4
drivers/video/fbdev/geode/video_gx.c
··· 142 142 } 143 143 } 144 144 145 - rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); 146 - rdmsrl(MSR_GLCP_DOTPLL, dotpll); 145 + rdmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll); 146 + rdmsrq(MSR_GLCP_DOTPLL, dotpll); 147 147 148 148 /* Program new M, N and P. */ 149 149 dotpll &= 0x00000000ffffffffull; ··· 167 167 168 168 /* Wait for LOCK bit. */ 169 169 do { 170 - rdmsrl(MSR_GLCP_DOTPLL, dotpll); 170 + rdmsrq(MSR_GLCP_DOTPLL, dotpll); 171 171 } while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK)); 172 172 } 173 173 ··· 180 180 181 181 /* Set up the DF pad select MSR */ 182 182 183 - rdmsrl(MSR_GX_MSR_PADSEL, val); 183 + rdmsrq(MSR_GX_MSR_PADSEL, val); 184 184 val &= ~MSR_GX_MSR_PADSEL_MASK; 185 185 val |= MSR_GX_MSR_PADSEL_TFT; 186 186 wrmsrl(MSR_GX_MSR_PADSEL, val);
+1 -1
include/hyperv/hvgdk_mini.h
··· 1013 1013 1014 1014 /* 1015 1015 * To support arch-generic code calling hv_set/get_register: 1016 - * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl 1016 + * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrl 1017 1017 * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall 1018 1018 */ 1019 1019 #define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)
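A minimal, illustrative sketch of what the rename means at a call site (editorial example, not part of the patch; it assumes <asm/msr.h>, uses MSR_IA32_SPEC_CTRL purely as an example register, and the write-side wrmsrl() spelling is untouched by this particular patch):

#include <asm/msr.h>

/* Illustrative only -- not taken from the series. */
static void msr_rename_example(void)
{
	u64 val;

	/*
	 * Old spelling, removed by this series:
	 *	rdmsrl(MSR_IA32_SPEC_CTRL, val);
	 * New spelling, identical semantics: read the 64-bit MSR into val.
	 */
	rdmsrq(MSR_IA32_SPEC_CTRL, val);

	/* The write accessor keeps its old name in this patch. */
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}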