Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Drop SYS_ from SPE register defines

We currently have a non-standard SYS_ prefix in the constants generated
for the SPE register bitfields. Drop this in preparation for automatic
register definition generation.

The SPE mask defines were unshifted, and the SPE register field
enumerations were shifted. The autogenerated defines are the opposite,
so make the necessary adjustments.

No functional changes.

Tested-by: James Clark <james.clark@arm.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-2-327f860daf28@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Rob Herring and committed by Will Deacon
c759ec85 e080477a

+103 -104
+3 -3
arch/arm64/include/asm/el2_setup.h
··· 53 53 cbz x0, .Lskip_spe_\@ // Skip if SPE not present 54 54 55 55 mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, 56 - and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT) 56 + and x0, x0, #(1 << PMBIDR_EL1_P_SHIFT) 57 57 cbnz x0, .Lskip_spe_el2_\@ // then permit sampling of physical 58 - mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ 59 - 1 << SYS_PMSCR_EL2_PA_SHIFT) 58 + mov x0, #(1 << PMSCR_EL2_PCT_SHIFT | \ 59 + 1 << PMSCR_EL2_PA_SHIFT) 60 60 msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter 61 61 .Lskip_spe_el2_\@: 62 62 mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+56 -56
arch/arm64/include/asm/sysreg.h
··· 218 218 /*** Statistical Profiling Extension ***/ 219 219 /* ID registers */ 220 220 #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) 221 - #define SYS_PMSIDR_EL1_FE_SHIFT 0 222 - #define SYS_PMSIDR_EL1_FT_SHIFT 1 223 - #define SYS_PMSIDR_EL1_FL_SHIFT 2 224 - #define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 225 - #define SYS_PMSIDR_EL1_LDS_SHIFT 4 226 - #define SYS_PMSIDR_EL1_ERND_SHIFT 5 227 - #define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 228 - #define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL 229 - #define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 230 - #define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL 231 - #define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 232 - #define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL 221 + #define PMSIDR_EL1_FE_SHIFT 0 222 + #define PMSIDR_EL1_FT_SHIFT 1 223 + #define PMSIDR_EL1_FL_SHIFT 2 224 + #define PMSIDR_EL1_ARCHINST_SHIFT 3 225 + #define PMSIDR_EL1_LDS_SHIFT 4 226 + #define PMSIDR_EL1_ERND_SHIFT 5 227 + #define PMSIDR_EL1_INTERVAL_SHIFT 8 228 + #define PMSIDR_EL1_INTERVAL_MASK GENMASK_ULL(11, 8) 229 + #define PMSIDR_EL1_MAXSIZE_SHIFT 12 230 + #define PMSIDR_EL1_MAXSIZE_MASK GENMASK_ULL(15, 12) 231 + #define PMSIDR_EL1_COUNTSIZE_SHIFT 16 232 + #define PMSIDR_EL1_COUNTSIZE_MASK GENMASK_ULL(19, 16) 233 233 234 234 #define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) 235 - #define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 236 - #define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU 237 - #define SYS_PMBIDR_EL1_P_SHIFT 4 238 - #define SYS_PMBIDR_EL1_F_SHIFT 5 235 + #define PMBIDR_EL1_ALIGN_SHIFT 0 236 + #define PMBIDR_EL1_ALIGN_MASK 0xfU 237 + #define PMBIDR_EL1_P_SHIFT 4 238 + #define PMBIDR_EL1_F_SHIFT 5 239 239 240 240 /* Sampling controls */ 241 241 #define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) 242 - #define SYS_PMSCR_EL1_E0SPE_SHIFT 0 243 - #define SYS_PMSCR_EL1_E1SPE_SHIFT 1 244 - #define SYS_PMSCR_EL1_CX_SHIFT 3 245 - #define SYS_PMSCR_EL1_PA_SHIFT 4 246 - #define SYS_PMSCR_EL1_TS_SHIFT 5 247 - #define SYS_PMSCR_EL1_PCT_SHIFT 6 242 + #define PMSCR_EL1_E0SPE_SHIFT 0 243 + #define PMSCR_EL1_E1SPE_SHIFT 1 244 + #define PMSCR_EL1_CX_SHIFT 3 245 + #define PMSCR_EL1_PA_SHIFT 4 246 + #define PMSCR_EL1_TS_SHIFT 5 247 + #define PMSCR_EL1_PCT_SHIFT 6 248 248 249 249 #define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) 250 - #define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 251 - #define SYS_PMSCR_EL2_E2SPE_SHIFT 1 252 - #define SYS_PMSCR_EL2_CX_SHIFT 3 253 - #define SYS_PMSCR_EL2_PA_SHIFT 4 254 - #define SYS_PMSCR_EL2_TS_SHIFT 5 255 - #define SYS_PMSCR_EL2_PCT_SHIFT 6 250 + #define PMSCR_EL2_E0HSPE_SHIFT 0 251 + #define PMSCR_EL2_E2SPE_SHIFT 1 252 + #define PMSCR_EL2_CX_SHIFT 3 253 + #define PMSCR_EL2_PA_SHIFT 4 254 + #define PMSCR_EL2_TS_SHIFT 5 255 + #define PMSCR_EL2_PCT_SHIFT 6 256 256 257 257 #define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) 258 258 259 259 #define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) 260 - #define SYS_PMSIRR_EL1_RND_SHIFT 0 261 - #define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 262 - #define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL 260 + #define PMSIRR_EL1_RND_SHIFT 0 261 + #define PMSIRR_EL1_INTERVAL_SHIFT 8 262 + #define PMSIRR_EL1_INTERVAL_MASK GENMASK_ULL(31, 8) 263 263 264 264 /* Filtering controls */ 265 265 #define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) 266 266 267 267 #define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) 268 - #define SYS_PMSFCR_EL1_FE_SHIFT 0 269 - #define SYS_PMSFCR_EL1_FT_SHIFT 1 270 - #define SYS_PMSFCR_EL1_FL_SHIFT 2 271 - #define SYS_PMSFCR_EL1_B_SHIFT 16 272 - #define SYS_PMSFCR_EL1_LD_SHIFT 17 273 - #define SYS_PMSFCR_EL1_ST_SHIFT 18 268 + #define PMSFCR_EL1_FE_SHIFT 0 269 + #define PMSFCR_EL1_FT_SHIFT 1 270 + #define PMSFCR_EL1_FL_SHIFT 2 271 + #define PMSFCR_EL1_B_SHIFT 16 272 + #define PMSFCR_EL1_LD_SHIFT 17 273 + #define PMSFCR_EL1_ST_SHIFT 18 274 274 275 275 #define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) 276 276 #define PMSEVFR_EL1_RES0_IMP \
··· 280 280 (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) 281 281 282 282 #define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) 283 - #define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 283 + #define PMSLATFR_EL1_MINLAT_SHIFT 0 284 284 285 285 /* Buffer controls */ 286 286 #define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) 287 - #define SYS_PMBLIMITR_EL1_E_SHIFT 0 288 - #define SYS_PMBLIMITR_EL1_FM_SHIFT 1 289 - #define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL 290 - #define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) 287 + #define PMBLIMITR_EL1_E_SHIFT 0 288 + #define PMBLIMITR_EL1_FM_SHIFT 1 289 + #define PMBLIMITR_EL1_FM_MASK GENMASK_ULL(2, 1) 290 + #define PMBLIMITR_EL1_FM_STOP_IRQ 0 291 291 292 292 #define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) 293 293 294 294 /* Buffer error reporting */ 295 295 #define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) 296 - #define SYS_PMBSR_EL1_COLL_SHIFT 16 297 - #define SYS_PMBSR_EL1_S_SHIFT 17 298 - #define SYS_PMBSR_EL1_EA_SHIFT 18 299 - #define SYS_PMBSR_EL1_DL_SHIFT 19 300 - #define SYS_PMBSR_EL1_EC_SHIFT 26 301 - #define SYS_PMBSR_EL1_EC_MASK 0x3fUL 296 + #define PMBSR_EL1_COLL_SHIFT 16 297 + #define PMBSR_EL1_S_SHIFT 17 298 + #define PMBSR_EL1_EA_SHIFT 18 299 + #define PMBSR_EL1_DL_SHIFT 19 300 + #define PMBSR_EL1_EC_SHIFT 26 301 + #define PMBSR_EL1_EC_MASK GENMASK_ULL(31, 26) 302 302 303 - #define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) 304 - #define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) 305 - #define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) 303 + #define PMBSR_EL1_EC_BUF 0x0UL 304 + #define PMBSR_EL1_EC_FAULT_S1 0x24UL 305 + #define PMBSR_EL1_EC_FAULT_S2 0x25UL 306 306 307 - #define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 308 - #define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL 307 + #define PMBSR_EL1_FAULT_FSC_SHIFT 0 308 + #define PMBSR_EL1_FAULT_FSC_MASK 0x3fUL 309 309 310 - #define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 311 - #define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL 310 + #define PMBSR_EL1_BUF_BSC_SHIFT 0 311 + #define PMBSR_EL1_BUF_BSC_MASK 0x3fUL 312 312 313 - #define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) 313 + #define PMBSR_EL1_BUF_BSC_FULL 0x1UL 314 314 315 315 /*** End of Statistical Profiling Extension ***/ 316 316
+1 -1
arch/arm64/kvm/debug.c
··· 328 328 * we may need to check if the host state needs to be saved. 329 329 */ 330 330 if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && 331 - !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) 331 + !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT))) 332 332 vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); 333 333 334 334 /* Check if we have TRBE implemented and available at the host */
+1 -1
arch/arm64/kvm/hyp/nvhe/debug-sr.c
··· 27 27 * Check if the host is actually using it ? 28 28 */ 29 29 reg = read_sysreg_s(SYS_PMBLIMITR_EL1); 30 - if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT))) 30 + if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT))) 31 31 return; 32 32 33 33 /* Yes; save the control register and disable data generation */
+42 -43
drivers/perf/arm_spe_pmu.c
··· 12 12 #define DRVNAME PMUNAME "_pmu" 13 13 #define pr_fmt(fmt) DRVNAME ": " fmt 14 14 15 + #include <linux/bitfield.h> 15 16 #include <linux/bitops.h> 16 17 #include <linux/bug.h> 17 18 #include <linux/capability.h>
··· 283 282 struct perf_event_attr *attr = &event->attr; 284 283 u64 reg = 0; 285 284 286 - reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT; 287 - reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT; 288 - reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT; 285 + reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << PMSCR_EL1_TS_SHIFT; 286 + reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << PMSCR_EL1_PA_SHIFT; 287 + reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << PMSCR_EL1_PCT_SHIFT; 289 288 290 289 if (!attr->exclude_user) 291 - reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT); 290 + reg |= BIT(PMSCR_EL1_E0SPE_SHIFT); 292 291 293 292 if (!attr->exclude_kernel) 294 - reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); 293 + reg |= BIT(PMSCR_EL1_E1SPE_SHIFT); 295 294 296 295 if (get_spe_event_has_cx(event)) 297 - reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); 296 + reg |= BIT(PMSCR_EL1_CX_SHIFT); 298 297 299 298 return reg; 300 299 }
··· 303 302 { 304 303 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); 305 304 u64 period = event->hw.sample_period; 306 - u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK 307 - << SYS_PMSIRR_EL1_INTERVAL_SHIFT; 305 + u64 max_period = PMSIRR_EL1_INTERVAL_MASK; 308 306 309 307 if (period < spe_pmu->min_period) 310 308 period = spe_pmu->min_period;
··· 322 322 323 323 arm_spe_event_sanitise_period(event); 324 324 325 - reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT; 325 + reg |= ATTR_CFG_GET_FLD(attr, jitter) << PMSIRR_EL1_RND_SHIFT; 326 326 reg |= event->hw.sample_period; 327 327 328 328 return reg;
··· 333 333 struct perf_event_attr *attr = &event->attr; 334 334 u64 reg = 0; 335 335 336 - reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT; 337 - reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT; 338 - reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT; 336 + reg |= ATTR_CFG_GET_FLD(attr, load_filter) << PMSFCR_EL1_LD_SHIFT; 337 + reg |= ATTR_CFG_GET_FLD(attr, store_filter) << PMSFCR_EL1_ST_SHIFT; 338 + reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << PMSFCR_EL1_B_SHIFT; 339 339 340 340 if (reg) 341 - reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT); 341 + reg |= BIT(PMSFCR_EL1_FT_SHIFT); 342 342 343 343 if (ATTR_CFG_GET_FLD(attr, event_filter)) 344 - reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT); 344 + reg |= BIT(PMSFCR_EL1_FE_SHIFT); 345 345 346 346 if (ATTR_CFG_GET_FLD(attr, min_latency)) 347 - reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT); 347 + reg |= BIT(PMSFCR_EL1_FL_SHIFT); 348 348 349 349 return reg; 350 350 }
··· 359 359 { 360 360 struct perf_event_attr *attr = &event->attr; 361 361 return ATTR_CFG_GET_FLD(attr, min_latency) 362 - << SYS_PMSLATFR_EL1_MINLAT_SHIFT; 362 + << PMSLATFR_EL1_MINLAT_SHIFT; 363 363 } 364 364 365 365 static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
··· 511 511 limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle) 512 512 : arm_spe_pmu_next_off(handle); 513 513 if (limit) 514 - limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT); 514 + limit |= BIT(PMBLIMITR_EL1_E_SHIFT); 515 515 516 516 limit += (u64)buf->base; 517 517 base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
··· 570 570 571 571 /* Service required? */ 572 572 pmbsr = read_sysreg_s(SYS_PMBSR_EL1); 573 - if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT))) 573 + if (!(pmbsr & BIT(PMBSR_EL1_S_SHIFT))) 574 574 return SPE_PMU_BUF_FAULT_ACT_SPURIOUS; 575 575 576 576 /* 577 577 * If we've lost data, disable profiling and also set the PARTIAL 578 578 * flag to indicate that the last record is corrupted. 579 579 */ 580 - if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT)) 580 + if (pmbsr & BIT(PMBSR_EL1_DL_SHIFT)) 581 581 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED | 582 582 PERF_AUX_FLAG_PARTIAL); 583 583 584 584 /* Report collisions to userspace so that it can up the period */ 585 - if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT)) 585 + if (pmbsr & BIT(PMBSR_EL1_COLL_SHIFT)) 586 586 perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION); 587 587 588 588 /* We only expect buffer management events */ 589 - switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) { 590 - case SYS_PMBSR_EL1_EC_BUF: 589 + switch (FIELD_GET(PMBSR_EL1_EC_MASK, pmbsr)) { 590 + case PMBSR_EL1_EC_BUF: 591 591 /* Handled below */ 592 592 break; 593 - case SYS_PMBSR_EL1_EC_FAULT_S1: 594 - case SYS_PMBSR_EL1_EC_FAULT_S2: 593 + case PMBSR_EL1_EC_FAULT_S1: 594 + case PMBSR_EL1_EC_FAULT_S2: 595 595 err_str = "Unexpected buffer fault"; 596 596 goto out_err; 597 597 default:
··· 600 600 } 601 601 602 602 /* Buffer management event */ 603 - switch (pmbsr & 604 - (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) { 605 - case SYS_PMBSR_EL1_BUF_BSC_FULL: 603 + switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) { 604 + case PMBSR_EL1_BUF_BSC_FULL: 606 605 ret = SPE_PMU_BUF_FAULT_ACT_OK; 607 606 goto out_stop; 608 607 default:
··· 716 717 return -EINVAL; 717 718 718 719 reg = arm_spe_event_to_pmsfcr(event); 719 - if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) && 720 + if ((reg & BIT(PMSFCR_EL1_FE_SHIFT)) && 720 721 !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) 721 722 return -EOPNOTSUPP; 722 723 723 - if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) && 724 + if ((reg & BIT(PMSFCR_EL1_FT_SHIFT)) && 724 725 !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) 725 726 return -EOPNOTSUPP; 726 727 727 - if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) && 728 + if ((reg & BIT(PMSFCR_EL1_FL_SHIFT)) && 728 729 !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) 729 730 return -EOPNOTSUPP; 730 731 731 732 set_spe_event_has_cx(event); 732 733 reg = arm_spe_event_to_pmscr(event); 733 734 if (!perfmon_capable() && 734 - (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | 735 - BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) 735 + (reg & (BIT(PMSCR_EL1_PA_SHIFT) | 736 + BIT(PMSCR_EL1_PCT_SHIFT)))) 736 737 return -EACCES; 737 738 738 739 return 0;
··· 970 971 971 972 /* Read PMBIDR first to determine whether or not we have access */ 972 973 reg = read_sysreg_s(SYS_PMBIDR_EL1); 973 - if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) { 974 + if (reg & BIT(PMBIDR_EL1_P_SHIFT)) { 974 975 dev_err(dev, 975 976 "profiling buffer owned by higher exception level\n"); 976 977 return; 977 978 } 978 979 979 980 /* Minimum alignment. If it's out-of-range, then fail the probe */ 980 - fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK; 981 + fld = (reg & PMBIDR_EL1_ALIGN_MASK) >> PMBIDR_EL1_ALIGN_SHIFT; 981 982 spe_pmu->align = 1 << fld; 982 983 if (spe_pmu->align > SZ_2K) { 983 984 dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
··· 987 988 988 989 /* It's now safe to read PMSIDR and figure out what we've got */ 989 990 reg = read_sysreg_s(SYS_PMSIDR_EL1); 990 - if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT)) 991 + if (reg & BIT(PMSIDR_EL1_FE_SHIFT)) 991 992 spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; 992 993 993 - if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT)) 994 + if (reg & BIT(PMSIDR_EL1_FT_SHIFT)) 994 995 spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; 995 996 996 - if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT)) 997 + if (reg & BIT(PMSIDR_EL1_FL_SHIFT)) 997 998 spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; 998 999 999 - if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT)) 1000 + if (reg & BIT(PMSIDR_EL1_ARCHINST_SHIFT)) 1000 1001 spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; 1001 1002 1002 - if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT)) 1003 + if (reg & BIT(PMSIDR_EL1_LDS_SHIFT)) 1003 1004 spe_pmu->features |= SPE_PMU_FEAT_LDS; 1004 1005 1005 - if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT)) 1006 + if (reg & BIT(PMSIDR_EL1_ERND_SHIFT)) 1006 1007 spe_pmu->features |= SPE_PMU_FEAT_ERND; 1007 1008 1008 1009 /* This field has a spaced out encoding, so just use a look-up */ 1009 - fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK; 1010 + fld = (reg & PMSIDR_EL1_INTERVAL_MASK) >> PMSIDR_EL1_INTERVAL_SHIFT; 1010 1011 switch (fld) { 1011 1012 case 0: 1012 1013 spe_pmu->min_period = 256;
··· 1038 1039 } 1039 1040 1040 1041 /* Maximum record size. If it's out-of-range, then fail the probe */ 1041 - fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK; 1042 + fld = (reg & PMSIDR_EL1_MAXSIZE_MASK) >> PMSIDR_EL1_MAXSIZE_SHIFT; 1042 1043 spe_pmu->max_record_sz = 1 << fld; 1043 1044 if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { 1044 1045 dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
··· 1046 1047 return; 1047 1048 } 1048 1049 1049 - fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK; 1050 + fld = (reg & PMSIDR_EL1_COUNTSIZE_MASK) >> PMSIDR_EL1_COUNTSIZE_SHIFT; 1050 1051 switch (fld) { 1051 1052 default: 1052 1053 dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",