Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: CM: Add cluster & block args to mips_cm_lock_other()

With CM >= 3.5 we have the notion of multiple clusters & can access
their CM, CPC & GIC registers via the appropriate redirect/other
register blocks. In order to allow for this introduce cluster & block
arguments to mips_cm_lock_other() which configures the redirect/other
region to point at the appropriate cluster, core, VP & register block.

Since we now have 4 arguments to mips_cm_lock_other() & a common use is
likely to be to target the cluster, core & VP corresponding to a
particular Linux CPU number we also add a new mips_cm_lock_other_cpu()
helper function which handles that without the caller needing to
manually pull out the cluster, core & VP numbers.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/17013/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Paul Burton and committed by
Ralf Baechle
68923cdc 5616897e

+58 -18
+36 -9
arch/mips/include/asm/mips-cm.h
··· 437 437 #ifdef CONFIG_MIPS_CM 438 438 439 439 /** 440 - * mips_cm_lock_other - lock access to another core 440 + * mips_cm_lock_other - lock access to redirect/other region 441 + * @cluster: the other cluster to be accessed 441 442 * @core: the other core to be accessed 442 443 * @vp: the VP within the other core to be accessed 444 + * @block: the register block to be accessed 443 445 * 444 - * Call before operating upon a core via the 'other' register region in 445 - * order to prevent the region being moved during access. Must be followed 446 - * by a call to mips_cm_unlock_other. 446 + * Configure the redirect/other region for the local core/VP (depending upon 447 + * the CM revision) to target the specified @cluster, @core, @vp & register 448 + * @block. Must be called before using the redirect/other region, and followed 449 + * by a call to mips_cm_unlock_other() when access to the redirect/other region 450 + * is complete. 451 + * 452 + * This function acquires a spinlock such that code between it & 453 + * mips_cm_unlock_other() calls cannot be pre-empted by anything which may 454 + * reconfigure the redirect/other region, and cannot be interfered with by 455 + * another VP in the core. As such calls to this function should not be nested. 447 456 */ 448 - extern void mips_cm_lock_other(unsigned int core, unsigned int vp); 457 + extern void mips_cm_lock_other(unsigned int cluster, unsigned int core, 458 + unsigned int vp, unsigned int block); 449 459 450 460 /** 451 - * mips_cm_unlock_other - unlock access to another core 461 + * mips_cm_unlock_other - unlock access to redirect/other region 452 462 * 453 - * Call after operating upon another core via the 'other' register region. 454 - * Must be called after mips_cm_lock_other. 463 + * Must be called after mips_cm_lock_other() once all required access to the 464 + * redirect/other region has been completed. 
455 465 */ 456 466 extern void mips_cm_unlock_other(void); 457 467 458 468 #else /* !CONFIG_MIPS_CM */ 459 469 460 - static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { } 470 + static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core, 471 + unsigned int vp, unsigned int block) { } 461 472 static inline void mips_cm_unlock_other(void) { } 462 473 463 474 #endif /* !CONFIG_MIPS_CM */ 475 + 476 + /** 477 + * mips_cm_lock_other_cpu - lock access to redirect/other region 478 + * @cpu: the other CPU whose register we want to access 479 + * 480 + * Configure the redirect/other region for the local core/VP (depending upon 481 + * the CM revision) to target the specified @cpu & register @block. This is 482 + * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number 483 + * for convenience. 484 + */ 485 + static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block) 486 + { 487 + struct cpuinfo_mips *d = &cpu_data[cpu]; 488 + 489 + mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block); 490 + } 464 491 465 492 #endif /* __MIPS_ASM_MIPS_CM_H__ */
+16 -3
arch/mips/kernel/mips-cm.c
··· 257 257 return 0; 258 258 } 259 259 260 - void mips_cm_lock_other(unsigned int core, unsigned int vp) 260 + void mips_cm_lock_other(unsigned int cluster, unsigned int core, 261 + unsigned int vp, unsigned int block) 261 262 { 262 - unsigned curr_core; 263 + unsigned int curr_core, cm_rev; 263 264 u32 val; 264 265 266 + cm_rev = mips_cm_revision(); 265 267 preempt_disable(); 266 268 267 - if (mips_cm_revision() >= CM_REV_CM3) { 269 + if (cm_rev >= CM_REV_CM3) { 268 270 val = core << __ffs(CM3_GCR_Cx_OTHER_CORE); 269 271 val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP); 272 + 273 + if (cm_rev >= CM_REV_CM3_5) { 274 + val |= CM_GCR_Cx_OTHER_CLUSTER_EN; 275 + val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER); 276 + val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK); 277 + } else { 278 + WARN_ON(cluster != 0); 279 + WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); 280 + } 270 281 271 282 /* 272 283 * We need to disable interrupts in SMP systems in order to ··· 291 280 spin_lock_irqsave(this_cpu_ptr(&cm_core_lock), 292 281 *this_cpu_ptr(&cm_core_lock_flags)); 293 282 } else { 283 + WARN_ON(cluster != 0); 294 284 WARN_ON(vp != 0); 285 + WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); 295 286 296 287 /* 297 288 * We only have a GCR_CL_OTHER per core in systems with
+5 -5
arch/mips/kernel/smp-cps.c
··· 52 52 && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) 53 53 return 1; 54 54 55 - mips_cm_lock_other(core, 0); 55 + mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 56 56 cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE; 57 57 mips_cm_unlock_other(); 58 58 return cfg + 1; ··· 214 214 unsigned timeout; 215 215 216 216 /* Select the appropriate core */ 217 - mips_cm_lock_other(core, 0); 217 + mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 218 218 219 219 /* Set its reset vector */ 220 220 write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); ··· 313 313 } 314 314 315 315 if (cpu_has_vp) { 316 - mips_cm_lock_other(core, vpe_id); 316 + mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 317 317 core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); 318 318 write_gcr_co_reset_base(core_entry); 319 319 mips_cm_unlock_other(); ··· 518 518 */ 519 519 fail_time = ktime_add_ms(ktime_get(), 2000); 520 520 do { 521 - mips_cm_lock_other(core, 0); 521 + mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 522 522 mips_cpc_lock_other(core); 523 523 stat = read_cpc_co_stat_conf(); 524 524 stat &= CPC_Cx_STAT_CONF_SEQSTATE; ··· 562 562 panic("Failed to call remote sibling CPU\n"); 563 563 } else if (cpu_has_vp) { 564 564 do { 565 - mips_cm_lock_other(core, vpe_id); 565 + mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 566 566 stat = read_cpc_co_vp_running(); 567 567 mips_cm_unlock_other(); 568 568 } while (stat & (1 << vpe_id));
+1 -1
arch/mips/kernel/smp.c
··· 190 190 core = cpu_core(&cpu_data[cpu]); 191 191 192 192 while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { 193 - mips_cm_lock_other(core, 0); 193 + mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 194 194 mips_cpc_lock_other(core); 195 195 write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); 196 196 mips_cpc_unlock_other();