Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: pm-cps: Add MIPSr6 CPU support

This patch adds support for CPUs implementing the MIPSr6 ISA to the CPS
power management code. Three changes are necessary:

1. In MIPSr6, coupled coherence is necessary when CPUs implement multiple
Virtual Processors (VPs).

2. MIPSr6 virtual processors are more like real cores and cannot yield
to other VPs on the same core, so drop the MT ASE yield instruction.

3. To halt a MIPSr6 VP, the CPC VP_STOP register is used rather than the
MT ASE TCHalt CP0 register.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Reviewed-by: Paul Burton <paul.burton@imgtec.com>
Cc: Adam Buchbinder <adam.buchbinder@gmail.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/14225/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Matt Redfearn and committed by
Ralf Baechle
929d4f51 15ea26cf

+22 -6
+4 -2
arch/mips/include/asm/pm-cps.h
··· 13 13 14 14 /* 15 15 * The CM & CPC can only handle coherence & power control on a per-core basis, 16 - * thus in an MT system the VPEs within each core are coupled and can only 16 + * thus in an MT system the VP(E)s within each core are coupled and can only 17 17 * enter or exit states requiring CM or CPC assistance in unison. 18 18 */ 19 - #ifdef CONFIG_MIPS_MT 19 + #if defined(CONFIG_CPU_MIPSR6) 20 + # define coupled_coherence cpu_has_vp 21 + #elif defined(CONFIG_MIPS_MT) 20 22 # define coupled_coherence cpu_has_mipsmt 21 23 #else 22 24 # define coupled_coherence 0
+18 -4
arch/mips/kernel/pm-cps.c
··· 129 129 return -EINVAL; 130 130 131 131 /* Calculate which coupled CPUs (VPEs) are online */ 132 - #ifdef CONFIG_MIPS_MT 132 + #if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6) 133 133 if (cpu_online(cpu)) { 134 134 cpumask_and(coupled_mask, cpu_online_mask, 135 135 &cpu_sibling_map[cpu]); ··· 431 431 uasm_i_lw(&p, t0, 0, r_nc_count); 432 432 uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); 433 433 uasm_i_ehb(&p); 434 - uasm_i_yield(&p, zero, t1); 434 + if (cpu_has_mipsmt) 435 + uasm_i_yield(&p, zero, t1); 435 436 uasm_il_b(&p, &r, lbl_poll_cont); 436 437 uasm_i_nop(&p); 437 438 } else { ··· 440 439 * The core will lose power & this VPE will not continue 441 440 * so it can simply halt here. 442 441 */ 443 - uasm_i_addiu(&p, t0, zero, TCHALT_H); 444 - uasm_i_mtc0(&p, t0, 2, 4); 442 + if (cpu_has_mipsmt) { 443 + /* Halt the VPE via C0 tchalt register */ 444 + uasm_i_addiu(&p, t0, zero, TCHALT_H); 445 + uasm_i_mtc0(&p, t0, 2, 4); 446 + } else if (cpu_has_vp) { 447 + /* Halt the VP via the CPC VP_STOP register */ 448 + unsigned int vpe_id; 449 + 450 + vpe_id = cpu_vpe_id(&cpu_data[cpu]); 451 + uasm_i_addiu(&p, t0, zero, 1 << vpe_id); 452 + UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop()); 453 + uasm_i_sw(&p, t0, 0, t1); 454 + } else { 455 + BUG(); 456 + } 445 457 uasm_build_label(&l, p, lbl_secondary_hang); 446 458 uasm_il_b(&p, &r, lbl_secondary_hang); 447 459 uasm_i_nop(&p);