MIPS: pm-cps: add PM state entry code for CPS systems

This patch adds code to generate entry & exit code for the various low
power states available on systems based around the MIPS Coherent
Processing System architecture (i.e. those with a Coherence Manager, a
Global Interrupt Controller and, for >=CM2, a Cluster Power
Controller). The states supported are:

- Non-coherent wait. This state first leaves the coherent domain and
  then executes a regular MIPS wait instruction. Power savings come
  from the elimination of coherency interventions between the core and
  any other coherent requestors in the system.

- Clock gated. This state leaves the coherent domain and then gates
  the clock input to the core. This removes all dynamic power from the
  core, but leaves the core at the mercy of another core to restart its
  clock. Register state is preserved, but the core cannot service
  interrupts whilst its clock is gated.

- Power gated. This deepest state removes all power input to the core.
  All register state is lost and the core will restart execution from
  its boot exception vector (BEV) when another core powers it back up.
  Because register state is lost, this state requires cooperation with
  the CONFIG_MIPS_CPS SMP implementation in order for the core to exit
  the state successfully.

The code will detect which states are available on the current system
during boot & generate the entry/exit code for those states. This will
be used by cpuidle & hotplug implementations.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
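
As an illustration of how a consumer might drive this API (a sketch,
not part of this patch; the cpuidle wiring and the 1:1 mapping from
cpuidle state index to CPS state are assumptions):

/* Hypothetical cpuidle ->enter callback built on the pm-cps API. */
#include <linux/cpuidle.h>
#include <asm/pm-cps.h>

static int cps_idle_enter(struct cpuidle_device *dev,
                          struct cpuidle_driver *drv, int index)
{
        /* Assume index 0 is a plain wait & deeper indices map to CPS states */
        enum cps_pm_state state = CPS_PM_NC_WAIT + index - 1;
        int err;

        if (!cps_pm_support_state(state))
                return -EINVAL;

        err = cps_pm_enter_state(state);
        return err ?: index;
}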

809 insertions(+)

arch/mips/Kconfig (+3)
···
           no external assistance. It is safe to enable this when hardware
           support is unavailable.
 
+config MIPS_CPS_PM
+       bool
+
 config MIPS_GIC_IPI
        bool
arch/mips/include/asm/pm-cps.h (+51, new file)
···
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#ifndef __MIPS_ASM_PM_CPS_H__
#define __MIPS_ASM_PM_CPS_H__

/*
 * The CM & CPC can only handle coherence & power control on a per-core basis,
 * thus in an MT system the VPEs within each core are coupled and can only
 * enter or exit states requiring CM or CPC assistance in unison.
 */
#ifdef CONFIG_MIPS_MT
# define coupled_coherence cpu_has_mipsmt
#else
# define coupled_coherence 0
#endif

/* Enumeration of possible PM states */
enum cps_pm_state {
        CPS_PM_NC_WAIT,         /* MIPS wait instruction, non-coherent */
        CPS_PM_CLOCK_GATED,     /* Core clock gated */
        CPS_PM_POWER_GATED,     /* Core power gated */
        CPS_PM_STATE_COUNT,
};

/**
 * cps_pm_support_state - determine whether the system supports a PM state
 * @state: the state to test for support
 *
 * Returns true if the system supports the given state, otherwise false.
 */
extern bool cps_pm_support_state(enum cps_pm_state state);

/**
 * cps_pm_enter_state - enter a PM state
 * @state: the state to enter
 *
 * Enter the given PM state. If coupled_coherence is non-zero then it is
 * expected that this function be called at approximately the same time on
 * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno.
 */
extern int cps_pm_enter_state(enum cps_pm_state state);

#endif /* __MIPS_ASM_PM_CPS_H__ */
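
Taken together, these declarations suggest a simple capability probe;
the helper below is an illustrative sketch (the function is invented
here, not part of the patch):

#include <asm/pm-cps.h>

/* Pick the deepest PM state the system reports as supported. */
static enum cps_pm_state deepest_cps_state(void)
{
        if (cps_pm_support_state(CPS_PM_POWER_GATED))
                return CPS_PM_POWER_GATED;
        if (cps_pm_support_state(CPS_PM_CLOCK_GATED))
                return CPS_PM_CLOCK_GATED;
        return CPS_PM_NC_WAIT;
}

Note that when coupled_coherence is non-zero the kerneldoc above
requires every coupled VPE to call cps_pm_enter_state() at
approximately the same time; a per-CPU idle loop naturally provides
this, a lone caller does not.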
arch/mips/include/asm/smp-cps.h (+3)
···
 
 extern bool mips_cps_smp_in_use(void);
 
+extern void mips_cps_pm_save(void);
+extern void mips_cps_pm_restore(void);
+
 #else /* __ASSEMBLY__ */
 
 .extern mips_cps_bootcfg;
arch/mips/kernel/Makefile (+1)
···
 obj-$(CONFIG_MIPS_CPC)         += mips-cpc.o
 
 obj-$(CONFIG_CPU_PM)           += pm.o
+obj-$(CONFIG_MIPS_CPS_PM)      += pm-cps.o
 
 #
 # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
arch/mips/kernel/cps-vec.S (+35)
···
 #include <asm/cacheops.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
+#include <asm/pm.h>
 
 #define GCR_CL_COHERENCE_OFS   0x2008
 #define GCR_CL_ID_OFS          0x2028
···
        jr      ra
         nop
        END(mips_cps_boot_vpes)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
+       .macro  psstate dest
+       .set    push
+       .set    noat
+       lw      $1, TI_CPU(gp)
+       sll     $1, $1, LONGLOG
+       la      \dest, __per_cpu_offset
+       addu    $1, $1, \dest
+       lw      $1, 0($1)
+       la      \dest, cps_cpu_state
+       addu    \dest, \dest, $1
+       .set    pop
+       .endm
+
+LEAF(mips_cps_pm_save)
+       /* Save CPU state */
+       SUSPEND_SAVE_REGS
+       psstate t1
+       SUSPEND_SAVE_STATIC
+       jr      v0
+        nop
+       END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+       /* Restore CPU state */
+       psstate t1
+       RESUME_RESTORE_STATIC
+       RESUME_RESTORE_REGS_RETURN
+       END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
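
The psstate macro is the assembly form of a per-CPU address
computation; in C it corresponds roughly to the sketch below (the
wrapper function is illustrative only, not part of the patch):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/pm.h>

DECLARE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* &cps_cpu_state + __per_cpu_offset[cpu], as psstate computes */
static struct mips_static_suspend_state *cps_cpu_state_ptr(void)
{
        return &per_cpu(cps_cpu_state, raw_smp_processor_id());
}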
arch/mips/kernel/pm-cps.c (+716, new file)
···
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
                                  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32] __initdata;
static struct uasm_reloc relocs[32] __initdata;

/* CPU dependent sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
static unsigned stype_ordering;

enum mips_reg {
        zero, at, v0, v1, a0, a1, a2, a3,
        t0, t1, t2, t3, t4, t5, t6, t7,
        s0, s1, s2, s3, s4, s5, s6, s7,
        t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
        return test_bit(state, state_support);
}

static void coupled_barrier(atomic_t *a, unsigned online)
{
        /*
         * This function is effectively the same as
         * cpuidle_coupled_parallel_barrier, which can't be used here since
         * there's no cpuidle device.
         */

        if (!coupled_coherence)
                return;

        smp_mb__before_atomic_inc();
        atomic_inc(a);

        while (atomic_read(a) < online)
                cpu_relax();

        if (atomic_inc_return(a) == online * 2) {
                atomic_set(a, 0);
                return;
        }

        while (atomic_read(a) > online)
                cpu_relax();
}

int cps_pm_enter_state(enum cps_pm_state state)
{
        unsigned cpu = smp_processor_id();
        unsigned core = current_cpu_data.core;
        unsigned online, left;
        cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
        u32 *core_ready_count, *nc_core_ready_count;
        void *nc_addr;
        cps_nc_entry_fn entry;
        struct core_boot_config *core_cfg;
        struct vpe_boot_config *vpe_cfg;

        /* Check that there is an entry function for this state */
        entry = per_cpu(nc_asm_enter, core)[state];
        if (!entry)
                return -EINVAL;

        /* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
        if (cpu_online(cpu)) {
                cpumask_and(coupled_mask, cpu_online_mask,
                            &cpu_sibling_map[cpu]);
                online = cpumask_weight(coupled_mask);
                cpumask_clear_cpu(cpu, coupled_mask);
        } else
#endif
        {
                cpumask_clear(coupled_mask);
                online = 1;
        }

        /* Setup the VPE to run mips_cps_pm_restore when started again */
        if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
                core_cfg = &mips_cps_core_bootcfg[core];
                vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
                vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
                vpe_cfg->gp = (unsigned long)current_thread_info();
                vpe_cfg->sp = 0;
        }

        /* Indicate that this CPU might not be coherent */
        cpumask_clear_cpu(cpu, &cpu_coherent_mask);
        smp_mb__after_clear_bit();

        /* Create a non-coherent mapping of the core ready_count */
        core_ready_count = per_cpu(ready_count, core);
        nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
                                   (unsigned long)core_ready_count);
        nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
        nc_core_ready_count = nc_addr;

        /* Ensure ready_count is zero-initialised before the assembly runs */
        ACCESS_ONCE(*nc_core_ready_count) = 0;
        coupled_barrier(&per_cpu(pm_barrier, core), online);

        /* Run the generated entry code */
        left = entry(online, nc_core_ready_count);

        /* Remove the non-coherent mapping of ready_count */
        kunmap_noncoherent();

        /* Indicate that this CPU is definitely coherent */
        cpumask_set_cpu(cpu, &cpu_coherent_mask);

        /*
         * If this VPE is the first to leave the non-coherent wait state then
         * it needs to wake up any coupled VPEs still running their wait
         * instruction so that they return to cpuidle, which can then complete
         * coordination between the coupled VPEs & provide the governor with
         * a chance to reflect on the length of time the VPEs were in the
         * idle state.
         */
        if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
                arch_send_call_function_ipi_mask(coupled_mask);

        return 0;
}

static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
                                         struct uasm_reloc **pr,
                                         const struct cache_desc *cache,
                                         unsigned op, int lbl)
{
        unsigned cache_size = cache->ways << cache->waybit;
        unsigned i;
        const unsigned unroll_lines = 32;

        /* If the cache isn't present this function has it easy */
        if (cache->flags & MIPS_CACHE_NOT_PRESENT)
                return;

        /* Load base address */
        UASM_i_LA(pp, t0, (long)CKSEG0);

        /* Calculate end address */
        if (cache_size < 0x8000)
                uasm_i_addiu(pp, t1, t0, cache_size);
        else
                UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

        /* Start of cache op loop */
        uasm_build_label(pl, *pp, lbl);

        /* Generate the cache ops */
        for (i = 0; i < unroll_lines; i++)
                uasm_i_cache(pp, op, i * cache->linesz, t0);

        /* Update the base address */
        uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

        /* Loop if we haven't reached the end address yet */
        uasm_il_bne(pp, pr, t0, t1, lbl);
        uasm_i_nop(pp);
}

static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
                                    struct uasm_reloc **pr,
                                    const struct cpuinfo_mips *cpu_info,
                                    int lbl)
{
        unsigned i, fsb_size = 8;
        unsigned num_loads = (fsb_size * 3) / 2;
        unsigned line_stride = 2;
        unsigned line_size = cpu_info->dcache.linesz;
        unsigned perf_counter, perf_event;
        unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

        /*
         * Determine whether this CPU requires an FSB flush, and if so which
         * performance counter/event reflect stalls due to a full FSB.
         */
        switch (__get_cpu_type(cpu_info->cputype)) {
        case CPU_INTERAPTIV:
                perf_counter = 1;
                perf_event = 51;
                break;

        case CPU_PROAPTIV:
                /* Newer proAptiv cores don't require this workaround */
                if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
                        return 0;

                /* On older ones it's unavailable */
                return -1;

        /* CPUs which do not require the workaround */
        case CPU_P5600:
                return 0;

        default:
                WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
                return -1;
        }

        /*
         * Ensure that the fill/store buffer (FSB) is not holding the results
         * of a prefetch, since if it is then the CPC sequencer may become
         * stuck in the D3 (ClrBus) state whilst entering a low power state.
         */

        /* Preserve perf counter setup */
        uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Setup perf counter to count FSB full pipeline stalls */
        uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
        uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        /* Base address for loads */
        UASM_i_LA(pp, t0, (long)CKSEG0);

        /* Start of clear loop */
        uasm_build_label(pl, *pp, lbl);

        /* Perform some loads to fill the FSB */
        for (i = 0; i < num_loads; i++)
                uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

        /*
         * Invalidate the new D-cache entries so that the cache will need
         * refilling (via the FSB) if the loop is executed again.
         */
        for (i = 0; i < num_loads; i++) {
                uasm_i_cache(pp, Hit_Invalidate_D,
                             i * line_size * line_stride, t0);
                uasm_i_cache(pp, Hit_Writeback_Inv_SD,
                             i * line_size * line_stride, t0);
        }

        /* Completion barrier */
        uasm_i_sync(pp, stype_memory);
        uasm_i_ehb(pp);

        /* Check whether the pipeline stalled due to the FSB being full */
        uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Loop if it didn't */
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);

        /* Restore perf counter 1. The count may well now be wrong... */
        uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        return 0;
}

static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
                                       struct uasm_reloc **pr,
                                       unsigned r_addr, int lbl)
{
        uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
        uasm_build_label(pl, *pp, lbl);
        uasm_i_ll(pp, t1, 0, r_addr);
        uasm_i_or(pp, t1, t1, t0);
        uasm_i_sc(pp, t1, 0, r_addr);
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);
}

static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *buf, *p;
        const unsigned r_online = a0;
        const unsigned r_nc_count = a1;
        const unsigned r_pcohctl = t7;
        const unsigned max_instrs = 256;
        unsigned cpc_cmd;
        int err;
        enum {
                lbl_incready = 1,
                lbl_poll_cont,
                lbl_secondary_hang,
                lbl_disable_coherence,
                lbl_flush_fsb,
                lbl_invicache,
                lbl_flushdcache,
                lbl_hang,
                lbl_set_cont,
                lbl_secondary_cont,
                lbl_decready,
        };

        /* Allocate a buffer to hold the generated code */
        p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
        if (!buf)
                return NULL;

        /* Clear labels & relocs ready for (re)use */
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
                /*
                 * Save CPU state. Note the non-standard calling convention
                 * with the return address placed in v0 to avoid clobbering
                 * the ra register before it is saved.
                 */
                UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
                uasm_i_jalr(&p, v0, t0);
                uasm_i_nop(&p);
        }

        /*
         * Load addresses of required CM & CPC registers. This is done early
         * because they're needed in both the enable & disable coherence steps
         * but in the coupled case the enable step will only run on one VPE.
         */
        UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

        if (coupled_coherence) {
                /* Increment ready_count */
                uasm_i_sync(&p, stype_ordering);
                uasm_build_label(&l, p, lbl_incready);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, 1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_incready);
                uasm_i_addiu(&p, t1, t1, 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);

                /*
                 * If this is the last VPE to become ready for non-coherence
                 * then it should branch below.
                 */
                uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
                uasm_i_nop(&p);

                if (state < CPS_PM_POWER_GATED) {
                        /*
                         * Otherwise this is not the last VPE to become ready
                         * for non-coherence. It needs to wait until coherence
                         * has been disabled before proceeding, which it will do
                         * by polling for the top bit of ready_count being set.
                         */
                        uasm_i_addiu(&p, t1, zero, -1);
                        uasm_build_label(&l, p, lbl_poll_cont);
                        uasm_i_lw(&p, t0, 0, r_nc_count);
                        uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
                        uasm_i_ehb(&p);
                        uasm_i_yield(&p, zero, t1);
                        uasm_il_b(&p, &r, lbl_poll_cont);
                        uasm_i_nop(&p);
                } else {
                        /*
                         * The core will lose power & this VPE will not continue
                         * so it can simply halt here.
                         */
                        uasm_i_addiu(&p, t0, zero, TCHALT_H);
                        uasm_i_mtc0(&p, t0, 2, 4);
                        uasm_build_label(&l, p, lbl_secondary_hang);
                        uasm_il_b(&p, &r, lbl_secondary_hang);
                        uasm_i_nop(&p);
                }
        }

        /*
         * This is the point of no return - this VPE will now proceed to
         * disable coherence. At this point we *must* be sure that no other
         * VPE within the core will interfere with the L1 dcache.
         */
        uasm_build_label(&l, p, lbl_disable_coherence);

        /* Invalidate the L1 icache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
                              Index_Invalidate_I, lbl_invicache);

        /* Writeback & invalidate the L1 dcache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
                              Index_Writeback_Inv_D, lbl_flushdcache);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        /*
         * Disable all but self interventions. The load from COHCTL is defined
         * by the interAptiv & proAptiv SUMs as ensuring that the operation
         * resulting from the preceding store is complete.
         */
        uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Sync to ensure previous interventions are complete */
        uasm_i_sync(&p, stype_intervention);
        uasm_i_ehb(&p);

        /* Disable coherence */
        uasm_i_sw(&p, zero, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        if (state >= CPS_PM_CLOCK_GATED) {
                err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
                                        lbl_flush_fsb);
                if (err)
                        goto out_err;

                /* Determine the CPC command to issue */
                switch (state) {
                case CPS_PM_CLOCK_GATED:
                        cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
                        break;
                case CPS_PM_POWER_GATED:
                        cpc_cmd = CPC_Cx_CMD_PWRDOWN;
                        break;
                default:
                        BUG();
                        goto out_err;
                }

                /* Issue the CPC command */
                UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
                uasm_i_addiu(&p, t1, zero, cpc_cmd);
                uasm_i_sw(&p, t1, 0, t0);

                if (state == CPS_PM_POWER_GATED) {
                        /* If anything goes wrong just hang */
                        uasm_build_label(&l, p, lbl_hang);
                        uasm_il_b(&p, &r, lbl_hang);
                        uasm_i_nop(&p);

                        /*
                         * There's no point generating more code, the core is
                         * powered down & if powered back up will run from the
                         * reset vector not from here.
                         */
                        goto gen_done;
                }

                /* Completion barrier */
                uasm_i_sync(&p, stype_memory);
                uasm_i_ehb(&p);
        }

        if (state == CPS_PM_NC_WAIT) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                if (coupled_coherence)
                        cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
                                            lbl_set_cont);

                /*
                 * VPEs which did not disable coherence will continue
                 * executing, after coherence has been disabled, from this
                 * point.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Now perform our wait */
                uasm_i_wait(&p, 0);
        }

        /*
         * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
         * will run this. The first will actually re-enable coherence & the
         * rest will just be performing a rather unusual nop.
         */
        uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
                /* Decrement ready_count */
                uasm_build_label(&l, p, lbl_decready);
                uasm_i_sync(&p, stype_ordering);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, -1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_decready);
                uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

                /*
                 * This core will be reliant upon another core sending a
                 * power-up command to the CPC in order to resume operation.
                 * Thus an arbitrary VPE can't trigger the core leaving the
                 * idle state and the one that disables coherence might as well
                 * be the one to re-enable it. The rest will continue from here
                 * after that has been done.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        /* The core is coherent, time to return to C code */
        uasm_i_jr(&p, ra);
        uasm_i_nop(&p);

gen_done:
        /* Ensure the code didn't exceed the resources allocated for it */
        BUG_ON((p - buf) > max_instrs);
        BUG_ON((l - labels) > ARRAY_SIZE(labels));
        BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

        /* Patch branch offsets */
        uasm_resolve_relocs(relocs, labels);

        /* Flush the icache */
        local_flush_icache_range((unsigned long)buf, (unsigned long)p);

        return buf;
out_err:
        kfree(buf);
        return NULL;
}

static int __init cps_gen_core_entries(unsigned cpu)
{
        enum cps_pm_state state;
        unsigned core = cpu_data[cpu].core;
        unsigned dlinesz = cpu_data[cpu].dcache.linesz;
        void *entry_fn, *core_rc;

        for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
                if (per_cpu(nc_asm_enter, core)[state])
                        continue;
                if (!test_bit(state, state_support))
                        continue;

                entry_fn = cps_gen_entry_code(cpu, state);
                if (!entry_fn) {
                        pr_err("Failed to generate core %u state %u entry\n",
                               core, state);
                        clear_bit(state, state_support);
                }

                per_cpu(nc_asm_enter, core)[state] = entry_fn;
        }

        if (!per_cpu(ready_count, core)) {
                core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
                if (!core_rc) {
                        pr_err("Failed to allocate core %u ready_count\n",
                               core);
                        return -ENOMEM;
                }
                per_cpu(ready_count_alloc, core) = core_rc;

                /* Ensure ready_count is aligned to a cacheline boundary */
                core_rc += dlinesz - 1;
                core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
                per_cpu(ready_count, core) = core_rc;
        }

        return 0;
}

static int __init cps_pm_init(void)
{
        unsigned cpu;
        int err;

        /* Detect appropriate sync types for the system */
        switch (current_cpu_data.cputype) {
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_M5150:
        case CPU_P5600:
                stype_intervention = 0x2;
                stype_memory = 0x3;
                stype_ordering = 0x10;
                break;

        default:
                pr_warn("Power management is using heavyweight sync 0\n");
        }

        /* A CM is required for all non-coherent states */
        if (!mips_cm_present()) {
                pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
                goto out;
        }

        /*
         * If interrupts were enabled whilst running a wait instruction on a
         * non-coherent core then the VPE may end up processing interrupts
         * whilst non-coherent. That would be bad.
         */
        if (cpu_wait == r4k_wait_irqoff)
                set_bit(CPS_PM_NC_WAIT, state_support);
        else
                pr_warn("pm-cps: non-coherent wait unavailable\n");

        /* Detect whether a CPC is present */
        if (mips_cpc_present()) {
                /* Detect whether clock gating is implemented */
                if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
                        set_bit(CPS_PM_CLOCK_GATED, state_support);
                else
                        pr_warn("pm-cps: CPC does not support clock gating\n");

                /* Power gating is available with CPS SMP & any CPC */
                if (mips_cps_smp_in_use())
                        set_bit(CPS_PM_POWER_GATED, state_support);
                else
                        pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
        } else {
                pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
        }

        for_each_present_cpu(cpu) {
                err = cps_gen_core_entries(cpu);
                if (err)
                        return err;
        }
out:
        return 0;
}
arch_initcall(cps_pm_init);
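
The two-phase counting in coupled_barrier() is easier to see in a
standalone analogue. Below is a minimal userspace sketch using C11
atomics and pthreads (illustrative only, not kernel code); the second
counting phase stops a fast thread from re-entering the barrier before
the counter has been reset.

/* Build with: cc -pthread barrier_demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ONLINE 4

static atomic_int barrier_count;

static void coupled_barrier_demo(atomic_int *a, int online)
{
        /* Phase 1: count up to online, i.e. wait for everyone to arrive */
        atomic_fetch_add(a, 1);
        while (atomic_load(a) < online)
                ;

        /* Phase 2: count up to online * 2; the last thread resets to 0 */
        if (atomic_fetch_add(a, 1) + 1 == online * 2) {
                atomic_store(a, 0);
                return;
        }
        while (atomic_load(a) > online)
                ;
}

static void *worker(void *arg)
{
        coupled_barrier_demo(&barrier_count, ONLINE);
        printf("thread %ld passed the barrier\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t threads[ONLINE];
        long i;

        for (i = 0; i < ONLINE; i++)
                pthread_create(&threads[i], NULL, worker, (void *)i);
        for (i = 0; i < ONLINE; i++)
                pthread_join(threads[i], NULL);
        return 0;
}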