Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Allow relocating exception vectors everywhere

Now the exception vectors for CPS systems are allocated on the fly
with memblock as well.

It will try to allocate from KSEG1 first, and then try to allocate
in low 4G if possible.

The main reset vector is now generated by uasm, to avoid tons
of patches to the code. Other vectors are copied to the location
later.

move the 64-bit fix into another patch
fix cache issue with mips_cps_core_entry
rewrite the patch to reduce the diff stat
move extern in header
use cache address for copying vector

gc: use the new macro CKSEG[01]ADDR_OR_64BIT()
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>

authored by

Jiaxun Yang and committed by
Thomas Bogendoerfer
5e9d13bd 3391b95c

+133 -66
+1
arch/mips/include/asm/mips-cm.h
··· 311 311 /* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */ 312 312 GCR_CX_ACCESSOR_RW(32, 0x020, reset_base) 313 313 #define CM_GCR_Cx_RESET_BASE_BEVEXCBASE GENMASK(31, 12) 314 + #define CM_GCR_Cx_RESET_BASE_MODE BIT(1) 314 315 315 316 /* GCR_Cx_ID - Identify the current core */ 316 317 GCR_CX_ACCESSOR_RO(32, 0x028, id)
+7 -2
arch/mips/include/asm/smp-cps.h
··· 24 24 25 25 extern struct core_boot_config *mips_cps_core_bootcfg; 26 26 27 - extern void mips_cps_core_entry(void); 27 + extern void mips_cps_core_boot(int cca, void __iomem *gcr_base); 28 28 extern void mips_cps_core_init(void); 29 29 30 30 extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe); ··· 32 32 extern void mips_cps_pm_save(void); 33 33 extern void mips_cps_pm_restore(void); 34 34 35 - extern void *mips_cps_core_entry_patch_end; 35 + extern void excep_tlbfill(void); 36 + extern void excep_xtlbfill(void); 37 + extern void excep_cache(void); 38 + extern void excep_genex(void); 39 + extern void excep_intex(void); 40 + extern void excep_ejtag(void); 36 41 37 42 #ifdef CONFIG_MIPS_CPS 38 43
+8 -40
arch/mips/kernel/cps-vec.S
··· 4 4 * Author: Paul Burton <paul.burton@mips.com> 5 5 */ 6 6 7 + #include <linux/init.h> 7 8 #include <asm/addrspace.h> 8 9 #include <asm/asm.h> 9 10 #include <asm/asm-offsets.h> ··· 83 82 .endm 84 83 85 84 86 - .balign 0x1000 87 - 88 - LEAF(mips_cps_core_entry) 89 - /* 90 - * These first several instructions will be patched by cps_smp_setup to load the 91 - * CCA to use into register s0 and GCR base address to register s1. 92 - */ 93 - .rept CPS_ENTRY_PATCH_INSNS 94 - nop 95 - .endr 96 - 97 - .global mips_cps_core_entry_patch_end 98 - mips_cps_core_entry_patch_end: 99 - 100 - /* Check whether we're here due to an NMI */ 101 - mfc0 k0, CP0_STATUS 102 - and k0, k0, ST0_NMI 103 - beqz k0, not_nmi 104 - nop 105 - 106 - /* This is an NMI */ 107 - PTR_LA k0, nmi_handler 108 - jr k0 109 - nop 110 - 111 - not_nmi: 112 - /* Setup Cause */ 113 - li t0, CAUSEF_IV 114 - mtc0 t0, CP0_CAUSE 115 - 116 - /* Setup Status */ 117 - li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS 118 - mtc0 t0, CP0_STATUS 85 + LEAF(mips_cps_core_boot) 86 + /* Save CCA and GCR base */ 87 + move s0, a0 88 + move s1, a1 119 89 120 90 /* We don't know how to do coherence setup on earlier ISA */ 121 91 #if MIPS_ISA_REV > 0 ··· 150 178 PTR_L sp, VPEBOOTCFG_SP(v1) 151 179 jr t1 152 180 nop 153 - END(mips_cps_core_entry) 181 + END(mips_cps_core_boot) 154 182 155 - .org 0x200 183 + __INIT 156 184 LEAF(excep_tlbfill) 157 185 DUMP_EXCEP("TLB Fill") 158 186 b . 159 187 nop 160 188 END(excep_tlbfill) 161 189 162 - .org 0x280 163 190 LEAF(excep_xtlbfill) 164 191 DUMP_EXCEP("XTLB Fill") 165 192 b . 166 193 nop 167 194 END(excep_xtlbfill) 168 195 169 - .org 0x300 170 196 LEAF(excep_cache) 171 197 DUMP_EXCEP("Cache") 172 198 b . 173 199 nop 174 200 END(excep_cache) 175 201 176 - .org 0x380 177 202 LEAF(excep_genex) 178 203 DUMP_EXCEP("General") 179 204 b . 180 205 nop 181 206 END(excep_genex) 182 207 183 - .org 0x400 184 208 LEAF(excep_intex) 185 209 DUMP_EXCEP("Interrupt") 186 210 b . 
187 211 nop 188 212 END(excep_intex) 189 213 190 - .org 0x480 191 214 LEAF(excep_ejtag) 192 215 PTR_LA k0, ejtag_debug_handler 193 216 jr k0 194 217 nop 195 218 END(excep_ejtag) 219 + __FINIT 196 220 197 221 LEAF(mips_cps_core_init) 198 222 #ifdef CONFIG_MIPS_MT_SMP
+117 -24
arch/mips/kernel/smp-cps.c
··· 7 7 #include <linux/cpu.h> 8 8 #include <linux/delay.h> 9 9 #include <linux/io.h> 10 + #include <linux/memblock.h> 10 11 #include <linux/sched/task_stack.h> 11 12 #include <linux/sched/hotplug.h> 12 13 #include <linux/slab.h> ··· 21 20 #include <asm/mipsregs.h> 22 21 #include <asm/pm-cps.h> 23 22 #include <asm/r4kcache.h> 23 + #include <asm/regdef.h> 24 24 #include <asm/smp.h> 25 25 #include <asm/smp-cps.h> 26 26 #include <asm/time.h> 27 27 #include <asm/uasm.h> 28 28 29 + #define BEV_VEC_SIZE 0x500 30 + #define BEV_VEC_ALIGN 0x1000 31 + 32 + enum label_id { 33 + label_not_nmi = 1, 34 + }; 35 + 36 + UASM_L_LA(_not_nmi) 37 + 29 38 static DECLARE_BITMAP(core_power, NR_CPUS); 39 + static uint32_t core_entry_reg; 40 + static phys_addr_t cps_vec_pa; 30 41 31 42 struct core_boot_config *mips_cps_core_bootcfg; 32 43 ··· 47 34 return min(smp_max_threads, mips_cps_numvps(cluster, core)); 48 35 } 49 36 37 + static void __init *mips_cps_build_core_entry(void *addr) 38 + { 39 + extern void (*nmi_handler)(void); 40 + u32 *p = addr; 41 + u32 val; 42 + struct uasm_label labels[2]; 43 + struct uasm_reloc relocs[2]; 44 + struct uasm_label *l = labels; 45 + struct uasm_reloc *r = relocs; 46 + 47 + memset(labels, 0, sizeof(labels)); 48 + memset(relocs, 0, sizeof(relocs)); 49 + 50 + uasm_i_mfc0(&p, GPR_K0, C0_STATUS); 51 + UASM_i_LA(&p, GPR_T9, ST0_NMI); 52 + uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9); 53 + 54 + uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi); 55 + uasm_i_nop(&p); 56 + UASM_i_LA(&p, GPR_K0, (long)&nmi_handler); 57 + 58 + uasm_l_not_nmi(&l, p); 59 + 60 + val = CAUSEF_IV; 61 + uasm_i_lui(&p, GPR_K0, val >> 16); 62 + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); 63 + uasm_i_mtc0(&p, GPR_K0, C0_CAUSE); 64 + val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64; 65 + uasm_i_lui(&p, GPR_K0, val >> 16); 66 + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); 67 + uasm_i_mtc0(&p, GPR_K0, C0_STATUS); 68 + uasm_i_ehb(&p); 69 + uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK); 70 + 
UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base); 71 + #if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT) 72 + UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot))); 73 + #else 74 + UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot))); 75 + #endif 76 + uasm_i_jr(&p, GPR_T9); 77 + uasm_i_nop(&p); 78 + 79 + uasm_resolve_relocs(relocs, labels); 80 + 81 + return p; 82 + } 83 + 84 + static int __init allocate_cps_vecs(void) 85 + { 86 + /* Try to allocate in KSEG1 first */ 87 + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 88 + 0x0, CSEGX_SIZE - 1); 89 + 90 + if (cps_vec_pa) 91 + core_entry_reg = CKSEG1ADDR(cps_vec_pa) & 92 + CM_GCR_Cx_RESET_BASE_BEVEXCBASE; 93 + 94 + if (!cps_vec_pa && mips_cm_is64) { 95 + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 96 + 0x0, SZ_4G - 1); 97 + if (cps_vec_pa) 98 + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) | 99 + CM_GCR_Cx_RESET_BASE_MODE; 100 + } 101 + 102 + if (!cps_vec_pa) 103 + return -ENOMEM; 104 + 105 + return 0; 106 + } 107 + 108 + static void __init setup_cps_vecs(void) 109 + { 110 + void *cps_vec; 111 + 112 + cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa); 113 + mips_cps_build_core_entry(cps_vec); 114 + 115 + memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80); 116 + memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80); 117 + memcpy(cps_vec + 0x300, &excep_cache, 0x80); 118 + memcpy(cps_vec + 0x380, &excep_genex, 0x80); 119 + memcpy(cps_vec + 0x400, &excep_intex, 0x80); 120 + memcpy(cps_vec + 0x480, &excep_ejtag, 0x80); 121 + 122 + /* Make sure no prefetched data in cache */ 123 + blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE); 124 + bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE); 125 + __sync(); 126 + } 127 + 50 128 static void __init cps_smp_setup(void) 51 129 { 52 130 unsigned int nclusters, ncores, nvpes, core_vpes; 53 - unsigned long core_entry; 54 131 int cl, c, v; 55 132 56 133 
/* Detect & record VPE topology */ ··· 197 94 /* Make core 0 coherent with everything */ 198 95 write_gcr_cl_coherence(0xff); 199 96 200 - if (mips_cm_revision() >= CM_REV_CM3) { 201 - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); 202 - write_gcr_bev_base(core_entry); 203 - } 97 + if (allocate_cps_vecs()) 98 + pr_err("Failed to allocate CPS vectors\n"); 99 + 100 + if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3) 101 + write_gcr_bev_base(core_entry_reg); 204 102 205 103 #ifdef CONFIG_MIPS_MT_FPAFF 206 104 /* If we have an FPU, enroll ourselves in the FPU-full mask */ ··· 214 110 { 215 111 unsigned ncores, core_vpes, c, cca; 216 112 bool cca_unsuitable, cores_limited; 217 - u32 *entry_code; 218 113 219 114 mips_mt_set_cpuoptions(); 115 + 116 + if (!core_entry_reg) { 117 + pr_err("core_entry address unsuitable, disabling smp-cps\n"); 118 + goto err_out; 119 + } 220 120 221 121 /* Detect whether the CCA is unsuited to multi-core SMP */ 222 122 cca = read_c0_config() & CONF_CM_CMASK; ··· 253 145 (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", 254 146 cpu_has_dc_aliases ? 
"dcache aliasing" : ""); 255 147 256 - /* 257 - * Patch the start of mips_cps_core_entry to provide: 258 - * 259 - * s0 = kseg0 CCA 260 - */ 261 - entry_code = (u32 *)&mips_cps_core_entry; 262 - uasm_i_addiu(&entry_code, 16, 0, cca); 263 - UASM_i_LA(&entry_code, 17, (long)mips_gcr_base); 264 - BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end); 265 - blast_dcache_range((unsigned long)&mips_cps_core_entry, 266 - (unsigned long)entry_code); 267 - bc_wback_inv((unsigned long)&mips_cps_core_entry, 268 - (void *)entry_code - (void *)&mips_cps_core_entry); 269 - __sync(); 148 + setup_cps_vecs(); 270 149 271 150 /* Allocate core boot configuration structs */ 272 151 ncores = mips_cps_numcores(0); ··· 308 213 mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 309 214 310 215 /* Set its reset vector */ 311 - write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); 216 + write_gcr_co_reset_base(core_entry_reg); 312 217 313 218 /* Ensure its coherency is disabled */ 314 219 write_gcr_co_coherence(0); ··· 385 290 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); 386 291 struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; 387 292 struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; 388 - unsigned long core_entry; 389 293 unsigned int remote; 390 294 int err; 391 295 ··· 408 314 409 315 if (cpu_has_vp) { 410 316 mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 411 - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); 412 - write_gcr_co_reset_base(core_entry); 317 + write_gcr_co_reset_base(core_entry_reg); 413 318 mips_cm_unlock_other(); 414 319 } 415 320