Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Netlogic: Support for multi-chip configuration

Up to 4 Netlogic XLP SoCs can be connected over ICI links to form a
coherent multi-node system. Each SoC has its own set of on-chip
devices including PIC. To support this, add a per-SoC structure and
use it for the PIC and SYS block addresses instead of using global
variables.

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Patchwork: http://patchwork.linux-mips.org/patch/4469
Signed-off-by: John Crispin <blogic@openwrt.org>

authored by

Jayachandran C and committed by
John Crispin
77ae798f 2a37b1ae

+176 -86
+37 -5
arch/mips/include/asm/netlogic/common.h
··· 46 46 47 47 #ifndef __ASSEMBLY__ 48 48 #include <linux/cpumask.h> 49 + #include <linux/spinlock.h> 50 + #include <asm/irq.h> 49 51 50 52 struct irq_desc; 51 - extern struct plat_smp_ops nlm_smp_ops; 52 - extern char nlm_reset_entry[], nlm_reset_entry_end[]; 53 53 void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); 54 54 void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); 55 55 void nlm_smp_irq_init(void); ··· 70 70 * Misc. 71 71 */ 72 72 unsigned int nlm_get_cpu_frequency(void); 73 + void nlm_node_init(int node); 74 + extern struct plat_smp_ops nlm_smp_ops; 75 + extern char nlm_reset_entry[], nlm_reset_entry_end[]; 73 76 74 - extern unsigned long nlm_common_ebase; 75 - extern int nlm_threads_per_core; 76 - extern uint32_t nlm_coremask; 77 + extern unsigned int nlm_threads_per_core; 77 78 extern cpumask_t nlm_cpumask; 79 + 80 + struct nlm_soc_info { 81 + unsigned long coremask; /* cores enabled on the soc */ 82 + unsigned long ebase; 83 + uint64_t irqmask; 84 + uint64_t sysbase; /* only for XLP */ 85 + uint64_t picbase; 86 + spinlock_t piclock; 87 + }; 88 + 89 + #define NLM_CORES_PER_NODE 8 90 + #define NLM_THREADS_PER_CORE 4 91 + #define NLM_CPUS_PER_NODE (NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE) 92 + #define nlm_get_node(i) (&nlm_nodes[i]) 93 + #define NLM_NR_NODES 1 94 + #define nlm_current_node() (&nlm_nodes[0]) 95 + 96 + struct irq_data; 97 + uint64_t nlm_pci_irqmask(int node); 98 + void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *)); 99 + 100 + /* 101 + * The NR_IRQs is divided between nodes, each of them has a separate irq space 102 + */ 103 + static inline int nlm_irq_to_xirq(int node, int irq) 104 + { 105 + return node * NR_IRQS / NLM_NR_NODES + irq; 106 + } 107 + 108 + extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 109 + extern int nlm_cpu_ready[]; 78 110 #endif 79 111 #endif /* _NETLOGIC_COMMON_H_ */
+5
arch/mips/include/asm/netlogic/mips-extns.h
··· 73 73 return __read_32bit_c0_register($15, 1) & 0x3ff; 74 74 } 75 75 76 + static inline int nlm_nodeid(void) 77 + { 78 + return (__read_32bit_c0_register($15, 1) >> 5) & 0x3; 79 + } 80 + 76 81 #endif /*_ASM_NLM_MIPS_EXTS_H */
-1
arch/mips/include/asm/netlogic/xlp-hal/pic.h
··· 381 381 nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt); 382 382 } 383 383 384 - extern uint64_t nlm_pic_base; 385 384 int nlm_irq_to_irt(int irq); 386 385 int nlm_irt_to_irq(int irt); 387 386
-1
arch/mips/include/asm/netlogic/xlp-hal/sys.h
··· 124 124 #define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node)) 125 125 #define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ) 126 126 127 - extern uint64_t nlm_sys_base; 128 127 #endif 129 128 #endif
-2
arch/mips/include/asm/netlogic/xlr/pic.h
··· 258 258 nlm_write_reg(base, PIC_IRT_1(irt), 259 259 (1 << 30) | (1 << 6) | irq); 260 260 } 261 - 262 - extern uint64_t nlm_pic_base; 263 261 #endif 264 262 #endif /* _ASM_NLM_XLR_PIC_H */
+36 -19
arch/mips/netlogic/common/irq.c
··· 70 70 */ 71 71 72 72 /* Globals */ 73 - static uint64_t nlm_irq_mask; 74 - static DEFINE_SPINLOCK(nlm_pic_lock); 75 - 76 73 static void xlp_pic_enable(struct irq_data *d) 77 74 { 78 75 unsigned long flags; 76 + struct nlm_soc_info *nodep; 79 77 int irt; 80 78 79 + nodep = nlm_current_node(); 81 80 irt = nlm_irq_to_irt(d->irq); 82 81 if (irt == -1) 83 82 return; 84 - spin_lock_irqsave(&nlm_pic_lock, flags); 85 - nlm_pic_enable_irt(nlm_pic_base, irt); 86 - spin_unlock_irqrestore(&nlm_pic_lock, flags); 83 + spin_lock_irqsave(&nodep->piclock, flags); 84 + nlm_pic_enable_irt(nodep->picbase, irt); 85 + spin_unlock_irqrestore(&nodep->piclock, flags); 87 86 } 88 87 89 88 static void xlp_pic_disable(struct irq_data *d) 90 89 { 90 + struct nlm_soc_info *nodep; 91 91 unsigned long flags; 92 92 int irt; 93 93 94 + nodep = nlm_current_node(); 94 95 irt = nlm_irq_to_irt(d->irq); 95 96 if (irt == -1) 96 97 return; 97 - spin_lock_irqsave(&nlm_pic_lock, flags); 98 - nlm_pic_disable_irt(nlm_pic_base, irt); 99 - spin_unlock_irqrestore(&nlm_pic_lock, flags); 98 + spin_lock_irqsave(&nodep->piclock, flags); 99 + nlm_pic_disable_irt(nodep->picbase, irt); 100 + spin_unlock_irqrestore(&nodep->piclock, flags); 100 101 } 101 102 102 103 static void xlp_pic_mask_ack(struct irq_data *d) ··· 110 109 static void xlp_pic_unmask(struct irq_data *d) 111 110 { 112 111 void *hd = irq_data_get_irq_handler_data(d); 112 + struct nlm_soc_info *nodep; 113 113 int irt; 114 114 115 + nodep = nlm_current_node(); 115 116 irt = nlm_irq_to_irt(d->irq); 116 117 if (irt == -1) 117 118 return; ··· 123 120 extra_ack(d); 124 121 } 125 122 /* Ack is a single write, no need to lock */ 126 - nlm_pic_ack(nlm_pic_base, irt); 123 + nlm_pic_ack(nodep->picbase, irt); 127 124 } 128 125 129 126 static struct irq_chip xlp_pic = { ··· 180 177 void __init init_nlm_common_irqs(void) 181 178 { 182 179 int i, irq, irt; 180 + uint64_t irqmask; 181 + struct nlm_soc_info *nodep; 183 182 183 + nodep = nlm_current_node(); 184 + 
irqmask = (1ULL << IRQ_TIMER); 184 185 for (i = 0; i < PIC_IRT_FIRST_IRQ; i++) 185 186 irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq); 186 187 ··· 196 189 nlm_smp_function_ipi_handler); 197 190 irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr, 198 191 nlm_smp_resched_ipi_handler); 199 - nlm_irq_mask |= 192 + irqmask |= 200 193 ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE)); 201 194 #endif 202 195 ··· 204 197 irt = nlm_irq_to_irt(irq); 205 198 if (irt == -1) 206 199 continue; 207 - nlm_irq_mask |= (1ULL << irq); 208 - nlm_pic_init_irt(nlm_pic_base, irt, irq, 0); 200 + irqmask |= (1ULL << irq); 201 + nlm_pic_init_irt(nodep->picbase, irt, irq, 0); 209 202 } 210 203 211 - nlm_irq_mask |= (1ULL << IRQ_TIMER); 204 + nodep->irqmask = irqmask; 212 205 } 213 206 214 207 void __init arch_init_irq(void) ··· 216 209 /* Initialize the irq descriptors */ 217 210 init_nlm_common_irqs(); 218 211 219 - write_c0_eimr(nlm_irq_mask); 212 + write_c0_eimr(nlm_current_node()->irqmask); 220 213 } 221 214 222 215 void __cpuinit nlm_smp_irq_init(void) 223 216 { 224 217 /* set interrupt mask for non-zero cpus */ 225 - write_c0_eimr(nlm_irq_mask); 218 + write_c0_eimr(nlm_current_node()->irqmask); 226 219 } 227 220 228 221 asmlinkage void plat_irq_dispatch(void) 229 222 { 230 223 uint64_t eirr; 231 - int i; 224 + int i, node; 232 225 226 + node = nlm_nodeid(); 233 227 eirr = read_c0_eirr() & read_c0_eimr(); 234 228 if (eirr & (1 << IRQ_TIMER)) { 235 229 do_IRQ(IRQ_TIMER); 236 230 return; 237 231 } 238 - 232 + #ifdef CONFIG_SMP 233 + if (eirr & IRQ_IPI_SMP_FUNCTION) { 234 + do_IRQ(IRQ_IPI_SMP_FUNCTION); 235 + return; 236 + } 237 + if (eirr & IRQ_IPI_SMP_RESCHEDULE) { 238 + do_IRQ(IRQ_IPI_SMP_RESCHEDULE); 239 + return; 240 + } 241 + #endif 239 242 i = __ilog2_u64(eirr); 240 243 if (i == -1) 241 244 return; 242 245 243 - do_IRQ(i); 246 + do_IRQ(nlm_irq_to_xirq(node, i)); 244 247 }
+29 -18
arch/mips/netlogic/common/smp.c
··· 59 59 60 60 void nlm_send_ipi_single(int logical_cpu, unsigned int action) 61 61 { 62 - int cpu = cpu_logical_map(logical_cpu); 62 + int cpu, node; 63 + uint64_t picbase; 64 + 65 + cpu = cpu_logical_map(logical_cpu); 66 + node = cpu / NLM_CPUS_PER_NODE; 67 + picbase = nlm_get_node(node)->picbase; 63 68 64 69 if (action & SMP_CALL_FUNCTION) 65 - nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0); 70 + nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0); 66 71 if (action & SMP_RESCHEDULE_YOURSELF) 67 - nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0); 72 + nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0); 68 73 } 69 74 70 75 void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) ··· 101 96 void nlm_early_init_secondary(int cpu) 102 97 { 103 98 change_c0_config(CONF_CM_CMASK, 0x3); 104 - write_c0_ebase((uint32_t)nlm_common_ebase); 105 99 #ifdef CONFIG_CPU_XLP 106 - if (cpu % 4 == 0) 100 + /* mmu init, once per core */ 101 + if (cpu % NLM_THREADS_PER_CORE == 0) 107 102 xlp_mmu_init(); 108 103 #endif 104 + write_c0_ebase(nlm_current_node()->ebase); 109 105 } 110 106 111 107 /* ··· 114 108 */ 115 109 static void __cpuinit nlm_init_secondary(void) 116 110 { 117 - current_cpu_data.core = hard_smp_processor_id() / 4; 111 + current_cpu_data.core = hard_smp_processor_id() / NLM_THREADS_PER_CORE; 118 112 nlm_smp_irq_init(); 119 113 } 120 114 ··· 148 142 149 143 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) 150 144 { 151 - unsigned long gp = (unsigned long)task_thread_info(idle); 152 - unsigned long sp = (unsigned long)__KSTK_TOS(idle); 153 - int cpu = cpu_logical_map(logical_cpu); 145 + int cpu, node; 154 146 155 - nlm_next_sp = sp; 156 - nlm_next_gp = gp; 147 + cpu = cpu_logical_map(logical_cpu); 148 + node = cpu / NLM_CPUS_PER_NODE; 149 + nlm_next_sp = (unsigned long)__KSTK_TOS(idle); 150 + nlm_next_gp = (unsigned long)task_thread_info(idle); 157 151 158 - /* barrier */ 152 + /* barrier for sp/gp 
store above */ 159 153 __sync(); 160 - nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1); 154 + nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */ 161 155 } 162 156 163 157 void __init nlm_smp_setup(void) 164 158 { 165 159 unsigned int boot_cpu; 166 - int num_cpus, i; 160 + int num_cpus, i, ncore; 167 161 168 162 boot_cpu = hard_smp_processor_id(); 169 163 cpumask_clear(&phys_cpu_present_map); ··· 188 182 } 189 183 } 190 184 185 + /* check with the cores we have worken up */ 186 + for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) 187 + ncore += hweight32(nlm_get_node(i)->coremask); 188 + 191 189 pr_info("Phys CPU present map: %lx, possible map %lx\n", 192 190 (unsigned long)cpumask_bits(&phys_cpu_present_map)[0], 193 191 (unsigned long)cpumask_bits(cpu_possible_mask)[0]); 194 192 195 - pr_info("Detected %i Slave CPU(s)\n", num_cpus); 193 + pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore, 194 + nlm_threads_per_core, num_cpus); 196 195 nlm_set_nmi_handler(nlm_boot_secondary_cpus); 197 196 } 198 197 ··· 207 196 int threadmode, i, j; 208 197 209 198 core0_thr_mask = 0; 210 - for (i = 0; i < 4; i++) 199 + for (i = 0; i < NLM_THREADS_PER_CORE; i++) 211 200 if (cpumask_test_cpu(i, wakeup_mask)) 212 201 core0_thr_mask |= (1 << i); 213 202 switch (core0_thr_mask) { ··· 228 217 } 229 218 230 219 /* Verify other cores CPU masks */ 231 - for (i = 0; i < NR_CPUS; i += 4) { 220 + for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) { 232 221 core_thr_mask = 0; 233 - for (j = 0; j < 4; j++) 222 + for (j = 0; j < NLM_THREADS_PER_CORE; j++) 234 223 if (cpumask_test_cpu(i + j, wakeup_mask)) 235 224 core_thr_mask |= (1 << j); 236 225 if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
+15 -14
arch/mips/netlogic/xlp/nlm_hal.c
··· 40 40 #include <asm/mipsregs.h> 41 41 #include <asm/time.h> 42 42 43 + #include <asm/netlogic/common.h> 43 44 #include <asm/netlogic/haldefs.h> 44 45 #include <asm/netlogic/xlp-hal/iomap.h> 45 46 #include <asm/netlogic/xlp-hal/xlp.h> 46 47 #include <asm/netlogic/xlp-hal/pic.h> 47 48 #include <asm/netlogic/xlp-hal/sys.h> 48 49 49 - /* These addresses are computed by the nlm_hal_init() */ 50 - uint64_t nlm_io_base; 51 - uint64_t nlm_sys_base; 52 - uint64_t nlm_pic_base; 53 - 54 50 /* Main initialization */ 55 - void nlm_hal_init(void) 51 + void nlm_node_init(int node) 56 52 { 57 - nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE); 58 - nlm_sys_base = nlm_get_sys_regbase(0); /* node 0 */ 59 - nlm_pic_base = nlm_get_pic_regbase(0); /* node 0 */ 53 + struct nlm_soc_info *nodep; 54 + 55 + nodep = nlm_get_node(node); 56 + nodep->sysbase = nlm_get_sys_regbase(node); 57 + nodep->picbase = nlm_get_pic_regbase(node); 58 + nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1)); 59 + spin_lock_init(&nodep->piclock); 60 60 } 61 61 62 62 int nlm_irq_to_irt(int irq) ··· 138 138 } 139 139 } 140 140 141 - unsigned int nlm_get_core_frequency(int core) 141 + unsigned int nlm_get_core_frequency(int node, int core) 142 142 { 143 143 unsigned int pll_divf, pll_divr, dfs_div, ext_div; 144 144 unsigned int rstval, dfsval, denom; 145 - uint64_t num; 145 + uint64_t num, sysbase; 146 146 147 - rstval = nlm_read_sys_reg(nlm_sys_base, SYS_POWER_ON_RESET_CFG); 148 - dfsval = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIV_VALUE); 147 + sysbase = nlm_get_node(node)->sysbase; 148 + rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG); 149 + dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE); 149 150 pll_divf = ((rstval >> 10) & 0x7f) + 1; 150 151 pll_divr = ((rstval >> 8) & 0x3) + 1; 151 152 ext_div = ((rstval >> 30) & 0x3) + 1; ··· 160 159 161 160 unsigned int nlm_get_cpu_frequency(void) 162 161 { 163 - return nlm_get_core_frequency(0); 162 + return nlm_get_core_frequency(0, 0); 164 
163 }
+8 -9
arch/mips/netlogic/xlp/setup.c
··· 52 52 #include <asm/netlogic/xlp-hal/xlp.h> 53 53 #include <asm/netlogic/xlp-hal/sys.h> 54 54 55 - unsigned long nlm_common_ebase = 0x0; 56 - 57 - /* default to uniprocessor */ 58 - uint32_t nlm_coremask = 1; 55 + uint64_t nlm_io_base; 56 + struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 59 57 cpumask_t nlm_cpumask = CPU_MASK_CPU0; 60 - int nlm_threads_per_core = 1; 58 + unsigned int nlm_threads_per_core; 61 59 extern u32 __dtb_start[]; 62 60 63 61 static void nlm_linux_exit(void) 64 62 { 65 - nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1); 63 + uint64_t sysbase = nlm_get_node(0)->sysbase; 64 + 65 + nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1); 66 66 for ( ; ; ) 67 67 cpu_wait(); 68 68 } ··· 110 110 111 111 void __init prom_init(void) 112 112 { 113 + nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE); 113 114 xlp_mmu_init(); 114 - nlm_hal_init(); 115 - 116 - nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); 115 + nlm_node_init(0); 117 116 118 117 #ifdef CONFIG_SMP 119 118 cpumask_setall(&nlm_cpumask);
+14 -8
arch/mips/netlogic/xlp/wakeup.c
··· 79 79 80 80 static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) 81 81 { 82 - uint64_t syspcibase, sysbase; 82 + struct nlm_soc_info *nodep; 83 + uint64_t syspcibase; 83 84 uint32_t syscoremask; 84 - int core, n; 85 + int core, n, cpu; 85 86 86 - for (n = 0; n < 4; n++) { 87 + for (n = 0; n < NLM_NR_NODES; n++) { 87 88 syspcibase = nlm_get_sys_pcibase(n); 88 89 if (nlm_read_reg(syspcibase, 0) == 0xffffffff) 89 90 break; 90 91 91 92 /* read cores in reset from SYS and account for boot cpu */ 92 - sysbase = nlm_get_sys_regbase(n); 93 - syscoremask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET); 93 + nlm_node_init(n); 94 + nodep = nlm_get_node(n); 95 + syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET); 94 96 if (n == 0) 95 97 syscoremask |= 1; 96 98 97 - for (core = 0; core < 8; core++) { 99 + for (core = 0; core < NLM_CORES_PER_NODE; core++) { 98 100 /* see if the core exists */ 99 101 if ((syscoremask & (1 << core)) == 0) 100 102 continue; 101 103 102 104 /* see if at least the first thread is enabled */ 103 - if (!cpumask_test_cpu((n * 8 + core) * 4, wakeup_mask)) 105 + cpu = (n * NLM_CORES_PER_NODE + core) 106 + * NLM_THREADS_PER_CORE; 107 + if (!cpumask_test_cpu(cpu, wakeup_mask)) 104 108 continue; 105 109 106 110 /* wake up the core */ 107 - if (!xlp_wakeup_core(sysbase, core)) 111 + if (xlp_wakeup_core(nodep->sysbase, core)) 112 + nodep->coremask |= 1u << core; 113 + else 108 114 pr_err("Failed to enable core %d\n", core); 109 115 } 110 116 }
+13 -7
arch/mips/netlogic/xlr/setup.c
··· 51 51 #include <asm/netlogic/xlr/gpio.h> 52 52 53 53 uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE; 54 - uint64_t nlm_pic_base; 55 54 struct psb_info nlm_prom_info; 56 55 57 - unsigned long nlm_common_ebase = 0x0; 58 - 59 56 /* default to uniprocessor */ 60 - uint32_t nlm_coremask = 1; 61 - int nlm_threads_per_core = 1; 57 + unsigned int nlm_threads_per_core = 1; 58 + struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 62 59 cpumask_t nlm_cpumask = CPU_MASK_CPU0; 63 60 64 61 static void __init nlm_early_serial_setup(void) ··· 174 177 } 175 178 } 176 179 180 + static void nlm_init_node(void) 181 + { 182 + struct nlm_soc_info *nodep; 183 + 184 + nodep = nlm_current_node(); 185 + nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET); 186 + nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1)); 187 + spin_lock_init(&nodep->piclock); 188 + } 189 + 177 190 void __init prom_init(void) 178 191 { 179 192 int i, *argv, *envp; /* passed as 32 bit ptrs */ ··· 195 188 prom_infop = (struct psb_info *)(long)(int)fw_arg3; 196 189 197 190 nlm_prom_info = *prom_infop; 198 - nlm_pic_base = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET); 191 + nlm_init_node(); 199 192 200 193 nlm_early_serial_setup(); 201 194 build_arcs_cmdline(argv); 202 - nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); 203 195 prom_add_memory(); 204 196 205 197 #ifdef CONFIG_SMP
+19 -2
arch/mips/netlogic/xlr/wakeup.c
··· 33 33 */ 34 34 35 35 #include <linux/init.h> 36 + #include <linux/delay.h> 36 37 #include <linux/threads.h> 37 38 38 39 #include <asm/asm.h> ··· 51 50 52 51 int __cpuinit xlr_wakeup_secondary_cpus(void) 53 52 { 54 - unsigned int i, boot_cpu; 53 + struct nlm_soc_info *nodep; 54 + unsigned int i, j, boot_cpu; 55 55 56 56 /* 57 57 * In case of RMI boot, hit with NMI to get the cores 58 58 * from bootloader to linux code. 59 59 */ 60 + nodep = nlm_get_node(0); 60 61 boot_cpu = hard_smp_processor_id(); 61 62 nlm_set_nmi_handler(nlm_rmiboot_preboot); 62 63 for (i = 0; i < NR_CPUS; i++) { 63 64 if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask)) 64 65 continue; 65 - nlm_pic_send_ipi(nlm_pic_base, i, 1, 1); /* send NMI */ 66 + nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */ 67 + } 68 + 69 + /* Fill up the coremask early */ 70 + nodep->coremask = 1; 71 + for (i = 1; i < NLM_CORES_PER_NODE; i++) { 72 + for (j = 1000000; j > 0; j--) { 73 + if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE]) 74 + break; 75 + udelay(10); 76 + } 77 + if (j != 0) 78 + nodep->coremask |= (1u << i); 79 + else 80 + pr_err("Failed to wakeup core %d\n", i); 66 81 } 67 82 68 83 return 0;