[MIPS] VSMP: Fix initialization ordering bug.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+83 -69
arch/mips/kernel/smp-mt.c
··· 140 140 .name = "IPI_call" 141 141 }; 142 142 143 + static void __init smp_copy_vpe_config(void) 144 + { 145 + write_vpe_c0_status( 146 + (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); 147 + 148 + /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 149 + write_vpe_c0_config( read_c0_config()); 150 + 151 + /* make sure there are no software interrupts pending */ 152 + write_vpe_c0_cause(0); 153 + 154 + /* Propagate Config7 */ 155 + write_vpe_c0_config7(read_c0_config7()); 156 + } 157 + 158 + static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0, 159 + unsigned int ncpu) 160 + { 161 + if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) 162 + return ncpu; 163 + 164 + /* Deactivate all but VPE 0 */ 165 + if (tc != 0) { 166 + unsigned long tmp = read_vpe_c0_vpeconf0(); 167 + 168 + tmp &= ~VPECONF0_VPA; 169 + 170 + /* master VPE */ 171 + tmp |= VPECONF0_MVP; 172 + write_vpe_c0_vpeconf0(tmp); 173 + 174 + /* Record this as available CPU */ 175 + cpu_set(tc, phys_cpu_present_map); 176 + __cpu_number_map[tc] = ++ncpu; 177 + __cpu_logical_map[ncpu] = tc; 178 + } 179 + 180 + /* Disable multi-threading with TC's */ 181 + write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); 182 + 183 + if (tc != 0) 184 + smp_copy_vpe_config(); 185 + 186 + return ncpu; 187 + } 188 + 189 + static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0) 190 + { 191 + unsigned long tmp; 192 + 193 + if (!tc) 194 + return; 195 + 196 + /* bind a TC to each VPE, May as well put all excess TC's 197 + on the last VPE */ 198 + if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1)) 199 + write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); 200 + else { 201 + write_tc_c0_tcbind(read_tc_c0_tcbind() | tc); 202 + 203 + /* and set XTC */ 204 + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT)); 205 + } 206 + 207 + tmp = 
read_tc_c0_tcstatus(); 208 + 209 + /* mark not allocated and not dynamically allocatable */ 210 + tmp &= ~(TCSTATUS_A | TCSTATUS_DA); 211 + tmp |= TCSTATUS_IXMT; /* interrupt exempt */ 212 + write_tc_c0_tcstatus(tmp); 213 + 214 + write_tc_c0_tchalt(TCHALT_H); 215 + } 216 + 143 217 /* 144 218 * Common setup before any secondaries are started 145 219 * Make sure all CPU's are in a sensible state before we boot any of the 146 220 * secondarys 147 221 */ 148 - void plat_smp_setup(void) 222 + void __init plat_smp_setup(void) 149 223 { 150 - unsigned long val; 151 - int i, num; 224 + unsigned int mvpconf0, ntc, tc, ncpu = 0; 152 225 153 226 #ifdef CONFIG_MIPS_MT_FPAFF 154 227 /* If we have an FPU, enroll ourselves in the FPU-full mask */ ··· 240 167 /* Put MVPE's into 'configuration state' */ 241 168 set_c0_mvpcontrol(MVPCONTROL_VPC); 242 169 243 - val = read_c0_mvpconf0(); 170 + mvpconf0 = read_c0_mvpconf0(); 171 + ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; 244 172 245 173 /* we'll always have more TC's than VPE's, so loop setting everything 246 174 to a sensible state */ 247 - for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) { 248 - settc(i); 175 + for (tc = 0; tc <= ntc; tc++) { 176 + settc(tc); 249 177 250 - /* VPE's */ 251 - if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) { 252 - 253 - /* deactivate all but vpe0 */ 254 - if (i != 0) { 255 - unsigned long tmp = read_vpe_c0_vpeconf0(); 256 - 257 - tmp &= ~VPECONF0_VPA; 258 - 259 - /* master VPE */ 260 - tmp |= VPECONF0_MVP; 261 - write_vpe_c0_vpeconf0(tmp); 262 - 263 - /* Record this as available CPU */ 264 - cpu_set(i, phys_cpu_present_map); 265 - __cpu_number_map[i] = ++num; 266 - __cpu_logical_map[num] = i; 267 - } 268 - 269 - /* disable multi-threading with TC's */ 270 - write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); 271 - 272 - if (i != 0) { 273 - write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); 274 - 275 - /* 
set config to be the same as vpe0, particularly kseg0 coherency alg */ 276 - write_vpe_c0_config( read_c0_config()); 277 - 278 - /* make sure there are no software interrupts pending */ 279 - write_vpe_c0_cause(0); 280 - 281 - /* Propagate Config7 */ 282 - write_vpe_c0_config7(read_c0_config7()); 283 - } 284 - 285 - } 286 - 287 - /* TC's */ 288 - 289 - if (i != 0) { 290 - unsigned long tmp; 291 - 292 - /* bind a TC to each VPE, May as well put all excess TC's 293 - on the last VPE */ 294 - if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) ) 295 - write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) ); 296 - else { 297 - write_tc_c0_tcbind( read_tc_c0_tcbind() | i); 298 - 299 - /* and set XTC */ 300 - write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT)); 301 - } 302 - 303 - tmp = read_tc_c0_tcstatus(); 304 - 305 - /* mark not allocated and not dynamically allocatable */ 306 - tmp &= ~(TCSTATUS_A | TCSTATUS_DA); 307 - tmp |= TCSTATUS_IXMT; /* interrupt exempt */ 308 - write_tc_c0_tcstatus(tmp); 309 - 310 - write_tc_c0_tchalt(TCHALT_H); 311 - } 178 + smp_tc_init(tc, mvpconf0); 179 + ncpu = smp_vpe_init(tc, mvpconf0, ncpu); 312 180 } 313 181 314 182 /* Release config state */ ··· 257 243 258 244 /* We'll wait until starting the secondaries before starting MVPE */ 259 245 260 - printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 246 + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); 261 247 } 262 248 263 249 void __init plat_prepare_cpus(unsigned int max_cpus)