Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SPARC]: Try to start getting SMP back into shape.

Todo items:
- IRQ_INPROGRESS flag - use sparc64 irq buckets, or generic irq_desc?
- sun4d
- re-indent large chunks of sun4m_smp.c
- some places assume sequential cpu numbering (i.e. 0,1 instead of 0,2)

Last I checked (with 2.6.14), random programs segfault with dual
HyperSPARC. And with SuperSPARC IIs, it seems stable but will
eventually die from a write-lock error (wrong lock owner or something).

I haven't tried the HyperSPARC + highmem combination recently, so that
may still be a problem.

Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Bob Breuer and committed by David S. Miller.

Commit a54123e2 (parent 674a396c)

+211 -180
-1
arch/sparc/Kconfig
··· 23 23 24 24 config SMP 25 25 bool "Symmetric multi-processing support (does not work on sun4/sun4c)" 26 - depends on BROKEN 27 26 ---help--- 28 27 This enables support for systems with more than one CPU. If you have 29 28 a system with only one CPU, say N. If you have a system with more
+34 -32
arch/sparc/kernel/irq.c
··· 154 154 struct irqaction static_irqaction[MAX_STATIC_ALLOC]; 155 155 int static_irq_count; 156 156 157 - struct irqaction *irq_action[NR_IRQS] = { 158 - [0 ... (NR_IRQS-1)] = NULL 159 - }; 157 + struct { 158 + struct irqaction *action; 159 + int flags; 160 + } sparc_irq[NR_IRQS]; 161 + #define SPARC_IRQ_INPROGRESS 1 160 162 161 163 /* Used to protect the IRQ action lists */ 162 164 DEFINE_SPINLOCK(irq_action_lock); ··· 179 177 } 180 178 spin_lock_irqsave(&irq_action_lock, flags); 181 179 if (i < NR_IRQS) { 182 - action = *(i + irq_action); 180 + action = sparc_irq[i].action; 183 181 if (!action) 184 182 goto out_unlock; 185 183 seq_printf(p, "%3d: ", i); ··· 188 186 #else 189 187 for_each_online_cpu(j) { 190 188 seq_printf(p, "%10u ", 191 - kstat_cpu(cpu_logical_map(j)).irqs[i]); 189 + kstat_cpu(j).irqs[i]); 192 190 } 193 191 #endif 194 192 seq_printf(p, " %c %s", ··· 209 207 void free_irq(unsigned int irq, void *dev_id) 210 208 { 211 209 struct irqaction * action; 212 - struct irqaction * tmp = NULL; 210 + struct irqaction **actionp; 213 211 unsigned long flags; 214 212 unsigned int cpu_irq; 215 213 ··· 227 225 228 226 spin_lock_irqsave(&irq_action_lock, flags); 229 227 230 - action = *(cpu_irq + irq_action); 228 + actionp = &sparc_irq[cpu_irq].action; 229 + action = *actionp; 231 230 232 231 if (!action->handler) { 233 232 printk("Trying to free free IRQ%d\n",irq); ··· 238 235 for (; action; action = action->next) { 239 236 if (action->dev_id == dev_id) 240 237 break; 241 - tmp = action; 238 + actionp = &action->next; 242 239 } 243 240 if (!action) { 244 241 printk("Trying to free free shared IRQ%d\n",irq); ··· 257 254 irq, action->name); 258 255 goto out_unlock; 259 256 } 260 - 261 - if (action && tmp) 262 - tmp->next = action->next; 263 - else 264 - *(cpu_irq + irq_action) = action->next; 257 + 258 + *actionp = action->next; 265 259 266 260 spin_unlock_irqrestore(&irq_action_lock, flags); 267 261 ··· 268 268 269 269 kfree(action); 270 270 271 - if 
(!(*(cpu_irq + irq_action))) 271 + if (!sparc_irq[cpu_irq].action) 272 272 disable_irq(irq); 273 273 274 274 out_unlock: ··· 287 287 #ifdef CONFIG_SMP 288 288 void synchronize_irq(unsigned int irq) 289 289 { 290 - printk("synchronize_irq says: implement me!\n"); 291 - BUG(); 290 + unsigned int cpu_irq; 291 + 292 + cpu_irq = irq & (NR_IRQS - 1); 293 + while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS) 294 + cpu_relax(); 292 295 } 293 296 #endif /* SMP */ 294 297 ··· 302 299 unsigned int cpu_irq; 303 300 304 301 cpu_irq = irq & (NR_IRQS - 1); 305 - action = *(cpu_irq + irq_action); 302 + action = sparc_irq[cpu_irq].action; 306 303 307 304 printk("IO device interrupt, irq = %d\n", irq); 308 305 printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc, ··· 333 330 if(irq < 10) 334 331 smp4m_irq_rotate(cpu); 335 332 #endif 336 - action = *(irq + irq_action); 333 + action = sparc_irq[irq].action; 334 + sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS; 337 335 kstat_cpu(cpu).irqs[irq]++; 338 336 do { 339 337 if (!action || !action->handler) ··· 342 338 action->handler(irq, action->dev_id, regs); 343 339 action = action->next; 344 340 } while (action); 341 + sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS; 345 342 enable_pil_irq(irq); 346 343 irq_exit(); 347 344 } ··· 394 389 395 390 spin_lock_irqsave(&irq_action_lock, flags); 396 391 397 - action = *(cpu_irq + irq_action); 392 + action = sparc_irq[cpu_irq].action; 398 393 if(action) { 399 394 if(action->flags & SA_SHIRQ) 400 395 panic("Trying to register fast irq when already shared.\n"); ··· 457 452 action->dev_id = NULL; 458 453 action->next = NULL; 459 454 460 - *(cpu_irq + irq_action) = action; 455 + sparc_irq[cpu_irq].action = action; 461 456 462 457 enable_irq(irq); 463 458 ··· 472 467 irqreturn_t (*handler)(int, void *, struct pt_regs *), 473 468 unsigned long irqflags, const char * devname, void *dev_id) 474 469 { 475 - struct irqaction * action, *tmp = NULL; 470 + struct irqaction * action, **actionp; 476 471 unsigned 
long flags; 477 472 unsigned int cpu_irq; 478 473 int ret; ··· 495 490 496 491 spin_lock_irqsave(&irq_action_lock, flags); 497 492 498 - action = *(cpu_irq + irq_action); 493 + actionp = &sparc_irq[cpu_irq].action; 494 + action = *actionp; 499 495 if (action) { 500 - if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) { 501 - for (tmp = action; tmp->next; tmp = tmp->next); 502 - } else { 496 + if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ)) { 503 497 ret = -EBUSY; 504 498 goto out_unlock; 505 499 } 506 - if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) { 500 + if ((action->flags & SA_INTERRUPT) != (irqflags & SA_INTERRUPT)) { 507 501 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq); 508 502 ret = -EBUSY; 509 503 goto out_unlock; 510 - } 511 - action = NULL; /* Or else! */ 504 + } 505 + for ( ; action; action = *actionp) 506 + actionp = &action->next; 512 507 } 513 508 514 509 /* If this is flagged as statically allocated then we use our ··· 537 532 action->next = NULL; 538 533 action->dev_id = dev_id; 539 534 540 - if (tmp) 541 - tmp->next = action; 542 - else 543 - *(cpu_irq + irq_action) = action; 535 + *actionp = action; 544 536 545 537 enable_irq(irq); 546 538
+63 -21
arch/sparc/kernel/smp.c
··· 45 45 46 46 cpumask_t cpu_online_map = CPU_MASK_NONE; 47 47 cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 48 + cpumask_t smp_commenced_mask = CPU_MASK_NONE; 48 49 49 50 /* The only guaranteed locking primitive available on all Sparc 50 51 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically ··· 57 56 58 57 /* Used to make bitops atomic */ 59 58 unsigned char bitops_spinlock = 0; 60 - 61 - volatile unsigned long ipi_count; 62 - 63 - volatile int smp_process_available=0; 64 - volatile int smp_commenced = 0; 65 59 66 60 void __init smp_store_cpu_info(int id) 67 61 { ··· 75 79 76 80 void __init smp_cpus_done(unsigned int max_cpus) 77 81 { 82 + extern void smp4m_smp_done(void); 83 + unsigned long bogosum = 0; 84 + int cpu, num; 85 + 86 + for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++) 87 + if (cpu_online(cpu)) { 88 + num++; 89 + bogosum += cpu_data(cpu).udelay_val; 90 + } 91 + 92 + printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n", 93 + num, bogosum/(500000/HZ), 94 + (bogosum/(5000/HZ))%100); 95 + 96 + BUG_ON(sparc_cpu_model != sun4m); 97 + smp4m_smp_done(); 78 98 } 79 99 80 100 void cpu_panic(void) ··· 100 88 } 101 89 102 90 struct linux_prom_registers smp_penguin_ctable __initdata = { 0 }; 103 - 104 - void __init smp_boot_cpus(void) 105 - { 106 - extern void smp4m_boot_cpus(void); 107 - extern void smp4d_boot_cpus(void); 108 - 109 - if (sparc_cpu_model == sun4m) 110 - smp4m_boot_cpus(); 111 - else 112 - smp4d_boot_cpus(); 113 - } 114 91 115 92 void smp_send_reschedule(int cpu) 116 93 { ··· 253 252 return 0; 254 253 } 255 254 256 - void __init smp_prepare_cpus(unsigned int maxcpus) 255 + void __init smp_prepare_cpus(unsigned int max_cpus) 257 256 { 257 + extern void smp4m_boot_cpus(void); 258 + int i, cpuid, ncpus, extra; 259 + 260 + BUG_ON(sparc_cpu_model != sun4m); 261 + printk("Entering SMP Mode...\n"); 262 + 263 + ncpus = 1; 264 + extra = 0; 265 + for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) { 266 + if (cpuid == 
boot_cpu_id) 267 + continue; 268 + if (cpuid < NR_CPUS && ncpus++ < max_cpus) 269 + cpu_set(cpuid, phys_cpu_present_map); 270 + else 271 + extra++; 272 + } 273 + if (max_cpus >= NR_CPUS && extra) 274 + printk("Warning: NR_CPUS is too low to start all cpus\n"); 275 + 276 + smp_store_cpu_info(boot_cpu_id); 277 + 278 + smp4m_boot_cpus(); 258 279 } 259 280 260 281 void __devinit smp_prepare_boot_cpu(void) 261 282 { 262 - current_thread_info()->cpu = hard_smp_processor_id(); 263 - cpu_set(smp_processor_id(), cpu_online_map); 264 - cpu_set(smp_processor_id(), phys_cpu_present_map); 283 + int cpuid = hard_smp_processor_id(); 284 + 285 + if (cpuid >= NR_CPUS) { 286 + prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); 287 + prom_halt(); 288 + } 289 + if (cpuid != 0) 290 + printk("boot cpu id != 0, this could work but is untested\n"); 291 + 292 + current_thread_info()->cpu = cpuid; 293 + cpu_set(cpuid, cpu_online_map); 294 + cpu_set(cpuid, phys_cpu_present_map); 265 295 } 266 296 267 297 int __devinit __cpu_up(unsigned int cpu) 268 298 { 269 - panic("smp doesn't work\n"); 299 + extern int smp4m_boot_one_cpu(int); 300 + int ret; 301 + 302 + ret = smp4m_boot_one_cpu(cpu); 303 + 304 + if (!ret) { 305 + cpu_set(cpu, smp_commenced_mask); 306 + while (!cpu_online(cpu)) 307 + mb(); 308 + } 309 + return ret; 270 310 } 271 311 272 312 void smp_bogo(struct seq_file *m)
-4
arch/sparc/kernel/sparc_ksyms.c
··· 136 136 /* IRQ implementation. */ 137 137 EXPORT_SYMBOL(synchronize_irq); 138 138 139 - /* Misc SMP information */ 140 - EXPORT_SYMBOL(__cpu_number_map); 141 - EXPORT_SYMBOL(__cpu_logical_map); 142 - 143 139 /* CPU online map and active count. */ 144 140 EXPORT_SYMBOL(cpu_online_map); 145 141 EXPORT_SYMBOL(phys_cpu_present_map);
+1 -1
arch/sparc/kernel/sun4d_irq.c
··· 54 54 unsigned char sbus_tid[32]; 55 55 #endif 56 56 57 - extern struct irqaction *irq_action[]; 57 + static struct irqaction *irq_action[NR_IRQS]; 58 58 extern spinlock_t irq_action_lock; 59 59 60 60 struct sbus_action {
+5 -3
arch/sparc/kernel/sun4d_smp.c
··· 46 46 extern int smp_num_cpus; 47 47 static int smp_highest_cpu; 48 48 extern volatile unsigned long cpu_callin_map[NR_CPUS]; 49 - extern struct cpuinfo_sparc cpu_data[NR_CPUS]; 49 + extern cpuinfo_sparc cpu_data[NR_CPUS]; 50 50 extern unsigned char boot_cpu_id; 51 51 extern int smp_activated; 52 52 extern volatile int __cpu_number_map[NR_CPUS]; 53 53 extern volatile int __cpu_logical_map[NR_CPUS]; 54 54 extern volatile unsigned long ipi_count; 55 55 extern volatile int smp_process_available; 56 - extern volatile int smp_commenced; 56 + 57 + extern cpumask_t smp_commenced_mask; 58 + 57 59 extern int __smp4d_processor_id(void); 58 60 59 61 /* #define SMP_DEBUG */ ··· 138 136 139 137 local_irq_enable(); /* We don't allow PIL 14 yet */ 140 138 141 - while(!smp_commenced) 139 + while (!cpu_isset(cpuid, smp_commenced_mask)) 142 140 barrier(); 143 141 144 142 spin_lock_irqsave(&sun4d_imsk_lock, flags);
+78 -107
arch/sparc/kernel/sun4m_smp.c
··· 40 40 extern void calibrate_delay(void); 41 41 42 42 extern volatile int smp_processors_ready; 43 - extern int smp_num_cpus; 44 43 extern volatile unsigned long cpu_callin_map[NR_CPUS]; 45 44 extern unsigned char boot_cpu_id; 46 - extern int smp_activated; 47 - extern volatile int __cpu_number_map[NR_CPUS]; 48 - extern volatile int __cpu_logical_map[NR_CPUS]; 49 - extern volatile unsigned long ipi_count; 50 - extern volatile int smp_process_available; 51 - extern volatile int smp_commenced; 45 + 46 + extern cpumask_t smp_commenced_mask; 47 + 52 48 extern int __smp4m_processor_id(void); 53 49 54 50 /*#define SMP_DEBUG*/ ··· 73 77 local_flush_cache_all(); 74 78 local_flush_tlb_all(); 75 79 76 - set_irq_udt(boot_cpu_id); 77 - 78 80 /* Get our local ticker going. */ 79 81 smp_setup_percpu_timer(); 80 82 ··· 89 95 * to call the scheduler code. 90 96 */ 91 97 /* Allow master to continue. */ 92 - swap((unsigned long *)&cpu_callin_map[cpuid], 1); 98 + swap(&cpu_callin_map[cpuid], 1); 93 99 100 + /* XXX: What's up with all the flushes? 
*/ 94 101 local_flush_cache_all(); 95 102 local_flush_tlb_all(); 96 103 ··· 106 111 atomic_inc(&init_mm.mm_count); 107 112 current->active_mm = &init_mm; 108 113 109 - while(!smp_commenced) 110 - barrier(); 111 - 112 - local_flush_cache_all(); 113 - local_flush_tlb_all(); 114 + while (!cpu_isset(cpuid, smp_commenced_mask)) 115 + mb(); 114 116 115 117 local_irq_enable(); 118 + 119 + cpu_set(cpuid, cpu_online_map); 120 + /* last one in gets all the interrupts (for testing) */ 121 + set_irq_udt(boot_cpu_id); 116 122 } 117 123 118 124 extern void init_IRQ(void); ··· 130 134 131 135 void __init smp4m_boot_cpus(void) 132 136 { 133 - int cpucount = 0; 134 - int i, mid; 135 - 136 - printk("Entering SMP Mode...\n"); 137 - 138 - local_irq_enable(); 139 - cpus_clear(cpu_present_map); 140 - 141 - for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++) 142 - cpu_set(mid, cpu_present_map); 143 - 144 - for(i=0; i < NR_CPUS; i++) { 145 - __cpu_number_map[i] = -1; 146 - __cpu_logical_map[i] = -1; 147 - } 148 - 149 - __cpu_number_map[boot_cpu_id] = 0; 150 - __cpu_logical_map[0] = boot_cpu_id; 151 - current_thread_info()->cpu = boot_cpu_id; 152 - 153 - smp_store_cpu_info(boot_cpu_id); 154 - set_irq_udt(boot_cpu_id); 155 137 smp_setup_percpu_timer(); 156 138 local_flush_cache_all(); 157 - if(cpu_find_by_instance(1, NULL, NULL)) 158 - return; /* Not an MP box. */ 159 - for(i = 0; i < NR_CPUS; i++) { 160 - if(i == boot_cpu_id) 161 - continue; 139 + } 162 140 163 - if (cpu_isset(i, cpu_present_map)) { 164 - extern unsigned long sun4m_cpu_startup; 165 - unsigned long *entry = &sun4m_cpu_startup; 166 - struct task_struct *p; 167 - int timeout; 141 + int smp4m_boot_one_cpu(int i) 142 + { 143 + extern unsigned long sun4m_cpu_startup; 144 + unsigned long *entry = &sun4m_cpu_startup; 145 + struct task_struct *p; 146 + int timeout; 147 + int cpu_node; 168 148 169 - /* Cook up an idler for this guy. 
*/ 170 - p = fork_idle(i); 171 - cpucount++; 172 - current_set[i] = task_thread_info(p); 173 - /* See trampoline.S for details... */ 174 - entry += ((i-1) * 3); 149 + cpu_find_by_mid(i, &cpu_node); 175 150 176 - /* 177 - * Initialize the contexts table 178 - * Since the call to prom_startcpu() trashes the structure, 179 - * we need to re-initialize it for each cpu 180 - */ 181 - smp_penguin_ctable.which_io = 0; 182 - smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys; 183 - smp_penguin_ctable.reg_size = 0; 151 + /* Cook up an idler for this guy. */ 152 + p = fork_idle(i); 153 + current_set[i] = task_thread_info(p); 154 + /* See trampoline.S for details... */ 155 + entry += ((i-1) * 3); 184 156 185 - /* whirrr, whirrr, whirrrrrrrrr... */ 186 - printk("Starting CPU %d at %p\n", i, entry); 187 - local_flush_cache_all(); 188 - prom_startcpu(cpu_data(i).prom_node, 189 - &smp_penguin_ctable, 0, (char *)entry); 157 + /* 158 + * Initialize the contexts table 159 + * Since the call to prom_startcpu() trashes the structure, 160 + * we need to re-initialize it for each cpu 161 + */ 162 + smp_penguin_ctable.which_io = 0; 163 + smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys; 164 + smp_penguin_ctable.reg_size = 0; 190 165 191 - /* wheee... it's going... */ 192 - for(timeout = 0; timeout < 10000; timeout++) { 193 - if(cpu_callin_map[i]) 194 - break; 195 - udelay(200); 196 - } 197 - if(cpu_callin_map[i]) { 198 - /* Another "Red Snapper". */ 199 - __cpu_number_map[i] = i; 200 - __cpu_logical_map[i] = i; 201 - } else { 202 - cpucount--; 203 - printk("Processor %d is stuck.\n", i); 204 - } 205 - } 206 - if(!(cpu_callin_map[i])) { 207 - cpu_clear(i, cpu_present_map); 208 - __cpu_number_map[i] = -1; 209 - } 210 - } 166 + /* whirrr, whirrr, whirrrrrrrrr... 
*/ 167 + printk("Starting CPU %d at %p\n", i, entry); 211 168 local_flush_cache_all(); 212 - if(cpucount == 0) { 213 - printk("Error: only one Processor found.\n"); 214 - cpu_present_map = cpumask_of_cpu(smp_processor_id()); 215 - } else { 216 - unsigned long bogosum = 0; 217 - for_each_present_cpu(i) 218 - bogosum += cpu_data(i).udelay_val; 219 - printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 220 - cpucount + 1, 221 - bogosum/(500000/HZ), 222 - (bogosum/(5000/HZ))%100); 223 - smp_activated = 1; 224 - smp_num_cpus = cpucount + 1; 169 + prom_startcpu(cpu_node, 170 + &smp_penguin_ctable, 0, (char *)entry); 171 + 172 + /* wheee... it's going... */ 173 + for(timeout = 0; timeout < 10000; timeout++) { 174 + if(cpu_callin_map[i]) 175 + break; 176 + udelay(200); 225 177 } 178 + 179 + if (!(cpu_callin_map[i])) { 180 + printk("Processor %d is stuck.\n", i); 181 + return -ENODEV; 182 + } 183 + 184 + local_flush_cache_all(); 185 + return 0; 186 + } 187 + 188 + void __init smp4m_smp_done(void) 189 + { 190 + int i, first; 191 + int *prev; 192 + 193 + /* setup cpu list for irq rotation */ 194 + first = 0; 195 + prev = &first; 196 + for (i = 0; i < NR_CPUS; i++) { 197 + if (cpu_online(i)) { 198 + *prev = i; 199 + prev = &cpu_data(i).next; 200 + } 201 + } 202 + *prev = first; 203 + local_flush_cache_all(); 226 204 227 205 /* Free unneeded trap tables */ 228 - if (!cpu_isset(i, cpu_present_map)) { 206 + if (!cpu_isset(1, cpu_present_map)) { 229 207 ClearPageReserved(virt_to_page(trapbase_cpu1)); 230 208 init_page_count(virt_to_page(trapbase_cpu1)); 231 209 free_page((unsigned long)trapbase_cpu1); ··· 233 263 */ 234 264 void smp4m_irq_rotate(int cpu) 235 265 { 266 + int next = cpu_data(cpu).next; 267 + if (next != cpu) 268 + set_irq_udt(next); 236 269 } 237 270 238 271 /* Cross calls, in order to work efficiently and atomically do all ··· 262 289 263 290 smp_cpu_in_msg[me]++; 264 291 if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) { 265 - mask = 
cpu_present_map; 292 + mask = cpu_online_map; 266 293 if(target == MSG_ALL_BUT_SELF) 267 294 cpu_clear(me, mask); 268 295 for(i = 0; i < 4; i++) { ··· 287 314 unsigned long arg3; 288 315 unsigned long arg4; 289 316 unsigned long arg5; 290 - unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */ 291 - unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */ 317 + unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */ 318 + unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */ 292 319 } ccall_info; 293 320 294 321 static DEFINE_SPINLOCK(cross_call_lock); ··· 297 324 void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2, 298 325 unsigned long arg3, unsigned long arg4, unsigned long arg5) 299 326 { 300 - if(smp_processors_ready) { 301 - register int ncpus = smp_num_cpus; 327 + register int ncpus = SUN4M_NCPUS; 302 328 unsigned long flags; 303 329 304 330 spin_lock_irqsave(&cross_call_lock, flags); ··· 312 340 313 341 /* Init receive/complete mapping, plus fire the IPI's off. */ 314 342 { 315 - cpumask_t mask = cpu_present_map; 343 + cpumask_t mask = cpu_online_map; 316 344 register int i; 317 345 318 346 cpu_clear(smp_processor_id(), mask); ··· 345 373 } 346 374 347 375 spin_unlock_irqrestore(&cross_call_lock, flags); 348 - } 349 376 } 350 377 351 378 /* Running cross calls. */
+6
arch/sparc/mm/srmmu.c
··· 1302 1302 1303 1303 flush_cache_all(); 1304 1304 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); 1305 + #ifdef CONFIG_SMP 1306 + /* Stop from hanging here... */ 1307 + local_flush_tlb_all(); 1308 + #else 1305 1309 flush_tlb_all(); 1310 + #endif 1306 1311 poke_srmmu(); 1307 1312 1308 1313 #ifdef CONFIG_SUN_IO ··· 1424 1419 max_size = vac_cache_size; 1425 1420 if(vac_line_size < min_line_size) 1426 1421 min_line_size = vac_line_size; 1422 + //FIXME: cpus not contiguous!! 1427 1423 cpu++; 1428 1424 if (cpu >= NR_CPUS || !cpu_online(cpu)) 1429 1425 break;
+1
include/asm-sparc/cpudata.h
··· 18 18 unsigned int counter; 19 19 int prom_node; 20 20 int mid; 21 + int next; 21 22 } cpuinfo_sparc; 22 23 23 24 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
+1 -8
include/asm-sparc/smp.h
··· 81 81 return 0; 82 82 } 83 83 84 - extern __volatile__ int __cpu_number_map[NR_CPUS]; 85 - extern __volatile__ int __cpu_logical_map[NR_CPUS]; 86 - 87 84 static inline int cpu_logical_map(int cpu) 88 85 { 89 - return __cpu_logical_map[cpu]; 90 - } 91 - static inline int cpu_number_map(int cpu) 92 - { 93 - return __cpu_number_map[cpu]; 86 + return cpu; 94 87 } 95 88 96 89 static inline int hard_smp4m_processor_id(void)
+22 -3
include/asm-sparc/spinlock.h
··· 94 94 #define __raw_read_lock(lock) \ 95 95 do { unsigned long flags; \ 96 96 local_irq_save(flags); \ 97 - __raw_read_lock(lock); \ 97 + __read_lock(lock); \ 98 98 local_irq_restore(flags); \ 99 99 } while(0) 100 100 ··· 114 114 #define __raw_read_unlock(lock) \ 115 115 do { unsigned long flags; \ 116 116 local_irq_save(flags); \ 117 - __raw_read_unlock(lock); \ 117 + __read_unlock(lock); \ 118 118 local_irq_restore(flags); \ 119 119 } while(0) 120 120 121 - extern __inline__ void __raw_write_lock(raw_rwlock_t *rw) 121 + static inline void __raw_write_lock(raw_rwlock_t *rw) 122 122 { 123 123 register raw_rwlock_t *lp asm("g1"); 124 124 lp = rw; ··· 131 131 : "g2", "g4", "memory", "cc"); 132 132 } 133 133 134 + static inline int __raw_write_trylock(raw_rwlock_t *rw) 135 + { 136 + unsigned int val; 137 + 138 + __asm__ __volatile__("ldstub [%1 + 3], %0" 139 + : "=r" (val) 140 + : "r" (&rw->lock) 141 + : "memory"); 142 + 143 + if (val == 0) { 144 + val = rw->lock & ~0xff; 145 + if (val) 146 + ((volatile u8*)&rw->lock)[3] = 0; 147 + } 148 + 149 + return (val == 0); 150 + } 151 + 134 152 #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) 135 153 136 154 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 155 + #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) 137 156 138 157 #endif /* !(__ASSEMBLY__) */ 139 158