Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (23 commits)
[POWERPC] Remove leftover printk in isa-bridge.c
[POWERPC] Remove duplicate #include
[POWERPC] Initialize lockdep earlier
[POWERPC] Document when printk is usable
[POWERPC] Fix bogus paca->_current initialization
[POWERPC] Fix of_i2c include for module compilation
[POWERPC] Make default cputable entries reflect selected CPU family
[POWERPC] spufs: lockdep annotations for spufs_dir_close
[POWERPC] spufs: don't requeue victim context in find_victim if it's not in spu_run
[POWERPC] 4xx: Fix PCI mem in sequoia DTS
[POWERPC] 4xx: Add endpoint support to 4xx PCIe driver
[POWERPC] 4xx: Fix problem with new TLB storage attribute fields on 440x6 core
[POWERPC] spufs: spu_create should send inotify IM_CREATE event
[POWERPC] spufs: handle faults while the context switch pending flag is set
[POWERPC] spufs: fix concurrent delivery of class 0 & 1 exceptions
[POWERPC] spufs: try to route SPU interrupts to local node
[POWERPC] spufs: set SPU_CONTEXT_SWITCH_PENDING before synchronising SPU irqs
[POWERPC] spufs: don't acquire state_mutex interruptible while performing callback
[POWERPC] spufs: update master runcntl with context lock held
[POWERPC] spufs: fix post-stopped update of MFC_CNTL register
...

+399 -145
+7 -2
arch/powerpc/boot/dts/sequoia.dts
··· 342 342 /* Outbound ranges, one memory and one IO, 343 343 * later cannot be changed. Chip supports a second 344 344 * IO range but we don't use it for now 345 + * From the 440EPx user manual: 346 + * PCI 1 Memory 1 8000 0000 1 BFFF FFFF 1GB 347 + * I/O 1 E800 0000 1 E800 FFFF 64KB 348 + * I/O 1 E880 0000 1 EBFF FFFF 56MB 345 349 */ 346 - ranges = <02000000 0 80000000 1 80000000 0 10000000 347 - 01000000 0 00000000 1 e8000000 0 00100000>; 350 + ranges = <02000000 0 80000000 1 80000000 0 40000000 351 + 01000000 0 00000000 1 e8000000 0 00010000 352 + 01000000 0 00000000 1 e8800000 0 03800000>; 348 353 349 354 /* Inbound 2GB range starting at 0 */ 350 355 dma-ranges = <42000000 0 0 0 0 0 80000000>;
-1
arch/powerpc/kernel/btext.c
··· 16 16 #include <asm/mmu.h> 17 17 #include <asm/pgtable.h> 18 18 #include <asm/io.h> 19 - #include <asm/prom.h> 20 19 #include <asm/processor.h> 21 20 #include <asm/udbg.h> 22 21
+43 -10
arch/powerpc/kernel/cputable.c
··· 1208 1208 .machine_check = machine_check_4xx, 1209 1209 .platform = "ppc405", 1210 1210 }, 1211 + { /* default match */ 1212 + .pvr_mask = 0x00000000, 1213 + .pvr_value = 0x00000000, 1214 + .cpu_name = "(generic 40x PPC)", 1215 + .cpu_features = CPU_FTRS_40X, 1216 + .cpu_user_features = PPC_FEATURE_32 | 1217 + PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1218 + .icache_bsize = 32, 1219 + .dcache_bsize = 32, 1220 + .machine_check = machine_check_4xx, 1221 + .platform = "ppc405", 1222 + } 1211 1223 1212 1224 #endif /* CONFIG_40x */ 1213 1225 #ifdef CONFIG_44x ··· 1433 1421 .machine_check = machine_check_440A, 1434 1422 .platform = "ppc440", 1435 1423 }, 1424 + { /* default match */ 1425 + .pvr_mask = 0x00000000, 1426 + .pvr_value = 0x00000000, 1427 + .cpu_name = "(generic 44x PPC)", 1428 + .cpu_features = CPU_FTRS_44X, 1429 + .cpu_user_features = COMMON_USER_BOOKE, 1430 + .icache_bsize = 32, 1431 + .dcache_bsize = 32, 1432 + .machine_check = machine_check_4xx, 1433 + .platform = "ppc440", 1434 + } 1436 1435 #endif /* CONFIG_44x */ 1437 - #ifdef CONFIG_FSL_BOOKE 1438 1436 #ifdef CONFIG_E200 1439 1437 { /* e200z5 */ 1440 1438 .pvr_mask = 0xfff00000, ··· 1473 1451 .machine_check = machine_check_e200, 1474 1452 .platform = "ppc5554", 1475 1453 }, 1476 - #elif defined(CONFIG_E500) 1454 + { /* default match */ 1455 + .pvr_mask = 0x00000000, 1456 + .pvr_value = 0x00000000, 1457 + .cpu_name = "(generic E200 PPC)", 1458 + .cpu_features = CPU_FTRS_E200, 1459 + .cpu_user_features = COMMON_USER_BOOKE | 1460 + PPC_FEATURE_HAS_EFP_SINGLE | 1461 + PPC_FEATURE_UNIFIED_CACHE, 1462 + .dcache_bsize = 32, 1463 + .machine_check = machine_check_e200, 1464 + .platform = "ppc5554", 1465 + #endif /* CONFIG_E200 */ 1466 + #ifdef CONFIG_E500 1477 1467 { /* e500 */ 1478 1468 .pvr_mask = 0xffff0000, 1479 1469 .pvr_value = 0x80200000, ··· 1521 1487 .machine_check = machine_check_e500, 1522 1488 .platform = "ppc8548", 1523 1489 }, 1524 - #endif 1525 - #endif 1526 - #if !CLASSIC_PPC 1527 1490 
{ /* default match */ 1528 1491 .pvr_mask = 0x00000000, 1529 1492 .pvr_value = 0x00000000, 1530 - .cpu_name = "(generic PPC)", 1531 - .cpu_features = CPU_FTRS_GENERIC_32, 1532 - .cpu_user_features = PPC_FEATURE_32, 1493 + .cpu_name = "(generic E500 PPC)", 1494 + .cpu_features = CPU_FTRS_E500, 1495 + .cpu_user_features = COMMON_USER_BOOKE | 1496 + PPC_FEATURE_HAS_SPE_COMP | 1497 + PPC_FEATURE_HAS_EFP_SINGLE_COMP, 1533 1498 .icache_bsize = 32, 1534 1499 .dcache_bsize = 32, 1500 + .machine_check = machine_check_e500, 1535 1501 .platform = "powerpc", 1536 - } 1537 - #endif /* !CLASSIC_PPC */ 1502 + #endif /* CONFIG_E500 */ 1538 1503 #endif /* CONFIG_PPC32 */ 1539 1504 }; 1540 1505
+8 -1
arch/powerpc/kernel/head_44x.S
··· 653 653 rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ 654 654 655 655 rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ 656 - rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ 656 + 657 + /* 658 + * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added 659 + * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see 660 + * include/asm-powerpc/pgtable-ppc32.h for details). 661 + */ 662 + rlwinm r12, r12, 0, 20, 10 663 + 657 664 tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ 658 665 659 666 /* Done...restore registers and get out of here.
-4
arch/powerpc/kernel/head_64.S
··· 1517 1517 addi r2,r2,0x4000 1518 1518 add r2,r2,r26 1519 1519 1520 - /* Set initial ptr to current */ 1521 - LOAD_REG_IMMEDIATE(r4, init_task) 1522 - std r4,PACACURRENT(r13) 1523 - 1524 1520 /* Do very early kernel initializations, including initial hash table, 1525 1521 * stab and slb setup before we turn on relocation. */ 1526 1522
-3
arch/powerpc/kernel/isa-bridge.c
··· 108 108 if (size > 0x10000) 109 109 size = 0x10000; 110 110 111 - printk(KERN_ERR "no ISA IO ranges or unexpected isa range, " 112 - "mapping 64k\n"); 113 - 114 111 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, 115 112 size, _PAGE_NO_CACHE|_PAGE_GUARDED); 116 113 return;
+7 -3
arch/powerpc/kernel/setup_64.c
··· 170 170 171 171 void __init early_setup(unsigned long dt_ptr) 172 172 { 173 + /* -------- printk is _NOT_ safe to use here ! ------- */ 174 + 173 175 /* Fill in any unititialised pacas */ 174 176 initialise_pacas(); 175 177 ··· 181 179 /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ 182 180 setup_paca(0); 183 181 184 - /* Enable early debugging if any specified (see udbg.h) */ 185 - udbg_early_init(); 186 - 187 182 /* Initialize lockdep early or else spinlocks will blow */ 188 183 lockdep_init(); 184 + 185 + /* -------- printk is now safe to use ------- */ 186 + 187 + /* Enable early debugging if any specified (see udbg.h) */ 188 + udbg_early_init(); 189 189 190 190 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); 191 191
+51 -2
arch/powerpc/platforms/cell/interrupt.c
··· 35 35 #include <linux/percpu.h> 36 36 #include <linux/types.h> 37 37 #include <linux/ioport.h> 38 + #include <linux/kernel_stat.h> 38 39 39 40 #include <asm/io.h> 40 41 #include <asm/pgtable.h> ··· 232 231 "IBM,CBEA-Internal-Interrupt-Controller"); 233 232 } 234 233 234 + extern int noirqdebug; 235 + 236 + static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) 237 + { 238 + const unsigned int cpu = smp_processor_id(); 239 + 240 + spin_lock(&desc->lock); 241 + 242 + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 243 + 244 + /* 245 + * If we're currently running this IRQ, or its disabled, 246 + * we shouldn't process the IRQ. Mark it pending, handle 247 + * the necessary masking and go out 248 + */ 249 + if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 250 + !desc->action)) { 251 + desc->status |= IRQ_PENDING; 252 + goto out_eoi; 253 + } 254 + 255 + kstat_cpu(cpu).irqs[irq]++; 256 + 257 + /* Mark the IRQ currently in progress.*/ 258 + desc->status |= IRQ_INPROGRESS; 259 + 260 + do { 261 + struct irqaction *action = desc->action; 262 + irqreturn_t action_ret; 263 + 264 + if (unlikely(!action)) 265 + goto out_eoi; 266 + 267 + desc->status &= ~IRQ_PENDING; 268 + spin_unlock(&desc->lock); 269 + action_ret = handle_IRQ_event(irq, action); 270 + if (!noirqdebug) 271 + note_interrupt(irq, desc, action_ret); 272 + spin_lock(&desc->lock); 273 + 274 + } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); 275 + 276 + desc->status &= ~IRQ_INPROGRESS; 277 + out_eoi: 278 + desc->chip->eoi(irq); 279 + spin_unlock(&desc->lock); 280 + } 281 + 235 282 static int iic_host_map(struct irq_host *h, unsigned int virq, 236 283 irq_hw_number_t hw) 237 284 { ··· 289 240 break; 290 241 case IIC_IRQ_TYPE_IOEXC: 291 242 set_irq_chip_and_handler(virq, &iic_ioexc_chip, 292 - handle_fasteoi_irq); 243 + handle_iic_irq); 293 244 break; 294 245 default: 295 - set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); 246 + 
set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); 296 247 } 297 248 return 0; 298 249 }
+22 -9
arch/powerpc/platforms/cell/spu_base.c
··· 141 141 142 142 if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags)) 143 143 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); 144 + else { 145 + set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); 146 + mb(); 147 + } 144 148 } 145 149 146 150 static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) ··· 230 226 return 0; 231 227 } 232 228 233 - spu->class_0_pending = 0; 234 - spu->dar = ea; 235 - spu->dsisr = dsisr; 229 + spu->class_1_dar = ea; 230 + spu->class_1_dsisr = dsisr; 236 231 237 - spu->stop_callback(spu); 232 + spu->stop_callback(spu, 1); 233 + 234 + spu->class_1_dar = 0; 235 + spu->class_1_dsisr = 0; 238 236 239 237 return 0; 240 238 } ··· 324 318 stat = spu_int_stat_get(spu, 0) & mask; 325 319 326 320 spu->class_0_pending |= stat; 327 - spu->dsisr = spu_mfc_dsisr_get(spu); 328 - spu->dar = spu_mfc_dar_get(spu); 321 + spu->class_0_dsisr = spu_mfc_dsisr_get(spu); 322 + spu->class_0_dar = spu_mfc_dar_get(spu); 329 323 spin_unlock(&spu->register_lock); 330 324 331 - spu->stop_callback(spu); 325 + spu->stop_callback(spu, 0); 326 + 327 + spu->class_0_pending = 0; 328 + spu->class_0_dsisr = 0; 329 + spu->class_0_dar = 0; 332 330 333 331 spu_int_stat_clear(spu, 0, stat); 334 332 ··· 373 363 if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR) 374 364 ; 375 365 366 + spu->class_1_dsisr = 0; 367 + spu->class_1_dar = 0; 368 + 376 369 return stat ? IRQ_HANDLED : IRQ_NONE; 377 370 } 378 371 ··· 409 396 spu->ibox_callback(spu); 410 397 411 398 if (stat & CLASS2_SPU_STOP_INTR) 412 - spu->stop_callback(spu); 399 + spu->stop_callback(spu, 2); 413 400 414 401 if (stat & CLASS2_SPU_HALT_INTR) 415 - spu->stop_callback(spu); 402 + spu->stop_callback(spu, 2); 416 403 417 404 if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR) 418 405 spu->mfc_callback(spu);
+14 -2
arch/powerpc/platforms/cell/spu_priv1_mmio.c
··· 28 28 #include <linux/io.h> 29 29 #include <linux/mutex.h> 30 30 #include <linux/device.h> 31 + #include <linux/sched.h> 31 32 32 33 #include <asm/spu.h> 33 34 #include <asm/spu_priv1.h> ··· 76 75 77 76 static void cpu_affinity_set(struct spu *spu, int cpu) 78 77 { 79 - u64 target = iic_get_target_id(cpu); 80 - u64 route = target << 48 | target << 32 | target << 16; 78 + u64 target; 79 + u64 route; 80 + 81 + if (nr_cpus_node(spu->node)) { 82 + cpumask_t spumask = node_to_cpumask(spu->node); 83 + cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu)); 84 + 85 + if (!cpus_intersects(spumask, cpumask)) 86 + return; 87 + } 88 + 89 + target = iic_get_target_id(cpu); 90 + route = target << 48 | target << 32 | target << 16; 81 91 out_be64(&spu->priv1->int_route_RW, route); 82 92 } 83 93
+11 -6
arch/powerpc/platforms/cell/spufs/fault.c
··· 83 83 return 0; 84 84 85 85 if (stat & CLASS0_DMA_ALIGNMENT_INTR) 86 - spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT); 86 + spufs_handle_event(ctx, ctx->csa.class_0_dar, 87 + SPE_EVENT_DMA_ALIGNMENT); 87 88 88 89 if (stat & CLASS0_INVALID_DMA_COMMAND_INTR) 89 - spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA); 90 + spufs_handle_event(ctx, ctx->csa.class_0_dar, 91 + SPE_EVENT_INVALID_DMA); 90 92 91 93 if (stat & CLASS0_SPU_ERROR_INTR) 92 - spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR); 94 + spufs_handle_event(ctx, ctx->csa.class_0_dar, 95 + SPE_EVENT_SPE_ERROR); 96 + 97 + ctx->csa.class_0_pending = 0; 93 98 94 99 return -EIO; 95 100 } ··· 124 119 * in time, we can still expect to get the same fault 125 120 * the immediately after the context restore. 126 121 */ 127 - ea = ctx->csa.dar; 128 - dsisr = ctx->csa.dsisr; 122 + ea = ctx->csa.class_1_dar; 123 + dsisr = ctx->csa.class_1_dsisr; 129 124 130 125 if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))) 131 126 return 0; ··· 163 158 * time slicing will not preempt the context while the page fault 164 159 * handler is running. Context switch code removes mappings. 165 160 */ 166 - ctx->csa.dar = ctx->csa.dsisr = 0; 161 + ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0; 167 162 168 163 /* 169 164 * If we handled the fault successfully and are in runnable
+7 -3
arch/powerpc/platforms/cell/spufs/inode.c
··· 23 23 24 24 #include <linux/file.h> 25 25 #include <linux/fs.h> 26 + #include <linux/fsnotify.h> 26 27 #include <linux/backing-dev.h> 27 28 #include <linux/init.h> 28 29 #include <linux/ioctl.h> ··· 224 223 parent = dir->d_parent->d_inode; 225 224 ctx = SPUFS_I(dir->d_inode)->i_ctx; 226 225 227 - mutex_lock(&parent->i_mutex); 226 + mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT); 228 227 ret = spufs_rmdir(parent, dir); 229 228 mutex_unlock(&parent->i_mutex); 230 229 WARN_ON(ret); ··· 619 618 mode &= ~current->fs->umask; 620 619 621 620 if (flags & SPU_CREATE_GANG) 622 - return spufs_create_gang(nd->path.dentry->d_inode, 621 + ret = spufs_create_gang(nd->path.dentry->d_inode, 623 622 dentry, nd->path.mnt, mode); 624 623 else 625 - return spufs_create_context(nd->path.dentry->d_inode, 624 + ret = spufs_create_context(nd->path.dentry->d_inode, 626 625 dentry, nd->path.mnt, flags, mode, 627 626 filp); 627 + if (ret >= 0) 628 + fsnotify_mkdir(nd->path.dentry->d_inode, dentry); 629 + return ret; 628 630 629 631 out_dput: 630 632 dput(dentry);
+23 -15
arch/powerpc/platforms/cell/spufs/run.c
··· 11 11 #include "spufs.h" 12 12 13 13 /* interrupt-level stop callback function. */ 14 - void spufs_stop_callback(struct spu *spu) 14 + void spufs_stop_callback(struct spu *spu, int irq) 15 15 { 16 16 struct spu_context *ctx = spu->ctx; 17 17 ··· 24 24 */ 25 25 if (ctx) { 26 26 /* Copy exception arguments into module specific structure */ 27 - ctx->csa.class_0_pending = spu->class_0_pending; 28 - ctx->csa.dsisr = spu->dsisr; 29 - ctx->csa.dar = spu->dar; 27 + switch(irq) { 28 + case 0 : 29 + ctx->csa.class_0_pending = spu->class_0_pending; 30 + ctx->csa.class_0_dsisr = spu->class_0_dsisr; 31 + ctx->csa.class_0_dar = spu->class_0_dar; 32 + break; 33 + case 1 : 34 + ctx->csa.class_1_dsisr = spu->class_1_dsisr; 35 + ctx->csa.class_1_dar = spu->class_1_dar; 36 + break; 37 + case 2 : 38 + break; 39 + } 30 40 31 41 /* ensure that the exception status has hit memory before a 32 42 * thread waiting on the context's stop queue is woken */ ··· 44 34 45 35 wake_up_all(&ctx->stop_wq); 46 36 } 47 - 48 - /* Clear callback arguments from spu structure */ 49 - spu->class_0_pending = 0; 50 - spu->dsisr = 0; 51 - spu->dar = 0; 52 37 } 53 38 54 39 int spu_stopped(struct spu_context *ctx, u32 *stat) ··· 61 56 if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped)) 62 57 return 1; 63 58 64 - dsisr = ctx->csa.dsisr; 59 + dsisr = ctx->csa.class_0_dsisr; 60 + if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) 61 + return 1; 62 + 63 + dsisr = ctx->csa.class_1_dsisr; 65 64 if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) 66 65 return 1; 67 66 ··· 303 294 u32 ls_pointer, npc; 304 295 void __iomem *ls; 305 296 long spu_ret; 306 - int ret, ret2; 297 + int ret; 307 298 308 299 /* get syscall block from local store */ 309 300 npc = ctx->ops->npc_read(ctx) & ~3; ··· 325 316 if (spu_ret <= -ERESTARTSYS) { 326 317 ret = spu_handle_restartsys(ctx, &spu_ret, &npc); 327 318 } 328 - ret2 = spu_acquire(ctx); 319 + mutex_lock(&ctx->state_mutex); 329 320 if (ret == 
-ERESTARTSYS) 330 321 return ret; 331 - if (ret2) 332 - return -EINTR; 333 322 } 334 323 335 324 /* need to re-get the ls, as it may have changed when we released the ··· 350 343 if (mutex_lock_interruptible(&ctx->run_mutex)) 351 344 return -ERESTARTSYS; 352 345 353 - spu_enable_spu(ctx); 354 346 ctx->event_return = 0; 355 347 356 348 ret = spu_acquire(ctx); 357 349 if (ret) 358 350 goto out_unlock; 351 + 352 + spu_enable_spu(ctx); 359 353 360 354 spu_update_sched_info(ctx); 361 355
+5 -2
arch/powerpc/platforms/cell/spufs/sched.c
··· 140 140 * if it is timesliced or preempted. 141 141 */ 142 142 ctx->cpus_allowed = current->cpus_allowed; 143 + 144 + /* Save the current cpu id for spu interrupt routing. */ 145 + ctx->last_ran = raw_smp_processor_id(); 143 146 } 144 147 145 148 void spu_update_sched_info(struct spu_context *ctx) ··· 246 243 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); 247 244 spu_restore(&ctx->csa, spu); 248 245 spu->timestamp = jiffies; 249 - spu_cpu_affinity_set(spu, raw_smp_processor_id()); 250 246 spu_switch_notify(spu, ctx); 251 247 ctx->state = SPU_STATE_RUNNABLE; 252 248 ··· 659 657 660 658 victim->stats.invol_ctx_switch++; 661 659 spu->stats.invol_ctx_switch++; 662 - spu_add_to_rq(victim); 660 + if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) 661 + spu_add_to_rq(victim); 663 662 664 663 mutex_unlock(&victim->state_mutex); 665 664
+2 -1
arch/powerpc/platforms/cell/spufs/spufs.h
··· 121 121 cpumask_t cpus_allowed; 122 122 int policy; 123 123 int prio; 124 + int last_ran; 124 125 125 126 /* statistics */ 126 127 struct { ··· 332 331 /* irq callback funcs. */ 333 332 void spufs_ibox_callback(struct spu *spu); 334 333 void spufs_wbox_callback(struct spu *spu); 335 - void spufs_stop_callback(struct spu *spu); 334 + void spufs_stop_callback(struct spu *spu, int irq); 336 335 void spufs_mfc_callback(struct spu *spu); 337 336 void spufs_dma_callback(struct spu *spu, int type); 338 337
+49 -22
arch/powerpc/platforms/cell/spufs/switch.c
··· 132 132 spu_int_mask_set(spu, 2, 0ul); 133 133 eieio(); 134 134 spin_unlock_irq(&spu->register_lock); 135 + 136 + /* 137 + * This flag needs to be set before calling synchronize_irq so 138 + * that the update will be visible to the relevant handlers 139 + * via a simple load. 140 + */ 141 + set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); 142 + clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); 135 143 synchronize_irq(spu->irqs[0]); 136 144 synchronize_irq(spu->irqs[1]); 137 145 synchronize_irq(spu->irqs[2]); ··· 174 166 /* Save, Step 7: 175 167 * Restore, Step 5: 176 168 * Set a software context switch pending flag. 169 + * Done above in Step 3 - disable_interrupts(). 177 170 */ 178 - set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); 179 - mb(); 180 171 } 181 172 182 173 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) ··· 193 186 MFC_CNTL_SUSPEND_COMPLETE); 194 187 /* fall through */ 195 188 case MFC_CNTL_SUSPEND_COMPLETE: 196 - if (csa) { 189 + if (csa) 197 190 csa->priv2.mfc_control_RW = 198 - MFC_CNTL_SUSPEND_MASK | 191 + in_be64(&priv2->mfc_control_RW) | 199 192 MFC_CNTL_SUSPEND_DMA_QUEUE; 200 - } 201 193 break; 202 194 case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: 203 195 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); 204 196 POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 205 197 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == 206 198 MFC_CNTL_SUSPEND_COMPLETE); 207 - if (csa) { 208 - csa->priv2.mfc_control_RW = 0; 209 - } 199 + if (csa) 200 + csa->priv2.mfc_control_RW = 201 + in_be64(&priv2->mfc_control_RW) & 202 + ~MFC_CNTL_SUSPEND_DMA_QUEUE & 203 + ~MFC_CNTL_SUSPEND_MASK; 210 204 break; 211 205 } 212 206 } ··· 257 249 } 258 250 } 259 251 260 - static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu) 252 + static inline void save_mfc_stopped_status(struct spu_state *csa, 253 + struct spu *spu) 261 254 { 262 255 struct spu_priv2 __iomem *priv2 = spu->priv2; 256 + const u64 mask = MFC_CNTL_DECREMENTER_RUNNING 
| 257 + MFC_CNTL_DMA_QUEUES_EMPTY; 263 258 264 259 /* Save, Step 12: 265 260 * Read MFC_CNTL[Ds]. Update saved copy of 266 261 * CSA.MFC_CNTL[Ds]. 262 + * 263 + * update: do the same with MFC_CNTL[Q]. 267 264 */ 268 - csa->priv2.mfc_control_RW |= 269 - in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING; 265 + csa->priv2.mfc_control_RW &= ~mask; 266 + csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; 270 267 } 271 268 272 269 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) ··· 475 462 * Restore, Step 14. 476 463 * Write MFC_CNTL[Pc]=1 (purge queue). 477 464 */ 478 - out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST); 465 + out_be64(&priv2->mfc_control_RW, 466 + MFC_CNTL_PURGE_DMA_REQUEST | 467 + MFC_CNTL_SUSPEND_MASK); 479 468 eieio(); 480 469 } 481 470 ··· 740 725 /* Save, Step 48: 741 726 * Restore, Step 23. 742 727 * Change the software context switch pending flag 743 - * to context switch active. 728 + * to context switch active. This implementation does 729 + * not uses a switch active flag. 744 730 * 745 - * This implementation does not uses a switch active flag. 731 + * Now that we have saved the mfc in the csa, we can add in the 732 + * restart command if an exception occurred. 
746 733 */ 734 + if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags)) 735 + csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; 747 736 clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); 748 737 mb(); 749 738 } ··· 1709 1690 eieio(); 1710 1691 } 1711 1692 1693 + static inline void set_int_route(struct spu_state *csa, struct spu *spu) 1694 + { 1695 + struct spu_context *ctx = spu->ctx; 1696 + 1697 + spu_cpu_affinity_set(spu, ctx->last_ran); 1698 + } 1699 + 1712 1700 static inline void restore_other_spu_access(struct spu_state *csa, 1713 1701 struct spu *spu) 1714 1702 { ··· 1747 1721 */ 1748 1722 out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); 1749 1723 eieio(); 1724 + 1750 1725 /* 1751 - * FIXME: this is to restart a DMA that we were processing 1752 - * before the save. better remember the fault information 1753 - * in the csa instead. 1726 + * The queue is put back into the same state that was evident prior to 1727 + * the context switch. The suspend flag is added to the saved state in 1728 + * the csa, if the operational state was suspending or suspended. In 1729 + * this case, the code that suspended the mfc is responsible for 1730 + * continuing it. Note that SPE faults do not change the operational 1731 + * state of the spu. 1754 1732 */ 1755 - if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) { 1756 - out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); 1757 - eieio(); 1758 - } 1759 1733 } 1760 1734 1761 1735 static inline void enable_user_access(struct spu_state *csa, struct spu *spu) ··· 1814 1788 save_spu_runcntl(prev, spu); /* Step 9. */ 1815 1789 save_mfc_sr1(prev, spu); /* Step 10. */ 1816 1790 save_spu_status(prev, spu); /* Step 11. */ 1817 - save_mfc_decr(prev, spu); /* Step 12. */ 1791 + save_mfc_stopped_status(prev, spu); /* Step 12. */ 1818 1792 halt_mfc_decr(prev, spu); /* Step 13. */ 1819 1793 save_timebase(prev, spu); /* Step 14. */ 1820 1794 remove_other_spu_access(prev, spu); /* Step 15. 
*/ ··· 2026 2000 check_ppuint_mb_stat(next, spu); /* Step 67. */ 2027 2001 spu_invalidate_slbs(spu); /* Modified Step 68. */ 2028 2002 restore_mfc_sr1(next, spu); /* Step 69. */ 2003 + set_int_route(next, spu); /* NEW */ 2029 2004 restore_other_spu_access(next, spu); /* Step 70. */ 2030 2005 restore_spu_runcntl(next, spu); /* Step 71. */ 2031 2006 restore_mfc_cntl(next, spu); /* Step 72. */
+131 -49
arch/powerpc/sysdev/ppc4xx_pci.c
··· 1387 1387 resource_size_t size = res->end - res->start + 1; 1388 1388 u64 sa; 1389 1389 1390 - /* Calculate window size */ 1391 - sa = (0xffffffffffffffffull << ilog2(size));; 1392 - if (res->flags & IORESOURCE_PREFETCH) 1393 - sa |= 0x8; 1390 + if (port->endpoint) { 1391 + resource_size_t ep_addr = 0; 1392 + resource_size_t ep_size = 32 << 20; 1394 1393 1395 - out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); 1396 - out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa)); 1394 + /* Currently we map a fixed 64MByte window to PLB address 1395 + * 0 (SDRAM). This should probably be configurable via a dts 1396 + * property. 1397 + */ 1397 1398 1398 - /* The setup of the split looks weird to me ... let's see if it works */ 1399 - out_le32(mbase + PECFG_PIM0LAL, 0x00000000); 1400 - out_le32(mbase + PECFG_PIM0LAH, 0x00000000); 1401 - out_le32(mbase + PECFG_PIM1LAL, 0x00000000); 1402 - out_le32(mbase + PECFG_PIM1LAH, 0x00000000); 1403 - out_le32(mbase + PECFG_PIM01SAH, 0xffff0000); 1404 - out_le32(mbase + PECFG_PIM01SAL, 0x00000000); 1399 + /* Calculate window size */ 1400 + sa = (0xffffffffffffffffull << ilog2(ep_size));; 1401 + 1402 + /* Setup BAR0 */ 1403 + out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); 1404 + out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) | 1405 + PCI_BASE_ADDRESS_MEM_TYPE_64); 1406 + 1407 + /* Disable BAR1 & BAR2 */ 1408 + out_le32(mbase + PECFG_BAR1MPA, 0); 1409 + out_le32(mbase + PECFG_BAR2HMPA, 0); 1410 + out_le32(mbase + PECFG_BAR2LMPA, 0); 1411 + 1412 + out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa)); 1413 + out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa)); 1414 + 1415 + out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr)); 1416 + out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr)); 1417 + } else { 1418 + /* Calculate window size */ 1419 + sa = (0xffffffffffffffffull << ilog2(size));; 1420 + if (res->flags & IORESOURCE_PREFETCH) 1421 + sa |= 0x8; 1422 + 1423 + out_le32(mbase + PECFG_BAR0HMPA, 
RES_TO_U32_HIGH(sa)); 1424 + out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa)); 1425 + 1426 + /* The setup of the split looks weird to me ... let's see 1427 + * if it works 1428 + */ 1429 + out_le32(mbase + PECFG_PIM0LAL, 0x00000000); 1430 + out_le32(mbase + PECFG_PIM0LAH, 0x00000000); 1431 + out_le32(mbase + PECFG_PIM1LAL, 0x00000000); 1432 + out_le32(mbase + PECFG_PIM1LAH, 0x00000000); 1433 + out_le32(mbase + PECFG_PIM01SAH, 0xffff0000); 1434 + out_le32(mbase + PECFG_PIM01SAL, 0x00000000); 1435 + 1436 + out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); 1437 + out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); 1438 + } 1405 1439 1406 1440 /* Enable inbound mapping */ 1407 1441 out_le32(mbase + PECFG_PIMEN, 0x1); 1408 - 1409 - out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); 1410 - out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); 1411 1442 1412 1443 /* Enable I/O, Mem, and Busmaster cycles */ 1413 1444 out_le16(mbase + PCI_COMMAND, ··· 1453 1422 const int *bus_range; 1454 1423 int primary = 0, busses; 1455 1424 void __iomem *mbase = NULL, *cfg_data = NULL; 1456 - 1457 - /* XXX FIXME: Handle endpoint mode properly */ 1458 - if (port->endpoint) { 1459 - printk(KERN_WARNING "PCIE%d: Port in endpoint mode !\n", 1460 - port->index); 1461 - return; 1462 - } 1425 + const u32 *pval; 1426 + u32 val; 1463 1427 1464 1428 /* Check if primary bridge */ 1465 1429 if (of_get_property(port->node, "primary", NULL)) ··· 1488 1462 hose->last_busno = hose->first_busno + busses; 1489 1463 } 1490 1464 1491 - /* We map the external config space in cfg_data and the host config 1492 - * space in cfg_addr. External space is 1M per bus, internal space 1493 - * is 4K 1465 + if (!port->endpoint) { 1466 + /* Only map the external config space in cfg_data for 1467 + * PCIe root-complexes. 
External space is 1M per bus 1468 + */ 1469 + cfg_data = ioremap(port->cfg_space.start + 1470 + (hose->first_busno + 1) * 0x100000, 1471 + busses * 0x100000); 1472 + if (cfg_data == NULL) { 1473 + printk(KERN_ERR "%s: Can't map external config space !", 1474 + port->node->full_name); 1475 + goto fail; 1476 + } 1477 + hose->cfg_data = cfg_data; 1478 + } 1479 + 1480 + /* Always map the host config space in cfg_addr. 1481 + * Internal space is 4K 1494 1482 */ 1495 - cfg_data = ioremap(port->cfg_space.start + 1496 - (hose->first_busno + 1) * 0x100000, 1497 - busses * 0x100000); 1498 1483 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); 1499 - if (cfg_data == NULL || mbase == NULL) { 1500 - printk(KERN_ERR "%s: Can't map config space !", 1484 + if (mbase == NULL) { 1485 + printk(KERN_ERR "%s: Can't map internal config space !", 1501 1486 port->node->full_name); 1502 1487 goto fail; 1503 1488 } 1504 - 1505 - hose->cfg_data = cfg_data; 1506 1489 hose->cfg_addr = mbase; 1507 1490 1508 1491 pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name, ··· 1524 1489 port->hose = hose; 1525 1490 mbase = (void __iomem *)hose->cfg_addr; 1526 1491 1527 - /* 1528 - * Set bus numbers on our root port 1529 - */ 1530 - out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno); 1531 - out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1); 1532 - out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno); 1492 + if (!port->endpoint) { 1493 + /* 1494 + * Set bus numbers on our root port 1495 + */ 1496 + out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno); 1497 + out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1); 1498 + out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno); 1499 + } 1533 1500 1534 1501 /* 1535 1502 * OMRs are already reset, also disable PIMs ··· 1552 1515 ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window); 1553 1516 1554 1517 /* The root complex doesn't show up if we don't set some vendor 1555 - * and device IDs into it. 
Those are the same bogus one that the 1556 - * initial code in arch/ppc add. We might want to change that. 1518 + * and device IDs into it. The defaults below are the same bogus 1519 + * one that the initial code in arch/ppc had. This can be 1520 + * overwritten by setting the "vendor-id/device-id" properties 1521 + * in the pciex node. 1557 1522 */ 1558 - out_le16(mbase + 0x200, 0xaaa0 + port->index); 1559 - out_le16(mbase + 0x202, 0xbed0 + port->index); 1560 1523 1561 - /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */ 1562 - out_le32(mbase + 0x208, 0x06040001); 1524 + /* Get the (optional) vendor-/device-id from the device-tree */ 1525 + pval = of_get_property(port->node, "vendor-id", NULL); 1526 + if (pval) { 1527 + val = *pval; 1528 + } else { 1529 + if (!port->endpoint) 1530 + val = 0xaaa0 + port->index; 1531 + else 1532 + val = 0xeee0 + port->index; 1533 + } 1534 + out_le16(mbase + 0x200, val); 1563 1535 1564 - printk(KERN_INFO "PCIE%d: successfully set as root-complex\n", 1565 - port->index); 1536 + pval = of_get_property(port->node, "device-id", NULL); 1537 + if (pval) { 1538 + val = *pval; 1539 + } else { 1540 + if (!port->endpoint) 1541 + val = 0xbed0 + port->index; 1542 + else 1543 + val = 0xfed0 + port->index; 1544 + } 1545 + out_le16(mbase + 0x202, val); 1546 + 1547 + if (!port->endpoint) { 1548 + /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */ 1549 + out_le32(mbase + 0x208, 0x06040001); 1550 + 1551 + printk(KERN_INFO "PCIE%d: successfully set as root-complex\n", 1552 + port->index); 1553 + } else { 1554 + /* Set Class Code to Processor/PPC */ 1555 + out_le32(mbase + 0x208, 0x0b200001); 1556 + 1557 + printk(KERN_INFO "PCIE%d: successfully set as endpoint\n", 1558 + port->index); 1559 + } 1560 + 1566 1561 return; 1567 1562 fail: 1568 1563 if (hose) ··· 1611 1542 const u32 *pval; 1612 1543 int portno; 1613 1544 unsigned int dcrs; 1545 + const char *val; 1614 1546 1615 1547 /* First, proceed to core initialization as we assume 
there's 1616 1548 * only one PCIe core in the system ··· 1643 1573 } 1644 1574 port->sdr_base = *pval; 1645 1575 1646 - /* XXX Currently, we only support root complex mode */ 1647 - port->endpoint = 0; 1576 + /* Check if device_type property is set to "pci" or "pci-endpoint". 1577 + * Resulting from this setup this PCIe port will be configured 1578 + * as root-complex or as endpoint. 1579 + */ 1580 + val = of_get_property(port->node, "device_type", NULL); 1581 + if (!strcmp(val, "pci-endpoint")) { 1582 + port->endpoint = 1; 1583 + } else if (!strcmp(val, "pci")) { 1584 + port->endpoint = 0; 1585 + } else { 1586 + printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n", 1587 + np->full_name); 1588 + return; 1589 + } 1648 1590 1649 1591 /* Fetch config space registers address */ 1650 1592 if (of_address_to_resource(np, 0, &port->cfg_space)) {
+4 -2
arch/powerpc/xmon/xmon.c
··· 2842 2842 DUMP_FIELD(spu, "0x%lx", ls_size); 2843 2843 DUMP_FIELD(spu, "0x%x", node); 2844 2844 DUMP_FIELD(spu, "0x%lx", flags); 2845 - DUMP_FIELD(spu, "0x%lx", dar); 2846 - DUMP_FIELD(spu, "0x%lx", dsisr); 2847 2845 DUMP_FIELD(spu, "%d", class_0_pending); 2846 + DUMP_FIELD(spu, "0x%lx", class_0_dar); 2847 + DUMP_FIELD(spu, "0x%lx", class_0_dsisr); 2848 + DUMP_FIELD(spu, "0x%lx", class_1_dar); 2849 + DUMP_FIELD(spu, "0x%lx", class_1_dsisr); 2848 2850 DUMP_FIELD(spu, "0x%lx", irqs[0]); 2849 2851 DUMP_FIELD(spu, "0x%lx", irqs[1]); 2850 2852 DUMP_FIELD(spu, "0x%lx", irqs[2]);
+7
include/asm-powerpc/pgtable-ppc32.h
··· 209 209 * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 210 210 * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR 211 211 * 212 + * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional 213 + * TLB2 storage attibute fields. Those are: 214 + * 215 + * TLB2: 216 + * 0...10 11 12 13 14 15 16...31 217 + * no change WL1 IL1I IL1D IL2I IL2D no change 218 + * 212 219 * There are some constrains and options, to decide mapping software bits 213 220 * into TLB entry. 214 221 *
+6 -3
include/asm-powerpc/spu.h
··· 100 100 101 101 /* Flag indicating progress during context switch. */ 102 102 #define SPU_CONTEXT_SWITCH_PENDING 0UL 103 + #define SPU_CONTEXT_FAULT_PENDING 1UL 103 104 104 105 struct spu_context; 105 106 struct spu_runqueue; ··· 129 128 unsigned int irqs[3]; 130 129 u32 node; 131 130 u64 flags; 132 - u64 dar; 133 - u64 dsisr; 134 131 u64 class_0_pending; 132 + u64 class_0_dar; 133 + u64 class_0_dsisr; 134 + u64 class_1_dar; 135 + u64 class_1_dsisr; 135 136 size_t ls_size; 136 137 unsigned int slb_replace; 137 138 struct mm_struct *mm; ··· 146 143 147 144 void (* wbox_callback)(struct spu *spu); 148 145 void (* ibox_callback)(struct spu *spu); 149 - void (* stop_callback)(struct spu *spu); 146 + void (* stop_callback)(struct spu *spu, int irq); 150 147 void (* mfc_callback)(struct spu *spu); 151 148 152 149 char irq_c0[8];
+2 -1
include/asm-powerpc/spu_csa.h
··· 254 254 u64 spu_chnldata_RW[32]; 255 255 u32 spu_mailbox_data[4]; 256 256 u32 pu_mailbox_data[1]; 257 - u64 dar, dsisr, class_0_pending; 257 + u64 class_0_dar, class_0_dsisr, class_0_pending; 258 + u64 class_1_dar, class_1_dsisr; 258 259 unsigned long suspend_time; 259 260 spinlock_t register_lock; 260 261 };
-4
include/linux/of_i2c.h
··· 14 14 15 15 #include <linux/i2c.h> 16 16 17 - #ifdef CONFIG_OF_I2C 18 - 19 17 void of_register_i2c_devices(struct i2c_adapter *adap, 20 18 struct device_node *adap_node); 21 - 22 - #endif /* CONFIG_OF_I2C */ 23 19 24 20 #endif /* __LINUX_OF_I2C_H */