Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (25 commits)
powerpc: Disable 64K hugetlb support when doing 64K SPU mappings
powerpc/powermac: Fixup default serial port device for pmac_zilog
powerpc/powermac: Use sane default baudrate for SCC debugging
powerpc/mm: Implement _PAGE_SPECIAL & pte_special() for 64-bit
powerpc: Show processor cache information in sysfs
powerpc: Make core id information available to userspace
powerpc: Make core sibling information available to userspace
powerpc/vio: More fallout from dma_mapping_error API change
ibmveth: Fix multiple errors with dma_mapping_error conversion
powerpc/pseries: Fix CMO sysdev attribute API change fallout
powerpc: Enable tracehook for the architecture
powerpc: Add TIF_NOTIFY_RESUME support for tracehook
powerpc: Add asm/syscall.h with the tracehook entry points
powerpc: Make syscall tracing use tracehook.h helpers
powerpc: Call tracehook_signal_handler() when setting up signal frames
powerpc: Update cpu_sibling_maps dynamically
powerpc: register_cpu_online should be __cpuinit
powerpc: kill useless SMT code in prom_hold_cpus
powerpc: Fix 8xx build failure
powerpc: Fix vio build warnings
...

+1040 -261
+1
arch/powerpc/Kconfig
··· 117 select HAVE_KPROBES 118 select HAVE_ARCH_KGDB 119 select HAVE_KRETPROBES 120 select HAVE_LMB 121 select HAVE_DMA_ATTRS if PPC64 122 select USE_GENERIC_SMP_HELPERS if SMP
··· 117 select HAVE_KPROBES 118 select HAVE_ARCH_KGDB 119 select HAVE_KRETPROBES 120 + select HAVE_ARCH_TRACEHOOK 121 select HAVE_LMB 122 select HAVE_DMA_ATTRS if PPC64 123 select USE_GENERIC_SMP_HELPERS if SMP
+11 -6
arch/powerpc/kernel/entry_32.S
··· 148 /* Check to see if the dbcr0 register is set up to debug. Use the 149 internal debug mode bit to do this. */ 150 lwz r12,THREAD_DBCR0(r12) 151 - andis. r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h 152 beq+ 3f 153 /* From user and task is ptraced - load up global dbcr0 */ 154 li r12,-1 /* clear all pending debug events */ ··· 292 /* If the process has its own DBCR0 value, load it up. The internal 293 debug mode bit tells us that dbcr0 should be loaded. */ 294 lwz r0,THREAD+THREAD_DBCR0(r2) 295 - andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h 296 bnel- load_dbcr0 297 #endif 298 #ifdef CONFIG_44x ··· 343 stw r0,_TRAP(r1) 344 addi r3,r1,STACK_FRAME_OVERHEAD 345 bl do_syscall_trace_enter 346 - lwz r0,GPR0(r1) /* Restore original registers */ 347 lwz r3,GPR3(r1) 348 lwz r4,GPR4(r1) 349 lwz r5,GPR5(r1) ··· 725 /* Check whether this process has its own DBCR0 value. The internal 726 debug mode bit tells us that dbcr0 should be loaded. */ 727 lwz r0,THREAD+THREAD_DBCR0(r2) 728 - andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h 729 bnel- load_dbcr0 730 #endif 731 ··· 1060 SAVE_NVGPRS(r1) 1061 rlwinm r3,r3,0,0,30 1062 stw r3,_TRAP(r1) 1063 - 2: li r3,0 1064 - addi r4,r1,STACK_FRAME_OVERHEAD 1065 bl do_signal 1066 REST_NVGPRS(r1) 1067 b recheck
··· 148 /* Check to see if the dbcr0 register is set up to debug. Use the 149 internal debug mode bit to do this. */ 150 lwz r12,THREAD_DBCR0(r12) 151 + andis. r12,r12,DBCR0_IDM@h 152 beq+ 3f 153 /* From user and task is ptraced - load up global dbcr0 */ 154 li r12,-1 /* clear all pending debug events */ ··· 292 /* If the process has its own DBCR0 value, load it up. The internal 293 debug mode bit tells us that dbcr0 should be loaded. */ 294 lwz r0,THREAD+THREAD_DBCR0(r2) 295 + andis. r10,r0,DBCR0_IDM@h 296 bnel- load_dbcr0 297 #endif 298 #ifdef CONFIG_44x ··· 343 stw r0,_TRAP(r1) 344 addi r3,r1,STACK_FRAME_OVERHEAD 345 bl do_syscall_trace_enter 346 + /* 347 + * Restore argument registers possibly just changed. 348 + * We use the return value of do_syscall_trace_enter 349 + * for call number to look up in the table (r0). 350 + */ 351 + mr r0,r3 352 lwz r3,GPR3(r1) 353 lwz r4,GPR4(r1) 354 lwz r5,GPR5(r1) ··· 720 /* Check whether this process has its own DBCR0 value. The internal 721 debug mode bit tells us that dbcr0 should be loaded. */ 722 lwz r0,THREAD+THREAD_DBCR0(r2) 723 + andis. r10,r0,DBCR0_IDM@h 724 bnel- load_dbcr0 725 #endif 726 ··· 1055 SAVE_NVGPRS(r1) 1056 rlwinm r3,r3,0,0,30 1057 stw r3,_TRAP(r1) 1058 + 2: addi r3,r1,STACK_FRAME_OVERHEAD 1059 + mr r4,r9 1060 bl do_signal 1061 REST_NVGPRS(r1) 1062 b recheck
+7 -3
arch/powerpc/kernel/entry_64.S
··· 214 bl .save_nvgprs 215 addi r3,r1,STACK_FRAME_OVERHEAD 216 bl .do_syscall_trace_enter 217 - ld r0,GPR0(r1) /* Restore original registers */ 218 ld r3,GPR3(r1) 219 ld r4,GPR4(r1) 220 ld r5,GPR5(r1) ··· 643 b .ret_from_except_lite 644 645 1: bl .save_nvgprs 646 - li r3,0 647 - addi r4,r1,STACK_FRAME_OVERHEAD 648 bl .do_signal 649 b .ret_from_except 650
··· 214 bl .save_nvgprs 215 addi r3,r1,STACK_FRAME_OVERHEAD 216 bl .do_syscall_trace_enter 217 + /* 218 + * Restore argument registers possibly just changed. 219 + * We use the return value of do_syscall_trace_enter 220 + * for the call number to look up in the table (r0). 221 + */ 222 + mr r0,r3 223 ld r3,GPR3(r1) 224 ld r4,GPR4(r1) 225 ld r5,GPR5(r1) ··· 638 b .ret_from_except_lite 639 640 1: bl .save_nvgprs 641 + addi r3,r1,STACK_FRAME_OVERHEAD 642 bl .do_signal 643 b .ret_from_except 644
+18 -28
arch/powerpc/kernel/legacy_serial.c
··· 493 device_initcall(serial_dev_init); 494 495 496 /* 497 * This is called very early, as part of console_init() (typically just after 498 * time_init()). This function is responsible for trying to find a good 499 * default console on serial ports. It tries to match the open firmware 500 - * default output with one of the available serial console drivers, either 501 - * one of the platform serial ports that have been probed earlier by 502 - * find_legacy_serial_ports() or some more platform specific ones. 503 */ 504 static int __init check_legacy_serial_console(void) 505 { 506 struct device_node *prom_stdout = NULL; 507 - int speed = 0, offset = 0; 508 const char *name; 509 const u32 *spd; 510 ··· 548 if (spd) 549 speed = *spd; 550 551 - if (0) 552 - ; 553 - #ifdef CONFIG_SERIAL_8250_CONSOLE 554 - else if (strcmp(name, "serial") == 0) { 555 - int i; 556 - /* Look for it in probed array */ 557 - for (i = 0; i < legacy_serial_count; i++) { 558 - if (prom_stdout != legacy_serial_infos[i].np) 559 - continue; 560 - offset = i; 561 - speed = legacy_serial_infos[i].speed; 562 - break; 563 - } 564 - if (i >= legacy_serial_count) 565 - goto not_found; 566 - } 567 - #endif /* CONFIG_SERIAL_8250_CONSOLE */ 568 - #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE 569 - else if (strcmp(name, "ch-a") == 0) 570 - offset = 0; 571 - else if (strcmp(name, "ch-b") == 0) 572 - offset = 1; 573 - #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ 574 - else 575 goto not_found; 576 of_node_put(prom_stdout); 577 578 DBG("Found serial console at ttyS%d\n", offset); ··· 580 } 581 console_initcall(check_legacy_serial_console); 582
··· 493 device_initcall(serial_dev_init); 494 495 496 + #ifdef CONFIG_SERIAL_8250_CONSOLE 497 /* 498 * This is called very early, as part of console_init() (typically just after 499 * time_init()). This function is responsible for trying to find a good 500 * default console on serial ports. It tries to match the open firmware 501 + * default output with one of the available serial console drivers that have 502 + * been probed earlier by find_legacy_serial_ports() 503 */ 504 static int __init check_legacy_serial_console(void) 505 { 506 struct device_node *prom_stdout = NULL; 507 + int i, speed = 0, offset = 0; 508 const char *name; 509 const u32 *spd; 510 ··· 548 if (spd) 549 speed = *spd; 550 551 + if (strcmp(name, "serial") != 0) 552 goto not_found; 553 + 554 /* Look for it in probed array */ 555 + for (i = 0; i < legacy_serial_count; i++) { 556 + if (prom_stdout != legacy_serial_infos[i].np) 557 + continue; 558 + offset = i; 559 + speed = legacy_serial_infos[i].speed; 560 + break; 561 + } 562 + if (i >= legacy_serial_count) 563 + goto not_found; 564 + 565 of_node_put(prom_stdout); 566 567 DBG("Found serial console at ttyS%d\n", offset); ··· 591 } 592 console_initcall(check_legacy_serial_console); 593 594 + #endif /* CONFIG_SERIAL_8250_CONSOLE */
+4 -4
arch/powerpc/kernel/process.c
··· 254 return; 255 256 /* Clear the DAC and struct entries. One shot trigger */ 257 - #if (defined(CONFIG_44x) || defined(CONFIG_BOOKE)) 258 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W 259 | DBCR0_IDM)); 260 #endif ··· 286 mtspr(SPRN_DABR, dabr); 287 #endif 288 289 - #if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 290 mtspr(SPRN_DAC1, dabr); 291 #endif 292 ··· 373 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 374 set_dabr(new->thread.dabr); 375 376 - #if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 377 /* If new thread DAC (HW breakpoint) is the same then leave it */ 378 if (new->thread.dabr) 379 set_dabr(new->thread.dabr); ··· 568 current->thread.dabr = 0; 569 set_dabr(0); 570 571 - #if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 572 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W); 573 #endif 574 }
··· 254 return; 255 256 /* Clear the DAC and struct entries. One shot trigger */ 257 + #if defined(CONFIG_BOOKE) 258 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W 259 | DBCR0_IDM)); 260 #endif ··· 286 mtspr(SPRN_DABR, dabr); 287 #endif 288 289 + #if defined(CONFIG_BOOKE) 290 mtspr(SPRN_DAC1, dabr); 291 #endif 292 ··· 373 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 374 set_dabr(new->thread.dabr); 375 376 + #if defined(CONFIG_BOOKE) 377 /* If new thread DAC (HW breakpoint) is the same then leave it */ 378 if (new->thread.dabr) 379 set_dabr(new->thread.dabr); ··· 568 current->thread.dabr = 0; 569 set_dabr(0); 570 571 + #if defined(CONFIG_BOOKE) 572 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W); 573 #endif 574 }
+3 -36
arch/powerpc/kernel/prom_init.c
··· 205 static cell_t __initdata regbuf[1024]; 206 207 208 - #define MAX_CPU_THREADS 2 209 - 210 /* 211 * Error results ... some OF calls will return "-1" on error, some 212 * will return 0, some will return either. To simplify, here are ··· 1337 unsigned int reg; 1338 phandle node; 1339 char type[64]; 1340 - int cpuid = 0; 1341 - unsigned int interrupt_server[MAX_CPU_THREADS]; 1342 - unsigned int cpu_threads, hw_cpu_num; 1343 - int propsize; 1344 struct prom_t *_prom = &RELOC(prom); 1345 unsigned long *spinloop 1346 = (void *) LOW_ADDR(__secondary_hold_spinloop); ··· 1380 reg = -1; 1381 prom_getprop(node, "reg", &reg, sizeof(reg)); 1382 1383 - prom_debug("\ncpuid = 0x%x\n", cpuid); 1384 prom_debug("cpu hw idx = 0x%x\n", reg); 1385 1386 /* Init the acknowledge var which will be reset by ··· 1388 */ 1389 *acknowledge = (unsigned long)-1; 1390 1391 - propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s", 1392 - &interrupt_server, 1393 - sizeof(interrupt_server)); 1394 - if (propsize < 0) { 1395 - /* no property. old hardware has no SMT */ 1396 - cpu_threads = 1; 1397 - interrupt_server[0] = reg; /* fake it with phys id */ 1398 - } else { 1399 - /* We have a threaded processor */ 1400 - cpu_threads = propsize / sizeof(u32); 1401 - if (cpu_threads > MAX_CPU_THREADS) { 1402 - prom_printf("SMT: too many threads!\n" 1403 - "SMT: found %x, max is %x\n", 1404 - cpu_threads, MAX_CPU_THREADS); 1405 - cpu_threads = 1; /* ToDo: panic? */ 1406 - } 1407 - } 1408 - 1409 - hw_cpu_num = interrupt_server[0]; 1410 - if (hw_cpu_num != _prom->cpu) { 1411 /* Primary Thread of non-boot cpu */ 1412 - prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg); 1413 call_prom("start-cpu", 3, 0, node, 1414 secondary_hold, reg); 1415 ··· 1405 } 1406 #ifdef CONFIG_SMP 1407 else 1408 - prom_printf("%x : boot cpu %x\n", cpuid, reg); 1409 #endif /* CONFIG_SMP */ 1410 - 1411 - /* Reserve cpu #s for secondary threads. They start later. */ 1412 - cpuid += cpu_threads; 1413 } 1414 - 1415 - if (cpuid > NR_CPUS) 1416 - prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS) 1417 - ") exceeded: ignoring extras\n"); 1418 1419 prom_debug("prom_hold_cpus: end...\n"); 1420 }
··· 205 static cell_t __initdata regbuf[1024]; 206 207 208 /* 209 * Error results ... some OF calls will return "-1" on error, some 210 * will return 0, some will return either. To simplify, here are ··· 1339 unsigned int reg; 1340 phandle node; 1341 char type[64]; 1342 struct prom_t *_prom = &RELOC(prom); 1343 unsigned long *spinloop 1344 = (void *) LOW_ADDR(__secondary_hold_spinloop); ··· 1386 reg = -1; 1387 prom_getprop(node, "reg", &reg, sizeof(reg)); 1388 1389 prom_debug("cpu hw idx = 0x%x\n", reg); 1390 1391 /* Init the acknowledge var which will be reset by ··· 1395 */ 1396 *acknowledge = (unsigned long)-1; 1397 1398 + if (reg != _prom->cpu) { 1399 /* Primary Thread of non-boot cpu */ 1400 + prom_printf("starting cpu hw idx %x... ", reg); 1401 call_prom("start-cpu", 3, 0, node, 1402 secondary_hold, reg); 1403 ··· 1431 } 1432 #ifdef CONFIG_SMP 1433 else 1434 + prom_printf("boot cpu hw idx %x\n", reg); 1435 #endif /* CONFIG_SMP */ 1436 } 1437 1438 prom_debug("prom_hold_cpus: end...\n"); 1439 }
+26 -28
arch/powerpc/kernel/ptrace.c
··· 22 #include <linux/errno.h> 23 #include <linux/ptrace.h> 24 #include <linux/regset.h> 25 #include <linux/elf.h> 26 #include <linux/user.h> 27 #include <linux/security.h> ··· 718 struct pt_regs *regs = task->thread.regs; 719 720 721 - #if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 722 /* If DAC then do not single step, skip */ 723 if (task->thread.dabr) 724 return; ··· 745 if (addr > 0) 746 return -EINVAL; 747 748 if ((data & ~0x7UL) >= TASK_SIZE) 749 return -EIO; 750 751 - #ifdef CONFIG_PPC64 752 753 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. 754 * It was assumed, on previous implementations, that 3 bits were ··· 771 task->thread.dabr = data; 772 773 #endif 774 - #if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 775 776 /* As described above, it was assumed 3 bits were passed with the data 777 * address, but we will assume only the mode bits will be passed ··· 1015 return ret; 1016 } 1017 1018 - static void do_syscall_trace(void) 1019 { 1020 - /* the 0x80 provides a way for the tracing parent to distinguish 1021 - between a syscall stop and SIGTRAP delivery */ 1022 - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 1023 - ? 0x80 : 0)); 1024 1025 - /* 1026 - * this isn't the same as continuing with a signal, but it will do 1027 - * for normal use. strace only continues with a signal if the 1028 - * stopping signal is not SIGTRAP. -brl 1029 - */ 1030 - if (current->exit_code) { 1031 - send_sig(current->exit_code, current, 1); 1032 - current->exit_code = 0; 1033 - } 1034 - } 1035 - 1036 - void do_syscall_trace_enter(struct pt_regs *regs) 1037 - { 1038 secure_computing(regs->gpr[0]); 1039 1040 - if (test_thread_flag(TIF_SYSCALL_TRACE) 1041 - && (current->ptrace & PT_PTRACED)) 1042 - do_syscall_trace(); 1043 1044 if (unlikely(current->audit_context)) { 1045 #ifdef CONFIG_PPC64 ··· 1050 regs->gpr[5] & 0xffffffff, 1051 regs->gpr[6] & 0xffffffff); 1052 } 1053 } 1054 1055 void do_syscall_trace_leave(struct pt_regs *regs) 1056 { 1057 if (unlikely(current->audit_context)) 1058 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 1059 regs->result); 1060 1061 - if ((test_thread_flag(TIF_SYSCALL_TRACE) 1062 - || test_thread_flag(TIF_SINGLESTEP)) 1063 - && (current->ptrace & PT_PTRACED)) 1064 - do_syscall_trace(); 1065 }
··· 22 #include <linux/errno.h> 23 #include <linux/ptrace.h> 24 #include <linux/regset.h> 25 + #include <linux/tracehook.h> 26 #include <linux/elf.h> 27 #include <linux/user.h> 28 #include <linux/security.h> ··· 717 struct pt_regs *regs = task->thread.regs; 718 719 720 + #if defined(CONFIG_BOOKE) 721 /* If DAC then do not single step, skip */ 722 if (task->thread.dabr) 723 return; ··· 744 if (addr > 0) 745 return -EINVAL; 746 747 + /* The bottom 3 bits in dabr are flags */ 748 if ((data & ~0x7UL) >= TASK_SIZE) 749 return -EIO; 750 751 + #ifndef CONFIG_BOOKE 752 753 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. 754 * It was assumed, on previous implementations, that 3 bits were ··· 769 task->thread.dabr = data; 770 771 #endif 772 + #if defined(CONFIG_BOOKE) 773 774 /* As described above, it was assumed 3 bits were passed with the data 775 * address, but we will assume only the mode bits will be passed ··· 1013 return ret; 1014 } 1015 1016 + /* 1017 + * We must return the syscall number to actually look up in the table. 1018 + * This can be -1L to skip running any syscall at all. 1019 + */ 1020 + long do_syscall_trace_enter(struct pt_regs *regs) 1021 { 1022 + long ret = 0; 1023 1024 secure_computing(regs->gpr[0]); 1025 1026 + if (test_thread_flag(TIF_SYSCALL_TRACE) && 1027 + tracehook_report_syscall_entry(regs)) 1028 + /* 1029 + * Tracing decided this syscall should not happen. 1030 + * We'll return a bogus call number to get an ENOSYS 1031 + * error, but leave the original number in regs->gpr[0]. 1032 + */ 1033 + ret = -1L; 1034 1035 if (unlikely(current->audit_context)) { 1036 #ifdef CONFIG_PPC64 ··· 1055 regs->gpr[5] & 0xffffffff, 1056 regs->gpr[6] & 0xffffffff); 1057 } 1058 + 1059 + return ret ?: regs->gpr[0]; 1060 } 1061 1062 void do_syscall_trace_leave(struct pt_regs *regs) 1063 { 1064 + int step; 1065 + 1066 if (unlikely(current->audit_context)) 1067 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 1068 regs->result); 1069 1070 + step = test_thread_flag(TIF_SINGLESTEP); 1071 + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) 1072 + tracehook_report_syscall_exit(regs, step); 1073 }
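The entry_32.S and entry_64.S hunks above consume this return value: the assembly copies r3 into r0 and uses it as the syscall number, so a -1L from the tracer falls through to ENOSYS while regs->gpr[0] still holds the number the tracee asked for. A minimal C sketch of that dispatch convention (illustration only, not code from this series; sys_call_table and NR_syscalls stand in for the assembly-level table lookup):

	long nr = do_syscall_trace_enter(regs);

	if (nr < 0 || nr >= NR_syscalls)
		return -ENOSYS;	/* tracer vetoed the call, or bogus number */

	/* argument registers were reloaded from the saved GPR3..GPR8,
	 * possibly rewritten by the tracer */
	return sys_call_table[nr](regs->gpr[3], regs->gpr[4], regs->gpr[5],
				  regs->gpr[6], regs->gpr[7], regs->gpr[8]);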
-24
arch/powerpc/kernel/setup-common.c
··· 367 * setup_cpu_maps - initialize the following cpu maps: 368 * cpu_possible_map 369 * cpu_present_map 370 - * cpu_sibling_map 371 * 372 * Having the possible map set up early allows us to restrict allocations 373 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. ··· 473 * here will have to be reworked 474 */ 475 cpu_init_thread_core_maps(nthreads); 476 - } 477 - 478 - /* 479 - * Being that cpu_sibling_map is now a per_cpu array, then it cannot 480 - * be initialized until the per_cpu areas have been created. This 481 - * function is now called from setup_per_cpu_areas(). 482 - */ 483 - void __init smp_setup_cpu_sibling_map(void) 484 - { 485 - #ifdef CONFIG_PPC64 486 - int i, cpu, base; 487 - 488 - for_each_possible_cpu(cpu) { 489 - DBG("Sibling map for CPU %d:", cpu); 490 - base = cpu_first_thread_in_core(cpu); 491 - for (i = 0; i < threads_per_core; i++) { 492 - cpu_set(base + i, per_cpu(cpu_sibling_map, cpu)); 493 - DBG(" %d", base + i); 494 - } 495 - DBG("\n"); 496 - } 497 - 498 - #endif /* CONFIG_PPC64 */ 499 } 500 #endif /* CONFIG_SMP */ 501
··· 367 * setup_cpu_maps - initialize the following cpu maps: 368 * cpu_possible_map 369 * cpu_present_map 370 * 371 * Having the possible map set up early allows us to restrict allocations 372 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. ··· 474 * here will have to be reworked 475 */ 476 cpu_init_thread_core_maps(nthreads); 477 } 478 #endif /* CONFIG_SMP */ 479
-3
arch/powerpc/kernel/setup_64.c
··· 611 paca[i].data_offset = ptr - __per_cpu_start; 612 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 613 } 614 - 615 - /* Now that per_cpu is setup, initialize cpu_sibling_map */ 616 - smp_setup_cpu_sibling_map(); 617 } 618 #endif 619
··· 611 paca[i].data_offset = ptr - __per_cpu_start; 612 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 613 } 614 } 615 #endif 616
+20 -3
arch/powerpc/kernel/signal.c
··· 9 * this archive for more details. 10 */ 11 12 - #include <linux/ptrace.h> 13 #include <linux/signal.h> 14 #include <asm/uaccess.h> 15 #include <asm/unistd.h> ··· 112 } 113 } 114 115 - int do_signal(sigset_t *oldset, struct pt_regs *regs) 116 { 117 siginfo_t info; 118 int signr; ··· 147 */ 148 if (current->thread.dabr) { 149 set_dabr(current->thread.dabr); 150 - #if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 151 mtspr(SPRN_DBCR0, current->thread.dbcr0); 152 #endif 153 } ··· 177 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag. 178 */ 179 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; 180 } 181 182 return ret; 183 } 184 185 long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
··· 9 * this archive for more details. 10 */ 11 12 + #include <linux/tracehook.h> 13 #include <linux/signal.h> 14 #include <asm/uaccess.h> 15 #include <asm/unistd.h> ··· 112 } 113 } 114 115 + static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) 116 { 117 siginfo_t info; 118 int signr; ··· 147 */ 148 if (current->thread.dabr) { 149 set_dabr(current->thread.dabr); 150 + #if defined(CONFIG_BOOKE) 151 mtspr(SPRN_DBCR0, current->thread.dbcr0); 152 #endif 153 } ··· 177 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag. 178 */ 179 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; 180 + 181 + /* 182 + * Let tracing know that we've done the handler setup. 183 + */ 184 + tracehook_signal_handler(signr, &info, &ka, regs, 185 + test_thread_flag(TIF_SINGLESTEP)); 186 } 187 188 return ret; 189 + } 190 + 191 + void do_signal(struct pt_regs *regs, unsigned long thread_info_flags) 192 + { 193 + if (thread_info_flags & _TIF_SIGPENDING) 194 + do_signal_pending(NULL, regs); 195 + 196 + if (thread_info_flags & _TIF_NOTIFY_RESUME) { 197 + clear_thread_flag(TIF_NOTIFY_RESUME); 198 + tracehook_notify_resume(regs); 199 + } 200 } 201 202 long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+116 -3
arch/powerpc/kernel/smp.c
··· 41 #include <asm/smp.h> 42 #include <asm/time.h> 43 #include <asm/machdep.h> 44 #include <asm/cputable.h> 45 #include <asm/system.h> 46 #include <asm/mpic.h> ··· 63 cpumask_t cpu_possible_map = CPU_MASK_NONE; 64 cpumask_t cpu_online_map = CPU_MASK_NONE; 65 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 66 67 EXPORT_SYMBOL(cpu_online_map); 68 EXPORT_SYMBOL(cpu_possible_map); 69 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 70 71 /* SMP operations for this machine */ 72 struct smp_ops_t *smp_ops; ··· 231 BUG_ON(smp_processor_id() != boot_cpuid); 232 233 cpu_set(boot_cpuid, cpu_online_map); 234 #ifdef CONFIG_PPC64 235 paca[boot_cpuid].__current = current; 236 #endif ··· 380 return 0; 381 } 382 383 384 /* Activate a secondary processor. */ 385 int __devinit start_secondary(void *unused) 386 { 387 unsigned int cpu = smp_processor_id(); 388 389 atomic_inc(&init_mm.mm_count); 390 current->active_mm = &init_mm; ··· 454 455 ipi_call_lock(); 456 cpu_set(cpu, cpu_online_map); 457 ipi_call_unlock(); 458 459 local_irq_enable(); ··· 518 #ifdef CONFIG_HOTPLUG_CPU 519 int __cpu_disable(void) 520 { 521 - if (smp_ops->cpu_disable) 522 - return smp_ops->cpu_disable(); 523 524 - return -ENOSYS; 525 } 526 527 void __cpu_die(unsigned int cpu)
··· 41 #include <asm/smp.h> 42 #include <asm/time.h> 43 #include <asm/machdep.h> 44 + #include <asm/cputhreads.h> 45 #include <asm/cputable.h> 46 #include <asm/system.h> 47 #include <asm/mpic.h> ··· 62 cpumask_t cpu_possible_map = CPU_MASK_NONE; 63 cpumask_t cpu_online_map = CPU_MASK_NONE; 64 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 65 + DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; 66 67 EXPORT_SYMBOL(cpu_online_map); 68 EXPORT_SYMBOL(cpu_possible_map); 69 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 70 + EXPORT_PER_CPU_SYMBOL(cpu_core_map); 71 72 /* SMP operations for this machine */ 73 struct smp_ops_t *smp_ops; ··· 228 BUG_ON(smp_processor_id() != boot_cpuid); 229 230 cpu_set(boot_cpuid, cpu_online_map); 231 + cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid)); 232 + cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid)); 233 #ifdef CONFIG_PPC64 234 paca[boot_cpuid].__current = current; 235 #endif ··· 375 return 0; 376 } 377 378 + /* Return the value of the reg property corresponding to the given 379 + * logical cpu. 380 + */ 381 + int cpu_to_core_id(int cpu) 382 + { 383 + struct device_node *np; 384 + const int *reg; 385 + int id = -1; 386 + 387 + np = of_get_cpu_node(cpu, NULL); 388 + if (!np) 389 + goto out; 390 + 391 + reg = of_get_property(np, "reg", NULL); 392 + if (!reg) 393 + goto out; 394 + 395 + id = *reg; 396 + out: 397 + of_node_put(np); 398 + return id; 399 + } 400 + 401 + /* Must be called when no change can occur to cpu_present_map, 402 + * i.e. during cpu online or offline. 403 + */ 404 + static struct device_node *cpu_to_l2cache(int cpu) 405 + { 406 + struct device_node *np; 407 + const phandle *php; 408 + phandle ph; 409 + 410 + if (!cpu_present(cpu)) 411 + return NULL; 412 + 413 + np = of_get_cpu_node(cpu, NULL); 414 + if (np == NULL) 415 + return NULL; 416 + 417 + php = of_get_property(np, "l2-cache", NULL); 418 + if (php == NULL) 419 + return NULL; 420 + ph = *php; 421 + of_node_put(np); 422 + 423 + return of_find_node_by_phandle(ph); 424 + } 425 426 /* Activate a secondary processor. */ 427 int __devinit start_secondary(void *unused) 428 { 429 unsigned int cpu = smp_processor_id(); 430 + struct device_node *l2_cache; 431 + int i, base; 432 433 atomic_inc(&init_mm.mm_count); 434 current->active_mm = &init_mm; ··· 400 401 ipi_call_lock(); 402 cpu_set(cpu, cpu_online_map); 403 + /* Update sibling maps */ 404 + base = cpu_first_thread_in_core(cpu); 405 + for (i = 0; i < threads_per_core; i++) { 406 + if (cpu_is_offline(base + i)) 407 + continue; 408 + cpu_set(cpu, per_cpu(cpu_sibling_map, base + i)); 409 + cpu_set(base + i, per_cpu(cpu_sibling_map, cpu)); 410 + 411 + /* cpu_core_map should be a superset of 412 + * cpu_sibling_map even if we don't have cache 413 + * information, so update the former here, too. 414 + */ 415 + cpu_set(cpu, per_cpu(cpu_core_map, base +i)); 416 + cpu_set(base + i, per_cpu(cpu_core_map, cpu)); 417 + } 418 + l2_cache = cpu_to_l2cache(cpu); 419 + for_each_online_cpu(i) { 420 + struct device_node *np = cpu_to_l2cache(i); 421 + if (!np) 422 + continue; 423 + if (np == l2_cache) { 424 + cpu_set(cpu, per_cpu(cpu_core_map, i)); 425 + cpu_set(i, per_cpu(cpu_core_map, cpu)); 426 + } 427 + of_node_put(np); 428 + } 429 + of_node_put(l2_cache); 430 ipi_call_unlock(); 431 432 local_irq_enable(); ··· 437 #ifdef CONFIG_HOTPLUG_CPU 438 int __cpu_disable(void) 439 { 440 + struct device_node *l2_cache; 441 + int cpu = smp_processor_id(); 442 + int base, i; 443 + int err; 444 445 + if (!smp_ops->cpu_disable) 446 + return -ENOSYS; 447 + 448 + err = smp_ops->cpu_disable(); 449 + if (err) 450 + return err; 451 + 452 + /* Update sibling maps */ 453 + base = cpu_first_thread_in_core(cpu); 454 + for (i = 0; i < threads_per_core; i++) { 455 + cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i)); 456 + cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu)); 457 + cpu_clear(cpu, per_cpu(cpu_core_map, base +i)); 458 + cpu_clear(base + i, per_cpu(cpu_core_map, cpu)); 459 + } 460 + 461 + l2_cache = cpu_to_l2cache(cpu); 462 + for_each_present_cpu(i) { 463 + struct device_node *np = cpu_to_l2cache(i); 464 + if (!np) 465 + continue; 466 + if (np == l2_cache) { 467 + cpu_clear(cpu, per_cpu(cpu_core_map, i)); 468 + cpu_clear(i, per_cpu(cpu_core_map, cpu)); 469 + } 470 + of_node_put(np); 471 + } 472 + of_node_put(l2_cache); 473 + 474 + 475 + return 0; 476 } 477 478 void __cpu_die(unsigned int cpu)
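These per-cpu maps back the new core id and sibling files exported to userspace by the sysfs changes in this series. A quick self-contained check from userspace, assuming the standard topology paths provided by drivers/base/topology.c (the paths are an assumption, not part of this diff):

	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		/* thread_siblings reflects cpu_sibling_map; core_siblings
		 * reflects cpu_core_map */
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/core_siblings", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("cpu0 core siblings: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}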
-1
arch/powerpc/kernel/stacktrace.c
··· 13 #include <linux/module.h> 14 #include <linux/sched.h> 15 #include <linux/stacktrace.h> 16 - #include <linux/module.h> 17 #include <asm/ptrace.h> 18 #include <asm/processor.h> 19
··· 13 #include <linux/module.h> 14 #include <linux/sched.h> 15 #include <linux/stacktrace.h> 16 #include <asm/ptrace.h> 17 #include <asm/processor.h> 18
+310 -1
arch/powerpc/kernel/sysfs.c
··· 22 23 static DEFINE_PER_CPU(struct cpu, cpu_devices); 24 25 /* SMT stuff */ 26 27 #ifdef CONFIG_PPC_MULTIPLATFORM ··· 299 #endif /* CONFIG_DEBUG_KERNEL */ 300 }; 301 302 303 - static void register_cpu_online(unsigned int cpu) 304 { 305 struct cpu *c = &per_cpu(cpu_devices, cpu); 306 struct sys_device *s = &c->sysdev; ··· 629 630 if (cpu_has_feature(CPU_FTR_DSCR)) 631 sysdev_create_file(s, &attr_dscr); 632 } 633 634 #ifdef CONFIG_HOTPLUG_CPU 635 static void unregister_cpu_online(unsigned int cpu) 636 { 637 struct cpu *c = &per_cpu(cpu_devices, cpu); ··· 706 707 if (cpu_has_feature(CPU_FTR_DSCR)) 708 sysdev_remove_file(s, &attr_dscr); 709 } 710 #endif /* CONFIG_HOTPLUG_CPU */ 711
··· 22 23 static DEFINE_PER_CPU(struct cpu, cpu_devices); 24 25 + static DEFINE_PER_CPU(struct kobject *, cache_toplevel); 26 + 27 /* SMT stuff */ 28 29 #ifdef CONFIG_PPC_MULTIPLATFORM ··· 297 #endif /* CONFIG_DEBUG_KERNEL */ 298 }; 299 300 + struct cache_desc { 301 + struct kobject kobj; 302 + struct cache_desc *next; 303 + const char *type; /* Instruction, Data, or Unified */ 304 + u32 size; /* total cache size in KB */ 305 + u32 line_size; /* in bytes */ 306 + u32 nr_sets; /* number of sets */ 307 + u32 level; /* e.g. 1, 2, 3... */ 308 + u32 associativity; /* e.g. 8-way... 0 is fully associative */ 309 + }; 310 311 + DEFINE_PER_CPU(struct cache_desc *, cache_desc); 312 + 313 + static struct cache_desc *kobj_to_cache_desc(struct kobject *k) 314 + { 315 + return container_of(k, struct cache_desc, kobj); 316 + } 317 + 318 + static void cache_desc_release(struct kobject *k) 319 + { 320 + struct cache_desc *desc = kobj_to_cache_desc(k); 321 + 322 + pr_debug("%s: releasing %s\n", __func__, kobject_name(k)); 323 + 324 + if (desc->next) 325 + kobject_put(&desc->next->kobj); 326 + 327 + kfree(kobj_to_cache_desc(k)); 328 + } 329 + 330 + static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf) 331 + { 332 + struct kobj_attribute *kobj_attr; 333 + 334 + kobj_attr = container_of(attr, struct kobj_attribute, attr); 335 + 336 + return kobj_attr->show(k, kobj_attr, buf); 337 + } 338 + 339 + static struct sysfs_ops cache_desc_sysfs_ops = { 340 + .show = cache_desc_show, 341 + }; 342 + 343 + static struct kobj_type cache_desc_type = { 344 + .release = cache_desc_release, 345 + .sysfs_ops = &cache_desc_sysfs_ops, 346 + }; 347 + 348 + static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 349 + { 350 + struct cache_desc *cache = kobj_to_cache_desc(k); 351 + 352 + return sprintf(buf, "%uK\n", cache->size); 353 + } 354 + 355 + static struct kobj_attribute cache_size_attr = 356 + __ATTR(size, 0444, cache_size_show, NULL); 357 + 358 + static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 359 + { 360 + struct cache_desc *cache = kobj_to_cache_desc(k); 361 + 362 + return sprintf(buf, "%u\n", cache->line_size); 363 + } 364 + 365 + static struct kobj_attribute cache_line_size_attr = 366 + __ATTR(coherency_line_size, 0444, cache_line_size_show, NULL); 367 + 368 + static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 369 + { 370 + struct cache_desc *cache = kobj_to_cache_desc(k); 371 + 372 + return sprintf(buf, "%u\n", cache->nr_sets); 373 + } 374 + 375 + static struct kobj_attribute cache_nr_sets_attr = 376 + __ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL); 377 + 378 + static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 379 + { 380 + struct cache_desc *cache = kobj_to_cache_desc(k); 381 + 382 + return sprintf(buf, "%s\n", cache->type); 383 + } 384 + 385 + static struct kobj_attribute cache_type_attr = 386 + __ATTR(type, 0444, cache_type_show, NULL); 387 + 388 + static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 389 + { 390 + struct cache_desc *cache = kobj_to_cache_desc(k); 391 + 392 + return sprintf(buf, "%u\n", cache->level); 393 + } 394 + 395 + static struct kobj_attribute cache_level_attr = 396 + __ATTR(level, 0444, cache_level_show, NULL); 397 + 398 + static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf) 399 + { 400 + struct cache_desc *cache = kobj_to_cache_desc(k); 401 + 402 + return sprintf(buf, "%u\n", cache->associativity); 403 + } 404 + 405 + static struct kobj_attribute cache_assoc_attr = 406 + __ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL); 407 + 408 + struct cache_desc_info { 409 + const char *type; 410 + const char *size_prop; 411 + const char *line_size_prop; 412 + const char *nr_sets_prop; 413 + }; 414 + 415 + /* PowerPC Processor binding says the [di]-cache-* must be equal on 416 + * unified caches, so just use d-cache properties. */ 417 + static struct cache_desc_info ucache_info = { 418 + .type = "Unified", 419 + .size_prop = "d-cache-size", 420 + .line_size_prop = "d-cache-line-size", 421 + .nr_sets_prop = "d-cache-sets", 422 + }; 423 + 424 + static struct cache_desc_info dcache_info = { 425 + .type = "Data", 426 + .size_prop = "d-cache-size", 427 + .line_size_prop = "d-cache-line-size", 428 + .nr_sets_prop = "d-cache-sets", 429 + }; 430 + 431 + static struct cache_desc_info icache_info = { 432 + .type = "Instruction", 433 + .size_prop = "i-cache-size", 434 + .line_size_prop = "i-cache-line-size", 435 + .nr_sets_prop = "i-cache-sets", 436 + }; 437 + 438 + static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info) 439 + { 440 + const u32 *cache_line_size; 441 + struct cache_desc *new; 442 + const u32 *cache_size; 443 + const u32 *nr_sets; 444 + int rc; 445 + 446 + new = kzalloc(sizeof(*new), GFP_KERNEL); 447 + if (!new) 448 + return NULL; 449 + 450 + rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent, 451 + "index%d", index); 452 + if (rc) 453 + goto err; 454 + 455 + /* type */ 456 + new->type = info->type; 457 + rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr); 458 + WARN_ON(rc); 459 + 460 + /* level */ 461 + new->level = level; 462 + rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr); 463 + WARN_ON(rc); 464 + 465 + /* size */ 466 + cache_size = of_get_property(np, info->size_prop, NULL); 467 + if (cache_size) { 468 + new->size = *cache_size / 1024; 469 + rc = sysfs_create_file(&new->kobj, 470 + &cache_size_attr.attr); 471 + WARN_ON(rc); 472 + } 473 + 474 + /* coherency_line_size */ 475 + cache_line_size = of_get_property(np, info->line_size_prop, NULL); 476 + if (cache_line_size) { 477 + new->line_size = *cache_line_size; 478 + rc = sysfs_create_file(&new->kobj, 479 + &cache_line_size_attr.attr); 480 + WARN_ON(rc); 481 + } 482 + 483 + /* number_of_sets */ 484 + nr_sets = of_get_property(np, info->nr_sets_prop, NULL); 485 + if (nr_sets) { 486 + new->nr_sets = *nr_sets; 487 + rc = sysfs_create_file(&new->kobj, 488 + &cache_nr_sets_attr.attr); 489 + WARN_ON(rc); 490 + } 491 + 492 + /* ways_of_associativity */ 493 + if (new->nr_sets == 1) { 494 + /* fully associative */ 495 + new->associativity = 0; 496 + goto create_assoc; 497 + } 498 + 499 + if (new->nr_sets && new->size && new->line_size) { 500 + /* If we have values for all of these we can derive 501 + * the associativity. */ 502 + new->associativity = 503 + ((new->size * 1024) / new->nr_sets) / new->line_size; 504 + create_assoc: 505 + rc = sysfs_create_file(&new->kobj, 506 + &cache_assoc_attr.attr); 507 + WARN_ON(rc); 508 + } 509 + 510 + return new; 511 + err: 512 + kfree(new); 513 + return NULL; 514 + } 515 + 516 + static bool cache_is_unified(struct device_node *np) 517 + { 518 + return of_get_property(np, "cache-unified", NULL); 519 + } 520 + 521 + static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level) 522 + { 523 + const phandle *next_cache_phandle; 524 + struct device_node *next_cache; 525 + struct cache_desc *new, **end; 526 + 527 + pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index); 528 + 529 + if (cache_is_unified(np)) { 530 + new = create_cache_desc(np, parent, index, level, 531 + &ucache_info); 532 + } else { 533 + new = create_cache_desc(np, parent, index, level, 534 + &dcache_info); 535 + if (new) { 536 + index++; 537 + new->next = create_cache_desc(np, parent, index, level, 538 + &icache_info); 539 + } 540 + } 541 + if (!new) 542 + return NULL; 543 + 544 + end = &new->next; 545 + while (*end) 546 + end = &(*end)->next; 547 + 548 + next_cache_phandle = of_get_property(np, "l2-cache", NULL); 549 + if (!next_cache_phandle) 550 + goto out; 551 + 552 + next_cache = of_find_node_by_phandle(*next_cache_phandle); 553 + if (!next_cache) 554 + goto out; 555 + 556 + *end = create_cache_index_info(next_cache, parent, ++index, ++level); 557 + 558 + of_node_put(next_cache); 559 + out: 560 + return new; 561 + } 562 + 563 + static void __cpuinit create_cache_info(struct sys_device *sysdev) 564 + { 565 + struct kobject *cache_toplevel; 566 + struct device_node *np = NULL; 567 + int cpu = sysdev->id; 568 + 569 + cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj); 570 + if (!cache_toplevel) 571 + return; 572 + per_cpu(cache_toplevel, cpu) = cache_toplevel; 573 + np = of_get_cpu_node(cpu, NULL); 574 + if (np != NULL) { 575 + per_cpu(cache_desc, cpu) = 576 + create_cache_index_info(np, cache_toplevel, 0, 1); 577 + of_node_put(np); 578 + } 579 + return; 580 + } 581 + 582 + static void __cpuinit register_cpu_online(unsigned int cpu) 583 { 584 struct cpu *c = &per_cpu(cpu_devices, cpu); 585 struct sys_device *s = &c->sysdev; ··· 346 347 if (cpu_has_feature(CPU_FTR_DSCR)) 348 sysdev_create_file(s, &attr_dscr); 349 + 350 + create_cache_info(s); 351 } 352 353 #ifdef CONFIG_HOTPLUG_CPU 354 + static void remove_cache_info(struct sys_device *sysdev) 355 + { 356 + struct kobject *cache_toplevel; 357 + struct cache_desc *cache_desc; 358 + int cpu = sysdev->id; 359 + 360 + cache_desc = per_cpu(cache_desc, cpu); 361 + if (cache_desc != NULL) { 362 + sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr); 363 + sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr); 364 + sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr); 365 + sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr); 366 + sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr); 367 + sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr); 368 + 369 + kobject_put(&cache_desc->kobj); 370 + } 371 + cache_toplevel = per_cpu(cache_toplevel, cpu); 372 + if (cache_toplevel != NULL) 373 + kobject_put(cache_toplevel); 374 + } 375 + 376 static void unregister_cpu_online(unsigned int cpu) 377 { 378 struct cpu *c = &per_cpu(cpu_devices, cpu); ··· 399 400 if (cpu_has_feature(CPU_FTR_DSCR)) 401 sysdev_remove_file(s, &attr_dscr); 402 + 403 + remove_cache_info(s); 404 } 405 #endif /* CONFIG_HOTPLUG_CPU */ 406
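The ways_of_associativity value above is derived as size / (sets * line size) whenever all three properties are present. A worked example with hypothetical device-tree values:

	/* say d-cache-size = 32768, d-cache-sets = 128, d-cache-line-size = 64 */
	u32 size = 32768 / 1024;			/* stored in KB, as above: 32 */
	u32 assoc = ((size * 1024) / 128) / 64;		/* (32768 / 128) / 64 = 4-way */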
+3 -3
arch/powerpc/kernel/vio.c
··· 530 } 531 532 ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs); 533 - if (unlikely(dma_mapping_error(ret))) { 534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 535 atomic_inc(&viodev->cmo.allocs_failed); 536 } ··· 1031 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } 1032 static void vio_cmo_bus_remove(struct vio_dev *viodev) {} 1033 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} 1034 - static void vio_cmo_bus_init() {} 1035 - static void vio_cmo_sysfs_init() { } 1036 #endif /* CONFIG_PPC_SMLPAR */ 1037 EXPORT_SYMBOL(vio_cmo_entitlement_update); 1038 EXPORT_SYMBOL(vio_cmo_set_dev_desired);
··· 530 } 531 532 ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs); 533 + if (unlikely(dma_mapping_error(dev, ret))) { 534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 535 atomic_inc(&viodev->cmo.allocs_failed); 536 } ··· 1031 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } 1032 static void vio_cmo_bus_remove(struct vio_dev *viodev) {} 1033 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} 1034 + static void vio_cmo_bus_init(void) {} 1035 + static void vio_cmo_sysfs_init(void) { } 1036 #endif /* CONFIG_PPC_SMLPAR */ 1037 EXPORT_SYMBOL(vio_cmo_entitlement_update); 1038 EXPORT_SYMBOL(vio_cmo_set_dev_desired);
+8 -1
arch/powerpc/mm/hugetlbpage.c
··· 736 737 if (!cpu_has_feature(CPU_FTR_16M_PAGE)) 738 return -ENODEV; 739 /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE 740 * and adjust PTE_NONCACHE_NUM if the number of supported huge page 741 * sizes changes. 742 */ 743 set_huge_psize(MMU_PAGE_16M); 744 - set_huge_psize(MMU_PAGE_64K); 745 set_huge_psize(MMU_PAGE_16G); 746 747 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 748 if (mmu_huge_psizes[psize]) {
··· 736 737 if (!cpu_has_feature(CPU_FTR_16M_PAGE)) 738 return -ENODEV; 739 + 740 /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE 741 * and adjust PTE_NONCACHE_NUM if the number of supported huge page 742 * sizes changes. 743 */ 744 set_huge_psize(MMU_PAGE_16M); 745 set_huge_psize(MMU_PAGE_16G); 746 + 747 + /* Temporarily disable support for 64K huge pages when 64K SPU local 748 + * store support is enabled as the current implementation conflicts. 749 + */ 750 + #ifndef CONFIG_SPU_FS_64K_LS 751 + set_huge_psize(MMU_PAGE_64K); 752 + #endif 753 754 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 755 if (mmu_huge_psizes[psize]) {
+72
arch/powerpc/platforms/powermac/setup.c
··· 541 } 542 machine_device_initcall(powermac, pmac_declare_of_platform_devices); 543 544 /* 545 * Called very early, MMU is off, device-tree isn't unflattened 546 */
··· 541 } 542 machine_device_initcall(powermac, pmac_declare_of_platform_devices); 543 544 + #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE 545 + /* 546 + * This is called very early, as part of console_init() (typically just after 547 + * time_init()). This function is responsible for trying to find a good 548 + * default console on serial ports. It tries to match the open firmware 549 + * default output with one of the available serial console drivers. 550 + */ 551 + static int __init check_pmac_serial_console(void) 552 + { 553 + struct device_node *prom_stdout = NULL; 554 + int offset = 0; 555 + const char *name; 556 + #ifdef CONFIG_SERIAL_PMACZILOG_TTYS 557 + char *devname = "ttyS"; 558 + #else 559 + char *devname = "ttyPZ"; 560 + #endif 561 + 562 + pr_debug(" -> check_pmac_serial_console()\n"); 563 + 564 + /* The user has requested a console so this is already set up. */ 565 + if (strstr(boot_command_line, "console=")) { 566 + pr_debug(" console was specified !\n"); 567 + return -EBUSY; 568 + } 569 + 570 + if (!of_chosen) { 571 + pr_debug(" of_chosen is NULL !\n"); 572 + return -ENODEV; 573 + } 574 + 575 + /* We are getting a weird phandle from OF ... */ 576 + /* ... So use the full path instead */ 577 + name = of_get_property(of_chosen, "linux,stdout-path", NULL); 578 + if (name == NULL) { 579 + pr_debug(" no linux,stdout-path !\n"); 580 + return -ENODEV; 581 + } 582 + prom_stdout = of_find_node_by_path(name); 583 + if (!prom_stdout) { 584 + pr_debug(" can't find stdout package %s !\n", name); 585 + return -ENODEV; 586 + } 587 + pr_debug("stdout is %s\n", prom_stdout->full_name); 588 + 589 + name = of_get_property(prom_stdout, "name", NULL); 590 + if (!name) { 591 + pr_debug(" stdout package has no name !\n"); 592 + goto not_found; 593 + } 594 + 595 + if (strcmp(name, "ch-a") == 0) 596 + offset = 0; 597 + else if (strcmp(name, "ch-b") == 0) 598 + offset = 1; 599 + else 600 + goto not_found; 601 + of_node_put(prom_stdout); 602 + 603 + pr_debug("Found serial console at %s%d\n", devname, offset); 604 + 605 + return add_preferred_console(devname, offset, NULL); 606 + 607 + not_found: 608 + pr_debug("No preferred console found !\n"); 609 + of_node_put(prom_stdout); 610 + return -ENODEV; 611 + } 612 + console_initcall(check_pmac_serial_console); 613 + 614 + #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ 615 + 616 /* 617 * Called very early, MMU is off, device-tree isn't unflattened 618 */
+11 -1
arch/powerpc/platforms/powermac/udbg_scc.c
··· 125 out_8(sccc, 0xc0); 126 127 /* If SCC was the OF output port, read the BRG value, else 128 - * Setup for 57600 8N1 129 */ 130 if (ch_def != NULL) { 131 out_8(sccc, 13); 132 scc_inittab[1] = in_8(sccc); 133 out_8(sccc, 12); 134 scc_inittab[3] = in_8(sccc); 135 } 136 137 for (i = 0; i < sizeof(scc_inittab); ++i)
··· 125 out_8(sccc, 0xc0); 126 127 /* If SCC was the OF output port, read the BRG value, else 128 + * Setup for 38400 or 57600 8N1 depending on the machine 129 */ 130 if (ch_def != NULL) { 131 out_8(sccc, 13); 132 scc_inittab[1] = in_8(sccc); 133 out_8(sccc, 12); 134 scc_inittab[3] = in_8(sccc); 135 + } else if (machine_is_compatible("RackMac1,1") 136 + || machine_is_compatible("RackMac1,2") 137 + || machine_is_compatible("MacRISC4")) { 138 + /* Xserves and G5s default to 57600 */ 139 + scc_inittab[1] = 0; 140 + scc_inittab[3] = 0; 141 + } else { 142 + /* Others default to 38400 */ 143 + scc_inittab[1] = 0; 144 + scc_inittab[3] = 1; 145 } 146 147 for (i = 0; i < sizeof(scc_inittab); ++i)
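The magic pairs written into scc_inittab[1]/[3] are the high (WR13) and low (WR12) bytes of the SCC baud-rate generator time constant. Assuming the usual 3.6864 MHz PCLK and x16 clock mode (an assumption; the patch does not state the clock), the Z85c30 formula lands exactly on these values:

	/* TC = PCLK / (2 * 16 * baud) - 2, i.e. 115200 / baud - 2 */
	unsigned int tc_57600 = 3686400 / (2 * 16 * 57600) - 2;	/* = 0 */
	unsigned int tc_38400 = 3686400 / (2 * 16 * 38400) - 2;	/* = 1 */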
+6 -2
arch/powerpc/platforms/pseries/cmm.c
··· 289 } 290 291 #define CMM_SHOW(name, format, args...) \ 292 - static ssize_t show_##name(struct sys_device *dev, char *buf) \ 293 { \ 294 return sprintf(buf, format, ##args); \ 295 } \ ··· 300 CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages)); 301 CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target)); 302 303 - static ssize_t show_oom_pages(struct sys_device *dev, char *buf) 304 { 305 return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages)); 306 } 307 308 static ssize_t store_oom_pages(struct sys_device *dev, 309 const char *buf, size_t count) 310 { 311 unsigned long val = simple_strtoul (buf, NULL, 10);
··· 289 } 290 291 #define CMM_SHOW(name, format, args...) \ 292 + static ssize_t show_##name(struct sys_device *dev, \ 293 + struct sysdev_attribute *attr, \ 294 + char *buf) \ 295 { \ 296 return sprintf(buf, format, ##args); \ 297 } \ ··· 298 CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages)); 299 CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target)); 300 301 + static ssize_t show_oom_pages(struct sys_device *dev, 302 + struct sysdev_attribute *attr, char *buf) 303 { 304 return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages)); 305 } 306 307 static ssize_t store_oom_pages(struct sys_device *dev, 308 + struct sysdev_attribute *attr, 309 const char *buf, size_t count) 310 { 311 unsigned long val = simple_strtoul (buf, NULL, 10);
+4 -4
drivers/net/ibmveth.c
··· 260 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 261 pool->buff_size, DMA_FROM_DEVICE); 262 263 - if (dma_mapping_error((&adapter->vdev->dev, dma_addr)) 264 goto failure; 265 266 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; ··· 294 pool->consumer_index = pool->size - 1; 295 else 296 pool->consumer_index--; 297 - if (!dma_mapping_error((&adapter->vdev->dev, dma_addr)) 298 dma_unmap_single(&adapter->vdev->dev, 299 pool->dma_addr[index], pool->buff_size, 300 DMA_FROM_DEVICE); ··· 488 &adapter->rx_buff_pool[i]); 489 490 if (adapter->bounce_buffer != NULL) { 491 - if (!dma_mapping_error(adapter->bounce_buffer_dma)) { 492 dma_unmap_single(&adapter->vdev->dev, 493 adapter->bounce_buffer_dma, 494 adapter->netdev->mtu + IBMVETH_BUFF_OH, ··· 924 buf[1] = 0; 925 } 926 927 - if (dma_mapping_error((&adapter->vdev->dev, data_dma_addr)) { 928 if (!firmware_has_feature(FW_FEATURE_CMO)) 929 ibmveth_error_printk("tx: unable to map xmit buffer\n"); 930 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
··· 260 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 261 pool->buff_size, DMA_FROM_DEVICE); 262 263 + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) 264 goto failure; 265 266 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; ··· 294 pool->consumer_index = pool->size - 1; 295 else 296 pool->consumer_index--; 297 + if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) 298 dma_unmap_single(&adapter->vdev->dev, 299 pool->dma_addr[index], pool->buff_size, 300 DMA_FROM_DEVICE); ··· 488 &adapter->rx_buff_pool[i]); 489 490 if (adapter->bounce_buffer != NULL) { 491 + if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { 492 dma_unmap_single(&adapter->vdev->dev, 493 adapter->bounce_buffer_dma, 494 adapter->netdev->mtu + IBMVETH_BUFF_OH, ··· 924 buf[1] = 0; 925 } 926 927 + if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { 928 if (!firmware_has_feature(FW_FEATURE_CMO)) 929 ibmveth_error_printk("tx: unable to map xmit buffer\n"); 930 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+6
drivers/of/Kconfig
··· 13 depends on PPC_OF && I2C 14 help 15 OpenFirmware I2C accessors
··· 13 depends on PPC_OF && I2C 14 help 15 OpenFirmware I2C accessors 16 + 17 + config OF_SPI 18 + def_tristate SPI 19 + depends on OF && PPC_OF && SPI 20 + help 21 + OpenFirmware SPI accessors
+1
drivers/of/Makefile
··· 2 obj-$(CONFIG_OF_DEVICE) += device.o platform.o 3 obj-$(CONFIG_OF_GPIO) += gpio.o 4 obj-$(CONFIG_OF_I2C) += of_i2c.o
··· 2 obj-$(CONFIG_OF_DEVICE) += device.o platform.o 3 obj-$(CONFIG_OF_GPIO) += gpio.o 4 obj-$(CONFIG_OF_I2C) += of_i2c.o 5 + obj-$(CONFIG_OF_SPI) += of_spi.o
+88
drivers/of/base.c
··· 385 return np; 386 } 387 EXPORT_SYMBOL(of_find_matching_node);
··· 385 return np; 386 } 387 EXPORT_SYMBOL(of_find_matching_node); 388 + 389 + /** 390 + * of_modalias_table: Table of explicit compatible ==> modalias mappings 391 + * 392 + * This table allows particular compatible property values to be mapped 393 + * to modalias strings. This is useful for busses which do not directly 394 + * understand the OF device tree but are populated based on data contained 395 + * within the device tree. SPI and I2C are the two current users of this 396 + * table. 397 + * 398 + * In most cases, devices do not need to be listed in this table because 399 + * the modalias value can be derived directly from the compatible table. 400 + * However, if for any reason a value cannot be derived, then this table 401 + * provides a method to override the implicit derivation. 402 + * 403 + * At the moment, a single table is used for all bus types because it is 404 + * assumed that the data size is small and that the compatible values 405 + * should already be distinct enough to differentiate between SPI, I2C 406 + * and other devices. 407 + */ 408 + struct of_modalias_table { 409 + char *of_device; 410 + char *modalias; 411 + }; 412 + static struct of_modalias_table of_modalias_table[] = { 413 + /* Empty for now; add entries as needed */ 414 + }; 415 + 416 + /** 417 + * of_modalias_node - Lookup appropriate modalias for a device node 418 + * @node: pointer to a device tree node 419 + * @modalias: Pointer to buffer that modalias value will be copied into 420 + * @len: Length of modalias value 421 + * 422 + * Based on the value of the compatible property, this routine will determine 423 + * an appropriate modalias value for a particular device tree node. Three 424 + * separate methods are used to derive a modalias value. 425 + * 426 + * First method is to lookup the compatible value in of_modalias_table. 427 + * Second is to look for a "linux,<modalias>" entry in the compatible list 428 + * and use that for the modalias. Third is to strip off the manufacturer 429 + * prefix from the first compatible entry and use the remainder as the modalias. 430 + * 431 + * This routine returns 0 on success. 432 + */ 433 + int of_modalias_node(struct device_node *node, char *modalias, int len) 434 + { 435 + int i, cplen; 436 + const char *compatible; 437 + const char *p; 438 + 439 + /* 1. search for exception list entry */ 440 + for (i = 0; i < ARRAY_SIZE(of_modalias_table); i++) { 441 + compatible = of_modalias_table[i].of_device; 442 + if (!of_device_is_compatible(node, compatible)) 443 + continue; 444 + strlcpy(modalias, of_modalias_table[i].modalias, len); 445 + return 0; 446 + } 447 + 448 + compatible = of_get_property(node, "compatible", &cplen); 449 + if (!compatible) 450 + return -ENODEV; 451 + 452 + /* 2. search for linux,<modalias> entry */ 453 + p = compatible; 454 + while (cplen > 0) { 455 + if (!strncmp(p, "linux,", 6)) { 456 + p += 6; 457 + strlcpy(modalias, p, len); 458 + return 0; 459 + } 460 + 461 + i = strlen(p) + 1; 462 + p += i; 463 + cplen -= i; 464 + } 465 + 466 + /* 3. take first compatible entry and strip manufacturer */ 467 + p = strchr(compatible, ','); 468 + if (!p) 469 + return -ENODEV; 470 + p++; 471 + strlcpy(modalias, p, len); 472 + return 0; 473 + } 474 + EXPORT_SYMBOL_GPL(of_modalias_node); 475 +
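A short usage sketch, mirroring how of_i2c.c and of_spi.c consume the helper (the "acme,widget" compatible is hypothetical; method 3 strips the manufacturer prefix and yields "widget"):

	char modalias[32];

	/* assume node's compatible property is "acme,widget" */
	if (of_modalias_node(node, modalias, sizeof(modalias)) == 0)
		request_module(modalias);	/* requests "widget" here */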
+3 -61
drivers/of/of_i2c.c
··· 16 #include <linux/of_i2c.h> 17 #include <linux/module.h> 18 19 - struct i2c_driver_device { 20 - char *of_device; 21 - char *i2c_type; 22 - }; 23 - 24 - static struct i2c_driver_device i2c_devices[] = { 25 - }; 26 - 27 - static int of_find_i2c_driver(struct device_node *node, 28 - struct i2c_board_info *info) 29 - { 30 - int i, cplen; 31 - const char *compatible; 32 - const char *p; 33 - 34 - /* 1. search for exception list entry */ 35 - for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) { 36 - if (!of_device_is_compatible(node, i2c_devices[i].of_device)) 37 - continue; 38 - if (strlcpy(info->type, i2c_devices[i].i2c_type, 39 - I2C_NAME_SIZE) >= I2C_NAME_SIZE) 40 - return -ENOMEM; 41 - 42 - return 0; 43 - } 44 - 45 - compatible = of_get_property(node, "compatible", &cplen); 46 - if (!compatible) 47 - return -ENODEV; 48 - 49 - /* 2. search for linux,<i2c-type> entry */ 50 - p = compatible; 51 - while (cplen > 0) { 52 - if (!strncmp(p, "linux,", 6)) { 53 - p += 6; 54 - if (strlcpy(info->type, p, 55 - I2C_NAME_SIZE) >= I2C_NAME_SIZE) 56 - return -ENOMEM; 57 - return 0; 58 - } 59 - 60 - i = strlen(p) + 1; 61 - p += i; 62 - cplen -= i; 63 - } 64 - 65 - /* 3. take fist compatible entry and strip manufacturer */ 66 - p = strchr(compatible, ','); 67 - if (!p) 68 - return -ENODEV; 69 - p++; 70 - if (strlcpy(info->type, p, I2C_NAME_SIZE) >= I2C_NAME_SIZE) 71 - return -ENOMEM; 72 - return 0; 73 - } 74 - 75 void of_register_i2c_devices(struct i2c_adapter *adap, 76 struct device_node *adap_node) 77 { ··· 27 const u32 *addr; 28 int len; 29 30 addr = of_get_property(node, "reg", &len); 31 if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) { 32 printk(KERN_ERR ··· 38 } 39 40 info.irq = irq_of_parse_and_map(node, 0); 41 - 42 - if (of_find_i2c_driver(node, &info) < 0) { 43 - irq_dispose_mapping(info.irq); 44 - continue; 45 - } 46 47 info.addr = *addr; 48
··· 16 #include <linux/of_i2c.h> 17 #include <linux/module.h> 18 19 void of_register_i2c_devices(struct i2c_adapter *adap, 20 struct device_node *adap_node) 21 { ··· 83 const u32 *addr; 84 int len; 85 86 + if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) 87 + continue; 88 + 89 addr = of_get_property(node, "reg", &len); 90 if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) { 91 printk(KERN_ERR ··· 91 } 92 93 info.irq = irq_of_parse_and_map(node, 0); 94 95 info.addr = *addr; 96
+93
drivers/of/of_spi.c
···
··· 1 + /* 2 + * SPI OF support routines 3 + * Copyright (C) 2008 Secret Lab Technologies Ltd. 4 + * 5 + * Support routines for deriving SPI device attachments from the device 6 + * tree. 7 + */ 8 + 9 + #include <linux/of.h> 10 + #include <linux/device.h> 11 + #include <linux/spi/spi.h> 12 + #include <linux/of_spi.h> 13 + 14 + /** 15 + * of_register_spi_devices - Register child devices onto the SPI bus 16 + * @master: Pointer to spi_master device 17 + * @np: parent node of SPI device nodes 18 + * 19 + * Registers an spi_device for each child node of 'np' which has a 'reg' 20 + * property. 21 + */ 22 + void of_register_spi_devices(struct spi_master *master, struct device_node *np) 23 + { 24 + struct spi_device *spi; 25 + struct device_node *nc; 26 + const u32 *prop; 27 + int rc; 28 + int len; 29 + 30 + for_each_child_of_node(np, nc) { 31 + /* Alloc an spi_device */ 32 + spi = spi_alloc_device(master); 33 + if (!spi) { 34 + dev_err(&master->dev, "spi_device alloc error for %s\n", 35 + nc->full_name); 36 + spi_dev_put(spi); 37 + continue; 38 + } 39 + 40 + /* Select device driver */ 41 + if (of_modalias_node(nc, spi->modalias, 42 + sizeof(spi->modalias)) < 0) { 43 + dev_err(&master->dev, "cannot find modalias for %s\n", 44 + nc->full_name); 45 + spi_dev_put(spi); 46 + continue; 47 + } 48 + 49 + /* Device address */ 50 + prop = of_get_property(nc, "reg", &len); 51 + if (!prop || len < sizeof(*prop)) { 52 + dev_err(&master->dev, "%s has no 'reg' property\n", 53 + nc->full_name); 54 + spi_dev_put(spi); 55 + continue; 56 + } 57 + spi->chip_select = *prop; 58 + 59 + /* Mode (clock phase/polarity/etc.) */ 60 + if (of_find_property(nc, "spi-cpha", NULL)) 61 + spi->mode |= SPI_CPHA; 62 + if (of_find_property(nc, "spi-cpol", NULL)) 63 + spi->mode |= SPI_CPOL; 64 + 65 + /* Device speed */ 66 + prop = of_get_property(nc, "spi-max-frequency", &len); 67 + if (!prop || len < sizeof(*prop)) { 68 + dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n", 69 + nc->full_name); 70 + spi_dev_put(spi); 71 + continue; 72 + } 73 + spi->max_speed_hz = *prop; 74 + 75 + /* IRQ */ 76 + spi->irq = irq_of_parse_and_map(nc, 0); 77 + 78 + /* Store a pointer to the node in the device structure */ 79 + of_node_get(nc); 80 + spi->dev.archdata.of_node = nc; 81 + 82 + /* Register the new device */ 83 + request_module(spi->modalias); 84 + rc = spi_add_device(spi); 85 + if (rc) { 86 + dev_err(&master->dev, "spi_device register error %s\n", 87 + nc->full_name); 88 + spi_dev_put(spi); 89 + } 90 + 91 + } 92 + } 93 + EXPORT_SYMBOL(of_register_spi_devices);
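For context, a hedged sketch of the expected caller: a controller driver hands its device-tree node to this helper once its master is registered (the 'op->node' field follows the of_device layout of this era and is an assumption, not part of this patch):

	status = spi_register_master(master);
	if (status == 0)
		of_register_spi_devices(master, op->node);	/* op: struct of_device * */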
+95 -44
drivers/spi/spi.c
··· 178 static LIST_HEAD(board_list); 179 static DEFINE_MUTEX(board_lock); 180 181 182 /** 183 * spi_new_device - instantiate one new SPI device ··· 287 struct spi_board_info *chip) 288 { 289 struct spi_device *proxy; 290 - struct device *dev = master->dev.parent; 291 int status; 292 293 /* NOTE: caller did any chip->bus_num checks necessary. ··· 296 * suggests syslogged diagnostics are best here (ugh). 297 */ 298 299 - /* Chipselects are numbered 0..max; validate. */ 300 - if (chip->chip_select >= master->num_chipselect) { 301 - dev_err(dev, "cs%d > max %d\n", 302 - chip->chip_select, 303 - master->num_chipselect); 304 - return NULL; 305 - } 306 - 307 - if (!spi_master_get(master)) 308 return NULL; 309 310 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 311 312 - proxy = kzalloc(sizeof *proxy, GFP_KERNEL); 313 - if (!proxy) { 314 - dev_err(dev, "can't alloc dev for cs%d\n", 315 - chip->chip_select); 316 - goto fail; 317 - } 318 - proxy->master = master; 319 proxy->chip_select = chip->chip_select; 320 proxy->max_speed_hz = chip->max_speed_hz; 321 proxy->mode = chip->mode; 322 proxy->irq = chip->irq; 323 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 324 - 325 - snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id, 326 - "%s.%u", master->dev.bus_id, 327 - chip->chip_select); 328 - proxy->dev.parent = dev; 329 - proxy->dev.bus = &spi_bus_type; 330 proxy->dev.platform_data = (void *) chip->platform_data; 331 proxy->controller_data = chip->controller_data; 332 proxy->controller_state = NULL; 333 - proxy->dev.release = spidev_release; 334 335 - /* drivers may modify this initial i/o setup */ 336 - status = master->setup(proxy); 337 if (status < 0) { 338 - dev_err(dev, "can't %s %s, status %d\n", 339 - "setup", proxy->dev.bus_id, status); 340 - goto fail; 341 } 342 343 - /* driver core catches callers that misbehave by defining 344 - * devices that already exist. 345 - */ 346 - status = device_register(&proxy->dev); 347 - if (status < 0) { 348 - dev_err(dev, "can't %s %s, status %d\n", 349 - "add", proxy->dev.bus_id, status); 350 - goto fail; 351 - } 352 - dev_dbg(dev, "registered child %s\n", proxy->dev.bus_id); 353 return proxy; 354 - 355 - fail: 356 - spi_master_put(master); 357 - kfree(proxy); 358 - return NULL; 359 } 360 EXPORT_SYMBOL_GPL(spi_new_device); 361
··· 178 static LIST_HEAD(board_list); 179 static DEFINE_MUTEX(board_lock); 180 181 + /** 182 + * spi_alloc_device - Allocate a new SPI device 183 + * @master: Controller to which device is connected 184 + * Context: can sleep 185 + * 186 + * Allows a driver to allocate and initialize a spi_device without 187 + * registering it immediately. This allows a driver to directly 188 + * fill the spi_device with device parameters before calling 189 + * spi_add_device() on it. 190 + * 191 + * Caller is responsible to call spi_add_device() on the returned 192 + * spi_device structure to add it to the SPI master. If the caller 193 + * needs to discard the spi_device without adding it, then it should 194 + * call spi_dev_put() on it. 195 + * 196 + * Returns a pointer to the new device, or NULL. 197 + */ 198 + struct spi_device *spi_alloc_device(struct spi_master *master) 199 + { 200 + struct spi_device *spi; 201 + struct device *dev = master->dev.parent; 202 + 203 + if (!spi_master_get(master)) 204 + return NULL; 205 + 206 + spi = kzalloc(sizeof *spi, GFP_KERNEL); 207 + if (!spi) { 208 + dev_err(dev, "cannot alloc spi_device\n"); 209 + spi_master_put(master); 210 + return NULL; 211 + } 212 + 213 + spi->master = master; 214 + spi->dev.parent = dev; 215 + spi->dev.bus = &spi_bus_type; 216 + spi->dev.release = spidev_release; 217 + device_initialize(&spi->dev); 218 + return spi; 219 + } 220 + EXPORT_SYMBOL_GPL(spi_alloc_device); 221 + 222 + /** 223 + * spi_add_device - Add spi_device allocated with spi_alloc_device 224 + * @spi: spi_device to register 225 + * 226 + * Companion function to spi_alloc_device. Devices allocated with 227 + * spi_alloc_device can be added onto the spi bus with this function. 228 + * 229 + * Returns 0 on success; non-zero on failure 230 + */ 231 + int spi_add_device(struct spi_device *spi) 232 + { 233 + struct device *dev = spi->master->dev.parent; 234 + int status; 235 + 236 + /* Chipselects are numbered 0..max; validate. */ 237 + if (spi->chip_select >= spi->master->num_chipselect) { 238 + dev_err(dev, "cs%d >= max %d\n", 239 + spi->chip_select, 240 + spi->master->num_chipselect); 241 + return -EINVAL; 242 + } 243 + 244 + /* Set the bus ID string */ 245 + snprintf(spi->dev.bus_id, sizeof spi->dev.bus_id, 246 + "%s.%u", spi->master->dev.bus_id, 247 + spi->chip_select); 248 + 249 + /* drivers may modify this initial i/o setup */ 250 + status = spi->master->setup(spi); 251 + if (status < 0) { 252 + dev_err(dev, "can't %s %s, status %d\n", 253 + "setup", spi->dev.bus_id, status); 254 + return status; 255 + } 256 + 257 + /* driver core catches callers that misbehave by defining 258 + * devices that already exist. 259 + */ 260 + status = device_add(&spi->dev); 261 + if (status < 0) { 262 + dev_err(dev, "can't %s %s, status %d\n", 263 + "add", spi->dev.bus_id, status); 264 + return status; 265 + } 266 + 267 + dev_dbg(dev, "registered child %s\n", spi->dev.bus_id); 268 + return 0; 269 + } 270 + EXPORT_SYMBOL_GPL(spi_add_device); 271 272 /** 273 * spi_new_device - instantiate one new SPI device ··· 197 struct spi_board_info *chip) 198 { 199 struct spi_device *proxy; 200 int status; 201 202 /* NOTE: caller did any chip->bus_num checks necessary. ··· 207 * suggests syslogged diagnostics are best here (ugh). 208 */ 209 210 + proxy = spi_alloc_device(master); 211 + if (!proxy) 212 return NULL; 213 214 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 215 216 proxy->chip_select = chip->chip_select; 217 proxy->max_speed_hz = chip->max_speed_hz; 218 proxy->mode = chip->mode; 219 proxy->irq = chip->irq; 220 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 221 proxy->dev.platform_data = (void *) chip->platform_data; 222 proxy->controller_data = chip->controller_data; 223 proxy->controller_state = NULL; 224 225 + status = spi_add_device(proxy); 226 if (status < 0) { 227 + spi_dev_put(proxy); 228 + return NULL; 229 } 230 231 return proxy; 232 } 233 EXPORT_SYMBOL_GPL(spi_new_device); 234
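With allocation and registration split apart, spi_new_device() above becomes the reference user of the new pair. A minimal sketch of the two-stage sequence for callers that want to fill in the spi_device themselves (the device parameters below are purely illustrative):

	#include <linux/spi/spi.h>

	static int example_two_stage(struct spi_master *master)
	{
		struct spi_device *spi;
		int status;

		spi = spi_alloc_device(master);		/* stage 1: allocate */
		if (!spi)
			return -ENOMEM;

		/* fields that board info would otherwise provide */
		strlcpy(spi->modalias, "example-dev", sizeof(spi->modalias));
		spi->chip_select = 0;
		spi->max_speed_hz = 1000000;
		spi->mode = SPI_MODE_0;

		status = spi_add_device(spi);		/* stage 2: register */
		if (status < 0)
			spi_dev_put(spi);		/* discard on failure */
		return status;
	}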
+2
include/asm-powerpc/pgtable-4k.h
··· 46 #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ 47 #define _PAGE_F_SECOND _PAGE_SECONDARY 48 #define _PAGE_F_GIX _PAGE_GROUP_IX 49 50 /* PTE flags to conserve for HPTE identification */ 51 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
··· 46 #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ 47 #define _PAGE_F_SECOND _PAGE_SECONDARY 48 #define _PAGE_F_GIX _PAGE_GROUP_IX 49 + #define _PAGE_SPECIAL 0x10000 /* software: special page */ 50 + #define __HAVE_ARCH_PTE_SPECIAL 51 52 /* PTE flags to conserve for HPTE identification */ 53 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
+2
include/asm-powerpc/pgtable-64k.h
··· 70 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 71 72 /* Additional PTE bits (don't change without checking asm in hash_low.S) */ 73 #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ 74 #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ 75 #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
··· 70 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 71 72 /* Additional PTE bits (don't change without checking asm in hash_low.S) */ 73 + #define __HAVE_ARCH_PTE_SPECIAL 74 + #define _PAGE_SPECIAL 0x00000400 /* software: special page */ 75 #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ 76 #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ 77 #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
+3
include/asm-powerpc/pgtable-ppc32.h
··· 401 #ifndef _PAGE_COHERENT 402 #define _PAGE_COHERENT 0 403 #endif 404 #ifndef _PMD_PRESENT_MASK 405 #define _PMD_PRESENT_MASK _PMD_PRESENT 406 #endif
··· 401 #ifndef _PAGE_COHERENT 402 #define _PAGE_COHERENT 0 403 #endif 404 + #ifndef _PAGE_WRITETHRU 405 + #define _PAGE_WRITETHRU 0 406 + #endif 407 #ifndef _PMD_PRESENT_MASK 408 #define _PMD_PRESENT_MASK _PMD_PRESENT 409 #endif
+2 -2
include/asm-powerpc/pgtable-ppc64.h
··· 245 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} 246 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} 247 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} 248 - static inline int pte_special(pte_t pte) { return 0; } 249 250 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 251 static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } ··· 265 static inline pte_t pte_mkhuge(pte_t pte) { 266 return pte; } 267 static inline pte_t pte_mkspecial(pte_t pte) { 268 - return pte; } 269 static inline unsigned long pte_pgprot(pte_t pte) 270 { 271 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
··· 245 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} 246 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} 247 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} 248 + static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } 249 250 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 251 static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } ··· 265 static inline pte_t pte_mkhuge(pte_t pte) { 266 return pte; } 267 static inline pte_t pte_mkspecial(pte_t pte) { 268 + pte_val(pte) |= _PAGE_SPECIAL; return pte; } 269 static inline unsigned long pte_pgprot(pte_t pte) 270 { 271 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
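pte_mkspecial() and pte_special() now form a working pair on 64-bit: a PTE marked special tells generic code that no struct page lookup or refcounting should be done for the mapping. An illustrative round-trip of the new software bit, not taken from this patch:

	#include <linux/bug.h>
	#include <asm/pgtable.h>

	static void special_roundtrip(pte_t pte)
	{
		pte = pte_mkspecial(pte);	/* e.g. for a raw PFN mapping */
		BUG_ON(!pte_special(pte));	/* _PAGE_SPECIAL is observable */
	}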
+1
include/asm-powerpc/ptrace.h
··· 84 #ifndef __ASSEMBLY__ 85 86 #define instruction_pointer(regs) ((regs)->nip) 87 #define regs_return_value(regs) ((regs)->gpr[3]) 88 89 #ifdef CONFIG_SMP
··· 84 #ifndef __ASSEMBLY__ 85 86 #define instruction_pointer(regs) ((regs)->nip) 87 + #define user_stack_pointer(regs) ((regs)->gpr[1]) 88 #define regs_return_value(regs) ((regs)->gpr[3]) 89 90 #ifdef CONFIG_SMP
+1 -2
include/asm-powerpc/signal.h
··· 122 123 #ifdef __KERNEL__ 124 struct pt_regs; 125 - extern int do_signal(sigset_t *oldset, struct pt_regs *regs); 126 - extern int do_signal32(sigset_t *oldset, struct pt_regs *regs); 127 #define ptrace_signal_deliver(regs, cookie) do { } while (0) 128 #endif /* __KERNEL__ */ 129
··· 122 123 #ifdef __KERNEL__ 124 struct pt_regs; 125 + extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags); 126 #define ptrace_signal_deliver(regs, cookie) do { } while (0) 127 #endif /* __KERNEL__ */ 128
+2
include/asm-powerpc/smp.h
··· 62 #endif 63 64 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 65 66 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. 67 *
··· 62 #endif 63 64 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 65 + DECLARE_PER_CPU(cpumask_t, cpu_core_map); 66 + extern int cpu_to_core_id(int cpu); 67 68 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. 69 *
+84
include/asm-powerpc/syscall.h
···
··· 1 + /* 2 + * Access to user system call parameters and results 3 + * 4 + * Copyright (C) 2008 Red Hat, Inc. All rights reserved. 5 + * 6 + * This copyrighted material is made available to anyone wishing to use, 7 + * modify, copy, or redistribute it subject to the terms and conditions 8 + * of the GNU General Public License v.2. 9 + * 10 + * See asm-generic/syscall.h for descriptions of what we must do here. 11 + */ 12 + 13 + #ifndef _ASM_SYSCALL_H 14 + #define _ASM_SYSCALL_H 1 15 + 16 + #include <linux/sched.h> 17 + 18 + static inline long syscall_get_nr(struct task_struct *task, 19 + struct pt_regs *regs) 20 + { 21 + return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L; 22 + } 23 + 24 + static inline void syscall_rollback(struct task_struct *task, 25 + struct pt_regs *regs) 26 + { 27 + regs->gpr[3] = regs->orig_gpr3; 28 + } 29 + 30 + static inline long syscall_get_error(struct task_struct *task, 31 + struct pt_regs *regs) 32 + { 33 + return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0; 34 + } 35 + 36 + static inline long syscall_get_return_value(struct task_struct *task, 37 + struct pt_regs *regs) 38 + { 39 + return regs->gpr[3]; 40 + } 41 + 42 + static inline void syscall_set_return_value(struct task_struct *task, 43 + struct pt_regs *regs, 44 + int error, long val) 45 + { 46 + if (error) { 47 + regs->ccr |= 0x1000L; 48 + regs->gpr[3] = -error; 49 + } else { 50 + regs->ccr &= ~0x1000L; 51 + regs->gpr[3] = val; 52 + } 53 + } 54 + 55 + static inline void syscall_get_arguments(struct task_struct *task, 56 + struct pt_regs *regs, 57 + unsigned int i, unsigned int n, 58 + unsigned long *args) 59 + { 60 + BUG_ON(i + n > 6); 61 + #ifdef CONFIG_PPC64 62 + if (test_tsk_thread_flag(task, TIF_32BIT)) { 63 + /* 64 + * Zero-extend 32-bit argument values. The high bits are 65 + * garbage ignored by the actual syscall dispatch. 66 + */ 67 + while (n-- > 0) 68 + args[n] = (u32) regs->gpr[3 + i + n]; 69 + return; 70 + } 71 + #endif 72 + memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0])); 73 + } 74 + 75 + static inline void syscall_set_arguments(struct task_struct *task, 76 + struct pt_regs *regs, 77 + unsigned int i, unsigned int n, 78 + const unsigned long *args) 79 + { 80 + BUG_ON(i + n > 6); 81 + memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0])); 82 + } 83 + 84 + #endif /* _ASM_SYSCALL_H */
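These helpers implement the asm-generic/syscall.h contract that tracehook consumers rely on. A hedged example of such a consumer, in the style of a debugging helper (the function is illustrative and assumes the task is stopped at syscall entry so the register values are stable):

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <asm/syscall.h>

	static void print_syscall(struct task_struct *task, struct pt_regs *regs)
	{
		unsigned long args[6];
		long nr = syscall_get_nr(task, regs);

		if (nr == -1L)
			return;		/* not inside a system call */

		syscall_get_arguments(task, regs, 0, 6, args);
		printk(KERN_DEBUG "syscall %ld(%#lx, %#lx, %#lx, ...)\n",
		       nr, args[0], args[1], args[2]);
	}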
+4 -1
include/asm-powerpc/thread_info.h
··· 108 #define TIF_SECCOMP 10 /* secure computing */ 109 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ 110 #define TIF_NOERROR 12 /* Force successful syscall return */ 111 #define TIF_FREEZE 14 /* Freezing for suspend */ 112 #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ 113 #define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ ··· 126 #define _TIF_SECCOMP (1<<TIF_SECCOMP) 127 #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 128 #define _TIF_NOERROR (1<<TIF_NOERROR) 129 #define _TIF_FREEZE (1<<TIF_FREEZE) 130 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) 131 #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 132 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) 133 134 - #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED) 135 #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 136 137 /* Bits in local_flags */
··· 108 #define TIF_SECCOMP 10 /* secure computing */ 109 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ 110 #define TIF_NOERROR 12 /* Force successful syscall return */ 111 + #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ 112 #define TIF_FREEZE 14 /* Freezing for suspend */ 113 #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ 114 #define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ ··· 125 #define _TIF_SECCOMP (1<<TIF_SECCOMP) 126 #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 127 #define _TIF_NOERROR (1<<TIF_NOERROR) 128 + #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 129 #define _TIF_FREEZE (1<<TIF_FREEZE) 130 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) 131 #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 132 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) 133 134 + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 135 + _TIF_NOTIFY_RESUME) 136 #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 137 138 /* Bits in local_flags */
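Adding _TIF_NOTIFY_RESUME to _TIF_USER_WORK_MASK makes the return-to-user path call out whenever the flag is set; the actual handling lives in arch/powerpc/kernel/signal.c, outside this section. A sketch of the usual tracehook dispatch pattern, assuming the generic helpers (the real powerpc code differs in detail):

	#include <linux/tracehook.h>

	static void handle_notify_resume(struct pt_regs *regs,
					 unsigned long thread_info_flags)
	{
		if (thread_info_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}
	}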
+2
include/asm-powerpc/topology.h
··· 108 #include <asm/smp.h> 109 110 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 111 #endif 112 #endif 113
··· 108 #include <asm/smp.h> 109 110 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 111 + #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 112 + #define topology_core_id(cpu) (cpu_to_core_id(cpu)) 113 #endif 114 #endif 115
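These macros feed the generic drivers/base/topology.c code, so the new core information surfaces to userspace under /sys/devices/system/cpu/cpuN/topology/. A small userspace sketch reading it (assumes the standard sysfs layout):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/core_id", "r");
		int core_id;

		if (f && fscanf(f, "%d", &core_id) == 1)
			printf("cpu0 is on core %d\n", core_id);
		if (f)
			fclose(f);
		return 0;
	}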
+1
include/linux/of.h
··· 70 extern int of_n_size_cells(struct device_node *np); 71 extern const struct of_device_id *of_match_node( 72 const struct of_device_id *matches, const struct device_node *node); 73 74 #endif /* _LINUX_OF_H */
··· 70 extern int of_n_size_cells(struct device_node *np); 71 extern const struct of_device_id *of_match_node( 72 const struct of_device_id *matches, const struct device_node *node); 73 + extern int of_modalias_node(struct device_node *node, char *modalias, int len); 74 75 #endif /* _LINUX_OF_H */
+18
include/linux/of_spi.h
···
··· 1 + /* 2 + * OpenFirmware SPI support routines 3 + * Copyright (C) 2008 Secret Lab Technologies Ltd. 4 + * 5 + * Support routines for deriving SPI device attachments from the device 6 + * tree. 7 + */ 8 + 9 + #ifndef __LINUX_OF_SPI_H 10 + #define __LINUX_OF_SPI_H 11 + 12 + #include <linux/of.h> 13 + #include <linux/spi/spi.h> 14 + 15 + extern void of_register_spi_devices(struct spi_master *master, 16 + struct device_node *np); 17 + 18 + #endif /* __LINUX_OF_SPI_H */
+12
include/linux/spi/spi.h
··· 778 * use spi_new_device() to describe each device. You can also call 779 * spi_unregister_device() to start making that device vanish, but 780 * normally that would be handled by spi_unregister_master(). 781 */ 782 extern struct spi_device * 783 spi_new_device(struct spi_master *, struct spi_board_info *); 784
··· 778 * use spi_new_device() to describe each device. You can also call 779 * spi_unregister_device() to start making that device vanish, but 780 * normally that would be handled by spi_unregister_master(). 781 + * 782 + * You can also use spi_alloc_device() and spi_add_device() to use a 783 + * two-stage registration sequence for each spi_device. This gives the caller 784 + * some more control over the spi_device structure before it is registered, 785 + * but requires the caller to initialize fields that would otherwise 786 + * be defined using the board info. 787 */ 788 + extern struct spi_device * 789 + spi_alloc_device(struct spi_master *master); 790 + 791 + extern int 792 + spi_add_device(struct spi_device *spi); 793 + 794 extern struct spi_device * 795 spi_new_device(struct spi_master *, struct spi_board_info *); 796