Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
"In this ARM merge, we remove more lines than we add. Changes include:

- Enable imprecise aborts early, so that bus errors aren't masked
until later in the boot. This has the side effect that boot
loaders which provoke these aborts can cause the kernel to crash
early in boot, so we install a handler to report this event around
the site where these are enabled.

- Remove the buggy but impossible to enable cmpxchg syscall code.

- Add unwinding annotations to some assembly code.

- Add support for atomic half-word exchange for ARMv6k+.

- Reduce ioremap() alignment for SMP/LPAE cases where we don't need
the large alignment.

- Addition of an "optimal" 3G configuration for systems with 1G of
RAM.

- Increase vmalloc space by 128M.

- Constify some SMP operations structures, which have never been
writable.

- Improve ARM's dma_mmap() support for mapping DMA coherent mappings
into userspace.

- Fix to the NMI backtrace code in the IPI case on ARM where the
failing CPU gets stuck for 10s waiting for its own IPI to be
delivered.

- Removal of legacy PM support from the AMBA bus driver.

- Another fix for the previous fix of vdsomunge"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (23 commits)
ARM: 8449/1: fix bug in vdsomunge swab32 macro
arm: add missing of_node_put
ARM: 8447/1: catch pending imprecise abort on unmask
ARM: 8446/1: amba: Remove unused callbacks for legacy system PM
ARM: 8443/1: Adding support for atomic half word exchange
ARM: clean up TWD after previous patch
ARM: 8441/2: twd: Don't set CLOCK_EVT_FEAT_C3STOP unconditionally
ARM: 8440/1: remove obsolete documentation
ARM: make highpte an expert option
ARM: 8433/1: add a VMSPLIT_3G_OPT config option
ARM: 8439/1: Fix backtrace generation when IPI is masked
ARM: 8428/1: kgdb: Fix registers on sleeping tasks
ARM: 8427/1: dma-mapping: add support for offset parameter in dma_mmap()
ARM: 8426/1: dma-mapping: add missing range check in dma_mmap()
ARM: remove user cmpxchg syscall
ARM: 8438/1: Add unwinding to __clear_user_std()
ARM: 8436/1: hw_breakpoint: remove unnecessary header
ARM: 8434/2: Revert "7655/1: smp_twd: make twd_local_timer_of_register() no-op for nosmp"
ARM: 8432/1: move VMALLOC_END from 0xff000000 to 0xff800000
ARM: 8430/1: use default ioremap alignment for SMP or LPAE
...

+128 -158
-16
Documentation/arm/SA1100/Victor
··· 1 - Victor is known as a "digital talking book player" manufactured by 2 - VisuAide, Inc. to be used by blind people. 3 - 4 - For more information related to Victor, see: 5 - 6 - http://www.humanware.com/en-usa/products 7 - 8 - Of course Victor is using Linux as its main operating system. 9 - The Victor implementation for Linux is maintained by Nicolas Pitre: 10 - 11 - nico@visuaide.com 12 - nico@fluxnic.net 13 - 14 - For any comments, please feel free to contact me through the above 15 - addresses. 16 -
+1 -1
Documentation/arm/memory.txt
··· 54 54 located here through iotable_init(). 55 55 VMALLOC_START is based upon the value 56 56 of the high_memory variable, and VMALLOC_END 57 - is equal to 0xff000000. 57 + is equal to 0xff800000. 58 58 59 59 PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region. 60 60 This maps the platforms RAM, and typically
+5
Documentation/devicetree/bindings/arm/twd.txt
··· 19 19 - reg : Specify the base address and the size of the TWD timer 20 20 register window. 21 21 22 + Optional 23 + 24 + - always-on : a boolean property. If present, the timer is powered through 25 + an always-on power domain, therefore it never loses context. 26 + 22 27 Example: 23 28 24 29 twd-timer@2c000600 {
+5 -2
arch/arm/Kconfig
··· 1411 1411 1412 1412 config HAVE_ARM_TWD 1413 1413 bool 1414 - depends on SMP 1415 1414 select CLKSRC_OF if OF 1416 1415 help 1417 1416 This options enables support for the ARM timer and watchdog unit ··· 1470 1471 1471 1472 config VMSPLIT_3G 1472 1473 bool "3G/1G user/kernel split" 1474 + config VMSPLIT_3G_OPT 1475 + bool "3G/1G user/kernel split (for full 1G low memory)" 1473 1476 config VMSPLIT_2G 1474 1477 bool "2G/2G user/kernel split" 1475 1478 config VMSPLIT_1G ··· 1483 1482 default PHYS_OFFSET if !MMU 1484 1483 default 0x40000000 if VMSPLIT_1G 1485 1484 default 0x80000000 if VMSPLIT_2G 1485 + default 0xB0000000 if VMSPLIT_3G_OPT 1486 1486 default 0xC0000000 1487 1487 1488 1488 config NR_CPUS ··· 1698 1696 If unsure, say n. 1699 1697 1700 1698 config HIGHPTE 1701 - bool "Allocate 2nd-level pagetables from highmem" 1699 + bool "Allocate 2nd-level pagetables from highmem" if EXPERT 1702 1700 depends on HIGHMEM 1701 + default y 1703 1702 help 1704 1703 The VM uses one page of physical memory for each page table. 1705 1704 For systems with a lot of processes, this can use a lot of
+12
arch/arm/include/asm/cmpxchg.h
··· 39 39 40 40 switch (size) { 41 41 #if __LINUX_ARM_ARCH__ >= 6 42 + #ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */ 42 43 case 1: 43 44 asm volatile("@ __xchg1\n" 44 45 "1: ldrexb %0, [%3]\n" ··· 50 49 : "r" (x), "r" (ptr) 51 50 : "memory", "cc"); 52 51 break; 52 + case 2: 53 + asm volatile("@ __xchg2\n" 54 + "1: ldrexh %0, [%3]\n" 55 + " strexh %1, %2, [%3]\n" 56 + " teq %1, #0\n" 57 + " bne 1b" 58 + : "=&r" (ret), "=&r" (tmp) 59 + : "r" (x), "r" (ptr) 60 + : "memory", "cc"); 61 + break; 62 + #endif 53 63 case 4: 54 64 asm volatile("@ __xchg4\n" 55 65 "1: ldrex %0, [%3]\n"
+10
arch/arm/include/asm/irqflags.h
··· 54 54 55 55 #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") 56 56 #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") 57 + 58 + #ifndef CONFIG_CPU_V7M 59 + #define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc") 60 + #define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc") 61 + #else 62 + #define local_abt_enable() do { } while (0) 63 + #define local_abt_disable() do { } while (0) 64 + #endif 57 65 #else 58 66 59 67 /* ··· 144 136 : "memory", "cc"); \ 145 137 }) 146 138 139 + #define local_abt_enable() do { } while (0) 140 + #define local_abt_disable() do { } while (0) 147 141 #endif 148 142 149 143 /*
+1 -1
arch/arm/include/asm/mach/arch.h
··· 47 47 unsigned l2c_aux_val; /* L2 cache aux value */ 48 48 unsigned l2c_aux_mask; /* L2 cache aux mask */ 49 49 void (*l2c_write_sec)(unsigned long, unsigned); 50 - struct smp_operations *smp; /* SMP operations */ 50 + const struct smp_operations *smp; /* SMP operations */ 51 51 bool (*smp_init)(void); 52 52 void (*fixup)(struct tag *, char **); 53 53 void (*dt_fixup)(void);
+2
arch/arm/include/asm/memory.h
··· 76 76 */ 77 77 #define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) 78 78 79 + #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) 79 80 /* 80 81 * Allow 16MB-aligned ioremap pages 81 82 */ 82 83 #define IOREMAP_MAX_ORDER 24 84 + #endif 83 85 84 86 #else /* CONFIG_MMU */ 85 87
+1 -1
arch/arm/include/asm/pgtable.h
··· 43 43 */ 44 44 #define VMALLOC_OFFSET (8*1024*1024) 45 45 #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) 46 - #define VMALLOC_END 0xff000000UL 46 + #define VMALLOC_END 0xff800000UL 47 47 48 48 #define LIBRARY_TEXT_START 0x0c000000 49 49
+2 -2
arch/arm/include/asm/smp.h
··· 112 112 113 113 struct of_cpu_method { 114 114 const char *method; 115 - struct smp_operations *ops; 115 + const struct smp_operations *ops; 116 116 }; 117 117 118 118 #define CPU_METHOD_OF_DECLARE(name, _method, _ops) \ ··· 122 122 /* 123 123 * set platform specific SMP operations 124 124 */ 125 - extern void smp_set_ops(struct smp_operations *); 125 + extern void smp_set_ops(const struct smp_operations *); 126 126 127 127 #endif /* ifndef __ASM_ARM_SMP_H */
-7
arch/arm/include/asm/unistd.h
··· 21 21 */ 22 22 #define __NR_syscalls (392) 23 23 24 - /* 25 - * *NOTE*: This is a ghost syscall private to the kernel. Only the 26 - * __kuser_cmpxchg code in entry-armv.S should be aware of its 27 - * existence. Don't ever use this from user code. 28 - */ 29 - #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) 30 - 31 24 #define __ARCH_WANT_STAT64 32 25 #define __ARCH_WANT_SYS_GETHOSTNAME 33 26 #define __ARCH_WANT_SYS_PAUSE
+9 -3
arch/arm/kernel/devtree.c
··· 101 101 if (of_property_read_u32(cpu, "reg", &hwid)) { 102 102 pr_debug(" * %s missing reg property\n", 103 103 cpu->full_name); 104 + of_node_put(cpu); 104 105 return; 105 106 } 106 107 ··· 109 108 * 8 MSBs must be set to 0 in the DT since the reg property 110 109 * defines the MPIDR[23:0]. 111 110 */ 112 - if (hwid & ~MPIDR_HWID_BITMASK) 111 + if (hwid & ~MPIDR_HWID_BITMASK) { 112 + of_node_put(cpu); 113 113 return; 114 + } 114 115 115 116 /* 116 117 * Duplicate MPIDRs are a recipe for disaster. ··· 122 119 * to avoid matching valid MPIDR[23:0] values. 123 120 */ 124 121 for (j = 0; j < cpuidx; j++) 125 - if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg " 126 - "properties in the DT\n")) 122 + if (WARN(tmp_map[j] == hwid, 123 + "Duplicate /cpu reg properties in the DT\n")) { 124 + of_node_put(cpu); 127 125 return; 126 + } 128 127 129 128 /* 130 129 * Build a stashed array of MPIDR values. Numbering scheme ··· 148 143 "max cores %u, capping them\n", 149 144 cpuidx, nr_cpu_ids)) { 150 145 cpuidx = nr_cpu_ids; 146 + of_node_put(cpu); 151 147 break; 152 148 } 153 149
+3 -30
arch/arm/kernel/entry-armv.S
··· 427 427 .endm 428 428 429 429 .macro kuser_cmpxchg_check 430 - #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ 431 - !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 430 + #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) 432 431 #ifndef CONFIG_MMU 433 432 #warning "NPTL on non MMU needs fixing" 434 433 #else ··· 858 859 859 860 __kuser_cmpxchg64: @ 0xffff0f60 860 861 861 - #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 862 - 863 - /* 864 - * Poor you. No fast solution possible... 865 - * The kernel itself must perform the operation. 866 - * A special ghost syscall is used for that (see traps.c). 867 - */ 868 - stmfd sp!, {r7, lr} 869 - ldr r7, 1f @ it's 20 bits 870 - swi __ARM_NR_cmpxchg64 871 - ldmfd sp!, {r7, pc} 872 - 1: .word __ARM_NR_cmpxchg64 873 - 874 - #elif defined(CONFIG_CPU_32v6K) 862 + #if defined(CONFIG_CPU_32v6K) 875 863 876 864 stmfd sp!, {r4, r5, r6, r7} 877 865 ldrd r4, r5, [r0] @ load old val ··· 934 948 935 949 __kuser_cmpxchg: @ 0xffff0fc0 936 950 937 - #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 938 - 939 - /* 940 - * Poor you. No fast solution possible... 941 - * The kernel itself must perform the operation. 942 - * A special ghost syscall is used for that (see traps.c). 943 - */ 944 - stmfd sp!, {r7, lr} 945 - ldr r7, 1f @ it's 20 bits 946 - swi __ARM_NR_cmpxchg 947 - ldmfd sp!, {r7, pc} 948 - 1: .word __ARM_NR_cmpxchg 949 - 950 - #elif __LINUX_ARM_ARCH__ < 6 951 + #if __LINUX_ARM_ARCH__ < 6 951 952 952 953 #ifdef CONFIG_MMU 953 954
-1
arch/arm/kernel/hw_breakpoint.c
··· 35 35 #include <asm/cputype.h> 36 36 #include <asm/current.h> 37 37 #include <asm/hw_breakpoint.h> 38 - #include <asm/kdebug.h> 39 38 #include <asm/traps.h> 40 39 41 40 /* Breakpoint currently in use for each BRP. */
+12 -19
arch/arm/kernel/kgdb.c
··· 74 74 void 75 75 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) 76 76 { 77 - struct pt_regs *thread_regs; 77 + struct thread_info *ti; 78 78 int regno; 79 79 80 80 /* Just making sure... */ ··· 86 86 gdb_regs[regno] = 0; 87 87 88 88 /* Otherwise, we have only some registers from switch_to() */ 89 - thread_regs = task_pt_regs(task); 90 - gdb_regs[_R0] = thread_regs->ARM_r0; 91 - gdb_regs[_R1] = thread_regs->ARM_r1; 92 - gdb_regs[_R2] = thread_regs->ARM_r2; 93 - gdb_regs[_R3] = thread_regs->ARM_r3; 94 - gdb_regs[_R4] = thread_regs->ARM_r4; 95 - gdb_regs[_R5] = thread_regs->ARM_r5; 96 - gdb_regs[_R6] = thread_regs->ARM_r6; 97 - gdb_regs[_R7] = thread_regs->ARM_r7; 98 - gdb_regs[_R8] = thread_regs->ARM_r8; 99 - gdb_regs[_R9] = thread_regs->ARM_r9; 100 - gdb_regs[_R10] = thread_regs->ARM_r10; 101 - gdb_regs[_FP] = thread_regs->ARM_fp; 102 - gdb_regs[_IP] = thread_regs->ARM_ip; 103 - gdb_regs[_SPT] = thread_regs->ARM_sp; 104 - gdb_regs[_LR] = thread_regs->ARM_lr; 105 - gdb_regs[_PC] = thread_regs->ARM_pc; 106 - gdb_regs[_CPSR] = thread_regs->ARM_cpsr; 89 + ti = task_thread_info(task); 90 + gdb_regs[_R4] = ti->cpu_context.r4; 91 + gdb_regs[_R5] = ti->cpu_context.r5; 92 + gdb_regs[_R6] = ti->cpu_context.r6; 93 + gdb_regs[_R7] = ti->cpu_context.r7; 94 + gdb_regs[_R8] = ti->cpu_context.r8; 95 + gdb_regs[_R9] = ti->cpu_context.r9; 96 + gdb_regs[_R10] = ti->cpu_context.sl; 97 + gdb_regs[_FP] = ti->cpu_context.fp; 98 + gdb_regs[_SPT] = ti->cpu_context.sp; 99 + gdb_regs[_PC] = ti->cpu_context.pc; 107 100 } 108 101 109 102 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+11 -1
arch/arm/kernel/smp.c
··· 80 80 81 81 static struct smp_operations smp_ops; 82 82 83 - void __init smp_set_ops(struct smp_operations *ops) 83 + void __init smp_set_ops(const struct smp_operations *ops) 84 84 { 85 85 if (ops) 86 86 smp_ops = *ops; ··· 400 400 401 401 local_irq_enable(); 402 402 local_fiq_enable(); 403 + local_abt_enable(); 403 404 404 405 /* 405 406 * OK, it's off to the idle thread for us ··· 749 748 750 749 static void raise_nmi(cpumask_t *mask) 751 750 { 751 + /* 752 + * Generate the backtrace directly if we are running in a calling 753 + * context that is not preemptible by the backtrace IPI. Note 754 + * that nmi_cpu_backtrace() automatically removes the current cpu 755 + * from mask. 756 + */ 757 + if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled()) 758 + nmi_cpu_backtrace(NULL); 759 + 752 760 smp_cross_call(mask, IPI_CPU_BACKTRACE); 753 761 } 754 762
+5 -6
arch/arm/kernel/smp_twd.c
··· 23 23 #include <linux/of_irq.h> 24 24 #include <linux/of_address.h> 25 25 26 - #include <asm/smp_plat.h> 27 26 #include <asm/smp_twd.h> 28 27 29 28 /* set up by the platform code */ ··· 33 34 static DEFINE_PER_CPU(bool, percpu_setup_called); 34 35 35 36 static struct clock_event_device __percpu *twd_evt; 37 + static unsigned int twd_features = 38 + CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; 36 39 static int twd_ppi; 37 40 38 41 static int twd_shutdown(struct clock_event_device *clk) ··· 295 294 writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); 296 295 297 296 clk->name = "local_timer"; 298 - clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 299 - CLOCK_EVT_FEAT_C3STOP; 297 + clk->features = twd_features; 300 298 clk->rating = 350; 301 299 clk->set_state_shutdown = twd_shutdown; 302 300 clk->set_state_periodic = twd_set_periodic; ··· 350 350 goto out_irq; 351 351 352 352 twd_get_clock(np); 353 + if (!of_property_read_bool(np, "always-on")) 354 + twd_features |= CLOCK_EVT_FEAT_C3STOP; 353 355 354 356 /* 355 357 * Immediately configure the timer on the boot CPU, unless we need ··· 393 391 static void __init twd_local_timer_of_register(struct device_node *np) 394 392 { 395 393 int err; 396 - 397 - if (!is_smp() || !setup_max_cpus) 398 - return; 399 394 400 395 twd_ppi = irq_of_parse_and_map(np, 0); 401 396 if (!twd_ppi) {
-52
arch/arm/kernel/traps.c
··· 625 625 set_tls(regs->ARM_r0); 626 626 return 0; 627 627 628 - #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG 629 - /* 630 - * Atomically store r1 in *r2 if *r2 is equal to r0 for user space. 631 - * Return zero in r0 if *MEM was changed or non-zero if no exchange 632 - * happened. Also set the user C flag accordingly. 633 - * If access permissions have to be fixed up then non-zero is 634 - * returned and the operation has to be re-attempted. 635 - * 636 - * *NOTE*: This is a ghost syscall private to the kernel. Only the 637 - * __kuser_cmpxchg code in entry-armv.S should be aware of its 638 - * existence. Don't ever use this from user code. 639 - */ 640 - case NR(cmpxchg): 641 - for (;;) { 642 - extern void do_DataAbort(unsigned long addr, unsigned int fsr, 643 - struct pt_regs *regs); 644 - unsigned long val; 645 - unsigned long addr = regs->ARM_r2; 646 - struct mm_struct *mm = current->mm; 647 - pgd_t *pgd; pmd_t *pmd; pte_t *pte; 648 - spinlock_t *ptl; 649 - 650 - regs->ARM_cpsr &= ~PSR_C_BIT; 651 - down_read(&mm->mmap_sem); 652 - pgd = pgd_offset(mm, addr); 653 - if (!pgd_present(*pgd)) 654 - goto bad_access; 655 - pmd = pmd_offset(pgd, addr); 656 - if (!pmd_present(*pmd)) 657 - goto bad_access; 658 - pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 659 - if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) { 660 - pte_unmap_unlock(pte, ptl); 661 - goto bad_access; 662 - } 663 - val = *(unsigned long *)addr; 664 - val -= regs->ARM_r0; 665 - if (val == 0) { 666 - *(unsigned long *)addr = regs->ARM_r1; 667 - regs->ARM_cpsr |= PSR_C_BIT; 668 - } 669 - pte_unmap_unlock(pte, ptl); 670 - up_read(&mm->mmap_sem); 671 - return val; 672 - 673 - bad_access: 674 - up_read(&mm->mmap_sem); 675 - /* simulate a write access fault */ 676 - do_DataAbort(addr, 15 + (1 << 11), regs); 677 - } 678 - #endif 679 - 680 628 default: 681 629 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS 682 630 if not implemented, rather than raising SIGILL. This
+4
arch/arm/lib/clear_user.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/unwind.h> 12 13 13 14 .text 14 15 ··· 21 20 */ 22 21 ENTRY(__clear_user_std) 23 22 WEAK(arm_clear_user) 23 + UNWIND(.fnstart) 24 + UNWIND(.save {r1, lr}) 24 25 stmfd sp!, {r1, lr} 25 26 mov r2, #0 26 27 cmp r1, #4 ··· 47 44 USER( strnebt r2, [r0]) 48 45 mov r0, #0 49 46 ldmfd sp!, {r1, pc} 47 + UNWIND(.fnend) 50 48 ENDPROC(arm_clear_user) 51 49 ENDPROC(__clear_user_std) 52 50
-12
arch/arm/mm/Kconfig
··· 419 419 config CPU_32v3 420 420 bool 421 421 select CPU_USE_DOMAINS if MMU 422 - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 423 422 select NEED_KUSER_HELPERS 424 423 select TLS_REG_EMUL if SMP || !MMU 425 424 426 425 config CPU_32v4 427 426 bool 428 427 select CPU_USE_DOMAINS if MMU 429 - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 430 428 select NEED_KUSER_HELPERS 431 429 select TLS_REG_EMUL if SMP || !MMU 432 430 433 431 config CPU_32v4T 434 432 bool 435 433 select CPU_USE_DOMAINS if MMU 436 - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 437 434 select NEED_KUSER_HELPERS 438 435 select TLS_REG_EMUL if SMP || !MMU 439 436 440 437 config CPU_32v5 441 438 bool 442 439 select CPU_USE_DOMAINS if MMU 443 - select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 444 440 select NEED_KUSER_HELPERS 445 441 select TLS_REG_EMUL if SMP || !MMU 446 442 ··· 800 804 An SMP system using a pre-ARMv6 processor (there are apparently 801 805 a few prototypes like that in existence) and therefore access to 802 806 that required register must be emulated. 803 - 804 - config NEEDS_SYSCALL_FOR_CMPXCHG 805 - bool 806 - select NEED_KUSER_HELPERS 807 - help 808 - SMP on a pre-ARMv6 processor? Well OK then. 809 - Forget about fast user space cmpxchg support. 810 - It is just not possible. 811 807 812 808 config NEED_KUSER_HELPERS 813 809 bool
+7
arch/arm/mm/dma-mapping.c
··· 1407 1407 unsigned long uaddr = vma->vm_start; 1408 1408 unsigned long usize = vma->vm_end - vma->vm_start; 1409 1409 struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1410 + unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 1411 + unsigned long off = vma->vm_pgoff; 1410 1412 1411 1413 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 1412 1414 1413 1415 if (!pages) 1414 1416 return -ENXIO; 1417 + 1418 + if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off) 1419 + return -ENXIO; 1420 + 1421 + pages += off; 1415 1422 1416 1423 do { 1417 1424 int ret = vm_insert_page(vma, uaddr, *pages++);
+22
arch/arm/mm/fault.c
··· 593 593 arm_notify_die("", regs, &info, ifsr, 0); 594 594 } 595 595 596 + /* 597 + * Abort handler to be used only during first unmasking of asynchronous aborts 598 + * on the boot CPU. This makes sure that the machine will not die if the 599 + * firmware/bootloader left an imprecise abort pending for us to trip over. 600 + */ 601 + static int __init early_abort_handler(unsigned long addr, unsigned int fsr, 602 + struct pt_regs *regs) 603 + { 604 + pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during " 605 + "first unmask, this is most likely caused by a " 606 + "firmware/bootloader bug.\n", fsr); 607 + 608 + return 0; 609 + } 610 + 611 + void __init early_abt_enable(void) 612 + { 613 + fsr_info[22].fn = early_abort_handler; 614 + local_abt_enable(); 615 + fsr_info[22].fn = do_bad; 616 + } 617 + 596 618 #ifndef CONFIG_ARM_LPAE 597 619 static int __init exceptions_init(void) 598 620 {
+1
arch/arm/mm/fault.h
··· 24 24 25 25 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); 26 26 unsigned long search_exception_table(unsigned long addr); 27 + void early_abt_enable(void); 27 28 28 29 #endif /* __ARCH_ARM_FAULT_H */
+4
arch/arm/mm/mmu.c
··· 38 38 #include <asm/mach/pci.h> 39 39 #include <asm/fixmap.h> 40 40 41 + #include "fault.h" 41 42 #include "mm.h" 42 43 #include "tcm.h" 43 44 ··· 1364 1363 */ 1365 1364 local_flush_tlb_all(); 1366 1365 flush_cache_all(); 1366 + 1367 + /* Enable asynchronous aborts */ 1368 + early_abt_enable(); 1367 1369 } 1368 1370 1369 1371 static void __init kmap_init(void)
+1 -1
arch/arm/vdso/vdsomunge.c
··· 66 66 ((((x) & 0x000000ff) << 24) | \ 67 67 (((x) & 0x0000ff00) << 8) | \ 68 68 (((x) & 0x00ff0000) >> 8) | \ 69 - (((x) & 0xff000000) << 24)) 69 + (((x) & 0xff000000) >> 24)) 70 70 71 71 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 72 72 #define HOST_ORDER ELFDATA2LSB
-2
include/linux/amba/bus.h
··· 41 41 int (*probe)(struct amba_device *, const struct amba_id *); 42 42 int (*remove)(struct amba_device *); 43 43 void (*shutdown)(struct amba_device *); 44 - int (*suspend)(struct amba_device *, pm_message_t); 45 - int (*resume)(struct amba_device *); 46 44 const struct amba_id *id_table; 47 45 }; 48 46
+10 -1
lib/nmi_backtrace.c
··· 43 43 printk("%.*s", (end - start) + 1, buf); 44 44 } 45 45 46 + /* 47 + * When raise() is called it will be is passed a pointer to the 48 + * backtrace_mask. Architectures that call nmi_cpu_backtrace() 49 + * directly from their raise() functions may rely on the mask 50 + * they are passed being updated as a side effect of this call. 51 + */ 46 52 void nmi_trigger_all_cpu_backtrace(bool include_self, 47 53 void (*raise)(cpumask_t *mask)) 48 54 { ··· 155 149 /* Replace printk to write into the NMI seq */ 156 150 this_cpu_write(printk_func, nmi_vprintk); 157 151 pr_warn("NMI backtrace for cpu %d\n", cpu); 158 - show_regs(regs); 152 + if (regs) 153 + show_regs(regs); 154 + else 155 + dump_stack(); 159 156 this_cpu_write(printk_func, printk_func_save); 160 157 161 158 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));