Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: pxa: fix page table corruption on resume
  ARM: it8152: add IT8152_LAST_IRQ definition to fix build error
  ARM: pxa: PXA_ESERIES depends on FB_W100.
  ARM: 6605/1: Add missing include "asm/memory.h"
  ARM: 6540/1: Stop irqsoff trace on return to user
  ARM: 6537/1: update Nomadik, U300 and Ux500 maintainers
  ARM: 6536/1: Add missing SZ_{32,64,128}
  ARM: fix cache-feroceon-l2 after stack based kmap_atomic()
  ARM: fix cache-xsc3l2 after stack based kmap_atomic()
  ARM: get rid of kmap_high_l1_vipt()
  ARM: smp: avoid incrementing mm_users on CPU startup
  ARM: pxa: PXA_ESERIES depends on FB_W100.

+16 -1
MAINTAINERS
···
 
 ARM/NOMADIK ARCHITECTURE
 M: Alessandro Rubini <rubini@unipv.it>
+M: Linus Walleij <linus.walleij@stericsson.com>
 M: STEricsson <STEricsson_nomadik_linux@list.st.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-nomadik/
 F: arch/arm/plat-nomadik/
+F: drivers/i2c/busses/i2c-nomadik.c
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
 M: Nelson Castillo <arhuaco@freaks-unidos.net>
···
 F: drivers/rtc/rtc-coh901331.c
 F: drivers/watchdog/coh901327_wdt.c
 F: drivers/dma/coh901318*
+F: drivers/mfd/ab3100*
+F: drivers/rtc/rtc-ab3100.c
+F: drivers/rtc/rtc-coh901331.c
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
-ARM/U8500 ARM ARCHITECTURE
+ARM/Ux500 ARM ARCHITECTURE
 M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
+M: Linus Walleij <linus.walleij@stericsson.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-ux500/
+F: drivers/dma/ste_dma40*
+F: drivers/mfd/ab3550*
+F: drivers/mfd/abx500*
+F: drivers/mfd/ab8500*
+F: drivers/mfd/stmpe*
+F: drivers/rtc/rtc-ab8500.c
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 ARM/VFP SUPPORT
 M: Russell King <linux@arm.linux.org.uk>
+1
arch/arm/include/asm/hardware/it8152.h
···
         IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
 #define IT8152_IRQ(x)           (IRQ_BOARD_START + (x))
+#define IT8152_LAST_IRQ         (IRQ_BOARD_START + 40)
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
-3
arch/arm/include/asm/highmem.h
···
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
+3 -3
arch/arm/include/asm/sizes.h
···
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-/* DO NOT EDIT!! - this file automatically generated
- * from .s file by awk -f s2h.awk
- */
 /* Size definitions
  * Copyright (C) ARM Limited 1998. All rights reserved.
  */
···
 
 /* handy sizes */
 #define SZ_16                   0x00000010
+#define SZ_32                   0x00000020
+#define SZ_64                   0x00000040
+#define SZ_128                  0x00000080
 #define SZ_256                  0x00000100
 #define SZ_512                  0x00000200
 
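
The new constants simply fill the gap between SZ_16 and SZ_256. As a hedged usage sketch (not taken from any of the merged patches; the resource name and the 0x40000000 base address are made up), this is the kind of platform code the definitions are meant for:

#include <linux/ioport.h>
#include <asm/sizes.h>

/* Hypothetical example: a 64-byte register window described without a
 * magic number.  Both the resource and its base address are invented. */
static struct resource demo_regs = {
        .start = 0x40000000,
        .end   = 0x40000000 + SZ_64 - 1,
        .flags = IORESOURCE_MEM,
};
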
+1
arch/arm/include/asm/system.h
···
 #define rmb()           dmb()
 #define wmb()           mb()
 #else
+#include <asm/memory.h>
 #define mb()    do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()   do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()   do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+6
arch/arm/kernel/entry-common.S
···
         ldr     r1, [tsk, #TI_FLAGS]
         tst     r1, #_TIF_WORK_MASK
         bne     fast_work_pending
+#if defined(CONFIG_IRQSOFF_TRACER)
+        asm_trace_hardirqs_on
+#endif
 
         /* perform architecture specific actions before user return */
         arch_ret_to_user r1, lr
···
         tst     r1, #_TIF_WORK_MASK
         bne     work_pending
 no_work_pending:
+#if defined(CONFIG_IRQSOFF_TRACER)
+        asm_trace_hardirqs_on
+#endif
         /* perform architecture specific actions before user return */
         arch_ret_to_user r1, lr
 
-1
arch/arm/kernel/smp.c
···
          * All kernel threads share the same mm context; grab a
          * reference and switch to it.
          */
-        atomic_inc(&mm->mm_users);
         atomic_inc(&mm->mm_count);
         current->active_mm = mm;
         cpumask_set_cpu(cpu, mm_cpumask(mm));
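
The idle thread brought up on a secondary CPU only borrows init_mm; it never becomes a user of the address space, so only mm_count (the reference that keeps the mm_struct itself alive) should be taken. Taking mm_users as well leaked one user reference per CPU bring-up, because nothing on the hot-unplug path ever dropped it. A minimal sketch of the borrow pattern, with adopt_init_mm() as a hypothetical name rather than a real kernel function:

#include <linux/sched.h>

/* Illustrative only: how a kernel thread adopts an mm it will never
 * "use" in the mm_users sense: pin the structure, not the user count. */
static void adopt_init_mm(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);      /* keep the mm_struct alive */
        current->active_mm = mm;        /* lazy-TLB borrow, no mm_users ref */
}
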
+1
arch/arm/mach-pxa/Kconfig
···
 config ARCH_PXA_ESERIES
         bool "PXA based Toshiba e-series PDAs"
         select PXA25x
+        select FB_W100
 
 config MACH_E330
         bool "Toshiba e330"
+2 -2
arch/arm/mach-pxa/sleep.S
···
 
         @ Let us ensure we jump to resume_after_mmu only when the mcr above
         @ actually took effect. They call it the "cpwait" operation.
-        mrc     p15, 0, r1, c2, c0, 0           @ queue a dependency on CP15
-        sub     pc, r2, r1, lsr #32             @ jump to virtual addr
+        mrc     p15, 0, r0, c2, c0, 0           @ queue a dependency on CP15
+        sub     pc, r2, r0, lsr #32             @ jump to virtual addr
         nop
         nop
         nop
+19 -18
arch/arm/mm/cache-feroceon-l2.c
···
  */
 
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
···
  * between which we don't want to be preempted.
  */
 
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
 {
 #ifdef CONFIG_HIGHMEM
         /*
-         * Let's do our own fixmap stuff in a minimal way here.
          * Because range ops can't be done on physical addresses,
          * we simply install a virtual mapping for it only for the
          * TLB lookup to occur, hence no need to flush the untouched
-         * memory mapping. This is protected with the disabling of
-         * interrupts by the caller.
+         * memory mapping afterwards (note: a cache flush may happen
+         * in some circumstances depending on the path taken in kunmap_atomic).
          */
-        unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-        unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
-        local_flush_tlb_kernel_page(vaddr);
-        return vaddr + (paddr & ~PAGE_MASK);
+        void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+        return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
 #else
         return __phys_to_virt(paddr);
+#endif
+}
+
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+        kunmap_atomic((void *)vaddr);
 #endif
 }
···
          */
         BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-        raw_local_irq_save(flags);
-        va_start = l2_start_va(start);
+        va_start = l2_get_va(start);
         va_end = va_start + (end - start);
+        raw_local_irq_save(flags);
         __asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
                 "mcr p15, 1, %1, c15, c9, 5"
                 : : "r" (va_start), "r" (va_end));
         raw_local_irq_restore(flags);
+        l2_put_va(va_start);
 }
 
 static inline void l2_clean_inv_pa(unsigned long addr)
···
          */
         BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-        raw_local_irq_save(flags);
-        va_start = l2_start_va(start);
+        va_start = l2_get_va(start);
         va_end = va_start + (end - start);
+        raw_local_irq_save(flags);
         __asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
                 "mcr p15, 1, %1, c15, c11, 5"
                 : : "r" (va_start), "r" (va_end));
         raw_local_irq_restore(flags);
+        l2_put_va(va_start);
 }
 
 static inline void l2_inv_all(void)
+21 -36
arch/arm/mm/cache-xsc3l2.c
···
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
 
 #define CR_L2   (1 << 26)
 
···
         dsb();
 }
 
+static inline void l2_unmap_va(unsigned long va)
+{
 #ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x)            raw_local_save_flags(x)
-#define l2_map_restore_flags(x)         raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x)            ((x) = 0)
-#define l2_map_restore_flags(x)         ((void)(x))
+        if (va != -1)
+                kunmap_atomic((void *)va);
 #endif
+}
 
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
-                                      unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
 {
 #ifdef CONFIG_HIGHMEM
         unsigned long va = prev_va & PAGE_MASK;
···
                 /*
                  * Switching to a new page. Because cache ops are
                  * using virtual addresses only, we must put a mapping
-                 * in place for it. We also enable interrupts for a
-                 * short while and disable them again to protect this
-                 * mapping.
+                 * in place for it.
                  */
-                unsigned long idx;
-                raw_local_irq_restore(flags);
-                idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-                va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-                raw_local_irq_restore(flags | PSR_I_BIT);
-                set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
-                local_flush_tlb_kernel_page(va);
+                l2_unmap_va(prev_va);
+                va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
         }
         return va + (pa_offset >> (32 - PAGE_SHIFT));
 #else
···
 
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
-        unsigned long vaddr, flags;
+        unsigned long vaddr;
 
         if (start == 0 && end == -1ul) {
                 xsc3_l2_inv_all();
···
         }
 
         vaddr = -1;             /* to force the first mapping */
-        l2_map_save_flags(flags);
 
         /*
          * Clean and invalidate partial first cache line.
          */
         if (start & (CACHE_LINE_SIZE - 1)) {
-                vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+                vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
                 xsc3_l2_clean_mva(vaddr);
                 xsc3_l2_inv_mva(vaddr);
                 start = (start | (CACHE_LINE_SIZE - 1)) + 1;
···
          * Invalidate all full cache lines between 'start' and 'end'.
          */
         while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
-                vaddr = l2_map_va(start, vaddr, flags);
+                vaddr = l2_map_va(start, vaddr);
                 xsc3_l2_inv_mva(vaddr);
                 start += CACHE_LINE_SIZE;
         }
···
          * Clean and invalidate partial last cache line.
          */
         if (start < end) {
-                vaddr = l2_map_va(start, vaddr, flags);
+                vaddr = l2_map_va(start, vaddr);
                 xsc3_l2_clean_mva(vaddr);
                 xsc3_l2_inv_mva(vaddr);
         }
 
-        l2_map_restore_flags(flags);
+        l2_unmap_va(vaddr);
 
         dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
-        unsigned long vaddr, flags;
+        unsigned long vaddr;
 
         vaddr = -1;             /* to force the first mapping */
-        l2_map_save_flags(flags);
 
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
-                vaddr = l2_map_va(start, vaddr, flags);
+                vaddr = l2_map_va(start, vaddr);
                 xsc3_l2_clean_mva(vaddr);
                 start += CACHE_LINE_SIZE;
         }
 
-        l2_map_restore_flags(flags);
+        l2_unmap_va(vaddr);
 
         dsb();
 }
···
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
-        unsigned long vaddr, flags;
+        unsigned long vaddr;
 
         if (start == 0 && end == -1ul) {
                 xsc3_l2_flush_all();
···
         }
 
         vaddr = -1;             /* to force the first mapping */
-        l2_map_save_flags(flags);
 
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
-                vaddr = l2_map_va(start, vaddr, flags);
+                vaddr = l2_map_va(start, vaddr);
                 xsc3_l2_clean_mva(vaddr);
                 xsc3_l2_inv_mva(vaddr);
                 start += CACHE_LINE_SIZE;
         }
 
-        l2_map_restore_flags(flags);
+        l2_unmap_va(vaddr);
 
         dsb();
 }
+4 -3
arch/arm/mm/dma-mapping.c
···
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
···
                         op(vaddr, len, dir);
                         kunmap_high(page);
                 } else if (cache_is_vipt()) {
-                        pte_t saved_pte;
-                        vaddr = kmap_high_l1_vipt(page, &saved_pte);
+                        /* unmapped pages might still be cached */
+                        vaddr = kmap_atomic(page);
                         op(vaddr + offset, len, dir);
-                        kunmap_high_l1_vipt(page, saved_pte);
+                        kunmap_atomic(vaddr);
                 }
         } else {
                 vaddr = page_address(page) + offset;
+4 -3
arch/arm/mm/flush.c
···
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
···
                         __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                         kunmap_high(page);
                 } else if (cache_is_vipt()) {
-                        pte_t saved_pte;
-                        addr = kmap_high_l1_vipt(page, &saved_pte);
+                        /* unmapped pages might still be cached */
+                        addr = kmap_atomic(page);
                         __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                        kunmap_high_l1_vipt(page, saved_pte);
+                        kunmap_atomic(addr);
                 }
         }
 
-87
arch/arm/mm/highmem.c
···
         pte = TOP_PTE(vaddr);
         return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-        unsigned int idx, cpu;
-        int *depth;
-        unsigned long vaddr, flags;
-        pte_t pte, *ptep;
-
-        if (!in_interrupt())
-                preempt_disable();
-
-        cpu = smp_processor_id();
-        depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        ptep = TOP_PTE(vaddr);
-        pte = mk_pte(page, kmap_prot);
-
-        raw_local_irq_save(flags);
-        (*depth)++;
-        if (pte_val(*ptep) == pte_val(pte)) {
-                *saved_pte = pte;
-        } else {
-                *saved_pte = *ptep;
-                set_pte_ext(ptep, pte, 0);
-                local_flush_tlb_kernel_page(vaddr);
-        }
-        raw_local_irq_restore(flags);
-
-        return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-        unsigned int idx, cpu = smp_processor_id();
-        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-        unsigned long vaddr, flags;
-        pte_t pte, *ptep;
-
-        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        ptep = TOP_PTE(vaddr);
-        pte = mk_pte(page, kmap_prot);
-
-        BUG_ON(pte_val(*ptep) != pte_val(pte));
-        BUG_ON(*depth <= 0);
-
-        raw_local_irq_save(flags);
-        (*depth)--;
-        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-                set_pte_ext(ptep, saved_pte, 0);
-                local_flush_tlb_kernel_page(vaddr);
-        }
-        raw_local_irq_restore(flags);
-
-        if (!in_interrupt())
-                preempt_enable();
-}
-
-#endif  /* CONFIG_CPU_CACHE_VIPT */
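
The long comment removed above justified the hand-rolled depth tracking by the need for reentrancy from interrupt context. With the stack-based kmap_atomic() scheme, each call takes the next per-CPU fixmap slot and slots are released in LIFO order, so nesting is handled by the generic code; that is what let the callers in dma-mapping.c and flush.c switch to plain kmap_atomic()/kmap_atomic_pfn(). A rough sketch of the nesting property, where nested_cache_op() and its page arguments are hypothetical:

#include <linux/highmem.h>

/* Illustrative only: two temporary mappings held at once on one CPU.
 * The inner mapping simply occupies the next fixmap slot, which is why
 * a cache op that interrupts another one no longer needs special care. */
static void nested_cache_op(struct page *page_a, struct page *page_b)
{
        void *outer = kmap_atomic(page_a);
        void *inner = kmap_atomic(page_b);      /* nests on the same CPU */

        /* ... operate on both temporary mappings ... */

        kunmap_atomic(inner);                   /* release in LIFO order */
        kunmap_atomic(outer);
}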