Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
"This resolves some further issues with the dma mask changes on ARM
which have been found by TI and others, and also some corner cases
with the updates to the virtual to physical address translations.

Konstantin also found some problems with the unwinder, which now
performs tighter verification that the stack is valid while unwinding"
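
For context, the tighter verification amounts to two range checks, visible in the
process.c and stacktrace.c hunks below: a saved stack pointer is only followed while
it stays inside the task's stack, and a frame pointer is only dereferenced when the
whole frame record lies within the stack bounds. A minimal stand-alone sketch of that
invariant follows (plain user-space C, not the kernel code itself; THREAD_SIZE and the
example addresses are assumptions):

	/*
	 * Hedged sketch, not kernel code: models the stack-bounds invariant
	 * enforced by the get_wchan()/unwind_frame() changes below.
	 * THREAD_SIZE and the example addresses are assumptions.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define THREAD_SIZE 8192UL	/* typical ARM kernel stack size (assumed) */

	/* A saved sp is only trusted while it lies inside the task's stack. */
	static bool sp_is_valid(unsigned long sp, unsigned long stack_page)
	{
		return sp >= stack_page && sp < stack_page + THREAD_SIZE;
	}

	/*
	 * A frame pointer is only dereferenced when the frame record around it
	 * fits inside [low, high], mirroring the low + 12 / high - 4 bounds in
	 * the stacktrace.c hunk below.
	 */
	static bool fp_is_valid(unsigned long fp, unsigned long low, unsigned long high)
	{
		return fp >= low + 12 && fp <= high - 4;
	}

	int main(void)
	{
		unsigned long stack = 0xc0000000UL;	/* made-up stack base */

		/* In range: accepted.  At the very top of the stack: rejected. */
		printf("%d %d\n", sp_is_valid(stack + 64, stack),
		       fp_is_valid(stack + THREAD_SIZE, stack, stack + THREAD_SIZE));
		return 0;
	}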

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: fix asm/memory.h build error
ARM: 7917/1: cacheflush: correctly limit range of memory region being flushed
ARM: 7913/1: fix framepointer check in unwind_frame
ARM: 7912/1: check stack pointer in get_wchan
ARM: 7909/1: mm: Call setup_dma_zone() post early_paging_init()
ARM: 7908/1: mm: Fix the arm_dma_limit calculation
ARM: another fix for the DMA mapping checks

+67 -78
+14 -17
arch/arm/include/asm/memory.h
···
 #define TASK_UNMAPPED_BASE	UL(0x00000000)
 #endif
 
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET		UL(CONFIG_DRAM_BASE)
-#endif
-
 #ifndef END_MEM
 #define END_MEM		(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
-#define PAGE_OFFSET		(PHYS_OFFSET)
+#define PAGE_OFFSET		PLAT_PHYS_OFFSET
 #endif
 
 /*
  * The module can be at any place in ram in nommu mode.
  */
 #define MODULES_END		(END_MEM)
-#define MODULES_VADDR		(PHYS_OFFSET)
+#define MODULES_VADDR		PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
···
 #define ARCH_PGD_SHIFT		0
 #endif
 #define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
+
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h.  Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
+#endif
 
 #ifndef __ASSEMBLY__
···
 
 #else
 
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
 	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
···
 
 #endif
 #endif
-#endif	/* __ASSEMBLY__ */
-
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET	PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
-#ifndef __ASSEMBLY__
 
 /*
  * PFNs are used to describe any physical page; this means
+2 -2
arch/arm/kernel/head-nommu.S
···
 
 #ifdef CONFIG_ARM_MPU
 	/* Calculate the size of a region covering just the kernel */
-	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
 	ldr	r6, =(_end)			@ Cover whole kernel
 	sub	r6, r6, r5			@ Minimum size of region to map
 	clz	r6, r6				@ Region size must be 2^N...
···
 	set_region_nr r0, #MPU_RAM_REGION
 	isb
 	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
-	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
 	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
 
 	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled
+1 -1
arch/arm/kernel/head.S
···
 	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
 	add	r8, r8, r4			@ PHYS_OFFSET
 #else
-	ldr	r8, =PHYS_OFFSET		@ always constant in this case
+	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
 #endif
 
 	/*
+5 -2
arch/arm/kernel/process.c
···
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
+	unsigned long stack_page;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
···
 	frame.sp = thread_saved_sp(p);
 	frame.lr = 0;			/* recovered from the stack */
 	frame.pc = thread_saved_pc(p);
+	stack_page = (unsigned long)task_stack_page(p);
 	do {
-		int ret = unwind_frame(&frame);
-		if (ret < 0)
+		if (frame.sp < stack_page ||
+		    frame.sp >= stack_page + THREAD_SIZE ||
+		    unwind_frame(&frame) < 0)
 			return 0;
 		if (!in_sched_functions(frame.pc))
 			return frame.pc;
+1 -2
arch/arm/kernel/setup.c
···
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-	setup_dma_zone(mdesc);
-
 	if (mdesc->reboot_mode != REBOOT_HARD)
 		reboot_mode = mdesc->reboot_mode;
 
···
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+	setup_dma_zone(mdesc);
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
+1 -1
arch/arm/kernel/stacktrace.c
···
 	high = ALIGN(low, THREAD_SIZE);
 
 	/* check current frame pointer is within bounds */
-	if (fp < (low + 12) || fp + 4 >= high)
+	if (fp < low + 12 || fp > high - 4)
 		return -EINVAL;
 
 	/* restore the registers from the stack frame */
+2 -1
arch/arm/kernel/traps.c
···
 __do_cache_op(unsigned long start, unsigned long end)
 {
 	int ret;
-	unsigned long chunk = PAGE_SIZE;
 
 	do {
+		unsigned long chunk = min(PAGE_SIZE, end - start);
+
 		if (signal_pending(current)) {
 			struct thread_info *ti = current_thread_info();
 
+40 -51
arch/arm/mm/dma-mapping.c
···
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+	unsigned long max_dma_pfn;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) < max_pfn) {
+		if (warn) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		}
+		return 0;
+	}
+
+	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+		if (warn)
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 max_dma_pfn + 1);
+		return 0;
+	}
+
+	return 1;
+}
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
-		unsigned long max_dma_pfn;
-
 		mask = dev->coherent_dma_mask;
 
 		/*
···
 			return 0;
 		}
 
-		max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
-		/*
-		 * If the mask allows for more memory than we can address,
-		 * and we actually have that much memory, then fail the
-		 * allocation.
-		 */
-		if (sizeof(mask) != sizeof(dma_addr_t) &&
-		    mask > (dma_addr_t)~0 &&
-		    dma_to_pfn(dev, ~0) > max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
-				 mask);
-			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		if (!__dma_supported(dev, mask, true))
 			return 0;
-		}
-
-		/*
-		 * Now check that the mask, when translated to a PFN,
-		 * fits within the allowable addresses which we can
-		 * allocate.
-		 */
-		if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 arm_dma_pfn_limit + 1);
-			return 0;
-		}
 	}
 
 	return mask;
···
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	unsigned long limit;
-
-	/*
-	 * If the mask allows for more memory than we can address,
-	 * and we actually have that much memory, then we must
-	 * indicate that DMA to this device is not supported.
-	 */
-	if (sizeof(mask) != sizeof(dma_addr_t) &&
-	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
-		return 0;
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit.  This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	limit = dma_to_pfn(dev, mask);
-
-	if (limit < arm_dma_pfn_limit)
-		return 0;
-
-	return 1;
+	return __dma_supported(dev, mask, false);
 }
 EXPORT_SYMBOL(dma_supported);
 
+1 -1
arch/arm/mm/init.c
···
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;