Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
"This resolves some further issues with the dma mask changes on ARM
which have been found by TI and others, and also some corner cases
with the updates to the virtual to physical address translations.

Konstantin also found some problems with the unwinder, which now
performs tighter verification that the stack is valid while unwinding"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: fix asm/memory.h build error
ARM: 7917/1: cacheflush: correctly limit range of memory region being flushed
ARM: 7913/1: fix framepointer check in unwind_frame
ARM: 7912/1: check stack pointer in get_wchan
ARM: 7909/1: mm: Call setup_dma_zone() post early_paging_init()
ARM: 7908/1: mm: Fix the arm_dma_limit calculation
ARM: another fix for the DMA mapping checks

 9 files changed, 67 insertions(+), 78 deletions(-)

arch/arm/include/asm/memory.h (+14 -17)
···
 #define TASK_UNMAPPED_BASE	UL(0x00000000)
 #endif
 
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET		UL(CONFIG_DRAM_BASE)
-#endif
-
 #ifndef END_MEM
 #define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
-#define PAGE_OFFSET		(PHYS_OFFSET)
+#define PAGE_OFFSET		PLAT_PHYS_OFFSET
 #endif
 
 /*
  * The module can be at any place in ram in nommu mode.
  */
 #define MODULES_END		(END_MEM)
-#define MODULES_VADDR		(PHYS_OFFSET)
+#define MODULES_VADDR		PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
 
···
 #define ARCH_PGD_SHIFT		0
 #endif
 #define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
+
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h.  Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
+#endif
 
 #ifndef __ASSEMBLY__
 
···
 
 #else
 
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
 	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
···
 
 #endif
 #endif
-#endif /* __ASSEMBLY__ */
-
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET	PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
-#ifndef __ASSEMBLY__
 
 /*
  * PFNs are used to describe any physical page; this means
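
The header now keeps a single definition of the physical RAM start (PLAT_PHYS_OFFSET) and derives PHYS_OFFSET from it for C code; the translation itself stays a plain linear offset. For reference only, a minimal userspace sketch of that arithmetic, with assumed base addresses (0xc0000000 virtual, 0x80000000 physical) that are not taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET	0xc0000000UL	/* assumed kernel virtual base */
#define PHYS_OFFSET	0x80000000UL	/* assumed start of RAM */

/* Same shape as the __virt_to_phys() shown in the hunk above. */
static uintptr_t virt_to_phys(uintptr_t x)
{
	return x - PAGE_OFFSET + PHYS_OFFSET;
}

/* Inverse translation, included only to show the round trip. */
static uintptr_t phys_to_virt(uintptr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

int main(void)
{
	uintptr_t v = PAGE_OFFSET + 0x1234;

	printf("virt %#lx -> phys %#lx\n",
	       (unsigned long)v, (unsigned long)virt_to_phys(v));
	printf("round trip ok: %d\n", phys_to_virt(virt_to_phys(v)) == v);
	return 0;
}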

arch/arm/kernel/head-nommu.S (+2 -2)
···
 
 #ifdef CONFIG_ARM_MPU
 	/* Calculate the size of a region covering just the kernel */
-	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
 	ldr	r6, =(_end)			@ Cover whole kernel
 	sub	r6, r6, r5			@ Minimum size of region to map
 	clz	r6, r6				@ Region size must be 2^N...
···
 	set_region_nr r0, #MPU_RAM_REGION
 	isb
 	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
-	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
 	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
 
 	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled

arch/arm/kernel/head.S (+1 -1)
···
 	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
 	add	r8, r8, r4			@ PHYS_OFFSET
 #else
-	ldr	r8, =PHYS_OFFSET		@ always constant in this case
+	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
 #endif
 
 /*

arch/arm/kernel/process.c (+5 -2)
···
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
+	unsigned long stack_page;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
···
 	frame.sp = thread_saved_sp(p);
 	frame.lr = 0;			/* recovered from the stack */
 	frame.pc = thread_saved_pc(p);
+	stack_page = (unsigned long)task_stack_page(p);
 	do {
-		int ret = unwind_frame(&frame);
-		if (ret < 0)
+		if (frame.sp < stack_page ||
+		    frame.sp >= stack_page + THREAD_SIZE ||
+		    unwind_frame(&frame) < 0)
 			return 0;
 		if (!in_sched_functions(frame.pc))
 			return frame.pc;
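
The get_wchan() change only trusts a saved stack pointer that lies inside the task's stack page before the unwinder dereferences it. A standalone sketch of that predicate; the 8 KiB THREAD_SIZE and the helper name are assumptions for illustration, not kernel API:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE	8192UL	/* assumed 8 KiB kernel stack */

/* Accept sp only when it points inside [stack_page, stack_page + THREAD_SIZE). */
static bool sp_on_stack(uintptr_t sp, uintptr_t stack_page)
{
	return sp >= stack_page && sp < stack_page + THREAD_SIZE;
}

int main(void)
{
	uintptr_t stack_page = 0x10000;

	assert(sp_on_stack(stack_page + 128, stack_page));
	assert(!sp_on_stack(stack_page + THREAD_SIZE, stack_page));	/* one past the end */
	assert(!sp_on_stack(stack_page - 4, stack_page));		/* below the stack */
	return 0;
}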

arch/arm/kernel/setup.c (+1 -2)
···
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-	setup_dma_zone(mdesc);
-
 	if (mdesc->reboot_mode != REBOOT_HARD)
 		reboot_mode = mdesc->reboot_mode;
 
···
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+	setup_dma_zone(mdesc);
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 

arch/arm/kernel/stacktrace.c (+1 -1)
···
 	high = ALIGN(low, THREAD_SIZE);
 
 	/* check current frame pointer is within bounds */
-	if (fp < (low + 12) || fp + 4 >= high)
+	if (fp < low + 12 || fp > high - 4)
 		return -EINVAL;
 
 	/* restore the registers from the stack frame */
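
The rewritten bounds check closes an unsigned-overflow corner case: with a corrupt frame pointer near the top of the address space, fp + 4 wraps around and the old comparison lets it through. A small demonstration using 32-bit arithmetic to model ARM's unsigned long; the stack bounds are made up:

#include <assert.h>
#include <stdint.h>

static int fp_ok_old(uint32_t fp, uint32_t low, uint32_t high)
{
	return !(fp < low + 12 || fp + 4 >= high);	/* fp + 4 can wrap to a small value */
}

static int fp_ok_new(uint32_t fp, uint32_t low, uint32_t high)
{
	return !(fp < low + 12 || fp > high - 4);
}

int main(void)
{
	uint32_t low = 0xdf960000, high = low + 8192;	/* illustrative stack bounds */

	/* Corrupt fp at the top of the address space: fp + 4 wraps to 3,
	 * so the old check accepted it; the new check rejects it. */
	assert(fp_ok_old(0xffffffffu, low, high));
	assert(!fp_ok_new(0xffffffffu, low, high));

	/* An ordinary in-range frame pointer passes both forms. */
	assert(fp_ok_old(low + 100, low, high));
	assert(fp_ok_new(low + 100, low, high));
	return 0;
}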

arch/arm/kernel/traps.c (+2 -1)
···
 __do_cache_op(unsigned long start, unsigned long end)
 {
 	int ret;
-	unsigned long chunk = PAGE_SIZE;
 
 	do {
+		unsigned long chunk = min(PAGE_SIZE, end - start);
+
 		if (signal_pending(current)) {
 			struct thread_info *ti = current_thread_info();
 
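
With the chunk size computed inside the loop, the final pass is clamped to the bytes actually remaining rather than always covering a full page, so the flush never runs past end. A userspace sketch of the loop shape; flush_range() is a stand-in, not a kernel interface:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void flush_range(unsigned long start, unsigned long len)
{
	printf("flush %#lx..%#lx\n", start, start + len);
}

static void do_cache_op(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long chunk = end - start;

		if (chunk > PAGE_SIZE)	/* i.e. min(PAGE_SIZE, end - start) */
			chunk = PAGE_SIZE;
		flush_range(start, chunk);
		start += chunk;
	}
}

int main(void)
{
	do_cache_op(0x8000, 0x8000 + 5000);	/* flushes 4096 bytes, then 904 */
	return 0;
}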

arch/arm/mm/dma-mapping.c (+40 -51)
···
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+	unsigned long max_dma_pfn;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) < max_pfn) {
+		if (warn) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		}
+		return 0;
+	}
+
+	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+		if (warn)
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 max_dma_pfn + 1);
+		return 0;
+	}
+
+	return 1;
+}
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
-		unsigned long max_dma_pfn;
-
 		mask = dev->coherent_dma_mask;
 
 		/*
···
 			return 0;
 		}
 
-		max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
-		/*
-		 * If the mask allows for more memory than we can address,
-		 * and we actually have that much memory, then fail the
-		 * allocation.
-		 */
-		if (sizeof(mask) != sizeof(dma_addr_t) &&
-		    mask > (dma_addr_t)~0 &&
-		    dma_to_pfn(dev, ~0) > max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
-				 mask);
-			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		if (!__dma_supported(dev, mask, true))
 			return 0;
-		}
-
-		/*
-		 * Now check that the mask, when translated to a PFN,
-		 * fits within the allowable addresses which we can
-		 * allocate.
-		 */
-		if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 arm_dma_pfn_limit + 1);
-			return 0;
-		}
 	}
 
 	return mask;
···
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	unsigned long limit;
-
-	/*
-	 * If the mask allows for more memory than we can address,
-	 * and we actually have that much memory, then we must
-	 * indicate that DMA to this device is not supported.
-	 */
-	if (sizeof(mask) != sizeof(dma_addr_t) &&
-	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
-		return 0;
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit.  This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	limit = dma_to_pfn(dev, mask);
-
-	if (limit < arm_dma_pfn_limit)
-		return 0;
-
-	return 1;
+	return __dma_supported(dev, mask, false);
 }
 EXPORT_SYMBOL(dma_supported);
 
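
The new __dma_supported() helper reduces both call sites to two comparisons: reject a mask that is wider than dma_addr_t while RAM really extends past what dma_addr_t can describe, and reject a mask whose PFN limit falls short of the addressable DMA range. A simplified userspace model of that logic; the identity bus-to-PFN translation, 4 KiB pages and memory sizes are all assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;				/* assumed 32-bit bus address */

static const unsigned long max_pfn = 0x80000;		/* assumed 2 GiB of RAM */
static const unsigned long arm_dma_pfn_limit = 0x80000;	/* assumed: no DMA zone cap */

/* Assumed identity bus-to-physical translation with 4 KiB pages. */
static unsigned long dma_to_pfn(uint64_t addr)
{
	return (unsigned long)(addr >> 12);
}

static bool mask_supported(uint64_t mask)
{
	unsigned long max_dma_pfn;

	/* Mask wider than dma_addr_t while RAM extends past it: not usable. */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn((dma_addr_t)~0) < max_pfn)
		return false;

	max_dma_pfn = max_pfn < arm_dma_pfn_limit ? max_pfn : arm_dma_pfn_limit;

	/* The mask must reach at least as far as the DMA zone limit. */
	return dma_to_pfn(mask) >= max_dma_pfn;
}

int main(void)
{
	printf("32-bit mask supported: %d\n", mask_supported(0xffffffffULL));	/* 1 */
	printf("24-bit mask supported: %d\n", mask_supported(0x00ffffffULL));	/* 0 */
	return 0;
}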

arch/arm/mm/init.c (+1 -1)
···
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
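
The corrected limit is simply the start of RAM plus the machine's DMA zone size, minus one, now taken from the runtime __pv_phys_offset rather than the PHYS_OFFSET macro. A worked example with assumed values (RAM at 0x80000000 and a 16 MiB dma_zone_size):

#include <stdio.h>

int main(void)
{
	unsigned long phys_offset   = 0x80000000UL;	/* assumed start of RAM */
	unsigned long dma_zone_size = 16UL << 20;	/* assumed mdesc->dma_zone_size */
	unsigned long arm_dma_limit = phys_offset + dma_zone_size - 1;

	printf("arm_dma_limit     = %#lx\n", arm_dma_limit);		/* 0x80ffffff */
	printf("arm_dma_pfn_limit = %#lx\n", arm_dma_limit >> 12);	/* 0x80fff */
	return 0;
}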