Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
"A few fixes for ARM. Some of these are correctness issues:
- TLBs must be flushed after the old mappings are removed by the DMA
mapping code, but before the new mappings are established.
- An off-by-one entry error in the Keystone LPAE setup code.

Fixes include:
- ensuring that setting up the LPAE identity mapping does not wipe out
the kernel image's existing page table entries.
- preventing userspace from trapping into kgdb.
- fixing a preemption count imbalance in the Intel iWMMXt code.
- fixing a build error with nommu.

Other changes include:
- Adding a note about which areas of memory are expected to be
accessible while the identity mapping tables are in place"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: 8124/1: don't enter kgdb when userspace executes a kgdb break instruction
ARM: idmap: add identity mapping usage note
ARM: 8115/1: LPAE: reduce damage caused by idmap to virtual memory layout
ARM: fix alignment of keystone page table fixup
ARM: 8112/1: only select ARM_PATCH_PHYS_VIRT if MMU is enabled
ARM: 8100/1: Fix preemption disable in iwmmxt_task_enable()
ARM: DMA: ensure that old section mappings are flushed from the TLB

+43 -17
+2 -2
arch/arm/Kconfig
··· 313 config ARCH_INTEGRATOR 314 bool "ARM Ltd. Integrator family" 315 select ARM_AMBA 316 - select ARM_PATCH_PHYS_VIRT 317 select AUTO_ZRELADDR 318 select COMMON_CLK 319 select COMMON_CLK_VERSATILE ··· 659 config ARCH_SHMOBILE_LEGACY 660 bool "Renesas ARM SoCs (non-multiplatform)" 661 select ARCH_SHMOBILE 662 - select ARM_PATCH_PHYS_VIRT 663 select CLKDEV_LOOKUP 664 select GENERIC_CLOCKEVENTS 665 select HAVE_ARM_SCU if SMP
··· 313 config ARCH_INTEGRATOR 314 bool "ARM Ltd. Integrator family" 315 select ARM_AMBA 316 + select ARM_PATCH_PHYS_VIRT if MMU 317 select AUTO_ZRELADDR 318 select COMMON_CLK 319 select COMMON_CLK_VERSATILE ··· 659 config ARCH_SHMOBILE_LEGACY 660 bool "Renesas ARM SoCs (non-multiplatform)" 661 select ARCH_SHMOBILE 662 + select ARM_PATCH_PHYS_VIRT if MMU 663 select CLKDEV_LOOKUP 664 select GENERIC_CLOCKEVENTS 665 select HAVE_ARM_SCU if SMP
+12 -11
arch/arm/kernel/iwmmxt.S
··· 94 95 mrc p15, 0, r2, c2, c0, 0 96 mov r2, r2 @ cpwait 97 98 - teq r1, #0 @ test for last ownership 99 - mov lr, r9 @ normal exit from exception 100 - beq concan_load @ no owner, skip save 101 102 concan_save: 103 104 tmrc r2, wCon 105 ··· 144 wstrd wR15, [r1, #MMX_WR15] 145 146 2: teq r0, #0 @ anything to load? 147 - beq 3f 148 149 concan_load: 150 ··· 177 @ clear CUP/MUP (only if r1 != 0) 178 teq r1, #0 179 mov r2, #0 180 - beq 3f 181 - tmcr wCon, r2 182 183 - 3: 184 - #ifdef CONFIG_PREEMPT_COUNT 185 - get_thread_info r10 186 - #endif 187 - 4: dec_preempt_count r10, r3 188 mov pc, lr 189 190 /*
··· 94 95 mrc p15, 0, r2, c2, c0, 0 96 mov r2, r2 @ cpwait 97 + bl concan_save 98 99 + #ifdef CONFIG_PREEMPT_COUNT 100 + get_thread_info r10 101 + #endif 102 + 4: dec_preempt_count r10, r3 103 + mov pc, r9 @ normal exit from exception 104 105 concan_save: 106 + 107 + teq r1, #0 @ test for last ownership 108 + beq concan_load @ no owner, skip save 109 110 tmrc r2, wCon 111 ··· 138 wstrd wR15, [r1, #MMX_WR15] 139 140 2: teq r0, #0 @ anything to load? 141 + moveq pc, lr @ if not, return 142 143 concan_load: 144 ··· 171 @ clear CUP/MUP (only if r1 != 0) 172 teq r1, #0 173 mov r2, #0 174 + moveq pc, lr 175 176 + tmcr wCon, r2 177 mov pc, lr 178 179 /*
+4
arch/arm/kernel/kgdb.c
··· 160 static struct undef_hook kgdb_brkpt_hook = { 161 .instr_mask = 0xffffffff, 162 .instr_val = KGDB_BREAKINST, 163 .fn = kgdb_brk_fn 164 }; 165 166 static struct undef_hook kgdb_compiled_brkpt_hook = { 167 .instr_mask = 0xffffffff, 168 .instr_val = KGDB_COMPILED_BREAK, 169 .fn = kgdb_compiled_brk_fn 170 }; 171
··· 160 static struct undef_hook kgdb_brkpt_hook = { 161 .instr_mask = 0xffffffff, 162 .instr_val = KGDB_BREAKINST, 163 + .cpsr_mask = MODE_MASK, 164 + .cpsr_val = SVC_MODE, 165 .fn = kgdb_brk_fn 166 }; 167 168 static struct undef_hook kgdb_compiled_brkpt_hook = { 169 .instr_mask = 0xffffffff, 170 .instr_val = KGDB_COMPILED_BREAK, 171 + .cpsr_mask = MODE_MASK, 172 + .cpsr_val = SVC_MODE, 173 .fn = kgdb_compiled_brk_fn 174 }; 175
+10 -1
arch/arm/mm/dma-mapping.c
··· 461 map.type = MT_MEMORY_DMA_READY; 462 463 /* 464 - * Clear previous low-memory mapping 465 */ 466 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); 467 addr += PMD_SIZE) 468 pmd_clear(pmd_off_k(addr)); 469 470 iotable_init(&map, 1); 471 }
··· 461 map.type = MT_MEMORY_DMA_READY; 462 463 /* 464 + * Clear previous low-memory mapping to ensure that the 465 + * TLB does not see any conflicting entries, then flush 466 + * the TLB of the old entries before creating new mappings. 467 + * 468 + * This ensures that any speculatively loaded TLB entries 469 + * (even though they may be rare) can not cause any problems, 470 + * and ensures that this code is architecturally compliant. 471 */ 472 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); 473 addr += PMD_SIZE) 474 pmd_clear(pmd_off_k(addr)); 475 + 476 + flush_tlb_kernel_range(__phys_to_virt(start), 477 + __phys_to_virt(end)); 478 479 iotable_init(&map, 1); 480 }
+12
arch/arm/mm/idmap.c
··· 9 #include <asm/sections.h> 10 #include <asm/system_info.h> 11 12 pgd_t *idmap_pgd; 13 phys_addr_t (*arch_virt_to_idmap) (unsigned long x); 14 ··· 30 pr_warning("Failed to allocate identity pmd.\n"); 31 return; 32 } 33 pud_populate(&init_mm, pud, pmd); 34 pmd += pmd_index(addr); 35 } else
··· 9 #include <asm/sections.h> 10 #include <asm/system_info.h> 11 12 + /* 13 + * Note: accesses outside of the kernel image and the identity map area 14 + * are not supported on any CPU using the idmap tables as its current 15 + * page tables. 16 + */ 17 pgd_t *idmap_pgd; 18 phys_addr_t (*arch_virt_to_idmap) (unsigned long x); 19 ··· 25 pr_warning("Failed to allocate identity pmd.\n"); 26 return; 27 } 28 + /* 29 + * Copy the original PMD to ensure that the PMD entries for 30 + * the kernel image are preserved. 31 + */ 32 + if (!pud_none(*pud)) 33 + memcpy(pmd, pmd_offset(pud, 0), 34 + PTRS_PER_PMD * sizeof(pmd_t)); 35 pud_populate(&init_mm, pud, pmd); 36 pmd += pmd_index(addr); 37 } else
+3 -3
arch/arm/mm/mmu.c
··· 1406 return; 1407 1408 /* remap kernel code and data */ 1409 - map_start = init_mm.start_code; 1410 - map_end = init_mm.brk; 1411 1412 /* get a handle on things... */ 1413 pgd0 = pgd_offset_k(0); ··· 1442 } 1443 1444 /* remap pmds for kernel mapping */ 1445 - phys = __pa(map_start) & PMD_MASK; 1446 do { 1447 *pmdk++ = __pmd(phys | pmdprot); 1448 phys += PMD_SIZE;
··· 1406 return; 1407 1408 /* remap kernel code and data */ 1409 + map_start = init_mm.start_code & PMD_MASK; 1410 + map_end = ALIGN(init_mm.brk, PMD_SIZE); 1411 1412 /* get a handle on things... */ 1413 pgd0 = pgd_offset_k(0); ··· 1442 } 1443 1444 /* remap pmds for kernel mapping */ 1445 + phys = __pa(map_start); 1446 do { 1447 *pmdk++ = __pmd(phys | pmdprot); 1448 phys += PMD_SIZE;