Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
"A number of fixes:

Patrik found a problem with preempt counting in the VFP assembly
functions, which can cause the preempt count to be upset.

Nicolas fixed a problem with the parsing of the DT when it straddles a
1MB boundary.

Subhash Jadavani reported a problem with sparsemem and our highmem
support for cache maintenance of DMA areas, and TI found a bug in
their strongly-ordered memory mapping type.

Also, three fixes by way of Will Deacon's tree: from Dave Martin for
instruction compatibility, and from Marc Zyngier to fix hypervisor
boot mode issues."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7629/1: mm: Fix missing XN flag for MT_MEMORY_SO
ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem
ARM: 7628/1: head.S: map one extra section for the ATAG/DTB area
ARM: 7627/1: Predicate preempt logic on PREEMPT_COUNT not PREEMPT alone
ARM: virt: simplify __hyp_stub_install epilog
ARM: virt: boot secondary CPUs through the right entry point
ARM: virt: Avoid bx instruction for compatibility with <=ARMv4

 arch/arm/kernel/head.S     |  5 ++++-
 arch/arm/kernel/hyp-stub.S | 18 ++++++------------
 arch/arm/mm/dma-mapping.c  | 18 ++++++++++--------
 arch/arm/mm/mmu.c          |  2 +-
 arch/arm/vfp/entry.S       |  6 +++---
 arch/arm/vfp/vfphw.S       |  4 ++--
 6 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@
 
 	/*
	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@
	addne	r3, r3, #PAGE_OFFSET
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
	orrne	r6, r7, r0
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@
	 * as it has already been validated by the primary processor.
	 */
 #ifdef CONFIG_ARM_VIRT_EXT
-	bl	__hyp_stub_install
+	bl	__hyp_stub_install_secondary
 #endif
	safe_svcmode_maskall r9
 
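
For illustration, a minimal stand-alone sketch of the arithmetic behind the
extra mapping (hypothetical addresses, not kernel code): with 1MB sections, an
ATAG/DTB blob whose first and last bytes fall into different sections would have
its tail unmapped if only the first section were entered into the early page
tables, which is why head.S now writes two section entries.

/* Minimal sketch of the 1MB-section arithmetic; addresses are made up. */
#include <stdio.h>

#define SECTION_SHIFT	20	/* 1MB sections */

int main(void)
{
	unsigned long dtb_start = 0x4FFFF000UL;	/* hypothetical DTB placement */
	unsigned long dtb_size  = 0x2000UL;	/* 8KB blob */

	unsigned long first = dtb_start >> SECTION_SHIFT;
	unsigned long last  = (dtb_start + dtb_size - 1) >> SECTION_SHIFT;

	if (first == last)
		printf("DTB fits in section %lu: one mapping is enough\n", first);
	else
		printf("DTB straddles sections %lu and %lu: map both\n", first, last);
	return 0;
}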

diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@
	 * immediately.
	 */
	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	bxne	lr
+	movne	pc, lr
 
	/*
	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@
	 */
 
	cmp	r4, #HYP_MODE
-	bxne	lr			@ give up if the CPU is not in HYP mode
+	movne	pc, lr			@ give up if the CPU is not in HYP mode
 
	/*
	 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@
	 * Eventually, CPU-specific code might be needed -- assume not for now
	 *
	 * This code relies on the "eret" instruction to synchronize the
-	 * various coprocessor accesses.
+	 * various coprocessor accesses. This is done when we switch to SVC
+	 * (see safe_svcmode_maskall).
	 */
	@ Now install the hypervisor stub:
	adr	r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@
 1:
 #endif
 
-	bic	r7, r4, #MODE_MASK
-	orr	r7, r7, #SVC_MODE
-THUMB(	orr	r7, r7, #PSR_T_BIT	)
-	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
-
-	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
-	__ERET				@ return, switching to SVC mode
-	@ The boot CPU mode is left in r4.
+	bx	lr			@ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@
	@ fall through
 ENTRY(__hyp_set_vectors)
	__HVC(0)
-	bx	lr
+	mov	pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -774,25 +774,27 @@
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
-	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;
-			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
@@ -809,7 +811,7 @@
			op(vaddr, len, dir);
		}
		offset = 0;
-		page++;
+		pfn++;
		left -= len;
	} while (left);
 }
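
As a rough user-space model of the new iteration (stand-alone and hypothetical,
with a printf standing in for the cache maintenance op): advancing a page frame
number and resolving it each time around the loop, as pfn_to_page() does in the
kernel, stays correct with sparsemem, where consecutive pfns need not have
adjacent struct page entries the way the old page++ assumed. The sketch clamps
every chunk to a page, whereas the kernel only needs that for highmem pages.

/* Simplified model of the per-page walk shown above; not kernel code. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

static void cache_op(unsigned long pfn, size_t off, size_t len)
{
	printf("maintain pfn %lu, offset %zu, len %zu\n", pfn, off, len);
}

static void buffer_cache_maint(unsigned long pfn, size_t offset, size_t size)
{
	size_t left = size;

	/* fold whole pages of 'offset' into the starting pfn */
	pfn += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		/* the kernel looks the page up here with pfn_to_page(pfn) */
		cache_op(pfn, offset, len);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

int main(void)
{
	/* hypothetical sg entry: 10000 bytes starting 8000 bytes into pfn 1000 */
	buffer_cache_maint(1000, 8000, 10000);
	return 0;
}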

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -283,7 +283,7 @@
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED,
+				L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,

diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,7 +22,7 @@
 @ IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	add	r11, r4, #1		@ increment it
	str	r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
@@ -53,7 +53,7 @@
 
	__INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it

diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -168,7 +168,7 @@
					@ else it's one 32-bit instruction, so
					@ always subtract 4 from the following
					@ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
@@ -192,7 +192,7 @@
					@ not recognised by VFP
 
	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
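
Loosely, the imbalance the PREEMPT_COUNT change closes, modelled as a
stand-alone C sketch (the config macros are fed in on the compiler command line
here, purely for illustration): the VFP entry asm used to bump the per-thread
preempt count only under CONFIG_PREEMPT, while preempt_enable(), which parts of
the exit path go through, balances the count whenever CONFIG_PREEMPT_COUNT is
set, e.g. in a CONFIG_DEBUG_ATOMIC_SLEEP build without CONFIG_PREEMPT, so such
configurations saw the count drift.

/* Build with -DCONFIG_PREEMPT_COUNT alone to see the count go negative, as in
 * a PREEMPT=n, DEBUG_ATOMIC_SLEEP=y kernel before this fix; guarding the entry
 * on PREEMPT_COUNT, as the patch does, keeps it balanced. Illustrative only. */
#include <stdio.h>

static int preempt_count;		/* stand-in for TI_PREEMPT */

static void vfp_asm_entry(void)
{
#ifdef CONFIG_PREEMPT			/* the old, too-narrow guard */
	preempt_count++;
#endif
}

static void vfp_exit_path(void)
{
#ifdef CONFIG_PREEMPT_COUNT		/* what preempt_enable() really tests */
	preempt_count--;
#endif
}

int main(void)
{
	vfp_asm_entry();
	vfp_exit_path();
	printf("preempt count after one VFP trap: %d\n", preempt_count);
	return 0;
}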