Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
"A number of fixes:

Patrik found a problem with preempt counting in the VFP assembly
functions which can cause the preempt count to become unbalanced.
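
A minimal standalone C sketch of the failure class (not kernel code:
preempt_count, vfp_entry() and vfp_exit() below are hypothetical
stand-ins for TI_PREEMPT and the VFP entry/exit paths, and the config
macros simulate a build with PREEMPT_COUNT enabled but PREEMPT
disabled, as CONFIG_DEBUG_ATOMIC_SLEEP can produce). The count is live
whenever PREEMPT_COUNT is set, so guarding one side of an inc/dec pair
on CONFIG_PREEMPT alone leaves it unbalanced:

#include <stdio.h>

#define CONFIG_PREEMPT_COUNT 1	/* count is maintained in this build */
/* CONFIG_PREEMPT deliberately left undefined */

static int preempt_count;	/* stand-in for TI_PREEMPT */

static void vfp_entry(void)	/* models the wrongly guarded increment */
{
#ifdef CONFIG_PREEMPT		/* wrong guard: compiles away here */
	preempt_count++;
#endif
}

static void vfp_exit(void)	/* models a correctly guarded decrement */
{
#ifdef CONFIG_PREEMPT_COUNT
	preempt_count--;
#endif
}

int main(void)
{
	vfp_entry();
	vfp_exit();
	printf("preempt count = %d\n", preempt_count);	/* -1: unbalanced */
	return 0;
}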

Nicolas fixed a problem with the parsing of the DT when it straddles a
1MB boundary.
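
The arithmetic is worth spelling out. A minimal C sketch, assuming the
classic non-LPAE SECTION_SHIFT of 20 (1MB sections) and a hypothetical
dtb_base/dtb_size: a blob that starts just below a section boundary
spills into the next section, so mapping only the section containing
its start address leaves the tail unmapped, which is why head.S now
maps two sections:

#include <stdio.h>

#define SECTION_SHIFT	20			/* 1MB sections, non-LPAE */

int main(void)
{
	unsigned long dtb_base = 0x4fff00;	/* 0x100 below the 5MB mark */
	unsigned long dtb_size = 0x2000;	/* an 8KB blob */

	unsigned long first = dtb_base >> SECTION_SHIFT;
	unsigned long last  = (dtb_base + dtb_size - 1) >> SECTION_SHIFT;

	printf("sections spanned: %lu\n", last - first + 1);	/* prints 2 */
	return 0;
}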

Subhash Jadavani reported a problem with sparsemem and our highmem
support for cache maintenance for DMA areas, and TI found a bug in
their strongly ordered memory mapping type.
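
The sparsemem half of this is the classic struct page iterator pitfall:
with sparsemem, struct page arrays are only contiguous within a memory
section, so advancing with "page++" across a section boundary walks
into unrelated memory, while stepping a pfn and converting each time
stays valid. A toy userspace model (struct page, pfn_to_page() and
PAGES_PER_SECTION here are simplified stand-ins, not the kernel's):

#include <stdio.h>

#define PAGES_PER_SECTION 4

struct page { unsigned long pfn; };

/* two per-section arrays; nothing guarantees they are adjacent */
static struct page section0[PAGES_PER_SECTION];
static struct page section1[PAGES_PER_SECTION];

static struct page *pfn_to_page(unsigned long pfn)
{
	struct page *sec = (pfn / PAGES_PER_SECTION) ? section1 : section0;
	return &sec[pfn % PAGES_PER_SECTION];
}

int main(void)
{
	struct page *page = pfn_to_page(PAGES_PER_SECTION - 1);

	/* buggy pattern: pointer arithmetic past the end of section0 */
	printf("page + 1        = %p (undefined target)\n",
	       (void *)(page + 1));
	/* safe pattern, as in the dma_cache_maint() fix below */
	printf("pfn_to_page(%d)  = %p\n", PAGES_PER_SECTION,
	       (void *)pfn_to_page(PAGES_PER_SECTION));
	return 0;
}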

Also, three fixes by way of Will Deacon's tree: from Dave Martin for
instruction compatibility, and from Marc Zyngier to fix hypervisor
boot mode issues."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7629/1: mm: Fix missing XN flag for MT_MEMORY_SO
ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem
ARM: 7628/1: head.S: map one extra section for the ATAG/DTB area
ARM: 7627/1: Predicate preempt logic on PREEMPT_COUNT not PREEMPT alone
ARM: virt: simplify __hyp_stub_install epilog
ARM: virt: boot secondary CPUs through the right entry point
ARM: virt: Avoid bx instruction for compatibility with <=ARMv4

6 files changed, 26 insertions(+), 27 deletions(-)
arch/arm/kernel/head.S (+4 -1)

@@ -246,6 +246,7 @@
 
 	/*
	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@
 	addne	r3, r3, #PAGE_OFFSET
 	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
 	orrne	r6, r7, r0
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@
	 * as it has already been validated by the primary processor.
 	 */
 #ifdef CONFIG_ARM_VIRT_EXT
-	bl	__hyp_stub_install
+	bl	__hyp_stub_install_secondary
 #endif
 	safe_svcmode_maskall r9
 
arch/arm/kernel/hyp-stub.S (+6 -12)

@@ -99,7 +99,7 @@
	 * immediately.
 	 */
 	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	bxne	lr
+	movne	pc, lr
 
 	/*
	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@
 	 */
 
 	cmp	r4, #HYP_MODE
-	bxne	lr			@ give up if the CPU is not in HYP mode
+	movne	pc, lr			@ give up if the CPU is not in HYP mode
 
 	/*
	 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@
	 * Eventually, CPU-specific code might be needed -- assume not for now
 	 *
	 * This code relies on the "eret" instruction to synchronize the
-	 * various coprocessor accesses.
+	 * various coprocessor accesses. This is done when we switch to SVC
+	 * (see safe_svcmode_maskall).
 	 */
 	@ Now install the hypervisor stub:
 	adr	r7, __hyp_stub_vectors
@@ -156,14 +157,7 @@
 1:
 #endif
 
-	bic	r7, r4, #MODE_MASK
-	orr	r7, r7, #SVC_MODE
-THUMB(	orr	r7, r7, #PSR_T_BIT	)
-	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
-
-	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
-	__ERET				@ return, switching to SVC mode
-					@ The boot CPU mode is left in r4.
+	bx	lr			@ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@
 	@ fall through
 ENTRY(__hyp_set_vectors)
 	__HVC(0)
-	bx	lr
+	mov	pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
arch/arm/mm/dma-mapping.c (+10 -8)

@@ -774,25 +774,27 @@
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
 	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
 	 */
-	size_t left = size;
 	do {
 		size_t len = left;
 		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
 		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
@@ -809,7 +811,7 @@
 			op(vaddr, len, dir);
 		}
 		offset = 0;
-		page++;
+		pfn++;
 		left -= len;
 	} while (left);
 }
arch/arm/mm/mmu.c (+1 -1)

@@ -283,7 +283,7 @@
 	},
 	[MT_MEMORY_SO] = {
 		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED,
+				L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
arch/arm/vfp/entry.S (+3 -3)

@@ -22,7 +22,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	add	r11, r4, #1		@ increment it
 	str	r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
@@ -53,7 +53,7 @@
 
 __INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
arch/arm/vfp/vfphw.S (+2 -2)

@@ -168,7 +168,7 @@
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
@@ -192,7 +192,7 @@
 					@ not recognised by VFP
 
 	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it