Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull ARM64 patches from Catalin Marinas:
- Build fix with DMA_CMA enabled
- Introduction of PTE_WRITE to distinguish between writable but clean
  and truly read-only pages
- FIQ enabling/disabling clean-up (FIQs are unused on arm64)
- CPU resume fix for restoring the per-cpu offset
- Code comment typo fixes

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: mm: Introduce PTE_WRITE
arm64: mm: Remove PTE_BIT_FUNC macro
arm64: FIQs are unused
arm64: mm: fix the function name in comment of cpu_do_switch_mm
arm64: fix build error if DMA_CMA is enabled
arm64: kernel: fix per-cpu offset restore on resume
arm64: mm: fix the function name in comment of __flush_dcache_area
arm64: mm: use ubfm for dcache_line_size

8 files changed, 74 insertions(+), 43 deletions(-)

arch/arm64/include/asm/dma-contiguous.h (-1)

@@ -18,7 +18,6 @@
 #ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 static inline void
 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }

arch/arm64/include/asm/pgtable.h (+63 -30)

@@ -28,7 +28,7 @@
 #define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
-/* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -67,15 +67,15 @@
 
 #define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
 
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
 #define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
@@ -83,13 +83,13 @@
 #define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 
 #endif	/* __ASSEMBLY__ */
 
@@ -140,22 +140,53 @@
 #define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & PTE_AF)
 #define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
+#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_WRITE;
+	return pte;
+}
 
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung, |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= PTE_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= PTE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_AF;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= PTE_AF;
+	return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	pte_val(pte) |= PTE_SPECIAL;
+	return pte;
+}
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
@@ -170,8 +201,10 @@
 	if (pte_valid_user(pte)) {
 		if (pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (!pte_dirty(pte))
-			pte = pte_wrprotect(pte);
+		if (pte_dirty(pte) && pte_write(pte))
+			pte_val(pte) &= ~PTE_RDONLY;
+		else
+			pte_val(pte) |= PTE_RDONLY;
 	}
 
 	set_pte(ptep, pte);
@@ -345,7 +378,7 @@
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
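
The net effect, worth spelling out: writability is now a software bit, and
set_pte_at() derives the hardware PTE_RDONLY bit from the software PTE_WRITE
and PTE_DIRTY bits, so a writable-but-clean page still faults on the first
write and can be marked dirty. A minimal user-space sketch of that rule
(PTE_DIRTY/PTE_WRITE values come from the diff above; PTE_RDONLY's position
and the helper name are our own, for illustration only):

#include <stdint.h>
#include <stdio.h>

#define PTE_RDONLY	(UINT64_C(1) << 7)	/* hardware AP[2] read-only bit */
#define PTE_DIRTY	(UINT64_C(1) << 55)	/* software: page has been written */
#define PTE_WRITE	(UINT64_C(1) << 57)	/* software: mapping allows writes */

/* Mirrors the new set_pte_at() rule: only a pte that is both writable
 * and dirty becomes hardware-writable; everything else keeps PTE_RDONLY
 * set so the first write traps and the page can be marked dirty. */
static uint64_t apply_rdonly(uint64_t pte)
{
	if ((pte & PTE_DIRTY) && (pte & PTE_WRITE))
		pte &= ~PTE_RDONLY;
	else
		pte |= PTE_RDONLY;
	return pte;
}

int main(void)
{
	/* writable but clean: still hardware read-only */
	printf("%d\n", !!(apply_rdonly(PTE_WRITE) & PTE_RDONLY));		/* 1 */
	/* writable and dirty: hardware writable */
	printf("%d\n", !!(apply_rdonly(PTE_WRITE | PTE_DIRTY) & PTE_RDONLY));	/* 0 */
	/* truly read-only: hardware read-only even if dirty */
	printf("%d\n", !!(apply_rdonly(PTE_DIRTY) & PTE_RDONLY));		/* 1 */
	return 0;
}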

arch/arm64/kernel/process.c (-6)

@@ -85,11 +85,6 @@
 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
-void arch_cpu_idle_prepare(void)
-{
-	local_fiq_enable();
-}
-
 /*
  * This is our default idle handler.
  */
@@ -138,7 +133,6 @@
 
	/* Disable interrupts first */
	local_irq_disable();
-	local_fiq_disable();
 
	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)

arch/arm64/kernel/smp.c (-2)

@@ -161,7 +161,6 @@
	complete(&cpu_running);
 
	local_irq_enable();
-	local_fiq_enable();
	local_async_enable();
 
	/*
@@ -495,7 +494,6 @@
 
	set_cpu_online(cpu, false);
 
-	local_fiq_disable();
	local_irq_disable();
 
	while (1)

arch/arm64/kernel/suspend.c (+8)

@@ -1,3 +1,4 @@
+#include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -89,6 +90,13 @@
	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		flush_tlb_all();
+
+		/*
+		 * Restore per-cpu offset before any kernel
+		 * subsystem relying on it has a chance to run.
+		 */
+		set_my_cpu_offset(per_cpu_offset(cpu));
+
		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
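
Why the offset must come back this early: arm64 keeps the per-cpu offset in
the TPIDR_EL1 register, which is lost when a core powers down, so any
this_cpu accessor run on the resume path before set_my_cpu_offset() would
compute addresses with a stale offset. A rough sketch of the accessor
pattern (names prefixed "illustrative_" are ours, not the kernel's):

/* Roughly how arm64 per-cpu addressing works; illustrative only. */
static inline unsigned long illustrative_my_cpu_offset(void)
{
	unsigned long off;

	/* TPIDR_EL1 holds this CPU's per-cpu offset; it is written at
	 * boot and, after this fix, rewritten on resume from power-down. */
	asm ("mrs %0, tpidr_el1" : "=r" (off));
	return off;
}

/* A per-cpu pointer is just its base address plus this CPU's offset. */
#define illustrative_this_cpu_ptr(ptr) \
	((typeof(ptr))((char *)(ptr) + illustrative_my_cpu_offset()))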

arch/arm64/mm/cache.S (+1 -1)

@@ -146,7 +146,7 @@
 ENDPROC(__flush_cache_user_range)
 
 /*
- *	__flush_kern_dcache_page(kaddr)
+ *	__flush_dcache_area(kaddr, size)
  *
  *	Ensure that the data held in the page kaddr is written back to the
  *	page in question.

arch/arm64/mm/proc-macros.S (+1 -2)

@@ -38,8 +38,7 @@
  */
	.macro	dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
-	lsr	\tmp, \tmp, #16
-	and	\tmp, \tmp, #0xf		// cache line size encoding
+	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
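
The ubfm (unsigned bitfield move) extracts CTR_EL0.DminLine, bits [19:16],
in a single instruction, replacing the lsr/and pair; the field holds the
log2 of the minimum D-cache line size in 4-byte words. The same computation
in C (the function name is ours, for illustration):

#include <stdint.h>

/* Equivalent of the dcache_line_size macro: extract CTR_EL0.DminLine
 * (bits [19:16], log2 of the line size in words) and scale by the
 * 4-byte word size. */
static inline unsigned long dcache_line_size_bytes(uint64_t ctr_el0)
{
	uint64_t dminline = (ctr_el0 >> 16) & 0xf;	/* ubfm \tmp, \tmp, #16, #19 */
	return 4UL << dminline;				/* mov #4 ; lsl */
}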

arch/arm64/mm/proc.S (+1 -1)

@@ -150,7 +150,7 @@
 #endif
 
 /*
- *	cpu_switch_mm(pgd_phys, tsk)
+ *	cpu_do_switch_mm(pgd_phys, tsk)
  *
  *	Set the translation table base pointer to be pgd_phys.
  *