Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
"The main fix here addresses a kernel panic triggered on Qualcomm
QDF2400 due to incorrect register usage in an erratum workaround
introduced during the merge window.

Summary:

  - Fix kernel panic on a specific Qualcomm platform due to a broken
    erratum workaround

- Revert contiguous bit support due to TLB conflict aborts in
simulation

- Don't treat all CPU ID register fields as 4-bit quantities"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64/cpufeature: check correct field width when updating sys_val
Revert "arm64: mm: set the contiguous bit for kernel mappings where appropriate"
arm64: Avoid clobbering mm in erratum workaround on QDF2400

+15 -35
+10 -4
arch/arm64/include/asm/cpufeature.h
@@ -184,16 +184,22 @@
 }
 
 static inline int __attribute_const__
-cpuid_feature_extract_field(u64 features, int field, bool sign)
+cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
 {
 	return (sign) ?
-		cpuid_feature_extract_signed_field(features, field) :
-		cpuid_feature_extract_unsigned_field(features, field);
+		cpuid_feature_extract_signed_field_width(features, field, width) :
+		cpuid_feature_extract_unsigned_field_width(features, field, width);
+}
+
+static inline int __attribute_const__
+cpuid_feature_extract_field(u64 features, int field, bool sign)
+{
+	return cpuid_feature_extract_field_width(features, field, 4, sign);
 }
 
 static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
 {
-	return (s64)cpuid_feature_extract_field(val, ftrp->shift, ftrp->sign);
+	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
 }
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
+4 -30
arch/arm64/mm/mmu.c
@@ -109,10 +109,8 @@
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 			   unsigned long end, unsigned long pfn,
 			   pgprot_t prot,
-			   phys_addr_t (*pgtable_alloc)(void),
-			   bool page_mappings_only)
+			   phys_addr_t (*pgtable_alloc)(void))
 {
-	pgprot_t __prot = prot;
 	pte_t *pte;
 
 	BUG_ON(pmd_sect(*pmd));
@@ -128,18 +130,7 @@
 	do {
 		pte_t old_pte = *pte;
 
-		/*
-		 * Set the contiguous bit for the subsequent group of PTEs if
-		 * its size and alignment are appropriate.
-		 */
-		if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
-			if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
-				__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
-			else
-				__prot = prot;
-		}
-
-		set_pte(pte, pfn_pte(pfn, __prot));
+		set_pte(pte, pfn_pte(pfn, prot));
 		pfn++;
 
 		/*
@@ -147,7 +160,6 @@
 		      phys_addr_t (*pgtable_alloc)(void),
 		      bool page_mappings_only)
 {
-	pgprot_t __prot = prot;
 	pmd_t *pmd;
 	unsigned long next;
 
@@ -173,19 +187,7 @@
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
 		    !page_mappings_only) {
-			/*
-			 * Set the contiguous bit for the subsequent group of
-			 * PMDs if its size and alignment are appropriate.
-			 */
-			if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
-				if (end - addr >= CONT_PMD_SIZE)
-					__prot = __pgprot(pgprot_val(prot) |
-							  PTE_CONT);
-				else
-					__prot = prot;
-			}
-			pmd_set_huge(pmd, phys, __prot);
+			pmd_set_huge(pmd, phys, prot);
 
 			/*
 			 * After the PMD entry has been populated once, we
@@ -183,9 +208,8 @@
 					      pmd_val(*pmd)));
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot, pgtable_alloc,
-				       page_mappings_only);
+				       prot, pgtable_alloc);
 
 			BUG_ON(pmd_val(old_pmd) != 0 &&
 			       pmd_val(old_pmd) != pmd_val(*pmd));
+1 -1
arch/arm64/mm/proc.S
@@ -138,7 +138,7 @@
  * - pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
-	pre_ttbr0_update_workaround x0, x1, x2
+	pre_ttbr0_update_workaround x0, x2, x3
 	mmid	x1, x1				// get mm->context.id
 	bfi	x0, x1, #48, #16		// set the ASID
 	msr	ttbr0_el1, x0			// set TTBR0