Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:

- fix HugeTLB leak due to CoW and PTE_RDONLY mismatch

- avoid accessing unmapped FDT fields when checking validity

- correctly account for vDSO AUX entry in ARCH_DLINFO

- fix kallsyms with absolute expressions in linker script

- kill unnecessary symbol-based relocs in vmlinux

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: Fix copy-on-write referencing in HugeTLB
arm64: mm: avoid fdt_check_header() before the FDT is fully mapped
arm64: Define AT_VECTOR_SIZE_ARCH for ARCH_DLINFO
arm64: relocatable: suppress R_AARCH64_ABS64 relocations in vmlinux
arm64: vmlinux.lds: make __rela_offset and __dynsym_offset ABSOLUTE

+30 -34
+1 -1
arch/arm64/Makefile
@@ -15,7 +15,7 @@
 GZFLAGS		:=-9

 ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux		+= -pie
+LDFLAGS_vmlinux		+= -pie -Bsymbolic
 endif

 KBUILD_DEFCONFIG := defconfig
+1
arch/arm64/include/asm/elf.h
@@ -140,6 +140,7 @@

 #define SET_PERSONALITY(ex)	clear_thread_flag(TIF_32BIT);

+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO						\
 do {								\
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+17
arch/arm64/include/asm/pgtable.h
@@ -224,6 +224,23 @@
 	set_pte(ptep, pte);
 }

+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	pteval_t lhs, rhs;
+
+	lhs = pte_val(pte_a);
+	rhs = pte_val(pte_b);
+
+	if (pte_present(pte_a))
+		lhs &= ~PTE_RDONLY;
+
+	if (pte_present(pte_b))
+		rhs &= ~PTE_RDONLY;
+
+	return (lhs == rhs);
+}
+
 /*
  * Huge pte definitions.
  */
+2
arch/arm64/include/uapi/asm/auxvec.h
@@ -19,4 +19,6 @@
 /* vDSO location */
 #define AT_SYSINFO_EHDR	33

+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
 #endif
+3 -18
arch/arm64/kernel/head.S
@@ -781,40 +781,25 @@
 	 * Iterate over each entry in the relocation table, and apply the
 	 * relocations in place.
 	 */
-	ldr	w8, =__dynsym_offset		// offset to symbol table
 	ldr	w9, =__rela_offset		// offset to reloc table
 	ldr	w10, =__rela_size		// size of reloc table

 	mov_q	x11, KIMAGE_VADDR		// default virtual offset
 	add	x11, x11, x23			// actual virtual offset
-	add	x8, x8, x11			// __va(.dynsym)
 	add	x9, x9, x11			// __va(.rela)
 	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

 0:	cmp	x9, x10
-	b.hs	2f
+	b.hs	1f
 	ldp	x11, x12, [x9], #24
 	ldr	x13, [x9, #-8]
 	cmp	w12, #R_AARCH64_RELATIVE
-	b.ne	1f
+	b.ne	0b
 	add	x13, x13, x23			// relocate
 	str	x13, [x11, x23]
 	b	0b

-1:	cmp	w12, #R_AARCH64_ABS64
-	b.ne	0b
-	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
-	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
-	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
-	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
-	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
-	add	x14, x15, x23			// relocate
-	csel	x15, x14, x15, ne
-	add	x15, x13, x15
-	str	x15, [x11, x23]
-	b	0b
-
-2:
+1:
 #endif
 	ldr	x8, =__primary_switched
 	br	x8
+2 -11
arch/arm64/kernel/vmlinux.lds.S
@@ -103,6 +103,7 @@
 		*(.discard)
 		*(.discard.*)
 		*(.interp .dynamic)
+		*(.dynsym .dynstr .hash)
 	}

 	. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -175,19 +174,9 @@
 	.rela : ALIGN(8) {
 		*(.rela .rela*)
 	}
-	.dynsym : ALIGN(8) {
-		*(.dynsym)
-	}
-	.dynstr : {
-		*(.dynstr)
-	}
-	.hash : {
-		*(.hash)
-	}

-	__rela_offset	= ADDR(.rela) - KIMAGE_VADDR;
+	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
 	__rela_size	= SIZEOF(.rela);
-	__dynsym_offset	= ADDR(.dynsym) - KIMAGE_VADDR;

 	. = ALIGN(SEGMENT_ALIGN);
 	__init_end = .;
+4 -4
arch/arm64/mm/mmu.c
@@ -686,9 +686,9 @@
 	/*
 	 * Check whether the physical FDT address is set and meets the minimum
 	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
-	 * at least 8 bytes so that we can always access the size field of the
-	 * FDT header after mapping the first chunk, double check here if that
-	 * is indeed the case.
+	 * at least 8 bytes so that we can always access the magic and size
+	 * fields of the FDT header after mapping the first chunk, double check
+	 * here if that is indeed the case.
 	 */
 	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
 	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
@@ -716,7 +716,7 @@
 	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
 			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

-	if (fdt_check_header(dt_virt) != 0)
+	if (fdt_magic(dt_virt) != FDT_MAGIC)
 		return NULL;

 	*size = fdt_totalsize(dt_virt);