Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:

- omit EFI memory map sorting, which was recently introduced, but
caused problems with the decompressor due to additional sections
being emitted.

- avoid unaligned load fault-generating instructions in the
decompressor by switching to a private unaligned implementation.

- add a symbol into the decompressor to further debug non-boot
situations (ld's documentation is extremely poor for how "." works,
and ld doesn't seem to follow its own documentation!)

- pass endianness information to sparse

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: add debug ".edata_real" symbol
ARM: 8716/1: pass endianness info to sparse
efi/libstub: arm: omit sorting of the UEFI memory map
ARM: 8715/1: add a private asm/unaligned.h

+46 -6
+2
arch/arm/Makefile
··· 44 44 45 45 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) 46 46 KBUILD_CPPFLAGS += -mbig-endian 47 + CHECKFLAGS += -D__ARMEB__ 47 48 AS += -EB 48 49 LD += -EB 49 50 else 50 51 KBUILD_CPPFLAGS += -mlittle-endian 52 + CHECKFLAGS += -D__ARMEL__ 51 53 AS += -EL 52 54 LD += -EL 53 55 endif
+9
arch/arm/boot/compressed/vmlinux.lds.S
··· 85 85 86 86 _edata = .; 87 87 88 + /* 89 + * The image_end section appears after any additional loadable sections 90 + * that the linker may decide to insert in the binary image. Having 91 + * this symbol allows further debug in the near future. 92 + */ 93 + .image_end (NOLOAD) : { 94 + _edata_real = .; 95 + } 96 + 88 97 _magic_sig = ZIMAGE_MAGIC(0x016f2818); 89 98 _magic_start = ZIMAGE_MAGIC(_start); 90 99 _magic_end = ZIMAGE_MAGIC(_edata);
-1
arch/arm/include/asm/Kbuild
··· 20 20 generic-y += sizes.h 21 21 generic-y += timex.h 22 22 generic-y += trace_clock.h 23 - generic-y += unaligned.h 24 23 25 24 generated-y += mach-types.h 26 25 generated-y += unistd-nr.h
+27
arch/arm/include/asm/unaligned.h
··· 1 + #ifndef __ASM_ARM_UNALIGNED_H 2 + #define __ASM_ARM_UNALIGNED_H 3 + 4 + /* 5 + * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+, 6 + * but we don't want to use linux/unaligned/access_ok.h since that can lead 7 + * to traps on unaligned stm/ldm or strd/ldrd. 8 + */ 9 + #include <asm/byteorder.h> 10 + 11 + #if defined(__LITTLE_ENDIAN) 12 + # include <linux/unaligned/le_struct.h> 13 + # include <linux/unaligned/be_byteshift.h> 14 + # include <linux/unaligned/generic.h> 15 + # define get_unaligned __get_unaligned_le 16 + # define put_unaligned __put_unaligned_le 17 + #elif defined(__BIG_ENDIAN) 18 + # include <linux/unaligned/be_struct.h> 19 + # include <linux/unaligned/le_byteshift.h> 20 + # include <linux/unaligned/generic.h> 21 + # define get_unaligned __get_unaligned_be 22 + # define put_unaligned __put_unaligned_be 23 + #else 24 + # error need to define endianess 25 + #endif 26 + 27 + #endif /* __ASM_ARM_UNALIGNED_H */
+3 -3
drivers/firmware/efi/libstub/Makefile
··· 34 34 lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o 35 35 36 36 # include the stub's generic dependencies from lib/ when building for ARM/arm64 37 - arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c 37 + arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c 38 + arm-deps-$(CONFIG_ARM64) += sort.c 38 39 39 40 $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE 40 41 $(call if_changed_rule,cc_o_c) 41 42 42 43 lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \ 43 - $(patsubst %.c,lib-%.o,$(arm-deps)) 44 + $(patsubst %.c,lib-%.o,$(arm-deps-y)) 44 45 45 46 lib-$(CONFIG_ARM) += arm32-stub.o 46 47 lib-$(CONFIG_ARM64) += arm64-stub.o ··· 92 91 # explicitly by the decompressor linker script. 93 92 # 94 93 STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub 95 - STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort 96 94 STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
+5 -2
drivers/firmware/efi/libstub/arm-stub.c
··· 350 350 * The easiest way to find adjacent regions is to sort the memory map 351 351 * before traversing it. 352 352 */ 353 - sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); 353 + if (IS_ENABLED(CONFIG_ARM64)) 354 + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, 355 + NULL); 354 356 355 357 for (l = 0; l < map_size; l += desc_size, prev = in) { 356 358 u64 paddr, size; ··· 369 367 * a 4k page size kernel to kexec a 64k page size kernel and 370 368 * vice versa. 371 369 */ 372 - if (!regions_are_adjacent(prev, in) || 370 + if ((IS_ENABLED(CONFIG_ARM64) && 371 + !regions_are_adjacent(prev, in)) || 373 372 !regions_have_compatible_memory_type_attrs(prev, in)) { 374 373 375 374 paddr = round_down(in->phys_addr, SZ_64K);