Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
"I'm safely chained back up to my desk, so please pull these arm64
fixes for -rc1 that address some issues that cropped up during the
merge window:

- Prevent KASLR from mapping the top page of the virtual address
space

- Fix device-tree probing of SDEI driver

- Fix incorrect register offset definition in Hisilicon DDRC PMU
driver

- Fix compilation issue with older binutils not liking unsigned
immediates

- Fix uapi headers so that libc can provide its own sigcontext
definition

- Fix handling of private compat syscalls

- Hook up compat io_pgetevents() syscall for 32-bit tasks

- Cleanup to arm64 Makefile (including now to avoid silly conflicts)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: compat: Hook up io_pgetevents() for 32-bit tasks
arm64: compat: Don't pull syscall number from regs in arm_compat_syscall
arm64: compat: Avoid sending SIGILL for unallocated syscall numbers
arm64/sve: Disentangle <uapi/asm/ptrace.h> from <uapi/asm/sigcontext.h>
arm64/sve: ptrace: Fix SVE_PT_REGS_OFFSET definition
drivers/perf: hisi: Fixup one DDRC PMU register offset
arm64: replace arm64-obj-* in Makefile with obj-*
arm64: kaslr: Reserve size of ARM64_MEMSTART_ALIGN in linear region
firmware: arm_sdei: Fix DT platform device creation
firmware: arm_sdei: fix wrong of_node_put() in init function
arm64: entry: remove unused register aliases
arm64: smp: Fix compilation error

+153 -117
+5 -3
arch/arm64/include/asm/smp.h
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <linux/const.h>
+
 /* Values for secondary_data.status */
 #define CPU_STUCK_REASON_SHIFT		(8)
-#define CPU_BOOT_STATUS_MASK		((1U << CPU_STUCK_REASON_SHIFT) - 1)
+#define CPU_BOOT_STATUS_MASK		((UL(1) << CPU_STUCK_REASON_SHIFT) - 1)
 
 #define CPU_MMU_OFF		(-1)
 #define CPU_BOOT_SUCCESS	(0)
···
 /* Fatal system error detected by secondary CPU, crash the system */
 #define CPU_PANIC_KERNEL	(3)
 
-#define CPU_STUCK_REASON_52_BIT_VA	(1U << CPU_STUCK_REASON_SHIFT)
-#define CPU_STUCK_REASON_NO_GRAN	(2U << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
 
 #ifndef __ASSEMBLY__
 
+4 -3
arch/arm64/include/asm/unistd.h
  * The following SVCs are ARM private.
  */
 #define __ARM_NR_COMPAT_BASE		0x0f0000
-#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
+#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE + 2)
+#define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE + 5)
+#define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls		399
+#define __NR_compat_syscalls		400
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
+2
arch/arm64/include/asm/unistd32.h
 __SYSCALL(__NR_statx, sys_statx)
 #define __NR_rseq 398
 __SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_io_pgetevents 399
+__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
 
 /*
  * Please add new compat syscalls above this comment and update
+18 -21
arch/arm64/include/uapi/asm/ptrace.h
 #include <linux/types.h>
 
 #include <asm/hwcap.h>
-#include <asm/sigcontext.h>
+#include <asm/sve_context.h>
 
 
 /*
···
  */
 
 /* Offset from the start of struct user_sve_header to the register data */
-#define SVE_PT_REGS_OFFSET \
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_REGS_OFFSET \
+	((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1)) \
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 /*
  * The register data content and layout depends on the value of the
···
  * Additional data might be appended in the future.
  */
 
-#define SVE_PT_SVE_ZREG_SIZE(vq)	SVE_SIG_ZREG_SIZE(vq)
-#define SVE_PT_SVE_PREG_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
-#define SVE_PT_SVE_FFR_SIZE(vq)	SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)
 #define SVE_PT_SVE_FPSR_SIZE		sizeof(__u32)
 #define SVE_PT_SVE_FPCR_SIZE		sizeof(__u32)
-
-#define __SVE_SIG_TO_PT(offset) \
-	((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
 
 #define SVE_PT_SVE_OFFSET	SVE_PT_REGS_OFFSET
 
 #define SVE_PT_SVE_ZREGS_OFFSET \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+	(SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
 #define SVE_PT_SVE_ZREGS_SIZE(vq) \
-	(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+	(SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
 
 #define SVE_PT_SVE_PREGS_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_PT_SVE_PREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
 #define SVE_PT_SVE_PREGS_SIZE(vq) \
-	(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
+	(SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \
 		SVE_PT_SVE_PREGS_OFFSET(vq))
 
 #define SVE_PT_SVE_FFR_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_PT_SVE_FPSR_OFFSET(vq) \
 	((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
-		(SVE_VQ_BYTES - 1)) \
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+		(__SVE_VQ_BYTES - 1)) \
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 #define SVE_PT_SVE_FPCR_OFFSET(vq) \
 	(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
···
 
 #define SVE_PT_SVE_SIZE(vq, flags) \
 	((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \
-		- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+		- SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 #define SVE_PT_SIZE(vq, flags) \
 	(((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?	\
+28 -28
arch/arm64/include/uapi/asm/sigcontext.h
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/sve_context.h>
+
 /*
  * The SVE architecture leaves space for future expansion of the
  * vector length beyond its initial architectural limit of 2048 bits
···
  * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
  * terminology.
  */
-#define SVE_VQ_BYTES		16	/* number of bytes per quadword */
+#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */
 
-#define SVE_VQ_MIN		1
-#define SVE_VQ_MAX		512
+#define SVE_VQ_MIN		__SVE_VQ_MIN
+#define SVE_VQ_MAX		__SVE_VQ_MAX
 
-#define SVE_VL_MIN		(SVE_VQ_MIN * SVE_VQ_BYTES)
-#define SVE_VL_MAX		(SVE_VQ_MAX * SVE_VQ_BYTES)
+#define SVE_VL_MIN		__SVE_VL_MIN
+#define SVE_VL_MAX		__SVE_VL_MAX
 
-#define SVE_NUM_ZREGS		32
-#define SVE_NUM_PREGS		16
+#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
+#define SVE_NUM_PREGS		__SVE_NUM_PREGS
 
-#define sve_vl_valid(vl) \
-	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
-#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)
-#define sve_vl_from_vq(vq)	((vq) * SVE_VQ_BYTES)
+#define sve_vl_valid(vl)	__sve_vl_valid(vl)
+#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
+#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)
 
 /*
  * If the SVE registers are currently live for the thread at signal delivery,
···
  * Additional data might be appended in the future.
  */
 
-#define SVE_SIG_ZREG_SIZE(vq)	((__u32)(vq) * SVE_VQ_BYTES)
-#define SVE_SIG_PREG_SIZE(vq)	((__u32)(vq) * (SVE_VQ_BYTES / 8))
-#define SVE_SIG_FFR_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
+#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)
 
 #define SVE_SIG_REGS_OFFSET \
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1)) \
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
-#define SVE_SIG_ZREGS_OFFSET	SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREGS_OFFSET \
+	(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_SIG_ZREG_OFFSET(vq, n) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
-#define SVE_SIG_ZREGS_SIZE(vq) \
-	(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+	(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
+#define SVE_SIG_ZREGS_SIZE(vq)	__SVE_ZREGS_SIZE(vq)
 
 #define SVE_SIG_PREGS_OFFSET(vq) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+	(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_SIG_PREG_OFFSET(vq, n) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
-#define SVE_SIG_PREGS_SIZE(vq) \
-	(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+	(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
+#define SVE_SIG_PREGS_SIZE(vq)	__SVE_PREGS_SIZE(vq)
 
 #define SVE_SIG_FFR_OFFSET(vq) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+	(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_SIG_REGS_SIZE(vq) \
-	(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
+	(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))
 
-#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
-
+#define SVE_SIG_CONTEXT_SIZE(vq) \
+	(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
+53
arch/arm64/include/uapi/asm/sve_context.h
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017-2018 ARM Limited */
+
+/*
+ * For use by other UAPI headers only.
+ * Do not make direct use of header or its definitions.
+ */
+
+#ifndef _UAPI__ASM_SVE_CONTEXT_H
+#define _UAPI__ASM_SVE_CONTEXT_H
+
+#include <linux/types.h>
+
+#define __SVE_VQ_BYTES		16	/* number of bytes per quadword */
+
+#define __SVE_VQ_MIN		1
+#define __SVE_VQ_MAX		512
+
+#define __SVE_VL_MIN		(__SVE_VQ_MIN * __SVE_VQ_BYTES)
+#define __SVE_VL_MAX		(__SVE_VQ_MAX * __SVE_VQ_BYTES)
+
+#define __SVE_NUM_ZREGS		32
+#define __SVE_NUM_PREGS		16
+
+#define __sve_vl_valid(vl) \
+	((vl) % __SVE_VQ_BYTES == 0 && \
+	 (vl) >= __SVE_VL_MIN && \
+	 (vl) <= __SVE_VL_MAX)
+
+#define __sve_vq_from_vl(vl)	((vl) / __SVE_VQ_BYTES)
+#define __sve_vl_from_vq(vq)	((vq) * __SVE_VQ_BYTES)
+
+#define __SVE_ZREG_SIZE(vq)	((__u32)(vq) * __SVE_VQ_BYTES)
+#define __SVE_PREG_SIZE(vq)	((__u32)(vq) * (__SVE_VQ_BYTES / 8))
+#define __SVE_FFR_SIZE(vq)	__SVE_PREG_SIZE(vq)
+
+#define __SVE_ZREGS_OFFSET	0
+#define __SVE_ZREG_OFFSET(vq, n) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
+#define __SVE_ZREGS_SIZE(vq) \
+	(__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
+
+#define __SVE_PREGS_OFFSET(vq) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
+#define __SVE_PREG_OFFSET(vq, n) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n))
+#define __SVE_PREGS_SIZE(vq) \
+	(__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq))
+
+#define __SVE_FFR_OFFSET(vq) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))
+
+#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */
+30 -31
arch/arm64/kernel/Makefile
 CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
-arm64-obj-y		:= debug-monitors.o entry.o irq.o fpsimd.o	\
+obj-y			:= debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
 			   hyp-stub.o psci.o cpu_ops.o insn.o	\
···
 $(obj)/%.stub.o: $(obj)/%.o FORCE
 	$(call if_changed,objcopy)
 
-arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o	\
+obj-$(CONFIG_COMPAT)			+= sys32.o kuser32.o signal32.o	\
 					   sys_compat.o
-arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
-arm64-obj-$(CONFIG_MODULES)		+= module.o
-arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
-arm64-obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
-arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
-arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
-arm64-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
-arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
-arm64-obj-$(CONFIG_EFI)		+= efi.o efi-entry.stub.o	\
+obj-$(CONFIG_FUNCTION_TRACER)		+= ftrace.o entry-ftrace.o
+obj-$(CONFIG_MODULES)			+= module.o
+obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
+obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
+obj-$(CONFIG_HW_PERF_EVENTS)		+= perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
+obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
+obj-$(CONFIG_CPU_IDLE)			+= cpuidle.o
+obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
+obj-$(CONFIG_KGDB)			+= kgdb.o
+obj-$(CONFIG_EFI)			+= efi.o efi-entry.stub.o	\
 					   efi-rt-wrapper.o
-arm64-obj-$(CONFIG_PCI)		+= pci.o
-arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
-arm64-obj-$(CONFIG_ACPI)		+= acpi.o
-arm64-obj-$(CONFIG_ACPI_NUMA)		+= acpi_numa.o
-arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
-arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
-arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
-arm64-obj-$(CONFIG_HIBERNATION)	+= hibernate.o hibernate-asm.o
-arm64-obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
+obj-$(CONFIG_PCI)			+= pci.o
+obj-$(CONFIG_ARMV8_DEPRECATED)		+= armv8_deprecated.o
+obj-$(CONFIG_ACPI)			+= acpi.o
+obj-$(CONFIG_ACPI_NUMA)		+= acpi_numa.o
+obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
+obj-$(CONFIG_PARAVIRT)			+= paravirt.o
+obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
+obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
+obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
 					   cpu-reset.o
-arm64-obj-$(CONFIG_KEXEC_FILE)		+= machine_kexec_file.o kexec_image.o
-arm64-obj-$(CONFIG_ARM64_RELOC_TEST)	+= arm64-reloc-test.o
+obj-$(CONFIG_KEXEC_FILE)		+= machine_kexec_file.o kexec_image.o
+obj-$(CONFIG_ARM64_RELOC_TEST)		+= arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
-arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
-arm64-obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
-arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
-arm64-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
-arm64-obj-$(CONFIG_ARM64_PTR_AUTH)	+= pointer_auth.o
+obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
+obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
+obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
+obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
+obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 
-obj-y					+= $(arm64-obj-y) vdso/ probes/
-obj-m					+= $(arm64-obj-m)
+obj-y					+= vdso/ probes/
 head-y					:= head.o
 extra-y					+= $(head-y) vmlinux.lds
 
+1 -11
arch/arm64/kernel/entry.S
 	mov	sp, x19
 	.endm
 
-/*
- * These are the registers used in the syscall handler, and allow us to
- * have in theory up to 7 arguments to a function - x0 to x6.
- *
- * x7 is reserved for the system call number in 32-bit mode.
- */
-wsc_nr	.req	w25		// number of system calls
-xsc_nr	.req	x25		// number of system calls (zero-extended)
-wscno	.req	w26		// syscall number
-xscno	.req	x26		// syscall number (zero-extended)
-stbl	.req	x27		// syscall table pointer
+/* GPRs used by entry code */
 tsk	.req	x28		// current thread_info
 
 /*
+5 -6
arch/arm64/kernel/sys_compat.c
 /*
  * Handle all unrecognised system calls.
  */
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
 {
-	unsigned int no = regs->regs[7];
 	void __user *addr;
 
-	switch (no) {
+	switch (scno) {
 	/*
 	 * Flush a region from virtual address 'r0' to virtual address 'r1'
 	 * _exclusive_. There is no alignment requirement on either address;
···
 
 	default:
 		/*
-		 * Calls 9f00xx..9f07ff are defined to return -ENOSYS
+		 * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
 		 * if not implemented, rather than raising SIGILL. This
 		 * way the calling program can gracefully determine whether
 		 * a feature is supported.
 		 */
-		if ((no & 0xffff) <= 0x7ff)
+		if (scno < __ARM_NR_COMPAT_END)
 			return -ENOSYS;
 		break;
 	}
···
 		(compat_thumb_mode(regs) ? 2 : 4);
 
 	arm64_notify_die("Oops - bad compat syscall(2)", regs,
-			 SIGILL, ILL_ILLTRP, addr, no);
+			 SIGILL, ILL_ILLTRP, addr, scno);
 	return 0;
 }
+4 -5
arch/arm64/kernel/syscall.c
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
 long sys_ni_syscall(void);
 
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
 {
 #ifdef CONFIG_COMPAT
 	long ret;
 	if (is_compat_task()) {
-		ret = compat_arm_syscall(regs);
+		ret = compat_arm_syscall(regs, scno);
 		if (ret != -ENOSYS)
 			return ret;
 	}
···
 		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
 		ret = __invoke_syscall(regs, syscall_fn);
 	} else {
-		ret = do_ni_syscall(regs);
+		ret = do_ni_syscall(regs, scno);
 	}
 
 	regs->regs[0] = ret;
+1 -1
arch/arm64/mm/init.c
 	 * memory spans, randomize the linear region as well.
 	 */
 	if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
-		range = range / ARM64_MEMSTART_ALIGN + 1;
+		range /= ARM64_MEMSTART_ALIGN;
 		memstart_addr -= ARM64_MEMSTART_ALIGN *
 				 ((range * memstart_offset_seed) >> 16);
 	}
-6
drivers/firmware/arm_sdei.c
 
 static bool __init sdei_present_dt(void)
 {
-	struct platform_device *pdev;
 	struct device_node *np, *fw_np;
 
 	fw_np = of_find_node_by_name(NULL, "firmware");
···
 		return false;
 
 	np = of_find_matching_node(fw_np, sdei_of_match);
-	of_node_put(fw_np);
 	if (!np)
 		return false;
-
-	pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL);
 	of_node_put(np);
-	if (!pdev)
-		return false;
 
 	return true;
 }
+2 -2
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
 #define DDRC_FLUX_RCMD		0x38c
 #define DDRC_PRE_CMD		0x3c0
 #define DDRC_ACT_CMD		0x3c4
-#define DDRC_BNK_CHG		0x3c8
 #define DDRC_RNK_CHG		0x3cc
+#define DDRC_RW_CHG		0x3d0
 #define DDRC_EVENT_CTRL	0x6C0
 #define DDRC_INT_MASK		0x6c8
 #define DDRC_INT_STATUS	0x6cc
···
 
 static const u32 ddrc_reg_off[] = {
 	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
-	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
+	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
 };
 
 /*