Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Alexander Gordeev:

- The rework that uncoupled physical and virtual address spaces
inadvertently prevented KASAN shadow mappings from using large pages.
Restore large page mappings for KASAN shadows

- Add decompressor routine physmem_alloc() that may fail, unlike
physmem_alloc_or_die(). This allows callers to implement fallback
paths

- Allow falling back from large pages to smaller pages (1MB or 4KB) if
the allocation of 2GB pages in the decompressor can not be fulfilled

- Add to the decompressor boot print support of "%%" format string,
width and padding handling, length modifiers and decimal conversion
specifiers

- Add to the decompressor message severity levels similar to kernel
ones. Support command-line options that control console output
verbosity

- Replace boot_printk() calls with appropriate loglevel-specific
helpers such as boot_emerg(), boot_warn(), and boot_debug().

- Collect all boot messages into a ring buffer independent of the
current log level. This is particularly useful for early crash
analysis

- If 'earlyprintk' command line parameter is not specified, store
decompressor boot messages in a ring buffer to be printed later by
the kernel, once the console driver is registered

- Add 'bootdebug' command line parameter to enable printing of
decompressor debug messages when needed. That parameter allows
message suppressing and filtering

- Dump boot messages on a decompressor crash, but only if 'bootdebug'
command line parameter is enabled

- When CONFIG_PRINTK_TIME is enabled, add timestamps to boot messages
in the same format as regular printk()

- Dump physical memory tracking information on boot: online ranges,
reserved areas and vmem allocations

- Dump virtual memory layout and randomization details

- Improve decompression error reporting and dump the message ring
buffer in case the boot failed and system halted

- Add an exception handler which handles exceptions when FPU control
register is attempted to be set to an invalid value. Remove '.fixup'
section as a result of this change

- Use 'A', 'O', and 'R' inline assembly format flags, which allows
recent Clang compilers to generate better FPU code

- Rework uaccess code so it reads better and generates more efficient
code

- Cleanup futex inline assembly code

- Disable KMSAN instrumentation for futex inline assemblies, which
contain dereferenced user pointers. Otherwise, shadows for the user
pointers would be accessed

- PFs which are not initially configured but in standby create only a
single-function PCI domain. If they are configured later on, sibling
PFs and their child VFs will not be added to their PCI domain
breaking SR-IOV expectations.

Fix that by allowing initially not configured but in standby PFs to
create multi-function PCI domains

- Add '-std=gnu11' to decompressor and purgatory CFLAGS to avoid
compile errors caused by kernel's own definitions of 'bool', 'false',
and 'true' conflicting with the C23 reserved keywords

- Fix sclp subsystem failure when a sclp console is not present

- Fix misuse of non-NULL terminated strings in vmlogrdr driver

- Various other small improvements, cleanups and fixes

* tag 's390-6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (53 commits)
s390/vmlogrdr: Use array instead of string initializer
s390/vmlogrdr: Use internal_name for error messages
s390/sclp: Initialize sclp subsystem via arch_cpu_finalize_init()
s390/tools: Use array instead of string initializer
s390/vmem: Fix null-pointer-arithmetic warning in vmem_map_init()
s390: Add '-std=gnu11' to decompressor and purgatory CFLAGS
s390/bitops: Use correct constraint for arch_test_bit() inline assembly
s390/pci: Fix SR-IOV for PFs initially in standby
s390/futex: Avoid KMSAN instrumention for user pointers
s390/uaccess: Rename get_put_user_noinstr_attributes to uaccess_kmsan_or_inline
s390/futex: Cleanup futex_atomic_cmpxchg_inatomic()
s390/futex: Generate futex atomic op functions
s390/uaccess: Remove INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER
s390/uaccess: Use asm goto for put_user()/get_user()
s390/uaccess: Remove usage of the oac specifier
s390/uaccess: Replace EX_TABLE_UA_LOAD_MEM exception handling
s390/uaccess: Cleanup noinstr __put_user()/__get_user() inline assembly constraints
s390/uaccess: Remove __put_user_fn()/__get_user_fn() wrappers
s390/uaccess: Move put_user() / __put_user() close to put_user() asm code
s390/uaccess: Use asm goto for __mvc_kernel_nofault()
...

+1124 -683
+8 -1
arch/s390/Kconfig
··· 52 52 depends on KASAN 53 53 default 0x1C000000000000 54 54 55 - config GCC_ASM_FLAG_OUTPUT_BROKEN 55 + config CC_ASM_FLAG_OUTPUT_BROKEN 56 56 def_bool CC_IS_GCC && GCC_VERSION < 140200 57 57 help 58 58 GCC versions before 14.2.0 may die with an internal 59 59 compiler error in some configurations if flag output 60 60 operands are used within inline assemblies. 61 + 62 + config CC_HAS_ASM_AOR_FORMAT_FLAGS 63 + def_bool !(CC_IS_CLANG && CLANG_VERSION < 190100) 64 + help 65 + Clang versions before 19.1.0 do not support A, 66 + O, and R inline assembly format flags. 61 67 62 68 config S390 63 69 def_bool y ··· 78 72 select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM 79 73 select ARCH_ENABLE_MEMORY_HOTREMOVE 80 74 select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 75 + select ARCH_HAS_CPU_FINALIZE_INIT 81 76 select ARCH_HAS_CRC32 82 77 select ARCH_HAS_CURRENT_STACK_POINTER 83 78 select ARCH_HAS_DEBUG_VIRTUAL
+1 -1
arch/s390/Makefile
··· 22 22 ifndef CONFIG_AS_IS_LLVM 23 23 KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf)) 24 24 endif 25 - KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack 25 + KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11 26 26 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY 27 27 KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR 28 28 KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
+5 -5
arch/s390/boot/als.c
··· 46 46 * z/VM adds a four character prefix. 47 47 */ 48 48 if (strlen(als_str) > 70) { 49 - boot_printk("%s\n", als_str); 49 + boot_emerg("%s\n", als_str); 50 50 *als_str = '\0'; 51 51 } 52 52 u16_to_decimal(val_str, i * BITS_PER_LONG + j); ··· 54 54 first = 0; 55 55 } 56 56 } 57 - boot_printk("%s\n", als_str); 57 + boot_emerg("%s\n", als_str); 58 58 } 59 59 60 60 static void facility_mismatch(void) ··· 62 62 struct cpuid id; 63 63 64 64 get_cpu_id(&id); 65 - boot_printk("The Linux kernel requires more recent processor hardware\n"); 66 - boot_printk("Detected machine-type number: %4x\n", id.machine); 65 + boot_emerg("The Linux kernel requires more recent processor hardware\n"); 66 + boot_emerg("Detected machine-type number: %4x\n", id.machine); 67 67 print_missing_facilities(); 68 - boot_printk("See Principles of Operations for facility bits\n"); 68 + boot_emerg("See Principles of Operations for facility bits\n"); 69 69 disabled_wait(); 70 70 } 71 71
+23 -3
arch/s390/boot/boot.h
··· 8 8 9 9 #ifndef __ASSEMBLY__ 10 10 11 + #include <linux/printk.h> 11 12 #include <asm/physmem_info.h> 12 13 13 14 struct machine_info { ··· 48 47 void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size); 49 48 void physmem_free(enum reserved_range_type type); 50 49 /* for continuous/multiple allocations per type */ 51 - unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size, 52 - unsigned long align); 50 + unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size, 51 + unsigned long align); 52 + unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size, 53 + unsigned long align, bool die_on_oom); 53 54 /* for single allocations, 1 per type */ 54 55 unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size, 55 56 unsigned long align, unsigned long min, unsigned long max, 56 57 bool die_on_oom); 57 58 unsigned long get_physmem_alloc_pos(void); 59 + void dump_physmem_reserved(void); 58 60 bool ipl_report_certs_intersects(unsigned long addr, unsigned long size, 59 61 unsigned long *intersection_start); 60 62 bool is_ipl_block_dump(void); ··· 73 69 unsigned long randomize_within_range(unsigned long size, unsigned long align, 74 70 unsigned long min, unsigned long max); 75 71 void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit); 76 - void __printf(1, 2) boot_printk(const char *fmt, ...); 72 + int __printf(1, 2) boot_printk(const char *fmt, ...); 77 73 void print_stacktrace(unsigned long sp); 78 74 void error(char *m); 79 75 int get_random(unsigned long limit, unsigned long *value); 76 + void boot_rb_dump(void); 77 + 78 + #ifndef boot_fmt 79 + #define boot_fmt(fmt) fmt 80 + #endif 81 + 82 + #define boot_emerg(fmt, ...) boot_printk(KERN_EMERG boot_fmt(fmt), ##__VA_ARGS__) 83 + #define boot_alert(fmt, ...) boot_printk(KERN_ALERT boot_fmt(fmt), ##__VA_ARGS__) 84 + #define boot_crit(fmt, ...) 
boot_printk(KERN_CRIT boot_fmt(fmt), ##__VA_ARGS__) 85 + #define boot_err(fmt, ...) boot_printk(KERN_ERR boot_fmt(fmt), ##__VA_ARGS__) 86 + #define boot_warn(fmt, ...) boot_printk(KERN_WARNING boot_fmt(fmt), ##__VA_ARGS__) 87 + #define boot_notice(fmt, ...) boot_printk(KERN_NOTICE boot_fmt(fmt), ##__VA_ARGS__) 88 + #define boot_info(fmt, ...) boot_printk(KERN_INFO boot_fmt(fmt), ##__VA_ARGS__) 89 + #define boot_debug(fmt, ...) boot_printk(KERN_DEBUG boot_fmt(fmt), ##__VA_ARGS__) 80 90 81 91 extern struct machine_info machine; 92 + extern int boot_console_loglevel; 93 + extern bool boot_ignore_loglevel; 82 94 83 95 /* Symbols defined by linker scripts */ 84 96 extern const char kernel_version[];
+11 -1
arch/s390/boot/decompressor.c
··· 9 9 10 10 #include <linux/kernel.h> 11 11 #include <linux/string.h> 12 + #include <asm/boot_data.h> 12 13 #include <asm/page.h> 13 14 #include "decompressor.h" 14 15 #include "boot.h" ··· 64 63 #include "../../../../lib/decompress_unzstd.c" 65 64 #endif 66 65 66 + static void decompress_error(char *m) 67 + { 68 + if (bootdebug) 69 + boot_rb_dump(); 70 + boot_emerg("Decompression error: %s\n", m); 71 + boot_emerg(" -- System halted\n"); 72 + disabled_wait(); 73 + } 74 + 67 75 unsigned long mem_safe_offset(void) 68 76 { 69 77 return ALIGN(free_mem_end_ptr, PAGE_SIZE); ··· 81 71 void deploy_kernel(void *output) 82 72 { 83 73 __decompress(_compressed_start, _compressed_end - _compressed_start, 84 - NULL, NULL, output, vmlinux.image_size, NULL, error); 74 + NULL, NULL, output, vmlinux.image_size, NULL, decompress_error); 85 75 }
+19 -1
arch/s390/boot/ipl_parm.c
··· 215 215 216 216 for (i = 0; i < ARRAY_SIZE(als); i++) { 217 217 if ((stfle_fac_list[i] & als[i]) != als[i]) { 218 - boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); 218 + boot_emerg("The Linux kernel requires facilities cleared via command line option\n"); 219 219 print_missing_facilities(); 220 220 break; 221 221 } ··· 313 313 #endif 314 314 if (!strcmp(param, "relocate_lowcore") && test_facility(193)) 315 315 relocate_lowcore = 1; 316 + if (!strcmp(param, "earlyprintk")) 317 + boot_earlyprintk = true; 318 + if (!strcmp(param, "debug")) 319 + boot_console_loglevel = CONSOLE_LOGLEVEL_DEBUG; 320 + if (!strcmp(param, "bootdebug")) { 321 + bootdebug = true; 322 + if (val) 323 + strncpy(bootdebug_filter, val, sizeof(bootdebug_filter) - 1); 324 + } 325 + if (!strcmp(param, "quiet")) 326 + boot_console_loglevel = CONSOLE_LOGLEVEL_QUIET; 327 + if (!strcmp(param, "ignore_loglevel")) 328 + boot_ignore_loglevel = true; 329 + if (!strcmp(param, "loglevel")) { 330 + boot_console_loglevel = simple_strtoull(val, NULL, 10); 331 + if (boot_console_loglevel < CONSOLE_LOGLEVEL_MIN) 332 + boot_console_loglevel = CONSOLE_LOGLEVEL_MIN; 333 + } 316 334 } 317 335 }
+1 -2
arch/s390/boot/ipl_report.c
··· 30 30 { 31 31 struct ipl_rb_certificate_entry *cert; 32 32 struct ipl_rb_component_entry *comp; 33 - size_t size; 34 33 35 34 /* 36 35 * Find the length for the IPL report boot data ··· 154 155 return; 155 156 156 157 size = get_cert_comp_list_size(); 157 - early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int)); 158 + early_ipl_comp_list_addr = physmem_alloc_or_die(RR_CERT_COMP_LIST, size, sizeof(int)); 158 159 ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size; 159 160 160 161 copy_components_bootdata();
+2 -2
arch/s390/boot/kaslr.c
··· 32 32 static int check_prng(void) 33 33 { 34 34 if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) { 35 - boot_printk("KASLR disabled: CPU has no PRNG\n"); 35 + boot_warn("KASLR disabled: CPU has no PRNG\n"); 36 36 return 0; 37 37 } 38 38 if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) ··· 168 168 * cannot have chains. 169 169 * 170 170 * On the other hand, "dynamic" or "repetitive" allocations are done via 171 - * physmem_alloc_top_down(). These allocations are tightly packed together 171 + * physmem_alloc_or_die(). These allocations are tightly packed together 172 172 * top down from the end of online memory. physmem_alloc_pos represents 173 173 * current position where those allocations start. 174 174 *
+27 -26
arch/s390/boot/pgm_check_info.c
··· 17 17 (unsigned long)_stack_end }; 18 18 bool first = true; 19 19 20 - boot_printk("Call Trace:\n"); 20 + boot_emerg("Call Trace:\n"); 21 21 while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) { 22 22 struct stack_frame *sf = (struct stack_frame *)sp; 23 23 24 - boot_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" : 25 - " sp:%016lx [<%016lx>] %pS\n", 26 - sp, sf->gprs[8], (void *)sf->gprs[8]); 24 + if (first) 25 + boot_emerg("(sp:%016lx [<%016lx>] %pS)\n", sp, sf->gprs[8], (void *)sf->gprs[8]); 26 + else 27 + boot_emerg(" sp:%016lx [<%016lx>] %pS\n", sp, sf->gprs[8], (void *)sf->gprs[8]); 27 28 if (sf->back_chain <= sp) 28 29 break; 29 30 sp = sf->back_chain; ··· 37 36 unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area; 38 37 struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area); 39 38 40 - boot_printk("Linux version %s\n", kernel_version); 39 + if (bootdebug) 40 + boot_rb_dump(); 41 + boot_emerg("Linux version %s\n", kernel_version); 41 42 if (!is_prot_virt_guest() && early_command_line[0]) 42 - boot_printk("Kernel command line: %s\n", early_command_line); 43 - boot_printk("Kernel fault: interruption code %04x ilc:%x\n", 44 - get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); 43 + boot_emerg("Kernel command line: %s\n", early_command_line); 44 + boot_emerg("Kernel fault: interruption code %04x ilc:%d\n", 45 + get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); 45 46 if (kaslr_enabled()) { 46 - boot_printk("Kernel random base: %lx\n", __kaslr_offset); 47 - boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys); 47 + boot_emerg("Kernel random base: %lx\n", __kaslr_offset); 48 + boot_emerg("Kernel random base phys: %lx\n", __kaslr_offset_phys); 48 49 } 49 - boot_printk("PSW : %016lx %016lx (%pS)\n", 50 - get_lowcore()->psw_save_area.mask, 51 - get_lowcore()->psw_save_area.addr, 52 - (void *)get_lowcore()->psw_save_area.addr); 53 - boot_printk( 54 - " R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x 
P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n", 55 - psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck, 56 - psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, 57 - psw->eaba); 58 - boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]); 59 - boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]); 60 - boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]); 61 - boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]); 50 + boot_emerg("PSW : %016lx %016lx (%pS)\n", 51 + get_lowcore()->psw_save_area.mask, 52 + get_lowcore()->psw_save_area.addr, 53 + (void *)get_lowcore()->psw_save_area.addr); 54 + boot_emerg(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n", 55 + psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck, 56 + psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba); 57 + boot_emerg("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]); 58 + boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]); 59 + boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]); 60 + boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]); 62 61 print_stacktrace(get_lowcore()->gpregs_save_area[15]); 63 - boot_printk("Last Breaking-Event-Address:\n"); 64 - boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, 65 - (void *)get_lowcore()->pgm_last_break); 62 + boot_emerg("Last Breaking-Event-Address:\n"); 63 + boot_emerg(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, 64 + (void *)get_lowcore()->pgm_last_break); 66 65 }
+77 -26
arch/s390/boot/physmem_info.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define boot_fmt(fmt) "physmem: " fmt 2 3 #include <linux/processor.h> 3 4 #include <linux/errno.h> 4 5 #include <linux/init.h> ··· 29 28 return &physmem_info.online[n]; 30 29 if (unlikely(!physmem_info.online_extended)) { 31 30 physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range( 32 - RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0, 31 + RR_MEM_DETECT_EXT, ENTRIES_EXTENDED_MAX, sizeof(long), 0, 33 32 physmem_alloc_pos, true); 34 33 } 35 34 return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES]; ··· 208 207 max_physmem_end = search_mem_end(); 209 208 physmem_info.info_source = MEM_DETECT_BIN_SEARCH; 210 209 } 210 + boot_debug("Max physical memory: 0x%016lx (info source: %s)\n", max_physmem_end, 211 + get_physmem_info_source()); 211 212 return max_physmem_end; 212 213 } 213 214 214 215 void detect_physmem_online_ranges(unsigned long max_physmem_end) 215 216 { 217 + unsigned long start, end; 218 + int i; 219 + 216 220 if (!sclp_early_read_storage_info()) { 217 221 physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO; 218 222 } else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) { ··· 232 226 } else if (max_physmem_end) { 233 227 add_physmem_online_range(0, max_physmem_end); 234 228 } 229 + boot_debug("Online memory ranges (info source: %s):\n", get_physmem_info_source()); 230 + for_each_physmem_online_range(i, &start, &end) 231 + boot_debug(" online [%d]: 0x%016lx-0x%016lx\n", i, start, end); 235 232 } 236 233 237 234 void physmem_set_usable_limit(unsigned long limit) 238 235 { 239 236 physmem_info.usable = limit; 240 237 physmem_alloc_pos = limit; 238 + boot_debug("Usable memory limit: 0x%016lx\n", limit); 241 239 } 242 240 243 241 static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max) ··· 251 241 enum reserved_range_type t; 252 242 int i; 253 243 254 - boot_printk("Linux version %s\n", kernel_version); 244 + 
boot_emerg("Linux version %s\n", kernel_version); 255 245 if (!is_prot_virt_guest() && early_command_line[0]) 256 - boot_printk("Kernel command line: %s\n", early_command_line); 257 - boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n", 258 - size, align, min, max); 259 - boot_printk("Reserved memory ranges:\n"); 246 + boot_emerg("Kernel command line: %s\n", early_command_line); 247 + boot_emerg("Out of memory allocating %lu bytes 0x%lx aligned in range %lx:%lx\n", 248 + size, align, min, max); 249 + boot_emerg("Reserved memory ranges:\n"); 260 250 for_each_physmem_reserved_range(t, range, &start, &end) { 261 - boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t)); 251 + boot_emerg("%016lx %016lx %s\n", start, end, get_rr_type_name(t)); 262 252 total_reserved_mem += end - start; 263 253 } 264 - boot_printk("Usable online memory ranges (info source: %s [%x]):\n", 265 - get_physmem_info_source(), physmem_info.info_source); 254 + boot_emerg("Usable online memory ranges (info source: %s [%d]):\n", 255 + get_physmem_info_source(), physmem_info.info_source); 266 256 for_each_physmem_usable_range(i, &start, &end) { 267 - boot_printk("%016lx %016lx\n", start, end); 257 + boot_emerg("%016lx %016lx\n", start, end); 268 258 total_mem += end - start; 269 259 } 270 - boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n", 271 - total_mem, total_reserved_mem, 272 - total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0); 260 + boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n", 261 + total_mem, total_reserved_mem, 262 + total_mem > total_reserved_mem ? 
total_mem - total_reserved_mem : 0); 273 263 print_stacktrace(current_frame_address()); 274 - boot_printk("\n\n -- System halted\n"); 264 + boot_emerg(" -- System halted\n"); 275 265 disabled_wait(); 276 266 } 277 267 278 - void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size) 268 + static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size) 279 269 { 280 270 physmem_info.reserved[type].start = addr; 281 271 physmem_info.reserved[type].end = addr + size; 282 272 } 283 273 274 + void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size) 275 + { 276 + _physmem_reserve(type, addr, size); 277 + boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Reserve:", addr, addr + size, 278 + get_rr_type_name(type)); 279 + } 280 + 284 281 void physmem_free(enum reserved_range_type type) 285 282 { 283 + boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Free:", physmem_info.reserved[type].start, 284 + physmem_info.reserved[type].end, get_rr_type_name(type)); 286 285 physmem_info.reserved[type].start = 0; 287 286 physmem_info.reserved[type].end = 0; 288 287 } ··· 358 339 max = min(max, physmem_alloc_pos); 359 340 addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom); 360 341 if (addr) 361 - physmem_reserve(type, addr, size); 342 + _physmem_reserve(type, addr, size); 343 + boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Alloc range:", addr, addr + size, 344 + get_rr_type_name(type)); 362 345 return addr; 363 346 } 364 347 365 - unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size, 366 - unsigned long align) 348 + unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size, 349 + unsigned long align, bool die_on_oom) 367 350 { 368 351 struct reserved_range *range = &physmem_info.reserved[type]; 369 - struct reserved_range *new_range; 352 + struct reserved_range *new_range = NULL; 370 353 unsigned int ranges_left; 371 354 
unsigned long addr; 372 355 373 356 addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges, 374 - &ranges_left, true); 357 + &ranges_left, die_on_oom); 358 + if (!addr) 359 + return 0; 375 360 /* if not a consecutive allocation of the same type or first allocation */ 376 361 if (range->start != addr + size) { 377 362 if (range->end) { 378 - physmem_alloc_pos = __physmem_alloc_range( 379 - sizeof(struct reserved_range), 0, 0, physmem_alloc_pos, 380 - physmem_alloc_ranges, &ranges_left, true); 381 - new_range = (struct reserved_range *)physmem_alloc_pos; 363 + addr = __physmem_alloc_range(sizeof(struct reserved_range), 0, 0, 364 + physmem_alloc_pos, physmem_alloc_ranges, 365 + &ranges_left, true); 366 + new_range = (struct reserved_range *)addr; 367 + addr = __physmem_alloc_range(size, align, 0, addr, ranges_left, 368 + &ranges_left, die_on_oom); 369 + if (!addr) 370 + return 0; 382 371 *new_range = *range; 383 372 range->chain = new_range; 384 - addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, 385 - ranges_left, &ranges_left, true); 386 373 } 387 374 range->end = addr + size; 375 + } 376 + if (type != RR_VMEM) { 377 + boot_debug("%-14s 0x%016lx-0x%016lx %-20s align 0x%lx split %d\n", "Alloc topdown:", 378 + addr, addr + size, get_rr_type_name(type), align, !!new_range); 388 379 } 389 380 range->start = addr; 390 381 physmem_alloc_pos = addr; ··· 402 373 return addr; 403 374 } 404 375 376 + unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size, 377 + unsigned long align) 378 + { 379 + return physmem_alloc(type, size, align, true); 380 + } 381 + 405 382 unsigned long get_physmem_alloc_pos(void) 406 383 { 407 384 return physmem_alloc_pos; 385 + } 386 + 387 + void dump_physmem_reserved(void) 388 + { 389 + struct reserved_range *range; 390 + enum reserved_range_type t; 391 + unsigned long start, end; 392 + 393 + boot_debug("Reserved memory ranges:\n"); 394 + for_each_physmem_reserved_range(t, 
range, &start, &end) { 395 + if (end) { 396 + boot_debug("%-14s 0x%016lx-0x%016lx @%012lx chain %012lx\n", 397 + get_rr_type_name(t), start, end, (unsigned long)range, 398 + (unsigned long)range->chain); 399 + } 400 + } 408 401 }
+199 -25
arch/s390/boot/printk.c
··· 5 5 #include <linux/ctype.h> 6 6 #include <asm/stacktrace.h> 7 7 #include <asm/boot_data.h> 8 + #include <asm/sections.h> 8 9 #include <asm/lowcore.h> 9 10 #include <asm/setup.h> 10 11 #include <asm/sclp.h> 11 12 #include <asm/uv.h> 12 13 #include "boot.h" 13 14 15 + int boot_console_loglevel = CONFIG_CONSOLE_LOGLEVEL_DEFAULT; 16 + bool boot_ignore_loglevel; 17 + char __bootdata(boot_rb)[PAGE_SIZE * 2]; 18 + bool __bootdata(boot_earlyprintk); 19 + size_t __bootdata(boot_rb_off); 20 + char __bootdata(bootdebug_filter)[128]; 21 + bool __bootdata(bootdebug); 22 + 23 + static void boot_rb_add(const char *str, size_t len) 24 + { 25 + /* leave double '\0' in the end */ 26 + size_t avail = sizeof(boot_rb) - boot_rb_off - 1; 27 + 28 + /* store strings separated by '\0' */ 29 + if (len + 1 > avail) 30 + boot_rb_off = 0; 31 + strcpy(boot_rb + boot_rb_off, str); 32 + boot_rb_off += len + 1; 33 + } 34 + 35 + static void print_rb_entry(const char *str) 36 + { 37 + sclp_early_printk(printk_skip_level(str)); 38 + } 39 + 40 + static bool debug_messages_printed(void) 41 + { 42 + return boot_earlyprintk && (boot_ignore_loglevel || boot_console_loglevel > LOGLEVEL_DEBUG); 43 + } 44 + 45 + void boot_rb_dump(void) 46 + { 47 + if (debug_messages_printed()) 48 + return; 49 + sclp_early_printk("Boot messages ring buffer:\n"); 50 + boot_rb_foreach(print_rb_entry); 51 + } 52 + 14 53 const char hex_asc[] = "0123456789abcdef"; 15 54 16 55 static char *as_hex(char *dst, unsigned long val, int pad) 17 56 { 18 - char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1); 57 + char *p = dst + max(pad, (int)__fls(val | 1) / 4 + 1); 19 58 20 - for (*p-- = 0; p >= dst; val >>= 4) 59 + for (*p-- = '\0'; p >= dst; val >>= 4) 21 60 *p-- = hex_asc[val & 0x0f]; 22 - return end; 61 + return dst; 62 + } 63 + 64 + #define MAX_NUMLEN 21 65 + static char *as_dec(char *buf, unsigned long val, bool is_signed) 66 + { 67 + bool negative = false; 68 + char *p = buf + MAX_NUMLEN; 69 + 70 + if (is_signed && 
(long)val < 0) { 71 + val = (val == LONG_MIN ? LONG_MIN : -(long)val); 72 + negative = true; 73 + } 74 + 75 + *--p = '\0'; 76 + do { 77 + *--p = '0' + (val % 10); 78 + val /= 10; 79 + } while (val); 80 + 81 + if (negative) 82 + *--p = '-'; 83 + return p; 84 + } 85 + 86 + static ssize_t strpad(char *dst, size_t dst_size, const char *src, 87 + int _pad, bool zero_pad, bool decimal) 88 + { 89 + ssize_t len = strlen(src), pad = _pad; 90 + char *p = dst; 91 + 92 + if (max(len, abs(pad)) >= dst_size) 93 + return -E2BIG; 94 + 95 + if (pad > len) { 96 + if (decimal && zero_pad && *src == '-') { 97 + *p++ = '-'; 98 + src++; 99 + len--; 100 + pad--; 101 + } 102 + memset(p, zero_pad ? '0' : ' ', pad - len); 103 + p += pad - len; 104 + } 105 + memcpy(p, src, len); 106 + p += len; 107 + if (pad < 0 && -pad > len) { 108 + memset(p, ' ', -pad - len); 109 + p += -pad - len; 110 + } 111 + *p = '\0'; 112 + return p - dst; 23 113 } 24 114 25 115 static char *symstart(char *p) ··· 148 58 return NULL; 149 59 } 150 60 151 - static noinline char *strsym(void *ip) 61 + #define MAX_SYMLEN 64 62 + static noinline char *strsym(char *buf, void *ip) 152 63 { 153 - static char buf[64]; 154 64 unsigned short off; 155 65 unsigned short len; 156 66 char *p; 157 67 158 68 p = findsym((unsigned long)ip, &off, &len); 159 69 if (p) { 160 - strncpy(buf, p, sizeof(buf)); 70 + strncpy(buf, p, MAX_SYMLEN); 161 71 /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ 162 - p = buf + strnlen(buf, sizeof(buf) - 15); 72 + p = buf + strnlen(buf, MAX_SYMLEN - 15); 163 73 strcpy(p, "+0x"); 164 - p = as_hex(p + 3, off, 0); 165 - strcpy(p, "/0x"); 166 - as_hex(p + 3, len, 0); 74 + as_hex(p + 3, off, 0); 75 + strcat(p, "/0x"); 76 + as_hex(p + strlen(p), len, 0); 167 77 } else { 168 78 as_hex(buf, (unsigned long)ip, 16); 169 79 } 170 80 return buf; 171 81 } 172 82 173 - void boot_printk(const char *fmt, ...) 
83 + static inline int printk_loglevel(const char *buf) 84 + { 85 + if (buf[0] == KERN_SOH_ASCII && buf[1]) { 86 + switch (buf[1]) { 87 + case '0' ... '7': 88 + return buf[1] - '0'; 89 + } 90 + } 91 + return MESSAGE_LOGLEVEL_DEFAULT; 92 + } 93 + 94 + static void boot_console_earlyprintk(const char *buf) 95 + { 96 + int level = printk_loglevel(buf); 97 + 98 + /* always print emergency messages */ 99 + if (level > LOGLEVEL_EMERG && !boot_earlyprintk) 100 + return; 101 + buf = printk_skip_level(buf); 102 + /* print debug messages only when bootdebug is enabled */ 103 + if (level == LOGLEVEL_DEBUG && (!bootdebug || !bootdebug_filter_match(skip_timestamp(buf)))) 104 + return; 105 + if (boot_ignore_loglevel || level < boot_console_loglevel) 106 + sclp_early_printk(buf); 107 + } 108 + 109 + static char *add_timestamp(char *buf) 110 + { 111 + #ifdef CONFIG_PRINTK_TIME 112 + union tod_clock *boot_clock = (union tod_clock *)&get_lowcore()->boot_clock; 113 + unsigned long ns = tod_to_ns(get_tod_clock() - boot_clock->tod); 114 + char ts[MAX_NUMLEN]; 115 + 116 + *buf++ = '['; 117 + buf += strpad(buf, MAX_NUMLEN, as_dec(ts, ns / NSEC_PER_SEC, 0), 5, 0, 0); 118 + *buf++ = '.'; 119 + buf += strpad(buf, MAX_NUMLEN, as_dec(ts, (ns % NSEC_PER_SEC) / NSEC_PER_USEC, 0), 6, 1, 0); 120 + *buf++ = ']'; 121 + *buf++ = ' '; 122 + #endif 123 + return buf; 124 + } 125 + 126 + #define va_arg_len_type(args, lenmod, typemod) \ 127 + ((lenmod == 'l') ? va_arg(args, typemod long) : \ 128 + (lenmod == 'h') ? (typemod short)va_arg(args, typemod int) : \ 129 + (lenmod == 'H') ? (typemod char)va_arg(args, typemod int) : \ 130 + (lenmod == 'z') ? va_arg(args, typemod long) : \ 131 + va_arg(args, typemod int)) 132 + 133 + int boot_printk(const char *fmt, ...) 
174 134 { 175 135 char buf[1024] = { 0 }; 176 136 char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */ 177 - unsigned long pad; 178 - char *p = buf; 137 + bool zero_pad, decimal; 138 + char *strval, *p = buf; 139 + char valbuf[MAX(MAX_SYMLEN, MAX_NUMLEN)]; 179 140 va_list args; 141 + char lenmod; 142 + ssize_t len; 143 + int pad; 144 + 145 + *p++ = KERN_SOH_ASCII; 146 + *p++ = printk_get_level(fmt) ?: '0' + MESSAGE_LOGLEVEL_DEFAULT; 147 + p = add_timestamp(p); 148 + fmt = printk_skip_level(fmt); 180 149 181 150 va_start(args, fmt); 182 151 for (; p < end && *fmt; fmt++) { ··· 243 94 *p++ = *fmt; 244 95 continue; 245 96 } 246 - pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0; 97 + if (*++fmt == '%') { 98 + *p++ = '%'; 99 + continue; 100 + } 101 + zero_pad = (*fmt == '0'); 102 + pad = simple_strtol(fmt, (char **)&fmt, 10); 103 + lenmod = (*fmt == 'h' || *fmt == 'l' || *fmt == 'z') ? *fmt++ : 0; 104 + if (lenmod == 'h' && *fmt == 'h') { 105 + lenmod = 'H'; 106 + fmt++; 107 + } 108 + decimal = false; 247 109 switch (*fmt) { 248 110 case 's': 249 - p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf)); 111 + if (lenmod) 112 + goto out; 113 + strval = va_arg(args, char *); 114 + zero_pad = false; 250 115 break; 251 116 case 'p': 252 - if (*++fmt != 'S') 117 + if (*++fmt != 'S' || lenmod) 253 118 goto out; 254 - p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf)); 119 + strval = strsym(valbuf, va_arg(args, void *)); 120 + zero_pad = false; 255 121 break; 256 - case 'l': 257 - if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad)) 258 - goto out; 259 - p = as_hex(p, va_arg(args, unsigned long), pad); 122 + case 'd': 123 + case 'i': 124 + strval = as_dec(valbuf, va_arg_len_type(args, lenmod, signed), 1); 125 + decimal = true; 126 + break; 127 + case 'u': 128 + strval = as_dec(valbuf, va_arg_len_type(args, lenmod, unsigned), 0); 260 129 break; 261 130 case 'x': 262 - if (end - p <= max(sizeof(int) * 2, pad)) 263 - 
goto out; 264 - p = as_hex(p, va_arg(args, unsigned int), pad); 131 + strval = as_hex(valbuf, va_arg_len_type(args, lenmod, unsigned), 0); 265 132 break; 266 133 default: 267 134 goto out; 268 135 } 136 + len = strpad(p, end - p, strval, pad, zero_pad, decimal); 137 + if (len == -E2BIG) 138 + break; 139 + p += len; 269 140 } 270 141 out: 271 142 va_end(args); 272 - sclp_early_printk(buf); 143 + len = strlen(buf); 144 + if (len) { 145 + boot_rb_add(buf, len); 146 + boot_console_earlyprintk(buf); 147 + } 148 + return len; 273 149 }
+37 -10
arch/s390/boot/startup.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define boot_fmt(fmt) "startup: " fmt 2 3 #include <linux/string.h> 3 4 #include <linux/elf.h> 4 5 #include <asm/page-states.h> ··· 43 42 44 43 void error(char *x) 45 44 { 46 - boot_printk("\n\n%s\n\n -- System halted", x); 45 + boot_emerg("%s\n", x); 46 + boot_emerg(" -- System halted\n"); 47 47 disabled_wait(); 48 48 } 49 49 ··· 145 143 return; 146 144 old_addr = addr; 147 145 physmem_free(RR_INITRD); 148 - addr = physmem_alloc_top_down(RR_INITRD, size, 0); 146 + addr = physmem_alloc_or_die(RR_INITRD, size, 0); 149 147 memmove((void *)addr, (void *)old_addr, size); 150 148 } 151 149 ··· 224 222 if (oldmem_data.start) { 225 223 __kaslr_enabled = 0; 226 224 ident_map_size = min(ident_map_size, oldmem_data.size); 225 + boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size); 227 226 } else if (ipl_block_valid && is_ipl_block_dump()) { 228 227 __kaslr_enabled = 0; 229 - if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) 228 + if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) { 230 229 ident_map_size = min(ident_map_size, hsa_size); 230 + boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size); 231 + } 231 232 } 232 233 #endif 234 + boot_debug("Identity map size: 0x%016lx\n", ident_map_size); 233 235 } 234 236 235 237 #define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore)) ··· 273 267 BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE)); 274 268 BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE); 275 269 vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE); 270 + boot_debug("vmem size estimated: 0x%016lx\n", vsize); 276 271 if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE || 277 272 (vsize > _REGION2_SIZE && kaslr_enabled())) { 278 273 asce_limit = _REGION1_SIZE; ··· 297 290 * otherwise asce_limit and rte_size would have been adjusted. 
298 291 */ 299 292 vmax = adjust_to_uv_max(asce_limit); 293 + boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax); 300 294 #ifdef CONFIG_KASAN 301 295 BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START); 296 + boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END); 302 297 /* force vmalloc and modules below kasan shadow */ 303 298 vmax = min(vmax, KASAN_SHADOW_START); 304 299 #endif ··· 314 305 pos = 0; 315 306 kernel_end = vmax - pos * THREAD_SIZE; 316 307 kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE); 308 + boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax); 309 + boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start, 310 + kernel_size + kernel_size); 317 311 } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) { 318 312 kernel_start = round_down(vmax - kernel_size, THREAD_SIZE); 319 - boot_printk("The kernel base address is forced to %lx\n", kernel_start); 313 + boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start, 314 + kernel_start + kernel_size); 320 315 } else { 321 316 kernel_start = __NO_KASLR_START_KERNEL; 317 + boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start, 318 + kernel_start + kernel_size); 322 319 } 323 320 __kaslr_offset = kernel_start; 321 + boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset); 324 322 325 323 MODULES_END = round_down(kernel_start, _SEGMENT_SIZE); 326 324 MODULES_VADDR = MODULES_END - MODULES_LEN; 327 325 VMALLOC_END = MODULES_VADDR; 328 326 if (IS_ENABLED(CONFIG_KMSAN)) 329 327 VMALLOC_END -= MODULES_LEN * 2; 328 + boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END); 330 329 331 330 /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */ 332 331 vsize = (VMALLOC_END - FIXMAP_SIZE) / 2; ··· 346 329 VMALLOC_END -= vmalloc_size * 2; 347 330 } 348 331 VMALLOC_START = VMALLOC_END - 
vmalloc_size; 332 + boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END); 349 333 350 334 __memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE); 335 + boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area, 336 + __memcpy_real_area + MEMCPY_REAL_SIZE); 351 337 __abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE, 352 338 sizeof(struct lowcore)); 339 + boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore, 340 + __abs_lowcore + ABS_LOWCORE_MAP_SIZE); 353 341 354 342 /* split remaining virtual space between 1:1 mapping & vmemmap array */ 355 343 pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page)); ··· 374 352 BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS)); 375 353 max_mappable = max(ident_map_size, MAX_DCSS_ADDR); 376 354 max_mappable = min(max_mappable, vmemmap_start); 377 - if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE)) 378 - __identity_base = round_down(vmemmap_start - max_mappable, rte_size); 355 + #ifdef CONFIG_RANDOMIZE_IDENTITY_BASE 356 + __identity_base = round_down(vmemmap_start - max_mappable, rte_size); 357 + #endif 358 + boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base, 359 + __identity_base + ident_map_size); 379 360 380 361 return asce_limit; 381 362 } ··· 437 412 psw_t psw; 438 413 439 414 setup_lpp(); 415 + store_ipl_parmblock(); 416 + uv_query_info(); 417 + setup_boot_command_line(); 418 + parse_boot_command_line(); 440 419 441 420 /* 442 421 * Non-randomized kernel physical start address must be _SEGMENT_SIZE ··· 460 431 oldmem_data.start = parmarea.oldmem_base; 461 432 oldmem_data.size = parmarea.oldmem_size; 462 433 463 - store_ipl_parmblock(); 464 434 read_ipl_report(); 465 - uv_query_info(); 466 435 sclp_early_read_info(); 467 - setup_boot_command_line(); 468 - parse_boot_command_line(); 469 436 detect_facilities(); 470 437 cmma_init(); 471 438 sanitize_prot_virt_host(); ··· 551 526 __kaslr_offset, __kaslr_offset_phys); 552 527 
kaslr_adjust_got(__kaslr_offset); 553 528 setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit); 529 + dump_physmem_reserved(); 554 530 copy_bootdata(); 555 531 __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions, 556 532 (struct alt_instr *)_vmlinux_info.alt_instructions_end, ··· 568 542 */ 569 543 psw.addr = __kaslr_offset + vmlinux.entry; 570 544 psw.mask = PSW_KERNEL_BITS; 545 + boot_debug("Starting kernel at: 0x%016lx\n", psw.addr); 571 546 __load_psw(psw); 572 547 }
+103 -32
arch/s390/boot/vmem.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define boot_fmt(fmt) "vmem: " fmt 2 3 #include <linux/sched/task.h> 3 4 #include <linux/pgtable.h> 4 5 #include <linux/kasan.h> ··· 14 13 #include "decompressor.h" 15 14 #include "boot.h" 16 15 16 + #define INVALID_PHYS_ADDR (~(phys_addr_t)0) 17 17 struct ctlreg __bootdata_preserved(s390_invalid_asce); 18 18 19 19 #ifdef CONFIG_PROC_FS ··· 33 31 POPULATE_IDENTITY, 34 32 POPULATE_KERNEL, 35 33 #ifdef CONFIG_KASAN 34 + /* KASAN modes should be last and grouped together, see is_kasan_populate_mode() */ 36 35 POPULATE_KASAN_MAP_SHADOW, 37 36 POPULATE_KASAN_ZERO_SHADOW, 38 37 POPULATE_KASAN_SHALLOW 39 38 #endif 40 39 }; 40 + 41 + #define POPULATE_MODE_NAME(t) case POPULATE_ ## t: return #t 42 + static inline const char *get_populate_mode_name(enum populate_mode t) 43 + { 44 + switch (t) { 45 + POPULATE_MODE_NAME(NONE); 46 + POPULATE_MODE_NAME(DIRECT); 47 + POPULATE_MODE_NAME(LOWCORE); 48 + POPULATE_MODE_NAME(ABS_LOWCORE); 49 + POPULATE_MODE_NAME(IDENTITY); 50 + POPULATE_MODE_NAME(KERNEL); 51 + #ifdef CONFIG_KASAN 52 + POPULATE_MODE_NAME(KASAN_MAP_SHADOW); 53 + POPULATE_MODE_NAME(KASAN_ZERO_SHADOW); 54 + POPULATE_MODE_NAME(KASAN_SHALLOW); 55 + #endif 56 + default: 57 + return "UNKNOWN"; 58 + } 59 + } 60 + 61 + static bool is_kasan_populate_mode(enum populate_mode mode) 62 + { 63 + #ifdef CONFIG_KASAN 64 + return mode >= POPULATE_KASAN_MAP_SHADOW; 65 + #else 66 + return false; 67 + #endif 68 + } 41 69 42 70 static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode); 43 71 ··· 84 52 85 53 static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode) 86 54 { 87 - start = PAGE_ALIGN_DOWN(__sha(start)); 88 - end = PAGE_ALIGN(__sha(end)); 89 - pgtable_populate(start, end, mode); 55 + unsigned long sha_start = PAGE_ALIGN_DOWN(__sha(start)); 56 + unsigned long sha_end = PAGE_ALIGN(__sha(end)); 57 + 58 + boot_debug("%-17s 0x%016lx-0x%016lx >> 
0x%016lx-0x%016lx\n", get_populate_mode_name(mode), 59 + start, end, sha_start, sha_end); 60 + pgtable_populate(sha_start, sha_end, mode); 90 61 } 91 62 92 63 static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end) ··· 235 200 unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER; 236 201 unsigned long *table; 237 202 238 - table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size); 203 + table = (unsigned long *)physmem_alloc_or_die(RR_VMEM, size, size); 239 204 crst_table_init(table, val); 240 205 __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER); 241 206 return table; ··· 251 216 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off 252 217 */ 253 218 if (!pte_leftover) { 254 - pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE); 219 + pte_leftover = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE); 255 220 pte = pte_leftover + _PAGE_TABLE_SIZE; 256 221 __arch_set_page_dat(pte, 1); 257 222 } else { ··· 263 228 return pte; 264 229 } 265 230 266 - static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode) 231 + static unsigned long resolve_pa_may_alloc(unsigned long addr, unsigned long size, 232 + enum populate_mode mode) 267 233 { 268 234 switch (mode) { 269 235 case POPULATE_NONE: 270 - return -1; 236 + return INVALID_PHYS_ADDR; 271 237 case POPULATE_DIRECT: 272 238 return addr; 273 239 case POPULATE_LOWCORE: ··· 281 245 return __identity_pa(addr); 282 246 #ifdef CONFIG_KASAN 283 247 case POPULATE_KASAN_MAP_SHADOW: 284 - addr = physmem_alloc_top_down(RR_VMEM, size, size); 285 - memset((void *)addr, 0, size); 286 - return addr; 248 + /* Allow to fail large page allocations, this will fall back to 1mb/4k pages */ 249 + addr = physmem_alloc(RR_VMEM, size, size, size == PAGE_SIZE); 250 + if (addr) { 251 + memset((void *)addr, 0, size); 252 + return addr; 253 + } 254 + return INVALID_PHYS_ADDR; 287 255 #endif 288 256 default: 289 - return -1; 257 + return 
INVALID_PHYS_ADDR; 290 258 } 291 259 } 292 260 293 - static bool large_allowed(enum populate_mode mode) 261 + static bool large_page_mapping_allowed(enum populate_mode mode) 294 262 { 295 - return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL); 263 + switch (mode) { 264 + case POPULATE_DIRECT: 265 + case POPULATE_IDENTITY: 266 + case POPULATE_KERNEL: 267 + #ifdef CONFIG_KASAN 268 + case POPULATE_KASAN_MAP_SHADOW: 269 + #endif 270 + return true; 271 + default: 272 + return false; 273 + } 296 274 } 297 275 298 - static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end, 299 - enum populate_mode mode) 276 + static unsigned long try_get_large_pud_pa(pud_t *pu_dir, unsigned long addr, unsigned long end, 277 + enum populate_mode mode) 300 278 { 301 - unsigned long size = end - addr; 279 + unsigned long pa, size = end - addr; 302 280 303 - return machine.has_edat2 && large_allowed(mode) && 304 - IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) && 305 - IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE); 281 + if (!machine.has_edat2 || !large_page_mapping_allowed(mode) || 282 + !IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE)) 283 + return INVALID_PHYS_ADDR; 284 + 285 + pa = resolve_pa_may_alloc(addr, size, mode); 286 + if (!IS_ALIGNED(pa, PUD_SIZE)) 287 + return INVALID_PHYS_ADDR; 288 + 289 + return pa; 306 290 } 307 291 308 - static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end, 309 - enum populate_mode mode) 292 + static unsigned long try_get_large_pmd_pa(pmd_t *pm_dir, unsigned long addr, unsigned long end, 293 + enum populate_mode mode) 310 294 { 311 - unsigned long size = end - addr; 295 + unsigned long pa, size = end - addr; 312 296 313 - return machine.has_edat1 && large_allowed(mode) && 314 - IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) && 315 - IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE); 297 + if (!machine.has_edat1 || !large_page_mapping_allowed(mode) || 298 + !IS_ALIGNED(addr, 
PMD_SIZE) || (size < PMD_SIZE)) 299 + return INVALID_PHYS_ADDR; 300 + 301 + pa = resolve_pa_may_alloc(addr, size, mode); 302 + if (!IS_ALIGNED(pa, PMD_SIZE)) 303 + return INVALID_PHYS_ADDR; 304 + 305 + return pa; 316 306 } 317 307 318 308 static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end, ··· 352 290 if (pte_none(*pte)) { 353 291 if (kasan_pte_populate_zero_shadow(pte, mode)) 354 292 continue; 355 - entry = __pte(_pa(addr, PAGE_SIZE, mode)); 293 + entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode)); 356 294 entry = set_pte_bit(entry, PAGE_KERNEL); 357 295 set_pte(pte, entry); 358 296 pages++; ··· 365 303 static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end, 366 304 enum populate_mode mode) 367 305 { 368 - unsigned long next, pages = 0; 306 + unsigned long pa, next, pages = 0; 369 307 pmd_t *pmd, entry; 370 308 pte_t *pte; 371 309 ··· 375 313 if (pmd_none(*pmd)) { 376 314 if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode)) 377 315 continue; 378 - if (can_large_pmd(pmd, addr, next, mode)) { 379 - entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode)); 316 + pa = try_get_large_pmd_pa(pmd, addr, next, mode); 317 + if (pa != INVALID_PHYS_ADDR) { 318 + entry = __pmd(pa); 380 319 entry = set_pmd_bit(entry, SEGMENT_KERNEL); 381 320 set_pmd(pmd, entry); 382 321 pages++; ··· 397 334 static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end, 398 335 enum populate_mode mode) 399 336 { 400 - unsigned long next, pages = 0; 337 + unsigned long pa, next, pages = 0; 401 338 pud_t *pud, entry; 402 339 pmd_t *pmd; 403 340 ··· 407 344 if (pud_none(*pud)) { 408 345 if (kasan_pud_populate_zero_shadow(pud, addr, next, mode)) 409 346 continue; 410 - if (can_large_pud(pud, addr, next, mode)) { 411 - entry = __pud(_pa(addr, _REGION3_SIZE, mode)); 347 + pa = try_get_large_pud_pa(pud, addr, next, mode); 348 + if (pa != INVALID_PHYS_ADDR) { 349 + entry = __pud(pa); 412 350 entry = set_pud_bit(entry, 
REGION3_KERNEL); 413 351 set_pud(pud, entry); 414 352 pages++; ··· 451 387 unsigned long next; 452 388 pgd_t *pgd; 453 389 p4d_t *p4d; 390 + 391 + if (!is_kasan_populate_mode(mode)) { 392 + boot_debug("%-17s 0x%016lx-0x%016lx -> 0x%016lx-0x%016lx\n", 393 + get_populate_mode_name(mode), addr, end, 394 + resolve_pa_may_alloc(addr, 0, mode), 395 + resolve_pa_may_alloc(end - 1, 0, mode) + 1); 396 + } 454 397 455 398 pgd = pgd_offset(&init_mm, addr); 456 399 for (; addr < end; addr = next, pgd++) {
+7 -7
arch/s390/include/asm/asm-extable.h
··· 9 9 #define EX_TYPE_NONE 0 10 10 #define EX_TYPE_FIXUP 1 11 11 #define EX_TYPE_BPF 2 12 - #define EX_TYPE_UA_STORE 3 13 - #define EX_TYPE_UA_LOAD_MEM 4 12 + #define EX_TYPE_UA_FAULT 3 14 13 #define EX_TYPE_UA_LOAD_REG 5 15 14 #define EX_TYPE_UA_LOAD_REGPAIR 6 16 15 #define EX_TYPE_ZEROPAD 7 16 + #define EX_TYPE_FPC 8 17 17 18 18 #define EX_DATA_REG_ERR_SHIFT 0 19 19 #define EX_DATA_REG_ERR GENMASK(3, 0) ··· 69 69 #define EX_TABLE_AMODE31(_fault, _target) \ 70 70 __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0) 71 71 72 - #define EX_TABLE_UA_STORE(_fault, _target, _regerr) \ 73 - __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0) 74 - 75 - #define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \ 76 - __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len) 72 + #define EX_TABLE_UA_FAULT(_fault, _target, _regerr) \ 73 + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_FAULT, _regerr, _regerr, 0) 77 74 78 75 #define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \ 79 76 __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0) ··· 80 83 81 84 #define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr) \ 82 85 __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0) 86 + 87 + #define EX_TABLE_FPC(_fault, _target) \ 88 + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0) 83 89 84 90 #endif /* __ASM_EXTABLE_H */
+1 -1
arch/s390/include/asm/asm.h
··· 28 28 * [var] also contains the program mask. CC_TRANSFORM() moves the condition 29 29 * code to the two least significant bits and sets all other bits to zero. 30 30 */ 31 - #if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_GCC_ASM_FLAG_OUTPUT_BROKEN)) 31 + #if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_ASM_FLAG_OUTPUT_BROKEN)) 32 32 33 33 #define __HAVE_ASM_FLAG_OUTPUTS__ 34 34
+1 -1
arch/s390/include/asm/bitops.h
··· 60 60 asm volatile( 61 61 " tm %[addr],%[mask]\n" 62 62 : "=@cc" (cc) 63 - : [addr] "R" (*addr), [mask] "I" (mask) 63 + : [addr] "Q" (*addr), [mask] "I" (mask) 64 64 ); 65 65 return cc == 3; 66 66 }
+51
arch/s390/include/asm/boot_data.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #ifndef _ASM_S390_BOOT_DATA_H 3 3 4 + #include <linux/string.h> 4 5 #include <asm/setup.h> 5 6 #include <asm/ipl.h> 6 7 ··· 15 14 16 15 extern unsigned long early_ipl_comp_list_addr; 17 16 extern unsigned long early_ipl_comp_list_size; 17 + 18 + extern char boot_rb[PAGE_SIZE * 2]; 19 + extern bool boot_earlyprintk; 20 + extern size_t boot_rb_off; 21 + extern char bootdebug_filter[128]; 22 + extern bool bootdebug; 23 + 24 + #define boot_rb_foreach(cb) \ 25 + do { \ 26 + size_t off = boot_rb_off + strlen(boot_rb + boot_rb_off) + 1; \ 27 + size_t len; \ 28 + for (; off < sizeof(boot_rb) && (len = strlen(boot_rb + off)); off += len + 1) \ 29 + cb(boot_rb + off); \ 30 + for (off = 0; off < boot_rb_off && (len = strlen(boot_rb + off)); off += len + 1) \ 31 + cb(boot_rb + off); \ 32 + } while (0) 33 + 34 + /* 35 + * bootdebug_filter is a comma separated list of strings, 36 + * where each string can be a prefix of the message. 37 + */ 38 + static inline bool bootdebug_filter_match(const char *buf) 39 + { 40 + char *p = bootdebug_filter, *s; 41 + char *end; 42 + 43 + if (!*p) 44 + return true; 45 + 46 + end = p + strlen(p); 47 + while (p < end) { 48 + p = skip_spaces(p); 49 + s = memscan(p, ',', end - p); 50 + if (!strncmp(p, buf, s - p)) 51 + return true; 52 + p = s + 1; 53 + } 54 + return false; 55 + } 56 + 57 + static inline const char *skip_timestamp(const char *buf) 58 + { 59 + #ifdef CONFIG_PRINTK_TIME 60 + const char *p = memchr(buf, ']', strlen(buf)); 61 + 62 + if (p && p[1] == ' ') 63 + return p + 2; 64 + #endif 65 + return buf; 66 + } 18 67 19 68 #endif /* _ASM_S390_BOOT_DATA_H */
+96 -103
arch/s390/include/asm/fpu-insn.h
··· 100 100 */ 101 101 static inline void fpu_lfpc_safe(unsigned int *fpc) 102 102 { 103 - u32 tmp; 104 - 105 103 instrument_read(fpc, sizeof(*fpc)); 106 104 asm_inline volatile( 107 - "0: lfpc %[fpc]\n" 108 - "1: nopr %%r7\n" 109 - ".pushsection .fixup, \"ax\"\n" 110 - "2: lghi %[tmp],0\n" 111 - " sfpc %[tmp]\n" 112 - " jg 1b\n" 113 - ".popsection\n" 114 - EX_TABLE(1b, 2b) 115 - : [tmp] "=d" (tmp) 105 + " lfpc %[fpc]\n" 106 + "0: nopr %%r7\n" 107 + EX_TABLE_FPC(0b, 0b) 108 + : 116 109 : [fpc] "Q" (*fpc) 117 110 : "memory"); 118 111 } ··· 176 183 : "memory"); 177 184 } 178 185 179 - #ifdef CONFIG_CC_IS_CLANG 186 + #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS 187 + 188 + static __always_inline void fpu_vl(u8 v1, const void *vxr) 189 + { 190 + instrument_read(vxr, sizeof(__vector128)); 191 + asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n" 192 + : 193 + : [vxr] "Q" (*(__vector128 *)vxr), 194 + [v1] "I" (v1) 195 + : "memory"); 196 + } 197 + 198 + #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 180 199 181 200 static __always_inline void fpu_vl(u8 v1, const void *vxr) 182 201 { ··· 202 197 : "memory", "1"); 203 198 } 204 199 205 - #else /* CONFIG_CC_IS_CLANG */ 206 - 207 - static __always_inline void fpu_vl(u8 v1, const void *vxr) 208 - { 209 - instrument_read(vxr, sizeof(__vector128)); 210 - asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n" 211 - : 212 - : [vxr] "Q" (*(__vector128 *)vxr), 213 - [v1] "I" (v1) 214 - : "memory"); 215 - } 216 - 217 - #endif /* CONFIG_CC_IS_CLANG */ 200 + #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 218 201 219 202 static __always_inline void fpu_vleib(u8 v, s16 val, u8 index) 220 203 { ··· 231 238 return val; 232 239 } 233 240 234 - #ifdef CONFIG_CC_IS_CLANG 241 + #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS 242 + 243 + static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) 244 + { 245 + unsigned int size; 246 + 247 + size = min(index + 1, sizeof(__vector128)); 248 + instrument_read(vxr, size); 249 + asm volatile("VLL 
%[v1],%[index],%O[vxr],%R[vxr]\n" 250 + : 251 + : [vxr] "Q" (*(u8 *)vxr), 252 + [index] "d" (index), 253 + [v1] "I" (v1) 254 + : "memory"); 255 + } 256 + 257 + #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 235 258 236 259 static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) 237 260 { ··· 265 256 : "memory", "1"); 266 257 } 267 258 268 - #else /* CONFIG_CC_IS_CLANG */ 259 + #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 269 260 270 - static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) 271 - { 272 - unsigned int size; 261 + #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS 273 262 274 - size = min(index + 1, sizeof(__vector128)); 275 - instrument_read(vxr, size); 276 - asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n" 277 - : 278 - : [vxr] "Q" (*(u8 *)vxr), 279 - [index] "d" (index), 280 - [v1] "I" (v1) 281 - : "memory"); 282 - } 263 + #define fpu_vlm(_v1, _v3, _vxrs) \ 264 + ({ \ 265 + unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \ 266 + struct { \ 267 + __vector128 _v[(_v3) - (_v1) + 1]; \ 268 + } *_v = (void *)(_vxrs); \ 269 + \ 270 + instrument_read(_v, size); \ 271 + asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ 272 + : \ 273 + : [vxrs] "Q" (*_v), \ 274 + [v1] "I" (_v1), [v3] "I" (_v3) \ 275 + : "memory"); \ 276 + (_v3) - (_v1) + 1; \ 277 + }) 283 278 284 - #endif /* CONFIG_CC_IS_CLANG */ 285 - 286 - #ifdef CONFIG_CC_IS_CLANG 279 + #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 287 280 288 281 #define fpu_vlm(_v1, _v3, _vxrs) \ 289 282 ({ \ ··· 305 294 (_v3) - (_v1) + 1; \ 306 295 }) 307 296 308 - #else /* CONFIG_CC_IS_CLANG */ 309 - 310 - #define fpu_vlm(_v1, _v3, _vxrs) \ 311 - ({ \ 312 - unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \ 313 - struct { \ 314 - __vector128 _v[(_v3) - (_v1) + 1]; \ 315 - } *_v = (void *)(_vxrs); \ 316 - \ 317 - instrument_read(_v, size); \ 318 - asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ 319 - : \ 320 - : [vxrs] "Q" (*_v), \ 321 - [v1] "I" (_v1), 
[v3] "I" (_v3) \ 322 - : "memory"); \ 323 - (_v3) - (_v1) + 1; \ 324 - }) 325 - 326 - #endif /* CONFIG_CC_IS_CLANG */ 297 + #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 327 298 328 299 static __always_inline void fpu_vlr(u8 v1, u8 v2) 329 300 { ··· 355 362 : "memory"); 356 363 } 357 364 358 - #ifdef CONFIG_CC_IS_CLANG 365 + #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS 366 + 367 + static __always_inline void fpu_vst(u8 v1, const void *vxr) 368 + { 369 + instrument_write(vxr, sizeof(__vector128)); 370 + asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n" 371 + : [vxr] "=Q" (*(__vector128 *)vxr) 372 + : [v1] "I" (v1) 373 + : "memory"); 374 + } 375 + 376 + #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 359 377 360 378 static __always_inline void fpu_vst(u8 v1, const void *vxr) 361 379 { ··· 379 375 : "memory", "1"); 380 376 } 381 377 382 - #else /* CONFIG_CC_IS_CLANG */ 378 + #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 383 379 384 - static __always_inline void fpu_vst(u8 v1, const void *vxr) 380 + #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS 381 + 382 + static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) 385 383 { 386 - instrument_write(vxr, sizeof(__vector128)); 387 - asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n" 388 - : [vxr] "=Q" (*(__vector128 *)vxr) 389 - : [v1] "I" (v1) 384 + unsigned int size; 385 + 386 + size = min(index + 1, sizeof(__vector128)); 387 + instrument_write(vxr, size); 388 + asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n" 389 + : [vxr] "=Q" (*(u8 *)vxr) 390 + : [index] "d" (index), [v1] "I" (v1) 390 391 : "memory"); 391 392 } 392 393 393 - #endif /* CONFIG_CC_IS_CLANG */ 394 - 395 - #ifdef CONFIG_CC_IS_CLANG 394 + #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 396 395 397 396 static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) 398 397 { ··· 411 404 : "memory", "1"); 412 405 } 413 406 414 - #else /* CONFIG_CC_IS_CLANG */ 407 + #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 415 408 416 - static __always_inline 
void fpu_vstl(u8 v1, u32 index, const void *vxr) 417 - { 418 - unsigned int size; 409 + #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS 419 410 420 - size = min(index + 1, sizeof(__vector128)); 421 - instrument_write(vxr, size); 422 - asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n" 423 - : [vxr] "=Q" (*(u8 *)vxr) 424 - : [index] "d" (index), [v1] "I" (v1) 425 - : "memory"); 426 - } 411 + #define fpu_vstm(_v1, _v3, _vxrs) \ 412 + ({ \ 413 + unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \ 414 + struct { \ 415 + __vector128 _v[(_v3) - (_v1) + 1]; \ 416 + } *_v = (void *)(_vxrs); \ 417 + \ 418 + instrument_write(_v, size); \ 419 + asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ 420 + : [vxrs] "=Q" (*_v) \ 421 + : [v1] "I" (_v1), [v3] "I" (_v3) \ 422 + : "memory"); \ 423 + (_v3) - (_v1) + 1; \ 424 + }) 427 425 428 - #endif /* CONFIG_CC_IS_CLANG */ 429 - 430 - #ifdef CONFIG_CC_IS_CLANG 426 + #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 431 427 432 428 #define fpu_vstm(_v1, _v3, _vxrs) \ 433 429 ({ \ ··· 449 439 (_v3) - (_v1) + 1; \ 450 440 }) 451 441 452 - #else /* CONFIG_CC_IS_CLANG */ 453 - 454 - #define fpu_vstm(_v1, _v3, _vxrs) \ 455 - ({ \ 456 - unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \ 457 - struct { \ 458 - __vector128 _v[(_v3) - (_v1) + 1]; \ 459 - } *_v = (void *)(_vxrs); \ 460 - \ 461 - instrument_write(_v, size); \ 462 - asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ 463 - : [vxrs] "=Q" (*_v) \ 464 - : [v1] "I" (_v1), [v3] "I" (_v3) \ 465 - : "memory"); \ 466 - (_v3) - (_v1) + 1; \ 467 - }) 468 - 469 - #endif /* CONFIG_CC_IS_CLANG */ 442 + #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 470 443 471 444 static __always_inline void fpu_vupllf(u8 v1, u8 v2) 472 445 {
+61 -46
arch/s390/include/asm/futex.h
··· 2 2 #ifndef _ASM_S390_FUTEX_H 3 3 #define _ASM_S390_FUTEX_H 4 4 5 + #include <linux/instrumented.h> 5 6 #include <linux/uaccess.h> 6 7 #include <linux/futex.h> 7 8 #include <asm/asm-extable.h> 8 9 #include <asm/mmu_context.h> 9 10 #include <asm/errno.h> 10 11 11 - #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ 12 - asm volatile( \ 13 - " sacf 256\n" \ 14 - "0: l %1,0(%6)\n" \ 15 - "1:"insn \ 16 - "2: cs %1,%2,0(%6)\n" \ 17 - "3: jl 1b\n" \ 18 - " lhi %0,0\n" \ 19 - "4: sacf 768\n" \ 20 - EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ 21 - EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ 22 - : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ 23 - "=m" (*uaddr) \ 24 - : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 25 - "m" (*uaddr) : "cc"); 12 + #define FUTEX_OP_FUNC(name, insn) \ 13 + static uaccess_kmsan_or_inline int \ 14 + __futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \ 15 + { \ 16 + int rc, new; \ 17 + \ 18 + instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \ 19 + asm_inline volatile( \ 20 + " sacf 256\n" \ 21 + "0: l %[old],%[uaddr]\n" \ 22 + "1:"insn \ 23 + "2: cs %[old],%[new],%[uaddr]\n" \ 24 + "3: jl 1b\n" \ 25 + " lhi %[rc],0\n" \ 26 + "4: sacf 768\n" \ 27 + EX_TABLE_UA_FAULT(0b, 4b, %[rc]) \ 28 + EX_TABLE_UA_FAULT(1b, 4b, %[rc]) \ 29 + EX_TABLE_UA_FAULT(2b, 4b, %[rc]) \ 30 + EX_TABLE_UA_FAULT(3b, 4b, %[rc]) \ 31 + : [rc] "=d" (rc), [old] "=&d" (*old), \ 32 + [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \ 33 + : [oparg] "d" (oparg) \ 34 + : "cc"); \ 35 + if (!rc) \ 36 + instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \ 37 + return rc; \ 38 + } 26 39 27 - static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, 28 - u32 __user *uaddr) 40 + FUTEX_OP_FUNC(set, "lr %[new],%[oparg]\n") 41 + FUTEX_OP_FUNC(add, "lr %[new],%[old]\n ar %[new],%[oparg]\n") 42 + FUTEX_OP_FUNC(or, "lr %[new],%[old]\n or %[new],%[oparg]\n") 43 + FUTEX_OP_FUNC(and, "lr %[new],%[old]\n nr %[new],%[oparg]\n") 44 + FUTEX_OP_FUNC(xor, "lr 
%[new],%[old]\n xr %[new],%[oparg]\n") 45 + 46 + static inline 47 + int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) 29 48 { 30 - int oldval = 0, newval, ret; 49 + int old, rc; 31 50 32 51 switch (op) { 33 52 case FUTEX_OP_SET: 34 - __futex_atomic_op("lr %2,%5\n", 35 - ret, oldval, newval, uaddr, oparg); 53 + rc = __futex_atomic_set(oparg, &old, uaddr); 36 54 break; 37 55 case FUTEX_OP_ADD: 38 - __futex_atomic_op("lr %2,%1\nar %2,%5\n", 39 - ret, oldval, newval, uaddr, oparg); 56 + rc = __futex_atomic_add(oparg, &old, uaddr); 40 57 break; 41 58 case FUTEX_OP_OR: 42 - __futex_atomic_op("lr %2,%1\nor %2,%5\n", 43 - ret, oldval, newval, uaddr, oparg); 59 + rc = __futex_atomic_or(oparg, &old, uaddr); 44 60 break; 45 61 case FUTEX_OP_ANDN: 46 - __futex_atomic_op("lr %2,%1\nnr %2,%5\n", 47 - ret, oldval, newval, uaddr, ~oparg); 62 + rc = __futex_atomic_and(~oparg, &old, uaddr); 48 63 break; 49 64 case FUTEX_OP_XOR: 50 - __futex_atomic_op("lr %2,%1\nxr %2,%5\n", 51 - ret, oldval, newval, uaddr, oparg); 65 + rc = __futex_atomic_xor(oparg, &old, uaddr); 52 66 break; 53 67 default: 54 - ret = -ENOSYS; 68 + rc = -ENOSYS; 55 69 } 56 - 57 - if (!ret) 58 - *oval = oldval; 59 - 60 - return ret; 70 + if (!rc) 71 + *oval = old; 72 + return rc; 61 73 } 62 74 63 - static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 64 - u32 oldval, u32 newval) 75 + static uaccess_kmsan_or_inline 76 + int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) 65 77 { 66 - int ret; 78 + int rc; 67 79 68 - asm volatile( 69 - " sacf 256\n" 70 - "0: cs %1,%4,0(%5)\n" 71 - "1: la %0,0\n" 72 - "2: sacf 768\n" 73 - EX_TABLE(0b,2b) EX_TABLE(1b,2b) 74 - : "=d" (ret), "+d" (oldval), "=m" (*uaddr) 75 - : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) 80 + instrument_copy_from_user_before(uval, uaddr, sizeof(*uval)); 81 + asm_inline volatile( 82 + " sacf 256\n" 83 + "0: cs %[old],%[new],%[uaddr]\n" 84 + "1: lhi 
%[rc],0\n" 85 + "2: sacf 768\n" 86 + EX_TABLE_UA_FAULT(0b, 2b, %[rc]) 87 + EX_TABLE_UA_FAULT(1b, 2b, %[rc]) 88 + : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr) 89 + : [new] "d" (newval) 76 90 : "cc", "memory"); 77 91 *uval = oldval; 78 - return ret; 92 + instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0); 93 + return rc; 79 94 } 80 95 81 96 #endif /* _ASM_S390_FUTEX_H */
+4
arch/s390/include/asm/page.h
··· 184 184 185 185 #define __kaslr_offset vm_layout.kaslr_offset 186 186 #define __kaslr_offset_phys vm_layout.kaslr_offset_phys 187 + #ifdef CONFIG_RANDOMIZE_IDENTITY_BASE 187 188 #define __identity_base vm_layout.identity_base 189 + #else 190 + #define __identity_base 0UL 191 + #endif 188 192 #define ident_map_size vm_layout.identity_size 189 193 190 194 static inline unsigned long kaslr_offset(void)
+2 -2
arch/s390/include/asm/physmem_info.h
··· 26 26 RR_AMODE31, 27 27 RR_IPLREPORT, 28 28 RR_CERT_COMP_LIST, 29 - RR_MEM_DETECT_EXTENDED, 29 + RR_MEM_DETECT_EXT, 30 30 RR_VMEM, 31 31 RR_MAX 32 32 }; ··· 128 128 RR_TYPE_NAME(AMODE31); 129 129 RR_TYPE_NAME(IPLREPORT); 130 130 RR_TYPE_NAME(CERT_COMP_LIST); 131 - RR_TYPE_NAME(MEM_DETECT_EXTENDED); 131 + RR_TYPE_NAME(MEM_DETECT_EXT); 132 132 RR_TYPE_NAME(VMEM); 133 133 default: 134 134 return "UNKNOWN";
+1
arch/s390/include/asm/sclp.h
··· 172 172 void __sclp_early_printk(const char *s, unsigned int len); 173 173 void sclp_emergency_printk(const char *s); 174 174 175 + int sclp_init(void); 175 176 int sclp_early_get_memsize(unsigned long *mem); 176 177 int sclp_early_get_hsa_size(unsigned long *hsa_size); 177 178 int _sclp_get_core_info(struct sclp_core_info *info);
+309 -253
arch/s390/include/asm/uaccess.h
··· 22 22 23 23 void debug_user_asce(int exit); 24 24 25 - unsigned long __must_check 26 - raw_copy_from_user(void *to, const void __user *from, unsigned long n); 25 + union oac { 26 + unsigned int val; 27 + struct { 28 + struct { 29 + unsigned short key : 4; 30 + unsigned short : 4; 31 + unsigned short as : 2; 32 + unsigned short : 4; 33 + unsigned short k : 1; 34 + unsigned short a : 1; 35 + } oac1; 36 + struct { 37 + unsigned short key : 4; 38 + unsigned short : 4; 39 + unsigned short as : 2; 40 + unsigned short : 4; 41 + unsigned short k : 1; 42 + unsigned short a : 1; 43 + } oac2; 44 + }; 45 + }; 27 46 28 - unsigned long __must_check 29 - raw_copy_to_user(void __user *to, const void *from, unsigned long n); 47 + static __always_inline __must_check unsigned long 48 + raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key) 49 + { 50 + unsigned long rem; 51 + union oac spec = { 52 + .oac2.key = key, 53 + .oac2.as = PSW_BITS_AS_SECONDARY, 54 + .oac2.k = 1, 55 + .oac2.a = 1, 56 + }; 30 57 31 - #ifndef CONFIG_KASAN 32 - #define INLINE_COPY_FROM_USER 33 - #define INLINE_COPY_TO_USER 34 - #endif 58 + asm_inline volatile( 59 + " lr %%r0,%[spec]\n" 60 + "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 61 + "1: jz 5f\n" 62 + " algr %[size],%[val]\n" 63 + " slgr %[from],%[val]\n" 64 + " slgr %[to],%[val]\n" 65 + " j 0b\n" 66 + "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */ 67 + " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */ 68 + " slgr %[rem],%[from]\n" 69 + " clgr %[size],%[rem]\n" /* copy crosses next page boundary? 
*/ 70 + " jnh 6f\n" 71 + "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 72 + "4: slgr %[size],%[rem]\n" 73 + " j 6f\n" 74 + "5: lghi %[size],0\n" 75 + "6:\n" 76 + EX_TABLE(0b, 2b) 77 + EX_TABLE(1b, 2b) 78 + EX_TABLE(3b, 6b) 79 + EX_TABLE(4b, 6b) 80 + : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem) 81 + : [val] "a" (-4096UL), [spec] "d" (spec.val) 82 + : "cc", "memory", "0"); 83 + return size; 84 + } 85 + 86 + static __always_inline __must_check unsigned long 87 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 88 + { 89 + return raw_copy_from_user_key(to, from, n, 0); 90 + } 91 + 92 + static __always_inline __must_check unsigned long 93 + raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key) 94 + { 95 + unsigned long rem; 96 + union oac spec = { 97 + .oac1.key = key, 98 + .oac1.as = PSW_BITS_AS_SECONDARY, 99 + .oac1.k = 1, 100 + .oac1.a = 1, 101 + }; 102 + 103 + asm_inline volatile( 104 + " lr %%r0,%[spec]\n" 105 + "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 106 + "1: jz 5f\n" 107 + " algr %[size],%[val]\n" 108 + " slgr %[to],%[val]\n" 109 + " slgr %[from],%[val]\n" 110 + " j 0b\n" 111 + "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */ 112 + " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */ 113 + " slgr %[rem],%[to]\n" 114 + " clgr %[size],%[rem]\n" /* copy crosses next page boundary? 
*/ 115 + " jnh 6f\n" 116 + "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 117 + "4: slgr %[size],%[rem]\n" 118 + " j 6f\n" 119 + "5: lghi %[size],0\n" 120 + "6:\n" 121 + EX_TABLE(0b, 2b) 122 + EX_TABLE(1b, 2b) 123 + EX_TABLE(3b, 6b) 124 + EX_TABLE(4b, 6b) 125 + : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem) 126 + : [val] "a" (-4096UL), [spec] "d" (spec.val) 127 + : "cc", "memory", "0"); 128 + return size; 129 + } 130 + 131 + static __always_inline __must_check unsigned long 132 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 133 + { 134 + return raw_copy_to_user_key(to, from, n, 0); 135 + } 35 136 36 137 unsigned long __must_check 37 138 _copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key); ··· 156 55 return n; 157 56 } 158 57 159 - union oac { 160 - unsigned int val; 161 - struct { 162 - struct { 163 - unsigned short key : 4; 164 - unsigned short : 4; 165 - unsigned short as : 2; 166 - unsigned short : 4; 167 - unsigned short k : 1; 168 - unsigned short a : 1; 169 - } oac1; 170 - struct { 171 - unsigned short key : 4; 172 - unsigned short : 4; 173 - unsigned short as : 2; 174 - unsigned short : 4; 175 - unsigned short k : 1; 176 - unsigned short a : 1; 177 - } oac2; 178 - }; 179 - }; 180 - 181 58 int __noreturn __put_user_bad(void); 182 59 183 60 #ifdef CONFIG_KMSAN 184 - #define get_put_user_noinstr_attributes \ 185 - noinline __maybe_unused __no_sanitize_memory 61 + #define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory 186 62 #else 187 - #define get_put_user_noinstr_attributes __always_inline 63 + #define uaccess_kmsan_or_inline __always_inline 188 64 #endif 189 65 190 - #define DEFINE_PUT_USER(type) \ 191 - static get_put_user_noinstr_attributes int \ 66 + #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT 67 + 68 + #define DEFINE_PUT_USER_NOINSTR(type) \ 69 + static uaccess_kmsan_or_inline int \ 192 70 __put_user_##type##_noinstr(unsigned type __user *to, \ 193 71 
unsigned type *from, \ 194 72 unsigned long size) \ 195 73 { \ 196 - union oac __oac_spec = { \ 197 - .oac1.as = PSW_BITS_AS_SECONDARY, \ 198 - .oac1.a = 1, \ 199 - }; \ 74 + asm goto( \ 75 + " llilh %%r0,%[spec]\n" \ 76 + "0: mvcos %[to],%[from],%[size]\n" \ 77 + "1: nopr %%r7\n" \ 78 + EX_TABLE(0b, %l[Efault]) \ 79 + EX_TABLE(1b, %l[Efault]) \ 80 + : [to] "+Q" (*to) \ 81 + : [size] "d" (size), [from] "Q" (*from), \ 82 + [spec] "I" (0x81) \ 83 + : "cc", "0" \ 84 + : Efault \ 85 + ); \ 86 + return 0; \ 87 + Efault: \ 88 + return -EFAULT; \ 89 + } 90 + 91 + #else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ 92 + 93 + #define DEFINE_PUT_USER_NOINSTR(type) \ 94 + static uaccess_kmsan_or_inline int \ 95 + __put_user_##type##_noinstr(unsigned type __user *to, \ 96 + unsigned type *from, \ 97 + unsigned long size) \ 98 + { \ 200 99 int rc; \ 201 100 \ 202 101 asm volatile( \ 203 - " lr 0,%[spec]\n" \ 204 - "0: mvcos %[_to],%[_from],%[_size]\n" \ 205 - "1: xr %[rc],%[rc]\n" \ 102 + " llilh %%r0,%[spec]\n" \ 103 + "0: mvcos %[to],%[from],%[size]\n" \ 104 + "1: lhi %[rc],0\n" \ 206 105 "2:\n" \ 207 - EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ 208 - EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ 209 - : [rc] "=&d" (rc), [_to] "+Q" (*(to)) \ 210 - : [_size] "d" (size), [_from] "Q" (*(from)), \ 211 - [spec] "d" (__oac_spec.val) \ 106 + EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \ 107 + EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \ 108 + : [rc] "=d" (rc), [to] "+Q" (*to) \ 109 + : [size] "d" (size), [from] "Q" (*from), \ 110 + [spec] "I" (0x81) \ 212 111 : "cc", "0"); \ 213 112 return rc; \ 214 - } \ 215 - \ 113 + } 114 + 115 + #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ 116 + 117 + DEFINE_PUT_USER_NOINSTR(char); 118 + DEFINE_PUT_USER_NOINSTR(short); 119 + DEFINE_PUT_USER_NOINSTR(int); 120 + DEFINE_PUT_USER_NOINSTR(long); 121 + 122 + #define DEFINE_PUT_USER(type) \ 216 123 static __always_inline int \ 217 124 __put_user_##type(unsigned type __user *to, unsigned type *from, \ 218 125 unsigned long size) \ ··· 237 128 
DEFINE_PUT_USER(int); 238 129 DEFINE_PUT_USER(long); 239 130 240 - static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) 241 - { 242 - int rc; 131 + #define __put_user(x, ptr) \ 132 + ({ \ 133 + __typeof__(*(ptr)) __x = (x); \ 134 + int __prc; \ 135 + \ 136 + __chk_user_ptr(ptr); \ 137 + switch (sizeof(*(ptr))) { \ 138 + case 1: \ 139 + __prc = __put_user_char((unsigned char __user *)(ptr), \ 140 + (unsigned char *)&__x, \ 141 + sizeof(*(ptr))); \ 142 + break; \ 143 + case 2: \ 144 + __prc = __put_user_short((unsigned short __user *)(ptr),\ 145 + (unsigned short *)&__x, \ 146 + sizeof(*(ptr))); \ 147 + break; \ 148 + case 4: \ 149 + __prc = __put_user_int((unsigned int __user *)(ptr), \ 150 + (unsigned int *)&__x, \ 151 + sizeof(*(ptr))); \ 152 + break; \ 153 + case 8: \ 154 + __prc = __put_user_long((unsigned long __user *)(ptr), \ 155 + (unsigned long *)&__x, \ 156 + sizeof(*(ptr))); \ 157 + break; \ 158 + default: \ 159 + __prc = __put_user_bad(); \ 160 + break; \ 161 + } \ 162 + __builtin_expect(__prc, 0); \ 163 + }) 243 164 244 - switch (size) { 245 - case 1: 246 - rc = __put_user_char((unsigned char __user *)ptr, 247 - (unsigned char *)x, 248 - size); 249 - break; 250 - case 2: 251 - rc = __put_user_short((unsigned short __user *)ptr, 252 - (unsigned short *)x, 253 - size); 254 - break; 255 - case 4: 256 - rc = __put_user_int((unsigned int __user *)ptr, 257 - (unsigned int *)x, 258 - size); 259 - break; 260 - case 8: 261 - rc = __put_user_long((unsigned long __user *)ptr, 262 - (unsigned long *)x, 263 - size); 264 - break; 265 - default: 266 - __put_user_bad(); 267 - break; 268 - } 269 - return rc; 270 - } 165 + #define put_user(x, ptr) \ 166 + ({ \ 167 + might_fault(); \ 168 + __put_user(x, ptr); \ 169 + }) 271 170 272 171 int __noreturn __get_user_bad(void); 273 172 274 - #define DEFINE_GET_USER(type) \ 275 - static get_put_user_noinstr_attributes int \ 173 + #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT 174 + 175 + #define 
DEFINE_GET_USER_NOINSTR(type) \ 176 + static uaccess_kmsan_or_inline int \ 276 177 __get_user_##type##_noinstr(unsigned type *to, \ 277 - unsigned type __user *from, \ 178 + const unsigned type __user *from, \ 278 179 unsigned long size) \ 279 180 { \ 280 - union oac __oac_spec = { \ 281 - .oac2.as = PSW_BITS_AS_SECONDARY, \ 282 - .oac2.a = 1, \ 283 - }; \ 181 + asm goto( \ 182 + " lhi %%r0,%[spec]\n" \ 183 + "0: mvcos %[to],%[from],%[size]\n" \ 184 + "1: nopr %%r7\n" \ 185 + EX_TABLE(0b, %l[Efault]) \ 186 + EX_TABLE(1b, %l[Efault]) \ 187 + : [to] "=Q" (*to) \ 188 + : [size] "d" (size), [from] "Q" (*from), \ 189 + [spec] "I" (0x81) \ 190 + : "cc", "0" \ 191 + : Efault \ 192 + ); \ 193 + return 0; \ 194 + Efault: \ 195 + *to = 0; \ 196 + return -EFAULT; \ 197 + } 198 + 199 + #else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ 200 + 201 + #define DEFINE_GET_USER_NOINSTR(type) \ 202 + static uaccess_kmsan_or_inline int \ 203 + __get_user_##type##_noinstr(unsigned type *to, \ 204 + const unsigned type __user *from, \ 205 + unsigned long size) \ 206 + { \ 284 207 int rc; \ 285 208 \ 286 209 asm volatile( \ 287 - " lr 0,%[spec]\n" \ 288 - "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \ 289 - "1: xr %[rc],%[rc]\n" \ 210 + " lhi %%r0,%[spec]\n" \ 211 + "0: mvcos %[to],%[from],%[size]\n" \ 212 + "1: lhi %[rc],0\n" \ 290 213 "2:\n" \ 291 - EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \ 292 - EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \ 293 - : [rc] "=&d" (rc), "=Q" (*(to)) \ 294 - : [_size] "d" (size), [_from] "Q" (*(from)), \ 295 - [spec] "d" (__oac_spec.val), [_to] "a" (to), \ 296 - [_ksize] "K" (size) \ 214 + EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \ 215 + EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \ 216 + : [rc] "=d" (rc), [to] "=Q" (*to) \ 217 + : [size] "d" (size), [from] "Q" (*from), \ 218 + [spec] "I" (0x81) \ 297 219 : "cc", "0"); \ 220 + if (likely(!rc)) \ 221 + return 0; \ 222 + *to = 0; \ 298 223 return rc; \ 299 - } \ 300 - \ 224 + } 225 + 226 + #endif /* 
CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ 227 + 228 + DEFINE_GET_USER_NOINSTR(char); 229 + DEFINE_GET_USER_NOINSTR(short); 230 + DEFINE_GET_USER_NOINSTR(int); 231 + DEFINE_GET_USER_NOINSTR(long); 232 + 233 + #define DEFINE_GET_USER(type) \ 301 234 static __always_inline int \ 302 - __get_user_##type(unsigned type *to, unsigned type __user *from, \ 235 + __get_user_##type(unsigned type *to, const unsigned type __user *from, \ 303 236 unsigned long size) \ 304 237 { \ 305 238 int rc; \ ··· 356 205 DEFINE_GET_USER(int); 357 206 DEFINE_GET_USER(long); 358 207 359 - static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) 360 - { 361 - int rc; 362 - 363 - switch (size) { 364 - case 1: 365 - rc = __get_user_char((unsigned char *)x, 366 - (unsigned char __user *)ptr, 367 - size); 368 - break; 369 - case 2: 370 - rc = __get_user_short((unsigned short *)x, 371 - (unsigned short __user *)ptr, 372 - size); 373 - break; 374 - case 4: 375 - rc = __get_user_int((unsigned int *)x, 376 - (unsigned int __user *)ptr, 377 - size); 378 - break; 379 - case 8: 380 - rc = __get_user_long((unsigned long *)x, 381 - (unsigned long __user *)ptr, 382 - size); 383 - break; 384 - default: 385 - __get_user_bad(); 386 - break; 387 - } 388 - return rc; 389 - } 390 - 391 - /* 392 - * These are the main single-value transfer routines. They automatically 393 - * use the right size if we just have the right pointer type. 
394 - */ 395 - #define __put_user(x, ptr) \ 396 - ({ \ 397 - __typeof__(*(ptr)) __x = (x); \ 398 - int __pu_err = -EFAULT; \ 399 - \ 400 - __chk_user_ptr(ptr); \ 401 - switch (sizeof(*(ptr))) { \ 402 - case 1: \ 403 - case 2: \ 404 - case 4: \ 405 - case 8: \ 406 - __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \ 407 - break; \ 408 - default: \ 409 - __put_user_bad(); \ 410 - break; \ 411 - } \ 412 - __builtin_expect(__pu_err, 0); \ 413 - }) 414 - 415 - #define put_user(x, ptr) \ 416 - ({ \ 417 - might_fault(); \ 418 - __put_user(x, ptr); \ 419 - }) 420 - 421 208 #define __get_user(x, ptr) \ 422 209 ({ \ 423 - int __gu_err = -EFAULT; \ 210 + const __user void *____guptr = (ptr); \ 211 + int __grc; \ 424 212 \ 425 213 __chk_user_ptr(ptr); \ 426 214 switch (sizeof(*(ptr))) { \ 427 215 case 1: { \ 216 + const unsigned char __user *__guptr = ____guptr; \ 428 217 unsigned char __x; \ 429 218 \ 430 - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 219 + __grc = __get_user_char(&__x, __guptr, sizeof(*(ptr))); \ 431 220 (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 432 221 break; \ 433 222 }; \ 434 223 case 2: { \ 224 + const unsigned short __user *__guptr = ____guptr; \ 435 225 unsigned short __x; \ 436 226 \ 437 - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 227 + __grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\ 438 228 (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 439 229 break; \ 440 230 }; \ 441 231 case 4: { \ 232 + const unsigned int __user *__guptr = ____guptr; \ 442 233 unsigned int __x; \ 443 234 \ 444 - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 235 + __grc = __get_user_int(&__x, __guptr, sizeof(*(ptr))); \ 445 236 (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 446 237 break; \ 447 238 }; \ 448 239 case 8: { \ 240 + const unsigned long __user *__guptr = ____guptr; \ 449 241 unsigned long __x; \ 450 242 \ 451 - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 243 + __grc = __get_user_long(&__x, __guptr, 
sizeof(*(ptr))); \ 452 244 (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 453 245 break; \ 454 246 }; \ 455 247 default: \ 456 - __get_user_bad(); \ 248 + __grc = __get_user_bad(); \ 457 249 break; \ 458 250 } \ 459 - __builtin_expect(__gu_err, 0); \ 251 + __builtin_expect(__grc, 0); \ 460 252 }) 461 253 462 254 #define get_user(x, ptr) \ ··· 435 341 return __s390_kernel_write(dst, src, size); 436 342 } 437 343 438 - int __noreturn __put_kernel_bad(void); 344 + void __noreturn __mvc_kernel_nofault_bad(void); 439 345 440 - #define __put_kernel_asm(val, to, insn) \ 441 - ({ \ 442 - int __rc; \ 443 - \ 444 - asm volatile( \ 445 - "0: " insn " %[_val],%[_to]\n" \ 446 - "1: xr %[rc],%[rc]\n" \ 447 - "2:\n" \ 448 - EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ 449 - EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ 450 - : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \ 451 - : [_val] "d" (val) \ 452 - : "cc"); \ 453 - __rc; \ 454 - }) 346 + #if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS) 455 347 456 - #define __put_kernel_nofault(dst, src, type, err_label) \ 348 + #define __mvc_kernel_nofault(dst, src, type, err_label) \ 457 349 do { \ 458 - unsigned long __x = (unsigned long)(*((type *)(src))); \ 459 - int __pk_err; \ 350 + switch (sizeof(type)) { \ 351 + case 1: \ 352 + case 2: \ 353 + case 4: \ 354 + case 8: \ 355 + asm goto( \ 356 + "0: mvc %O[_dst](%[_len],%R[_dst]),%[_src]\n" \ 357 + "1: nopr %%r7\n" \ 358 + EX_TABLE(0b, %l[err_label]) \ 359 + EX_TABLE(1b, %l[err_label]) \ 360 + : [_dst] "=Q" (*(type *)dst) \ 361 + : [_src] "Q" (*(type *)(src)), \ 362 + [_len] "I" (sizeof(type)) \ 363 + : \ 364 + : err_label); \ 365 + break; \ 366 + default: \ 367 + __mvc_kernel_nofault_bad(); \ 368 + break; \ 369 + } \ 370 + } while (0) 371 + 372 + #else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 373 + 374 + #define __mvc_kernel_nofault(dst, src, type, err_label) \ 375 + do { \ 376 + type *(__dst) = (type *)(dst); \ 377 + int __rc; \ 460 378 \ 
461 379 switch (sizeof(type)) { \ 462 380 case 1: \ 463 - __pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \ 464 - break; \ 465 381 case 2: \ 466 - __pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \ 467 - break; \ 468 382 case 4: \ 469 - __pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \ 470 - break; \ 471 383 case 8: \ 472 - __pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \ 384 + asm_inline volatile( \ 385 + "0: mvc 0(%[_len],%[_dst]),%[_src]\n" \ 386 + "1: lhi %[_rc],0\n" \ 387 + "2:\n" \ 388 + EX_TABLE_UA_FAULT(0b, 2b, %[_rc]) \ 389 + EX_TABLE_UA_FAULT(1b, 2b, %[_rc]) \ 390 + : [_rc] "=d" (__rc), \ 391 + "=m" (*__dst) \ 392 + : [_src] "Q" (*(type *)(src)), \ 393 + [_dst] "a" (__dst), \ 394 + [_len] "I" (sizeof(type))); \ 395 + if (__rc) \ 396 + goto err_label; \ 473 397 break; \ 474 398 default: \ 475 - __pk_err = __put_kernel_bad(); \ 399 + __mvc_kernel_nofault_bad(); \ 476 400 break; \ 477 401 } \ 478 - if (unlikely(__pk_err)) \ 479 - goto err_label; \ 480 402 } while (0) 481 403 482 - int __noreturn __get_kernel_bad(void); 404 + #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ 483 405 484 - #define __get_kernel_asm(val, from, insn) \ 485 - ({ \ 486 - int __rc; \ 487 - \ 488 - asm volatile( \ 489 - "0: " insn " %[_val],%[_from]\n" \ 490 - "1: xr %[rc],%[rc]\n" \ 491 - "2:\n" \ 492 - EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \ 493 - EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \ 494 - : [rc] "=d" (__rc), [_val] "=d" (val) \ 495 - : [_from] "Q" (*(from)) \ 496 - : "cc"); \ 497 - __rc; \ 498 - }) 499 - 500 - #define __get_kernel_nofault(dst, src, type, err_label) \ 501 - do { \ 502 - int __gk_err; \ 503 - \ 504 - switch (sizeof(type)) { \ 505 - case 1: { \ 506 - unsigned char __x; \ 507 - \ 508 - __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \ 509 - *((type *)(dst)) = (type)__x; \ 510 - break; \ 511 - }; \ 512 - case 2: { \ 513 - unsigned short __x; \ 514 - \ 515 - __gk_err = 
__get_kernel_asm(__x, (type *)(src), "lh"); \ 516 - *((type *)(dst)) = (type)__x; \ 517 - break; \ 518 - }; \ 519 - case 4: { \ 520 - unsigned int __x; \ 521 - \ 522 - __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \ 523 - *((type *)(dst)) = (type)__x; \ 524 - break; \ 525 - }; \ 526 - case 8: { \ 527 - unsigned long __x; \ 528 - \ 529 - __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \ 530 - *((type *)(dst)) = (type)__x; \ 531 - break; \ 532 - }; \ 533 - default: \ 534 - __gk_err = __get_kernel_bad(); \ 535 - break; \ 536 - } \ 537 - if (unlikely(__gk_err)) \ 538 - goto err_label; \ 539 - } while (0) 406 + #define __get_kernel_nofault __mvc_kernel_nofault 407 + #define __put_kernel_nofault __mvc_kernel_nofault 540 408 541 409 void __cmpxchg_user_key_called_with_bad_pointer(void); 542 410
+2 -1
arch/s390/kernel/early.c
··· 50 50 decompressor_handled_param(nokaslr); 51 51 decompressor_handled_param(cmma); 52 52 decompressor_handled_param(relocate_lowcore); 53 + decompressor_handled_param(bootdebug); 53 54 #if IS_ENABLED(CONFIG_KVM) 54 55 decompressor_handled_param(prot_virt); 55 56 #endif ··· 59 58 { 60 59 #ifdef CONFIG_KASAN 61 60 init_task.kasan_depth = 0; 62 - sclp_early_printk("KernelAddressSanitizer initialized\n"); 61 + pr_info("KernelAddressSanitizer initialized\n"); 63 62 #endif 64 63 } 65 64
+34 -3
arch/s390/kernel/setup.c
··· 157 157 EXPORT_SYMBOL(stfle_fac_list); 158 158 struct oldmem_data __bootdata_preserved(oldmem_data); 159 159 160 + char __bootdata(boot_rb)[PAGE_SIZE * 2]; 161 + bool __bootdata(boot_earlyprintk); 162 + size_t __bootdata(boot_rb_off); 163 + char __bootdata(bootdebug_filter)[128]; 164 + bool __bootdata(bootdebug); 165 + 160 166 unsigned long __bootdata_preserved(VMALLOC_START); 161 167 EXPORT_SYMBOL(VMALLOC_START); 162 168 ··· 692 686 { 693 687 unsigned long addr, size; 694 688 695 - if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size)) 689 + if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size)) 696 690 memblock_reserve(addr, size); 697 691 } 698 692 ··· 700 694 { 701 695 unsigned long addr, size; 702 696 703 - if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size)) 697 + if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size)) 704 698 memblock_phys_free(addr, size); 705 699 } 706 700 ··· 730 724 void *lowcore_end = lowcore_start + sizeof(struct lowcore); 731 725 void *start, *end; 732 726 733 - if ((void *)__identity_base < lowcore_end) { 727 + if (absolute_pointer(__identity_base) < lowcore_end) { 734 728 start = max(lowcore_start, (void *)__identity_base); 735 729 end = min(lowcore_end, (void *)(__identity_base + ident_map_size)); 736 730 memblock_reserve(__pa(start), __pa(end)); ··· 872 866 } 873 867 874 868 /* 869 + * Print avoiding interpretation of % in buf and taking bootdebug option 870 + * into consideration. 871 + */ 872 + static void __init print_rb_entry(const char *buf) 873 + { 874 + char fmt[] = KERN_SOH "0boot: %s"; 875 + int level = printk_get_level(buf); 876 + 877 + buf = skip_timestamp(printk_skip_level(buf)); 878 + if (level == KERN_DEBUG[1] && (!bootdebug || !bootdebug_filter_match(buf))) 879 + return; 880 + 881 + fmt[1] = level; 882 + printk(fmt, buf); 883 + } 884 + 885 + /* 875 886 * Setup function called from init/main.c just after the banner 876 887 * was printed. 
877 888 */ ··· 907 884 pr_info("Linux is running natively in 64-bit mode\n"); 908 885 else 909 886 pr_info("Linux is running as a guest in 64-bit mode\n"); 887 + /* Print decompressor messages if not already printed */ 888 + if (!boot_earlyprintk) 889 + boot_rb_foreach(print_rb_entry); 910 890 911 891 if (have_relocated_lowcore()) 912 892 pr_info("Lowcore relocated to 0x%px\n", get_lowcore()); ··· 1012 986 1013 987 /* Add system specific data to the random pool */ 1014 988 setup_randomness(); 989 + } 990 + 991 + void __init arch_cpu_finalize_init(void) 992 + { 993 + sclp_init(); 1015 994 }
-1
arch/s390/kernel/vmlinux.lds.S
··· 52 52 SOFTIRQENTRY_TEXT 53 53 FTRACE_HOTPATCH_TRAMPOLINES_TEXT 54 54 *(.text.*_indirect_*) 55 - *(.fixup) 56 55 *(.gnu.warning) 57 56 . = ALIGN(PAGE_SIZE); 58 57 _etext = .; /* End of text section */
-90
arch/s390/lib/uaccess.c
··· 31 31 } 32 32 #endif /*CONFIG_DEBUG_ENTRY */ 33 33 34 - static unsigned long raw_copy_from_user_key(void *to, const void __user *from, 35 - unsigned long size, unsigned long key) 36 - { 37 - unsigned long rem; 38 - union oac spec = { 39 - .oac2.key = key, 40 - .oac2.as = PSW_BITS_AS_SECONDARY, 41 - .oac2.k = 1, 42 - .oac2.a = 1, 43 - }; 44 - 45 - asm volatile( 46 - " lr 0,%[spec]\n" 47 - "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 48 - "1: jz 5f\n" 49 - " algr %[size],%[val]\n" 50 - " slgr %[from],%[val]\n" 51 - " slgr %[to],%[val]\n" 52 - " j 0b\n" 53 - "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */ 54 - " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */ 55 - " slgr %[rem],%[from]\n" 56 - " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */ 57 - " jnh 6f\n" 58 - "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 59 - "4: slgr %[size],%[rem]\n" 60 - " j 6f\n" 61 - "5: slgr %[size],%[size]\n" 62 - "6:\n" 63 - EX_TABLE(0b, 2b) 64 - EX_TABLE(1b, 2b) 65 - EX_TABLE(3b, 6b) 66 - EX_TABLE(4b, 6b) 67 - : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem) 68 - : [val] "a" (-4096UL), [spec] "d" (spec.val) 69 - : "cc", "memory", "0"); 70 - return size; 71 - } 72 - 73 - unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) 74 - { 75 - return raw_copy_from_user_key(to, from, n, 0); 76 - } 77 - EXPORT_SYMBOL(raw_copy_from_user); 78 - 79 34 unsigned long _copy_from_user_key(void *to, const void __user *from, 80 35 unsigned long n, unsigned long key) 81 36 { ··· 47 92 return res; 48 93 } 49 94 EXPORT_SYMBOL(_copy_from_user_key); 50 - 51 - static unsigned long raw_copy_to_user_key(void __user *to, const void *from, 52 - unsigned long size, unsigned long key) 53 - { 54 - unsigned long rem; 55 - union oac spec = { 56 - .oac1.key = key, 57 - .oac1.as = PSW_BITS_AS_SECONDARY, 58 - .oac1.k = 1, 59 - .oac1.a = 1, 60 - }; 61 - 62 - asm volatile( 63 - " lr 0,%[spec]\n" 64 - "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 65 - 
"1: jz 5f\n" 66 - " algr %[size],%[val]\n" 67 - " slgr %[to],%[val]\n" 68 - " slgr %[from],%[val]\n" 69 - " j 0b\n" 70 - "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */ 71 - " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */ 72 - " slgr %[rem],%[to]\n" 73 - " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */ 74 - " jnh 6f\n" 75 - "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 76 - "4: slgr %[size],%[rem]\n" 77 - " j 6f\n" 78 - "5: slgr %[size],%[size]\n" 79 - "6:\n" 80 - EX_TABLE(0b, 2b) 81 - EX_TABLE(1b, 2b) 82 - EX_TABLE(3b, 6b) 83 - EX_TABLE(4b, 6b) 84 - : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem) 85 - : [val] "a" (-4096UL), [spec] "d" (spec.val) 86 - : "cc", "memory", "0"); 87 - return size; 88 - } 89 - 90 - unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) 91 - { 92 - return raw_copy_to_user_key(to, from, n, 0); 93 - } 94 - EXPORT_SYMBOL(raw_copy_to_user); 95 95 96 96 unsigned long _copy_to_user_key(void __user *to, const void *from, 97 97 unsigned long n, unsigned long key)
+13 -17
arch/s390/mm/extable.c
··· 7 7 #include <linux/panic.h> 8 8 #include <asm/asm-extable.h> 9 9 #include <asm/extable.h> 10 + #include <asm/fpu.h> 10 11 11 12 const struct exception_table_entry *s390_search_extables(unsigned long addr) 12 13 { ··· 27 26 return true; 28 27 } 29 28 30 - static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs) 29 + static bool ex_handler_ua_fault(const struct exception_table_entry *ex, struct pt_regs *regs) 31 30 { 32 31 unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); 33 32 34 33 regs->gprs[reg_err] = -EFAULT; 35 - regs->psw.addr = extable_fixup(ex); 36 - return true; 37 - } 38 - 39 - static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs) 40 - { 41 - unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data); 42 - unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); 43 - size_t len = FIELD_GET(EX_DATA_LEN, ex->data); 44 - 45 - regs->gprs[reg_err] = -EFAULT; 46 - memset((void *)regs->gprs[reg_addr], 0, len); 47 34 regs->psw.addr = extable_fixup(ex); 48 35 return true; 49 36 } ··· 66 77 return true; 67 78 } 68 79 80 + static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_regs *regs) 81 + { 82 + fpu_sfpc(0); 83 + regs->psw.addr = extable_fixup(ex); 84 + return true; 85 + } 86 + 69 87 bool fixup_exception(struct pt_regs *regs) 70 88 { 71 89 const struct exception_table_entry *ex; ··· 85 89 return ex_handler_fixup(ex, regs); 86 90 case EX_TYPE_BPF: 87 91 return ex_handler_bpf(ex, regs); 88 - case EX_TYPE_UA_STORE: 89 - return ex_handler_ua_store(ex, regs); 90 - case EX_TYPE_UA_LOAD_MEM: 91 - return ex_handler_ua_load_mem(ex, regs); 92 + case EX_TYPE_UA_FAULT: 93 + return ex_handler_ua_fault(ex, regs); 92 94 case EX_TYPE_UA_LOAD_REG: 93 95 return ex_handler_ua_load_reg(ex, false, regs); 94 96 case EX_TYPE_UA_LOAD_REGPAIR: 95 97 return ex_handler_ua_load_reg(ex, true, regs); 96 98 case EX_TYPE_ZEROPAD: 97 99 return ex_handler_zeropad(ex, 
regs); 100 + case EX_TYPE_FPC: 101 + return ex_handler_fpc(ex, regs); 98 102 } 99 103 panic("invalid exception table entry"); 100 104 }
+1 -1
arch/s390/mm/vmem.c
··· 662 662 if (!static_key_enabled(&cpu_has_bear)) 663 663 set_memory_x(0, 1); 664 664 if (debug_pagealloc_enabled()) 665 - __set_memory_4k(__va(0), __va(0) + ident_map_size); 665 + __set_memory_4k(__va(0), absolute_pointer(__va(0)) + ident_map_size); 666 666 pr_info("Write protected kernel read-only data: %luk\n", 667 667 (unsigned long)(__end_rodata - _stext) >> 10); 668 668 }
-1
arch/s390/pci/pci_bus.c
··· 171 171 static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev) 172 172 { 173 173 return !s390_pci_no_rid && zdev->rid_available && 174 - zpci_is_device_configured(zdev) && 175 174 !zdev->vfn; 176 175 } 177 176
+1 -1
arch/s390/purgatory/Makefile
··· 13 13 $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE 14 14 $(call if_changed_rule,as_o_S) 15 15 16 - KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes 16 + KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes 17 17 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare 18 18 KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding 19 19 KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
+21 -6
arch/s390/tools/gen_opcode_table.c
··· 201 201 return strcmp(((struct insn *)a)->name, ((struct insn *)b)->name); 202 202 } 203 203 204 + static void print_insn_name(const char *name) 205 + { 206 + size_t i, len; 207 + 208 + len = strlen(name); 209 + printf("{"); 210 + for (i = 0; i < len; i++) 211 + printf(" \'%c\',", name[i]); 212 + printf(" }"); 213 + } 214 + 204 215 static void print_long_insn(struct gen_opcode *desc) 205 216 { 206 217 struct insn *insn; ··· 234 223 insn = &desc->insn[i]; 235 224 if (insn->name_len < 6) 236 225 continue; 237 - printf("\t[LONG_INSN_%s] = \"%s\", \\\n", insn->upper, insn->name); 226 + printf("\t[LONG_INSN_%s] = ", insn->upper); 227 + print_insn_name(insn->name); 228 + printf(", \\\n"); 238 229 } 239 230 printf("}\n\n"); 240 231 } ··· 249 236 if (insn->type->byte != 0) 250 237 opcode += 2; 251 238 printf("\t[%4d] = { .opfrag = 0x%s, .format = INSTR_%s, ", nr, opcode, insn->format); 252 - if (insn->name_len < 6) 253 - printf(".name = \"%s\" ", insn->name); 254 - else 255 - printf(".offset = LONG_INSN_%s ", insn->upper); 256 - printf("}, \\\n"); 239 + if (insn->name_len < 6) { 240 + printf(".name = "); 241 + print_insn_name(insn->name); 242 + } else { 243 + printf(".offset = LONG_INSN_%s", insn->upper); 244 + } 245 + printf(" }, \\\n"); 257 246 } 258 247 259 248 static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
+2 -10
drivers/s390/char/sclp.c
··· 245 245 static void sclp_process_queue(void); 246 246 static void __sclp_make_read_req(void); 247 247 static int sclp_init_mask(int calculate); 248 - static int sclp_init(void); 249 248 250 249 static void 251 250 __sclp_queue_read_req(void) ··· 1250 1251 1251 1252 /* Initialize SCLP driver. Return zero if driver is operational, non-zero 1252 1253 * otherwise. */ 1253 - static int 1254 - sclp_init(void) 1254 + int sclp_init(void) 1255 1255 { 1256 1256 unsigned long flags; 1257 1257 int rc = 0; ··· 1303 1305 1304 1306 static __init int sclp_initcall(void) 1305 1307 { 1306 - int rc; 1307 - 1308 - rc = platform_driver_register(&sclp_pdrv); 1309 - if (rc) 1310 - return rc; 1311 - 1312 - return sclp_init(); 1308 + return platform_driver_register(&sclp_pdrv); 1313 1309 } 1314 1310 1315 1311 arch_initcall(sclp_initcall);
+4 -4
drivers/s390/char/vmlogrdr.c
··· 123 123 */ 124 124 125 125 static struct vmlogrdr_priv_t sys_ser[] = { 126 - { .system_service = "*LOGREC ", 126 + { .system_service = { '*', 'L', 'O', 'G', 'R', 'E', 'C', ' ' }, 127 127 .internal_name = "logrec", 128 128 .recording_name = "EREP", 129 129 .minor_num = 0, ··· 132 132 .autorecording = 1, 133 133 .autopurge = 1, 134 134 }, 135 - { .system_service = "*ACCOUNT", 135 + { .system_service = { '*', 'A', 'C', 'C', 'O', 'U', 'N', 'T' }, 136 136 .internal_name = "account", 137 137 .recording_name = "ACCOUNT", 138 138 .minor_num = 1, ··· 141 141 .autorecording = 1, 142 142 .autopurge = 1, 143 143 }, 144 - { .system_service = "*SYMPTOM", 144 + { .system_service = { '*', 'S', 'Y', 'M', 'P', 'T', 'O', 'M' }, 145 145 .internal_name = "symptom", 146 146 .recording_name = "SYMPTOM", 147 147 .minor_num = 2, ··· 356 356 if (connect_rc) { 357 357 pr_err("vmlogrdr: iucv connection to %s " 358 358 "failed with rc %i \n", 359 - logptr->system_service, connect_rc); 359 + logptr->internal_name, connect_rc); 360 360 goto out_path; 361 361 } 362 362