Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:

- A couple of virtual vs physical address confusion fixes

- Rework locking in dcssblk driver to address a lockdep warning

- Remove support for "noexec" kernel command line option since there is
no use case where it would make sense

- Simplify kernel mapping setup and get rid of quite a bit of code

- Add architecture specific __set_memory_yy() functions which allow us
to modify kernel mappings. Unlike the set_memory_xx() variants they
take void pointer start and end parameters, which allows using them
without the usual casts, and also to use them on areas larger than
8TB.

Note that the set_memory_xx() family comes with an int num_pages
parameter which overflows with 8TB. This could be addressed by
changing the num_pages parameter to unsigned long; however, this
requires changing all architectures, since the module code expects an int
parameter (see module_set_memory()).

This was indeed an issue since for debug_pagealloc() we call
set_memory_4k() on the whole identity mapping. Therefore address this
for now with the __set_memory_yy() variant, and address common code
later.

- Use dev_set_name() and also fix memory leak in zcrypt driver error
handling

- Remove unused lsi_mask from airq_struct

- Add warning for invalid kernel mapping requests

* tag 's390-6.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/vmem: do not silently ignore mapping limit
s390/zcrypt: utilize dev_set_name() ability to use a formatted string
s390/zcrypt: don't leak memory if dev_set_name() fails
s390/mm: fix MAX_DMA_ADDRESS physical vs virtual confusion
s390/airq: remove lsi_mask from airq_struct
s390/mm: use __set_memory() variants where useful
s390/set_memory: add __set_memory() variant
s390/set_memory: generate all set_memory() functions
s390/mm: improve description of mapping permissions of prefix pages
s390/amode31: change type of __samode31, __eamode31, etc
s390/mm: simplify kernel mapping setup
s390: remove "noexec" option
s390/vmem: fix virtual vs physical address confusion
s390/dcssblk: fix lockdep warning
s390/monreader: fix virtual vs physical address confusion

+99 -224
-7
arch/s390/boot/ipl_parm.c
··· 19 19 }; 20 20 21 21 char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; 22 - int __bootdata(noexec_disabled); 23 22 24 23 unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL; 25 24 struct ipl_parameter_block __bootdata_preserved(ipl_block); ··· 287 288 zlib_dfltcc_support = ZLIB_DFLTCC_INFLATE_ONLY; 288 289 else if (!strcmp(val, "always")) 289 290 zlib_dfltcc_support = ZLIB_DFLTCC_FULL_DEBUG; 290 - } 291 - 292 - if (!strcmp(param, "noexec")) { 293 - rc = kstrtobool(val, &enabled); 294 - if (!rc && !enabled) 295 - noexec_disabled = 1; 296 291 } 297 292 298 293 if (!strcmp(param, "facilities") && val)
+1 -3
arch/s390/boot/startup.c
··· 53 53 } 54 54 if (test_facility(78)) 55 55 machine.has_edat2 = 1; 56 - if (!noexec_disabled && test_facility(130)) { 56 + if (test_facility(130)) 57 57 machine.has_nx = 1; 58 - __ctl_set_bit(0, 20); 59 - } 60 58 } 61 59 62 60 static void setup_lpp(void)
+9 -3
arch/s390/boot/vmem.c
··· 287 287 if (kasan_pte_populate_zero_shadow(pte, mode)) 288 288 continue; 289 289 entry = __pte(_pa(addr, PAGE_SIZE, mode)); 290 - entry = set_pte_bit(entry, PAGE_KERNEL_EXEC); 290 + entry = set_pte_bit(entry, PAGE_KERNEL); 291 + if (!machine.has_nx) 292 + entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC)); 291 293 set_pte(pte, entry); 292 294 pages++; 293 295 } ··· 313 311 continue; 314 312 if (can_large_pmd(pmd, addr, next)) { 315 313 entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode)); 316 - entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC); 314 + entry = set_pmd_bit(entry, SEGMENT_KERNEL); 315 + if (!machine.has_nx) 316 + entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC)); 317 317 set_pmd(pmd, entry); 318 318 pages++; 319 319 continue; ··· 346 342 continue; 347 343 if (can_large_pud(pud, addr, next)) { 348 344 entry = __pud(_pa(addr, _REGION3_SIZE, mode)); 349 - entry = set_pud_bit(entry, REGION3_KERNEL_EXEC); 345 + entry = set_pud_bit(entry, REGION3_KERNEL); 346 + if (!machine.has_nx) 347 + entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC)); 350 348 set_pud(pud, entry); 351 349 pages++; 352 350 continue;
-1
arch/s390/include/asm/airq.h
··· 18 18 struct hlist_node list; /* Handler queueing. */ 19 19 void (*handler)(struct airq_struct *airq, struct tpi_info *tpi_info); 20 20 u8 *lsi_ptr; /* Local-Summary-Indicator pointer */ 21 - u8 lsi_mask; /* Local-Summary-Indicator mask */ 22 21 u8 isc; /* Interrupt-subclass */ 23 22 u8 flags; 24 23 };
+1 -1
arch/s390/include/asm/dma.h
··· 9 9 * to DMA. It _is_ used for the s390 memory zone split at 2GB caused 10 10 * by the 31 bit heritage. 11 11 */ 12 - #define MAX_DMA_ADDRESS 0x80000000 12 + #define MAX_DMA_ADDRESS __va(0x80000000) 13 13 14 14 #endif /* _ASM_S390_DMA_H */
+2 -2
arch/s390/include/asm/sections.h
··· 23 23 */ 24 24 #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var 25 25 26 - extern unsigned long __samode31, __eamode31; 27 - extern unsigned long __stext_amode31, __etext_amode31; 26 + extern char *__samode31, *__eamode31; 27 + extern char *__stext_amode31, *__etext_amode31; 28 28 29 29 #endif
+31 -33
arch/s390/include/asm/set_memory.h
··· 24 24 #define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT) 25 25 #define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT) 26 26 27 - int __set_memory(unsigned long addr, int numpages, unsigned long flags); 28 - 29 - static inline int set_memory_ro(unsigned long addr, int numpages) 30 - { 31 - return __set_memory(addr, numpages, SET_MEMORY_RO); 32 - } 33 - 34 - static inline int set_memory_rw(unsigned long addr, int numpages) 35 - { 36 - return __set_memory(addr, numpages, SET_MEMORY_RW); 37 - } 38 - 39 - static inline int set_memory_nx(unsigned long addr, int numpages) 40 - { 41 - return __set_memory(addr, numpages, SET_MEMORY_NX); 42 - } 43 - 44 - static inline int set_memory_x(unsigned long addr, int numpages) 45 - { 46 - return __set_memory(addr, numpages, SET_MEMORY_X); 47 - } 27 + int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags); 48 28 49 29 #define set_memory_rox set_memory_rox 50 - static inline int set_memory_rox(unsigned long addr, int numpages) 51 - { 52 - return __set_memory(addr, numpages, SET_MEMORY_RO | SET_MEMORY_X); 30 + 31 + /* 32 + * Generate two variants of each set_memory() function: 33 + * 34 + * set_memory_yy(unsigned long addr, int numpages); 35 + * __set_memory_yy(void *start, void *end); 36 + * 37 + * The second variant exists for both convenience to avoid the usual 38 + * (unsigned long) casts, but unlike the first variant it can also be used 39 + * for areas larger than 8TB, which may happen at memory initialization. 
40 + */ 41 + #define __SET_MEMORY_FUNC(fname, flags) \ 42 + static inline int fname(unsigned long addr, int numpages) \ 43 + { \ 44 + return __set_memory(addr, numpages, (flags)); \ 45 + } \ 46 + \ 47 + static inline int __##fname(void *start, void *end) \ 48 + { \ 49 + unsigned long numpages; \ 50 + \ 51 + numpages = (end - start) >> PAGE_SHIFT; \ 52 + return __set_memory((unsigned long)start, numpages, (flags)); \ 53 53 } 54 54 55 - static inline int set_memory_rwnx(unsigned long addr, int numpages) 56 - { 57 - return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX); 58 - } 59 - 60 - static inline int set_memory_4k(unsigned long addr, int numpages) 61 - { 62 - return __set_memory(addr, numpages, SET_MEMORY_4K); 63 - } 55 + __SET_MEMORY_FUNC(set_memory_ro, SET_MEMORY_RO) 56 + __SET_MEMORY_FUNC(set_memory_rw, SET_MEMORY_RW) 57 + __SET_MEMORY_FUNC(set_memory_nx, SET_MEMORY_NX) 58 + __SET_MEMORY_FUNC(set_memory_x, SET_MEMORY_X) 59 + __SET_MEMORY_FUNC(set_memory_rox, SET_MEMORY_RO | SET_MEMORY_X) 60 + __SET_MEMORY_FUNC(set_memory_rwnx, SET_MEMORY_RW | SET_MEMORY_NX) 61 + __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K) 64 62 65 63 int set_direct_map_invalid_noflush(struct page *page); 66 64 int set_direct_map_default_noflush(struct page *page);
-1
arch/s390/include/asm/setup.h
··· 72 72 #define ZLIB_DFLTCC_INFLATE_ONLY 3 73 73 #define ZLIB_DFLTCC_FULL_DEBUG 4 74 74 75 - extern int noexec_disabled; 76 75 extern unsigned long ident_map_size; 77 76 extern unsigned long max_mappable; 78 77
+1 -4
arch/s390/kernel/early.c
··· 44 44 decompressor_handled_param(mem); 45 45 decompressor_handled_param(vmalloc); 46 46 decompressor_handled_param(dfltcc); 47 - decompressor_handled_param(noexec); 48 47 decompressor_handled_param(facilities); 49 48 decompressor_handled_param(nokaslr); 50 49 #if IS_ENABLED(CONFIG_KVM) ··· 232 233 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 233 234 __ctl_set_bit(0, 17); 234 235 } 235 - if (test_facility(130) && !noexec_disabled) { 236 + if (test_facility(130)) 236 237 S390_lowcore.machine_flags |= MACHINE_FLAG_NX; 237 - __ctl_set_bit(0, 20); 238 - } 239 238 if (test_facility(133)) 240 239 S390_lowcore.machine_flags |= MACHINE_FLAG_GS; 241 240 if (test_facility(139) && (tod_clock_base.tod >> 63)) {
+2 -2
arch/s390/kernel/machine_kexec.c
··· 216 216 VMCOREINFO_SYMBOL(lowcore_ptr); 217 217 VMCOREINFO_SYMBOL(high_memory); 218 218 VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); 219 - vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31); 220 - vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31); 219 + vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31); 220 + vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31); 221 221 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); 222 222 abs_lc = get_abs_lowcore(); 223 223 abs_lc->vmcore_info = paddr_vmcoreinfo_note();
+8 -9
arch/s390/kernel/setup.c
··· 97 97 * relocated above 2 GB, because it has to use 31 bit addresses. 98 98 * Such code and data is part of the .amode31 section. 99 99 */ 100 - unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31; 101 - unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31; 102 - unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31; 103 - unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31; 100 + char __amode31_ref *__samode31 = _samode31; 101 + char __amode31_ref *__eamode31 = _eamode31; 102 + char __amode31_ref *__stext_amode31 = _stext_amode31; 103 + char __amode31_ref *__etext_amode31 = _etext_amode31; 104 104 struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table; 105 105 struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table; 106 106 ··· 145 145 static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31; 146 146 static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31; 147 147 148 - int __bootdata(noexec_disabled); 149 148 unsigned long __bootdata_preserved(max_mappable); 150 149 unsigned long __bootdata(ident_map_size); 151 150 struct physmem_info __bootdata(physmem_info); ··· 770 771 static void __init relocate_amode31_section(void) 771 772 { 772 773 unsigned long amode31_size = __eamode31 - __samode31; 773 - long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31; 774 - long *ptr; 774 + long amode31_offset, *ptr; 775 775 776 + amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31; 776 777 pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); 777 778 778 779 /* Move original AMODE31 section to the new one */ 779 - memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size); 780 + memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size); 780 781 /* Zero out the old AMODE31 
section to catch invalid accesses within it */ 781 - memset((void *)__samode31, 0, amode31_size); 782 + memset(__samode31, 0, amode31_size); 782 783 783 784 /* Update all AMODE31 region references */ 784 785 for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
+2 -1
arch/s390/kvm/interrupt.c
··· 3398 3398 3399 3399 static struct airq_struct gib_alert_irq = { 3400 3400 .handler = gib_alert_irq_handler, 3401 - .lsi_ptr = &gib_alert_irq.lsi_mask, 3402 3401 }; 3403 3402 3404 3403 void kvm_s390_gib_destroy(void) ··· 3437 3438 rc = -EIO; 3438 3439 goto out_free_gib; 3439 3440 } 3441 + /* adapter interrupts used for AP (applicable here) don't use the LSI */ 3442 + *gib_alert_irq.lsi_ptr = 0xff; 3440 3443 3441 3444 gib->nisc = nisc; 3442 3445 gib_origin = virt_to_phys(gib);
+2 -2
arch/s390/mm/dump_pagetables.c
··· 290 290 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; 291 291 max_addr = 1UL << (max_addr * 11 + 31); 292 292 address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size; 293 - address_markers[AMODE31_START_NR].start_address = __samode31; 294 - address_markers[AMODE31_END_NR].start_address = __eamode31; 293 + address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31; 294 + address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31; 295 295 address_markers[MODULES_NR].start_address = MODULES_VADDR; 296 296 address_markers[MODULES_END_NR].start_address = MODULES_END; 297 297 address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
+2 -2
arch/s390/mm/init.c
··· 98 98 sparse_init(); 99 99 zone_dma_bits = 31; 100 100 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 101 - max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); 101 + max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS); 102 102 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 103 103 free_area_init(max_zone_pfns); 104 104 } ··· 107 107 { 108 108 unsigned long size = __end_ro_after_init - __start_ro_after_init; 109 109 110 - set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT); 110 + __set_memory_ro(__start_ro_after_init, __end_ro_after_init); 111 111 pr_info("Write protected read-only-after-init data: %luk\n", size >> 10); 112 112 debug_checkwx(); 113 113 }
+1 -1
arch/s390/mm/pageattr.c
··· 373 373 return rc; 374 374 } 375 375 376 - int __set_memory(unsigned long addr, int numpages, unsigned long flags) 376 + int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags) 377 377 { 378 378 unsigned long end; 379 379 int rc;
+21 -127
arch/s390/mm/vmem.c
··· 5 5 6 6 #include <linux/memory_hotplug.h> 7 7 #include <linux/memblock.h> 8 - #include <linux/kasan.h> 9 8 #include <linux/pfn.h> 10 9 #include <linux/mm.h> 11 10 #include <linux/init.h> ··· 290 291 291 292 static void try_free_pmd_table(pud_t *pud, unsigned long start) 292 293 { 293 - const unsigned long end = start + PUD_SIZE; 294 294 pmd_t *pmd; 295 295 int i; 296 - 297 - /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ 298 - if (end > VMALLOC_START) 299 - return; 300 296 301 297 pmd = pmd_offset(pud, start); 302 298 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) ··· 357 363 358 364 static void try_free_pud_table(p4d_t *p4d, unsigned long start) 359 365 { 360 - const unsigned long end = start + P4D_SIZE; 361 366 pud_t *pud; 362 367 int i; 363 - 364 - /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ 365 - if (end > VMALLOC_START) 366 - return; 367 368 368 369 pud = pud_offset(p4d, start); 369 370 for (i = 0; i < PTRS_PER_PUD; i++, pud++) { ··· 402 413 403 414 static void try_free_p4d_table(pgd_t *pgd, unsigned long start) 404 415 { 405 - const unsigned long end = start + PGDIR_SIZE; 406 416 p4d_t *p4d; 407 417 int i; 408 - 409 - /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ 410 - if (end > VMALLOC_START) 411 - return; 412 418 413 419 p4d = p4d_offset(pgd, start); 414 420 for (i = 0; i < PTRS_PER_P4D; i++, p4d++) { ··· 423 439 p4d_t *p4d; 424 440 425 441 if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end))) 442 + return -EINVAL; 443 + /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ 444 + if (WARN_ON_ONCE(end > VMALLOC_START)) 426 445 return -EINVAL; 427 446 for (addr = start; addr < end; addr = next) { 428 447 next = pgd_addr_end(addr, end); ··· 637 650 mutex_unlock(&vmem_mutex); 638 651 } 639 652 640 - static int __init memblock_region_cmp(const void *a, const void *b) 641 - { 642 - const struct memblock_region *r1 = a; 643 - const struct memblock_region *r2 = b; 644 - 645 
- if (r1->base < r2->base) 646 - return -1; 647 - if (r1->base > r2->base) 648 - return 1; 649 - return 0; 650 - } 651 - 652 - static void __init memblock_region_swap(void *a, void *b, int size) 653 - { 654 - swap(*(struct memblock_region *)a, *(struct memblock_region *)b); 655 - } 656 - 657 - #ifdef CONFIG_KASAN 658 - #define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x)) 659 - 660 - static inline int set_memory_kasan(unsigned long start, unsigned long end) 661 - { 662 - start = PAGE_ALIGN_DOWN(__sha(start)); 663 - end = PAGE_ALIGN(__sha(end)); 664 - return set_memory_rwnx(start, (end - start) >> PAGE_SHIFT); 665 - } 666 - #endif 667 - 668 - /* 669 - * map whole physical memory to virtual memory (identity mapping) 670 - * we reserve enough space in the vmalloc area for vmemmap to hotplug 671 - * additional memory segments. 672 - */ 673 653 void __init vmem_map_init(void) 674 654 { 675 - struct memblock_region memory_rwx_regions[] = { 676 - { 677 - .base = 0, 678 - .size = sizeof(struct lowcore), 679 - .flags = MEMBLOCK_NONE, 680 - #ifdef CONFIG_NUMA 681 - .nid = NUMA_NO_NODE, 682 - #endif 683 - }, 684 - { 685 - .base = __pa(_stext), 686 - .size = _etext - _stext, 687 - .flags = MEMBLOCK_NONE, 688 - #ifdef CONFIG_NUMA 689 - .nid = NUMA_NO_NODE, 690 - #endif 691 - }, 692 - { 693 - .base = __pa(_sinittext), 694 - .size = _einittext - _sinittext, 695 - .flags = MEMBLOCK_NONE, 696 - #ifdef CONFIG_NUMA 697 - .nid = NUMA_NO_NODE, 698 - #endif 699 - }, 700 - { 701 - .base = __stext_amode31, 702 - .size = __etext_amode31 - __stext_amode31, 703 - .flags = MEMBLOCK_NONE, 704 - #ifdef CONFIG_NUMA 705 - .nid = NUMA_NO_NODE, 706 - #endif 707 - }, 708 - }; 709 - struct memblock_type memory_rwx = { 710 - .regions = memory_rwx_regions, 711 - .cnt = ARRAY_SIZE(memory_rwx_regions), 712 - .max = ARRAY_SIZE(memory_rwx_regions), 713 - }; 714 - phys_addr_t base, end; 715 - u64 i; 716 - 655 + __set_memory_rox(_stext, _etext); 656 + __set_memory_ro(_etext, __end_rodata); 657 + 
__set_memory_rox(_sinittext, _einittext); 658 + __set_memory_rox(__stext_amode31, __etext_amode31); 717 659 /* 718 - * Set RW+NX attribute on all memory, except regions enumerated with 719 - * memory_rwx exclude type. These regions need different attributes, 720 - * which are enforced afterwards. 721 - * 722 - * __for_each_mem_range() iterate and exclude types should be sorted. 723 - * The relative location of _stext and _sinittext is hardcoded in the 724 - * linker script. However a location of __stext_amode31 and the kernel 725 - * image itself are chosen dynamically. Thus, sort the exclude type. 660 + * If the BEAR-enhancement facility is not installed the first 661 + * prefix page is used to return to the previous context with 662 + * an LPSWE instruction and therefore must be executable. 726 663 */ 727 - sort(&memory_rwx_regions, 728 - ARRAY_SIZE(memory_rwx_regions), sizeof(memory_rwx_regions[0]), 729 - memblock_region_cmp, memblock_region_swap); 730 - __for_each_mem_range(i, &memblock.memory, &memory_rwx, 731 - NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) { 732 - set_memory_rwnx((unsigned long)__va(base), 733 - (end - base) >> PAGE_SHIFT); 664 + if (!static_key_enabled(&cpu_has_bear)) 665 + set_memory_x(0, 1); 666 + if (debug_pagealloc_enabled()) { 667 + /* 668 + * Use RELOC_HIDE() as long as __va(0) translates to NULL, 669 + * since performing pointer arithmetic on a NULL pointer 670 + * has undefined behavior and generates compiler warnings. 
671 + */ 672 + __set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size)); 734 673 } 735 - 736 - #ifdef CONFIG_KASAN 737 - for_each_mem_range(i, &base, &end) 738 - set_memory_kasan(base, end); 739 - #endif 740 - set_memory_rox((unsigned long)_stext, 741 - (unsigned long)(_etext - _stext) >> PAGE_SHIFT); 742 - set_memory_ro((unsigned long)_etext, 743 - (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT); 744 - set_memory_rox((unsigned long)_sinittext, 745 - (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT); 746 - set_memory_rox(__stext_amode31, 747 - (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT); 748 - 749 - /* lowcore must be executable for LPSWE */ 750 - if (static_key_enabled(&cpu_has_bear)) 751 - set_memory_nx(0, 1); 752 - set_memory_nx(PAGE_SIZE, 1); 753 - if (debug_pagealloc_enabled()) 754 - set_memory_4k(0, ident_map_size >> PAGE_SHIFT); 755 - 674 + if (MACHINE_HAS_NX) 675 + ctl_set_bit(0, 20); 756 676 pr_info("Write protected kernel read-only data: %luk\n", 757 677 (unsigned long)(__end_rodata - _stext) >> 10); 758 678 }
+6 -7
drivers/s390/block/dcssblk.c
··· 411 411 segment_unload(entry->segment_name); 412 412 } 413 413 list_del(&dev_info->lh); 414 + up_write(&dcssblk_devices_sem); 414 415 415 416 dax_remove_host(dev_info->gd); 416 417 kill_dax(dev_info->dax_dev); 417 418 put_dax(dev_info->dax_dev); 418 419 del_gendisk(dev_info->gd); 419 420 put_disk(dev_info->gd); 420 - up_write(&dcssblk_devices_sem); 421 421 422 422 if (device_remove_file_self(dev, attr)) { 423 423 device_unregister(dev); ··· 790 790 } 791 791 792 792 list_del(&dev_info->lh); 793 + /* unload all related segments */ 794 + list_for_each_entry(entry, &dev_info->seg_list, lh) 795 + segment_unload(entry->segment_name); 796 + up_write(&dcssblk_devices_sem); 797 + 793 798 dax_remove_host(dev_info->gd); 794 799 kill_dax(dev_info->dax_dev); 795 800 put_dax(dev_info->dax_dev); 796 801 del_gendisk(dev_info->gd); 797 802 put_disk(dev_info->gd); 798 - 799 - /* unload all related segments */ 800 - list_for_each_entry(entry, &dev_info->seg_list, lh) 801 - segment_unload(entry->segment_name); 802 - 803 - up_write(&dcssblk_devices_sem); 804 803 805 804 device_unregister(&dev_info->dev); 806 805 put_device(&dev_info->dev);
+5 -7
drivers/s390/char/monreader.c
··· 111 111 112 112 static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index) 113 113 { 114 - return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); 114 + return *((u8 *)__va(mon_mca_start(monmsg)) + monmsg->mca_offset + index); 115 115 } 116 116 117 117 static inline u32 mon_mca_size(struct mon_msg *monmsg) ··· 121 121 122 122 static inline u32 mon_rec_start(struct mon_msg *monmsg) 123 123 { 124 - return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); 124 + return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 4)); 125 125 } 126 126 127 127 static inline u32 mon_rec_end(struct mon_msg *monmsg) 128 128 { 129 - return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); 129 + return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 8)); 130 130 } 131 131 132 132 static int mon_check_mca(struct mon_msg *monmsg) ··· 392 392 mce_start = mon_mca_start(monmsg) + monmsg->mca_offset; 393 393 if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) { 394 394 count = min(count, (size_t) mce_start + 12 - monmsg->pos); 395 - ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos, 396 - count); 395 + ret = copy_to_user(data, __va(monmsg->pos), count); 397 396 if (ret) 398 397 return -EFAULT; 399 398 monmsg->pos += count; ··· 405 406 if (monmsg->pos <= mon_rec_end(monmsg)) { 406 407 count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos 407 408 + 1); 408 - ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos, 409 - count); 409 + ret = copy_to_user(data, __va(monmsg->pos), count); 410 410 if (ret) 411 411 return -EFAULT; 412 412 monmsg->pos += count;
+1 -3
drivers/s390/cio/airq.c
··· 49 49 return -ENOMEM; 50 50 airq->flags |= AIRQ_PTR_ALLOCATED; 51 51 } 52 - if (!airq->lsi_mask) 53 - airq->lsi_mask = 0xff; 54 52 snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq); 55 53 CIO_TRACE_EVENT(4, dbf_txt); 56 54 isc_register(airq->isc); ··· 96 98 head = &airq_lists[tpi_info->isc]; 97 99 rcu_read_lock(); 98 100 hlist_for_each_entry_rcu(airq, head, list) 99 - if ((*airq->lsi_ptr & airq->lsi_mask) != 0) 101 + if (*airq->lsi_ptr != 0) 100 102 airq->handler(airq, tpi_info); 101 103 rcu_read_unlock(); 102 104
+4 -7
drivers/s390/crypto/zcrypt_api.c
··· 366 366 { 367 367 dev_t devt; 368 368 int i, rc = 0; 369 - char nodename[ZCDN_MAX_NAME]; 370 369 struct zcdn_device *zcdndev; 371 370 372 371 if (mutex_lock_interruptible(&ap_perms_mutex)) ··· 406 407 zcdndev->device.devt = devt; 407 408 zcdndev->device.groups = zcdn_dev_attr_groups; 408 409 if (name[0]) 409 - strncpy(nodename, name, sizeof(nodename)); 410 + rc = dev_set_name(&zcdndev->device, "%s", name); 410 411 else 411 - snprintf(nodename, sizeof(nodename), 412 - ZCRYPT_NAME "_%d", (int)MINOR(devt)); 413 - nodename[sizeof(nodename) - 1] = '\0'; 414 - if (dev_set_name(&zcdndev->device, nodename)) { 415 - rc = -EINVAL; 412 + rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt)); 413 + if (rc) { 414 + kfree(zcdndev); 416 415 goto unlockout; 417 416 } 418 417 rc = device_register(&zcdndev->device);
-1
drivers/s390/virtio/virtio_ccw.c
··· 250 250 info->airq.handler = virtio_airq_handler; 251 251 info->summary_indicator_idx = index; 252 252 info->airq.lsi_ptr = get_summary_indicator(info); 253 - info->airq.lsi_mask = 0xff; 254 253 info->airq.isc = VIRTIO_AIRQ_ISC; 255 254 rc = register_adapter_interrupt(&info->airq); 256 255 if (rc) {