Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/mm: allocate Absolute Lowcore Area in decompressor

Move the Absolute Lowcore Area allocation to the decompressor.
As a result, the get_abs_lowcore() and put_abs_lowcore() access
brackets become straightforward and no longer require complex
execution-context analysis or LAP and interrupt handling.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

authored by

Alexander Gordeev and committed by
Heiko Carstens
2154e0b3 8e9205d2

+36 -75
+2
arch/s390/boot/boot.h
··· 66 66 extern struct vmlinux_info _vmlinux_info; 67 67 #define vmlinux _vmlinux_info 68 68 69 + #define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore)) 70 + 69 71 #endif /* __ASSEMBLY__ */ 70 72 #endif /* BOOT_BOOT_H */
+6
arch/s390/boot/vmem.c
··· 6 6 #include <asm/sections.h> 7 7 #include <asm/mem_detect.h> 8 8 #include <asm/maccess.h> 9 + #include <asm/abs_lowcore.h> 9 10 #include "decompressor.h" 10 11 #include "boot.h" 11 12 ··· 30 29 enum populate_mode { 31 30 POPULATE_NONE, 32 31 POPULATE_ONE2ONE, 32 + POPULATE_ABS_LOWCORE, 33 33 }; 34 34 35 35 static void boot_check_oom(void) ··· 104 102 return -1; 105 103 case POPULATE_ONE2ONE: 106 104 return addr; 105 + case POPULATE_ABS_LOWCORE: 106 + return __abs_lowcore_pa(addr); 107 107 default: 108 108 return -1; 109 109 } ··· 275 271 pgtable_populate_begin(online_end); 276 272 pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE); 277 273 pgtable_populate(0, online_end, POPULATE_ONE2ONE); 274 + pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore), 275 + POPULATE_ABS_LOWCORE); 278 276 pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE, 279 277 POPULATE_NONE); 280 278 memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
+13 -3
arch/s390/include/asm/abs_lowcore.h
··· 7 7 #define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore)) 8 8 9 9 extern unsigned long __abs_lowcore; 10 - extern bool abs_lowcore_mapped; 11 10 12 - struct lowcore *get_abs_lowcore(unsigned long *flags); 13 - void put_abs_lowcore(struct lowcore *lc, unsigned long flags); 14 11 int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc); 15 12 void abs_lowcore_unmap(int cpu); 13 + 14 + static inline struct lowcore *get_abs_lowcore(void) 15 + { 16 + int cpu; 17 + 18 + cpu = get_cpu(); 19 + return ((struct lowcore *)__abs_lowcore) + cpu; 20 + } 21 + 22 + static inline void put_abs_lowcore(struct lowcore *lc) 23 + { 24 + put_cpu(); 25 + } 16 26 17 27 #endif /* _ASM_S390_ABS_LOWCORE_H */
-49
arch/s390/kernel/abs_lowcore.c
··· 3 3 #include <linux/pgtable.h> 4 4 #include <asm/abs_lowcore.h> 5 5 6 - #define ABS_LOWCORE_UNMAPPED 1 7 - #define ABS_LOWCORE_LAP_ON 2 8 - #define ABS_LOWCORE_IRQS_ON 4 9 - 10 6 unsigned long __bootdata_preserved(__abs_lowcore); 11 - bool __ro_after_init abs_lowcore_mapped; 12 7 13 8 int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc) 14 9 { ··· 43 48 vmem_unmap_4k_page(addr); 44 49 addr += PAGE_SIZE; 45 50 } 46 - } 47 - 48 - struct lowcore *get_abs_lowcore(unsigned long *flags) 49 - { 50 - unsigned long irq_flags; 51 - union ctlreg0 cr0; 52 - int cpu; 53 - 54 - *flags = 0; 55 - cpu = get_cpu(); 56 - if (abs_lowcore_mapped) { 57 - return ((struct lowcore *)__abs_lowcore) + cpu; 58 - } else { 59 - if (cpu != 0) 60 - panic("Invalid unmapped absolute lowcore access\n"); 61 - local_irq_save(irq_flags); 62 - if (!irqs_disabled_flags(irq_flags)) 63 - *flags |= ABS_LOWCORE_IRQS_ON; 64 - __ctl_store(cr0.val, 0, 0); 65 - if (cr0.lap) { 66 - *flags |= ABS_LOWCORE_LAP_ON; 67 - __ctl_clear_bit(0, 28); 68 - } 69 - *flags |= ABS_LOWCORE_UNMAPPED; 70 - return lowcore_ptr[0]; 71 - } 72 - } 73 - 74 - void put_abs_lowcore(struct lowcore *lc, unsigned long flags) 75 - { 76 - if (abs_lowcore_mapped) { 77 - if (flags) 78 - panic("Invalid mapped absolute lowcore release\n"); 79 - } else { 80 - if (smp_processor_id() != 0) 81 - panic("Invalid mapped absolute lowcore access\n"); 82 - if (!(flags & ABS_LOWCORE_UNMAPPED)) 83 - panic("Invalid unmapped absolute lowcore release\n"); 84 - if (flags & ABS_LOWCORE_LAP_ON) 85 - __ctl_set_bit(0, 28); 86 - if (flags & ABS_LOWCORE_IRQS_ON) 87 - local_irq_enable(); 88 - } 89 - put_cpu(); 90 51 }
+2 -3
arch/s390/kernel/ipl.c
··· 1986 1986 { 1987 1987 unsigned long ipib = (unsigned long) reipl_block_actual; 1988 1988 struct lowcore *abs_lc; 1989 - unsigned long flags; 1990 1989 unsigned int csum; 1991 1990 1992 1991 csum = (__force unsigned int) 1993 1992 csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); 1994 - abs_lc = get_abs_lowcore(&flags); 1993 + abs_lc = get_abs_lowcore(); 1995 1994 abs_lc->ipib = ipib; 1996 1995 abs_lc->ipib_checksum = csum; 1997 - put_abs_lowcore(abs_lc, flags); 1996 + put_abs_lowcore(abs_lc); 1998 1997 dump_run(trigger); 1999 1998 } 2000 1999
+2 -3
arch/s390/kernel/machine_kexec.c
··· 224 224 void arch_crash_save_vmcoreinfo(void) 225 225 { 226 226 struct lowcore *abs_lc; 227 - unsigned long flags; 228 227 229 228 VMCOREINFO_SYMBOL(lowcore_ptr); 230 229 VMCOREINFO_SYMBOL(high_memory); ··· 231 232 vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31); 232 233 vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31); 233 234 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); 234 - abs_lc = get_abs_lowcore(&flags); 235 + abs_lc = get_abs_lowcore(); 235 236 abs_lc->vmcore_info = paddr_vmcoreinfo_note(); 236 - put_abs_lowcore(abs_lc, flags); 237 + put_abs_lowcore(abs_lc); 237 238 } 238 239 239 240 void machine_shutdown(void)
+2 -3
arch/s390/kernel/os_info.c
··· 59 59 void __init os_info_init(void) 60 60 { 61 61 struct lowcore *abs_lc; 62 - unsigned long flags; 63 62 64 63 os_info.version_major = OS_INFO_VERSION_MAJOR; 65 64 os_info.version_minor = OS_INFO_VERSION_MINOR; 66 65 os_info.magic = OS_INFO_MAGIC; 67 66 os_info.csum = os_info_csum(&os_info); 68 - abs_lc = get_abs_lowcore(&flags); 67 + abs_lc = get_abs_lowcore(); 69 68 abs_lc->os_info = __pa(&os_info); 70 - put_abs_lowcore(abs_lc, flags); 69 + put_abs_lowcore(abs_lc); 71 70 } 72 71 73 72 #ifdef CONFIG_CRASH_DUMP
+3 -5
arch/s390/kernel/setup.c
··· 418 418 { 419 419 struct lowcore *lc, *abs_lc; 420 420 unsigned long mcck_stack; 421 - unsigned long flags; 422 421 423 422 /* 424 423 * Setup lowcore for boot cpu ··· 492 493 lc->kernel_asce = S390_lowcore.kernel_asce; 493 494 lc->user_asce = S390_lowcore.user_asce; 494 495 495 - abs_lc = get_abs_lowcore(&flags); 496 + abs_lc = get_abs_lowcore(); 496 497 abs_lc->restart_stack = lc->restart_stack; 497 498 abs_lc->restart_fn = lc->restart_fn; 498 499 abs_lc->restart_data = lc->restart_data; ··· 502 503 memcpy(abs_lc->cregs_save_area, lc->cregs_save_area, sizeof(abs_lc->cregs_save_area)); 503 504 abs_lc->program_new_psw = lc->program_new_psw; 504 505 abs_lc->mcesad = lc->mcesad; 505 - put_abs_lowcore(abs_lc, flags); 506 + put_abs_lowcore(abs_lc); 506 507 507 508 set_prefix(__pa(lc)); 508 509 lowcore_ptr[0] = lc; 509 - if (abs_lowcore_map(0, lowcore_ptr[0], true)) 510 + if (abs_lowcore_map(0, lowcore_ptr[0], false)) 510 511 panic("Couldn't setup absolute lowcore"); 511 - abs_lowcore_mapped = true; 512 512 } 513 513 514 514 static struct resource code_resource = {
+4 -6
arch/s390/kernel/smp.c
··· 323 323 { 324 324 struct lowcore *lc, *abs_lc; 325 325 unsigned int source_cpu; 326 - unsigned long flags; 327 326 328 327 lc = lowcore_ptr[pcpu - pcpu_devices]; 329 328 source_cpu = stap(); ··· 340 341 lc->restart_data = (unsigned long)data; 341 342 lc->restart_source = source_cpu; 342 343 } else { 343 - abs_lc = get_abs_lowcore(&flags); 344 + abs_lc = get_abs_lowcore(); 344 345 abs_lc->restart_stack = stack; 345 346 abs_lc->restart_fn = (unsigned long)func; 346 347 abs_lc->restart_data = (unsigned long)data; 347 348 abs_lc->restart_source = source_cpu; 348 - put_abs_lowcore(abs_lc, flags); 349 + put_abs_lowcore(abs_lc); 349 350 } 350 351 __bpon(); 351 352 asm volatile( ··· 592 593 { 593 594 struct ec_creg_mask_parms parms = { .cr = cr, }; 594 595 struct lowcore *abs_lc; 595 - unsigned long flags; 596 596 u64 ctlreg; 597 597 598 598 if (set) { ··· 602 604 parms.andval = ~(1UL << bit); 603 605 } 604 606 spin_lock(&ctl_lock); 605 - abs_lc = get_abs_lowcore(&flags); 607 + abs_lc = get_abs_lowcore(); 606 608 ctlreg = abs_lc->cregs_save_area[cr]; 607 609 ctlreg = (ctlreg & parms.andval) | parms.orval; 608 610 abs_lc->cregs_save_area[cr] = ctlreg; 609 - put_abs_lowcore(abs_lc, flags); 611 + put_abs_lowcore(abs_lc); 610 612 spin_unlock(&ctl_lock); 611 613 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 612 614 }
+2 -3
arch/s390/mm/maccess.c
··· 151 151 void *ptr = phys_to_virt(addr); 152 152 void *bounce = ptr; 153 153 struct lowcore *abs_lc; 154 - unsigned long flags; 155 154 unsigned long size; 156 155 int this_cpu, cpu; 157 156 ··· 166 167 goto out; 167 168 size = PAGE_SIZE - (addr & ~PAGE_MASK); 168 169 if (addr < sizeof(struct lowcore)) { 169 - abs_lc = get_abs_lowcore(&flags); 170 + abs_lc = get_abs_lowcore(); 170 171 ptr = (void *)abs_lc + addr; 171 172 memcpy(bounce, ptr, size); 172 - put_abs_lowcore(abs_lc, flags); 173 + put_abs_lowcore(abs_lc); 173 174 } else if (cpu == this_cpu) { 174 175 ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu])); 175 176 memcpy(bounce, ptr, size);