Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

- Update maintainers. Niklas Schnelle takes over zpci and Vineeth
Vijayan common io code.

- Extend cpuinfo to include topology information.

- Add new extended counters for IBM z15 and sampling buffer allocation
rework in perf code.

- Add control over zeroing out memory during system restart.

- CCA protected key block version 2 support and other
fixes/improvements in crypto code.

- Convert to new fallthrough; annotations.

- Replace zero-length arrays with flexible-arrays.

- QDIO debugfs and other small improvements.

- Drop 2-level paging support optimization for compat tasks. Various mm
cleanups.

- Remove broken and unused hibernate / power management support.

- Remove fake numa support which does not bring any benefits.

- Exclude offline CPUs from CPU topology masks to be more consistent
with other architectures.

- Prevent last branching instruction address leaking to userspace.

- Other small various fixes and improvements all over the code.

* tag 's390-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (57 commits)
s390/mm: cleanup init_new_context() callback
s390/mm: cleanup virtual memory constants usage
s390/mm: remove page table downgrade support
s390/qdio: set qdio_irq->cdev at allocation time
s390/qdio: remove unused function declarations
s390/ccwgroup: remove pm support
s390/ap: remove power management code from ap bus and drivers
s390/zcrypt: use kvmalloc instead of kmalloc for 256k alloc
s390/mm: cleanup arch_get_unmapped_area() and friends
s390/ism: remove pm support
s390/cio: use fallthrough;
s390/vfio: use fallthrough;
s390/zcrypt: use fallthrough;
s390: use fallthrough;
s390/cpum_sf: Fix wrong page count in error message
s390/diag: fix display of diagnose call statistics
s390/ap: Remove ap device suspend and resume callbacks
s390/pci: Improve handling of unset UID
s390/pci: Fix zpci_alloc_domain() over allocation
s390/qdio: pass ISC as parameter to chsc_sadc()
...

+803 -2812
+2 -2
MAINTAINERS
··· 14613 14613 F: Documentation/driver-api/s390-drivers.rst 14614 14614 14615 14615 S390 COMMON I/O LAYER 14616 - M: Sebastian Ott <sebott@linux.ibm.com> 14616 + M: Vineeth Vijayan <vneethv@linux.ibm.com> 14617 14617 M: Peter Oberparleiter <oberpar@linux.ibm.com> 14618 14618 L: linux-s390@vger.kernel.org 14619 14619 W: http://www.ibm.com/developerworks/linux/linux390/ ··· 14655 14655 F: drivers/s390/net/ 14656 14656 14657 14657 S390 PCI SUBSYSTEM 14658 - M: Sebastian Ott <sebott@linux.ibm.com> 14658 + M: Niklas Schnelle <schnelle@linux.ibm.com> 14659 14659 M: Gerald Schaefer <gerald.schaefer@de.ibm.com> 14660 14660 L: linux-s390@vger.kernel.org 14661 14661 W: http://www.ibm.com/developerworks/linux/linux390/
+3 -70
arch/s390/Kconfig
··· 102 102 select ARCH_INLINE_WRITE_UNLOCK_IRQ 103 103 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 104 104 select ARCH_KEEP_MEMBLOCK 105 - select ARCH_SAVE_PAGE_KEYS if HIBERNATION 106 105 select ARCH_STACKWALK 107 106 select ARCH_SUPPORTS_ATOMIC_RMW 108 107 select ARCH_SUPPORTS_NUMA_BALANCING 109 108 select ARCH_USE_BUILTIN_BSWAP 110 109 select ARCH_USE_CMPXCHG_LOCKREF 111 110 select ARCH_WANTS_DYNAMIC_TASK_STRUCT 111 + select ARCH_WANT_DEFAULT_BPF_JIT 112 112 select ARCH_WANT_IPC_PARSE_VERSION 113 113 select BUILDTIME_TABLE_SORT 114 114 select CLONE_BACKWARDS2 ··· 451 451 config HOTPLUG_CPU 452 452 def_bool y 453 453 454 - # Some NUMA nodes have memory ranges that span 455 - # other nodes. Even though a pfn is valid and 456 - # between a node's start and end pfns, it may not 457 - # reside on that node. See memmap_init_zone() 458 - # for details. <- They meant memory holes! 459 - config NODES_SPAN_OTHER_NODES 460 - def_bool NUMA 461 - 462 454 config NUMA 463 455 bool "NUMA support" 464 456 depends on SCHED_TOPOLOGY ··· 460 468 461 469 This option adds NUMA support to the kernel. 462 470 463 - An operation mode can be selected by appending 464 - numa=<method> to the kernel command line. 465 - 466 - The default behaviour is identical to appending numa=plain to 467 - the command line. This will create just one node with all 468 - available memory and all CPUs in it. 469 - 470 471 config NODES_SHIFT 471 - int "Maximum NUMA nodes (as a power of 2)" 472 - range 1 10 473 - depends on NUMA 474 - default "4" 475 - help 476 - Specify the maximum number of NUMA nodes available on the target 477 - system. Increases memory reserved to accommodate various tables. 
478 - 479 - menu "Select NUMA modes" 480 - depends on NUMA 481 - 482 - config NUMA_EMU 483 - bool "NUMA emulation" 484 - default y 485 - help 486 - Numa emulation mode will split the available system memory into 487 - equal chunks which then are distributed over the configured number 488 - of nodes in a round-robin manner. 489 - 490 - The number of fake nodes is limited by the number of available memory 491 - chunks (i.e. memory size / fake size) and the number of supported 492 - nodes in the kernel. 493 - 494 - The CPUs are assigned to the nodes in a way that partially respects 495 - the original machine topology (if supported by the machine). 496 - Fair distribution of the CPUs is not guaranteed. 497 - 498 - config EMU_SIZE 499 - hex "NUMA emulation memory chunk size" 500 - default 0x10000000 501 - range 0x400000 0x100000000 502 - depends on NUMA_EMU 503 - help 504 - Select the default size by which the memory is chopped and then 505 - assigned to emulated NUMA nodes. 506 - 507 - This can be overridden by specifying 508 - 509 - emu_size=<n> 510 - 511 - on the kernel command line where also suffixes K, M, G, and T are 512 - supported. 513 - 514 - endmenu 472 + int 473 + default "1" 515 474 516 475 config SCHED_SMT 517 476 def_bool n ··· 810 867 811 868 If unsure, say Y. 812 869 813 - menu "Power Management" 814 - 815 - config ARCH_HIBERNATION_POSSIBLE 816 - def_bool y 817 - 818 - source "kernel/power/Kconfig" 819 - 820 - endmenu 821 - 822 870 config CCW 823 871 def_bool y 824 872 ··· 944 1010 select TTY 945 1011 select VIRTUALIZATION 946 1012 select VIRTIO 947 - select VIRTIO_CONSOLE 948 1013 help 949 1014 Enabling this option adds support for virtio based paravirtual device 950 1015 drivers on s390.
+1 -1
arch/s390/appldata/appldata_os.c
··· 75 75 (waiting for I/O) */ 76 76 77 77 /* per cpu data */ 78 - struct appldata_os_per_cpu os_cpu[0]; 78 + struct appldata_os_per_cpu os_cpu[]; 79 79 } __attribute__((packed)); 80 80 81 81 static struct appldata_os_data *appldata_os_data;
+6 -11
arch/s390/boot/install.sh
··· 21 21 if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi 22 22 if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi 23 23 24 - # Default install - same as make zlilo 24 + echo "Warning: '${INSTALLKERNEL}' command not available - additional " \ 25 + "bootloader config required" >&2 26 + if [ -f $4/vmlinuz-$1 ]; then mv $4/vmlinuz-$1 $4/vmlinuz-$1.old; fi 27 + if [ -f $4/System.map-$1 ]; then mv $4/System.map-$1 $4/System.map-$1.old; fi 25 28 26 - if [ -f $4/vmlinuz ]; then 27 - mv $4/vmlinuz $4/vmlinuz.old 28 - fi 29 - 30 - if [ -f $4/System.map ]; then 31 - mv $4/System.map $4/System.old 32 - fi 33 - 34 - cat $2 > $4/vmlinuz 35 - cp $3 $4/System.map 29 + cat $2 > $4/vmlinuz-$1 30 + cp $3 $4/System.map-$1
+1
arch/s390/configs/debug_defconfig
··· 532 532 # CONFIG_SERIO is not set 533 533 CONFIG_LEGACY_PTY_COUNT=0 534 534 CONFIG_NULL_TTY=m 535 + CONFIG_VIRTIO_CONSOLE=y 535 536 CONFIG_HW_RANDOM_VIRTIO=m 536 537 CONFIG_RAW_DRIVER=m 537 538 CONFIG_HANGCHECK_TIMER=m
+1
arch/s390/configs/defconfig
··· 528 528 # CONFIG_SERIO is not set 529 529 CONFIG_LEGACY_PTY_COUNT=0 530 530 CONFIG_NULL_TTY=m 531 + CONFIG_VIRTIO_CONSOLE=y 531 532 CONFIG_HW_RANDOM_VIRTIO=m 532 533 CONFIG_RAW_DRIVER=m 533 534 CONFIG_HANGCHECK_TIMER=m
+3
arch/s390/crypto/aes_s390.c
··· 342 342 memcpy(walk.iv, param.iv, AES_BLOCK_SIZE); 343 343 ret = skcipher_walk_done(&walk, nbytes - n); 344 344 } 345 + memzero_explicit(&param, sizeof(param)); 345 346 return ret; 346 347 } 347 348 ··· 471 470 walk.dst.virt.addr, walk.src.virt.addr, n); 472 471 ret = skcipher_walk_done(&walk, nbytes - n); 473 472 } 473 + memzero_explicit(&pcc_param, sizeof(pcc_param)); 474 + memzero_explicit(&xts_param, sizeof(xts_param)); 474 475 return ret; 475 476 } 476 477
-1
arch/s390/include/asm/hw_irq.h
··· 7 7 8 8 void __init init_airq_interrupts(void); 9 9 void __init init_cio_interrupts(void); 10 - void __init init_ext_interrupts(void); 11 10 12 11 #endif
+1
arch/s390/include/asm/ipl.h
··· 119 119 DIAG308_LOAD_NORMAL_DUMP = 4, 120 120 DIAG308_SET = 5, 121 121 DIAG308_STORE = 6, 122 + DIAG308_LOAD_NORMAL = 7, 122 123 }; 123 124 124 125 enum diag308_rc {
+3 -1
arch/s390/include/asm/lowcore.h
··· 141 141 142 142 /* br %r1 trampoline */ 143 143 __u16 br_r1_trampoline; /* 0x0400 */ 144 - __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */ 144 + __u32 return_lpswe; /* 0x0402 */ 145 + __u32 return_mcck_lpswe; /* 0x0406 */ 146 + __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */ 145 147 146 148 /* 147 149 * 0xe00 contains the address of the IPL Parameter Information
-2
arch/s390/include/asm/mmu.h
··· 34 34 unsigned int uses_cmm:1; 35 35 /* The gmaps associated with this context are allowed to use huge pages. */ 36 36 unsigned int allow_gmap_hpage_1m:1; 37 - /* The mmu context is for compat task */ 38 - unsigned int compat_mm:1; 39 37 } mm_context_t; 40 38 41 39 #define INIT_MM_CONTEXT(name) \
+23 -21
arch/s390/include/asm/mmu_context.h
··· 18 18 static inline int init_new_context(struct task_struct *tsk, 19 19 struct mm_struct *mm) 20 20 { 21 + unsigned long asce_type, init_entry; 22 + 21 23 spin_lock_init(&mm->context.lock); 22 24 INIT_LIST_HEAD(&mm->context.pgtable_list); 23 25 INIT_LIST_HEAD(&mm->context.gmap_list); ··· 28 26 atomic_set(&mm->context.is_protected, 0); 29 27 mm->context.gmap_asce = 0; 30 28 mm->context.flush_mm = 0; 31 - mm->context.compat_mm = test_thread_flag(TIF_31BIT); 32 29 #ifdef CONFIG_PGSTE 33 30 mm->context.alloc_pgste = page_table_allocate_pgste || 34 31 test_thread_flag(TIF_PGSTE) || ··· 38 37 mm->context.allow_gmap_hpage_1m = 0; 39 38 #endif 40 39 switch (mm->context.asce_limit) { 41 - case _REGION2_SIZE: 40 + default: 42 41 /* 43 - * forked 3-level task, fall through to set new asce with new 44 - * mm->pgd 42 + * context created by exec, the value of asce_limit can 43 + * only be zero in this case 45 44 */ 46 - case 0: 47 - /* context created by exec, set asce limit to 4TB */ 48 - mm->context.asce_limit = STACK_TOP_MAX; 49 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 50 - _ASCE_USER_BITS | _ASCE_TYPE_REGION3; 45 + VM_BUG_ON(mm->context.asce_limit); 46 + /* continue as 3-level task */ 47 + mm->context.asce_limit = _REGION2_SIZE; 48 + fallthrough; 49 + case _REGION2_SIZE: 50 + /* forked 3-level task */ 51 + init_entry = _REGION3_ENTRY_EMPTY; 52 + asce_type = _ASCE_TYPE_REGION3; 51 53 break; 52 - case -PAGE_SIZE: 53 - /* forked 5-level task, set new asce with new_mm->pgd */ 54 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 55 - _ASCE_USER_BITS | _ASCE_TYPE_REGION1; 54 + case TASK_SIZE_MAX: 55 + /* forked 5-level task */ 56 + init_entry = _REGION1_ENTRY_EMPTY; 57 + asce_type = _ASCE_TYPE_REGION1; 56 58 break; 57 59 case _REGION1_SIZE: 58 - /* forked 4-level task, set new asce with new mm->pgd */ 59 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 60 - _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 60 + /* forked 4-level task */ 61 + init_entry = 
_REGION2_ENTRY_EMPTY; 62 + asce_type = _ASCE_TYPE_REGION2; 61 63 break; 62 - case _REGION3_SIZE: 63 - /* forked 2-level compat task, set new asce with new mm->pgd */ 64 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 65 - _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; 66 64 } 67 - crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 65 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 66 + _ASCE_USER_BITS | asce_type; 67 + crst_table_init((unsigned long *) mm->pgd, init_entry); 68 68 return 0; 69 69 } 70 70
+1 -12
arch/s390/include/asm/numa.h
··· 13 13 #ifdef CONFIG_NUMA 14 14 15 15 #include <linux/numa.h> 16 - #include <linux/cpumask.h> 17 16 18 17 void numa_setup(void); 19 - int numa_pfn_to_nid(unsigned long pfn); 20 - int __node_distance(int a, int b); 21 - void numa_update_cpu_topology(void); 22 - 23 - extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; 24 - extern int numa_debug_enabled; 25 18 26 19 #else 27 20 28 21 static inline void numa_setup(void) { } 29 - static inline void numa_update_cpu_topology(void) { } 30 - static inline int numa_pfn_to_nid(unsigned long pfn) 31 - { 32 - return 0; 33 - } 34 22 35 23 #endif /* CONFIG_NUMA */ 24 + 36 25 #endif /* _ASM_S390_NUMA_H */
+9 -9
arch/s390/include/asm/page.h
··· 166 166 #define __pa(x) ((unsigned long)(x)) 167 167 #define __va(x) ((void *)(unsigned long)(x)) 168 168 169 - #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 170 - #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) 169 + #define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) 170 + #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 171 + 172 + #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) 173 + #define page_to_phys(page) pfn_to_phys(page_to_pfn(page)) 174 + 175 + #define pfn_to_virt(pfn) __va(pfn_to_phys(pfn)) 176 + #define virt_to_pfn(kaddr) (phys_to_pfn(__pa(kaddr))) 171 177 #define pfn_to_kaddr(pfn) pfn_to_virt(pfn) 172 178 173 179 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) 174 180 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) 175 181 176 - #define phys_to_pfn(kaddr) ((kaddr) >> PAGE_SHIFT) 177 - #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 178 - 179 - #define phys_to_page(kaddr) pfn_to_page(phys_to_pfn(kaddr)) 180 - #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 181 - 182 - #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 182 + #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 183 183 184 184 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ 185 185 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+6
arch/s390/include/asm/pci.h
··· 5 5 #include <linux/pci.h> 6 6 #include <linux/mutex.h> 7 7 #include <linux/iommu.h> 8 + #include <linux/pci_hotplug.h> 8 9 #include <asm-generic/pci.h> 9 10 #include <asm/pci_clp.h> 10 11 #include <asm/pci_debug.h> ··· 26 25 27 26 #define ZPCI_NR_DMA_SPACES 1 28 27 #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS 28 + #define ZPCI_DOMAIN_BITMAP_SIZE (1 << 16) 29 29 30 30 /* PCI Function Controls */ 31 31 #define ZPCI_FC_FN_ENABLED 0x80 ··· 98 96 struct zpci_dev { 99 97 struct pci_bus *bus; 100 98 struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */ 99 + struct hotplug_slot hotplug_slot; 101 100 102 101 enum zpci_state state; 103 102 u32 fid; /* function ID, used by sclp */ ··· 188 185 int clp_enable_fh(struct zpci_dev *, u8); 189 186 int clp_disable_fh(struct zpci_dev *); 190 187 int clp_get_state(u32 fid, enum zpci_state *state); 188 + 189 + /* UID */ 190 + void update_uid_checking(bool new); 191 191 192 192 /* IOMMU Interface */ 193 193 int zpci_init_iommu(struct zpci_dev *zdev);
+15 -26
arch/s390/include/asm/pgalloc.h
··· 34 34 memset64((u64 *)crst, entry, _CRST_ENTRIES); 35 35 } 36 36 37 - static inline unsigned long pgd_entry_type(struct mm_struct *mm) 38 - { 39 - if (mm_pmd_folded(mm)) 40 - return _SEGMENT_ENTRY_EMPTY; 41 - if (mm_pud_folded(mm)) 42 - return _REGION3_ENTRY_EMPTY; 43 - if (mm_p4d_folded(mm)) 44 - return _REGION2_ENTRY_EMPTY; 45 - return _REGION1_ENTRY_EMPTY; 46 - } 47 - 48 37 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit); 49 - void crst_table_downgrade(struct mm_struct *); 38 + 39 + static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr, 40 + unsigned long len) 41 + { 42 + int rc; 43 + 44 + if (addr + len > mm->context.asce_limit && 45 + addr + len <= TASK_SIZE) { 46 + rc = crst_table_upgrade(mm, addr + len); 47 + if (rc) 48 + return (unsigned long) rc; 49 + } 50 + return addr; 51 + } 50 52 51 53 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) 52 54 { ··· 118 116 119 117 static inline pgd_t *pgd_alloc(struct mm_struct *mm) 120 118 { 121 - unsigned long *table = crst_table_alloc(mm); 122 - 123 - if (!table) 124 - return NULL; 125 - if (mm->context.asce_limit == _REGION3_SIZE) { 126 - /* Forking a compat process with 2 page table levels */ 127 - if (!pgtable_pmd_page_ctor(virt_to_page(table))) { 128 - crst_table_free(mm, table); 129 - return NULL; 130 - } 131 - } 132 - return (pgd_t *) table; 119 + return (pgd_t *) crst_table_alloc(mm); 133 120 } 134 121 135 122 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 136 123 { 137 - if (mm->context.asce_limit == _REGION3_SIZE) 138 - pgtable_pmd_page_dtor(virt_to_page(pgd)); 139 124 crst_table_free(mm, (unsigned long *) pgd); 140 125 } 141 126
+5 -5
arch/s390/include/asm/processor.h
··· 92 92 */ 93 93 94 94 #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \ 95 - (1UL << 31) : -PAGE_SIZE) 95 + _REGION3_SIZE : TASK_SIZE_MAX) 96 96 #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 97 - (1UL << 30) : (1UL << 41)) 97 + (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1)) 98 98 #define TASK_SIZE TASK_SIZE_OF(current) 99 99 #define TASK_SIZE_MAX (-PAGE_SIZE) 100 100 101 101 #define STACK_TOP (test_thread_flag(TIF_31BIT) ? \ 102 - (1UL << 31) : (1UL << 42)) 103 - #define STACK_TOP_MAX (1UL << 42) 102 + _REGION3_SIZE : _REGION2_SIZE) 103 + #define STACK_TOP_MAX _REGION2_SIZE 104 104 105 105 #define HAVE_ARCH_PICK_MMAP_LAYOUT 106 106 ··· 161 161 #define INIT_THREAD { \ 162 162 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ 163 163 .fpu.regs = (void *) init_task.thread.fpu.fprs, \ 164 + .last_break = 1, \ 164 165 } 165 166 166 167 /* ··· 178 177 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ 179 178 regs->psw.addr = new_psw; \ 180 179 regs->gprs[15] = new_stackp; \ 181 - crst_table_downgrade(current->mm); \ 182 180 execve_tail(); \ 183 181 } while (0) 184 182
+7
arch/s390/include/asm/setup.h
··· 8 8 9 9 #include <linux/bits.h> 10 10 #include <uapi/asm/setup.h> 11 + #include <linux/build_bug.h> 11 12 12 13 #define EP_OFFSET 0x10008 13 14 #define EP_STRING "S390EP" ··· 161 160 static inline unsigned long kaslr_offset(void) 162 161 { 163 162 return __kaslr_offset; 163 + } 164 + 165 + static inline u32 gen_lpswe(unsigned long addr) 166 + { 167 + BUILD_BUG_ON(addr > 0xfff); 168 + return 0xb2b20000 | addr; 164 169 } 165 170 166 171 #else /* __ASSEMBLY__ */
+1
arch/s390/include/asm/smp.h
··· 34 34 extern void smp_yield_cpu(int cpu); 35 35 extern void smp_cpu_set_polarization(int cpu, int val); 36 36 extern int smp_cpu_get_polarization(int cpu); 37 + extern int smp_cpu_get_cpu_address(int cpu); 37 38 extern void smp_fill_possible_mask(void); 38 39 extern void smp_detect_cpus(void); 39 40
+11 -4
arch/s390/include/asm/topology.h
··· 16 16 unsigned short socket_id; 17 17 unsigned short book_id; 18 18 unsigned short drawer_id; 19 - unsigned short node_id; 20 19 unsigned short dedicated : 1; 20 + int booted_cores; 21 21 cpumask_t thread_mask; 22 22 cpumask_t core_mask; 23 23 cpumask_t book_mask; ··· 25 25 }; 26 26 27 27 extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; 28 - extern cpumask_t cpus_with_topology; 29 28 30 29 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) 31 30 #define topology_thread_id(cpu) (cpu_topology[cpu].thread_id) ··· 36 37 #define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id) 37 38 #define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask) 38 39 #define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated) 40 + #define topology_booted_cores(cpu) (cpu_topology[cpu].booted_cores) 39 41 40 42 #define mc_capable() 1 41 43 ··· 45 45 int topology_set_cpu_management(int fc); 46 46 void topology_schedule_update(void); 47 47 void store_topology(struct sysinfo_15_1_x *info); 48 + void update_cpu_masks(void); 48 49 void topology_expect_change(void); 49 50 const struct cpumask *cpu_coregroup_mask(int cpu); 50 51 ··· 55 54 static inline void topology_schedule_update(void) { } 56 55 static inline int topology_cpu_init(struct cpu *cpu) { return 0; } 57 56 static inline int topology_cpu_dedicated(int cpu_nr) { return 0; } 57 + static inline int topology_booted_cores(int cpu_nr) { return 1; } 58 + static inline void update_cpu_masks(void) { } 58 59 static inline void topology_expect_change(void) { } 59 60 60 61 #endif /* CONFIG_SCHED_TOPOLOGY */ ··· 74 71 #define cpu_to_node cpu_to_node 75 72 static inline int cpu_to_node(int cpu) 76 73 { 77 - return cpu_topology[cpu].node_id; 74 + return 0; 78 75 } 79 76 80 77 /* Returns a pointer to the cpumask of CPUs on node 'node'. 
*/ 81 78 #define cpumask_of_node cpumask_of_node 82 79 static inline const struct cpumask *cpumask_of_node(int node) 83 80 { 84 - return &node_to_cpumask_map[node]; 81 + return cpu_possible_mask; 85 82 } 86 83 87 84 #define pcibus_to_node(bus) __pcibus_to_node(bus) 88 85 89 86 #define node_distance(a, b) __node_distance(a, b) 87 + static inline int __node_distance(int a, int b) 88 + { 89 + return 0; 90 + } 90 91 91 92 #else /* !CONFIG_NUMA */ 92 93
-1
arch/s390/kernel/Makefile
··· 54 54 55 55 obj-$(CONFIG_MODULES) += module.o 56 56 obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o 57 - obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o 58 57 obj-$(CONFIG_AUDIT) += audit.o 59 58 compat-obj-$(CONFIG_AUDIT) += compat_audit.o 60 59 obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
+2
arch/s390/kernel/asm-offsets.c
··· 124 124 OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code); 125 125 OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address); 126 126 OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr); 127 + OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe); 128 + OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe); 127 129 OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw); 128 130 OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw); 129 131 OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
+1 -1
arch/s390/kernel/diag.c
··· 84 84 85 85 static void *show_diag_stat_start(struct seq_file *m, loff_t *pos) 86 86 { 87 - return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; 87 + return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL; 88 88 } 89 89 90 90 static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
+39 -26
arch/s390/kernel/entry.S
··· 115 115 116 116 .macro SWITCH_ASYNC savearea,timer 117 117 tmhh %r8,0x0001 # interrupting from user ? 118 - jnz 1f 118 + jnz 2f 119 119 lgr %r14,%r9 120 + cghi %r14,__LC_RETURN_LPSWE 121 + je 0f 120 122 slg %r14,BASED(.Lcritical_start) 121 123 clg %r14,BASED(.Lcritical_length) 122 - jhe 0f 124 + jhe 1f 125 + 0: 123 126 lghi %r11,\savearea # inside critical section, do cleanup 124 127 brasl %r14,cleanup_critical 125 128 tmhh %r8,0x0001 # retest problem state after cleanup 126 - jnz 1f 127 - 0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 129 + jnz 2f 130 + 1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 128 131 slgr %r14,%r15 129 132 srag %r14,%r14,STACK_SHIFT 130 - jnz 2f 133 + jnz 3f 131 134 CHECK_STACK \savearea 132 135 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 133 - j 3f 134 - 1: UPDATE_VTIME %r14,%r15,\timer 136 + j 4f 137 + 2: UPDATE_VTIME %r14,%r15,\timer 135 138 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 136 - 2: lg %r15,__LC_ASYNC_STACK # load async stack 137 - 3: la %r11,STACK_FRAME_OVERHEAD(%r15) 139 + 3: lg %r15,__LC_ASYNC_STACK # load async stack 140 + 4: la %r11,STACK_FRAME_OVERHEAD(%r15) 138 141 .endm 139 142 140 143 .macro UPDATE_VTIME w1,w2,enter_timer ··· 404 401 stpt __LC_EXIT_TIMER 405 402 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 406 403 lmg %r11,%r15,__PT_R11(%r11) 407 - lpswe __LC_RETURN_PSW 404 + b __LC_RETURN_LPSWE(%r0) 408 405 .Lsysc_done: 409 406 410 407 # ··· 611 608 BPOFF 612 609 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 613 610 lg %r10,__LC_LAST_BREAK 614 - lg %r12,__LC_CURRENT 611 + srag %r11,%r10,12 612 + jnz 0f 613 + /* if __LC_LAST_BREAK is < 4096, it contains one of 614 + * the lpswe addresses in lowcore. Set it to 1 (initial state) 615 + * to prevent leaking that address to userspace. 
616 + */ 617 + lghi %r10,1 618 + 0: lg %r12,__LC_CURRENT 615 619 lghi %r11,0 616 620 larl %r13,cleanup_critical 617 621 lmg %r8,%r9,__LC_PGM_OLD_PSW 618 622 tmhh %r8,0x0001 # test problem state bit 619 - jnz 2f # -> fault in user space 623 + jnz 3f # -> fault in user space 620 624 #if IS_ENABLED(CONFIG_KVM) 621 625 # cleanup critical section for program checks in sie64a 622 626 lgr %r14,%r9 623 627 slg %r14,BASED(.Lsie_critical_start) 624 628 clg %r14,BASED(.Lsie_critical_length) 625 - jhe 0f 629 + jhe 1f 626 630 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 627 631 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 628 632 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 629 633 larl %r9,sie_exit # skip forward to sie_exit 630 634 lghi %r11,_PIF_GUEST_FAULT 631 635 #endif 632 - 0: tmhh %r8,0x4000 # PER bit set in old PSW ? 633 - jnz 1f # -> enabled, can't be a double fault 636 + 1: tmhh %r8,0x4000 # PER bit set in old PSW ? 637 + jnz 2f # -> enabled, can't be a double fault 634 638 tm __LC_PGM_ILC+3,0x80 # check for per exception 635 639 jnz .Lpgm_svcper # -> single stepped svc 636 - 1: CHECK_STACK __LC_SAVE_AREA_SYNC 640 + 2: CHECK_STACK __LC_SAVE_AREA_SYNC 637 641 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 638 - # CHECK_VMAP_STACK branches to stack_overflow or 4f 639 - CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f 640 - 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 642 + # CHECK_VMAP_STACK branches to stack_overflow or 5f 643 + CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f 644 + 3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 641 645 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 642 646 lg %r15,__LC_KERNEL_STACK 643 647 lgr %r14,%r12 644 648 aghi %r14,__TASK_thread # pointer to thread_struct 645 649 lghi %r13,__LC_PGM_TDB 646 650 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 647 - jz 3f 651 + jz 4f 648 652 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 649 - 3: stg %r10,__THREAD_last_break(%r14) 650 - 4: lgr %r13,%r11 653 + 4: stg %r10,__THREAD_last_break(%r14) 
654 + 5: lgr %r13,%r11 651 655 la %r11,STACK_FRAME_OVERHEAD(%r15) 652 656 stmg %r0,%r7,__PT_R0(%r11) 653 657 # clear user controlled registers to prevent speculative use ··· 673 663 stg %r13,__PT_FLAGS(%r11) 674 664 stg %r10,__PT_ARGS(%r11) 675 665 tm __LC_PGM_ILC+3,0x80 # check for per exception 676 - jz 5f 666 + jz 6f 677 667 tmhh %r8,0x0001 # kernel per event ? 678 668 jz .Lpgm_kprobe 679 669 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 680 670 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 681 671 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 682 672 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 683 - 5: REENABLE_IRQS 673 + 6: REENABLE_IRQS 684 674 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 685 675 larl %r1,pgm_check_table 686 676 llgh %r10,__PT_INT_CODE+2(%r11) ··· 785 775 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 786 776 .Lio_exit_kernel: 787 777 lmg %r11,%r15,__PT_R11(%r11) 788 - lpswe __LC_RETURN_PSW 778 + b __LC_RETURN_LPSWE(%r0) 789 779 .Lio_done: 790 780 791 781 # ··· 1224 1214 stpt __LC_EXIT_TIMER 1225 1215 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 1226 1216 0: lmg %r11,%r15,__PT_R11(%r11) 1227 - lpswe __LC_RETURN_MCCK_PSW 1217 + b __LC_RETURN_MCCK_LPSWE 1228 1218 1229 1219 .Lmcck_panic: 1230 1220 lg %r15,__LC_NODAT_STACK ··· 1281 1271 #endif 1282 1272 1283 1273 ENTRY(cleanup_critical) 1274 + cghi %r9,__LC_RETURN_LPSWE 1275 + je .Lcleanup_lpswe 1284 1276 #if IS_ENABLED(CONFIG_KVM) 1285 1277 clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap 1286 1278 jl 0f ··· 1436 1424 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1437 1425 mvc 0(64,%r11),__PT_R8(%r9) 1438 1426 lmg %r0,%r7,__PT_R0(%r9) 1427 + .Lcleanup_lpswe: 1439 1428 1: lmg %r8,%r9,__LC_RETURN_PSW 1440 1429 BR_EX %r14,%r11 1441 1430 .Lcleanup_sysc_restore_insn:
+66 -7
arch/s390/kernel/ipl.c
··· 144 144 145 145 static struct sclp_ipl_info sclp_ipl_info; 146 146 147 + static bool reipl_fcp_clear; 148 + static bool reipl_ccw_clear; 149 + 147 150 static inline int __diag308(unsigned long subcode, void *addr) 148 151 { 149 152 register unsigned long _addr asm("0") = (unsigned long) addr; ··· 694 691 __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show, 695 692 reipl_fcp_loadparm_store); 696 693 694 + static ssize_t reipl_fcp_clear_show(struct kobject *kobj, 695 + struct kobj_attribute *attr, char *page) 696 + { 697 + return sprintf(page, "%u\n", reipl_fcp_clear); 698 + } 699 + 700 + static ssize_t reipl_fcp_clear_store(struct kobject *kobj, 701 + struct kobj_attribute *attr, 702 + const char *buf, size_t len) 703 + { 704 + if (strtobool(buf, &reipl_fcp_clear) < 0) 705 + return -EINVAL; 706 + return len; 707 + } 708 + 697 709 static struct attribute *reipl_fcp_attrs[] = { 698 710 &sys_reipl_fcp_device_attr.attr, 699 711 &sys_reipl_fcp_wwpn_attr.attr, ··· 723 705 .attrs = reipl_fcp_attrs, 724 706 .bin_attrs = reipl_fcp_bin_attrs, 725 707 }; 708 + 709 + static struct kobj_attribute sys_reipl_fcp_clear_attr = 710 + __ATTR(clear, 0644, reipl_fcp_clear_show, reipl_fcp_clear_store); 726 711 727 712 /* CCW reipl device attributes */ 728 713 DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw); ··· 762 741 __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show, 763 742 reipl_ccw_loadparm_store); 764 743 744 + static ssize_t reipl_ccw_clear_show(struct kobject *kobj, 745 + struct kobj_attribute *attr, char *page) 746 + { 747 + return sprintf(page, "%u\n", reipl_ccw_clear); 748 + } 749 + 750 + static ssize_t reipl_ccw_clear_store(struct kobject *kobj, 751 + struct kobj_attribute *attr, 752 + const char *buf, size_t len) 753 + { 754 + if (strtobool(buf, &reipl_ccw_clear) < 0) 755 + return -EINVAL; 756 + return len; 757 + } 758 + 759 + static struct kobj_attribute sys_reipl_ccw_clear_attr = 760 + __ATTR(clear, 0644, reipl_ccw_clear_show, 
reipl_ccw_clear_store); 761 + 765 762 static struct attribute *reipl_ccw_attrs_vm[] = { 766 763 &sys_reipl_ccw_device_attr.attr, 767 764 &sys_reipl_ccw_loadparm_attr.attr, 768 765 &sys_reipl_ccw_vmparm_attr.attr, 766 + &sys_reipl_ccw_clear_attr.attr, 769 767 NULL, 770 768 }; 771 769 772 770 static struct attribute *reipl_ccw_attrs_lpar[] = { 773 771 &sys_reipl_ccw_device_attr.attr, 774 772 &sys_reipl_ccw_loadparm_attr.attr, 773 + &sys_reipl_ccw_clear_attr.attr, 775 774 NULL, 776 775 }; 777 776 ··· 933 892 switch (reipl_type) { 934 893 case IPL_TYPE_CCW: 935 894 diag308(DIAG308_SET, reipl_block_ccw); 936 - diag308(DIAG308_LOAD_CLEAR, NULL); 895 + if (reipl_ccw_clear) 896 + diag308(DIAG308_LOAD_CLEAR, NULL); 897 + else 898 + diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); 937 899 break; 938 900 case IPL_TYPE_FCP: 939 901 diag308(DIAG308_SET, reipl_block_fcp); 940 - diag308(DIAG308_LOAD_CLEAR, NULL); 902 + if (reipl_fcp_clear) 903 + diag308(DIAG308_LOAD_CLEAR, NULL); 904 + else 905 + diag308(DIAG308_LOAD_NORMAL, NULL); 941 906 break; 942 907 case IPL_TYPE_NSS: 943 908 diag308(DIAG308_SET, reipl_block_nss); ··· 1055 1008 } 1056 1009 1057 1010 rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); 1058 - if (rc) { 1059 - kset_unregister(reipl_fcp_kset); 1060 - free_page((unsigned long) reipl_block_fcp); 1061 - return rc; 1062 - } 1011 + if (rc) 1012 + goto out1; 1013 + 1014 + if (test_facility(141)) { 1015 + rc = sysfs_create_file(&reipl_fcp_kset->kobj, 1016 + &sys_reipl_fcp_clear_attr.attr); 1017 + if (rc) 1018 + goto out2; 1019 + } else 1020 + reipl_fcp_clear = true; 1063 1021 1064 1022 if (ipl_info.type == IPL_TYPE_FCP) { 1065 1023 memcpy(reipl_block_fcp, &ipl_block, sizeof(ipl_block)); ··· 1084 1032 } 1085 1033 reipl_capabilities |= IPL_TYPE_FCP; 1086 1034 return 0; 1035 + 1036 + out2: 1037 + sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); 1038 + out1: 1039 + kset_unregister(reipl_fcp_kset); 1040 + free_page((unsigned long) 
reipl_block_fcp); 1041 + return rc; 1087 1042 } 1088 1043 1089 1044 static int __init reipl_type_init(void)
+11 -15
arch/s390/kernel/irq.c
··· 95 95 {.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"}, 96 96 }; 97 97 98 - void __init init_IRQ(void) 99 - { 100 - BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS); 101 - init_cio_interrupts(); 102 - init_airq_interrupts(); 103 - init_ext_interrupts(); 104 - } 105 - 106 98 void do_IRQ(struct pt_regs *regs, int irq) 107 99 { 108 100 struct pt_regs *old_regs; ··· 286 294 return IRQ_HANDLED; 287 295 } 288 296 289 - static struct irqaction external_interrupt = { 290 - .name = "EXT", 291 - .handler = do_ext_interrupt, 292 - }; 293 - 294 - void __init init_ext_interrupts(void) 297 + static void __init init_ext_interrupts(void) 295 298 { 296 299 int idx; 297 300 ··· 295 308 296 309 irq_set_chip_and_handler(EXT_INTERRUPT, 297 310 &dummy_irq_chip, handle_percpu_irq); 298 - setup_irq(EXT_INTERRUPT, &external_interrupt); 311 + if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL)) 312 + panic("Failed to register EXT interrupt\n"); 313 + } 314 + 315 + void __init init_IRQ(void) 316 + { 317 + BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS); 318 + init_cio_interrupts(); 319 + init_airq_interrupts(); 320 + init_ext_interrupts(); 299 321 } 300 322 301 323 static DEFINE_SPINLOCK(irq_subclass_lock);
-31
arch/s390/kernel/machine_kexec.c
··· 14 14 #include <linux/reboot.h> 15 15 #include <linux/ftrace.h> 16 16 #include <linux/debug_locks.h> 17 - #include <linux/suspend.h> 18 17 #include <asm/cio.h> 19 18 #include <asm/setup.h> 20 19 #include <asm/pgtable.h> ··· 36 37 extern const unsigned long long relocate_kernel_len; 37 38 38 39 #ifdef CONFIG_CRASH_DUMP 39 - 40 - /* 41 - * PM notifier callback for kdump 42 - */ 43 - static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, 44 - void *ptr) 45 - { 46 - switch (action) { 47 - case PM_SUSPEND_PREPARE: 48 - case PM_HIBERNATION_PREPARE: 49 - if (kexec_crash_image) 50 - arch_kexec_unprotect_crashkres(); 51 - break; 52 - case PM_POST_SUSPEND: 53 - case PM_POST_HIBERNATION: 54 - if (kexec_crash_image) 55 - arch_kexec_protect_crashkres(); 56 - break; 57 - default: 58 - return NOTIFY_DONE; 59 - } 60 - return NOTIFY_OK; 61 - } 62 - 63 - static int __init machine_kdump_pm_init(void) 64 - { 65 - pm_notifier(machine_kdump_pm_cb, 0); 66 - return 0; 67 - } 68 - arch_initcall(machine_kdump_pm_init); 69 40 70 41 /* 71 42 * Reset the system, copy boot CPU registers to absolute zero,
+122 -1
arch/s390/kernel/perf_cpum_cf_events.c
··· 238 238 CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 239 239 CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 240 240 241 + CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080); 242 + CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081); 243 + CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082); 244 + CPUMF_EVENT_ATTR(cf_z15, DTLB2_HPAGE_WRITES, 0x0083); 245 + CPUMF_EVENT_ATTR(cf_z15, DTLB2_GPAGE_WRITES, 0x0084); 246 + CPUMF_EVENT_ATTR(cf_z15, L1D_L2D_SOURCED_WRITES, 0x0085); 247 + CPUMF_EVENT_ATTR(cf_z15, ITLB2_WRITES, 0x0086); 248 + CPUMF_EVENT_ATTR(cf_z15, ITLB2_MISSES, 0x0087); 249 + CPUMF_EVENT_ATTR(cf_z15, L1I_L2I_SOURCED_WRITES, 0x0088); 250 + CPUMF_EVENT_ATTR(cf_z15, TLB2_PTE_WRITES, 0x0089); 251 + CPUMF_EVENT_ATTR(cf_z15, TLB2_CRSTE_WRITES, 0x008a); 252 + CPUMF_EVENT_ATTR(cf_z15, TLB2_ENGINES_BUSY, 0x008b); 253 + CPUMF_EVENT_ATTR(cf_z15, TX_C_TEND, 0x008c); 254 + CPUMF_EVENT_ATTR(cf_z15, TX_NC_TEND, 0x008d); 255 + CPUMF_EVENT_ATTR(cf_z15, L1C_TLB2_MISSES, 0x008f); 256 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090); 257 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_MEMORY_SOURCED_WRITES, 0x0091); 258 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0092); 259 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES, 0x0093); 260 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x0094); 261 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x0095); 262 + CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES, 0x0096); 263 + CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x0097); 264 + CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x0098); 265 + CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES, 0x0099); 266 + CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x009a); 267 + CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x009b); 268 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x009c); 269 + 
CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L4_SOURCED_WRITES, 0x009d); 270 + CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_RO, 0x009e); 271 + CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2); 272 + CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_MEMORY_SOURCED_WRITES, 0x00a3); 273 + CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a4); 274 + CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES, 0x00a5); 275 + CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x00a6); 276 + CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x00a7); 277 + CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES, 0x00a8); 278 + CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x00a9); 279 + CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x00aa); 280 + CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES, 0x00ab); 281 + CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x00ac); 282 + CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x00ad); 283 + CPUMF_EVENT_ATTR(cf_z15, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00ae); 284 + CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af); 285 + CPUMF_EVENT_ATTR(cf_z15, BCD_DFP_EXECUTION_SLOTS, 0x00e0); 286 + CPUMF_EVENT_ATTR(cf_z15, VX_BCD_EXECUTION_SLOTS, 0x00e1); 287 + CPUMF_EVENT_ATTR(cf_z15, DECIMAL_INSTRUCTIONS, 0x00e2); 288 + CPUMF_EVENT_ATTR(cf_z15, LAST_HOST_TRANSLATIONS, 0x00e8); 289 + CPUMF_EVENT_ATTR(cf_z15, TX_NC_TABORT, 0x00f3); 290 + CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_NO_SPECIAL, 0x00f4); 291 + CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5); 292 + CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7); 293 + CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc); 294 + CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); 295 + CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109); 296 + CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 297 + CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 298 + 241 299 static struct 
attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = { 242 300 CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES), 243 301 CPUMF_EVENT_PTR(cf_fvn1, INSTRUCTIONS), ··· 574 516 NULL, 575 517 }; 576 518 519 + static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = { 520 + CPUMF_EVENT_PTR(cf_z15, L1D_RO_EXCL_WRITES), 521 + CPUMF_EVENT_PTR(cf_z15, DTLB2_WRITES), 522 + CPUMF_EVENT_PTR(cf_z15, DTLB2_MISSES), 523 + CPUMF_EVENT_PTR(cf_z15, DTLB2_HPAGE_WRITES), 524 + CPUMF_EVENT_PTR(cf_z15, DTLB2_GPAGE_WRITES), 525 + CPUMF_EVENT_PTR(cf_z15, L1D_L2D_SOURCED_WRITES), 526 + CPUMF_EVENT_PTR(cf_z15, ITLB2_WRITES), 527 + CPUMF_EVENT_PTR(cf_z15, ITLB2_MISSES), 528 + CPUMF_EVENT_PTR(cf_z15, L1I_L2I_SOURCED_WRITES), 529 + CPUMF_EVENT_PTR(cf_z15, TLB2_PTE_WRITES), 530 + CPUMF_EVENT_PTR(cf_z15, TLB2_CRSTE_WRITES), 531 + CPUMF_EVENT_PTR(cf_z15, TLB2_ENGINES_BUSY), 532 + CPUMF_EVENT_PTR(cf_z15, TX_C_TEND), 533 + CPUMF_EVENT_PTR(cf_z15, TX_NC_TEND), 534 + CPUMF_EVENT_PTR(cf_z15, L1C_TLB2_MISSES), 535 + CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES), 536 + CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_MEMORY_SOURCED_WRITES), 537 + CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_IV), 538 + CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES), 539 + CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES), 540 + CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV), 541 + CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES), 542 + CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES), 543 + CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV), 544 + CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES), 545 + CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES), 546 + CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV), 547 + CPUMF_EVENT_PTR(cf_z15, L1D_ONDRAWER_L4_SOURCED_WRITES), 548 + CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L4_SOURCED_WRITES), 549 + CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_RO), 550 + CPUMF_EVENT_PTR(cf_z15, 
L1I_ONCHIP_L3_SOURCED_WRITES), 551 + CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_MEMORY_SOURCED_WRITES), 552 + CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES_IV), 553 + CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES), 554 + CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES), 555 + CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV), 556 + CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES), 557 + CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES), 558 + CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV), 559 + CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES), 560 + CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES), 561 + CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV), 562 + CPUMF_EVENT_PTR(cf_z15, L1I_ONDRAWER_L4_SOURCED_WRITES), 563 + CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L4_SOURCED_WRITES), 564 + CPUMF_EVENT_PTR(cf_z15, BCD_DFP_EXECUTION_SLOTS), 565 + CPUMF_EVENT_PTR(cf_z15, VX_BCD_EXECUTION_SLOTS), 566 + CPUMF_EVENT_PTR(cf_z15, DECIMAL_INSTRUCTIONS), 567 + CPUMF_EVENT_PTR(cf_z15, LAST_HOST_TRANSLATIONS), 568 + CPUMF_EVENT_PTR(cf_z15, TX_NC_TABORT), 569 + CPUMF_EVENT_PTR(cf_z15, TX_C_TABORT_NO_SPECIAL), 570 + CPUMF_EVENT_PTR(cf_z15, TX_C_TABORT_SPECIAL), 571 + CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS), 572 + CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES), 573 + CPUMF_EVENT_PTR(cf_z15, DFLT_CC), 574 + CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR), 575 + CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE), 576 + CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE), 577 + NULL, 578 + }; 579 + 577 580 /* END: CPUM_CF COUNTER DEFINITIONS ===================================== */ 578 581 579 582 static struct attribute_group cpumcf_pmu_events_group = { ··· 743 624 break; 744 625 case 0x3906: 745 626 case 0x3907: 627 + model = cpumcf_z14_pmu_event_attr; 628 + break; 746 629 case 0x8561: 747 630 case 0x8562: 748 - model = cpumcf_z14_pmu_event_attr; 631 + model = cpumcf_z15_pmu_event_attr; 749 632 break; 750 
633 default: 751 634 model = none;
+24 -20
arch/s390/kernel/perf_cpum_sf.c
··· 372 372 373 373 static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) 374 374 { 375 - unsigned long n_sdb, freq, factor; 375 + unsigned long n_sdb, freq; 376 376 size_t sample_size; 377 377 378 378 /* Calculate sampling buffers using 4K pages 379 379 * 380 - * 1. Determine the sample data size which depends on the used 381 - * sampling functions, for example, basic-sampling or 382 - * basic-sampling with diagnostic-sampling. 380 + * 1. The sampling size is 32 bytes for basic sampling. This size 381 + * is the same for all machine types. Diagnostic 382 + * sampling uses auxlilary data buffer setup which provides the 383 + * memory for SDBs using linux common code auxiliary trace 384 + * setup. 383 385 * 384 - * 2. Use the sampling frequency as input. The sampling buffer is 385 - * designed for almost one second. This can be adjusted through 386 - * the "factor" variable. 387 - * In any case, alloc_sampling_buffer() sets the Alert Request 386 + * 2. Function alloc_sampling_buffer() sets the Alert Request 388 387 * Control indicator to trigger a measurement-alert to harvest 389 - * sample-data-blocks (sdb). 388 + * sample-data-blocks (SDB). This is done per SDB. This 389 + * measurement alert interrupt fires quick enough to handle 390 + * one SDB, on very high frequency and work loads there might 391 + * be 2 to 3 SBDs available for sample processing. 392 + * Currently there is no need for setup alert request on every 393 + * n-th page. This is counterproductive as one IRQ triggers 394 + * a very high number of samples to be processed at one IRQ. 390 395 * 391 - * 3. Compute the number of sample-data-blocks and ensure a minimum 392 - * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not 393 - * exceed a "calculated" maximum. The symbolic maximum is 394 - * designed for basic-sampling only and needs to be increased if 395 - * diagnostic-sampling is active. 396 - * See also the remarks for these symbolic constants. 396 + * 3. 
Use the sampling frequency as input. 397 + * Compute the number of SDBs and ensure a minimum 398 + * of CPUM_SF_MIN_SDB. Depending on frequency add some more 399 + * SDBs to handle a higher sampling rate. 400 + * Use a minimum of CPUM_SF_MIN_SDB and allow for 100 samples 401 + * (one SDB) for every 10000 HZ frequency increment. 397 402 * 398 403 * 4. Compute the number of sample-data-block-tables (SDBT) and 399 404 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up ··· 406 401 */ 407 402 sample_size = sizeof(struct hws_basic_entry); 408 403 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); 409 - factor = 1; 410 - n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); 411 - if (n_sdb < CPUM_SF_MIN_SDB) 412 - n_sdb = CPUM_SF_MIN_SDB; 404 + n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000); 413 405 414 406 /* If there is already a sampling buffer allocated, it is very likely 415 407 * that the sampling facility is enabled too. If the event to be ··· 1578 1576 unsigned long range = 0, size; 1579 1577 unsigned long long overflow = 0; 1580 1578 struct perf_output_handle *handle = &cpuhw->handle; 1579 + unsigned long num_sdb; 1581 1580 1582 1581 aux = perf_get_aux(handle); 1583 1582 if (WARN_ON_ONCE(!aux)) ··· 1590 1587 size >> PAGE_SHIFT); 1591 1588 perf_aux_output_end(handle, size); 1592 1589 1590 + num_sdb = aux->sfb.num_sdb; 1593 1591 while (!done) { 1594 1592 /* Get an output handle */ 1595 1593 aux = perf_aux_output_begin(handle, cpuhw->event); 1596 1594 if (handle->size == 0) { 1597 1595 pr_err("The AUX buffer with %lu pages for the " 1598 1596 "diagnostic-sampling mode is full\n", 1599 - aux->sfb.num_sdb); 1597 + num_sdb); 1600 1598 debug_sprintf_event(sfdbg, 1, 1601 1599 "%s: AUX buffer used up\n", 1602 1600 __func__);
+1
arch/s390/kernel/process.c
··· 106 106 p->thread.system_timer = 0; 107 107 p->thread.hardirq_timer = 0; 108 108 p->thread.softirq_timer = 0; 109 + p->thread.last_break = 1; 109 110 110 111 frame->sf.back_chain = 0; 111 112 /* new return point is ret_from_fork */
+31 -3
arch/s390/kernel/processor.c
··· 151 151 } 152 152 } 153 153 154 + static void show_cpu_topology(struct seq_file *m, unsigned long n) 155 + { 156 + #ifdef CONFIG_SCHED_TOPOLOGY 157 + seq_printf(m, "physical id : %d\n", topology_physical_package_id(n)); 158 + seq_printf(m, "core id : %d\n", topology_core_id(n)); 159 + seq_printf(m, "book id : %d\n", topology_book_id(n)); 160 + seq_printf(m, "drawer id : %d\n", topology_drawer_id(n)); 161 + seq_printf(m, "dedicated : %d\n", topology_cpu_dedicated(n)); 162 + seq_printf(m, "address : %d\n", smp_cpu_get_cpu_address(n)); 163 + seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n))); 164 + seq_printf(m, "cpu cores : %d\n", topology_booted_cores(n)); 165 + #endif /* CONFIG_SCHED_TOPOLOGY */ 166 + } 167 + 168 + static void show_cpu_ids(struct seq_file *m, unsigned long n) 169 + { 170 + struct cpuid *id = &per_cpu(cpu_info.cpu_id, n); 171 + 172 + seq_printf(m, "version : %02X\n", id->version); 173 + seq_printf(m, "identification : %06X\n", id->ident); 174 + seq_printf(m, "machine : %04X\n", id->machine); 175 + } 176 + 154 177 static void show_cpu_mhz(struct seq_file *m, unsigned long n) 155 178 { 156 179 struct cpu_info *c = per_cpu_ptr(&cpu_info, n); 157 180 181 + if (!machine_has_cpu_mhz) 182 + return; 158 183 seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic); 159 184 seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static); 160 185 } ··· 190 165 static int show_cpuinfo(struct seq_file *m, void *v) 191 166 { 192 167 unsigned long n = (unsigned long) v - 1; 168 + unsigned long first = cpumask_first(cpu_online_mask); 193 169 194 - if (!n) 170 + if (n == first) 195 171 show_cpu_summary(m, v); 196 - if (!machine_has_cpu_mhz) 197 - return 0; 198 172 seq_printf(m, "\ncpu number : %ld\n", n); 173 + show_cpu_topology(m, n); 174 + show_cpu_ids(m, n); 199 175 show_cpu_mhz(m, n); 200 176 return 0; 201 177 } ··· 205 179 { 206 180 if (*pos) 207 181 *pos = cpumask_next(*pos - 1, cpu_online_mask); 182 + else 183 + *pos = 
cpumask_first(cpu_online_mask); 208 184 return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL; 209 185 } 210 186
+4
arch/s390/kernel/setup.c
··· 73 73 #include <asm/nospec-branch.h> 74 74 #include <asm/mem_detect.h> 75 75 #include <asm/uv.h> 76 + #include <asm/asm-offsets.h> 76 77 #include "entry.h" 77 78 78 79 /* ··· 447 446 lc->spinlock_index = 0; 448 447 arch_spin_lock_setup(0); 449 448 lc->br_r1_trampoline = 0x07f1; /* br %r1 */ 449 + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 450 + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 450 451 451 452 set_prefix((u32)(unsigned long) lc); 452 453 lowcore_ptr[0] = lc; ··· 792 789 memblock_physmem_add(start, end - start); 793 790 } 794 791 memblock_set_bottom_up(false); 792 + memblock_set_node(0, ULONG_MAX, &memblock.memory, 0); 795 793 memblock_dump_all(); 796 794 } 797 795
+2 -2
arch/s390/kernel/signal.c
··· 487 487 regs->gprs[2] = -EINTR; 488 488 break; 489 489 } 490 - /* fallthrough */ 490 + fallthrough; 491 491 case -ERESTARTNOINTR: 492 492 regs->gprs[2] = regs->orig_gpr2; 493 493 regs->psw.addr = ··· 514 514 case -ERESTART_RESTARTBLOCK: 515 515 /* Restart with sys_restart_syscall */ 516 516 regs->int_code = __NR_restart_syscall; 517 - /* fallthrough */ 517 + fallthrough; 518 518 case -ERESTARTNOHAND: 519 519 case -ERESTARTSYS: 520 520 case -ERESTARTNOINTR:
+11 -2
arch/s390/kernel/smp.c
··· 212 212 lc->spinlock_lockval = arch_spin_lockval(cpu); 213 213 lc->spinlock_index = 0; 214 214 lc->br_r1_trampoline = 0x07f1; /* br %r1 */ 215 + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 216 + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 215 217 if (nmi_alloc_per_cpu(lc)) 216 218 goto out_async; 217 219 if (vdso_alloc_per_cpu(lc)) ··· 703 701 return pcpu_devices[cpu].polarization; 704 702 } 705 703 704 + int smp_cpu_get_cpu_address(int cpu) 705 + { 706 + return pcpu_devices[cpu].address; 707 + } 708 + 706 709 static void __ref smp_get_core_info(struct sclp_core_info *info, int early) 707 710 { 708 711 static int use_sigp_detection; ··· 858 851 init_cpu_timer(); 859 852 vtime_init(); 860 853 pfault_init(); 861 - notify_cpu_starting(smp_processor_id()); 854 + notify_cpu_starting(cpu); 862 855 if (topology_cpu_dedicated(cpu)) 863 856 set_cpu_flag(CIF_DEDICATED_CPU); 864 857 else 865 858 clear_cpu_flag(CIF_DEDICATED_CPU); 866 - set_cpu_online(smp_processor_id(), true); 859 + set_cpu_online(cpu, true); 860 + update_cpu_masks(); 867 861 inc_irq_stat(CPU_RST); 868 862 local_irq_enable(); 869 863 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); ··· 936 928 /* Handle possible pending IPIs */ 937 929 smp_handle_ext_call(); 938 930 set_cpu_online(smp_processor_id(), false); 931 + update_cpu_masks(); 939 932 /* Disable pseudo page faults on this cpu. */ 940 933 pfault_fini(); 941 934 /* Disable interrupt sources via control register. */
-240
arch/s390/kernel/suspend.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Suspend support specific for s390. 4 - * 5 - * Copyright IBM Corp. 2009 6 - * 7 - * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> 8 - */ 9 - 10 - #include <linux/pfn.h> 11 - #include <linux/suspend.h> 12 - #include <linux/mm.h> 13 - #include <linux/pci.h> 14 - #include <asm/ctl_reg.h> 15 - #include <asm/ipl.h> 16 - #include <asm/cio.h> 17 - #include <asm/sections.h> 18 - #include "entry.h" 19 - 20 - /* 21 - * The restore of the saved pages in an hibernation image will set 22 - * the change and referenced bits in the storage key for each page. 23 - * Overindication of the referenced bits after an hibernation cycle 24 - * does not cause any harm but the overindication of the change bits 25 - * would cause trouble. 26 - * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each 27 - * page to the most significant byte of the associated page frame 28 - * number in the hibernation image. 29 - */ 30 - 31 - /* 32 - * Key storage is allocated as a linked list of pages. 33 - * The size of the keys array is (PAGE_SIZE - sizeof(long)) 34 - */ 35 - struct page_key_data { 36 - struct page_key_data *next; 37 - unsigned char data[]; 38 - }; 39 - 40 - #define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *)) 41 - 42 - static struct page_key_data *page_key_data; 43 - static struct page_key_data *page_key_rp, *page_key_wp; 44 - static unsigned long page_key_rx, page_key_wx; 45 - unsigned long suspend_zero_pages; 46 - 47 - /* 48 - * For each page in the hibernation image one additional byte is 49 - * stored in the most significant byte of the page frame number. 50 - * On suspend no additional memory is required but on resume the 51 - * keys need to be memorized until the page data has been restored. 52 - * Only then can the storage keys be set to their old state. 
53 - */ 54 - unsigned long page_key_additional_pages(unsigned long pages) 55 - { 56 - return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); 57 - } 58 - 59 - /* 60 - * Free page_key_data list of arrays. 61 - */ 62 - void page_key_free(void) 63 - { 64 - struct page_key_data *pkd; 65 - 66 - while (page_key_data) { 67 - pkd = page_key_data; 68 - page_key_data = pkd->next; 69 - free_page((unsigned long) pkd); 70 - } 71 - } 72 - 73 - /* 74 - * Allocate page_key_data list of arrays with enough room to store 75 - * one byte for each page in the hibernation image. 76 - */ 77 - int page_key_alloc(unsigned long pages) 78 - { 79 - struct page_key_data *pk; 80 - unsigned long size; 81 - 82 - size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); 83 - while (size--) { 84 - pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL); 85 - if (!pk) { 86 - page_key_free(); 87 - return -ENOMEM; 88 - } 89 - pk->next = page_key_data; 90 - page_key_data = pk; 91 - } 92 - page_key_rp = page_key_wp = page_key_data; 93 - page_key_rx = page_key_wx = 0; 94 - return 0; 95 - } 96 - 97 - /* 98 - * Save the storage key into the upper 8 bits of the page frame number. 99 - */ 100 - void page_key_read(unsigned long *pfn) 101 - { 102 - struct page *page; 103 - unsigned long addr; 104 - unsigned char key; 105 - 106 - page = pfn_to_page(*pfn); 107 - addr = (unsigned long) page_address(page); 108 - key = (unsigned char) page_get_storage_key(addr) & 0x7f; 109 - if (arch_test_page_nodat(page)) 110 - key |= 0x80; 111 - *(unsigned char *) pfn = key; 112 - } 113 - 114 - /* 115 - * Extract the storage key from the upper 8 bits of the page frame number 116 - * and store it in the page_key_data list of arrays. 
117 - */ 118 - void page_key_memorize(unsigned long *pfn) 119 - { 120 - page_key_wp->data[page_key_wx] = *(unsigned char *) pfn; 121 - *(unsigned char *) pfn = 0; 122 - if (++page_key_wx < PAGE_KEY_DATA_SIZE) 123 - return; 124 - page_key_wp = page_key_wp->next; 125 - page_key_wx = 0; 126 - } 127 - 128 - /* 129 - * Get the next key from the page_key_data list of arrays and set the 130 - * storage key of the page referred by @address. If @address refers to 131 - * a "safe" page the swsusp_arch_resume code will transfer the storage 132 - * key from the buffer page to the original page. 133 - */ 134 - void page_key_write(void *address) 135 - { 136 - struct page *page; 137 - unsigned char key; 138 - 139 - key = page_key_rp->data[page_key_rx]; 140 - page_set_storage_key((unsigned long) address, key & 0x7f, 0); 141 - page = virt_to_page(address); 142 - if (key & 0x80) 143 - arch_set_page_nodat(page, 0); 144 - else 145 - arch_set_page_dat(page, 0); 146 - if (++page_key_rx >= PAGE_KEY_DATA_SIZE) 147 - return; 148 - page_key_rp = page_key_rp->next; 149 - page_key_rx = 0; 150 - } 151 - 152 - int pfn_is_nosave(unsigned long pfn) 153 - { 154 - unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); 155 - unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); 156 - unsigned long end_rodata_pfn = PFN_DOWN(__pa(__end_rodata)) - 1; 157 - unsigned long stext_pfn = PFN_DOWN(__pa(_stext)); 158 - 159 - /* Always save lowcore pages (LC protection might be enabled). */ 160 - if (pfn <= LC_PAGES) 161 - return 0; 162 - if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) 163 - return 1; 164 - /* Skip memory holes and read-only pages (DCSS, ...). 
*/ 165 - if (pfn >= stext_pfn && pfn <= end_rodata_pfn) 166 - return 0; 167 - if (tprot(PFN_PHYS(pfn))) 168 - return 1; 169 - return 0; 170 - } 171 - 172 - /* 173 - * PM notifier callback for suspend 174 - */ 175 - static int suspend_pm_cb(struct notifier_block *nb, unsigned long action, 176 - void *ptr) 177 - { 178 - switch (action) { 179 - case PM_SUSPEND_PREPARE: 180 - case PM_HIBERNATION_PREPARE: 181 - suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER); 182 - if (!suspend_zero_pages) 183 - return NOTIFY_BAD; 184 - break; 185 - case PM_POST_SUSPEND: 186 - case PM_POST_HIBERNATION: 187 - free_pages(suspend_zero_pages, LC_ORDER); 188 - break; 189 - default: 190 - return NOTIFY_DONE; 191 - } 192 - return NOTIFY_OK; 193 - } 194 - 195 - static int __init suspend_pm_init(void) 196 - { 197 - pm_notifier(suspend_pm_cb, 0); 198 - return 0; 199 - } 200 - arch_initcall(suspend_pm_init); 201 - 202 - void save_processor_state(void) 203 - { 204 - /* swsusp_arch_suspend() actually saves all cpu register contents. 205 - * Machine checks must be disabled since swsusp_arch_suspend() stores 206 - * register contents to their lowcore save areas. That's the same 207 - * place where register contents on machine checks would be saved. 208 - * To avoid register corruption disable machine checks. 209 - * We must also disable machine checks in the new psw mask for 210 - * program checks, since swsusp_arch_suspend() may generate program 211 - * checks. Disabling machine checks for all other new psw masks is 212 - * just paranoia. 
213 - */ 214 - local_mcck_disable(); 215 - /* Disable lowcore protection */ 216 - __ctl_clear_bit(0,28); 217 - S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK; 218 - S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK; 219 - S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK; 220 - S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK; 221 - } 222 - 223 - void restore_processor_state(void) 224 - { 225 - S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK; 226 - S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK; 227 - S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK; 228 - S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK; 229 - /* Enable lowcore protection */ 230 - __ctl_set_bit(0,28); 231 - local_mcck_enable(); 232 - } 233 - 234 - /* Called at the end of swsusp_arch_resume */ 235 - void s390_early_resume(void) 236 - { 237 - lgr_info_log(); 238 - channel_subsystem_reinit(); 239 - zpci_rescan(); 240 - }
-276
arch/s390/kernel/swsusp.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * S390 64-bit swsusp implementation 4 - * 5 - * Copyright IBM Corp. 2009 6 - * 7 - * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> 8 - * Michael Holzheu <holzheu@linux.vnet.ibm.com> 9 - */ 10 - 11 - #include <linux/linkage.h> 12 - #include <asm/page.h> 13 - #include <asm/ptrace.h> 14 - #include <asm/thread_info.h> 15 - #include <asm/asm-offsets.h> 16 - #include <asm/nospec-insn.h> 17 - #include <asm/sigp.h> 18 - 19 - /* 20 - * Save register context in absolute 0 lowcore and call swsusp_save() to 21 - * create in-memory kernel image. The context is saved in the designated 22 - * "store status" memory locations (see POP). 23 - * We return from this function twice. The first time during the suspend to 24 - * disk process. The second time via the swsusp_arch_resume() function 25 - * (see below) in the resume process. 26 - * This function runs with disabled interrupts. 27 - */ 28 - GEN_BR_THUNK %r14 29 - 30 - .section .text 31 - ENTRY(swsusp_arch_suspend) 32 - lg %r1,__LC_NODAT_STACK 33 - stmg %r6,%r15,__SF_GPRS(%r1) 34 - aghi %r1,-STACK_FRAME_OVERHEAD 35 - stg %r15,__SF_BACKCHAIN(%r1) 36 - lgr %r15,%r1 37 - 38 - /* Store FPU registers */ 39 - brasl %r14,save_fpu_regs 40 - 41 - /* Deactivate DAT */ 42 - stnsm __SF_EMPTY(%r15),0xfb 43 - 44 - /* Store prefix register on stack */ 45 - stpx __SF_EMPTY(%r15) 46 - 47 - /* Save prefix register contents for lowcore copy */ 48 - llgf %r10,__SF_EMPTY(%r15) 49 - 50 - /* Get pointer to save area */ 51 - lghi %r1,0x1000 52 - 53 - /* Save CPU address */ 54 - stap __LC_EXT_CPU_ADDR(%r0) 55 - 56 - /* Store registers */ 57 - mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ 58 - stam %a0,%a15,0x340(%r1) /* store access registers */ 59 - stctg %c0,%c15,0x380(%r1) /* store control registers */ 60 - stmg %r0,%r15,0x280(%r1) /* store general registers */ 61 - 62 - stpt 0x328(%r1) /* store timer */ 63 - stck __SF_EMPTY(%r15) /* store clock */ 64 - stckc 
0x330(%r1) /* store clock comparator */ 65 - 66 - /* Update cputime accounting before going to sleep */ 67 - lg %r0,__LC_LAST_UPDATE_TIMER 68 - slg %r0,0x328(%r1) 69 - alg %r0,__LC_SYSTEM_TIMER 70 - stg %r0,__LC_SYSTEM_TIMER 71 - mvc __LC_LAST_UPDATE_TIMER(8),0x328(%r1) 72 - lg %r0,__LC_LAST_UPDATE_CLOCK 73 - slg %r0,__SF_EMPTY(%r15) 74 - alg %r0,__LC_STEAL_TIMER 75 - stg %r0,__LC_STEAL_TIMER 76 - mvc __LC_LAST_UPDATE_CLOCK(8),__SF_EMPTY(%r15) 77 - 78 - /* Activate DAT */ 79 - stosm __SF_EMPTY(%r15),0x04 80 - 81 - /* Set prefix page to zero */ 82 - xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) 83 - spx __SF_EMPTY(%r15) 84 - 85 - /* Save absolute zero pages */ 86 - larl %r2,suspend_zero_pages 87 - lg %r2,0(%r2) 88 - lghi %r4,0 89 - lghi %r3,2*PAGE_SIZE 90 - lghi %r5,2*PAGE_SIZE 91 - 1: mvcle %r2,%r4,0 92 - jo 1b 93 - 94 - /* Copy lowcore to absolute zero lowcore */ 95 - lghi %r2,0 96 - lgr %r4,%r10 97 - lghi %r3,2*PAGE_SIZE 98 - lghi %r5,2*PAGE_SIZE 99 - 1: mvcle %r2,%r4,0 100 - jo 1b 101 - 102 - /* Save image */ 103 - brasl %r14,swsusp_save 104 - 105 - /* Restore prefix register and return */ 106 - lghi %r1,0x1000 107 - spx 0x318(%r1) 108 - lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 109 - lghi %r2,0 110 - BR_EX %r14 111 - ENDPROC(swsusp_arch_suspend) 112 - 113 - /* 114 - * Restore saved memory image to correct place and restore register context. 115 - * Then we return to the function that called swsusp_arch_suspend(). 116 - * swsusp_arch_resume() runs with disabled interrupts. 
117 - */ 118 - ENTRY(swsusp_arch_resume) 119 - stmg %r6,%r15,__SF_GPRS(%r15) 120 - lgr %r1,%r15 121 - aghi %r15,-STACK_FRAME_OVERHEAD 122 - stg %r1,__SF_BACKCHAIN(%r15) 123 - 124 - /* Make all free pages stable */ 125 - lghi %r2,1 126 - brasl %r14,arch_set_page_states 127 - 128 - /* Set prefix page to zero */ 129 - xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) 130 - spx __SF_EMPTY(%r15) 131 - 132 - /* Deactivate DAT */ 133 - stnsm __SF_EMPTY(%r15),0xfb 134 - 135 - /* Restore saved image */ 136 - larl %r1,restore_pblist 137 - lg %r1,0(%r1) 138 - ltgr %r1,%r1 139 - jz 2f 140 - 0: 141 - lg %r2,8(%r1) 142 - lg %r4,0(%r1) 143 - iske %r0,%r4 144 - lghi %r3,PAGE_SIZE 145 - lghi %r5,PAGE_SIZE 146 - 1: 147 - mvcle %r2,%r4,0 148 - jo 1b 149 - lg %r2,8(%r1) 150 - sske %r0,%r2 151 - lg %r1,16(%r1) 152 - ltgr %r1,%r1 153 - jnz 0b 154 - 2: 155 - ptlb /* flush tlb */ 156 - 157 - /* Reset System */ 158 - larl %r1,.Lnew_pgm_check_psw 159 - epsw %r2,%r3 160 - stm %r2,%r3,0(%r1) 161 - mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1) 162 - larl %r1,__swsusp_reset_dma 163 - lg %r1,0(%r1) 164 - BASR_EX %r14,%r1 165 - larl %r1,smp_cpu_mt_shift 166 - icm %r1,15,0(%r1) 167 - jz smt_done 168 - llgfr %r1,%r1 169 - smt_loop: 170 - sigp %r1,%r0,SIGP_SET_MULTI_THREADING 171 - brc 8,smt_done /* accepted */ 172 - brc 2,smt_loop /* busy, try again */ 173 - smt_done: 174 - larl %r1,.Lnew_pgm_check_psw 175 - lpswe 0(%r1) 176 - pgm_check_entry: 177 - 178 - /* Switch to original suspend CPU */ 179 - larl %r1,.Lresume_cpu /* Resume CPU address: r2 */ 180 - stap 0(%r1) 181 - llgh %r2,0(%r1) 182 - llgh %r1,__LC_EXT_CPU_ADDR(%r0) /* Suspend CPU address: r1 */ 183 - cgr %r1,%r2 184 - je restore_registers /* r1 = r2 -> nothing to do */ 185 - larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 186 - mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 187 - 3: 188 - sigp %r9,%r1,SIGP_INITIAL_CPU_RESET /* sigp initial cpu reset */ 189 - brc 8,4f /* accepted */ 190 - brc 2,3b /* busy, try again */ 191 - 192 - /* Suspend CPU not available -> 
panic */ 193 - larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD 194 - larl %r2,.Lpanic_string 195 - brasl %r14,sclp_early_printk_force 196 - larl %r3,.Ldisabled_wait_31 197 - lpsw 0(%r3) 198 - 4: 199 - /* Switch to suspend CPU */ 200 - sigp %r9,%r1,SIGP_RESTART /* sigp restart to suspend CPU */ 201 - brc 2,4b /* busy, try again */ 202 - 5: 203 - sigp %r9,%r2,SIGP_STOP /* sigp stop to current resume CPU */ 204 - brc 2,5b /* busy, try again */ 205 - 6: j 6b 206 - 207 - restart_suspend: 208 - larl %r1,.Lresume_cpu 209 - llgh %r2,0(%r1) 210 - 7: 211 - sigp %r9,%r2,SIGP_SENSE /* sigp sense, wait for resume CPU */ 212 - brc 8,7b /* accepted, status 0, still running */ 213 - brc 2,7b /* busy, try again */ 214 - tmll %r9,0x40 /* Test if resume CPU is stopped */ 215 - jz 7b 216 - 217 - restore_registers: 218 - /* Restore registers */ 219 - lghi %r13,0x1000 /* %r1 = pointer to save area */ 220 - 221 - /* Ignore time spent in suspended state. */ 222 - llgf %r1,0x318(%r13) 223 - stck __LC_LAST_UPDATE_CLOCK(%r1) 224 - spt 0x328(%r13) /* reprogram timer */ 225 - //sckc 0x330(%r13) /* set clock comparator */ 226 - 227 - lctlg %c0,%c15,0x380(%r13) /* load control registers */ 228 - lam %a0,%a15,0x340(%r13) /* load access registers */ 229 - 230 - /* Load old stack */ 231 - lg %r15,0x2f8(%r13) 232 - 233 - /* Save prefix register */ 234 - mvc __SF_EMPTY(4,%r15),0x318(%r13) 235 - 236 - /* Restore absolute zero pages */ 237 - lghi %r2,0 238 - larl %r4,suspend_zero_pages 239 - lg %r4,0(%r4) 240 - lghi %r3,2*PAGE_SIZE 241 - lghi %r5,2*PAGE_SIZE 242 - 1: mvcle %r2,%r4,0 243 - jo 1b 244 - 245 - /* Restore prefix register */ 246 - spx __SF_EMPTY(%r15) 247 - 248 - /* Activate DAT */ 249 - stosm __SF_EMPTY(%r15),0x04 250 - 251 - /* Make all free pages unstable */ 252 - lghi %r2,0 253 - brasl %r14,arch_set_page_states 254 - 255 - /* Call arch specific early resume code */ 256 - brasl %r14,s390_early_resume 257 - 258 - /* Return 0 */ 259 - lmg %r6,%r15,STACK_FRAME_OVERHEAD + 
__SF_GPRS(%r15) 260 - lghi %r2,0 261 - BR_EX %r14 262 - ENDPROC(swsusp_arch_resume) 263 - 264 - .section .data..nosave,"aw",@progbits 265 - .align 8 266 - .Ldisabled_wait_31: 267 - .long 0x000a0000,0x00000000 268 - .Lpanic_string: 269 - .asciz "Resume not possible because suspend CPU is no longer available\n" 270 - .align 8 271 - .Lrestart_suspend_psw: 272 - .quad 0x0000000180000000,restart_suspend 273 - .Lnew_pgm_check_psw: 274 - .quad 0,pgm_check_entry 275 - .Lresume_cpu: 276 - .byte 0,0
+22 -12
arch/s390/kernel/topology.c
··· 26 26 #include <linux/nodemask.h> 27 27 #include <linux/node.h> 28 28 #include <asm/sysinfo.h> 29 - #include <asm/numa.h> 30 29 31 30 #define PTF_HORIZONTAL (0UL) 32 31 #define PTF_VERTICAL (1UL) ··· 62 63 struct cpu_topology_s390 cpu_topology[NR_CPUS]; 63 64 EXPORT_SYMBOL_GPL(cpu_topology); 64 65 65 - cpumask_t cpus_with_topology; 66 - 67 66 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 68 67 { 69 68 cpumask_t mask; ··· 83 86 cpumask_copy(&mask, cpu_present_mask); 84 87 break; 85 88 default: 86 - /* fallthrough */ 89 + fallthrough; 87 90 case TOPOLOGY_MODE_SINGLE: 88 91 cpumask_copy(&mask, cpumask_of(cpu)); 89 92 break; 90 93 } 94 + cpumask_and(&mask, &mask, cpu_online_mask); 91 95 return mask; 92 96 } 93 97 ··· 104 106 for (i = 0; i <= smp_cpu_mtid; i++) 105 107 if (cpu_present(cpu + i)) 106 108 cpumask_set_cpu(cpu + i, &mask); 109 + cpumask_and(&mask, &mask, cpu_online_mask); 107 110 return mask; 108 111 } 109 112 ··· 137 138 cpumask_set_cpu(lcpu + i, &drawer->mask); 138 139 cpumask_set_cpu(lcpu + i, &book->mask); 139 140 cpumask_set_cpu(lcpu + i, &socket->mask); 140 - cpumask_set_cpu(lcpu + i, &cpus_with_topology); 141 141 smp_cpu_set_polarization(lcpu + i, tl_core->pp); 142 142 } 143 143 } ··· 243 245 return rc; 244 246 } 245 247 246 - static void update_cpu_masks(void) 248 + void update_cpu_masks(void) 247 249 { 248 - struct cpu_topology_s390 *topo; 249 - int cpu, id; 250 + struct cpu_topology_s390 *topo, *topo_package, *topo_sibling; 251 + int cpu, sibling, pkg_first, smt_first, id; 250 252 251 253 for_each_possible_cpu(cpu) { 252 254 topo = &cpu_topology[cpu]; ··· 254 256 topo->core_mask = cpu_group_map(&socket_info, cpu); 255 257 topo->book_mask = cpu_group_map(&book_info, cpu); 256 258 topo->drawer_mask = cpu_group_map(&drawer_info, cpu); 259 + topo->booted_cores = 0; 257 260 if (topology_mode != TOPOLOGY_MODE_HW) { 258 261 id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 
0 : cpu; 259 262 topo->thread_id = cpu; ··· 262 263 topo->socket_id = id; 263 264 topo->book_id = id; 264 265 topo->drawer_id = id; 265 - if (cpu_present(cpu)) 266 - cpumask_set_cpu(cpu, &cpus_with_topology); 267 266 } 268 267 } 269 - numa_update_cpu_topology(); 268 + for_each_online_cpu(cpu) { 269 + topo = &cpu_topology[cpu]; 270 + pkg_first = cpumask_first(&topo->core_mask); 271 + topo_package = &cpu_topology[pkg_first]; 272 + if (cpu == pkg_first) { 273 + for_each_cpu(sibling, &topo->core_mask) { 274 + topo_sibling = &cpu_topology[sibling]; 275 + smt_first = cpumask_first(&topo_sibling->thread_mask); 276 + if (sibling == smt_first) 277 + topo_package->booted_cores++; 278 + } 279 + } else { 280 + topo->booted_cores = topo_package->booted_cores; 281 + } 282 + } 270 283 } 271 284 272 285 void store_topology(struct sysinfo_15_1_x *info) ··· 300 289 int rc = 0; 301 290 302 291 mutex_lock(&smp_cpu_state_mutex); 303 - cpumask_clear(&cpus_with_topology); 304 292 if (MACHINE_HAS_TOPOLOGY) { 305 293 rc = 1; 306 294 store_topology(info);
+1 -1
arch/s390/kernel/traps.c
··· 271 271 } 272 272 NOKPROBE_SYMBOL(kernel_stack_overflow); 273 273 274 - static void test_monitor_call(void) 274 + static void __init test_monitor_call(void) 275 275 { 276 276 int val = 1; 277 277
+3 -43
arch/s390/mm/cmm.c
··· 19 19 #include <linux/swap.h> 20 20 #include <linux/kthread.h> 21 21 #include <linux/oom.h> 22 - #include <linux/suspend.h> 23 22 #include <linux/uaccess.h> 24 23 25 24 #include <asm/pgalloc.h> ··· 48 49 static volatile long cmm_timed_pages_target; 49 50 static long cmm_timeout_pages; 50 51 static long cmm_timeout_seconds; 51 - static int cmm_suspended; 52 52 53 53 static struct cmm_page_array *cmm_page_list; 54 54 static struct cmm_page_array *cmm_timed_page_list; ··· 149 151 150 152 while (1) { 151 153 rc = wait_event_interruptible(cmm_thread_wait, 152 - (!cmm_suspended && (cmm_pages != cmm_pages_target || 153 - cmm_timed_pages != cmm_timed_pages_target)) || 154 - kthread_should_stop()); 154 + cmm_pages != cmm_pages_target || 155 + cmm_timed_pages != cmm_timed_pages_target || 156 + kthread_should_stop()); 155 157 if (kthread_should_stop() || rc == -ERESTARTSYS) { 156 158 cmm_pages_target = cmm_pages; 157 159 cmm_timed_pages_target = cmm_timed_pages; ··· 388 390 389 391 static struct ctl_table_header *cmm_sysctl_header; 390 392 391 - static int cmm_suspend(void) 392 - { 393 - cmm_suspended = 1; 394 - cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); 395 - cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); 396 - return 0; 397 - } 398 - 399 - static int cmm_resume(void) 400 - { 401 - cmm_suspended = 0; 402 - cmm_kick_thread(); 403 - return 0; 404 - } 405 - 406 - static int cmm_power_event(struct notifier_block *this, 407 - unsigned long event, void *ptr) 408 - { 409 - switch (event) { 410 - case PM_POST_HIBERNATION: 411 - return cmm_resume(); 412 - case PM_HIBERNATION_PREPARE: 413 - return cmm_suspend(); 414 - default: 415 - return NOTIFY_DONE; 416 - } 417 - } 418 - 419 - static struct notifier_block cmm_power_notifier = { 420 - .notifier_call = cmm_power_event, 421 - }; 422 - 423 393 static int __init cmm_init(void) 424 394 { 425 395 int rc = -ENOMEM; ··· 412 446 rc = register_oom_notifier(&cmm_oom_nb); 413 447 if (rc < 0) 414 448 
goto out_oom_notify; 415 - rc = register_pm_notifier(&cmm_power_notifier); 416 - if (rc) 417 - goto out_pm; 418 449 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); 419 450 if (!IS_ERR(cmm_thread_ptr)) 420 451 return 0; 421 452 422 453 rc = PTR_ERR(cmm_thread_ptr); 423 - unregister_pm_notifier(&cmm_power_notifier); 424 - out_pm: 425 454 unregister_oom_notifier(&cmm_oom_nb); 426 455 out_oom_notify: 427 456 #ifdef CONFIG_CMM_IUCV ··· 436 475 #ifdef CONFIG_CMM_IUCV 437 476 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); 438 477 #endif 439 - unregister_pm_notifier(&cmm_power_notifier); 440 478 unregister_oom_notifier(&cmm_oom_nb); 441 479 kthread_stop(cmm_thread_ptr); 442 480 del_timer_sync(&cmm_timer);
+10 -11
arch/s390/mm/fault.c
··· 45 45 #define __SUBCODE_MASK 0x0600 46 46 #define __PF_RES_FIELD 0x8000000000000000ULL 47 47 48 - #define VM_FAULT_BADCONTEXT 0x010000 49 - #define VM_FAULT_BADMAP 0x020000 50 - #define VM_FAULT_BADACCESS 0x040000 51 - #define VM_FAULT_SIGNAL 0x080000 52 - #define VM_FAULT_PFAULT 0x100000 48 + #define VM_FAULT_BADCONTEXT ((__force vm_fault_t) 0x010000) 49 + #define VM_FAULT_BADMAP ((__force vm_fault_t) 0x020000) 50 + #define VM_FAULT_BADACCESS ((__force vm_fault_t) 0x040000) 51 + #define VM_FAULT_SIGNAL ((__force vm_fault_t) 0x080000) 52 + #define VM_FAULT_PFAULT ((__force vm_fault_t) 0x100000) 53 53 54 54 enum fault_type { 55 55 KERNEL_FAULT, ··· 123 123 if (*table & _REGION_ENTRY_INVALID) 124 124 goto out; 125 125 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 126 - /* fallthrough */ 126 + fallthrough; 127 127 case _ASCE_TYPE_REGION2: 128 128 table += (address & _REGION2_INDEX) >> _REGION2_SHIFT; 129 129 if (bad_address(table)) ··· 132 132 if (*table & _REGION_ENTRY_INVALID) 133 133 goto out; 134 134 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 135 - /* fallthrough */ 135 + fallthrough; 136 136 case _ASCE_TYPE_REGION3: 137 137 table += (address & _REGION3_INDEX) >> _REGION3_SHIFT; 138 138 if (bad_address(table)) ··· 141 141 if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE)) 142 142 goto out; 143 143 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 144 - /* fallthrough */ 144 + fallthrough; 145 145 case _ASCE_TYPE_SEGMENT: 146 146 table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; 147 147 if (bad_address(table)) ··· 328 328 case VM_FAULT_BADACCESS: 329 329 if (access == VM_EXEC && signal_return(regs) == 0) 330 330 break; 331 - /* fallthrough */ 331 + fallthrough; 332 332 case VM_FAULT_BADMAP: 333 333 /* Bad memory access. Check if it is kernel or user space. 
*/ 334 334 if (user_mode(regs)) { ··· 338 338 do_sigsegv(regs, si_code); 339 339 break; 340 340 } 341 - /* fallthrough */ 341 + fallthrough; 342 342 case VM_FAULT_BADCONTEXT: 343 - /* fallthrough */ 344 343 case VM_FAULT_PFAULT: 345 344 do_no_context(regs); 346 345 break;
+2 -9
arch/s390/mm/hugetlbpage.c
··· 326 326 struct hstate *h = hstate_file(file); 327 327 struct mm_struct *mm = current->mm; 328 328 struct vm_area_struct *vma; 329 - int rc; 330 329 331 330 if (len & ~huge_page_mask(h)) 332 331 return -EINVAL; ··· 352 353 else 353 354 addr = hugetlb_get_unmapped_area_topdown(file, addr, len, 354 355 pgoff, flags); 355 - if (addr & ~PAGE_MASK) 356 + if (offset_in_page(addr)) 356 357 return addr; 357 358 358 359 check_asce_limit: 359 - if (addr + len > current->mm->context.asce_limit && 360 - addr + len <= TASK_SIZE) { 361 - rc = crst_table_upgrade(mm, addr + len); 362 - if (rc) 363 - return (unsigned long) rc; 364 - } 365 - return addr; 360 + return check_asce_limit(mm, addr, len); 366 361 }
+11 -29
arch/s390/mm/mmap.c
··· 72 72 return PAGE_ALIGN(STACK_TOP - gap - rnd); 73 73 } 74 74 75 - unsigned long 76 - arch_get_unmapped_area(struct file *filp, unsigned long addr, 77 - unsigned long len, unsigned long pgoff, unsigned long flags) 75 + unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, 76 + unsigned long len, unsigned long pgoff, 77 + unsigned long flags) 78 78 { 79 79 struct mm_struct *mm = current->mm; 80 80 struct vm_area_struct *vma; 81 81 struct vm_unmapped_area_info info; 82 - int rc; 83 82 84 83 if (len > TASK_SIZE - mmap_min_addr) 85 84 return -ENOMEM; ··· 104 105 info.align_mask = 0; 105 106 info.align_offset = pgoff << PAGE_SHIFT; 106 107 addr = vm_unmapped_area(&info); 107 - if (addr & ~PAGE_MASK) 108 + if (offset_in_page(addr)) 108 109 return addr; 109 110 110 111 check_asce_limit: 111 - if (addr + len > current->mm->context.asce_limit && 112 - addr + len <= TASK_SIZE) { 113 - rc = crst_table_upgrade(mm, addr + len); 114 - if (rc) 115 - return (unsigned long) rc; 116 - } 117 - 118 - return addr; 112 + return check_asce_limit(mm, addr, len); 119 113 } 120 114 121 - unsigned long 122 - arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, 123 - const unsigned long len, const unsigned long pgoff, 124 - const unsigned long flags) 115 + unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 116 + unsigned long len, unsigned long pgoff, 117 + unsigned long flags) 125 118 { 126 119 struct vm_area_struct *vma; 127 120 struct mm_struct *mm = current->mm; 128 - unsigned long addr = addr0; 129 121 struct vm_unmapped_area_info info; 130 - int rc; 131 122 132 123 /* requested length too big for entire address space */ 133 124 if (len > TASK_SIZE - mmap_min_addr) ··· 152 163 * can happen with large stack limits and large mmap() 153 164 * allocations. 
154 165 */ 155 - if (addr & ~PAGE_MASK) { 166 + if (offset_in_page(addr)) { 156 167 VM_BUG_ON(addr != -ENOMEM); 157 168 info.flags = 0; 158 169 info.low_limit = TASK_UNMAPPED_BASE; 159 170 info.high_limit = TASK_SIZE; 160 171 addr = vm_unmapped_area(&info); 161 - if (addr & ~PAGE_MASK) 172 + if (offset_in_page(addr)) 162 173 return addr; 163 174 } 164 175 165 176 check_asce_limit: 166 - if (addr + len > current->mm->context.asce_limit && 167 - addr + len <= TASK_SIZE) { 168 - rc = crst_table_upgrade(mm, addr + len); 169 - if (rc) 170 - return (unsigned long) rc; 171 - } 172 - 173 - return addr; 177 + return check_asce_limit(mm, addr, len); 174 178 } 175 179 176 180 /*
-16
arch/s390/mm/pageattr.c
··· 367 367 } 368 368 } 369 369 370 - #ifdef CONFIG_HIBERNATION 371 - bool kernel_page_present(struct page *page) 372 - { 373 - unsigned long addr; 374 - int cc; 375 - 376 - addr = page_to_phys(page); 377 - asm volatile( 378 - " lra %1,0(%1)\n" 379 - " ipm %0\n" 380 - " srl %0,28" 381 - : "=d" (cc), "+a" (addr) : : "cc"); 382 - return cc == 0; 383 - } 384 - #endif /* CONFIG_HIBERNATION */ 385 - 386 370 #endif /* CONFIG_DEBUG_PAGEALLOC */
+55 -57
arch/s390/mm/pgalloc.c
··· 77 77 78 78 int crst_table_upgrade(struct mm_struct *mm, unsigned long end) 79 79 { 80 - unsigned long *table, *pgd; 81 - int rc, notify; 80 + unsigned long *pgd = NULL, *p4d = NULL, *__pgd; 81 + unsigned long asce_limit = mm->context.asce_limit; 82 82 83 83 /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ 84 - VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE); 85 - rc = 0; 86 - notify = 0; 87 - while (mm->context.asce_limit < end) { 88 - table = crst_table_alloc(mm); 89 - if (!table) { 90 - rc = -ENOMEM; 91 - break; 92 - } 93 - spin_lock_bh(&mm->page_table_lock); 94 - pgd = (unsigned long *) mm->pgd; 95 - if (mm->context.asce_limit == _REGION2_SIZE) { 96 - crst_table_init(table, _REGION2_ENTRY_EMPTY); 97 - p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd); 98 - mm->pgd = (pgd_t *) table; 99 - mm->context.asce_limit = _REGION1_SIZE; 100 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 101 - _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 102 - mm_inc_nr_puds(mm); 103 - } else { 104 - crst_table_init(table, _REGION1_ENTRY_EMPTY); 105 - pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); 106 - mm->pgd = (pgd_t *) table; 107 - mm->context.asce_limit = -PAGE_SIZE; 108 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 109 - _ASCE_USER_BITS | _ASCE_TYPE_REGION1; 110 - } 111 - notify = 1; 112 - spin_unlock_bh(&mm->page_table_lock); 84 + VM_BUG_ON(asce_limit < _REGION2_SIZE); 85 + 86 + if (end <= asce_limit) 87 + return 0; 88 + 89 + if (asce_limit == _REGION2_SIZE) { 90 + p4d = crst_table_alloc(mm); 91 + if (unlikely(!p4d)) 92 + goto err_p4d; 93 + crst_table_init(p4d, _REGION2_ENTRY_EMPTY); 113 94 } 114 - if (notify) 115 - on_each_cpu(__crst_table_upgrade, mm, 0); 116 - return rc; 117 - } 118 - 119 - void crst_table_downgrade(struct mm_struct *mm) 120 - { 121 - pgd_t *pgd; 122 - 123 - /* downgrade should only happen from 3 to 2 levels (compat only) */ 124 - VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE); 125 - 126 - if (current->active_mm 
== mm) { 127 - clear_user_asce(); 128 - __tlb_flush_mm(mm); 95 + if (end > _REGION1_SIZE) { 96 + pgd = crst_table_alloc(mm); 97 + if (unlikely(!pgd)) 98 + goto err_pgd; 99 + crst_table_init(pgd, _REGION1_ENTRY_EMPTY); 129 100 } 130 101 131 - pgd = mm->pgd; 132 - mm_dec_nr_pmds(mm); 133 - mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); 134 - mm->context.asce_limit = _REGION3_SIZE; 135 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 136 - _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; 137 - crst_table_free(mm, (unsigned long *) pgd); 102 + spin_lock_bh(&mm->page_table_lock); 138 103 139 - if (current->active_mm == mm) 140 - set_user_asce(mm); 104 + /* 105 + * This routine gets called with mmap_sem lock held and there is 106 + * no reason to optimize for the case of otherwise. However, if 107 + * that would ever change, the below check will let us know. 108 + */ 109 + VM_BUG_ON(asce_limit != mm->context.asce_limit); 110 + 111 + if (p4d) { 112 + __pgd = (unsigned long *) mm->pgd; 113 + p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd); 114 + mm->pgd = (pgd_t *) p4d; 115 + mm->context.asce_limit = _REGION1_SIZE; 116 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 117 + _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 118 + mm_inc_nr_puds(mm); 119 + } 120 + if (pgd) { 121 + __pgd = (unsigned long *) mm->pgd; 122 + pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd); 123 + mm->pgd = (pgd_t *) pgd; 124 + mm->context.asce_limit = TASK_SIZE_MAX; 125 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 126 + _ASCE_USER_BITS | _ASCE_TYPE_REGION1; 127 + } 128 + 129 + spin_unlock_bh(&mm->page_table_lock); 130 + 131 + on_each_cpu(__crst_table_upgrade, mm, 0); 132 + 133 + return 0; 134 + 135 + err_pgd: 136 + crst_table_free(mm, p4d); 137 + err_p4d: 138 + return -ENOMEM; 141 139 } 142 140 143 141 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) ··· 302 304 mask >>= 24; 303 305 if (mask != 0) 304 306 break; 305 - /* fallthrough */ 307 + 
fallthrough; 306 308 case 3: /* 4K page table with pgstes */ 307 309 if (mask & 3) 308 310 atomic_xor_bits(&page->_refcount, 3 << 24); ··· 527 529 base_region2_walk(table, 0, _REGION1_SIZE, 0); 528 530 break; 529 531 case _ASCE_TYPE_REGION1: 530 - base_region1_walk(table, 0, -_PAGE_SIZE, 0); 532 + base_region1_walk(table, 0, TASK_SIZE_MAX, 0); 531 533 break; 532 534 } 533 535 base_crst_free(table);
+4
arch/s390/mm/vmem.c
··· 415 415 SET_MEMORY_RO | SET_MEMORY_X); 416 416 __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT, 417 417 SET_MEMORY_RO | SET_MEMORY_X); 418 + 419 + /* we need lowcore executable for our LPSWE instructions */ 420 + set_memory_x(0, 1); 421 + 418 422 pr_info("Write protected kernel read-only data: %luk\n", 419 423 (unsigned long)(__end_rodata - _stext) >> 10); 420 424 }
-2
arch/s390/numa/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-y += numa.o 3 - obj-y += toptree.o 4 - obj-$(CONFIG_NUMA_EMU) += mode_emu.o
-577
arch/s390/numa/mode_emu.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * NUMA support for s390 4 - * 5 - * NUMA emulation (aka fake NUMA) distributes the available memory to nodes 6 - * without using real topology information about the physical memory of the 7 - * machine. 8 - * 9 - * It distributes the available CPUs to nodes while respecting the original 10 - * machine topology information. This is done by trying to avoid to separate 11 - * CPUs which reside on the same book or even on the same MC. 12 - * 13 - * Because the current Linux scheduler code requires a stable cpu to node 14 - * mapping, cores are pinned to nodes when the first CPU thread is set online. 15 - * 16 - * Copyright IBM Corp. 2015 17 - */ 18 - 19 - #define KMSG_COMPONENT "numa_emu" 20 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 21 - 22 - #include <linux/kernel.h> 23 - #include <linux/cpumask.h> 24 - #include <linux/memblock.h> 25 - #include <linux/node.h> 26 - #include <linux/memory.h> 27 - #include <linux/slab.h> 28 - #include <asm/smp.h> 29 - #include <asm/topology.h> 30 - #include "numa_mode.h" 31 - #include "toptree.h" 32 - 33 - /* Distances between the different system components */ 34 - #define DIST_EMPTY 0 35 - #define DIST_CORE 1 36 - #define DIST_MC 2 37 - #define DIST_BOOK 3 38 - #define DIST_DRAWER 4 39 - #define DIST_MAX 5 40 - 41 - /* Node distance reported to common code */ 42 - #define EMU_NODE_DIST 10 43 - 44 - /* Node ID for free (not yet pinned) cores */ 45 - #define NODE_ID_FREE -1 46 - 47 - /* Different levels of toptree */ 48 - enum toptree_level {CORE, MC, BOOK, DRAWER, NODE, TOPOLOGY}; 49 - 50 - /* The two toptree IDs */ 51 - enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA}; 52 - 53 - /* Number of NUMA nodes */ 54 - static int emu_nodes = 1; 55 - /* NUMA stripe size */ 56 - static unsigned long emu_size; 57 - 58 - /* 59 - * Node to core pinning information updates are protected by 60 - * "sched_domains_mutex". 
61 - */ 62 - static struct { 63 - s32 to_node_id[CONFIG_NR_CPUS]; /* Pinned core to node mapping */ 64 - int total; /* Total number of pinned cores */ 65 - int per_node_target; /* Cores per node without extra cores */ 66 - int per_node[MAX_NUMNODES]; /* Number of cores pinned to node */ 67 - } *emu_cores; 68 - 69 - /* 70 - * Pin a core to a node 71 - */ 72 - static void pin_core_to_node(int core_id, int node_id) 73 - { 74 - if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) { 75 - emu_cores->per_node[node_id]++; 76 - emu_cores->to_node_id[core_id] = node_id; 77 - emu_cores->total++; 78 - } else { 79 - WARN_ON(emu_cores->to_node_id[core_id] != node_id); 80 - } 81 - } 82 - 83 - /* 84 - * Number of pinned cores of a node 85 - */ 86 - static int cores_pinned(struct toptree *node) 87 - { 88 - return emu_cores->per_node[node->id]; 89 - } 90 - 91 - /* 92 - * ID of the node where the core is pinned (or NODE_ID_FREE) 93 - */ 94 - static int core_pinned_to_node_id(struct toptree *core) 95 - { 96 - return emu_cores->to_node_id[core->id]; 97 - } 98 - 99 - /* 100 - * Number of cores in the tree that are not yet pinned 101 - */ 102 - static int cores_free(struct toptree *tree) 103 - { 104 - struct toptree *core; 105 - int count = 0; 106 - 107 - toptree_for_each(core, tree, CORE) { 108 - if (core_pinned_to_node_id(core) == NODE_ID_FREE) 109 - count++; 110 - } 111 - return count; 112 - } 113 - 114 - /* 115 - * Return node of core 116 - */ 117 - static struct toptree *core_node(struct toptree *core) 118 - { 119 - return core->parent->parent->parent->parent; 120 - } 121 - 122 - /* 123 - * Return drawer of core 124 - */ 125 - static struct toptree *core_drawer(struct toptree *core) 126 - { 127 - return core->parent->parent->parent; 128 - } 129 - 130 - /* 131 - * Return book of core 132 - */ 133 - static struct toptree *core_book(struct toptree *core) 134 - { 135 - return core->parent->parent; 136 - } 137 - 138 - /* 139 - * Return mc of core 140 - */ 141 - static struct toptree 
*core_mc(struct toptree *core) 142 - { 143 - return core->parent; 144 - } 145 - 146 - /* 147 - * Distance between two cores 148 - */ 149 - static int dist_core_to_core(struct toptree *core1, struct toptree *core2) 150 - { 151 - if (core_drawer(core1)->id != core_drawer(core2)->id) 152 - return DIST_DRAWER; 153 - if (core_book(core1)->id != core_book(core2)->id) 154 - return DIST_BOOK; 155 - if (core_mc(core1)->id != core_mc(core2)->id) 156 - return DIST_MC; 157 - /* Same core or sibling on same MC */ 158 - return DIST_CORE; 159 - } 160 - 161 - /* 162 - * Distance of a node to a core 163 - */ 164 - static int dist_node_to_core(struct toptree *node, struct toptree *core) 165 - { 166 - struct toptree *core_node; 167 - int dist_min = DIST_MAX; 168 - 169 - toptree_for_each(core_node, node, CORE) 170 - dist_min = min(dist_min, dist_core_to_core(core_node, core)); 171 - return dist_min == DIST_MAX ? DIST_EMPTY : dist_min; 172 - } 173 - 174 - /* 175 - * Unify will delete empty nodes, therefore recreate nodes. 176 - */ 177 - static void toptree_unify_tree(struct toptree *tree) 178 - { 179 - int nid; 180 - 181 - toptree_unify(tree); 182 - for (nid = 0; nid < emu_nodes; nid++) 183 - toptree_get_child(tree, nid); 184 - } 185 - 186 - /* 187 - * Find the best/nearest node for a given core and ensure that no node 188 - * gets more than "emu_cores->per_node_target + extra" cores. 
189 - */ 190 - static struct toptree *node_for_core(struct toptree *numa, struct toptree *core, 191 - int extra) 192 - { 193 - struct toptree *node, *node_best = NULL; 194 - int dist_cur, dist_best, cores_target; 195 - 196 - cores_target = emu_cores->per_node_target + extra; 197 - dist_best = DIST_MAX; 198 - node_best = NULL; 199 - toptree_for_each(node, numa, NODE) { 200 - /* Already pinned cores must use their nodes */ 201 - if (core_pinned_to_node_id(core) == node->id) { 202 - node_best = node; 203 - break; 204 - } 205 - /* Skip nodes that already have enough cores */ 206 - if (cores_pinned(node) >= cores_target) 207 - continue; 208 - dist_cur = dist_node_to_core(node, core); 209 - if (dist_cur < dist_best) { 210 - dist_best = dist_cur; 211 - node_best = node; 212 - } 213 - } 214 - return node_best; 215 - } 216 - 217 - /* 218 - * Find the best node for each core with respect to "extra" core count 219 - */ 220 - static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys, 221 - int extra) 222 - { 223 - struct toptree *node, *core, *tmp; 224 - 225 - toptree_for_each_safe(core, tmp, phys, CORE) { 226 - node = node_for_core(numa, core, extra); 227 - if (!node) 228 - return; 229 - toptree_move(core, node); 230 - pin_core_to_node(core->id, node->id); 231 - } 232 - } 233 - 234 - /* 235 - * Move structures of given level to specified NUMA node 236 - */ 237 - static void move_level_to_numa_node(struct toptree *node, struct toptree *phys, 238 - enum toptree_level level, bool perfect) 239 - { 240 - int cores_free, cores_target = emu_cores->per_node_target; 241 - struct toptree *cur, *tmp; 242 - 243 - toptree_for_each_safe(cur, tmp, phys, level) { 244 - cores_free = cores_target - toptree_count(node, CORE); 245 - if (perfect) { 246 - if (cores_free == toptree_count(cur, CORE)) 247 - toptree_move(cur, node); 248 - } else { 249 - if (cores_free >= toptree_count(cur, CORE)) 250 - toptree_move(cur, node); 251 - } 252 - } 253 - } 254 - 255 - /* 256 - * Move 
structures of a given level to NUMA nodes. If "perfect" is specified 257 - * move only perfectly fitting structures. Otherwise move also smaller 258 - * than needed structures. 259 - */ 260 - static void move_level_to_numa(struct toptree *numa, struct toptree *phys, 261 - enum toptree_level level, bool perfect) 262 - { 263 - struct toptree *node; 264 - 265 - toptree_for_each(node, numa, NODE) 266 - move_level_to_numa_node(node, phys, level, perfect); 267 - } 268 - 269 - /* 270 - * For the first run try to move the big structures 271 - */ 272 - static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys) 273 - { 274 - struct toptree *core; 275 - 276 - /* Always try to move perfectly fitting structures first */ 277 - move_level_to_numa(numa, phys, DRAWER, true); 278 - move_level_to_numa(numa, phys, DRAWER, false); 279 - move_level_to_numa(numa, phys, BOOK, true); 280 - move_level_to_numa(numa, phys, BOOK, false); 281 - move_level_to_numa(numa, phys, MC, true); 282 - move_level_to_numa(numa, phys, MC, false); 283 - /* Now pin all the moved cores */ 284 - toptree_for_each(core, numa, CORE) 285 - pin_core_to_node(core->id, core_node(core)->id); 286 - } 287 - 288 - /* 289 - * Allocate new topology and create required nodes 290 - */ 291 - static struct toptree *toptree_new(int id, int nodes) 292 - { 293 - struct toptree *tree; 294 - int nid; 295 - 296 - tree = toptree_alloc(TOPOLOGY, id); 297 - if (!tree) 298 - goto fail; 299 - for (nid = 0; nid < nodes; nid++) { 300 - if (!toptree_get_child(tree, nid)) 301 - goto fail; 302 - } 303 - return tree; 304 - fail: 305 - panic("NUMA emulation could not allocate topology"); 306 - } 307 - 308 - /* 309 - * Allocate and initialize core to node mapping 310 - */ 311 - static void __ref create_core_to_node_map(void) 312 - { 313 - int i; 314 - 315 - emu_cores = memblock_alloc(sizeof(*emu_cores), 8); 316 - if (!emu_cores) 317 - panic("%s: Failed to allocate %zu bytes align=0x%x\n", 318 - __func__, sizeof(*emu_cores), 8); 
319 - for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) 320 - emu_cores->to_node_id[i] = NODE_ID_FREE; 321 - } 322 - 323 - /* 324 - * Move cores from physical topology into NUMA target topology 325 - * and try to keep as much of the physical topology as possible. 326 - */ 327 - static struct toptree *toptree_to_numa(struct toptree *phys) 328 - { 329 - static int first = 1; 330 - struct toptree *numa; 331 - int cores_total; 332 - 333 - cores_total = emu_cores->total + cores_free(phys); 334 - emu_cores->per_node_target = cores_total / emu_nodes; 335 - numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes); 336 - if (first) { 337 - toptree_to_numa_first(numa, phys); 338 - first = 0; 339 - } 340 - toptree_to_numa_single(numa, phys, 0); 341 - toptree_to_numa_single(numa, phys, 1); 342 - toptree_unify_tree(numa); 343 - 344 - WARN_ON(cpumask_weight(&phys->mask)); 345 - return numa; 346 - } 347 - 348 - /* 349 - * Create a toptree out of the physical topology that we got from the hypervisor 350 - */ 351 - static struct toptree *toptree_from_topology(void) 352 - { 353 - struct toptree *phys, *node, *drawer, *book, *mc, *core; 354 - struct cpu_topology_s390 *top; 355 - int cpu; 356 - 357 - phys = toptree_new(TOPTREE_ID_PHYS, 1); 358 - 359 - for_each_cpu(cpu, &cpus_with_topology) { 360 - top = &cpu_topology[cpu]; 361 - node = toptree_get_child(phys, 0); 362 - drawer = toptree_get_child(node, top->drawer_id); 363 - book = toptree_get_child(drawer, top->book_id); 364 - mc = toptree_get_child(book, top->socket_id); 365 - core = toptree_get_child(mc, smp_get_base_cpu(cpu)); 366 - if (!drawer || !book || !mc || !core) 367 - panic("NUMA emulation could not allocate memory"); 368 - cpumask_set_cpu(cpu, &core->mask); 369 - toptree_update_mask(mc); 370 - } 371 - return phys; 372 - } 373 - 374 - /* 375 - * Add toptree core to topology and create correct CPU masks 376 - */ 377 - static void topology_add_core(struct toptree *core) 378 - { 379 - struct cpu_topology_s390 *top; 380 - int cpu; 
381 - 382 - for_each_cpu(cpu, &core->mask) { 383 - top = &cpu_topology[cpu]; 384 - cpumask_copy(&top->thread_mask, &core->mask); 385 - cpumask_copy(&top->core_mask, &core_mc(core)->mask); 386 - cpumask_copy(&top->book_mask, &core_book(core)->mask); 387 - cpumask_copy(&top->drawer_mask, &core_drawer(core)->mask); 388 - cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]); 389 - top->node_id = core_node(core)->id; 390 - } 391 - } 392 - 393 - /* 394 - * Apply toptree to topology and create CPU masks 395 - */ 396 - static void toptree_to_topology(struct toptree *numa) 397 - { 398 - struct toptree *core; 399 - int i; 400 - 401 - /* Clear all node masks */ 402 - for (i = 0; i < MAX_NUMNODES; i++) 403 - cpumask_clear(&node_to_cpumask_map[i]); 404 - 405 - /* Rebuild all masks */ 406 - toptree_for_each(core, numa, CORE) 407 - topology_add_core(core); 408 - } 409 - 410 - /* 411 - * Show the node to core mapping 412 - */ 413 - static void print_node_to_core_map(void) 414 - { 415 - int nid, cid; 416 - 417 - if (!numa_debug_enabled) 418 - return; 419 - printk(KERN_DEBUG "NUMA node to core mapping\n"); 420 - for (nid = 0; nid < emu_nodes; nid++) { 421 - printk(KERN_DEBUG " node %3d: ", nid); 422 - for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) { 423 - if (emu_cores->to_node_id[cid] == nid) 424 - printk(KERN_CONT "%d ", cid); 425 - } 426 - printk(KERN_CONT "\n"); 427 - } 428 - } 429 - 430 - static void pin_all_possible_cpus(void) 431 - { 432 - int core_id, node_id, cpu; 433 - static int initialized; 434 - 435 - if (initialized) 436 - return; 437 - print_node_to_core_map(); 438 - node_id = 0; 439 - for_each_possible_cpu(cpu) { 440 - core_id = smp_get_base_cpu(cpu); 441 - if (emu_cores->to_node_id[core_id] != NODE_ID_FREE) 442 - continue; 443 - pin_core_to_node(core_id, node_id); 444 - cpu_topology[cpu].node_id = node_id; 445 - node_id = (node_id + 1) % emu_nodes; 446 - } 447 - print_node_to_core_map(); 448 - initialized = 1; 449 - } 450 - 451 - /* 452 - * 
Transfer physical topology into a NUMA topology and modify CPU masks 453 - * according to the NUMA topology. 454 - * 455 - * Must be called with "sched_domains_mutex" lock held. 456 - */ 457 - static void emu_update_cpu_topology(void) 458 - { 459 - struct toptree *phys, *numa; 460 - 461 - if (emu_cores == NULL) 462 - create_core_to_node_map(); 463 - phys = toptree_from_topology(); 464 - numa = toptree_to_numa(phys); 465 - toptree_free(phys); 466 - toptree_to_topology(numa); 467 - toptree_free(numa); 468 - pin_all_possible_cpus(); 469 - } 470 - 471 - /* 472 - * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum 473 - * alignment (needed for memory hotplug). 474 - */ 475 - static unsigned long emu_setup_size_adjust(unsigned long size) 476 - { 477 - unsigned long size_new; 478 - 479 - size = size ? : CONFIG_EMU_SIZE; 480 - size_new = roundup(size, memory_block_size_bytes()); 481 - if (size_new == size) 482 - return size; 483 - pr_warn("Increasing memory stripe size from %ld MB to %ld MB\n", 484 - size >> 20, size_new >> 20); 485 - return size_new; 486 - } 487 - 488 - /* 489 - * If we have not enough memory for the specified nodes, reduce the node count. 
490 - */ 491 - static int emu_setup_nodes_adjust(int nodes) 492 - { 493 - int nodes_max; 494 - 495 - nodes_max = memblock.memory.total_size / emu_size; 496 - nodes_max = max(nodes_max, 1); 497 - if (nodes_max >= nodes) 498 - return nodes; 499 - pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes); 500 - return nodes_max; 501 - } 502 - 503 - /* 504 - * Early emu setup 505 - */ 506 - static void emu_setup(void) 507 - { 508 - int nid; 509 - 510 - emu_size = emu_setup_size_adjust(emu_size); 511 - emu_nodes = emu_setup_nodes_adjust(emu_nodes); 512 - for (nid = 0; nid < emu_nodes; nid++) 513 - node_set(nid, node_possible_map); 514 - pr_info("Creating %d nodes with memory stripe size %ld MB\n", 515 - emu_nodes, emu_size >> 20); 516 - } 517 - 518 - /* 519 - * Return node id for given page number 520 - */ 521 - static int emu_pfn_to_nid(unsigned long pfn) 522 - { 523 - return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes; 524 - } 525 - 526 - /* 527 - * Return stripe size 528 - */ 529 - static unsigned long emu_align(void) 530 - { 531 - return emu_size; 532 - } 533 - 534 - /* 535 - * Return distance between two nodes 536 - */ 537 - static int emu_distance(int node1, int node2) 538 - { 539 - return (node1 != node2) * EMU_NODE_DIST; 540 - } 541 - 542 - /* 543 - * Define callbacks for generic s390 NUMA infrastructure 544 - */ 545 - const struct numa_mode numa_mode_emu = { 546 - .name = "emu", 547 - .setup = emu_setup, 548 - .update_cpu_topology = emu_update_cpu_topology, 549 - .__pfn_to_nid = emu_pfn_to_nid, 550 - .align = emu_align, 551 - .distance = emu_distance, 552 - }; 553 - 554 - /* 555 - * Kernel parameter: emu_nodes=<n> 556 - */ 557 - static int __init early_parse_emu_nodes(char *p) 558 - { 559 - int count; 560 - 561 - if (!p || kstrtoint(p, 0, &count) != 0 || count <= 0) 562 - return 0; 563 - emu_nodes = min(count, MAX_NUMNODES); 564 - return 0; 565 - } 566 - early_param("emu_nodes", early_parse_emu_nodes); 567 - 568 - /* 569 - * Kernel parameter: 
emu_size=[<n>[k|M|G|T]] 570 - */ 571 - static int __init early_parse_emu_size(char *p) 572 - { 573 - if (p) 574 - emu_size = memparse(p, NULL); 575 - return 0; 576 - } 577 - early_param("emu_size", early_parse_emu_size);
+9 -138
arch/s390/numa/numa.c
··· 7 7 * Copyright IBM Corp. 2015 8 8 */ 9 9 10 - #define KMSG_COMPONENT "numa" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 - 13 10 #include <linux/kernel.h> 14 11 #include <linux/mmzone.h> 15 12 #include <linux/cpumask.h> 16 13 #include <linux/memblock.h> 17 - #include <linux/slab.h> 18 14 #include <linux/node.h> 19 - 20 15 #include <asm/numa.h> 21 - #include "numa_mode.h" 22 16 23 - pg_data_t *node_data[MAX_NUMNODES]; 17 + struct pglist_data *node_data[MAX_NUMNODES]; 24 18 EXPORT_SYMBOL(node_data); 25 19 26 - cpumask_t node_to_cpumask_map[MAX_NUMNODES]; 27 - EXPORT_SYMBOL(node_to_cpumask_map); 28 - 29 - static void plain_setup(void) 20 + void __init numa_setup(void) 30 21 { 22 + int nid; 23 + 24 + nodes_clear(node_possible_map); 31 25 node_set(0, node_possible_map); 32 - } 33 - 34 - const struct numa_mode numa_mode_plain = { 35 - .name = "plain", 36 - .setup = plain_setup, 37 - }; 38 - 39 - static const struct numa_mode *mode = &numa_mode_plain; 40 - 41 - int numa_pfn_to_nid(unsigned long pfn) 42 - { 43 - return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0; 44 - } 45 - 46 - void numa_update_cpu_topology(void) 47 - { 48 - if (mode->update_cpu_topology) 49 - mode->update_cpu_topology(); 50 - } 51 - 52 - int __node_distance(int a, int b) 53 - { 54 - return mode->distance ? mode->distance(a, b) : 0; 55 - } 56 - EXPORT_SYMBOL(__node_distance); 57 - 58 - int numa_debug_enabled; 59 - 60 - /* 61 - * numa_setup_memory() - Assign bootmem to nodes 62 - * 63 - * The memory is first added to memblock without any respect to nodes. 64 - * This is fixed before remaining memblock memory is handed over to the 65 - * buddy allocator. 66 - * An important side effect is that large bootmem allocations might easily 67 - * cross node boundaries, which can be needed for large allocations with 68 - * smaller memory stripes in each node (i.e. when using NUMA emulation). 69 - * 70 - * Memory defines nodes: 71 - * Therefore this routine also sets the nodes online with memory. 
72 - */ 73 - static void __init numa_setup_memory(void) 74 - { 75 - unsigned long cur_base, align, end_of_dram; 76 - int nid = 0; 77 - 78 - end_of_dram = memblock_end_of_DRAM(); 79 - align = mode->align ? mode->align() : ULONG_MAX; 80 - 81 - /* 82 - * Step through all available memory and assign it to the nodes 83 - * indicated by the mode implementation. 84 - * All nodes which are seen here will be set online. 85 - */ 86 - cur_base = 0; 87 - do { 88 - nid = numa_pfn_to_nid(PFN_DOWN(cur_base)); 89 - node_set_online(nid); 90 - memblock_set_node(cur_base, align, &memblock.memory, nid); 91 - cur_base += align; 92 - } while (cur_base < end_of_dram); 93 - 94 - /* Allocate and fill out node_data */ 26 + node_set_online(0); 95 27 for (nid = 0; nid < MAX_NUMNODES; nid++) { 96 28 NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8); 97 29 if (!NODE_DATA(nid)) 98 30 panic("%s: Failed to allocate %zu bytes align=0x%x\n", 99 31 __func__, sizeof(pg_data_t), 8); 100 32 } 101 - 102 - for_each_online_node(nid) { 103 - unsigned long start_pfn, end_pfn; 104 - unsigned long t_start, t_end; 105 - int i; 106 - 107 - start_pfn = ULONG_MAX; 108 - end_pfn = 0; 109 - for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) { 110 - if (t_start < start_pfn) 111 - start_pfn = t_start; 112 - if (t_end > end_pfn) 113 - end_pfn = t_end; 114 - } 115 - NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; 116 - NODE_DATA(nid)->node_id = nid; 117 - } 33 + NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT; 34 + NODE_DATA(0)->node_id = 0; 118 35 } 119 36 120 - /* 121 - * numa_setup() - Earliest initialization 122 - * 123 - * Assign the mode and call the mode's setup routine. 124 - */ 125 - void __init numa_setup(void) 126 - { 127 - pr_info("NUMA mode: %s\n", mode->name); 128 - nodes_clear(node_possible_map); 129 - /* Initially attach all possible CPUs to node 0. 
*/ 130 - cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); 131 - if (mode->setup) 132 - mode->setup(); 133 - numa_setup_memory(); 134 - memblock_dump_all(); 135 - } 136 - 137 - /* 138 - * numa_init_late() - Initialization initcall 139 - * 140 - * Register NUMA nodes. 141 - */ 142 37 static int __init numa_init_late(void) 143 38 { 144 - int nid; 145 - 146 - for_each_online_node(nid) 147 - register_one_node(nid); 39 + register_one_node(0); 148 40 return 0; 149 41 } 150 42 arch_initcall(numa_init_late); 151 - 152 - static int __init parse_debug(char *parm) 153 - { 154 - numa_debug_enabled = 1; 155 - return 0; 156 - } 157 - early_param("numa_debug", parse_debug); 158 - 159 - static int __init parse_numa(char *parm) 160 - { 161 - if (!parm) 162 - return 1; 163 - if (strcmp(parm, numa_mode_plain.name) == 0) 164 - mode = &numa_mode_plain; 165 - #ifdef CONFIG_NUMA_EMU 166 - if (strcmp(parm, numa_mode_emu.name) == 0) 167 - mode = &numa_mode_emu; 168 - #endif 169 - return 0; 170 - } 171 - early_param("numa", parse_numa);
-25
arch/s390/numa/numa_mode.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * NUMA support for s390 4 - * 5 - * Define declarations used for communication between NUMA mode 6 - * implementations and NUMA core functionality. 7 - * 8 - * Copyright IBM Corp. 2015 9 - */ 10 - #ifndef __S390_NUMA_MODE_H 11 - #define __S390_NUMA_MODE_H 12 - 13 - struct numa_mode { 14 - char *name; /* Name of mode */ 15 - void (*setup)(void); /* Initizalize mode */ 16 - void (*update_cpu_topology)(void); /* Called by topology code */ 17 - int (*__pfn_to_nid)(unsigned long pfn); /* PFN to node ID */ 18 - unsigned long (*align)(void); /* Minimum node alignment */ 19 - int (*distance)(int a, int b); /* Distance between two nodes */ 20 - }; 21 - 22 - extern const struct numa_mode numa_mode_plain; 23 - extern const struct numa_mode numa_mode_emu; 24 - 25 - #endif /* __S390_NUMA_MODE_H */
-351
arch/s390/numa/toptree.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * NUMA support for s390 4 - * 5 - * A tree structure used for machine topology mangling 6 - * 7 - * Copyright IBM Corp. 2015 8 - */ 9 - 10 - #include <linux/kernel.h> 11 - #include <linux/memblock.h> 12 - #include <linux/cpumask.h> 13 - #include <linux/list.h> 14 - #include <linux/list_sort.h> 15 - #include <linux/slab.h> 16 - #include <asm/numa.h> 17 - 18 - #include "toptree.h" 19 - 20 - /** 21 - * toptree_alloc - Allocate and initialize a new tree node. 22 - * @level: The node's vertical level; level 0 contains the leaves. 23 - * @id: ID number, explicitly not unique beyond scope of node's siblings 24 - * 25 - * Allocate a new tree node and initialize it. 26 - * 27 - * RETURNS: 28 - * Pointer to the new tree node or NULL on error 29 - */ 30 - struct toptree __ref *toptree_alloc(int level, int id) 31 - { 32 - struct toptree *res; 33 - 34 - if (slab_is_available()) 35 - res = kzalloc(sizeof(*res), GFP_KERNEL); 36 - else 37 - res = memblock_alloc(sizeof(*res), 8); 38 - if (!res) 39 - return res; 40 - 41 - INIT_LIST_HEAD(&res->children); 42 - INIT_LIST_HEAD(&res->sibling); 43 - cpumask_clear(&res->mask); 44 - res->level = level; 45 - res->id = id; 46 - return res; 47 - } 48 - 49 - /** 50 - * toptree_remove - Remove a tree node from a tree 51 - * @cand: Pointer to the node to remove 52 - * 53 - * The node is detached from its parent node. The parent node's 54 - * masks will be updated to reflect the loss of the child. 55 - */ 56 - static void toptree_remove(struct toptree *cand) 57 - { 58 - struct toptree *oldparent; 59 - 60 - list_del_init(&cand->sibling); 61 - oldparent = cand->parent; 62 - cand->parent = NULL; 63 - toptree_update_mask(oldparent); 64 - } 65 - 66 - /** 67 - * toptree_free - discard a tree node 68 - * @cand: Pointer to the tree node to discard 69 - * 70 - * Checks if @cand is attached to a parent node. Detaches it 71 - * cleanly using toptree_remove. Possible children are freed 72 - * recursively. 
In the end @cand itself is freed. 73 - */ 74 - void __ref toptree_free(struct toptree *cand) 75 - { 76 - struct toptree *child, *tmp; 77 - 78 - if (cand->parent) 79 - toptree_remove(cand); 80 - toptree_for_each_child_safe(child, tmp, cand) 81 - toptree_free(child); 82 - if (slab_is_available()) 83 - kfree(cand); 84 - else 85 - memblock_free_early((unsigned long)cand, sizeof(*cand)); 86 - } 87 - 88 - /** 89 - * toptree_update_mask - Update node bitmasks 90 - * @cand: Pointer to a tree node 91 - * 92 - * The node's cpumask will be updated by combining all children's 93 - * masks. Then toptree_update_mask is called recursively for the 94 - * parent if applicable. 95 - * 96 - * NOTE: 97 - * This must not be called on leaves. If called on a leaf, its 98 - * CPU mask is cleared and lost. 99 - */ 100 - void toptree_update_mask(struct toptree *cand) 101 - { 102 - struct toptree *child; 103 - 104 - cpumask_clear(&cand->mask); 105 - list_for_each_entry(child, &cand->children, sibling) 106 - cpumask_or(&cand->mask, &cand->mask, &child->mask); 107 - if (cand->parent) 108 - toptree_update_mask(cand->parent); 109 - } 110 - 111 - /** 112 - * toptree_insert - Insert a tree node into tree 113 - * @cand: Pointer to the node to insert 114 - * @target: Pointer to the node to which @cand will added as a child 115 - * 116 - * Insert a tree node into a tree. Masks will be updated automatically. 117 - * 118 - * RETURNS: 119 - * 0 on success, -1 if NULL is passed as argument or the node levels 120 - * don't fit. 
121 - */ 122 - static int toptree_insert(struct toptree *cand, struct toptree *target) 123 - { 124 - if (!cand || !target) 125 - return -1; 126 - if (target->level != (cand->level + 1)) 127 - return -1; 128 - list_add_tail(&cand->sibling, &target->children); 129 - cand->parent = target; 130 - toptree_update_mask(target); 131 - return 0; 132 - } 133 - 134 - /** 135 - * toptree_move_children - Move all child nodes of a node to a new place 136 - * @cand: Pointer to the node whose children are to be moved 137 - * @target: Pointer to the node to which @cand's children will be attached 138 - * 139 - * Take all child nodes of @cand and move them using toptree_move. 140 - */ 141 - static void toptree_move_children(struct toptree *cand, struct toptree *target) 142 - { 143 - struct toptree *child, *tmp; 144 - 145 - toptree_for_each_child_safe(child, tmp, cand) 146 - toptree_move(child, target); 147 - } 148 - 149 - /** 150 - * toptree_unify - Merge children with same ID 151 - * @cand: Pointer to node whose direct children should be made unique 152 - * 153 - * When mangling the tree it is possible that a node has two or more children 154 - * which have the same ID. This routine merges these children into one and 155 - * moves all children of the merged nodes into the unified node. 
156 - */ 157 - void toptree_unify(struct toptree *cand) 158 - { 159 - struct toptree *child, *tmp, *cand_copy; 160 - 161 - /* Threads cannot be split, cores are not split */ 162 - if (cand->level < 2) 163 - return; 164 - 165 - cand_copy = toptree_alloc(cand->level, 0); 166 - toptree_for_each_child_safe(child, tmp, cand) { 167 - struct toptree *tmpchild; 168 - 169 - if (!cpumask_empty(&child->mask)) { 170 - tmpchild = toptree_get_child(cand_copy, child->id); 171 - toptree_move_children(child, tmpchild); 172 - } 173 - toptree_free(child); 174 - } 175 - toptree_move_children(cand_copy, cand); 176 - toptree_free(cand_copy); 177 - 178 - toptree_for_each_child(child, cand) 179 - toptree_unify(child); 180 - } 181 - 182 - /** 183 - * toptree_move - Move a node to another context 184 - * @cand: Pointer to the node to move 185 - * @target: Pointer to the node where @cand should go 186 - * 187 - * In the easiest case @cand is exactly on the level below @target 188 - * and will be immediately moved to the target. 189 - * 190 - * If @target's level is not the direct parent level of @cand, 191 - * nodes for the missing levels are created and put between 192 - * @cand and @target. The "stacking" nodes' IDs are taken from 193 - * @cand's parents. 194 - * 195 - * After this it is likely to have redundant nodes in the tree 196 - * which are addressed by means of toptree_unify. 
197 - */ 198 - void toptree_move(struct toptree *cand, struct toptree *target) 199 - { 200 - struct toptree *stack_target, *real_insert_point, *ptr, *tmp; 201 - 202 - if (cand->level + 1 == target->level) { 203 - toptree_remove(cand); 204 - toptree_insert(cand, target); 205 - return; 206 - } 207 - 208 - real_insert_point = NULL; 209 - ptr = cand; 210 - stack_target = NULL; 211 - 212 - do { 213 - tmp = stack_target; 214 - stack_target = toptree_alloc(ptr->level + 1, 215 - ptr->parent->id); 216 - toptree_insert(tmp, stack_target); 217 - if (!real_insert_point) 218 - real_insert_point = stack_target; 219 - ptr = ptr->parent; 220 - } while (stack_target->level < (target->level - 1)); 221 - 222 - toptree_remove(cand); 223 - toptree_insert(cand, real_insert_point); 224 - toptree_insert(stack_target, target); 225 - } 226 - 227 - /** 228 - * toptree_get_child - Access a tree node's child by its ID 229 - * @cand: Pointer to tree node whose child is to access 230 - * @id: The desired child's ID 231 - * 232 - * @cand's children are searched for a child with matching ID. 233 - * If no match can be found, a new child with the desired ID 234 - * is created and returned. 
235 - */ 236 - struct toptree *toptree_get_child(struct toptree *cand, int id) 237 - { 238 - struct toptree *child; 239 - 240 - toptree_for_each_child(child, cand) 241 - if (child->id == id) 242 - return child; 243 - child = toptree_alloc(cand->level-1, id); 244 - toptree_insert(child, cand); 245 - return child; 246 - } 247 - 248 - /** 249 - * toptree_first - Find the first descendant on specified level 250 - * @context: Pointer to tree node whose descendants are to be used 251 - * @level: The level of interest 252 - * 253 - * RETURNS: 254 - * @context's first descendant on the specified level, or NULL 255 - * if there is no matching descendant 256 - */ 257 - struct toptree *toptree_first(struct toptree *context, int level) 258 - { 259 - struct toptree *child, *tmp; 260 - 261 - if (context->level == level) 262 - return context; 263 - 264 - if (!list_empty(&context->children)) { 265 - list_for_each_entry(child, &context->children, sibling) { 266 - tmp = toptree_first(child, level); 267 - if (tmp) 268 - return tmp; 269 - } 270 - } 271 - return NULL; 272 - } 273 - 274 - /** 275 - * toptree_next_sibling - Return next sibling 276 - * @cur: Pointer to a tree node 277 - * 278 - * RETURNS: 279 - * If @cur has a parent and is not the last in the parent's children list, 280 - * the next sibling is returned. Or NULL when there are no siblings left. 281 - */ 282 - static struct toptree *toptree_next_sibling(struct toptree *cur) 283 - { 284 - if (cur->parent == NULL) 285 - return NULL; 286 - 287 - if (cur == list_last_entry(&cur->parent->children, 288 - struct toptree, sibling)) 289 - return NULL; 290 - return (struct toptree *) list_next_entry(cur, sibling); 291 - } 292 - 293 - /** 294 - * toptree_next - Tree traversal function 295 - * @cur: Pointer to current element 296 - * @context: Pointer to the root node of the tree or subtree to 297 - * be traversed. 298 - * @level: The level of interest. 
299 - * 300 - * RETURNS: 301 - * Pointer to the next node on level @level 302 - * or NULL when there is no next node. 303 - */ 304 - struct toptree *toptree_next(struct toptree *cur, struct toptree *context, 305 - int level) 306 - { 307 - struct toptree *cur_context, *tmp; 308 - 309 - if (!cur) 310 - return NULL; 311 - 312 - if (context->level == level) 313 - return NULL; 314 - 315 - tmp = toptree_next_sibling(cur); 316 - if (tmp != NULL) 317 - return tmp; 318 - 319 - cur_context = cur; 320 - while (cur_context->level < context->level - 1) { 321 - /* Step up */ 322 - cur_context = cur_context->parent; 323 - /* Step aside */ 324 - tmp = toptree_next_sibling(cur_context); 325 - if (tmp != NULL) { 326 - /* Step down */ 327 - tmp = toptree_first(tmp, level); 328 - if (tmp != NULL) 329 - return tmp; 330 - } 331 - } 332 - return NULL; 333 - } 334 - 335 - /** 336 - * toptree_count - Count descendants on specified level 337 - * @context: Pointer to node whose descendants are to be considered 338 - * @level: Only descendants on the specified level will be counted 339 - * 340 - * RETURNS: 341 - * Number of descendants on the specified level 342 - */ 343 - int toptree_count(struct toptree *context, int level) 344 - { 345 - struct toptree *cur; 346 - int cnt = 0; 347 - 348 - toptree_for_each(cur, context, level) 349 - cnt++; 350 - return cnt; 351 - }
-61
arch/s390/numa/toptree.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * NUMA support for s390 4 - * 5 - * A tree structure used for machine topology mangling 6 - * 7 - * Copyright IBM Corp. 2015 8 - */ 9 - #ifndef S390_TOPTREE_H 10 - #define S390_TOPTREE_H 11 - 12 - #include <linux/cpumask.h> 13 - #include <linux/list.h> 14 - 15 - struct toptree { 16 - int level; 17 - int id; 18 - cpumask_t mask; 19 - struct toptree *parent; 20 - struct list_head sibling; 21 - struct list_head children; 22 - }; 23 - 24 - struct toptree *toptree_alloc(int level, int id); 25 - void toptree_free(struct toptree *cand); 26 - void toptree_update_mask(struct toptree *cand); 27 - void toptree_unify(struct toptree *cand); 28 - struct toptree *toptree_get_child(struct toptree *cand, int id); 29 - void toptree_move(struct toptree *cand, struct toptree *target); 30 - int toptree_count(struct toptree *context, int level); 31 - 32 - struct toptree *toptree_first(struct toptree *context, int level); 33 - struct toptree *toptree_next(struct toptree *cur, struct toptree *context, 34 - int level); 35 - 36 - #define toptree_for_each_child(child, ptree) \ 37 - list_for_each_entry(child, &ptree->children, sibling) 38 - 39 - #define toptree_for_each_child_safe(child, ptmp, ptree) \ 40 - list_for_each_entry_safe(child, ptmp, &ptree->children, sibling) 41 - 42 - #define toptree_is_last(ptree) \ 43 - ((ptree->parent == NULL) || \ 44 - (ptree->parent->children.prev == &ptree->sibling)) 45 - 46 - #define toptree_for_each(ptree, cont, ttype) \ 47 - for (ptree = toptree_first(cont, ttype); \ 48 - ptree != NULL; \ 49 - ptree = toptree_next(ptree, cont, ttype)) 50 - 51 - #define toptree_for_each_safe(ptree, tmp, cont, ttype) \ 52 - for (ptree = toptree_first(cont, ttype), \ 53 - tmp = toptree_next(ptree, cont, ttype); \ 54 - ptree != NULL; \ 55 - ptree = tmp, \ 56 - tmp = toptree_next(ptree, cont, ttype)) 57 - 58 - #define toptree_for_each_sibling(ptree, start) \ 59 - toptree_for_each(ptree, start->parent, start->level) 60 
- 61 - #endif /* S390_TOPTREE_H */
+25 -58
arch/s390/pci/pci.c
··· 40 40 static LIST_HEAD(zpci_list); 41 41 static DEFINE_SPINLOCK(zpci_list_lock); 42 42 43 - static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 43 + static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE); 44 44 static DEFINE_SPINLOCK(zpci_domain_lock); 45 + static unsigned int zpci_num_domains_allocated; 45 46 46 47 #define ZPCI_IOMAP_ENTRIES \ 47 48 min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \ ··· 608 607 zpci_debug_exit_device(zdev); 609 608 } 610 609 611 - #ifdef CONFIG_HIBERNATE_CALLBACKS 612 - static int zpci_restore(struct device *dev) 613 - { 614 - struct pci_dev *pdev = to_pci_dev(dev); 615 - struct zpci_dev *zdev = to_zpci(pdev); 616 - int ret = 0; 617 - 618 - if (zdev->state != ZPCI_FN_STATE_ONLINE) 619 - goto out; 620 - 621 - ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES); 622 - if (ret) 623 - goto out; 624 - 625 - zpci_map_resources(pdev); 626 - zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, 627 - (u64) zdev->dma_table); 628 - 629 - out: 630 - return ret; 631 - } 632 - 633 - static int zpci_freeze(struct device *dev) 634 - { 635 - struct pci_dev *pdev = to_pci_dev(dev); 636 - struct zpci_dev *zdev = to_zpci(pdev); 637 - 638 - if (zdev->state != ZPCI_FN_STATE_ONLINE) 639 - return 0; 640 - 641 - zpci_unregister_ioat(zdev, 0); 642 - zpci_unmap_resources(pdev); 643 - return clp_disable_fh(zdev); 644 - } 645 - 646 - struct dev_pm_ops pcibios_pm_ops = { 647 - .thaw_noirq = zpci_restore, 648 - .freeze_noirq = zpci_freeze, 649 - .restore_noirq = zpci_restore, 650 - .poweroff_noirq = zpci_freeze, 651 - }; 652 - #endif /* CONFIG_HIBERNATE_CALLBACKS */ 653 - 654 610 static int zpci_alloc_domain(struct zpci_dev *zdev) 655 611 { 612 + spin_lock(&zpci_domain_lock); 613 + if (zpci_num_domains_allocated > (ZPCI_NR_DEVICES - 1)) { 614 + spin_unlock(&zpci_domain_lock); 615 + pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n", 616 + zdev->fid, ZPCI_NR_DEVICES); 617 + return -ENOSPC; 618 + } 619 
+ 656 620 if (zpci_unique_uid) { 657 621 zdev->domain = (u16) zdev->uid; 658 - if (zdev->domain >= ZPCI_NR_DEVICES) 659 - return 0; 622 + if (zdev->domain == 0) { 623 + pr_warn("UID checking is active but no UID is set for PCI function %08x, so automatic domain allocation is used instead\n", 624 + zdev->fid); 625 + update_uid_checking(false); 626 + goto auto_allocate; 627 + } 660 628 661 - spin_lock(&zpci_domain_lock); 662 629 if (test_bit(zdev->domain, zpci_domain)) { 663 630 spin_unlock(&zpci_domain_lock); 664 631 pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n", ··· 634 665 return -EEXIST; 635 666 } 636 667 set_bit(zdev->domain, zpci_domain); 668 + zpci_num_domains_allocated++; 637 669 spin_unlock(&zpci_domain_lock); 638 670 return 0; 639 671 } 640 - 641 - spin_lock(&zpci_domain_lock); 672 + auto_allocate: 673 + /* 674 + * We can always auto allocate domains below ZPCI_NR_DEVICES. 675 + * There is either a free domain or we have reached the maximum in 676 + * which case we would have bailed earlier. 677 + */ 642 678 zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES); 643 - if (zdev->domain == ZPCI_NR_DEVICES) { 644 - spin_unlock(&zpci_domain_lock); 645 - pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n", 646 - zdev->fid, ZPCI_NR_DEVICES); 647 - return -ENOSPC; 648 - } 649 679 set_bit(zdev->domain, zpci_domain); 680 + zpci_num_domains_allocated++; 650 681 spin_unlock(&zpci_domain_lock); 651 682 return 0; 652 683 } 653 684 654 685 static void zpci_free_domain(struct zpci_dev *zdev) 655 686 { 656 - if (zdev->domain >= ZPCI_NR_DEVICES) 657 - return; 658 - 659 687 spin_lock(&zpci_domain_lock); 660 688 clear_bit(zdev->domain, zpci_domain); 689 + zpci_num_domains_allocated--; 661 690 spin_unlock(&zpci_domain_lock); 662 691 } 663 692
+1 -1
arch/s390/pci/pci_clp.c
··· 24 24 25 25 bool zpci_unique_uid; 26 26 27 - static void update_uid_checking(bool new) 27 + void update_uid_checking(bool new) 28 28 { 29 29 if (zpci_unique_uid != new) 30 30 zpci_dbg(1, "uid checking:%d\n", new);
+29 -70
drivers/pci/hotplug/s390_pci_hpc.c
··· 19 19 #include <asm/sclp.h> 20 20 21 21 #define SLOT_NAME_SIZE 10 22 - static LIST_HEAD(s390_hotplug_slot_list); 23 22 24 23 static int zpci_fn_configured(enum zpci_state state) 25 24 { ··· 26 27 state == ZPCI_FN_STATE_ONLINE; 27 28 } 28 29 29 - /* 30 - * struct slot - slot information for each *physical* slot 31 - */ 32 - struct slot { 33 - struct list_head slot_list; 34 - struct hotplug_slot hotplug_slot; 35 - struct zpci_dev *zdev; 36 - }; 37 - 38 - static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot) 30 + static inline int zdev_configure(struct zpci_dev *zdev) 39 31 { 40 - return container_of(hotplug_slot, struct slot, hotplug_slot); 41 - } 32 + int ret = sclp_pci_configure(zdev->fid); 42 33 43 - static inline int slot_configure(struct slot *slot) 44 - { 45 - int ret = sclp_pci_configure(slot->zdev->fid); 46 - 47 - zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, ret); 34 + zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, ret); 48 35 if (!ret) 49 - slot->zdev->state = ZPCI_FN_STATE_CONFIGURED; 36 + zdev->state = ZPCI_FN_STATE_CONFIGURED; 50 37 51 38 return ret; 52 39 } 53 40 54 - static inline int slot_deconfigure(struct slot *slot) 41 + static inline int zdev_deconfigure(struct zpci_dev *zdev) 55 42 { 56 - int ret = sclp_pci_deconfigure(slot->zdev->fid); 43 + int ret = sclp_pci_deconfigure(zdev->fid); 57 44 58 - zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, ret); 45 + zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret); 59 46 if (!ret) 60 - slot->zdev->state = ZPCI_FN_STATE_STANDBY; 47 + zdev->state = ZPCI_FN_STATE_STANDBY; 61 48 62 49 return ret; 63 50 } 64 51 65 52 static int enable_slot(struct hotplug_slot *hotplug_slot) 66 53 { 67 - struct slot *slot = to_slot(hotplug_slot); 54 + struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, 55 + hotplug_slot); 68 56 int rc; 69 57 70 - if (slot->zdev->state != ZPCI_FN_STATE_STANDBY) 58 + if (zdev->state != ZPCI_FN_STATE_STANDBY) 71 59 return -EIO; 72 60 73 - rc = 
slot_configure(slot); 61 + rc = zdev_configure(zdev); 74 62 if (rc) 75 63 return rc; 76 64 77 - rc = zpci_enable_device(slot->zdev); 65 + rc = zpci_enable_device(zdev); 78 66 if (rc) 79 67 goto out_deconfigure; 80 68 81 - pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN); 69 + pci_scan_slot(zdev->bus, ZPCI_DEVFN); 82 70 pci_lock_rescan_remove(); 83 - pci_bus_add_devices(slot->zdev->bus); 71 + pci_bus_add_devices(zdev->bus); 84 72 pci_unlock_rescan_remove(); 85 73 86 74 return rc; 87 75 88 76 out_deconfigure: 89 - slot_deconfigure(slot); 77 + zdev_deconfigure(zdev); 90 78 return rc; 91 79 } 92 80 93 81 static int disable_slot(struct hotplug_slot *hotplug_slot) 94 82 { 95 - struct slot *slot = to_slot(hotplug_slot); 83 + struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, 84 + hotplug_slot); 96 85 struct pci_dev *pdev; 97 86 int rc; 98 87 99 - if (!zpci_fn_configured(slot->zdev->state)) 88 + if (!zpci_fn_configured(zdev->state)) 100 89 return -EIO; 101 90 102 - pdev = pci_get_slot(slot->zdev->bus, ZPCI_DEVFN); 91 + pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN); 103 92 if (pdev) { 104 93 pci_stop_and_remove_bus_device_locked(pdev); 105 94 pci_dev_put(pdev); 106 95 } 107 96 108 - rc = zpci_disable_device(slot->zdev); 97 + rc = zpci_disable_device(zdev); 109 98 if (rc) 110 99 return rc; 111 100 112 - return slot_deconfigure(slot); 101 + return zdev_deconfigure(zdev); 113 102 } 114 103 115 104 static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) 116 105 { 117 - struct slot *slot = to_slot(hotplug_slot); 106 + struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, 107 + hotplug_slot); 118 108 119 - switch (slot->zdev->state) { 109 + switch (zdev->state) { 120 110 case ZPCI_FN_STATE_STANDBY: 121 111 *value = 0; 122 112 break; ··· 133 145 int zpci_init_slot(struct zpci_dev *zdev) 134 146 { 135 147 char name[SLOT_NAME_SIZE]; 136 - struct slot *slot; 137 - int rc; 138 148 139 - if (!zdev) 140 - return 0; 141 - 142 - slot = 
kzalloc(sizeof(*slot), GFP_KERNEL); 143 - if (!slot) 144 - goto error; 145 - 146 - slot->zdev = zdev; 147 - slot->hotplug_slot.ops = &s390_hotplug_slot_ops; 149 + zdev->hotplug_slot.ops = &s390_hotplug_slot_ops; 148 150 149 151 snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid); 150 - rc = pci_hp_register(&slot->hotplug_slot, zdev->bus, 151 - ZPCI_DEVFN, name); 152 - if (rc) 153 - goto error_reg; 154 - 155 - list_add(&slot->slot_list, &s390_hotplug_slot_list); 156 - return 0; 157 - 158 - error_reg: 159 - kfree(slot); 160 - error: 161 - return -ENOMEM; 152 + return pci_hp_register(&zdev->hotplug_slot, zdev->bus, 153 + ZPCI_DEVFN, name); 162 154 } 163 155 164 156 void zpci_exit_slot(struct zpci_dev *zdev) 165 157 { 166 - struct slot *slot, *next; 167 - 168 - list_for_each_entry_safe(slot, next, &s390_hotplug_slot_list, 169 - slot_list) { 170 - if (slot->zdev != zdev) 171 - continue; 172 - list_del(&slot->slot_list); 173 - pci_hp_deregister(&slot->hotplug_slot); 174 - kfree(slot); 175 - } 158 + pci_hp_deregister(&zdev->hotplug_slot); 176 159 }
+1 -1
drivers/s390/block/dasd_diag.c
··· 58 58 59 59 struct dasd_diag_req { 60 60 unsigned int block_count; 61 - struct dasd_diag_bio bio[0]; 61 + struct dasd_diag_bio bio[]; 62 62 }; 63 63 64 64 static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
+1 -1
drivers/s390/block/dasd_eckd.h
··· 220 220 __u8 imbedded_count; 221 221 __u8 extended_operation; 222 222 __u16 extended_parameter_length; 223 - __u8 extended_parameter[0]; 223 + __u8 extended_parameter[]; 224 224 } __attribute__ ((packed)); 225 225 226 226 /* Prefix data for format 0x00 and 0x01 */
+1 -1
drivers/s390/char/con3215.c
··· 398 398 } 399 399 if (dstat == 0x08) 400 400 break; 401 - /* else, fall through */ 401 + fallthrough; 402 402 case 0x04: 403 403 /* Device end interrupt. */ 404 404 if ((raw = req->info) == NULL)
+1 -1
drivers/s390/char/hmcdrv_ftp.c
··· 137 137 while ((*cmd != '\0') && !iscntrl(*cmd)) 138 138 ++cmd; 139 139 ftp->fname = start; 140 - /* fall through */ 140 + fallthrough; 141 141 default: 142 142 *cmd = '\0'; 143 143 break;
+1 -1
drivers/s390/char/raw3270.h
··· 211 211 struct list_head update; 212 212 unsigned long size; 213 213 unsigned long len; 214 - char string[0]; 214 + char string[]; 215 215 } __attribute__ ((aligned(8))); 216 216 217 217 static inline struct string *
+1 -1
drivers/s390/char/sclp_cmd.c
··· 406 406 if (!size) 407 407 goto skip_add; 408 408 for (addr = start; addr < start + size; addr += block_size) 409 - add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size); 409 + add_memory(0, addr, block_size); 410 410 skip_add: 411 411 first_rn = rn; 412 412 num = 1;
+1 -1
drivers/s390/char/sclp_pci.c
··· 39 39 u8 atype; 40 40 u32 fh; 41 41 u32 fid; 42 - u8 data[0]; 42 + u8 data[]; 43 43 } __packed; 44 44 45 45 struct err_notify_sccb {
+1 -1
drivers/s390/char/sclp_sdias.c
··· 214 214 break; 215 215 case SDIAS_EVSTATE_NO_DATA: 216 216 TRACE("no data\n"); 217 - /* fall through */ 217 + fallthrough; 218 218 default: 219 219 pr_err("Error from SCLP while copying hsa. Event status = %x\n", 220 220 sdias_evbuf.event_status);
+3 -3
drivers/s390/char/tape_core.c
··· 677 677 switch (device->tape_state) { 678 678 case TS_INIT: 679 679 tape_state_set(device, TS_NOT_OPER); 680 - /* fallthrough */ 680 + fallthrough; 681 681 case TS_NOT_OPER: 682 682 /* 683 683 * Nothing to do. ··· 950 950 break; 951 951 if (device->tape_state == TS_UNUSED) 952 952 break; 953 - /* fallthrough */ 953 + fallthrough; 954 954 default: 955 955 if (device->tape_state == TS_BLKUSE) 956 956 break; ··· 1118 1118 case -ETIMEDOUT: 1119 1119 DBF_LH(1, "(%08x): Request timed out\n", 1120 1120 device->cdev_id); 1121 - /* fallthrough */ 1121 + fallthrough; 1122 1122 case -EIO: 1123 1123 __tape_end_request(device, request, -EIO); 1124 1124 break;
+2 -6
drivers/s390/cio/airq.c
··· 105 105 return IRQ_HANDLED; 106 106 } 107 107 108 - static struct irqaction airq_interrupt = { 109 - .name = "AIO", 110 - .handler = do_airq_interrupt, 111 - }; 112 - 113 108 void __init init_airq_interrupts(void) 114 109 { 115 110 irq_set_chip_and_handler(THIN_INTERRUPT, 116 111 &dummy_irq_chip, handle_percpu_irq); 117 - setup_irq(THIN_INTERRUPT, &airq_interrupt); 112 + if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL)) 113 + panic("Failed to register AIO interrupt\n"); 118 114 } 119 115 120 116 static inline unsigned long iv_size(unsigned long bits)
-69
drivers/s390/cio/ccwgroup.c
··· 485 485 gdrv->shutdown(gdev); 486 486 } 487 487 488 - static int ccwgroup_pm_prepare(struct device *dev) 489 - { 490 - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 491 - struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); 492 - 493 - /* Fail while device is being set online/offline. */ 494 - if (atomic_read(&gdev->onoff)) 495 - return -EAGAIN; 496 - 497 - if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) 498 - return 0; 499 - 500 - return gdrv->prepare ? gdrv->prepare(gdev) : 0; 501 - } 502 - 503 - static void ccwgroup_pm_complete(struct device *dev) 504 - { 505 - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 506 - struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); 507 - 508 - if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) 509 - return; 510 - 511 - if (gdrv->complete) 512 - gdrv->complete(gdev); 513 - } 514 - 515 - static int ccwgroup_pm_freeze(struct device *dev) 516 - { 517 - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 518 - struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); 519 - 520 - if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) 521 - return 0; 522 - 523 - return gdrv->freeze ? gdrv->freeze(gdev) : 0; 524 - } 525 - 526 - static int ccwgroup_pm_thaw(struct device *dev) 527 - { 528 - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 529 - struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); 530 - 531 - if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) 532 - return 0; 533 - 534 - return gdrv->thaw ? gdrv->thaw(gdev) : 0; 535 - } 536 - 537 - static int ccwgroup_pm_restore(struct device *dev) 538 - { 539 - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 540 - struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); 541 - 542 - if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) 543 - return 0; 544 - 545 - return gdrv->restore ? 
gdrv->restore(gdev) : 0; 546 - } 547 - 548 - static const struct dev_pm_ops ccwgroup_pm_ops = { 549 - .prepare = ccwgroup_pm_prepare, 550 - .complete = ccwgroup_pm_complete, 551 - .freeze = ccwgroup_pm_freeze, 552 - .thaw = ccwgroup_pm_thaw, 553 - .restore = ccwgroup_pm_restore, 554 - }; 555 - 556 488 static struct bus_type ccwgroup_bus_type = { 557 489 .name = "ccwgroup", 558 490 .remove = ccwgroup_remove, 559 491 .shutdown = ccwgroup_shutdown, 560 - .pm = &ccwgroup_pm_ops, 561 492 }; 562 493 563 494 bool dev_is_ccwgroup(struct device *dev)
+3 -2
drivers/s390/cio/chsc.c
··· 180 180 * @scssc: request and response block for SADC 181 181 * @summary_indicator_addr: summary indicator address 182 182 * @subchannel_indicator_addr: subchannel indicator address 183 + * @isc: Interruption Subclass for this subchannel 183 184 * 184 185 * Returns 0 on success. 185 186 */ 186 187 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, 187 - u64 summary_indicator_addr, u64 subchannel_indicator_addr) 188 + u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc) 188 189 { 189 190 memset(scssc, 0, sizeof(*scssc)); 190 191 scssc->request.length = 0x0fe0; ··· 197 196 198 197 scssc->ks = PAGE_DEFAULT_KEY >> 4; 199 198 scssc->kc = PAGE_DEFAULT_KEY >> 4; 200 - scssc->isc = QDIO_AIRQ_ISC; 199 + scssc->isc = isc; 201 200 scssc->schid = schid; 202 201 203 202 /* enable the time delay disablement facility */
+2 -1
drivers/s390/cio/chsc.h
··· 163 163 int chsc_get_channel_measurement_chars(struct channel_path *chp); 164 164 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); 165 165 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, 166 - u64 summary_indicator_addr, u64 subchannel_indicator_addr); 166 + u64 summary_indicator_addr, u64 subchannel_indicator_addr, 167 + u8 isc); 167 168 int chsc_sgib(u32 origin); 168 169 int chsc_error_from_response(int response); 169 170
+2 -6
drivers/s390/cio/cio.c
··· 563 563 return IRQ_HANDLED; 564 564 } 565 565 566 - static struct irqaction io_interrupt = { 567 - .name = "I/O", 568 - .handler = do_cio_interrupt, 569 - }; 570 - 571 566 void __init init_cio_interrupts(void) 572 567 { 573 568 irq_set_chip_and_handler(IO_INTERRUPT, 574 569 &dummy_irq_chip, handle_percpu_irq); 575 - setup_irq(IO_INTERRUPT, &io_interrupt); 570 + if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL)) 571 + panic("Failed to register I/O interrupt\n"); 576 572 } 577 573 578 574 #ifdef CONFIG_CCW_CONSOLE
+2 -2
drivers/s390/cio/device.c
··· 1262 1262 sch = to_subchannel(cdev->dev.parent); 1263 1263 if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) 1264 1264 break; 1265 - /* fall through */ 1265 + fallthrough; 1266 1266 case DEV_STATE_DISCONNECTED: 1267 1267 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", 1268 1268 cdev->private->dev_id.ssid, ··· 2091 2091 case CDEV_TODO_UNREG_EVAL: 2092 2092 if (!sch_is_pseudo_sch(sch)) 2093 2093 css_schedule_eval(sch->schid); 2094 - /* fall-through */ 2094 + fallthrough; 2095 2095 case CDEV_TODO_UNREG: 2096 2096 if (sch_is_pseudo_sch(sch)) 2097 2097 ccw_device_unregister(cdev);
+1 -1
drivers/s390/cio/idset.c
··· 13 13 struct idset { 14 14 int num_ssid; 15 15 int num_id; 16 - unsigned long bitmap[0]; 16 + unsigned long bitmap[]; 17 17 }; 18 18 19 19 static inline unsigned long bitmap_size(int num_ssid, int num_id)
+2 -7
drivers/s390/cio/qdio.h
··· 250 250 /* upper-layer program handler */ 251 251 qdio_handler_t (*handler); 252 252 253 - struct dentry *debugfs_q; 254 253 struct qdio_irq *irq_ptr; 255 254 struct sl *sl; 256 255 /* ··· 265 266 struct ccw_device *cdev; 266 267 struct list_head entry; /* list of thinint devices */ 267 268 struct dentry *debugfs_dev; 268 - struct dentry *debugfs_perf; 269 269 270 270 unsigned long int_parm; 271 271 struct subchannel_id schid; ··· 389 391 int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, 390 392 struct subchannel_id *schid, 391 393 struct qdio_ssqd_desc *data); 392 - int qdio_setup_irq(struct qdio_initialize *init_data); 393 - void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, 394 - struct ccw_device *cdev); 394 + int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data); 395 + void qdio_print_subchannel_info(struct qdio_irq *irq_ptr); 395 396 void qdio_release_memory(struct qdio_irq *irq_ptr); 396 - int qdio_setup_create_sysfs(struct ccw_device *cdev); 397 - void qdio_setup_destroy_sysfs(struct ccw_device *cdev); 398 397 int qdio_setup_init(void); 399 398 void qdio_setup_exit(void); 400 399 int qdio_enable_async_operation(struct qdio_output_q *q);
+30 -29
drivers/s390/cio/qdio_debug.c
··· 81 81 82 82 /* allocate trace view for the interface */ 83 83 snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s", 84 - dev_name(&init_data->cdev->dev)); 84 + dev_name(&irq_ptr->cdev->dev)); 85 85 irq_ptr->debug_area = qdio_get_dbf_entry(text); 86 86 if (irq_ptr->debug_area) 87 87 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused"); ··· 190 190 191 191 DEFINE_SHOW_ATTRIBUTE(qstat); 192 192 193 + static int ssqd_show(struct seq_file *m, void *v) 194 + { 195 + struct ccw_device *cdev = m->private; 196 + struct qdio_ssqd_desc ssqd; 197 + int rc; 198 + 199 + rc = qdio_get_ssqd_desc(cdev, &ssqd); 200 + if (rc) 201 + return rc; 202 + 203 + seq_hex_dump(m, "", DUMP_PREFIX_NONE, 16, 4, &ssqd, sizeof(ssqd), 204 + false); 205 + return 0; 206 + } 207 + 208 + DEFINE_SHOW_ATTRIBUTE(ssqd); 209 + 193 210 static char *qperf_names[] = { 194 211 "Assumed adapter interrupts", 195 212 "QDIO interrupts", ··· 301 284 .release = single_release, 302 285 }; 303 286 304 - static void setup_debugfs_entry(struct qdio_q *q) 287 + static void setup_debugfs_entry(struct dentry *parent, struct qdio_q *q) 305 288 { 306 289 char name[QDIO_DEBUGFS_NAME_LEN]; 307 290 308 291 snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d", 309 292 q->is_input_q ? 
"input" : "output", 310 293 q->nr); 311 - q->debugfs_q = debugfs_create_file(name, 0444, 312 - q->irq_ptr->debugfs_dev, q, &qstat_fops); 313 - if (IS_ERR(q->debugfs_q)) 314 - q->debugfs_q = NULL; 294 + debugfs_create_file(name, 0444, parent, q, &qstat_fops); 315 295 } 316 296 317 - void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) 297 + void qdio_setup_debug_entries(struct qdio_irq *irq_ptr) 318 298 { 319 299 struct qdio_q *q; 320 300 int i; 321 301 322 - irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev), 302 + irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&irq_ptr->cdev->dev), 323 303 debugfs_root); 324 - if (IS_ERR(irq_ptr->debugfs_dev)) 325 - irq_ptr->debugfs_dev = NULL; 326 - 327 - irq_ptr->debugfs_perf = debugfs_create_file("statistics", 328 - S_IFREG | S_IRUGO | S_IWUSR, 329 - irq_ptr->debugfs_dev, irq_ptr, 330 - &debugfs_perf_fops); 331 - if (IS_ERR(irq_ptr->debugfs_perf)) 332 - irq_ptr->debugfs_perf = NULL; 304 + debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR, 305 + irq_ptr->debugfs_dev, irq_ptr, &debugfs_perf_fops); 306 + debugfs_create_file("ssqd", 0444, irq_ptr->debugfs_dev, irq_ptr->cdev, 307 + &ssqd_fops); 333 308 334 309 for_each_input_queue(irq_ptr, q, i) 335 - setup_debugfs_entry(q); 310 + setup_debugfs_entry(irq_ptr->debugfs_dev, q); 336 311 for_each_output_queue(irq_ptr, q, i) 337 - setup_debugfs_entry(q); 312 + setup_debugfs_entry(irq_ptr->debugfs_dev, q); 338 313 } 339 314 340 315 void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr) 341 316 { 342 - struct qdio_q *q; 343 - int i; 344 - 345 - for_each_input_queue(irq_ptr, q, i) 346 - debugfs_remove(q->debugfs_q); 347 - for_each_output_queue(irq_ptr, q, i) 348 - debugfs_remove(q->debugfs_q); 349 - debugfs_remove(irq_ptr->debugfs_perf); 350 - debugfs_remove(irq_ptr->debugfs_dev); 317 + debugfs_remove_recursive(irq_ptr->debugfs_dev); 351 318 } 352 319 353 320 int __init qdio_debug_init(void) ··· 353 352 void qdio_debug_exit(void) 
354 353 { 355 354 qdio_clear_dbf_list(); 356 - debugfs_remove(debugfs_root); 355 + debugfs_remove_recursive(debugfs_root); 357 356 debug_unregister(qdio_dbf_setup); 358 357 debug_unregister(qdio_dbf_error); 359 358 }
+1 -2
drivers/s390/cio/qdio_debug.h
··· 66 66 67 67 int qdio_allocate_dbf(struct qdio_initialize *init_data, 68 68 struct qdio_irq *irq_ptr); 69 - void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, 70 - struct ccw_device *cdev); 69 + void qdio_setup_debug_entries(struct qdio_irq *irq_ptr); 71 70 void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr); 72 71 int qdio_debug_init(void); 73 72 void qdio_debug_exit(void);
+11 -12
drivers/s390/cio/qdio_main.c
··· 1100 1100 } 1101 1101 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 1102 1102 1103 - static void qdio_shutdown_queues(struct ccw_device *cdev) 1103 + static void qdio_shutdown_queues(struct qdio_irq *irq_ptr) 1104 1104 { 1105 - struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1106 1105 struct qdio_q *q; 1107 1106 int i; 1108 1107 ··· 1149 1150 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1150 1151 1151 1152 tiqdio_remove_device(irq_ptr); 1152 - qdio_shutdown_queues(cdev); 1153 + qdio_shutdown_queues(irq_ptr); 1153 1154 qdio_shutdown_debug_entries(irq_ptr); 1154 1155 1155 1156 /* cleanup subchannel */ ··· 1224 1225 */ 1225 1226 int qdio_allocate(struct qdio_initialize *init_data) 1226 1227 { 1228 + struct ccw_device *cdev = init_data->cdev; 1227 1229 struct subchannel_id schid; 1228 1230 struct qdio_irq *irq_ptr; 1229 1231 1230 - ccw_device_get_schid(init_data->cdev, &schid); 1232 + ccw_device_get_schid(cdev, &schid); 1231 1233 DBF_EVENT("qallocate:%4x", schid.sch_no); 1232 1234 1233 1235 if ((init_data->no_input_qs && !init_data->input_handler) || ··· 1248 1248 if (!irq_ptr) 1249 1249 goto out_err; 1250 1250 1251 + irq_ptr->cdev = cdev; 1251 1252 mutex_init(&irq_ptr->setup_mutex); 1252 1253 if (qdio_allocate_dbf(init_data, irq_ptr)) 1253 1254 goto out_rel; ··· 1273 1272 goto out_rel; 1274 1273 1275 1274 INIT_LIST_HEAD(&irq_ptr->entry); 1276 - init_data->cdev->private->qdio_data = irq_ptr; 1275 + cdev->private->qdio_data = irq_ptr; 1277 1276 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1278 1277 return 0; 1279 1278 out_rel: ··· 1312 1311 int qdio_establish(struct qdio_initialize *init_data) 1313 1312 { 1314 1313 struct ccw_device *cdev = init_data->cdev; 1314 + struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1315 1315 struct subchannel_id schid; 1316 - struct qdio_irq *irq_ptr; 1317 1316 int rc; 1318 1317 1319 1318 ccw_device_get_schid(cdev, &schid); 1320 1319 DBF_EVENT("qestablish:%4x", schid.sch_no); 1321 1320 1322 - irq_ptr = 
cdev->private->qdio_data; 1323 1321 if (!irq_ptr) 1324 1322 return -ENODEV; 1325 1323 1326 1324 mutex_lock(&irq_ptr->setup_mutex); 1327 - qdio_setup_irq(init_data); 1325 + qdio_setup_irq(irq_ptr, init_data); 1328 1326 1329 1327 rc = qdio_establish_thinint(irq_ptr); 1330 1328 if (rc) { ··· 1369 1369 qdio_init_buf_states(irq_ptr); 1370 1370 1371 1371 mutex_unlock(&irq_ptr->setup_mutex); 1372 - qdio_print_subchannel_info(irq_ptr, cdev); 1373 - qdio_setup_debug_entries(irq_ptr, cdev); 1372 + qdio_print_subchannel_info(irq_ptr); 1373 + qdio_setup_debug_entries(irq_ptr); 1374 1374 return 0; 1375 1375 } 1376 1376 EXPORT_SYMBOL_GPL(qdio_establish); ··· 1381 1381 */ 1382 1382 int qdio_activate(struct ccw_device *cdev) 1383 1383 { 1384 + struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1384 1385 struct subchannel_id schid; 1385 - struct qdio_irq *irq_ptr; 1386 1386 int rc; 1387 1387 1388 1388 ccw_device_get_schid(cdev, &schid); 1389 1389 DBF_EVENT("qactivate:%4x", schid.sch_no); 1390 1390 1391 - irq_ptr = cdev->private->qdio_data; 1392 1391 if (!irq_ptr) 1393 1392 return -ENODEV; 1394 1393
+14 -15
drivers/s390/cio/qdio_setup.c
··· 449 449 memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8); 450 450 } 451 451 452 - int qdio_setup_irq(struct qdio_initialize *init_data) 452 + int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) 453 453 { 454 + struct ccw_device *cdev = irq_ptr->cdev; 454 455 struct ciw *ciw; 455 - struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 456 456 457 457 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); 458 458 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); ··· 460 460 memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc)); 461 461 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); 462 462 463 - irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL; 464 - irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0; 463 + irq_ptr->debugfs_dev = NULL; 464 + irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0; 465 + irq_ptr->state = QDIO_IRQ_STATE_INACTIVE; 465 466 466 467 /* wipes qib.ac, required by ar7063 */ 467 468 memset(irq_ptr->qdr, 0, sizeof(struct qdr)); ··· 470 469 irq_ptr->int_parm = init_data->int_parm; 471 470 irq_ptr->nr_input_qs = init_data->no_input_qs; 472 471 irq_ptr->nr_output_qs = init_data->no_output_qs; 473 - irq_ptr->cdev = init_data->cdev; 474 472 irq_ptr->scan_threshold = init_data->scan_threshold; 475 - ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid); 473 + ccw_device_get_schid(cdev, &irq_ptr->schid); 476 474 setup_queues(irq_ptr, init_data); 477 475 478 476 if (init_data->irq_poll) { ··· 494 494 /* qdr, qib, sls, slsbs, slibs, sbales are filled now */ 495 495 496 496 /* get qdio commands */ 497 - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); 497 + ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE); 498 498 if (!ciw) { 499 499 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); 500 500 return -EINVAL; 501 501 } 502 502 irq_ptr->equeue = *ciw; 503 503 504 - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); 504 + ciw = ccw_device_get_ciw(cdev, 
CIW_TYPE_AQUEUE); 505 505 if (!ciw) { 506 506 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); 507 507 return -EINVAL; ··· 509 509 irq_ptr->aqueue = *ciw; 510 510 511 511 /* set new interrupt handler */ 512 - spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev)); 513 - irq_ptr->orig_handler = init_data->cdev->handler; 514 - init_data->cdev->handler = qdio_int_handler; 515 - spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); 512 + spin_lock_irq(get_ccwdev_lock(cdev)); 513 + irq_ptr->orig_handler = cdev->handler; 514 + cdev->handler = qdio_int_handler; 515 + spin_unlock_irq(get_ccwdev_lock(cdev)); 516 516 return 0; 517 517 } 518 518 519 - void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, 520 - struct ccw_device *cdev) 519 + void qdio_print_subchannel_info(struct qdio_irq *irq_ptr) 521 520 { 522 521 char s[80]; 523 522 524 523 snprintf(s, 80, "qdio: %s %s on SC %x using " 525 524 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n", 526 - dev_name(&cdev->dev), 525 + dev_name(&irq_ptr->cdev->dev), 527 526 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : 528 527 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), 529 528 irq_ptr->schid.sch_no,
+1 -1
drivers/s390/cio/qdio_thinint.c
··· 207 207 } 208 208 209 209 rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr, 210 - subchannel_indicator_addr); 210 + subchannel_indicator_addr, tiqdio_airq.isc); 211 211 if (rc) { 212 212 DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, 213 213 scssc->response.code);
+42 -170
drivers/s390/crypto/ap_bus.c
··· 18 18 #include <linux/init.h> 19 19 #include <linux/delay.h> 20 20 #include <linux/err.h> 21 + #include <linux/freezer.h> 21 22 #include <linux/interrupt.h> 22 23 #include <linux/workqueue.h> 23 24 #include <linux/slab.h> 24 25 #include <linux/notifier.h> 25 26 #include <linux/kthread.h> 26 27 #include <linux/mutex.h> 27 - #include <linux/suspend.h> 28 28 #include <asm/airq.h> 29 29 #include <linux/atomic.h> 30 30 #include <asm/isc.h> ··· 103 103 */ 104 104 static unsigned long long poll_timeout = 250000; 105 105 106 - /* Suspend flag */ 107 - static int ap_suspend_flag; 108 106 /* Maximum domain id */ 109 107 static int ap_max_domain_id; 110 - /* 111 - * Flag to check if domain was set through module parameter domain=. This is 112 - * important when supsend and resume is done in a z/VM environment where the 113 - * domain might change. 114 - */ 115 - static int user_set_domain; 108 + 116 109 static struct bus_type ap_bus_type; 117 110 118 111 /* Adapter interrupt definitions */ ··· 353 360 wake_up(&ap_poll_wait); 354 361 break; 355 362 } 356 - /* Fall through */ 363 + fallthrough; 357 364 case AP_WAIT_TIMEOUT: 358 365 spin_lock_bh(&ap_poll_timer_lock); 359 366 if (!hrtimer_is_queued(&ap_poll_timer)) { ··· 379 386 { 380 387 struct ap_queue *aq = from_timer(aq, t, timeout); 381 388 382 - if (ap_suspend_flag) 383 - return; 384 389 spin_lock_bh(&aq->lock); 385 390 ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT)); 386 391 spin_unlock_bh(&aq->lock); ··· 392 401 */ 393 402 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) 394 403 { 395 - if (!ap_suspend_flag) 396 - tasklet_schedule(&ap_tasklet); 404 + tasklet_schedule(&ap_tasklet); 397 405 return HRTIMER_NORESTART; 398 406 } 399 407 ··· 403 413 static void ap_interrupt_handler(struct airq_struct *airq, bool floating) 404 414 { 405 415 inc_irq_stat(IRQIO_APB); 406 - if (!ap_suspend_flag) 407 - tasklet_schedule(&ap_tasklet); 416 + tasklet_schedule(&ap_tasklet); 408 417 } 409 418 410 419 /** ··· 475 486 
while (!kthread_should_stop()) { 476 487 add_wait_queue(&ap_poll_wait, &wait); 477 488 set_current_state(TASK_INTERRUPTIBLE); 478 - if (ap_suspend_flag || !ap_pending_requests()) { 489 + if (!ap_pending_requests()) { 479 490 schedule(); 480 491 try_to_freeze(); 481 492 } ··· 576 587 return retval; 577 588 } 578 589 579 - static int ap_dev_suspend(struct device *dev) 580 - { 581 - struct ap_device *ap_dev = to_ap_dev(dev); 582 - 583 - if (ap_dev->drv && ap_dev->drv->suspend) 584 - ap_dev->drv->suspend(ap_dev); 585 - return 0; 586 - } 587 - 588 - static int ap_dev_resume(struct device *dev) 589 - { 590 - struct ap_device *ap_dev = to_ap_dev(dev); 591 - 592 - if (ap_dev->drv && ap_dev->drv->resume) 593 - ap_dev->drv->resume(ap_dev); 594 - return 0; 595 - } 596 - 597 - static void ap_bus_suspend(void) 598 - { 599 - AP_DBF(DBF_DEBUG, "%s running\n", __func__); 600 - 601 - ap_suspend_flag = 1; 602 - /* 603 - * Disable scanning for devices, thus we do not want to scan 604 - * for them after removing. 
605 - */ 606 - flush_work(&ap_scan_work); 607 - tasklet_disable(&ap_tasklet); 608 - } 609 - 610 - static int __ap_card_devices_unregister(struct device *dev, void *dummy) 611 - { 612 - if (is_card_dev(dev)) 613 - device_unregister(dev); 614 - return 0; 615 - } 616 - 617 - static int __ap_queue_devices_unregister(struct device *dev, void *dummy) 618 - { 619 - if (is_queue_dev(dev)) 620 - device_unregister(dev); 621 - return 0; 622 - } 623 - 624 590 static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) 625 591 { 626 592 if (is_queue_dev(dev) && ··· 584 640 return 0; 585 641 } 586 642 587 - static void ap_bus_resume(void) 588 - { 589 - int rc; 590 - 591 - AP_DBF(DBF_DEBUG, "%s running\n", __func__); 592 - 593 - /* remove all queue devices */ 594 - bus_for_each_dev(&ap_bus_type, NULL, NULL, 595 - __ap_queue_devices_unregister); 596 - /* remove all card devices */ 597 - bus_for_each_dev(&ap_bus_type, NULL, NULL, 598 - __ap_card_devices_unregister); 599 - 600 - /* Reset thin interrupt setting */ 601 - if (ap_interrupts_available() && !ap_using_interrupts()) { 602 - rc = register_adapter_interrupt(&ap_airq); 603 - ap_airq_flag = (rc == 0); 604 - } 605 - if (!ap_interrupts_available() && ap_using_interrupts()) { 606 - unregister_adapter_interrupt(&ap_airq); 607 - ap_airq_flag = 0; 608 - } 609 - /* Reset domain */ 610 - if (!user_set_domain) 611 - ap_domain_index = -1; 612 - /* Get things going again */ 613 - ap_suspend_flag = 0; 614 - if (ap_airq_flag) 615 - xchg(ap_airq.lsi_ptr, 0); 616 - tasklet_enable(&ap_tasklet); 617 - queue_work(system_long_wq, &ap_scan_work); 618 - } 619 - 620 - static int ap_power_event(struct notifier_block *this, unsigned long event, 621 - void *ptr) 622 - { 623 - switch (event) { 624 - case PM_HIBERNATION_PREPARE: 625 - case PM_SUSPEND_PREPARE: 626 - ap_bus_suspend(); 627 - break; 628 - case PM_POST_HIBERNATION: 629 - case PM_POST_SUSPEND: 630 - ap_bus_resume(); 631 - break; 632 - default: 633 - break; 634 - } 635 - 
return NOTIFY_DONE; 636 - } 637 - static struct notifier_block ap_power_notifier = { 638 - .notifier_call = ap_power_event, 639 - }; 640 - 641 - static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume); 642 - 643 643 static struct bus_type ap_bus_type = { 644 644 .name = "ap", 645 645 .match = &ap_bus_match, 646 646 .uevent = &ap_uevent, 647 - .pm = &ap_bus_pm_ops, 648 647 }; 649 648 650 649 static int __ap_revise_reserved(struct device *dev, void *dummy) ··· 760 873 761 874 void ap_bus_force_rescan(void) 762 875 { 763 - if (ap_suspend_flag) 764 - return; 765 876 /* processing a asynchronous bus rescan */ 766 877 del_timer(&ap_config_timer); 767 878 queue_work(system_long_wq, &ap_scan_work); ··· 906 1021 907 1022 static ssize_t ap_domain_show(struct bus_type *bus, char *buf) 908 1023 { 909 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); 1024 + return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); 910 1025 } 911 1026 912 1027 static ssize_t ap_domain_store(struct bus_type *bus, ··· 932 1047 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) 933 1048 { 934 1049 if (!ap_configuration) /* QCI not supported */ 935 - return snprintf(buf, PAGE_SIZE, "not supported\n"); 1050 + return scnprintf(buf, PAGE_SIZE, "not supported\n"); 936 1051 937 - return snprintf(buf, PAGE_SIZE, 938 - "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 939 - ap_configuration->adm[0], ap_configuration->adm[1], 940 - ap_configuration->adm[2], ap_configuration->adm[3], 941 - ap_configuration->adm[4], ap_configuration->adm[5], 942 - ap_configuration->adm[6], ap_configuration->adm[7]); 1052 + return scnprintf(buf, PAGE_SIZE, 1053 + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1054 + ap_configuration->adm[0], ap_configuration->adm[1], 1055 + ap_configuration->adm[2], ap_configuration->adm[3], 1056 + ap_configuration->adm[4], ap_configuration->adm[5], 1057 + ap_configuration->adm[6], ap_configuration->adm[7]); 943 1058 } 944 1059 945 1060 static 
BUS_ATTR_RO(ap_control_domain_mask); ··· 947 1062 static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf) 948 1063 { 949 1064 if (!ap_configuration) /* QCI not supported */ 950 - return snprintf(buf, PAGE_SIZE, "not supported\n"); 1065 + return scnprintf(buf, PAGE_SIZE, "not supported\n"); 951 1066 952 - return snprintf(buf, PAGE_SIZE, 953 - "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 954 - ap_configuration->aqm[0], ap_configuration->aqm[1], 955 - ap_configuration->aqm[2], ap_configuration->aqm[3], 956 - ap_configuration->aqm[4], ap_configuration->aqm[5], 957 - ap_configuration->aqm[6], ap_configuration->aqm[7]); 1067 + return scnprintf(buf, PAGE_SIZE, 1068 + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1069 + ap_configuration->aqm[0], ap_configuration->aqm[1], 1070 + ap_configuration->aqm[2], ap_configuration->aqm[3], 1071 + ap_configuration->aqm[4], ap_configuration->aqm[5], 1072 + ap_configuration->aqm[6], ap_configuration->aqm[7]); 958 1073 } 959 1074 960 1075 static BUS_ATTR_RO(ap_usage_domain_mask); ··· 962 1077 static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf) 963 1078 { 964 1079 if (!ap_configuration) /* QCI not supported */ 965 - return snprintf(buf, PAGE_SIZE, "not supported\n"); 1080 + return scnprintf(buf, PAGE_SIZE, "not supported\n"); 966 1081 967 - return snprintf(buf, PAGE_SIZE, 968 - "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 969 - ap_configuration->apm[0], ap_configuration->apm[1], 970 - ap_configuration->apm[2], ap_configuration->apm[3], 971 - ap_configuration->apm[4], ap_configuration->apm[5], 972 - ap_configuration->apm[6], ap_configuration->apm[7]); 1082 + return scnprintf(buf, PAGE_SIZE, 1083 + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1084 + ap_configuration->apm[0], ap_configuration->apm[1], 1085 + ap_configuration->apm[2], ap_configuration->apm[3], 1086 + ap_configuration->apm[4], ap_configuration->apm[5], 1087 + ap_configuration->apm[6], ap_configuration->apm[7]); 973 1088 } 974 1089 975 1090 static 
BUS_ATTR_RO(ap_adapter_mask); 976 1091 977 1092 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) 978 1093 { 979 - return snprintf(buf, PAGE_SIZE, "%d\n", 980 - ap_using_interrupts() ? 1 : 0); 1094 + return scnprintf(buf, PAGE_SIZE, "%d\n", 1095 + ap_using_interrupts() ? 1 : 0); 981 1096 } 982 1097 983 1098 static BUS_ATTR_RO(ap_interrupts); 984 1099 985 1100 static ssize_t config_time_show(struct bus_type *bus, char *buf) 986 1101 { 987 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); 1102 + return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); 988 1103 } 989 1104 990 1105 static ssize_t config_time_store(struct bus_type *bus, ··· 1003 1118 1004 1119 static ssize_t poll_thread_show(struct bus_type *bus, char *buf) 1005 1120 { 1006 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0); 1121 + return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0); 1007 1122 } 1008 1123 1009 1124 static ssize_t poll_thread_store(struct bus_type *bus, ··· 1026 1141 1027 1142 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf) 1028 1143 { 1029 - return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); 1144 + return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); 1030 1145 } 1031 1146 1032 1147 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, ··· 1061 1176 max_domain_id = ap_max_domain_id ? 
: -1; 1062 1177 else 1063 1178 max_domain_id = 15; 1064 - return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id); 1179 + return scnprintf(buf, PAGE_SIZE, "%d\n", max_domain_id); 1065 1180 } 1066 1181 1067 1182 static BUS_ATTR_RO(ap_max_domain_id); ··· 1072 1187 1073 1188 if (mutex_lock_interruptible(&ap_perms_mutex)) 1074 1189 return -ERESTARTSYS; 1075 - rc = snprintf(buf, PAGE_SIZE, 1076 - "0x%016lx%016lx%016lx%016lx\n", 1077 - ap_perms.apm[0], ap_perms.apm[1], 1078 - ap_perms.apm[2], ap_perms.apm[3]); 1190 + rc = scnprintf(buf, PAGE_SIZE, 1191 + "0x%016lx%016lx%016lx%016lx\n", 1192 + ap_perms.apm[0], ap_perms.apm[1], 1193 + ap_perms.apm[2], ap_perms.apm[3]); 1079 1194 mutex_unlock(&ap_perms_mutex); 1080 1195 1081 1196 return rc; ··· 1103 1218 1104 1219 if (mutex_lock_interruptible(&ap_perms_mutex)) 1105 1220 return -ERESTARTSYS; 1106 - rc = snprintf(buf, PAGE_SIZE, 1107 - "0x%016lx%016lx%016lx%016lx\n", 1108 - ap_perms.aqm[0], ap_perms.aqm[1], 1109 - ap_perms.aqm[2], ap_perms.aqm[3]); 1221 + rc = scnprintf(buf, PAGE_SIZE, 1222 + "0x%016lx%016lx%016lx%016lx\n", 1223 + ap_perms.aqm[0], ap_perms.aqm[1], 1224 + ap_perms.aqm[2], ap_perms.aqm[3]); 1110 1225 mutex_unlock(&ap_perms_mutex); 1111 1226 1112 1227 return rc; ··· 1452 1567 1453 1568 static void ap_config_timeout(struct timer_list *unused) 1454 1569 { 1455 - if (ap_suspend_flag) 1456 - return; 1457 1570 queue_work(system_long_wq, &ap_scan_work); 1458 1571 } 1459 1572 ··· 1524 1641 ap_domain_index); 1525 1642 ap_domain_index = -1; 1526 1643 } 1527 - /* In resume callback we need to know if the user had set the domain. 1528 - * If so, we can not just reset it. 
1529 - */ 1530 - if (ap_domain_index >= 0) 1531 - user_set_domain = 1; 1532 1644 1533 1645 if (ap_interrupts_available()) { 1534 1646 rc = register_adapter_interrupt(&ap_airq); ··· 1566 1688 goto out_work; 1567 1689 } 1568 1690 1569 - rc = register_pm_notifier(&ap_power_notifier); 1570 - if (rc) 1571 - goto out_pm; 1572 - 1573 1691 queue_work(system_long_wq, &ap_scan_work); 1574 1692 initialised = true; 1575 1693 1576 1694 return 0; 1577 1695 1578 - out_pm: 1579 - ap_poll_thread_stop(); 1580 1696 out_work: 1581 1697 hrtimer_cancel(&ap_poll_timer); 1582 1698 root_device_unregister(ap_root_device);
-5
drivers/s390/crypto/ap_bus.h
··· 91 91 AP_STATE_IDLE, 92 92 AP_STATE_WORKING, 93 93 AP_STATE_QUEUE_FULL, 94 - AP_STATE_SUSPEND_WAIT, 95 94 AP_STATE_REMOVE, /* about to be removed from driver */ 96 95 AP_STATE_UNBOUND, /* momentary not bound to a driver */ 97 96 AP_STATE_BORKED, /* broken */ ··· 135 136 136 137 int (*probe)(struct ap_device *); 137 138 void (*remove)(struct ap_device *); 138 - void (*suspend)(struct ap_device *); 139 - void (*resume)(struct ap_device *); 140 139 }; 141 140 142 141 #define to_ap_drv(x) container_of((x), struct ap_driver, driver) ··· 256 259 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 257 260 void ap_queue_prepare_remove(struct ap_queue *aq); 258 261 void ap_queue_remove(struct ap_queue *aq); 259 - void ap_queue_suspend(struct ap_device *ap_dev); 260 - void ap_queue_resume(struct ap_device *ap_dev); 261 262 void ap_queue_init_state(struct ap_queue *aq); 262 263 263 264 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
+9 -8
drivers/s390/crypto/ap_card.c
··· 23 23 { 24 24 struct ap_card *ac = to_ap_card(dev); 25 25 26 - return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type); 26 + return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type); 27 27 } 28 28 29 29 static DEVICE_ATTR_RO(hwtype); ··· 33 33 { 34 34 struct ap_card *ac = to_ap_card(dev); 35 35 36 - return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype); 36 + return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype); 37 37 } 38 38 39 39 static DEVICE_ATTR_RO(raw_hwtype); ··· 43 43 { 44 44 struct ap_card *ac = to_ap_card(dev); 45 45 46 - return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth); 46 + return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth); 47 47 } 48 48 49 49 static DEVICE_ATTR_RO(depth); ··· 53 53 { 54 54 struct ap_card *ac = to_ap_card(dev); 55 55 56 - return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions); 56 + return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions); 57 57 } 58 58 59 59 static DEVICE_ATTR_RO(ap_functions); ··· 69 69 spin_lock_bh(&ap_list_lock); 70 70 req_cnt = atomic64_read(&ac->total_request_count); 71 71 spin_unlock_bh(&ap_list_lock); 72 - return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); 72 + return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); 73 73 } 74 74 75 75 static ssize_t request_count_store(struct device *dev, ··· 102 102 for_each_ap_queue(aq, ac) 103 103 reqq_cnt += aq->requestq_count; 104 104 spin_unlock_bh(&ap_list_lock); 105 - return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 105 + return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 106 106 } 107 107 108 108 static DEVICE_ATTR_RO(requestq_count); ··· 119 119 for_each_ap_queue(aq, ac) 120 120 penq_cnt += aq->pendingq_count; 121 121 spin_unlock_bh(&ap_list_lock); 122 - return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 122 + return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 123 123 } 124 124 125 125 static DEVICE_ATTR_RO(pendingq_count); ··· 127 127 static ssize_t modalias_show(struct device *dev, 128 128 struct device_attribute 
*attr, char *buf) 129 129 { 130 - return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); 130 + return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n", 131 + to_ap_dev(dev)->device_type); 131 132 } 132 133 133 134 static DEVICE_ATTR_RO(modalias);
+12 -63
drivers/s390/crypto/ap_queue.c
··· 152 152 ap_msg->receive(aq, ap_msg, aq->reply); 153 153 break; 154 154 } 155 - /* fall through */ 155 + fallthrough; 156 156 case AP_RESPONSE_NO_PENDING_REPLY: 157 157 if (!status.queue_empty || aq->queue_count <= 0) 158 158 break; ··· 201 201 } 202 202 203 203 /** 204 - * ap_sm_suspend_read(): Receive pending reply messages from an AP queue 205 - * without changing the device state in between. In suspend mode we don't 206 - * allow sending new requests, therefore just fetch pending replies. 207 - * @aq: pointer to the AP queue 208 - * 209 - * Returns AP_WAIT_NONE or AP_WAIT_AGAIN 210 - */ 211 - static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq) 212 - { 213 - struct ap_queue_status status; 214 - 215 - if (!aq->reply) 216 - return AP_WAIT_NONE; 217 - status = ap_sm_recv(aq); 218 - switch (status.response_code) { 219 - case AP_RESPONSE_NORMAL: 220 - if (aq->queue_count > 0) 221 - return AP_WAIT_AGAIN; 222 - /* fall through */ 223 - default: 224 - return AP_WAIT_NONE; 225 - } 226 - } 227 - 228 - /** 229 204 * ap_sm_write(): Send messages from the request queue to an AP queue. 
230 205 * @aq: pointer to the AP queue 231 206 * ··· 229 254 aq->state = AP_STATE_WORKING; 230 255 return AP_WAIT_AGAIN; 231 256 } 232 - /* fall through */ 257 + fallthrough; 233 258 case AP_RESPONSE_Q_FULL: 234 259 aq->state = AP_STATE_QUEUE_FULL; 235 260 return AP_WAIT_INTERRUPT; ··· 355 380 case AP_RESPONSE_NORMAL: 356 381 if (aq->queue_count > 0) 357 382 return AP_WAIT_AGAIN; 358 - /* fallthrough */ 383 + fallthrough; 359 384 case AP_RESPONSE_NO_PENDING_REPLY: 360 385 return AP_WAIT_TIMEOUT; 361 386 default: ··· 392 417 [AP_EVENT_POLL] = ap_sm_read, 393 418 [AP_EVENT_TIMEOUT] = ap_sm_reset, 394 419 }, 395 - [AP_STATE_SUSPEND_WAIT] = { 396 - [AP_EVENT_POLL] = ap_sm_suspend_read, 397 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 398 - }, 399 420 [AP_STATE_REMOVE] = { 400 421 [AP_EVENT_POLL] = ap_sm_nop, 401 422 [AP_EVENT_TIMEOUT] = ap_sm_nop, ··· 421 450 } 422 451 423 452 /* 424 - * Power management for queue devices 425 - */ 426 - void ap_queue_suspend(struct ap_device *ap_dev) 427 - { 428 - struct ap_queue *aq = to_ap_queue(&ap_dev->device); 429 - 430 - /* Poll on the device until all requests are finished. */ 431 - spin_lock_bh(&aq->lock); 432 - aq->state = AP_STATE_SUSPEND_WAIT; 433 - while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE) 434 - ; 435 - aq->state = AP_STATE_BORKED; 436 - spin_unlock_bh(&aq->lock); 437 - } 438 - EXPORT_SYMBOL(ap_queue_suspend); 439 - 440 - void ap_queue_resume(struct ap_device *ap_dev) 441 - { 442 - } 443 - EXPORT_SYMBOL(ap_queue_resume); 444 - 445 - /* 446 453 * AP queue related attributes. 
447 454 */ 448 455 static ssize_t request_count_show(struct device *dev, ··· 433 484 spin_lock_bh(&aq->lock); 434 485 req_cnt = aq->total_request_count; 435 486 spin_unlock_bh(&aq->lock); 436 - return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); 487 + return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); 437 488 } 438 489 439 490 static ssize_t request_count_store(struct device *dev, ··· 460 511 spin_lock_bh(&aq->lock); 461 512 reqq_cnt = aq->requestq_count; 462 513 spin_unlock_bh(&aq->lock); 463 - return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 514 + return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 464 515 } 465 516 466 517 static DEVICE_ATTR_RO(requestq_count); ··· 474 525 spin_lock_bh(&aq->lock); 475 526 penq_cnt = aq->pendingq_count; 476 527 spin_unlock_bh(&aq->lock); 477 - return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 528 + return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 478 529 } 479 530 480 531 static DEVICE_ATTR_RO(pendingq_count); ··· 489 540 switch (aq->state) { 490 541 case AP_STATE_RESET_START: 491 542 case AP_STATE_RESET_WAIT: 492 - rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); 543 + rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n"); 493 544 break; 494 545 case AP_STATE_WORKING: 495 546 case AP_STATE_QUEUE_FULL: 496 - rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 547 + rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 497 548 break; 498 549 default: 499 - rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 550 + rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 500 551 } 501 552 spin_unlock_bh(&aq->lock); 502 553 return rc; ··· 530 581 531 582 spin_lock_bh(&aq->lock); 532 583 if (aq->state == AP_STATE_SETIRQ_WAIT) 533 - rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); 584 + rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); 534 585 else if (aq->interrupt == AP_INTR_ENABLED) 535 - rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); 586 + rc = scnprintf(buf, PAGE_SIZE, 
"Interrupts enabled.\n"); 536 587 else 537 - rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 588 + rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 538 589 spin_unlock_bh(&aq->lock); 539 590 return rc; 540 591 }
+1 -1
drivers/s390/crypto/pkey_api.c
··· 80 80 u8 res1[3]; 81 81 u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ 82 82 u32 len; /* bytes actually stored in clearkey[] */ 83 - u8 clearkey[0]; /* clear key value */ 83 + u8 clearkey[]; /* clear key value */ 84 84 } __packed; 85 85 86 86 /*
+1 -1
drivers/s390/crypto/vfio_ap_ops.c
··· 90 90 case AP_RESPONSE_RESET_IN_PROGRESS: 91 91 if (!status.irq_enabled) 92 92 return; 93 - /* Fall through */ 93 + fallthrough; 94 94 case AP_RESPONSE_BUSY: 95 95 msleep(20); 96 96 break;
+3 -3
drivers/s390/crypto/zcrypt_card.c
··· 41 41 { 42 42 struct zcrypt_card *zc = to_ap_card(dev)->private; 43 43 44 - return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string); 44 + return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string); 45 45 } 46 46 47 47 static DEVICE_ATTR_RO(type); ··· 52 52 { 53 53 struct zcrypt_card *zc = to_ap_card(dev)->private; 54 54 55 - return snprintf(buf, PAGE_SIZE, "%d\n", zc->online); 55 + return scnprintf(buf, PAGE_SIZE, "%d\n", zc->online); 56 56 } 57 57 58 58 static ssize_t online_store(struct device *dev, ··· 86 86 { 87 87 struct zcrypt_card *zc = to_ap_card(dev)->private; 88 88 89 - return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load)); 89 + return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load)); 90 90 } 91 91 92 92 static DEVICE_ATTR_RO(load);
+17 -16
drivers/s390/crypto/zcrypt_ccamisc.c
··· 592 592 u8 pad2[1]; 593 593 u8 vptype; 594 594 u8 vp[32]; /* verification pattern */ 595 - } keyblock; 595 + } ckb; 596 596 } lv3; 597 597 } __packed * prepparm; 598 598 ··· 650 650 prepparm = (struct uskrepparm *) prepcblk->rpl_parmb; 651 651 652 652 /* check the returned keyblock */ 653 - if (prepparm->lv3.keyblock.version != 0x01) { 654 - DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n", 655 - __func__, (int) prepparm->lv3.keyblock.version); 653 + if (prepparm->lv3.ckb.version != 0x01 && 654 + prepparm->lv3.ckb.version != 0x02) { 655 + DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", 656 + __func__, (int) prepparm->lv3.ckb.version); 656 657 rc = -EIO; 657 658 goto out; 658 659 } 659 660 660 661 /* copy the tanslated protected key */ 661 - switch (prepparm->lv3.keyblock.len) { 662 + switch (prepparm->lv3.ckb.len) { 662 663 case 16+32: 663 664 /* AES 128 protected key */ 664 665 if (protkeytype) ··· 677 676 break; 678 677 default: 679 678 DEBUG_ERR("%s unknown/unsupported keylen %d\n", 680 - __func__, prepparm->lv3.keyblock.len); 679 + __func__, prepparm->lv3.ckb.len); 681 680 rc = -EIO; 682 681 goto out; 683 682 } 684 - memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len); 683 + memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len); 685 684 if (protkeylen) 686 - *protkeylen = prepparm->lv3.keyblock.len; 685 + *protkeylen = prepparm->lv3.ckb.len; 687 686 688 687 out: 689 688 free_cprbmem(mem, PARMBSIZE, 0); ··· 1261 1260 prepparm = (struct aurepparm *) prepcblk->rpl_parmb; 1262 1261 1263 1262 /* check the returned keyblock */ 1264 - if (prepparm->vud.ckb.version != 0x01) { 1265 - DEBUG_ERR( 1266 - "%s reply param keyblock version mismatch 0x%02x != 0x01\n", 1267 - __func__, (int) prepparm->vud.ckb.version); 1263 + if (prepparm->vud.ckb.version != 0x01 && 1264 + prepparm->vud.ckb.version != 0x02) { 1265 + DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", 1266 + __func__, (int) 
prepparm->vud.ckb.version); 1268 1267 rc = -EIO; 1269 1268 goto out; 1270 1269 } ··· 1569 1568 return -EINVAL; 1570 1569 1571 1570 /* fetch status of all crypto cards */ 1572 - device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, 1573 - sizeof(struct zcrypt_device_status_ext), 1574 - GFP_KERNEL); 1571 + device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, 1572 + sizeof(struct zcrypt_device_status_ext), 1573 + GFP_KERNEL); 1575 1574 if (!device_status) 1576 1575 return -ENOMEM; 1577 1576 zcrypt_device_status_mask_ext(device_status); ··· 1641 1640 } else 1642 1641 rc = -ENODEV; 1643 1642 1644 - kfree(device_status); 1643 + kvfree(device_status); 1645 1644 return rc; 1646 1645 } 1647 1646
+1 -1
drivers/s390/crypto/zcrypt_ccamisc.h
··· 90 90 u16 kmf1; /* key management field 1 */ 91 91 u16 kmf2; /* key management field 2 */ 92 92 u16 kmf3; /* key management field 3 */ 93 - u8 vdata[0]; /* variable part data follows */ 93 + u8 vdata[]; /* variable part data follows */ 94 94 } __packed; 95 95 96 96 /* Some defines for the CCA AES cipherkeytoken kmf1 field */
-2
drivers/s390/crypto/zcrypt_cex2a.c
··· 204 204 static struct ap_driver zcrypt_cex2a_queue_driver = { 205 205 .probe = zcrypt_cex2a_queue_probe, 206 206 .remove = zcrypt_cex2a_queue_remove, 207 - .suspend = ap_queue_suspend, 208 - .resume = ap_queue_resume, 209 207 .ids = zcrypt_cex2a_queue_ids, 210 208 .flags = AP_DRIVER_FLAG_DEFAULT, 211 209 };
-2
drivers/s390/crypto/zcrypt_cex2c.c
··· 260 260 static struct ap_driver zcrypt_cex2c_queue_driver = { 261 261 .probe = zcrypt_cex2c_queue_probe, 262 262 .remove = zcrypt_cex2c_queue_remove, 263 - .suspend = ap_queue_suspend, 264 - .resume = ap_queue_resume, 265 263 .ids = zcrypt_cex2c_queue_ids, 266 264 .flags = AP_DRIVER_FLAG_DEFAULT, 267 265 };
+38 -38
drivers/s390/crypto/zcrypt_cex4.c
··· 87 87 if (ap_domain_index >= 0) 88 88 cca_get_info(ac->id, ap_domain_index, &ci, zc->online); 89 89 90 - return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial); 90 + return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial); 91 91 } 92 92 93 93 static struct device_attribute dev_attr_cca_serialnr = ··· 122 122 &ci, zq->online); 123 123 124 124 if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3') 125 - n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", 126 - new_state[ci.new_mk_state - '1'], ci.new_mkvp); 125 + n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", 126 + new_state[ci.new_mk_state - '1'], ci.new_mkvp); 127 127 else 128 - n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); 128 + n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); 129 129 130 130 if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2') 131 - n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n", 132 - cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp); 131 + n += scnprintf(buf + n, PAGE_SIZE - n, 132 + "AES CUR: %s 0x%016llx\n", 133 + cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp); 133 134 else 134 - n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n"); 135 + n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n"); 135 136 136 137 if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2') 137 - n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n", 138 - cao_state[ci.old_mk_state - '1'], ci.old_mkvp); 138 + n += scnprintf(buf + n, PAGE_SIZE - n, 139 + "AES OLD: %s 0x%016llx\n", 140 + cao_state[ci.old_mk_state - '1'], ci.old_mkvp); 139 141 else 140 - n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n"); 142 + n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n"); 141 143 142 144 return n; 143 145 } ··· 172 170 ep11_get_card_info(ac->id, &ci, zc->online); 173 171 174 172 if (ci.API_ord_nr > 0) 175 - return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr); 173 + return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr); 176 174 else 177 - return snprintf(buf, PAGE_SIZE, "\n"); 
175 + return scnprintf(buf, PAGE_SIZE, "\n"); 178 176 } 179 177 180 178 static struct device_attribute dev_attr_ep11_api_ordinalnr = ··· 193 191 ep11_get_card_info(ac->id, &ci, zc->online); 194 192 195 193 if (ci.FW_version > 0) 196 - return snprintf(buf, PAGE_SIZE, "%d.%d\n", 197 - (int)(ci.FW_version >> 8), 198 - (int)(ci.FW_version & 0xFF)); 194 + return scnprintf(buf, PAGE_SIZE, "%d.%d\n", 195 + (int)(ci.FW_version >> 8), 196 + (int)(ci.FW_version & 0xFF)); 199 197 else 200 - return snprintf(buf, PAGE_SIZE, "\n"); 198 + return scnprintf(buf, PAGE_SIZE, "\n"); 201 199 } 202 200 203 201 static struct device_attribute dev_attr_ep11_fw_version = ··· 216 214 ep11_get_card_info(ac->id, &ci, zc->online); 217 215 218 216 if (ci.serial[0]) 219 - return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial); 217 + return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial); 220 218 else 221 - return snprintf(buf, PAGE_SIZE, "\n"); 219 + return scnprintf(buf, PAGE_SIZE, "\n"); 222 220 } 223 221 224 222 static struct device_attribute dev_attr_ep11_serialnr = ··· 253 251 if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) { 254 252 if (n > 0) 255 253 buf[n++] = ' '; 256 - n += snprintf(buf + n, PAGE_SIZE - n, 257 - "%s", ep11_op_modes[i].mode_txt); 254 + n += scnprintf(buf + n, PAGE_SIZE - n, 255 + "%s", ep11_op_modes[i].mode_txt); 258 256 } 259 257 } 260 - n += snprintf(buf + n, PAGE_SIZE - n, "\n"); 258 + n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 261 259 262 260 return n; 263 261 } ··· 300 298 &di); 301 299 302 300 if (di.cur_wk_state == '0') { 303 - n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n", 304 - cwk_state[di.cur_wk_state - '0']); 301 + n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n", 302 + cwk_state[di.cur_wk_state - '0']); 305 303 } else if (di.cur_wk_state == '1') { 306 - n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x", 307 - cwk_state[di.cur_wk_state - '0']); 304 + n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x", 305 + cwk_state[di.cur_wk_state - '0']); 308 306 bin2hex(buf 
+ n, di.cur_wkvp, sizeof(di.cur_wkvp)); 309 307 n += 2 * sizeof(di.cur_wkvp); 310 - n += snprintf(buf + n, PAGE_SIZE - n, "\n"); 308 + n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 311 309 } else 312 - n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n"); 310 + n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n"); 313 311 314 312 if (di.new_wk_state == '0') { 315 - n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n", 316 - nwk_state[di.new_wk_state - '0']); 313 + n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n", 314 + nwk_state[di.new_wk_state - '0']); 317 315 } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') { 318 - n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x", 319 - nwk_state[di.new_wk_state - '0']); 316 + n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x", 317 + nwk_state[di.new_wk_state - '0']); 320 318 bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp)); 321 319 n += 2 * sizeof(di.new_wkvp); 322 - n += snprintf(buf + n, PAGE_SIZE - n, "\n"); 320 + n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 323 321 } else 324 - n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n"); 322 + n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n"); 325 323 326 324 return n; 327 325 } ··· 348 346 if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) { 349 347 if (n > 0) 350 348 buf[n++] = ' '; 351 - n += snprintf(buf + n, PAGE_SIZE - n, 352 - "%s", ep11_op_modes[i].mode_txt); 349 + n += scnprintf(buf + n, PAGE_SIZE - n, 350 + "%s", ep11_op_modes[i].mode_txt); 353 351 } 354 352 } 355 - n += snprintf(buf + n, PAGE_SIZE - n, "\n"); 353 + n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 356 354 357 355 return n; 358 356 } ··· 656 654 static struct ap_driver zcrypt_cex4_queue_driver = { 657 655 .probe = zcrypt_cex4_queue_probe, 658 656 .remove = zcrypt_cex4_queue_remove, 659 - .suspend = ap_queue_suspend, 660 - .resume = ap_queue_resume, 661 657 .ids = zcrypt_cex4_queue_ids, 662 658 .flags = AP_DRIVER_FLAG_DEFAULT, 663 659 };
+5 -5
drivers/s390/crypto/zcrypt_ep11misc.c
··· 1217 1217 struct ep11_card_info eci; 1218 1218 1219 1219 /* fetch status of all crypto cards */ 1220 - device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, 1221 - sizeof(struct zcrypt_device_status_ext), 1222 - GFP_KERNEL); 1220 + device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, 1221 + sizeof(struct zcrypt_device_status_ext), 1222 + GFP_KERNEL); 1223 1223 if (!device_status) 1224 1224 return -ENOMEM; 1225 1225 zcrypt_device_status_mask_ext(device_status); ··· 1227 1227 /* allocate 1k space for up to 256 apqns */ 1228 1228 _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); 1229 1229 if (!_apqns) { 1230 - kfree(device_status); 1230 + kvfree(device_status); 1231 1231 return -ENOMEM; 1232 1232 } 1233 1233 ··· 1282 1282 rc = 0; 1283 1283 } 1284 1284 1285 - kfree(device_status); 1285 + kvfree(device_status); 1286 1286 return rc; 1287 1287 } 1288 1288 EXPORT_SYMBOL(ep11_findcard2);
+5 -5
drivers/s390/crypto/zcrypt_msgtype6.c
··· 590 590 struct CPRBX cprbx; 591 591 unsigned char pad[4]; /* 4 byte function code/rules block ? */ 592 592 unsigned short length; 593 - char text[0]; 593 + char text[]; 594 594 } __packed; 595 595 596 596 struct type86_ep11_reply { ··· 801 801 if (msg->cprbx.cprb_ver_id == 0x02) 802 802 return convert_type86_ica(zq, reply, 803 803 outputdata, outputdatalength); 804 - /* fall through - wrong cprb version is an unknown response */ 804 + fallthrough; /* wrong cprb version is an unknown response */ 805 805 default: /* Unknown response type, this should NEVER EVER happen */ 806 806 zq->online = 0; 807 807 pr_err("Cryptographic device %02x.%04x failed and was set offline\n", ··· 834 834 } 835 835 if (msg->cprbx.cprb_ver_id == 0x02) 836 836 return convert_type86_xcrb(zq, reply, xcRB); 837 - /* fall through - wrong cprb version is an unknown response */ 837 + fallthrough; /* wrong cprb version is an unknown response */ 838 838 default: /* Unknown response type, this should NEVER EVER happen */ 839 839 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 840 840 zq->online = 0; ··· 864 864 return convert_error(zq, reply); 865 865 if (msg->cprbx.cprb_ver_id == 0x04) 866 866 return convert_type86_ep11_xcrb(zq, reply, xcRB); 867 - /* fall through - wrong cprb version is an unknown resp */ 867 + fallthrough; /* wrong cprb version is an unknown resp */ 868 868 default: /* Unknown response type, this should NEVER EVER happen */ 869 869 zq->online = 0; 870 870 pr_err("Cryptographic device %02x.%04x failed and was set offline\n", ··· 894 894 return -EINVAL; 895 895 if (msg->cprbx.cprb_ver_id == 0x02) 896 896 return convert_type86_rng(zq, reply, data); 897 - /* fall through - wrong cprb version is an unknown response */ 897 + fallthrough; /* wrong cprb version is an unknown response */ 898 898 default: /* Unknown response type, this should NEVER EVER happen */ 899 899 zq->online = 0; 900 900 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+2 -2
drivers/s390/crypto/zcrypt_queue.c
··· 42 42 { 43 43 struct zcrypt_queue *zq = to_ap_queue(dev)->private; 44 44 45 - return snprintf(buf, PAGE_SIZE, "%d\n", zq->online); 45 + return scnprintf(buf, PAGE_SIZE, "%d\n", zq->online); 46 46 } 47 47 48 48 static ssize_t online_store(struct device *dev, ··· 78 78 { 79 79 struct zcrypt_queue *zq = to_ap_queue(dev)->private; 80 80 81 - return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load)); 81 + return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load)); 82 82 } 83 83 84 84 static DEVICE_ATTR_RO(load);
-20
drivers/s390/net/ism_drv.c
··· 567 567 kfree(ism); 568 568 } 569 569 570 - static int ism_suspend(struct device *dev) 571 - { 572 - struct ism_dev *ism = dev_get_drvdata(dev); 573 - 574 - ism_dev_exit(ism); 575 - return 0; 576 - } 577 - 578 - static int ism_resume(struct device *dev) 579 - { 580 - struct ism_dev *ism = dev_get_drvdata(dev); 581 - 582 - return ism_dev_init(ism); 583 - } 584 - 585 - static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume); 586 - 587 570 static struct pci_driver ism_driver = { 588 571 .name = DRV_NAME, 589 572 .id_table = ism_device_table, 590 573 .probe = ism_probe, 591 574 .remove = ism_remove, 592 - .driver = { 593 - .pm = &ism_pm_ops, 594 - }, 595 575 }; 596 576 597 577 static int __init ism_init(void)
-34
include/linux/suspend.h
··· 566 566 567 567 #endif /* !CONFIG_PM_AUTOSLEEP */ 568 568 569 - #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS 570 - /* 571 - * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture 572 - * to save/restore additional information to/from the array of page 573 - * frame numbers in the hibernation image. For s390 this is used to 574 - * save and restore the storage key for each page that is included 575 - * in the hibernation image. 576 - */ 577 - unsigned long page_key_additional_pages(unsigned long pages); 578 - int page_key_alloc(unsigned long pages); 579 - void page_key_free(void); 580 - void page_key_read(unsigned long *pfn); 581 - void page_key_memorize(unsigned long *pfn); 582 - void page_key_write(void *address); 583 - 584 - #else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ 585 - 586 - static inline unsigned long page_key_additional_pages(unsigned long pages) 587 - { 588 - return 0; 589 - } 590 - 591 - static inline int page_key_alloc(unsigned long pages) 592 - { 593 - return 0; 594 - } 595 - 596 - static inline void page_key_free(void) {} 597 - static inline void page_key_read(unsigned long *pfn) {} 598 - static inline void page_key_memorize(unsigned long *pfn) {} 599 - static inline void page_key_write(void *address) {} 600 - 601 - #endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ 602 - 603 569 #endif /* _LINUX_SUSPEND_H */
-3
kernel/power/Kconfig
··· 80 80 81 81 For more information take a look at <file:Documentation/power/swsusp.rst>. 82 82 83 - config ARCH_SAVE_PAGE_KEYS 84 - bool 85 - 86 83 config PM_STD_PARTITION 87 84 string "Default resume partition" 88 85 depends on HIBERNATION
-18
kernel/power/snapshot.c
··· 1744 1744 count += highmem; 1745 1745 count -= totalreserve_pages; 1746 1746 1747 - /* Add number of pages required for page keys (s390 only). */ 1748 - size += page_key_additional_pages(saveable); 1749 - 1750 1747 /* Compute the maximum number of saveable pages to leave in memory. */ 1751 1748 max_size = (count - (size + PAGES_FOR_IO)) / 2 1752 1749 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); ··· 2072 2075 buf[j] = memory_bm_next_pfn(bm); 2073 2076 if (unlikely(buf[j] == BM_END_OF_MAP)) 2074 2077 break; 2075 - /* Save page key for data page (s390 only). */ 2076 - page_key_read(buf + j); 2077 2078 } 2078 2079 } 2079 2080 ··· 2220 2225 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { 2221 2226 if (unlikely(buf[j] == BM_END_OF_MAP)) 2222 2227 break; 2223 - 2224 - /* Extract and buffer page key for data page (s390 only). */ 2225 - page_key_memorize(buf + j); 2226 2228 2227 2229 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) 2228 2230 memory_bm_set_bit(bm, buf[j]); ··· 2615 2623 if (error) 2616 2624 return error; 2617 2625 2618 - /* Allocate buffer for page keys. */ 2619 - error = page_key_alloc(nr_copy_pages); 2620 - if (error) 2621 - return error; 2622 - 2623 2626 hibernate_restore_protection_begin(); 2624 2627 } else if (handle->cur <= nr_meta_pages + 1) { 2625 2628 error = unpack_orig_pfns(buffer, &copy_bm); ··· 2636 2649 } 2637 2650 } else { 2638 2651 copy_last_highmem_page(); 2639 - /* Restore page key for data page (s390 only). */ 2640 - page_key_write(handle->buffer); 2641 2652 hibernate_restore_protect_page(handle->buffer); 2642 2653 handle->buffer = get_buffer(&orig_bm, &ca); 2643 2654 if (IS_ERR(handle->buffer)) ··· 2658 2673 void snapshot_write_finalize(struct snapshot_handle *handle) 2659 2674 { 2660 2675 copy_last_highmem_page(); 2661 - /* Restore page key for data page (s390 only). 
*/ 2662 - page_key_write(handle->buffer); 2663 - page_key_free(); 2664 2676 hibernate_restore_protect_page(handle->buffer); 2665 2677 /* Do that only if we have loaded the image entirely */ 2666 2678 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {