Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
"The bulk of the s390 patches for 4.13. Some new things but mostly bug
fixes and cleanups. Noteworthy changes:

- The SCM block driver is converted to blk-mq

- Switch s390 to 5 level page tables. The virtual address space for a
user space process can now have up to 16EB-4KB.

- Introduce an ELF phdr flag for qemu to avoid the global
vm.alloc_pgste which forces all processes to large page tables

- A couple of PCI improvements to improve error recovery

- Included is the merge of the base support for proper machine checks
for KVM"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (52 commits)
s390/dasd: Fix faulty ENODEV for RO sysfs attribute
s390/pci: recognize name clashes with uids
s390/pci: provide more debug information
s390/pci: fix handling of PEC 306
s390/pci: improve pci hotplug
s390/pci: introduce clp_get_state
s390/pci: improve error handling during fmb (de)registration
s390/pci: improve unreg_ioat error handling
s390/pci: improve error handling during interrupt deregistration
s390/pci: don't cleanup in arch_setup_msi_irqs
KVM: s390: Backup the guest's machine check info
s390/nmi: s390: New low level handling for machine check happening in guest
s390/fpu: export save_fpu_regs for all configs
s390/kvm: avoid global config of vm.alloc_pgste=1
s390: rename struct psw_bits members
s390: rename psw_bits enums
s390/mm: use correct address space when enabling DAT
s390/cio: introduce io_subchannel_type
s390/ipl: revert Load Normal semantics for LPAR CCW-type re-IPL
s390/dumpstack: remove raw stack dump
...

+1156 -1137
+2 -1
arch/s390/Kconfig
··· 64 64 65 65 config S390 66 66 def_bool y 67 + select ARCH_BINFMT_ELF_STATE 67 68 select ARCH_HAS_DEVMEM_IS_ALLOWED 68 69 select ARCH_HAS_ELF_RANDOMIZE 69 70 select ARCH_HAS_GCOV_PROFILE_ALL ··· 185 184 186 185 config PGTABLE_LEVELS 187 186 int 188 - default 4 187 + default 5 189 188 190 189 source "init/Kconfig" 191 190
+2 -1
arch/s390/crypto/Makefile
··· 6 6 obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o 7 7 obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o 8 8 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o 9 - obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o 9 + obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o 10 + obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o 10 11 obj-$(CONFIG_S390_PRNG) += prng.o 11 12 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o 12 13 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
+1
arch/s390/crypto/arch_random.c
··· 12 12 13 13 #include <linux/kernel.h> 14 14 #include <linux/atomic.h> 15 + #include <linux/random.h> 15 16 #include <linux/static_key.h> 16 17 #include <asm/cpacf.h> 17 18
+2
arch/s390/include/asm/Kbuild
··· 1 1 generic-y += asm-offsets.h 2 2 generic-y += cacheflush.h 3 3 generic-y += clkdev.h 4 + generic-y += device.h 4 5 generic-y += dma-contiguous.h 5 6 generic-y += div64.h 6 7 generic-y += emergency-restart.h 7 8 generic-y += export.h 9 + generic-y += fb.h 8 10 generic-y += irq_regs.h 9 11 generic-y += irq_work.h 10 12 generic-y += kmap_types.h
-10
arch/s390/include/asm/device.h
··· 1 - /* 2 - * Arch specific extensions to struct device 3 - * 4 - * This file is released under the GPLv2 5 - */ 6 - struct dev_archdata { 7 - }; 8 - 9 - struct pdev_archdata { 10 - };
+32
arch/s390/include/asm/elf.h
··· 117 117 #define ELF_DATA ELFDATA2MSB 118 118 #define ELF_ARCH EM_S390 119 119 120 + /* s390 specific phdr types */ 121 + #define PT_S390_PGSTE 0x70000000 122 + 120 123 /* 121 124 * ELF register definitions.. 122 125 */ ··· 153 150 (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ 154 151 && (x)->e_ident[EI_CLASS] == ELF_CLASS) 155 152 #define compat_start_thread start_thread31 153 + 154 + struct arch_elf_state { 155 + int rc; 156 + }; 157 + 158 + #define INIT_ARCH_ELF_STATE { .rc = 0 } 159 + 160 + #define arch_check_elf(ehdr, interp, interp_ehdr, state) (0) 161 + #ifdef CONFIG_PGSTE 162 + #define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ 163 + ({ \ 164 + struct arch_elf_state *_state = state; \ 165 + if ((phdr)->p_type == PT_S390_PGSTE && \ 166 + !page_table_allocate_pgste && \ 167 + !test_thread_flag(TIF_PGSTE) && \ 168 + !current->mm->context.alloc_pgste) { \ 169 + set_thread_flag(TIF_PGSTE); \ 170 + set_pt_regs_flag(task_pt_regs(current), \ 171 + PIF_SYSCALL_RESTART); \ 172 + _state->rc = -EAGAIN; \ 173 + } \ 174 + _state->rc; \ 175 + }) 176 + #else 177 + #define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ 178 + ({ \ 179 + (state)->rc; \ 180 + }) 181 + #endif 156 182 157 183 /* For SVR4/S390 the function pointer to be registered with `atexit` is 158 184 passed in R14. */
-12
arch/s390/include/asm/fb.h
··· 1 - #ifndef _ASM_FB_H_ 2 - #define _ASM_FB_H_ 3 - #include <linux/fb.h> 4 - 5 - #define fb_pgprotect(...) do {} while (0) 6 - 7 - static inline int fb_is_primary_device(struct fb_info *info) 8 - { 9 - return 0; 10 - } 11 - 12 - #endif /* _ASM_FB_H_ */
+2 -2
arch/s390/include/asm/io.h
··· 25 25 26 26 #define IO_SPACE_LIMIT 0 27 27 28 - #ifdef CONFIG_PCI 29 - 30 28 #define ioremap_nocache(addr, size) ioremap(addr, size) 31 29 #define ioremap_wc ioremap_nocache 32 30 #define ioremap_wt ioremap_nocache ··· 46 48 static inline void ioport_unmap(void __iomem *p) 47 49 { 48 50 } 51 + 52 + #ifdef CONFIG_PCI 49 53 50 54 /* 51 55 * s390 needs a private implementation of pci_iomap since ioremap with its
+16 -1
arch/s390/include/asm/kvm_host.h
··· 107 107 struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; 108 108 } __packed; 109 109 110 + /* 111 + * This struct is used to store some machine check info from lowcore 112 + * for machine checks that happen while the guest is running. 113 + * This info in host's lowcore might be overwritten by a second machine 114 + * check from host when host is in the machine check's high-level handling. 115 + * The size is 24 bytes. 116 + */ 117 + struct mcck_volatile_info { 118 + __u64 mcic; 119 + __u64 failing_storage_address; 120 + __u32 ext_damage_code; 121 + __u32 reserved; 122 + }; 123 + 110 124 #define CPUSTAT_STOPPED 0x80000000 111 125 #define CPUSTAT_WAIT 0x10000000 112 126 #define CPUSTAT_ECALL_PEND 0x08000000 ··· 278 264 279 265 struct sie_page { 280 266 struct kvm_s390_sie_block sie_block; 281 - __u8 reserved200[1024]; /* 0x0200 */ 267 + struct mcck_volatile_info mcck_info; /* 0x0200 */ 268 + __u8 reserved218[1000]; /* 0x0218 */ 282 269 struct kvm_s390_itdb itdb; /* 0x0600 */ 283 270 __u8 reserved700[2304]; /* 0x0700 */ 284 271 } __packed;
+3 -1
arch/s390/include/asm/mmu_context.h
··· 25 25 mm->context.gmap_asce = 0; 26 26 mm->context.flush_mm = 0; 27 27 #ifdef CONFIG_PGSTE 28 - mm->context.alloc_pgste = page_table_allocate_pgste; 28 + mm->context.alloc_pgste = page_table_allocate_pgste || 29 + test_thread_flag(TIF_PGSTE) || 30 + current->mm->context.alloc_pgste; 29 31 mm->context.has_pgste = 0; 30 32 mm->context.use_skey = 0; 31 33 mm->context.use_cmma = 0;
+7
arch/s390/include/asm/nmi.h
··· 14 14 #include <linux/const.h> 15 15 #include <linux/types.h> 16 16 17 + #define MCIC_SUBCLASS_MASK (1ULL<<63 | 1ULL<<62 | 1ULL<<61 | \ 18 + 1ULL<<59 | 1ULL<<58 | 1ULL<<56 | \ 19 + 1ULL<<55 | 1ULL<<54 | 1ULL<<53 | \ 20 + 1ULL<<52 | 1ULL<<47 | 1ULL<<46 | \ 21 + 1ULL<<45 | 1ULL<<44) 17 22 #define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63) 23 + #define MCCK_CODE_EXT_DAMAGE _BITUL(63 - 5) 24 + #define MCCK_CODE_CP _BITUL(63 - 9) 18 25 #define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46) 19 26 #define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20) 20 27 #define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
+3
arch/s390/include/asm/page.h
··· 74 74 typedef struct { unsigned long pte; } pte_t; 75 75 typedef struct { unsigned long pmd; } pmd_t; 76 76 typedef struct { unsigned long pud; } pud_t; 77 + typedef struct { unsigned long p4d; } p4d_t; 77 78 typedef struct { unsigned long pgd; } pgd_t; 78 79 typedef pte_t *pgtable_t; 79 80 ··· 83 82 #define pte_val(x) ((x).pte) 84 83 #define pmd_val(x) ((x).pmd) 85 84 #define pud_val(x) ((x).pud) 85 + #define p4d_val(x) ((x).p4d) 86 86 #define pgd_val(x) ((x).pgd) 87 87 88 88 #define __pgste(x) ((pgste_t) { (x) } ) 89 89 #define __pte(x) ((pte_t) { (x) } ) 90 90 #define __pmd(x) ((pmd_t) { (x) } ) 91 91 #define __pud(x) ((pud_t) { (x) } ) 92 + #define __p4d(x) ((p4d_t) { (x) } ) 92 93 #define __pgd(x) ((pgd_t) { (x) } ) 93 94 #define __pgprot(x) ((pgprot_t) { (x) } ) 94 95
+8 -7
arch/s390/include/asm/pci.h
··· 70 70 } __packed __aligned(128); 71 71 72 72 enum zpci_state { 73 - ZPCI_FN_STATE_RESERVED, 74 - ZPCI_FN_STATE_STANDBY, 75 - ZPCI_FN_STATE_CONFIGURED, 76 - ZPCI_FN_STATE_ONLINE, 77 - NR_ZPCI_FN_STATES, 73 + ZPCI_FN_STATE_STANDBY = 0, 74 + ZPCI_FN_STATE_CONFIGURED = 1, 75 + ZPCI_FN_STATE_RESERVED = 2, 76 + ZPCI_FN_STATE_ONLINE = 3, 78 77 }; 79 78 80 79 struct zpci_bar_struct { ··· 108 109 u64 msi_addr; /* MSI address */ 109 110 unsigned int max_msi; /* maximum number of MSI's */ 110 111 struct airq_iv *aibv; /* adapter interrupt bit vector */ 111 - unsigned int aisb; /* number of the summary bit */ 112 + unsigned long aisb; /* number of the summary bit */ 112 113 113 114 /* DMA stuff */ 114 115 unsigned long *dma_table; ··· 158 159 ----------------------------------------------------------------------------- */ 159 160 /* Base stuff */ 160 161 int zpci_create_device(struct zpci_dev *); 162 + void zpci_remove_device(struct zpci_dev *zdev); 161 163 int zpci_enable_device(struct zpci_dev *); 162 164 int zpci_disable_device(struct zpci_dev *); 163 - void zpci_stop_device(struct zpci_dev *); 164 165 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); 165 166 int zpci_unregister_ioat(struct zpci_dev *, u8); 167 + void zpci_remove_reserved_devices(void); 166 168 167 169 /* CLP */ 168 170 int clp_scan_pci_devices(void); ··· 172 172 int clp_add_pci_device(u32, u32, int); 173 173 int clp_enable_fh(struct zpci_dev *, u8); 174 174 int clp_disable_fh(struct zpci_dev *); 175 + int clp_get_state(u32 fid, enum zpci_state *state); 175 176 176 177 #ifdef CONFIG_PCI 177 178 /* Error handling and recovery */
+1 -1
arch/s390/include/asm/pci_insn.h
··· 76 76 u32 gd; 77 77 } __packed __aligned(8); 78 78 79 - int zpci_mod_fc(u64 req, struct zpci_fib *fib); 79 + u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status); 80 80 int zpci_refresh_trans(u64 fn, u64 addr, u64 range); 81 81 int zpci_load(u64 *data, u64 req, u64 offset); 82 82 int zpci_store(u64 data, u64 req, u64 offset);
+21 -4
arch/s390/include/asm/pgalloc.h
··· 51 51 return _SEGMENT_ENTRY_EMPTY; 52 52 if (mm->context.asce_limit <= (1UL << 42)) 53 53 return _REGION3_ENTRY_EMPTY; 54 - return _REGION2_ENTRY_EMPTY; 54 + if (mm->context.asce_limit <= (1UL << 53)) 55 + return _REGION2_ENTRY_EMPTY; 56 + return _REGION1_ENTRY_EMPTY; 55 57 } 56 58 57 - int crst_table_upgrade(struct mm_struct *); 59 + int crst_table_upgrade(struct mm_struct *mm, unsigned long limit); 58 60 void crst_table_downgrade(struct mm_struct *); 61 + 62 + static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) 63 + { 64 + unsigned long *table = crst_table_alloc(mm); 65 + 66 + if (table) 67 + crst_table_init(table, _REGION2_ENTRY_EMPTY); 68 + return (p4d_t *) table; 69 + } 70 + #define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d) 59 71 60 72 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 61 73 { ··· 98 86 crst_table_free(mm, (unsigned long *) pmd); 99 87 } 100 88 101 - static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) 89 + static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) 102 90 { 103 - pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud); 91 + pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d); 92 + } 93 + 94 + static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) 95 + { 96 + p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud); 104 97 } 105 98 106 99 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+86 -19
arch/s390/include/asm/pgtable.h
··· 24 24 * the S390 page table tree. 25 25 */ 26 26 #ifndef __ASSEMBLY__ 27 - #include <asm-generic/5level-fixup.h> 28 27 #include <linux/sched.h> 29 28 #include <linux/mm_types.h> 30 29 #include <linux/page-flags.h> ··· 86 87 */ 87 88 #define PMD_SHIFT 20 88 89 #define PUD_SHIFT 31 89 - #define PGDIR_SHIFT 42 90 + #define P4D_SHIFT 42 91 + #define PGDIR_SHIFT 53 90 92 91 93 #define PMD_SIZE (1UL << PMD_SHIFT) 92 94 #define PMD_MASK (~(PMD_SIZE-1)) 93 95 #define PUD_SIZE (1UL << PUD_SHIFT) 94 96 #define PUD_MASK (~(PUD_SIZE-1)) 97 + #define P4D_SIZE (1UL << P4D_SHIFT) 98 + #define P4D_MASK (~(P4D_SIZE-1)) 95 99 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 96 100 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 97 101 ··· 107 105 #define PTRS_PER_PTE 256 108 106 #define PTRS_PER_PMD 2048 109 107 #define PTRS_PER_PUD 2048 108 + #define PTRS_PER_P4D 2048 110 109 #define PTRS_PER_PGD 2048 111 110 112 111 #define FIRST_USER_ADDRESS 0UL ··· 118 115 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) 119 116 #define pud_ERROR(e) \ 120 117 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e)) 118 + #define p4d_ERROR(e) \ 119 + printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e)) 121 120 #define pgd_ERROR(e) \ 122 121 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) 123 122 ··· 301 296 #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) 302 297 303 298 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */ 304 - #define _REGION3_ENTRY_ORIGIN ~0x7ffUL/* region third table origin */ 305 - 306 299 #define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */ 307 300 #define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */ 308 301 #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */ ··· 313 310 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */ 314 311 #endif 315 312 316 - #define _REGION_ENTRY_BITS 0xfffffffffffff227UL 317 
- #define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL 313 + #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL 314 + #define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL 318 315 319 316 /* Bits in the segment table entry */ 320 317 #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL ··· 563 560 } 564 561 565 562 /* 566 - * pgd/pmd/pte query functions 563 + * pgd/p4d/pud/pmd/pte query functions 567 564 */ 565 + static inline int pgd_folded(pgd_t pgd) 566 + { 567 + return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1; 568 + } 569 + 568 570 static inline int pgd_present(pgd_t pgd) 569 571 { 570 - if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 572 + if (pgd_folded(pgd)) 571 573 return 1; 572 574 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL; 573 575 } 574 576 575 577 static inline int pgd_none(pgd_t pgd) 576 578 { 577 - if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 579 + if (pgd_folded(pgd)) 578 580 return 0; 579 581 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; 580 582 } ··· 597 589 return (pgd_val(pgd) & mask) != 0; 598 590 } 599 591 592 + static inline int p4d_folded(p4d_t p4d) 593 + { 594 + return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2; 595 + } 596 + 597 + static inline int p4d_present(p4d_t p4d) 598 + { 599 + if (p4d_folded(p4d)) 600 + return 1; 601 + return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL; 602 + } 603 + 604 + static inline int p4d_none(p4d_t p4d) 605 + { 606 + if (p4d_folded(p4d)) 607 + return 0; 608 + return p4d_val(p4d) == _REGION2_ENTRY_EMPTY; 609 + } 610 + 611 + static inline unsigned long p4d_pfn(p4d_t p4d) 612 + { 613 + unsigned long origin_mask; 614 + 615 + origin_mask = _REGION_ENTRY_ORIGIN; 616 + return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT; 617 + } 618 + 619 + static inline int pud_folded(pud_t pud) 620 + { 621 + return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3; 622 + } 623 + 600 624 static inline int 
pud_present(pud_t pud) 601 625 { 602 - if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 626 + if (pud_folded(pud)) 603 627 return 1; 604 628 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; 605 629 } 606 630 607 631 static inline int pud_none(pud_t pud) 608 632 { 609 - if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 633 + if (pud_folded(pud)) 610 634 return 0; 611 635 return pud_val(pud) == _REGION3_ENTRY_EMPTY; 612 636 } ··· 654 614 { 655 615 unsigned long origin_mask; 656 616 657 - origin_mask = _REGION3_ENTRY_ORIGIN; 617 + origin_mask = _REGION_ENTRY_ORIGIN; 658 618 if (pud_large(pud)) 659 619 origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; 660 620 return (pud_val(pud) & origin_mask) >> PAGE_SHIFT; ··· 679 639 if (pud_large(pud)) 680 640 return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0; 681 641 return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0; 642 + } 643 + 644 + static inline int p4d_bad(p4d_t p4d) 645 + { 646 + if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 647 + return pud_bad(__pud(p4d_val(p4d))); 648 + return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0; 682 649 } 683 650 684 651 static inline int pmd_present(pmd_t pmd) ··· 841 794 842 795 static inline void pgd_clear(pgd_t *pgd) 843 796 { 844 - if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 845 - pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 797 + if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 798 + pgd_val(*pgd) = _REGION1_ENTRY_EMPTY; 799 + } 800 + 801 + static inline void p4d_clear(p4d_t *p4d) 802 + { 803 + if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 804 + p4d_val(*p4d) = _REGION2_ENTRY_EMPTY; 846 805 } 847 806 848 807 static inline void pud_clear(pud_t *pud) ··· 1142 1089 } 1143 1090 1144 1091 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 1092 + #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) 1145 1093 #define 
pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 1146 1094 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 1147 1095 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) ··· 1152 1098 1153 1099 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1154 1100 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1101 + #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN) 1155 1102 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) 1156 1103 1157 - static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) 1104 + static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) 1158 1105 { 1159 - pud_t *pud = (pud_t *) pgd; 1160 - if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 1161 - pud = (pud_t *) pgd_deref(*pgd); 1162 - return pud + pud_index(address); 1106 + p4d_t *p4d = (p4d_t *) pgd; 1107 + 1108 + if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 1109 + p4d = (p4d_t *) pgd_deref(*pgd); 1110 + return p4d + p4d_index(address); 1111 + } 1112 + 1113 + static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) 1114 + { 1115 + pud_t *pud = (pud_t *) p4d; 1116 + 1117 + if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 1118 + pud = (pud_t *) p4d_deref(*p4d); 1119 + return pud + pud_index(address); 1163 1120 } 1164 1121 1165 1122 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 1166 1123 { 1167 1124 pmd_t *pmd = (pmd_t *) pud; 1125 + 1168 1126 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 1169 1127 pmd = (pmd_t *) pud_deref(*pud); 1170 1128 return pmd + pmd_index(address); ··· 1188 1122 1189 1123 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) 1190 1124 #define pud_page(pud) pfn_to_page(pud_pfn(pud)) 1125 + #define p4d_page(pud) pfn_to_page(p4d_pfn(p4d)) 1191 1126 1192 1127 /* Find an entry in the lowest level page table.. 
*/ 1193 1128 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
+4 -2
arch/s390/include/asm/processor.h
··· 20 20 #define CIF_FPU 4 /* restore FPU registers */ 21 21 #define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */ 22 22 #define CIF_ENABLED_WAIT 6 /* in enabled wait state */ 23 + #define CIF_MCCK_GUEST 7 /* machine check happening in guest */ 23 24 24 25 #define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING) 25 26 #define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY) ··· 29 28 #define _CIF_FPU _BITUL(CIF_FPU) 30 29 #define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ) 31 30 #define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT) 31 + #define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST) 32 32 33 33 #ifndef __ASSEMBLY__ 34 34 ··· 94 92 */ 95 93 96 94 #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \ 97 - (1UL << 31) : (1UL << 53)) 95 + (1UL << 31) : -PAGE_SIZE) 98 96 #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 99 97 (1UL << 30) : (1UL << 41)) 100 98 #define TASK_SIZE TASK_SIZE_OF(current) 101 - #define TASK_SIZE_MAX (1UL << 53) 99 + #define TASK_SIZE_MAX (-PAGE_SIZE) 102 100 103 101 #define STACK_TOP (test_thread_flag(TIF_31BIT) ? \ 104 102 (1UL << 31) : (1UL << 42))
+28 -26
arch/s390/include/asm/ptrace.h
··· 11 11 12 12 #define PIF_SYSCALL 0 /* inside a system call */ 13 13 #define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ 14 + #define PIF_SYSCALL_RESTART 2 /* restart the current system call */ 14 15 15 16 #define _PIF_SYSCALL _BITUL(PIF_SYSCALL) 16 17 #define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP) 18 + #define _PIF_SYSCALL_RESTART _BITUL(PIF_SYSCALL_RESTART) 17 19 18 20 #ifndef __ASSEMBLY__ 19 21 ··· 26 24 PSW_MASK_PSTATE | PSW_ASC_PRIMARY) 27 25 28 26 struct psw_bits { 29 - unsigned long : 1; 30 - unsigned long r : 1; /* PER-Mask */ 31 - unsigned long : 3; 32 - unsigned long t : 1; /* DAT Mode */ 33 - unsigned long i : 1; /* Input/Output Mask */ 34 - unsigned long e : 1; /* External Mask */ 35 - unsigned long key : 4; /* PSW Key */ 36 - unsigned long : 1; 37 - unsigned long m : 1; /* Machine-Check Mask */ 38 - unsigned long w : 1; /* Wait State */ 39 - unsigned long p : 1; /* Problem State */ 40 - unsigned long as : 2; /* Address Space Control */ 41 - unsigned long cc : 2; /* Condition Code */ 42 - unsigned long pm : 4; /* Program Mask */ 43 - unsigned long ri : 1; /* Runtime Instrumentation */ 44 - unsigned long : 6; 45 - unsigned long eaba : 2; /* Addressing Mode */ 46 - unsigned long : 31; 47 - unsigned long ia : 64; /* Instruction Address */ 27 + unsigned long : 1; 28 + unsigned long per : 1; /* PER-Mask */ 29 + unsigned long : 3; 30 + unsigned long dat : 1; /* DAT Mode */ 31 + unsigned long io : 1; /* Input/Output Mask */ 32 + unsigned long ext : 1; /* External Mask */ 33 + unsigned long key : 4; /* PSW Key */ 34 + unsigned long : 1; 35 + unsigned long mcheck : 1; /* Machine-Check Mask */ 36 + unsigned long wait : 1; /* Wait State */ 37 + unsigned long pstate : 1; /* Problem State */ 38 + unsigned long as : 2; /* Address Space Control */ 39 + unsigned long cc : 2; /* Condition Code */ 40 + unsigned long pm : 4; /* Program Mask */ 41 + unsigned long ri : 1; /* Runtime Instrumentation */ 42 + unsigned long : 6; 43 + unsigned long eaba : 2; /* 
Addressing Mode */ 44 + unsigned long : 31; 45 + unsigned long ia : 64; /* Instruction Address */ 48 46 }; 49 47 50 48 enum { 51 - PSW_AMODE_24BIT = 0, 52 - PSW_AMODE_31BIT = 1, 53 - PSW_AMODE_64BIT = 3 49 + PSW_BITS_AMODE_24BIT = 0, 50 + PSW_BITS_AMODE_31BIT = 1, 51 + PSW_BITS_AMODE_64BIT = 3 54 52 }; 55 53 56 54 enum { 57 - PSW_AS_PRIMARY = 0, 58 - PSW_AS_ACCREG = 1, 59 - PSW_AS_SECONDARY = 2, 60 - PSW_AS_HOME = 3 55 + PSW_BITS_AS_PRIMARY = 0, 56 + PSW_BITS_AS_ACCREG = 1, 57 + PSW_BITS_AS_SECONDARY = 2, 58 + PSW_BITS_AS_HOME = 3 61 59 }; 62 60 63 61 #define psw_bits(__psw) (*({ \
+1 -1
arch/s390/include/asm/sigp.h
··· 59 59 int cc; 60 60 61 61 cc = ____pcpu_sigp(addr, order, parm, &_status); 62 - if (status && cc == 1) 62 + if (status && cc == SIGP_CC_STATUS_STORED) 63 63 *status = _status; 64 64 return cc; 65 65 }
+1
arch/s390/include/asm/thread_info.h
··· 58 58 #define TIF_UPROBE 3 /* breakpointed or single-stepping */ 59 59 #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ 60 60 #define TIF_PATCH_PENDING 5 /* pending live patching update */ 61 + #define TIF_PGSTE 6 /* New mm's will use 4K page tables */ 61 62 62 63 #define TIF_31BIT 16 /* 32bit process */ 63 64 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+15
arch/s390/include/asm/tlb.h
··· 137 137 } 138 138 139 139 /* 140 + * p4d_free_tlb frees a pud table and clears the CRSTE for the 141 + * region second table entry from the tlb. 142 + * If the mm uses a four level page table the single p4d is freed 143 + * as the pgd. p4d_free_tlb checks the asce_limit against 8PB 144 + * to avoid the double free of the p4d in this case. 145 + */ 146 + static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, 147 + unsigned long address) 148 + { 149 + if (tlb->mm->context.asce_limit <= (1UL << 53)) 150 + return; 151 + tlb_remove_table(tlb, p4d); 152 + } 153 + 154 + /* 140 155 * pud_free_tlb frees a pud table and clears the CRSTE for the 141 156 * region third table entry from the tlb. 142 157 * If the mm uses a three level page table the single pud is freed
+3
arch/s390/kernel/asm-offsets.c
··· 58 58 OFFSET(__SF_BACKCHAIN, stack_frame, back_chain); 59 59 OFFSET(__SF_GPRS, stack_frame, gprs); 60 60 OFFSET(__SF_EMPTY, stack_frame, empty1); 61 + OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]); 62 + OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]); 63 + OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]); 61 64 BLANK(); 62 65 /* timeval/timezone offsets for use by vdso */ 63 66 OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
+6 -27
arch/s390/kernel/dumpstack.c
··· 98 98 return 0; 99 99 } 100 100 101 - static void show_trace(struct task_struct *task, unsigned long sp) 101 + void show_stack(struct task_struct *task, unsigned long *stack) 102 102 { 103 + unsigned long sp = (unsigned long) stack; 104 + 103 105 if (!sp) 104 106 sp = task ? task->thread.ksp : current_stack_pointer(); 105 107 printk("Call Trace:\n"); ··· 109 107 if (!task) 110 108 task = current; 111 109 debug_show_held_locks(task); 112 - } 113 - 114 - void show_stack(struct task_struct *task, unsigned long *sp) 115 - { 116 - unsigned long *stack; 117 - int i; 118 - 119 - stack = sp; 120 - if (!stack) { 121 - if (!task) 122 - stack = (unsigned long *)current_stack_pointer(); 123 - else 124 - stack = (unsigned long *)task->thread.ksp; 125 - } 126 - printk(KERN_DEFAULT "Stack:\n"); 127 - for (i = 0; i < 20; i++) { 128 - if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 129 - break; 130 - if (i % 4 == 0) 131 - printk(KERN_DEFAULT " "); 132 - pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' '); 133 - } 134 - show_trace(task, (unsigned long)sp); 135 110 } 136 111 137 112 static void show_last_breaking_event(struct pt_regs *regs) ··· 128 149 pr_cont(" (%pSR)", (void *)regs->psw.addr); 129 150 pr_cont("\n"); 130 151 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 131 - "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e, 132 - psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm); 152 + "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext, 153 + psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm); 133 154 pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba); 134 155 printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode, 135 156 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); ··· 148 169 show_registers(regs); 149 170 /* Show stack backtrace if pt_regs is from kernel mode */ 150 171 if (!user_mode(regs)) 151 - show_trace(NULL, regs->gprs[15]); 172 + show_stack(NULL, (unsigned long *) regs->gprs[15]); 152 173 
show_last_breaking_event(regs); 153 174 } 154 175
+26 -4
arch/s390/kernel/entry.S
··· 52 52 _TIF_SYSCALL_TRACEPOINT) 53 53 _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ 54 54 _CIF_ASCE_SECONDARY | _CIF_FPU) 55 - _PIF_WORK = (_PIF_PER_TRAP) 55 + _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) 56 56 57 57 #define BASED(name) name-cleanup_critical(%r13) 58 58 ··· 225 225 jnz .Lsie_skip 226 226 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 227 227 jo .Lsie_skip # exit if fp/vx regs changed 228 + .Lsie_entry: 228 229 sie 0(%r14) 229 230 .Lsie_skip: 230 231 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE ··· 335 334 jo .Lsysc_mcck_pending 336 335 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 337 336 jo .Lsysc_reschedule 337 + TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 338 + jo .Lsysc_syscall_restart 338 339 #ifdef CONFIG_UPROBES 339 340 TSTMSK __TI_flags(%r12),_TIF_UPROBE 340 341 jo .Lsysc_uprobe_notify ··· 350 347 jo .Lsysc_patch_pending # handle live patching just before 351 348 # signals and possible syscall restart 352 349 #endif 350 + TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 351 + jo .Lsysc_syscall_restart 353 352 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 354 353 jo .Lsysc_sigpending 355 354 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME ··· 451 446 lgr %r2,%r11 # pass pointer to pt_regs 452 447 larl %r14,.Lsysc_return 453 448 jg do_per_trap 449 + 450 + # 451 + # _PIF_SYSCALL_RESTART is set, repeat the current system call 452 + # 453 + .Lsysc_syscall_restart: 454 + ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART 455 + lmg %r1,%r7,__PT_R1(%r11) # load svc arguments 456 + lg %r2,__PT_ORIG_GPR2(%r11) 457 + j .Lsysc_do_svc 454 458 455 459 # 456 460 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before ··· 895 881 oi __LC_CPU_FLAGS+7,_CIF_FPU 896 882 br %r14 897 883 .Lsave_fpu_regs_end: 898 - #if IS_ENABLED(CONFIG_KVM) 899 884 EXPORT_SYMBOL(save_fpu_regs) 900 - #endif 901 885 902 886 /* 903 887 * Load floating-point controls and floating-point or vector registers. 
··· 1123 1111 .quad .Lsie_done 1124 1112 1125 1113 .Lcleanup_sie: 1126 - lg %r9,__SF_EMPTY(%r15) # get control block pointer 1114 + cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt? 1115 + je 1f 1116 + slg %r9,BASED(.Lsie_crit_mcck_start) 1117 + clg %r9,BASED(.Lsie_crit_mcck_length) 1118 + jh 1f 1119 + oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 1120 + 1: lg %r9,__SF_EMPTY(%r15) # get control block pointer 1127 1121 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1128 1122 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1129 1123 larl %r9,sie_exit # skip forward to sie_exit ··· 1314 1296 .quad .Lsie_gmap 1315 1297 .Lsie_critical_length: 1316 1298 .quad .Lsie_done - .Lsie_gmap 1299 + .Lsie_crit_mcck_start: 1300 + .quad .Lsie_entry 1301 + .Lsie_crit_mcck_length: 1302 + .quad .Lsie_skip - .Lsie_entry 1317 1303 #endif 1318 1304 1319 1305 .section .rodata, "a"
+74 -10
arch/s390/kernel/nmi.c
··· 25 25 #include <asm/crw.h> 26 26 #include <asm/switch_to.h> 27 27 #include <asm/ctl_reg.h> 28 + #include <asm/asm-offsets.h> 29 + #include <linux/kvm_host.h> 28 30 29 31 struct mcck_struct { 30 32 unsigned int kill_task : 1; ··· 276 274 return kill_task; 277 275 } 278 276 277 + /* 278 + * Backup the guest's machine check info to its description block 279 + */ 280 + static void notrace s390_backup_mcck_info(struct pt_regs *regs) 281 + { 282 + struct mcck_volatile_info *mcck_backup; 283 + struct sie_page *sie_page; 284 + 285 + /* r14 contains the sie block, which was set in sie64a */ 286 + struct kvm_s390_sie_block *sie_block = 287 + (struct kvm_s390_sie_block *) regs->gprs[14]; 288 + 289 + if (sie_block == NULL) 290 + /* Something's seriously wrong, stop system. */ 291 + s390_handle_damage(); 292 + 293 + sie_page = container_of(sie_block, struct sie_page, sie_block); 294 + mcck_backup = &sie_page->mcck_info; 295 + mcck_backup->mcic = S390_lowcore.mcck_interruption_code & 296 + ~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE); 297 + mcck_backup->ext_damage_code = S390_lowcore.external_damage_code; 298 + mcck_backup->failing_storage_address 299 + = S390_lowcore.failing_storage_address; 300 + } 301 + 279 302 #define MAX_IPD_COUNT 29 280 303 #define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */ 281 304 282 305 #define ED_STP_ISLAND 6 /* External damage STP island check */ 283 306 #define ED_STP_SYNC 7 /* External damage STP sync check */ 307 + 308 + #define MCCK_CODE_NO_GUEST (MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE) 284 309 285 310 /* 286 311 * machine check handler. 
··· 320 291 struct mcck_struct *mcck; 321 292 unsigned long long tmp; 322 293 union mci mci; 294 + unsigned long mcck_dam_code; 323 295 324 296 nmi_enter(); 325 297 inc_irq_stat(NMI_NMI); ··· 331 301 /* System damage -> stopping machine */ 332 302 s390_handle_damage(); 333 303 } 334 - if (mci.pd) { 304 + 305 + /* 306 + * Reinject the instruction processing damages' machine checks 307 + * including Delayed Access Exception into the guest 308 + * instead of damaging the host if they happen in the guest. 309 + */ 310 + if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) { 335 311 if (mci.b) { 336 312 /* Processing backup -> verify if we can survive this */ 337 313 u64 z_mcic, o_mcic, t_mcic; ··· 381 345 mcck->mcck_code = mci.val; 382 346 set_cpu_flag(CIF_MCCK_PENDING); 383 347 } 348 + 349 + /* 350 + * Backup the machine check's info if it happens when the guest 351 + * is running. 352 + */ 353 + if (test_cpu_flag(CIF_MCCK_GUEST)) 354 + s390_backup_mcck_info(regs); 355 + 384 356 if (mci.cd) { 385 357 /* Timing facility damage */ 386 358 s390_handle_damage(); ··· 402 358 if (mcck->stp_queue) 403 359 set_cpu_flag(CIF_MCCK_PENDING); 404 360 } 405 - if (mci.se) 406 - /* Storage error uncorrected */ 407 - s390_handle_damage(); 408 - if (mci.ke) 409 - /* Storage key-error uncorrected */ 410 - s390_handle_damage(); 411 - if (mci.ds && mci.fa) 412 - /* Storage degradation */ 413 - s390_handle_damage(); 361 + 362 + /* 363 + * Reinject storage related machine checks into the guest if they 364 + * happen when the guest is running. 
365 + */ 366 + if (!test_cpu_flag(CIF_MCCK_GUEST)) { 367 + if (mci.se) 368 + /* Storage error uncorrected */ 369 + s390_handle_damage(); 370 + if (mci.ke) 371 + /* Storage key-error uncorrected */ 372 + s390_handle_damage(); 373 + if (mci.ds && mci.fa) 374 + /* Storage degradation */ 375 + s390_handle_damage(); 376 + } 414 377 if (mci.cp) { 415 378 /* Channel report word pending */ 416 379 mcck->channel_report = 1; ··· 428 377 mcck->warning = 1; 429 378 set_cpu_flag(CIF_MCCK_PENDING); 430 379 } 380 + 381 + /* 382 + * If there are only Channel Report Pending and External Damage 383 + * machine checks, they will not be reinjected into the guest 384 + * because they refer to host conditions only. 385 + */ 386 + mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK); 387 + if (test_cpu_flag(CIF_MCCK_GUEST) && 388 + (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) { 389 + /* Set exit reason code for host's later handling */ 390 + *((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR; 391 + } 392 + clear_cpu_flag(CIF_MCCK_GUEST); 431 393 nmi_exit(); 432 394 } 433 395
+5 -5
arch/s390/kernel/perf_cpum_sf.c
··· 995 995 regs.int_parm = CPU_MF_INT_SF_PRA; 996 996 sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long; 997 997 998 - psw_bits(regs.psw).ia = sfr->basic.ia; 999 - psw_bits(regs.psw).t = sfr->basic.T; 1000 - psw_bits(regs.psw).w = sfr->basic.W; 1001 - psw_bits(regs.psw).p = sfr->basic.P; 1002 - psw_bits(regs.psw).as = sfr->basic.AS; 998 + psw_bits(regs.psw).ia = sfr->basic.ia; 999 + psw_bits(regs.psw).dat = sfr->basic.T; 1000 + psw_bits(regs.psw).wait = sfr->basic.W; 1001 + psw_bits(regs.psw).per = sfr->basic.P; 1002 + psw_bits(regs.psw).as = sfr->basic.AS; 1003 1003 1004 1004 /* 1005 1005 * Use the hardware provided configuration level to decide if the
+1 -2
arch/s390/kernel/perf_event.c
··· 245 245 struct perf_pmu_events_attr *pmu_attr; 246 246 247 247 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); 248 - return sprintf(page, "event=0x%04llx,name=%s\n", 249 - pmu_attr->id, attr->attr.name); 248 + return sprintf(page, "event=0x%04llx\n", pmu_attr->id); 250 249 }
+13 -2
arch/s390/kernel/ptrace.c
··· 1160 1160 return -ENODEV; 1161 1161 if (!data) 1162 1162 return -ENODATA; 1163 + if (target == current) 1164 + save_gs_cb(data); 1163 1165 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 1164 1166 data, 0, sizeof(struct gs_cb)); 1165 1167 } ··· 1172 1170 const void *kbuf, const void __user *ubuf) 1173 1171 { 1174 1172 struct gs_cb *data = target->thread.gs_cb; 1173 + int rc; 1175 1174 1176 1175 if (!MACHINE_HAS_GS) 1177 1176 return -ENODEV; ··· 1180 1177 data = kzalloc(sizeof(*data), GFP_KERNEL); 1181 1178 if (!data) 1182 1179 return -ENOMEM; 1180 + data->gsd = 25; 1183 1181 target->thread.gs_cb = data; 1182 + if (target == current) 1183 + __ctl_set_bit(2, 4); 1184 + } else if (target == current) { 1185 + save_gs_cb(data); 1184 1186 } 1185 - return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1186 - data, 0, sizeof(struct gs_cb)); 1187 + rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1188 + data, 0, sizeof(struct gs_cb)); 1189 + if (target == current) 1190 + restore_gs_cb(data); 1191 + return rc; 1187 1192 } 1188 1193 1189 1194 static int s390_gs_bc_get(struct task_struct *target,
+3
arch/s390/kernel/smp.c
··· 26 26 #include <linux/err.h> 27 27 #include <linux/spinlock.h> 28 28 #include <linux/kernel_stat.h> 29 + #include <linux/kmemleak.h> 29 30 #include <linux/delay.h> 30 31 #include <linux/interrupt.h> 31 32 #include <linux/irqflags.h> ··· 208 207 kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL); 209 208 if (!mcesa_origin) 210 209 goto out; 210 + /* The pointer is stored with mcesa_bits ORed in */ 211 + kmemleak_not_leak((void *) mcesa_origin); 211 212 mcesa_bits = MACHINE_HAS_GS ? 11 : 0; 212 213 } 213 214 } else {
+1
arch/s390/kernel/traps.c
··· 21 21 #include <linux/mm.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/uaccess.h> 24 + #include <linux/cpu.h> 24 25 #include <asm/fpu/api.h> 25 26 #include "entry.h" 26 27
+6 -6
arch/s390/kernel/uprobes.c
··· 27 27 28 28 int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 29 29 { 30 - if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) 30 + if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) 31 31 return -EINVAL; 32 - if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) 32 + if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) 33 33 return -EINVAL; 34 34 clear_pt_regs_flag(regs, PIF_PER_TRAP); 35 - auprobe->saved_per = psw_bits(regs->psw).r; 35 + auprobe->saved_per = psw_bits(regs->psw).per; 36 36 auprobe->saved_int_code = regs->int_code; 37 37 regs->int_code = UPROBE_TRAP_NR; 38 38 regs->psw.addr = current->utask->xol_vaddr; ··· 81 81 82 82 clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP); 83 83 update_cr_regs(current); 84 - psw_bits(regs->psw).r = auprobe->saved_per; 84 + psw_bits(regs->psw).per = auprobe->saved_per; 85 85 regs->int_code = auprobe->saved_int_code; 86 86 87 87 if (fixup & FIXUP_PSW_NORMAL) ··· 372 372 373 373 bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) 374 374 { 375 - if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) || 376 - ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) && 375 + if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) || 376 + ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) && 377 377 !is_compat_task())) { 378 378 regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE); 379 379 do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
+64 -27
arch/s390/kernel/vdso.c
··· 50 50 */ 51 51 unsigned int __read_mostly vdso_enabled = 1; 52 52 53 + static int vdso_fault(const struct vm_special_mapping *sm, 54 + struct vm_area_struct *vma, struct vm_fault *vmf) 55 + { 56 + struct page **vdso_pagelist; 57 + unsigned long vdso_pages; 58 + 59 + vdso_pagelist = vdso64_pagelist; 60 + vdso_pages = vdso64_pages; 61 + #ifdef CONFIG_COMPAT 62 + if (is_compat_task()) { 63 + vdso_pagelist = vdso32_pagelist; 64 + vdso_pages = vdso32_pages; 65 + } 66 + #endif 67 + 68 + if (vmf->pgoff >= vdso_pages) 69 + return VM_FAULT_SIGBUS; 70 + 71 + vmf->page = vdso_pagelist[vmf->pgoff]; 72 + get_page(vmf->page); 73 + return 0; 74 + } 75 + 76 + static int vdso_mremap(const struct vm_special_mapping *sm, 77 + struct vm_area_struct *vma) 78 + { 79 + unsigned long vdso_pages; 80 + 81 + vdso_pages = vdso64_pages; 82 + #ifdef CONFIG_COMPAT 83 + if (is_compat_task()) 84 + vdso_pages = vdso32_pages; 85 + #endif 86 + 87 + if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start) 88 + return -EINVAL; 89 + 90 + if (WARN_ON_ONCE(current->mm != vma->vm_mm)) 91 + return -EFAULT; 92 + 93 + current->mm->context.vdso_base = vma->vm_start; 94 + return 0; 95 + } 96 + 97 + static const struct vm_special_mapping vdso_mapping = { 98 + .name = "[vdso]", 99 + .fault = vdso_fault, 100 + .mremap = vdso_mremap, 101 + }; 102 + 53 103 static int __init vdso_setup(char *s) 54 104 { 55 105 unsigned long val; ··· 231 181 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 232 182 { 233 183 struct mm_struct *mm = current->mm; 234 - struct page **vdso_pagelist; 184 + struct vm_area_struct *vma; 235 185 unsigned long vdso_pages; 236 186 unsigned long vdso_base; 237 187 int rc; ··· 244 194 if (!uses_interp) 245 195 return 0; 246 196 247 - vdso_pagelist = vdso64_pagelist; 248 197 vdso_pages = vdso64_pages; 249 198 #ifdef CONFIG_COMPAT 250 - if (is_compat_task()) { 251 - vdso_pagelist = vdso32_pagelist; 199 + if (is_compat_task()) 252 200 vdso_pages = vdso32_pages; 253 
- } 254 201 #endif 255 202 /* 256 203 * vDSO has a problem and was disabled, just don't "enable" it for ··· 255 208 */ 256 209 if (vdso_pages == 0) 257 210 return 0; 258 - 259 - current->mm->context.vdso_base = 0; 260 211 261 212 /* 262 213 * pick a base address for the vDSO in process space. We try to put ··· 270 225 } 271 226 272 227 /* 273 - * Put vDSO base into mm struct. We need to do this before calling 274 - * install_special_mapping or the perf counter mmap tracking code 275 - * will fail to recognise it as a vDSO (since arch_vma_name fails). 276 - */ 277 - current->mm->context.vdso_base = vdso_base; 278 - 279 - /* 280 228 * our vma flags don't have VM_WRITE so by default, the process 281 229 * isn't allowed to write those pages. 282 230 * gdb can break that with ptrace interface, and thus trigger COW ··· 279 241 * It's fine to use that for setting breakpoints in the vDSO code 280 242 * pages though. 281 243 */ 282 - rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, 283 - VM_READ|VM_EXEC| 284 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 285 - vdso_pagelist); 286 - if (rc) 287 - current->mm->context.vdso_base = 0; 244 + vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, 245 + VM_READ|VM_EXEC| 246 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 247 + &vdso_mapping); 248 + if (IS_ERR(vma)) { 249 + rc = PTR_ERR(vma); 250 + goto out_up; 251 + } 252 + 253 + current->mm->context.vdso_base = vdso_base; 254 + rc = 0; 255 + 288 256 out_up: 289 257 up_write(&mm->mmap_sem); 290 258 return rc; 291 - } 292 - 293 - const char *arch_vma_name(struct vm_area_struct *vma) 294 - { 295 - if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) 296 - return "[vdso]"; 297 - return NULL; 298 259 } 299 260 300 261 static int __init vdso_init(void)
+5 -9
arch/s390/kernel/vtime.c
··· 110 110 return vtime; 111 111 } 112 112 113 - static void account_system_index_scaled(struct task_struct *p, 114 - u64 cputime, u64 scaled, 113 + static void account_system_index_scaled(struct task_struct *p, u64 cputime, 115 114 enum cpu_usage_stat index) 116 115 { 117 - p->stimescaled += cputime_to_nsecs(scaled); 116 + p->stimescaled += cputime_to_nsecs(scale_vtime(cputime)); 118 117 account_system_index_time(p, cputime_to_nsecs(cputime), index); 119 118 } 120 119 ··· 175 176 } 176 177 177 178 if (system) 178 - account_system_index_scaled(tsk, system, scale_vtime(system), 179 - CPUTIME_SYSTEM); 179 + account_system_index_scaled(tsk, system, CPUTIME_SYSTEM); 180 180 if (hardirq) 181 - account_system_index_scaled(tsk, hardirq, scale_vtime(hardirq), 182 - CPUTIME_IRQ); 181 + account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ); 183 182 if (softirq) 184 - account_system_index_scaled(tsk, softirq, scale_vtime(softirq), 185 - CPUTIME_SOFTIRQ); 183 + account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ); 186 184 187 185 steal = S390_lowcore.steal_timer; 188 186 if ((s64) steal > 0) {
+11 -11
arch/s390/kvm/gaccess.c
··· 551 551 int rc; 552 552 struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw); 553 553 554 - if (!psw.t) { 554 + if (!psw.dat) { 555 555 asce->val = 0; 556 556 asce->r = 1; 557 557 return 0; 558 558 } 559 559 560 - if (mode == GACC_IFETCH) 561 - psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY; 560 + if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME)) 561 + psw.as = PSW_BITS_AS_PRIMARY; 562 562 563 563 switch (psw.as) { 564 - case PSW_AS_PRIMARY: 564 + case PSW_BITS_AS_PRIMARY: 565 565 asce->val = vcpu->arch.sie_block->gcr[1]; 566 566 return 0; 567 - case PSW_AS_SECONDARY: 567 + case PSW_BITS_AS_SECONDARY: 568 568 asce->val = vcpu->arch.sie_block->gcr[7]; 569 569 return 0; 570 - case PSW_AS_HOME: 570 + case PSW_BITS_AS_HOME: 571 571 asce->val = vcpu->arch.sie_block->gcr[13]; 572 572 return 0; 573 - case PSW_AS_ACCREG: 573 + case PSW_BITS_AS_ACCREG: 574 574 rc = ar_translation(vcpu, asce, ar, mode); 575 575 if (rc > 0) 576 576 return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC); ··· 771 771 772 772 if (!ctlreg0.lap) 773 773 return 0; 774 - if (psw_bits(*psw).t && asce.p) 774 + if (psw_bits(*psw).dat && asce.p) 775 775 return 0; 776 776 return 1; 777 777 } ··· 790 790 return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode, 791 791 PROT_TYPE_LA); 792 792 ga &= PAGE_MASK; 793 - if (psw_bits(*psw).t) { 793 + if (psw_bits(*psw).dat) { 794 794 rc = guest_translate(vcpu, ga, pages, asce, mode); 795 795 if (rc < 0) 796 796 return rc; ··· 831 831 pages = vmalloc(nr_pages * sizeof(unsigned long)); 832 832 if (!pages) 833 833 return -ENOMEM; 834 - need_ipte_lock = psw_bits(*psw).t && !asce.r; 834 + need_ipte_lock = psw_bits(*psw).dat && !asce.r; 835 835 if (need_ipte_lock) 836 836 ipte_lock(vcpu); 837 837 rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode); ··· 899 899 mode, PROT_TYPE_LA); 900 900 } 901 901 902 - if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ 902 + if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? 
*/ 903 903 rc = guest_translate(vcpu, gva, gpa, asce, mode); 904 904 if (rc > 0) 905 905 return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
+2 -2
arch/s390/kvm/gaccess.h
··· 57 57 { 58 58 psw_t *psw = &vcpu->arch.sie_block->gpsw; 59 59 60 - if (psw_bits(*psw).eaba == PSW_AMODE_64BIT) 60 + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT) 61 61 return ga; 62 - if (psw_bits(*psw).eaba == PSW_AMODE_31BIT) 62 + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT) 63 63 return ga & ((1UL << 31) - 1); 64 64 return ga & ((1UL << 24) - 1); 65 65 }
+3 -3
arch/s390/kvm/guestdbg.c
··· 613 613 * instruction. Check primary and home space-switch-event 614 614 * controls. (theoretically home -> home produced no event) 615 615 */ 616 - if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) && 617 - (pssec(vcpu) || hssec(vcpu))) 616 + if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) && 617 + (pssec(vcpu) || hssec(vcpu))) 618 618 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; 619 619 620 620 /* 621 621 * PT, PTI, PR, PC instruction operate on primary AS only. Check 622 622 * if the primary-space-switch-event control was or got set. 623 623 */ 624 - if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) && 624 + if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) && 625 625 (pssec(vcpu) || old_ssec(vcpu))) 626 626 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; 627 627 }
+1
arch/s390/kvm/kvm-s390.c
··· 2067 2067 if (!vcpu) 2068 2068 goto out; 2069 2069 2070 + BUILD_BUG_ON(sizeof(struct sie_page) != 4096); 2070 2071 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); 2071 2072 if (!sie_page) 2072 2073 goto out_free_cpu;
+4 -4
arch/s390/kvm/priv.c
··· 361 361 } 362 362 } 363 363 if (m3 & SSKE_MB) { 364 - if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) 364 + if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) 365 365 vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK; 366 366 else 367 367 vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL; ··· 374 374 static int handle_ipte_interlock(struct kvm_vcpu *vcpu) 375 375 { 376 376 vcpu->stat.instruction_ipte_interlock++; 377 - if (psw_bits(vcpu->arch.sie_block->gpsw).p) 377 + if (psw_bits(vcpu->arch.sie_block->gpsw).pstate) 378 378 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 379 379 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); 380 380 kvm_s390_retry_instr(vcpu); ··· 901 901 /* only support 2G frame size if EDAT2 is available and we are 902 902 not in 24-bit addressing mode */ 903 903 if (!test_kvm_facility(vcpu->kvm, 78) || 904 - psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT) 904 + psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT) 905 905 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 906 906 end = (start + (1UL << 31)) & ~((1UL << 31) - 1); 907 907 break; ··· 938 938 start += PAGE_SIZE; 939 939 } 940 940 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 941 - if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) { 941 + if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { 942 942 vcpu->run->s.regs.gprs[reg2] = end; 943 943 } else { 944 944 vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
+20 -3
arch/s390/mm/dump_pagetables.c
··· 149 149 } 150 150 151 151 static void walk_pud_level(struct seq_file *m, struct pg_state *st, 152 - pgd_t *pgd, unsigned long addr) 152 + p4d_t *p4d, unsigned long addr) 153 153 { 154 154 unsigned int prot; 155 155 pud_t *pud; ··· 157 157 158 158 for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) { 159 159 st->current_address = addr; 160 - pud = pud_offset(pgd, addr); 160 + pud = pud_offset(p4d, addr); 161 161 if (!pud_none(*pud)) 162 162 if (pud_large(*pud)) { 163 163 prot = pud_val(*pud) & ··· 169 169 else 170 170 note_page(m, st, _PAGE_INVALID, 2); 171 171 addr += PUD_SIZE; 172 + } 173 + } 174 + 175 + static void walk_p4d_level(struct seq_file *m, struct pg_state *st, 176 + pgd_t *pgd, unsigned long addr) 177 + { 178 + p4d_t *p4d; 179 + int i; 180 + 181 + for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) { 182 + st->current_address = addr; 183 + p4d = p4d_offset(pgd, addr); 184 + if (!p4d_none(*p4d)) 185 + walk_pud_level(m, st, p4d, addr); 186 + else 187 + note_page(m, st, _PAGE_INVALID, 2); 188 + addr += P4D_SIZE; 172 189 } 173 190 } 174 191 ··· 201 184 st.current_address = addr; 202 185 pgd = pgd_offset_k(addr); 203 186 if (!pgd_none(*pgd)) 204 - walk_pud_level(m, &st, pgd, addr); 187 + walk_p4d_level(m, &st, pgd, addr); 205 188 else 206 189 note_page(m, &st, _PAGE_INVALID, 1); 207 190 addr += PGDIR_SIZE;
+1 -1
arch/s390/mm/fault.c
··· 130 130 131 131 static void dump_pagetable(unsigned long asce, unsigned long address) 132 132 { 133 - unsigned long *table = __va(asce & PAGE_MASK); 133 + unsigned long *table = __va(asce & _ASCE_ORIGIN); 134 134 135 135 pr_alert("AS:%016lx ", asce); 136 136 switch (asce & _ASCE_TYPE_MASK) {
+7 -4
arch/s390/mm/gmap.c
··· 125 125 struct radix_tree_iter iter; 126 126 unsigned long indices[16]; 127 127 unsigned long index; 128 - void **slot; 128 + void __rcu **slot; 129 129 int i, nr; 130 130 131 131 /* A radix tree is freed by deleting all of its entries */ ··· 150 150 struct radix_tree_iter iter; 151 151 unsigned long indices[16]; 152 152 unsigned long index; 153 - void **slot; 153 + void __rcu **slot; 154 154 int i, nr; 155 155 156 156 /* A radix tree is freed by deleting all of its entries */ ··· 537 537 unsigned long *table; 538 538 spinlock_t *ptl; 539 539 pgd_t *pgd; 540 + p4d_t *p4d; 540 541 pud_t *pud; 541 542 pmd_t *pmd; 542 543 int rc; ··· 574 573 mm = gmap->mm; 575 574 pgd = pgd_offset(mm, vmaddr); 576 575 VM_BUG_ON(pgd_none(*pgd)); 577 - pud = pud_offset(pgd, vmaddr); 576 + p4d = p4d_offset(pgd, vmaddr); 577 + VM_BUG_ON(p4d_none(*p4d)); 578 + pud = pud_offset(p4d, vmaddr); 578 579 VM_BUG_ON(pud_none(*pud)); 579 580 /* large puds cannot yet be handled */ 580 581 if (pud_large(*pud)) ··· 1011 1008 static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr, 1012 1009 struct gmap_rmap *rmap) 1013 1010 { 1014 - void **slot; 1011 + void __rcu **slot; 1015 1012 1016 1013 BUG_ON(!gmap_is_shadow(sg)); 1017 1014 slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
+28 -5
arch/s390/mm/gup.c
··· 166 166 return 1; 167 167 } 168 168 169 - static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, 169 + static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, 170 170 unsigned long end, int write, struct page **pages, int *nr) 171 171 { 172 172 unsigned long next; 173 173 pud_t *pudp, pud; 174 174 175 - pudp = (pud_t *) pgdp; 176 - if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 177 - pudp = (pud_t *) pgd_deref(pgd); 175 + pudp = (pud_t *) p4dp; 176 + if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 177 + pudp = (pud_t *) p4d_deref(p4d); 178 178 pudp += pud_index(addr); 179 179 do { 180 180 pud = *pudp; ··· 190 190 nr)) 191 191 return 0; 192 192 } while (pudp++, addr = next, addr != end); 193 + 194 + return 1; 195 + } 196 + 197 + static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, 198 + unsigned long end, int write, struct page **pages, int *nr) 199 + { 200 + unsigned long next; 201 + p4d_t *p4dp, p4d; 202 + 203 + p4dp = (p4d_t *) pgdp; 204 + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 205 + p4dp = (p4d_t *) pgd_deref(pgd); 206 + p4dp += p4d_index(addr); 207 + do { 208 + p4d = *p4dp; 209 + barrier(); 210 + next = p4d_addr_end(addr, end); 211 + if (p4d_none(p4d)) 212 + return 0; 213 + if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr)) 214 + return 0; 215 + } while (p4dp++, addr = next, addr != end); 193 216 194 217 return 1; 195 218 } ··· 251 228 next = pgd_addr_end(addr, end); 252 229 if (pgd_none(pgd)) 253 230 break; 254 - if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr)) 231 + if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr)) 255 232 break; 256 233 } while (pgdp++, addr = next, addr != end); 257 234 local_irq_restore(flags);
+19 -11
arch/s390/mm/hugetlbpage.c
··· 162 162 unsigned long addr, unsigned long sz) 163 163 { 164 164 pgd_t *pgdp; 165 + p4d_t *p4dp; 165 166 pud_t *pudp; 166 167 pmd_t *pmdp = NULL; 167 168 168 169 pgdp = pgd_offset(mm, addr); 169 - pudp = pud_alloc(mm, pgdp, addr); 170 - if (pudp) { 171 - if (sz == PUD_SIZE) 172 - return (pte_t *) pudp; 173 - else if (sz == PMD_SIZE) 174 - pmdp = pmd_alloc(mm, pudp, addr); 170 + p4dp = p4d_alloc(mm, pgdp, addr); 171 + if (p4dp) { 172 + pudp = pud_alloc(mm, p4dp, addr); 173 + if (pudp) { 174 + if (sz == PUD_SIZE) 175 + return (pte_t *) pudp; 176 + else if (sz == PMD_SIZE) 177 + pmdp = pmd_alloc(mm, pudp, addr); 178 + } 175 179 } 176 180 return (pte_t *) pmdp; 177 181 } ··· 183 179 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 184 180 { 185 181 pgd_t *pgdp; 182 + p4d_t *p4dp; 186 183 pud_t *pudp; 187 184 pmd_t *pmdp = NULL; 188 185 189 186 pgdp = pgd_offset(mm, addr); 190 187 if (pgd_present(*pgdp)) { 191 - pudp = pud_offset(pgdp, addr); 192 - if (pud_present(*pudp)) { 193 - if (pud_large(*pudp)) 194 - return (pte_t *) pudp; 195 - pmdp = pmd_offset(pudp, addr); 188 + p4dp = p4d_offset(pgdp, addr); 189 + if (p4d_present(*p4dp)) { 190 + pudp = pud_offset(p4dp, addr); 191 + if (pud_present(*pudp)) { 192 + if (pud_large(*pudp)) 193 + return (pte_t *) pudp; 194 + pmdp = pmd_offset(pudp, addr); 195 + } 196 196 } 197 197 } 198 198 return (pte_t *) pmdp;
+5 -1
arch/s390/mm/init.c
··· 81 81 { 82 82 unsigned long max_zone_pfns[MAX_NR_ZONES]; 83 83 unsigned long pgd_type, asce_bits; 84 + psw_t psw; 84 85 85 86 init_mm.pgd = swapper_pg_dir; 86 87 if (VMALLOC_END > (1UL << 42)) { ··· 101 100 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 102 101 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 103 102 __ctl_load(S390_lowcore.kernel_asce, 13, 13); 104 - __arch_local_irq_stosm(0x04); 103 + psw.mask = __extract_psw(); 104 + psw_bits(psw).dat = 1; 105 + psw_bits(psw).as = PSW_BITS_AS_HOME; 106 + __load_psw_mask(psw.mask); 105 107 106 108 sparse_memory_present_with_active_regions(MAX_NUMNODES); 107 109 sparse_init();
+2 -2
arch/s390/mm/mmap.c
··· 120 120 121 121 check_asce_limit: 122 122 if (addr + len > current->mm->context.asce_limit) { 123 - rc = crst_table_upgrade(mm); 123 + rc = crst_table_upgrade(mm, addr + len); 124 124 if (rc) 125 125 return (unsigned long) rc; 126 126 } ··· 184 184 185 185 check_asce_limit: 186 186 if (addr + len > current->mm->context.asce_limit) { 187 - rc = crst_table_upgrade(mm); 187 + rc = crst_table_upgrade(mm, addr + len); 188 188 if (rc) 189 189 return (unsigned long) rc; 190 190 }
+26 -4
arch/s390/mm/pageattr.c
··· 229 229 pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3); 230 230 } 231 231 232 - static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end, 232 + static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end, 233 233 unsigned long flags) 234 234 { 235 235 unsigned long next; 236 236 pud_t *pudp; 237 237 int rc = 0; 238 238 239 - pudp = pud_offset(pgd, addr); 239 + pudp = pud_offset(p4d, addr); 240 240 do { 241 241 if (pud_none(*pudp)) 242 242 return -EINVAL; ··· 253 253 rc = walk_pmd_level(pudp, addr, next, flags); 254 254 } 255 255 pudp++; 256 + addr = next; 257 + cond_resched(); 258 + } while (addr < end && !rc); 259 + return rc; 260 + } 261 + 262 + static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end, 263 + unsigned long flags) 264 + { 265 + unsigned long next; 266 + p4d_t *p4dp; 267 + int rc = 0; 268 + 269 + p4dp = p4d_offset(pgd, addr); 270 + do { 271 + if (p4d_none(*p4dp)) 272 + return -EINVAL; 273 + next = p4d_addr_end(addr, end); 274 + rc = walk_pud_level(p4dp, addr, next, flags); 275 + p4dp++; 256 276 addr = next; 257 277 cond_resched(); 258 278 } while (addr < end && !rc); ··· 298 278 if (pgd_none(*pgdp)) 299 279 break; 300 280 next = pgd_addr_end(addr, end); 301 - rc = walk_pud_level(pgdp, addr, next, flags); 281 + rc = walk_p4d_level(pgdp, addr, next, flags); 302 282 if (rc) 303 283 break; 304 284 cond_resched(); ··· 339 319 unsigned long address; 340 320 int nr, i, j; 341 321 pgd_t *pgd; 322 + p4d_t *p4d; 342 323 pud_t *pud; 343 324 pmd_t *pmd; 344 325 pte_t *pte; ··· 347 326 for (i = 0; i < numpages;) { 348 327 address = page_to_phys(page + i); 349 328 pgd = pgd_offset_k(address); 350 - pud = pud_offset(pgd, address); 329 + p4d = p4d_offset(pgd, address); 330 + pud = pud_offset(p4d, address); 351 331 pmd = pmd_offset(pud, address); 352 332 pte = pte_offset_kernel(pmd, address); 353 333 nr = (unsigned long)pte >> ilog2(sizeof(long));
+37 -20
arch/s390/mm/pgalloc.c
··· 76 76 __tlb_flush_local(); 77 77 } 78 78 79 - int crst_table_upgrade(struct mm_struct *mm) 79 + int crst_table_upgrade(struct mm_struct *mm, unsigned long end) 80 80 { 81 81 unsigned long *table, *pgd; 82 + int rc, notify; 82 83 83 - /* upgrade should only happen from 3 to 4 levels */ 84 - BUG_ON(mm->context.asce_limit != (1UL << 42)); 85 - 86 - table = crst_table_alloc(mm); 87 - if (!table) 84 + /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ 85 + BUG_ON(mm->context.asce_limit < (1UL << 42)); 86 + if (end >= TASK_SIZE_MAX) 88 87 return -ENOMEM; 89 - 90 - spin_lock_bh(&mm->page_table_lock); 91 - pgd = (unsigned long *) mm->pgd; 92 - crst_table_init(table, _REGION2_ENTRY_EMPTY); 93 - pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); 94 - mm->pgd = (pgd_t *) table; 95 - mm->context.asce_limit = 1UL << 53; 96 - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 97 - _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 98 - spin_unlock_bh(&mm->page_table_lock); 99 - 100 - on_each_cpu(__crst_table_upgrade, mm, 0); 101 - return 0; 88 + rc = 0; 89 + notify = 0; 90 + while (mm->context.asce_limit < end) { 91 + table = crst_table_alloc(mm); 92 + if (!table) { 93 + rc = -ENOMEM; 94 + break; 95 + } 96 + spin_lock_bh(&mm->page_table_lock); 97 + pgd = (unsigned long *) mm->pgd; 98 + if (mm->context.asce_limit == (1UL << 42)) { 99 + crst_table_init(table, _REGION2_ENTRY_EMPTY); 100 + p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd); 101 + mm->pgd = (pgd_t *) table; 102 + mm->context.asce_limit = 1UL << 53; 103 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 104 + _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 105 + } else { 106 + crst_table_init(table, _REGION1_ENTRY_EMPTY); 107 + pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); 108 + mm->pgd = (pgd_t *) table; 109 + mm->context.asce_limit = -PAGE_SIZE; 110 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 111 + _ASCE_USER_BITS | _ASCE_TYPE_REGION1; 112 + } 113 + notify = 1; 114 + 
spin_unlock_bh(&mm->page_table_lock); 115 + } 116 + if (notify) 117 + on_each_cpu(__crst_table_upgrade, mm, 0); 118 + return rc; 102 119 } 103 120 104 121 void crst_table_downgrade(struct mm_struct *mm) ··· 291 274 struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 292 275 293 276 switch (mask) { 294 - case 0: /* pmd or pud */ 277 + case 0: /* pmd, pud, or p4d */ 295 278 free_pages((unsigned long) table, 2); 296 279 break; 297 280 case 1: /* lower 2K of a 4K page table */
+5 -1
arch/s390/mm/pgtable.c
··· 610 610 { 611 611 spinlock_t *ptl; 612 612 pgd_t *pgd; 613 + p4d_t *p4d; 613 614 pud_t *pud; 614 615 pmd_t *pmd; 615 616 pgste_t pgste; ··· 619 618 bool dirty; 620 619 621 620 pgd = pgd_offset(mm, addr); 622 - pud = pud_alloc(mm, pgd, addr); 621 + p4d = p4d_alloc(mm, pgd, addr); 622 + if (!p4d) 623 + return false; 624 + pud = pud_alloc(mm, p4d, addr); 623 625 if (!pud) 624 626 return false; 625 627 pmd = pmd_alloc(mm, pud, addr);
+39 -5
arch/s390/mm/vmem.c
··· 38 38 return (void *) memblock_alloc(size, size); 39 39 } 40 40 41 + static inline p4d_t *vmem_p4d_alloc(void) 42 + { 43 + p4d_t *p4d = NULL; 44 + 45 + p4d = vmem_alloc_pages(2); 46 + if (!p4d) 47 + return NULL; 48 + clear_table((unsigned long *) p4d, _REGION2_ENTRY_EMPTY, PAGE_SIZE * 4); 49 + return p4d; 50 + } 51 + 41 52 static inline pud_t *vmem_pud_alloc(void) 42 53 { 43 54 pud_t *pud = NULL; ··· 96 85 unsigned long end = start + size; 97 86 unsigned long address = start; 98 87 pgd_t *pg_dir; 88 + p4d_t *p4_dir; 99 89 pud_t *pu_dir; 100 90 pmd_t *pm_dir; 101 91 pte_t *pt_dir; ··· 114 102 while (address < end) { 115 103 pg_dir = pgd_offset_k(address); 116 104 if (pgd_none(*pg_dir)) { 105 + p4_dir = vmem_p4d_alloc(); 106 + if (!p4_dir) 107 + goto out; 108 + pgd_populate(&init_mm, pg_dir, p4_dir); 109 + } 110 + p4_dir = p4d_offset(pg_dir, address); 111 + if (p4d_none(*p4_dir)) { 117 112 pu_dir = vmem_pud_alloc(); 118 113 if (!pu_dir) 119 114 goto out; 120 - pgd_populate(&init_mm, pg_dir, pu_dir); 115 + p4d_populate(&init_mm, p4_dir, pu_dir); 121 116 } 122 - pu_dir = pud_offset(pg_dir, address); 117 + pu_dir = pud_offset(p4_dir, address); 123 118 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && 124 119 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) && 125 120 !debug_pagealloc_enabled()) { ··· 180 161 unsigned long end = start + size; 181 162 unsigned long address = start; 182 163 pgd_t *pg_dir; 164 + p4d_t *p4_dir; 183 165 pud_t *pu_dir; 184 166 pmd_t *pm_dir; 185 167 pte_t *pt_dir; ··· 192 172 address += PGDIR_SIZE; 193 173 continue; 194 174 } 195 - pu_dir = pud_offset(pg_dir, address); 175 + p4_dir = p4d_offset(pg_dir, address); 176 + if (p4d_none(*p4_dir)) { 177 + address += P4D_SIZE; 178 + continue; 179 + } 180 + pu_dir = pud_offset(p4_dir, address); 196 181 if (pud_none(*pu_dir)) { 197 182 address += PUD_SIZE; 198 183 continue; ··· 238 213 unsigned long pgt_prot, sgt_prot; 239 214 unsigned long address = start; 240 215 pgd_t *pg_dir; 216 + 
p4d_t *p4_dir; 241 217 pud_t *pu_dir; 242 218 pmd_t *pm_dir; 243 219 pte_t *pt_dir; ··· 253 227 for (address = start; address < end;) { 254 228 pg_dir = pgd_offset_k(address); 255 229 if (pgd_none(*pg_dir)) { 230 + p4_dir = vmem_p4d_alloc(); 231 + if (!p4_dir) 232 + goto out; 233 + pgd_populate(&init_mm, pg_dir, p4_dir); 234 + } 235 + 236 + p4_dir = p4d_offset(pg_dir, address); 237 + if (p4d_none(*p4_dir)) { 256 238 pu_dir = vmem_pud_alloc(); 257 239 if (!pu_dir) 258 240 goto out; 259 - pgd_populate(&init_mm, pg_dir, pu_dir); 241 + p4d_populate(&init_mm, p4_dir, pu_dir); 260 242 } 261 243 262 - pu_dir = pud_offset(pg_dir, address); 244 + pu_dir = pud_offset(p4_dir, address); 263 245 if (pud_none(*pu_dir)) { 264 246 pm_dir = vmem_pmd_alloc(); 265 247 if (!pm_dir)
+100 -73
arch/s390/pci/pci.c
··· 86 86 return zdev; 87 87 } 88 88 89 + void zpci_remove_reserved_devices(void) 90 + { 91 + struct zpci_dev *tmp, *zdev; 92 + enum zpci_state state; 93 + LIST_HEAD(remove); 94 + 95 + spin_lock(&zpci_list_lock); 96 + list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) { 97 + if (zdev->state == ZPCI_FN_STATE_STANDBY && 98 + !clp_get_state(zdev->fid, &state) && 99 + state == ZPCI_FN_STATE_RESERVED) 100 + list_move_tail(&zdev->entry, &remove); 101 + } 102 + spin_unlock(&zpci_list_lock); 103 + 104 + list_for_each_entry_safe(zdev, tmp, &remove, entry) 105 + zpci_remove_device(zdev); 106 + } 107 + 89 108 static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus) 90 109 { 91 110 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL; ··· 127 108 { 128 109 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); 129 110 struct zpci_fib fib = {0}; 111 + u8 status; 130 112 131 113 fib.isc = PCI_ISC; 132 114 fib.sum = 1; /* enable summary notifications */ ··· 137 117 fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8; 138 118 fib.aisbo = zdev->aisb & 63; 139 119 140 - return zpci_mod_fc(req, &fib); 120 + return zpci_mod_fc(req, &fib, &status) ? 
-EIO : 0; 141 121 } 142 122 143 - struct mod_pci_args { 144 - u64 base; 145 - u64 limit; 146 - u64 iota; 147 - u64 fmb_addr; 148 - }; 149 - 150 - static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args) 123 + /* Modify PCI: Unregister adapter interruptions */ 124 + static int zpci_clear_airq(struct zpci_dev *zdev) 151 125 { 152 - u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn); 126 + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT); 153 127 struct zpci_fib fib = {0}; 128 + u8 cc, status; 154 129 155 - fib.pba = args->base; 156 - fib.pal = args->limit; 157 - fib.iota = args->iota; 158 - fib.fmb_addr = args->fmb_addr; 130 + cc = zpci_mod_fc(req, &fib, &status); 131 + if (cc == 3 || (cc == 1 && status == 24)) 132 + /* Function already gone or IRQs already deregistered. */ 133 + cc = 0; 159 134 160 - return zpci_mod_fc(req, &fib); 135 + return cc ? -EIO : 0; 161 136 } 162 137 163 138 /* Modify PCI: Register I/O address translation parameters */ 164 139 int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas, 165 140 u64 base, u64 limit, u64 iota) 166 141 { 167 - struct mod_pci_args args = { base, limit, iota, 0 }; 142 + u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT); 143 + struct zpci_fib fib = {0}; 144 + u8 status; 168 145 169 146 WARN_ON_ONCE(iota & 0x3fff); 170 - args.iota |= ZPCI_IOTA_RTTO_FLAG; 171 - return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args); 147 + fib.pba = base; 148 + fib.pal = limit; 149 + fib.iota = iota | ZPCI_IOTA_RTTO_FLAG; 150 + return zpci_mod_fc(req, &fib, &status) ? 
-EIO : 0; 172 151 } 173 152 174 153 /* Modify PCI: Unregister I/O address translation parameters */ 175 154 int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas) 176 155 { 177 - struct mod_pci_args args = { 0, 0, 0, 0 }; 156 + u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT); 157 + struct zpci_fib fib = {0}; 158 + u8 cc, status; 178 159 179 - return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args); 180 - } 181 - 182 - /* Modify PCI: Unregister adapter interruptions */ 183 - static int zpci_clear_airq(struct zpci_dev *zdev) 184 - { 185 - struct mod_pci_args args = { 0, 0, 0, 0 }; 186 - 187 - return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args); 160 + cc = zpci_mod_fc(req, &fib, &status); 161 + if (cc == 3) /* Function already gone. */ 162 + cc = 0; 163 + return cc ? -EIO : 0; 188 164 } 189 165 190 166 /* Modify PCI: Set PCI function measurement parameters */ 191 167 int zpci_fmb_enable_device(struct zpci_dev *zdev) 192 168 { 193 - struct mod_pci_args args = { 0, 0, 0, 0 }; 169 + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE); 170 + struct zpci_fib fib = {0}; 171 + u8 cc, status; 194 172 195 173 if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length) 196 174 return -EINVAL; ··· 203 185 atomic64_set(&zdev->mapped_pages, 0); 204 186 atomic64_set(&zdev->unmapped_pages, 0); 205 187 206 - args.fmb_addr = virt_to_phys(zdev->fmb); 207 - return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args); 188 + fib.fmb_addr = virt_to_phys(zdev->fmb); 189 + cc = zpci_mod_fc(req, &fib, &status); 190 + if (cc) { 191 + kmem_cache_free(zdev_fmb_cache, zdev->fmb); 192 + zdev->fmb = NULL; 193 + } 194 + return cc ? 
-EIO : 0; 208 195 } 209 196 210 197 /* Modify PCI: Disable PCI function measurement */ 211 198 int zpci_fmb_disable_device(struct zpci_dev *zdev) 212 199 { 213 - struct mod_pci_args args = { 0, 0, 0, 0 }; 214 - int rc; 200 + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE); 201 + struct zpci_fib fib = {0}; 202 + u8 cc, status; 215 203 216 204 if (!zdev->fmb) 217 205 return -EINVAL; 218 206 219 207 /* Function measurement is disabled if fmb address is zero */ 220 - rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args); 208 + cc = zpci_mod_fc(req, &fib, &status); 209 + if (cc == 3) /* Function already gone. */ 210 + cc = 0; 221 211 222 - kmem_cache_free(zdev_fmb_cache, zdev->fmb); 223 - zdev->fmb = NULL; 224 - return rc; 212 + if (!cc) { 213 + kmem_cache_free(zdev_fmb_cache, zdev->fmb); 214 + zdev->fmb = NULL; 215 + } 216 + return cc ? -EIO : 0; 225 217 } 226 218 227 219 static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) ··· 400 372 struct msi_msg msg; 401 373 int rc, irq; 402 374 375 + zdev->aisb = -1UL; 403 376 if (type == PCI_CAP_ID_MSI && nvec > 1) 404 377 return 1; 405 378 msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); 406 379 407 380 /* Allocate adapter summary indicator bit */ 408 - rc = -EIO; 409 381 aisb = airq_iv_alloc_bit(zpci_aisb_iv); 410 382 if (aisb == -1UL) 411 - goto out; 383 + return -EIO; 412 384 zdev->aisb = aisb; 413 385 414 386 /* Create adapter interrupt vector */ 415 - rc = -ENOMEM; 416 387 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); 417 388 if (!zdev->aibv) 418 - goto out_si; 389 + return -ENOMEM; 419 390 420 391 /* Wire up shortcut pointer */ 421 392 zpci_aibv[aisb] = zdev->aibv; ··· 425 398 rc = -EIO; 426 399 irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ 427 400 if (irq < 0) 428 - goto out_msi; 401 + return -ENOMEM; 429 402 rc = irq_set_msi_desc(irq, msi); 430 403 if (rc) 431 - goto out_msi; 404 + return rc; 432 405 irq_set_chip_and_handler(irq, &zpci_irq_chip, 
433 406 handle_simple_irq); 434 407 msg.data = hwirq; ··· 442 415 /* Enable adapter interrupts */ 443 416 rc = zpci_set_airq(zdev); 444 417 if (rc) 445 - goto out_msi; 418 + return rc; 446 419 447 420 return (msi_vecs == nvec) ? 0 : msi_vecs; 448 - 449 - out_msi: 450 - for_each_pci_msi_entry(msi, pdev) { 451 - if (hwirq-- == 0) 452 - break; 453 - irq_set_msi_desc(msi->irq, NULL); 454 - irq_free_desc(msi->irq); 455 - msi->msg.address_lo = 0; 456 - msi->msg.address_hi = 0; 457 - msi->msg.data = 0; 458 - msi->irq = 0; 459 - } 460 - zpci_aibv[aisb] = NULL; 461 - airq_iv_release(zdev->aibv); 462 - out_si: 463 - airq_iv_free_bit(zpci_aisb_iv, aisb); 464 - out: 465 - return rc; 466 421 } 467 422 468 423 void arch_teardown_msi_irqs(struct pci_dev *pdev) ··· 460 451 461 452 /* Release MSI interrupts */ 462 453 for_each_pci_msi_entry(msi, pdev) { 454 + if (!msi->irq) 455 + continue; 463 456 if (msi->msi_attrib.is_msix) 464 457 __pci_msix_desc_mask_irq(msi, 1); 465 458 else ··· 474 463 msi->irq = 0; 475 464 } 476 465 477 - zpci_aibv[zdev->aisb] = NULL; 478 - airq_iv_release(zdev->aibv); 479 - airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); 466 + if (zdev->aisb != -1UL) { 467 + zpci_aibv[zdev->aisb] = NULL; 468 + airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); 469 + zdev->aisb = -1UL; 470 + } 471 + if (zdev->aibv) { 472 + airq_iv_release(zdev->aibv); 473 + zdev->aibv = NULL; 474 + } 480 475 } 481 476 482 477 static void zpci_map_resources(struct pci_dev *pdev) ··· 736 719 { 737 720 if (zpci_unique_uid) { 738 721 zdev->domain = (u16) zdev->uid; 722 + if (zdev->domain >= ZPCI_NR_DEVICES) 723 + return 0; 724 + 725 + spin_lock(&zpci_domain_lock); 726 + if (test_bit(zdev->domain, zpci_domain)) { 727 + spin_unlock(&zpci_domain_lock); 728 + return -EEXIST; 729 + } 730 + set_bit(zdev->domain, zpci_domain); 731 + spin_unlock(&zpci_domain_lock); 739 732 return 0; 740 733 } 741 734 ··· 762 735 763 736 static void zpci_free_domain(struct zpci_dev *zdev) 764 737 { 765 - if (zpci_unique_uid) 738 + 
if (zdev->domain >= ZPCI_NR_DEVICES) 766 739 return; 767 740 768 741 spin_lock(&zpci_domain_lock); ··· 782 755 list_del(&zdev->entry); 783 756 spin_unlock(&zpci_list_lock); 784 757 758 + zpci_dbg(3, "rem fid:%x\n", zdev->fid); 785 759 kfree(zdev); 786 760 } 787 761 ··· 875 847 return rc; 876 848 } 877 849 878 - void zpci_stop_device(struct zpci_dev *zdev) 850 + void zpci_remove_device(struct zpci_dev *zdev) 879 851 { 880 - zpci_dma_exit_device(zdev); 881 - /* 882 - * Note: SCLP disables fh via set-pci-fn so don't 883 - * do that here. 884 - */ 852 + if (!zdev->bus) 853 + return; 854 + 855 + pci_stop_root_bus(zdev->bus); 856 + pci_remove_root_bus(zdev->bus); 885 857 } 886 - EXPORT_SYMBOL_GPL(zpci_stop_device); 887 858 888 859 int zpci_report_error(struct pci_dev *pdev, 889 860 struct zpci_report_error_header *report)
+47 -30
arch/s390/pci/pci_clp.c
··· 193 193 int clp_add_pci_device(u32 fid, u32 fh, int configured) 194 194 { 195 195 struct zpci_dev *zdev; 196 - int rc; 196 + int rc = -ENOMEM; 197 197 198 198 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); 199 199 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); 200 200 if (!zdev) 201 - return -ENOMEM; 201 + goto error; 202 202 203 203 zdev->fh = fh; 204 204 zdev->fid = fid; ··· 219 219 return 0; 220 220 221 221 error: 222 + zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc); 222 223 kfree(zdev); 223 224 return rc; 224 225 } ··· 296 295 return rc; 297 296 } 298 297 299 - static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, 300 - void (*cb)(struct clp_fh_list_entry *entry)) 298 + static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data, 299 + void (*cb)(struct clp_fh_list_entry *, void *)) 301 300 { 302 301 u64 resume_token = 0; 303 302 int entries, i, rc; ··· 328 327 329 328 resume_token = rrb->response.resume_token; 330 329 for (i = 0; i < entries; i++) 331 - cb(&rrb->response.fh_list[i]); 330 + cb(&rrb->response.fh_list[i], data); 332 331 } while (resume_token); 333 332 out: 334 333 return rc; 335 334 } 336 335 337 - static void __clp_add(struct clp_fh_list_entry *entry) 338 - { 339 - if (!entry->vendor_id) 340 - return; 341 - 342 - clp_add_pci_device(entry->fid, entry->fh, entry->config_state); 343 - } 344 - 345 - static void __clp_rescan(struct clp_fh_list_entry *entry) 336 + static void __clp_add(struct clp_fh_list_entry *entry, void *data) 346 337 { 347 338 struct zpci_dev *zdev; 348 339 ··· 342 349 return; 343 350 344 351 zdev = get_zdev_by_fid(entry->fid); 345 - if (!zdev) { 352 + if (!zdev) 346 353 clp_add_pci_device(entry->fid, entry->fh, entry->config_state); 347 - return; 348 - } 349 - 350 - if (!entry->config_state) { 351 - /* 352 - * The handle is already disabled, that means no iota/irq freeing via 353 - * the firmware interfaces anymore. Need to free resources manually 354 - * (DMA memory, debug, sysfs)... 
355 - */ 356 - zpci_stop_device(zdev); 357 - } 358 354 } 359 355 360 - static void __clp_update(struct clp_fh_list_entry *entry) 356 + static void __clp_update(struct clp_fh_list_entry *entry, void *data) 361 357 { 362 358 struct zpci_dev *zdev; 363 359 ··· 369 387 if (!rrb) 370 388 return -ENOMEM; 371 389 372 - rc = clp_list_pci(rrb, __clp_add); 390 + rc = clp_list_pci(rrb, NULL, __clp_add); 373 391 374 392 clp_free_block(rrb); 375 393 return rc; ··· 380 398 struct clp_req_rsp_list_pci *rrb; 381 399 int rc; 382 400 401 + zpci_remove_reserved_devices(); 402 + 383 403 rrb = clp_alloc_block(GFP_KERNEL); 384 404 if (!rrb) 385 405 return -ENOMEM; 386 406 387 - rc = clp_list_pci(rrb, __clp_rescan); 407 + rc = clp_list_pci(rrb, NULL, __clp_add); 388 408 389 409 clp_free_block(rrb); 390 410 return rc; ··· 401 417 if (!rrb) 402 418 return -ENOMEM; 403 419 404 - rc = clp_list_pci(rrb, __clp_update); 420 + rc = clp_list_pci(rrb, NULL, __clp_update); 421 + 422 + clp_free_block(rrb); 423 + return rc; 424 + } 425 + 426 + struct clp_state_data { 427 + u32 fid; 428 + enum zpci_state state; 429 + }; 430 + 431 + static void __clp_get_state(struct clp_fh_list_entry *entry, void *data) 432 + { 433 + struct clp_state_data *sd = data; 434 + 435 + if (entry->fid != sd->fid) 436 + return; 437 + 438 + sd->state = entry->config_state; 439 + } 440 + 441 + int clp_get_state(u32 fid, enum zpci_state *state) 442 + { 443 + struct clp_req_rsp_list_pci *rrb; 444 + struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED}; 445 + int rc; 446 + 447 + rrb = clp_alloc_block(GFP_KERNEL); 448 + if (!rrb) 449 + return -ENOMEM; 450 + 451 + rc = clp_list_pci(rrb, &sd, __clp_get_state); 452 + if (!rc) 453 + *state = sd.state; 405 454 406 455 clp_free_block(rrb); 407 456 return rc;
+3 -1
arch/s390/pci/pci_dma.c
··· 601 601 */ 602 602 WARN_ON(zdev->s390_domain); 603 603 604 - zpci_unregister_ioat(zdev, 0); 604 + if (zpci_unregister_ioat(zdev, 0)) 605 + return; 606 + 605 607 dma_cleanup_tables(zdev->dma_table); 606 608 zdev->dma_table = NULL; 607 609 vfree(zdev->iommu_bitmap);
+11 -3
arch/s390/pci/pci_event.c
··· 74 74 { 75 75 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); 76 76 struct pci_dev *pdev = NULL; 77 + enum zpci_state state; 77 78 int ret; 78 79 79 80 if (zdev) ··· 109 108 clp_add_pci_device(ccdf->fid, ccdf->fh, 0); 110 109 break; 111 110 case 0x0303: /* Deconfiguration requested */ 111 + if (!zdev) 112 + break; 112 113 if (pdev) 113 114 pci_stop_and_remove_bus_device_locked(pdev); 114 115 ··· 124 121 zdev->state = ZPCI_FN_STATE_STANDBY; 125 122 126 123 break; 127 - case 0x0304: /* Configured -> Standby */ 124 + case 0x0304: /* Configured -> Standby|Reserved */ 125 + if (!zdev) 126 + break; 128 127 if (pdev) { 129 128 /* Give the driver a hint that the function is 130 129 * already unusable. */ ··· 137 132 zdev->fh = ccdf->fh; 138 133 zpci_disable_device(zdev); 139 134 zdev->state = ZPCI_FN_STATE_STANDBY; 135 + if (!clp_get_state(ccdf->fid, &state) && 136 + state == ZPCI_FN_STATE_RESERVED) { 137 + zpci_remove_device(zdev); 138 + } 140 139 break; 141 140 case 0x0306: /* 0x308 or 0x302 for multiple devices */ 142 141 clp_rescan_pci_devices(); ··· 148 139 case 0x0308: /* Standby -> Reserved */ 149 140 if (!zdev) 150 141 break; 151 - pci_stop_root_bus(zdev->bus); 152 - pci_remove_root_bus(zdev->bus); 142 + zpci_remove_device(zdev); 153 143 break; 154 144 default: 155 145 break;
+5 -5
arch/s390/pci/pci_insn.c
··· 40 40 return cc; 41 41 } 42 42 43 - int zpci_mod_fc(u64 req, struct zpci_fib *fib) 43 + u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status) 44 44 { 45 - u8 cc, status; 45 + u8 cc; 46 46 47 47 do { 48 - cc = __mpcifc(req, fib, &status); 48 + cc = __mpcifc(req, fib, status); 49 49 if (cc == 2) 50 50 msleep(ZPCI_INSN_BUSY_DELAY); 51 51 } while (cc == 2); 52 52 53 53 if (cc) 54 - zpci_err_insn(cc, status, req, 0); 54 + zpci_err_insn(cc, *status, req, 0); 55 55 56 - return (cc) ? -EIO : 0; 56 + return cc; 57 57 } 58 58 59 59 /* Refresh PCI Translations */
-2
arch/s390/tools/gen_facilities.c
··· 34 34 18, /* long displacement facility */ 35 35 #endif 36 36 #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 37 - 7, /* stfle */ 38 - 17, /* message security assist */ 39 37 21, /* extended-immediate facility */ 40 38 25, /* store clock fast */ 41 39 #endif
+14 -1
drivers/crypto/Kconfig
··· 89 89 requires to have at least one CEX card in coprocessor mode 90 90 available at runtime. 91 91 92 + config CRYPTO_PAES_S390 93 + tristate "PAES cipher algorithms" 94 + depends on S390 95 + depends on ZCRYPT 96 + depends on PKEY 97 + select CRYPTO_ALGAPI 98 + select CRYPTO_BLKCIPHER 99 + help 100 + This is the s390 hardware accelerated implementation of the 101 + AES cipher algorithms for use with protected key. 102 + 103 + Select this option if you want to use the paes cipher 104 + for example to use protected key encrypted devices. 105 + 92 106 config CRYPTO_SHA1_S390 93 107 tristate "SHA1 digest algorithm" 94 108 depends on S390 ··· 151 137 depends on S390 152 138 select CRYPTO_ALGAPI 153 139 select CRYPTO_BLKCIPHER 154 - select PKEY 155 140 help 156 141 This is the s390 hardware accelerated implementation of the 157 142 AES cipher algorithms (FIPS-197).
-7
drivers/s390/block/Kconfig
··· 82 82 83 83 To compile this driver as a module, choose M here: the 84 84 module will be called scm_block. 85 - 86 - config SCM_BLOCK_CLUSTER_WRITE 87 - def_bool y 88 - prompt "SCM force cluster writes" 89 - depends on SCM_BLOCK 90 - help 91 - Force writes to Storage Class Memory (SCM) to be in done in clusters.
-3
drivers/s390/block/Makefile
··· 19 19 obj-$(CONFIG_DCSSBLK) += dcssblk.o 20 20 21 21 scm_block-objs := scm_drv.o scm_blk.o 22 - ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE 23 - scm_block-objs += scm_blk_cluster.o 24 - endif 25 22 obj-$(CONFIG_SCM_BLOCK) += scm_block.o
+44 -32
drivers/s390/block/dasd.c
··· 1965 1965 { 1966 1966 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); 1967 1967 1968 - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 1969 - /* dasd is being set offline. */ 1968 + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 1969 + !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1970 + /* 1971 + * dasd is being set offline 1972 + * but it is no safe offline where we have to allow I/O 1973 + */ 1970 1974 return 1; 1971 1975 } 1972 1976 if (device->stopped) { ··· 3574 3570 else 3575 3571 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3576 3572 dev_name(&cdev->dev)); 3577 - clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3578 - goto out_busy; 3573 + rc = -EBUSY; 3574 + goto out_err; 3579 3575 } 3580 3576 } 3581 3577 3582 - if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3583 - /* 3584 - * safe offline already running 3585 - * could only be called by normal offline so safe_offline flag 3586 - * needs to be removed to run normal offline and kill all I/O 3587 - */ 3588 - if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) 3589 - /* Already doing normal offline processing */ 3590 - goto out_busy; 3591 - else 3592 - clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3593 - } else { 3594 - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) 3595 - /* Already doing offline processing */ 3596 - goto out_busy; 3578 + /* 3579 + * Test if the offline processing is already running and exit if so. 
3580 + * If a safe offline is being processed this could only be a normal 3581 + * offline that should be able to overtake the safe offline and 3582 + * cancel any I/O we do not want to wait for any longer 3583 + */ 3584 + if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3585 + if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3586 + clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3587 + &device->flags); 3588 + } else { 3589 + rc = -EBUSY; 3590 + goto out_err; 3591 + } 3597 3592 } 3598 - 3599 3593 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3600 - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3601 3594 3602 3595 /* 3603 - * if safe_offline called set safe_offline_running flag and 3596 + * if safe_offline is called set safe_offline_running flag and 3604 3597 * clear safe_offline so that a call to normal offline 3605 3598 * can overrun safe_offline processing 3606 3599 */ 3607 3600 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3608 3601 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3602 + /* need to unlock here to wait for outstanding I/O */ 3603 + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3609 3604 /* 3610 3605 * If we want to set the device safe offline all IO operations 3611 3606 * should be finished before continuing the offline process 3612 3607 * so sync bdev first and then wait for our queues to become 3613 3608 * empty 3614 3609 */ 3615 - /* sync blockdev and partitions */ 3616 3610 if (device->block) { 3617 3611 rc = fsync_bdev(device->block->bdev); 3618 3612 if (rc != 0) 3619 3613 goto interrupted; 3620 3614 } 3621 - /* schedule device tasklet and wait for completion */ 3622 3615 dasd_schedule_device_bh(device); 3623 3616 rc = wait_event_interruptible(shutdown_waitq, 3624 3617 _wait_for_empty_queues(device)); 3625 3618 if (rc != 0) 3626 3619 goto interrupted; 3620 + 3621 + /* 3622 + * check if a normal offline process overtook the offline 3623 + * processing in this case simply do 
nothing beside returning 3624 + * that we got interrupted 3625 + * otherwise mark safe offline as not running any longer and 3626 + * continue with normal offline 3627 + */ 3628 + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3629 + if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3630 + rc = -ERESTARTSYS; 3631 + goto out_err; 3632 + } 3633 + clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3627 3634 } 3635 + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3628 3636 3629 3637 dasd_set_target_state(device, DASD_STATE_NEW); 3630 3638 /* dasd_delete_device destroys the device reference. */ ··· 3648 3632 */ 3649 3633 if (block) 3650 3634 dasd_free_block(block); 3635 + 3651 3636 return 0; 3652 3637 3653 3638 interrupted: 3654 3639 /* interrupted by signal */ 3655 - clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3640 + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3656 3641 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3657 3642 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3658 - dasd_put_device(device); 3659 - 3660 - return rc; 3661 - 3662 - out_busy: 3643 + out_err: 3663 3644 dasd_put_device(device); 3664 3645 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3665 - 3666 - return -EBUSY; 3646 + return rc; 3667 3647 } 3668 3648 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3669 3649
+51 -24
drivers/s390/block/dasd_devmap.c
··· 315 315 char *features_str = NULL; 316 316 char *from_str = NULL; 317 317 char *to_str = NULL; 318 - size_t len = strlen(range) + 1; 319 - char tmp[len]; 318 + int rc = 0; 319 + char *tmp; 320 320 321 - strlcpy(tmp, range, len); 321 + tmp = kstrdup(range, GFP_KERNEL); 322 + if (!tmp) 323 + return -ENOMEM; 322 324 323 - if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) 324 - goto out_err; 325 + if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) { 326 + rc = -EINVAL; 327 + goto out; 328 + } 325 329 326 - if (dasd_busid(from_str, &from_id0, &from_id1, &from)) 327 - goto out_err; 330 + if (dasd_busid(from_str, &from_id0, &from_id1, &from)) { 331 + rc = -EINVAL; 332 + goto out; 333 + } 328 334 329 335 to = from; 330 336 to_id0 = from_id0; 331 337 to_id1 = from_id1; 332 338 if (to_str) { 333 - if (dasd_busid(to_str, &to_id0, &to_id1, &to)) 334 - goto out_err; 339 + if (dasd_busid(to_str, &to_id0, &to_id1, &to)) { 340 + rc = -EINVAL; 341 + goto out; 342 + } 335 343 if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) { 336 344 pr_err("%s is not a valid device range\n", range); 337 - goto out_err; 345 + rc = -EINVAL; 346 + goto out; 338 347 } 339 348 } 340 349 341 350 features = dasd_feature_list(features_str); 342 - if (features < 0) 343 - goto out_err; 351 + if (features < 0) { 352 + rc = -EINVAL; 353 + goto out; 354 + } 344 355 /* each device in dasd= parameter should be set initially online */ 345 356 features |= DASD_FEATURE_INITIAL_ONLINE; 346 357 while (from <= to) { 347 358 sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++); 348 359 devmap = dasd_add_busid(bus_id, features); 349 - if (IS_ERR(devmap)) 350 - return PTR_ERR(devmap); 360 + if (IS_ERR(devmap)) { 361 + rc = PTR_ERR(devmap); 362 + goto out; 363 + } 351 364 } 352 365 353 - return 0; 366 + out: 367 + kfree(tmp); 354 368 355 - out_err: 356 - return -EINVAL; 369 + return rc; 357 370 } 358 371 359 372 /* ··· 748 735 dasd_ro_show(struct device *dev, 
struct device_attribute *attr, char *buf) 749 736 { 750 737 struct dasd_devmap *devmap; 751 - int ro_flag; 738 + struct dasd_device *device; 739 + int ro_flag = 0; 752 740 753 741 devmap = dasd_find_busid(dev_name(dev)); 754 - if (!IS_ERR(devmap)) 755 - ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0; 756 - else 757 - ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0; 742 + if (IS_ERR(devmap)) 743 + goto out; 744 + 745 + ro_flag = !!(devmap->features & DASD_FEATURE_READONLY); 746 + 747 + spin_lock(&dasd_devmap_lock); 748 + device = devmap->device; 749 + if (device) 750 + ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags); 751 + spin_unlock(&dasd_devmap_lock); 752 + 753 + out: 758 754 return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n"); 759 755 } 760 756 ··· 786 764 787 765 device = dasd_device_from_cdev(cdev); 788 766 if (IS_ERR(device)) 789 - return PTR_ERR(device); 767 + return count; 790 768 791 769 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 792 770 val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags); ··· 950 928 { 951 929 struct ccw_device *cdev = to_ccwdev(dev); 952 930 struct dasd_device *device; 931 + unsigned long flags; 953 932 int rc; 954 933 955 - device = dasd_device_from_cdev(cdev); 934 + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 935 + device = dasd_device_from_cdev_locked(cdev); 956 936 if (IS_ERR(device)) { 957 937 rc = PTR_ERR(device); 938 + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 958 939 goto out; 959 940 } 960 941 ··· 965 940 test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 966 941 /* Already doing offline processing */ 967 942 dasd_put_device(device); 943 + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 968 944 rc = -EBUSY; 969 945 goto out; 970 946 } 971 947 972 948 set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 973 949 dasd_put_device(device); 950 + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 974 951 975 952 rc = ccw_device_set_offline(cdev); 976 953
+139 -165
drivers/s390/block/scm_blk.c
··· 13 13 #include <linux/mempool.h> 14 14 #include <linux/module.h> 15 15 #include <linux/blkdev.h> 16 + #include <linux/blk-mq.h> 16 17 #include <linux/genhd.h> 17 18 #include <linux/slab.h> 18 19 #include <linux/list.h> ··· 43 42 struct aob_rq_header *aobrq = to_aobrq(scmrq); 44 43 45 44 free_page((unsigned long) scmrq->aob); 46 - __scm_free_rq_cluster(scmrq); 47 45 kfree(scmrq->request); 48 46 kfree(aobrq); 49 47 } ··· 82 82 if (!scmrq->request) 83 83 goto free; 84 84 85 - if (__scm_alloc_rq_cluster(scmrq)) 86 - goto free; 87 - 88 85 INIT_LIST_HEAD(&scmrq->list); 89 86 spin_lock_irq(&list_lock); 90 87 list_add(&scmrq->list, &inactive_requests); ··· 111 114 { 112 115 struct scm_request *scmrq = NULL; 113 116 114 - spin_lock(&list_lock); 117 + spin_lock_irq(&list_lock); 115 118 if (list_empty(&inactive_requests)) 116 119 goto out; 117 120 scmrq = list_first_entry(&inactive_requests, struct scm_request, list); 118 121 list_del(&scmrq->list); 119 122 out: 120 - spin_unlock(&list_lock); 123 + spin_unlock_irq(&list_lock); 121 124 return scmrq; 122 125 } 123 126 ··· 231 234 scmrq->error = BLK_STS_OK; 232 235 /* We don't use all msbs - place aidaws at the end of the aob page. */ 233 236 scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; 234 - scm_request_cluster_init(scmrq); 235 237 } 236 238 237 - static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) 239 + static void scm_request_requeue(struct scm_request *scmrq) 238 240 { 239 - if (atomic_read(&bdev->queued_reqs)) { 240 - /* Queue restart is triggered by the next interrupt. 
*/ 241 - return; 241 + struct scm_blk_dev *bdev = scmrq->bdev; 242 + int i; 243 + 244 + for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) 245 + blk_mq_requeue_request(scmrq->request[i], false); 246 + 247 + atomic_dec(&bdev->queued_reqs); 248 + scm_request_done(scmrq); 249 + blk_mq_kick_requeue_list(bdev->rq); 250 + } 251 + 252 + static void scm_request_finish(struct scm_request *scmrq) 253 + { 254 + struct scm_blk_dev *bdev = scmrq->bdev; 255 + int i; 256 + 257 + for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { 258 + if (scmrq->error) 259 + blk_mq_end_request(scmrq->request[i], scmrq->error); 260 + else 261 + blk_mq_complete_request(scmrq->request[i]); 242 262 } 243 - blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); 244 - } 245 - 246 - void scm_request_requeue(struct scm_request *scmrq) 247 - { 248 - struct scm_blk_dev *bdev = scmrq->bdev; 249 - int i; 250 - 251 - scm_release_cluster(scmrq); 252 - for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) 253 - blk_requeue_request(bdev->rq, scmrq->request[i]); 254 - 255 - atomic_dec(&bdev->queued_reqs); 256 - scm_request_done(scmrq); 257 - scm_ensure_queue_restart(bdev); 258 - } 259 - 260 - void scm_request_finish(struct scm_request *scmrq) 261 - { 262 - struct scm_blk_dev *bdev = scmrq->bdev; 263 - int i; 264 - 265 - scm_release_cluster(scmrq); 266 - for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) 267 - blk_end_request_all(scmrq->request[i], scmrq->error); 268 263 269 264 atomic_dec(&bdev->queued_reqs); 270 265 scm_request_done(scmrq); 271 266 } 272 267 273 - static int scm_request_start(struct scm_request *scmrq) 268 + static void scm_request_start(struct scm_request *scmrq) 274 269 { 275 270 struct scm_blk_dev *bdev = scmrq->bdev; 276 - int ret; 277 271 278 272 atomic_inc(&bdev->queued_reqs); 279 - if (!scmrq->aob->request.msb_count) { 280 - scm_request_requeue(scmrq); 281 - return -EINVAL; 282 - } 283 - 284 - ret = eadm_start_aob(scmrq->aob); 285 - if (ret) { 273 + if 
(eadm_start_aob(scmrq->aob)) { 286 274 SCM_LOG(5, "no subchannel"); 287 275 scm_request_requeue(scmrq); 288 276 } 289 - return ret; 290 277 } 291 278 292 - static void scm_blk_request(struct request_queue *rq) 279 + struct scm_queue { 280 + struct scm_request *scmrq; 281 + spinlock_t lock; 282 + }; 283 + 284 + static int scm_blk_request(struct blk_mq_hw_ctx *hctx, 285 + const struct blk_mq_queue_data *qd) 293 286 { 294 - struct scm_device *scmdev = rq->queuedata; 287 + struct scm_device *scmdev = hctx->queue->queuedata; 295 288 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); 296 - struct scm_request *scmrq = NULL; 297 - struct request *req; 289 + struct scm_queue *sq = hctx->driver_data; 290 + struct request *req = qd->rq; 291 + struct scm_request *scmrq; 298 292 299 - while ((req = blk_peek_request(rq))) { 300 - if (!scm_permit_request(bdev, req)) 301 - goto out; 302 - 303 - if (!scmrq) { 304 - scmrq = scm_request_fetch(); 305 - if (!scmrq) { 306 - SCM_LOG(5, "no request"); 307 - goto out; 308 - } 309 - scm_request_init(bdev, scmrq); 310 - } 311 - scm_request_set(scmrq, req); 312 - 313 - if (!scm_reserve_cluster(scmrq)) { 314 - SCM_LOG(5, "cluster busy"); 315 - scm_request_set(scmrq, NULL); 316 - if (scmrq->aob->request.msb_count) 317 - goto out; 318 - 319 - scm_request_done(scmrq); 320 - return; 321 - } 322 - 323 - if (scm_need_cluster_request(scmrq)) { 324 - if (scmrq->aob->request.msb_count) { 325 - /* Start cluster requests separately. 
*/ 326 - scm_request_set(scmrq, NULL); 327 - if (scm_request_start(scmrq)) 328 - return; 329 - } else { 330 - atomic_inc(&bdev->queued_reqs); 331 - blk_start_request(req); 332 - scm_initiate_cluster_request(scmrq); 333 - } 334 - scmrq = NULL; 335 - continue; 336 - } 337 - 338 - if (scm_request_prepare(scmrq)) { 339 - SCM_LOG(5, "aidaw alloc failed"); 340 - scm_request_set(scmrq, NULL); 341 - goto out; 342 - } 343 - blk_start_request(req); 344 - 345 - if (scmrq->aob->request.msb_count < nr_requests_per_io) 346 - continue; 347 - 348 - if (scm_request_start(scmrq)) 349 - return; 350 - 351 - scmrq = NULL; 293 + spin_lock(&sq->lock); 294 + if (!scm_permit_request(bdev, req)) { 295 + spin_unlock(&sq->lock); 296 + return BLK_MQ_RQ_QUEUE_BUSY; 352 297 } 353 - out: 354 - if (scmrq) 298 + 299 + scmrq = sq->scmrq; 300 + if (!scmrq) { 301 + scmrq = scm_request_fetch(); 302 + if (!scmrq) { 303 + SCM_LOG(5, "no request"); 304 + spin_unlock(&sq->lock); 305 + return BLK_MQ_RQ_QUEUE_BUSY; 306 + } 307 + scm_request_init(bdev, scmrq); 308 + sq->scmrq = scmrq; 309 + } 310 + scm_request_set(scmrq, req); 311 + 312 + if (scm_request_prepare(scmrq)) { 313 + SCM_LOG(5, "aidaw alloc failed"); 314 + scm_request_set(scmrq, NULL); 315 + 316 + if (scmrq->aob->request.msb_count) 317 + scm_request_start(scmrq); 318 + 319 + sq->scmrq = NULL; 320 + spin_unlock(&sq->lock); 321 + return BLK_MQ_RQ_QUEUE_BUSY; 322 + } 323 + blk_mq_start_request(req); 324 + 325 + if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) { 355 326 scm_request_start(scmrq); 356 - else 357 - scm_ensure_queue_restart(bdev); 327 + sq->scmrq = NULL; 328 + } 329 + spin_unlock(&sq->lock); 330 + return BLK_MQ_RQ_QUEUE_OK; 331 + } 332 + 333 + static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 334 + unsigned int idx) 335 + { 336 + struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL); 337 + 338 + if (!qd) 339 + return -ENOMEM; 340 + 341 + spin_lock_init(&qd->lock); 342 + hctx->driver_data = qd; 343 + 
344 + return 0; 345 + } 346 + 347 + static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 348 + { 349 + struct scm_queue *qd = hctx->driver_data; 350 + 351 + WARN_ON(qd->scmrq); 352 + kfree(hctx->driver_data); 353 + hctx->driver_data = NULL; 358 354 } 359 355 360 356 static void __scmrq_log_error(struct scm_request *scmrq) ··· 365 375 else 366 376 pr_err("An I/O operation to SCM failed with rc=%d\n", 367 377 scmrq->error); 368 - } 369 - 370 - void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error) 371 - { 372 - struct scm_request *scmrq = data; 373 - struct scm_blk_dev *bdev = scmrq->bdev; 374 - 375 - scmrq->error = error; 376 - if (error) 377 - __scmrq_log_error(scmrq); 378 - 379 - spin_lock(&bdev->lock); 380 - list_add_tail(&scmrq->list, &bdev->finished_requests); 381 - spin_unlock(&bdev->lock); 382 - tasklet_hi_schedule(&bdev->tasklet); 383 378 } 384 379 385 380 static void scm_blk_handle_error(struct scm_request *scmrq) ··· 394 419 return; 395 420 396 421 requeue: 397 - spin_lock_irqsave(&bdev->rq_lock, flags); 398 422 scm_request_requeue(scmrq); 399 - spin_unlock_irqrestore(&bdev->rq_lock, flags); 400 423 } 401 424 402 - static void scm_blk_tasklet(struct scm_blk_dev *bdev) 425 + void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error) 403 426 { 404 - struct scm_request *scmrq; 405 - unsigned long flags; 427 + struct scm_request *scmrq = data; 406 428 407 - spin_lock_irqsave(&bdev->lock, flags); 408 - while (!list_empty(&bdev->finished_requests)) { 409 - scmrq = list_first_entry(&bdev->finished_requests, 410 - struct scm_request, list); 411 - list_del(&scmrq->list); 412 - spin_unlock_irqrestore(&bdev->lock, flags); 413 - 414 - if (scmrq->error && scmrq->retries-- > 0) { 429 + scmrq->error = error; 430 + if (error) { 431 + __scmrq_log_error(scmrq); 432 + if (scmrq->retries-- > 0) { 415 433 scm_blk_handle_error(scmrq); 416 - 417 - /* Request restarted or requeued, handle next. 
*/ 418 - spin_lock_irqsave(&bdev->lock, flags); 419 - continue; 434 + return; 420 435 } 421 - 422 - if (scm_test_cluster_request(scmrq)) { 423 - scm_cluster_request_irq(scmrq); 424 - spin_lock_irqsave(&bdev->lock, flags); 425 - continue; 426 - } 427 - 428 - scm_request_finish(scmrq); 429 - spin_lock_irqsave(&bdev->lock, flags); 430 436 } 431 - spin_unlock_irqrestore(&bdev->lock, flags); 432 - /* Look out for more requests. */ 433 - blk_run_queue(bdev->rq); 437 + 438 + scm_request_finish(scmrq); 439 + } 440 + 441 + static void scm_blk_request_done(struct request *req) 442 + { 443 + blk_mq_end_request(req, 0); 434 444 } 435 445 436 446 static const struct block_device_operations scm_blk_devops = { 437 447 .owner = THIS_MODULE, 438 448 }; 439 449 450 + static const struct blk_mq_ops scm_mq_ops = { 451 + .queue_rq = scm_blk_request, 452 + .complete = scm_blk_request_done, 453 + .init_hctx = scm_blk_init_hctx, 454 + .exit_hctx = scm_blk_exit_hctx, 455 + }; 456 + 440 457 int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) 441 458 { 442 - struct request_queue *rq; 443 - int len, ret = -ENOMEM; 444 459 unsigned int devindex, nr_max_blk; 460 + struct request_queue *rq; 461 + int len, ret; 445 462 446 463 devindex = atomic_inc_return(&nr_devices) - 1; 447 464 /* scma..scmz + scmaa..scmzz */ ··· 444 477 445 478 bdev->scmdev = scmdev; 446 479 bdev->state = SCM_OPER; 447 - spin_lock_init(&bdev->rq_lock); 448 480 spin_lock_init(&bdev->lock); 449 - INIT_LIST_HEAD(&bdev->finished_requests); 450 481 atomic_set(&bdev->queued_reqs, 0); 451 - tasklet_init(&bdev->tasklet, 452 - (void (*)(unsigned long)) scm_blk_tasklet, 453 - (unsigned long) bdev); 454 482 455 - rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); 456 - if (!rq) 483 + bdev->tag_set.ops = &scm_mq_ops; 484 + bdev->tag_set.nr_hw_queues = nr_requests; 485 + bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; 486 + bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 487 + 488 + ret = 
blk_mq_alloc_tag_set(&bdev->tag_set); 489 + if (ret) 457 490 goto out; 458 491 492 + rq = blk_mq_init_queue(&bdev->tag_set); 493 + if (IS_ERR(rq)) { 494 + ret = PTR_ERR(rq); 495 + goto out_tag; 496 + } 459 497 bdev->rq = rq; 460 498 nr_max_blk = min(scmdev->nr_max_block, 461 499 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); ··· 470 498 blk_queue_max_segments(rq, nr_max_blk); 471 499 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq); 472 500 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq); 473 - scm_blk_dev_cluster_setup(bdev); 474 501 475 502 bdev->gendisk = alloc_disk(SCM_NR_PARTS); 476 - if (!bdev->gendisk) 503 + if (!bdev->gendisk) { 504 + ret = -ENOMEM; 477 505 goto out_queue; 478 - 506 + } 479 507 rq->queuedata = scmdev; 480 508 bdev->gendisk->private_data = scmdev; 481 509 bdev->gendisk->fops = &scm_blk_devops; ··· 500 528 501 529 out_queue: 502 530 blk_cleanup_queue(rq); 531 + out_tag: 532 + blk_mq_free_tag_set(&bdev->tag_set); 503 533 out: 504 534 atomic_dec(&nr_devices); 505 535 return ret; ··· 509 535 510 536 void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) 511 537 { 512 - tasklet_kill(&bdev->tasklet); 513 538 del_gendisk(bdev->gendisk); 514 539 blk_cleanup_queue(bdev->gendisk->queue); 540 + blk_mq_free_tag_set(&bdev->tag_set); 515 541 put_disk(bdev->gendisk); 516 542 } 517 543 ··· 532 558 if (!nr_requests_per_io || nr_requests_per_io > 64) 533 559 return false; 534 560 535 - return scm_cluster_size_valid(); 561 + return true; 536 562 } 537 563 538 564 static int __init scm_blk_init(void)
+3 -57
drivers/s390/block/scm_blk.h
··· 4 4 #include <linux/interrupt.h> 5 5 #include <linux/spinlock.h> 6 6 #include <linux/blkdev.h> 7 + #include <linux/blk-mq.h> 7 8 #include <linux/genhd.h> 8 9 #include <linux/list.h> 9 10 ··· 15 14 #define SCM_QUEUE_DELAY 5 16 15 17 16 struct scm_blk_dev { 18 - struct tasklet_struct tasklet; 19 17 struct request_queue *rq; 20 18 struct gendisk *gendisk; 19 + struct blk_mq_tag_set tag_set; 21 20 struct scm_device *scmdev; 22 - spinlock_t rq_lock; /* guard the request queue */ 23 - spinlock_t lock; /* guard the rest of the blockdev */ 21 + spinlock_t lock; 24 22 atomic_t queued_reqs; 25 23 enum {SCM_OPER, SCM_WR_PROHIBIT} state; 26 24 struct list_head finished_requests; 27 - #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE 28 - struct list_head cluster_list; 29 - #endif 30 25 }; 31 26 32 27 struct scm_request { ··· 33 36 struct list_head list; 34 37 u8 retries; 35 38 blk_status_t error; 36 - #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE 37 - struct { 38 - enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state; 39 - struct list_head list; 40 - void **buf; 41 - } cluster; 42 - #endif 43 39 }; 44 40 45 41 #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data) ··· 42 52 void scm_blk_set_available(struct scm_blk_dev *); 43 53 void scm_blk_irq(struct scm_device *, void *, blk_status_t); 44 54 45 - void scm_request_finish(struct scm_request *); 46 - void scm_request_requeue(struct scm_request *); 47 - 48 55 struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes); 49 56 50 57 int scm_drv_init(void); 51 58 void scm_drv_cleanup(void); 52 - 53 - #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE 54 - void __scm_free_rq_cluster(struct scm_request *); 55 - int __scm_alloc_rq_cluster(struct scm_request *); 56 - void scm_request_cluster_init(struct scm_request *); 57 - bool scm_reserve_cluster(struct scm_request *); 58 - void scm_release_cluster(struct scm_request *); 59 - void scm_blk_dev_cluster_setup(struct scm_blk_dev *); 60 - bool scm_need_cluster_request(struct 
scm_request *); 61 - void scm_initiate_cluster_request(struct scm_request *); 62 - void scm_cluster_request_irq(struct scm_request *); 63 - bool scm_test_cluster_request(struct scm_request *); 64 - bool scm_cluster_size_valid(void); 65 - #else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ 66 - static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {} 67 - static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq) 68 - { 69 - return 0; 70 - } 71 - static inline void scm_request_cluster_init(struct scm_request *scmrq) {} 72 - static inline bool scm_reserve_cluster(struct scm_request *scmrq) 73 - { 74 - return true; 75 - } 76 - static inline void scm_release_cluster(struct scm_request *scmrq) {} 77 - static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {} 78 - static inline bool scm_need_cluster_request(struct scm_request *scmrq) 79 - { 80 - return false; 81 - } 82 - static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {} 83 - static inline void scm_cluster_request_irq(struct scm_request *scmrq) {} 84 - static inline bool scm_test_cluster_request(struct scm_request *scmrq) 85 - { 86 - return false; 87 - } 88 - static inline bool scm_cluster_size_valid(void) 89 - { 90 - return true; 91 - } 92 - #endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ 93 59 94 60 extern debug_info_t *scm_debug; 95 61
-255
drivers/s390/block/scm_blk_cluster.c
··· 1 - /* 2 - * Block driver for s390 storage class memory. 3 - * 4 - * Copyright IBM Corp. 2012 5 - * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> 6 - */ 7 - 8 - #include <linux/spinlock.h> 9 - #include <linux/module.h> 10 - #include <linux/blkdev.h> 11 - #include <linux/genhd.h> 12 - #include <linux/slab.h> 13 - #include <linux/list.h> 14 - #include <asm/eadm.h> 15 - #include "scm_blk.h" 16 - 17 - static unsigned int write_cluster_size = 64; 18 - module_param(write_cluster_size, uint, S_IRUGO); 19 - MODULE_PARM_DESC(write_cluster_size, 20 - "Number of pages used for contiguous writes."); 21 - 22 - #define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE) 23 - 24 - void __scm_free_rq_cluster(struct scm_request *scmrq) 25 - { 26 - int i; 27 - 28 - if (!scmrq->cluster.buf) 29 - return; 30 - 31 - for (i = 0; i < 2 * write_cluster_size; i++) 32 - free_page((unsigned long) scmrq->cluster.buf[i]); 33 - 34 - kfree(scmrq->cluster.buf); 35 - } 36 - 37 - int __scm_alloc_rq_cluster(struct scm_request *scmrq) 38 - { 39 - int i; 40 - 41 - scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size, 42 - GFP_KERNEL); 43 - if (!scmrq->cluster.buf) 44 - return -ENOMEM; 45 - 46 - for (i = 0; i < 2 * write_cluster_size; i++) { 47 - scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA); 48 - if (!scmrq->cluster.buf[i]) 49 - return -ENOMEM; 50 - } 51 - INIT_LIST_HEAD(&scmrq->cluster.list); 52 - return 0; 53 - } 54 - 55 - void scm_request_cluster_init(struct scm_request *scmrq) 56 - { 57 - scmrq->cluster.state = CLUSTER_NONE; 58 - } 59 - 60 - static bool clusters_intersect(struct request *A, struct request *B) 61 - { 62 - unsigned long firstA, lastA, firstB, lastB; 63 - 64 - firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE; 65 - lastA = (((u64) blk_rq_pos(A) << 9) + 66 - blk_rq_bytes(A) - 1) / CLUSTER_SIZE; 67 - 68 - firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE; 69 - lastB = (((u64) blk_rq_pos(B) << 9) + 70 - blk_rq_bytes(B) - 1) / CLUSTER_SIZE; 71 - 72 - 
return (firstB <= lastA && firstA <= lastB); 73 - } 74 - 75 - bool scm_reserve_cluster(struct scm_request *scmrq) 76 - { 77 - struct request *req = scmrq->request[scmrq->aob->request.msb_count]; 78 - struct scm_blk_dev *bdev = scmrq->bdev; 79 - struct scm_request *iter; 80 - int pos, add = 1; 81 - 82 - if (write_cluster_size == 0) 83 - return true; 84 - 85 - spin_lock(&bdev->lock); 86 - list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { 87 - if (iter == scmrq) { 88 - /* 89 - * We don't have to use clusters_intersect here, since 90 - * cluster requests are always started separately. 91 - */ 92 - add = 0; 93 - continue; 94 - } 95 - for (pos = 0; pos < iter->aob->request.msb_count; pos++) { 96 - if (clusters_intersect(req, iter->request[pos]) && 97 - (rq_data_dir(req) == WRITE || 98 - rq_data_dir(iter->request[pos]) == WRITE)) { 99 - spin_unlock(&bdev->lock); 100 - return false; 101 - } 102 - } 103 - } 104 - if (add) 105 - list_add(&scmrq->cluster.list, &bdev->cluster_list); 106 - spin_unlock(&bdev->lock); 107 - 108 - return true; 109 - } 110 - 111 - void scm_release_cluster(struct scm_request *scmrq) 112 - { 113 - struct scm_blk_dev *bdev = scmrq->bdev; 114 - unsigned long flags; 115 - 116 - if (write_cluster_size == 0) 117 - return; 118 - 119 - spin_lock_irqsave(&bdev->lock, flags); 120 - list_del(&scmrq->cluster.list); 121 - spin_unlock_irqrestore(&bdev->lock, flags); 122 - } 123 - 124 - void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) 125 - { 126 - INIT_LIST_HEAD(&bdev->cluster_list); 127 - blk_queue_io_opt(bdev->rq, CLUSTER_SIZE); 128 - } 129 - 130 - static int scm_prepare_cluster_request(struct scm_request *scmrq) 131 - { 132 - struct scm_blk_dev *bdev = scmrq->bdev; 133 - struct scm_device *scmdev = bdev->gendisk->private_data; 134 - struct request *req = scmrq->request[0]; 135 - struct msb *msb = &scmrq->aob->msb[0]; 136 - struct req_iterator iter; 137 - struct aidaw *aidaw; 138 - struct bio_vec bv; 139 - int i = 0; 140 - u64 addr; 141 - 
142 - switch (scmrq->cluster.state) { 143 - case CLUSTER_NONE: 144 - scmrq->cluster.state = CLUSTER_READ; 145 - /* fall through */ 146 - case CLUSTER_READ: 147 - msb->bs = MSB_BS_4K; 148 - msb->oc = MSB_OC_READ; 149 - msb->flags = MSB_FLAG_IDA; 150 - msb->blk_count = write_cluster_size; 151 - 152 - addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); 153 - msb->scm_addr = round_down(addr, CLUSTER_SIZE); 154 - 155 - if (msb->scm_addr != 156 - round_down(addr + (u64) blk_rq_bytes(req) - 1, 157 - CLUSTER_SIZE)) 158 - msb->blk_count = 2 * write_cluster_size; 159 - 160 - aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE); 161 - if (!aidaw) 162 - return -ENOMEM; 163 - 164 - scmrq->aob->request.msb_count = 1; 165 - msb->data_addr = (u64) aidaw; 166 - for (i = 0; i < msb->blk_count; i++) { 167 - aidaw->data_addr = (u64) scmrq->cluster.buf[i]; 168 - aidaw++; 169 - } 170 - 171 - break; 172 - case CLUSTER_WRITE: 173 - aidaw = (void *) msb->data_addr; 174 - msb->oc = MSB_OC_WRITE; 175 - 176 - for (addr = msb->scm_addr; 177 - addr < scmdev->address + ((u64) blk_rq_pos(req) << 9); 178 - addr += PAGE_SIZE) { 179 - aidaw->data_addr = (u64) scmrq->cluster.buf[i]; 180 - aidaw++; 181 - i++; 182 - } 183 - rq_for_each_segment(bv, req, iter) { 184 - aidaw->data_addr = (u64) page_address(bv.bv_page); 185 - aidaw++; 186 - i++; 187 - } 188 - for (; i < msb->blk_count; i++) { 189 - aidaw->data_addr = (u64) scmrq->cluster.buf[i]; 190 - aidaw++; 191 - } 192 - break; 193 - } 194 - return 0; 195 - } 196 - 197 - bool scm_need_cluster_request(struct scm_request *scmrq) 198 - { 199 - int pos = scmrq->aob->request.msb_count; 200 - 201 - if (rq_data_dir(scmrq->request[pos]) == READ) 202 - return false; 203 - 204 - return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE; 205 - } 206 - 207 - /* Called with queue lock held. 
*/ 208 - void scm_initiate_cluster_request(struct scm_request *scmrq) 209 - { 210 - if (scm_prepare_cluster_request(scmrq)) 211 - goto requeue; 212 - if (eadm_start_aob(scmrq->aob)) 213 - goto requeue; 214 - return; 215 - requeue: 216 - scm_request_requeue(scmrq); 217 - } 218 - 219 - bool scm_test_cluster_request(struct scm_request *scmrq) 220 - { 221 - return scmrq->cluster.state != CLUSTER_NONE; 222 - } 223 - 224 - void scm_cluster_request_irq(struct scm_request *scmrq) 225 - { 226 - struct scm_blk_dev *bdev = scmrq->bdev; 227 - unsigned long flags; 228 - 229 - switch (scmrq->cluster.state) { 230 - case CLUSTER_NONE: 231 - BUG(); 232 - break; 233 - case CLUSTER_READ: 234 - if (scmrq->error) { 235 - scm_request_finish(scmrq); 236 - break; 237 - } 238 - scmrq->cluster.state = CLUSTER_WRITE; 239 - spin_lock_irqsave(&bdev->rq_lock, flags); 240 - scm_initiate_cluster_request(scmrq); 241 - spin_unlock_irqrestore(&bdev->rq_lock, flags); 242 - break; 243 - case CLUSTER_WRITE: 244 - scm_request_finish(scmrq); 245 - break; 246 - } 247 - } 248 - 249 - bool scm_cluster_size_valid(void) 250 - { 251 - if (write_cluster_size == 1 || write_cluster_size > 128) 252 - return false; 253 - 254 - return !(write_cluster_size & (write_cluster_size - 1)); 255 - }
+49
drivers/s390/cio/css.c
··· 296 296 NULL, 297 297 }; 298 298 299 + static ssize_t chpids_show(struct device *dev, 300 + struct device_attribute *attr, 301 + char *buf) 302 + { 303 + struct subchannel *sch = to_subchannel(dev); 304 + struct chsc_ssd_info *ssd = &sch->ssd_info; 305 + ssize_t ret = 0; 306 + int mask; 307 + int chp; 308 + 309 + for (chp = 0; chp < 8; chp++) { 310 + mask = 0x80 >> chp; 311 + if (ssd->path_mask & mask) 312 + ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); 313 + else 314 + ret += sprintf(buf + ret, "00 "); 315 + } 316 + ret += sprintf(buf + ret, "\n"); 317 + return ret; 318 + } 319 + static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); 320 + 321 + static ssize_t pimpampom_show(struct device *dev, 322 + struct device_attribute *attr, 323 + char *buf) 324 + { 325 + struct subchannel *sch = to_subchannel(dev); 326 + struct pmcw *pmcw = &sch->schib.pmcw; 327 + 328 + return sprintf(buf, "%02x %02x %02x\n", 329 + pmcw->pim, pmcw->pam, pmcw->pom); 330 + } 331 + static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); 332 + 333 + static struct attribute *io_subchannel_type_attrs[] = { 334 + &dev_attr_chpids.attr, 335 + &dev_attr_pimpampom.attr, 336 + NULL, 337 + }; 338 + ATTRIBUTE_GROUPS(io_subchannel_type); 339 + 340 + static const struct device_type io_subchannel_type = { 341 + .groups = io_subchannel_type_groups, 342 + }; 343 + 299 344 int css_register_subchannel(struct subchannel *sch) 300 345 { 301 346 int ret; ··· 349 304 sch->dev.parent = &channel_subsystems[0]->device; 350 305 sch->dev.bus = &css_bus_type; 351 306 sch->dev.groups = default_subch_attr_groups; 307 + 308 + if (sch->st == SUBCHANNEL_TYPE_IO) 309 + sch->dev.type = &io_subchannel_type; 310 + 352 311 /* 353 312 * We don't want to generate uevents for I/O subchannels that don't 354 313 * have a working ccw device behind them since they will be
-42
drivers/s390/cio/device.c
··· 208 208 209 209 /************************ device handling **************************/ 210 210 211 - /* 212 - * A ccw_device has some interfaces in sysfs in addition to the 213 - * standard ones. 214 - * The following entries are designed to export the information which 215 - * resided in 2.4 in /proc/subchannels. Subchannel and device number 216 - * are obvious, so they don't have an entry :) 217 - * TODO: Split chpids and pimpampom up? Where is "in use" in the tree? 218 - */ 219 - static ssize_t 220 - chpids_show (struct device * dev, struct device_attribute *attr, char * buf) 221 - { 222 - struct subchannel *sch = to_subchannel(dev); 223 - struct chsc_ssd_info *ssd = &sch->ssd_info; 224 - ssize_t ret = 0; 225 - int chp; 226 - int mask; 227 - 228 - for (chp = 0; chp < 8; chp++) { 229 - mask = 0x80 >> chp; 230 - if (ssd->path_mask & mask) 231 - ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); 232 - else 233 - ret += sprintf(buf + ret, "00 "); 234 - } 235 - ret += sprintf (buf+ret, "\n"); 236 - return min((ssize_t)PAGE_SIZE, ret); 237 - } 238 - 239 - static ssize_t 240 - pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf) 241 - { 242 - struct subchannel *sch = to_subchannel(dev); 243 - struct pmcw *pmcw = &sch->schib.pmcw; 244 - 245 - return sprintf (buf, "%02x %02x %02x\n", 246 - pmcw->pim, pmcw->pam, pmcw->pom); 247 - } 248 - 249 211 static ssize_t 250 212 devtype_show (struct device *dev, struct device_attribute *attr, char *buf) 251 213 { ··· 598 636 return sprintf(buf, "%02x\n", sch->vpm); 599 637 } 600 638 601 - static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); 602 - static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); 603 639 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); 604 640 static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); 605 641 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); ··· 607 647 static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); 608 648 609 649 static struct attribute 
*io_subchannel_attrs[] = { 610 - &dev_attr_chpids.attr, 611 - &dev_attr_pimpampom.attr, 612 650 &dev_attr_logging.attr, 613 651 &dev_attr_vpm.attr, 614 652 NULL,
+1 -57
drivers/s390/cio/vfio_ccw_drv.c
··· 90 90 } 91 91 92 92 /* 93 - * Sysfs interfaces 94 - */ 95 - static ssize_t chpids_show(struct device *dev, 96 - struct device_attribute *attr, 97 - char *buf) 98 - { 99 - struct subchannel *sch = to_subchannel(dev); 100 - struct chsc_ssd_info *ssd = &sch->ssd_info; 101 - ssize_t ret = 0; 102 - int chp; 103 - int mask; 104 - 105 - for (chp = 0; chp < 8; chp++) { 106 - mask = 0x80 >> chp; 107 - if (ssd->path_mask & mask) 108 - ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); 109 - else 110 - ret += sprintf(buf + ret, "00 "); 111 - } 112 - ret += sprintf(buf+ret, "\n"); 113 - return ret; 114 - } 115 - 116 - static ssize_t pimpampom_show(struct device *dev, 117 - struct device_attribute *attr, 118 - char *buf) 119 - { 120 - struct subchannel *sch = to_subchannel(dev); 121 - struct pmcw *pmcw = &sch->schib.pmcw; 122 - 123 - return sprintf(buf, "%02x %02x %02x\n", 124 - pmcw->pim, pmcw->pam, pmcw->pom); 125 - } 126 - 127 - static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); 128 - static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); 129 - 130 - static struct attribute *vfio_subchannel_attrs[] = { 131 - &dev_attr_chpids.attr, 132 - &dev_attr_pimpampom.attr, 133 - NULL, 134 - }; 135 - 136 - static struct attribute_group vfio_subchannel_attr_group = { 137 - .attrs = vfio_subchannel_attrs, 138 - }; 139 - 140 - /* 141 93 * Css driver callbacks 142 94 */ 143 95 static void vfio_ccw_sch_irq(struct subchannel *sch) ··· 126 174 if (ret) 127 175 goto out_free; 128 176 129 - ret = sysfs_create_group(&sch->dev.kobj, &vfio_subchannel_attr_group); 130 - if (ret) 131 - goto out_disable; 132 - 133 177 ret = vfio_ccw_mdev_reg(sch); 134 178 if (ret) 135 - goto out_rm_group; 179 + goto out_disable; 136 180 137 181 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); 138 182 atomic_set(&private->avail, 1); ··· 136 188 137 189 return 0; 138 190 139 - out_rm_group: 140 - sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group); 141 191 out_disable: 142 192 
cio_disable_subchannel(sch); 143 193 out_free: ··· 151 205 vfio_ccw_sch_quiesce(sch); 152 206 153 207 vfio_ccw_mdev_unreg(sch); 154 - 155 - sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group); 156 208 157 209 dev_set_drvdata(&sch->dev, NULL); 158 210
+15 -6
drivers/s390/crypto/ap_bus.c
··· 766 766 ap_domain_index = domain; 767 767 spin_unlock_bh(&ap_domain_lock); 768 768 769 - AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain); 769 + AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain); 770 770 771 771 return count; 772 772 } ··· 952 952 } 953 953 if (best_domain >= 0){ 954 954 ap_domain_index = best_domain; 955 + AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index); 955 956 spin_unlock_bh(&ap_domain_lock); 956 957 return 0; 957 958 } ··· 989 988 ap_qid_t qid; 990 989 int depth = 0, type = 0; 991 990 unsigned int functions = 0; 992 - int rc, id, dom, borked, domains; 991 + int rc, id, dom, borked, domains, defdomdevs = 0; 993 992 994 993 AP_DBF(DBF_DEBUG, "ap_scan_bus running\n"); 995 994 ··· 1053 1052 put_device(dev); 1054 1053 if (!borked) { 1055 1054 domains++; 1055 + if (dom == ap_domain_index) 1056 + defdomdevs++; 1056 1057 continue; 1057 1058 } 1058 1059 } ··· 1101 1098 continue; 1102 1099 } 1103 1100 domains++; 1101 + if (dom == ap_domain_index) 1102 + defdomdevs++; 1104 1103 } /* end domain loop */ 1105 1104 if (ac) { 1106 1105 /* remove card dev if there are no queue devices */ ··· 1111 1106 put_device(&ac->ap_dev.device); 1112 1107 } 1113 1108 } /* end device loop */ 1109 + 1110 + if (defdomdevs < 1) 1111 + AP_DBF(DBF_INFO, "no queue device with default domain %d available\n", 1112 + ap_domain_index); 1113 + 1114 1114 out: 1115 1115 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1116 1116 } ··· 1184 1174 ap_init_configuration(); 1185 1175 1186 1176 if (ap_configuration) 1187 - max_domain_id = ap_max_domain_id ? : (AP_DOMAINS - 1); 1177 + max_domain_id = 1178 + ap_max_domain_id ? 
ap_max_domain_id : AP_DOMAINS - 1; 1188 1179 else 1189 1180 max_domain_id = 15; 1190 1181 if (ap_domain_index < -1 || ap_domain_index > max_domain_id) { 1191 1182 pr_warn("%d is not a valid cryptographic domain\n", 1192 1183 ap_domain_index); 1193 - rc = -EINVAL; 1194 - goto out_free; 1184 + ap_domain_index = -1; 1195 1185 } 1196 1186 /* In resume callback we need to know if the user had set the domain. 1197 1187 * If so, we can not just reset it. ··· 1264 1254 unregister_reset_call(&ap_reset_call); 1265 1255 if (ap_using_interrupts()) 1266 1256 unregister_adapter_interrupt(&ap_airq); 1267 - out_free: 1268 1257 kfree(ap_configuration); 1269 1258 return rc; 1270 1259 }
+3 -3
drivers/s390/crypto/pkey_api.c
··· 178 178 pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); 179 179 pxcrb->request_control_blk_length = 180 180 preqcblk->cprb_len + preqcblk->req_parml; 181 - pxcrb->request_control_blk_addr = (void *) preqcblk; 181 + pxcrb->request_control_blk_addr = (void __user *) preqcblk; 182 182 pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; 183 - pxcrb->reply_control_blk_addr = (void *) prepcblk; 183 + pxcrb->reply_control_blk_addr = (void __user *) prepcblk; 184 184 } 185 185 186 186 /* ··· 1194 1194 /* 1195 1195 * Module init 1196 1196 */ 1197 - int __init pkey_init(void) 1197 + static int __init pkey_init(void) 1198 1198 { 1199 1199 cpacf_mask_t pckmo_functions; 1200 1200
+10 -2
drivers/s390/crypto/zcrypt_api.c
··· 821 821 do { 822 822 rc = zcrypt_rsa_modexpo(&mex); 823 823 } while (rc == -EAGAIN); 824 - if (rc) 824 + if (rc) { 825 + ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d", rc); 825 826 return rc; 827 + } 826 828 return put_user(mex.outputdatalength, &umex->outputdatalength); 827 829 } 828 830 case ICARSACRT: { ··· 840 838 do { 841 839 rc = zcrypt_rsa_crt(&crt); 842 840 } while (rc == -EAGAIN); 843 - if (rc) 841 + if (rc) { 842 + ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d", rc); 844 843 return rc; 844 + } 845 845 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 846 846 } 847 847 case ZSECSENDCPRB: { ··· 859 855 do { 860 856 rc = zcrypt_send_cprb(&xcRB); 861 857 } while (rc == -EAGAIN); 858 + if (rc) 859 + ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d", rc); 862 860 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 863 861 return -EFAULT; 864 862 return rc; ··· 878 872 do { 879 873 rc = zcrypt_send_ep11_cprb(&xcrb); 880 874 } while (rc == -EAGAIN); 875 + if (rc) 876 + ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d", rc); 881 877 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 882 878 return -EFAULT; 883 879 return rc;
+6 -109
drivers/s390/crypto/zcrypt_cca_key.h
··· 48 48 49 49 #define CCA_TKN_HDR_ID_EXT 0x1E 50 50 51 - /** 52 - * mapping for the cca private ME section 53 - */ 54 - struct cca_private_ext_ME_sec { 55 - unsigned char section_identifier; 56 - unsigned char version; 57 - unsigned short section_length; 58 - unsigned char private_key_hash[20]; 59 - unsigned char reserved1[4]; 60 - unsigned char key_format; 61 - unsigned char reserved2; 62 - unsigned char key_name_hash[20]; 63 - unsigned char key_use_flags[4]; 64 - unsigned char reserved3[6]; 65 - unsigned char reserved4[24]; 66 - unsigned char confounder[24]; 67 - unsigned char exponent[128]; 68 - unsigned char modulus[128]; 69 - } __attribute__((packed)); 70 - 71 51 #define CCA_PVT_USAGE_ALL 0x80 72 52 73 53 /** ··· 104 124 #define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40 105 125 106 126 /** 107 - * Set up private key fields of a type6 MEX message. 108 - * Note that all numerics in the key token are big-endian, 109 - * while the entries in the key block header are little-endian. 110 - * 111 - * @mex: pointer to user input data 112 - * @p: pointer to memory area for the key 113 - * 114 - * Returns the size of the key area or -EFAULT 115 - */ 116 - static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex, 117 - void *p, int big_endian) 118 - { 119 - static struct cca_token_hdr static_pvt_me_hdr = { 120 - .token_identifier = 0x1E, 121 - .token_length = 0x0183, 122 - }; 123 - static struct cca_private_ext_ME_sec static_pvt_me_sec = { 124 - .section_identifier = 0x02, 125 - .section_length = 0x016C, 126 - .key_use_flags = {0x80,0x00,0x00,0x00}, 127 - }; 128 - static struct cca_public_sec static_pub_me_sec = { 129 - .section_identifier = 0x04, 130 - .section_length = 0x000F, 131 - .exponent_len = 0x0003, 132 - }; 133 - static char pk_exponent[3] = { 0x01, 0x00, 0x01 }; 134 - struct { 135 - struct T6_keyBlock_hdr t6_hdr; 136 - struct cca_token_hdr pvtMeHdr; 137 - struct cca_private_ext_ME_sec pvtMeSec; 138 - struct cca_public_sec pubMeSec; 139 - char exponent[3]; 
140 - } __attribute__((packed)) *key = p; 141 - unsigned char *temp; 142 - 143 - memset(key, 0, sizeof(*key)); 144 - 145 - if (big_endian) { 146 - key->t6_hdr.blen = cpu_to_be16(0x189); 147 - key->t6_hdr.ulen = cpu_to_be16(0x189 - 2); 148 - } else { 149 - key->t6_hdr.blen = cpu_to_le16(0x189); 150 - key->t6_hdr.ulen = cpu_to_le16(0x189 - 2); 151 - } 152 - key->pvtMeHdr = static_pvt_me_hdr; 153 - key->pvtMeSec = static_pvt_me_sec; 154 - key->pubMeSec = static_pub_me_sec; 155 - /* 156 - * In a private key, the modulus doesn't appear in the public 157 - * section. So, an arbitrary public exponent of 0x010001 will be 158 - * used. 159 - */ 160 - memcpy(key->exponent, pk_exponent, 3); 161 - 162 - /* key parameter block */ 163 - temp = key->pvtMeSec.exponent + 164 - sizeof(key->pvtMeSec.exponent) - mex->inputdatalength; 165 - if (copy_from_user(temp, mex->b_key, mex->inputdatalength)) 166 - return -EFAULT; 167 - 168 - /* modulus */ 169 - temp = key->pvtMeSec.modulus + 170 - sizeof(key->pvtMeSec.modulus) - mex->inputdatalength; 171 - if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength)) 172 - return -EFAULT; 173 - key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength; 174 - return sizeof(*key); 175 - } 176 - 177 - /** 178 127 * Set up private key fields of a type6 MEX message. The _pad variant 179 128 * strips leading zeroes from the b_key. 
180 129 * Note that all numerics in the key token are big-endian, ··· 114 205 * 115 206 * Returns the size of the key area or -EFAULT 116 207 */ 117 - static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, 118 - void *p, int big_endian) 208 + static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p) 119 209 { 120 210 static struct cca_token_hdr static_pub_hdr = { 121 211 .token_identifier = 0x1E, ··· 159 251 2*mex->inputdatalength - i; 160 252 key->pubHdr.token_length = 161 253 key->pubSec.section_length + sizeof(key->pubHdr); 162 - if (big_endian) { 163 - key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4); 164 - key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6); 165 - } else { 166 - key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4); 167 - key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6); 168 - } 254 + key->t6_hdr.ulen = key->pubHdr.token_length + 4; 255 + key->t6_hdr.blen = key->pubHdr.token_length + 6; 169 256 return sizeof(*key) + 2*mex->inputdatalength - i; 170 257 } 171 258 ··· 174 271 * 175 272 * Returns the size of the key area or -EFAULT 176 273 */ 177 - static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, 178 - void *p, int big_endian) 274 + static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p) 179 275 { 180 276 static struct cca_public_sec static_cca_pub_sec = { 181 277 .section_identifier = 4, ··· 200 298 size = sizeof(*key) + key_len + sizeof(*pub) + 3; 201 299 202 300 /* parameter block.key block */ 203 - if (big_endian) { 204 - key->t6_hdr.blen = cpu_to_be16(size); 205 - key->t6_hdr.ulen = cpu_to_be16(size - 2); 206 - } else { 207 - key->t6_hdr.blen = cpu_to_le16(size); 208 - key->t6_hdr.ulen = cpu_to_le16(size - 2); 209 - } 301 + key->t6_hdr.blen = size; 302 + key->t6_hdr.ulen = size - 2; 210 303 211 304 /* key token header */ 212 305 key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
+2 -2
drivers/s390/crypto/zcrypt_msgtype6.c
··· 291 291 return -EFAULT; 292 292 293 293 /* Set up key which is located after the variable length text. */ 294 - size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1); 294 + size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength); 295 295 if (size < 0) 296 296 return size; 297 297 size += sizeof(*msg) + mex->inputdatalength; ··· 353 353 return -EFAULT; 354 354 355 355 /* Set up key which is located after the variable length text. */ 356 - size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1); 356 + size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength); 357 357 if (size < 0) 358 358 return size; 359 359 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
+1 -1
drivers/tty/hvc/Kconfig
··· 44 44 45 45 config HVC_IUCV 46 46 bool "z/VM IUCV Hypervisor console support (VM only)" 47 - depends on S390 47 + depends on S390 && NET 48 48 select HVC_DRIVER 49 49 select IUCV 50 50 default y