Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 update from Martin Schwidefsky:
"The most prominent change in this patch set is the software dirty bit
patch for s390. It removes __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY and
the page_test_and_clear_dirty primitive which makes the common memory
management code a bit less obscure.

Heiko fixed most of the PCI related fallout, more often than not
missing GENERIC_HARDIRQS dependencies. Notable is one of the 3270
patches which adds an export to tty_io to be able to resize a tty.

The rest is the usual bunch of cleanups and bug fixes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
s390/module: Add missing R_390_NONE relocation type
drivers/gpio: add missing GENERIC_HARDIRQ dependency
drivers/input: add couple of missing GENERIC_HARDIRQS dependencies
s390/cleanup: rename SPP to LPP
s390/mm: implement software dirty bits
s390/mm: Fix crst upgrade of mmap with MAP_FIXED
s390/linker script: discard exit.data at runtime
drivers/media: add missing GENERIC_HARDIRQS dependency
s390/bpf,jit: add vlan tag support
drivers/net,AT91RM9200: add missing GENERIC_HARDIRQS dependency
iucv: fix kernel panic at reboot
s390/Kconfig: sort list of arch selected config options
phylib: remove !S390 dependency from Kconfig
uio: remove !S390 dependency from Kconfig
dasd: fix sysfs cleanup in dasd_generic_remove
s390/pci: fix hotplug module init
s390/pci: cleanup clp page allocation
s390/pci: cleanup clp inline assembly
s390/perf: cpum_cf: fallback to software sampling events
s390/mm: provide PAGE_SHARED define
...

+1216 -930
+1 -1
MAINTAINERS
··· 6519 6519 F: drivers/s390/net/ 6520 6520 6521 6521 S390 ZCRYPT DRIVER 6522 - M: Holger Dengler <hd@linux.vnet.ibm.com> 6522 + M: Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com> 6523 6523 M: linux390@de.ibm.com 6524 6524 L: linux-s390@vger.kernel.org 6525 6525 W: http://www.ibm.com/developerworks/linux/linux390/
+57 -56
arch/s390/Kconfig
··· 60 60 61 61 config S390 62 62 def_bool y 63 - select USE_GENERIC_SMP_HELPERS if SMP 64 - select GENERIC_CPU_DEVICES if !SMP 65 - select HAVE_SYSCALL_WRAPPERS 66 - select HAVE_FUNCTION_TRACER 67 - select HAVE_FUNCTION_TRACE_MCOUNT_TEST 68 - select HAVE_FTRACE_MCOUNT_RECORD 69 - select HAVE_C_RECORDMCOUNT 70 - select HAVE_SYSCALL_TRACEPOINTS 71 - select SYSCTL_EXCEPTION_TRACE 72 - select HAVE_DYNAMIC_FTRACE 73 - select HAVE_FUNCTION_GRAPH_TRACER 74 - select HAVE_REGS_AND_STACK_ACCESS_API 75 - select HAVE_OPROFILE 76 - select HAVE_KPROBES 77 - select HAVE_KRETPROBES 78 - select HAVE_KVM if 64BIT 79 - select HAVE_ARCH_TRACEHOOK 80 - select INIT_ALL_POSSIBLE 81 - select HAVE_PERF_EVENTS 82 - select ARCH_HAVE_NMI_SAFE_CMPXCHG 83 - select HAVE_DEBUG_KMEMLEAK 84 - select HAVE_KERNEL_GZIP 85 - select HAVE_KERNEL_BZIP2 86 - select HAVE_KERNEL_LZMA 87 - select HAVE_KERNEL_LZO 88 - select HAVE_KERNEL_XZ 89 - select HAVE_ARCH_MUTEX_CPU_RELAX 90 - select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 91 - select HAVE_BPF_JIT if 64BIT && PACK_STACK 92 - select ARCH_SAVE_PAGE_KEYS if HIBERNATION 93 - select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 94 - select HAVE_MEMBLOCK 95 - select HAVE_MEMBLOCK_NODE_MAP 96 - select HAVE_CMPXCHG_LOCAL 97 - select HAVE_CMPXCHG_DOUBLE 98 - select HAVE_ALIGNED_STRUCT_PAGE if SLUB 99 - select HAVE_VIRT_CPU_ACCOUNTING 100 - select VIRT_CPU_ACCOUNTING 101 63 select ARCH_DISCARD_MEMBLOCK 102 - select BUILDTIME_EXTABLE_SORT 103 - select ARCH_INLINE_SPIN_TRYLOCK 104 - select ARCH_INLINE_SPIN_TRYLOCK_BH 105 - select ARCH_INLINE_SPIN_LOCK 106 - select ARCH_INLINE_SPIN_LOCK_BH 107 - select ARCH_INLINE_SPIN_LOCK_IRQ 108 - select ARCH_INLINE_SPIN_LOCK_IRQSAVE 109 - select ARCH_INLINE_SPIN_UNLOCK 110 - select ARCH_INLINE_SPIN_UNLOCK_BH 111 - select ARCH_INLINE_SPIN_UNLOCK_IRQ 112 - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE 113 - select ARCH_INLINE_READ_TRYLOCK 64 + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 65 + select ARCH_HAVE_NMI_SAFE_CMPXCHG 114 66 select 
ARCH_INLINE_READ_LOCK 115 67 select ARCH_INLINE_READ_LOCK_BH 116 68 select ARCH_INLINE_READ_LOCK_IRQ 117 69 select ARCH_INLINE_READ_LOCK_IRQSAVE 70 + select ARCH_INLINE_READ_TRYLOCK 118 71 select ARCH_INLINE_READ_UNLOCK 119 72 select ARCH_INLINE_READ_UNLOCK_BH 120 73 select ARCH_INLINE_READ_UNLOCK_IRQ 121 74 select ARCH_INLINE_READ_UNLOCK_IRQRESTORE 122 - select ARCH_INLINE_WRITE_TRYLOCK 75 + select ARCH_INLINE_SPIN_LOCK 76 + select ARCH_INLINE_SPIN_LOCK_BH 77 + select ARCH_INLINE_SPIN_LOCK_IRQ 78 + select ARCH_INLINE_SPIN_LOCK_IRQSAVE 79 + select ARCH_INLINE_SPIN_TRYLOCK 80 + select ARCH_INLINE_SPIN_TRYLOCK_BH 81 + select ARCH_INLINE_SPIN_UNLOCK 82 + select ARCH_INLINE_SPIN_UNLOCK_BH 83 + select ARCH_INLINE_SPIN_UNLOCK_IRQ 84 + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE 123 85 select ARCH_INLINE_WRITE_LOCK 124 86 select ARCH_INLINE_WRITE_LOCK_BH 125 87 select ARCH_INLINE_WRITE_LOCK_IRQ 126 88 select ARCH_INLINE_WRITE_LOCK_IRQSAVE 89 + select ARCH_INLINE_WRITE_TRYLOCK 127 90 select ARCH_INLINE_WRITE_UNLOCK 128 91 select ARCH_INLINE_WRITE_UNLOCK_BH 129 92 select ARCH_INLINE_WRITE_UNLOCK_IRQ 130 93 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 131 - select HAVE_UID16 if 32BIT 94 + select ARCH_SAVE_PAGE_KEYS if HIBERNATION 132 95 select ARCH_WANT_IPC_PARSE_VERSION 133 - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 96 + select BUILDTIME_EXTABLE_SORT 97 + select CLONE_BACKWARDS2 98 + select GENERIC_CLOCKEVENTS 99 + select GENERIC_CPU_DEVICES if !SMP 100 + select GENERIC_KERNEL_THREAD 134 101 select GENERIC_SMP_IDLE_THREAD 135 102 select GENERIC_TIME_VSYSCALL_OLD 136 - select GENERIC_CLOCKEVENTS 137 - select KTIME_SCALAR if 32BIT 103 + select HAVE_ALIGNED_STRUCT_PAGE if SLUB 104 + select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 105 + select HAVE_ARCH_MUTEX_CPU_RELAX 138 106 select HAVE_ARCH_SECCOMP_FILTER 107 + select HAVE_ARCH_TRACEHOOK 108 + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 109 + select HAVE_BPF_JIT if 64BIT && PACK_STACK 110 + select HAVE_CMPXCHG_DOUBLE 111 + 
select HAVE_CMPXCHG_LOCAL 112 + select HAVE_C_RECORDMCOUNT 113 + select HAVE_DEBUG_KMEMLEAK 114 + select HAVE_DYNAMIC_FTRACE 115 + select HAVE_FTRACE_MCOUNT_RECORD 116 + select HAVE_FUNCTION_GRAPH_TRACER 117 + select HAVE_FUNCTION_TRACER 118 + select HAVE_FUNCTION_TRACE_MCOUNT_TEST 119 + select HAVE_KERNEL_BZIP2 120 + select HAVE_KERNEL_GZIP 121 + select HAVE_KERNEL_LZMA 122 + select HAVE_KERNEL_LZO 123 + select HAVE_KERNEL_XZ 124 + select HAVE_KPROBES 125 + select HAVE_KRETPROBES 126 + select HAVE_KVM if 64BIT 127 + select HAVE_MEMBLOCK 128 + select HAVE_MEMBLOCK_NODE_MAP 139 129 select HAVE_MOD_ARCH_SPECIFIC 130 + select HAVE_OPROFILE 131 + select HAVE_PERF_EVENTS 132 + select HAVE_REGS_AND_STACK_ACCESS_API 133 + select HAVE_SYSCALL_TRACEPOINTS 134 + select HAVE_SYSCALL_WRAPPERS 135 + select HAVE_UID16 if 32BIT 136 + select HAVE_VIRT_CPU_ACCOUNTING 137 + select INIT_ALL_POSSIBLE 138 + select KTIME_SCALAR if 32BIT 140 139 select MODULES_USE_ELF_RELA 141 - select CLONE_BACKWARDS2 140 + select SYSCTL_EXCEPTION_TRACE 141 + select USE_GENERIC_SMP_HELPERS if SMP 142 + select VIRT_CPU_ACCOUNTING 142 143 143 144 config SCHED_OMIT_FRAME_POINTER 144 145 def_bool y
+1 -1
arch/s390/appldata/appldata_mem.c
··· 108 108 mem_data->totalswap = P2K(val.totalswap); 109 109 mem_data->freeswap = P2K(val.freeswap); 110 110 111 - mem_data->timestamp = get_clock(); 111 + mem_data->timestamp = get_tod_clock(); 112 112 mem_data->sync_count_2++; 113 113 } 114 114
+1 -1
arch/s390/appldata/appldata_net_sum.c
··· 111 111 net_data->tx_dropped = tx_dropped; 112 112 net_data->collisions = collisions; 113 113 114 - net_data->timestamp = get_clock(); 114 + net_data->timestamp = get_tod_clock(); 115 115 net_data->sync_count_2++; 116 116 } 117 117
+1 -1
arch/s390/appldata/appldata_os.c
··· 156 156 } 157 157 ops.size = new_size; 158 158 } 159 - os_data->timestamp = get_clock(); 159 + os_data->timestamp = get_tod_clock(); 160 160 os_data->sync_count_2++; 161 161 } 162 162
+1 -1
arch/s390/hypfs/hypfs_vm.c
··· 245 245 d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); 246 246 if (IS_ERR(d2fc)) 247 247 return PTR_ERR(d2fc); 248 - get_clock_ext(d2fc->hdr.tod_ext); 248 + get_tod_clock_ext(d2fc->hdr.tod_ext); 249 249 d2fc->hdr.len = count * sizeof(struct diag2fc_data); 250 250 d2fc->hdr.version = DBFS_D2FC_HDR_VERSION; 251 251 d2fc->hdr.count = count;
+3 -6
arch/s390/include/asm/barrier.h
··· 13 13 * to devices. 14 14 */ 15 15 16 - static inline void mb(void) 17 - { 18 16 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 19 - /* Fast-BCR without checkpoint synchronization */ 20 - asm volatile("bcr 14,0" : : : "memory"); 17 + /* Fast-BCR without checkpoint synchronization */ 18 + #define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0) 21 19 #else 22 - asm volatile("bcr 15,0" : : : "memory"); 20 + #define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0) 23 21 #endif 24 - } 25 22 26 23 #define rmb() mb() 27 24 #define wmb() mb()
+1 -1
arch/s390/include/asm/clp.h
··· 2 2 #define _ASM_S390_CLP_H 3 3 4 4 /* CLP common request & response block size */ 5 - #define CLP_BLK_SIZE (PAGE_SIZE * 2) 5 + #define CLP_BLK_SIZE PAGE_SIZE 6 6 7 7 struct clp_req_hdr { 8 8 u16 len;
+2 -2
arch/s390/include/asm/cpu_mf.h
··· 34 34 /* CPU measurement facility support */ 35 35 static inline int cpum_cf_avail(void) 36 36 { 37 - return MACHINE_HAS_SPP && test_facility(67); 37 + return MACHINE_HAS_LPP && test_facility(67); 38 38 } 39 39 40 40 static inline int cpum_sf_avail(void) 41 41 { 42 - return MACHINE_HAS_SPP && test_facility(68); 42 + return MACHINE_HAS_LPP && test_facility(68); 43 43 } 44 44 45 45
+5 -3
arch/s390/include/asm/dma-mapping.h
··· 19 19 } 20 20 21 21 extern int dma_set_mask(struct device *dev, u64 mask); 22 - extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); 23 - extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 24 - enum dma_data_direction direction); 22 + 23 + static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 24 + enum dma_data_direction direction) 25 + { 26 + } 25 27 26 28 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 27 29 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+2 -2
arch/s390/include/asm/mman.h
··· 9 9 #include <uapi/asm/mman.h> 10 10 11 11 #if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) 12 - int s390_mmap_check(unsigned long addr, unsigned long len); 13 - #define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len) 12 + int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); 13 + #define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) 14 14 #endif 15 15 #endif /* __S390_MMAN_H__ */
-22
arch/s390/include/asm/page.h
··· 155 155 #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ 156 156 157 157 /* 158 - * Test and clear dirty bit in storage key. 159 - * We can't clear the changed bit atomically. This is a potential 160 - * race against modification of the referenced bit. This function 161 - * should therefore only be called if it is not mapped in any 162 - * address space. 163 - * 164 - * Note that the bit gets set whenever page content is changed. That means 165 - * also when the page is modified by DMA or from inside the kernel. 166 - */ 167 - #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY 168 - static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped) 169 - { 170 - unsigned char skey; 171 - 172 - skey = page_get_storage_key(pfn << PAGE_SHIFT); 173 - if (!(skey & _PAGE_CHANGED)) 174 - return 0; 175 - page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped); 176 - return 1; 177 - } 178 - 179 - /* 180 158 * Test and clear referenced bit in storage key. 181 159 */ 182 160 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+9 -2
arch/s390/include/asm/pci.h
··· 160 160 int zpci_msihash_init(void); 161 161 void zpci_msihash_exit(void); 162 162 163 + #ifdef CONFIG_PCI 163 164 /* Error handling and recovery */ 164 165 void zpci_event_error(void *); 165 166 void zpci_event_availability(void *); 167 + #else /* CONFIG_PCI */ 168 + static inline void zpci_event_error(void *e) {} 169 + static inline void zpci_event_availability(void *e) {} 170 + #endif /* CONFIG_PCI */ 166 171 167 172 /* Helpers */ 168 173 struct zpci_dev *get_zdev(struct pci_dev *); ··· 185 180 /* Hotplug */ 186 181 extern struct mutex zpci_list_lock; 187 182 extern struct list_head zpci_list; 188 - extern struct pci_hp_callback_ops hotplug_ops; 189 - extern unsigned int pci_probe; 183 + extern unsigned int s390_pci_probe; 184 + 185 + void zpci_register_hp_ops(struct pci_hp_callback_ops *); 186 + void zpci_deregister_hp_ops(void); 190 187 191 188 /* FMB */ 192 189 int zpci_fmb_enable_device(struct zpci_dev *);
+89 -43
arch/s390/include/asm/pgtable.h
··· 29 29 #ifndef __ASSEMBLY__ 30 30 #include <linux/sched.h> 31 31 #include <linux/mm_types.h> 32 + #include <linux/page-flags.h> 32 33 #include <asm/bug.h> 33 34 #include <asm/page.h> 34 35 ··· 222 221 /* Software bits in the page table entry */ 223 222 #define _PAGE_SWT 0x001 /* SW pte type bit t */ 224 223 #define _PAGE_SWX 0x002 /* SW pte type bit x */ 225 - #define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */ 226 - #define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */ 227 - #define _PAGE_SPECIAL 0x010 /* SW associated with special page */ 224 + #define _PAGE_SWC 0x004 /* SW pte changed bit */ 225 + #define _PAGE_SWR 0x008 /* SW pte referenced bit */ 226 + #define _PAGE_SWW 0x010 /* SW pte write bit */ 227 + #define _PAGE_SPECIAL 0x020 /* SW associated with special page */ 228 228 #define __HAVE_ARCH_PTE_SPECIAL 229 229 230 230 /* Set of bits not changed in pte_modify */ 231 - #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR) 231 + #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ 232 + _PAGE_SWC | _PAGE_SWR) 232 233 233 234 /* Six different types of pages. 
*/ 234 235 #define _PAGE_TYPE_EMPTY 0x400 ··· 324 321 325 322 /* Bits in the region table entry */ 326 323 #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ 324 + #define _REGION_ENTRY_RO 0x200 /* region protection bit */ 327 325 #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ 328 326 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ 329 327 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ ··· 386 382 */ 387 383 #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 388 384 #define PAGE_RO __pgprot(_PAGE_TYPE_RO) 389 - #define PAGE_RW __pgprot(_PAGE_TYPE_RW) 385 + #define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW) 386 + #define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC) 390 387 391 - #define PAGE_KERNEL PAGE_RW 388 + #define PAGE_KERNEL PAGE_RWC 389 + #define PAGE_SHARED PAGE_KERNEL 392 390 #define PAGE_COPY PAGE_RO 393 391 394 392 /* ··· 637 631 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); 638 632 /* Clear page changed & referenced bit in the storage key */ 639 633 if (bits & _PAGE_CHANGED) 640 - page_set_storage_key(address, skey ^ bits, 1); 634 + page_set_storage_key(address, skey ^ bits, 0); 641 635 else if (bits) 642 636 page_reset_referenced(address); 643 637 /* Transfer page changed & referenced bit to guest bits in pgste */ 644 638 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ 645 639 /* Get host changed & referenced bits from pgste */ 646 640 bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52; 647 - /* Clear host bits in pgste. */ 641 + /* Transfer page changed & referenced bit to kvm user bits */ 642 + pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */ 643 + /* Clear relevant host bits in pgste. 
*/ 648 644 pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT); 649 645 pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT); 650 646 /* Copy page access key and fetch protection bit to pgste */ 651 647 pgste_val(pgste) |= 652 648 (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; 653 - /* Transfer changed and referenced to kvm user bits */ 654 - pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */ 655 - /* Transfer changed & referenced to pte sofware bits */ 656 - pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */ 649 + /* Transfer referenced bit to pte */ 650 + pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1; 657 651 #endif 658 652 return pgste; 659 653 ··· 666 660 667 661 if (!pte_present(*ptep)) 668 662 return pgste; 663 + /* Get referenced bit from storage key */ 669 664 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); 670 - /* Transfer page referenced bit to pte software bit (host view) */ 671 - if (young || (pgste_val(pgste) & RCP_HR_BIT)) 665 + if (young) 666 + pgste_val(pgste) |= RCP_GR_BIT; 667 + /* Get host referenced bit from pgste */ 668 + if (pgste_val(pgste) & RCP_HR_BIT) { 669 + pgste_val(pgste) &= ~RCP_HR_BIT; 670 + young = 1; 671 + } 672 + /* Transfer referenced bit to kvm user bits and pte */ 673 + if (young) { 674 + pgste_val(pgste) |= KVM_UR_BIT; 672 675 pte_val(*ptep) |= _PAGE_SWR; 673 - /* Clear host referenced bit in pgste. 
*/ 674 - pgste_val(pgste) &= ~RCP_HR_BIT; 675 - /* Transfer page referenced bit to guest bit in pgste */ 676 - pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */ 676 + } 677 677 #endif 678 678 return pgste; 679 - 680 679 } 681 680 682 - static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) 681 + static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) 683 682 { 684 683 #ifdef CONFIG_PGSTE 685 684 unsigned long address; ··· 698 687 /* Set page access key and fetch protection bit from pgste */ 699 688 nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; 700 689 if (okey != nkey) 701 - page_set_storage_key(address, nkey, 1); 690 + page_set_storage_key(address, nkey, 0); 702 691 #endif 692 + } 693 + 694 + static inline void pgste_set_pte(pte_t *ptep, pte_t entry) 695 + { 696 + if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) { 697 + /* 698 + * Without enhanced suppression-on-protection force 699 + * the dirty bit on for all writable ptes. 
700 + */ 701 + pte_val(entry) |= _PAGE_SWC; 702 + pte_val(entry) &= ~_PAGE_RO; 703 + } 704 + *ptep = entry; 703 705 } 704 706 705 707 /** ··· 773 749 774 750 if (mm_has_pgste(mm)) { 775 751 pgste = pgste_get_lock(ptep); 776 - pgste_set_pte(ptep, pgste, entry); 777 - *ptep = entry; 752 + pgste_set_key(ptep, pgste, entry); 753 + pgste_set_pte(ptep, entry); 778 754 pgste_set_unlock(ptep, pgste); 779 - } else 755 + } else { 756 + if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1) 757 + pte_val(entry) |= _PAGE_CO; 780 758 *ptep = entry; 759 + } 781 760 } 782 761 783 762 /* ··· 789 762 */ 790 763 static inline int pte_write(pte_t pte) 791 764 { 792 - return (pte_val(pte) & _PAGE_RO) == 0; 765 + return (pte_val(pte) & _PAGE_SWW) != 0; 793 766 } 794 767 795 768 static inline int pte_dirty(pte_t pte) 796 769 { 797 - #ifdef CONFIG_PGSTE 798 - if (pte_val(pte) & _PAGE_SWC) 799 - return 1; 800 - #endif 801 - return 0; 770 + return (pte_val(pte) & _PAGE_SWC) != 0; 802 771 } 803 772 804 773 static inline int pte_young(pte_t pte) ··· 844 821 { 845 822 pte_val(pte) &= _PAGE_CHG_MASK; 846 823 pte_val(pte) |= pgprot_val(newprot); 824 + if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW)) 825 + pte_val(pte) &= ~_PAGE_RO; 847 826 return pte; 848 827 } 849 828 850 829 static inline pte_t pte_wrprotect(pte_t pte) 851 830 { 831 + pte_val(pte) &= ~_PAGE_SWW; 852 832 /* Do not clobber _PAGE_TYPE_NONE pages! */ 853 833 if (!(pte_val(pte) & _PAGE_INVALID)) 854 834 pte_val(pte) |= _PAGE_RO; ··· 860 834 861 835 static inline pte_t pte_mkwrite(pte_t pte) 862 836 { 863 - pte_val(pte) &= ~_PAGE_RO; 837 + pte_val(pte) |= _PAGE_SWW; 838 + if (pte_val(pte) & _PAGE_SWC) 839 + pte_val(pte) &= ~_PAGE_RO; 864 840 return pte; 865 841 } 866 842 867 843 static inline pte_t pte_mkclean(pte_t pte) 868 844 { 869 - #ifdef CONFIG_PGSTE 870 845 pte_val(pte) &= ~_PAGE_SWC; 871 - #endif 846 + /* Do not clobber _PAGE_TYPE_NONE pages! 
*/ 847 + if (!(pte_val(pte) & _PAGE_INVALID)) 848 + pte_val(pte) |= _PAGE_RO; 872 849 return pte; 873 850 } 874 851 875 852 static inline pte_t pte_mkdirty(pte_t pte) 876 853 { 854 + pte_val(pte) |= _PAGE_SWC; 855 + if (pte_val(pte) & _PAGE_SWW) 856 + pte_val(pte) &= ~_PAGE_RO; 877 857 return pte; 878 858 } 879 859 ··· 917 885 pte_val(pte) |= _SEGMENT_ENTRY_INV; 918 886 } 919 887 /* 920 - * Clear SW pte bits SWT and SWX, there are no SW bits in a segment 921 - * table entry. 888 + * Clear SW pte bits, there are no SW bits in a segment table entry. 922 889 */ 923 - pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX); 890 + pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC | 891 + _PAGE_SWR | _PAGE_SWW); 924 892 /* 925 893 * Also set the change-override bit because we don't need dirty bit 926 894 * tracking for hugetlbfs pages. ··· 1072 1040 unsigned long address, 1073 1041 pte_t *ptep, pte_t pte) 1074 1042 { 1075 - *ptep = pte; 1076 - if (mm_has_pgste(mm)) 1043 + if (mm_has_pgste(mm)) { 1044 + pgste_set_pte(ptep, pte); 1077 1045 pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); 1046 + } else 1047 + *ptep = pte; 1078 1048 } 1079 1049 1080 1050 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH ··· 1144 1110 1145 1111 if (!mm_exclusive(mm)) 1146 1112 __ptep_ipte(address, ptep); 1147 - *ptep = pte_wrprotect(pte); 1113 + pte = pte_wrprotect(pte); 1148 1114 1149 - if (mm_has_pgste(mm)) 1115 + if (mm_has_pgste(mm)) { 1116 + pgste_set_pte(ptep, pte); 1150 1117 pgste_set_unlock(ptep, pgste); 1118 + } else 1119 + *ptep = pte; 1151 1120 } 1152 1121 return pte; 1153 1122 } ··· 1168 1131 pgste = pgste_get_lock(ptep); 1169 1132 1170 1133 __ptep_ipte(address, ptep); 1171 - *ptep = entry; 1172 1134 1173 - if (mm_has_pgste(vma->vm_mm)) 1135 + if (mm_has_pgste(vma->vm_mm)) { 1136 + pgste_set_pte(ptep, entry); 1174 1137 pgste_set_unlock(ptep, pgste); 1138 + } else 1139 + *ptep = entry; 1175 1140 return 1; 1176 1141 } 1177 1142 ··· 1191 1152 static inline pte_t mk_pte(struct page *page, pgprot_t 
pgprot) 1192 1153 { 1193 1154 unsigned long physpage = page_to_phys(page); 1155 + pte_t __pte = mk_pte_phys(physpage, pgprot); 1194 1156 1195 - return mk_pte_phys(physpage, pgprot); 1157 + if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) { 1158 + pte_val(__pte) |= _PAGE_SWC; 1159 + pte_val(__pte) &= ~_PAGE_RO; 1160 + } 1161 + return __pte; 1196 1162 } 1197 1163 1198 1164 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) ··· 1289 1245 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1290 1246 pmd_t *pmdp, pmd_t entry) 1291 1247 { 1248 + if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) 1249 + pmd_val(entry) |= _SEGMENT_ENTRY_CO; 1292 1250 *pmdp = entry; 1293 1251 } 1294 1252
-1
arch/s390/include/asm/sclp.h
··· 46 46 void sclp_facilities_detect(void); 47 47 unsigned long long sclp_get_rnmax(void); 48 48 unsigned long long sclp_get_rzm(void); 49 - u8 sclp_get_fac85(void); 50 49 int sclp_sdias_blk_count(void); 51 50 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); 52 51 int sclp_chp_configure(struct chp_id chpid);
+12 -10
arch/s390/include/asm/setup.h
··· 64 64 65 65 #define MACHINE_FLAG_VM (1UL << 0) 66 66 #define MACHINE_FLAG_IEEE (1UL << 1) 67 - #define MACHINE_FLAG_CSP (1UL << 3) 68 - #define MACHINE_FLAG_MVPG (1UL << 4) 69 - #define MACHINE_FLAG_DIAG44 (1UL << 5) 70 - #define MACHINE_FLAG_IDTE (1UL << 6) 71 - #define MACHINE_FLAG_DIAG9C (1UL << 7) 72 - #define MACHINE_FLAG_MVCOS (1UL << 8) 73 - #define MACHINE_FLAG_KVM (1UL << 9) 67 + #define MACHINE_FLAG_CSP (1UL << 2) 68 + #define MACHINE_FLAG_MVPG (1UL << 3) 69 + #define MACHINE_FLAG_DIAG44 (1UL << 4) 70 + #define MACHINE_FLAG_IDTE (1UL << 5) 71 + #define MACHINE_FLAG_DIAG9C (1UL << 6) 72 + #define MACHINE_FLAG_MVCOS (1UL << 7) 73 + #define MACHINE_FLAG_KVM (1UL << 8) 74 + #define MACHINE_FLAG_ESOP (1UL << 9) 74 75 #define MACHINE_FLAG_EDAT1 (1UL << 10) 75 76 #define MACHINE_FLAG_EDAT2 (1UL << 11) 76 77 #define MACHINE_FLAG_LPAR (1UL << 12) 77 - #define MACHINE_FLAG_SPP (1UL << 13) 78 + #define MACHINE_FLAG_LPP (1UL << 13) 78 79 #define MACHINE_FLAG_TOPOLOGY (1UL << 14) 79 80 #define MACHINE_FLAG_TE (1UL << 15) 80 81 #define MACHINE_FLAG_RRBM (1UL << 16) ··· 85 84 #define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) 86 85 87 86 #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) 87 + #define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP) 88 88 #define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 89 89 #define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 90 90 ··· 98 96 #define MACHINE_HAS_MVCOS (0) 99 97 #define MACHINE_HAS_EDAT1 (0) 100 98 #define MACHINE_HAS_EDAT2 (0) 101 - #define MACHINE_HAS_SPP (0) 99 + #define MACHINE_HAS_LPP (0) 102 100 #define MACHINE_HAS_TOPOLOGY (0) 103 101 #define MACHINE_HAS_TE (0) 104 102 #define MACHINE_HAS_RRBM (0) ··· 111 109 #define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) 112 110 #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) 113 111 #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) 114 - 
#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 112 + #define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) 115 113 #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 116 114 #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) 117 115 #define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
+9 -9
arch/s390/include/asm/timex.h
··· 15 15 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL 16 16 17 17 /* Inline functions for clock register access. */ 18 - static inline int set_clock(__u64 time) 18 + static inline int set_tod_clock(__u64 time) 19 19 { 20 20 int cc; 21 21 ··· 27 27 return cc; 28 28 } 29 29 30 - static inline int store_clock(__u64 *time) 30 + static inline int store_tod_clock(__u64 *time) 31 31 { 32 32 int cc; 33 33 ··· 71 71 72 72 typedef unsigned long long cycles_t; 73 73 74 - static inline unsigned long long get_clock(void) 74 + static inline unsigned long long get_tod_clock(void) 75 75 { 76 76 unsigned long long clk; 77 77 ··· 83 83 return clk; 84 84 } 85 85 86 - static inline void get_clock_ext(char *clk) 86 + static inline void get_tod_clock_ext(char *clk) 87 87 { 88 88 asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); 89 89 } 90 90 91 - static inline unsigned long long get_clock_xt(void) 91 + static inline unsigned long long get_tod_clock_xt(void) 92 92 { 93 93 unsigned char clk[16]; 94 - get_clock_ext(clk); 94 + get_tod_clock_ext(clk); 95 95 return *((unsigned long long *)&clk[1]); 96 96 } 97 97 98 98 static inline cycles_t get_cycles(void) 99 99 { 100 - return (cycles_t) get_clock() >> 2; 100 + return (cycles_t) get_tod_clock() >> 2; 101 101 } 102 102 103 103 int get_sync_clock(unsigned long long *clock); ··· 123 123 * function, otherwise the returned value is not guaranteed to 124 124 * be monotonic. 125 125 */ 126 - static inline unsigned long long get_clock_monotonic(void) 126 + static inline unsigned long long get_tod_clock_monotonic(void) 127 127 { 128 - return get_clock_xt() - sched_clock_base_cc; 128 + return get_tod_clock_xt() - sched_clock_base_cc; 129 129 } 130 130 131 131 /**
+1 -1
arch/s390/kernel/debug.c
··· 867 867 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, 868 868 int exception) 869 869 { 870 - active->id.stck = get_clock(); 870 + active->id.stck = get_tod_clock(); 871 871 active->id.fields.cpuid = smp_processor_id(); 872 872 active->caller = __builtin_return_address(0); 873 873 active->id.fields.exception = exception;
-1
arch/s390/kernel/dis.c
··· 840 840 { "stcke", 0x78, INSTR_S_RD }, 841 841 { "sacf", 0x79, INSTR_S_RD }, 842 842 { "stsi", 0x7d, INSTR_S_RD }, 843 - { "spp", 0x80, INSTR_S_RD }, 844 843 { "srnm", 0x99, INSTR_S_RD }, 845 844 { "stfpc", 0x9c, INSTR_S_RD }, 846 845 { "lfpc", 0x9d, INSTR_S_RD },
+4 -4
arch/s390/kernel/early.c
··· 47 47 { 48 48 u64 time; 49 49 50 - if (store_clock(&time) == 0) 50 + if (store_tod_clock(&time) == 0) 51 51 return; 52 52 /* TOD clock not running. Set the clock to Unix Epoch. */ 53 - if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) 53 + if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) 54 54 disabled_wait(0); 55 55 56 56 sched_clock_base_cc = TOD_UNIX_EPOCH; ··· 173 173 } 174 174 175 175 /* re-initialize cputime accounting. */ 176 - sched_clock_base_cc = get_clock(); 176 + sched_clock_base_cc = get_tod_clock(); 177 177 S390_lowcore.last_update_clock = sched_clock_base_cc; 178 178 S390_lowcore.last_update_timer = 0x7fffffffffffffffULL; 179 179 S390_lowcore.user_timer = 0; ··· 381 381 if (test_facility(27)) 382 382 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 383 383 if (test_facility(40)) 384 - S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; 384 + S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; 385 385 if (test_facility(50) && test_facility(73)) 386 386 S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 387 387 if (test_facility(66))
+5 -5
arch/s390/kernel/entry64.S
··· 72 72 #endif 73 73 .endm 74 74 75 - .macro SPP newpp 75 + .macro LPP newpp 76 76 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 77 - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP 77 + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP 78 78 jz .+8 79 79 .insn s,0xb2800000,\newpp 80 80 #endif ··· 96 96 jhe .+22 97 97 .endif 98 98 lg %r9,BASED(.Lsie_loop) 99 - SPP BASED(.Lhost_id) # set host id 99 + LPP BASED(.Lhost_id) # set host id 100 100 #endif 101 101 .endm 102 102 ··· 967 967 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 968 968 sie_gmap: 969 969 lg %r14,__SF_EMPTY(%r15) # get control block pointer 970 - SPP __SF_EMPTY(%r15) # set guest id 970 + LPP __SF_EMPTY(%r15) # set guest id 971 971 sie 0(%r14) 972 972 sie_done: 973 - SPP __SF_EMPTY+16(%r15) # set host id 973 + LPP __SF_EMPTY+16(%r15) # set host id 974 974 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 975 975 sie_exit: 976 976 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+12 -4
arch/s390/kernel/ipl.c
··· 1414 1414 1415 1415 static struct kset *dump_kset; 1416 1416 1417 + static void diag308_dump(void *dump_block) 1418 + { 1419 + diag308(DIAG308_SET, dump_block); 1420 + while (1) { 1421 + if (diag308(DIAG308_DUMP, NULL) != 0x302) 1422 + break; 1423 + udelay_simple(USEC_PER_SEC); 1424 + } 1425 + } 1426 + 1417 1427 static void __dump_run(void *unused) 1418 1428 { 1419 1429 struct ccw_dev_id devid; ··· 1442 1432 __cpcmd(buf, NULL, 0, NULL); 1443 1433 break; 1444 1434 case DUMP_METHOD_CCW_DIAG: 1445 - diag308(DIAG308_SET, dump_block_ccw); 1446 - diag308(DIAG308_DUMP, NULL); 1435 + diag308_dump(dump_block_ccw); 1447 1436 break; 1448 1437 case DUMP_METHOD_FCP_DIAG: 1449 - diag308(DIAG308_SET, dump_block_fcp); 1450 - diag308(DIAG308_DUMP, NULL); 1438 + diag308_dump(dump_block_fcp); 1451 1439 break; 1452 1440 default: 1453 1441 break;
+92 -51
arch/s390/kernel/module.c
··· 65 65 vfree(module_region); 66 66 } 67 67 68 - static void 69 - check_rela(Elf_Rela *rela, struct module *me) 68 + static void check_rela(Elf_Rela *rela, struct module *me) 70 69 { 71 70 struct mod_arch_syminfo *info; 72 71 ··· 114 115 * Account for GOT and PLT relocations. We can't add sections for 115 116 * got and plt but we can increase the core module size. 116 117 */ 117 - int 118 - module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, 119 - char *secstrings, struct module *me) 118 + int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, 119 + char *secstrings, struct module *me) 120 120 { 121 121 Elf_Shdr *symtab; 122 122 Elf_Sym *symbols; ··· 177 179 return 0; 178 180 } 179 181 180 - static int 181 - apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 182 - struct module *me) 182 + static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, 183 + int sign, int bits, int shift) 184 + { 185 + unsigned long umax; 186 + long min, max; 187 + 188 + if (val & ((1UL << shift) - 1)) 189 + return -ENOEXEC; 190 + if (sign) { 191 + val = (Elf_Addr)(((long) val) >> shift); 192 + min = -(1L << (bits - 1)); 193 + max = (1L << (bits - 1)) - 1; 194 + if ((long) val < min || (long) val > max) 195 + return -ENOEXEC; 196 + } else { 197 + val >>= shift; 198 + umax = ((1UL << (bits - 1)) << 1) - 1; 199 + if ((unsigned long) val > umax) 200 + return -ENOEXEC; 201 + } 202 + 203 + if (bits == 8) 204 + *(unsigned char *) loc = val; 205 + else if (bits == 12) 206 + *(unsigned short *) loc = (val & 0xfff) | 207 + (*(unsigned short *) loc & 0xf000); 208 + else if (bits == 16) 209 + *(unsigned short *) loc = val; 210 + else if (bits == 20) 211 + *(unsigned int *) loc = (val & 0xfff) << 16 | 212 + (val & 0xff000) >> 4 | 213 + (*(unsigned int *) loc & 0xf00000ff); 214 + else if (bits == 32) 215 + *(unsigned int *) loc = val; 216 + else if (bits == 64) 217 + *(unsigned long *) loc = val; 218 + return 0; 219 + } 220 + 221 + static int apply_rela(Elf_Rela *rela, Elf_Addr 
base, Elf_Sym *symtab, 222 + const char *strtab, struct module *me) 183 223 { 184 224 struct mod_arch_syminfo *info; 185 225 Elf_Addr loc, val; 186 226 int r_type, r_sym; 227 + int rc; 187 228 188 229 /* This is where to make the change */ 189 230 loc = base + rela->r_offset; ··· 234 197 val = symtab[r_sym].st_value; 235 198 236 199 switch (r_type) { 200 + case R_390_NONE: /* No relocation. */ 201 + rc = 0; 202 + break; 237 203 case R_390_8: /* Direct 8 bit. */ 238 204 case R_390_12: /* Direct 12 bit. */ 239 205 case R_390_16: /* Direct 16 bit. */ ··· 245 205 case R_390_64: /* Direct 64 bit. */ 246 206 val += rela->r_addend; 247 207 if (r_type == R_390_8) 248 - *(unsigned char *) loc = val; 208 + rc = apply_rela_bits(loc, val, 0, 8, 0); 249 209 else if (r_type == R_390_12) 250 - *(unsigned short *) loc = (val & 0xfff) | 251 - (*(unsigned short *) loc & 0xf000); 210 + rc = apply_rela_bits(loc, val, 0, 12, 0); 252 211 else if (r_type == R_390_16) 253 - *(unsigned short *) loc = val; 212 + rc = apply_rela_bits(loc, val, 0, 16, 0); 254 213 else if (r_type == R_390_20) 255 - *(unsigned int *) loc = 256 - (*(unsigned int *) loc & 0xf00000ff) | 257 - (val & 0xfff) << 16 | (val & 0xff000) >> 4; 214 + rc = apply_rela_bits(loc, val, 1, 20, 0); 258 215 else if (r_type == R_390_32) 259 - *(unsigned int *) loc = val; 216 + rc = apply_rela_bits(loc, val, 0, 32, 0); 260 217 else if (r_type == R_390_64) 261 - *(unsigned long *) loc = val; 218 + rc = apply_rela_bits(loc, val, 0, 64, 0); 262 219 break; 263 220 case R_390_PC16: /* PC relative 16 bit. */ 264 221 case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ ··· 264 227 case R_390_PC64: /* PC relative 64 bit. 
*/ 265 228 val += rela->r_addend - loc; 266 229 if (r_type == R_390_PC16) 267 - *(unsigned short *) loc = val; 230 + rc = apply_rela_bits(loc, val, 1, 16, 0); 268 231 else if (r_type == R_390_PC16DBL) 269 - *(unsigned short *) loc = val >> 1; 232 + rc = apply_rela_bits(loc, val, 1, 16, 1); 270 233 else if (r_type == R_390_PC32DBL) 271 - *(unsigned int *) loc = val >> 1; 234 + rc = apply_rela_bits(loc, val, 1, 32, 1); 272 235 else if (r_type == R_390_PC32) 273 - *(unsigned int *) loc = val; 236 + rc = apply_rela_bits(loc, val, 1, 32, 0); 274 237 else if (r_type == R_390_PC64) 275 - *(unsigned long *) loc = val; 238 + rc = apply_rela_bits(loc, val, 1, 64, 0); 276 239 break; 277 240 case R_390_GOT12: /* 12 bit GOT offset. */ 278 241 case R_390_GOT16: /* 16 bit GOT offset. */ ··· 297 260 val = info->got_offset + rela->r_addend; 298 261 if (r_type == R_390_GOT12 || 299 262 r_type == R_390_GOTPLT12) 300 - *(unsigned short *) loc = (val & 0xfff) | 301 - (*(unsigned short *) loc & 0xf000); 263 + rc = apply_rela_bits(loc, val, 0, 12, 0); 302 264 else if (r_type == R_390_GOT16 || 303 265 r_type == R_390_GOTPLT16) 304 - *(unsigned short *) loc = val; 266 + rc = apply_rela_bits(loc, val, 0, 16, 0); 305 267 else if (r_type == R_390_GOT20 || 306 268 r_type == R_390_GOTPLT20) 307 - *(unsigned int *) loc = 308 - (*(unsigned int *) loc & 0xf00000ff) | 309 - (val & 0xfff) << 16 | (val & 0xff000) >> 4; 269 + rc = apply_rela_bits(loc, val, 1, 20, 0); 310 270 else if (r_type == R_390_GOT32 || 311 271 r_type == R_390_GOTPLT32) 312 - *(unsigned int *) loc = val; 313 - else if (r_type == R_390_GOTENT || 314 - r_type == R_390_GOTPLTENT) 315 - *(unsigned int *) loc = 316 - (val + (Elf_Addr) me->module_core - loc) >> 1; 272 + rc = apply_rela_bits(loc, val, 0, 32, 0); 317 273 else if (r_type == R_390_GOT64 || 318 274 r_type == R_390_GOTPLT64) 319 - *(unsigned long *) loc = val; 275 + rc = apply_rela_bits(loc, val, 0, 64, 0); 276 + else if (r_type == R_390_GOTENT || 277 + r_type == 
R_390_GOTPLTENT) { 278 + val += (Elf_Addr) me->module_core - loc; 279 + rc = apply_rela_bits(loc, val, 1, 32, 1); 280 + } 320 281 break; 321 282 case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ 322 283 case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ ··· 356 321 val += rela->r_addend - loc; 357 322 } 358 323 if (r_type == R_390_PLT16DBL) 359 - *(unsigned short *) loc = val >> 1; 324 + rc = apply_rela_bits(loc, val, 1, 16, 1); 360 325 else if (r_type == R_390_PLTOFF16) 361 - *(unsigned short *) loc = val; 326 + rc = apply_rela_bits(loc, val, 0, 16, 0); 362 327 else if (r_type == R_390_PLT32DBL) 363 - *(unsigned int *) loc = val >> 1; 328 + rc = apply_rela_bits(loc, val, 1, 32, 1); 364 329 else if (r_type == R_390_PLT32 || 365 330 r_type == R_390_PLTOFF32) 366 - *(unsigned int *) loc = val; 331 + rc = apply_rela_bits(loc, val, 0, 32, 0); 367 332 else if (r_type == R_390_PLT64 || 368 333 r_type == R_390_PLTOFF64) 369 - *(unsigned long *) loc = val; 334 + rc = apply_rela_bits(loc, val, 0, 64, 0); 370 335 break; 371 336 case R_390_GOTOFF16: /* 16 bit offset to GOT. */ 372 337 case R_390_GOTOFF32: /* 32 bit offset to GOT. */ ··· 374 339 val = val + rela->r_addend - 375 340 ((Elf_Addr) me->module_core + me->arch.got_offset); 376 341 if (r_type == R_390_GOTOFF16) 377 - *(unsigned short *) loc = val; 342 + rc = apply_rela_bits(loc, val, 0, 16, 0); 378 343 else if (r_type == R_390_GOTOFF32) 379 - *(unsigned int *) loc = val; 344 + rc = apply_rela_bits(loc, val, 0, 32, 0); 380 345 else if (r_type == R_390_GOTOFF64) 381 - *(unsigned long *) loc = val; 346 + rc = apply_rela_bits(loc, val, 0, 64, 0); 382 347 break; 383 348 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ 384 349 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ 385 350 val = (Elf_Addr) me->module_core + me->arch.got_offset + 386 351 rela->r_addend - loc; 387 352 if (r_type == R_390_GOTPC) 388 - *(unsigned int *) loc = val; 353 + rc = apply_rela_bits(loc, val, 1, 32, 0); 389 354 else if (r_type == R_390_GOTPCDBL) 390 - *(unsigned int *) loc = val >> 1; 355 + rc = apply_rela_bits(loc, val, 1, 32, 1); 391 356 break; 392 357 case R_390_COPY: 393 358 case R_390_GLOB_DAT: /* Create GOT entry. */ ··· 395 360 case R_390_RELATIVE: /* Adjust by program base. */ 396 361 /* Only needed if we want to support loading of 397 362 modules linked with -shared. */ 398 - break; 363 + return -ENOEXEC; 399 364 default: 400 - printk(KERN_ERR "module %s: Unknown relocation: %u\n", 365 + printk(KERN_ERR "module %s: unknown relocation: %u\n", 401 366 me->name, r_type); 402 367 return -ENOEXEC; 368 + } 369 + if (rc) { 370 + printk(KERN_ERR "module %s: relocation error for symbol %s " 371 + "(r_type %i, value 0x%lx)\n", 372 + me->name, strtab + symtab[r_sym].st_name, 373 + r_type, (unsigned long) val); 374 + return rc; 403 375 } 404 376 return 0; 405 377 } 406 378 407 - int 408 - apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, 409 - unsigned int symindex, unsigned int relsec, 410 - struct module *me) 379 + int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, 380 + unsigned int symindex, unsigned int relsec, 381 + struct module *me) 411 382 { 412 383 Elf_Addr base; 413 384 Elf_Sym *symtab; ··· 429 388 n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); 430 389 431 390 for (i = 0; i < n; i++, rela++) { 432 - rc = apply_rela(rela, base, symtab, me); 391 + rc = apply_rela(rela, base, symtab, strtab, me); 433 392 if (rc) 434 393 return rc; 435 394 }
+1 -1
arch/s390/kernel/nmi.c
··· 293 293 * retry this instruction. 294 294 */ 295 295 spin_lock(&ipd_lock); 296 - tmp = get_clock(); 296 + tmp = get_tod_clock(); 297 297 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) 298 298 ipd_count++; 299 299 else
+6 -7
arch/s390/kernel/perf_cpum_cf.c
··· 367 367 if (ev >= PERF_CPUM_CF_MAX_CTR) 368 368 return -EINVAL; 369 369 370 - /* The CPU measurement counter facility does not have any interrupts 371 - * to do sampling. Sampling must be provided by external means, 372 - * for example, by timers. 373 - */ 374 - if (hwc->sample_period) 375 - return -EINVAL; 376 - 377 370 /* Use the hardware perf event structure to store the counter number 378 371 * in 'config' member and the counter set to which the counter belongs 379 372 * in the 'config_base'. The counter set (config_base) is then used ··· 411 418 case PERF_TYPE_HARDWARE: 412 419 case PERF_TYPE_HW_CACHE: 413 420 case PERF_TYPE_RAW: 421 + /* The CPU measurement counter facility does not have overflow 422 + * interrupts to do sampling. Sampling must be provided by 423 + * external means, for example, by timers. 424 + */ 425 + if (is_sampling_event(event)) 426 + return -ENOENT; 414 427 err = __hw_perf_event_init(event); 415 428 break; 416 429 default:
+5 -5
arch/s390/kernel/smp.c
··· 365 365 u64 end; 366 366 int cpu; 367 367 368 - end = get_clock() + (1000000UL << 12); 368 + end = get_tod_clock() + (1000000UL << 12); 369 369 for_each_cpu(cpu, cpumask) { 370 370 struct pcpu *pcpu = pcpu_devices + cpu; 371 371 set_bit(ec_stop_cpu, &pcpu->ec_mask); 372 372 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, 373 373 0, NULL) == SIGP_CC_BUSY && 374 - get_clock() < end) 374 + get_tod_clock() < end) 375 375 cpu_relax(); 376 376 } 377 - while (get_clock() < end) { 377 + while (get_tod_clock() < end) { 378 378 for_each_cpu(cpu, cpumask) 379 379 if (pcpu_stopped(pcpu_devices + cpu)) 380 380 cpumask_clear_cpu(cpu, cpumask); ··· 694 694 */ 695 695 static void __cpuinit smp_start_secondary(void *cpuvoid) 696 696 { 697 - S390_lowcore.last_update_clock = get_clock(); 697 + S390_lowcore.last_update_clock = get_tod_clock(); 698 698 S390_lowcore.restart_stack = (unsigned long) restart_stack; 699 699 S390_lowcore.restart_fn = (unsigned long) do_restart; 700 700 S390_lowcore.restart_data = 0; ··· 947 947 unsigned int sequence; 948 948 949 949 do { 950 - now = get_clock(); 950 + now = get_tod_clock(); 951 951 sequence = ACCESS_ONCE(idle->sequence); 952 952 idle_time = ACCESS_ONCE(idle->idle_time); 953 953 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+13 -13
arch/s390/kernel/time.c
··· 63 63 */ 64 64 unsigned long long notrace __kprobes sched_clock(void) 65 65 { 66 - return tod_to_ns(get_clock_monotonic()); 66 + return tod_to_ns(get_tod_clock_monotonic()); 67 67 } 68 68 69 69 /* ··· 194 194 195 195 void read_persistent_clock(struct timespec *ts) 196 196 { 197 - tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); 197 + tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts); 198 198 } 199 199 200 200 void read_boot_clock(struct timespec *ts) ··· 204 204 205 205 static cycle_t read_tod_clock(struct clocksource *cs) 206 206 { 207 - return get_clock(); 207 + return get_tod_clock(); 208 208 } 209 209 210 210 static struct clocksource clocksource_tod = { ··· 342 342 343 343 sw_ptr = &get_cpu_var(clock_sync_word); 344 344 sw0 = atomic_read(sw_ptr); 345 - *clock = get_clock(); 345 + *clock = get_tod_clock(); 346 346 sw1 = atomic_read(sw_ptr); 347 347 put_cpu_var(clock_sync_word); 348 348 if (sw0 == sw1 && (sw0 & 0x80000000U)) ··· 486 486 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, 487 487 .es = 0, .sl = 0 }; 488 488 if (etr_setr(&etr_eacr) == 0) { 489 - etr_tolec = get_clock(); 489 + etr_tolec = get_tod_clock(); 490 490 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); 491 491 if (etr_port0_online && etr_port1_online) 492 492 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); ··· 768 768 __ctl_set_bit(14, 21); 769 769 __ctl_set_bit(0, 29); 770 770 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; 771 - old_clock = get_clock(); 772 - if (set_clock(clock) == 0) { 771 + old_clock = get_tod_clock(); 772 + if (set_tod_clock(clock) == 0) { 773 773 __udelay(1); /* Wait for the clock to start. */ 774 774 __ctl_clear_bit(0, 29); 775 775 __ctl_clear_bit(14, 21); ··· 845 845 * assume that this can have caused an stepping 846 846 * port switch. 
847 847 */ 848 - etr_tolec = get_clock(); 848 + etr_tolec = get_tod_clock(); 849 849 eacr.p0 = etr_port0_online; 850 850 if (!eacr.p0) 851 851 eacr.e0 = 0; ··· 858 858 * assume that this can have caused an stepping 859 859 * port switch. 860 860 */ 861 - etr_tolec = get_clock(); 861 + etr_tolec = get_tod_clock(); 862 862 eacr.p1 = etr_port1_online; 863 863 if (!eacr.p1) 864 864 eacr.e1 = 0; ··· 974 974 etr_eacr = eacr; 975 975 etr_setr(&etr_eacr); 976 976 if (dp_changed) 977 - etr_tolec = get_clock(); 977 + etr_tolec = get_tod_clock(); 978 978 } 979 979 980 980 /* ··· 1012 1012 /* Store aib to get the current ETR status word. */ 1013 1013 BUG_ON(etr_stetr(&aib) != 0); 1014 1014 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ 1015 - now = get_clock(); 1015 + now = get_tod_clock(); 1016 1016 1017 1017 /* 1018 1018 * Update the port information if the last stepping port change ··· 1537 1537 if (stp_info.todoff[0] || stp_info.todoff[1] || 1538 1538 stp_info.todoff[2] || stp_info.todoff[3] || 1539 1539 stp_info.tmd != 2) { 1540 - old_clock = get_clock(); 1540 + old_clock = get_tod_clock(); 1541 1541 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); 1542 1542 if (rc == 0) { 1543 - delta = adjust_time(old_clock, get_clock(), 0); 1543 + delta = adjust_time(old_clock, get_tod_clock(), 0); 1544 1544 fixup_clock_comparator(delta); 1545 1545 rc = chsc_sstpi(stp_page, &stp_info, 1546 1546 sizeof(struct stp_sstpi));
+4
arch/s390/kernel/vmlinux.lds.S
··· 75 75 EXIT_TEXT 76 76 } 77 77 78 + .exit.data : { 79 + EXIT_DATA 80 + } 81 + 78 82 /* early.c uses stsi, which requires page aligned data. */ 79 83 . = ALIGN(PAGE_SIZE); 80 84 INIT_DATA_SECTION(0x100)
+1 -1
arch/s390/kernel/vtime.c
··· 191 191 unsigned int sequence; 192 192 193 193 do { 194 - now = get_clock(); 194 + now = get_tod_clock(); 195 195 sequence = ACCESS_ONCE(idle->sequence); 196 196 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 197 197 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+3 -3
arch/s390/kvm/interrupt.c
··· 362 362 } 363 363 364 364 if ((!rc) && (vcpu->arch.sie_block->ckc < 365 - get_clock() + vcpu->arch.sie_block->epoch)) { 365 + get_tod_clock() + vcpu->arch.sie_block->epoch)) { 366 366 if ((!psw_extint_disabled(vcpu)) && 367 367 (vcpu->arch.sie_block->gcr[0] & 0x800ul)) 368 368 rc = 1; ··· 402 402 goto no_timer; 403 403 } 404 404 405 - now = get_clock() + vcpu->arch.sie_block->epoch; 405 + now = get_tod_clock() + vcpu->arch.sie_block->epoch; 406 406 if (vcpu->arch.sie_block->ckc < now) { 407 407 __unset_cpu_idle(vcpu); 408 408 return 0; ··· 492 492 } 493 493 494 494 if ((vcpu->arch.sie_block->ckc < 495 - get_clock() + vcpu->arch.sie_block->epoch)) 495 + get_tod_clock() + vcpu->arch.sie_block->epoch)) 496 496 __try_deliver_ckc_interrupt(vcpu); 497 497 498 498 if (atomic_read(&fi->active)) {
+1 -1
arch/s390/kvm/kvm-s390.c
··· 147 147 r = KVM_MAX_VCPUS; 148 148 break; 149 149 case KVM_CAP_S390_COW: 150 - r = sclp_get_fac85() & 0x2; 150 + r = MACHINE_HAS_ESOP; 151 151 break; 152 152 default: 153 153 r = 0;
+8 -8
arch/s390/lib/delay.c
··· 32 32 unsigned long cr0, cr6, new; 33 33 u64 clock_saved, end; 34 34 35 - end = get_clock() + (usecs << 12); 35 + end = get_tod_clock() + (usecs << 12); 36 36 clock_saved = local_tick_disable(); 37 37 __ctl_store(cr0, 0, 0); 38 38 __ctl_store(cr6, 6, 6); ··· 45 45 set_clock_comparator(end); 46 46 vtime_stop_cpu(); 47 47 local_irq_disable(); 48 - } while (get_clock() < end); 48 + } while (get_tod_clock() < end); 49 49 lockdep_on(); 50 50 __ctl_load(cr0, 0, 0); 51 51 __ctl_load(cr6, 6, 6); ··· 56 56 { 57 57 u64 clock_saved, end; 58 58 59 - end = get_clock() + (usecs << 12); 59 + end = get_tod_clock() + (usecs << 12); 60 60 do { 61 61 clock_saved = 0; 62 62 if (end < S390_lowcore.clock_comparator) { ··· 67 67 local_irq_disable(); 68 68 if (clock_saved) 69 69 local_tick_enable(clock_saved); 70 - } while (get_clock() < end); 70 + } while (get_tod_clock() < end); 71 71 } 72 72 73 73 /* ··· 111 111 { 112 112 u64 end; 113 113 114 - end = get_clock() + (usecs << 12); 115 - while (get_clock() < end) 114 + end = get_tod_clock() + (usecs << 12); 115 + while (get_tod_clock() < end) 116 116 cpu_relax(); 117 117 } 118 118 ··· 122 122 123 123 nsecs <<= 9; 124 124 do_div(nsecs, 125); 125 - end = get_clock() + nsecs; 125 + end = get_tod_clock() + nsecs; 126 126 if (nsecs & ~0xfffUL) 127 127 __udelay(nsecs >> 12); 128 - while (get_clock() < end) 128 + while (get_tod_clock() < end) 129 129 barrier(); 130 130 } 131 131 EXPORT_SYMBOL(__ndelay);
+1 -1
arch/s390/lib/uaccess_pt.c
··· 50 50 ptep = pte_offset_map(pmd, addr); 51 51 if (!pte_present(*ptep)) 52 52 return -0x11UL; 53 - if (write && !pte_write(*ptep)) 53 + if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) 54 54 return -0x04UL; 55 55 56 56 return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+6 -3
arch/s390/mm/mmap.c
··· 101 101 102 102 #else 103 103 104 - int s390_mmap_check(unsigned long addr, unsigned long len) 104 + int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) 105 105 { 106 106 int rc; 107 107 108 - if (!is_compat_task() && 109 - len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) { 108 + if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) 109 + return 0; 110 + if (!(flags & MAP_FIXED)) 111 + addr = 0; 112 + if ((addr + len) >= TASK_SIZE) { 110 113 rc = crst_table_upgrade(current->mm, 1UL << 53); 111 114 if (rc) 112 115 return rc;
+1 -1
arch/s390/mm/pageattr.c
··· 127 127 pte_val(*pte) = _PAGE_TYPE_EMPTY; 128 128 continue; 129 129 } 130 - *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); 130 + pte_val(*pte) = __pa(address); 131 131 } 132 132 } 133 133
+10 -14
arch/s390/mm/vmem.c
··· 85 85 pud_t *pu_dir; 86 86 pmd_t *pm_dir; 87 87 pte_t *pt_dir; 88 - pte_t pte; 89 88 int ret = -ENOMEM; 90 89 91 90 while (address < end) { 92 - pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0)); 93 91 pg_dir = pgd_offset_k(address); 94 92 if (pgd_none(*pg_dir)) { 95 93 pu_dir = vmem_pud_alloc(); ··· 99 101 #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 100 102 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && 101 103 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { 102 - pte_val(pte) |= _REGION3_ENTRY_LARGE; 103 - pte_val(pte) |= _REGION_ENTRY_TYPE_R3; 104 - pud_val(*pu_dir) = pte_val(pte); 104 + pud_val(*pu_dir) = __pa(address) | 105 + _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | 106 + (ro ? _REGION_ENTRY_RO : 0); 105 107 address += PUD_SIZE; 106 108 continue; 107 109 } ··· 116 118 #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 117 119 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && 118 120 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { 119 - pte_val(pte) |= _SEGMENT_ENTRY_LARGE; 120 - pmd_val(*pm_dir) = pte_val(pte); 121 + pmd_val(*pm_dir) = __pa(address) | 122 + _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | 123 + (ro ? _SEGMENT_ENTRY_RO : 0); 121 124 address += PMD_SIZE; 122 125 continue; 123 126 } ··· 131 132 } 132 133 133 134 pt_dir = pte_offset_kernel(pm_dir, address); 134 - *pt_dir = pte; 135 + pte_val(*pt_dir) = __pa(address) | (ro ? 
_PAGE_RO : 0); 135 136 address += PAGE_SIZE; 136 137 } 137 138 ret = 0; ··· 198 199 pud_t *pu_dir; 199 200 pmd_t *pm_dir; 200 201 pte_t *pt_dir; 201 - pte_t pte; 202 202 int ret = -ENOMEM; 203 203 204 204 start_addr = (unsigned long) start; ··· 235 237 new_page = vmemmap_alloc_block(PMD_SIZE, node); 236 238 if (!new_page) 237 239 goto out; 238 - pte = mk_pte_phys(__pa(new_page), PAGE_RW); 239 - pte_val(pte) |= _SEGMENT_ENTRY_LARGE; 240 - pmd_val(*pm_dir) = pte_val(pte); 240 + pmd_val(*pm_dir) = __pa(new_page) | 241 + _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE; 241 242 address = (address + PMD_SIZE) & PMD_MASK; 242 243 continue; 243 244 } ··· 257 260 new_page =__pa(vmem_alloc_pages(0)); 258 261 if (!new_page) 259 262 goto out; 260 - pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); 261 - *pt_dir = pte; 263 + pte_val(*pt_dir) = __pa(new_page); 262 264 } 263 265 address += PAGE_SIZE; 264 266 }
+21
arch/s390/net/bpf_jit_comp.c
··· 7 7 */ 8 8 #include <linux/moduleloader.h> 9 9 #include <linux/netdevice.h> 10 + #include <linux/if_vlan.h> 10 11 #include <linux/filter.h> 11 12 #include <asm/cacheflush.h> 12 13 #include <asm/processor.h> ··· 255 254 case BPF_S_ANC_HATYPE: 256 255 case BPF_S_ANC_RXHASH: 257 256 case BPF_S_ANC_CPU: 257 + case BPF_S_ANC_VLAN_TAG: 258 + case BPF_S_ANC_VLAN_TAG_PRESENT: 258 259 case BPF_S_RET_K: 259 260 /* first instruction sets A register */ 260 261 break; ··· 701 698 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); 702 699 /* l %r5,<d(rxhash)>(%r2) */ 703 700 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash)); 701 + break; 702 + case BPF_S_ANC_VLAN_TAG: 703 + case BPF_S_ANC_VLAN_TAG_PRESENT: 704 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 705 + BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); 706 + /* lhi %r5,0 */ 707 + EMIT4(0xa7580000); 708 + /* icm %r5,3,<d(vlan_tci)>(%r2) */ 709 + EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci)); 710 + if (filter->code == BPF_S_ANC_VLAN_TAG) { 711 + /* nill %r5,0xefff */ 712 + EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT); 713 + } else { 714 + /* nill %r5,0x1000 */ 715 + EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT); 716 + /* srl %r5,12 */ 717 + EMIT4_DISP(0x88500000, 12); 718 + } 704 719 break; 705 720 case BPF_S_ANC_CPU: /* A = smp_processor_id() */ 706 721 #ifdef CONFIG_SMP
+25 -10
arch/s390/pci/pci.c
··· 51 51 DEFINE_MUTEX(zpci_list_lock); 52 52 EXPORT_SYMBOL_GPL(zpci_list_lock); 53 53 54 - struct pci_hp_callback_ops hotplug_ops; 55 - EXPORT_SYMBOL_GPL(hotplug_ops); 54 + static struct pci_hp_callback_ops *hotplug_ops; 56 55 57 56 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 58 57 static DEFINE_SPINLOCK(zpci_domain_lock); ··· 973 974 974 975 mutex_lock(&zpci_list_lock); 975 976 list_add_tail(&zdev->entry, &zpci_list); 976 - if (hotplug_ops.create_slot) 977 - hotplug_ops.create_slot(zdev); 977 + if (hotplug_ops) 978 + hotplug_ops->create_slot(zdev); 978 979 mutex_unlock(&zpci_list_lock); 979 980 980 981 if (zdev->state == ZPCI_FN_STATE_STANDBY) ··· 988 989 out_start: 989 990 mutex_lock(&zpci_list_lock); 990 991 list_del(&zdev->entry); 991 - if (hotplug_ops.remove_slot) 992 - hotplug_ops.remove_slot(zdev); 992 + if (hotplug_ops) 993 + hotplug_ops->remove_slot(zdev); 993 994 mutex_unlock(&zpci_list_lock); 994 995 out_bus: 995 996 zpci_free_domain(zdev); ··· 1071 1072 kmem_cache_destroy(zdev_fmb_cache); 1072 1073 } 1073 1074 1074 - unsigned int pci_probe = 1; 1075 - EXPORT_SYMBOL_GPL(pci_probe); 1075 + void zpci_register_hp_ops(struct pci_hp_callback_ops *ops) 1076 + { 1077 + mutex_lock(&zpci_list_lock); 1078 + hotplug_ops = ops; 1079 + mutex_unlock(&zpci_list_lock); 1080 + } 1081 + EXPORT_SYMBOL_GPL(zpci_register_hp_ops); 1082 + 1083 + void zpci_deregister_hp_ops(void) 1084 + { 1085 + mutex_lock(&zpci_list_lock); 1086 + hotplug_ops = NULL; 1087 + mutex_unlock(&zpci_list_lock); 1088 + } 1089 + EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); 1090 + 1091 + unsigned int s390_pci_probe = 1; 1092 + EXPORT_SYMBOL_GPL(s390_pci_probe); 1076 1093 1077 1094 char * __init pcibios_setup(char *str) 1078 1095 { 1079 1096 if (!strcmp(str, "off")) { 1080 - pci_probe = 0; 1097 + s390_pci_probe = 0; 1081 1098 return NULL; 1082 1099 } 1083 1100 return str; ··· 1103 1088 { 1104 1089 int rc; 1105 1090 1106 - if (!pci_probe) 1091 + if (!s390_pci_probe) 1107 1092 return 0; 1108 1093 
1109 1094 if (!test_facility(2) || !test_facility(69)
+7 -7
arch/s390/pci/pci_clp.c
··· 19 19 * Call Logical Processor 20 20 * Retry logic is handled by the caller. 21 21 */ 22 - static inline u8 clp_instr(void *req) 22 + static inline u8 clp_instr(void *data) 23 23 { 24 - u64 ilpm; 24 + struct { u8 _[CLP_BLK_SIZE]; } *req = data; 25 + u64 ignored; 25 26 u8 cc; 26 27 27 28 asm volatile ( 28 - " .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n" 29 + " .insn rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n" 29 30 " ipm %[cc]\n" 30 31 " srl %[cc],28\n" 31 - : [cc] "=d" (cc), [ilpm] "=d" (ilpm) 32 + : [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req) 32 33 : [req] "a" (req) 33 - : "cc", "memory"); 34 + : "cc"); 34 35 return cc; 35 36 } 36 37 37 38 static void *clp_alloc_block(void) 38 39 { 39 - struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); 40 - return (page) ? page_address(page) : NULL; 40 + return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); 41 41 } 42 42 43 43 static void clp_free_block(void *ptr)
+1 -1
drivers/ata/Kconfig
··· 14 14 tristate "Serial ATA and Parallel ATA drivers" 15 15 depends on HAS_IOMEM 16 16 depends on BLOCK 17 - depends on !(M32R || M68K) || BROKEN 17 + depends on !(M32R || M68K || S390) || BROKEN 18 18 select SCSI 19 19 ---help--- 20 20 If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
+2 -2
drivers/gpio/Kconfig
··· 277 277 278 278 config GPIO_VX855 279 279 tristate "VIA VX855/VX875 GPIO" 280 - depends on PCI 280 + depends on PCI && GENERIC_HARDIRQS 281 281 select MFD_CORE 282 282 select MFD_VX855 283 283 help ··· 599 599 600 600 config GPIO_RDC321X 601 601 tristate "RDC R-321x GPIO support" 602 - depends on PCI 602 + depends on PCI && GENERIC_HARDIRQS 603 603 select MFD_CORE 604 604 select MFD_RDC321X 605 605 help
+1 -1
drivers/media/radio/Kconfig
··· 180 180 181 181 config RADIO_WL1273 182 182 tristate "Texas Instruments WL1273 I2C FM Radio" 183 - depends on I2C && VIDEO_V4L2 183 + depends on I2C && VIDEO_V4L2 && GENERIC_HARDIRQS 184 184 select MFD_CORE 185 185 select MFD_WL1273_CORE 186 186 select FW_LOADER
+1
drivers/net/ethernet/cadence/Kconfig
··· 22 22 23 23 config ARM_AT91_ETHER 24 24 tristate "AT91RM9200 Ethernet support" 25 + depends on GENERIC_HARDIRQS 25 26 select NET_CORE 26 27 select MACB 27 28 ---help---
-1
drivers/net/phy/Kconfig
··· 4 4 5 5 menuconfig PHYLIB 6 6 tristate "PHY Device support and infrastructure" 7 - depends on !S390 8 7 depends on NETDEVICES 9 8 help 10 9 Ethernet controllers are usually attached to PHY
+1 -1
drivers/parport/Kconfig
··· 35 35 36 36 config PARPORT_PC 37 37 tristate "PC-style hardware" 38 - depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \ 38 + depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \ 39 39 (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA 40 40 ---help--- 41 41 You should say Y here if you have a PC-style parallel port. All
+26 -34
drivers/pci/hotplug/s390_pci_hpc.c
··· 172 172 return -ENOMEM; 173 173 } 174 174 175 - static int __init init_pci_slots(void) 176 - { 177 - struct zpci_dev *zdev; 178 - int device = 0; 179 - 180 - /* 181 - * Create a structure for each slot, and register that slot 182 - * with the pci_hotplug subsystem. 183 - */ 184 - mutex_lock(&zpci_list_lock); 185 - list_for_each_entry(zdev, &zpci_list, entry) { 186 - init_pci_slot(zdev); 187 - device++; 188 - } 189 - 190 - mutex_unlock(&zpci_list_lock); 191 - return (device) ? 0 : -ENODEV; 192 - } 193 - 194 175 static void exit_pci_slot(struct zpci_dev *zdev) 195 176 { 196 177 struct list_head *tmp, *n; ··· 184 203 list_del(&slot->slot_list); 185 204 pci_hp_deregister(slot->hotplug_slot); 186 205 } 206 + } 207 + 208 + static struct pci_hp_callback_ops hp_ops = { 209 + .create_slot = init_pci_slot, 210 + .remove_slot = exit_pci_slot, 211 + }; 212 + 213 + static void __init init_pci_slots(void) 214 + { 215 + struct zpci_dev *zdev; 216 + 217 + /* 218 + * Create a structure for each slot, and register that slot 219 + * with the pci_hotplug subsystem. 220 + */ 221 + mutex_lock(&zpci_list_lock); 222 + list_for_each_entry(zdev, &zpci_list, entry) { 223 + init_pci_slot(zdev); 224 + } 225 + mutex_unlock(&zpci_list_lock); 187 226 } 188 227 189 228 static void __exit exit_pci_slots(void) ··· 225 224 226 225 static int __init pci_hotplug_s390_init(void) 227 226 { 228 - /* 229 - * Do specific initialization stuff for your driver here 230 - * like initializing your controller hardware (if any) and 231 - * determining the number of slots you have in the system 232 - * right now. 
233 - */ 234 - 235 - if (!pci_probe) 227 + if (!s390_pci_probe) 236 228 return -EOPNOTSUPP; 237 229 238 - /* register callbacks for slot handling from arch code */ 239 - mutex_lock(&zpci_list_lock); 240 - hotplug_ops.create_slot = init_pci_slot; 241 - hotplug_ops.remove_slot = exit_pci_slot; 242 - mutex_unlock(&zpci_list_lock); 243 - pr_info("registered hotplug slot callbacks\n"); 244 - return init_pci_slots(); 230 + zpci_register_hp_ops(&hp_ops); 231 + init_pci_slots(); 232 + 233 + return 0; 245 234 } 246 235 247 236 static void __exit pci_hotplug_s390_exit(void) 248 237 { 249 238 exit_pci_slots(); 239 + zpci_deregister_hp_ops(); 250 240 } 251 241 252 242 module_init(pci_hotplug_s390_init);
+13 -10
drivers/s390/block/dasd.c
··· 1352 1352 switch (rc) { 1353 1353 case 0: /* termination successful */ 1354 1354 cqr->status = DASD_CQR_CLEAR_PENDING; 1355 - cqr->stopclk = get_clock(); 1355 + cqr->stopclk = get_tod_clock(); 1356 1356 cqr->starttime = 0; 1357 1357 DBF_DEV_EVENT(DBF_DEBUG, device, 1358 1358 "terminate cqr %p successful", ··· 1420 1420 cqr->status = DASD_CQR_ERROR; 1421 1421 return -EIO; 1422 1422 } 1423 - cqr->startclk = get_clock(); 1423 + cqr->startclk = get_tod_clock(); 1424 1424 cqr->starttime = jiffies; 1425 1425 cqr->retries--; 1426 1426 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { ··· 1623 1623 return; 1624 1624 } 1625 1625 1626 - now = get_clock(); 1626 + now = get_tod_clock(); 1627 1627 cqr = (struct dasd_ccw_req *) intparm; 1628 1628 /* check for conditions that should be handled immediately */ 1629 1629 if (!cqr || ··· 1963 1963 } 1964 1964 break; 1965 1965 case DASD_CQR_QUEUED: 1966 - cqr->stopclk = get_clock(); 1966 + cqr->stopclk = get_tod_clock(); 1967 1967 cqr->status = DASD_CQR_CLEARED; 1968 1968 break; 1969 1969 default: /* no need to modify the others */ ··· 2210 2210 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2211 2211 } 2212 2212 2213 - maincqr->endclk = get_clock(); 2213 + maincqr->endclk = get_tod_clock(); 2214 2214 if ((maincqr->status != DASD_CQR_DONE) && 2215 2215 (maincqr->intrc != -ERESTARTSYS)) 2216 2216 dasd_log_sense(maincqr, &maincqr->irb); ··· 2340 2340 "Cancelling request %p failed with rc=%d\n", 2341 2341 cqr, rc); 2342 2342 } else { 2343 - cqr->stopclk = get_clock(); 2343 + cqr->stopclk = get_tod_clock(); 2344 2344 } 2345 2345 break; 2346 2346 default: /* already finished or clear pending - do nothing */ ··· 2568 2568 } 2569 2569 2570 2570 /* Rechain finished requests to final queue */ 2571 - cqr->endclk = get_clock(); 2571 + cqr->endclk = get_tod_clock(); 2572 2572 list_move_tail(&cqr->blocklist, final_queue); 2573 2573 } 2574 2574 } ··· 2711 2711 } 2712 2712 /* call the callback function */ 2713 2713 
spin_lock_irq(&block->request_queue_lock); 2714 - cqr->endclk = get_clock(); 2714 + cqr->endclk = get_tod_clock(); 2715 2715 list_del_init(&cqr->blocklist); 2716 2716 __dasd_cleanup_cqr(cqr); 2717 2717 spin_unlock_irq(&block->request_queue_lock); ··· 3042 3042 cdev->handler = NULL; 3043 3043 3044 3044 device = dasd_device_from_cdev(cdev); 3045 - if (IS_ERR(device)) 3045 + if (IS_ERR(device)) { 3046 + dasd_remove_sysfs_files(cdev); 3046 3047 return; 3048 + } 3047 3049 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3048 3050 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3049 3051 /* Already doing offline processing */ 3050 3052 dasd_put_device(device); 3053 + dasd_remove_sysfs_files(cdev); 3051 3054 return; 3052 3055 } 3053 3056 /* ··· 3507 3504 cqr->memdev = device; 3508 3505 cqr->expires = 10*HZ; 3509 3506 cqr->retries = 256; 3510 - cqr->buildclk = get_clock(); 3507 + cqr->buildclk = get_tod_clock(); 3511 3508 cqr->status = DASD_CQR_FILLED; 3512 3509 return cqr; 3513 3510 }
+4 -4
drivers/s390/block/dasd_3990_erp.c
··· 229 229 dctl_cqr->expires = 5 * 60 * HZ; 230 230 dctl_cqr->retries = 2; 231 231 232 - dctl_cqr->buildclk = get_clock(); 232 + dctl_cqr->buildclk = get_tod_clock(); 233 233 234 234 dctl_cqr->status = DASD_CQR_FILLED; 235 235 ··· 1719 1719 erp->magic = default_erp->magic; 1720 1720 erp->expires = default_erp->expires; 1721 1721 erp->retries = 256; 1722 - erp->buildclk = get_clock(); 1722 + erp->buildclk = get_tod_clock(); 1723 1723 erp->status = DASD_CQR_FILLED; 1724 1724 1725 1725 /* remove the default erp */ ··· 2322 2322 DBF_DEV_EVENT(DBF_ERR, device, "%s", 2323 2323 "Unable to allocate ERP request"); 2324 2324 cqr->status = DASD_CQR_FAILED; 2325 - cqr->stopclk = get_clock (); 2325 + cqr->stopclk = get_tod_clock(); 2326 2326 } else { 2327 2327 DBF_DEV_EVENT(DBF_ERR, device, 2328 2328 "Unable to allocate ERP request " ··· 2364 2364 erp->magic = cqr->magic; 2365 2365 erp->expires = cqr->expires; 2366 2366 erp->retries = 256; 2367 - erp->buildclk = get_clock(); 2367 + erp->buildclk = get_tod_clock(); 2368 2368 erp->status = DASD_CQR_FILLED; 2369 2369 2370 2370 return erp;
+2 -2
drivers/s390/block/dasd_alias.c
··· 448 448 ccw->count = sizeof(*(lcu->uac)); 449 449 ccw->cda = (__u32)(addr_t) lcu->uac; 450 450 451 - cqr->buildclk = get_clock(); 451 + cqr->buildclk = get_tod_clock(); 452 452 cqr->status = DASD_CQR_FILLED; 453 453 454 454 /* need to unset flag here to detect race with summary unit check */ ··· 733 733 cqr->memdev = device; 734 734 cqr->block = NULL; 735 735 cqr->expires = 5 * HZ; 736 - cqr->buildclk = get_clock(); 736 + cqr->buildclk = get_tod_clock(); 737 737 cqr->status = DASD_CQR_FILLED; 738 738 739 739 rc = dasd_sleep_on_immediatly(cqr);
+5 -5
drivers/s390/block/dasd_diag.c
··· 184 184 private->iob.bio_list = dreq->bio; 185 185 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 186 186 187 - cqr->startclk = get_clock(); 187 + cqr->startclk = get_tod_clock(); 188 188 cqr->starttime = jiffies; 189 189 cqr->retries--; 190 190 191 191 rc = dia250(&private->iob, RW_BIO); 192 192 switch (rc) { 193 193 case 0: /* Synchronous I/O finished successfully */ 194 - cqr->stopclk = get_clock(); 194 + cqr->stopclk = get_tod_clock(); 195 195 cqr->status = DASD_CQR_SUCCESS; 196 196 /* Indicate to calling function that only a dasd_schedule_bh() 197 197 and no timer is needed */ ··· 222 222 mdsk_term_io(device); 223 223 mdsk_init_io(device, device->block->bp_block, 0, NULL); 224 224 cqr->status = DASD_CQR_CLEAR_PENDING; 225 - cqr->stopclk = get_clock(); 225 + cqr->stopclk = get_tod_clock(); 226 226 dasd_schedule_device_bh(device); 227 227 return 0; 228 228 } ··· 276 276 return; 277 277 } 278 278 279 - cqr->stopclk = get_clock(); 279 + cqr->stopclk = get_tod_clock(); 280 280 281 281 expires = 0; 282 282 if ((ext_code.subcode & 0xff) == 0) { ··· 556 556 } 557 557 } 558 558 cqr->retries = DIAG_MAX_RETRIES; 559 - cqr->buildclk = get_clock(); 559 + cqr->buildclk = get_tod_clock(); 560 560 if (blk_noretry_request(req) || 561 561 block->base->features & DASD_FEATURE_FAILFAST) 562 562 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+15 -15
drivers/s390/block/dasd_eckd.c
··· 862 862 cqr->expires = 10*HZ; 863 863 cqr->lpm = lpm; 864 864 cqr->retries = 256; 865 - cqr->buildclk = get_clock(); 865 + cqr->buildclk = get_tod_clock(); 866 866 cqr->status = DASD_CQR_FILLED; 867 867 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 868 868 } ··· 1449 1449 ccw->count = sizeof(struct dasd_rssd_features); 1450 1450 ccw->cda = (__u32)(addr_t) features; 1451 1451 1452 - cqr->buildclk = get_clock(); 1452 + cqr->buildclk = get_tod_clock(); 1453 1453 cqr->status = DASD_CQR_FILLED; 1454 1454 rc = dasd_sleep_on(cqr); 1455 1455 if (rc == 0) { ··· 1501 1501 cqr->block = NULL; 1502 1502 cqr->retries = 256; 1503 1503 cqr->expires = 10*HZ; 1504 - cqr->buildclk = get_clock(); 1504 + cqr->buildclk = get_tod_clock(); 1505 1505 cqr->status = DASD_CQR_FILLED; 1506 1506 return cqr; 1507 1507 } ··· 1841 1841 cqr->startdev = device; 1842 1842 cqr->memdev = device; 1843 1843 cqr->retries = 255; 1844 - cqr->buildclk = get_clock(); 1844 + cqr->buildclk = get_tod_clock(); 1845 1845 cqr->status = DASD_CQR_FILLED; 1846 1846 return cqr; 1847 1847 } ··· 2241 2241 fcp->startdev = device; 2242 2242 fcp->memdev = device; 2243 2243 fcp->retries = 256; 2244 - fcp->buildclk = get_clock(); 2244 + fcp->buildclk = get_tod_clock(); 2245 2245 fcp->status = DASD_CQR_FILLED; 2246 2246 return fcp; 2247 2247 } ··· 2530 2530 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2531 2531 cqr->lpm = startdev->path_data.ppm; 2532 2532 cqr->retries = 256; 2533 - cqr->buildclk = get_clock(); 2533 + cqr->buildclk = get_tod_clock(); 2534 2534 cqr->status = DASD_CQR_FILLED; 2535 2535 return cqr; 2536 2536 } ··· 2705 2705 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2706 2706 cqr->lpm = startdev->path_data.ppm; 2707 2707 cqr->retries = 256; 2708 - cqr->buildclk = get_clock(); 2708 + cqr->buildclk = get_tod_clock(); 2709 2709 cqr->status = DASD_CQR_FILLED; 2710 2710 return cqr; 2711 2711 } ··· 2998 2998 cqr->expires = startdev->default_expires * HZ; /* 
default 5 minutes */ 2999 2999 cqr->lpm = startdev->path_data.ppm; 3000 3000 cqr->retries = 256; 3001 - cqr->buildclk = get_clock(); 3001 + cqr->buildclk = get_tod_clock(); 3002 3002 cqr->status = DASD_CQR_FILLED; 3003 3003 return cqr; 3004 3004 out_error: ··· 3201 3201 cqr->expires = startdev->default_expires * HZ; 3202 3202 cqr->lpm = startdev->path_data.ppm; 3203 3203 cqr->retries = 256; 3204 - cqr->buildclk = get_clock(); 3204 + cqr->buildclk = get_tod_clock(); 3205 3205 cqr->status = DASD_CQR_FILLED; 3206 3206 3207 3207 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) ··· 3402 3402 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3403 3403 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3404 3404 cqr->expires = 2 * HZ; 3405 - cqr->buildclk = get_clock(); 3405 + cqr->buildclk = get_tod_clock(); 3406 3406 cqr->status = DASD_CQR_FILLED; 3407 3407 3408 3408 rc = dasd_sleep_on_immediatly(cqr); ··· 3457 3457 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3458 3458 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3459 3459 cqr->expires = 2 * HZ; 3460 - cqr->buildclk = get_clock(); 3460 + cqr->buildclk = get_tod_clock(); 3461 3461 cqr->status = DASD_CQR_FILLED; 3462 3462 3463 3463 rc = dasd_sleep_on_immediatly(cqr); ··· 3511 3511 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3512 3512 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3513 3513 cqr->expires = 2 * HZ; 3514 - cqr->buildclk = get_clock(); 3514 + cqr->buildclk = get_tod_clock(); 3515 3515 cqr->status = DASD_CQR_FILLED; 3516 3516 3517 3517 rc = dasd_sleep_on_immediatly(cqr); ··· 3572 3572 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 3573 3573 cqr->retries = 5; 3574 3574 cqr->expires = 10 * HZ; 3575 - cqr->buildclk = get_clock(); 3575 + cqr->buildclk = get_tod_clock(); 3576 3576 cqr->status = DASD_CQR_FILLED; 3577 3577 cqr->lpm = usrparm.path_mask; 3578 3578 ··· 3642 3642 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 3643 3643 ccw->cda = (__u32)(addr_t) stats; 3644 3644 
3645 - cqr->buildclk = get_clock(); 3645 + cqr->buildclk = get_tod_clock(); 3646 3646 cqr->status = DASD_CQR_FILLED; 3647 3647 rc = dasd_sleep_on(cqr); 3648 3648 if (rc == 0) { ··· 3768 3768 cqr->memdev = device; 3769 3769 cqr->retries = 3; 3770 3770 cqr->expires = 10 * HZ; 3771 - cqr->buildclk = get_clock(); 3771 + cqr->buildclk = get_tod_clock(); 3772 3772 cqr->status = DASD_CQR_FILLED; 3773 3773 3774 3774 /* Build the ccws */
+1 -1
drivers/s390/block/dasd_eer.c
··· 481 481 ccw->flags = 0; 482 482 ccw->cda = (__u32)(addr_t) cqr->data; 483 483 484 - cqr->buildclk = get_clock(); 484 + cqr->buildclk = get_tod_clock(); 485 485 cqr->status = DASD_CQR_FILLED; 486 486 cqr->callback = dasd_eer_snss_cb; 487 487
+2 -2
drivers/s390/block/dasd_erp.c
··· 102 102 pr_err("%s: default ERP has run out of retries and failed\n", 103 103 dev_name(&device->cdev->dev)); 104 104 cqr->status = DASD_CQR_FAILED; 105 - cqr->stopclk = get_clock(); 105 + cqr->stopclk = get_tod_clock(); 106 106 } 107 107 return cqr; 108 108 } /* end dasd_default_erp_action */ ··· 146 146 cqr->status = DASD_CQR_DONE; 147 147 else { 148 148 cqr->status = DASD_CQR_FAILED; 149 - cqr->stopclk = get_clock(); 149 + cqr->stopclk = get_tod_clock(); 150 150 } 151 151 152 152 return cqr;
+1 -1
drivers/s390/block/dasd_fba.c
··· 370 370 cqr->block = block; 371 371 cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ 372 372 cqr->retries = 32; 373 - cqr->buildclk = get_clock(); 373 + cqr->buildclk = get_tod_clock(); 374 374 cqr->status = DASD_CQR_FILLED; 375 375 return cqr; 376 376 }
+28 -13
drivers/s390/block/scm_blk.h
··· 68 68 void scm_cluster_request_irq(struct scm_request *); 69 69 bool scm_test_cluster_request(struct scm_request *); 70 70 bool scm_cluster_size_valid(void); 71 - #else 72 - #define __scm_free_rq_cluster(scmrq) {} 73 - #define __scm_alloc_rq_cluster(scmrq) 0 74 - #define scm_request_cluster_init(scmrq) {} 75 - #define scm_reserve_cluster(scmrq) true 76 - #define scm_release_cluster(scmrq) {} 77 - #define scm_blk_dev_cluster_setup(bdev) {} 78 - #define scm_need_cluster_request(scmrq) false 79 - #define scm_initiate_cluster_request(scmrq) {} 80 - #define scm_cluster_request_irq(scmrq) {} 81 - #define scm_test_cluster_request(scmrq) false 82 - #define scm_cluster_size_valid() true 83 - #endif 71 + #else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ 72 + static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {} 73 + static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq) 74 + { 75 + return 0; 76 + } 77 + static inline void scm_request_cluster_init(struct scm_request *scmrq) {} 78 + static inline bool scm_reserve_cluster(struct scm_request *scmrq) 79 + { 80 + return true; 81 + } 82 + static inline void scm_release_cluster(struct scm_request *scmrq) {} 83 + static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {} 84 + static inline bool scm_need_cluster_request(struct scm_request *scmrq) 85 + { 86 + return false; 87 + } 88 + static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {} 89 + static inline void scm_cluster_request_irq(struct scm_request *scmrq) {} 90 + static inline bool scm_test_cluster_request(struct scm_request *scmrq) 91 + { 92 + return false; 93 + } 94 + static inline bool scm_cluster_size_valid(void) 95 + { 96 + return true; 97 + } 98 + #endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ 84 99 85 100 extern debug_info_t *scm_debug; 86 101
+26 -3
drivers/s390/char/fs3270.c
··· 443 443 tty_kref_put(tty); 444 444 return -ENODEV; 445 445 } 446 - minor = tty->index + RAW3270_FIRSTMINOR; 446 + minor = tty->index; 447 447 tty_kref_put(tty); 448 448 } 449 449 mutex_lock(&fs3270_mutex); ··· 524 524 .llseek = no_llseek, 525 525 }; 526 526 527 + void fs3270_create_cb(int minor) 528 + { 529 + __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops); 530 + device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), 531 + NULL, "3270/tub%d", minor); 532 + } 533 + 534 + void fs3270_destroy_cb(int minor) 535 + { 536 + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor)); 537 + __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub"); 538 + } 539 + 540 + struct raw3270_notifier fs3270_notifier = 541 + { 542 + .create = fs3270_create_cb, 543 + .destroy = fs3270_destroy_cb, 544 + }; 545 + 527 546 /* 528 547 * 3270 fullscreen driver initialization. 529 548 */ ··· 551 532 { 552 533 int rc; 553 534 554 - rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); 535 + rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops); 555 536 if (rc) 556 537 return rc; 538 + device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0), 539 + NULL, "3270/tub"); 540 + raw3270_register_notifier(&fs3270_notifier); 557 541 return 0; 558 542 } 559 543 560 544 static void __exit 561 545 fs3270_exit(void) 562 546 { 563 - unregister_chrdev(IBM_FS3270_MAJOR, "fs3270"); 547 + raw3270_unregister_notifier(&fs3270_notifier); 548 + __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); 564 549 } 565 550 566 551 MODULE_LICENSE("GPL");
+261 -350
drivers/s390/char/raw3270.c
··· 28 28 #include <linux/device.h> 29 29 #include <linux/mutex.h> 30 30 31 - static struct class *class3270; 31 + struct class *class3270; 32 32 33 33 /* The main 3270 data structure. */ 34 34 struct raw3270 { ··· 37 37 int minor; 38 38 39 39 short model, rows, cols; 40 + unsigned int state; 40 41 unsigned long flags; 41 42 42 43 struct list_head req_queue; /* Request queue. */ ··· 47 46 struct timer_list timer; /* Device timer. */ 48 47 49 48 unsigned char *ascebc; /* ascii -> ebcdic table */ 50 - struct device *clttydev; /* 3270-class tty device ptr */ 51 - struct device *cltubdev; /* 3270-class tub device ptr */ 52 49 53 - struct raw3270_request init_request; 50 + struct raw3270_view init_view; 51 + struct raw3270_request init_reset; 52 + struct raw3270_request init_readpart; 53 + struct raw3270_request init_readmod; 54 54 unsigned char init_data[256]; 55 55 }; 56 + 57 + /* raw3270->state */ 58 + #define RAW3270_STATE_INIT 0 /* Initial state */ 59 + #define RAW3270_STATE_RESET 1 /* Reset command is pending */ 60 + #define RAW3270_STATE_W4ATTN 2 /* Wait for attention interrupt */ 61 + #define RAW3270_STATE_READMOD 3 /* Read partition is pending */ 62 + #define RAW3270_STATE_READY 4 /* Device is usable by views */ 56 63 57 64 /* raw3270->flags */ 58 65 #define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */ 59 66 #define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */ 60 - #define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ 61 - #define RAW3270_FLAGS_READY 4 /* Device is useable by views */ 62 - #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ 63 - #define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */ 67 + #define RAW3270_FLAGS_CONSOLE 2 /* Device is the console. */ 68 + #define RAW3270_FLAGS_FROZEN 3 /* set if 3270 is frozen for suspend */ 64 69 65 70 /* Semaphore to protect global data of raw3270 (devices, views, etc). 
*/ 66 71 static DEFINE_MUTEX(raw3270_mutex); ··· 103 96 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 104 97 0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f 105 98 }; 99 + 100 + static inline int raw3270_state_ready(struct raw3270 *rp) 101 + { 102 + return rp->state == RAW3270_STATE_READY; 103 + } 104 + 105 + static inline int raw3270_state_final(struct raw3270 *rp) 106 + { 107 + return rp->state == RAW3270_STATE_INIT || 108 + rp->state == RAW3270_STATE_READY; 109 + } 106 110 107 111 void 108 112 raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr) ··· 232 214 * Stop running ccw. 233 215 */ 234 216 static int 235 - raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq) 217 + __raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq) 236 218 { 237 219 int retries; 238 220 int rc; ··· 248 230 if (rc == 0) 249 231 break; /* termination successful */ 250 232 } 251 - return rc; 252 - } 253 - 254 - static int 255 - raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq) 256 - { 257 - unsigned long flags; 258 - int rc; 259 - 260 - spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 261 - rc = raw3270_halt_io_nolock(rp, rq); 262 - spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 263 233 return rc; 264 234 } 265 235 ··· 287 281 if (!rp || rp->view != view || 288 282 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 289 283 rc = -EACCES; 290 - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 291 - rc = -ENODEV; 284 + else if (!raw3270_state_ready(rp)) 285 + rc = -EBUSY; 292 286 else 293 287 rc = __raw3270_start(rp, view, rq); 294 288 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); ··· 305 299 if (!rp || rp->view != view || 306 300 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 307 301 rc = -EACCES; 308 - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 309 - rc = -ENODEV; 302 + else if (!raw3270_state_ready(rp)) 303 + rc = -EBUSY; 310 304 else 311 305 rc = __raw3270_start(rp, view, rq); 312 306 
return rc; ··· 384 378 case RAW3270_IO_STOP: 385 379 if (!rq) 386 380 break; 387 - raw3270_halt_io_nolock(rp, rq); 381 + __raw3270_halt_io(rp, rq); 388 382 rq->rc = -EIO; 389 383 break; 390 384 default: ··· 419 413 } 420 414 421 415 /* 422 - * Size sensing. 416 + * To determine the size of the 3270 device we need to do: 417 + * 1) send a 'read partition' data stream to the device 418 + * 2) wait for the attn interrupt that precedes the query reply 419 + * 3) do a read modified to get the query reply 420 + * To make things worse we have to cope with intervention 421 + * required (3270 device switched to 'stand-by') and command 422 + * rejects (old devices that can't do 'read partition'). 423 423 */ 424 - 425 424 struct raw3270_ua { /* Query Reply structure for Usable Area */ 426 425 struct { /* Usable Area Query Reply Base */ 427 426 short l; /* Length of this structured field */ ··· 462 451 } __attribute__ ((packed)) aua; 463 452 } __attribute__ ((packed)); 464 453 465 - static struct diag210 raw3270_init_diag210; 466 - static DEFINE_MUTEX(raw3270_init_mutex); 467 - 468 - static int 469 - raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, 470 - struct irb *irb) 471 - { 472 - /* 473 - * Unit-Check Processing: 474 - * Expect Command Reject or Intervention Required. 475 - */ 476 - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { 477 - /* Request finished abnormally. */ 478 - if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { 479 - set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); 480 - return RAW3270_IO_BUSY; 481 - } 482 - } 483 - if (rq) { 484 - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { 485 - if (irb->ecw[0] & SNS0_CMD_REJECT) 486 - rq->rc = -EOPNOTSUPP; 487 - else 488 - rq->rc = -EIO; 489 - } else 490 - /* Request finished normally. Copy residual count. 
*/ 491 - rq->rescnt = irb->scsw.cmd.count; 492 - } 493 - if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { 494 - set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); 495 - wake_up(&raw3270_wait_queue); 496 - } 497 - return RAW3270_IO_DONE; 498 - } 499 - 500 - static struct raw3270_fn raw3270_init_fn = { 501 - .intv = raw3270_init_irq 502 - }; 503 - 504 - static struct raw3270_view raw3270_init_view = { 505 - .fn = &raw3270_init_fn 506 - }; 507 - 508 - /* 509 - * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup 510 - * Wait for end of request. The request must have been started 511 - * with raw3270_start, rc = 0. The device lock may NOT have been 512 - * released between calling raw3270_start and raw3270_wait. 513 - */ 514 454 static void 515 - raw3270_wake_init(struct raw3270_request *rq, void *data) 516 - { 517 - wake_up((wait_queue_head_t *) data); 518 - } 519 - 520 - /* 521 - * Special wait function that can cope with console initialization. 522 - */ 523 - static int 524 - raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view, 525 - struct raw3270_request *rq) 526 - { 527 - unsigned long flags; 528 - int rc; 529 - 530 - #ifdef CONFIG_TN3270_CONSOLE 531 - if (raw3270_registered == 0) { 532 - spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); 533 - rq->callback = NULL; 534 - rc = __raw3270_start(rp, view, rq); 535 - if (rc == 0) 536 - while (!raw3270_request_final(rq)) { 537 - wait_cons_dev(); 538 - barrier(); 539 - } 540 - spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); 541 - return rq->rc; 542 - } 543 - #endif 544 - rq->callback = raw3270_wake_init; 545 - rq->callback_data = &raw3270_wait_queue; 546 - spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); 547 - rc = __raw3270_start(rp, view, rq); 548 - spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); 549 - if (rc) 550 - return rc; 551 - /* Now wait for the completion. 
*/ 552 - rc = wait_event_interruptible(raw3270_wait_queue, 553 - raw3270_request_final(rq)); 554 - if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */ 555 - raw3270_halt_io(view->dev, rq); 556 - /* No wait for the halt to complete. */ 557 - wait_event(raw3270_wait_queue, raw3270_request_final(rq)); 558 - return -ERESTARTSYS; 559 - } 560 - return rq->rc; 561 - } 562 - 563 - static int 564 - __raw3270_size_device_vm(struct raw3270 *rp) 455 + raw3270_size_device_vm(struct raw3270 *rp) 565 456 { 566 457 int rc, model; 567 458 struct ccw_dev_id dev_id; 459 + struct diag210 diag_data; 568 460 569 461 ccw_device_get_id(rp->cdev, &dev_id); 570 - raw3270_init_diag210.vrdcdvno = dev_id.devno; 571 - raw3270_init_diag210.vrdclen = sizeof(struct diag210); 572 - rc = diag210(&raw3270_init_diag210); 573 - if (rc) 574 - return rc; 575 - model = raw3270_init_diag210.vrdccrmd; 462 + diag_data.vrdcdvno = dev_id.devno; 463 + diag_data.vrdclen = sizeof(struct diag210); 464 + rc = diag210(&diag_data); 465 + model = diag_data.vrdccrmd; 466 + /* Use default model 2 if the size could not be detected */ 467 + if (rc || model < 2 || model > 5) 468 + model = 2; 576 469 switch (model) { 577 470 case 2: 578 471 rp->model = model; ··· 498 583 rp->rows = 27; 499 584 rp->cols = 132; 500 585 break; 501 - default: 502 - rc = -EOPNOTSUPP; 503 - break; 504 586 } 505 - return rc; 506 587 } 507 588 508 - static int 509 - __raw3270_size_device(struct raw3270 *rp) 589 + static void 590 + raw3270_size_device(struct raw3270 *rp) 510 591 { 511 - static const unsigned char wbuf[] = 512 - { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; 513 592 struct raw3270_ua *uap; 514 - int rc; 515 593 516 - /* 517 - * To determine the size of the 3270 device we need to do: 518 - * 1) send a 'read partition' data stream to the device 519 - * 2) wait for the attn interrupt that precedes the query reply 520 - * 3) do a read modified to get the query reply 521 - * To make things worse we have to cope with intervention 522 - 
* required (3270 device switched to 'stand-by') and command 523 - * rejects (old devices that can't do 'read partition'). 524 - */ 525 - memset(&rp->init_request, 0, sizeof(rp->init_request)); 526 - memset(&rp->init_data, 0, 256); 527 - /* Store 'read partition' data stream to init_data */ 528 - memcpy(&rp->init_data, wbuf, sizeof(wbuf)); 529 - INIT_LIST_HEAD(&rp->init_request.list); 530 - rp->init_request.ccw.cmd_code = TC_WRITESF; 531 - rp->init_request.ccw.flags = CCW_FLAG_SLI; 532 - rp->init_request.ccw.count = sizeof(wbuf); 533 - rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data); 534 - 535 - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); 536 - if (rc) 537 - /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */ 538 - return rc; 539 - 540 - /* Wait for attention interrupt. */ 541 - #ifdef CONFIG_TN3270_CONSOLE 542 - if (raw3270_registered == 0) { 543 - unsigned long flags; 544 - 545 - spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 546 - while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags)) 547 - wait_cons_dev(); 548 - spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 549 - } else 550 - #endif 551 - rc = wait_event_interruptible(raw3270_wait_queue, 552 - test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags)); 553 - if (rc) 554 - return rc; 555 - 556 - /* 557 - * The device accepted the 'read partition' command. Now 558 - * set up a read ccw and issue it. 559 - */ 560 - rp->init_request.ccw.cmd_code = TC_READMOD; 561 - rp->init_request.ccw.flags = CCW_FLAG_SLI; 562 - rp->init_request.ccw.count = sizeof(rp->init_data); 563 - rp->init_request.ccw.cda = (__u32) __pa(rp->init_data); 564 - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); 565 - if (rc) 566 - return rc; 567 594 /* Got a Query Reply */ 568 595 uap = (struct raw3270_ua *) (rp->init_data + 1); 569 596 /* Paranoia check. 
*/ 570 - if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81) 571 - return -EOPNOTSUPP; 597 + if (rp->init_readmod.rc || rp->init_data[0] != 0x88 || 598 + uap->uab.qcode != 0x81) { 599 + /* Couldn't detect size. Use default model 2. */ 600 + rp->model = 2; 601 + rp->rows = 24; 602 + rp->cols = 80; 603 + return; 604 + } 572 605 /* Copy rows/columns of default Usable Area */ 573 606 rp->rows = uap->uab.h; 574 607 rp->cols = uap->uab.w; ··· 529 666 rp->rows = uap->aua.hauai; 530 667 rp->cols = uap->aua.wauai; 531 668 } 532 - return 0; 669 + /* Try to find a model. */ 670 + rp->model = 0; 671 + if (rp->rows == 24 && rp->cols == 80) 672 + rp->model = 2; 673 + if (rp->rows == 32 && rp->cols == 80) 674 + rp->model = 3; 675 + if (rp->rows == 43 && rp->cols == 80) 676 + rp->model = 4; 677 + if (rp->rows == 27 && rp->cols == 132) 678 + rp->model = 5; 679 + } 680 + 681 + static void 682 + raw3270_size_device_done(struct raw3270 *rp) 683 + { 684 + struct raw3270_view *view; 685 + 686 + rp->view = NULL; 687 + rp->state = RAW3270_STATE_READY; 688 + /* Notify views about new size */ 689 + list_for_each_entry(view, &rp->view_list, list) 690 + if (view->fn->resize) 691 + view->fn->resize(view, rp->model, rp->rows, rp->cols); 692 + /* Setup processing done, now activate a view */ 693 + list_for_each_entry(view, &rp->view_list, list) { 694 + rp->view = view; 695 + if (view->fn->activate(view) == 0) 696 + break; 697 + rp->view = NULL; 698 + } 699 + } 700 + 701 + static void 702 + raw3270_read_modified_cb(struct raw3270_request *rq, void *data) 703 + { 704 + struct raw3270 *rp = rq->view->dev; 705 + 706 + raw3270_size_device(rp); 707 + raw3270_size_device_done(rp); 708 + } 709 + 710 + static void 711 + raw3270_read_modified(struct raw3270 *rp) 712 + { 713 + if (rp->state != RAW3270_STATE_W4ATTN) 714 + return; 715 + /* Use 'read modified' to get the result of a read partition. 
*/ 716 + memset(&rp->init_readmod, 0, sizeof(rp->init_readmod)); 717 + memset(&rp->init_data, 0, sizeof(rp->init_data)); 718 + rp->init_readmod.ccw.cmd_code = TC_READMOD; 719 + rp->init_readmod.ccw.flags = CCW_FLAG_SLI; 720 + rp->init_readmod.ccw.count = sizeof(rp->init_data); 721 + rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data); 722 + rp->init_readmod.callback = raw3270_read_modified_cb; 723 + rp->state = RAW3270_STATE_READMOD; 724 + raw3270_start_irq(&rp->init_view, &rp->init_readmod); 725 + } 726 + 727 + static void 728 + raw3270_writesf_readpart(struct raw3270 *rp) 729 + { 730 + static const unsigned char wbuf[] = 731 + { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; 732 + 733 + /* Store 'read partition' data stream to init_data */ 734 + memset(&rp->init_readpart, 0, sizeof(rp->init_readpart)); 735 + memset(&rp->init_data, 0, sizeof(rp->init_data)); 736 + memcpy(&rp->init_data, wbuf, sizeof(wbuf)); 737 + rp->init_readpart.ccw.cmd_code = TC_WRITESF; 738 + rp->init_readpart.ccw.flags = CCW_FLAG_SLI; 739 + rp->init_readpart.ccw.count = sizeof(wbuf); 740 + rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data); 741 + rp->state = RAW3270_STATE_W4ATTN; 742 + raw3270_start_irq(&rp->init_view, &rp->init_readpart); 743 + } 744 + 745 + /* 746 + * Device reset 747 + */ 748 + static void 749 + raw3270_reset_device_cb(struct raw3270_request *rq, void *data) 750 + { 751 + struct raw3270 *rp = rq->view->dev; 752 + 753 + if (rp->state != RAW3270_STATE_RESET) 754 + return; 755 + if (rq && rq->rc) { 756 + /* Reset command failed. 
*/ 757 + rp->state = RAW3270_STATE_INIT; 758 + } else if (0 && MACHINE_IS_VM) { 759 + raw3270_size_device_vm(rp); 760 + raw3270_size_device_done(rp); 761 + } else 762 + raw3270_writesf_readpart(rp); 533 763 } 534 764 535 765 static int 536 - raw3270_size_device(struct raw3270 *rp) 766 + __raw3270_reset_device(struct raw3270 *rp) 537 767 { 538 768 int rc; 539 769 540 - mutex_lock(&raw3270_init_mutex); 541 - rp->view = &raw3270_init_view; 542 - raw3270_init_view.dev = rp; 543 - if (MACHINE_IS_VM) 544 - rc = __raw3270_size_device_vm(rp); 545 - else 546 - rc = __raw3270_size_device(rp); 547 - raw3270_init_view.dev = NULL; 548 - rp->view = NULL; 549 - mutex_unlock(&raw3270_init_mutex); 550 - if (rc == 0) { /* Found something. */ 551 - /* Try to find a model. */ 552 - rp->model = 0; 553 - if (rp->rows == 24 && rp->cols == 80) 554 - rp->model = 2; 555 - if (rp->rows == 32 && rp->cols == 80) 556 - rp->model = 3; 557 - if (rp->rows == 43 && rp->cols == 80) 558 - rp->model = 4; 559 - if (rp->rows == 27 && rp->cols == 132) 560 - rp->model = 5; 561 - } else { 562 - /* Couldn't detect size. Use default model 2. 
*/ 563 - rp->model = 2; 564 - rp->rows = 24; 565 - rp->cols = 80; 566 - return 0; 567 - } 770 + /* Store reset data stream to init_data/init_reset */ 771 + memset(&rp->init_reset, 0, sizeof(rp->init_reset)); 772 + memset(&rp->init_data, 0, sizeof(rp->init_data)); 773 + rp->init_data[0] = TW_KR; 774 + rp->init_reset.ccw.cmd_code = TC_EWRITEA; 775 + rp->init_reset.ccw.flags = CCW_FLAG_SLI; 776 + rp->init_reset.ccw.count = 1; 777 + rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data); 778 + rp->init_reset.callback = raw3270_reset_device_cb; 779 + rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset); 780 + if (rc == 0 && rp->state == RAW3270_STATE_INIT) 781 + rp->state = RAW3270_STATE_RESET; 568 782 return rc; 569 783 } 570 784 571 785 static int 572 786 raw3270_reset_device(struct raw3270 *rp) 573 787 { 788 + unsigned long flags; 574 789 int rc; 575 790 576 - mutex_lock(&raw3270_init_mutex); 577 - memset(&rp->init_request, 0, sizeof(rp->init_request)); 578 - memset(&rp->init_data, 0, sizeof(rp->init_data)); 579 - /* Store reset data stream to init_data/init_request */ 580 - rp->init_data[0] = TW_KR; 581 - INIT_LIST_HEAD(&rp->init_request.list); 582 - rp->init_request.ccw.cmd_code = TC_EWRITEA; 583 - rp->init_request.ccw.flags = CCW_FLAG_SLI; 584 - rp->init_request.ccw.count = 1; 585 - rp->init_request.ccw.cda = (__u32) __pa(rp->init_data); 586 - rp->view = &raw3270_init_view; 587 - raw3270_init_view.dev = rp; 588 - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); 589 - raw3270_init_view.dev = NULL; 590 - rp->view = NULL; 591 - mutex_unlock(&raw3270_init_mutex); 791 + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 792 + rc = __raw3270_reset_device(rp); 793 + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 592 794 return rc; 593 795 } 594 796 ··· 667 739 if (!rp || rp->view != view || 668 740 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 669 741 rc = -EACCES; 670 - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 671 - rc = 
-ENODEV; 742 + else if (!raw3270_state_ready(rp)) 743 + rc = -EBUSY; 672 744 else 673 745 rc = raw3270_reset_device(view->dev); 674 746 return rc; 675 747 } 748 + 749 + static int 750 + raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, 751 + struct irb *irb) 752 + { 753 + struct raw3270 *rp; 754 + 755 + /* 756 + * Unit-Check Processing: 757 + * Expect Command Reject or Intervention Required. 758 + */ 759 + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { 760 + /* Request finished abnormally. */ 761 + if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { 762 + set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); 763 + return RAW3270_IO_BUSY; 764 + } 765 + } 766 + if (rq) { 767 + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { 768 + if (irb->ecw[0] & SNS0_CMD_REJECT) 769 + rq->rc = -EOPNOTSUPP; 770 + else 771 + rq->rc = -EIO; 772 + } 773 + } 774 + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { 775 + /* Queue read modified after attention interrupt */ 776 + rp = view->dev; 777 + raw3270_read_modified(rp); 778 + } 779 + return RAW3270_IO_DONE; 780 + } 781 + 782 + static struct raw3270_fn raw3270_init_fn = { 783 + .intv = raw3270_init_irq 784 + }; 676 785 677 786 /* 678 787 * Setup new 3270 device. 
··· 738 773 739 774 INIT_LIST_HEAD(&rp->req_queue); 740 775 INIT_LIST_HEAD(&rp->view_list); 776 + 777 + rp->init_view.dev = rp; 778 + rp->init_view.fn = &raw3270_init_fn; 779 + rp->view = &rp->init_view; 741 780 742 781 /* 743 782 * Add device to list and find the smallest unused minor ··· 781 812 */ 782 813 struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) 783 814 { 815 + unsigned long flags; 784 816 struct raw3270 *rp; 785 817 char *ascebc; 786 818 int rc; ··· 792 822 if (rc) 793 823 return ERR_PTR(rc); 794 824 set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags); 795 - rc = raw3270_reset_device(rp); 796 - if (rc) 797 - return ERR_PTR(rc); 798 - rc = raw3270_size_device(rp); 799 - if (rc) 800 - return ERR_PTR(rc); 801 - rc = raw3270_reset_device(rp); 802 - if (rc) 803 - return ERR_PTR(rc); 804 - set_bit(RAW3270_FLAGS_READY, &rp->flags); 825 + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 826 + do { 827 + __raw3270_reset_device(rp); 828 + while (!raw3270_state_final(rp)) { 829 + wait_cons_dev(); 830 + barrier(); 831 + } 832 + } while (rp->state != RAW3270_STATE_READY); 833 + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 805 834 return rp; 806 835 } 807 836 ··· 862 893 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 863 894 if (rp->view == view) 864 895 rc = 0; 865 - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 866 - rc = -ENODEV; 896 + else if (!raw3270_state_ready(rp)) 897 + rc = -EBUSY; 867 898 else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 868 899 rc = -EACCES; 869 900 else { 870 901 oldview = NULL; 871 - if (rp->view) { 902 + if (rp->view && rp->view->fn->deactivate) { 872 903 oldview = rp->view; 873 904 oldview->fn->deactivate(oldview); 874 905 } ··· 913 944 list_del_init(&view->list); 914 945 list_add_tail(&view->list, &rp->view_list); 915 946 /* Try to activate another view. 
*/ 916 - if (test_bit(RAW3270_FLAGS_READY, &rp->flags) && 947 + if (raw3270_state_ready(rp) && 917 948 !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { 918 949 list_for_each_entry(view, &rp->view_list, list) { 919 950 rp->view = view; ··· 944 975 if (rp->minor != minor) 945 976 continue; 946 977 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 947 - if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { 948 - atomic_set(&view->ref_count, 2); 949 - view->dev = rp; 950 - view->fn = fn; 951 - view->model = rp->model; 952 - view->rows = rp->rows; 953 - view->cols = rp->cols; 954 - view->ascebc = rp->ascebc; 955 - spin_lock_init(&view->lock); 956 - list_add(&view->list, &rp->view_list); 957 - rc = 0; 958 - } 978 + atomic_set(&view->ref_count, 2); 979 + view->dev = rp; 980 + view->fn = fn; 981 + view->model = rp->model; 982 + view->rows = rp->rows; 983 + view->cols = rp->cols; 984 + view->ascebc = rp->ascebc; 985 + spin_lock_init(&view->lock); 986 + list_add(&view->list, &rp->view_list); 987 + rc = 0; 959 988 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 960 989 break; 961 990 } ··· 977 1010 if (rp->minor != minor) 978 1011 continue; 979 1012 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 980 - if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { 981 - view = ERR_PTR(-ENOENT); 982 - list_for_each_entry(tmp, &rp->view_list, list) { 983 - if (tmp->fn == fn) { 984 - raw3270_get_view(tmp); 985 - view = tmp; 986 - break; 987 - } 1013 + list_for_each_entry(tmp, &rp->view_list, list) { 1014 + if (tmp->fn == fn) { 1015 + raw3270_get_view(tmp); 1016 + view = tmp; 1017 + break; 988 1018 } 989 1019 } 990 1020 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); ··· 1008 1044 rp->view = NULL; 1009 1045 } 1010 1046 list_del_init(&view->list); 1011 - if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) && 1047 + if (!rp->view && raw3270_state_ready(rp) && 1012 1048 !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { 1013 1049 /* Try to activate another view. 
*/ 1014 1050 list_for_each_entry(nv, &rp->view_list, list) { ··· 1036 1072 1037 1073 /* Remove from device chain. */ 1038 1074 mutex_lock(&raw3270_mutex); 1039 - if (rp->clttydev && !IS_ERR(rp->clttydev)) 1040 - device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1041 - if (rp->cltubdev && !IS_ERR(rp->cltubdev)) 1042 - device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor)); 1043 1075 list_del_init(&rp->list); 1044 1076 mutex_unlock(&raw3270_mutex); 1045 1077 ··· 1099 1139 1100 1140 static int raw3270_create_attributes(struct raw3270 *rp) 1101 1141 { 1102 - int rc; 1103 - 1104 - rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1105 - if (rc) 1106 - goto out; 1107 - 1108 - rp->clttydev = device_create(class3270, &rp->cdev->dev, 1109 - MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL, 1110 - "tty%s", dev_name(&rp->cdev->dev)); 1111 - if (IS_ERR(rp->clttydev)) { 1112 - rc = PTR_ERR(rp->clttydev); 1113 - goto out_ttydev; 1114 - } 1115 - 1116 - rp->cltubdev = device_create(class3270, &rp->cdev->dev, 1117 - MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL, 1118 - "tub%s", dev_name(&rp->cdev->dev)); 1119 - if (!IS_ERR(rp->cltubdev)) 1120 - goto out; 1121 - 1122 - rc = PTR_ERR(rp->cltubdev); 1123 - device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1124 - 1125 - out_ttydev: 1126 - sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1127 - out: 1128 - return rc; 1142 + return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1129 1143 } 1130 1144 1131 1145 /* 1132 1146 * Notifier for device addition/removal 1133 1147 */ 1134 - struct raw3270_notifier { 1135 - struct list_head list; 1136 - void (*notifier)(int, int); 1137 - }; 1138 - 1139 1148 static LIST_HEAD(raw3270_notifier); 1140 1149 1141 - int raw3270_register_notifier(void (*notifier)(int, int)) 1150 + int raw3270_register_notifier(struct raw3270_notifier *notifier) 1142 1151 { 1143 - struct raw3270_notifier *np; 1144 1152 struct raw3270 *rp; 1145 1153 1146 - np 
= kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL); 1147 - if (!np) 1148 - return -ENOMEM; 1149 - np->notifier = notifier; 1150 1154 mutex_lock(&raw3270_mutex); 1151 - list_add_tail(&np->list, &raw3270_notifier); 1152 - list_for_each_entry(rp, &raw3270_devices, list) { 1153 - get_device(&rp->cdev->dev); 1154 - notifier(rp->minor, 1); 1155 - } 1155 + list_add_tail(&notifier->list, &raw3270_notifier); 1156 + list_for_each_entry(rp, &raw3270_devices, list) 1157 + notifier->create(rp->minor); 1156 1158 mutex_unlock(&raw3270_mutex); 1157 1159 return 0; 1158 1160 } 1159 1161 1160 - void raw3270_unregister_notifier(void (*notifier)(int, int)) 1162 + void raw3270_unregister_notifier(struct raw3270_notifier *notifier) 1161 1163 { 1162 - struct raw3270_notifier *np; 1164 + struct raw3270 *rp; 1163 1165 1164 1166 mutex_lock(&raw3270_mutex); 1165 - list_for_each_entry(np, &raw3270_notifier, list) 1166 - if (np->notifier == notifier) { 1167 - list_del(&np->list); 1168 - kfree(np); 1169 - break; 1170 - } 1167 + list_for_each_entry(rp, &raw3270_devices, list) 1168 + notifier->destroy(rp->minor); 1169 + list_del(&notifier->list); 1171 1170 mutex_unlock(&raw3270_mutex); 1172 1171 } 1173 1172 ··· 1136 1217 static int 1137 1218 raw3270_set_online (struct ccw_device *cdev) 1138 1219 { 1139 - struct raw3270 *rp; 1140 1220 struct raw3270_notifier *np; 1221 + struct raw3270 *rp; 1141 1222 int rc; 1142 1223 1143 1224 rp = raw3270_create_device(cdev); 1144 1225 if (IS_ERR(rp)) 1145 1226 return PTR_ERR(rp); 1146 - rc = raw3270_reset_device(rp); 1147 - if (rc) 1148 - goto failure; 1149 - rc = raw3270_size_device(rp); 1150 - if (rc) 1151 - goto failure; 1152 - rc = raw3270_reset_device(rp); 1153 - if (rc) 1154 - goto failure; 1155 1227 rc = raw3270_create_attributes(rp); 1156 1228 if (rc) 1157 1229 goto failure; 1158 - set_bit(RAW3270_FLAGS_READY, &rp->flags); 1230 + raw3270_reset_device(rp); 1159 1231 mutex_lock(&raw3270_mutex); 1160 1232 list_for_each_entry(np, &raw3270_notifier, list) 
1161 - np->notifier(rp->minor, 1); 1233 + np->create(rp->minor); 1162 1234 mutex_unlock(&raw3270_mutex); 1163 1235 return 0; 1164 1236 ··· 1178 1268 */ 1179 1269 if (rp == NULL) 1180 1270 return; 1181 - clear_bit(RAW3270_FLAGS_READY, &rp->flags); 1182 1271 1183 1272 sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group); 1184 1273 1185 1274 /* Deactivate current view and remove all views. */ 1186 1275 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1187 1276 if (rp->view) { 1188 - rp->view->fn->deactivate(rp->view); 1277 + if (rp->view->fn->deactivate) 1278 + rp->view->fn->deactivate(rp->view); 1189 1279 rp->view = NULL; 1190 1280 } 1191 1281 while (!list_empty(&rp->view_list)) { ··· 1200 1290 1201 1291 mutex_lock(&raw3270_mutex); 1202 1292 list_for_each_entry(np, &raw3270_notifier, list) 1203 - np->notifier(rp->minor, 0); 1293 + np->destroy(rp->minor); 1204 1294 mutex_unlock(&raw3270_mutex); 1205 1295 1206 1296 /* Reset 3270 device. */ ··· 1234 1324 if (!rp) 1235 1325 return 0; 1236 1326 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 1237 - if (rp->view) 1327 + if (rp->view && rp->view->fn->deactivate) 1238 1328 rp->view->fn->deactivate(rp->view); 1239 1329 if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) { 1240 1330 /* ··· 1261 1351 return 0; 1262 1352 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 1263 1353 clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags); 1264 - if (rp->view) 1354 + if (rp->view && rp->view->fn->activate) 1265 1355 rp->view->fn->activate(rp->view); 1266 1356 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 1267 1357 return 0; ··· 1344 1434 module_init(raw3270_init); 1345 1435 module_exit(raw3270_exit); 1346 1436 1437 + EXPORT_SYMBOL(class3270); 1347 1438 EXPORT_SYMBOL(raw3270_request_alloc); 1348 1439 EXPORT_SYMBOL(raw3270_request_free); 1349 1440 EXPORT_SYMBOL(raw3270_request_reset);
+10 -2
drivers/s390/char/raw3270.h
··· 91 91 92 92 struct raw3270; 93 93 struct raw3270_view; 94 + extern struct class *class3270; 94 95 95 96 /* 3270 CCW request */ 96 97 struct raw3270_request { ··· 141 140 struct raw3270_request *, struct irb *); 142 141 void (*release)(struct raw3270_view *); 143 142 void (*free)(struct raw3270_view *); 143 + void (*resize)(struct raw3270_view *, int, int, int); 144 144 }; 145 145 146 146 /* ··· 194 192 void raw3270_wait_cons_dev(struct raw3270 *); 195 193 196 194 /* Notifier for device addition/removal */ 197 - int raw3270_register_notifier(void (*notifier)(int, int)); 198 - void raw3270_unregister_notifier(void (*notifier)(int, int)); 195 + struct raw3270_notifier { 196 + struct list_head list; 197 + void (*create)(int minor); 198 + void (*destroy)(int minor); 199 + }; 200 + 201 + int raw3270_register_notifier(struct raw3270_notifier *); 202 + void raw3270_unregister_notifier(struct raw3270_notifier *); 199 203 void raw3270_pm_unfreeze(struct raw3270_view *); 200 204 201 205 /*
+2 -2
drivers/s390/char/sclp.c
··· 450 450 timeout = 0; 451 451 if (timer_pending(&sclp_request_timer)) { 452 452 /* Get timeout TOD value */ 453 - timeout = get_clock() + 453 + timeout = get_tod_clock() + 454 454 sclp_tod_from_jiffies(sclp_request_timer.expires - 455 455 jiffies); 456 456 } ··· 472 472 while (sclp_running_state != sclp_running_state_idle) { 473 473 /* Check for expired request timer */ 474 474 if (timer_pending(&sclp_request_timer) && 475 - get_clock() > timeout && 475 + get_tod_clock() > timeout && 476 476 del_timer(&sclp_request_timer)) 477 477 sclp_request_timer.function(sclp_request_timer.data); 478 478 cpu_relax();
+2 -8
drivers/s390/char/sclp_cmd.c
··· 56 56 57 57 u64 sclp_facilities; 58 58 static u8 sclp_fac84; 59 - static u8 sclp_fac85; 60 59 static unsigned long long rzm; 61 60 static unsigned long long rnmax; 62 61 ··· 130 131 sccb = &early_read_info_sccb; 131 132 sclp_facilities = sccb->facilities; 132 133 sclp_fac84 = sccb->fac84; 133 - sclp_fac85 = sccb->fac85; 134 + if (sccb->fac85 & 0x02) 135 + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; 134 136 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 135 137 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; 136 138 rzm <<= 20; ··· 170 170 { 171 171 return rzm; 172 172 } 173 - 174 - u8 sclp_get_fac85(void) 175 - { 176 - return sclp_fac85; 177 - } 178 - EXPORT_SYMBOL_GPL(sclp_get_fac85); 179 173 180 174 /* 181 175 * This function will be called after sclp_facilities_detect(), which gets
+140 -47
drivers/s390/char/tty3270.c
··· 15 15 #include <linux/init.h> 16 16 #include <linux/console.h> 17 17 #include <linux/interrupt.h> 18 + #include <linux/workqueue.h> 18 19 19 20 #include <linux/slab.h> 20 21 #include <linux/bootmem.h> ··· 81 80 unsigned int highlight; /* Blink/reverse/underscore */ 82 81 unsigned int f_color; /* Foreground color */ 83 82 struct tty3270_line *screen; 83 + unsigned int n_model, n_cols, n_rows; /* New model & size */ 84 + struct work_struct resize_work; 84 85 85 86 /* Input stuff. */ 86 87 struct string *prompt; /* Output string for input area. */ ··· 118 115 #define TTY_UPDATE_ALL 16 /* Recreate screen. */ 119 116 120 117 static void tty3270_update(struct tty3270 *); 118 + static void tty3270_resize_work(struct work_struct *work); 121 119 122 120 /* 123 121 * Setup timeout for a device. On timeout trigger an update. ··· 687 683 INIT_LIST_HEAD(&tp->update); 688 684 INIT_LIST_HEAD(&tp->rcl_lines); 689 685 tp->rcl_max = 20; 690 - tty_port_init(&tp->port); 691 - setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, 692 - (unsigned long) tp); 693 - tasklet_init(&tp->readlet, 694 - (void (*)(unsigned long)) tty3270_read_tasklet, 695 - (unsigned long) tp->read); 696 686 697 687 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) { 698 688 tp->freemem_pages[pages] = (void *) ··· 708 710 tp->kbd = kbd_alloc(); 709 711 if (!tp->kbd) 710 712 goto out_reset; 713 + 714 + tty_port_init(&tp->port); 715 + setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, 716 + (unsigned long) tp); 717 + tasklet_init(&tp->readlet, 718 + (void (*)(unsigned long)) tty3270_read_tasklet, 719 + (unsigned long) tp->read); 720 + INIT_WORK(&tp->resize_work, tty3270_resize_work); 721 + 711 722 return tp; 712 723 713 724 out_reset: ··· 759 752 /* 760 753 * Allocate tty3270 screen. 
761 754 */ 762 - static int 763 - tty3270_alloc_screen(struct tty3270 *tp) 755 + static struct tty3270_line * 756 + tty3270_alloc_screen(unsigned int rows, unsigned int cols) 764 757 { 758 + struct tty3270_line *screen; 765 759 unsigned long size; 766 760 int lines; 767 761 768 - size = sizeof(struct tty3270_line) * (tp->view.rows - 2); 769 - tp->screen = kzalloc(size, GFP_KERNEL); 770 - if (!tp->screen) 762 + size = sizeof(struct tty3270_line) * (rows - 2); 763 + screen = kzalloc(size, GFP_KERNEL); 764 + if (!screen) 771 765 goto out_err; 772 - for (lines = 0; lines < tp->view.rows - 2; lines++) { 773 - size = sizeof(struct tty3270_cell) * tp->view.cols; 774 - tp->screen[lines].cells = kzalloc(size, GFP_KERNEL); 775 - if (!tp->screen[lines].cells) 766 + for (lines = 0; lines < rows - 2; lines++) { 767 + size = sizeof(struct tty3270_cell) * cols; 768 + screen[lines].cells = kzalloc(size, GFP_KERNEL); 769 + if (!screen[lines].cells) 776 770 goto out_screen; 777 771 } 778 - return 0; 772 + return screen; 779 773 out_screen: 780 774 while (lines--) 781 - kfree(tp->screen[lines].cells); 782 - kfree(tp->screen); 775 + kfree(screen[lines].cells); 776 + kfree(screen); 783 777 out_err: 784 - return -ENOMEM; 778 + return ERR_PTR(-ENOMEM); 785 779 } 786 780 787 781 /* 788 782 * Free tty3270 screen. 
789 783 */ 790 784 static void 791 - tty3270_free_screen(struct tty3270 *tp) 785 + tty3270_free_screen(struct tty3270_line *screen, unsigned int rows) 792 786 { 793 787 int lines; 794 788 795 - for (lines = 0; lines < tp->view.rows - 2; lines++) 796 - kfree(tp->screen[lines].cells); 797 - kfree(tp->screen); 789 + for (lines = 0; lines < rows - 2; lines++) 790 + kfree(screen[lines].cells); 791 + kfree(screen); 792 + } 793 + 794 + /* 795 + * Resize tty3270 screen 796 + */ 797 + static void tty3270_resize_work(struct work_struct *work) 798 + { 799 + struct tty3270 *tp = container_of(work, struct tty3270, resize_work); 800 + struct tty3270_line *screen, *oscreen; 801 + struct tty_struct *tty; 802 + unsigned int orows; 803 + struct winsize ws; 804 + 805 + screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); 806 + if (!screen) 807 + return; 808 + /* Switch to new output size */ 809 + spin_lock_bh(&tp->view.lock); 810 + oscreen = tp->screen; 811 + orows = tp->view.rows; 812 + tp->view.model = tp->n_model; 813 + tp->view.rows = tp->n_rows; 814 + tp->view.cols = tp->n_cols; 815 + tp->screen = screen; 816 + free_string(&tp->freemem, tp->prompt); 817 + free_string(&tp->freemem, tp->status); 818 + tty3270_create_prompt(tp); 819 + tty3270_create_status(tp); 820 + tp->nr_up = 0; 821 + while (tp->nr_lines < tp->view.rows - 2) 822 + tty3270_blank_line(tp); 823 + tp->update_flags = TTY_UPDATE_ALL; 824 + spin_unlock_bh(&tp->view.lock); 825 + tty3270_free_screen(oscreen, orows); 826 + tty3270_set_timer(tp, 1); 827 + /* Informat tty layer about new size */ 828 + tty = tty_port_tty_get(&tp->port); 829 + if (!tty) 830 + return; 831 + ws.ws_row = tp->view.rows - 2; 832 + ws.ws_col = tp->view.cols; 833 + tty_do_resize(tty, &ws); 834 + } 835 + 836 + static void 837 + tty3270_resize(struct raw3270_view *view, int model, int rows, int cols) 838 + { 839 + struct tty3270 *tp = container_of(view, struct tty3270, view); 840 + 841 + tp->n_model = model; 842 + tp->n_rows = rows; 843 + tp->n_cols 
= cols; 844 + schedule_work(&tp->resize_work); 798 845 } 799 846 800 847 /* ··· 876 815 tty3270_free(struct raw3270_view *view) 877 816 { 878 817 struct tty3270 *tp = container_of(view, struct tty3270, view); 879 - tty3270_free_screen(tp); 818 + 819 + tty3270_free_screen(tp->screen, tp->view.rows); 880 820 tty3270_free_view(tp); 881 821 } 882 822 ··· 889 827 { 890 828 int i; 891 829 892 - for (i = 0; i < tty3270_max_index; i++) { 893 - struct raw3270_view *view = 894 - raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR); 830 + for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) { 831 + struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i); 895 832 if (!IS_ERR(view)) 896 833 raw3270_del_view(view); 897 834 } ··· 901 840 .deactivate = tty3270_deactivate, 902 841 .intv = (void *) tty3270_irq, 903 842 .release = tty3270_release, 904 - .free = tty3270_free 843 + .free = tty3270_free, 844 + .resize = tty3270_resize 905 845 }; 906 846 907 847 /* ··· 915 853 int i, rc; 916 854 917 855 /* Check if the tty3270 is already there. */ 918 - view = raw3270_find_view(&tty3270_fn, 919 - tty->index + RAW3270_FIRSTMINOR); 856 + view = raw3270_find_view(&tty3270_fn, tty->index); 920 857 if (!IS_ERR(view)) { 921 858 tp = container_of(view, struct tty3270, view); 922 859 tty->driver_data = tp; ··· 927 866 tp->inattr = TF_INPUT; 928 867 return tty_port_install(&tp->port, driver, tty); 929 868 } 930 - if (tty3270_max_index < tty->index + 1) 931 - tty3270_max_index = tty->index + 1; 932 - 933 - /* Quick exit if there is no device for tty->index. */ 934 - if (PTR_ERR(view) == -ENODEV) 935 - return -ENODEV; 869 + if (tty3270_max_index < tty->index) 870 + tty3270_max_index = tty->index; 936 871 937 872 /* Allocate tty3270 structure on first open. 
*/ 938 873 tp = tty3270_alloc_view(); 939 874 if (IS_ERR(tp)) 940 875 return PTR_ERR(tp); 941 876 942 - rc = raw3270_add_view(&tp->view, &tty3270_fn, 943 - tty->index + RAW3270_FIRSTMINOR); 877 + rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); 944 878 if (rc) { 945 879 tty3270_free_view(tp); 946 880 return rc; 947 881 } 948 882 949 - rc = tty3270_alloc_screen(tp); 950 - if (rc) { 883 + tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); 884 + if (IS_ERR(tp->screen)) { 885 + rc = PTR_ERR(tp->screen); 951 886 raw3270_put_view(&tp->view); 952 887 raw3270_del_view(&tp->view); 888 + tty3270_free_view(tp); 953 889 return rc; 954 890 } 955 891 ··· 980 922 981 923 tty->driver_data = tp; 982 924 925 + return 0; 926 + } 927 + 928 + /* 929 + * This routine is called whenever a 3270 tty is opened. 930 + */ 931 + static int 932 + tty3270_open(struct tty_struct *tty, struct file *filp) 933 + { 934 + struct tty3270 *tp = tty->driver_data; 935 + struct tty_port *port = &tp->port; 936 + 937 + port->count++; 938 + tty_port_tty_set(port, tty); 983 939 return 0; 984 940 } 985 941 ··· 1825 1753 static const struct tty_operations tty3270_ops = { 1826 1754 .install = tty3270_install, 1827 1755 .cleanup = tty3270_cleanup, 1756 + .open = tty3270_open, 1828 1757 .close = tty3270_close, 1829 1758 .write = tty3270_write, 1830 1759 .put_char = tty3270_put_char, ··· 1844 1771 .set_termios = tty3270_set_termios 1845 1772 }; 1846 1773 1774 + void tty3270_create_cb(int minor) 1775 + { 1776 + tty_register_device(tty3270_driver, minor, NULL); 1777 + } 1778 + 1779 + void tty3270_destroy_cb(int minor) 1780 + { 1781 + tty_unregister_device(tty3270_driver, minor); 1782 + } 1783 + 1784 + struct raw3270_notifier tty3270_notifier = 1785 + { 1786 + .create = tty3270_create_cb, 1787 + .destroy = tty3270_destroy_cb, 1788 + }; 1789 + 1847 1790 /* 1848 1791 * 3270 tty registration code called from tty_init(). 1849 1792 * Most kernel services (incl. kmalloc) are available at this poimt. 
··· 1869 1780 struct tty_driver *driver; 1870 1781 int ret; 1871 1782 1872 - driver = alloc_tty_driver(RAW3270_MAXDEVS); 1873 - if (!driver) 1874 - return -ENOMEM; 1783 + driver = tty_alloc_driver(RAW3270_MAXDEVS, 1784 + TTY_DRIVER_REAL_RAW | 1785 + TTY_DRIVER_DYNAMIC_DEV | 1786 + TTY_DRIVER_RESET_TERMIOS); 1787 + if (IS_ERR(driver)) 1788 + return PTR_ERR(driver); 1875 1789 1876 1790 /* 1877 1791 * Initialize the tty_driver structure 1878 1792 * Entries in tty3270_driver that are NOT initialized: 1879 1793 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc 1880 1794 */ 1881 - driver->driver_name = "ttyTUB"; 1882 - driver->name = "ttyTUB"; 1795 + driver->driver_name = "tty3270"; 1796 + driver->name = "3270/tty"; 1883 1797 driver->major = IBM_TTY3270_MAJOR; 1884 - driver->minor_start = RAW3270_FIRSTMINOR; 1798 + driver->minor_start = 0; 1885 1799 driver->type = TTY_DRIVER_TYPE_SYSTEM; 1886 1800 driver->subtype = SYSTEM_TYPE_TTY; 1887 1801 driver->init_termios = tty_std_termios; 1888 - driver->flags = TTY_DRIVER_RESET_TERMIOS; 1889 1802 tty_set_operations(driver, &tty3270_ops); 1890 1803 ret = tty_register_driver(driver); 1891 1804 if (ret) { ··· 1895 1804 return ret; 1896 1805 } 1897 1806 tty3270_driver = driver; 1807 + raw3270_register_notifier(&tty3270_notifier); 1898 1808 return 0; 1899 1809 } 1900 1810 ··· 1904 1812 { 1905 1813 struct tty_driver *driver; 1906 1814 1815 + raw3270_unregister_notifier(&tty3270_notifier); 1907 1816 driver = tty3270_driver; 1908 1817 tty3270_driver = NULL; 1909 1818 tty_unregister_driver(driver);
+60 -4
drivers/s390/char/zcore.c
··· 62 62 static struct dentry *zcore_file; 63 63 static struct dentry *zcore_memmap_file; 64 64 static struct dentry *zcore_reipl_file; 65 + static struct dentry *zcore_hsa_file; 65 66 static struct ipl_parameter_block *ipl_block; 66 67 67 68 /* ··· 78 77 int offs, blk_num; 79 78 static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); 80 79 80 + if (!hsa_available) 81 + return -ENODATA; 81 82 if (count == 0) 82 83 return 0; 83 84 ··· 281 278 } 282 279 283 280 /* 281 + * Release the HSA 282 + */ 283 + static void release_hsa(void) 284 + { 285 + diag308(DIAG308_REL_HSA, NULL); 286 + hsa_available = 0; 287 + } 288 + 289 + /* 284 290 * Read routine for zcore character device 285 291 * First 4K are dump header 286 292 * Next 32MB are HSA Memory ··· 375 363 376 364 static int zcore_release(struct inode *inode, struct file *filep) 377 365 { 378 - diag308(DIAG308_REL_HSA, NULL); 379 - hsa_available = 0; 366 + if (hsa_available) 367 + release_hsa(); 380 368 return 0; 381 369 } 382 370 ··· 483 471 .write = zcore_reipl_write, 484 472 .open = zcore_reipl_open, 485 473 .release = zcore_reipl_release, 474 + .llseek = no_llseek, 475 + }; 476 + 477 + static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, 478 + size_t count, loff_t *ppos) 479 + { 480 + static char str[18]; 481 + 482 + if (hsa_available) 483 + snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE); 484 + else 485 + snprintf(str, sizeof(str), "0\n"); 486 + return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); 487 + } 488 + 489 + static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf, 490 + size_t count, loff_t *ppos) 491 + { 492 + char value; 493 + 494 + if (*ppos != 0) 495 + return -EPIPE; 496 + if (copy_from_user(&value, buf, 1)) 497 + return -EFAULT; 498 + if (value != '0') 499 + return -EINVAL; 500 + release_hsa(); 501 + return count; 502 + } 503 + 504 + static const struct file_operations zcore_hsa_fops = { 505 + .owner = THIS_MODULE, 506 + .write = 
zcore_hsa_write, 507 + .read = zcore_hsa_read, 508 + .open = nonseekable_open, 486 509 .llseek = no_llseek, 487 510 }; 488 511 ··· 637 590 hdr->rmem_size = memory; 638 591 hdr->mem_end = sys_info.mem_size; 639 592 hdr->num_pages = memory / PAGE_SIZE; 640 - hdr->tod = get_clock(); 593 + hdr->tod = get_tod_clock(); 641 594 get_cpu_id(&hdr->cpu_id); 642 595 for (i = 0; zfcpdump_save_areas[i]; i++) { 643 596 prefix = zfcpdump_save_areas[i]->pref_reg; ··· 705 658 rc = check_sdias(); 706 659 if (rc) 707 660 goto fail; 661 + hsa_available = 1; 708 662 709 663 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); 710 664 if (rc) ··· 762 714 rc = -ENOMEM; 763 715 goto fail_memmap_file; 764 716 } 765 - hsa_available = 1; 717 + zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir, 718 + NULL, &zcore_hsa_fops); 719 + if (!zcore_hsa_file) { 720 + rc = -ENOMEM; 721 + goto fail_reipl_file; 722 + } 766 723 return 0; 767 724 725 + fail_reipl_file: 726 + debugfs_remove(zcore_reipl_file); 768 727 fail_memmap_file: 769 728 debugfs_remove(zcore_memmap_file); 770 729 fail_file: ··· 788 733 debug_unregister(zcore_dbf); 789 734 sclp_sdias_exit(); 790 735 free_page((unsigned long) ipl_block); 736 + debugfs_remove(zcore_hsa_file); 791 737 debugfs_remove(zcore_reipl_file); 792 738 debugfs_remove(zcore_memmap_file); 793 739 debugfs_remove(zcore_file);
+32 -36
drivers/s390/cio/chsc.c
··· 435 435 436 436 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 437 437 { 438 - #ifdef CONFIG_PCI 439 438 switch (sei_area->cc) { 440 439 case 1: 441 440 zpci_event_error(sei_area->ccdf); ··· 443 444 zpci_event_availability(sei_area->ccdf); 444 445 break; 445 446 default: 446 - CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n", 447 + CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", 447 448 sei_area->cc); 448 449 break; 449 450 } 450 - #endif 451 451 } 452 452 453 453 static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) ··· 469 471 chsc_process_sei_scm_change(sei_area); 470 472 break; 471 473 default: /* other stuff */ 472 - CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 474 + CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", 473 475 sei_area->cc); 474 476 break; 475 477 } 478 + 479 + /* Check if we might have lost some information. */ 480 + if (sei_area->flags & 0x40) { 481 + CIO_CRW_EVENT(2, "chsc: event overflow\n"); 482 + css_schedule_eval_all(); 483 + } 476 484 } 477 485 478 - static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm) 486 + static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) 479 487 { 480 488 do { 481 489 memset(sei, 0, sizeof(*sei)); ··· 492 488 if (chsc(sei)) 493 489 break; 494 490 495 - if (sei->response.code == 0x0001) { 496 - CIO_CRW_EVENT(2, "chsc: sei successful\n"); 497 - 498 - /* Check if we might have lost some information. 
*/ 499 - if (sei->u.nt0_area.flags & 0x40) { 500 - CIO_CRW_EVENT(2, "chsc: event overflow\n"); 501 - css_schedule_eval_all(); 502 - } 503 - 504 - switch (sei->nt) { 505 - case 0: 506 - chsc_process_sei_nt0(&sei->u.nt0_area); 507 - break; 508 - case 2: 509 - chsc_process_sei_nt2(&sei->u.nt2_area); 510 - break; 511 - default: 512 - CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n", 513 - sei->nt); 514 - break; 515 - } 516 - } else { 491 + if (sei->response.code != 0x0001) { 517 492 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 518 493 sei->response.code); 519 494 break; 520 495 } 521 - } while (sei->u.nt0_area.flags & 0x80); 522 496 523 - return 0; 497 + CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); 498 + switch (sei->nt) { 499 + case 0: 500 + chsc_process_sei_nt0(&sei->u.nt0_area); 501 + break; 502 + case 2: 503 + chsc_process_sei_nt2(&sei->u.nt2_area); 504 + break; 505 + default: 506 + CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); 507 + break; 508 + } 509 + } while (sei->u.nt0_area.flags & 0x80); 524 510 } 525 511 512 + /* 513 + * Handle channel subsystem related CRWs. 514 + * Use store event information to find out what's going on. 515 + * 516 + * Note: Access to sei_page is serialized through machine check handler 517 + * thread, so no need for locking. 518 + */ 526 519 static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 527 520 { 528 - struct chsc_sei *sei; 521 + struct chsc_sei *sei = sei_page; 529 522 530 523 if (overflow) { 531 524 css_schedule_eval_all(); ··· 532 531 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 533 532 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, 534 533 crw0->erc, crw0->rsid); 535 - if (!sei_page) 536 - return; 537 - /* Access to sei_page is serialized through machine check handler 538 - * thread, so no need for locking. 
*/ 539 - sei = sei_page; 540 534 541 535 CIO_TRACE_EVENT(2, "prcss"); 542 - __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); 536 + chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); 543 537 } 544 538 545 539 void chsc_chp_online(struct chp_id chpid)
+1 -1
drivers/s390/cio/chsc.h
··· 157 157 #ifdef CONFIG_SCM_BUS 158 158 int scm_update_information(void); 159 159 #else /* CONFIG_SCM_BUS */ 160 - #define scm_update_information() 0 160 + static inline int scm_update_information(void) { return 0; } 161 161 #endif /* CONFIG_SCM_BUS */ 162 162 163 163
+2 -2
drivers/s390/cio/cio.c
··· 962 962 atomic_inc(&chpid_reset_count); 963 963 } 964 964 /* Wait for machine check for all channel paths. */ 965 - timeout = get_clock() + (RCHP_TIMEOUT << 12); 965 + timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); 966 966 while (atomic_read(&chpid_reset_count) != 0) { 967 - if (get_clock() > timeout) 967 + if (get_tod_clock() > timeout) 968 968 break; 969 969 cpu_relax(); 970 970 }
+3 -3
drivers/s390/cio/cmf.c
··· 33 33 #include <linux/module.h> 34 34 #include <linux/moduleparam.h> 35 35 #include <linux/slab.h> 36 - #include <linux/timex.h> /* get_clock() */ 36 + #include <linux/timex.h> /* get_tod_clock() */ 37 37 38 38 #include <asm/ccwdev.h> 39 39 #include <asm/cio.h> ··· 326 326 memcpy(cmb_data->last_block, hw_block, cmb_data->size); 327 327 memcpy(reference_buf, hw_block, cmb_data->size); 328 328 } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); 329 - cmb_data->last_update = get_clock(); 329 + cmb_data->last_update = get_tod_clock(); 330 330 kfree(reference_buf); 331 331 return 0; 332 332 } ··· 428 428 memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); 429 429 cmb_data->last_update = 0; 430 430 } 431 - cdev->private->cmb_start_time = get_clock(); 431 + cdev->private->cmb_start_time = get_tod_clock(); 432 432 spin_unlock_irq(cdev->ccwlock); 433 433 } 434 434
+1 -1
drivers/s390/cio/css.c
··· 780 780 css->cssid = nr; 781 781 dev_set_name(&css->device, "css%x", nr); 782 782 css->device.release = channel_subsystem_release; 783 - tod_high = (u32) (get_clock() >> 32); 783 + tod_high = (u32) (get_tod_clock() >> 32); 784 784 css_generate_pgid(css, tod_high); 785 785 return 0; 786 786 }
+10
drivers/s390/cio/device.c
··· 632 632 return count; 633 633 } 634 634 635 + static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, 636 + char *buf) 637 + { 638 + struct subchannel *sch = to_subchannel(dev); 639 + 640 + return sprintf(buf, "%02x\n", sch->vpm); 641 + } 642 + 635 643 static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); 636 644 static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); 637 645 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); ··· 648 640 static DEVICE_ATTR(online, 0644, online_show, online_store); 649 641 static DEVICE_ATTR(availability, 0444, available_show, NULL); 650 642 static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); 643 + static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); 651 644 652 645 static struct attribute *io_subchannel_attrs[] = { 653 646 &dev_attr_chpids.attr, 654 647 &dev_attr_pimpampom.attr, 655 648 &dev_attr_logging.attr, 649 + &dev_attr_vpm.attr, 656 650 NULL, 657 651 }; 658 652
+1 -1
drivers/s390/cio/device_fsm.c
··· 47 47 cc = stsch_err(sch->schid, &schib); 48 48 49 49 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 - "device information:\n", get_clock()); 50 + "device information:\n", get_tod_clock()); 51 51 printk(KERN_WARNING "cio: orb:\n"); 52 52 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 53 53 orb, sizeof(*orb), 0);
+105 -18
drivers/s390/cio/device_pgid.c
··· 23 23 #define PGID_RETRIES 256 24 24 #define PGID_TIMEOUT (10 * HZ) 25 25 26 + static void verify_start(struct ccw_device *cdev); 27 + 26 28 /* 27 29 * Process path verification data and report result. 28 30 */ ··· 72 70 struct subchannel *sch = to_subchannel(cdev->dev.parent); 73 71 struct ccw_request *req = &cdev->private->req; 74 72 75 - /* Adjust lpm. */ 76 - req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm); 73 + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm & 74 + ~cdev->private->path_noirq_mask); 77 75 if (!req->lpm) 78 76 goto out_nopath; 79 77 nop_build_cp(cdev); ··· 104 102 struct subchannel *sch = to_subchannel(cdev->dev.parent); 105 103 struct ccw_request *req = &cdev->private->req; 106 104 107 - if (rc == 0) 105 + switch (rc) { 106 + case 0: 108 107 sch->vpm |= req->lpm; 109 - else if (rc != -EACCES) 108 + break; 109 + case -ETIME: 110 + cdev->private->path_noirq_mask |= req->lpm; 111 + break; 112 + case -EACCES: 113 + cdev->private->path_notoper_mask |= req->lpm; 114 + break; 115 + default: 110 116 goto err; 117 + } 118 + /* Continue on the next path. */ 111 119 req->lpm >>= 1; 112 120 nop_do(cdev); 113 121 return; ··· 142 130 cp->count = sizeof(*pgid); 143 131 cp->flags = CCW_FLAG_SLI; 144 132 req->cp = cp; 133 + } 134 + 135 + static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc) 136 + { 137 + if (rc) { 138 + /* We don't know the path groups' state. Abort. */ 139 + verify_done(cdev, rc); 140 + return; 141 + } 142 + /* 143 + * Path groups have been reset. Restart path verification but 144 + * leave paths in path_noirq_mask out. 145 + */ 146 + cdev->private->flags.pgid_unknown = 0; 147 + verify_start(cdev); 148 + } 149 + 150 + /* 151 + * Reset pathgroups and restart path verification, leave unusable paths out. 
152 + */ 153 + static void pgid_wipeout_start(struct ccw_device *cdev) 154 + { 155 + struct subchannel *sch = to_subchannel(cdev->dev.parent); 156 + struct ccw_dev_id *id = &cdev->private->dev_id; 157 + struct ccw_request *req = &cdev->private->req; 158 + u8 fn; 159 + 160 + CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n", 161 + id->ssid, id->devno, cdev->private->pgid_valid_mask, 162 + cdev->private->path_noirq_mask); 163 + 164 + /* Initialize request data. */ 165 + memset(req, 0, sizeof(*req)); 166 + req->timeout = PGID_TIMEOUT; 167 + req->maxretries = PGID_RETRIES; 168 + req->lpm = sch->schib.pmcw.pam; 169 + req->callback = pgid_wipeout_callback; 170 + fn = SPID_FUNC_DISBAND; 171 + if (cdev->private->flags.mpath) 172 + fn |= SPID_FUNC_MULTI_PATH; 173 + spid_build_cp(cdev, fn); 174 + ccw_request_start(cdev); 145 175 } 146 176 147 177 /* ··· 211 157 return; 212 158 213 159 out_nopath: 160 + if (cdev->private->flags.pgid_unknown) { 161 + /* At least one SPID could be partially done. */ 162 + pgid_wipeout_start(cdev); 163 + return; 164 + } 214 165 verify_done(cdev, sch->vpm ? 0 : -EACCES); 215 166 } 216 - 217 - static void verify_start(struct ccw_device *cdev); 218 167 219 168 /* 220 169 * Process SET PGID request result for a single path. 
··· 231 174 case 0: 232 175 sch->vpm |= req->lpm & sch->opm; 233 176 break; 177 + case -ETIME: 178 + cdev->private->flags.pgid_unknown = 1; 179 + cdev->private->path_noirq_mask |= req->lpm; 180 + break; 234 181 case -EACCES: 182 + cdev->private->path_notoper_mask |= req->lpm; 235 183 break; 236 184 case -EOPNOTSUPP: 237 185 if (cdev->private->flags.mpath) { ··· 392 330 else { 393 331 donepm = pgid_to_donepm(cdev); 394 332 sch->vpm = donepm & sch->opm; 395 - cdev->private->pgid_todo_mask &= ~donepm; 396 333 cdev->private->pgid_reset_mask |= reset; 334 + cdev->private->pgid_todo_mask &= 335 + ~(donepm | cdev->private->path_noirq_mask); 397 336 pgid_fill(cdev, pgid); 398 337 } 399 338 out: ··· 404 341 cdev->private->pgid_todo_mask, mismatch, reserved, reset); 405 342 switch (rc) { 406 343 case 0: 344 + if (cdev->private->flags.pgid_unknown) { 345 + pgid_wipeout_start(cdev); 346 + return; 347 + } 407 348 /* Anything left to do? */ 408 349 if (cdev->private->pgid_todo_mask == 0) { 409 350 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); ··· 451 384 { 452 385 struct subchannel *sch = to_subchannel(cdev->dev.parent); 453 386 struct ccw_request *req = &cdev->private->req; 387 + int ret; 454 388 455 - /* Adjust lpm if paths are not set in pam. */ 456 - req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam); 389 + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & 390 + ~cdev->private->path_noirq_mask); 457 391 if (!req->lpm) 458 392 goto out_nopath; 459 393 snid_build_cp(cdev); ··· 462 394 return; 463 395 464 396 out_nopath: 465 - snid_done(cdev, cdev->private->pgid_valid_mask ? 
0 : -EACCES); 397 + if (cdev->private->pgid_valid_mask) 398 + ret = 0; 399 + else if (cdev->private->path_noirq_mask) 400 + ret = -ETIME; 401 + else 402 + ret = -EACCES; 403 + snid_done(cdev, ret); 466 404 } 467 405 468 406 /* ··· 478 404 { 479 405 struct ccw_request *req = &cdev->private->req; 480 406 481 - if (rc == 0) 407 + switch (rc) { 408 + case 0: 482 409 cdev->private->pgid_valid_mask |= req->lpm; 483 - else if (rc != -EACCES) 410 + break; 411 + case -ETIME: 412 + cdev->private->flags.pgid_unknown = 1; 413 + cdev->private->path_noirq_mask |= req->lpm; 414 + break; 415 + case -EACCES: 416 + cdev->private->path_notoper_mask |= req->lpm; 417 + break; 418 + default: 484 419 goto err; 420 + } 421 + /* Continue on the next path. */ 485 422 req->lpm >>= 1; 486 423 snid_do(cdev); 487 424 return; ··· 512 427 513 428 sch->vpm = 0; 514 429 sch->lpm = sch->schib.pmcw.pam; 430 + 431 + /* Initialize PGID data. */ 432 + memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); 433 + cdev->private->pgid_valid_mask = 0; 434 + cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; 435 + cdev->private->path_notoper_mask = 0; 436 + 515 437 /* Initialize request data. */ 516 438 memset(req, 0, sizeof(*req)); 517 439 req->timeout = PGID_TIMEOUT; ··· 551 459 */ 552 460 void ccw_device_verify_start(struct ccw_device *cdev) 553 461 { 554 - struct subchannel *sch = to_subchannel(cdev->dev.parent); 555 - 556 462 CIO_TRACE_EVENT(4, "vrfy"); 557 463 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); 558 - /* Initialize PGID data. */ 559 - memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); 560 - cdev->private->pgid_valid_mask = 0; 561 - cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; 562 464 /* 563 465 * Initialize pathgroup and multipath state with target values. 564 466 * They may change in the course of path verification. 
··· 560 474 cdev->private->flags.pgroup = cdev->private->options.pgroup; 561 475 cdev->private->flags.mpath = cdev->private->options.mpath; 562 476 cdev->private->flags.doverify = 0; 477 + cdev->private->path_noirq_mask = 0; 563 478 verify_start(cdev); 564 479 } 565 480
+5
drivers/s390/cio/io_sch.h
··· 126 126 u8 pgid_valid_mask; /* mask of valid PGIDs */ 127 127 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ 128 128 u8 pgid_reset_mask; /* mask of PGIDs which were reset */ 129 + u8 path_noirq_mask; /* mask of paths for which no irq was 130 + received */ 131 + u8 path_notoper_mask; /* mask of paths which were found 132 + not operable */ 129 133 u8 path_gone_mask; /* mask of paths, that became unavailable */ 130 134 u8 path_new_mask; /* mask of paths, that became available */ 131 135 struct { ··· 149 145 unsigned int resuming:1; /* recognition while resume */ 150 146 unsigned int pgroup:1; /* pathgroup is set up */ 151 147 unsigned int mpath:1; /* multipathing is set up */ 148 + unsigned int pgid_unknown:1;/* unknown pgid state */ 152 149 unsigned int initialized:1; /* set if initial reference held */ 153 150 } __attribute__((packed)) flags; 154 151 unsigned long intparm; /* user interruption parameter */
+6 -6
drivers/s390/cio/qdio_main.c
··· 338 338 retries++; 339 339 340 340 if (!start_time) { 341 - start_time = get_clock(); 341 + start_time = get_tod_clock(); 342 342 goto again; 343 343 } 344 - if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 344 + if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 345 345 goto again; 346 346 } 347 347 if (retries) { ··· 504 504 int count, stop; 505 505 unsigned char state = 0; 506 506 507 - q->timestamp = get_clock(); 507 + q->timestamp = get_tod_clock(); 508 508 509 509 /* 510 510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved ··· 563 563 if (bufnr != q->last_move) { 564 564 q->last_move = bufnr; 565 565 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 566 - q->u.in.timestamp = get_clock(); 566 + q->u.in.timestamp = get_tod_clock(); 567 567 return 1; 568 568 } else 569 569 return 0; ··· 595 595 * At this point we know, that inbound first_to_check 596 596 * has (probably) not moved (see qdio_inbound_processing). 597 597 */ 598 - if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 598 + if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 599 599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 600 600 q->first_to_check); 601 601 return 1; ··· 772 772 int count, stop; 773 773 unsigned char state = 0; 774 774 775 - q->timestamp = get_clock(); 775 + q->timestamp = get_tod_clock(); 776 776 777 777 if (need_siga_sync(q)) 778 778 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+1 -1
drivers/s390/net/qeth_core.h
··· 818 818 819 819 static inline int qeth_get_micros(void) 820 820 { 821 - return (int) (get_clock() >> 12); 821 + return (int) (get_tod_clock() >> 12); 822 822 } 823 823 824 824 static inline int qeth_get_ip_version(struct sk_buff *skb)
+1 -1
drivers/s390/scsi/zfcp_fsf.c
··· 727 727 zfcp_reqlist_add(adapter->req_list, req); 728 728 729 729 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 730 - req->issued = get_clock(); 730 + req->issued = get_tod_clock(); 731 731 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 732 732 del_timer(&req->timer); 733 733 /* lookup request again, list might have changed */
+1 -1
drivers/s390/scsi/zfcp_qdio.c
··· 68 68 unsigned long long now, span; 69 69 int used; 70 70 71 - now = get_clock_monotonic(); 71 + now = get_tod_clock_monotonic(); 72 72 span = (now - qdio->req_q_time) >> 12; 73 73 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); 74 74 qdio->req_q_util += used * span;
+1
drivers/tty/tty_io.c
··· 2199 2199 mutex_unlock(&tty->termios_mutex); 2200 2200 return 0; 2201 2201 } 2202 + EXPORT_SYMBOL(tty_do_resize); 2202 2203 2203 2204 /** 2204 2205 * tiocswinsz - implement window size set ioctl
-1
drivers/uio/Kconfig
··· 1 1 menuconfig UIO 2 2 tristate "Userspace I/O drivers" 3 - depends on !S390 4 3 help 5 4 Enable this to allow the userspace driver core code to be 6 5 built. This code allows userspace programs easy access to
+17 -3
include/asm-generic/io.h
··· 53 53 #endif 54 54 55 55 #define readb __raw_readb 56 - #define readw(addr) __le16_to_cpu(__raw_readw(addr)) 57 - #define readl(addr) __le32_to_cpu(__raw_readl(addr)) 56 + 57 + #define readw readw 58 + static inline u16 readw(const volatile void __iomem *addr) 59 + { 60 + return __le16_to_cpu(__raw_readw(addr)); 61 + } 62 + 63 + #define readl readl 64 + static inline u32 readl(const volatile void __iomem *addr) 65 + { 66 + return __le32_to_cpu(__raw_readl(addr)); 67 + } 58 68 59 69 #ifndef __raw_writeb 60 70 static inline void __raw_writeb(u8 b, volatile void __iomem *addr) ··· 99 89 } 100 90 #endif 101 91 102 - #define readq(addr) __le64_to_cpu(__raw_readq(addr)) 92 + #define readq readq 93 + static inline u64 readq(const volatile void __iomem *addr) 94 + { 95 + return __le64_to_cpu(__raw_readq(addr)); 96 + } 103 97 104 98 #ifndef __raw_writeq 105 99 static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
-10
include/asm-generic/pgtable.h
··· 197 197 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 198 198 #endif 199 199 200 - #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY 201 - #define page_test_and_clear_dirty(pfn, mapped) (0) 202 - #endif 203 - 204 - #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY 205 - #define pte_maybe_dirty(pte) pte_dirty(pte) 206 - #else 207 - #define pte_maybe_dirty(pte) (1) 208 - #endif 209 - 210 200 #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 211 201 #define page_test_and_clear_young(pfn) (0) 212 202 #endif
-8
include/linux/page-flags.h
··· 303 303 304 304 static inline void SetPageUptodate(struct page *page) 305 305 { 306 - #ifdef CONFIG_S390 307 - if (!test_and_set_bit(PG_uptodate, &page->flags)) 308 - page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, 0); 309 - #else 310 306 /* 311 307 * Memory barrier must be issued before setting the PG_uptodate bit, 312 308 * so that all previous stores issued in order to bring the page 313 309 * uptodate are actually visible before PageUptodate becomes true. 314 - * 315 - * s390 doesn't need an explicit smp_wmb here because the test and 316 - * set bit already provides full barriers. 317 310 */ 318 311 smp_wmb(); 319 312 set_bit(PG_uptodate, &(page)->flags); 320 - #endif 321 313 } 322 314 323 315 CLEARPAGEFLAG(Uptodate, uptodate)
-24
mm/rmap.c
··· 1126 1126 */ 1127 1127 void page_remove_rmap(struct page *page) 1128 1128 { 1129 - struct address_space *mapping = page_mapping(page); 1130 1129 bool anon = PageAnon(page); 1131 1130 bool locked; 1132 1131 unsigned long flags; ··· 1142 1143 if (!atomic_add_negative(-1, &page->_mapcount)) 1143 1144 goto out; 1144 1145 1145 - /* 1146 - * Now that the last pte has gone, s390 must transfer dirty 1147 - * flag from storage key to struct page. We can usually skip 1148 - * this if the page is anon, so about to be freed; but perhaps 1149 - * not if it's in swapcache - there might be another pte slot 1150 - * containing the swap entry, but page not yet written to swap. 1151 - * 1152 - * And we can skip it on file pages, so long as the filesystem 1153 - * participates in dirty tracking (note that this is not only an 1154 - * optimization but also solves problems caused by dirty flag in 1155 - * storage key getting set by a write from inside kernel); but need to 1156 - * catch shm and tmpfs and ramfs pages which have been modified since 1157 - * creation by read fault. 1158 - * 1159 - * Note that mapping must be decided above, before decrementing 1160 - * mapcount (which luckily provides a barrier): once page is unmapped, 1161 - * it could be truncated and page->mapping reset to NULL at any moment. 1162 - * Note also that we are relying on page_mapping(page) to set mapping 1163 - * to &swapper_space when PageSwapCache(page). 1164 - */ 1165 - if (mapping && !mapping_cap_account_dirty(mapping) && 1166 - page_test_and_clear_dirty(page_to_pfn(page), 1)) 1167 - set_page_dirty(page); 1168 1146 /* 1169 1147 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED 1170 1148 * and not charged by memcg for now.
+4 -1
net/iucv/iucv.c
··· 831 831 { 832 832 int i; 833 833 834 + if (cpumask_empty(&iucv_irq_cpumask)) 835 + return NOTIFY_DONE; 836 + 834 837 get_online_cpus(); 835 - on_each_cpu(iucv_block_cpu, NULL, 1); 838 + on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1); 836 839 preempt_disable(); 837 840 for (i = 0; i < iucv_max_pathid; i++) { 838 841 if (iucv_path_table[i])