Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] dcss: Initialize workqueue before using it.
[S390] Remove BUILD_BUG_ON() in vmem code.
[S390] sclp_tty/sclp_vt220: Fix scheduling while atomic
[S390] dasd: fix panic caused by alias device offline
[S390] dasd: add ifcc handling
[S390] latencytop s390 support.
[S390] Implement ext2_find_next_bit.
[S390] Cleanup & optimize bitops.
[S390] Define GENERIC_LOCKBREAK.
[S390] console: allow vt220 console to be the only console
[S390] Fix couple of section mismatches.
[S390] Fix smp_call_function_mask semantics.
[S390] Fix linker script.
[S390] DEBUG_PAGEALLOC support for s390.
[S390] cio: Add shutdown callback for ccwgroup.
[S390] cio: Update documentation.
[S390] cio: Clean up chsc response code handling.
[S390] cio: make sense id procedure work with partial hardware response

+595 -528 (total)
+16 -5
Documentation/DocBook/s390-drivers.tmpl
··· 59 59 <title>Introduction</title> 60 60 <para> 61 61 This document describes the interfaces available for device drivers that 62 - drive s390 based channel attached devices. This includes interfaces for 62 + drive s390 based channel attached I/O devices. This includes interfaces for 63 63 interaction with the hardware and interfaces for interacting with the 64 64 common driver core. Those interfaces are provided by the s390 common I/O 65 65 layer. ··· 86 86 The ccw bus typically contains the majority of devices available to 87 87 a s390 system. Named after the channel command word (ccw), the basic 88 88 command structure used to address its devices, the ccw bus contains 89 - so-called channel attached devices. They are addressed via subchannels, 90 - visible on the css bus. A device driver, however, will never interact 91 - with the subchannel directly, but only via the device on the ccw bus, 89 + so-called channel attached devices. They are addressed via I/O 90 + subchannels, visible on the css bus. A device driver for 91 + channel-attached devices, however, will never interact with the 92 + subchannel directly, but only via the I/O device on the ccw bus, 92 93 the ccw device. 93 94 </para> 94 95 <sect1 id="channelIO"> ··· 117 116 !Iinclude/asm-s390/ccwdev.h 118 117 !Edrivers/s390/cio/device.c 119 118 !Edrivers/s390/cio/device_ops.c 120 - !Edrivers/s390/cio/airq.c 121 119 </sect1> 122 120 <sect1 id="cmf"> 123 121 <title>The channel-measurement facility</title> ··· 145 145 !Iinclude/asm-s390/ccwgroup.h 146 146 !Edrivers/s390/cio/ccwgroup.c 147 147 </sect1> 148 + </chapter> 149 + 150 + <chapter id="genericinterfaces"> 151 + <title>Generic interfaces</title> 152 + <para> 153 + Some interfaces are available to other drivers that do not necessarily 154 + have anything to do with the busses described above, but still are 155 + indirectly using basic infrastructure in the common I/O layer. 156 + One example is the support for adapter interrupts. 157 + </para> 158 + !Edrivers/s390/cio/airq.c 148 159 </chapter> 149 160 150 161 </book>
+8
arch/s390/Kconfig
··· 16 16 config STACKTRACE_SUPPORT 17 17 def_bool y 18 18 19 + config HAVE_LATENCYTOP_SUPPORT 20 + def_bool y 21 + 19 22 config RWSEM_GENERIC_SPINLOCK 20 23 bool 21 24 ··· 49 46 50 47 config NO_DMA 51 48 def_bool y 49 + 50 + config GENERIC_LOCKBREAK 51 + bool 52 + default y 53 + depends on SMP && PREEMPT 52 54 53 55 mainmenu "Linux Kernel Configuration" 54 56
+8
arch/s390/Kconfig.debug
··· 6 6 7 7 source "lib/Kconfig.debug" 8 8 9 + config DEBUG_PAGEALLOC 10 + bool "Debug page memory allocations" 11 + depends on DEBUG_KERNEL 12 + help 13 + Unmap pages from the kernel linear mapping after free_pages(). 14 + This results in a slowdown, but helps to find certain types of 15 + memory corruptions. 16 + 9 17 endmenu
+2 -5
arch/s390/kernel/entry.S
··· 11 11 12 12 #include <linux/sys.h> 13 13 #include <linux/linkage.h> 14 + #include <linux/init.h> 14 15 #include <asm/cache.h> 15 16 #include <asm/lowcore.h> 16 17 #include <asm/errno.h> ··· 831 830 * Restart interruption handler, kick starter for additional CPUs 832 831 */ 833 832 #ifdef CONFIG_SMP 834 - #ifndef CONFIG_HOTPLUG_CPU 835 - .section .init.text,"ax" 836 - #endif 833 + __CPUINIT 837 834 .globl restart_int_handler 838 835 restart_int_handler: 839 836 l %r15,__LC_SAVE_AREA+60 # load ksp ··· 844 845 br %r14 # branch to start_secondary 845 846 restart_addr: 846 847 .long start_secondary 847 - #ifndef CONFIG_HOTPLUG_CPU 848 848 .previous 849 - #endif 850 849 #else 851 850 /* 852 851 * If we do not run with SMP enabled, let the new CPU crash ...
+2 -5
arch/s390/kernel/entry64.S
··· 11 11 12 12 #include <linux/sys.h> 13 13 #include <linux/linkage.h> 14 + #include <linux/init.h> 14 15 #include <asm/cache.h> 15 16 #include <asm/lowcore.h> 16 17 #include <asm/errno.h> ··· 802 801 * Restart interruption handler, kick starter for additional CPUs 803 802 */ 804 803 #ifdef CONFIG_SMP 805 - #ifndef CONFIG_HOTPLUG_CPU 806 - .section .init.text,"ax" 807 - #endif 804 + __CPUINIT 808 805 .globl restart_int_handler 809 806 restart_int_handler: 810 807 lg %r15,__LC_SAVE_AREA+120 # load ksp ··· 813 814 lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone 814 815 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 815 816 jg start_secondary 816 - #ifndef CONFIG_HOTPLUG_CPU 817 817 .previous 818 - #endif 819 818 #else 820 819 /* 821 820 * If we do not run with SMP enabled, let the new CPU crash ...
+18 -9
arch/s390/kernel/ipl.c
··· 439 439 reipl_ccw_dev(&ipl_info.data.ccw.dev_id); 440 440 } 441 441 442 - static int ipl_init(void) 442 + static int __init ipl_init(void) 443 443 { 444 444 int rc; 445 445 ··· 471 471 return 0; 472 472 } 473 473 474 - static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run, 475 - ipl_init}; 474 + static struct shutdown_action __refdata ipl_action = { 475 + .name = SHUTDOWN_ACTION_IPL_STR, 476 + .fn = ipl_run, 477 + .init = ipl_init, 478 + }; 476 479 477 480 /* 478 481 * reipl shutdown action: Reboot Linux on shutdown. ··· 795 792 return 0; 796 793 } 797 794 798 - static int reipl_init(void) 795 + static int __init reipl_init(void) 799 796 { 800 797 int rc; 801 798 ··· 822 819 return 0; 823 820 } 824 821 825 - static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR, 826 - reipl_run, reipl_init}; 822 + static struct shutdown_action __refdata reipl_action = { 823 + .name = SHUTDOWN_ACTION_REIPL_STR, 824 + .fn = reipl_run, 825 + .init = reipl_init, 826 + }; 827 827 828 828 /* 829 829 * dump shutdown action: Dump Linux on shutdown. ··· 1004 998 return 0; 1005 999 } 1006 1000 1007 - static int dump_init(void) 1001 + static int __init dump_init(void) 1008 1002 { 1009 1003 int rc; 1010 1004 ··· 1026 1020 return 0; 1027 1021 } 1028 1022 1029 - static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR, 1030 - dump_run, dump_init}; 1023 + static struct shutdown_action __refdata dump_action = { 1024 + .name = SHUTDOWN_ACTION_DUMP_STR, 1025 + .fn = dump_run, 1026 + .init = dump_init, 1027 + }; 1031 1028 1032 1029 /* 1033 1030 * vmcmd shutdown action: Trigger vm command on shutdown.
+7 -7
arch/s390/kernel/setup.c
··· 77 77 unsigned long elf_hwcap = 0; 78 78 char elf_platform[ELF_PLATFORM_SIZE]; 79 79 80 - struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; 80 + struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS]; 81 81 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ 82 82 static unsigned long __initdata memory_end; 83 83 ··· 145 145 146 146 static int __init conmode_setup(char *str) 147 147 { 148 - #if defined(CONFIG_SCLP_CONSOLE) 148 + #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 149 149 if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) 150 150 SET_CONSOLE_SCLP; 151 151 #endif ··· 183 183 */ 184 184 cpcmd("TERM CONMODE 3215", NULL, 0, NULL); 185 185 if (ptr == NULL) { 186 - #if defined(CONFIG_SCLP_CONSOLE) 186 + #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 187 187 SET_CONSOLE_SCLP; 188 188 #endif 189 189 return; ··· 193 193 SET_CONSOLE_3270; 194 194 #elif defined(CONFIG_TN3215_CONSOLE) 195 195 SET_CONSOLE_3215; 196 - #elif defined(CONFIG_SCLP_CONSOLE) 196 + #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 197 197 SET_CONSOLE_SCLP; 198 198 #endif 199 199 } else if (strncmp(ptr + 8, "3215", 4) == 0) { ··· 201 201 SET_CONSOLE_3215; 202 202 #elif defined(CONFIG_TN3270_CONSOLE) 203 203 SET_CONSOLE_3270; 204 - #elif defined(CONFIG_SCLP_CONSOLE) 204 + #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 205 205 SET_CONSOLE_SCLP; 206 206 #endif 207 207 } ··· 212 212 SET_CONSOLE_3270; 213 213 #endif 214 214 } else { 215 - #if defined(CONFIG_SCLP_CONSOLE) 215 + #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 216 216 SET_CONSOLE_SCLP; 217 217 #endif 218 218 } ··· 528 528 memory_size = 0; 529 529 memory_end &= PAGE_MASK; 530 530 531 - max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START; 531 + max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS; 532 532 memory_end = min(max_mem, memory_end); 533 533 534 534 /*
+6 -7
arch/s390/kernel/smp.c
··· 225 225 * You must not call this function with disabled interrupts or from a 226 226 * hardware interrupt handler or from a bottom half handler. 227 227 */ 228 - int 229 - smp_call_function_mask(cpumask_t mask, 230 - void (*func)(void *), void *info, 231 - int wait) 228 + int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, 229 + int wait) 232 230 { 233 231 preempt_disable(); 232 + cpu_clear(smp_processor_id(), mask); 234 233 __smp_call_function_map(func, info, 0, wait, mask); 235 234 preempt_enable(); 236 235 return 0; ··· 1007 1008 .notifier_call = smp_cpu_notify, 1008 1009 }; 1009 1010 1010 - static int smp_add_present_cpu(int cpu) 1011 + static int __devinit smp_add_present_cpu(int cpu) 1011 1012 { 1012 1013 struct cpu *c = &per_cpu(cpu_devices, cpu); 1013 1014 struct sys_device *s = &c->sysdev; ··· 1035 1036 } 1036 1037 1037 1038 #ifdef CONFIG_HOTPLUG_CPU 1038 - static ssize_t rescan_store(struct sys_device *dev, const char *buf, 1039 - size_t count) 1039 + static ssize_t __ref rescan_store(struct sys_device *dev, 1040 + const char *buf, size_t count) 1040 1041 { 1041 1042 cpumask_t newcpus; 1042 1043 int cpu;
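
With the semantics fix above, smp_call_function_mask() never runs func on the calling CPU. A hypothetical caller that also needs the work done locally would invoke the function itself; a minimal sketch (do_sample/sample_cpus are made-up names, not part of this merge):

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>

static void do_sample(void *info)
{
	/* per-cpu work */
}

static void sample_cpus(cpumask_t cpus)
{
	preempt_disable();
	/* smp_call_function_mask() skips the calling CPU, so cover it here. */
	if (cpu_isset(smp_processor_id(), cpus))
		do_sample(NULL);
	smp_call_function_mask(cpus, do_sample, NULL, 1);
	preempt_enable();
}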
+23 -8
arch/s390/kernel/stacktrace.c
··· 14 14 static unsigned long save_context_stack(struct stack_trace *trace, 15 15 unsigned long sp, 16 16 unsigned long low, 17 - unsigned long high) 17 + unsigned long high, 18 + int savesched) 18 19 { 19 20 struct stack_frame *sf; 20 21 struct pt_regs *regs; ··· 48 47 return sp; 49 48 regs = (struct pt_regs *)sp; 50 49 addr = regs->psw.addr & PSW_ADDR_INSN; 51 - if (!trace->skip) 52 - trace->entries[trace->nr_entries++] = addr; 53 - else 54 - trace->skip--; 50 + if (savesched || !in_sched_functions(addr)) { 51 + if (!trace->skip) 52 + trace->entries[trace->nr_entries++] = addr; 53 + else 54 + trace->skip--; 55 + } 55 56 if (trace->nr_entries >= trace->max_entries) 56 57 return sp; 57 58 low = sp; ··· 69 66 orig_sp = sp & PSW_ADDR_INSN; 70 67 new_sp = save_context_stack(trace, orig_sp, 71 68 S390_lowcore.panic_stack - PAGE_SIZE, 72 - S390_lowcore.panic_stack); 69 + S390_lowcore.panic_stack, 1); 73 70 if (new_sp != orig_sp) 74 71 return; 75 72 new_sp = save_context_stack(trace, new_sp, 76 73 S390_lowcore.async_stack - ASYNC_SIZE, 77 - S390_lowcore.async_stack); 74 + S390_lowcore.async_stack, 1); 78 75 if (new_sp != orig_sp) 79 76 return; 80 77 save_context_stack(trace, new_sp, 81 78 S390_lowcore.thread_info, 82 - S390_lowcore.thread_info + THREAD_SIZE); 79 + S390_lowcore.thread_info + THREAD_SIZE, 1); 80 + } 81 + 82 + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 83 + { 84 + unsigned long sp, low, high; 85 + 86 + sp = tsk->thread.ksp & PSW_ADDR_INSN; 87 + low = (unsigned long) task_stack_page(tsk); 88 + high = (unsigned long) task_pt_regs(tsk); 89 + save_context_stack(trace, sp, low, high, 0); 90 + if (trace->nr_entries < trace->max_entries) 91 + trace->entries[trace->nr_entries++] = ULONG_MAX; 83 92 }
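
The new save_stack_trace_tsk() is what CONFIG_LATENCYTOP needs to capture another task's backtrace. A minimal, hypothetical user might look like this (buffer size chosen arbitrarily):

#include <linux/stacktrace.h>
#include <linux/sched.h>
#include <linux/kernel.h>

static void dump_task_backtrace(struct task_struct *tsk)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		/* scheduler frames are filtered out by the s390 walker above */
	};

	save_stack_trace_tsk(tsk, &trace);
	print_stack_trace(&trace, 0);
}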
+4 -1
arch/s390/kernel/traps.c
··· 271 271 printk("PREEMPT "); 272 272 #endif 273 273 #ifdef CONFIG_SMP 274 - printk("SMP"); 274 + printk("SMP "); 275 + #endif 276 + #ifdef CONFIG_DEBUG_PAGEALLOC 277 + printk("DEBUG_PAGEALLOC"); 275 278 #endif 276 279 printk("\n"); 277 280 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
+1 -1
arch/s390/kernel/vmlinux.lds.S
··· 35 35 KPROBES_TEXT 36 36 *(.fixup) 37 37 *(.gnu.warning) 38 - } = 0x0700 38 + } :text = 0x0700 39 39 40 40 _etext = .; /* End of text section */ 41 41
+27
arch/s390/mm/init.c
··· 167 167 PFN_ALIGN((unsigned long)&_eshared) - 1); 168 168 } 169 169 170 + #ifdef CONFIG_DEBUG_PAGEALLOC 171 + void kernel_map_pages(struct page *page, int numpages, int enable) 172 + { 173 + pgd_t *pgd; 174 + pud_t *pud; 175 + pmd_t *pmd; 176 + pte_t *pte; 177 + unsigned long address; 178 + int i; 179 + 180 + for (i = 0; i < numpages; i++) { 181 + address = page_to_phys(page + i); 182 + pgd = pgd_offset_k(address); 183 + pud = pud_offset(pgd, address); 184 + pmd = pmd_offset(pud, address); 185 + pte = pte_offset_kernel(pmd, address); 186 + if (!enable) { 187 + ptep_invalidate(address, pte); 188 + continue; 189 + } 190 + *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); 191 + /* Flush cpu write queue. */ 192 + mb(); 193 + } 194 + } 195 + #endif 196 + 170 197 void free_initmem(void) 171 198 { 172 199 unsigned long addr;
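
With the hook above, any access to a freed page through the kernel 1:1 mapping faults immediately. Roughly how the generic page allocator is expected to drive it when CONFIG_DEBUG_PAGEALLOC is set (a simplified sketch, not the actual mm/page_alloc.c code):

static void debug_pagealloc_free(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 0);	/* unmap: catch use-after-free */
}

static void debug_pagealloc_alloc(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 1);	/* map again before handing out */
}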
+2 -3
arch/s390/mm/vmem.c
··· 62 62 } 63 63 } 64 64 65 - static void __init_refok *vmem_alloc_pages(unsigned int order) 65 + static void __ref *vmem_alloc_pages(unsigned int order) 66 66 { 67 67 if (slab_is_available()) 68 68 return (void *)__get_free_pages(GFP_KERNEL, order); ··· 250 250 { 251 251 struct memory_segment *tmp; 252 252 253 - if (seg->start + seg->size >= VMALLOC_START || 253 + if (seg->start + seg->size >= VMEM_MAX_PHYS || 254 254 seg->start + seg->size < seg->start) 255 255 return -ERANGE; 256 256 ··· 360 360 { 361 361 int i; 362 362 363 - BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX); 364 363 NODE_DATA(0)->node_mem_map = VMEM_MAP; 365 364 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) 366 365 vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+7 -12
drivers/s390/block/dasd.c
··· 1057 1057 if (device->features & DASD_FEATURE_ERPLOG) { 1058 1058 dasd_log_sense(cqr, irb); 1059 1059 } 1060 - /* If we have no sense data, or we just don't want complex ERP 1061 - * for this request, but if we have retries left, then just 1062 - * reset this request and retry it in the fastpath 1060 + /* 1061 + * If we don't want complex ERP for this request, then just 1062 + * reset this and retry it in the fastpath 1063 1063 */ 1064 - if (!(cqr->irb.esw.esw0.erw.cons && 1065 - test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) && 1064 + if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1066 1065 cqr->retries > 0) { 1067 1066 DEV_MESSAGE(KERN_DEBUG, device, 1068 1067 "default ERP in fastpath (%i retries left)", ··· 1706 1707 1707 1708 req = (struct request *) cqr->callback_data; 1708 1709 dasd_profile_end(cqr->block, cqr, req); 1709 - status = cqr->memdev->discipline->free_cp(cqr, req); 1710 + status = cqr->block->base->discipline->free_cp(cqr, req); 1710 1711 if (status <= 0) 1711 1712 error = status ? status : -EIO; 1712 1713 dasd_end_request(req, error); ··· 1741 1742 1742 1743 /* Process requests that may be recovered */ 1743 1744 if (cqr->status == DASD_CQR_NEED_ERP) { 1744 - if (cqr->irb.esw.esw0.erw.cons && 1745 - test_bit(DASD_CQR_FLAGS_USE_ERP, 1746 - &cqr->flags)) { 1747 - erp_fn = base->discipline->erp_action(cqr); 1748 - erp_fn(cqr); 1749 - } 1745 + erp_fn = base->discipline->erp_action(cqr); 1746 + erp_fn(cqr); 1750 1747 goto restart; 1751 1748 } 1752 1749
+46 -16
drivers/s390/block/dasd_3990_erp.c
··· 164 164 165 165 /* reset status to submit the request again... */ 166 166 erp->status = DASD_CQR_FILLED; 167 - erp->retries = 1; 167 + erp->retries = 10; 168 168 } else { 169 169 DEV_MESSAGE(KERN_ERR, device, 170 170 "No alternate channel path left (lpum=%x / " ··· 301 301 erp->function = dasd_3990_erp_action_4; 302 302 303 303 } else { 304 - 305 - if (sense[25] == 0x1D) { /* state change pending */ 304 + if (sense && (sense[25] == 0x1D)) { /* state change pending */ 306 305 307 306 DEV_MESSAGE(KERN_INFO, device, 308 307 "waiting for state change pending " ··· 310 311 311 312 dasd_3990_erp_block_queue(erp, 30*HZ); 312 313 313 - } else if (sense[25] == 0x1E) { /* busy */ 314 + } else if (sense && (sense[25] == 0x1E)) { /* busy */ 314 315 DEV_MESSAGE(KERN_INFO, device, 315 316 "busy - redriving request later, " 316 317 "%d retries left", ··· 2119 2120 */ 2120 2121 2121 2122 /* 2123 + * DASD_3990_ERP_CONTROL_CHECK 2124 + * 2125 + * DESCRIPTION 2126 + * Does a generic inspection if a control check occured and sets up 2127 + * the related error recovery procedure 2128 + * 2129 + * PARAMETER 2130 + * erp pointer to the currently created default ERP 2131 + * 2132 + * RETURN VALUES 2133 + * erp_filled pointer to the erp 2134 + */ 2135 + 2136 + static struct dasd_ccw_req * 2137 + dasd_3990_erp_control_check(struct dasd_ccw_req *erp) 2138 + { 2139 + struct dasd_device *device = erp->startdev; 2140 + 2141 + if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK 2142 + | SCHN_STAT_CHN_CTRL_CHK)) { 2143 + DEV_MESSAGE(KERN_DEBUG, device, "%s", 2144 + "channel or interface control check"); 2145 + erp = dasd_3990_erp_action_4(erp, NULL); 2146 + } 2147 + return erp; 2148 + } 2149 + 2150 + /* 2122 2151 * DASD_3990_ERP_INSPECT 2123 2152 * 2124 2153 * DESCRIPTION ··· 2172 2145 if (erp_new) 2173 2146 return erp_new; 2174 2147 2148 + /* check if no concurrent sens is available */ 2149 + if (!erp->refers->irb.esw.esw0.erw.cons) 2150 + erp_new = dasd_3990_erp_control_check(erp); 2175 2151 /* distinguish between 24 and 32 byte sense data */ 2176 - if (sense[27] & DASD_SENSE_BIT_0) { 2152 + else if (sense[27] & DASD_SENSE_BIT_0) { 2177 2153 2178 2154 /* inspect the 24 byte sense data */ 2179 2155 erp_new = dasd_3990_erp_inspect_24(erp, sense); ··· 2315 2285 // return 0; /* CCW doesn't match */ 2316 2286 } 2317 2287 2288 + if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) 2289 + return 0; 2290 + 2291 + if ((cqr1->irb.esw.esw0.erw.cons == 0) && 2292 + (cqr2->irb.esw.esw0.erw.cons == 0)) { 2293 + if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2294 + SCHN_STAT_CHN_CTRL_CHK)) == 2295 + (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2296 + SCHN_STAT_CHN_CTRL_CHK))) 2297 + return 1; /* match with ifcc*/ 2298 + } 2318 2299 /* check sense data; byte 0-2,25,27 */ 2319 2300 if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) && 2320 2301 (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) && ··· 2600 2559 cqr->status = DASD_CQR_DONE; 2601 2560 2602 2561 return cqr; 2603 - } 2604 - /* check if sense data are available */ 2605 - if (!cqr->irb.ecw) { 2606 - DEV_MESSAGE(KERN_DEBUG, device, 2607 - "ERP called witout sense data avail ..." 2608 - "request %p - NO ERP possible", cqr); 2609 - 2610 - cqr->status = DASD_CQR_FAILED; 2611 - 2612 - return cqr; 2613 - 2614 2562 } 2615 2563 2616 2564 /* check if error happened before */
+2 -3
drivers/s390/block/dcssblk.c
··· 415 415 dev_info->gd->queue = dev_info->dcssblk_queue; 416 416 dev_info->gd->private_data = dev_info; 417 417 dev_info->gd->driverfs_dev = &dev_info->dev; 418 + blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); 419 + blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); 418 420 /* 419 421 * load the segment 420 422 */ ··· 473 471 rc = device_create_file(&dev_info->dev, &dev_attr_save); 474 472 if (rc) 475 473 goto unregister_dev; 476 - 477 - blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); 478 - blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); 479 474 480 475 add_disk(dev_info->gd); 481 476
+1 -1
drivers/s390/char/sclp_tty.c
··· 332 332 if (sclp_ttybuf == NULL) { 333 333 while (list_empty(&sclp_tty_pages)) { 334 334 spin_unlock_irqrestore(&sclp_tty_lock, flags); 335 - if (in_interrupt()) 335 + if (in_atomic()) 336 336 sclp_sync_wait(); 337 337 else 338 338 wait_event(sclp_tty_waitq,
+1 -1
drivers/s390/char/sclp_vt220.c
··· 400 400 while (list_empty(&sclp_vt220_empty)) { 401 401 spin_unlock_irqrestore(&sclp_vt220_lock, 402 402 flags); 403 - if (in_interrupt()) 403 + if (in_atomic()) 404 404 sclp_sync_wait(); 405 405 else 406 406 wait_event(sclp_vt220_waitq,
+12
drivers/s390/cio/ccwgroup.c
··· 391 391 return 0; 392 392 } 393 393 394 + static void ccwgroup_shutdown(struct device *dev) 395 + { 396 + struct ccwgroup_device *gdev; 397 + struct ccwgroup_driver *gdrv; 398 + 399 + gdev = to_ccwgroupdev(dev); 400 + gdrv = to_ccwgroupdrv(dev->driver); 401 + if (gdrv && gdrv->shutdown) 402 + gdrv->shutdown(gdev); 403 + } 404 + 394 405 static struct bus_type ccwgroup_bus_type = { 395 406 .name = "ccwgroup", 396 407 .match = ccwgroup_bus_match, 397 408 .uevent = ccwgroup_uevent, 398 409 .probe = ccwgroup_probe, 399 410 .remove = ccwgroup_remove, 411 + .shutdown = ccwgroup_shutdown, 400 412 }; 401 413 402 414 /**
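
A group device driver can now quiesce its hardware on reboot/halt by filling in the new callback; a minimal sketch (driver and function names are hypothetical, the other callbacks are elided):

#include <asm/ccwgroup.h>

static void example_shutdown(struct ccwgroup_device *gdev)
{
	/* Stop the device so it no longer raises interrupts or does I/O
	 * while the machine is going down. */
}

static struct ccwgroup_driver example_group_driver = {
	/* .probe, .remove, .set_online, .set_offline as before ... */
	.shutdown = example_shutdown,
};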
+55 -92
drivers/s390/cio/chsc.c
··· 26 26 27 27 static void *sei_page; 28 28 29 + static int chsc_error_from_response(int response) 30 + { 31 + switch (response) { 32 + case 0x0001: 33 + return 0; 34 + case 0x0002: 35 + case 0x0003: 36 + case 0x0006: 37 + case 0x0007: 38 + case 0x0008: 39 + case 0x000a: 40 + return -EINVAL; 41 + case 0x0004: 42 + return -EOPNOTSUPP; 43 + default: 44 + return -EIO; 45 + } 46 + } 47 + 29 48 struct chsc_ssd_area { 30 49 struct chsc_header request; 31 50 u16 :10; ··· 94 75 ret = (ccode == 3) ? -ENODEV : -EBUSY; 95 76 goto out_free; 96 77 } 97 - if (ssd_area->response.code != 0x0001) { 78 + ret = chsc_error_from_response(ssd_area->response.code); 79 + if (ret != 0) { 98 80 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", 99 81 schid.ssid, schid.sch_no, 100 82 ssd_area->response.code); 101 - ret = -EIO; 102 83 goto out_free; 103 84 } 104 85 if (!ssd_area->sch_valid) { ··· 736 717 return (ccode == 3) ? -ENODEV : -EBUSY; 737 718 738 719 switch (secm_area->response.code) { 739 - case 0x0001: /* Success. */ 740 - ret = 0; 741 - break; 742 - case 0x0003: /* Invalid block. */ 743 - case 0x0007: /* Invalid format. */ 744 - case 0x0008: /* Other invalid block. */ 745 - CIO_CRW_EVENT(2, "Error in chsc request block!\n"); 720 + case 0x0102: 721 + case 0x0103: 746 722 ret = -EINVAL; 747 - break; 748 - case 0x0004: /* Command not provided in model. */ 749 - CIO_CRW_EVENT(2, "Model does not provide secm\n"); 750 - ret = -EOPNOTSUPP; 751 - break; 752 - case 0x0102: /* cub adresses incorrect */ 753 - CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n"); 754 - ret = -EINVAL; 755 - break; 756 - case 0x0103: /* key error */ 757 - CIO_CRW_EVENT(2, "Access key error in secm\n"); 758 - ret = -EINVAL; 759 - break; 760 - case 0x0105: /* error while starting */ 761 - CIO_CRW_EVENT(2, "Error while starting channel measurement\n"); 762 - ret = -EIO; 763 - break; 764 723 default: 765 - CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", 766 - secm_area->response.code); 767 - ret = -EIO; 724 + ret = chsc_error_from_response(secm_area->response.code); 768 725 } 726 + if (ret != 0) 727 + CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", 728 + secm_area->response.code); 769 729 return ret; 770 730 } 771 731 ··· 825 827 goto out; 826 828 } 827 829 828 - switch (scpd_area->response.code) { 829 - case 0x0001: /* Success. */ 830 + ret = chsc_error_from_response(scpd_area->response.code); 831 + if (ret == 0) 832 + /* Success. */ 830 833 memcpy(desc, &scpd_area->desc, 831 834 sizeof(struct channel_path_desc)); 832 - ret = 0; 833 - break; 834 - case 0x0003: /* Invalid block. */ 835 - case 0x0007: /* Invalid format. */ 836 - case 0x0008: /* Other invalid block. */ 837 - CIO_CRW_EVENT(2, "Error in chsc request block!\n"); 838 - ret = -EINVAL; 839 - break; 840 - case 0x0004: /* Command not provided in model. */ 841 - CIO_CRW_EVENT(2, "Model does not provide scpd\n"); 842 - ret = -EOPNOTSUPP; 843 - break; 844 - default: 845 - CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", 835 + else 836 + CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", 846 837 scpd_area->response.code); 847 - ret = -EIO; 848 - } 849 838 out: 850 839 free_page((unsigned long)scpd_area); 851 840 return ret; ··· 908 923 goto out; 909 924 } 910 925 911 - switch (scmc_area->response.code) { 912 - case 0x0001: /* Success. */ 926 + ret = chsc_error_from_response(scmc_area->response.code); 927 + if (ret == 0) { 928 + /* Success. 
*/ 913 929 if (!scmc_area->not_valid) { 914 930 chp->cmg = scmc_area->cmg; 915 931 chp->shared = scmc_area->shared; ··· 921 935 chp->cmg = -1; 922 936 chp->shared = -1; 923 937 } 924 - ret = 0; 925 - break; 926 - case 0x0003: /* Invalid block. */ 927 - case 0x0007: /* Invalid format. */ 928 - case 0x0008: /* Invalid bit combination. */ 929 - CIO_CRW_EVENT(2, "Error in chsc request block!\n"); 930 - ret = -EINVAL; 931 - break; 932 - case 0x0004: /* Command not provided. */ 933 - CIO_CRW_EVENT(2, "Model does not provide scmc\n"); 934 - ret = -EOPNOTSUPP; 935 - break; 936 - default: 937 - CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", 938 + } else { 939 + CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", 938 940 scmc_area->response.code); 939 - ret = -EIO; 940 941 } 941 942 out: 942 943 free_page((unsigned long)scmc_area); ··· 975 1002 ret = (ret == 3) ? -ENODEV : -EBUSY; 976 1003 goto out; 977 1004 } 1005 + 978 1006 switch (sda_area->response.code) { 979 - case 0x0001: /* everything ok */ 980 - ret = 0; 981 - break; 982 - case 0x0003: /* invalid request block */ 983 - case 0x0007: 984 - ret = -EINVAL; 985 - break; 986 - case 0x0004: /* command not provided */ 987 - case 0x0101: /* facility not provided */ 1007 + case 0x0101: 988 1008 ret = -EOPNOTSUPP; 989 1009 break; 990 - default: /* something went wrong */ 991 - ret = -EIO; 1010 + default: 1011 + ret = chsc_error_from_response(sda_area->response.code); 992 1012 } 1013 + if (ret != 0) 1014 + CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 1015 + operation_code, sda_area->response.code); 993 1016 out: 994 1017 free_page((unsigned long)sda_area); 995 1018 return ret; ··· 1010 1041 } __attribute__ ((packed)) *scsc_area; 1011 1042 1012 1043 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1013 - if (!scsc_area) { 1014 - CIO_MSG_EVENT(0, "Was not able to determine available " 1015 - "CHSCs due to no memory.\n"); 1044 + if (!scsc_area) 1016 1045 return -ENOMEM; 1017 - } 1018 1046 1019 1047 scsc_area->request.length = 0x0010; 1020 1048 scsc_area->request.code = 0x0010; 1021 1049 1022 1050 result = chsc(scsc_area); 1023 1051 if (result) { 1024 - CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, " 1025 - "cc=%i.\n", result); 1026 - result = -EIO; 1052 + result = (result == 3) ? -ENODEV : -EBUSY; 1027 1053 goto exit; 1028 1054 } 1029 1055 1030 - if (scsc_area->response.code != 1) { 1031 - CIO_MSG_EVENT(0, "Was not able to determine " 1032 - "available CHSCs.\n"); 1033 - result = -EIO; 1034 - goto exit; 1035 - } 1036 - memcpy(&css_general_characteristics, scsc_area->general_char, 1037 - sizeof(css_general_characteristics)); 1038 - memcpy(&css_chsc_characteristics, scsc_area->chsc_char, 1039 - sizeof(css_chsc_characteristics)); 1056 + result = chsc_error_from_response(scsc_area->response.code); 1057 + if (result == 0) { 1058 + memcpy(&css_general_characteristics, scsc_area->general_char, 1059 + sizeof(css_general_characteristics)); 1060 + memcpy(&css_chsc_characteristics, scsc_area->chsc_char, 1061 + sizeof(css_chsc_characteristics)); 1062 + } else 1063 + CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", 1064 + scsc_area->response.code); 1040 1065 exit: 1041 1066 free_page ((unsigned long) scsc_area); 1042 1067 return result;
+64 -43
drivers/s390/cio/device_id.c
··· 26 26 #include "ioasm.h" 27 27 #include "io_sch.h" 28 28 29 - /* 30 - * Input : 31 - * devno - device number 32 - * ps - pointer to sense ID data area 33 - * Output : none 29 + /** 30 + * vm_vdev_to_cu_type - Convert vm virtual device into control unit type 31 + * for certain devices. 32 + * @class: virtual device class 33 + * @type: virtual device type 34 + * 35 + * Returns control unit type if a match was made or %0xffff otherwise. 34 36 */ 35 - static void 36 - VM_virtual_device_info (__u16 devno, struct senseid *ps) 37 + static int vm_vdev_to_cu_type(int class, int type) 37 38 { 38 39 static struct { 39 - int vrdcvcla, vrdcvtyp, cu_type; 40 + int class, type, cu_type; 40 41 } vm_devices[] = { 41 42 { 0x08, 0x01, 0x3480 }, 42 43 { 0x08, 0x02, 0x3430 }, ··· 69 68 { 0x40, 0xc0, 0x5080 }, 70 69 { 0x80, 0x00, 0x3215 }, 71 70 }; 71 + int i; 72 + 73 + for (i = 0; i < ARRAY_SIZE(vm_devices); i++) 74 + if (class == vm_devices[i].class && type == vm_devices[i].type) 75 + return vm_devices[i].cu_type; 76 + 77 + return 0xffff; 78 + } 79 + 80 + /** 81 + * diag_get_dev_info - retrieve device information via DIAG X'210' 82 + * @devno: device number 83 + * @ps: pointer to sense ID data area 84 + * 85 + * Returns zero on success, non-zero otherwise. 86 + */ 87 + static int diag_get_dev_info(u16 devno, struct senseid *ps) 88 + { 72 89 struct diag210 diag_data; 73 - int ccode, i; 90 + int ccode; 74 91 75 92 CIO_TRACE_EVENT (4, "VMvdinf"); 76 93 ··· 98 79 }; 99 80 100 81 ccode = diag210 (&diag_data); 101 - ps->reserved = 0xff; 82 + if ((ccode == 0) || (ccode == 2)) { 83 + ps->reserved = 0xff; 102 84 103 - /* Special case for bloody osa devices. */ 104 - if (diag_data.vrdcvcla == 0x02 && 105 - diag_data.vrdcvtyp == 0x20) { 106 - ps->cu_type = 0x3088; 107 - ps->cu_model = 0x60; 108 - return; 109 - } 110 - for (i = 0; i < ARRAY_SIZE(vm_devices); i++) 111 - if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla && 112 - diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) { 113 - ps->cu_type = vm_devices[i].cu_type; 114 - return; 85 + /* Special case for osa devices. */ 86 + if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) { 87 + ps->cu_type = 0x3088; 88 + ps->cu_model = 0x60; 89 + return 0; 115 90 } 91 + ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla, 92 + diag_data.vrdcvtyp); 93 + if (ps->cu_type != 0xffff) 94 + return 0; 95 + } 96 + 116 97 CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):" 117 98 "vdev class : %02X, vdev type : %04X \n ... " 118 99 "rdev class : %02X, rdev type : %04X, " ··· 121 102 diag_data.vrdcvcla, diag_data.vrdcvtyp, 122 103 diag_data.vrdcrccl, diag_data.vrdccrty, 123 104 diag_data.vrdccrmd); 105 + 106 + return -ENODEV; 124 107 } 125 108 126 109 /* ··· 151 130 /* Try on every path. */ 152 131 ret = -ENODEV; 153 132 while (cdev->private->imask != 0) { 133 + cdev->private->senseid.cu_type = 0xFFFF; 154 134 if ((sch->opm & cdev->private->imask) != 0 && 155 135 cdev->private->iretry > 0) { 156 136 cdev->private->iretry--; ··· 175 153 int ret; 176 154 177 155 memset (&cdev->private->senseid, 0, sizeof (struct senseid)); 178 - cdev->private->senseid.cu_type = 0xFFFF; 179 156 cdev->private->imask = 0x80; 180 157 cdev->private->iretry = 5; 181 158 ret = __ccw_device_sense_id_start(cdev); ··· 194 173 195 174 sch = to_subchannel(cdev->dev.parent); 196 175 irb = &cdev->private->irb; 197 - /* Did we get a proper answer ? 
*/ 198 - if (cdev->private->senseid.cu_type != 0xFFFF && 199 - cdev->private->senseid.reserved == 0xFF) { 200 - if (irb->scsw.count < sizeof (struct senseid) - 8) 201 - cdev->private->flags.esid = 1; 202 - return 0; /* Success */ 203 - } 176 + 204 177 /* Check the error cases. */ 205 178 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 206 179 /* Retry Sense ID if requested. */ ··· 246 231 sch->schid.ssid, sch->schid.sch_no); 247 232 return -EACCES; 248 233 } 234 + 235 + /* Did we get a proper answer ? */ 236 + if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && 237 + cdev->private->senseid.reserved == 0xFF) { 238 + if (irb->scsw.count < sizeof(struct senseid) - 8) 239 + cdev->private->flags.esid = 1; 240 + return 0; /* Success */ 241 + } 242 + 249 243 /* Hmm, whatever happened, try again. */ 250 244 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " 251 245 "subchannel 0.%x.%04x returns status %02X%02X\n", ··· 307 283 break; 308 284 /* fall through. */ 309 285 default: /* Sense ID failed. Try asking VM. */ 310 - if (MACHINE_IS_VM) { 311 - VM_virtual_device_info (cdev->private->dev_id.devno, 286 + if (MACHINE_IS_VM) 287 + ret = diag_get_dev_info(cdev->private->dev_id.devno, 312 288 &cdev->private->senseid); 313 - if (cdev->private->senseid.cu_type != 0xFFFF) { 314 - /* Got the device information from VM. */ 315 - ccw_device_sense_id_done(cdev, 0); 316 - return; 317 - } 318 - } 319 - /* 320 - * If we can't couldn't identify the device type we 321 - * consider the device "not operational". 322 - */ 323 - ccw_device_sense_id_done(cdev, -ENODEV); 289 + else 290 + /* 291 + * If we can't couldn't identify the device type we 292 + * consider the device "not operational". 293 + */ 294 + ret = -ENODEV; 295 + 296 + ccw_device_sense_id_done(cdev, ret); 324 297 break; 325 298 } 326 299 }
+268 -306
include/asm-s390/bitops.h
··· 440 440 __test_bit((nr),(addr)) ) 441 441 442 442 /* 443 - * ffz = Find First Zero in word. Undefined if no zero exists, 444 - * so code should check against ~0UL first.. 443 + * Optimized find bit helper functions. 445 444 */ 446 - static inline unsigned long ffz(unsigned long word) 447 - { 448 - unsigned long bit = 0; 449 445 446 + /** 447 + * __ffz_word_loop - find byte offset of first long != -1UL 448 + * @addr: pointer to array of unsigned long 449 + * @size: size of the array in bits 450 + */ 451 + static inline unsigned long __ffz_word_loop(const unsigned long *addr, 452 + unsigned long size) 453 + { 454 + typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 455 + unsigned long bytes = 0; 456 + 457 + asm volatile( 458 + #ifndef __s390x__ 459 + " ahi %1,31\n" 460 + " srl %1,5\n" 461 + "0: c %2,0(%0,%3)\n" 462 + " jne 1f\n" 463 + " la %0,4(%0)\n" 464 + " brct %1,0b\n" 465 + "1:\n" 466 + #else 467 + " aghi %1,63\n" 468 + " srlg %1,%1,6\n" 469 + "0: cg %2,0(%0,%3)\n" 470 + " jne 1f\n" 471 + " la %0,8(%0)\n" 472 + " brct %1,0b\n" 473 + "1:\n" 474 + #endif 475 + : "+a" (bytes), "+d" (size) 476 + : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr) 477 + : "cc" ); 478 + return bytes; 479 + } 480 + 481 + /** 482 + * __ffs_word_loop - find byte offset of first long != 0UL 483 + * @addr: pointer to array of unsigned long 484 + * @size: size of the array in bits 485 + */ 486 + static inline unsigned long __ffs_word_loop(const unsigned long *addr, 487 + unsigned long size) 488 + { 489 + typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 490 + unsigned long bytes = 0; 491 + 492 + asm volatile( 493 + #ifndef __s390x__ 494 + " ahi %1,31\n" 495 + " srl %1,5\n" 496 + "0: c %2,0(%0,%3)\n" 497 + " jne 1f\n" 498 + " la %0,4(%0)\n" 499 + " brct %1,0b\n" 500 + "1:\n" 501 + #else 502 + " aghi %1,63\n" 503 + " srlg %1,%1,6\n" 504 + "0: cg %2,0(%0,%3)\n" 505 + " jne 1f\n" 506 + " la %0,8(%0)\n" 507 + " brct %1,0b\n" 508 + "1:\n" 509 + #endif 510 + : "+a" (bytes), "+a" (size) 511 + : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr) 512 + : "cc" ); 513 + return bytes; 514 + } 515 + 516 + /** 517 + * __ffz_word - add number of the first unset bit 518 + * @nr: base value the bit number is added to 519 + * @word: the word that is searched for unset bits 520 + */ 521 + static inline unsigned long __ffz_word(unsigned long nr, unsigned long word) 522 + { 450 523 #ifdef __s390x__ 451 524 if (likely((word & 0xffffffff) == 0xffffffff)) { 452 525 word >>= 32; 453 - bit += 32; 526 + nr += 32; 454 527 } 455 528 #endif 456 529 if (likely((word & 0xffff) == 0xffff)) { 457 530 word >>= 16; 458 - bit += 16; 531 + nr += 16; 459 532 } 460 533 if (likely((word & 0xff) == 0xff)) { 461 534 word >>= 8; 462 - bit += 8; 535 + nr += 8; 463 536 } 464 - return bit + _zb_findmap[word & 0xff]; 537 + return nr + _zb_findmap[(unsigned char) word]; 465 538 } 466 539 467 - /* 468 - * __ffs = find first bit in word. Undefined if no bit exists, 469 - * so code should check against 0UL first.. 
540 + /** 541 + * __ffs_word - add number of the first set bit 542 + * @nr: base value the bit number is added to 543 + * @word: the word that is searched for set bits 470 544 */ 471 - static inline unsigned long __ffs (unsigned long word) 545 + static inline unsigned long __ffs_word(unsigned long nr, unsigned long word) 472 546 { 473 - unsigned long bit = 0; 474 - 475 547 #ifdef __s390x__ 476 548 if (likely((word & 0xffffffff) == 0)) { 477 549 word >>= 32; 478 - bit += 32; 550 + nr += 32; 479 551 } 480 552 #endif 481 553 if (likely((word & 0xffff) == 0)) { 482 554 word >>= 16; 483 - bit += 16; 555 + nr += 16; 484 556 } 485 557 if (likely((word & 0xff) == 0)) { 486 558 word >>= 8; 487 - bit += 8; 559 + nr += 8; 488 560 } 489 - return bit + _sb_findmap[word & 0xff]; 561 + return nr + _sb_findmap[(unsigned char) word]; 562 + } 563 + 564 + 565 + /** 566 + * __load_ulong_be - load big endian unsigned long 567 + * @p: pointer to array of unsigned long 568 + * @offset: byte offset of source value in the array 569 + */ 570 + static inline unsigned long __load_ulong_be(const unsigned long *p, 571 + unsigned long offset) 572 + { 573 + p = (unsigned long *)((unsigned long) p + offset); 574 + return *p; 575 + } 576 + 577 + /** 578 + * __load_ulong_le - load little endian unsigned long 579 + * @p: pointer to array of unsigned long 580 + * @offset: byte offset of source value in the array 581 + */ 582 + static inline unsigned long __load_ulong_le(const unsigned long *p, 583 + unsigned long offset) 584 + { 585 + unsigned long word; 586 + 587 + p = (unsigned long *)((unsigned long) p + offset); 588 + #ifndef __s390x__ 589 + asm volatile( 590 + " ic %0,0(%1)\n" 591 + " icm %0,2,1(%1)\n" 592 + " icm %0,4,2(%1)\n" 593 + " icm %0,8,3(%1)" 594 + : "=&d" (word) : "a" (p), "m" (*p) : "cc"); 595 + #else 596 + asm volatile( 597 + " lrvg %0,%1" 598 + : "=d" (word) : "m" (*p) ); 599 + #endif 600 + return word; 490 601 } 491 602 492 603 /* 493 - * Find-bit routines.. 604 + * The various find bit functions. 494 605 */ 495 606 496 - #ifndef __s390x__ 497 - 498 - static inline int 499 - find_first_zero_bit(const unsigned long * addr, unsigned long size) 607 + /* 608 + * ffz - find first zero in word. 609 + * @word: The word to search 610 + * 611 + * Undefined if no zero exists, so code should check against ~0UL first. 612 + */ 613 + static inline unsigned long ffz(unsigned long word) 500 614 { 501 - typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 502 - unsigned long cmp, count; 503 - unsigned int res; 615 + return __ffz_word(0, word); 616 + } 617 + 618 + /** 619 + * __ffs - find first bit in word. 620 + * @word: The word to search 621 + * 622 + * Undefined if no bit exists, so code should check against 0 first. 623 + */ 624 + static inline unsigned long __ffs (unsigned long word) 625 + { 626 + return __ffs_word(0, word); 627 + } 628 + 629 + /** 630 + * ffs - find first bit set 631 + * @x: the word to search 632 + * 633 + * This is defined the same way as 634 + * the libc and compiler builtin ffs routines, therefore 635 + * differs in spirit from the above ffz (man ffs). 636 + */ 637 + static inline int ffs(int x) 638 + { 639 + if (!x) 640 + return 0; 641 + return __ffs_word(1, x); 642 + } 643 + 644 + /** 645 + * find_first_zero_bit - find the first zero bit in a memory region 646 + * @addr: The address to start the search at 647 + * @size: The maximum size to search 648 + * 649 + * Returns the bit-number of the first zero bit, not the number of the byte 650 + * containing a bit. 
651 + */ 652 + static inline unsigned long find_first_zero_bit(const unsigned long *addr, 653 + unsigned long size) 654 + { 655 + unsigned long bytes, bits; 504 656 505 657 if (!size) 506 658 return 0; 507 - asm volatile( 508 - " lhi %1,-1\n" 509 - " lr %2,%3\n" 510 - " slr %0,%0\n" 511 - " ahi %2,31\n" 512 - " srl %2,5\n" 513 - "0: c %1,0(%0,%4)\n" 514 - " jne 1f\n" 515 - " la %0,4(%0)\n" 516 - " brct %2,0b\n" 517 - " lr %0,%3\n" 518 - " j 4f\n" 519 - "1: l %2,0(%0,%4)\n" 520 - " sll %0,3\n" 521 - " lhi %1,0xff\n" 522 - " tml %2,0xffff\n" 523 - " jno 2f\n" 524 - " ahi %0,16\n" 525 - " srl %2,16\n" 526 - "2: tml %2,0x00ff\n" 527 - " jno 3f\n" 528 - " ahi %0,8\n" 529 - " srl %2,8\n" 530 - "3: nr %2,%1\n" 531 - " ic %2,0(%2,%5)\n" 532 - " alr %0,%2\n" 533 - "4:" 534 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 535 - : "a" (size), "a" (addr), "a" (&_zb_findmap), 536 - "m" (*(addrtype *) addr) : "cc"); 537 - return (res < size) ? res : size; 659 + bytes = __ffz_word_loop(addr, size); 660 + bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes)); 661 + return (bits < size) ? bits : size; 538 662 } 539 663 540 - static inline int 541 - find_first_bit(const unsigned long * addr, unsigned long size) 664 + /** 665 + * find_first_bit - find the first set bit in a memory region 666 + * @addr: The address to start the search at 667 + * @size: The maximum size to search 668 + * 669 + * Returns the bit-number of the first set bit, not the number of the byte 670 + * containing a bit. 671 + */ 672 + static inline unsigned long find_first_bit(const unsigned long * addr, 673 + unsigned long size) 542 674 { 543 - typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 544 - unsigned long cmp, count; 545 - unsigned int res; 675 + unsigned long bytes, bits; 546 676 547 677 if (!size) 548 678 return 0; 549 - asm volatile( 550 - " slr %1,%1\n" 551 - " lr %2,%3\n" 552 - " slr %0,%0\n" 553 - " ahi %2,31\n" 554 - " srl %2,5\n" 555 - "0: c %1,0(%0,%4)\n" 556 - " jne 1f\n" 557 - " la %0,4(%0)\n" 558 - " brct %2,0b\n" 559 - " lr %0,%3\n" 560 - " j 4f\n" 561 - "1: l %2,0(%0,%4)\n" 562 - " sll %0,3\n" 563 - " lhi %1,0xff\n" 564 - " tml %2,0xffff\n" 565 - " jnz 2f\n" 566 - " ahi %0,16\n" 567 - " srl %2,16\n" 568 - "2: tml %2,0x00ff\n" 569 - " jnz 3f\n" 570 - " ahi %0,8\n" 571 - " srl %2,8\n" 572 - "3: nr %2,%1\n" 573 - " ic %2,0(%2,%5)\n" 574 - " alr %0,%2\n" 575 - "4:" 576 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 577 - : "a" (size), "a" (addr), "a" (&_sb_findmap), 578 - "m" (*(addrtype *) addr) : "cc"); 579 - return (res < size) ? res : size; 679 + bytes = __ffs_word_loop(addr, size); 680 + bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes)); 681 + return (bits < size) ? 
bits : size; 580 682 } 581 683 582 - #else /* __s390x__ */ 583 - 584 - static inline unsigned long 585 - find_first_zero_bit(const unsigned long * addr, unsigned long size) 586 - { 587 - typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 588 - unsigned long res, cmp, count; 589 - 590 - if (!size) 591 - return 0; 592 - asm volatile( 593 - " lghi %1,-1\n" 594 - " lgr %2,%3\n" 595 - " slgr %0,%0\n" 596 - " aghi %2,63\n" 597 - " srlg %2,%2,6\n" 598 - "0: cg %1,0(%0,%4)\n" 599 - " jne 1f\n" 600 - " la %0,8(%0)\n" 601 - " brct %2,0b\n" 602 - " lgr %0,%3\n" 603 - " j 5f\n" 604 - "1: lg %2,0(%0,%4)\n" 605 - " sllg %0,%0,3\n" 606 - " clr %2,%1\n" 607 - " jne 2f\n" 608 - " aghi %0,32\n" 609 - " srlg %2,%2,32\n" 610 - "2: lghi %1,0xff\n" 611 - " tmll %2,0xffff\n" 612 - " jno 3f\n" 613 - " aghi %0,16\n" 614 - " srl %2,16\n" 615 - "3: tmll %2,0x00ff\n" 616 - " jno 4f\n" 617 - " aghi %0,8\n" 618 - " srl %2,8\n" 619 - "4: ngr %2,%1\n" 620 - " ic %2,0(%2,%5)\n" 621 - " algr %0,%2\n" 622 - "5:" 623 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 624 - : "a" (size), "a" (addr), "a" (&_zb_findmap), 625 - "m" (*(addrtype *) addr) : "cc"); 626 - return (res < size) ? res : size; 627 - } 628 - 629 - static inline unsigned long 630 - find_first_bit(const unsigned long * addr, unsigned long size) 631 - { 632 - typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 633 - unsigned long res, cmp, count; 634 - 635 - if (!size) 636 - return 0; 637 - asm volatile( 638 - " slgr %1,%1\n" 639 - " lgr %2,%3\n" 640 - " slgr %0,%0\n" 641 - " aghi %2,63\n" 642 - " srlg %2,%2,6\n" 643 - "0: cg %1,0(%0,%4)\n" 644 - " jne 1f\n" 645 - " aghi %0,8\n" 646 - " brct %2,0b\n" 647 - " lgr %0,%3\n" 648 - " j 5f\n" 649 - "1: lg %2,0(%0,%4)\n" 650 - " sllg %0,%0,3\n" 651 - " clr %2,%1\n" 652 - " jne 2f\n" 653 - " aghi %0,32\n" 654 - " srlg %2,%2,32\n" 655 - "2: lghi %1,0xff\n" 656 - " tmll %2,0xffff\n" 657 - " jnz 3f\n" 658 - " aghi %0,16\n" 659 - " srl %2,16\n" 660 - "3: tmll %2,0x00ff\n" 661 - " jnz 4f\n" 662 - " aghi %0,8\n" 663 - " srl %2,8\n" 664 - "4: ngr %2,%1\n" 665 - " ic %2,0(%2,%5)\n" 666 - " algr %0,%2\n" 667 - "5:" 668 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 669 - : "a" (size), "a" (addr), "a" (&_sb_findmap), 670 - "m" (*(addrtype *) addr) : "cc"); 671 - return (res < size) ? res : size; 672 - } 673 - 674 - #endif /* __s390x__ */ 675 - 676 - static inline int 677 - find_next_zero_bit (const unsigned long * addr, unsigned long size, 678 - unsigned long offset) 684 + /** 685 + * find_next_zero_bit - find the first zero bit in a memory region 686 + * @addr: The address to base the search on 687 + * @offset: The bitnumber to start searching at 688 + * @size: The maximum size to search 689 + */ 690 + static inline int find_next_zero_bit (const unsigned long * addr, 691 + unsigned long size, 692 + unsigned long offset) 679 693 { 680 694 const unsigned long *p; 681 695 unsigned long bit, set; ··· 702 688 p = addr + offset / __BITOPS_WORDSIZE; 703 689 if (bit) { 704 690 /* 705 - * s390 version of ffz returns __BITOPS_WORDSIZE 691 + * __ffz_word returns __BITOPS_WORDSIZE 706 692 * if no zero bit is present in the word. 
707 693 */ 708 - set = ffz(*p >> bit) + bit; 694 + set = __ffz_word(0, *p >> bit) + bit; 709 695 if (set >= size) 710 696 return size + offset; 711 697 if (set < __BITOPS_WORDSIZE) ··· 717 703 return offset + find_first_zero_bit(p, size); 718 704 } 719 705 720 - static inline int 721 - find_next_bit (const unsigned long * addr, unsigned long size, 722 - unsigned long offset) 706 + /** 707 + * find_next_bit - find the first set bit in a memory region 708 + * @addr: The address to base the search on 709 + * @offset: The bitnumber to start searching at 710 + * @size: The maximum size to search 711 + */ 712 + static inline int find_next_bit (const unsigned long * addr, 713 + unsigned long size, 714 + unsigned long offset) 723 715 { 724 716 const unsigned long *p; 725 717 unsigned long bit, set; ··· 738 718 p = addr + offset / __BITOPS_WORDSIZE; 739 719 if (bit) { 740 720 /* 741 - * s390 version of __ffs returns __BITOPS_WORDSIZE 721 + * __ffs_word returns __BITOPS_WORDSIZE 742 722 * if no one bit is present in the word. 743 723 */ 744 - set = __ffs(*p & (~0UL << bit)); 724 + set = __ffs_word(0, *p & (~0UL << bit)); 745 725 if (set >= size) 746 726 return size + offset; 747 727 if (set < __BITOPS_WORDSIZE) ··· 763 743 { 764 744 return find_first_bit(b, 140); 765 745 } 766 - 767 - #include <asm-generic/bitops/ffs.h> 768 746 769 747 #include <asm-generic/bitops/fls.h> 770 748 #include <asm-generic/bitops/fls64.h> ··· 790 772 test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) 791 773 #define ext2_test_bit(nr, addr) \ 792 774 test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) 793 - #define ext2_find_next_bit(addr, size, off) \ 794 - generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) 795 775 796 - #ifndef __s390x__ 797 - 798 - static inline int 799 - ext2_find_first_zero_bit(void *vaddr, unsigned int size) 776 + static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size) 800 777 { 801 - typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 802 - unsigned long cmp, count; 803 - unsigned int res; 778 + unsigned long bytes, bits; 804 779 805 780 if (!size) 806 781 return 0; 807 - asm volatile( 808 - " lhi %1,-1\n" 809 - " lr %2,%3\n" 810 - " ahi %2,31\n" 811 - " srl %2,5\n" 812 - " slr %0,%0\n" 813 - "0: cl %1,0(%0,%4)\n" 814 - " jne 1f\n" 815 - " ahi %0,4\n" 816 - " brct %2,0b\n" 817 - " lr %0,%3\n" 818 - " j 4f\n" 819 - "1: l %2,0(%0,%4)\n" 820 - " sll %0,3\n" 821 - " ahi %0,24\n" 822 - " lhi %1,0xff\n" 823 - " tmh %2,0xffff\n" 824 - " jo 2f\n" 825 - " ahi %0,-16\n" 826 - " srl %2,16\n" 827 - "2: tml %2,0xff00\n" 828 - " jo 3f\n" 829 - " ahi %0,-8\n" 830 - " srl %2,8\n" 831 - "3: nr %2,%1\n" 832 - " ic %2,0(%2,%5)\n" 833 - " alr %0,%2\n" 834 - "4:" 835 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 836 - : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 837 - "m" (*(addrtype *) vaddr) : "cc"); 838 - return (res < size) ? res : size; 782 + bytes = __ffz_word_loop(vaddr, size); 783 + bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes)); 784 + return (bits < size) ? 
bits : size; 839 785 } 840 786 841 - #else /* __s390x__ */ 842 - 843 - static inline unsigned long 844 - ext2_find_first_zero_bit(void *vaddr, unsigned long size) 845 - { 846 - typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 847 - unsigned long res, cmp, count; 848 - 849 - if (!size) 850 - return 0; 851 - asm volatile( 852 - " lghi %1,-1\n" 853 - " lgr %2,%3\n" 854 - " aghi %2,63\n" 855 - " srlg %2,%2,6\n" 856 - " slgr %0,%0\n" 857 - "0: clg %1,0(%0,%4)\n" 858 - " jne 1f\n" 859 - " aghi %0,8\n" 860 - " brct %2,0b\n" 861 - " lgr %0,%3\n" 862 - " j 5f\n" 863 - "1: cl %1,0(%0,%4)\n" 864 - " jne 2f\n" 865 - " aghi %0,4\n" 866 - "2: l %2,0(%0,%4)\n" 867 - " sllg %0,%0,3\n" 868 - " aghi %0,24\n" 869 - " lghi %1,0xff\n" 870 - " tmlh %2,0xffff\n" 871 - " jo 3f\n" 872 - " aghi %0,-16\n" 873 - " srl %2,16\n" 874 - "3: tmll %2,0xff00\n" 875 - " jo 4f\n" 876 - " aghi %0,-8\n" 877 - " srl %2,8\n" 878 - "4: ngr %2,%1\n" 879 - " ic %2,0(%2,%5)\n" 880 - " algr %0,%2\n" 881 - "5:" 882 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 883 - : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 884 - "m" (*(addrtype *) vaddr) : "cc"); 885 - return (res < size) ? res : size; 886 - } 887 - 888 - #endif /* __s390x__ */ 889 - 890 - static inline int 891 - ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) 787 + static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size, 788 + unsigned long offset) 892 789 { 893 790 unsigned long *addr = vaddr, *p; 894 - unsigned long word, bit, set; 791 + unsigned long bit, set; 895 792 896 793 if (offset >= size) 897 794 return size; ··· 815 882 size -= offset; 816 883 p = addr + offset / __BITOPS_WORDSIZE; 817 884 if (bit) { 818 - #ifndef __s390x__ 819 - asm volatile( 820 - " ic %0,0(%1)\n" 821 - " icm %0,2,1(%1)\n" 822 - " icm %0,4,2(%1)\n" 823 - " icm %0,8,3(%1)" 824 - : "=&a" (word) : "a" (p), "m" (*p) : "cc"); 825 - #else 826 - asm volatile( 827 - " lrvg %0,%1" 828 - : "=a" (word) : "m" (*p) ); 829 - #endif 830 885 /* 831 886 * s390 version of ffz returns __BITOPS_WORDSIZE 832 887 * if no zero bit is present in the word. 833 888 */ 834 - set = ffz(word >> bit) + bit; 889 + set = ffz(__load_ulong_le(p, 0) >> bit) + bit; 835 890 if (set >= size) 836 891 return size + offset; 837 892 if (set < __BITOPS_WORDSIZE) ··· 829 908 p++; 830 909 } 831 910 return offset + ext2_find_first_zero_bit(p, size); 911 + } 912 + 913 + static inline unsigned long ext2_find_first_bit(void *vaddr, 914 + unsigned long size) 915 + { 916 + unsigned long bytes, bits; 917 + 918 + if (!size) 919 + return 0; 920 + bytes = __ffs_word_loop(vaddr, size); 921 + bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes)); 922 + return (bits < size) ? bits : size; 923 + } 924 + 925 + static inline int ext2_find_next_bit(void *vaddr, unsigned long size, 926 + unsigned long offset) 927 + { 928 + unsigned long *addr = vaddr, *p; 929 + unsigned long bit, set; 930 + 931 + if (offset >= size) 932 + return size; 933 + bit = offset & (__BITOPS_WORDSIZE - 1); 934 + offset -= bit; 935 + size -= offset; 936 + p = addr + offset / __BITOPS_WORDSIZE; 937 + if (bit) { 938 + /* 939 + * s390 version of ffz returns __BITOPS_WORDSIZE 940 + * if no zero bit is present in the word. 
941 + */ 942 + set = ffs(__load_ulong_le(p, 0) >> bit) + bit; 943 + if (set >= size) 944 + return size + offset; 945 + if (set < __BITOPS_WORDSIZE) 946 + return set + offset; 947 + offset += __BITOPS_WORDSIZE; 948 + size -= __BITOPS_WORDSIZE; 949 + p++; 950 + } 951 + return offset + ext2_find_first_bit(p, size); 832 952 } 833 953 834 954 #include <asm-generic/bitops/minix.h>
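
The newly added ext2_find_next_bit() lets callers iterate over set bits of a little-endian on-disk bitmap with the optimized helpers instead of a generic fallback; a hypothetical caller (sketch, not from this merge):

#include <linux/bitops.h>

/* Walk every set bit of a little-endian on-disk bitmap. */
static void for_each_used_block(void *bitmap, unsigned long nbits)
{
	unsigned long bit = 0;

	while ((bit = ext2_find_next_bit(bitmap, nbits, bit)) < nbits) {
		/* block 'bit' is in use */
		bit++;
	}
}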
+4
include/asm-s390/cacheflush.h
··· 24 24 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 25 25 memcpy(dst, src, len) 26 26 27 + #ifdef CONFIG_DEBUG_PAGEALLOC 28 + void kernel_map_pages(struct page *page, int numpages, int enable); 29 + #endif 30 + 27 31 #endif /* _S390_CACHEFLUSH_H */
+2
include/asm-s390/ccwgroup.h
··· 37 37 * @remove: function called on remove 38 38 * @set_online: function called when device is set online 39 39 * @set_offline: function called when device is set offline 40 + * @shutdown: function called when device is shut down 40 41 * @driver: embedded driver structure 41 42 */ 42 43 struct ccwgroup_driver { ··· 50 49 void (*remove) (struct ccwgroup_device *); 51 50 int (*set_online) (struct ccwgroup_device *); 52 51 int (*set_offline) (struct ccwgroup_device *); 52 + void (*shutdown)(struct ccwgroup_device *); 53 53 54 54 struct device_driver driver; 55 55 };
+9 -3
include/asm-s390/pgtable.h
··· 115 115 #ifndef __s390x__ 116 116 #define VMALLOC_START 0x78000000UL 117 117 #define VMALLOC_END 0x7e000000UL 118 - #define VMEM_MAP_MAX 0x80000000UL 118 + #define VMEM_MAP_END 0x80000000UL 119 119 #else /* __s390x__ */ 120 120 #define VMALLOC_START 0x3e000000000UL 121 121 #define VMALLOC_END 0x3e040000000UL 122 - #define VMEM_MAP_MAX 0x40000000000UL 122 + #define VMEM_MAP_END 0x40000000000UL 123 123 #endif /* __s390x__ */ 124 124 125 + /* 126 + * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1 127 + * mapping. This needs to be calculated at compile time since the size of the 128 + * VMEM_MAP is static but the size of struct page can change. 129 + */ 130 + #define VMEM_MAX_PHYS min(VMALLOC_START, ((VMEM_MAP_END - VMALLOC_END) / \ 131 + sizeof(struct page) * PAGE_SIZE) & ~((16 << 20) - 1)) 125 132 #define VMEM_MAP ((struct page *) VMALLOC_END) 126 - #define VMEM_MAP_SIZE ((VMALLOC_START / PAGE_SIZE) * sizeof(struct page)) 127 133 128 134 /* 129 135 * A 31 bit pagetable entry of S390 has following format:
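
To make the new VMEM_MAX_PHYS bound concrete, here is a back-of-the-envelope evaluation for the 64-bit constants above; the struct page size is an assumption for illustration only:

/* Hypothetical worked example (64 bit, assuming sizeof(struct page) == 64
 * and PAGE_SIZE == 4096):
 *
 *   memmap window = VMEM_MAP_END - VMALLOC_END
 *                 = 0x40000000000 - 0x3e040000000 = 0x1fc0000000 (~127 GB)
 *   coverable RAM = window / sizeof(struct page) * PAGE_SIZE
 *                 = 0x1fc0000000 / 64 * 4096 = 0x7f000000000 (~8 TB)
 *   16 MB segment alignment leaves that value unchanged, so
 *   VMEM_MAX_PHYS = min(VMALLOC_START, 0x7f000000000) = 0x3e000000000,
 *   i.e. here the 1:1 mapping is limited by VMALLOC_START, not by the
 *   size of the virtual memmap window. */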