Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] Update default configuration.
[S390] disassembler: fix idte instruction format.
[S390] tape: fix race with stack local wait_queue_head_t.
[S390] 3270: fix race with stack local wait_queue_head_t.
[S390] dasd: use a generic wait_queue for sleep_on
[S390] sclp_vt220: fix scheduling while atomic bug.
[S390] showmem: Only walk spanned pages.
[S390] appldata: prevent cpu hotplug when walking cpu_online_map.
[S390] Fix section mismatch warnings.
[S390] s390 types: make dma_addr_t 64 bit capable
[S390] tape: Fix race condition in tape block device driver
[S390] fix sparsemem related compile error with allnoconfig on s390

+101 -87
+3
arch/s390/Kconfig
···
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
+8
arch/s390/appldata/appldata_base.c
···
 
 	P_DEBUG("   -= Work Queue =-\n");
 	i = 0;
+	get_online_cpus();
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
···
 		}
 	}
 	spin_unlock(&appldata_ops_lock);
+	put_online_cpus();
 }
 
 /*
···
 	len = *lenp;
 	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (buf[0] == '1')
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 	else if (buf[0] == '0')
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
···
 		return -EINVAL;
 	}
 
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	appldata_interval = interval;
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 
 	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
 		interval);
···
 		return -ENOMEM;
 	}
 
+	get_online_cpus();
 	for_each_online_cpu(i)
 		appldata_online_cpu(i);
+	put_online_cpus();
 
 	/* Register cpu hotplug notifier */
 	register_hotcpu_notifier(&appldata_nb);
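Note: the hunks above all apply the same CPU-hotplug idiom: pin cpu_online_map with get_online_cpus() before walking it and release it with put_online_cpus() afterwards, so a concurrent CPU hot-unplug cannot change the mask mid-walk. A minimal sketch of the idiom (the function and its body are illustrative, not from the patch):

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static void walk_cpus_safely(void)
	{
		int cpu;

		get_online_cpus();	/* block CPU hot(un)plug until released */
		for_each_online_cpu(cpu) {
			/* per-CPU work; the online mask cannot change under us */
		}
		put_online_cpus();
	}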
+8 -3
arch/s390/defconfig
···
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Wed Apr 30 11:07:45 2008
+# Linux kernel version: 2.6.26-rc4
+# Fri May 30 09:49:33 2008
 #
 CONFIG_SCHED_MC=y
 CONFIG_MMU=y
···
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 CONFIG_MODVERSIONS=y
···
 # CONFIG_PREEMPT_RCU is not set
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
···
 CONFIG_PFAULT=y
 # CONFIG_SHARED_KERNEL is not set
 # CONFIG_CMM is not set
+# CONFIG_PAGE_STATES is not set
 CONFIG_VIRT_TIMER=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
 # CONFIG_APPLDATA_BASE is not set
···
 #
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+CONFIG_ACCESSIBILITY=y
 
 #
 # File systems
···
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
+# CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
 # CONFIG_DEBUG_RT_MUTEXES is not set
+1 -1
arch/s390/kernel/dis.c
···
 	[INSTR_RRF_F0FF]  = { 0xff, F_16,F_24,F_28,0,0,0 },     /* e.g. madbr */
 	[INSTR_RRF_FUFF]  = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, /* e.g. didbr */
 	[INSTR_RRF_RURR]  = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, /* e.g. .insn */
-	[INSTR_RRF_R0RR]  = { 0xff, R_24,R_28,R_16,0,0,0 },     /* e.g. idte */
+	[INSTR_RRF_R0RR]  = { 0xff, R_24,R_16,R_28,0,0,0 },     /* e.g. idte */
 	[INSTR_RRF_U0FF]  = { 0xff, F_24,U4_16,F_28,0,0,0 },    /* e.g. fixr */
 	[INSTR_RRF_U0RF]  = { 0xff, R_24,U4_16,F_28,0,0,0 },    /* e.g. cfebr */
 	[INSTR_RRF_M0RR]  = { 0xff, R_24,R_28,M_16,0,0,0 },     /* e.g. sske */
+1 -1
arch/s390/kernel/smp.c
···
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-int smp_rescan_cpus(void)
+int __ref smp_rescan_cpus(void)
 {
 	cpumask_t newcpus;
 	int cpu;
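Note: this hunk and the sclp_config.c one below make up the "Fix section mismatch warnings" commit. The __ref annotation (include/linux/init.h, available since 2.6.25) tells modpost that a function's references into __init/__cpuinit sections have been audited and are intentional. A hedged illustration with hypothetical names:

	#include <linux/init.h>

	/* __cpuinit code is discarded after boot when CONFIG_HOTPLUG_CPU=n. */
	static int __cpuinit bring_cpu_up(int cpu)
	{
		return 0;
	}

	/* Audited: under CONFIG_HOTPLUG_CPU (as in the hunk above) the
	 * __cpuinit text is kept, so this cross-section call is valid and
	 * __ref suppresses the modpost section-mismatch warning for it. */
	static int __ref rescan(void)
	{
		return bring_cpu_up(0);
	}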
+23 -26
arch/s390/mm/init.c
···
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
+13 -5
arch/s390/mm/vmem.c
···
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
···
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
···
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
+14 -14
drivers/s390/block/dasd.c
···
  */
 static wait_queue_head_t dasd_init_waitq;
 static wait_queue_head_t dasd_flush_wq;
+static wait_queue_head_t generic_waitq;
 
 /*
  * Allocate memory for a new device structure.
···
 	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
 	struct dasd_block *block;
+	void (*callback)(struct dasd_ccw_req *, void *data);
+	void *callback_data;
 
 	list_for_each_safe(l, n, final_queue) {
 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
 		list_del_init(&cqr->devlist);
 		block = cqr->block;
+		callback = cqr->callback;
+		callback_data = cqr->callback_data;
 		if (block)
 			spin_lock_bh(&block->queue_lock);
 		switch (cqr->status) {
···
 			BUG();
 		}
 		if (cqr->callback != NULL)
-			(cqr->callback)(cqr, cqr->callback_data);
+			(callback)(cqr, callback_data);
 		if (block)
 			spin_unlock_bh(&block->queue_lock);
 	}
···
  */
 int dasd_sleep_on(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->startdev;
 
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	dasd_add_request_tail(cqr);
-	wait_event(wait_q, _wait_for_wakeup(cqr));
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
···
  */
 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->startdev;
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	dasd_add_request_tail(cqr);
-	rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
+	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
 	if (rc == -ERESTARTSYS) {
 		dasd_cancel_req(cqr);
 		/* wait (non-interruptible) for final status */
-		wait_event(wait_q, _wait_for_wakeup(cqr));
+		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 	}
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
 	return rc;
···
 
 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
···
 		return rc;
 	}
 
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	cqr->status = DASD_CQR_QUEUED;
 	list_add(&cqr->devlist, &device->ccw_queue);
 
···
 
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 
-	wait_event(wait_q, _wait_for_wakeup(cqr));
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
···
 
 	init_waitqueue_head(&dasd_init_waitq);
 	init_waitqueue_head(&dasd_flush_wq);
+	init_waitqueue_head(&generic_waitq);
 
 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
 	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
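Note: this dasd change and the raw3270/tape changes below fix the same bug class named in the commit titles: a wait_queue_head_t on the sleeper's stack that interrupt context may still wake after the sleeper has returned. A hedged sketch of the broken and fixed shapes (struct my_req and start_io() are hypothetical, not from the patch):

	#include <linux/wait.h>

	struct my_req {				/* hypothetical request type */
		wait_queue_head_t *waitq;	/* woken from interrupt context */
		int done;
		int rc;
	};

	/* hypothetical: starts the I/O; the interrupt handler later sets
	 * req->done = 1 and calls wake_up(req->waitq) */
	static void start_io(struct my_req *req);

	/* BROKEN: wait_event() may see req->done == 1 and return before the
	 * interrupt side has called wake_up(). The frame holding wq is then
	 * reused, and the late wake_up() scribbles on dead stack memory. */
	static int broken_sleep_on(struct my_req *req)
	{
		wait_queue_head_t wq;		/* stack lifetime! */

		init_waitqueue_head(&wq);
		req->waitq = &wq;
		start_io(req);
		wait_event(wq, req->done);
		return req->rc;
	}

	/* FIXED: a waitqueue with static lifetime survives a late wake_up();
	 * spurious wakeups are harmless since the condition is per request. */
	static DECLARE_WAIT_QUEUE_HEAD(safe_waitq);

	static int fixed_sleep_on(struct my_req *req)
	{
		req->waitq = &safe_waitq;
		start_io(req);
		wait_event(safe_waitq, req->done);
		return req->rc;
	}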
+4 -5
drivers/s390/char/raw3270.c
···
 			     struct raw3270_request *rq)
 {
 	unsigned long flags;
-	wait_queue_head_t wq;
 	int rc;
 
 #ifdef CONFIG_TN3270_CONSOLE
···
 		return rq->rc;
 	}
 #endif
-	init_waitqueue_head(&wq);
 	rq->callback = raw3270_wake_init;
-	rq->callback_data = &wq;
+	rq->callback_data = &raw3270_wait_queue;
 	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
 	rc = __raw3270_start(rp, view, rq);
 	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
 	if (rc)
 		return rc;
 	/* Now wait for the completion. */
-	rc = wait_event_interruptible(wq, raw3270_request_final(rq));
+	rc = wait_event_interruptible(raw3270_wait_queue,
+				      raw3270_request_final(rq));
 	if (rc == -ERESTARTSYS) {	/* Interrupted by a signal. */
 		raw3270_halt_io(view->dev, rq);
 		/* No wait for the halt to complete. */
-		wait_event(wq, raw3270_request_final(rq));
+		wait_event(raw3270_wait_queue, raw3270_request_final(rq));
 		return -ERESTARTSYS;
 	}
 	return rq->rc;
+1 -1
drivers/s390/char/sclp_config.c
···
 	put_online_cpus();
 }
 
-static void sclp_cpu_change_notify(struct work_struct *work)
+static void __ref sclp_cpu_change_notify(struct work_struct *work)
 {
 	smp_rescan_cpus();
 }
+7 -20
drivers/s390/char/sclp_vt220.c
···
 /* Number of requests in outqueue */
 static int sclp_vt220_outqueue_count;
 
-/* Wait queue used to delay write requests while we've run out of buffers */
-static wait_queue_head_t sclp_vt220_waitq;
-
 /* Timer used for delaying write requests to merge subsequent messages into
  * a single buffer */
 static struct timer_list sclp_vt220_timer;
···
 	} while (request && __sclp_vt220_emit(request));
 	if (request == NULL && sclp_vt220_flush_later)
 		sclp_vt220_emit_current();
-	wake_up(&sclp_vt220_waitq);
 	/* Check if the tty needs a wake up call */
 	if (sclp_vt220_tty != NULL) {
 		tty_wakeup(sclp_vt220_tty);
···
  */
 static int
 __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
-		   int convertlf, int may_schedule)
+		   int convertlf, int may_fail)
 {
 	unsigned long flags;
 	void *page;
···
 	overall_written = 0;
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
 	do {
-		/* Create a sclp output buffer if none exists yet */
+		/* Create an sclp output buffer if none exists yet */
 		if (sclp_vt220_current_request == NULL) {
 			while (list_empty(&sclp_vt220_empty)) {
 				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-				if (in_interrupt() || !may_schedule)
-					sclp_sync_wait();
+				if (may_fail)
+					goto out;
 				else
-					wait_event(sclp_vt220_waitq,
-						!list_empty(&sclp_vt220_empty));
+					sclp_sync_wait();
 				spin_lock_irqsave(&sclp_vt220_lock, flags);
 			}
 			page = (void *) sclp_vt220_empty.next;
···
 		add_timer(&sclp_vt220_timer);
 	}
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+out:
 	return overall_written;
 }
···
  * character to the tty device.  If the kernel uses this routine,
  * it must call the flush_chars() routine (if defined) when it is
  * done stuffing characters into the driver.
- *
- * NOTE: include/linux/tty_driver.h specifies that a character should be
- * ignored if there is no room in the queue. This driver implements a different
- * semantic in that it will block when there is no more room left.
- *
- * FIXME: putchar can currently be called from BH and other non blocking
- * handlers so this semantic isn't a good idea.
  */
 static int
 sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
 {
-	__sclp_vt220_write(&ch, 1, 0, 0, 1);
-	return 1;
+	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
 }
 
 /*
···
 	spin_lock_init(&sclp_vt220_lock);
 	INIT_LIST_HEAD(&sclp_vt220_empty);
 	INIT_LIST_HEAD(&sclp_vt220_outqueue);
-	init_waitqueue_head(&sclp_vt220_waitq);
 	init_timer(&sclp_vt220_timer);
 	sclp_vt220_current_request = NULL;
 	sclp_vt220_buffered_chars = 0;
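Note: the scheduling-while-atomic bug came from sleeping inside __sclp_vt220_write() while callers such as put_char() can run in atomic context. The fix removes the wait queue entirely: callers that may fail pass may_fail and get a short count back (the tty layer retries), and the remaining callers poll for a free buffer via sclp_sync_wait(), which busy-waits on interrupts without calling schedule(). A hedged condensation of the new contract (the helpers marked hypothetical do not exist under these names):

	extern void sclp_sync_wait(void);	/* real: drivers/s390/char/sclp.h */
	extern int no_free_buffer(void);	/* hypothetical predicate */
	extern int copy_into_buffer(const unsigned char *, int); /* hypothetical */

	static int write_without_sleeping(const unsigned char *buf, int count,
					  int may_fail)
	{
		while (no_free_buffer()) {
			if (may_fail)
				return 0;	/* short write; caller retries */
			sclp_sync_wait();	/* poll, never schedule() */
		}
		return copy_into_buffer(buf, count);
	}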
+3
drivers/s390/char/tape.h
···
 	/* Request queue. */
 	struct list_head req_queue;
 
+	/* Request wait queue. */
+	wait_queue_head_t wait_queue;
+
 	/* Each tape device has (currently) two minor numbers. */
 	int first_minor;
+2 -2
drivers/s390/char/tape_block.c
···
 			tapeblock_end_request(req, -EIO);
 			continue;
 		}
+		blkdev_dequeue_request(req);
+		nr_queued++;
 		spin_unlock_irq(&device->blk_data.request_queue_lock);
 		rc = tapeblock_start_request(device, req);
 		spin_lock_irq(&device->blk_data.request_queue_lock);
-		blkdev_dequeue_request(req);
-		nr_queued++;
 	}
 	spin_unlock_irq(&device->blk_data.request_queue_lock);
 	atomic_set(&device->blk_data.requeue_scheduled, 0);
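Note: the reordering matters because the queue lock is dropped around tapeblock_start_request(). The request has to leave the block request queue while the lock is still held; otherwise another pass of the requeue path can see and start the same request twice. A sketch of the safe shape using the 2.6.26-era block API (start_tape_io() is hypothetical):

	#include <linux/blkdev.h>

	static void start_tape_io(struct request *req);	/* hypothetical */

	static void requeue_safely(struct request_queue *q, spinlock_t *lock)
	{
		struct request *req;

		spin_lock_irq(lock);
		while ((req = elv_next_request(q)) != NULL) {
			blkdev_dequeue_request(req); /* off the queue, under the lock */
			spin_unlock_irq(lock);
			start_tape_io(req);	/* lock dropped; may take time */
			spin_lock_irq(lock);
		}
		spin_unlock_irq(lock);
	}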
+7 -9
drivers/s390/char/tape_core.c
···
 	INIT_LIST_HEAD(&device->req_queue);
 	INIT_LIST_HEAD(&device->node);
 	init_waitqueue_head(&device->state_change_wq);
+	init_waitqueue_head(&device->wait_queue);
 	device->tape_state = TS_INIT;
 	device->medium_state = MS_UNKNOWN;
 	*device->modeset_byte = 0;
···
 int
 tape_do_io(struct tape_device *device, struct tape_request *request)
 {
-	wait_queue_head_t wq;
 	int rc;
 
-	init_waitqueue_head(&wq);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Setup callback */
 	request->callback = __tape_wake_up;
-	request->callback_data = &wq;
+	request->callback_data = &device->wait_queue;
 	/* Add request to request queue and try to start it. */
 	rc = __tape_start_request(device, request);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	if (rc)
 		return rc;
 	/* Request added to the queue. Wait for its completion. */
-	wait_event(wq, (request->callback == NULL));
+	wait_event(device->wait_queue, (request->callback == NULL));
 	/* Get rc from request */
 	return request->rc;
 }
···
 tape_do_io_interruptible(struct tape_device *device,
 			 struct tape_request *request)
 {
-	wait_queue_head_t wq;
 	int rc;
 
-	init_waitqueue_head(&wq);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Setup callback */
 	request->callback = __tape_wake_up_interruptible;
-	request->callback_data = &wq;
+	request->callback_data = &device->wait_queue;
 	rc = __tape_start_request(device, request);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	if (rc)
 		return rc;
 	/* Request added to the queue. Wait for its completion. */
-	rc = wait_event_interruptible(wq, (request->callback == NULL));
+	rc = wait_event_interruptible(device->wait_queue,
+				      (request->callback == NULL));
 	if (rc != -ERESTARTSYS)
 		/* Request finished normally. */
 		return request->rc;
···
 	/* Wait for the interrupt that acknowledges the halt. */
 	do {
 		rc = wait_event_interruptible(
-			wq,
+			device->wait_queue,
 			(request->callback == NULL)
 		);
 	} while (rc == -ERESTARTSYS);
+6
include/asm-s390/types.h
···
 
 #ifndef __ASSEMBLY__
 
+typedef u64 dma64_addr_t;
+#ifdef __s390x__
+/* DMA addresses come in 32-bit and 64-bit flavours. */
+typedef u64 dma_addr_t;
+#else
 typedef u32 dma_addr_t;
+#endif
 
 #ifndef __s390x__
 typedef union {
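Note: with dma_addr_t now 64 bit on 64-bit s390, code must not squeeze it into a u32 or print it with %x. A hedged sketch of the portable pattern (struct my_desc is hypothetical):

	#include <linux/kernel.h>
	#include <asm/types.h>

	struct my_desc {		/* hypothetical DMA descriptor */
		dma_addr_t bus_addr;	/* u64 under __s390x__ after this patch */
	};

	static void dump_desc(const struct my_desc *d)
	{
		/* cast through unsigned long long: dma_addr_t's width varies */
		printk(KERN_DEBUG "bus address 0x%llx\n",
		       (unsigned long long) d->bus_addr);
	}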