Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] stacktrace bug.
[S390] cio: remove casts from/to (void *).
[S390] cio: Remove grace period for vary off chpid.
[S390] cio: Use ccw_dev_id and subchannel_id in ccw_device_private
[S390] monwriter kzalloc size.
[S390] cio: add missing KERN_INFO printk header.
[S390] irq change improvements.

+123 -176
+1 -1
arch/s390/appldata/appldata_base.c
··· 109 * 110 * schedule work and reschedule timer 111 */ 112 - static void appldata_timer_function(unsigned long data, struct pt_regs *regs) 113 { 114 P_DEBUG(" -= Timer =-\n"); 115 P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
··· 109 * 110 * schedule work and reschedule timer 111 */ 112 + static void appldata_timer_function(unsigned long data) 113 { 114 P_DEBUG(" -= Timer =-\n"); 115 P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
+2 -2
arch/s390/kernel/s390_ext.c
··· 117 int index; 118 struct pt_regs *old_regs; 119 120 - irq_enter(); 121 old_regs = set_irq_regs(regs); 122 asm volatile ("mc 0,0"); 123 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 124 /** ··· 134 p->handler(code); 135 } 136 } 137 - set_irq_regs(old_regs); 138 irq_exit(); 139 } 140 141 EXPORT_SYMBOL(register_external_interrupt);
··· 117 int index; 118 struct pt_regs *old_regs; 119 120 old_regs = set_irq_regs(regs); 121 + irq_enter(); 122 asm volatile ("mc 0,0"); 123 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 124 /** ··· 134 p->handler(code); 135 } 136 } 137 irq_exit(); 138 + set_irq_regs(old_regs); 139 } 140 141 EXPORT_SYMBOL(register_external_interrupt);
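The hunk above (and the matching hunk in drivers/s390/cio/cio.c below) reorders the interrupt bookkeeping so that set_irq_regs() brackets irq_enter()/irq_exit() from the outside. A minimal sketch of the resulting entry-point shape, with a hypothetical function name; only the call order is taken from the hunks:

    static void example_irq_entry(struct pt_regs *regs)
    {
            struct pt_regs *old_regs;

            old_regs = set_irq_regs(regs);  /* publish regs for get_irq_regs() */
            irq_enter();
            /* ... dispatch the pending interrupt ... */
            irq_exit();                     /* pending softirqs may run here */
            set_irq_regs(old_regs);         /* restore only after irq_exit() */
    }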
+8 -9
arch/s390/kernel/stacktrace.c
··· 62 void save_stack_trace(struct stack_trace *trace, struct task_struct *task) 63 { 64 register unsigned long sp asm ("15"); 65 - unsigned long orig_sp; 66 67 - sp &= PSW_ADDR_INSN; 68 - orig_sp = sp; 69 70 - sp = save_context_stack(trace, &trace->skip, sp, 71 S390_lowcore.panic_stack - PAGE_SIZE, 72 S390_lowcore.panic_stack); 73 - if ((sp != orig_sp) && !trace->all_contexts) 74 return; 75 - sp = save_context_stack(trace, &trace->skip, sp, 76 S390_lowcore.async_stack - ASYNC_SIZE, 77 S390_lowcore.async_stack); 78 - if ((sp != orig_sp) && !trace->all_contexts) 79 return; 80 if (task) 81 - save_context_stack(trace, &trace->skip, sp, 82 (unsigned long) task_stack_page(task), 83 (unsigned long) task_stack_page(task) + THREAD_SIZE); 84 else 85 - save_context_stack(trace, &trace->skip, sp, 86 S390_lowcore.thread_info, 87 S390_lowcore.thread_info + THREAD_SIZE); 88 return;
··· 62 void save_stack_trace(struct stack_trace *trace, struct task_struct *task) 63 { 64 register unsigned long sp asm ("15"); 65 + unsigned long orig_sp, new_sp; 66 67 + orig_sp = sp & PSW_ADDR_INSN; 68 69 + new_sp = save_context_stack(trace, &trace->skip, orig_sp, 70 S390_lowcore.panic_stack - PAGE_SIZE, 71 S390_lowcore.panic_stack); 72 + if ((new_sp != orig_sp) && !trace->all_contexts) 73 return; 74 + new_sp = save_context_stack(trace, &trace->skip, new_sp, 75 S390_lowcore.async_stack - ASYNC_SIZE, 76 S390_lowcore.async_stack); 77 + if ((new_sp != orig_sp) && !trace->all_contexts) 78 return; 79 if (task) 80 + save_context_stack(trace, &trace->skip, new_sp, 81 (unsigned long) task_stack_page(task), 82 (unsigned long) task_stack_page(task) + THREAD_SIZE); 83 else 84 + save_context_stack(trace, &trace->skip, new_sp, 85 S390_lowcore.thread_info, 86 S390_lowcore.thread_info + THREAD_SIZE); 87 return;
+4 -4
arch/s390/kernel/vtime.c
··· 209 * Do the callback functions of expired vtimer events. 210 * Called from within the interrupt handler. 211 */ 212 - static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs) 213 { 214 struct vtimer_queue *vt_list; 215 struct vtimer_list *event, *tmp; 216 - void (*fn)(unsigned long, struct pt_regs*); 217 unsigned long data; 218 219 if (list_empty(cb_list)) ··· 224 list_for_each_entry_safe(event, tmp, cb_list, entry) { 225 fn = event->function; 226 data = event->data; 227 - fn(data, regs); 228 229 if (!event->interval) 230 /* delete one shot timer */ ··· 275 list_move_tail(&event->entry, &cb_list); 276 } 277 spin_unlock(&vt_list->lock); 278 - do_callbacks(&cb_list, get_irq_regs()); 279 280 /* next event is first in list */ 281 spin_lock(&vt_list->lock);
··· 209 * Do the callback functions of expired vtimer events. 210 * Called from within the interrupt handler. 211 */ 212 + static void do_callbacks(struct list_head *cb_list) 213 { 214 struct vtimer_queue *vt_list; 215 struct vtimer_list *event, *tmp; 216 + void (*fn)(unsigned long); 217 unsigned long data; 218 219 if (list_empty(cb_list)) ··· 224 list_for_each_entry_safe(event, tmp, cb_list, entry) { 225 fn = event->function; 226 data = event->data; 227 + fn(data); 228 229 if (!event->interval) 230 /* delete one shot timer */ ··· 275 list_move_tail(&event->entry, &cb_list); 276 } 277 spin_unlock(&vt_list->lock); 278 + do_callbacks(&cb_list); 279 280 /* next event is first in list */ 281 spin_lock(&vt_list->lock);
+1 -1
drivers/s390/char/monwriter.c
··· 110 monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL); 111 if (!monbuf) 112 return -ENOMEM; 113 - monbuf->data = kzalloc(monbuf->hdr.datalen, 114 GFP_KERNEL | GFP_DMA); 115 if (!monbuf->data) { 116 kfree(monbuf);
··· 110 monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL); 111 if (!monbuf) 112 return -ENOMEM; 113 + monbuf->data = kzalloc(monhdr->datalen, 114 GFP_KERNEL | GFP_DMA); 115 if (!monbuf->data) { 116 kfree(monbuf);
+9 -14
drivers/s390/cio/chsc.c
··· 370 struct res_acc_data *res_data; 371 struct subchannel *sch; 372 373 - res_data = (struct res_acc_data *)data; 374 sch = get_subchannel_by_schid(schid); 375 if (!sch) 376 /* Check if a subchannel is newly available. */ ··· 444 u32 isinfo[28]; 445 } *lir; 446 447 - lir = (struct lir*) data; 448 if (!(lir->iq&0x80)) 449 /* NULL link incident record */ 450 return -EINVAL; ··· 628 struct channel_path *chp; 629 struct subchannel *sch; 630 631 - chp = (struct channel_path *)data; 632 sch = get_subchannel_by_schid(schid); 633 if (!sch) 634 /* Check if the subchannel is now available. */ ··· 707 return chp_add(chpid); 708 } 709 710 - static inline int 711 - __check_for_io_and_kill(struct subchannel *sch, int index) 712 { 713 int cc; 714 ··· 717 cc = stsch(sch->schid, &sch->schib); 718 if (cc) 719 return 0; 720 - if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { 721 - device_set_waiting(sch); 722 return 1; 723 - } 724 return 0; 725 } 726 ··· 747 } else { 748 sch->opm &= ~(0x80 >> chp); 749 sch->lpm &= ~(0x80 >> chp); 750 - /* 751 - * Give running I/O a grace period in which it 752 - * can successfully terminate, even using the 753 - * just varied off path. Then kill it. 754 - */ 755 - if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { 756 if (css_enqueue_subchannel_slow(sch->schid)) { 757 css_clear_subchannel_slow_list(); 758 need_rescan = 1;
··· 370 struct res_acc_data *res_data; 371 struct subchannel *sch; 372 373 + res_data = data; 374 sch = get_subchannel_by_schid(schid); 375 if (!sch) 376 /* Check if a subchannel is newly available. */ ··· 444 u32 isinfo[28]; 445 } *lir; 446 447 + lir = data; 448 if (!(lir->iq&0x80)) 449 /* NULL link incident record */ 450 return -EINVAL; ··· 628 struct channel_path *chp; 629 struct subchannel *sch; 630 631 + chp = data; 632 sch = get_subchannel_by_schid(schid); 633 if (!sch) 634 /* Check if the subchannel is now available. */ ··· 707 return chp_add(chpid); 708 } 709 710 + static inline int check_for_io_on_path(struct subchannel *sch, int index) 711 { 712 int cc; 713 ··· 718 cc = stsch(sch->schid, &sch->schib); 719 if (cc) 720 return 0; 721 + if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) 722 return 1; 723 return 0; 724 } 725 ··· 750 } else { 751 sch->opm &= ~(0x80 >> chp); 752 sch->lpm &= ~(0x80 >> chp); 753 + if (check_for_io_on_path(sch, chp)) 754 + /* Path verification is done after killing. */ 755 + device_kill_io(sch); 756 + else if (!sch->lpm) { 757 if (css_enqueue_subchannel_slow(sch->schid)) { 758 css_clear_subchannel_slow_list(); 759 need_rescan = 1;
+2 -2
drivers/s390/cio/cio.c
··· 609 struct irb *irb; 610 struct pt_regs *old_regs; 611 612 - irq_enter (); 613 old_regs = set_irq_regs(regs); 614 asm volatile ("mc 0,0"); 615 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 616 /** ··· 655 * out of the sie which costs more cycles than it saves. 656 */ 657 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 658 set_irq_regs(old_regs); 659 - irq_exit (); 660 } 661 662 #ifdef CONFIG_CCW_CONSOLE
··· 609 struct irb *irb; 610 struct pt_regs *old_regs; 611 612 old_regs = set_irq_regs(regs); 613 + irq_enter(); 614 asm volatile ("mc 0,0"); 615 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 616 /** ··· 655 * out of the sie which costs more cycles than it saves. 656 */ 657 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 658 + irq_exit(); 659 set_irq_regs(old_regs); 660 } 661 662 #ifdef CONFIG_CCW_CONSOLE
+1 -1
drivers/s390/cio/css.c
··· 177 struct device *dev; 178 179 dev = bus_find_device(&css_bus_type, NULL, 180 - (void *)&schid, check_subchannel); 181 182 return dev ? to_subchannel(dev) : NULL; 183 }
··· 177 struct device *dev; 178 179 dev = bus_find_device(&css_bus_type, NULL, 180 + &schid, check_subchannel); 181 182 return dev ? to_subchannel(dev) : NULL; 183 }
+3 -4
drivers/s390/cio/css.h
··· 76 int state; /* device state */ 77 atomic_t onoff; 78 unsigned long registered; 79 - __u16 devno; /* device number */ 80 - __u16 sch_no; /* subchannel number */ 81 - __u8 ssid; /* subchannel set id */ 82 __u8 imask; /* lpm mask for SNID/SID/SPGID */ 83 int iretry; /* retry counter SNID/SID/SPGID */ 84 struct { ··· 170 171 /* Helper functions for vary on/off. */ 172 int device_is_online(struct subchannel *); 173 - void device_set_waiting(struct subchannel *); 174 175 /* Machine check helper function. */ 176 void device_kill_pending_timer(struct subchannel *);
··· 76 int state; /* device state */ 77 atomic_t onoff; 78 unsigned long registered; 79 + struct ccw_dev_id dev_id; /* device id */ 80 + struct subchannel_id schid; /* subchannel number */ 81 __u8 imask; /* lpm mask for SNID/SID/SPGID */ 82 int iretry; /* retry counter SNID/SID/SPGID */ 83 struct { ··· 171 172 /* Helper functions for vary on/off. */ 173 int device_is_online(struct subchannel *); 174 + void device_kill_io(struct subchannel *); 175 176 /* Machine check helper function. */ 177 void device_kill_pending_timer(struct subchannel *);
+23 -25
drivers/s390/cio/device.c
··· 552 } 553 554 struct match_data { 555 - unsigned int devno; 556 - unsigned int ssid; 557 struct ccw_device * sibling; 558 }; 559 560 static int 561 match_devno(struct device * dev, void * data) 562 { 563 - struct match_data * d = (struct match_data *)data; 564 struct ccw_device * cdev; 565 566 cdev = to_ccwdev(dev); 567 if ((cdev->private->state == DEV_STATE_DISCONNECTED) && 568 - (cdev->private->devno == d->devno) && 569 - (cdev->private->ssid == d->ssid) && 570 (cdev != d->sibling)) { 571 cdev->private->state = DEV_STATE_NOT_OPER; 572 return 1; ··· 572 return 0; 573 } 574 575 - static struct ccw_device * 576 - get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid, 577 - struct ccw_device *sibling) 578 { 579 struct device *dev; 580 struct match_data data; 581 582 - data.devno = devno; 583 - data.ssid = ssid; 584 data.sibling = sibling; 585 dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno); 586 ··· 591 592 struct ccw_device *cdev; 593 594 - cdev = (struct ccw_device *)data; 595 if (device_add(&cdev->dev)) { 596 put_device(&cdev->dev); 597 return; ··· 612 struct subchannel *sch; 613 int need_rename; 614 615 - cdev = (struct ccw_device *)data; 616 sch = to_subchannel(cdev->dev.parent); 617 - if (cdev->private->devno != sch->schib.pmcw.dev) { 618 /* 619 * The device number has changed. This is usually only when 620 * a device has been detached under VM and then re-appeared ··· 629 * get possibly sick... 630 */ 631 struct ccw_device *other_cdev; 632 633 need_rename = 1; 634 - other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev, 635 - sch->schid.ssid, cdev); 636 if (other_cdev) { 637 struct subchannel *other_sch; 638 ··· 650 } 651 /* Update ssd info here. */ 652 css_get_ssd_info(sch); 653 - cdev->private->devno = sch->schib.pmcw.dev; 654 } else 655 need_rename = 0; 656 device_remove_files(&cdev->dev); ··· 660 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", 661 sch->schid.ssid, sch->schib.pmcw.dev); 662 PREPARE_WORK(&cdev->private->kick_work, 663 - ccw_device_add_changed, (void *)cdev); 664 queue_work(ccw_device_work, &cdev->private->kick_work); 665 } 666 ··· 685 int ret; 686 unsigned long flags; 687 688 - cdev = (struct ccw_device *) data; 689 sch = to_subchannel(cdev->dev.parent); 690 691 if (klist_node_attached(&cdev->dev.knode_parent)) { ··· 757 break; 758 sch = to_subchannel(cdev->dev.parent); 759 PREPARE_WORK(&cdev->private->kick_work, 760 - ccw_device_call_sch_unregister, (void *) cdev); 761 queue_work(slow_path_wq, &cdev->private->kick_work); 762 if (atomic_dec_and_test(&ccw_device_init_count)) 763 wake_up(&ccw_device_init_wq); ··· 772 if (!get_device(&cdev->dev)) 773 break; 774 PREPARE_WORK(&cdev->private->kick_work, 775 - io_subchannel_register, (void *) cdev); 776 queue_work(slow_path_wq, &cdev->private->kick_work); 777 break; 778 } ··· 790 791 /* Init private data. */ 792 priv = cdev->private; 793 - priv->devno = sch->schib.pmcw.dev; 794 - priv->ssid = sch->schid.ssid; 795 - priv->sch_no = sch->schid.sch_no; 796 priv->state = DEV_STATE_NOT_OPER; 797 INIT_LIST_HEAD(&priv->cmb_list); 798 init_waitqueue_head(&priv->wait_q); ··· 910 */ 911 if (get_device(&cdev->dev)) { 912 PREPARE_WORK(&cdev->private->kick_work, 913 - ccw_device_unregister, (void *) cdev); 914 queue_work(ccw_device_work, &cdev->private->kick_work); 915 } 916 return 0; ··· 1053 { 1054 char *bus_id; 1055 1056 - bus_id = (char *)id; 1057 1058 return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0); 1059 }
··· 552 } 553 554 struct match_data { 555 + struct ccw_dev_id dev_id; 556 struct ccw_device * sibling; 557 }; 558 559 static int 560 match_devno(struct device * dev, void * data) 561 { 562 + struct match_data * d = data; 563 struct ccw_device * cdev; 564 565 cdev = to_ccwdev(dev); 566 if ((cdev->private->state == DEV_STATE_DISCONNECTED) && 567 + ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) && 568 (cdev != d->sibling)) { 569 cdev->private->state = DEV_STATE_NOT_OPER; 570 return 1; ··· 574 return 0; 575 } 576 577 + static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id, 578 + struct ccw_device *sibling) 579 { 580 struct device *dev; 581 struct match_data data; 582 583 + data.dev_id = *dev_id; 584 data.sibling = sibling; 585 dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno); 586 ··· 595 596 struct ccw_device *cdev; 597 598 + cdev = data; 599 if (device_add(&cdev->dev)) { 600 put_device(&cdev->dev); 601 return; ··· 616 struct subchannel *sch; 617 int need_rename; 618 619 + cdev = data; 620 sch = to_subchannel(cdev->dev.parent); 621 + if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) { 622 /* 623 * The device number has changed. This is usually only when 624 * a device has been detached under VM and then re-appeared ··· 633 * get possibly sick... 634 */ 635 struct ccw_device *other_cdev; 636 + struct ccw_dev_id dev_id; 637 638 need_rename = 1; 639 + dev_id.devno = sch->schib.pmcw.dev; 640 + dev_id.ssid = sch->schid.ssid; 641 + other_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev); 642 if (other_cdev) { 643 struct subchannel *other_sch; 644 ··· 652 } 653 /* Update ssd info here. */ 654 css_get_ssd_info(sch); 655 + cdev->private->dev_id.devno = sch->schib.pmcw.dev; 656 } else 657 need_rename = 0; 658 device_remove_files(&cdev->dev); ··· 662 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", 663 sch->schid.ssid, sch->schib.pmcw.dev); 664 PREPARE_WORK(&cdev->private->kick_work, 665 + ccw_device_add_changed, cdev); 666 queue_work(ccw_device_work, &cdev->private->kick_work); 667 } 668 ··· 687 int ret; 688 unsigned long flags; 689 690 + cdev = data; 691 sch = to_subchannel(cdev->dev.parent); 692 693 if (klist_node_attached(&cdev->dev.knode_parent)) { ··· 759 break; 760 sch = to_subchannel(cdev->dev.parent); 761 PREPARE_WORK(&cdev->private->kick_work, 762 + ccw_device_call_sch_unregister, cdev); 763 queue_work(slow_path_wq, &cdev->private->kick_work); 764 if (atomic_dec_and_test(&ccw_device_init_count)) 765 wake_up(&ccw_device_init_wq); ··· 774 if (!get_device(&cdev->dev)) 775 break; 776 PREPARE_WORK(&cdev->private->kick_work, 777 + io_subchannel_register, cdev); 778 queue_work(slow_path_wq, &cdev->private->kick_work); 779 break; 780 } ··· 792 793 /* Init private data. */ 794 priv = cdev->private; 795 + priv->dev_id.devno = sch->schib.pmcw.dev; 796 + priv->dev_id.ssid = sch->schid.ssid; 797 + priv->schid = sch->schid; 798 priv->state = DEV_STATE_NOT_OPER; 799 INIT_LIST_HEAD(&priv->cmb_list); 800 init_waitqueue_head(&priv->wait_q); ··· 912 */ 913 if (get_device(&cdev->dev)) { 914 PREPARE_WORK(&cdev->private->kick_work, 915 + ccw_device_unregister, cdev); 916 queue_work(ccw_device_work, &cdev->private->kick_work); 917 } 918 return 0; ··· 1055 { 1056 char *bus_id; 1057 1058 + bus_id = id; 1059 1060 return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0); 1061 }
-1
drivers/s390/cio/device.h
··· 21 /* states to wait for i/o completion before doing something */ 22 DEV_STATE_CLEAR_VERIFY, 23 DEV_STATE_TIMEOUT_KILL, 24 - DEV_STATE_WAIT4IO, 25 DEV_STATE_QUIESCE, 26 /* special states for devices gone not operational */ 27 DEV_STATE_DISCONNECTED,
··· 21 /* states to wait for i/o completion before doing something */ 22 DEV_STATE_CLEAR_VERIFY, 23 DEV_STATE_TIMEOUT_KILL, 24 DEV_STATE_QUIESCE, 25 /* special states for devices gone not operational */ 26 DEV_STATE_DISCONNECTED,
+31 -82
drivers/s390/cio/device_fsm.c
··· 59 cdev->private->state = DEV_STATE_DISCONNECTED; 60 } 61 62 - void 63 - device_set_waiting(struct subchannel *sch) 64 - { 65 - struct ccw_device *cdev; 66 - 67 - if (!sch->dev.driver_data) 68 - return; 69 - cdev = sch->dev.driver_data; 70 - ccw_device_set_timeout(cdev, 10*HZ); 71 - cdev->private->state = DEV_STATE_WAIT4IO; 72 - } 73 - 74 /* 75 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. 76 */ ··· 171 cdev->id.cu_model != cdev->private->senseid.cu_model || 172 cdev->id.dev_type != cdev->private->senseid.dev_type || 173 cdev->id.dev_model != cdev->private->senseid.dev_model || 174 - cdev->private->devno != sch->schib.pmcw.dev) { 175 PREPARE_WORK(&cdev->private->kick_work, 176 - ccw_device_do_unreg_rereg, (void *)cdev); 177 queue_work(ccw_device_work, &cdev->private->kick_work); 178 return 0; 179 } ··· 243 case DEV_STATE_NOT_OPER: 244 CIO_DEBUG(KERN_WARNING, 2, 245 "SenseID : unknown device %04x on subchannel " 246 - "0.%x.%04x\n", cdev->private->devno, 247 sch->schid.ssid, sch->schid.sch_no); 248 break; 249 case DEV_STATE_OFFLINE: ··· 270 CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: " 271 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 272 "%04X/%02X\n", 273 - cdev->private->ssid, cdev->private->devno, 274 cdev->id.cu_type, cdev->id.cu_model, 275 cdev->id.dev_type, cdev->id.dev_model); 276 break; 277 case DEV_STATE_BOXED: 278 CIO_DEBUG(KERN_WARNING, 2, 279 "SenseID : boxed device %04x on subchannel " 280 - "0.%x.%04x\n", cdev->private->devno, 281 sch->schid.ssid, sch->schid.sch_no); 282 break; 283 } ··· 314 struct subchannel *sch; 315 int ret; 316 317 - cdev = (struct ccw_device *)data; 318 sch = to_subchannel(cdev->dev.parent); 319 ret = (sch->driver && sch->driver->notify) ? 320 sch->driver->notify(&sch->dev, CIO_OPER) : 0; 321 if (!ret) 322 /* Driver doesn't want device back. */ 323 - ccw_device_do_unreg_rereg((void *)cdev); 324 else { 325 /* Reenable channel measurements, if needed. */ 326 cmf_reenable(cdev); ··· 352 if (state == DEV_STATE_BOXED) 353 CIO_DEBUG(KERN_WARNING, 2, 354 "Boxed device %04x on subchannel %04x\n", 355 - cdev->private->devno, sch->schid.sch_no); 356 357 if (cdev->private->flags.donotify) { 358 cdev->private->flags.donotify = 0; 359 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, 360 - (void *)cdev); 361 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 362 } 363 wake_up(&cdev->private->wait_q); ··· 401 /* PGID mismatch, can't pathgroup. */ 402 CIO_MSG_EVENT(0, "SNID - pgid mismatch for device " 403 "0.%x.%04x, can't pathgroup\n", 404 - cdev->private->ssid, cdev->private->devno); 405 cdev->private->options.pgroup = 0; 406 return; 407 } ··· 513 struct subchannel *sch; 514 int ret; 515 516 - cdev = (struct ccw_device *)data; 517 sch = to_subchannel(cdev->dev.parent); 518 /* Extra sanity. */ 519 if (sch->lpm) ··· 527 if (get_device(&cdev->dev)) { 528 PREPARE_WORK(&cdev->private->kick_work, 529 ccw_device_call_sch_unregister, 530 - (void *)cdev); 531 queue_work(ccw_device_work, 532 &cdev->private->kick_work); 533 } else ··· 582 break; 583 default: 584 PREPARE_WORK(&cdev->private->kick_work, 585 - ccw_device_nopath_notify, (void *)cdev); 586 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 587 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 588 break; ··· 713 sch = to_subchannel(cdev->dev.parent); 714 if (get_device(&cdev->dev)) { 715 PREPARE_WORK(&cdev->private->kick_work, 716 - ccw_device_call_sch_unregister, (void *)cdev); 717 queue_work(ccw_device_work, &cdev->private->kick_work); 718 } 719 wake_up(&cdev->private->wait_q); ··· 744 } 745 if (get_device(&cdev->dev)) { 746 PREPARE_WORK(&cdev->private->kick_work, 747 - ccw_device_call_sch_unregister, (void *)cdev); 748 queue_work(ccw_device_work, &cdev->private->kick_work); 749 } 750 wake_up(&cdev->private->wait_q); ··· 849 sch = to_subchannel(cdev->dev.parent); 850 if (!sch->lpm) { 851 PREPARE_WORK(&cdev->private->kick_work, 852 - ccw_device_nopath_notify, (void *)cdev); 853 queue_work(ccw_device_notify_work, 854 &cdev->private->kick_work); 855 } else ··· 875 /* Basic sense hasn't started. Try again. */ 876 ccw_device_do_sense(cdev, irb); 877 else { 878 - printk("Huh? %s(%s): unsolicited interrupt...\n", 879 __FUNCTION__, cdev->dev.bus_id); 880 if (cdev->handler) 881 cdev->handler (cdev, 0, irb); ··· 935 cdev->private->state = DEV_STATE_ONLINE; 936 if (cdev->handler) 937 cdev->handler(cdev, cdev->private->intparm, 938 - ERR_PTR(-ETIMEDOUT)); 939 if (!sch->lpm) { 940 PREPARE_WORK(&cdev->private->kick_work, 941 - ccw_device_nopath_notify, (void *)cdev); 942 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 943 } else if (cdev->private->flags.doverify) 944 /* Start delayed path verification. */ ··· 961 sch = to_subchannel(cdev->dev.parent); 962 if (!sch->lpm) { 963 PREPARE_WORK(&cdev->private->kick_work, 964 - ccw_device_nopath_notify, (void *)cdev); 965 queue_work(ccw_device_notify_work, 966 &cdev->private->kick_work); 967 } else ··· 972 cdev->private->state = DEV_STATE_ONLINE; 973 if (cdev->handler) 974 cdev->handler(cdev, cdev->private->intparm, 975 - ERR_PTR(-ETIMEDOUT)); 976 } 977 978 - static void 979 - ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event) 980 - { 981 - struct irb *irb; 982 - struct subchannel *sch; 983 - 984 - irb = (struct irb *) __LC_IRB; 985 - /* 986 - * Accumulate status and find out if a basic sense is needed. 987 - * This is fine since we have already adapted the lpm. 988 - */ 989 - ccw_device_accumulate_irb(cdev, irb); 990 - if (cdev->private->flags.dosense) { 991 - if (ccw_device_do_sense(cdev, irb) == 0) { 992 - cdev->private->state = DEV_STATE_W4SENSE; 993 - } 994 - return; 995 - } 996 - 997 - /* Iff device is idle, reset timeout. */ 998 - sch = to_subchannel(cdev->dev.parent); 999 - if (!stsch(sch->schid, &sch->schib)) 1000 - if (sch->schib.scsw.actl == 0) 1001 - ccw_device_set_timeout(cdev, 0); 1002 - /* Call the handler. */ 1003 - ccw_device_call_handler(cdev); 1004 - if (!sch->lpm) { 1005 - PREPARE_WORK(&cdev->private->kick_work, 1006 - ccw_device_nopath_notify, (void *)cdev); 1007 - queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1008 - } else if (cdev->private->flags.doverify) 1009 - ccw_device_online_verify(cdev, 0); 1010 - } 1011 - 1012 - static void 1013 - ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event) 1014 { 1015 int ret; 1016 - struct subchannel *sch; 1017 1018 - sch = to_subchannel(cdev->dev.parent); 1019 - ccw_device_set_timeout(cdev, 0); 1020 ret = ccw_device_cancel_halt_clear(cdev); 1021 if (ret == -EBUSY) { 1022 ccw_device_set_timeout(cdev, 3*HZ); ··· 990 if (ret == -ENODEV) { 991 if (!sch->lpm) { 992 PREPARE_WORK(&cdev->private->kick_work, 993 - ccw_device_nopath_notify, (void *)cdev); 994 queue_work(ccw_device_notify_work, 995 &cdev->private->kick_work); 996 } else ··· 999 } 1000 if (cdev->handler) 1001 cdev->handler(cdev, cdev->private->intparm, 1002 - ERR_PTR(-ETIMEDOUT)); 1003 if (!sch->lpm) { 1004 PREPARE_WORK(&cdev->private->kick_work, 1005 - ccw_device_nopath_notify, (void *)cdev); 1006 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1007 - } else if (cdev->private->flags.doverify) 1008 /* Start delayed path verification. */ 1009 ccw_device_online_verify(cdev, 0); 1010 } ··· 1240 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1241 [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout, 1242 [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME 1243 - }, 1244 - [DEV_STATE_WAIT4IO] = { 1245 - [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1246 - [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq, 1247 - [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout, 1248 - [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 1249 }, 1250 [DEV_STATE_QUIESCE] = { 1251 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
··· 59 cdev->private->state = DEV_STATE_DISCONNECTED; 60 } 61 62 /* 63 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. 64 */ ··· 183 cdev->id.cu_model != cdev->private->senseid.cu_model || 184 cdev->id.dev_type != cdev->private->senseid.dev_type || 185 cdev->id.dev_model != cdev->private->senseid.dev_model || 186 + cdev->private->dev_id.devno != sch->schib.pmcw.dev) { 187 PREPARE_WORK(&cdev->private->kick_work, 188 + ccw_device_do_unreg_rereg, cdev); 189 queue_work(ccw_device_work, &cdev->private->kick_work); 190 return 0; 191 } ··· 255 case DEV_STATE_NOT_OPER: 256 CIO_DEBUG(KERN_WARNING, 2, 257 "SenseID : unknown device %04x on subchannel " 258 + "0.%x.%04x\n", cdev->private->dev_id.devno, 259 sch->schid.ssid, sch->schid.sch_no); 260 break; 261 case DEV_STATE_OFFLINE: ··· 282 CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: " 283 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 284 "%04X/%02X\n", 285 + cdev->private->dev_id.ssid, 286 + cdev->private->dev_id.devno, 287 cdev->id.cu_type, cdev->id.cu_model, 288 cdev->id.dev_type, cdev->id.dev_model); 289 break; 290 case DEV_STATE_BOXED: 291 CIO_DEBUG(KERN_WARNING, 2, 292 "SenseID : boxed device %04x on subchannel " 293 + "0.%x.%04x\n", cdev->private->dev_id.devno, 294 sch->schid.ssid, sch->schid.sch_no); 295 break; 296 } ··· 325 struct subchannel *sch; 326 int ret; 327 328 + cdev = data; 329 sch = to_subchannel(cdev->dev.parent); 330 ret = (sch->driver && sch->driver->notify) ? 331 sch->driver->notify(&sch->dev, CIO_OPER) : 0; 332 if (!ret) 333 /* Driver doesn't want device back. */ 334 + ccw_device_do_unreg_rereg(cdev); 335 else { 336 /* Reenable channel measurements, if needed. */ 337 cmf_reenable(cdev); ··· 363 if (state == DEV_STATE_BOXED) 364 CIO_DEBUG(KERN_WARNING, 2, 365 "Boxed device %04x on subchannel %04x\n", 366 + cdev->private->dev_id.devno, sch->schid.sch_no); 367 368 if (cdev->private->flags.donotify) { 369 cdev->private->flags.donotify = 0; 370 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, 371 + cdev); 372 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 373 } 374 wake_up(&cdev->private->wait_q); ··· 412 /* PGID mismatch, can't pathgroup. */ 413 CIO_MSG_EVENT(0, "SNID - pgid mismatch for device " 414 "0.%x.%04x, can't pathgroup\n", 415 + cdev->private->dev_id.ssid, 416 + cdev->private->dev_id.devno); 417 cdev->private->options.pgroup = 0; 418 return; 419 } ··· 523 struct subchannel *sch; 524 int ret; 525 526 + cdev = data; 527 sch = to_subchannel(cdev->dev.parent); 528 /* Extra sanity. */ 529 if (sch->lpm) ··· 537 if (get_device(&cdev->dev)) { 538 PREPARE_WORK(&cdev->private->kick_work, 539 ccw_device_call_sch_unregister, 540 + cdev); 541 queue_work(ccw_device_work, 542 &cdev->private->kick_work); 543 } else ··· 592 break; 593 default: 594 PREPARE_WORK(&cdev->private->kick_work, 595 + ccw_device_nopath_notify, cdev); 596 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 597 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 598 break; ··· 723 sch = to_subchannel(cdev->dev.parent); 724 if (get_device(&cdev->dev)) { 725 PREPARE_WORK(&cdev->private->kick_work, 726 + ccw_device_call_sch_unregister, cdev); 727 queue_work(ccw_device_work, &cdev->private->kick_work); 728 } 729 wake_up(&cdev->private->wait_q); ··· 754 } 755 if (get_device(&cdev->dev)) { 756 PREPARE_WORK(&cdev->private->kick_work, 757 + ccw_device_call_sch_unregister, cdev); 758 queue_work(ccw_device_work, &cdev->private->kick_work); 759 } 760 wake_up(&cdev->private->wait_q); ··· 859 sch = to_subchannel(cdev->dev.parent); 860 if (!sch->lpm) { 861 PREPARE_WORK(&cdev->private->kick_work, 862 + ccw_device_nopath_notify, cdev); 863 queue_work(ccw_device_notify_work, 864 &cdev->private->kick_work); 865 } else ··· 885 /* Basic sense hasn't started. Try again. */ 886 ccw_device_do_sense(cdev, irb); 887 else { 888 + printk(KERN_INFO "Huh? %s(%s): unsolicited " 889 + "interrupt...\n", 890 __FUNCTION__, cdev->dev.bus_id); 891 if (cdev->handler) 892 cdev->handler (cdev, 0, irb); ··· 944 cdev->private->state = DEV_STATE_ONLINE; 945 if (cdev->handler) 946 cdev->handler(cdev, cdev->private->intparm, 947 + ERR_PTR(-EIO)); 948 if (!sch->lpm) { 949 PREPARE_WORK(&cdev->private->kick_work, 950 + ccw_device_nopath_notify, cdev); 951 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 952 } else if (cdev->private->flags.doverify) 953 /* Start delayed path verification. */ ··· 970 sch = to_subchannel(cdev->dev.parent); 971 if (!sch->lpm) { 972 PREPARE_WORK(&cdev->private->kick_work, 973 + ccw_device_nopath_notify, cdev); 974 queue_work(ccw_device_notify_work, 975 &cdev->private->kick_work); 976 } else ··· 981 cdev->private->state = DEV_STATE_ONLINE; 982 if (cdev->handler) 983 cdev->handler(cdev, cdev->private->intparm, 984 + ERR_PTR(-EIO)); 985 } 986 987 + void device_kill_io(struct subchannel *sch) 988 { 989 int ret; 990 + struct ccw_device *cdev; 991 992 + cdev = sch->dev.driver_data; 993 ret = ccw_device_cancel_halt_clear(cdev); 994 if (ret == -EBUSY) { 995 ccw_device_set_timeout(cdev, 3*HZ); ··· 1035 if (ret == -ENODEV) { 1036 if (!sch->lpm) { 1037 PREPARE_WORK(&cdev->private->kick_work, 1038 + ccw_device_nopath_notify, cdev); 1039 queue_work(ccw_device_notify_work, 1040 &cdev->private->kick_work); 1041 } else ··· 1044 } 1045 if (cdev->handler) 1046 cdev->handler(cdev, cdev->private->intparm, 1047 + ERR_PTR(-EIO)); 1048 if (!sch->lpm) { 1049 PREPARE_WORK(&cdev->private->kick_work, 1050 + ccw_device_nopath_notify, cdev); 1051 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1052 + } else 1053 /* Start delayed path verification. */ 1054 ccw_device_online_verify(cdev, 0); 1055 } ··· 1285 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1286 [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout, 1287 [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME 1288 }, 1289 [DEV_STATE_QUIESCE] = { 1290 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
+8 -6
drivers/s390/cio/device_id.c
··· 251 */ 252 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel " 253 "0.%x.%04x reports cmd reject\n", 254 - cdev->private->devno, sch->schid.ssid, 255 sch->schid.sch_no); 256 return -EOPNOTSUPP; 257 } ··· 259 CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, " 260 "lpum %02X, cnt %02d, sns :" 261 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 262 - cdev->private->ssid, cdev->private->devno, 263 irb->esw.esw0.sublog.lpum, 264 irb->esw.esw0.erw.scnt, 265 irb->ecw[0], irb->ecw[1], ··· 275 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " 276 "on subchannel 0.%x.%04x is " 277 "'not operational'\n", sch->orb.lpm, 278 - cdev->private->devno, sch->schid.ssid, 279 - sch->schid.sch_no); 280 return -EACCES; 281 } 282 /* Hmm, whatever happened, try again. */ 283 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " 284 "subchannel 0.%x.%04x returns status %02X%02X\n", 285 - cdev->private->devno, sch->schid.ssid, sch->schid.sch_no, 286 irb->scsw.dstat, irb->scsw.cstat); 287 return -EAGAIN; 288 } ··· 332 /* fall through. */ 333 default: /* Sense ID failed. Try asking VM. */ 334 if (MACHINE_IS_VM) { 335 - VM_virtual_device_info (cdev->private->devno, 336 &cdev->private->senseid); 337 if (cdev->private->senseid.cu_type != 0xFFFF) { 338 /* Got the device information from VM. */
··· 251 */ 252 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel " 253 "0.%x.%04x reports cmd reject\n", 254 + cdev->private->dev_id.devno, sch->schid.ssid, 255 sch->schid.sch_no); 256 return -EOPNOTSUPP; 257 } ··· 259 CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, " 260 "lpum %02X, cnt %02d, sns :" 261 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 262 + cdev->private->dev_id.ssid, 263 + cdev->private->dev_id.devno, 264 irb->esw.esw0.sublog.lpum, 265 irb->esw.esw0.erw.scnt, 266 irb->ecw[0], irb->ecw[1], ··· 274 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " 275 "on subchannel 0.%x.%04x is " 276 "'not operational'\n", sch->orb.lpm, 277 + cdev->private->dev_id.devno, 278 + sch->schid.ssid, sch->schid.sch_no); 279 return -EACCES; 280 } 281 /* Hmm, whatever happened, try again. */ 282 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " 283 "subchannel 0.%x.%04x returns status %02X%02X\n", 284 + cdev->private->dev_id.devno, sch->schid.ssid, 285 + sch->schid.sch_no, 286 irb->scsw.dstat, irb->scsw.cstat); 287 return -EAGAIN; 288 } ··· 330 /* fall through. */ 331 default: /* Sense ID failed. Try asking VM. */ 332 if (MACHINE_IS_VM) { 333 + VM_virtual_device_info (cdev->private->dev_id.devno, 334 &cdev->private->senseid); 335 if (cdev->private->senseid.cu_type != 0xFFFF) { 336 /* Got the device information from VM. */
+2 -4
drivers/s390/cio/device_ops.c
··· 50 if (cdev->private->state == DEV_STATE_NOT_OPER) 51 return -ENODEV; 52 if (cdev->private->state != DEV_STATE_ONLINE && 53 - cdev->private->state != DEV_STATE_WAIT4IO && 54 cdev->private->state != DEV_STATE_W4SENSE) 55 return -EINVAL; 56 sch = to_subchannel(cdev->dev.parent); ··· 154 if (cdev->private->state == DEV_STATE_NOT_OPER) 155 return -ENODEV; 156 if (cdev->private->state != DEV_STATE_ONLINE && 157 - cdev->private->state != DEV_STATE_WAIT4IO && 158 cdev->private->state != DEV_STATE_W4SENSE) 159 return -EINVAL; 160 sch = to_subchannel(cdev->dev.parent); ··· 590 int 591 _ccw_device_get_subchannel_number(struct ccw_device *cdev) 592 { 593 - return cdev->private->sch_no; 594 } 595 596 int 597 _ccw_device_get_device_number(struct ccw_device *cdev) 598 { 599 - return cdev->private->devno; 600 } 601 602
··· 50 if (cdev->private->state == DEV_STATE_NOT_OPER) 51 return -ENODEV; 52 if (cdev->private->state != DEV_STATE_ONLINE && 53 cdev->private->state != DEV_STATE_W4SENSE) 54 return -EINVAL; 55 sch = to_subchannel(cdev->dev.parent); ··· 155 if (cdev->private->state == DEV_STATE_NOT_OPER) 156 return -ENODEV; 157 if (cdev->private->state != DEV_STATE_ONLINE && 158 cdev->private->state != DEV_STATE_W4SENSE) 159 return -EINVAL; 160 sch = to_subchannel(cdev->dev.parent); ··· 592 int 593 _ccw_device_get_subchannel_number(struct ccw_device *cdev) 594 { 595 + return cdev->private->schid.sch_no; 596 } 597 598 int 599 _ccw_device_get_device_number(struct ccw_device *cdev) 600 { 601 + return cdev->private->dev_id.devno; 602 } 603 604
+13 -10
drivers/s390/cio/device_pgid.c
··· 79 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 80 "0.%x.%04x, lpm %02X, became 'not " 81 "operational'\n", 82 - cdev->private->devno, sch->schid.ssid, 83 sch->schid.sch_no, cdev->private->imask); 84 85 } ··· 136 CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, " 137 "lpum %02X, cnt %02d, sns : " 138 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n", 139 - cdev->private->ssid, cdev->private->devno, 140 irb->esw.esw0.sublog.lpum, 141 irb->esw.esw0.erw.scnt, 142 irb->ecw[0], irb->ecw[1], ··· 149 if (irb->scsw.cc == 3) { 150 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," 151 " lpm %02X, became 'not operational'\n", 152 - cdev->private->devno, sch->schid.ssid, 153 sch->schid.sch_no, sch->orb.lpm); 154 return -EACCES; 155 } ··· 157 if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { 158 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x " 159 "is reserved by someone else\n", 160 - cdev->private->devno, sch->schid.ssid, 161 sch->schid.sch_no); 162 return -EUSERS; 163 } ··· 263 /* PGID command failed on this path. */ 264 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 265 "0.%x.%04x, lpm %02X, became 'not operational'\n", 266 - cdev->private->devno, sch->schid.ssid, 267 sch->schid.sch_no, cdev->private->imask); 268 return ret; 269 } ··· 303 /* nop command failed on this path. */ 304 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " 305 "0.%x.%04x, lpm %02X, became 'not operational'\n", 306 - cdev->private->devno, sch->schid.ssid, 307 sch->schid.sch_no, cdev->private->imask); 308 return ret; 309 } ··· 330 CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, " 331 "cnt %02d, " 332 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 333 - cdev->private->ssid, 334 - cdev->private->devno, irb->esw.esw0.erw.scnt, 335 irb->ecw[0], irb->ecw[1], 336 irb->ecw[2], irb->ecw[3], 337 irb->ecw[4], irb->ecw[5], ··· 342 if (irb->scsw.cc == 3) { 343 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x," 344 " lpm %02X, became 'not operational'\n", 345 - cdev->private->devno, sch->schid.ssid, 346 sch->schid.sch_no, cdev->private->imask); 347 return -EACCES; 348 } ··· 365 if (irb->scsw.cc == 3) { 366 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x," 367 " lpm %02X, became 'not operational'\n", 368 - cdev->private->devno, sch->schid.ssid, 369 sch->schid.sch_no, cdev->private->imask); 370 return -EACCES; 371 }
··· 79 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 80 "0.%x.%04x, lpm %02X, became 'not " 81 "operational'\n", 82 + cdev->private->dev_id.devno, 83 + sch->schid.ssid, 84 sch->schid.sch_no, cdev->private->imask); 85 86 } ··· 135 CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, " 136 "lpum %02X, cnt %02d, sns : " 137 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n", 138 + cdev->private->dev_id.ssid, 139 + cdev->private->dev_id.devno, 140 irb->esw.esw0.sublog.lpum, 141 irb->esw.esw0.erw.scnt, 142 irb->ecw[0], irb->ecw[1], ··· 147 if (irb->scsw.cc == 3) { 148 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," 149 " lpm %02X, became 'not operational'\n", 150 + cdev->private->dev_id.devno, sch->schid.ssid, 151 sch->schid.sch_no, sch->orb.lpm); 152 return -EACCES; 153 } ··· 155 if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { 156 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x " 157 "is reserved by someone else\n", 158 + cdev->private->dev_id.devno, sch->schid.ssid, 159 sch->schid.sch_no); 160 return -EUSERS; 161 } ··· 261 /* PGID command failed on this path. */ 262 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 263 "0.%x.%04x, lpm %02X, became 'not operational'\n", 264 + cdev->private->dev_id.devno, sch->schid.ssid, 265 sch->schid.sch_no, cdev->private->imask); 266 return ret; 267 } ··· 301 /* nop command failed on this path. */ 302 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " 303 "0.%x.%04x, lpm %02X, became 'not operational'\n", 304 + cdev->private->dev_id.devno, sch->schid.ssid, 305 sch->schid.sch_no, cdev->private->imask); 306 return ret; 307 } ··· 328 CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, " 329 "cnt %02d, " 330 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 331 + cdev->private->dev_id.ssid, 332 + cdev->private->dev_id.devno, 333 + irb->esw.esw0.erw.scnt, 334 irb->ecw[0], irb->ecw[1], 335 irb->ecw[2], irb->ecw[3], 336 irb->ecw[4], irb->ecw[5], ··· 339 if (irb->scsw.cc == 3) { 340 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x," 341 " lpm %02X, became 'not operational'\n", 342 + cdev->private->dev_id.devno, sch->schid.ssid, 343 sch->schid.sch_no, cdev->private->imask); 344 return -EACCES; 345 } ··· 362 if (irb->scsw.cc == 3) { 363 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x," 364 " lpm %02X, became 'not operational'\n", 365 + cdev->private->dev_id.devno, sch->schid.ssid, 366 sch->schid.sch_no, cdev->private->imask); 367 return -EACCES; 368 }
+3 -4
drivers/s390/cio/device_status.c
··· 32 SCHN_STAT_CHN_CTRL_CHK | 33 SCHN_STAT_INTF_CTRL_CHK))) 34 return; 35 - 36 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 37 "received" 38 " ... device %04x on subchannel 0.%x.%04x, dev_stat " 39 ": %02X sch_stat : %02X\n", 40 - cdev->private->devno, cdev->private->ssid, 41 - cdev->private->sch_no, 42 irb->scsw.dstat, irb->scsw.cstat); 43 44 if (irb->scsw.cc != 3) { 45 char dbf_text[15]; 46 47 - sprintf(dbf_text, "chk%x", cdev->private->sch_no); 48 CIO_TRACE_EVENT(0, dbf_text); 49 CIO_HEX_EVENT(0, irb, sizeof (struct irb)); 50 }
··· 32 SCHN_STAT_CHN_CTRL_CHK | 33 SCHN_STAT_INTF_CTRL_CHK))) 34 return; 35 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 36 "received" 37 " ... device %04x on subchannel 0.%x.%04x, dev_stat " 38 ": %02X sch_stat : %02X\n", 39 + cdev->private->dev_id.devno, cdev->private->schid.ssid, 40 + cdev->private->schid.sch_no, 41 irb->scsw.dstat, irb->scsw.cstat); 42 43 if (irb->scsw.cc != 3) { 44 char dbf_text[15]; 45 46 + sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); 47 CIO_TRACE_EVENT(0, dbf_text); 48 CIO_HEX_EVENT(0, irb, sizeof (struct irb)); 49 }
+5 -5
drivers/s390/cio/qdio.c
··· 1741 void *ptr; 1742 int available; 1743 1744 - sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no); 1745 QDIO_DBF_TEXT0(0,setup,dbf_text); 1746 for (i=0;i<no_input_qs;i++) { 1747 q=irq_ptr->input_qs[i]; ··· 2924 2925 irq_ptr = cdev->private->qdio_data; 2926 2927 - sprintf(dbf_text,"qehi%4x",cdev->private->sch_no); 2928 QDIO_DBF_TEXT0(0,setup,dbf_text); 2929 QDIO_DBF_TEXT0(0,trace,dbf_text); 2930 ··· 2943 int rc; 2944 char dbf_text[15]; 2945 2946 - sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no); 2947 QDIO_DBF_TEXT0(0,setup,dbf_text); 2948 QDIO_DBF_TEXT0(0,trace,dbf_text); 2949 ··· 2964 struct qdio_irq *irq_ptr; 2965 char dbf_text[15]; 2966 2967 - sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no); 2968 QDIO_DBF_TEXT0(0,setup,dbf_text); 2969 QDIO_DBF_TEXT0(0,trace,dbf_text); 2970 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || ··· 3187 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); 3188 } 3189 3190 - sprintf(dbf_text,"qest%4x",cdev->private->sch_no); 3191 QDIO_DBF_TEXT0(0,setup,dbf_text); 3192 QDIO_DBF_TEXT0(0,trace,dbf_text); 3193
··· 1741 void *ptr; 1742 int available; 1743 1744 + sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no); 1745 QDIO_DBF_TEXT0(0,setup,dbf_text); 1746 for (i=0;i<no_input_qs;i++) { 1747 q=irq_ptr->input_qs[i]; ··· 2924 2925 irq_ptr = cdev->private->qdio_data; 2926 2927 + sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no); 2928 QDIO_DBF_TEXT0(0,setup,dbf_text); 2929 QDIO_DBF_TEXT0(0,trace,dbf_text); 2930 ··· 2943 int rc; 2944 char dbf_text[15]; 2945 2946 + sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no); 2947 QDIO_DBF_TEXT0(0,setup,dbf_text); 2948 QDIO_DBF_TEXT0(0,trace,dbf_text); 2949 ··· 2964 struct qdio_irq *irq_ptr; 2965 char dbf_text[15]; 2966 2967 + sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no); 2968 QDIO_DBF_TEXT0(0,setup,dbf_text); 2969 QDIO_DBF_TEXT0(0,trace,dbf_text); 2970 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || ··· 3187 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); 3188 } 3189 3190 + sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no); 3191 QDIO_DBF_TEXT0(0,setup,dbf_text); 3192 QDIO_DBF_TEXT0(0,trace,dbf_text); 3193
+6
include/asm-s390/cio.h
··· 275 u16 devno; 276 }; 277 278 extern int diag210(struct diag210 *addr); 279 280 extern void wait_cons_dev(void);
··· 275 u16 devno; 276 }; 277 278 + static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, 279 + struct ccw_dev_id *dev_id2) 280 + { 281 + return !memcmp(dev_id1, dev_id2, sizeof(struct ccw_dev_id)); 282 + } 283 + 284 extern int diag210(struct diag210 *addr); 285 286 extern void wait_cons_dev(void);
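The new inline above is what match_devno() in drivers/s390/cio/device.c now calls instead of comparing ssid and devno by hand. A minimal user-space sketch of the same comparison, compilable on its own; the field widths of the mock struct are assumptions (only devno is visible in the excerpt above), and the structs are zeroed first so memcmp() also sees identical padding:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* User-space stand-in for the kernel's struct ccw_dev_id;
     * field widths here are assumptions made for the sketch. */
    struct ccw_dev_id {
            uint8_t  ssid;
            uint16_t devno;
    };

    static int ccw_dev_id_is_equal(struct ccw_dev_id *id1, struct ccw_dev_id *id2)
    {
            /* Same idea as the kernel helper: whole-struct memcmp(). */
            return !memcmp(id1, id2, sizeof(struct ccw_dev_id));
    }

    int main(void)
    {
            struct ccw_dev_id a, b;

            memset(&a, 0, sizeof(a));       /* zero padding before comparing */
            memset(&b, 0, sizeof(b));
            a.devno = b.devno = 0x1234;
            printf("equal: %d\n", ccw_dev_id_is_equal(&a, &b));
            return 0;
    }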
+1 -1
include/asm-s390/timer.h
··· 26 spinlock_t lock; 27 unsigned long magic; 28 29 - void (*function)(unsigned long, struct pt_regs*); 30 unsigned long data; 31 }; 32
··· 26 spinlock_t lock; 27 unsigned long magic; 28 29 + void (*function)(unsigned long); 30 unsigned long data; 31 }; 32
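The caller side of this prototype change is in the arch/s390/kernel/vtime.c hunk above: do_callbacks() now invokes fn(data), and a handler that still needs the interrupt frame can fetch it with get_irq_regs(). A minimal sketch of a callback written against the new prototype; the names are hypothetical and the vtimer setup/registration calls are omitted, since they are not part of these hunks:

    /* New-style virtual timer callback: no pt_regs argument any more. */
    static void example_vtimer_fn(unsigned long data)
    {
            /* 'data' is whatever was stored in vtimer_list.data;
             * use get_irq_regs() here if the interrupt frame is needed. */
    }

    static struct vtimer_list example_timer = {
            .function = example_vtimer_fn,  /* matches the new prototype above */
            .data     = 0,
    };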