[S390] cio: remove casts from/to (void *).

Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Cornelia Huck and committed by Martin Schwidefsky (commits 12975aef, e7769b48).

+26 -26
+3 -3
drivers/s390/cio/chsc.c
··· 370 struct res_acc_data *res_data; 371 struct subchannel *sch; 372 373 - res_data = (struct res_acc_data *)data; 374 sch = get_subchannel_by_schid(schid); 375 if (!sch) 376 /* Check if a subchannel is newly available. */ ··· 444 u32 isinfo[28]; 445 } *lir; 446 447 - lir = (struct lir*) data; 448 if (!(lir->iq&0x80)) 449 /* NULL link incident record */ 450 return -EINVAL; ··· 628 struct channel_path *chp; 629 struct subchannel *sch; 630 631 - chp = (struct channel_path *)data; 632 sch = get_subchannel_by_schid(schid); 633 if (!sch) 634 /* Check if the subchannel is now available. */
··· 370 struct res_acc_data *res_data; 371 struct subchannel *sch; 372 373 + res_data = data; 374 sch = get_subchannel_by_schid(schid); 375 if (!sch) 376 /* Check if a subchannel is newly available. */ ··· 444 u32 isinfo[28]; 445 } *lir; 446 447 + lir = data; 448 if (!(lir->iq&0x80)) 449 /* NULL link incident record */ 450 return -EINVAL; ··· 628 struct channel_path *chp; 629 struct subchannel *sch; 630 631 + chp = data; 632 sch = get_subchannel_by_schid(schid); 633 if (!sch) 634 /* Check if the subchannel is now available. */
+1 -1
drivers/s390/cio/css.c
··· 177 struct device *dev; 178 179 dev = bus_find_device(&css_bus_type, NULL, 180 - (void *)&schid, check_subchannel); 181 182 return dev ? to_subchannel(dev) : NULL; 183 }
··· 177 struct device *dev; 178 179 dev = bus_find_device(&css_bus_type, NULL, 180 + &schid, check_subchannel); 181 182 return dev ? to_subchannel(dev) : NULL; 183 }
+8 -8
drivers/s390/cio/device.c
··· 591 592 struct ccw_device *cdev; 593 594 - cdev = (struct ccw_device *)data; 595 if (device_add(&cdev->dev)) { 596 put_device(&cdev->dev); 597 return; ··· 612 struct subchannel *sch; 613 int need_rename; 614 615 - cdev = (struct ccw_device *)data; 616 sch = to_subchannel(cdev->dev.parent); 617 if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) { 618 /* ··· 660 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", 661 sch->schid.ssid, sch->schib.pmcw.dev); 662 PREPARE_WORK(&cdev->private->kick_work, 663 - ccw_device_add_changed, (void *)cdev); 664 queue_work(ccw_device_work, &cdev->private->kick_work); 665 } 666 ··· 685 int ret; 686 unsigned long flags; 687 688 - cdev = (struct ccw_device *) data; 689 sch = to_subchannel(cdev->dev.parent); 690 691 if (klist_node_attached(&cdev->dev.knode_parent)) { ··· 757 break; 758 sch = to_subchannel(cdev->dev.parent); 759 PREPARE_WORK(&cdev->private->kick_work, 760 - ccw_device_call_sch_unregister, (void *) cdev); 761 queue_work(slow_path_wq, &cdev->private->kick_work); 762 if (atomic_dec_and_test(&ccw_device_init_count)) 763 wake_up(&ccw_device_init_wq); ··· 772 if (!get_device(&cdev->dev)) 773 break; 774 PREPARE_WORK(&cdev->private->kick_work, 775 - io_subchannel_register, (void *) cdev); 776 queue_work(slow_path_wq, &cdev->private->kick_work); 777 break; 778 } ··· 910 */ 911 if (get_device(&cdev->dev)) { 912 PREPARE_WORK(&cdev->private->kick_work, 913 - ccw_device_unregister, (void *) cdev); 914 queue_work(ccw_device_work, &cdev->private->kick_work); 915 } 916 return 0; ··· 1053 { 1054 char *bus_id; 1055 1056 - bus_id = (char *)id; 1057 1058 return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0); 1059 }
··· 591 592 struct ccw_device *cdev; 593 594 + cdev = data; 595 if (device_add(&cdev->dev)) { 596 put_device(&cdev->dev); 597 return; ··· 612 struct subchannel *sch; 613 int need_rename; 614 615 + cdev = data; 616 sch = to_subchannel(cdev->dev.parent); 617 if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) { 618 /* ··· 660 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", 661 sch->schid.ssid, sch->schib.pmcw.dev); 662 PREPARE_WORK(&cdev->private->kick_work, 663 + ccw_device_add_changed, cdev); 664 queue_work(ccw_device_work, &cdev->private->kick_work); 665 } 666 ··· 685 int ret; 686 unsigned long flags; 687 688 + cdev = data; 689 sch = to_subchannel(cdev->dev.parent); 690 691 if (klist_node_attached(&cdev->dev.knode_parent)) { ··· 757 break; 758 sch = to_subchannel(cdev->dev.parent); 759 PREPARE_WORK(&cdev->private->kick_work, 760 + ccw_device_call_sch_unregister, cdev); 761 queue_work(slow_path_wq, &cdev->private->kick_work); 762 if (atomic_dec_and_test(&ccw_device_init_count)) 763 wake_up(&ccw_device_init_wq); ··· 772 if (!get_device(&cdev->dev)) 773 break; 774 PREPARE_WORK(&cdev->private->kick_work, 775 + io_subchannel_register, cdev); 776 queue_work(slow_path_wq, &cdev->private->kick_work); 777 break; 778 } ··· 910 */ 911 if (get_device(&cdev->dev)) { 912 PREPARE_WORK(&cdev->private->kick_work, 913 + ccw_device_unregister, cdev); 914 queue_work(ccw_device_work, &cdev->private->kick_work); 915 } 916 return 0; ··· 1053 { 1054 char *bus_id; 1055 1056 + bus_id = id; 1057 1058 return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0); 1059 }
+14 -14
drivers/s390/cio/device_fsm.c
··· 173 cdev->id.dev_model != cdev->private->senseid.dev_model || 174 cdev->private->dev_id.devno != sch->schib.pmcw.dev) { 175 PREPARE_WORK(&cdev->private->kick_work, 176 - ccw_device_do_unreg_rereg, (void *)cdev); 177 queue_work(ccw_device_work, &cdev->private->kick_work); 178 return 0; 179 } ··· 314 struct subchannel *sch; 315 int ret; 316 317 - cdev = (struct ccw_device *)data; 318 sch = to_subchannel(cdev->dev.parent); 319 ret = (sch->driver && sch->driver->notify) ? 320 sch->driver->notify(&sch->dev, CIO_OPER) : 0; 321 if (!ret) 322 /* Driver doesn't want device back. */ 323 - ccw_device_do_unreg_rereg((void *)cdev); 324 else { 325 /* Reenable channel measurements, if needed. */ 326 cmf_reenable(cdev); ··· 357 if (cdev->private->flags.donotify) { 358 cdev->private->flags.donotify = 0; 359 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, 360 - (void *)cdev); 361 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 362 } 363 wake_up(&cdev->private->wait_q); ··· 513 struct subchannel *sch; 514 int ret; 515 516 - cdev = (struct ccw_device *)data; 517 sch = to_subchannel(cdev->dev.parent); 518 /* Extra sanity. 
*/ 519 if (sch->lpm) ··· 527 if (get_device(&cdev->dev)) { 528 PREPARE_WORK(&cdev->private->kick_work, 529 ccw_device_call_sch_unregister, 530 - (void *)cdev); 531 queue_work(ccw_device_work, 532 &cdev->private->kick_work); 533 } else ··· 582 break; 583 default: 584 PREPARE_WORK(&cdev->private->kick_work, 585 - ccw_device_nopath_notify, (void *)cdev); 586 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 587 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 588 break; ··· 713 sch = to_subchannel(cdev->dev.parent); 714 if (get_device(&cdev->dev)) { 715 PREPARE_WORK(&cdev->private->kick_work, 716 - ccw_device_call_sch_unregister, (void *)cdev); 717 queue_work(ccw_device_work, &cdev->private->kick_work); 718 } 719 wake_up(&cdev->private->wait_q); ··· 744 } 745 if (get_device(&cdev->dev)) { 746 PREPARE_WORK(&cdev->private->kick_work, 747 - ccw_device_call_sch_unregister, (void *)cdev); 748 queue_work(ccw_device_work, &cdev->private->kick_work); 749 } 750 wake_up(&cdev->private->wait_q); ··· 849 sch = to_subchannel(cdev->dev.parent); 850 if (!sch->lpm) { 851 PREPARE_WORK(&cdev->private->kick_work, 852 - ccw_device_nopath_notify, (void *)cdev); 853 queue_work(ccw_device_notify_work, 854 &cdev->private->kick_work); 855 } else ··· 938 ERR_PTR(-EIO)); 939 if (!sch->lpm) { 940 PREPARE_WORK(&cdev->private->kick_work, 941 - ccw_device_nopath_notify, (void *)cdev); 942 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 943 } else if (cdev->private->flags.doverify) 944 /* Start delayed path verification. 
*/ ··· 961 sch = to_subchannel(cdev->dev.parent); 962 if (!sch->lpm) { 963 PREPARE_WORK(&cdev->private->kick_work, 964 - ccw_device_nopath_notify, (void *)cdev); 965 queue_work(ccw_device_notify_work, 966 &cdev->private->kick_work); 967 } else ··· 990 if (ret == -ENODEV) { 991 if (!sch->lpm) { 992 PREPARE_WORK(&cdev->private->kick_work, 993 - ccw_device_nopath_notify, (void *)cdev); 994 queue_work(ccw_device_notify_work, 995 &cdev->private->kick_work); 996 } else ··· 1002 ERR_PTR(-EIO)); 1003 if (!sch->lpm) { 1004 PREPARE_WORK(&cdev->private->kick_work, 1005 - ccw_device_nopath_notify, (void *)cdev); 1006 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1007 } else 1008 /* Start delayed path verification. */
··· 173 cdev->id.dev_model != cdev->private->senseid.dev_model || 174 cdev->private->dev_id.devno != sch->schib.pmcw.dev) { 175 PREPARE_WORK(&cdev->private->kick_work, 176 + ccw_device_do_unreg_rereg, cdev); 177 queue_work(ccw_device_work, &cdev->private->kick_work); 178 return 0; 179 } ··· 314 struct subchannel *sch; 315 int ret; 316 317 + cdev = data; 318 sch = to_subchannel(cdev->dev.parent); 319 ret = (sch->driver && sch->driver->notify) ? 320 sch->driver->notify(&sch->dev, CIO_OPER) : 0; 321 if (!ret) 322 /* Driver doesn't want device back. */ 323 + ccw_device_do_unreg_rereg(cdev); 324 else { 325 /* Reenable channel measurements, if needed. */ 326 cmf_reenable(cdev); ··· 357 if (cdev->private->flags.donotify) { 358 cdev->private->flags.donotify = 0; 359 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, 360 + cdev); 361 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 362 } 363 wake_up(&cdev->private->wait_q); ··· 513 struct subchannel *sch; 514 int ret; 515 516 + cdev = data; 517 sch = to_subchannel(cdev->dev.parent); 518 /* Extra sanity. 
*/ 519 if (sch->lpm) ··· 527 if (get_device(&cdev->dev)) { 528 PREPARE_WORK(&cdev->private->kick_work, 529 ccw_device_call_sch_unregister, 530 + cdev); 531 queue_work(ccw_device_work, 532 &cdev->private->kick_work); 533 } else ··· 582 break; 583 default: 584 PREPARE_WORK(&cdev->private->kick_work, 585 + ccw_device_nopath_notify, cdev); 586 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 587 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 588 break; ··· 713 sch = to_subchannel(cdev->dev.parent); 714 if (get_device(&cdev->dev)) { 715 PREPARE_WORK(&cdev->private->kick_work, 716 + ccw_device_call_sch_unregister, cdev); 717 queue_work(ccw_device_work, &cdev->private->kick_work); 718 } 719 wake_up(&cdev->private->wait_q); ··· 744 } 745 if (get_device(&cdev->dev)) { 746 PREPARE_WORK(&cdev->private->kick_work, 747 + ccw_device_call_sch_unregister, cdev); 748 queue_work(ccw_device_work, &cdev->private->kick_work); 749 } 750 wake_up(&cdev->private->wait_q); ··· 849 sch = to_subchannel(cdev->dev.parent); 850 if (!sch->lpm) { 851 PREPARE_WORK(&cdev->private->kick_work, 852 + ccw_device_nopath_notify, cdev); 853 queue_work(ccw_device_notify_work, 854 &cdev->private->kick_work); 855 } else ··· 938 ERR_PTR(-EIO)); 939 if (!sch->lpm) { 940 PREPARE_WORK(&cdev->private->kick_work, 941 + ccw_device_nopath_notify, cdev); 942 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 943 } else if (cdev->private->flags.doverify) 944 /* Start delayed path verification. 
*/ ··· 961 sch = to_subchannel(cdev->dev.parent); 962 if (!sch->lpm) { 963 PREPARE_WORK(&cdev->private->kick_work, 964 + ccw_device_nopath_notify, cdev); 965 queue_work(ccw_device_notify_work, 966 &cdev->private->kick_work); 967 } else ··· 990 if (ret == -ENODEV) { 991 if (!sch->lpm) { 992 PREPARE_WORK(&cdev->private->kick_work, 993 + ccw_device_nopath_notify, cdev); 994 queue_work(ccw_device_notify_work, 995 &cdev->private->kick_work); 996 } else ··· 1002 ERR_PTR(-EIO)); 1003 if (!sch->lpm) { 1004 PREPARE_WORK(&cdev->private->kick_work, 1005 + ccw_device_nopath_notify, cdev); 1006 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1007 } else 1008 /* Start delayed path verification. */