Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/cio: add basic protected virtualization support

As virtio-ccw devices are channel devices, we need to use the
dma area within the common I/O layer for any communication with
the hypervisor.

Note that we do not need to use that area for control blocks
directly referenced by instructions, e.g. the orb.

This patch handles neither QDIO in the common code, nor any device-type
specific handling (such as channel programs constructed by the DASD driver).

An interesting side effect is that virtio structures are now going to
get allocated in 31 bit addressable storage.

Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Sebastian Ott <sebott@linux.ibm.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
Tested-by: Michael Mueller <mimu@linux.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>

Authored by Halil Pasic and committed by Heiko Carstens
(commit 37db8985, parent bb99332a).

+164 -83
+4
arch/s390/include/asm/ccwdev.h
··· 226 226 extern void ccw_device_wait_idle(struct ccw_device *); 227 227 extern int ccw_device_force_console(struct ccw_device *); 228 228 229 + extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size); 230 + extern void ccw_device_dma_free(struct ccw_device *cdev, 231 + void *cpu_addr, size_t size); 232 + 229 233 int ccw_device_siosl(struct ccw_device *); 230 234 231 235 extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
+5 -4
drivers/s390/cio/ccwreq.c
··· 63 63 return; 64 64 req->done = 1; 65 65 ccw_device_set_timeout(cdev, 0); 66 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 66 + memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 67 67 if (rc && rc != -ENODEV && req->drc) 68 68 rc = req->drc; 69 69 req->callback(cdev, req->data, rc); ··· 86 86 continue; 87 87 } 88 88 /* Perform start function. */ 89 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 89 + memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 90 90 rc = cio_start(sch, cp, (u8) req->mask); 91 91 if (rc == 0) { 92 92 /* I/O started successfully. */ ··· 169 169 */ 170 170 static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) 171 171 { 172 - struct irb *irb = &cdev->private->irb; 172 + struct irb *irb = &cdev->private->dma_area->irb; 173 173 struct cmd_scsw *scsw = &irb->scsw.cmd; 174 174 enum uc_todo todo; 175 175 ··· 187 187 CIO_TRACE_EVENT(2, "sensedata"); 188 188 CIO_HEX_EVENT(2, &cdev->private->dev_id, 189 189 sizeof(struct ccw_dev_id)); 190 - CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT); 190 + CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw, 191 + SENSE_MAX_COUNT); 191 192 /* Check for command reject. */ 192 193 if (irb->ecw[0] & SNS0_CMD_REJECT) 193 194 return IO_REJECTED;
+57 -11
drivers/s390/cio/device.c
··· 24 24 #include <linux/timer.h> 25 25 #include <linux/kernel_stat.h> 26 26 #include <linux/sched/signal.h> 27 + #include <linux/dma-mapping.h> 27 28 28 29 #include <asm/ccwdev.h> 29 30 #include <asm/cio.h> ··· 688 687 struct ccw_device *cdev; 689 688 690 689 cdev = to_ccwdev(dev); 690 + cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area, 691 + sizeof(*cdev->private->dma_area)); 692 + cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev); 691 693 /* Release reference of parent subchannel. */ 692 694 put_device(cdev->dev.parent); 693 695 kfree(cdev->private); ··· 700 696 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) 701 697 { 702 698 struct ccw_device *cdev; 699 + struct gen_pool *dma_pool; 703 700 704 701 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 705 - if (cdev) { 706 - cdev->private = kzalloc(sizeof(struct ccw_device_private), 707 - GFP_KERNEL | GFP_DMA); 708 - if (cdev->private) 709 - return cdev; 710 - } 702 + if (!cdev) 703 + goto err_cdev; 704 + cdev->private = kzalloc(sizeof(struct ccw_device_private), 705 + GFP_KERNEL | GFP_DMA); 706 + if (!cdev->private) 707 + goto err_priv; 708 + cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask; 709 + cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask; 710 + dma_pool = cio_gp_dma_create(&cdev->dev, 1); 711 + if (!dma_pool) 712 + goto err_dma_pool; 713 + cdev->private->dma_pool = dma_pool; 714 + cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev, 715 + sizeof(*cdev->private->dma_area)); 716 + if (!cdev->private->dma_area) 717 + goto err_dma_area; 718 + return cdev; 719 + err_dma_area: 720 + cio_gp_dma_destroy(dma_pool, &cdev->dev); 721 + err_dma_pool: 722 + kfree(cdev->private); 723 + err_priv: 711 724 kfree(cdev); 725 + err_cdev: 712 726 return ERR_PTR(-ENOMEM); 713 727 } 714 728 ··· 906 884 wake_up(&ccw_device_init_wq); 907 885 break; 908 886 case DEV_STATE_OFFLINE: 909 - /* 887 + /* 910 888 * We can't register the device in interrupt context so 911 
889 * we schedule a work item. 912 890 */ ··· 1084 1062 if (!io_priv) 1085 1063 goto out_schedule; 1086 1064 1065 + io_priv->dma_area = dma_alloc_coherent(&sch->dev, 1066 + sizeof(*io_priv->dma_area), 1067 + &io_priv->dma_area_dma, GFP_KERNEL); 1068 + if (!io_priv->dma_area) { 1069 + kfree(io_priv); 1070 + goto out_schedule; 1071 + } 1072 + 1087 1073 set_io_private(sch, io_priv); 1088 1074 css_schedule_eval(sch->schid); 1089 1075 return 0; ··· 1118 1088 set_io_private(sch, NULL); 1119 1089 spin_unlock_irq(sch->lock); 1120 1090 out_free: 1091 + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), 1092 + io_priv->dma_area, io_priv->dma_area_dma); 1121 1093 kfree(io_priv); 1122 1094 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); 1123 1095 return 0; ··· 1625 1593 return ERR_CAST(sch); 1626 1594 1627 1595 io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); 1628 - if (!io_priv) { 1629 - put_device(&sch->dev); 1630 - return ERR_PTR(-ENOMEM); 1631 - } 1596 + if (!io_priv) 1597 + goto err_priv; 1598 + io_priv->dma_area = dma_alloc_coherent(&sch->dev, 1599 + sizeof(*io_priv->dma_area), 1600 + &io_priv->dma_area_dma, GFP_KERNEL); 1601 + if (!io_priv->dma_area) 1602 + goto err_dma_area; 1632 1603 set_io_private(sch, io_priv); 1633 1604 cdev = io_subchannel_create_ccwdev(sch); 1634 1605 if (IS_ERR(cdev)) { 1606 + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), 1607 + io_priv->dma_area, io_priv->dma_area_dma); 1608 + set_io_private(sch, NULL); 1635 1609 put_device(&sch->dev); 1636 1610 kfree(io_priv); 1637 1611 return cdev; ··· 1645 1607 cdev->drv = drv; 1646 1608 ccw_device_set_int_class(cdev); 1647 1609 return cdev; 1610 + 1611 + err_dma_area: 1612 + kfree(io_priv); 1613 + err_priv: 1614 + put_device(&sch->dev); 1615 + return ERR_PTR(-ENOMEM); 1648 1616 } 1649 1617 1650 1618 void __init ccw_device_destroy_console(struct ccw_device *cdev) ··· 1661 1617 set_io_private(sch, NULL); 1662 1618 put_device(&sch->dev); 1663 1619 
put_device(&cdev->dev); 1620 + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), 1621 + io_priv->dma_area, io_priv->dma_area_dma); 1664 1622 kfree(io_priv); 1665 1623 } 1666 1624
+29 -20
drivers/s390/cio/device_fsm.c
··· 67 67 sizeof(struct tcw), 0); 68 68 } else { 69 69 printk(KERN_WARNING "cio: orb indicates command mode\n"); 70 - if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw || 71 - (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws) 70 + if ((void *)(addr_t)orb->cmd.cpa == 71 + &private->dma_area->sense_ccw || 72 + (void *)(addr_t)orb->cmd.cpa == 73 + cdev->private->dma_area->iccws) 72 74 printk(KERN_WARNING "cio: last channel program " 73 75 "(intern):\n"); 74 76 else ··· 145 143 void ccw_device_update_sense_data(struct ccw_device *cdev) 146 144 { 147 145 memset(&cdev->id, 0, sizeof(cdev->id)); 148 - cdev->id.cu_type = cdev->private->senseid.cu_type; 149 - cdev->id.cu_model = cdev->private->senseid.cu_model; 150 - cdev->id.dev_type = cdev->private->senseid.dev_type; 151 - cdev->id.dev_model = cdev->private->senseid.dev_model; 146 + cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type; 147 + cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model; 148 + cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type; 149 + cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model; 152 150 } 153 151 154 152 int ccw_device_test_sense_data(struct ccw_device *cdev) 155 153 { 156 - return cdev->id.cu_type == cdev->private->senseid.cu_type && 157 - cdev->id.cu_model == cdev->private->senseid.cu_model && 158 - cdev->id.dev_type == cdev->private->senseid.dev_type && 159 - cdev->id.dev_model == cdev->private->senseid.dev_model; 154 + return cdev->id.cu_type == 155 + cdev->private->dma_area->senseid.cu_type && 156 + cdev->id.cu_model == 157 + cdev->private->dma_area->senseid.cu_model && 158 + cdev->id.dev_type == 159 + cdev->private->dma_area->senseid.dev_type && 160 + cdev->id.dev_model == 161 + cdev->private->dma_area->senseid.dev_model; 160 162 } 161 163 162 164 /* ··· 348 342 cio_disable_subchannel(sch); 349 343 350 344 /* Reset device status. 
*/ 351 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 345 + memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 352 346 353 347 cdev->private->state = state; 354 348 ··· 515 509 ccw_device_done(cdev, DEV_STATE_ONLINE); 516 510 /* Deliver fake irb to device driver, if needed. */ 517 511 if (cdev->private->flags.fake_irb) { 518 - create_fake_irb(&cdev->private->irb, 512 + create_fake_irb(&cdev->private->dma_area->irb, 519 513 cdev->private->flags.fake_irb); 520 514 cdev->private->flags.fake_irb = 0; 521 515 if (cdev->handler) 522 516 cdev->handler(cdev, cdev->private->intparm, 523 - &cdev->private->irb); 524 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 517 + &cdev->private->dma_area->irb); 518 + memset(&cdev->private->dma_area->irb, 0, 519 + sizeof(struct irb)); 525 520 } 526 521 ccw_device_report_path_events(cdev); 527 522 ccw_device_handle_broken_paths(cdev); ··· 679 672 680 673 if (scsw_actl(&sch->schib.scsw) != 0 || 681 674 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || 682 - (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { 675 + (scsw_stctl(&cdev->private->dma_area->irb.scsw) & 676 + SCSW_STCTL_STATUS_PEND)) { 683 677 /* 684 678 * No final status yet or final status not yet delivered 685 679 * to the device driver. 
Can't do path verification now, ··· 727 719 * - fast notification was requested (primary status) 728 720 * - unsolicited interrupts 729 721 */ 730 - stctl = scsw_stctl(&cdev->private->irb.scsw); 722 + stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw); 731 723 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 732 724 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 733 725 (stctl == SCSW_STCTL_STATUS_PEND); ··· 743 735 744 736 if (cdev->handler) 745 737 cdev->handler(cdev, cdev->private->intparm, 746 - &cdev->private->irb); 738 + &cdev->private->dma_area->irb); 747 739 748 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 740 + memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 749 741 return 1; 750 742 } 751 743 ··· 767 759 /* Unit check but no sense data. Need basic sense. */ 768 760 if (ccw_device_do_sense(cdev, irb) != 0) 769 761 goto call_handler_unsol; 770 - memcpy(&cdev->private->irb, irb, sizeof(struct irb)); 762 + memcpy(&cdev->private->dma_area->irb, irb, 763 + sizeof(struct irb)); 771 764 cdev->private->state = DEV_STATE_W4SENSE; 772 765 cdev->private->intparm = 0; 773 766 return; ··· 851 842 if (scsw_fctl(&irb->scsw) & 852 843 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 853 844 cdev->private->flags.dosense = 0; 854 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 845 + memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 855 846 ccw_device_accumulate_irb(cdev, irb); 856 847 goto call_handler; 857 848 }
+11 -9
drivers/s390/cio/device_id.c
··· 99 99 static int diag210_get_dev_info(struct ccw_device *cdev) 100 100 { 101 101 struct ccw_dev_id *dev_id = &cdev->private->dev_id; 102 - struct senseid *senseid = &cdev->private->senseid; 102 + struct senseid *senseid = &cdev->private->dma_area->senseid; 103 103 struct diag210 diag_data; 104 104 int rc; 105 105 ··· 134 134 static void snsid_init(struct ccw_device *cdev) 135 135 { 136 136 cdev->private->flags.esid = 0; 137 - memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid)); 138 - cdev->private->senseid.cu_type = 0xffff; 137 + 138 + memset(&cdev->private->dma_area->senseid, 0, 139 + sizeof(cdev->private->dma_area->senseid)); 140 + cdev->private->dma_area->senseid.cu_type = 0xffff; 139 141 } 140 142 141 143 /* ··· 145 143 */ 146 144 static int snsid_check(struct ccw_device *cdev, void *data) 147 145 { 148 - struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd; 146 + struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd; 149 147 int len = sizeof(struct senseid) - scsw->count; 150 148 151 149 /* Check for incomplete SENSE ID data. */ 152 150 if (len < SENSE_ID_MIN_LEN) 153 151 goto out_restart; 154 - if (cdev->private->senseid.cu_type == 0xffff) 152 + if (cdev->private->dma_area->senseid.cu_type == 0xffff) 155 153 goto out_restart; 156 154 /* Check for incompatible SENSE ID data. */ 157 - if (cdev->private->senseid.reserved != 0xff) 155 + if (cdev->private->dma_area->senseid.reserved != 0xff) 158 156 return -EOPNOTSUPP; 159 157 /* Check for extended-identification information. 
*/ 160 158 if (len > SENSE_ID_BASIC_LEN) ··· 172 170 static void snsid_callback(struct ccw_device *cdev, void *data, int rc) 173 171 { 174 172 struct ccw_dev_id *id = &cdev->private->dev_id; 175 - struct senseid *senseid = &cdev->private->senseid; 173 + struct senseid *senseid = &cdev->private->dma_area->senseid; 176 174 int vm = 0; 177 175 178 176 if (rc && MACHINE_IS_VM) { ··· 202 200 { 203 201 struct subchannel *sch = to_subchannel(cdev->dev.parent); 204 202 struct ccw_request *req = &cdev->private->req; 205 - struct ccw1 *cp = cdev->private->iccws; 203 + struct ccw1 *cp = cdev->private->dma_area->iccws; 206 204 207 205 CIO_TRACE_EVENT(4, "snsid"); 208 206 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); ··· 210 208 snsid_init(cdev); 211 209 /* Channel program setup. */ 212 210 cp->cmd_code = CCW_CMD_SENSE_ID; 213 - cp->cda = (u32) (addr_t) &cdev->private->senseid; 211 + cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid; 214 212 cp->count = sizeof(struct senseid); 215 213 cp->flags = CCW_FLAG_SLI; 216 214 /* Request setup. */
+19 -2
drivers/s390/cio/device_ops.c
··· 429 429 if (cdev->private->flags.esid == 0) 430 430 return NULL; 431 431 for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++) 432 - if (cdev->private->senseid.ciw[ciw_cnt].ct == ct) 433 - return cdev->private->senseid.ciw + ciw_cnt; 432 + if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct) 433 + return cdev->private->dma_area->senseid.ciw + ciw_cnt; 434 434 return NULL; 435 435 } 436 436 ··· 698 698 *schid = sch->schid; 699 699 } 700 700 EXPORT_SYMBOL_GPL(ccw_device_get_schid); 701 + 702 + /* 703 + * Allocate zeroed dma coherent 31 bit addressable memory using 704 + * the subchannels dma pool. Maximal size of allocation supported 705 + * is PAGE_SIZE. 706 + */ 707 + void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size) 708 + { 709 + return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size); 710 + } 711 + EXPORT_SYMBOL(ccw_device_dma_zalloc); 712 + 713 + void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size) 714 + { 715 + cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size); 716 + } 717 + EXPORT_SYMBOL(ccw_device_dma_free); 701 718 702 719 EXPORT_SYMBOL(ccw_device_set_options_mask); 703 720 EXPORT_SYMBOL(ccw_device_set_options);
+12 -10
drivers/s390/cio/device_pgid.c
··· 57 57 static void nop_build_cp(struct ccw_device *cdev) 58 58 { 59 59 struct ccw_request *req = &cdev->private->req; 60 - struct ccw1 *cp = cdev->private->iccws; 60 + struct ccw1 *cp = cdev->private->dma_area->iccws; 61 61 62 62 cp->cmd_code = CCW_CMD_NOOP; 63 63 cp->cda = 0; ··· 134 134 static void spid_build_cp(struct ccw_device *cdev, u8 fn) 135 135 { 136 136 struct ccw_request *req = &cdev->private->req; 137 - struct ccw1 *cp = cdev->private->iccws; 137 + struct ccw1 *cp = cdev->private->dma_area->iccws; 138 138 int i = pathmask_to_pos(req->lpm); 139 - struct pgid *pgid = &cdev->private->pgid[i]; 139 + struct pgid *pgid = &cdev->private->dma_area->pgid[i]; 140 140 141 141 pgid->inf.fc = fn; 142 142 cp->cmd_code = CCW_CMD_SET_PGID; ··· 300 300 static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, 301 301 int *mismatch, u8 *reserved, u8 *reset) 302 302 { 303 - struct pgid *pgid = &cdev->private->pgid[0]; 303 + struct pgid *pgid = &cdev->private->dma_area->pgid[0]; 304 304 struct pgid *first = NULL; 305 305 int lpm; 306 306 int i; ··· 342 342 lpm = 0x80 >> i; 343 343 if ((cdev->private->pgid_valid_mask & lpm) == 0) 344 344 continue; 345 - pgid = &cdev->private->pgid[i]; 345 + pgid = &cdev->private->dma_area->pgid[i]; 346 346 if (sch->opm & lpm) { 347 347 if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED) 348 348 continue; ··· 368 368 int i; 369 369 370 370 for (i = 0; i < 8; i++) 371 - memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid)); 371 + memcpy(&cdev->private->dma_area->pgid[i], pgid, 372 + sizeof(struct pgid)); 372 373 } 373 374 374 375 /* ··· 436 435 static void snid_build_cp(struct ccw_device *cdev) 437 436 { 438 437 struct ccw_request *req = &cdev->private->req; 439 - struct ccw1 *cp = cdev->private->iccws; 438 + struct ccw1 *cp = cdev->private->dma_area->iccws; 440 439 int i = pathmask_to_pos(req->lpm); 441 440 442 441 /* Channel program setup. 
*/ 443 442 cp->cmd_code = CCW_CMD_SENSE_PGID; 444 - cp->cda = (u32) (addr_t) &cdev->private->pgid[i]; 443 + cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i]; 445 444 cp->count = sizeof(struct pgid); 446 445 cp->flags = CCW_FLAG_SLI; 447 446 req->cp = cp; ··· 517 516 sch->lpm = sch->schib.pmcw.pam; 518 517 519 518 /* Initialize PGID data. */ 520 - memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); 519 + memset(cdev->private->dma_area->pgid, 0, 520 + sizeof(cdev->private->dma_area->pgid)); 521 521 cdev->private->pgid_valid_mask = 0; 522 522 cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; 523 523 cdev->private->path_notoper_mask = 0; ··· 628 626 static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2) 629 627 { 630 628 struct ccw_request *req = &cdev->private->req; 631 - struct ccw1 *cp = cdev->private->iccws; 629 + struct ccw1 *cp = cdev->private->dma_area->iccws; 632 630 633 631 cp[0].cmd_code = CCW_CMD_STLCK; 634 632 cp[0].cda = (u32) (addr_t) buf1;
+12 -12
drivers/s390/cio/device_status.c
··· 79 79 * are condition that have to be met for the extended control 80 80 * bit to have meaning. Sick. 81 81 */ 82 - cdev->private->irb.scsw.cmd.ectl = 0; 82 + cdev->private->dma_area->irb.scsw.cmd.ectl = 0; 83 83 if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) && 84 84 !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS)) 85 - cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl; 85 + cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl; 86 86 /* Check if extended control word is valid. */ 87 - if (!cdev->private->irb.scsw.cmd.ectl) 87 + if (!cdev->private->dma_area->irb.scsw.cmd.ectl) 88 88 return; 89 89 /* Copy concurrent sense / model dependent information. */ 90 - memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); 90 + memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw)); 91 91 } 92 92 93 93 /* ··· 118 118 if (!ccw_device_accumulate_esw_valid(irb)) 119 119 return; 120 120 121 - cdev_irb = &cdev->private->irb; 121 + cdev_irb = &cdev->private->dma_area->irb; 122 122 123 123 /* Copy last path used mask. */ 124 124 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; ··· 210 210 ccw_device_path_notoper(cdev); 211 211 /* No irb accumulation for transport mode irbs. */ 212 212 if (scsw_is_tm(&irb->scsw)) { 213 - memcpy(&cdev->private->irb, irb, sizeof(struct irb)); 213 + memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb)); 214 214 return; 215 215 } 216 216 /* ··· 219 219 if (!scsw_is_solicited(&irb->scsw)) 220 220 return; 221 221 222 - cdev_irb = &cdev->private->irb; 222 + cdev_irb = &cdev->private->dma_area->irb; 223 223 224 224 /* 225 225 * If the clear function had been performed, all formerly pending ··· 227 227 * intermediate accumulated status to the device driver. 
228 228 */ 229 229 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) 230 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 230 + memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 231 231 232 232 /* Copy bits which are valid only for the start function. */ 233 233 if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) { ··· 329 329 /* 330 330 * We have ending status but no sense information. Do a basic sense. 331 331 */ 332 - sense_ccw = &to_io_private(sch)->sense_ccw; 332 + sense_ccw = &to_io_private(sch)->dma_area->sense_ccw; 333 333 sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE; 334 - sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw); 334 + sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw); 335 335 sense_ccw->count = SENSE_MAX_COUNT; 336 336 sense_ccw->flags = CCW_FLAG_SLI; 337 337 ··· 364 364 365 365 if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && 366 366 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) { 367 - cdev->private->irb.esw.esw0.erw.cons = 1; 367 + cdev->private->dma_area->irb.esw.esw0.erw.cons = 1; 368 368 cdev->private->flags.dosense = 0; 369 369 } 370 370 /* Check if path verification is required. */ ··· 386 386 /* Check for basic sense. */ 387 387 if (cdev->private->flags.dosense && 388 388 !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) { 389 - cdev->private->irb.esw.esw0.erw.cons = 1; 389 + cdev->private->dma_area->irb.esw.esw0.erw.cons = 1; 390 390 cdev->private->flags.dosense = 0; 391 391 return 0; 392 392 }
+15 -5
drivers/s390/cio/io_sch.h
··· 9 9 #include "css.h" 10 10 #include "orb.h" 11 11 12 + struct io_subchannel_dma_area { 13 + struct ccw1 sense_ccw; /* static ccw for sense command */ 14 + }; 15 + 12 16 struct io_subchannel_private { 13 17 union orb orb; /* operation request block */ 14 - struct ccw1 sense_ccw; /* static ccw for sense command */ 15 18 struct ccw_device *cdev;/* pointer to the child ccw device */ 16 19 struct { 17 20 unsigned int suspend:1; /* allow suspend */ 18 21 unsigned int prefetch:1;/* deny prefetch */ 19 22 unsigned int inter:1; /* suppress intermediate interrupts */ 20 23 } __packed options; 24 + struct io_subchannel_dma_area *dma_area; 25 + dma_addr_t dma_area_dma; 21 26 } __aligned(8); 22 27 23 28 #define to_io_private(n) ((struct io_subchannel_private *) \ ··· 120 115 #define FAKE_CMD_IRB 1 121 116 #define FAKE_TM_IRB 2 122 117 118 + struct ccw_device_dma_area { 119 + struct senseid senseid; /* SenseID info */ 120 + struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ 121 + struct irb irb; /* device status */ 122 + struct pgid pgid[8]; /* path group IDs per chpid*/ 123 + }; 124 + 123 125 struct ccw_device_private { 124 126 struct ccw_device *cdev; 125 127 struct subchannel *sch; ··· 168 156 } __attribute__((packed)) flags; 169 157 unsigned long intparm; /* user interruption parameter */ 170 158 struct qdio_irq *qdio_data; 171 - struct irb irb; /* device status */ 172 159 int async_kill_io_rc; 173 - struct senseid senseid; /* SenseID info */ 174 - struct pgid pgid[8]; /* path group IDs per chpid*/ 175 - struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ 176 160 struct work_struct todo_work; 177 161 enum cdev_todo todo; 178 162 wait_queue_head_t wait_q; ··· 177 169 struct list_head cmb_list; /* list of measured devices */ 178 170 u64 cmb_start_time; /* clock value of cmb reset */ 179 171 void *cmb_wait; /* deferred cmb enable/disable */ 172 + struct gen_pool *dma_pool; 173 + struct ccw_device_dma_area *dma_area; 180 174 enum interruption_class 
int_class; 181 175 }; 182 176
-10
drivers/s390/virtio/virtio_ccw.c
··· 66 66 bool device_lost; 67 67 unsigned int config_ready; 68 68 void *airq_info; 69 - u64 dma_mask; 70 69 }; 71 70 72 71 struct vq_info_block_legacy { ··· 1254 1255 ret = -ENOMEM; 1255 1256 goto out_free; 1256 1257 } 1257 - 1258 1258 vcdev->vdev.dev.parent = &cdev->dev; 1259 - cdev->dev.dma_mask = &vcdev->dma_mask; 1260 - /* we are fine with common virtio infrastructure using 64 bit DMA */ 1261 - ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64)); 1262 - if (ret) { 1263 - dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n"); 1264 - goto out_free; 1265 - } 1266 - 1267 1259 vcdev->config_block = kzalloc(sizeof(*vcdev->config_block), 1268 1260 GFP_DMA | GFP_KERNEL); 1269 1261 if (!vcdev->config_block) {