Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vfio-ccw: Wire up the CRW irq and CRW region

Use the IRQ to notify userspace that there is a CRW
pending in the region, related to path-availability
changes on the passthrough subchannel.

Signed-off-by: Farhan Ali <alifm@linux.ibm.com>
Signed-off-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-Id: <20200505122745.53208-8-farman@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>

Authored by Farhan Ali and committed by Cornelia Huck
3f02cb2f d8cac29b

+74
+17
drivers/s390/cio/vfio_ccw_chp.c
··· 82 82 unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS; 83 83 loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; 84 84 struct ccw_crw_region *region; 85 + struct vfio_ccw_crw *crw; 85 86 int ret; 86 87 87 88 if (pos + count > sizeof(*region)) 88 89 return -EINVAL; 89 90 91 + crw = list_first_entry_or_null(&private->crw, 92 + struct vfio_ccw_crw, next); 93 + 94 + if (crw) 95 + list_del(&crw->next); 96 + 90 97 mutex_lock(&private->io_mutex); 91 98 region = private->region[i].data; 99 + 100 + if (crw) 101 + memcpy(&region->crw, &crw->crw, sizeof(region->crw)); 92 102 93 103 if (copy_to_user(buf, (void *)region + pos, count)) 94 104 ret = -EFAULT; ··· 108 98 region->crw = 0; 109 99 110 100 mutex_unlock(&private->io_mutex); 101 + 102 + kfree(crw); 103 + 104 + /* Notify the guest if more CRWs are on our queue */ 105 + if (!list_empty(&private->crw) && private->crw_trigger) 106 + eventfd_signal(private->crw_trigger, 1); 107 + 111 108 return ret; 112 109 } 113 110
+49
drivers/s390/cio/vfio_ccw_drv.c
··· 108 108 eventfd_signal(private->io_trigger, 1); 109 109 } 110 110 111 + static void vfio_ccw_crw_todo(struct work_struct *work) 112 + { 113 + struct vfio_ccw_private *private; 114 + 115 + private = container_of(work, struct vfio_ccw_private, crw_work); 116 + 117 + if (!list_empty(&private->crw) && private->crw_trigger) 118 + eventfd_signal(private->crw_trigger, 1); 119 + } 120 + 111 121 /* 112 122 * Css driver callbacks 113 123 */ ··· 196 186 if (ret) 197 187 goto out_free; 198 188 189 + INIT_LIST_HEAD(&private->crw); 199 190 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); 191 + INIT_WORK(&private->crw_work, vfio_ccw_crw_todo); 200 192 atomic_set(&private->avail, 1); 201 193 private->state = VFIO_CCW_STATE_STANDBY; 202 194 ··· 229 217 static int vfio_ccw_sch_remove(struct subchannel *sch) 230 218 { 231 219 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 220 + struct vfio_ccw_crw *crw, *temp; 232 221 233 222 vfio_ccw_sch_quiesce(sch); 223 + 224 + list_for_each_entry_safe(crw, temp, &private->crw, next) { 225 + list_del(&crw->next); 226 + kfree(crw); 227 + } 234 228 235 229 vfio_ccw_mdev_unreg(sch); 236 230 ··· 299 281 return rc; 300 282 } 301 283 284 + static void vfio_ccw_queue_crw(struct vfio_ccw_private *private, 285 + unsigned int rsc, 286 + unsigned int erc, 287 + unsigned int rsid) 288 + { 289 + struct vfio_ccw_crw *crw; 290 + 291 + /* 292 + * If unable to allocate a CRW, just drop the event and 293 + * carry on. The guest will either see a later one or 294 + * learn when it issues its own store subchannel. 295 + */ 296 + crw = kzalloc(sizeof(*crw), GFP_ATOMIC); 297 + if (!crw) 298 + return; 299 + 300 + /* 301 + * Build the CRW based on the inputs given to us. 
302 + */ 303 + crw->crw.rsc = rsc; 304 + crw->crw.erc = erc; 305 + crw->crw.rsid = rsid; 306 + 307 + list_add_tail(&crw->next, &private->crw); 308 + queue_work(vfio_ccw_work_q, &private->crw_work); 309 + } 310 + 302 311 static int vfio_ccw_chp_event(struct subchannel *sch, 303 312 struct chp_link *link, int event) 304 313 { ··· 356 311 /* Path is gone */ 357 312 if (sch->schib.pmcw.lpum & mask) 358 313 cio_cancel_halt_clear(sch, &retry); 314 + vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN, 315 + link->chpid.id); 359 316 break; 360 317 case CHP_VARY_ON: 361 318 /* Path logically turned on */ ··· 367 320 case CHP_ONLINE: 368 321 /* Path became available */ 369 322 sch->lpm |= mask & sch->opm; 323 + vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT, 324 + link->chpid.id); 370 325 break; 371 326 } 372 327
+8
drivers/s390/cio/vfio_ccw_private.h
··· 17 17 #include <linux/eventfd.h> 18 18 #include <linux/workqueue.h> 19 19 #include <linux/vfio_ccw.h> 20 + #include <asm/crw.h> 20 21 #include <asm/debug.h> 21 22 22 23 #include "css.h" ··· 60 59 int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private); 61 60 int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private); 62 61 62 + struct vfio_ccw_crw { 63 + struct list_head next; 64 + struct crw crw; 65 + }; 66 + 63 67 /** 64 68 * struct vfio_ccw_private 65 69 * @sch: pointer to the subchannel ··· 104 98 struct channel_program cp; 105 99 struct irb irb; 106 100 union scsw scsw; 101 + struct list_head crw; 107 102 108 103 struct eventfd_ctx *io_trigger; 109 104 struct eventfd_ctx *crw_trigger; 110 105 struct work_struct io_work; 106 + struct work_struct crw_work; 111 107 } __aligned(8); 112 108 113 109 extern int vfio_ccw_mdev_reg(struct subchannel *sch);