Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
"This includes a couple of bugfixes for virtio.

The virtio console patch is actually also in x86/tip targeting 4.9
because it helps vmap stacks, but it also fixes IOMMU_PLATFORM which
was added in 4.8, and it seems important not to ship that in a broken
configuration"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
virtio_console: Stop doing DMA on the stack
virtio: mark vring_dma_dev() static

+16 -9
+15 -8
drivers/char/virtio_console.c
··· 165 */ 166 struct virtqueue *c_ivq, *c_ovq; 167 168 /* Array of per-port IO virtqueues */ 169 struct virtqueue **in_vqs, **out_vqs; 170 ··· 566 unsigned int event, unsigned int value) 567 { 568 struct scatterlist sg[1]; 569 - struct virtio_console_control cpkt; 570 struct virtqueue *vq; 571 unsigned int len; 572 573 if (!use_multiport(portdev)) 574 return 0; 575 576 - cpkt.id = cpu_to_virtio32(portdev->vdev, port_id); 577 - cpkt.event = cpu_to_virtio16(portdev->vdev, event); 578 - cpkt.value = cpu_to_virtio16(portdev->vdev, value); 579 - 580 vq = portdev->c_ovq; 581 582 - sg_init_one(sg, &cpkt, sizeof(cpkt)); 583 - 584 spin_lock(&portdev->c_ovq_lock); 585 - if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) { 586 virtqueue_kick(vq); 587 while (!virtqueue_get_buf(vq, &len) 588 && !virtqueue_is_broken(vq)) 589 cpu_relax(); 590 } 591 spin_unlock(&portdev->c_ovq_lock); 592 return 0; 593 }
··· 165 */ 166 struct virtqueue *c_ivq, *c_ovq; 167 168 + /* 169 + * A control packet buffer for guest->host requests, protected 170 + * by c_ovq_lock. 171 + */ 172 + struct virtio_console_control cpkt; 173 + 174 /* Array of per-port IO virtqueues */ 175 struct virtqueue **in_vqs, **out_vqs; 176 ··· 560 unsigned int event, unsigned int value) 561 { 562 struct scatterlist sg[1]; 563 struct virtqueue *vq; 564 unsigned int len; 565 566 if (!use_multiport(portdev)) 567 return 0; 568 569 vq = portdev->c_ovq; 570 571 spin_lock(&portdev->c_ovq_lock); 572 + 573 + portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id); 574 + portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event); 575 + portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value); 576 + 577 + sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control)); 578 + 579 + if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) { 580 virtqueue_kick(vq); 581 while (!virtqueue_get_buf(vq, &len) 582 && !virtqueue_is_broken(vq)) 583 cpu_relax(); 584 } 585 + 586 spin_unlock(&portdev->c_ovq_lock); 587 return 0; 588 }
+1 -1
drivers/virtio/virtio_ring.c
··· 167 * making all of the arch DMA ops work on the vring device itself 168 * is a mess. For now, we use the parent device for DMA ops. 169 */ 170 - struct device *vring_dma_dev(const struct vring_virtqueue *vq) 171 { 172 return vq->vq.vdev->dev.parent; 173 }
··· 167 * making all of the arch DMA ops work on the vring device itself 168 * is a mess. For now, we use the parent device for DMA ops. 169 */ 170 + static struct device *vring_dma_dev(const struct vring_virtqueue *vq) 171 { 172 return vq->vq.vdev->dev.parent; 173 }