Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (27 commits)
drivers/char: Eliminate use after free
virtio: console: Accept console size along with resize control message
virtio: console: Store each console's size in the console structure
virtio: console: Resize console port 0 on config intr only if multiport is off
virtio: console: Add support for nonblocking write()s
virtio: console: Rename wait_is_over() to will_read_block()
virtio: console: Don't always create a port 0 if using multiport
virtio: console: Use a control message to add ports
virtio: console: Move code around for future patches
virtio: console: Remove config work handler
virtio: console: Don't call hvc_remove() on unplugging console ports
virtio: console: Return -EPIPE to hvc_console if we lost the connection
virtio: console: Let host know of port or device add failures
virtio: console: Add a __send_control_msg() that can send messages without a valid port
virtio: Revert "virtio: disable multiport console support."
virtio: add_buf_gfp
trans_virtio: use virtqueue_xxx wrappers
virtio-rng: use virtqueue_xxx wrappers
virtio_ring: remove a level of indirection
virtio_net: use virtqueue_xxx wrappers
...

Fix up conflicts in drivers/net/virtio_net.c due to the new virtqueue_xxx
wrapper changes conflicting with some other cleanups.

+542 -410
+43 -3
drivers/block/virtio_blk.c
··· 50 50 unsigned long flags; 51 51 52 52 spin_lock_irqsave(&vblk->lock, flags); 53 - while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 53 + while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { 54 54 int error; 55 55 56 56 switch (vbr->status) { ··· 70 70 vbr->req->sense_len = vbr->in_hdr.sense_len; 71 71 vbr->req->errors = vbr->in_hdr.errors; 72 72 } 73 + if (blk_special_request(vbr->req)) 74 + vbr->req->errors = (error != 0); 73 75 74 76 __blk_end_request_all(vbr->req, error); 75 77 list_del(&vbr->list); ··· 102 100 break; 103 101 case REQ_TYPE_BLOCK_PC: 104 102 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 103 + vbr->out_hdr.sector = 0; 104 + vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 105 + break; 106 + case REQ_TYPE_SPECIAL: 107 + vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; 105 108 vbr->out_hdr.sector = 0; 106 109 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 107 110 break; ··· 158 151 } 159 152 } 160 153 161 - if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { 154 + if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { 162 155 mempool_free(vbr, vblk->pool); 163 156 return false; 164 157 } ··· 187 180 } 188 181 189 182 if (issued) 190 - vblk->vq->vq_ops->kick(vblk->vq); 183 + virtqueue_kick(vblk->vq); 191 184 } 192 185 193 186 static void virtblk_prepare_flush(struct request_queue *q, struct request *req) ··· 196 189 req->cmd[0] = REQ_LB_OP_FLUSH; 197 190 } 198 191 192 + /* return id (s/n) string for *disk to *id_str 193 + */ 194 + static int virtblk_get_id(struct gendisk *disk, char *id_str) 195 + { 196 + struct virtio_blk *vblk = disk->private_data; 197 + struct request *req; 198 + struct bio *bio; 199 + 200 + bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, 201 + GFP_KERNEL); 202 + if (IS_ERR(bio)) 203 + return PTR_ERR(bio); 204 + 205 + req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); 206 + if (IS_ERR(req)) { 207 + bio_put(bio); 208 + return PTR_ERR(req); 209 + } 210 + 211 + 
req->cmd_type = REQ_TYPE_SPECIAL; 212 + return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); 213 + } 214 + 199 215 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 200 216 unsigned cmd, unsigned long data) 201 217 { 202 218 struct gendisk *disk = bdev->bd_disk; 203 219 struct virtio_blk *vblk = disk->private_data; 204 220 221 + if (cmd == 0x56424944) { /* 'VBID' */ 222 + void __user *usr_data = (void __user *)data; 223 + char id_str[VIRTIO_BLK_ID_BYTES]; 224 + int err; 225 + 226 + err = virtblk_get_id(disk, id_str); 227 + if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES)) 228 + err = -EFAULT; 229 + return err; 230 + } 205 231 /* 206 232 * Only allow the generic SCSI ioctls if the host can support it. 207 233 */
+3 -3
drivers/char/hw_random/virtio-rng.c
··· 32 32 static void random_recv_done(struct virtqueue *vq) 33 33 { 34 34 /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ 35 - if (!vq->vq_ops->get_buf(vq, &data_avail)) 35 + if (!virtqueue_get_buf(vq, &data_avail)) 36 36 return; 37 37 38 38 complete(&have_data); ··· 46 46 sg_init_one(&sg, buf, size); 47 47 48 48 /* There should always be room for one buffer. */ 49 - if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0) 49 + if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0) 50 50 BUG(); 51 51 52 - vq->vq_ops->kick(vq); 52 + virtqueue_kick(vq); 53 53 } 54 54 55 55 static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
+378 -324
drivers/char/virtio_console.c
··· 33 33 #include <linux/workqueue.h> 34 34 #include "hvc_console.h" 35 35 36 - /* Moved here from .h file in order to disable MULTIPORT. */ 37 - #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ 38 - 39 - struct virtio_console_multiport_conf { 40 - struct virtio_console_config config; 41 - /* max. number of ports this device can hold */ 42 - __u32 max_nr_ports; 43 - /* number of ports added so far */ 44 - __u32 nr_ports; 45 - } __attribute__((packed)); 46 - 47 - /* 48 - * A message that's passed between the Host and the Guest for a 49 - * particular port. 50 - */ 51 - struct virtio_console_control { 52 - __u32 id; /* Port number */ 53 - __u16 event; /* The kind of control event (see below) */ 54 - __u16 value; /* Extra information for the key */ 55 - }; 56 - 57 - /* Some events for control messages */ 58 - #define VIRTIO_CONSOLE_PORT_READY 0 59 - #define VIRTIO_CONSOLE_CONSOLE_PORT 1 60 - #define VIRTIO_CONSOLE_RESIZE 2 61 - #define VIRTIO_CONSOLE_PORT_OPEN 3 62 - #define VIRTIO_CONSOLE_PORT_NAME 4 63 - #define VIRTIO_CONSOLE_PORT_REMOVE 5 64 - 65 36 /* 66 37 * This is a global struct for storing common data for all the devices 67 38 * this driver handles. 
··· 78 107 /* The hvc device associated with this console port */ 79 108 struct hvc_struct *hvc; 80 109 110 + /* The size of the console */ 111 + struct winsize ws; 112 + 81 113 /* 82 114 * This number identifies the number that we used to register 83 115 * with hvc in hvc_instantiate() and hvc_alloc(); this is the ··· 113 139 * notification 114 140 */ 115 141 struct work_struct control_work; 116 - struct work_struct config_work; 117 142 118 143 struct list_head ports; 119 144 ··· 123 150 spinlock_t cvq_lock; 124 151 125 152 /* The current config space is stored here */ 126 - struct virtio_console_multiport_conf config; 153 + struct virtio_console_config config; 127 154 128 155 /* The virtio device we're associated with */ 129 156 struct virtio_device *vdev; ··· 162 189 */ 163 190 spinlock_t inbuf_lock; 164 191 192 + /* Protect the operations on the out_vq. */ 193 + spinlock_t outvq_lock; 194 + 165 195 /* The IO vqs for this port */ 166 196 struct virtqueue *in_vq, *out_vq; 167 197 ··· 189 213 190 214 /* The 'id' to identify the port with the Host */ 191 215 u32 id; 216 + 217 + bool outvq_full; 192 218 193 219 /* Is the host device open */ 194 220 bool host_connected; ··· 306 328 unsigned int len; 307 329 308 330 vq = port->in_vq; 309 - buf = vq->vq_ops->get_buf(vq, &len); 331 + buf = virtqueue_get_buf(vq, &len); 310 332 if (buf) { 311 333 buf->len = len; 312 334 buf->offset = 0; ··· 327 349 328 350 sg_init_one(sg, buf->buf, buf->size); 329 351 330 - ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); 331 - vq->vq_ops->kick(vq); 352 + ret = virtqueue_add_buf(vq, sg, 0, 1, buf); 353 + virtqueue_kick(vq); 332 354 return ret; 333 355 } 334 356 ··· 344 366 if (port->inbuf) 345 367 buf = port->inbuf; 346 368 else 347 - buf = vq->vq_ops->get_buf(vq, &len); 369 + buf = virtqueue_get_buf(vq, &len); 348 370 349 371 ret = 0; 350 372 while (buf) { ··· 352 374 ret++; 353 375 free_buf(buf); 354 376 } 355 - buf = vq->vq_ops->get_buf(vq, &len); 377 + buf = virtqueue_get_buf(vq, &len); 
356 378 } 357 379 port->inbuf = NULL; 358 380 if (ret) ··· 381 403 return ret; 382 404 } 383 405 384 - static ssize_t send_control_msg(struct port *port, unsigned int event, 385 - unsigned int value) 406 + static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, 407 + unsigned int event, unsigned int value) 386 408 { 387 409 struct scatterlist sg[1]; 388 410 struct virtio_console_control cpkt; 389 411 struct virtqueue *vq; 390 412 unsigned int len; 391 413 392 - if (!use_multiport(port->portdev)) 414 + if (!use_multiport(portdev)) 393 415 return 0; 394 416 395 - cpkt.id = port->id; 417 + cpkt.id = port_id; 396 418 cpkt.event = event; 397 419 cpkt.value = value; 398 420 399 - vq = port->portdev->c_ovq; 421 + vq = portdev->c_ovq; 400 422 401 423 sg_init_one(sg, &cpkt, sizeof(cpkt)); 402 - if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { 403 - vq->vq_ops->kick(vq); 404 - while (!vq->vq_ops->get_buf(vq, &len)) 424 + if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { 425 + virtqueue_kick(vq); 426 + while (!virtqueue_get_buf(vq, &len)) 405 427 cpu_relax(); 406 428 } 407 429 return 0; 408 430 } 409 431 410 - static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) 432 + static ssize_t send_control_msg(struct port *port, unsigned int event, 433 + unsigned int value) 434 + { 435 + return __send_control_msg(port->portdev, port->id, event, value); 436 + } 437 + 438 + /* Callers must take the port->outvq_lock */ 439 + static void reclaim_consumed_buffers(struct port *port) 440 + { 441 + void *buf; 442 + unsigned int len; 443 + 444 + while ((buf = virtqueue_get_buf(port->out_vq, &len))) { 445 + kfree(buf); 446 + port->outvq_full = false; 447 + } 448 + } 449 + 450 + static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, 451 + bool nonblock) 411 452 { 412 453 struct scatterlist sg[1]; 413 454 struct virtqueue *out_vq; 414 455 ssize_t ret; 456 + unsigned long flags; 415 457 unsigned int len; 416 458 417 459 out_vq = 
port->out_vq; 418 460 461 + spin_lock_irqsave(&port->outvq_lock, flags); 462 + 463 + reclaim_consumed_buffers(port); 464 + 419 465 sg_init_one(sg, in_buf, in_count); 420 - ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); 466 + ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); 421 467 422 468 /* Tell Host to go! */ 423 - out_vq->vq_ops->kick(out_vq); 469 + virtqueue_kick(out_vq); 424 470 425 471 if (ret < 0) { 426 472 in_count = 0; 427 - goto fail; 473 + goto done; 428 474 } 429 475 430 - /* Wait till the host acknowledges it pushed out the data we sent. */ 431 - while (!out_vq->vq_ops->get_buf(out_vq, &len)) 476 + if (ret == 0) 477 + port->outvq_full = true; 478 + 479 + if (nonblock) 480 + goto done; 481 + 482 + /* 483 + * Wait till the host acknowledges it pushed out the data we 484 + * sent. This is done for ports in blocking mode or for data 485 + * from the hvc_console; the tty operations are performed with 486 + * spinlocks held so we can't sleep here. 487 + */ 488 + while (!virtqueue_get_buf(out_vq, &len)) 432 489 cpu_relax(); 433 - fail: 434 - /* We're expected to return the amount of data we wrote */ 490 + done: 491 + spin_unlock_irqrestore(&port->outvq_lock, flags); 492 + /* 493 + * We're expected to return the amount of data we wrote -- all 494 + * of it 495 + */ 435 496 return in_count; 436 497 } 437 498 ··· 520 503 } 521 504 522 505 /* The condition that must be true for polling to end */ 523 - static bool wait_is_over(struct port *port) 506 + static bool will_read_block(struct port *port) 524 507 { 525 - return port_has_data(port) || !port->host_connected; 508 + return !port_has_data(port) && port->host_connected; 509 + } 510 + 511 + static bool will_write_block(struct port *port) 512 + { 513 + bool ret; 514 + 515 + if (!port->host_connected) 516 + return true; 517 + 518 + spin_lock_irq(&port->outvq_lock); 519 + /* 520 + * Check if the Host has consumed any buffers since we last 521 + * sent data (this is only applicable for nonblocking 
ports). 522 + */ 523 + reclaim_consumed_buffers(port); 524 + ret = port->outvq_full; 525 + spin_unlock_irq(&port->outvq_lock); 526 + 527 + return ret; 526 528 } 527 529 528 530 static ssize_t port_fops_read(struct file *filp, char __user *ubuf, ··· 564 528 return -EAGAIN; 565 529 566 530 ret = wait_event_interruptible(port->waitqueue, 567 - wait_is_over(port)); 531 + !will_read_block(port)); 568 532 if (ret < 0) 569 533 return ret; 570 534 } ··· 590 554 struct port *port; 591 555 char *buf; 592 556 ssize_t ret; 557 + bool nonblock; 593 558 594 559 port = filp->private_data; 560 + 561 + nonblock = filp->f_flags & O_NONBLOCK; 562 + 563 + if (will_write_block(port)) { 564 + if (nonblock) 565 + return -EAGAIN; 566 + 567 + ret = wait_event_interruptible(port->waitqueue, 568 + !will_write_block(port)); 569 + if (ret < 0) 570 + return ret; 571 + } 595 572 596 573 count = min((size_t)(32 * 1024), count); 597 574 ··· 618 569 goto free_buf; 619 570 } 620 571 621 - ret = send_buf(port, buf, count); 572 + ret = send_buf(port, buf, count, nonblock); 573 + 574 + if (nonblock && ret > 0) 575 + goto out; 576 + 622 577 free_buf: 623 578 kfree(buf); 579 + out: 624 580 return ret; 625 581 } 626 582 ··· 640 586 ret = 0; 641 587 if (port->inbuf) 642 588 ret |= POLLIN | POLLRDNORM; 643 - if (port->host_connected) 589 + if (!will_write_block(port)) 644 590 ret |= POLLOUT; 645 591 if (!port->host_connected) 646 592 ret |= POLLHUP; ··· 663 609 discard_port_data(port); 664 610 665 611 spin_unlock_irq(&port->inbuf_lock); 612 + 613 + spin_lock_irq(&port->outvq_lock); 614 + reclaim_consumed_buffers(port); 615 + spin_unlock_irq(&port->outvq_lock); 666 616 667 617 return 0; 668 618 } ··· 695 637 696 638 port->guest_connected = true; 697 639 spin_unlock_irq(&port->inbuf_lock); 640 + 641 + spin_lock_irq(&port->outvq_lock); 642 + /* 643 + * There might be a chance that we missed reclaiming a few 644 + * buffers in the window of the port getting previously closed 645 + * and opening now. 
646 + */ 647 + reclaim_consumed_buffers(port); 648 + spin_unlock_irq(&port->outvq_lock); 698 649 699 650 /* Notify host of port being opened */ 700 651 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); ··· 743 676 744 677 port = find_port_by_vtermno(vtermno); 745 678 if (!port) 746 - return 0; 679 + return -EPIPE; 747 680 748 - return send_buf(port, (void *)buf, count); 681 + return send_buf(port, (void *)buf, count, false); 749 682 } 750 683 751 684 /* ··· 759 692 { 760 693 struct port *port; 761 694 695 + /* If we've not set up the port yet, we have no input to give. */ 696 + if (unlikely(early_put_chars)) 697 + return 0; 698 + 762 699 port = find_port_by_vtermno(vtermno); 763 700 if (!port) 764 - return 0; 701 + return -EPIPE; 765 702 766 703 /* If we don't have an input queue yet, we can't get input. */ 767 704 BUG_ON(!port->in_vq); ··· 776 705 static void resize_console(struct port *port) 777 706 { 778 707 struct virtio_device *vdev; 779 - struct winsize ws; 780 708 781 709 /* The port could have been hot-unplugged */ 782 - if (!port) 710 + if (!port || !is_console_port(port)) 783 711 return; 784 712 785 713 vdev = port->portdev->vdev; 786 - if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { 787 - vdev->config->get(vdev, 788 - offsetof(struct virtio_console_config, cols), 789 - &ws.ws_col, sizeof(u16)); 790 - vdev->config->get(vdev, 791 - offsetof(struct virtio_console_config, rows), 792 - &ws.ws_row, sizeof(u16)); 793 - hvc_resize(port->cons.hvc, ws); 794 - } 714 + if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) 715 + hvc_resize(port->cons.hvc, port->cons.ws); 795 716 } 796 717 797 718 /* We set the configuration at this point, since we now have a tty */ ··· 867 804 spin_unlock_irq(&pdrvdata_lock); 868 805 port->guest_connected = true; 869 806 807 + /* 808 + * Start using the new console output if this is the first 809 + * console to come up. 
810 + */ 811 + if (early_put_chars) 812 + early_put_chars = NULL; 813 + 870 814 /* Notify host of port being opened */ 871 815 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); 872 816 ··· 929 859 out_offset += snprintf(buf + out_offset, out_count - out_offset, 930 860 "host_connected: %d\n", port->host_connected); 931 861 out_offset += snprintf(buf + out_offset, out_count - out_offset, 862 + "outvq_full: %d\n", port->outvq_full); 863 + out_offset += snprintf(buf + out_offset, out_count - out_offset, 932 864 "is_console: %s\n", 933 865 is_console_port(port) ? "yes" : "no"); 934 866 out_offset += snprintf(buf + out_offset, out_count - out_offset, ··· 947 875 .read = debugfs_read, 948 876 }; 949 877 878 + static void set_console_size(struct port *port, u16 rows, u16 cols) 879 + { 880 + if (!port || !is_console_port(port)) 881 + return; 882 + 883 + port->cons.ws.ws_row = rows; 884 + port->cons.ws.ws_col = cols; 885 + } 886 + 887 + static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) 888 + { 889 + struct port_buffer *buf; 890 + unsigned int nr_added_bufs; 891 + int ret; 892 + 893 + nr_added_bufs = 0; 894 + do { 895 + buf = alloc_buf(PAGE_SIZE); 896 + if (!buf) 897 + break; 898 + 899 + spin_lock_irq(lock); 900 + ret = add_inbuf(vq, buf); 901 + if (ret < 0) { 902 + spin_unlock_irq(lock); 903 + free_buf(buf); 904 + break; 905 + } 906 + nr_added_bufs++; 907 + spin_unlock_irq(lock); 908 + } while (ret > 0); 909 + 910 + return nr_added_bufs; 911 + } 912 + 913 + static int add_port(struct ports_device *portdev, u32 id) 914 + { 915 + char debugfs_name[16]; 916 + struct port *port; 917 + struct port_buffer *buf; 918 + dev_t devt; 919 + unsigned int nr_added_bufs; 920 + int err; 921 + 922 + port = kmalloc(sizeof(*port), GFP_KERNEL); 923 + if (!port) { 924 + err = -ENOMEM; 925 + goto fail; 926 + } 927 + 928 + port->portdev = portdev; 929 + port->id = id; 930 + 931 + port->name = NULL; 932 + port->inbuf = NULL; 933 + port->cons.hvc = NULL; 934 + 935 + 
port->cons.ws.ws_row = port->cons.ws.ws_col = 0; 936 + 937 + port->host_connected = port->guest_connected = false; 938 + 939 + port->outvq_full = false; 940 + 941 + port->in_vq = portdev->in_vqs[port->id]; 942 + port->out_vq = portdev->out_vqs[port->id]; 943 + 944 + cdev_init(&port->cdev, &port_fops); 945 + 946 + devt = MKDEV(portdev->chr_major, id); 947 + err = cdev_add(&port->cdev, devt, 1); 948 + if (err < 0) { 949 + dev_err(&port->portdev->vdev->dev, 950 + "Error %d adding cdev for port %u\n", err, id); 951 + goto free_port; 952 + } 953 + port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, 954 + devt, port, "vport%up%u", 955 + port->portdev->drv_index, id); 956 + if (IS_ERR(port->dev)) { 957 + err = PTR_ERR(port->dev); 958 + dev_err(&port->portdev->vdev->dev, 959 + "Error %d creating device for port %u\n", 960 + err, id); 961 + goto free_cdev; 962 + } 963 + 964 + spin_lock_init(&port->inbuf_lock); 965 + spin_lock_init(&port->outvq_lock); 966 + init_waitqueue_head(&port->waitqueue); 967 + 968 + /* Fill the in_vq with buffers so the host can send us data. */ 969 + nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); 970 + if (!nr_added_bufs) { 971 + dev_err(port->dev, "Error allocating inbufs\n"); 972 + err = -ENOMEM; 973 + goto free_device; 974 + } 975 + 976 + /* 977 + * If we're not using multiport support, this has to be a console port 978 + */ 979 + if (!use_multiport(port->portdev)) { 980 + err = init_port_console(port); 981 + if (err) 982 + goto free_inbufs; 983 + } 984 + 985 + spin_lock_irq(&portdev->ports_lock); 986 + list_add_tail(&port->list, &port->portdev->ports); 987 + spin_unlock_irq(&portdev->ports_lock); 988 + 989 + /* 990 + * Tell the Host we're set so that it can send us various 991 + * configuration parameters for this port (eg, port name, 992 + * caching, whether this is a console port, etc.) 
993 + */ 994 + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 995 + 996 + if (pdrvdata.debugfs_dir) { 997 + /* 998 + * Finally, create the debugfs file that we can use to 999 + * inspect a port's state at any time 1000 + */ 1001 + sprintf(debugfs_name, "vport%up%u", 1002 + port->portdev->drv_index, id); 1003 + port->debugfs_file = debugfs_create_file(debugfs_name, 0444, 1004 + pdrvdata.debugfs_dir, 1005 + port, 1006 + &port_debugfs_ops); 1007 + } 1008 + return 0; 1009 + 1010 + free_inbufs: 1011 + while ((buf = virtqueue_detach_unused_buf(port->in_vq))) 1012 + free_buf(buf); 1013 + free_device: 1014 + device_destroy(pdrvdata.class, port->dev->devt); 1015 + free_cdev: 1016 + cdev_del(&port->cdev); 1017 + free_port: 1018 + kfree(port); 1019 + fail: 1020 + /* The host might want to notify management sw about port add failure */ 1021 + __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); 1022 + return err; 1023 + } 1024 + 950 1025 /* Remove all port-specific data. */ 951 1026 static int remove_port(struct port *port) 952 1027 { ··· 1107 888 spin_lock_irq(&pdrvdata_lock); 1108 889 list_del(&port->cons.list); 1109 890 spin_unlock_irq(&pdrvdata_lock); 891 + #if 0 892 + /* 893 + * hvc_remove() not called as removing one hvc port 894 + * results in other hvc ports getting frozen. 895 + * 896 + * Once this is resolved in hvc, this functionality 897 + * will be enabled. Till that is done, the -EPIPE 898 + * return from get_chars() above will help 899 + * hvc_console.c to clean up on ports we remove here. 900 + */ 1110 901 hvc_remove(port->cons.hvc); 902 + #endif 1111 903 } 1112 904 if (port->guest_connected) 1113 905 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); ··· 1130 900 /* Remove unused data this port might have received. */ 1131 901 discard_port_data(port); 1132 902 903 + reclaim_consumed_buffers(port); 904 + 1133 905 /* Remove buffers we queued up for the Host to send us data in. 
*/ 1134 - while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) 906 + while ((buf = virtqueue_detach_unused_buf(port->in_vq))) 1135 907 free_buf(buf); 1136 908 1137 909 kfree(port->name); ··· 1156 924 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); 1157 925 1158 926 port = find_port_by_id(portdev, cpkt->id); 1159 - if (!port) { 927 + if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { 1160 928 /* No valid header at start of buffer. Drop it. */ 1161 929 dev_dbg(&portdev->vdev->dev, 1162 930 "Invalid index %u in control packet\n", cpkt->id); ··· 1164 932 } 1165 933 1166 934 switch (cpkt->event) { 935 + case VIRTIO_CONSOLE_PORT_ADD: 936 + if (port) { 937 + dev_dbg(&portdev->vdev->dev, 938 + "Port %u already added\n", port->id); 939 + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 940 + break; 941 + } 942 + if (cpkt->id >= portdev->config.max_nr_ports) { 943 + dev_warn(&portdev->vdev->dev, 944 + "Request for adding port with out-of-bound id %u, max. supported id: %u\n", 945 + cpkt->id, portdev->config.max_nr_ports - 1); 946 + break; 947 + } 948 + add_port(portdev, cpkt->id); 949 + break; 950 + case VIRTIO_CONSOLE_PORT_REMOVE: 951 + remove_port(port); 952 + break; 1167 953 case VIRTIO_CONSOLE_CONSOLE_PORT: 1168 954 if (!cpkt->value) 1169 955 break; ··· 1194 944 * have to notify the host first. 
1195 945 */ 1196 946 break; 1197 - case VIRTIO_CONSOLE_RESIZE: 947 + case VIRTIO_CONSOLE_RESIZE: { 948 + struct { 949 + __u16 rows; 950 + __u16 cols; 951 + } size; 952 + 1198 953 if (!is_console_port(port)) 1199 954 break; 955 + 956 + memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), 957 + sizeof(size)); 958 + set_console_size(port, size.rows, size.cols); 959 + 1200 960 port->cons.hvc->irq_requested = 1; 1201 961 resize_console(port); 1202 962 break; 963 + } 1203 964 case VIRTIO_CONSOLE_PORT_OPEN: 1204 965 port->host_connected = cpkt->value; 1205 966 wake_up_interruptible(&port->waitqueue); 967 + /* 968 + * If the host port got closed and the host had any 969 + * unconsumed buffers, we'll be able to reclaim them 970 + * now. 971 + */ 972 + spin_lock_irq(&port->outvq_lock); 973 + reclaim_consumed_buffers(port); 974 + spin_unlock_irq(&port->outvq_lock); 1206 975 break; 1207 976 case VIRTIO_CONSOLE_PORT_NAME: 1208 977 /* ··· 1259 990 kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); 1260 991 } 1261 992 break; 1262 - case VIRTIO_CONSOLE_PORT_REMOVE: 1263 - /* 1264 - * Hot unplug the port. We don't decrement nr_ports 1265 - * since we don't want to deal with extra complexities 1266 - * of using the lowest-available port id: We can just 1267 - * pick up the nr_ports number as the id and not have 1268 - * userspace send it to us. This helps us in two 1269 - * ways: 1270 - * 1271 - * - We don't need to have a 'port_id' field in the 1272 - * config space when a port is hot-added. This is a 1273 - * good thing as we might queue up multiple hotplug 1274 - * requests issued in our workqueue. 1275 - * 1276 - * - Another way to deal with this would have been to 1277 - * use a bitmap of the active ports and select the 1278 - * lowest non-active port from that map. That 1279 - * bloats the already tight config space and we 1280 - * would end up artificially limiting the 1281 - * max. number of ports to sizeof(bitmap). 
Right 1282 - * now we can support 2^32 ports (as the port id is 1283 - * stored in a u32 type). 1284 - * 1285 - */ 1286 - remove_port(port); 1287 - break; 1288 993 } 1289 994 } 1290 995 ··· 1273 1030 vq = portdev->c_ivq; 1274 1031 1275 1032 spin_lock(&portdev->cvq_lock); 1276 - while ((buf = vq->vq_ops->get_buf(vq, &len))) { 1033 + while ((buf = virtqueue_get_buf(vq, &len))) { 1277 1034 spin_unlock(&portdev->cvq_lock); 1278 1035 1279 1036 buf->len = len; ··· 1335 1092 struct ports_device *portdev; 1336 1093 1337 1094 portdev = vdev->priv; 1338 - if (use_multiport(portdev)) { 1339 - /* Handle port hot-add */ 1340 - schedule_work(&portdev->config_work); 1341 - } 1342 - /* 1343 - * We'll use this way of resizing only for legacy support. 1344 - * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use 1345 - * control messages to indicate console size changes so that 1346 - * it can be done per-port 1347 - */ 1348 - resize_console(find_port_by_id(portdev, 0)); 1349 - } 1350 1095 1351 - static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) 1352 - { 1353 - struct port_buffer *buf; 1354 - unsigned int nr_added_bufs; 1355 - int ret; 1356 - 1357 - nr_added_bufs = 0; 1358 - do { 1359 - buf = alloc_buf(PAGE_SIZE); 1360 - if (!buf) 1361 - break; 1362 - 1363 - spin_lock_irq(lock); 1364 - ret = add_inbuf(vq, buf); 1365 - if (ret < 0) { 1366 - spin_unlock_irq(lock); 1367 - free_buf(buf); 1368 - break; 1369 - } 1370 - nr_added_bufs++; 1371 - spin_unlock_irq(lock); 1372 - } while (ret > 0); 1373 - 1374 - return nr_added_bufs; 1375 - } 1376 - 1377 - static int add_port(struct ports_device *portdev, u32 id) 1378 - { 1379 - char debugfs_name[16]; 1380 - struct port *port; 1381 - struct port_buffer *buf; 1382 - dev_t devt; 1383 - unsigned int nr_added_bufs; 1384 - int err; 1385 - 1386 - port = kmalloc(sizeof(*port), GFP_KERNEL); 1387 - if (!port) { 1388 - err = -ENOMEM; 1389 - goto fail; 1390 - } 1391 - 1392 - port->portdev = portdev; 1393 - port->id = id; 1394 - 1395 - 
port->name = NULL; 1396 - port->inbuf = NULL; 1397 - port->cons.hvc = NULL; 1398 - 1399 - port->host_connected = port->guest_connected = false; 1400 - 1401 - port->in_vq = portdev->in_vqs[port->id]; 1402 - port->out_vq = portdev->out_vqs[port->id]; 1403 - 1404 - cdev_init(&port->cdev, &port_fops); 1405 - 1406 - devt = MKDEV(portdev->chr_major, id); 1407 - err = cdev_add(&port->cdev, devt, 1); 1408 - if (err < 0) { 1409 - dev_err(&port->portdev->vdev->dev, 1410 - "Error %d adding cdev for port %u\n", err, id); 1411 - goto free_port; 1412 - } 1413 - port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, 1414 - devt, port, "vport%up%u", 1415 - port->portdev->drv_index, id); 1416 - if (IS_ERR(port->dev)) { 1417 - err = PTR_ERR(port->dev); 1418 - dev_err(&port->portdev->vdev->dev, 1419 - "Error %d creating device for port %u\n", 1420 - err, id); 1421 - goto free_cdev; 1422 - } 1423 - 1424 - spin_lock_init(&port->inbuf_lock); 1425 - init_waitqueue_head(&port->waitqueue); 1426 - 1427 - /* Fill the in_vq with buffers so the host can send us data. */ 1428 - nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); 1429 - if (!nr_added_bufs) { 1430 - dev_err(port->dev, "Error allocating inbufs\n"); 1431 - err = -ENOMEM; 1432 - goto free_device; 1433 - } 1434 - 1435 - /* 1436 - * If we're not using multiport support, this has to be a console port 1437 - */ 1438 - if (!use_multiport(port->portdev)) { 1439 - err = init_port_console(port); 1440 - if (err) 1441 - goto free_inbufs; 1442 - } 1443 - 1444 - spin_lock_irq(&portdev->ports_lock); 1445 - list_add_tail(&port->list, &port->portdev->ports); 1446 - spin_unlock_irq(&portdev->ports_lock); 1447 - 1448 - /* 1449 - * Tell the Host we're set so that it can send us various 1450 - * configuration parameters for this port (eg, port name, 1451 - * caching, whether this is a console port, etc.) 
1452 - */ 1453 - send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 1454 - 1455 - if (pdrvdata.debugfs_dir) { 1456 - /* 1457 - * Finally, create the debugfs file that we can use to 1458 - * inspect a port's state at any time 1459 - */ 1460 - sprintf(debugfs_name, "vport%up%u", 1461 - port->portdev->drv_index, id); 1462 - port->debugfs_file = debugfs_create_file(debugfs_name, 0444, 1463 - pdrvdata.debugfs_dir, 1464 - port, 1465 - &port_debugfs_ops); 1466 - } 1467 - return 0; 1468 - 1469 - free_inbufs: 1470 - while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) 1471 - free_buf(buf); 1472 - free_device: 1473 - device_destroy(pdrvdata.class, port->dev->devt); 1474 - free_cdev: 1475 - cdev_del(&port->cdev); 1476 - free_port: 1477 - kfree(port); 1478 - fail: 1479 - return err; 1480 - } 1481 - 1482 - /* 1483 - * The workhandler for config-space updates. 1484 - * 1485 - * This is called when ports are hot-added. 1486 - */ 1487 - static void config_work_handler(struct work_struct *work) 1488 - { 1489 - struct virtio_console_multiport_conf virtconconf; 1490 - struct ports_device *portdev; 1491 - struct virtio_device *vdev; 1492 - int err; 1493 - 1494 - portdev = container_of(work, struct ports_device, config_work); 1495 - 1496 - vdev = portdev->vdev; 1497 - vdev->config->get(vdev, 1498 - offsetof(struct virtio_console_multiport_conf, 1499 - nr_ports), 1500 - &virtconconf.nr_ports, 1501 - sizeof(virtconconf.nr_ports)); 1502 - 1503 - if (portdev->config.nr_ports == virtconconf.nr_ports) { 1504 - /* 1505 - * Port 0 got hot-added. Since we already did all the 1506 - * other initialisation for it, just tell the Host 1507 - * that the port is ready if we find the port. In 1508 - * case the port was hot-removed earlier, we call 1509 - * add_port to add the port. 
1510 - */ 1096 + if (!use_multiport(portdev)) { 1511 1097 struct port *port; 1098 + u16 rows, cols; 1099 + 1100 + vdev->config->get(vdev, 1101 + offsetof(struct virtio_console_config, cols), 1102 + &cols, sizeof(u16)); 1103 + vdev->config->get(vdev, 1104 + offsetof(struct virtio_console_config, rows), 1105 + &rows, sizeof(u16)); 1512 1106 1513 1107 port = find_port_by_id(portdev, 0); 1514 - if (!port) 1515 - add_port(portdev, 0); 1516 - else 1517 - send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 1518 - return; 1519 - } 1520 - if (virtconconf.nr_ports > portdev->config.max_nr_ports) { 1521 - dev_warn(&vdev->dev, 1522 - "More ports specified (%u) than allowed (%u)", 1523 - portdev->config.nr_ports + 1, 1524 - portdev->config.max_nr_ports); 1525 - return; 1526 - } 1527 - if (virtconconf.nr_ports < portdev->config.nr_ports) 1528 - return; 1108 + set_console_size(port, rows, cols); 1529 1109 1530 - /* Hot-add ports */ 1531 - while (virtconconf.nr_ports - portdev->config.nr_ports) { 1532 - err = add_port(portdev, portdev->config.nr_ports); 1533 - if (err) 1534 - break; 1535 - portdev->config.nr_ports++; 1110 + /* 1111 + * We'll use this way of resizing only for legacy 1112 + * support. For newer userspace 1113 + * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages 1114 + * to indicate console size changes so that it can be 1115 + * done per-port. 
1116 + */ 1117 + resize_console(port); 1536 1118 } 1537 1119 } 1538 1120 ··· 1482 1414 static int __devinit virtcons_probe(struct virtio_device *vdev) 1483 1415 { 1484 1416 struct ports_device *portdev; 1485 - u32 i; 1486 1417 int err; 1487 1418 bool multiport; 1488 1419 ··· 1510 1443 } 1511 1444 1512 1445 multiport = false; 1513 - portdev->config.nr_ports = 1; 1514 1446 portdev->config.max_nr_ports = 1; 1515 - #if 0 /* Multiport is not quite ready yet --RR */ 1516 1447 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { 1517 1448 multiport = true; 1518 1449 vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; 1519 1450 1520 - vdev->config->get(vdev, 1521 - offsetof(struct virtio_console_multiport_conf, 1522 - nr_ports), 1523 - &portdev->config.nr_ports, 1524 - sizeof(portdev->config.nr_ports)); 1525 - vdev->config->get(vdev, 1526 - offsetof(struct virtio_console_multiport_conf, 1527 - max_nr_ports), 1451 + vdev->config->get(vdev, offsetof(struct virtio_console_config, 1452 + max_nr_ports), 1528 1453 &portdev->config.max_nr_ports, 1529 1454 sizeof(portdev->config.max_nr_ports)); 1530 - if (portdev->config.nr_ports > portdev->config.max_nr_ports) { 1531 - dev_warn(&vdev->dev, 1532 - "More ports (%u) specified than allowed (%u). 
Will init %u ports.", 1533 - portdev->config.nr_ports, 1534 - portdev->config.max_nr_ports, 1535 - portdev->config.max_nr_ports); 1536 - 1537 - portdev->config.nr_ports = portdev->config.max_nr_ports; 1538 - } 1539 1455 } 1540 1456 1541 1457 /* Let the Host know we support multiple ports.*/ 1542 1458 vdev->config->finalize_features(vdev); 1543 - #endif 1544 1459 1545 1460 err = init_vqs(portdev); 1546 1461 if (err < 0) { ··· 1538 1489 1539 1490 spin_lock_init(&portdev->cvq_lock); 1540 1491 INIT_WORK(&portdev->control_work, &control_work_handler); 1541 - INIT_WORK(&portdev->config_work, &config_work_handler); 1542 1492 1543 1493 nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); 1544 1494 if (!nr_added_bufs) { ··· 1546 1498 err = -ENOMEM; 1547 1499 goto free_vqs; 1548 1500 } 1501 + } else { 1502 + /* 1503 + * For backward compatibility: Create a console port 1504 + * if we're running on older host. 1505 + */ 1506 + add_port(portdev, 0); 1549 1507 } 1550 1508 1551 - for (i = 0; i < portdev->config.nr_ports; i++) 1552 - add_port(portdev, i); 1553 - 1554 - /* Start using the new console output. 
*/ 1555 - early_put_chars = NULL; 1509 + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, 1510 + VIRTIO_CONSOLE_DEVICE_READY, 1); 1556 1511 return 0; 1557 1512 1558 1513 free_vqs: 1514 + /* The host might want to notify mgmt sw about device add failure */ 1515 + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, 1516 + VIRTIO_CONSOLE_DEVICE_READY, 0); 1559 1517 vdev->config->del_vqs(vdev); 1560 1518 kfree(portdev->in_vqs); 1561 1519 kfree(portdev->out_vqs); ··· 1583 1529 portdev = vdev->priv; 1584 1530 1585 1531 cancel_work_sync(&portdev->control_work); 1586 - cancel_work_sync(&portdev->config_work); 1587 1532 1588 1533 list_for_each_entry_safe(port, port2, &portdev->ports, list) 1589 1534 remove_port(port); 1590 1535 1591 1536 unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 1592 1537 1593 - while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) 1538 + while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) 1594 1539 free_buf(buf); 1595 1540 1596 - while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) 1541 + while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) 1597 1542 free_buf(buf); 1598 1543 1599 1544 vdev->config->del_vqs(vdev); ··· 1609 1556 1610 1557 static unsigned int features[] = { 1611 1558 VIRTIO_CONSOLE_F_SIZE, 1559 + VIRTIO_CONSOLE_F_MULTIPORT, 1612 1560 }; 1613 1561 1614 1562 static struct virtio_driver virtio_console = {
+23 -23
drivers/net/virtio_net.c
··· 122 122 struct virtnet_info *vi = svq->vdev->priv; 123 123 124 124 /* Suppress further interrupts. */ 125 - svq->vq_ops->disable_cb(svq); 125 + virtqueue_disable_cb(svq); 126 126 127 127 /* We were probably waiting for more output buffers. */ 128 128 netif_wake_queue(vi->dev); ··· 210 210 return -EINVAL; 211 211 } 212 212 213 - page = vi->rvq->vq_ops->get_buf(vi->rvq, &len); 213 + page = virtqueue_get_buf(vi->rvq, &len); 214 214 if (!page) { 215 215 pr_debug("%s: rx error: %d buffers missing\n", 216 216 skb->dev->name, hdr->mhdr.num_buffers); ··· 340 340 341 341 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); 342 342 343 - err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb); 343 + err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb); 344 344 if (err < 0) 345 345 dev_kfree_skb(skb); 346 346 ··· 385 385 386 386 /* chain first in list head */ 387 387 first->private = (unsigned long)list; 388 - err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 388 + err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 389 389 first); 390 390 if (err < 0) 391 391 give_pages(vi, first); ··· 404 404 405 405 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); 406 406 407 - err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page); 407 + err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page); 408 408 if (err < 0) 409 409 give_pages(vi, page); 410 410 ··· 433 433 } while (err > 0); 434 434 if (unlikely(vi->num > vi->max)) 435 435 vi->max = vi->num; 436 - vi->rvq->vq_ops->kick(vi->rvq); 436 + virtqueue_kick(vi->rvq); 437 437 return !oom; 438 438 } 439 439 ··· 442 442 struct virtnet_info *vi = rvq->vdev->priv; 443 443 /* Schedule NAPI, Suppress further interrupts if successful. 
*/ 444 444 if (napi_schedule_prep(&vi->napi)) { 445 - rvq->vq_ops->disable_cb(rvq); 445 + virtqueue_disable_cb(rvq); 446 446 __napi_schedule(&vi->napi); 447 447 } 448 448 } ··· 471 471 472 472 again: 473 473 while (received < budget && 474 - (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { 474 + (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) { 475 475 receive_buf(vi->dev, buf, len); 476 476 --vi->num; 477 477 received++; ··· 485 485 /* Out of packets? */ 486 486 if (received < budget) { 487 487 napi_complete(napi); 488 - if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && 488 + if (unlikely(!virtqueue_enable_cb(vi->rvq)) && 489 489 napi_schedule_prep(napi)) { 490 - vi->rvq->vq_ops->disable_cb(vi->rvq); 490 + virtqueue_disable_cb(vi->rvq); 491 491 __napi_schedule(napi); 492 492 goto again; 493 493 } ··· 501 501 struct sk_buff *skb; 502 502 unsigned int len, tot_sgs = 0; 503 503 504 - while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { 504 + while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { 505 505 pr_debug("Sent skb %p\n", skb); 506 506 vi->dev->stats.tx_bytes += skb->len; 507 507 vi->dev->stats.tx_packets++; ··· 554 554 sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr); 555 555 556 556 hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1; 557 - return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg, 557 + return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg, 558 558 0, skb); 559 559 } 560 560 ··· 574 574 if (unlikely(capacity < 0)) { 575 575 netif_stop_queue(dev); 576 576 dev_warn(&dev->dev, "Unexpected full queue\n"); 577 - if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 578 - vi->svq->vq_ops->disable_cb(vi->svq); 577 + if (unlikely(!virtqueue_enable_cb(vi->svq))) { 578 + virtqueue_disable_cb(vi->svq); 579 579 netif_start_queue(dev); 580 580 goto again; 581 581 } 582 582 return NETDEV_TX_BUSY; 583 583 } 584 - vi->svq->vq_ops->kick(vi->svq); 584 + virtqueue_kick(vi->svq); 585 585 586 586 /* Don't 
wait up for transmitted skbs to be freed. */ 587 587 skb_orphan(skb); ··· 591 591 * before it gets out of hand. Naturally, this wastes entries. */ 592 592 if (capacity < 2+MAX_SKB_FRAGS) { 593 593 netif_stop_queue(dev); 594 - if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 594 + if (unlikely(!virtqueue_enable_cb(vi->svq))) { 595 595 /* More just got used, free them then recheck. */ 596 596 capacity += free_old_xmit_skbs(vi); 597 597 if (capacity >= 2+MAX_SKB_FRAGS) { 598 598 netif_start_queue(dev); 599 - vi->svq->vq_ops->disable_cb(vi->svq); 599 + virtqueue_disable_cb(vi->svq); 600 600 } 601 601 } 602 602 } ··· 641 641 * now. virtnet_poll wants re-enable the queue, so we disable here. 642 642 * We synchronize against interrupts via NAPI_STATE_SCHED */ 643 643 if (napi_schedule_prep(&vi->napi)) { 644 - vi->rvq->vq_ops->disable_cb(vi->rvq); 644 + virtqueue_disable_cb(vi->rvq); 645 645 __napi_schedule(&vi->napi); 646 646 } 647 647 return 0; ··· 678 678 sg_set_buf(&sg[i + 1], sg_virt(s), s->length); 679 679 sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); 680 680 681 - BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0); 681 + BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0); 682 682 683 - vi->cvq->vq_ops->kick(vi->cvq); 683 + virtqueue_kick(vi->cvq); 684 684 685 685 /* 686 686 * Spin for a response, the kick causes an ioport write, trapping 687 687 * into the hypervisor, so the request should be handled immediately. 
688 688 */ 689 - while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp)) 689 + while (!virtqueue_get_buf(vi->cvq, &tmp)) 690 690 cpu_relax(); 691 691 692 692 return status == VIRTIO_NET_OK; ··· 1003 1003 { 1004 1004 void *buf; 1005 1005 while (1) { 1006 - buf = vi->svq->vq_ops->detach_unused_buf(vi->svq); 1006 + buf = virtqueue_detach_unused_buf(vi->svq); 1007 1007 if (!buf) 1008 1008 break; 1009 1009 dev_kfree_skb(buf); 1010 1010 } 1011 1011 while (1) { 1012 - buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq); 1012 + buf = virtqueue_detach_unused_buf(vi->rvq); 1013 1013 if (!buf) 1014 1014 break; 1015 1015 if (vi->mergeable_rx_bufs || vi->big_packets)
+8 -9
drivers/virtio/virtio_balloon.c
··· 75 75 struct virtio_balloon *vb; 76 76 unsigned int len; 77 77 78 - vb = vq->vq_ops->get_buf(vq, &len); 78 + vb = virtqueue_get_buf(vq, &len); 79 79 if (vb) 80 80 complete(&vb->acked); 81 81 } ··· 89 89 init_completion(&vb->acked); 90 90 91 91 /* We should always be able to add one buffer to an empty queue. */ 92 - if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) 92 + if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) 93 93 BUG(); 94 - vq->vq_ops->kick(vq); 94 + virtqueue_kick(vq); 95 95 96 96 /* When host has read buffer, this completes via balloon_ack */ 97 97 wait_for_completion(&vb->acked); ··· 204 204 struct virtio_balloon *vb; 205 205 unsigned int len; 206 206 207 - vb = vq->vq_ops->get_buf(vq, &len); 207 + vb = virtqueue_get_buf(vq, &len); 208 208 if (!vb) 209 209 return; 210 210 vb->need_stats_update = 1; ··· 221 221 222 222 vq = vb->stats_vq; 223 223 sg_init_one(&sg, vb->stats, sizeof(vb->stats)); 224 - if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) 224 + if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) 225 225 BUG(); 226 - vq->vq_ops->kick(vq); 226 + virtqueue_kick(vq); 227 227 } 228 228 229 229 static void virtballoon_changed(struct virtio_device *vdev) ··· 314 314 * use it to signal us later. 315 315 */ 316 316 sg_init_one(&sg, vb->stats, sizeof vb->stats); 317 - if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq, 318 - &sg, 1, 0, vb) < 0) 317 + if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0) 319 318 BUG(); 320 - vb->stats_vq->vq_ops->kick(vb->stats_vq); 319 + virtqueue_kick(vb->stats_vq); 321 320 } 322 321 323 322 vb->thread = kthread_run(balloon, vb, "vballoon");
+21 -23
drivers/virtio/virtio_ring.c
··· 110 110 static int vring_add_indirect(struct vring_virtqueue *vq, 111 111 struct scatterlist sg[], 112 112 unsigned int out, 113 - unsigned int in) 113 + unsigned int in, 114 + gfp_t gfp) 114 115 { 115 116 struct vring_desc *desc; 116 117 unsigned head; 117 118 int i; 118 119 119 - desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC); 120 + desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); 120 121 if (!desc) 121 122 return vq->vring.num; 122 123 ··· 156 155 return head; 157 156 } 158 157 159 - static int vring_add_buf(struct virtqueue *_vq, 160 - struct scatterlist sg[], 161 - unsigned int out, 162 - unsigned int in, 163 - void *data) 158 + int virtqueue_add_buf_gfp(struct virtqueue *_vq, 159 + struct scatterlist sg[], 160 + unsigned int out, 161 + unsigned int in, 162 + void *data, 163 + gfp_t gfp) 164 164 { 165 165 struct vring_virtqueue *vq = to_vvq(_vq); 166 166 unsigned int i, avail, head, uninitialized_var(prev); ··· 173 171 /* If the host supports indirect descriptor tables, and we have multiple 174 172 * buffers, then go indirect. FIXME: tune this threshold */ 175 173 if (vq->indirect && (out + in) > 1 && vq->num_free) { 176 - head = vring_add_indirect(vq, sg, out, in); 174 + head = vring_add_indirect(vq, sg, out, in, gfp); 177 175 if (head != vq->vring.num) 178 176 goto add_head; 179 177 } ··· 234 232 return vq->num_free ? 
vq->vring.num : 0; 235 233 return vq->num_free; 236 234 } 235 + EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); 237 236 238 - static void vring_kick(struct virtqueue *_vq) 237 + void virtqueue_kick(struct virtqueue *_vq) 239 238 { 240 239 struct vring_virtqueue *vq = to_vvq(_vq); 241 240 START_USE(vq); ··· 256 253 257 254 END_USE(vq); 258 255 } 256 + EXPORT_SYMBOL_GPL(virtqueue_kick); 259 257 260 258 static void detach_buf(struct vring_virtqueue *vq, unsigned int head) 261 259 { ··· 288 284 return vq->last_used_idx != vq->vring.used->idx; 289 285 } 290 286 291 - static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) 287 + void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) 292 288 { 293 289 struct vring_virtqueue *vq = to_vvq(_vq); 294 290 void *ret; ··· 329 325 END_USE(vq); 330 326 return ret; 331 327 } 328 + EXPORT_SYMBOL_GPL(virtqueue_get_buf); 332 329 333 - static void vring_disable_cb(struct virtqueue *_vq) 330 + void virtqueue_disable_cb(struct virtqueue *_vq) 334 331 { 335 332 struct vring_virtqueue *vq = to_vvq(_vq); 336 333 337 334 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; 338 335 } 336 + EXPORT_SYMBOL_GPL(virtqueue_disable_cb); 339 337 340 - static bool vring_enable_cb(struct virtqueue *_vq) 338 + bool virtqueue_enable_cb(struct virtqueue *_vq) 341 339 { 342 340 struct vring_virtqueue *vq = to_vvq(_vq); 343 341 ··· 357 351 END_USE(vq); 358 352 return true; 359 353 } 354 + EXPORT_SYMBOL_GPL(virtqueue_enable_cb); 360 355 361 - static void *vring_detach_unused_buf(struct virtqueue *_vq) 356 + void *virtqueue_detach_unused_buf(struct virtqueue *_vq) 362 357 { 363 358 struct vring_virtqueue *vq = to_vvq(_vq); 364 359 unsigned int i; ··· 382 375 END_USE(vq); 383 376 return NULL; 384 377 } 378 + EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); 385 379 386 380 irqreturn_t vring_interrupt(int irq, void *_vq) 387 381 { ··· 403 395 return IRQ_HANDLED; 404 396 } 405 397 EXPORT_SYMBOL_GPL(vring_interrupt); 406 - 407 - static struct 
virtqueue_ops vring_vq_ops = { 408 - .add_buf = vring_add_buf, 409 - .get_buf = vring_get_buf, 410 - .kick = vring_kick, 411 - .disable_cb = vring_disable_cb, 412 - .enable_cb = vring_enable_cb, 413 - .detach_unused_buf = vring_detach_unused_buf, 414 - }; 415 398 416 399 struct virtqueue *vring_new_virtqueue(unsigned int num, 417 400 unsigned int vring_align, ··· 428 429 vring_init(&vq->vring, num, pages, vring_align); 429 430 vq->vq.callback = callback; 430 431 vq->vq.vdev = vdev; 431 - vq->vq.vq_ops = &vring_vq_ops; 432 432 vq->vq.name = name; 433 433 vq->notify = notify; 434 434 vq->broken = false;
+33 -22
include/linux/virtio.h
··· 7 7 #include <linux/spinlock.h> 8 8 #include <linux/device.h> 9 9 #include <linux/mod_devicetable.h> 10 + #include <linux/gfp.h> 10 11 11 12 /** 12 13 * virtqueue - a queue to register buffers for sending or receiving. ··· 15 14 * @callback: the function to call when buffers are consumed (can be NULL). 16 15 * @name: the name of this virtqueue (mainly for debugging) 17 16 * @vdev: the virtio device this queue was created for. 18 - * @vq_ops: the operations for this virtqueue (see below). 19 17 * @priv: a pointer for the virtqueue implementation to use. 20 18 */ 21 19 struct virtqueue { ··· 22 22 void (*callback)(struct virtqueue *vq); 23 23 const char *name; 24 24 struct virtio_device *vdev; 25 - struct virtqueue_ops *vq_ops; 26 25 void *priv; 27 26 }; 28 27 29 28 /** 30 - * virtqueue_ops - operations for virtqueue abstraction layer 31 - * @add_buf: expose buffer to other end 29 + * operations for virtqueue 30 + * virtqueue_add_buf: expose buffer to other end 32 31 * vq: the struct virtqueue we're talking about. 33 32 * sg: the description of the buffer(s). 34 33 * out_num: the number of sg readable by other side 35 34 * in_num: the number of sg which are writable (after readable ones) 36 35 * data: the token identifying the buffer. 36 + * gfp: how to do memory allocations (if necessary). 37 37 * Returns remaining capacity of queue (sg segments) or a negative error. 38 - * @kick: update after add_buf 38 + * virtqueue_kick: update after add_buf 39 39 * vq: the struct virtqueue 40 40 * After one or more add_buf calls, invoke this to kick the other side. 41 - * @get_buf: get the next used buffer 41 + * virtqueue_get_buf: get the next used buffer 42 42 * vq: the struct virtqueue we're talking about. 43 43 * len: the length written into the buffer 44 44 * Returns NULL or the "data" token handed to add_buf. 45 - * @disable_cb: disable callbacks 45 + * virtqueue_disable_cb: disable callbacks 46 46 * vq: the struct virtqueue we're talking about. 
47 47 * Note that this is not necessarily synchronous, hence unreliable and only 48 48 * useful as an optimization. 49 - * @enable_cb: restart callbacks after disable_cb. 49 + * virtqueue_enable_cb: restart callbacks after disable_cb. 50 50 * vq: the struct virtqueue we're talking about. 51 51 * This re-enables callbacks; it returns "false" if there are pending 52 52 * buffers in the queue, to detect a possible race between the driver 53 53 * checking for more work, and enabling callbacks. 54 - * @detach_unused_buf: detach first unused buffer 54 + * virtqueue_detach_unused_buf: detach first unused buffer 55 55 * vq: the struct virtqueue we're talking about. 56 56 * Returns NULL or the "data" token handed to add_buf 57 57 * 58 58 * Locking rules are straightforward: the driver is responsible for 59 59 * locking. No two operations may be invoked simultaneously, with the exception 60 - * of @disable_cb. 60 + * of virtqueue_disable_cb. 61 61 * 62 62 * All operations can be called in any context. 
63 63 */ 64 - struct virtqueue_ops { 65 - int (*add_buf)(struct virtqueue *vq, 66 - struct scatterlist sg[], 67 - unsigned int out_num, 68 - unsigned int in_num, 69 - void *data); 70 64 71 - void (*kick)(struct virtqueue *vq); 65 + int virtqueue_add_buf_gfp(struct virtqueue *vq, 66 + struct scatterlist sg[], 67 + unsigned int out_num, 68 + unsigned int in_num, 69 + void *data, 70 + gfp_t gfp); 72 71 73 - void *(*get_buf)(struct virtqueue *vq, unsigned int *len); 72 + static inline int virtqueue_add_buf(struct virtqueue *vq, 73 + struct scatterlist sg[], 74 + unsigned int out_num, 75 + unsigned int in_num, 76 + void *data) 77 + { 78 + return virtqueue_add_buf_gfp(vq, sg, out_num, in_num, data, GFP_ATOMIC); 79 + } 74 80 75 - void (*disable_cb)(struct virtqueue *vq); 76 - bool (*enable_cb)(struct virtqueue *vq); 77 - void *(*detach_unused_buf)(struct virtqueue *vq); 78 - }; 81 + void virtqueue_kick(struct virtqueue *vq); 82 + 83 + void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); 84 + 85 + void virtqueue_disable_cb(struct virtqueue *vq); 86 + 87 + bool virtqueue_enable_cb(struct virtqueue *vq); 88 + 89 + void *virtqueue_detach_unused_buf(struct virtqueue *vq); 79 90 80 91 /** 81 92 * virtio_device - representation of a device using virtio
+5
include/linux/virtio_blk.h
··· 17 17 #define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */ 18 18 #define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */ 19 19 20 + #define VIRTIO_BLK_ID_BYTES 20 /* ID string length */ 21 + 20 22 struct virtio_blk_config { 21 23 /* The capacity (in 512-byte sectors). */ 22 24 __u64 capacity; ··· 68 66 69 67 /* Cache flush command */ 70 68 #define VIRTIO_BLK_T_FLUSH 4 69 + 70 + /* Get device ID command */ 71 + #define VIRTIO_BLK_T_GET_ID 8 71 72 72 73 /* Barrier before this op. */ 73 74 #define VIRTIO_BLK_T_BARRIER 0x80000000
+25
include/linux/virtio_console.h
··· 12 12 13 13 /* Feature bits */ 14 14 #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ 15 + #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ 16 + 17 + #define VIRTIO_CONSOLE_BAD_ID (~(u32)0) 15 18 16 19 struct virtio_console_config { 17 20 /* colums of the screens */ 18 21 __u16 cols; 19 22 /* rows of the screens */ 20 23 __u16 rows; 24 + /* max. number of ports this device can hold */ 25 + __u32 max_nr_ports; 21 26 } __attribute__((packed)); 27 + 28 + /* 29 + * A message that's passed between the Host and the Guest for a 30 + * particular port. 31 + */ 32 + struct virtio_console_control { 33 + __u32 id; /* Port number */ 34 + __u16 event; /* The kind of control event (see below) */ 35 + __u16 value; /* Extra information for the key */ 36 + }; 37 + 38 + /* Some events for control messages */ 39 + #define VIRTIO_CONSOLE_DEVICE_READY 0 40 + #define VIRTIO_CONSOLE_PORT_ADD 1 41 + #define VIRTIO_CONSOLE_PORT_REMOVE 2 42 + #define VIRTIO_CONSOLE_PORT_READY 3 43 + #define VIRTIO_CONSOLE_CONSOLE_PORT 4 44 + #define VIRTIO_CONSOLE_RESIZE 5 45 + #define VIRTIO_CONSOLE_PORT_OPEN 6 46 + #define VIRTIO_CONSOLE_PORT_NAME 7 22 47 23 48 #ifdef __KERNEL__ 24 49 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
+3 -3
net/9p/trans_virtio.c
··· 137 137 138 138 P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n"); 139 139 140 - while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) { 140 + while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) { 141 141 P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc); 142 142 P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); 143 143 req = p9_tag_lookup(chan->client, rc->tag); ··· 209 209 210 210 req->status = REQ_STATUS_SENT; 211 211 212 - if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { 212 + if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { 213 213 P9_DPRINTK(P9_DEBUG_TRANS, 214 214 "9p debug: virtio rpc add_buf returned failure"); 215 215 return -EIO; 216 216 } 217 217 218 - chan->vq->vq_ops->kick(chan->vq); 218 + virtqueue_kick(chan->vq); 219 219 220 220 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n"); 221 221 return 0;