/* drivers/vhost/test.c at v3.10-rc3 */
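/*
 * Orientation note (added): vhost-test is a dummy vhost backend. It drains
 * a single virtqueue as fast as possible, completing each buffer without
 * touching its contents, so the vhost infrastructure itself can be
 * exercised and benchmarked. Its userspace counterpart is the virtio ring
 * test harness in tools/virtio.
 */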
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.c"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to always be run from the workqueue, which acts as a
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	private = rcu_dereference_check(vq->private_data, 1);
	if (!private)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	f->private_data = n;

	return 0;
}

static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->dev.vqs[index].poll);
}
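/*
 * Stop/flush protocol (added note): vhost_test_stop_vq() clears
 * private_data under the vq mutex, and vhost_test_flush() then waits for
 * any handle_vq work still executing on the worker. After the stop + flush
 * pair used below, the worker can only observe the NULL pointer, so the
 * device can be torn down safely.
 */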
static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n);
	return 0;
}

static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that the ring has been set up correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* Swap in the new private pointer; non-NULL starts the vq. */
		oldpriv = rcu_dereference_protected(vq->private_data,
						    lockdep_is_held(&vq->mutex));
		rcu_assign_pointer(vq->private_data, priv);

		r = vhost_init_used(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	vhost_test_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
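		/* Added note: the generic dev/vring ioctls can change state
		 * that queued work depends on (memory table, vring layout);
		 * flush so in-flight work finishes before the ioctl returns.
		 */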
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};

static int vhost_test_init(void)
{
	return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);

static void vhost_test_exit(void)
{
	misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");
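For context, a sketch of how a userspace harness might drive this module. It is hypothetical and heavily elided: it assumes the companion test.h defines the VHOST_TEST_RUN ioctl handled above, and it omits the guest-memory and vring setup (VHOST_SET_MEM_TABLE, the VHOST_SET_VRING_* calls, and the kick/call eventfds) that a real harness such as tools/virtio/virtio_test.c performs first; without that setup, vhost_test_run() fails its vhost_vq_access_ok() check with -EFAULT.

/* Hypothetical minimal driver for /dev/vhost-test (error checks trimmed). */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include "test.h"		/* assumed to provide VHOST_TEST_RUN */

int main(void)
{
	uint64_t features;
	int test, fd = open("/dev/vhost-test", O_RDWR);

	if (fd < 0)
		return 1;
	ioctl(fd, VHOST_SET_OWNER, NULL);	  /* claim the device */
	ioctl(fd, VHOST_GET_FEATURES, &features);
	ioctl(fd, VHOST_SET_FEATURES, &features); /* ack what was offered */

	/* ... VHOST_SET_MEM_TABLE + VHOST_SET_VRING_* setup elided ... */

	test = 1;
	ioctl(fd, VHOST_TEST_RUN, &test);	/* start consuming buffers */
	/* ... add buffers to the ring and signal the kick eventfd ... */
	test = 0;
	ioctl(fd, VHOST_TEST_RUN, &test);	/* stop the vq */
	close(fd);
	return 0;
}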