// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * pkts.
 */
#define VHOST_TEST_PKT_WEIGHT 256

/* Feature bits advertised to userspace: just the generic vhost set. */
static const int vhost_test_bits[] = {
	VHOST_FEATURES
};

#define VHOST_TEST_FEATURES VHOST_FEATURES_U64(vhost_test_bits, 0)

/* This test device exposes exactly one virtqueue. */
enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

/* Per-open-file device state: the vhost device plus its single vq. */
struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU.
 *
 * Drains the test vq: each available descriptor must be out-only
 * (host-readable); its buffer is consumed without copying any data and
 * completed with a used length of 0.  Stops after VHOST_TEST_WEIGHT
 * bytes so one vq cannot starve others, or on the first error until
 * the next kick.
 */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	/* NULL backend means the vq is stopped (see vhost_test_stop_vq). */
	private = vhost_vq_get_backend(vq);
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				/* Raced with a new buffer appearing while
				 * re-enabling notification: disable again
				 * and go pick it up. */
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		/* Only out (host-readable) descriptors are valid here. */
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, int %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		/* Consume the buffer; 0 = no bytes written back to guest. */
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		/* Byte-weight limit reached: requeue the work and yield. */
		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
			break;
	}

	mutex_unlock(&vq->mutex);
}

/* vhost work callback: invoked on a guest kick of our vq. */
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

/*
 * open(): allocate per-file device state and initialize the vhost device
 * with the single test vq.  The separate vqs pointer array is retained by
 * the vhost core (freed via n->dev.vqs in release).
 */
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);

	f->private_data = n;

	return 0;
}

/* Detach and return the vq's backend under the vq mutex; a NULL backend
 * makes handle_vq() a no-op from then on. */
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);
	return private;
}

/* Stop the (only) vq, handing the old backend pointer back to the caller. */
static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

/* Wait for all queued vhost work on this device to finish. */
static void vhost_test_flush(struct vhost_test *n)
{
	vhost_dev_flush(&n->dev);
}

/*
 * release(): stop the vq, flush in-flight work, then tear down the vhost
 * device.  Order matters: the backend must be cleared and outstanding work
 * flushed before vhost_dev_stop()/vhost_dev_cleanup() free resources it
 * might touch.
 */
static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	/* The vqs pointer array allocated in open() is owned via dev.vqs. */
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}

/*
 * VHOST_TEST_RUN handler.  test == 1 starts the vq (backend = n),
 * test == 0 stops it (backend = NULL); any other value is -EINVAL.
 * Caller must be the device owner and the ring must be set up.
 */
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* start polling new socket */
		oldpriv = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, priv);

		r = vhost_vq_init_access(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		/* If a backend was already running, wait for its queued
		 * work to drain before returning. */
		if (oldpriv) {
			vhost_test_flush(n);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

/*
 * VHOST_RESET_OWNER handler: stop the vq, flush, and reset device
 * ownership with a freshly allocated iotlb.  Only the current owner may
 * do this.
 */
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	/* Allocate the replacement iotlb before tearing anything down. */
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

/*
 * VHOST_SET_FEATURES handler: record the acked feature bits on the vq.
 * Rejects VHOST_F_LOG_ALL unless the log buffer is accessible.
 * (Feature validation against VHOST_TEST_FEATURES happens in the ioctl
 * dispatcher before this is called.)
 */
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

/*
 * VHOST_TEST_SET_BACKEND handler: fd == -1 disables the vq (polling is
 * stopped and the backend pointer parked), any other fd re-enables it
 * (backend restored, ring access re-initialized, polling restarted).
 */
static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
{
	/* NOTE(review): function-local static — the parked backend pointer
	 * is shared by every open instance of this device, not per-file.
	 * Presumably acceptable for a single-user test device; confirm if
	 * multiple opens are expected. */
	static void *backend;

	const bool enable = fd != -1;
	struct vhost_virtqueue *vq;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_TEST_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	if (!enable) {
		vhost_poll_stop(&vq->poll);
		backend = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, NULL);
	} else {
		vhost_vq_set_backend(vq, backend);
		r = vhost_vq_init_access(vq);
		if (r == 0)
			r = vhost_poll_start(&vq->poll, vq->kick);
	}

	mutex_unlock(&vq->mutex);

	/* After re-enabling, drain any work queued in the window above. */
	if (enable) {
		vhost_test_flush(n);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

/*
 * ioctl dispatcher.  Test-specific ioctls are handled here; everything
 * else falls through to the generic vhost device/vring ioctl handlers
 * under the device mutex, followed by a flush.
 */
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_vring_file backend;
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;
	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_TEST_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_test_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_TEST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		/* Refuse any feature bit we did not advertise. */
		if (features & ~VHOST_TEST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

/* Registered as a dynamic-minor misc device: /dev/vhost-test. */
static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");