Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

- new vdpa features to allow creation and deletion of new devices

- virtio-blk support per-device queue depth

- fixes, cleanups all over the place

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (31 commits)
virtio-input: add multi-touch support
virtio_mmio: fix one typo
vdpa/mlx5: fix param validation in mlx5_vdpa_get_config()
virtio_net: Fix fall-through warnings for Clang
virtio_input: Prevent EV_MSC/MSC_TIMESTAMP loop storm for MT.
virtio-blk: support per-device queue depth
virtio_vdpa: don't warn when fail to disable vq
virtio-pci: introduce modern device module
virtio-pci-modern: rename map_capability() to vp_modern_map_capability()
virtio-pci-modern: introduce helper to get notification offset
virtio-pci-modern: introduce helper for getting queue nums
virtio-pci-modern: introduce helper for setting/getting queue size
virtio-pci-modern: introduce helper to set/get queue_enable
virtio-pci-modern: introduce vp_modern_queue_address()
virtio-pci-modern: introduce vp_modern_set_queue_vector()
virtio-pci-modern: introduce vp_modern_generation()
virtio-pci-modern: introduce helpers for setting and getting features
virtio-pci-modern: introduce helpers for setting and getting status
virtio-pci-modern: introduce helper to set config vector
virtio-pci-modern: introduce vp_modern_remove()
...

+1492 -507
+7 -4
drivers/block/virtio_blk.c
··· 705 705 u32 v, blk_size, max_size, sg_elems, opt_io_size; 706 706 u16 min_io_size; 707 707 u8 physical_block_exp, alignment_offset; 708 + unsigned int queue_depth; 708 709 709 710 if (!vdev->config->get) { 710 711 dev_err(&vdev->dev, "%s failure: config access disabled\n", ··· 757 756 } 758 757 759 758 /* Default queue sizing is to fill the ring. */ 760 - if (!virtblk_queue_depth) { 761 - virtblk_queue_depth = vblk->vqs[0].vq->num_free; 759 + if (likely(!virtblk_queue_depth)) { 760 + queue_depth = vblk->vqs[0].vq->num_free; 762 761 /* ... but without indirect descs, we use 2 descs per req */ 763 762 if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) 764 - virtblk_queue_depth /= 2; 763 + queue_depth /= 2; 764 + } else { 765 + queue_depth = virtblk_queue_depth; 765 766 } 766 767 767 768 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); 768 769 vblk->tag_set.ops = &virtio_mq_ops; 769 - vblk->tag_set.queue_depth = virtblk_queue_depth; 770 + vblk->tag_set.queue_depth = queue_depth; 770 771 vblk->tag_set.numa_node = NUMA_NO_NODE; 771 772 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 772 773 vblk->tag_set.cmd_size =
+1
drivers/net/virtio_net.c
··· 729 729 fallthrough; 730 730 case XDP_ABORTED: 731 731 trace_xdp_exception(vi->dev, xdp_prog, act); 732 + goto err_xdp; 732 733 case XDP_DROP: 733 734 goto err_xdp; 734 735 }
+1
drivers/vdpa/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 menuconfig VDPA 3 3 tristate "vDPA drivers" 4 + depends on NET 4 5 help 5 6 Enable this module to support vDPA device that uses a 6 7 datapath which complies with virtio specifications with
+1 -1
drivers/vdpa/ifcvf/ifcvf_main.c
··· 432 432 433 433 adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, 434 434 dev, &ifc_vdpa_ops, 435 - IFCVF_MAX_QUEUE_PAIRS * 2); 435 + IFCVF_MAX_QUEUE_PAIRS * 2, NULL); 436 436 if (adapter == NULL) { 437 437 IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); 438 438 return -ENOMEM;
+2 -2
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 1820 1820 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); 1821 1821 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); 1822 1822 1823 - if (offset + len < sizeof(struct virtio_net_config)) 1823 + if (offset + len <= sizeof(struct virtio_net_config)) 1824 1824 memcpy(buf, (u8 *)&ndev->config + offset, len); 1825 1825 } 1826 1826 ··· 1982 1982 max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); 1983 1983 1984 1984 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, 1985 - 2 * mlx5_vdpa_max_qps(max_vqs)); 1985 + 2 * mlx5_vdpa_max_qps(max_vqs), NULL); 1986 1986 if (IS_ERR(ndev)) 1987 1987 return PTR_ERR(ndev); 1988 1988
+498 -5
drivers/vdpa/vdpa.c
··· 11 11 #include <linux/idr.h> 12 12 #include <linux/slab.h> 13 13 #include <linux/vdpa.h> 14 + #include <uapi/linux/vdpa.h> 15 + #include <net/genetlink.h> 16 + #include <linux/mod_devicetable.h> 14 17 18 + static LIST_HEAD(mdev_head); 19 + /* A global mutex that protects vdpa management device and device level operations. */ 20 + static DEFINE_MUTEX(vdpa_dev_mutex); 15 21 static DEFINE_IDA(vdpa_index_ida); 22 + 23 + static struct genl_family vdpa_nl_family; 16 24 17 25 static int vdpa_dev_probe(struct device *d) 18 26 { ··· 71 63 * @config: the bus operations that is supported by this device 72 64 * @nvqs: number of virtqueues supported by this device 73 65 * @size: size of the parent structure that contains private data 66 + * @name: name of the vdpa device; optional. 74 67 * 75 68 * Driver should use vdpa_alloc_device() wrapper macro instead of 76 69 * using this directly. ··· 81 72 */ 82 73 struct vdpa_device *__vdpa_alloc_device(struct device *parent, 83 74 const struct vdpa_config_ops *config, 84 - int nvqs, 85 - size_t size) 75 + int nvqs, size_t size, const char *name) 86 76 { 87 77 struct vdpa_device *vdev; 88 78 int err = -EINVAL; ··· 109 101 vdev->features_valid = false; 110 102 vdev->nvqs = nvqs; 111 103 112 - err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); 104 + if (name) 105 + err = dev_set_name(&vdev->dev, "%s", name); 106 + else 107 + err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); 113 108 if (err) 114 109 goto err_name; 115 110 ··· 129 118 } 130 119 EXPORT_SYMBOL_GPL(__vdpa_alloc_device); 131 120 121 + static int vdpa_name_match(struct device *dev, const void *data) 122 + { 123 + struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); 124 + 125 + return (strcmp(dev_name(&vdev->dev), data) == 0); 126 + } 127 + 128 + static int __vdpa_register_device(struct vdpa_device *vdev) 129 + { 130 + struct device *dev; 131 + 132 + lockdep_assert_held(&vdpa_dev_mutex); 133 + dev = bus_find_device(&vdpa_bus, NULL, 
dev_name(&vdev->dev), vdpa_name_match); 134 + if (dev) { 135 + put_device(dev); 136 + return -EEXIST; 137 + } 138 + return device_add(&vdev->dev); 139 + } 140 + 141 + /** 142 + * _vdpa_register_device - register a vDPA device with vdpa lock held 143 + * Caller must have a succeed call of vdpa_alloc_device() before. 144 + * Caller must invoke this routine in the management device dev_add() 145 + * callback after setting up valid mgmtdev for this vdpa device. 146 + * @vdev: the vdpa device to be registered to vDPA bus 147 + * 148 + * Returns an error when fail to add device to vDPA bus 149 + */ 150 + int _vdpa_register_device(struct vdpa_device *vdev) 151 + { 152 + if (!vdev->mdev) 153 + return -EINVAL; 154 + 155 + return __vdpa_register_device(vdev); 156 + } 157 + EXPORT_SYMBOL_GPL(_vdpa_register_device); 158 + 132 159 /** 133 160 * vdpa_register_device - register a vDPA device 134 161 * Callers must have a succeed call of vdpa_alloc_device() before. ··· 176 127 */ 177 128 int vdpa_register_device(struct vdpa_device *vdev) 178 129 { 179 - return device_add(&vdev->dev); 130 + int err; 131 + 132 + mutex_lock(&vdpa_dev_mutex); 133 + err = __vdpa_register_device(vdev); 134 + mutex_unlock(&vdpa_dev_mutex); 135 + return err; 180 136 } 181 137 EXPORT_SYMBOL_GPL(vdpa_register_device); 138 + 139 + /** 140 + * _vdpa_unregister_device - unregister a vDPA device 141 + * Caller must invoke this routine as part of management device dev_del() 142 + * callback. 
143 + * @vdev: the vdpa device to be unregisted from vDPA bus 144 + */ 145 + void _vdpa_unregister_device(struct vdpa_device *vdev) 146 + { 147 + lockdep_assert_held(&vdpa_dev_mutex); 148 + WARN_ON(!vdev->mdev); 149 + device_unregister(&vdev->dev); 150 + } 151 + EXPORT_SYMBOL_GPL(_vdpa_unregister_device); 182 152 183 153 /** 184 154 * vdpa_unregister_device - unregister a vDPA device ··· 205 137 */ 206 138 void vdpa_unregister_device(struct vdpa_device *vdev) 207 139 { 140 + mutex_lock(&vdpa_dev_mutex); 208 141 device_unregister(&vdev->dev); 142 + mutex_unlock(&vdpa_dev_mutex); 209 143 } 210 144 EXPORT_SYMBOL_GPL(vdpa_unregister_device); 211 145 ··· 237 167 } 238 168 EXPORT_SYMBOL_GPL(vdpa_unregister_driver); 239 169 170 + /** 171 + * vdpa_mgmtdev_register - register a vdpa management device 172 + * 173 + * @mdev: Pointer to vdpa management device 174 + * vdpa_mgmtdev_register() register a vdpa management device which supports 175 + * vdpa device management. 176 + */ 177 + int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev) 178 + { 179 + if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del) 180 + return -EINVAL; 181 + 182 + INIT_LIST_HEAD(&mdev->list); 183 + mutex_lock(&vdpa_dev_mutex); 184 + list_add_tail(&mdev->list, &mdev_head); 185 + mutex_unlock(&vdpa_dev_mutex); 186 + return 0; 187 + } 188 + EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register); 189 + 190 + static int vdpa_match_remove(struct device *dev, void *data) 191 + { 192 + struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); 193 + struct vdpa_mgmt_dev *mdev = vdev->mdev; 194 + 195 + if (mdev == data) 196 + mdev->ops->dev_del(mdev, vdev); 197 + return 0; 198 + } 199 + 200 + void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev) 201 + { 202 + mutex_lock(&vdpa_dev_mutex); 203 + 204 + list_del(&mdev->list); 205 + 206 + /* Filter out all the entries belong to this management device and delete it. 
*/ 207 + bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove); 208 + 209 + mutex_unlock(&vdpa_dev_mutex); 210 + } 211 + EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister); 212 + 213 + static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev, 214 + const char *busname, const char *devname) 215 + { 216 + /* Bus name is optional for simulated management device, so ignore the 217 + * device with bus if bus attribute is provided. 218 + */ 219 + if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus)) 220 + return false; 221 + 222 + if (!busname && strcmp(dev_name(mdev->device), devname) == 0) 223 + return true; 224 + 225 + if (busname && (strcmp(mdev->device->bus->name, busname) == 0) && 226 + (strcmp(dev_name(mdev->device), devname) == 0)) 227 + return true; 228 + 229 + return false; 230 + } 231 + 232 + static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs) 233 + { 234 + struct vdpa_mgmt_dev *mdev; 235 + const char *busname = NULL; 236 + const char *devname; 237 + 238 + if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]) 239 + return ERR_PTR(-EINVAL); 240 + devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]); 241 + if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]) 242 + busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]); 243 + 244 + list_for_each_entry(mdev, &mdev_head, list) { 245 + if (mgmtdev_handle_match(mdev, busname, devname)) 246 + return mdev; 247 + } 248 + return ERR_PTR(-ENODEV); 249 + } 250 + 251 + static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev) 252 + { 253 + if (mdev->device->bus && 254 + nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name)) 255 + return -EMSGSIZE; 256 + if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device))) 257 + return -EMSGSIZE; 258 + return 0; 259 + } 260 + 261 + static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg, 262 + u32 portid, u32 seq, int flags) 263 + { 264 + u64 supported_classes = 0; 
265 + void *hdr; 266 + int i = 0; 267 + int err; 268 + 269 + hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW); 270 + if (!hdr) 271 + return -EMSGSIZE; 272 + err = vdpa_nl_mgmtdev_handle_fill(msg, mdev); 273 + if (err) 274 + goto msg_err; 275 + 276 + while (mdev->id_table[i].device) { 277 + supported_classes |= BIT(mdev->id_table[i].device); 278 + i++; 279 + } 280 + 281 + if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, 282 + supported_classes, VDPA_ATTR_UNSPEC)) { 283 + err = -EMSGSIZE; 284 + goto msg_err; 285 + } 286 + 287 + genlmsg_end(msg, hdr); 288 + return 0; 289 + 290 + msg_err: 291 + genlmsg_cancel(msg, hdr); 292 + return err; 293 + } 294 + 295 + static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info) 296 + { 297 + struct vdpa_mgmt_dev *mdev; 298 + struct sk_buff *msg; 299 + int err; 300 + 301 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 302 + if (!msg) 303 + return -ENOMEM; 304 + 305 + mutex_lock(&vdpa_dev_mutex); 306 + mdev = vdpa_mgmtdev_get_from_attr(info->attrs); 307 + if (IS_ERR(mdev)) { 308 + mutex_unlock(&vdpa_dev_mutex); 309 + NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device"); 310 + err = PTR_ERR(mdev); 311 + goto out; 312 + } 313 + 314 + err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0); 315 + mutex_unlock(&vdpa_dev_mutex); 316 + if (err) 317 + goto out; 318 + err = genlmsg_reply(msg, info); 319 + return err; 320 + 321 + out: 322 + nlmsg_free(msg); 323 + return err; 324 + } 325 + 326 + static int 327 + vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) 328 + { 329 + struct vdpa_mgmt_dev *mdev; 330 + int start = cb->args[0]; 331 + int idx = 0; 332 + int err; 333 + 334 + mutex_lock(&vdpa_dev_mutex); 335 + list_for_each_entry(mdev, &mdev_head, list) { 336 + if (idx < start) { 337 + idx++; 338 + continue; 339 + } 340 + err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid, 341 + 
cb->nlh->nlmsg_seq, NLM_F_MULTI); 342 + if (err) 343 + goto out; 344 + idx++; 345 + } 346 + out: 347 + mutex_unlock(&vdpa_dev_mutex); 348 + cb->args[0] = idx; 349 + return msg->len; 350 + } 351 + 352 + static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info) 353 + { 354 + struct vdpa_mgmt_dev *mdev; 355 + const char *name; 356 + int err = 0; 357 + 358 + if (!info->attrs[VDPA_ATTR_DEV_NAME]) 359 + return -EINVAL; 360 + 361 + name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); 362 + 363 + mutex_lock(&vdpa_dev_mutex); 364 + mdev = vdpa_mgmtdev_get_from_attr(info->attrs); 365 + if (IS_ERR(mdev)) { 366 + NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device"); 367 + err = PTR_ERR(mdev); 368 + goto err; 369 + } 370 + 371 + err = mdev->ops->dev_add(mdev, name); 372 + err: 373 + mutex_unlock(&vdpa_dev_mutex); 374 + return err; 375 + } 376 + 377 + static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info) 378 + { 379 + struct vdpa_mgmt_dev *mdev; 380 + struct vdpa_device *vdev; 381 + struct device *dev; 382 + const char *name; 383 + int err = 0; 384 + 385 + if (!info->attrs[VDPA_ATTR_DEV_NAME]) 386 + return -EINVAL; 387 + name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); 388 + 389 + mutex_lock(&vdpa_dev_mutex); 390 + dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match); 391 + if (!dev) { 392 + NL_SET_ERR_MSG_MOD(info->extack, "device not found"); 393 + err = -ENODEV; 394 + goto dev_err; 395 + } 396 + vdev = container_of(dev, struct vdpa_device, dev); 397 + if (!vdev->mdev) { 398 + NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user"); 399 + err = -EINVAL; 400 + goto mdev_err; 401 + } 402 + mdev = vdev->mdev; 403 + mdev->ops->dev_del(mdev, vdev); 404 + mdev_err: 405 + put_device(dev); 406 + dev_err: 407 + mutex_unlock(&vdpa_dev_mutex); 408 + return err; 409 + } 410 + 411 + static int 412 + vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, 
u32 seq, 413 + int flags, struct netlink_ext_ack *extack) 414 + { 415 + u16 max_vq_size; 416 + u32 device_id; 417 + u32 vendor_id; 418 + void *hdr; 419 + int err; 420 + 421 + hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW); 422 + if (!hdr) 423 + return -EMSGSIZE; 424 + 425 + err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev); 426 + if (err) 427 + goto msg_err; 428 + 429 + device_id = vdev->config->get_device_id(vdev); 430 + vendor_id = vdev->config->get_vendor_id(vdev); 431 + max_vq_size = vdev->config->get_vq_num_max(vdev); 432 + 433 + err = -EMSGSIZE; 434 + if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) 435 + goto msg_err; 436 + if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) 437 + goto msg_err; 438 + if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id)) 439 + goto msg_err; 440 + if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs)) 441 + goto msg_err; 442 + if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size)) 443 + goto msg_err; 444 + 445 + genlmsg_end(msg, hdr); 446 + return 0; 447 + 448 + msg_err: 449 + genlmsg_cancel(msg, hdr); 450 + return err; 451 + } 452 + 453 + static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info) 454 + { 455 + struct vdpa_device *vdev; 456 + struct sk_buff *msg; 457 + const char *devname; 458 + struct device *dev; 459 + int err; 460 + 461 + if (!info->attrs[VDPA_ATTR_DEV_NAME]) 462 + return -EINVAL; 463 + devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); 464 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 465 + if (!msg) 466 + return -ENOMEM; 467 + 468 + mutex_lock(&vdpa_dev_mutex); 469 + dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); 470 + if (!dev) { 471 + NL_SET_ERR_MSG_MOD(info->extack, "device not found"); 472 + err = -ENODEV; 473 + goto err; 474 + } 475 + vdev = container_of(dev, struct vdpa_device, dev); 476 + if (!vdev->mdev) { 477 + err = -EINVAL; 478 + goto mdev_err; 479 + } 480 + err = vdpa_dev_fill(vdev, 
msg, info->snd_portid, info->snd_seq, 0, info->extack); 481 + if (!err) 482 + err = genlmsg_reply(msg, info); 483 + mdev_err: 484 + put_device(dev); 485 + err: 486 + mutex_unlock(&vdpa_dev_mutex); 487 + if (err) 488 + nlmsg_free(msg); 489 + return err; 490 + } 491 + 492 + struct vdpa_dev_dump_info { 493 + struct sk_buff *msg; 494 + struct netlink_callback *cb; 495 + int start_idx; 496 + int idx; 497 + }; 498 + 499 + static int vdpa_dev_dump(struct device *dev, void *data) 500 + { 501 + struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); 502 + struct vdpa_dev_dump_info *info = data; 503 + int err; 504 + 505 + if (!vdev->mdev) 506 + return 0; 507 + if (info->idx < info->start_idx) { 508 + info->idx++; 509 + return 0; 510 + } 511 + err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, 512 + info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack); 513 + if (err) 514 + return err; 515 + 516 + info->idx++; 517 + return 0; 518 + } 519 + 520 + static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) 521 + { 522 + struct vdpa_dev_dump_info info; 523 + 524 + info.msg = msg; 525 + info.cb = cb; 526 + info.start_idx = cb->args[0]; 527 + info.idx = 0; 528 + 529 + mutex_lock(&vdpa_dev_mutex); 530 + bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump); 531 + mutex_unlock(&vdpa_dev_mutex); 532 + cb->args[0] = info.idx; 533 + return msg->len; 534 + } 535 + 536 + static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = { 537 + [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING }, 538 + [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING }, 539 + [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING }, 540 + }; 541 + 542 + static const struct genl_ops vdpa_nl_ops[] = { 543 + { 544 + .cmd = VDPA_CMD_MGMTDEV_GET, 545 + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 546 + .doit = vdpa_nl_cmd_mgmtdev_get_doit, 547 + .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit, 548 + }, 549 + { 550 + .cmd = 
VDPA_CMD_DEV_NEW, 551 + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 552 + .doit = vdpa_nl_cmd_dev_add_set_doit, 553 + .flags = GENL_ADMIN_PERM, 554 + }, 555 + { 556 + .cmd = VDPA_CMD_DEV_DEL, 557 + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 558 + .doit = vdpa_nl_cmd_dev_del_set_doit, 559 + .flags = GENL_ADMIN_PERM, 560 + }, 561 + { 562 + .cmd = VDPA_CMD_DEV_GET, 563 + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 564 + .doit = vdpa_nl_cmd_dev_get_doit, 565 + .dumpit = vdpa_nl_cmd_dev_get_dumpit, 566 + }, 567 + }; 568 + 569 + static struct genl_family vdpa_nl_family __ro_after_init = { 570 + .name = VDPA_GENL_NAME, 571 + .version = VDPA_GENL_VERSION, 572 + .maxattr = VDPA_ATTR_MAX, 573 + .policy = vdpa_nl_policy, 574 + .netnsok = false, 575 + .module = THIS_MODULE, 576 + .ops = vdpa_nl_ops, 577 + .n_ops = ARRAY_SIZE(vdpa_nl_ops), 578 + }; 579 + 240 580 static int vdpa_init(void) 241 581 { 242 - return bus_register(&vdpa_bus); 582 + int err; 583 + 584 + err = bus_register(&vdpa_bus); 585 + if (err) 586 + return err; 587 + err = genl_register_family(&vdpa_nl_family); 588 + if (err) 589 + goto err; 590 + return 0; 591 + 592 + err: 593 + bus_unregister(&vdpa_bus); 594 + return err; 243 595 } 244 596 245 597 static void __exit vdpa_exit(void) 246 598 { 599 + genl_unregister_family(&vdpa_nl_family); 247 600 bus_unregister(&vdpa_bus); 248 601 ida_destroy(&vdpa_index_ida); 249 602 }
+2 -1
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 235 235 ops = &vdpasim_config_ops; 236 236 237 237 vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 238 - dev_attr->nvqs); 238 + dev_attr->nvqs, dev_attr->name); 239 239 if (!vdpasim) 240 240 goto err_alloc; 241 241 ··· 249 249 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) 250 250 goto err_iommu; 251 251 set_dma_ops(dev, &vdpasim_dma_ops); 252 + vdpasim->vdpa.mdev = dev_attr->mgmt_dev; 252 253 253 254 vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL); 254 255 if (!vdpasim->config)
+2
drivers/vdpa/vdpa_sim/vdpa_sim.h
··· 33 33 }; 34 34 35 35 struct vdpasim_dev_attr { 36 + struct vdpa_mgmt_dev *mgmt_dev; 37 + const char *name; 36 38 u64 supported_features; 37 39 size_t config_size; 38 40 size_t buffer_size;
+73 -27
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
··· 33 33 module_param(macaddr, charp, 0); 34 34 MODULE_PARM_DESC(macaddr, "Ethernet MAC address"); 35 35 36 - u8 macaddr_buf[ETH_ALEN]; 37 - 38 - static struct vdpasim *vdpasim_net_dev; 36 + static u8 macaddr_buf[ETH_ALEN]; 39 37 40 38 static void vdpasim_net_work(struct work_struct *work) 41 39 { ··· 118 120 memcpy(net_config->mac, macaddr_buf, ETH_ALEN); 119 121 } 120 122 121 - static int __init vdpasim_net_init(void) 123 + static void vdpasim_net_mgmtdev_release(struct device *dev) 124 + { 125 + } 126 + 127 + static struct device vdpasim_net_mgmtdev = { 128 + .init_name = "vdpasim_net", 129 + .release = vdpasim_net_mgmtdev_release, 130 + }; 131 + 132 + static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name) 122 133 { 123 134 struct vdpasim_dev_attr dev_attr = {}; 135 + struct vdpasim *simdev; 124 136 int ret; 125 137 126 - if (macaddr) { 127 - mac_pton(macaddr, macaddr_buf); 128 - if (!is_valid_ether_addr(macaddr_buf)) { 129 - ret = -EADDRNOTAVAIL; 130 - goto out; 131 - } 132 - } else { 133 - eth_random_addr(macaddr_buf); 134 - } 135 - 138 + dev_attr.mgmt_dev = mdev; 139 + dev_attr.name = name; 136 140 dev_attr.id = VIRTIO_ID_NET; 137 141 dev_attr.supported_features = VDPASIM_NET_FEATURES; 138 142 dev_attr.nvqs = VDPASIM_NET_VQ_NUM; ··· 143 143 dev_attr.work_fn = vdpasim_net_work; 144 144 dev_attr.buffer_size = PAGE_SIZE; 145 145 146 - vdpasim_net_dev = vdpasim_create(&dev_attr); 147 - if (IS_ERR(vdpasim_net_dev)) { 148 - ret = PTR_ERR(vdpasim_net_dev); 149 - goto out; 150 - } 146 + simdev = vdpasim_create(&dev_attr); 147 + if (IS_ERR(simdev)) 148 + return PTR_ERR(simdev); 151 149 152 - ret = vdpa_register_device(&vdpasim_net_dev->vdpa); 150 + ret = _vdpa_register_device(&simdev->vdpa); 153 151 if (ret) 154 - goto put_dev; 152 + goto reg_err; 155 153 156 154 return 0; 157 155 158 - put_dev: 159 - put_device(&vdpasim_net_dev->vdpa.dev); 160 - out: 156 + reg_err: 157 + put_device(&simdev->vdpa.dev); 158 + return ret; 159 + } 160 + 161 + 
static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev, 162 + struct vdpa_device *dev) 163 + { 164 + struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa); 165 + 166 + _vdpa_unregister_device(&simdev->vdpa); 167 + } 168 + 169 + static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = { 170 + .dev_add = vdpasim_net_dev_add, 171 + .dev_del = vdpasim_net_dev_del 172 + }; 173 + 174 + static struct virtio_device_id id_table[] = { 175 + { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, 176 + { 0 }, 177 + }; 178 + 179 + static struct vdpa_mgmt_dev mgmt_dev = { 180 + .device = &vdpasim_net_mgmtdev, 181 + .id_table = id_table, 182 + .ops = &vdpasim_net_mgmtdev_ops, 183 + }; 184 + 185 + static int __init vdpasim_net_init(void) 186 + { 187 + int ret; 188 + 189 + if (macaddr) { 190 + mac_pton(macaddr, macaddr_buf); 191 + if (!is_valid_ether_addr(macaddr_buf)) 192 + return -EADDRNOTAVAIL; 193 + } else { 194 + eth_random_addr(macaddr_buf); 195 + } 196 + 197 + ret = device_register(&vdpasim_net_mgmtdev); 198 + if (ret) 199 + return ret; 200 + 201 + ret = vdpa_mgmtdev_register(&mgmt_dev); 202 + if (ret) 203 + goto parent_err; 204 + return 0; 205 + 206 + parent_err: 207 + device_unregister(&vdpasim_net_mgmtdev); 161 208 return ret; 162 209 } 163 210 164 211 static void __exit vdpasim_net_exit(void) 165 212 { 166 - struct vdpa_device *vdpa = &vdpasim_net_dev->vdpa; 167 - 168 - vdpa_unregister_device(vdpa); 213 + vdpa_mgmtdev_unregister(&mgmt_dev); 214 + device_unregister(&vdpasim_net_mgmtdev); 169 215 } 170 216 171 217 module_init(vdpasim_net_init);
+3 -6
drivers/vhost/scsi.c
··· 1814 1814 struct vhost_virtqueue **vqs; 1815 1815 int r = -ENOMEM, i; 1816 1816 1817 - vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); 1818 - if (!vs) { 1819 - vs = vzalloc(sizeof(*vs)); 1820 - if (!vs) 1821 - goto err_vs; 1822 - } 1817 + vs = kvzalloc(sizeof(*vs), GFP_KERNEL); 1818 + if (!vs) 1819 + goto err_vs; 1823 1820 1824 1821 vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL); 1825 1822 if (!vqs)
+9
drivers/virtio/Kconfig
··· 12 12 This option is selected if the architecture may need to enforce 13 13 VIRTIO_F_ACCESS_PLATFORM 14 14 15 + config VIRTIO_PCI_LIB 16 + tristate 17 + help 18 + Modern PCI device implementation. This module implements the 19 + basic probe and control for devices which are based on modern 20 + PCI device with possible vendor specific extensions. Any 21 + module that selects this module must depend on PCI. 22 + 15 23 menuconfig VIRTIO_MENU 16 24 bool "Virtio drivers" 17 25 default y ··· 29 21 config VIRTIO_PCI 30 22 tristate "PCI driver for virtio devices" 31 23 depends on PCI 24 + select VIRTIO_PCI_LIB 32 25 select VIRTIO 33 26 help 34 27 This driver provides support for virtio based paravirtual device
+1
drivers/virtio/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o 3 + obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o 3 4 obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o 4 5 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o 5 6 virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
+25 -1
drivers/virtio/virtio_input.c
··· 7 7 8 8 #include <uapi/linux/virtio_ids.h> 9 9 #include <uapi/linux/virtio_input.h> 10 + #include <linux/input/mt.h> 10 11 11 12 struct virtio_input { 12 13 struct virtio_device *vdev; ··· 64 63 struct scatterlist sg[1]; 65 64 unsigned long flags; 66 65 int rc; 66 + 67 + /* 68 + * Since 29cc309d8bf1 (HID: hid-multitouch: forward MSC_TIMESTAMP), 69 + * EV_MSC/MSC_TIMESTAMP is added to each before EV_SYN event. 70 + * EV_MSC is configured as INPUT_PASS_TO_ALL. 71 + * In case of touch device: 72 + * BE pass EV_MSC/MSC_TIMESTAMP to FE on receiving event from evdev. 73 + * FE pass EV_MSC/MSC_TIMESTAMP back to BE. 74 + * BE writes EV_MSC/MSC_TIMESTAMP to evdev due to INPUT_PASS_TO_ALL. 75 + * BE receives extra EV_MSC/MSC_TIMESTAMP and pass to FE. 76 + * >>> Each new frame becomes larger and larger. 77 + * Disable EV_MSC/MSC_TIMESTAMP forwarding for MT. 78 + */ 79 + if (vi->idev->mt && type == EV_MSC && code == MSC_TIMESTAMP) 80 + return 0; 67 81 68 82 stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC); 69 83 if (!stsbuf) ··· 220 204 struct virtio_input *vi; 221 205 unsigned long flags; 222 206 size_t size; 223 - int abs, err; 207 + int abs, err, nslots; 224 208 225 209 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 226 210 return -ENODEV; ··· 305 289 continue; 306 290 virtinput_cfg_abs(vi, abs); 307 291 } 292 + 293 + if (test_bit(ABS_MT_SLOT, vi->idev->absbit)) { 294 + nslots = input_abs_get_max(vi->idev, ABS_MT_SLOT) + 1; 295 + err = input_mt_init_slots(vi->idev, nslots, 0); 296 + if (err) 297 + goto err_mt_init_slots; 298 + } 308 299 } 309 300 310 301 virtio_device_ready(vdev); ··· 327 304 spin_lock_irqsave(&vi->lock, flags); 328 305 vi->ready = false; 329 306 spin_unlock_irqrestore(&vi->lock, flags); 307 + err_mt_init_slots: 330 308 input_free_device(vi->idev); 331 309 err_input_alloc: 332 310 vdev->config->del_vqs(vdev);
+1 -1
drivers/virtio/virtio_mem.c
··· 2577 2577 * actually in use (e.g., trying to reload the driver). 2578 2578 */ 2579 2579 if (vm->plugged_size) { 2580 - vm->unplug_all_required = 1; 2580 + vm->unplug_all_required = true; 2581 2581 dev_info(&vm->vdev->dev, "unplugging all memory is required\n"); 2582 2582 } 2583 2583
+1 -1
drivers/virtio/virtio_mmio.c
··· 126 126 /* Give virtio_ring a chance to accept features. */ 127 127 vring_transport_features(vdev); 128 128 129 - /* Make sure there is are no mixed devices */ 129 + /* Make sure there are no mixed devices */ 130 130 if (vm_dev->version == 2 && 131 131 !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { 132 132 dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
+2 -20
drivers/virtio/virtio_pci_common.h
··· 25 25 #include <linux/virtio_config.h> 26 26 #include <linux/virtio_ring.h> 27 27 #include <linux/virtio_pci.h> 28 + #include <linux/virtio_pci_modern.h> 28 29 #include <linux/highmem.h> 29 30 #include <linux/spinlock.h> 30 31 ··· 44 43 struct virtio_pci_device { 45 44 struct virtio_device vdev; 46 45 struct pci_dev *pci_dev; 46 + struct virtio_pci_modern_device mdev; 47 47 48 48 /* In legacy mode, these two point to within ->legacy. */ 49 49 /* Where to read and clear interrupt */ 50 50 u8 __iomem *isr; 51 - 52 - /* Modern only fields */ 53 - /* The IO mapping for the PCI config space (non-legacy mode) */ 54 - struct virtio_pci_common_cfg __iomem *common; 55 - /* Device-specific data (non-legacy mode) */ 56 - void __iomem *device; 57 - /* Base of vq notifications (non-legacy mode). */ 58 - void __iomem *notify_base; 59 - 60 - /* So we can sanity-check accesses. */ 61 - size_t notify_len; 62 - size_t device_len; 63 - 64 - /* Capability for when we need to map notifications per-vq. */ 65 - int notify_map_cap; 66 - 67 - /* Multiply queue_notify_off by this value. (non-legacy mode). */ 68 - u32 notify_offset_multiplier; 69 - 70 - int modern_bars; 71 51 72 52 /* Legacy only field */ 73 53 /* the IO mapping for the PCI config space */
+72 -432
drivers/virtio/virtio_pci_modern.c
··· 19 19 #define VIRTIO_RING_NO_LEGACY 20 20 #include "virtio_pci_common.h" 21 21 22 - /* 23 - * Type-safe wrappers for io accesses. 24 - * Use these to enforce at compile time the following spec requirement: 25 - * 26 - * The driver MUST access each field using the “natural” access 27 - * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses 28 - * for 16-bit fields and 8-bit accesses for 8-bit fields. 29 - */ 30 - static inline u8 vp_ioread8(const u8 __iomem *addr) 31 - { 32 - return ioread8(addr); 33 - } 34 - static inline u16 vp_ioread16 (const __le16 __iomem *addr) 35 - { 36 - return ioread16(addr); 37 - } 38 - 39 - static inline u32 vp_ioread32(const __le32 __iomem *addr) 40 - { 41 - return ioread32(addr); 42 - } 43 - 44 - static inline void vp_iowrite8(u8 value, u8 __iomem *addr) 45 - { 46 - iowrite8(value, addr); 47 - } 48 - 49 - static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) 50 - { 51 - iowrite16(value, addr); 52 - } 53 - 54 - static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) 55 - { 56 - iowrite32(value, addr); 57 - } 58 - 59 - static void vp_iowrite64_twopart(u64 val, 60 - __le32 __iomem *lo, __le32 __iomem *hi) 61 - { 62 - vp_iowrite32((u32)val, lo); 63 - vp_iowrite32(val >> 32, hi); 64 - } 65 - 66 - static void __iomem *map_capability(struct pci_dev *dev, int off, 67 - size_t minlen, 68 - u32 align, 69 - u32 start, u32 size, 70 - size_t *len) 71 - { 72 - u8 bar; 73 - u32 offset, length; 74 - void __iomem *p; 75 - 76 - pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, 77 - bar), 78 - &bar); 79 - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), 80 - &offset); 81 - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), 82 - &length); 83 - 84 - if (length <= start) { 85 - dev_err(&dev->dev, 86 - "virtio_pci: bad capability len %u (>%u expected)\n", 87 - length, start); 88 - return NULL; 89 - } 90 - 91 - if (length - start < minlen) { 92 - dev_err(&dev->dev, 
93 - "virtio_pci: bad capability len %u (>=%zu expected)\n", 94 - length, minlen); 95 - return NULL; 96 - } 97 - 98 - length -= start; 99 - 100 - if (start + offset < offset) { 101 - dev_err(&dev->dev, 102 - "virtio_pci: map wrap-around %u+%u\n", 103 - start, offset); 104 - return NULL; 105 - } 106 - 107 - offset += start; 108 - 109 - if (offset & (align - 1)) { 110 - dev_err(&dev->dev, 111 - "virtio_pci: offset %u not aligned to %u\n", 112 - offset, align); 113 - return NULL; 114 - } 115 - 116 - if (length > size) 117 - length = size; 118 - 119 - if (len) 120 - *len = length; 121 - 122 - if (minlen + offset < minlen || 123 - minlen + offset > pci_resource_len(dev, bar)) { 124 - dev_err(&dev->dev, 125 - "virtio_pci: map virtio %zu@%u " 126 - "out of range on bar %i length %lu\n", 127 - minlen, offset, 128 - bar, (unsigned long)pci_resource_len(dev, bar)); 129 - return NULL; 130 - } 131 - 132 - p = pci_iomap_range(dev, bar, offset, length); 133 - if (!p) 134 - dev_err(&dev->dev, 135 - "virtio_pci: unable to map virtio %u@%u on bar %i\n", 136 - length, offset, bar); 137 - return p; 138 - } 139 - 140 - /* virtio config->get_features() implementation */ 141 22 static u64 vp_get_features(struct virtio_device *vdev) 142 23 { 143 24 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 144 - u64 features; 145 25 146 - vp_iowrite32(0, &vp_dev->common->device_feature_select); 147 - features = vp_ioread32(&vp_dev->common->device_feature); 148 - vp_iowrite32(1, &vp_dev->common->device_feature_select); 149 - features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32); 150 - 151 - return features; 26 + return vp_modern_get_features(&vp_dev->mdev); 152 27 } 153 28 154 29 static void vp_transport_features(struct virtio_device *vdev, u64 features) ··· 54 179 return -EINVAL; 55 180 } 56 181 57 - vp_iowrite32(0, &vp_dev->common->guest_feature_select); 58 - vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature); 59 - vp_iowrite32(1, 
&vp_dev->common->guest_feature_select); 60 - vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature); 182 + vp_modern_set_features(&vp_dev->mdev, vdev->features); 61 183 62 184 return 0; 63 185 } ··· 64 192 void *buf, unsigned len) 65 193 { 66 194 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 195 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 196 + void __iomem *device = mdev->device; 67 197 u8 b; 68 198 __le16 w; 69 199 __le32 l; 70 200 71 - BUG_ON(offset + len > vp_dev->device_len); 201 + BUG_ON(offset + len > mdev->device_len); 72 202 73 203 switch (len) { 74 204 case 1: 75 - b = ioread8(vp_dev->device + offset); 205 + b = ioread8(device + offset); 76 206 memcpy(buf, &b, sizeof b); 77 207 break; 78 208 case 2: 79 - w = cpu_to_le16(ioread16(vp_dev->device + offset)); 209 + w = cpu_to_le16(ioread16(device + offset)); 80 210 memcpy(buf, &w, sizeof w); 81 211 break; 82 212 case 4: 83 - l = cpu_to_le32(ioread32(vp_dev->device + offset)); 213 + l = cpu_to_le32(ioread32(device + offset)); 84 214 memcpy(buf, &l, sizeof l); 85 215 break; 86 216 case 8: 87 - l = cpu_to_le32(ioread32(vp_dev->device + offset)); 217 + l = cpu_to_le32(ioread32(device + offset)); 88 218 memcpy(buf, &l, sizeof l); 89 - l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); 219 + l = cpu_to_le32(ioread32(device + offset + sizeof l)); 90 220 memcpy(buf + sizeof l, &l, sizeof l); 91 221 break; 92 222 default: ··· 102 228 const void *buf, unsigned len) 103 229 { 104 230 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 231 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 232 + void __iomem *device = mdev->device; 105 233 u8 b; 106 234 __le16 w; 107 235 __le32 l; 108 236 109 - BUG_ON(offset + len > vp_dev->device_len); 237 + BUG_ON(offset + len > mdev->device_len); 110 238 111 239 switch (len) { 112 240 case 1: 113 241 memcpy(&b, buf, sizeof b); 114 - iowrite8(b, vp_dev->device + offset); 242 + iowrite8(b, device + offset); 115 243 break; 116 244 
case 2: 117 245 memcpy(&w, buf, sizeof w); 118 - iowrite16(le16_to_cpu(w), vp_dev->device + offset); 246 + iowrite16(le16_to_cpu(w), device + offset); 119 247 break; 120 248 case 4: 121 249 memcpy(&l, buf, sizeof l); 122 - iowrite32(le32_to_cpu(l), vp_dev->device + offset); 250 + iowrite32(le32_to_cpu(l), device + offset); 123 251 break; 124 252 case 8: 125 253 memcpy(&l, buf, sizeof l); 126 - iowrite32(le32_to_cpu(l), vp_dev->device + offset); 254 + iowrite32(le32_to_cpu(l), device + offset); 127 255 memcpy(&l, buf + sizeof l, sizeof l); 128 - iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); 256 + iowrite32(le32_to_cpu(l), device + offset + sizeof l); 129 257 break; 130 258 default: 131 259 BUG(); ··· 137 261 static u32 vp_generation(struct virtio_device *vdev) 138 262 { 139 263 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 140 - return vp_ioread8(&vp_dev->common->config_generation); 264 + 265 + return vp_modern_generation(&vp_dev->mdev); 141 266 } 142 267 143 268 /* config->{get,set}_status() implementations */ 144 269 static u8 vp_get_status(struct virtio_device *vdev) 145 270 { 146 271 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 147 - return vp_ioread8(&vp_dev->common->device_status); 272 + 273 + return vp_modern_get_status(&vp_dev->mdev); 148 274 } 149 275 150 276 static void vp_set_status(struct virtio_device *vdev, u8 status) 151 277 { 152 278 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 279 + 153 280 /* We should never be setting status to 0. */ 154 281 BUG_ON(status == 0); 155 - vp_iowrite8(status, &vp_dev->common->device_status); 282 + vp_modern_set_status(&vp_dev->mdev, status); 156 283 } 157 284 158 285 static void vp_reset(struct virtio_device *vdev) 159 286 { 160 287 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 288 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 289 + 161 290 /* 0 status means a reset. 
*/ 162 - vp_iowrite8(0, &vp_dev->common->device_status); 291 + vp_modern_set_status(mdev, 0); 163 292 /* After writing 0 to device_status, the driver MUST wait for a read of 164 293 * device_status to return 0 before reinitializing the device. 165 294 * This will flush out the status write, and flush in device writes, 166 295 * including MSI-X interrupts, if any. 167 296 */ 168 - while (vp_ioread8(&vp_dev->common->device_status)) 297 + while (vp_modern_get_status(mdev)) 169 298 msleep(1); 170 299 /* Flush pending VQ/configuration callbacks. */ 171 300 vp_synchronize_vectors(vdev); ··· 178 297 179 298 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) 180 299 { 181 - /* Setup the vector used for configuration events */ 182 - vp_iowrite16(vector, &vp_dev->common->msix_config); 183 - /* Verify we had enough resources to assign the vector */ 184 - /* Will also flush the write out to device */ 185 - return vp_ioread16(&vp_dev->common->msix_config); 300 + return vp_modern_config_vector(&vp_dev->mdev, vector); 186 301 } 187 302 188 303 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, ··· 189 312 bool ctx, 190 313 u16 msix_vec) 191 314 { 192 - struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; 315 + 316 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 193 317 struct virtqueue *vq; 194 318 u16 num, off; 195 319 int err; 196 320 197 - if (index >= vp_ioread16(&cfg->num_queues)) 321 + if (index >= vp_modern_get_num_queues(mdev)) 198 322 return ERR_PTR(-ENOENT); 199 323 200 - /* Select the queue we're interested in */ 201 - vp_iowrite16(index, &cfg->queue_select); 202 - 203 324 /* Check if queue is either not available or already active. 
*/ 204 - num = vp_ioread16(&cfg->queue_size); 205 - if (!num || vp_ioread16(&cfg->queue_enable)) 325 + num = vp_modern_get_queue_size(mdev, index); 326 + if (!num || vp_modern_get_queue_enable(mdev, index)) 206 327 return ERR_PTR(-ENOENT); 207 328 208 329 if (num & (num - 1)) { ··· 209 334 } 210 335 211 336 /* get offset of notification word for this vq */ 212 - off = vp_ioread16(&cfg->queue_notify_off); 337 + off = vp_modern_get_queue_notify_off(mdev, index); 213 338 214 339 info->msix_vector = msix_vec; 215 340 ··· 222 347 return ERR_PTR(-ENOMEM); 223 348 224 349 /* activate the queue */ 225 - vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size); 226 - vp_iowrite64_twopart(virtqueue_get_desc_addr(vq), 227 - &cfg->queue_desc_lo, &cfg->queue_desc_hi); 228 - vp_iowrite64_twopart(virtqueue_get_avail_addr(vq), 229 - &cfg->queue_avail_lo, &cfg->queue_avail_hi); 230 - vp_iowrite64_twopart(virtqueue_get_used_addr(vq), 231 - &cfg->queue_used_lo, &cfg->queue_used_hi); 350 + vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq)); 351 + vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq), 352 + virtqueue_get_avail_addr(vq), 353 + virtqueue_get_used_addr(vq)); 232 354 233 - if (vp_dev->notify_base) { 355 + if (mdev->notify_base) { 234 356 /* offset should not wrap */ 235 - if ((u64)off * vp_dev->notify_offset_multiplier + 2 236 - > vp_dev->notify_len) { 237 - dev_warn(&vp_dev->pci_dev->dev, 357 + if ((u64)off * mdev->notify_offset_multiplier + 2 358 + > mdev->notify_len) { 359 + dev_warn(&mdev->pci_dev->dev, 238 360 "bad notification offset %u (x %u) " 239 361 "for queue %u > %zd", 240 - off, vp_dev->notify_offset_multiplier, 241 - index, vp_dev->notify_len); 362 + off, mdev->notify_offset_multiplier, 363 + index, mdev->notify_len); 242 364 err = -EINVAL; 243 365 goto err_map_notify; 244 366 } 245 - vq->priv = (void __force *)vp_dev->notify_base + 246 - off * vp_dev->notify_offset_multiplier; 367 + vq->priv = (void __force *)mdev->notify_base 
+ 368 + off * mdev->notify_offset_multiplier; 247 369 } else { 248 - vq->priv = (void __force *)map_capability(vp_dev->pci_dev, 249 - vp_dev->notify_map_cap, 2, 2, 250 - off * vp_dev->notify_offset_multiplier, 2, 251 - NULL); 370 + vq->priv = (void __force *)vp_modern_map_capability(mdev, 371 + mdev->notify_map_cap, 2, 2, 372 + off * mdev->notify_offset_multiplier, 2, 373 + NULL); 252 374 } 253 375 254 376 if (!vq->priv) { ··· 254 382 } 255 383 256 384 if (msix_vec != VIRTIO_MSI_NO_VECTOR) { 257 - vp_iowrite16(msix_vec, &cfg->queue_msix_vector); 258 - msix_vec = vp_ioread16(&cfg->queue_msix_vector); 385 + msix_vec = vp_modern_queue_vector(mdev, index, msix_vec); 259 386 if (msix_vec == VIRTIO_MSI_NO_VECTOR) { 260 387 err = -EBUSY; 261 388 goto err_assign_vector; ··· 264 393 return vq; 265 394 266 395 err_assign_vector: 267 - if (!vp_dev->notify_base) 268 - pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); 396 + if (!mdev->notify_base) 397 + pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv); 269 398 err_map_notify: 270 399 vring_del_virtqueue(vq); 271 400 return ERR_PTR(err); ··· 287 416 /* Select and activate all queues. Has to be done last: once we do 288 417 * this, there's no way to go back except reset. 
289 418 */ 290 - list_for_each_entry(vq, &vdev->vqs, list) { 291 - vp_iowrite16(vq->index, &vp_dev->common->queue_select); 292 - vp_iowrite16(1, &vp_dev->common->queue_enable); 293 - } 419 + list_for_each_entry(vq, &vdev->vqs, list) 420 + vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true); 294 421 295 422 return 0; 296 423 } ··· 297 428 { 298 429 struct virtqueue *vq = info->vq; 299 430 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 431 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 300 432 301 - vp_iowrite16(vq->index, &vp_dev->common->queue_select); 433 + if (vp_dev->msix_enabled) 434 + vp_modern_queue_vector(mdev, vq->index, 435 + VIRTIO_MSI_NO_VECTOR); 302 436 303 - if (vp_dev->msix_enabled) { 304 - vp_iowrite16(VIRTIO_MSI_NO_VECTOR, 305 - &vp_dev->common->queue_msix_vector); 306 - /* Flush the write out to device */ 307 - vp_ioread16(&vp_dev->common->queue_msix_vector); 308 - } 309 - 310 - if (!vp_dev->notify_base) 311 - pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); 437 + if (!mdev->notify_base) 438 + pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv); 312 439 313 440 vring_del_virtqueue(vq); 314 441 } ··· 436 571 .get_shm_region = vp_get_shm_region, 437 572 }; 438 573 439 - /** 440 - * virtio_pci_find_capability - walk capabilities to find device info. 441 - * @dev: the pci device 442 - * @cfg_type: the VIRTIO_PCI_CAP_* value we seek 443 - * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. 444 - * @bars: the bitmask of BARs 445 - * 446 - * Returns offset of the capability, or 0. 
447 - */ 448 - static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, 449 - u32 ioresource_types, int *bars) 450 - { 451 - int pos; 452 - 453 - for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); 454 - pos > 0; 455 - pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { 456 - u8 type, bar; 457 - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 458 - cfg_type), 459 - &type); 460 - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 461 - bar), 462 - &bar); 463 - 464 - /* Ignore structures with reserved BAR values */ 465 - if (bar > 0x5) 466 - continue; 467 - 468 - if (type == cfg_type) { 469 - if (pci_resource_len(dev, bar) && 470 - pci_resource_flags(dev, bar) & ioresource_types) { 471 - *bars |= (1 << bar); 472 - return pos; 473 - } 474 - } 475 - } 476 - return 0; 477 - } 478 - 479 - /* This is part of the ABI. Don't screw with it. */ 480 - static inline void check_offsets(void) 481 - { 482 - /* Note: disk space was harmed in compilation of this function. 
*/ 483 - BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != 484 - offsetof(struct virtio_pci_cap, cap_vndr)); 485 - BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != 486 - offsetof(struct virtio_pci_cap, cap_next)); 487 - BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != 488 - offsetof(struct virtio_pci_cap, cap_len)); 489 - BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != 490 - offsetof(struct virtio_pci_cap, cfg_type)); 491 - BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != 492 - offsetof(struct virtio_pci_cap, bar)); 493 - BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != 494 - offsetof(struct virtio_pci_cap, offset)); 495 - BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != 496 - offsetof(struct virtio_pci_cap, length)); 497 - BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != 498 - offsetof(struct virtio_pci_notify_cap, 499 - notify_off_multiplier)); 500 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != 501 - offsetof(struct virtio_pci_common_cfg, 502 - device_feature_select)); 503 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != 504 - offsetof(struct virtio_pci_common_cfg, device_feature)); 505 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != 506 - offsetof(struct virtio_pci_common_cfg, 507 - guest_feature_select)); 508 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != 509 - offsetof(struct virtio_pci_common_cfg, guest_feature)); 510 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != 511 - offsetof(struct virtio_pci_common_cfg, msix_config)); 512 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != 513 - offsetof(struct virtio_pci_common_cfg, num_queues)); 514 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != 515 - offsetof(struct virtio_pci_common_cfg, device_status)); 516 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != 517 - offsetof(struct virtio_pci_common_cfg, config_generation)); 518 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != 519 - offsetof(struct virtio_pci_common_cfg, queue_select)); 520 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != 521 - offsetof(struct virtio_pci_common_cfg, queue_size)); 522 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != 523 - offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); 524 
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != 525 - offsetof(struct virtio_pci_common_cfg, queue_enable)); 526 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != 527 - offsetof(struct virtio_pci_common_cfg, queue_notify_off)); 528 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != 529 - offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); 530 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != 531 - offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); 532 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != 533 - offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); 534 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != 535 - offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); 536 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != 537 - offsetof(struct virtio_pci_common_cfg, queue_used_lo)); 538 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != 539 - offsetof(struct virtio_pci_common_cfg, queue_used_hi)); 540 - } 541 - 542 574 /* the PCI probing function */ 543 575 int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) 544 576 { 577 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 545 578 struct pci_dev *pci_dev = vp_dev->pci_dev; 546 - int err, common, isr, notify, device; 547 - u32 notify_length; 548 - u32 notify_offset; 579 + int err; 549 580 550 - check_offsets(); 581 + mdev->pci_dev = pci_dev; 551 582 552 - /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ 553 - if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) 554 - return -ENODEV; 555 - 556 - if (pci_dev->device < 0x1040) { 557 - /* Transitional devices: use the PCI subsystem device id as 558 - * virtio device id, same as legacy driver always did. 559 - */ 560 - vp_dev->vdev.id.device = pci_dev->subsystem_device; 561 - } else { 562 - /* Modern devices: simply use PCI device id, but start from 0x1040. */ 563 - vp_dev->vdev.id.device = pci_dev->device - 0x1040; 564 - } 565 - vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; 566 - 567 - /* check for a common config: if not, use legacy mode (bar 0). 
*/ 568 - common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, 569 - IORESOURCE_IO | IORESOURCE_MEM, 570 - &vp_dev->modern_bars); 571 - if (!common) { 572 - dev_info(&pci_dev->dev, 573 - "virtio_pci: leaving for legacy driver\n"); 574 - return -ENODEV; 575 - } 576 - 577 - /* If common is there, these should be too... */ 578 - isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, 579 - IORESOURCE_IO | IORESOURCE_MEM, 580 - &vp_dev->modern_bars); 581 - notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, 582 - IORESOURCE_IO | IORESOURCE_MEM, 583 - &vp_dev->modern_bars); 584 - if (!isr || !notify) { 585 - dev_err(&pci_dev->dev, 586 - "virtio_pci: missing capabilities %i/%i/%i\n", 587 - common, isr, notify); 588 - return -EINVAL; 589 - } 590 - 591 - err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); 592 - if (err) 593 - err = dma_set_mask_and_coherent(&pci_dev->dev, 594 - DMA_BIT_MASK(32)); 595 - if (err) 596 - dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); 597 - 598 - /* Device capability is only mandatory for devices that have 599 - * device-specific configuration. 600 - */ 601 - device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, 602 - IORESOURCE_IO | IORESOURCE_MEM, 603 - &vp_dev->modern_bars); 604 - 605 - err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars, 606 - "virtio-pci-modern"); 583 + err = vp_modern_probe(mdev); 607 584 if (err) 608 585 return err; 609 586 610 - err = -EINVAL; 611 - vp_dev->common = map_capability(pci_dev, common, 612 - sizeof(struct virtio_pci_common_cfg), 4, 613 - 0, sizeof(struct virtio_pci_common_cfg), 614 - NULL); 615 - if (!vp_dev->common) 616 - goto err_map_common; 617 - vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1, 618 - 0, 1, 619 - NULL); 620 - if (!vp_dev->isr) 621 - goto err_map_isr; 622 - 623 - /* Read notify_off_multiplier from config space. 
*/ 624 - pci_read_config_dword(pci_dev, 625 - notify + offsetof(struct virtio_pci_notify_cap, 626 - notify_off_multiplier), 627 - &vp_dev->notify_offset_multiplier); 628 - /* Read notify length and offset from config space. */ 629 - pci_read_config_dword(pci_dev, 630 - notify + offsetof(struct virtio_pci_notify_cap, 631 - cap.length), 632 - &notify_length); 633 - 634 - pci_read_config_dword(pci_dev, 635 - notify + offsetof(struct virtio_pci_notify_cap, 636 - cap.offset), 637 - &notify_offset); 638 - 639 - /* We don't know how many VQs we'll map, ahead of the time. 640 - * If notify length is small, map it all now. 641 - * Otherwise, map each VQ individually later. 642 - */ 643 - if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { 644 - vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2, 645 - 0, notify_length, 646 - &vp_dev->notify_len); 647 - if (!vp_dev->notify_base) 648 - goto err_map_notify; 649 - } else { 650 - vp_dev->notify_map_cap = notify; 651 - } 652 - 653 - /* Again, we don't know how much we should map, but PAGE_SIZE 654 - * is more than enough for all existing devices. 
655 - */ 656 - if (device) { 657 - vp_dev->device = map_capability(pci_dev, device, 0, 4, 658 - 0, PAGE_SIZE, 659 - &vp_dev->device_len); 660 - if (!vp_dev->device) 661 - goto err_map_device; 662 - 587 + if (mdev->device) 663 588 vp_dev->vdev.config = &virtio_pci_config_ops; 664 - } else { 589 + else 665 590 vp_dev->vdev.config = &virtio_pci_config_nodev_ops; 666 - } 667 591 668 592 vp_dev->config_vector = vp_config_vector; 669 593 vp_dev->setup_vq = setup_vq; 670 594 vp_dev->del_vq = del_vq; 595 + vp_dev->isr = mdev->isr; 596 + vp_dev->vdev.id = mdev->id; 671 597 672 598 return 0; 673 - 674 - err_map_device: 675 - if (vp_dev->notify_base) 676 - pci_iounmap(pci_dev, vp_dev->notify_base); 677 - err_map_notify: 678 - pci_iounmap(pci_dev, vp_dev->isr); 679 - err_map_isr: 680 - pci_iounmap(pci_dev, vp_dev->common); 681 - err_map_common: 682 - return err; 683 599 } 684 600 685 601 void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev) 686 602 { 687 - struct pci_dev *pci_dev = vp_dev->pci_dev; 603 + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; 688 604 689 - if (vp_dev->device) 690 - pci_iounmap(pci_dev, vp_dev->device); 691 - if (vp_dev->notify_base) 692 - pci_iounmap(pci_dev, vp_dev->notify_base); 693 - pci_iounmap(pci_dev, vp_dev->isr); 694 - pci_iounmap(pci_dev, vp_dev->common); 695 - pci_release_selected_regions(pci_dev, vp_dev->modern_bars); 605 + vp_modern_remove(mdev); 696 606 }
+599
drivers/virtio/virtio_pci_modern_dev.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #include <linux/virtio_pci_modern.h> 4 + #include <linux/module.h> 5 + #include <linux/pci.h> 6 + 7 + /* 8 + * vp_modern_map_capability - map a part of virtio pci capability 9 + * @mdev: the modern virtio-pci device 10 + * @off: offset of the capability 11 + * @minlen: minimal length of the capability 12 + * @align: align requirement 13 + * @start: start from the capability 14 + * @size: map size 15 + * @len: the length that is actually mapped 16 + * 17 + * Returns the io address of for the part of the capability 18 + */ 19 + void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, 20 + size_t minlen, 21 + u32 align, 22 + u32 start, u32 size, 23 + size_t *len) 24 + { 25 + struct pci_dev *dev = mdev->pci_dev; 26 + u8 bar; 27 + u32 offset, length; 28 + void __iomem *p; 29 + 30 + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, 31 + bar), 32 + &bar); 33 + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), 34 + &offset); 35 + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), 36 + &length); 37 + 38 + if (length <= start) { 39 + dev_err(&dev->dev, 40 + "virtio_pci: bad capability len %u (>%u expected)\n", 41 + length, start); 42 + return NULL; 43 + } 44 + 45 + if (length - start < minlen) { 46 + dev_err(&dev->dev, 47 + "virtio_pci: bad capability len %u (>=%zu expected)\n", 48 + length, minlen); 49 + return NULL; 50 + } 51 + 52 + length -= start; 53 + 54 + if (start + offset < offset) { 55 + dev_err(&dev->dev, 56 + "virtio_pci: map wrap-around %u+%u\n", 57 + start, offset); 58 + return NULL; 59 + } 60 + 61 + offset += start; 62 + 63 + if (offset & (align - 1)) { 64 + dev_err(&dev->dev, 65 + "virtio_pci: offset %u not aligned to %u\n", 66 + offset, align); 67 + return NULL; 68 + } 69 + 70 + if (length > size) 71 + length = size; 72 + 73 + if (len) 74 + *len = length; 75 + 76 + if (minlen + offset < minlen || 77 + minlen + 
offset > pci_resource_len(dev, bar)) { 78 + dev_err(&dev->dev, 79 + "virtio_pci: map virtio %zu@%u " 80 + "out of range on bar %i length %lu\n", 81 + minlen, offset, 82 + bar, (unsigned long)pci_resource_len(dev, bar)); 83 + return NULL; 84 + } 85 + 86 + p = pci_iomap_range(dev, bar, offset, length); 87 + if (!p) 88 + dev_err(&dev->dev, 89 + "virtio_pci: unable to map virtio %u@%u on bar %i\n", 90 + length, offset, bar); 91 + return p; 92 + } 93 + EXPORT_SYMBOL_GPL(vp_modern_map_capability); 94 + 95 + /** 96 + * virtio_pci_find_capability - walk capabilities to find device info. 97 + * @dev: the pci device 98 + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek 99 + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. 100 + * @bars: the bitmask of BARs 101 + * 102 + * Returns offset of the capability, or 0. 103 + */ 104 + static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, 105 + u32 ioresource_types, int *bars) 106 + { 107 + int pos; 108 + 109 + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); 110 + pos > 0; 111 + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { 112 + u8 type, bar; 113 + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 114 + cfg_type), 115 + &type); 116 + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 117 + bar), 118 + &bar); 119 + 120 + /* Ignore structures with reserved BAR values */ 121 + if (bar > 0x5) 122 + continue; 123 + 124 + if (type == cfg_type) { 125 + if (pci_resource_len(dev, bar) && 126 + pci_resource_flags(dev, bar) & ioresource_types) { 127 + *bars |= (1 << bar); 128 + return pos; 129 + } 130 + } 131 + } 132 + return 0; 133 + } 134 + 135 + /* This is part of the ABI. Don't screw with it. */ 136 + static inline void check_offsets(void) 137 + { 138 + /* Note: disk space was harmed in compilation of this function. 
*/ 139 + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != 140 + offsetof(struct virtio_pci_cap, cap_vndr)); 141 + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != 142 + offsetof(struct virtio_pci_cap, cap_next)); 143 + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != 144 + offsetof(struct virtio_pci_cap, cap_len)); 145 + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != 146 + offsetof(struct virtio_pci_cap, cfg_type)); 147 + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != 148 + offsetof(struct virtio_pci_cap, bar)); 149 + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != 150 + offsetof(struct virtio_pci_cap, offset)); 151 + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != 152 + offsetof(struct virtio_pci_cap, length)); 153 + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != 154 + offsetof(struct virtio_pci_notify_cap, 155 + notify_off_multiplier)); 156 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != 157 + offsetof(struct virtio_pci_common_cfg, 158 + device_feature_select)); 159 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != 160 + offsetof(struct virtio_pci_common_cfg, device_feature)); 161 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != 162 + offsetof(struct virtio_pci_common_cfg, 163 + guest_feature_select)); 164 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != 165 + offsetof(struct virtio_pci_common_cfg, guest_feature)); 166 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != 167 + offsetof(struct virtio_pci_common_cfg, msix_config)); 168 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != 169 + offsetof(struct virtio_pci_common_cfg, num_queues)); 170 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != 171 + offsetof(struct virtio_pci_common_cfg, device_status)); 172 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != 173 + offsetof(struct virtio_pci_common_cfg, config_generation)); 174 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != 175 + offsetof(struct virtio_pci_common_cfg, queue_select)); 176 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != 177 + offsetof(struct virtio_pci_common_cfg, queue_size)); 178 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != 179 + offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); 180 
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != 181 + offsetof(struct virtio_pci_common_cfg, queue_enable)); 182 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != 183 + offsetof(struct virtio_pci_common_cfg, queue_notify_off)); 184 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != 185 + offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); 186 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != 187 + offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); 188 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != 189 + offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); 190 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != 191 + offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); 192 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != 193 + offsetof(struct virtio_pci_common_cfg, queue_used_lo)); 194 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != 195 + offsetof(struct virtio_pci_common_cfg, queue_used_hi)); 196 + } 197 + 198 + /* 199 + * vp_modern_probe: probe the modern virtio pci device, note that the 200 + * caller is required to enable PCI device before calling this function. 201 + * @mdev: the modern virtio-pci device 202 + * 203 + * Return 0 on succeed otherwise fail 204 + */ 205 + int vp_modern_probe(struct virtio_pci_modern_device *mdev) 206 + { 207 + struct pci_dev *pci_dev = mdev->pci_dev; 208 + int err, common, isr, notify, device; 209 + u32 notify_length; 210 + u32 notify_offset; 211 + 212 + check_offsets(); 213 + 214 + mdev->pci_dev = pci_dev; 215 + 216 + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ 217 + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) 218 + return -ENODEV; 219 + 220 + if (pci_dev->device < 0x1040) { 221 + /* Transitional devices: use the PCI subsystem device id as 222 + * virtio device id, same as legacy driver always did. 223 + */ 224 + mdev->id.device = pci_dev->subsystem_device; 225 + } else { 226 + /* Modern devices: simply use PCI device id, but start from 0x1040. 
*/ 227 + mdev->id.device = pci_dev->device - 0x1040; 228 + } 229 + mdev->id.vendor = pci_dev->subsystem_vendor; 230 + 231 + /* check for a common config: if not, use legacy mode (bar 0). */ 232 + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, 233 + IORESOURCE_IO | IORESOURCE_MEM, 234 + &mdev->modern_bars); 235 + if (!common) { 236 + dev_info(&pci_dev->dev, 237 + "virtio_pci: leaving for legacy driver\n"); 238 + return -ENODEV; 239 + } 240 + 241 + /* If common is there, these should be too... */ 242 + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, 243 + IORESOURCE_IO | IORESOURCE_MEM, 244 + &mdev->modern_bars); 245 + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, 246 + IORESOURCE_IO | IORESOURCE_MEM, 247 + &mdev->modern_bars); 248 + if (!isr || !notify) { 249 + dev_err(&pci_dev->dev, 250 + "virtio_pci: missing capabilities %i/%i/%i\n", 251 + common, isr, notify); 252 + return -EINVAL; 253 + } 254 + 255 + err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); 256 + if (err) 257 + err = dma_set_mask_and_coherent(&pci_dev->dev, 258 + DMA_BIT_MASK(32)); 259 + if (err) 260 + dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); 261 + 262 + /* Device capability is only mandatory for devices that have 263 + * device-specific configuration. 
264 + */ 265 + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, 266 + IORESOURCE_IO | IORESOURCE_MEM, 267 + &mdev->modern_bars); 268 + 269 + err = pci_request_selected_regions(pci_dev, mdev->modern_bars, 270 + "virtio-pci-modern"); 271 + if (err) 272 + return err; 273 + 274 + err = -EINVAL; 275 + mdev->common = vp_modern_map_capability(mdev, common, 276 + sizeof(struct virtio_pci_common_cfg), 4, 277 + 0, sizeof(struct virtio_pci_common_cfg), 278 + NULL); 279 + if (!mdev->common) 280 + goto err_map_common; 281 + mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1, 282 + 0, 1, 283 + NULL); 284 + if (!mdev->isr) 285 + goto err_map_isr; 286 + 287 + /* Read notify_off_multiplier from config space. */ 288 + pci_read_config_dword(pci_dev, 289 + notify + offsetof(struct virtio_pci_notify_cap, 290 + notify_off_multiplier), 291 + &mdev->notify_offset_multiplier); 292 + /* Read notify length and offset from config space. */ 293 + pci_read_config_dword(pci_dev, 294 + notify + offsetof(struct virtio_pci_notify_cap, 295 + cap.length), 296 + &notify_length); 297 + 298 + pci_read_config_dword(pci_dev, 299 + notify + offsetof(struct virtio_pci_notify_cap, 300 + cap.offset), 301 + &notify_offset); 302 + 303 + /* We don't know how many VQs we'll map, ahead of the time. 304 + * If notify length is small, map it all now. 305 + * Otherwise, map each VQ individually later. 306 + */ 307 + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { 308 + mdev->notify_base = vp_modern_map_capability(mdev, notify, 309 + 2, 2, 310 + 0, notify_length, 311 + &mdev->notify_len); 312 + if (!mdev->notify_base) 313 + goto err_map_notify; 314 + } else { 315 + mdev->notify_map_cap = notify; 316 + } 317 + 318 + /* Again, we don't know how much we should map, but PAGE_SIZE 319 + * is more than enough for all existing devices. 
320 + */ 321 + if (device) { 322 + mdev->device = vp_modern_map_capability(mdev, device, 0, 4, 323 + 0, PAGE_SIZE, 324 + &mdev->device_len); 325 + if (!mdev->device) 326 + goto err_map_device; 327 + } 328 + 329 + return 0; 330 + 331 + err_map_device: 332 + if (mdev->notify_base) 333 + pci_iounmap(pci_dev, mdev->notify_base); 334 + err_map_notify: 335 + pci_iounmap(pci_dev, mdev->isr); 336 + err_map_isr: 337 + pci_iounmap(pci_dev, mdev->common); 338 + err_map_common: 339 + return err; 340 + } 341 + EXPORT_SYMBOL_GPL(vp_modern_probe); 342 + 343 + /* 344 + * vp_modern_probe: remove and cleanup the modern virtio pci device 345 + * @mdev: the modern virtio-pci device 346 + */ 347 + void vp_modern_remove(struct virtio_pci_modern_device *mdev) 348 + { 349 + struct pci_dev *pci_dev = mdev->pci_dev; 350 + 351 + if (mdev->device) 352 + pci_iounmap(pci_dev, mdev->device); 353 + if (mdev->notify_base) 354 + pci_iounmap(pci_dev, mdev->notify_base); 355 + pci_iounmap(pci_dev, mdev->isr); 356 + pci_iounmap(pci_dev, mdev->common); 357 + pci_release_selected_regions(pci_dev, mdev->modern_bars); 358 + } 359 + EXPORT_SYMBOL_GPL(vp_modern_remove); 360 + 361 + /* 362 + * vp_modern_get_features - get features from device 363 + * @mdev: the modern virtio-pci device 364 + * 365 + * Returns the features read from the device 366 + */ 367 + u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev) 368 + { 369 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 370 + 371 + u64 features; 372 + 373 + vp_iowrite32(0, &cfg->device_feature_select); 374 + features = vp_ioread32(&cfg->device_feature); 375 + vp_iowrite32(1, &cfg->device_feature_select); 376 + features |= ((u64)vp_ioread32(&cfg->device_feature) << 32); 377 + 378 + return features; 379 + } 380 + EXPORT_SYMBOL_GPL(vp_modern_get_features); 381 + 382 + /* 383 + * vp_modern_set_features - set features to device 384 + * @mdev: the modern virtio-pci device 385 + * @features: the features set to device 386 + */ 387 + void 
vp_modern_set_features(struct virtio_pci_modern_device *mdev, 388 + u64 features) 389 + { 390 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 391 + 392 + vp_iowrite32(0, &cfg->guest_feature_select); 393 + vp_iowrite32((u32)features, &cfg->guest_feature); 394 + vp_iowrite32(1, &cfg->guest_feature_select); 395 + vp_iowrite32(features >> 32, &cfg->guest_feature); 396 + } 397 + EXPORT_SYMBOL_GPL(vp_modern_set_features); 398 + 399 + /* 400 + * vp_modern_generation - get the device genreation 401 + * @mdev: the modern virtio-pci device 402 + * 403 + * Returns the genreation read from device 404 + */ 405 + u32 vp_modern_generation(struct virtio_pci_modern_device *mdev) 406 + { 407 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 408 + 409 + return vp_ioread8(&cfg->config_generation); 410 + } 411 + EXPORT_SYMBOL_GPL(vp_modern_generation); 412 + 413 + /* 414 + * vp_modern_get_status - get the device status 415 + * @mdev: the modern virtio-pci device 416 + * 417 + * Returns the status read from device 418 + */ 419 + u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev) 420 + { 421 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 422 + 423 + return vp_ioread8(&cfg->device_status); 424 + } 425 + EXPORT_SYMBOL_GPL(vp_modern_get_status); 426 + 427 + /* 428 + * vp_modern_set_status - set status to device 429 + * @mdev: the modern virtio-pci device 430 + * @status: the status set to device 431 + */ 432 + void vp_modern_set_status(struct virtio_pci_modern_device *mdev, 433 + u8 status) 434 + { 435 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 436 + 437 + vp_iowrite8(status, &cfg->device_status); 438 + } 439 + EXPORT_SYMBOL_GPL(vp_modern_set_status); 440 + 441 + /* 442 + * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue 443 + * @mdev: the modern virtio-pci device 444 + * @index: queue index 445 + * @vector: the config vector 446 + * 447 + * Returns the config vector read from the device 448 + */ 449 + u16 
vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, 450 + u16 index, u16 vector) 451 + { 452 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 453 + 454 + vp_iowrite16(index, &cfg->queue_select); 455 + vp_iowrite16(vector, &cfg->queue_msix_vector); 456 + /* Flush the write out to device */ 457 + return vp_ioread16(&cfg->queue_msix_vector); 458 + } 459 + EXPORT_SYMBOL_GPL(vp_modern_queue_vector); 460 + 461 + /* 462 + * vp_modern_config_vector - set the vector for config interrupt 463 + * @mdev: the modern virtio-pci device 464 + * @vector: the config vector 465 + * 466 + * Returns the config vector read from the device 467 + */ 468 + u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, 469 + u16 vector) 470 + { 471 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 472 + 473 + /* Setup the vector used for configuration events */ 474 + vp_iowrite16(vector, &cfg->msix_config); 475 + /* Verify we had enough resources to assign the vector */ 476 + /* Will also flush the write out to device */ 477 + return vp_ioread16(&cfg->msix_config); 478 + } 479 + EXPORT_SYMBOL_GPL(vp_modern_config_vector); 480 + 481 + /* 482 + * vp_modern_queue_address - set the virtqueue address 483 + * @mdev: the modern virtio-pci device 484 + * @index: the queue index 485 + * @desc_addr: address of the descriptor area 486 + * @driver_addr: address of the driver area 487 + * @device_addr: address of the device area 488 + */ 489 + void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, 490 + u16 index, u64 desc_addr, u64 driver_addr, 491 + u64 device_addr) 492 + { 493 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 494 + 495 + vp_iowrite16(index, &cfg->queue_select); 496 + 497 + vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo, 498 + &cfg->queue_desc_hi); 499 + vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo, 500 + &cfg->queue_avail_hi); 501 + vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo, 502 + 
&cfg->queue_used_hi); 503 + } 504 + EXPORT_SYMBOL_GPL(vp_modern_queue_address); 505 + 506 + /* 507 + * vp_modern_set_queue_enable - enable a virtqueue 508 + * @mdev: the modern virtio-pci device 509 + * @index: the queue index 510 + * @enable: whether the virtqueue is enable or not 511 + */ 512 + void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, 513 + u16 index, bool enable) 514 + { 515 + vp_iowrite16(index, &mdev->common->queue_select); 516 + vp_iowrite16(enable, &mdev->common->queue_enable); 517 + } 518 + EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable); 519 + 520 + /* 521 + * vp_modern_get_queue_enable - enable a virtqueue 522 + * @mdev: the modern virtio-pci device 523 + * @index: the queue index 524 + * 525 + * Returns whether a virtqueue is enabled or not 526 + */ 527 + bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, 528 + u16 index) 529 + { 530 + vp_iowrite16(index, &mdev->common->queue_select); 531 + 532 + return vp_ioread16(&mdev->common->queue_enable); 533 + } 534 + EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable); 535 + 536 + /* 537 + * vp_modern_set_queue_size - set size for a virtqueue 538 + * @mdev: the modern virtio-pci device 539 + * @index: the queue index 540 + * @size: the size of the virtqueue 541 + */ 542 + void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, 543 + u16 index, u16 size) 544 + { 545 + vp_iowrite16(index, &mdev->common->queue_select); 546 + vp_iowrite16(size, &mdev->common->queue_size); 547 + 548 + } 549 + EXPORT_SYMBOL_GPL(vp_modern_set_queue_size); 550 + 551 + /* 552 + * vp_modern_get_queue_size - get size for a virtqueue 553 + * @mdev: the modern virtio-pci device 554 + * @index: the queue index 555 + * 556 + * Returns the size of the virtqueue 557 + */ 558 + u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, 559 + u16 index) 560 + { 561 + vp_iowrite16(index, &mdev->common->queue_select); 562 + 563 + return vp_ioread16(&mdev->common->queue_size); 564 + 565 + 
} 566 + EXPORT_SYMBOL_GPL(vp_modern_get_queue_size); 567 + 568 + /* 569 + * vp_modern_get_num_queues - get the number of virtqueues 570 + * @mdev: the modern virtio-pci device 571 + * 572 + * Returns the number of virtqueues 573 + */ 574 + u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev) 575 + { 576 + return vp_ioread16(&mdev->common->num_queues); 577 + } 578 + EXPORT_SYMBOL_GPL(vp_modern_get_num_queues); 579 + 580 + /* 581 + * vp_modern_get_queue_notify_off - get notification offset for a virtqueue 582 + * @mdev: the modern virtio-pci device 583 + * @index: the queue index 584 + * 585 + * Returns the notification offset for a virtqueue 586 + */ 587 + u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, 588 + u16 index) 589 + { 590 + vp_iowrite16(index, &mdev->common->queue_select); 591 + 592 + return vp_ioread16(&mdev->common->queue_notify_off); 593 + } 594 + EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off); 595 + 596 + MODULE_VERSION("0.1"); 597 + MODULE_DESCRIPTION("Modern Virtio PCI Device"); 598 + MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>"); 599 + MODULE_LICENSE("GPL");
+1 -2
drivers/virtio/virtio_vdpa.c
··· 225 225 list_del(&info->node); 226 226 spin_unlock_irqrestore(&vd_dev->lock, flags); 227 227 228 - /* Select and deactivate the queue */ 228 + /* Select and deactivate the queue (best effort) */ 229 229 ops->set_vq_ready(vdpa, index, 0); 230 - WARN_ON(ops->get_vq_ready(vdpa, index)); 231 230 232 231 vring_del_virtqueue(vq); 233 232
+40 -4
include/linux/vdpa.h
··· 35 35 u16 avail_index; 36 36 }; 37 37 38 + struct vdpa_mgmt_dev; 39 + 38 40 /** 39 41 * vDPA device - representation of a vDPA device 40 42 * @dev: underlying device ··· 45 43 * @index: device index 46 44 * @features_valid: were features initialized? for legacy guests 47 45 * @nvqs: maximum number of supported virtqueues 46 + * @mdev: management device pointer; caller must setup when registering device as part 47 + * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device(). 48 48 */ 49 49 struct vdpa_device { 50 50 struct device dev; ··· 55 51 unsigned int index; 56 52 bool features_valid; 57 53 int nvqs; 54 + struct vdpa_mgmt_dev *mdev; 58 55 }; 59 56 60 57 /** ··· 250 245 251 246 struct vdpa_device *__vdpa_alloc_device(struct device *parent, 252 247 const struct vdpa_config_ops *config, 253 - int nvqs, 254 - size_t size); 248 + int nvqs, size_t size, const char *name); 255 249 256 - #define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \ 250 + #define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \ 257 251 container_of(__vdpa_alloc_device( \ 258 252 parent, config, nvqs, \ 259 253 sizeof(dev_struct) + \ 260 254 BUILD_BUG_ON_ZERO(offsetof( \ 261 - dev_struct, member))), \ 255 + dev_struct, member)), name), \ 262 256 dev_struct, member) 263 257 264 258 int vdpa_register_device(struct vdpa_device *vdev); 265 259 void vdpa_unregister_device(struct vdpa_device *vdev); 260 + 261 + int _vdpa_register_device(struct vdpa_device *vdev); 262 + void _vdpa_unregister_device(struct vdpa_device *vdev); 266 263 267 264 /** 268 265 * vdpa_driver - operations for a vDPA driver ··· 342 335 vdpa_set_features(vdev, 0); 343 336 ops->get_config(vdev, offset, buf, len); 344 337 } 338 + 339 + /** 340 + * vdpa_mgmtdev_ops - vdpa device ops 341 + * @dev_add: Add a vdpa device using alloc and register 342 + * @mdev: parent device to use for device addition 343 + * @name: name of the new vdpa device 344 + * Driver need to add a new 
device using _vdpa_register_device() 345 + * after fully initializing the vdpa device. Driver must return 0 346 + * on success or appropriate error code. 347 + * @dev_del: Remove a vdpa device using unregister 348 + * @mdev: parent device to use for device removal 349 + * @dev: vdpa device to remove 350 + * Driver need to remove the specified device by calling 351 + * _vdpa_unregister_device(). 352 + */ 353 + struct vdpa_mgmtdev_ops { 354 + int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); 355 + void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev); 356 + }; 357 + 358 + struct vdpa_mgmt_dev { 359 + struct device *device; 360 + const struct vdpa_mgmtdev_ops *ops; 361 + const struct virtio_device_id *id_table; /* supported ids */ 362 + struct list_head list; 363 + }; 364 + 365 + int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev); 366 + void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev); 345 367 346 368 #endif /* _LINUX_VDPA_H */
+111
include/linux/virtio_pci_modern.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_VIRTIO_PCI_MODERN_H 3 + #define _LINUX_VIRTIO_PCI_MODERN_H 4 + 5 + #include <linux/pci.h> 6 + #include <linux/virtio_pci.h> 7 + 8 + struct virtio_pci_modern_device { 9 + struct pci_dev *pci_dev; 10 + 11 + struct virtio_pci_common_cfg __iomem *common; 12 + /* Device-specific data (non-legacy mode) */ 13 + void __iomem *device; 14 + /* Base of vq notifications (non-legacy mode). */ 15 + void __iomem *notify_base; 16 + /* Where to read and clear interrupt */ 17 + u8 __iomem *isr; 18 + 19 + /* So we can sanity-check accesses. */ 20 + size_t notify_len; 21 + size_t device_len; 22 + 23 + /* Capability for when we need to map notifications per-vq. */ 24 + int notify_map_cap; 25 + 26 + /* Multiply queue_notify_off by this value. (non-legacy mode). */ 27 + u32 notify_offset_multiplier; 28 + 29 + int modern_bars; 30 + 31 + struct virtio_device_id id; 32 + }; 33 + 34 + /* 35 + * Type-safe wrappers for io accesses. 36 + * Use these to enforce at compile time the following spec requirement: 37 + * 38 + * The driver MUST access each field using the “natural” access 39 + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses 40 + * for 16-bit fields and 8-bit accesses for 8-bit fields. 
41 + */ 42 + static inline u8 vp_ioread8(const u8 __iomem *addr) 43 + { 44 + return ioread8(addr); 45 + } 46 + static inline u16 vp_ioread16 (const __le16 __iomem *addr) 47 + { 48 + return ioread16(addr); 49 + } 50 + 51 + static inline u32 vp_ioread32(const __le32 __iomem *addr) 52 + { 53 + return ioread32(addr); 54 + } 55 + 56 + static inline void vp_iowrite8(u8 value, u8 __iomem *addr) 57 + { 58 + iowrite8(value, addr); 59 + } 60 + 61 + static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) 62 + { 63 + iowrite16(value, addr); 64 + } 65 + 66 + static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) 67 + { 68 + iowrite32(value, addr); 69 + } 70 + 71 + static inline void vp_iowrite64_twopart(u64 val, 72 + __le32 __iomem *lo, 73 + __le32 __iomem *hi) 74 + { 75 + vp_iowrite32((u32)val, lo); 76 + vp_iowrite32(val >> 32, hi); 77 + } 78 + 79 + u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev); 80 + void vp_modern_set_features(struct virtio_pci_modern_device *mdev, 81 + u64 features); 82 + u32 vp_modern_generation(struct virtio_pci_modern_device *mdev); 83 + u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev); 84 + void vp_modern_set_status(struct virtio_pci_modern_device *mdev, 85 + u8 status); 86 + u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, 87 + u16 idx, u16 vector); 88 + u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, 89 + u16 vector); 90 + void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, 91 + u16 index, u64 desc_addr, u64 driver_addr, 92 + u64 device_addr); 93 + void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, 94 + u16 idx, bool enable); 95 + bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, 96 + u16 idx); 97 + void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, 98 + u16 idx, u16 size); 99 + u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, 100 + u16 idx); 101 + u16 
vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); 102 + u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, 103 + u16 idx); 104 + void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, 105 + size_t minlen, 106 + u32 align, 107 + u32 start, u32 size, 108 + size_t *len); 109 + int vp_modern_probe(struct virtio_pci_modern_device *mdev); 110 + void vp_modern_remove(struct virtio_pci_modern_device *mdev); 111 + #endif
+40
include/uapi/linux/vdpa.h
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
 * vdpa device management interface
 * Copyright (c) 2020 Mellanox Technologies Ltd. All rights reserved.
 */

#ifndef _UAPI_LINUX_VDPA_H_
#define _UAPI_LINUX_VDPA_H_

#define VDPA_GENL_NAME "vdpa"
#define VDPA_GENL_VERSION 0x1

/*
 * Generic netlink commands of the "vdpa" family.
 * Values are implicit and ABI-frozen: UNSPEC is 0 and each entry
 * increments by one; new commands may only be appended.
 */
enum vdpa_command {
	VDPA_CMD_UNSPEC,
	VDPA_CMD_MGMTDEV_NEW,
	VDPA_CMD_MGMTDEV_GET,		/* can dump */
	VDPA_CMD_DEV_NEW,
	VDPA_CMD_DEV_DEL,
	VDPA_CMD_DEV_GET,		/* can dump */
};

/*
 * Netlink attributes carried by the commands above; the trailing
 * comment on each entry gives its netlink payload type.
 */
enum vdpa_attr {
	VDPA_ATTR_UNSPEC,

	/* bus name (optional) + dev name together make the parent device handle */
	VDPA_ATTR_MGMTDEV_BUS_NAME,		/* string */
	VDPA_ATTR_MGMTDEV_DEV_NAME,		/* string */
	VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,	/* u64 */

	VDPA_ATTR_DEV_NAME,			/* string */
	VDPA_ATTR_DEV_ID,			/* u32 */
	VDPA_ATTR_DEV_VENDOR_ID,		/* u32 */
	VDPA_ATTR_DEV_MAX_VQS,			/* u32 */
	VDPA_ATTR_DEV_MAX_VQ_SIZE,		/* u16 */

	/* new attributes must be added above here */
	VDPA_ATTR_MAX,
};

#endif