Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vdpa_sim: move buffer allocation in the devices

Currently, the vdpa_sim core does not use the buffer, but only
allocates it.

The buffer is used by devices differently, and some future devices
may not use it. So let's move all its management inside the devices.

Add a new `free` device callback called to clean up the resources
allocated by the device.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-Id: <20230407133658.66339-2-sgarzare@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

Authored by Stefano Garzarella; committed by Michael S. Tsirkin.
112f23cd 5b250fac

+57 -21
+2 -5
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 261 261 for (i = 0; i < vdpasim->dev_attr.nas; i++) 262 262 vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0); 263 263 264 - vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL); 265 - if (!vdpasim->buffer) 266 - goto err_iommu; 267 - 268 264 for (i = 0; i < dev_attr->nvqs; i++) 269 265 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], 270 266 &vdpasim->iommu_lock); ··· 710 714 vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); 711 715 } 712 716 713 - kvfree(vdpasim->buffer); 717 + vdpasim->dev_attr.free(vdpasim); 718 + 714 719 for (i = 0; i < vdpasim->dev_attr.nas; i++) 715 720 vhost_iotlb_reset(&vdpasim->iommu[i]); 716 721 kfree(vdpasim->iommu);
+1 -2
drivers/vdpa/vdpa_sim/vdpa_sim.h
··· 39 39 u64 supported_features; 40 40 size_t alloc_size; 41 41 size_t config_size; 42 - size_t buffer_size; 43 42 int nvqs; 44 43 u32 id; 45 44 u32 ngroups; ··· 50 51 int (*get_stats)(struct vdpasim *vdpasim, u16 idx, 51 52 struct sk_buff *msg, 52 53 struct netlink_ext_ack *extack); 54 + void (*free)(struct vdpasim *vdpasim); 53 55 }; 54 56 55 57 /* State of each vdpasim device */ ··· 67 67 void *config; 68 68 struct vhost_iotlb *iommu; 69 69 bool *iommu_pt; 70 - void *buffer; 71 70 u32 status; 72 71 u32 generation; 73 72 u64 features;
+33 -7
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
··· 43 43 #define VDPASIM_BLK_AS_NUM 1 44 44 #define VDPASIM_BLK_GROUP_NUM 1 45 45 46 + struct vdpasim_blk { 47 + struct vdpasim vdpasim; 48 + void *buffer; 49 + }; 50 + 51 + static struct vdpasim_blk *sim_to_blk(struct vdpasim *vdpasim) 52 + { 53 + return container_of(vdpasim, struct vdpasim_blk, vdpasim); 54 + } 55 + 46 56 static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim"; 47 57 48 58 static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector, ··· 88 78 static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim, 89 79 struct vdpasim_virtqueue *vq) 90 80 { 81 + struct vdpasim_blk *blk = sim_to_blk(vdpasim); 91 82 size_t pushed = 0, to_pull, to_push; 92 83 struct virtio_blk_outhdr hdr; 93 84 bool handled = false; ··· 155 144 } 156 145 157 146 bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, 158 - vdpasim->buffer + offset, 159 - to_push); 147 + blk->buffer + offset, to_push); 160 148 if (bytes < 0) { 161 149 dev_dbg(&vdpasim->vdpa.dev, 162 150 "vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n", ··· 176 166 } 177 167 178 168 bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, 179 - vdpasim->buffer + offset, 180 - to_pull); 169 + blk->buffer + offset, to_pull); 181 170 if (bytes < 0) { 182 171 dev_dbg(&vdpasim->vdpa.dev, 183 172 "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n", ··· 256 247 } 257 248 258 249 if (type == VIRTIO_BLK_T_WRITE_ZEROES) { 259 - memset(vdpasim->buffer + offset, 0, 250 + memset(blk->buffer + offset, 0, 260 251 num_sectors << SECTOR_SHIFT); 261 252 } 262 253 ··· 362 353 363 354 } 364 355 356 + static void vdpasim_blk_free(struct vdpasim *vdpasim) 357 + { 358 + struct vdpasim_blk *blk = sim_to_blk(vdpasim); 359 + 360 + kvfree(blk->buffer); 361 + } 362 + 365 363 static void vdpasim_blk_mgmtdev_release(struct device *dev) 366 364 { 367 365 } ··· 382 366 const struct vdpa_dev_set_config *config) 383 367 { 384 368 struct vdpasim_dev_attr dev_attr = {}; 369 + struct vdpasim_blk *blk; 385 370 struct vdpasim *simdev; 386 371 int ret; 387 372 ··· 393 376 dev_attr.nvqs = VDPASIM_BLK_VQ_NUM; 394 377 dev_attr.ngroups = VDPASIM_BLK_GROUP_NUM; 395 378 dev_attr.nas = VDPASIM_BLK_AS_NUM; 396 - dev_attr.alloc_size = sizeof(struct vdpasim); 379 + dev_attr.alloc_size = sizeof(struct vdpasim_blk); 397 380 dev_attr.config_size = sizeof(struct virtio_blk_config); 398 381 dev_attr.get_config = vdpasim_blk_get_config; 399 382 dev_attr.work_fn = vdpasim_blk_work; 400 - dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT; 383 + dev_attr.free = vdpasim_blk_free; 401 384 402 385 simdev = vdpasim_create(&dev_attr, config); 403 386 if (IS_ERR(simdev)) 404 387 return PTR_ERR(simdev); 388 + 389 + blk = sim_to_blk(simdev); 390 + 391 + blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT, 392 + GFP_KERNEL); 393 + if (!blk->buffer) { 394 + ret = -ENOMEM; 395 + goto put_dev; 396 + } 405 397 406 398 ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM); 407 399 if (ret)
+21 -7
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
··· 58 58 struct vdpasim_dataq_stats tx_stats; 59 59 struct vdpasim_dataq_stats rx_stats; 60 60 struct vdpasim_cq_stats cq_stats; 61 + void *buffer; 61 62 }; 62 63 63 64 static struct vdpasim_net *sim_to_net(struct vdpasim *vdpasim) ··· 88 87 size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) : 89 88 sizeof(struct virtio_net_hdr); 90 89 struct virtio_net_config *vio_config = vdpasim->config; 90 + struct vdpasim_net *net = sim_to_net(vdpasim); 91 91 92 92 if (len < ETH_ALEN + hdr_len) 93 93 return false; 94 94 95 - if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) || 96 - is_multicast_ether_addr(vdpasim->buffer + hdr_len) 95 + if (is_broadcast_ether_addr(net->buffer + hdr_len) || 96 + is_multicast_ether_addr(net->buffer + hdr_len)) 97 97 return true; 98 - if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN)) 98 + if (!strncmp(net->buffer + hdr_len, vio_config->mac, ETH_ALEN)) 99 99 return true; 100 100 101 101 return false; ··· 227 225 228 226 ++tx_pkts; 229 227 read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov, 230 - vdpasim->buffer, 231 - PAGE_SIZE); 228 + net->buffer, PAGE_SIZE); 232 229 233 230 tx_bytes += read; 234 231 ··· 246 245 } 247 246 248 247 write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov, 249 - vdpasim->buffer, read); 248 + net->buffer, read); 250 249 if (write <= 0) { 251 250 ++rx_errors; 252 251 break; ··· 428 427 vio_config->mtu = cpu_to_vdpasim16(vdpasim, 1500); 429 428 } 430 429 430 + static void vdpasim_net_free(struct vdpasim *vdpasim) 431 + { 432 + struct vdpasim_net *net = sim_to_net(vdpasim); 433 + 434 + kvfree(net->buffer); 435 + } 436 + 431 437 static void vdpasim_net_mgmtdev_release(struct device *dev) 432 438 { 433 439 } ··· 464 456 dev_attr.get_config = vdpasim_net_get_config; 465 457 dev_attr.work_fn = vdpasim_net_work; 466 458 dev_attr.get_stats = vdpasim_net_get_stats; 467 - dev_attr.buffer_size = PAGE_SIZE; 459 + dev_attr.free = vdpasim_net_free; 468 460 469 461 simdev = vdpasim_create(&dev_attr, config); 470 462 if (IS_ERR(simdev)) ··· 477 469 u64_stats_init(&net->tx_stats.syncp); 478 470 u64_stats_init(&net->rx_stats.syncp); 479 471 u64_stats_init(&net->cq_stats.syncp); 472 + 473 + net->buffer = kvmalloc(PAGE_SIZE, GFP_KERNEL); 474 + if (!net->buffer) { 475 + ret = -ENOMEM; 476 + goto reg_err; 477 + } 480 478 481 479 /* 482 480 * Initialization must be completed before this call, since it can