Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-6.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

- fix for a UAF in the xen gntdev-dmabuf driver

- fix in the xen netfront driver avoiding spurious interrupts

- fix in the gntdev driver avoiding a large stack allocation

- cleanup removing some dead code

- build warning fix

- cleanup of the sysfs code in the xen-pciback driver

* tag 'for-linus-6.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/netfront: Fix TX response spurious interrupts
xen/gntdev: remove struct gntdev_copy_batch from stack
xen: fix UAF in dmabuf_exp_from_pages()
xen: Remove some deadcode
xen-pciback: Replace scnprintf() with sysfs_emit_at()
xen/xenbus: fix W=1 build warning in xenbus_va_dev_error function

+72 -85
-5
drivers/net/xen-netfront.c
··· 638 638 tx_stats->packets++; 639 639 u64_stats_update_end(&tx_stats->syncp); 640 640 641 - xennet_tx_buf_gc(queue); 642 - 643 641 return 0; 644 642 } 645 643 ··· 846 848 tx_stats->bytes += skb->len; 847 849 tx_stats->packets++; 848 850 u64_stats_update_end(&tx_stats->syncp); 849 - 850 - /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 851 - xennet_tx_buf_gc(queue); 852 851 853 852 if (!netfront_tx_slot_available(queue)) 854 853 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+4
drivers/xen/gntdev-common.h
··· 26 26 /* lock protects maps and freeable_maps. */ 27 27 struct mutex lock; 28 28 29 + /* Free instances of struct gntdev_copy_batch. */ 30 + struct gntdev_copy_batch *batch; 31 + struct mutex batch_lock; 32 + 29 33 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC 30 34 /* Device for which DMA memory is allocated. */ 31 35 struct device *dma_dev;
+10 -18
drivers/xen/gntdev-dmabuf.c
··· 357 357 static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args) 358 358 { 359 359 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 360 - struct gntdev_dmabuf *gntdev_dmabuf; 361 - int ret; 360 + struct gntdev_dmabuf *gntdev_dmabuf __free(kfree) = NULL; 361 + CLASS(get_unused_fd, ret)(O_CLOEXEC); 362 + 363 + if (ret < 0) 364 + return ret; 362 365 363 366 gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL); 364 367 if (!gntdev_dmabuf) ··· 386 383 exp_info.priv = gntdev_dmabuf; 387 384 388 385 gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info); 389 - if (IS_ERR(gntdev_dmabuf->dmabuf)) { 390 - ret = PTR_ERR(gntdev_dmabuf->dmabuf); 391 - gntdev_dmabuf->dmabuf = NULL; 392 - goto fail; 393 - } 394 - 395 - ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC); 396 - if (ret < 0) 397 - goto fail; 386 + if (IS_ERR(gntdev_dmabuf->dmabuf)) 387 + return PTR_ERR(gntdev_dmabuf->dmabuf); 398 388 399 389 gntdev_dmabuf->fd = ret; 400 390 args->fd = ret; 401 391 402 392 pr_debug("Exporting DMA buffer with fd %d\n", ret); 403 393 394 + get_file(gntdev_dmabuf->priv->filp); 404 395 mutex_lock(&args->dmabuf_priv->lock); 405 396 list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list); 406 397 mutex_unlock(&args->dmabuf_priv->lock); 407 - get_file(gntdev_dmabuf->priv->filp); 408 - return 0; 409 398 410 - fail: 411 - if (gntdev_dmabuf->dmabuf) 412 - dma_buf_put(gntdev_dmabuf->dmabuf); 413 - kfree(gntdev_dmabuf); 414 - return ret; 399 + fd_install(take_fd(ret), no_free_ptr(gntdev_dmabuf)->dmabuf->file); 400 + return 0; 415 401 } 416 402 417 403 static struct gntdev_grant_map *
+50 -21
drivers/xen/gntdev.c
··· 56 56 "Gerd Hoffmann <kraxel@redhat.com>"); 57 57 MODULE_DESCRIPTION("User-space granted page access driver"); 58 58 59 + #define GNTDEV_COPY_BATCH 16 60 + 61 + struct gntdev_copy_batch { 62 + struct gnttab_copy ops[GNTDEV_COPY_BATCH]; 63 + struct page *pages[GNTDEV_COPY_BATCH]; 64 + s16 __user *status[GNTDEV_COPY_BATCH]; 65 + unsigned int nr_ops; 66 + unsigned int nr_pages; 67 + bool writeable; 68 + struct gntdev_copy_batch *next; 69 + }; 70 + 59 71 static unsigned int limit = 64*1024; 60 72 module_param(limit, uint, 0644); 61 73 MODULE_PARM_DESC(limit, ··· 596 584 INIT_LIST_HEAD(&priv->maps); 597 585 mutex_init(&priv->lock); 598 586 587 + mutex_init(&priv->batch_lock); 588 + 599 589 #ifdef CONFIG_XEN_GNTDEV_DMABUF 600 590 priv->dmabuf_priv = gntdev_dmabuf_init(flip); 601 591 if (IS_ERR(priv->dmabuf_priv)) { ··· 622 608 { 623 609 struct gntdev_priv *priv = flip->private_data; 624 610 struct gntdev_grant_map *map; 611 + struct gntdev_copy_batch *batch; 625 612 626 613 pr_debug("priv %p\n", priv); 627 614 ··· 634 619 gntdev_put_map(NULL /* already removed */, map); 635 620 } 636 621 mutex_unlock(&priv->lock); 622 + 623 + mutex_lock(&priv->batch_lock); 624 + while (priv->batch) { 625 + batch = priv->batch; 626 + priv->batch = batch->next; 627 + kfree(batch); 628 + } 629 + mutex_unlock(&priv->batch_lock); 637 630 638 631 #ifdef CONFIG_XEN_GNTDEV_DMABUF 639 632 gntdev_dmabuf_fini(priv->dmabuf_priv); ··· 808 785 return rc; 809 786 } 810 787 811 - #define GNTDEV_COPY_BATCH 16 812 - 813 - struct gntdev_copy_batch { 814 - struct gnttab_copy ops[GNTDEV_COPY_BATCH]; 815 - struct page *pages[GNTDEV_COPY_BATCH]; 816 - s16 __user *status[GNTDEV_COPY_BATCH]; 817 - unsigned int nr_ops; 818 - unsigned int nr_pages; 819 - bool writeable; 820 - }; 821 - 822 788 static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt, 823 789 unsigned long *gfn) 824 790 { ··· 965 953 static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u) 966 954 { 967 
955 struct ioctl_gntdev_grant_copy copy; 968 - struct gntdev_copy_batch batch; 956 + struct gntdev_copy_batch *batch; 969 957 unsigned int i; 970 958 int ret = 0; 971 959 972 960 if (copy_from_user(&copy, u, sizeof(copy))) 973 961 return -EFAULT; 974 962 975 - batch.nr_ops = 0; 976 - batch.nr_pages = 0; 963 + mutex_lock(&priv->batch_lock); 964 + if (!priv->batch) { 965 + batch = kmalloc(sizeof(*batch), GFP_KERNEL); 966 + } else { 967 + batch = priv->batch; 968 + priv->batch = batch->next; 969 + } 970 + mutex_unlock(&priv->batch_lock); 971 + if (!batch) 972 + return -ENOMEM; 973 + 974 + batch->nr_ops = 0; 975 + batch->nr_pages = 0; 977 976 978 977 for (i = 0; i < copy.count; i++) { 979 978 struct gntdev_grant_copy_segment seg; 980 979 981 980 if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) { 982 981 ret = -EFAULT; 982 + gntdev_put_pages(batch); 983 983 goto out; 984 984 } 985 985 986 - ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status); 987 - if (ret < 0) 986 + ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status); 987 + if (ret < 0) { 988 + gntdev_put_pages(batch); 988 989 goto out; 990 + } 989 991 990 992 cond_resched(); 991 993 } 992 - if (batch.nr_ops) 993 - ret = gntdev_copy(&batch); 994 - return ret; 994 + if (batch->nr_ops) 995 + ret = gntdev_copy(batch); 995 996 996 - out: 997 - gntdev_put_pages(&batch); 997 + out: 998 + mutex_lock(&priv->batch_lock); 999 + batch->next = priv->batch; 1000 + priv->batch = batch; 1001 + mutex_unlock(&priv->batch_lock); 1002 + 998 1003 return ret; 999 1004 } 1000 1005
-6
drivers/xen/manage.c
··· 52 52 } 53 53 EXPORT_SYMBOL_GPL(xen_resume_notifier_register); 54 54 55 - void xen_resume_notifier_unregister(struct notifier_block *nb) 56 - { 57 - raw_notifier_chain_unregister(&xen_resume_notifier, nb); 58 - } 59 - EXPORT_SYMBOL_GPL(xen_resume_notifier_unregister); 60 - 61 55 #ifdef CONFIG_HIBERNATE_CALLBACKS 62 56 static int xen_suspend(void *data) 63 57 {
-8
drivers/xen/time.c
··· 136 136 } 137 137 } 138 138 139 - /* 140 - * Runstate accounting 141 - */ 142 - void xen_get_runstate_snapshot(struct vcpu_runstate_info *res) 143 - { 144 - xen_get_runstate_snapshot_cpu(res, smp_processor_id()); 145 - } 146 - 147 139 /* return true when a vcpu could run but has no real cpu to run on */ 148 140 bool xen_vcpu_stolen(int vcpu) 149 141 {
+6 -6
drivers/xen/xen-pciback/pci_stub.c
··· 1261 1261 if (count >= PAGE_SIZE) 1262 1262 break; 1263 1263 1264 - count += scnprintf(buf + count, PAGE_SIZE - count, 1264 + count += sysfs_emit_at(buf, count, 1265 1265 "%04x:%02x:%02x.%d\n", 1266 1266 pci_dev_id->domain, pci_dev_id->bus, 1267 1267 PCI_SLOT(pci_dev_id->devfn), ··· 1290 1290 if (!dev_data) 1291 1291 continue; 1292 1292 count += 1293 - scnprintf(buf + count, PAGE_SIZE - count, 1293 + sysfs_emit_at(buf, count, 1294 1294 "%s:%s:%sing:%ld\n", 1295 1295 pci_name(psdev->dev), 1296 1296 dev_data->isr_on ? "on" : "off", ··· 1375 1375 if (count >= PAGE_SIZE) 1376 1376 goto out; 1377 1377 1378 - count += scnprintf(buf + count, PAGE_SIZE - count, 1378 + count += sysfs_emit_at(buf, count, 1379 1379 "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n", 1380 1380 quirk->pdev->bus->number, 1381 1381 PCI_SLOT(quirk->pdev->devfn), ··· 1391 1391 if (count >= PAGE_SIZE) 1392 1392 goto out; 1393 1393 1394 - count += scnprintf(buf + count, PAGE_SIZE - count, 1394 + count += sysfs_emit_at(buf, count, 1395 1395 "\t\t%08x:%01x:%08x\n", 1396 1396 cfg_entry->base_offset + 1397 1397 field->offset, field->size, ··· 1462 1462 if (!dev_data || !dev_data->permissive) 1463 1463 continue; 1464 1464 count += 1465 - scnprintf(buf + count, PAGE_SIZE - count, "%s\n", 1465 + sysfs_emit_at(buf, count, "%s\n", 1466 1466 pci_name(psdev->dev)); 1467 1467 } 1468 1468 spin_unlock_irqrestore(&pcistub_devices_lock, flags); ··· 1521 1521 if (!dev_data || !dev_data->allow_interrupt_control) 1522 1522 continue; 1523 1523 count += 1524 - scnprintf(buf + count, PAGE_SIZE - count, "%s\n", 1524 + sysfs_emit_at(buf, count, "%s\n", 1525 1525 pci_name(psdev->dev)); 1526 1526 } 1527 1527 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+2
drivers/xen/xenbus/xenbus_client.c
··· 202 202 } 203 203 EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); 204 204 205 + __printf(4, 5) 205 206 static void xenbus_switch_fatal(struct xenbus_device *, int, int, 206 207 const char *, ...); 207 208 ··· 288 287 } 289 288 EXPORT_SYMBOL_GPL(xenbus_frontend_closed); 290 289 290 + __printf(3, 0) 291 291 static void xenbus_va_dev_error(struct xenbus_device *dev, int err, 292 292 const char *fmt, va_list ap) 293 293 {
-17
drivers/xen/xenbus/xenbus_xs.c
··· 512 512 } 513 513 EXPORT_SYMBOL_GPL(xenbus_write); 514 514 515 - /* Create a new directory. */ 516 - int xenbus_mkdir(struct xenbus_transaction t, 517 - const char *dir, const char *node) 518 - { 519 - char *path; 520 - int ret; 521 - 522 - path = join(dir, node); 523 - if (IS_ERR(path)) 524 - return PTR_ERR(path); 525 - 526 - ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); 527 - kfree(path); 528 - return ret; 529 - } 530 - EXPORT_SYMBOL_GPL(xenbus_mkdir); 531 - 532 515 /* Destroy a file or directory (directories must be empty). */ 533 516 int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) 534 517 {
-2
include/xen/xen-ops.h
··· 30 30 void xen_reboot(int reason); 31 31 32 32 void xen_resume_notifier_register(struct notifier_block *nb); 33 - void xen_resume_notifier_unregister(struct notifier_block *nb); 34 33 35 34 bool xen_vcpu_stolen(int vcpu); 36 35 void xen_setup_runstate_info(int cpu); 37 36 void xen_time_setup_guest(void); 38 37 void xen_manage_runstate_time(int action); 39 - void xen_get_runstate_snapshot(struct vcpu_runstate_info *res); 40 38 u64 xen_steal_clock(int cpu); 41 39 42 40 int xen_setup_shutdown_event(void);
-2
include/xen/xenbus.h
··· 154 154 const char *dir, const char *node, unsigned int *len); 155 155 int xenbus_write(struct xenbus_transaction t, 156 156 const char *dir, const char *node, const char *string); 157 - int xenbus_mkdir(struct xenbus_transaction t, 158 - const char *dir, const char *node); 159 157 int xenbus_exists(struct xenbus_transaction t, 160 158 const char *dir, const char *node); 161 159 int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);