Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'char-misc-linus' into 'char-misc-next'

We need the virtbox changes in here as well to build on top of.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+172 -112
+2 -1
drivers/block/zram/zram_drv.c
··· 2021 2021 return ret; 2022 2022 return scnprintf(buf, PAGE_SIZE, "%d\n", ret); 2023 2023 } 2024 - static CLASS_ATTR_RO(hot_add); 2024 + static struct class_attribute class_attr_hot_add = 2025 + __ATTR(hot_add, 0400, hot_add_show, NULL); 2025 2026 2026 2027 static ssize_t hot_remove_store(struct class *class, 2027 2028 struct class_attribute *attr,
+54 -42
drivers/hwtracing/coresight/coresight-cti.c
··· 747 747 return 0; 748 748 } 749 749 750 + static int cti_pm_setup(struct cti_drvdata *drvdata) 751 + { 752 + int ret; 753 + 754 + if (drvdata->ctidev.cpu == -1) 755 + return 0; 756 + 757 + if (nr_cti_cpu) 758 + goto done; 759 + 760 + cpus_read_lock(); 761 + ret = cpuhp_setup_state_nocalls_cpuslocked( 762 + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, 763 + "arm/coresight_cti:starting", 764 + cti_starting_cpu, cti_dying_cpu); 765 + if (ret) { 766 + cpus_read_unlock(); 767 + return ret; 768 + } 769 + 770 + ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); 771 + cpus_read_unlock(); 772 + if (ret) { 773 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); 774 + return ret; 775 + } 776 + 777 + done: 778 + nr_cti_cpu++; 779 + cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; 780 + 781 + return 0; 782 + } 783 + 750 784 /* release PM registrations */ 751 785 static void cti_pm_release(struct cti_drvdata *drvdata) 752 786 { 753 - if (drvdata->ctidev.cpu >= 0) { 754 - if (--nr_cti_cpu == 0) { 755 - cpu_pm_unregister_notifier(&cti_cpu_pm_nb); 787 + if (drvdata->ctidev.cpu == -1) 788 + return; 756 789 757 - cpuhp_remove_state_nocalls( 758 - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); 759 - } 760 - cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; 790 + cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; 791 + if (--nr_cti_cpu == 0) { 792 + cpu_pm_unregister_notifier(&cti_cpu_pm_nb); 793 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); 761 794 } 762 795 } 763 796 ··· 856 823 857 824 /* driver data*/ 858 825 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); 859 - if (!drvdata) { 860 - ret = -ENOMEM; 861 - dev_info(dev, "%s, mem err\n", __func__); 862 - goto err_out; 863 - } 826 + if (!drvdata) 827 + return -ENOMEM; 864 828 865 829 /* Validity for the resource is already checked by the AMBA core */ 866 830 base = devm_ioremap_resource(dev, res); 867 - if (IS_ERR(base)) { 868 - ret = PTR_ERR(base); 869 - dev_err(dev, "%s, remap err\n", __func__); 870 - goto err_out; 
871 - } 831 + if (IS_ERR(base)) 832 + return PTR_ERR(base); 833 + 872 834 drvdata->base = base; 873 835 874 836 dev_set_drvdata(dev, drvdata); ··· 882 854 pdata = coresight_cti_get_platform_data(dev); 883 855 if (IS_ERR(pdata)) { 884 856 dev_err(dev, "coresight_cti_get_platform_data err\n"); 885 - ret = PTR_ERR(pdata); 886 - goto err_out; 857 + return PTR_ERR(pdata); 887 858 } 888 859 889 860 /* default to powered - could change on PM notifications */ ··· 894 867 drvdata->ctidev.cpu); 895 868 else 896 869 cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev); 897 - if (!cti_desc.name) { 898 - ret = -ENOMEM; 899 - goto err_out; 900 - } 870 + if (!cti_desc.name) 871 + return -ENOMEM; 901 872 902 873 /* setup CPU power management handling for CPU bound CTI devices. */ 903 - if (drvdata->ctidev.cpu >= 0) { 904 - cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; 905 - if (!nr_cti_cpu++) { 906 - cpus_read_lock(); 907 - ret = cpuhp_setup_state_nocalls_cpuslocked( 908 - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, 909 - "arm/coresight_cti:starting", 910 - cti_starting_cpu, cti_dying_cpu); 911 - 912 - if (!ret) 913 - ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); 914 - cpus_read_unlock(); 915 - if (ret) 916 - goto err_out; 917 - } 918 - } 874 + ret = cti_pm_setup(drvdata); 875 + if (ret) 876 + return ret; 919 877 920 878 /* create dynamic attributes for connections */ 921 879 ret = cti_create_cons_sysfs(dev, drvdata); 922 880 if (ret) { 923 881 dev_err(dev, "%s: create dynamic sysfs entries failed\n", 924 882 cti_desc.name); 925 - goto err_out; 883 + goto pm_release; 926 884 } 927 885 928 886 /* set up coresight component description */ ··· 920 908 drvdata->csdev = coresight_register(&cti_desc); 921 909 if (IS_ERR(drvdata->csdev)) { 922 910 ret = PTR_ERR(drvdata->csdev); 923 - goto err_out; 911 + goto pm_release; 924 912 } 925 913 926 914 /* add to list of CTI devices */ ··· 939 927 dev_info(&drvdata->csdev->dev, "CTI initialized\n"); 940 928 return 0; 941 929 942 - 
err_out: 930 + pm_release: 943 931 cti_pm_release(drvdata); 944 932 return ret; 945 933 }
+54 -30
drivers/hwtracing/coresight/coresight-etm4x.c
··· 1388 1388 .notifier_call = etm4_cpu_pm_notify, 1389 1389 }; 1390 1390 1391 - static int etm4_cpu_pm_register(void) 1391 + /* Setup PM. Called with cpus locked. Deals with error conditions and counts */ 1392 + static int etm4_pm_setup_cpuslocked(void) 1392 1393 { 1393 - if (IS_ENABLED(CONFIG_CPU_PM)) 1394 - return cpu_pm_register_notifier(&etm4_cpu_pm_nb); 1394 + int ret; 1395 1395 1396 - return 0; 1396 + if (etm4_count++) 1397 + return 0; 1398 + 1399 + ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb); 1400 + if (ret) 1401 + goto reduce_count; 1402 + 1403 + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, 1404 + "arm/coresight4:starting", 1405 + etm4_starting_cpu, etm4_dying_cpu); 1406 + 1407 + if (ret) 1408 + goto unregister_notifier; 1409 + 1410 + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, 1411 + "arm/coresight4:online", 1412 + etm4_online_cpu, NULL); 1413 + 1414 + /* HP dyn state ID returned in ret on success */ 1415 + if (ret > 0) { 1416 + hp_online = ret; 1417 + return 0; 1418 + } 1419 + 1420 + /* failed dyn state - remove others */ 1421 + cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING); 1422 + 1423 + unregister_notifier: 1424 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); 1425 + 1426 + reduce_count: 1427 + --etm4_count; 1428 + return ret; 1397 1429 } 1398 1430 1399 - static void etm4_cpu_pm_unregister(void) 1431 + static void etm4_pm_clear(void) 1400 1432 { 1401 - if (IS_ENABLED(CONFIG_CPU_PM)) 1402 - cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); 1433 + if (--etm4_count != 0) 1434 + return; 1435 + 1436 + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); 1437 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); 1438 + if (hp_online) { 1439 + cpuhp_remove_state_nocalls(hp_online); 1440 + hp_online = 0; 1441 + } 1403 1442 } 1404 1443 1405 1444 static int etm4_probe(struct amba_device *adev, const struct amba_id *id) ··· 1492 1453 etm4_init_arch_data, drvdata, 1)) 1493 1454 dev_err(dev, 
"ETM arch init failed\n"); 1494 1455 1495 - if (!etm4_count++) { 1496 - cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, 1497 - "arm/coresight4:starting", 1498 - etm4_starting_cpu, etm4_dying_cpu); 1499 - ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, 1500 - "arm/coresight4:online", 1501 - etm4_online_cpu, NULL); 1502 - if (ret < 0) 1503 - goto err_arch_supported; 1504 - hp_online = ret; 1505 - 1506 - ret = etm4_cpu_pm_register(); 1507 - if (ret) 1508 - goto err_arch_supported; 1509 - } 1510 - 1456 + ret = etm4_pm_setup_cpuslocked(); 1511 1457 cpus_read_unlock(); 1458 + 1459 + /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */ 1460 + if (ret) { 1461 + etmdrvdata[drvdata->cpu] = NULL; 1462 + return ret; 1463 + } 1512 1464 1513 1465 if (etm4_arch_supported(drvdata->arch) == false) { 1514 1466 ret = -EINVAL; ··· 1547 1517 1548 1518 err_arch_supported: 1549 1519 etmdrvdata[drvdata->cpu] = NULL; 1550 - if (--etm4_count == 0) { 1551 - etm4_cpu_pm_unregister(); 1552 - 1553 - cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); 1554 - if (hp_online) 1555 - cpuhp_remove_state_nocalls(hp_online); 1556 - } 1520 + etm4_pm_clear(); 1557 1521 return ret; 1558 1522 } 1559 1523
+12 -12
drivers/misc/atmel-ssc.c
··· 10 10 #include <linux/clk.h> 11 11 #include <linux/err.h> 12 12 #include <linux/io.h> 13 - #include <linux/spinlock.h> 13 + #include <linux/mutex.h> 14 14 #include <linux/atmel-ssc.h> 15 15 #include <linux/slab.h> 16 16 #include <linux/module.h> ··· 20 20 #include "../../sound/soc/atmel/atmel_ssc_dai.h" 21 21 22 22 /* Serialize access to ssc_list and user count */ 23 - static DEFINE_SPINLOCK(user_lock); 23 + static DEFINE_MUTEX(user_lock); 24 24 static LIST_HEAD(ssc_list); 25 25 26 26 struct ssc_device *ssc_request(unsigned int ssc_num) ··· 28 28 int ssc_valid = 0; 29 29 struct ssc_device *ssc; 30 30 31 - spin_lock(&user_lock); 31 + mutex_lock(&user_lock); 32 32 list_for_each_entry(ssc, &ssc_list, list) { 33 33 if (ssc->pdev->dev.of_node) { 34 34 if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") ··· 44 44 } 45 45 46 46 if (!ssc_valid) { 47 - spin_unlock(&user_lock); 47 + mutex_unlock(&user_lock); 48 48 pr_err("ssc: ssc%d platform device is missing\n", ssc_num); 49 49 return ERR_PTR(-ENODEV); 50 50 } 51 51 52 52 if (ssc->user) { 53 - spin_unlock(&user_lock); 53 + mutex_unlock(&user_lock); 54 54 dev_dbg(&ssc->pdev->dev, "module busy\n"); 55 55 return ERR_PTR(-EBUSY); 56 56 } 57 57 ssc->user++; 58 - spin_unlock(&user_lock); 58 + mutex_unlock(&user_lock); 59 59 60 60 clk_prepare(ssc->clk); 61 61 ··· 67 67 { 68 68 bool disable_clk = true; 69 69 70 - spin_lock(&user_lock); 70 + mutex_lock(&user_lock); 71 71 if (ssc->user) 72 72 ssc->user--; 73 73 else { 74 74 disable_clk = false; 75 75 dev_dbg(&ssc->pdev->dev, "device already free\n"); 76 76 } 77 - spin_unlock(&user_lock); 77 + mutex_unlock(&user_lock); 78 78 79 79 if (disable_clk) 80 80 clk_unprepare(ssc->clk); ··· 237 237 return -ENXIO; 238 238 } 239 239 240 - spin_lock(&user_lock); 240 + mutex_lock(&user_lock); 241 241 list_add_tail(&ssc->list, &ssc_list); 242 - spin_unlock(&user_lock); 242 + mutex_unlock(&user_lock); 243 243 244 244 platform_set_drvdata(pdev, ssc); 245 245 ··· 258 258 259 259 
ssc_sound_dai_remove(ssc); 260 260 261 - spin_lock(&user_lock); 261 + mutex_lock(&user_lock); 262 262 list_del(&ssc->list); 263 - spin_unlock(&user_lock); 263 + mutex_unlock(&user_lock); 264 264 265 265 return 0; 266 266 }
+1 -2
drivers/misc/mei/bus.c
··· 745 745 746 746 mei_cl_bus_module_put(cldev); 747 747 module_put(THIS_MODULE); 748 - dev->driver = NULL; 749 - return ret; 750 748 749 + return ret; 751 750 } 752 751 753 752 static ssize_t name_show(struct device *dev, struct device_attribute *a,
+3 -2
drivers/phy/allwinner/phy-sun4i-usb.c
··· 545 545 struct sun4i_usb_phy_data *data = 546 546 container_of(work, struct sun4i_usb_phy_data, detect.work); 547 547 struct phy *phy0 = data->phys[0].phy; 548 - struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); 548 + struct sun4i_usb_phy *phy; 549 549 bool force_session_end, id_notify = false, vbus_notify = false; 550 550 int id_det, vbus_det; 551 551 552 - if (phy0 == NULL) 552 + if (!phy0) 553 553 return; 554 554 555 + phy = phy_get_drvdata(phy0); 555 556 id_det = sun4i_usb_phy0_get_id_det(data); 556 557 vbus_det = sun4i_usb_phy0_get_vbus_det(data); 557 558
+8 -6
drivers/phy/intel/phy-intel-combo.c
··· 134 134 135 135 reg_val = readl(base + reg); 136 136 reg_val &= ~mask; 137 - reg_val |= FIELD_PREP(mask, val); 137 + reg_val |= val; 138 138 writel(reg_val, base + reg); 139 139 } 140 140 ··· 169 169 return 0; 170 170 171 171 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, 172 - PCIE_PHY_CLK_PAD, 0); 172 + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0)); 173 173 174 174 /* Delay for stable clock PLL */ 175 175 usleep_range(50, 100); ··· 192 192 return 0; 193 193 194 194 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, 195 - PCIE_PHY_CLK_PAD, 1); 195 + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1)); 196 196 197 197 return 0; 198 198 } 199 199 200 200 static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy) 201 201 { 202 - enum intel_combo_mode cb_mode = PHY_PCIE_MODE; 202 + enum intel_combo_mode cb_mode; 203 203 enum aggregated_mode aggr = cbphy->aggr_mode; 204 204 struct device *dev = cbphy->dev; 205 205 enum intel_phy_mode mode; ··· 224 224 225 225 cb_mode = SATA0_SATA1_MODE; 226 226 break; 227 + default: 228 + return -EINVAL; 227 229 } 228 230 229 231 ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode); ··· 387 385 388 386 /* trigger auto RX adaptation */ 389 387 combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), 390 - ADAPT_REQ_MSK, 3); 388 + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3)); 391 389 /* Wait RX adaptation to finish */ 392 390 ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id), 393 391 val, val & RX_ADAPT_ACK_BIT, 10, 5000); ··· 398 396 399 397 /* Stop RX adaptation */ 400 398 combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), 401 - ADAPT_REQ_MSK, 0); 399 + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0)); 402 400 403 401 return ret; 404 402 }
+2 -2
drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
··· 607 607 platform_set_drvdata(pdev, inno); 608 608 609 609 inno->phy_base = devm_platform_ioremap_resource(pdev, 0); 610 - if (!inno->phy_base) 611 - return -ENOMEM; 610 + if (IS_ERR(inno->phy_base)) 611 + return PTR_ERR(inno->phy_base); 612 612 613 613 inno->ref_clk = devm_clk_get(dev, "ref"); 614 614 if (IS_ERR(inno->ref_clk)) {
+1 -1
drivers/phy/ti/phy-am654-serdes.c
··· 72 72 #define to_serdes_am654_clk_mux(_hw) \ 73 73 container_of(_hw, struct serdes_am654_clk_mux, hw) 74 74 75 - static struct regmap_config serdes_am654_regmap_config = { 75 + static const struct regmap_config serdes_am654_regmap_config = { 76 76 .reg_bits = 32, 77 77 .val_bits = 32, 78 78 .reg_stride = 4,
+5 -5
drivers/phy/ti/phy-j721e-wiz.c
··· 117 117 struct wiz_clk_divider { 118 118 struct clk_hw hw; 119 119 struct regmap_field *field; 120 - struct clk_div_table *table; 120 + const struct clk_div_table *table; 121 121 struct clk_init_data clk_data; 122 122 }; 123 123 ··· 131 131 132 132 struct wiz_clk_div_sel { 133 133 struct regmap_field *field; 134 - struct clk_div_table *table; 134 + const struct clk_div_table *table; 135 135 const char *node_name; 136 136 }; 137 137 ··· 173 173 }, 174 174 }; 175 175 176 - static struct clk_div_table clk_div_table[] = { 176 + static const struct clk_div_table clk_div_table[] = { 177 177 { .val = 0, .div = 1, }, 178 178 { .val = 1, .div = 2, }, 179 179 { .val = 2, .div = 4, }, ··· 559 559 560 560 static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node, 561 561 struct regmap_field *field, 562 - struct clk_div_table *table) 562 + const struct clk_div_table *table) 563 563 { 564 564 struct device *dev = wiz->dev; 565 565 struct wiz_clk_divider *div; ··· 756 756 .deassert = wiz_phy_reset_deassert, 757 757 }; 758 758 759 - static struct regmap_config wiz_regmap_config = { 759 + static const struct regmap_config wiz_regmap_config = { 760 760 .reg_bits = 32, 761 761 .val_bits = 32, 762 762 .reg_stride = 4,
+3 -2
drivers/soundwire/intel.c
··· 930 930 931 931 /* TODO: Read supported rates/formats from hardware */ 932 932 for (i = off; i < (off + num); i++) { 933 - dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", 934 - cdns->instance, i); 933 + dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL, 934 + "SDW%d Pin%d", 935 + cdns->instance, i); 935 936 if (!dais[i].name) 936 937 return -ENOMEM; 937 938
+2 -2
drivers/uio/uio_pdrv_genirq.c
··· 160 160 priv->pdev = pdev; 161 161 162 162 if (!uioinfo->irq) { 163 - ret = platform_get_irq(pdev, 0); 163 + ret = platform_get_irq_optional(pdev, 0); 164 164 uioinfo->irq = ret; 165 - if (ret == -ENXIO && pdev->dev.of_node) 165 + if (ret == -ENXIO) 166 166 uioinfo->irq = UIO_IRQ_NONE; 167 167 else if (ret == -EPROBE_DEFER) 168 168 return ret;
+4 -2
drivers/virt/vboxguest/vboxguest_core.c
··· 1444 1444 or_mask = caps->u.in.or_mask; 1445 1445 not_mask = caps->u.in.not_mask; 1446 1446 1447 - if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) 1447 + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) 1448 1448 return -EINVAL; 1449 1449 1450 1450 ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, ··· 1520 1520 1521 1521 /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */ 1522 1522 if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || 1523 - req == VBG_IOCTL_VMMDEV_REQUEST_BIG) 1523 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || 1524 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT) 1524 1525 return vbg_ioctl_vmmrequest(gdev, session, data); 1525 1526 1526 1527 if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) ··· 1559 1558 case VBG_IOCTL_HGCM_CALL(0): 1560 1559 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); 1561 1560 case VBG_IOCTL_LOG(0): 1561 + case VBG_IOCTL_LOG_ALT(0): 1562 1562 return vbg_ioctl_log(data); 1563 1563 } 1564 1564
+15
drivers/virt/vboxguest/vboxguest_core.h
··· 15 15 #include <linux/vboxguest.h> 16 16 #include "vmmdev.h" 17 17 18 + /* 19 + * The mainline kernel version (this version) of the vboxguest module 20 + * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and 21 + * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead 22 + * of _IO('V', ...) as the out of tree VirtualBox upstream version does. 23 + * 24 + * These _ALT definitions keep compatibility with the wrong defines the 25 + * mainline kernel version used for a while. 26 + * Note the VirtualBox userspace bits have always been built against 27 + * VirtualBox upstream's headers, so this is likely not necessary. But 28 + * we must never break our ABI so we keep these around to be 100% sure. 29 + */ 30 + #define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) 31 + #define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) 32 + 18 33 struct vbg_session; 19 34 20 35 /** VBox guest memory balloon. */
+2 -1
drivers/virt/vboxguest/vboxguest_linux.c
··· 131 131 * the need for a bounce-buffer and another copy later on. 132 132 */ 133 133 is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || 134 - req == VBG_IOCTL_VMMDEV_REQUEST_BIG; 134 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || 135 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT; 135 136 136 137 if (is_vmmdev_req) 137 138 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+2
drivers/virt/vboxguest/vmmdev.h
··· 206 206 * not. 207 207 */ 208 208 #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) 209 + /* The mask of valid capabilities, for sanity checking. */ 210 + #define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U 209 211 210 212 /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ 211 213 struct vmmdev_hypervisorinfo {
+2 -2
include/uapi/linux/vboxguest.h
··· 103 103 104 104 105 105 /* IOCTL to perform a VMM Device request larger then 1KB. */ 106 - #define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) 106 + #define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3) 107 107 108 108 109 109 /** VBG_IOCTL_HGCM_CONNECT data structure. */ ··· 198 198 } u; 199 199 }; 200 200 201 - #define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) 201 + #define VBG_IOCTL_LOG(s) _IO('V', 9) 202 202 203 203 204 204 /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */