Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'char-misc-4.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc updates from Greg KH:
"Here's the big set of char/misc patches for 4.5-rc1.

Nothing major, lots of different driver subsystem updates, full
details in the shortlog. All of these have been in linux-next for a
while"

* tag 'char-misc-4.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (71 commits)
mei: fix fasync return value on error
parport: avoid assignment in if
parport: remove unneeded space
parport: change style of NULL comparison
parport: remove unnecessary out of memory message
parport: remove braces
parport: quoted strings should not be split
parport: code indent should use tabs
parport: fix coding style
parport: EXPORT_SYMBOL should follow function
parport: remove trailing white space
parport: fix a trivial typo
coresight: Fix a typo in Kconfig
coresight: checking for NULL string in coresight_name_match()
Drivers: hv: vmbus: Treat Fibre Channel devices as performance critical
Drivers: hv: utils: fix hvt_op_poll() return value on transport destroy
Drivers: hv: vmbus: fix the building warning with hyperv-keyboard
extcon: add Maxim MAX3355 driver
Drivers: hv: ring_buffer: eliminate hv_ringbuffer_peek()
Drivers: hv: remove code duplication between vmbus_recvpacket()/vmbus_recvpacket_raw()
...

+1104 -676
+60
Documentation/devicetree/bindings/extcon/extcon-arizona.txt
··· 13 13 ARIZONA_ACCDET_MODE_HPR or 2 - Headphone detect mode is set to HPDETR 14 14 If this node is not mentioned or if the value is unknown, then 15 15 headphone detection mode is set to HPDETL. 16 + 17 + - wlf,use-jd2 : Use the additional JD input along with JD1 for dual pin jack 18 + detection. 19 + - wlf,use-jd2-nopull : Internal pull on JD2 is disabled when used for 20 + jack detection. 21 + - wlf,jd-invert : Invert the polarity of the jack detection switch 22 + 23 + - wlf,micd-software-compare : Use a software comparison to determine mic 24 + presence 25 + - wlf,micd-detect-debounce : Additional software microphone detection 26 + debounce specified in milliseconds. 27 + - wlf,micd-pol-gpio : GPIO specifier for the GPIO controlling the headset 28 + polarity if one exists. 29 + - wlf,micd-bias-start-time : Time allowed for MICBIAS to startup prior to 30 + performing microphone detection, specified as per the ARIZONA_MICD_TIME_XXX 31 + defines. 32 + - wlf,micd-rate : Delay between successive microphone detection measurements, 33 + specified as per the ARIZONA_MICD_TIME_XXX defines. 34 + - wlf,micd-dbtime : Microphone detection hardware debounces specified as the 35 + number of measurements to take, valid values being 2 and 4. 36 + - wlf,micd-timeout-ms : Timeout for microphone detection, specified in 37 + milliseconds. 38 + - wlf,micd-force-micbias : Force MICBIAS continuously on during microphone 39 + detection. 40 + - wlf,micd-configs : Headset polarity configurations (generally used for 41 + detection of CTIA / OMTP headsets), the field can be of variable length 42 + but should always be a multiple of 3 cells long, each three cell group 43 + represents one polarity configuration. 44 + The first cell defines the accessory detection pin, zero will use MICDET1 45 + and all other values will use MICDET2. 46 + The second cell represents the MICBIAS to be used. 47 + The third cell represents the value of the micd-pol-gpio pin. 
48 + 49 + - wlf,gpsw : Settings for the general purpose switch 50 + 51 + Example: 52 + 53 + codec: wm8280@0 { 54 + compatible = "wlf,wm8280"; 55 + reg = <0>; 56 + ... 57 + 58 + wlf,use-jd2; 59 + wlf,use-jd2-nopull; 60 + wlf,jd-invert; 61 + 62 + wlf,micd-software-compare; 63 + wlf,micd-detect-debounce = <0>; 64 + wlf,micd-pol-gpio = <&codec 2 0>; 65 + wlf,micd-rate = <ARIZONA_MICD_TIME_8MS>; 66 + wlf,micd-dbtime = <4>; 67 + wlf,micd-timeout-ms = <100>; 68 + wlf,micd-force-micbias; 69 + wlf,micd-configs = < 70 + 0 1 0 /* MICDET1 MICBIAS1 GPIO=low */ 71 + 1 2 1 /* MICDET2 MICBIAS2 GPIO=high */ 72 + >; 73 + 74 + wlf,gpsw = <0>; 75 + };
+21
Documentation/devicetree/bindings/extcon/extcon-max3355.txt
··· 1 + Maxim Integrated MAX3355 USB OTG chip 2 + ------------------------------------- 3 + 4 + MAX3355 integrates a charge pump and comparators to enable a system with an 5 + integrated USB OTG dual-role transceiver to function as a USB OTG dual-role 6 + device. 7 + 8 + Required properties: 9 + - compatible: should be "maxim,max3355"; 10 + - maxim,shdn-gpios: should contain a phandle and GPIO specifier for the GPIO pin 11 + connected to the MAX3355's SHDN# pin; 12 + - id-gpios: should contain a phandle and GPIO specifier for the GPIO pin 13 + connected to the MAX3355's ID_OUT pin. 14 + 15 + Example: 16 + 17 + usb-otg { 18 + compatible = "maxim,max3355"; 19 + maxim,shdn-gpios = <&gpio2 4 GPIO_ACTIVE_LOW>; 20 + id-gpios = <&gpio5 31 GPIO_ACTIVE_HIGH>; 21 + };
+9
drivers/extcon/Kconfig
··· 52 52 Maxim MAX14577/77836. The MAX14577/77836 MUIC is a USB port accessory 53 53 detector and switch. 54 54 55 + config EXTCON_MAX3355 56 + tristate "Maxim MAX3355 USB OTG EXTCON Support" 57 + depends on GPIOLIB || COMPILE_TEST 58 + help 59 + If you say yes here you get support for the USB OTG role detection by 60 + MAX3355. The MAX3355 chip integrates a charge pump and comparators to 61 + enable a system with an integrated USB OTG dual-role transceiver to 62 + function as a USB OTG dual-role device. 63 + 55 64 config EXTCON_MAX77693 56 65 tristate "Maxim MAX77693 EXTCON Support" 57 66 depends on MFD_MAX77693 && INPUT
+1
drivers/extcon/Makefile
··· 8 8 obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o 9 9 obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o 10 10 obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o 11 + obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o 11 12 obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o 12 13 obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o 13 14 obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
+68 -3
drivers/extcon/extcon-arizona.c
··· 1201 1201 regmap_update_bits(arizona->regmap, reg, mask, level); 1202 1202 } 1203 1203 1204 - static int arizona_extcon_device_get_pdata(struct arizona *arizona) 1204 + static int arizona_extcon_get_micd_configs(struct device *dev, 1205 + struct arizona *arizona) 1206 + { 1207 + const char * const prop = "wlf,micd-configs"; 1208 + const int entries_per_config = 3; 1209 + struct arizona_micd_config *micd_configs; 1210 + int nconfs, ret; 1211 + int i, j; 1212 + u32 *vals; 1213 + 1214 + nconfs = device_property_read_u32_array(arizona->dev, prop, NULL, 0); 1215 + if (nconfs <= 0) 1216 + return 0; 1217 + 1218 + vals = kcalloc(nconfs, sizeof(u32), GFP_KERNEL); 1219 + if (!vals) 1220 + return -ENOMEM; 1221 + 1222 + ret = device_property_read_u32_array(arizona->dev, prop, vals, nconfs); 1223 + if (ret < 0) 1224 + goto out; 1225 + 1226 + nconfs /= entries_per_config; 1227 + 1228 + micd_configs = devm_kzalloc(dev, 1229 + nconfs * sizeof(struct arizona_micd_range), 1230 + GFP_KERNEL); 1231 + if (!micd_configs) { 1232 + ret = -ENOMEM; 1233 + goto out; 1234 + } 1235 + 1236 + for (i = 0, j = 0; i < nconfs; ++i) { 1237 + micd_configs[i].src = vals[j++] ? 
ARIZONA_ACCDET_SRC : 0; 1238 + micd_configs[i].bias = vals[j++]; 1239 + micd_configs[i].gpio = vals[j++]; 1240 + } 1241 + 1242 + arizona->pdata.micd_configs = micd_configs; 1243 + arizona->pdata.num_micd_configs = nconfs; 1244 + 1245 + out: 1246 + kfree(vals); 1247 + return ret; 1248 + } 1249 + 1250 + static int arizona_extcon_device_get_pdata(struct device *dev, 1251 + struct arizona *arizona) 1205 1252 { 1206 1253 struct arizona_pdata *pdata = &arizona->pdata; 1207 1254 unsigned int val = ARIZONA_ACCDET_MODE_HPL; 1255 + int ret; 1208 1256 1209 1257 device_property_read_u32(arizona->dev, "wlf,hpdet-channel", &val); 1210 1258 switch (val) { ··· 1278 1230 device_property_read_u32(arizona->dev, "wlf,micd-dbtime", 1279 1231 &pdata->micd_dbtime); 1280 1232 1281 - device_property_read_u32(arizona->dev, "wlf,micd-timeout", 1233 + device_property_read_u32(arizona->dev, "wlf,micd-timeout-ms", 1282 1234 &pdata->micd_timeout); 1283 1235 1284 1236 pdata->micd_force_micbias = device_property_read_bool(arizona->dev, 1285 1237 "wlf,micd-force-micbias"); 1238 + 1239 + pdata->micd_software_compare = device_property_read_bool(arizona->dev, 1240 + "wlf,micd-software-compare"); 1241 + 1242 + pdata->jd_invert = device_property_read_bool(arizona->dev, 1243 + "wlf,jd-invert"); 1244 + 1245 + device_property_read_u32(arizona->dev, "wlf,gpsw", &pdata->gpsw); 1246 + 1247 + pdata->jd_gpio5 = device_property_read_bool(arizona->dev, 1248 + "wlf,use-jd2"); 1249 + pdata->jd_gpio5_nopull = device_property_read_bool(arizona->dev, 1250 + "wlf,use-jd2-nopull"); 1251 + 1252 + ret = arizona_extcon_get_micd_configs(dev, arizona); 1253 + if (ret < 0) 1254 + dev_err(arizona->dev, "Failed to read micd configs: %d\n", ret); 1286 1255 1287 1256 return 0; 1288 1257 } ··· 1322 1257 return -ENOMEM; 1323 1258 1324 1259 if (!dev_get_platdata(arizona->dev)) 1325 - arizona_extcon_device_get_pdata(arizona); 1260 + arizona_extcon_device_get_pdata(&pdev->dev, arizona); 1326 1261 1327 1262 info->micvdd = 
devm_regulator_get(&pdev->dev, "MICVDD"); 1328 1263 if (IS_ERR(info->micvdd)) {
+1 -1
drivers/extcon/extcon-max14577.c
··· 692 692 /* Support irq domain for max14577 MUIC device */ 693 693 for (i = 0; i < info->muic_irqs_num; i++) { 694 694 struct max14577_muic_irq *muic_irq = &info->muic_irqs[i]; 695 - unsigned int virq = 0; 695 + int virq = 0; 696 696 697 697 virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq); 698 698 if (virq <= 0)
+146
drivers/extcon/extcon-max3355.c
··· 1 + /* 2 + * Maxim Integrated MAX3355 USB OTG chip extcon driver 3 + * 4 + * Copyright (C) 2014-2015 Cogent Embedded, Inc. 5 + * Author: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com> 6 + * 7 + * This software is licensed under the terms of the GNU General Public 8 + * License version 2, as published by the Free Software Foundation, and 9 + * may be copied, distributed, and modified under those terms. 10 + */ 11 + 12 + #include <linux/extcon.h> 13 + #include <linux/gpio.h> 14 + #include <linux/gpio/consumer.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/module.h> 17 + #include <linux/platform_device.h> 18 + 19 + struct max3355_data { 20 + struct extcon_dev *edev; 21 + struct gpio_desc *id_gpiod; 22 + struct gpio_desc *shdn_gpiod; 23 + }; 24 + 25 + static const unsigned int max3355_cable[] = { 26 + EXTCON_USB, 27 + EXTCON_USB_HOST, 28 + EXTCON_NONE, 29 + }; 30 + 31 + static irqreturn_t max3355_id_irq(int irq, void *dev_id) 32 + { 33 + struct max3355_data *data = dev_id; 34 + int id = gpiod_get_value_cansleep(data->id_gpiod); 35 + 36 + if (id) { 37 + /* 38 + * ID = 1 means USB HOST cable detached. 39 + * As we don't have event for USB peripheral cable attached, 40 + * we simulate USB peripheral attach here. 41 + */ 42 + extcon_set_cable_state_(data->edev, EXTCON_USB_HOST, false); 43 + extcon_set_cable_state_(data->edev, EXTCON_USB, true); 44 + } else { 45 + /* 46 + * ID = 0 means USB HOST cable attached. 47 + * As we don't have event for USB peripheral cable detached, 48 + * we simulate USB peripheral detach here. 
49 + */ 50 + extcon_set_cable_state_(data->edev, EXTCON_USB, false); 51 + extcon_set_cable_state_(data->edev, EXTCON_USB_HOST, true); 52 + } 53 + 54 + return IRQ_HANDLED; 55 + } 56 + 57 + static int max3355_probe(struct platform_device *pdev) 58 + { 59 + struct max3355_data *data; 60 + struct gpio_desc *gpiod; 61 + int irq, err; 62 + 63 + data = devm_kzalloc(&pdev->dev, sizeof(struct max3355_data), 64 + GFP_KERNEL); 65 + if (!data) 66 + return -ENOMEM; 67 + 68 + gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN); 69 + if (IS_ERR(gpiod)) { 70 + dev_err(&pdev->dev, "failed to get ID_OUT GPIO\n"); 71 + return PTR_ERR(gpiod); 72 + } 73 + data->id_gpiod = gpiod; 74 + 75 + gpiod = devm_gpiod_get(&pdev->dev, "maxim,shdn", GPIOD_OUT_HIGH); 76 + if (IS_ERR(gpiod)) { 77 + dev_err(&pdev->dev, "failed to get SHDN# GPIO\n"); 78 + return PTR_ERR(gpiod); 79 + } 80 + data->shdn_gpiod = gpiod; 81 + 82 + data->edev = devm_extcon_dev_allocate(&pdev->dev, max3355_cable); 83 + if (IS_ERR(data->edev)) { 84 + dev_err(&pdev->dev, "failed to allocate extcon device\n"); 85 + return PTR_ERR(data->edev); 86 + } 87 + 88 + err = devm_extcon_dev_register(&pdev->dev, data->edev); 89 + if (err < 0) { 90 + dev_err(&pdev->dev, "failed to register extcon device\n"); 91 + return err; 92 + } 93 + 94 + irq = gpiod_to_irq(data->id_gpiod); 95 + if (irq < 0) { 96 + dev_err(&pdev->dev, "failed to translate ID_OUT GPIO to IRQ\n"); 97 + return irq; 98 + } 99 + 100 + err = devm_request_threaded_irq(&pdev->dev, irq, NULL, max3355_id_irq, 101 + IRQF_ONESHOT | IRQF_NO_SUSPEND | 102 + IRQF_TRIGGER_RISING | 103 + IRQF_TRIGGER_FALLING, 104 + pdev->name, data); 105 + if (err < 0) { 106 + dev_err(&pdev->dev, "failed to request ID_OUT IRQ\n"); 107 + return err; 108 + } 109 + 110 + platform_set_drvdata(pdev, data); 111 + 112 + /* Perform initial detection */ 113 + max3355_id_irq(irq, data); 114 + 115 + return 0; 116 + } 117 + 118 + static int max3355_remove(struct platform_device *pdev) 119 + { 120 + struct max3355_data 
*data = platform_get_drvdata(pdev); 121 + 122 + gpiod_set_value_cansleep(data->shdn_gpiod, 0); 123 + 124 + return 0; 125 + } 126 + 127 + static const struct of_device_id max3355_match_table[] = { 128 + { .compatible = "maxim,max3355", }, 129 + { } 130 + }; 131 + MODULE_DEVICE_TABLE(of, max3355_match_table); 132 + 133 + static struct platform_driver max3355_driver = { 134 + .probe = max3355_probe, 135 + .remove = max3355_remove, 136 + .driver = { 137 + .name = "extcon-max3355", 138 + .of_match_table = max3355_match_table, 139 + }, 140 + }; 141 + 142 + module_platform_driver(max3355_driver); 143 + 144 + MODULE_AUTHOR("Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>"); 145 + MODULE_DESCRIPTION("Maxim MAX3355 extcon driver"); 146 + MODULE_LICENSE("GPL v2");
+2 -2
drivers/extcon/extcon-max77693.c
··· 1127 1127 /* Support irq domain for MAX77693 MUIC device */ 1128 1128 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) { 1129 1129 struct max77693_muic_irq *muic_irq = &muic_irqs[i]; 1130 - unsigned int virq = 0; 1130 + int virq; 1131 1131 1132 1132 virq = regmap_irq_get_virq(max77693->irq_data_muic, 1133 1133 muic_irq->irq); 1134 - if (!virq) 1134 + if (virq <= 0) 1135 1135 return -EINVAL; 1136 1136 muic_irq->virq = virq; 1137 1137
+1 -1
drivers/extcon/extcon-max77843.c
··· 811 811 812 812 for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) { 813 813 struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i]; 814 - unsigned int virq = 0; 814 + int virq = 0; 815 815 816 816 virq = regmap_irq_get_virq(max77843->irq_data_muic, 817 817 muic_irq->irq);
+1 -1
drivers/extcon/extcon-rt8973a.c
··· 603 603 604 604 ret = devm_request_threaded_irq(info->dev, virq, NULL, 605 605 rt8973a_muic_irq_handler, 606 - IRQF_NO_SUSPEND, 606 + IRQF_NO_SUSPEND | IRQF_ONESHOT, 607 607 muic_irq->name, info); 608 608 if (ret) { 609 609 dev_err(info->dev,
+66 -72
drivers/hv/channel.c
··· 28 28 #include <linux/module.h> 29 29 #include <linux/hyperv.h> 30 30 #include <linux/uio.h> 31 + #include <linux/interrupt.h> 31 32 32 33 #include "hyperv_vmbus.h" 33 34 ··· 497 496 static int vmbus_close_internal(struct vmbus_channel *channel) 498 497 { 499 498 struct vmbus_channel_close_channel *msg; 499 + struct tasklet_struct *tasklet; 500 500 int ret; 501 + 502 + /* 503 + * process_chn_event(), running in the tasklet, can race 504 + * with vmbus_close_internal() in the case of SMP guest, e.g., when 505 + * the former is accessing channel->inbound.ring_buffer, the latter 506 + * could be freeing the ring_buffer pages. 507 + * 508 + * To resolve the race, we can serialize them by disabling the 509 + * tasklet when the latter is running here. 510 + */ 511 + tasklet = hv_context.event_dpc[channel->target_cpu]; 512 + tasklet_disable(tasklet); 513 + 514 + /* 515 + * In case a device driver's probe() fails (e.g., 516 + * util_probe() -> vmbus_open() returns -ENOMEM) and the device is 517 + * rescinded later (e.g., we dynamically disble an Integrated Service 518 + * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): 519 + * here we should skip most of the below cleanup work. 520 + */ 521 + if (channel->state != CHANNEL_OPENED_STATE) { 522 + ret = -EINVAL; 523 + goto out; 524 + } 501 525 502 526 channel->state = CHANNEL_OPEN_STATE; 503 527 channel->sc_creation_callback = NULL; ··· 551 525 * If we failed to post the close msg, 552 526 * it is perhaps better to leak memory. 553 527 */ 554 - return ret; 528 + goto out; 555 529 } 556 530 557 531 /* Tear down the gpadl for the channel's ring buffer */ ··· 564 538 * If we failed to teardown gpadl, 565 539 * it is perhaps better to leak memory. 
566 540 */ 567 - return ret; 541 + goto out; 568 542 } 569 543 } 570 544 ··· 575 549 free_pages((unsigned long)channel->ringbuffer_pages, 576 550 get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); 577 551 578 - /* 579 - * If the channel has been rescinded; process device removal. 580 - */ 581 - if (channel->rescind) 582 - hv_process_channel_removal(channel, 583 - channel->offermsg.child_relid); 552 + out: 553 + tasklet_enable(tasklet); 554 + 584 555 return ret; 585 556 } 586 557 ··· 653 630 * on the ring. We will not signal if more data is 654 631 * to be placed. 655 632 * 633 + * Based on the channel signal state, we will decide 634 + * which signaling policy will be applied. 635 + * 656 636 * If we cannot write to the ring-buffer; signal the host 657 637 * even if we may not have written anything. This is a rare 658 638 * enough condition that it should not matter. 659 639 */ 640 + 641 + if (channel->signal_policy) 642 + signal = true; 643 + else 644 + kick_q = true; 645 + 660 646 if (((ret == 0) && kick_q && signal) || (ret)) 661 647 vmbus_setevent(channel); 662 648 ··· 765 733 * on the ring. We will not signal if more data is 766 734 * to be placed. 767 735 * 736 + * Based on the channel signal state, we will decide 737 + * which signaling policy will be applied. 738 + * 768 739 * If we cannot write to the ring-buffer; signal the host 769 740 * even if we may not have written anything. This is a rare 770 741 * enough condition that it should not matter. 771 742 */ 743 + 744 + if (channel->signal_policy) 745 + signal = true; 746 + else 747 + kick_q = true; 748 + 772 749 if (((ret == 0) && kick_q && signal) || (ret)) 773 750 vmbus_setevent(channel); 774 751 ··· 922 881 * 923 882 * Mainly used by Hyper-V drivers. 
924 883 */ 925 - int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, 926 - u32 bufferlen, u32 *buffer_actual_len, u64 *requestid) 884 + static inline int 885 + __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, 886 + u32 bufferlen, u32 *buffer_actual_len, u64 *requestid, 887 + bool raw) 927 888 { 928 - struct vmpacket_descriptor desc; 929 - u32 packetlen; 930 - u32 userlen; 931 889 int ret; 932 890 bool signal = false; 933 891 934 - *buffer_actual_len = 0; 935 - *requestid = 0; 936 - 937 - 938 - ret = hv_ringbuffer_peek(&channel->inbound, &desc, 939 - sizeof(struct vmpacket_descriptor)); 940 - if (ret != 0) 941 - return 0; 942 - 943 - packetlen = desc.len8 << 3; 944 - userlen = packetlen - (desc.offset8 << 3); 945 - 946 - *buffer_actual_len = userlen; 947 - 948 - if (userlen > bufferlen) { 949 - 950 - pr_err("Buffer too small - got %d needs %d\n", 951 - bufferlen, userlen); 952 - return -ETOOSMALL; 953 - } 954 - 955 - *requestid = desc.trans_id; 956 - 957 - /* Copy over the packet to the user buffer */ 958 - ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen, 959 - (desc.offset8 << 3), &signal); 892 + ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen, 893 + buffer_actual_len, requestid, &signal, raw); 960 894 961 895 if (signal) 962 896 vmbus_setevent(channel); 963 897 964 - return 0; 898 + return ret; 899 + } 900 + 901 + int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, 902 + u32 bufferlen, u32 *buffer_actual_len, 903 + u64 *requestid) 904 + { 905 + return __vmbus_recvpacket(channel, buffer, bufferlen, 906 + buffer_actual_len, requestid, false); 965 907 } 966 908 EXPORT_SYMBOL(vmbus_recvpacket); 967 909 ··· 955 931 u32 bufferlen, u32 *buffer_actual_len, 956 932 u64 *requestid) 957 933 { 958 - struct vmpacket_descriptor desc; 959 - u32 packetlen; 960 - int ret; 961 - bool signal = false; 962 - 963 - *buffer_actual_len = 0; 964 - *requestid = 0; 965 - 966 - 967 - ret = 
hv_ringbuffer_peek(&channel->inbound, &desc, 968 - sizeof(struct vmpacket_descriptor)); 969 - if (ret != 0) 970 - return 0; 971 - 972 - 973 - packetlen = desc.len8 << 3; 974 - 975 - *buffer_actual_len = packetlen; 976 - 977 - if (packetlen > bufferlen) 978 - return -ENOBUFS; 979 - 980 - *requestid = desc.trans_id; 981 - 982 - /* Copy over the entire packet to the user buffer */ 983 - ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0, 984 - &signal); 985 - 986 - if (signal) 987 - vmbus_setevent(channel); 988 - 989 - return ret; 934 + return __vmbus_recvpacket(channel, buffer, bufferlen, 935 + buffer_actual_len, requestid, true); 990 936 } 991 937 EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
+31 -17
drivers/hv/channel_mgmt.c
··· 177 177 } 178 178 179 179 180 - void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 180 + static void vmbus_release_relid(u32 relid) 181 181 { 182 182 struct vmbus_channel_relid_released msg; 183 - unsigned long flags; 184 - struct vmbus_channel *primary_channel; 185 183 186 184 memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); 187 185 msg.child_relid = relid; 188 186 msg.header.msgtype = CHANNELMSG_RELID_RELEASED; 189 187 vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); 188 + } 190 189 191 - if (channel == NULL) 192 - return; 190 + void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 191 + { 192 + unsigned long flags; 193 + struct vmbus_channel *primary_channel; 194 + 195 + vmbus_release_relid(relid); 196 + 197 + BUG_ON(!channel->rescind); 193 198 194 199 if (channel->target_cpu != get_cpu()) { 195 200 put_cpu(); ··· 206 201 } 207 202 208 203 if (channel->primary_channel == NULL) { 209 - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 204 + mutex_lock(&vmbus_connection.channel_mutex); 210 205 list_del(&channel->listentry); 211 - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 206 + mutex_unlock(&vmbus_connection.channel_mutex); 212 207 213 208 primary_channel = channel; 214 209 } else { ··· 235 230 236 231 list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list, 237 232 listentry) { 238 - /* if we don't set rescind to true, vmbus_close_internal() 239 - * won't invoke hv_process_channel_removal(). 
240 - */ 233 + /* hv_process_channel_removal() needs this */ 241 234 channel->rescind = true; 242 235 243 236 vmbus_device_unregister(channel->device_obj); ··· 253 250 unsigned long flags; 254 251 255 252 /* Make sure this is a new offer */ 256 - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 253 + mutex_lock(&vmbus_connection.channel_mutex); 257 254 258 255 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 259 256 if (!uuid_le_cmp(channel->offermsg.offer.if_type, ··· 269 266 list_add_tail(&newchannel->listentry, 270 267 &vmbus_connection.chn_list); 271 268 272 - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 269 + mutex_unlock(&vmbus_connection.channel_mutex); 273 270 274 271 if (!fnew) { 275 272 /* ··· 339 336 return; 340 337 341 338 err_deq_chan: 342 - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 339 + vmbus_release_relid(newchannel->offermsg.child_relid); 340 + 341 + mutex_lock(&vmbus_connection.channel_mutex); 343 342 list_del(&newchannel->listentry); 344 - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 343 + mutex_unlock(&vmbus_connection.channel_mutex); 345 344 346 345 if (newchannel->target_cpu != get_cpu()) { 347 346 put_cpu(); ··· 361 356 enum { 362 357 IDE = 0, 363 358 SCSI, 359 + FC, 364 360 NIC, 365 361 ND_NIC, 362 + PCIE, 366 363 MAX_PERF_CHN, 367 364 }; 368 365 ··· 378 371 { HV_IDE_GUID, }, 379 372 /* Storage - SCSI */ 380 373 { HV_SCSI_GUID, }, 374 + /* Storage - FC */ 375 + { HV_SYNTHFC_GUID, }, 381 376 /* Network */ 382 377 { HV_NIC_GUID, }, 383 378 /* NetworkDirect Guest RDMA */ 384 379 { HV_ND_GUID, }, 380 + /* PCI Express Pass Through */ 381 + { HV_PCIE_GUID, }, 385 382 }; 386 383 387 384 ··· 416 405 struct cpumask *alloced_mask; 417 406 418 407 for (i = IDE; i < MAX_PERF_CHN; i++) { 419 - if (!memcmp(type_guid->b, hp_devs[i].guid, 420 - sizeof(uuid_le))) { 408 + if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) { 421 409 perf_chn = true; 422 410 break; 423 411 } ··· 595 
585 channel = relid2channel(rescind->child_relid); 596 586 597 587 if (channel == NULL) { 598 - hv_process_channel_removal(NULL, rescind->child_relid); 588 + /* 589 + * This is very impossible, because in 590 + * vmbus_process_offer(), we have already invoked 591 + * vmbus_release_relid() on error. 592 + */ 599 593 return; 600 594 } 601 595
+10 -8
drivers/hv/connection.c
··· 83 83 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); 84 84 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); 85 85 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); 86 - if (version >= VERSION_WIN8_1) { 87 - msg->target_vcpu = hv_context.vp_index[get_cpu()]; 88 - put_cpu(); 89 - } 86 + /* 87 + * We want all channel messages to be delivered on CPU 0. 88 + * This has been the behavior pre-win8. This is not 89 + * perf issue and having all channel messages delivered on CPU 0 90 + * would be ok. 91 + */ 92 + msg->target_vcpu = 0; 90 93 91 94 /* 92 95 * Add to list before we send the request since we may ··· 149 146 spin_lock_init(&vmbus_connection.channelmsg_lock); 150 147 151 148 INIT_LIST_HEAD(&vmbus_connection.chn_list); 152 - spin_lock_init(&vmbus_connection.channel_lock); 149 + mutex_init(&vmbus_connection.channel_mutex); 153 150 154 151 /* 155 152 * Setup the vmbus event connection for channel interrupt ··· 285 282 { 286 283 struct vmbus_channel *channel; 287 284 struct vmbus_channel *found_channel = NULL; 288 - unsigned long flags; 289 285 struct list_head *cur, *tmp; 290 286 struct vmbus_channel *cur_sc; 291 287 292 - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 288 + mutex_lock(&vmbus_connection.channel_mutex); 293 289 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 294 290 if (channel->offermsg.child_relid == relid) { 295 291 found_channel = channel; ··· 307 305 } 308 306 } 309 307 } 310 - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 308 + mutex_unlock(&vmbus_connection.channel_mutex); 311 309 312 310 return found_channel; 313 311 }
+15 -14
drivers/hv/hv.c
··· 89 89 } 90 90 91 91 /* 92 - * do_hypercall- Invoke the specified hypercall 92 + * hv_do_hypercall- Invoke the specified hypercall 93 93 */ 94 - static u64 do_hypercall(u64 control, void *input, void *output) 94 + u64 hv_do_hypercall(u64 control, void *input, void *output) 95 95 { 96 96 u64 input_address = (input) ? virt_to_phys(input) : 0; 97 97 u64 output_address = (output) ? virt_to_phys(output) : 0; ··· 132 132 return hv_status_lo | ((u64)hv_status_hi << 32); 133 133 #endif /* !x86_64 */ 134 134 } 135 + EXPORT_SYMBOL_GPL(hv_do_hypercall); 135 136 136 137 #ifdef CONFIG_X86_64 137 138 static cycle_t read_hv_clock_tsc(struct clocksource *arg) ··· 140 139 cycle_t current_tick; 141 140 struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page; 142 141 143 - if (tsc_pg->tsc_sequence != -1) { 142 + if (tsc_pg->tsc_sequence != 0) { 144 143 /* 145 144 * Use the tsc page to compute the value. 146 145 */ ··· 162 161 if (tsc_pg->tsc_sequence == sequence) 163 162 return current_tick; 164 163 165 - if (tsc_pg->tsc_sequence != -1) 164 + if (tsc_pg->tsc_sequence != 0) 166 165 continue; 167 166 /* 168 167 * Fallback using MSR method. 
··· 193 192 { 194 193 int max_leaf; 195 194 union hv_x64_msr_hypercall_contents hypercall_msr; 196 - union hv_x64_msr_hypercall_contents tsc_msr; 197 195 void *virtaddr = NULL; 198 - void *va_tsc = NULL; 199 196 200 197 memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); 201 198 memset(hv_context.synic_message_page, 0, ··· 239 240 240 241 #ifdef CONFIG_X86_64 241 242 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { 243 + union hv_x64_msr_hypercall_contents tsc_msr; 244 + void *va_tsc; 245 + 242 246 va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); 243 247 if (!va_tsc) 244 248 goto cleanup; ··· 317 315 { 318 316 319 317 struct hv_input_post_message *aligned_msg; 320 - u16 status; 318 + u64 status; 321 319 322 320 if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) 323 321 return -EMSGSIZE; ··· 331 329 aligned_msg->payload_size = payload_size; 332 330 memcpy((void *)aligned_msg->payload, payload, payload_size); 333 331 334 - status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL) 335 - & 0xFFFF; 332 + status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL); 336 333 337 334 put_cpu(); 338 - return status; 335 + return status & 0xFFFF; 339 336 } 340 337 341 338 ··· 344 343 * 345 344 * This involves a hypercall. 346 345 */ 347 - u16 hv_signal_event(void *con_id) 346 + int hv_signal_event(void *con_id) 348 347 { 349 - u16 status; 348 + u64 status; 350 349 351 - status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF); 350 + status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL); 352 351 353 - return status; 352 + return status & 0xFFFF; 354 353 } 355 354 356 355 static int hv_ce_set_next_event(unsigned long delta,
+14 -23
drivers/hv/hv_fcopy.c
··· 51 51 struct hv_fcopy_hdr *fcopy_msg; /* current message */ 52 52 struct vmbus_channel *recv_channel; /* chn we got the request */ 53 53 u64 recv_req_id; /* request ID. */ 54 - void *fcopy_context; /* for the channel callback */ 55 54 } fcopy_transaction; 56 55 57 56 static void fcopy_respond_to_host(int error); ··· 66 67 */ 67 68 static int dm_reg_value; 68 69 70 + static void fcopy_poll_wrapper(void *channel) 71 + { 72 + /* Transaction is finished, reset the state here to avoid races. */ 73 + fcopy_transaction.state = HVUTIL_READY; 74 + hv_fcopy_onchannelcallback(channel); 75 + } 76 + 69 77 static void fcopy_timeout_func(struct work_struct *dummy) 70 78 { 71 79 /* ··· 80 74 * process the pending transaction. 81 75 */ 82 76 fcopy_respond_to_host(HV_E_FAIL); 83 - 84 - /* Transaction is finished, reset the state. */ 85 - if (fcopy_transaction.state > HVUTIL_READY) 86 - fcopy_transaction.state = HVUTIL_READY; 87 - 88 - hv_poll_channel(fcopy_transaction.fcopy_context, 89 - hv_fcopy_onchannelcallback); 77 + hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper); 90 78 } 91 79 92 80 static int fcopy_handle_handshake(u32 version) ··· 108 108 return -EINVAL; 109 109 } 110 110 pr_debug("FCP: userspace daemon ver. %d registered\n", version); 111 - fcopy_transaction.state = HVUTIL_READY; 112 - hv_poll_channel(fcopy_transaction.fcopy_context, 113 - hv_fcopy_onchannelcallback); 111 + hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper); 114 112 return 0; 115 113 } 116 114 ··· 225 227 int util_fw_version; 226 228 int fcopy_srv_version; 227 229 228 - if (fcopy_transaction.state > HVUTIL_READY) { 229 - /* 230 - * We will defer processing this callback once 231 - * the current transaction is complete. 
232 - */ 233 - fcopy_transaction.fcopy_context = context; 230 + if (fcopy_transaction.state > HVUTIL_READY) 234 231 return; 235 - } 236 - fcopy_transaction.fcopy_context = NULL; 237 232 238 233 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, 239 234 &requestid); ··· 266 275 * Send the information to the user-level daemon. 267 276 */ 268 277 schedule_work(&fcopy_send_work); 269 - schedule_delayed_work(&fcopy_timeout_work, 5*HZ); 278 + schedule_delayed_work(&fcopy_timeout_work, 279 + HV_UTIL_TIMEOUT * HZ); 270 280 return; 271 281 } 272 282 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; ··· 296 304 if (cancel_delayed_work_sync(&fcopy_timeout_work)) { 297 305 fcopy_transaction.state = HVUTIL_USERSPACE_RECV; 298 306 fcopy_respond_to_host(*val); 299 - fcopy_transaction.state = HVUTIL_READY; 300 - hv_poll_channel(fcopy_transaction.fcopy_context, 301 - hv_fcopy_onchannelcallback); 307 + hv_poll_channel(fcopy_transaction.recv_channel, 308 + fcopy_poll_wrapper); 302 309 } 303 310 304 311 return 0;
+13 -20
drivers/hv/hv_kvp.c
··· 66 66 struct hv_kvp_msg *kvp_msg; /* current message */ 67 67 struct vmbus_channel *recv_channel; /* chn we got the request */ 68 68 u64 recv_req_id; /* request ID. */ 69 - void *kvp_context; /* for the channel callback */ 70 69 } kvp_transaction; 71 70 72 71 /* ··· 92 93 * This number has no meaning, it satisfies the registration protocol. 93 94 */ 94 95 #define HV_DRV_VERSION "3.1" 96 + 97 + static void kvp_poll_wrapper(void *channel) 98 + { 99 + /* Transaction is finished, reset the state here to avoid races. */ 100 + kvp_transaction.state = HVUTIL_READY; 101 + hv_kvp_onchannelcallback(channel); 102 + } 95 103 96 104 static void 97 105 kvp_register(int reg_value) ··· 127 121 */ 128 122 kvp_respond_to_host(NULL, HV_E_FAIL); 129 123 130 - /* Transaction is finished, reset the state. */ 131 - if (kvp_transaction.state > HVUTIL_READY) 132 - kvp_transaction.state = HVUTIL_READY; 133 - 134 - hv_poll_channel(kvp_transaction.kvp_context, 135 - hv_kvp_onchannelcallback); 124 + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper); 136 125 } 137 126 138 127 static int kvp_handle_handshake(struct hv_kvp_msg *msg) ··· 154 153 pr_debug("KVP: userspace daemon ver. %d registered\n", 155 154 KVP_OP_REGISTER); 156 155 kvp_register(dm_reg_value); 157 - kvp_transaction.state = HVUTIL_READY; 156 + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper); 158 157 159 158 return 0; 160 159 } ··· 219 218 */ 220 219 if (cancel_delayed_work_sync(&kvp_timeout_work)) { 221 220 kvp_respond_to_host(message, error); 222 - kvp_transaction.state = HVUTIL_READY; 223 - hv_poll_channel(kvp_transaction.kvp_context, 224 - hv_kvp_onchannelcallback); 221 + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper); 225 222 } 226 223 227 224 return 0; ··· 595 596 int util_fw_version; 596 597 int kvp_srv_version; 597 598 598 - if (kvp_transaction.state > HVUTIL_READY) { 599 - /* 600 - * We will defer processing this callback once 601 - * the current transaction is complete. 
602 - */ 603 - kvp_transaction.kvp_context = context; 599 + if (kvp_transaction.state > HVUTIL_READY) 604 600 return; 605 - } 606 - kvp_transaction.kvp_context = NULL; 607 601 608 602 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen, 609 603 &requestid); ··· 660 668 * user-mode not responding. 661 669 */ 662 670 schedule_work(&kvp_sendkey_work); 663 - schedule_delayed_work(&kvp_timeout_work, 5*HZ); 671 + schedule_delayed_work(&kvp_timeout_work, 672 + HV_UTIL_TIMEOUT * HZ); 664 673 665 674 return; 666 675
+17 -19
drivers/hv/hv_snapshot.c
··· 53 53 struct vmbus_channel *recv_channel; /* chn we got the request */ 54 54 u64 recv_req_id; /* request ID. */ 55 55 struct hv_vss_msg *msg; /* current message */ 56 - void *vss_context; /* for the channel callback */ 57 56 } vss_transaction; 58 57 59 58 ··· 73 74 static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func); 74 75 static DECLARE_WORK(vss_send_op_work, vss_send_op); 75 76 77 + static void vss_poll_wrapper(void *channel) 78 + { 79 + /* Transaction is finished, reset the state here to avoid races. */ 80 + vss_transaction.state = HVUTIL_READY; 81 + hv_vss_onchannelcallback(channel); 82 + } 83 + 76 84 /* 77 85 * Callback when data is received from user mode. 78 86 */ ··· 92 86 pr_warn("VSS: timeout waiting for daemon to reply\n"); 93 87 vss_respond_to_host(HV_E_FAIL); 94 88 95 - /* Transaction is finished, reset the state. */ 96 - if (vss_transaction.state > HVUTIL_READY) 97 - vss_transaction.state = HVUTIL_READY; 98 - 99 - hv_poll_channel(vss_transaction.vss_context, 100 - hv_vss_onchannelcallback); 89 + hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper); 101 90 } 102 91 103 92 static int vss_handle_handshake(struct hv_vss_msg *vss_msg) ··· 113 112 default: 114 113 return -EINVAL; 115 114 } 116 - vss_transaction.state = HVUTIL_READY; 115 + hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper); 117 116 pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value); 118 117 return 0; 119 118 } ··· 139 138 if (cancel_delayed_work_sync(&vss_timeout_work)) { 140 139 vss_respond_to_host(vss_msg->error); 141 140 /* Transaction is finished, reset the state. */ 142 - vss_transaction.state = HVUTIL_READY; 143 - hv_poll_channel(vss_transaction.vss_context, 144 - hv_vss_onchannelcallback); 141 + hv_poll_channel(vss_transaction.recv_channel, 142 + vss_poll_wrapper); 145 143 } 146 144 } else { 147 145 /* This is a spurious call! 
*/ ··· 238 238 struct icmsg_hdr *icmsghdrp; 239 239 struct icmsg_negotiate *negop = NULL; 240 240 241 - if (vss_transaction.state > HVUTIL_READY) { 242 - /* 243 - * We will defer processing this callback once 244 - * the current transaction is complete. 245 - */ 246 - vss_transaction.vss_context = context; 241 + if (vss_transaction.state > HVUTIL_READY) 247 242 return; 248 - } 249 - vss_transaction.vss_context = NULL; 250 243 251 244 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, 252 245 &requestid); ··· 331 338 int 332 339 hv_vss_init(struct hv_util_service *srv) 333 340 { 341 + if (vmbus_proto_version < VERSION_WIN8_1) { 342 + pr_warn("Integration service 'Backup (volume snapshot)'" 343 + " not supported on this host version.\n"); 344 + return -ENOTSUPP; 345 + } 334 346 recv_buffer = srv->recv_buffer; 335 347 336 348 /*
+97 -33
drivers/hv/hv_utils_transport.c
··· 27 27 28 28 static void hvt_reset(struct hvutil_transport *hvt) 29 29 { 30 - mutex_lock(&hvt->outmsg_lock); 31 30 kfree(hvt->outmsg); 32 31 hvt->outmsg = NULL; 33 32 hvt->outmsg_len = 0; 34 - mutex_unlock(&hvt->outmsg_lock); 35 33 if (hvt->on_reset) 36 34 hvt->on_reset(); 37 35 } ··· 42 44 43 45 hvt = container_of(file->f_op, struct hvutil_transport, fops); 44 46 45 - if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0)) 47 + if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0 || 48 + hvt->mode != HVUTIL_TRANSPORT_CHARDEV)) 46 49 return -EINTR; 47 50 48 - mutex_lock(&hvt->outmsg_lock); 51 + mutex_lock(&hvt->lock); 52 + 53 + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) { 54 + ret = -EBADF; 55 + goto out_unlock; 56 + } 57 + 49 58 if (!hvt->outmsg) { 50 59 ret = -EAGAIN; 51 60 goto out_unlock; ··· 73 68 hvt->outmsg_len = 0; 74 69 75 70 out_unlock: 76 - mutex_unlock(&hvt->outmsg_lock); 71 + mutex_unlock(&hvt->lock); 77 72 return ret; 78 73 } 79 74 ··· 82 77 { 83 78 struct hvutil_transport *hvt; 84 79 u8 *inmsg; 80 + int ret; 85 81 86 82 hvt = container_of(file->f_op, struct hvutil_transport, fops); 87 83 88 - inmsg = kzalloc(count, GFP_KERNEL); 89 - if (copy_from_user(inmsg, buf, count)) { 90 - kfree(inmsg); 91 - return -EFAULT; 92 - } 93 - if (hvt->on_msg(inmsg, count)) 94 - return -EFAULT; 84 + inmsg = memdup_user(buf, count); 85 + if (IS_ERR(inmsg)) 86 + return PTR_ERR(inmsg); 87 + 88 + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) 89 + ret = -EBADF; 90 + else 91 + ret = hvt->on_msg(inmsg, count); 92 + 95 93 kfree(inmsg); 96 94 97 - return count; 95 + return ret ? 
ret : count; 98 96 } 99 97 100 98 static unsigned int hvt_op_poll(struct file *file, poll_table *wait) ··· 107 99 hvt = container_of(file->f_op, struct hvutil_transport, fops); 108 100 109 101 poll_wait(file, &hvt->outmsg_q, wait); 102 + 103 + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) 104 + return POLLERR | POLLHUP; 105 + 110 106 if (hvt->outmsg_len > 0) 111 107 return POLLIN | POLLRDNORM; 112 108 ··· 120 108 static int hvt_op_open(struct inode *inode, struct file *file) 121 109 { 122 110 struct hvutil_transport *hvt; 111 + int ret = 0; 112 + bool issue_reset = false; 123 113 124 114 hvt = container_of(file->f_op, struct hvutil_transport, fops); 125 115 126 - /* 127 - * Switching to CHARDEV mode. We switch bach to INIT when device 128 - * gets released. 129 - */ 130 - if (hvt->mode == HVUTIL_TRANSPORT_INIT) 116 + mutex_lock(&hvt->lock); 117 + 118 + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) { 119 + ret = -EBADF; 120 + } else if (hvt->mode == HVUTIL_TRANSPORT_INIT) { 121 + /* 122 + * Switching to CHARDEV mode. We switch bach to INIT when 123 + * device gets released. 124 + */ 131 125 hvt->mode = HVUTIL_TRANSPORT_CHARDEV; 126 + } 132 127 else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) { 133 128 /* 134 129 * We're switching from netlink communication to using char 135 130 * device. Issue the reset first. 
136 131 */ 137 - hvt_reset(hvt); 132 + issue_reset = true; 138 133 hvt->mode = HVUTIL_TRANSPORT_CHARDEV; 139 - } else 140 - return -EBUSY; 134 + } else { 135 + ret = -EBUSY; 136 + } 141 137 142 - return 0; 138 + if (issue_reset) 139 + hvt_reset(hvt); 140 + 141 + mutex_unlock(&hvt->lock); 142 + 143 + return ret; 144 + } 145 + 146 + static void hvt_transport_free(struct hvutil_transport *hvt) 147 + { 148 + misc_deregister(&hvt->mdev); 149 + kfree(hvt->outmsg); 150 + kfree(hvt); 143 151 } 144 152 145 153 static int hvt_op_release(struct inode *inode, struct file *file) 146 154 { 147 155 struct hvutil_transport *hvt; 156 + int mode_old; 148 157 149 158 hvt = container_of(file->f_op, struct hvutil_transport, fops); 150 159 151 - hvt->mode = HVUTIL_TRANSPORT_INIT; 160 + mutex_lock(&hvt->lock); 161 + mode_old = hvt->mode; 162 + if (hvt->mode != HVUTIL_TRANSPORT_DESTROY) 163 + hvt->mode = HVUTIL_TRANSPORT_INIT; 152 164 /* 153 165 * Cleanup message buffers to avoid spurious messages when the daemon 154 166 * connects back. 155 167 */ 156 168 hvt_reset(hvt); 169 + mutex_unlock(&hvt->lock); 170 + 171 + if (mode_old == HVUTIL_TRANSPORT_DESTROY) 172 + hvt_transport_free(hvt); 157 173 158 174 return 0; 159 175 } ··· 208 168 * Switching to NETLINK mode. Switching to CHARDEV happens when someone 209 169 * opens the device. 
210 170 */ 171 + mutex_lock(&hvt->lock); 211 172 if (hvt->mode == HVUTIL_TRANSPORT_INIT) 212 173 hvt->mode = HVUTIL_TRANSPORT_NETLINK; 213 174 ··· 216 175 hvt_found->on_msg(msg->data, msg->len); 217 176 else 218 177 pr_warn("hvt_cn_callback: unexpected netlink message!\n"); 178 + mutex_unlock(&hvt->lock); 219 179 } 220 180 221 181 int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len) ··· 224 182 struct cn_msg *cn_msg; 225 183 int ret = 0; 226 184 227 - if (hvt->mode == HVUTIL_TRANSPORT_INIT) { 185 + if (hvt->mode == HVUTIL_TRANSPORT_INIT || 186 + hvt->mode == HVUTIL_TRANSPORT_DESTROY) { 228 187 return -EINVAL; 229 188 } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) { 230 189 cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC); ··· 240 197 return ret; 241 198 } 242 199 /* HVUTIL_TRANSPORT_CHARDEV */ 243 - mutex_lock(&hvt->outmsg_lock); 200 + mutex_lock(&hvt->lock); 201 + if (hvt->mode != HVUTIL_TRANSPORT_CHARDEV) { 202 + ret = -EINVAL; 203 + goto out_unlock; 204 + } 205 + 244 206 if (hvt->outmsg) { 245 207 /* Previous message wasn't received */ 246 208 ret = -EFAULT; 247 209 goto out_unlock; 248 210 } 249 211 hvt->outmsg = kzalloc(len, GFP_KERNEL); 250 - memcpy(hvt->outmsg, msg, len); 251 - hvt->outmsg_len = len; 252 - wake_up_interruptible(&hvt->outmsg_q); 212 + if (hvt->outmsg) { 213 + memcpy(hvt->outmsg, msg, len); 214 + hvt->outmsg_len = len; 215 + wake_up_interruptible(&hvt->outmsg_q); 216 + } else 217 + ret = -ENOMEM; 253 218 out_unlock: 254 - mutex_unlock(&hvt->outmsg_lock); 219 + mutex_unlock(&hvt->lock); 255 220 return ret; 256 221 } 257 222 ··· 290 239 hvt->mdev.fops = &hvt->fops; 291 240 292 241 init_waitqueue_head(&hvt->outmsg_q); 293 - mutex_init(&hvt->outmsg_lock); 242 + mutex_init(&hvt->lock); 294 243 295 244 spin_lock(&hvt_list_lock); 296 245 list_add(&hvt->list, &hvt_list); ··· 316 265 317 266 void hvutil_transport_destroy(struct hvutil_transport *hvt) 318 267 { 268 + int mode_old; 269 + 270 + mutex_lock(&hvt->lock); 271 + 
mode_old = hvt->mode; 272 + hvt->mode = HVUTIL_TRANSPORT_DESTROY; 273 + wake_up_interruptible(&hvt->outmsg_q); 274 + mutex_unlock(&hvt->lock); 275 + 276 + /* 277 + * In case we were in 'chardev' mode we still have an open fd so we 278 + * have to defer freeing the device. Netlink interface can be freed 279 + * now. 280 + */ 319 281 spin_lock(&hvt_list_lock); 320 282 list_del(&hvt->list); 321 283 spin_unlock(&hvt_list_lock); 322 284 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) 323 285 cn_del_callback(&hvt->cn_id); 324 - misc_deregister(&hvt->mdev); 325 - kfree(hvt->outmsg); 326 - kfree(hvt); 286 + 287 + if (mode_old != HVUTIL_TRANSPORT_CHARDEV) 288 + hvt_transport_free(hvt); 327 289 }
+2 -1
drivers/hv/hv_utils_transport.h
··· 25 25 HVUTIL_TRANSPORT_INIT = 0, 26 26 HVUTIL_TRANSPORT_NETLINK, 27 27 HVUTIL_TRANSPORT_CHARDEV, 28 + HVUTIL_TRANSPORT_DESTROY, 28 29 }; 29 30 30 31 struct hvutil_transport { ··· 39 38 u8 *outmsg; /* message to the userspace */ 40 39 int outmsg_len; /* its length */ 41 40 wait_queue_head_t outmsg_q; /* poll/read wait queue */ 42 - struct mutex outmsg_lock; /* protects outmsg */ 41 + struct mutex lock; /* protects struct members */ 43 42 }; 44 43 45 44 struct hvutil_transport *hvutil_transport_init(const char *name,
+11 -15
drivers/hv/hyperv_vmbus.h
··· 31 31 #include <linux/hyperv.h> 32 32 33 33 /* 34 + * Timeout for services such as KVP and fcopy. 35 + */ 36 + #define HV_UTIL_TIMEOUT 30 37 + 38 + /* 34 39 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent 35 40 * is set by CPUID(HVCPUID_VERSION_FEATURES). 36 41 */ ··· 501 496 enum hv_message_type message_type, 502 497 void *payload, size_t payload_size); 503 498 504 - extern u16 hv_signal_event(void *con_id); 499 + extern int hv_signal_event(void *con_id); 505 500 506 501 extern int hv_synic_alloc(void); 507 502 ··· 533 528 struct kvec *kv_list, 534 529 u32 kv_count, bool *signal); 535 530 536 - int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer, 537 - u32 buflen); 538 - 539 - int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info, 540 - void *buffer, 541 - u32 buflen, 542 - u32 offset, bool *signal); 543 - 531 + int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, 532 + void *buffer, u32 buflen, u32 *buffer_actual_len, 533 + u64 *requestid, bool *signal, bool raw); 544 534 545 535 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 546 536 struct hv_ring_buffer_debug_info *debug_info); ··· 592 592 593 593 /* List of channels */ 594 594 struct list_head chn_list; 595 - spinlock_t channel_lock; 595 + struct mutex channel_mutex; 596 596 597 597 struct workqueue_struct *work_queue; 598 598 }; ··· 673 673 if (!channel) 674 674 return; 675 675 676 - if (channel->target_cpu != smp_processor_id()) 677 - smp_call_function_single(channel->target_cpu, 678 - cb, channel, true); 679 - else 680 - cb(channel); 676 + smp_call_function_single(channel->target_cpu, cb, channel, true); 681 677 } 682 678 683 679 enum hvutil_device_state {
+63 -155
drivers/hv/ring_buffer.c
··· 112 112 u32 read_loc = rbi->ring_buffer->read_index; 113 113 u32 pending_sz = rbi->ring_buffer->pending_send_sz; 114 114 115 - /* 116 - * If the other end is not blocked on write don't bother. 117 - */ 115 + /* If the other end is not blocked on write don't bother. */ 118 116 if (pending_sz == 0) 119 117 return false; 120 118 ··· 126 128 return false; 127 129 } 128 130 129 - /* 130 - * hv_get_next_write_location() 131 - * 132 - * Get the next write location for the specified ring buffer 133 - * 134 - */ 131 + /* Get the next write location for the specified ring buffer. */ 135 132 static inline u32 136 133 hv_get_next_write_location(struct hv_ring_buffer_info *ring_info) 137 134 { ··· 135 142 return next; 136 143 } 137 144 138 - /* 139 - * hv_set_next_write_location() 140 - * 141 - * Set the next write location for the specified ring buffer 142 - * 143 - */ 145 + /* Set the next write location for the specified ring buffer. */ 144 146 static inline void 145 147 hv_set_next_write_location(struct hv_ring_buffer_info *ring_info, 146 148 u32 next_write_location) ··· 143 155 ring_info->ring_buffer->write_index = next_write_location; 144 156 } 145 157 146 - /* 147 - * hv_get_next_read_location() 148 - * 149 - * Get the next read location for the specified ring buffer 150 - */ 158 + /* Get the next read location for the specified ring buffer. */ 151 159 static inline u32 152 160 hv_get_next_read_location(struct hv_ring_buffer_info *ring_info) 153 161 { ··· 153 169 } 154 170 155 171 /* 156 - * hv_get_next_readlocation_withoffset() 157 - * 158 172 * Get the next read location + offset for the specified ring buffer. 159 - * This allows the caller to skip 173 + * This allows the caller to skip. 
160 174 */ 161 175 static inline u32 162 176 hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info, ··· 168 186 return next; 169 187 } 170 188 171 - /* 172 - * 173 - * hv_set_next_read_location() 174 - * 175 - * Set the next read location for the specified ring buffer 176 - * 177 - */ 189 + /* Set the next read location for the specified ring buffer. */ 178 190 static inline void 179 191 hv_set_next_read_location(struct hv_ring_buffer_info *ring_info, 180 192 u32 next_read_location) ··· 177 201 } 178 202 179 203 180 - /* 181 - * 182 - * hv_get_ring_buffer() 183 - * 184 - * Get the start of the ring buffer 185 - */ 204 + /* Get the start of the ring buffer. */ 186 205 static inline void * 187 206 hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) 188 207 { ··· 185 214 } 186 215 187 216 188 - /* 189 - * 190 - * hv_get_ring_buffersize() 191 - * 192 - * Get the size of the ring buffer 193 - */ 217 + /* Get the size of the ring buffer. */ 194 218 static inline u32 195 219 hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info) 196 220 { 197 221 return ring_info->ring_datasize; 198 222 } 199 223 200 - /* 201 - * 202 - * hv_get_ring_bufferindices() 203 - * 204 - * Get the read and write indices as u64 of the specified ring buffer 205 - * 206 - */ 224 + /* Get the read and write indices as u64 of the specified ring buffer. */ 207 225 static inline u64 208 226 hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info) 209 227 { ··· 200 240 } 201 241 202 242 /* 203 - * 204 - * hv_copyfrom_ringbuffer() 205 - * 206 243 * Helper routine to copy to source from ring buffer. 207 244 * Assume there is enough room. Handles wrap-around in src case only!! 208 - * 209 245 */ 210 246 static u32 hv_copyfrom_ringbuffer( 211 247 struct hv_ring_buffer_info *ring_info, ··· 233 277 234 278 235 279 /* 236 - * 237 - * hv_copyto_ringbuffer() 238 - * 239 280 * Helper routine to copy from source to ring buffer. 240 281 * Assume there is enough room. 
Handles wrap-around in dest case only!! 241 - * 242 282 */ 243 283 static u32 hv_copyto_ringbuffer( 244 284 struct hv_ring_buffer_info *ring_info, ··· 260 308 return start_write_offset; 261 309 } 262 310 263 - /* 264 - * 265 - * hv_ringbuffer_get_debuginfo() 266 - * 267 - * Get various debug metrics for the specified ring buffer 268 - * 269 - */ 311 + /* Get various debug metrics for the specified ring buffer. */ 270 312 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 271 313 struct hv_ring_buffer_debug_info *debug_info) 272 314 { ··· 283 337 } 284 338 } 285 339 286 - /* 287 - * 288 - * hv_ringbuffer_init() 289 - * 290 - *Initialize the ring buffer 291 - * 292 - */ 340 + /* Initialize the ring buffer. */ 293 341 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, 294 342 void *buffer, u32 buflen) 295 343 { ··· 296 356 ring_info->ring_buffer->read_index = 297 357 ring_info->ring_buffer->write_index = 0; 298 358 299 - /* 300 - * Set the feature bit for enabling flow control. 301 - */ 359 + /* Set the feature bit for enabling flow control. */ 302 360 ring_info->ring_buffer->feature_bits.value = 1; 303 361 304 362 ring_info->ring_size = buflen; ··· 307 369 return 0; 308 370 } 309 371 310 - /* 311 - * 312 - * hv_ringbuffer_cleanup() 313 - * 314 - * Cleanup the ring buffer 315 - * 316 - */ 372 + /* Cleanup the ring buffer. */ 317 373 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) 318 374 { 319 375 } 320 376 321 - /* 322 - * 323 - * hv_ringbuffer_write() 324 - * 325 - * Write to the ring buffer 326 - * 327 - */ 377 + /* Write to the ring buffer. */ 328 378 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, 329 379 struct kvec *kv_list, u32 kv_count, bool *signal) 330 380 { ··· 337 411 &bytes_avail_toread, 338 412 &bytes_avail_towrite); 339 413 340 - 341 - /* If there is only room for the packet, assume it is full. 
*/ 342 - /* Otherwise, the next time around, we think the ring buffer */ 343 - /* is empty since the read index == write index */ 414 + /* 415 + * If there is only room for the packet, assume it is full. 416 + * Otherwise, the next time around, we think the ring buffer 417 + * is empty since the read index == write index. 418 + */ 344 419 if (bytes_avail_towrite <= totalbytes_towrite) { 345 420 spin_unlock_irqrestore(&outring_info->ring_lock, flags); 346 421 return -EAGAIN; ··· 380 453 return 0; 381 454 } 382 455 383 - 384 - /* 385 - * 386 - * hv_ringbuffer_peek() 387 - * 388 - * Read without advancing the read index 389 - * 390 - */ 391 - int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info, 392 - void *Buffer, u32 buflen) 393 - { 394 - u32 bytes_avail_towrite; 395 - u32 bytes_avail_toread; 396 - u32 next_read_location = 0; 397 - unsigned long flags; 398 - 399 - spin_lock_irqsave(&Inring_info->ring_lock, flags); 400 - 401 - hv_get_ringbuffer_availbytes(Inring_info, 402 - &bytes_avail_toread, 403 - &bytes_avail_towrite); 404 - 405 - /* Make sure there is something to read */ 406 - if (bytes_avail_toread < buflen) { 407 - 408 - spin_unlock_irqrestore(&Inring_info->ring_lock, flags); 409 - 410 - return -EAGAIN; 411 - } 412 - 413 - /* Convert to byte offset */ 414 - next_read_location = hv_get_next_read_location(Inring_info); 415 - 416 - next_read_location = hv_copyfrom_ringbuffer(Inring_info, 417 - Buffer, 418 - buflen, 419 - next_read_location); 420 - 421 - spin_unlock_irqrestore(&Inring_info->ring_lock, flags); 422 - 423 - return 0; 424 - } 425 - 426 - 427 - /* 428 - * 429 - * hv_ringbuffer_read() 430 - * 431 - * Read and advance the read index 432 - * 433 - */ 434 - int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, 435 - u32 buflen, u32 offset, bool *signal) 456 + int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, 457 + void *buffer, u32 buflen, u32 *buffer_actual_len, 458 + u64 *requestid, bool *signal, bool raw) 
436 459 { 437 460 u32 bytes_avail_towrite; 438 461 u32 bytes_avail_toread; 439 462 u32 next_read_location = 0; 440 463 u64 prev_indices = 0; 441 464 unsigned long flags; 465 + struct vmpacket_descriptor desc; 466 + u32 offset; 467 + u32 packetlen; 468 + int ret = 0; 442 469 443 470 if (buflen <= 0) 444 471 return -EINVAL; 445 472 446 473 spin_lock_irqsave(&inring_info->ring_lock, flags); 447 474 475 + *buffer_actual_len = 0; 476 + *requestid = 0; 477 + 448 478 hv_get_ringbuffer_availbytes(inring_info, 449 479 &bytes_avail_toread, 450 480 &bytes_avail_towrite); 451 481 452 482 /* Make sure there is something to read */ 453 - if (bytes_avail_toread < buflen) { 454 - spin_unlock_irqrestore(&inring_info->ring_lock, flags); 483 + if (bytes_avail_toread < sizeof(desc)) { 484 + /* 485 + * No error is set when there is even no header, drivers are 486 + * supposed to analyze buffer_actual_len. 487 + */ 488 + goto out_unlock; 489 + } 455 490 456 - return -EAGAIN; 491 + next_read_location = hv_get_next_read_location(inring_info); 492 + next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, 493 + sizeof(desc), 494 + next_read_location); 495 + 496 + offset = raw ? 
0 : (desc.offset8 << 3); 497 + packetlen = (desc.len8 << 3) - offset; 498 + *buffer_actual_len = packetlen; 499 + *requestid = desc.trans_id; 500 + 501 + if (bytes_avail_toread < packetlen + offset) { 502 + ret = -EAGAIN; 503 + goto out_unlock; 504 + } 505 + 506 + if (packetlen > buflen) { 507 + ret = -ENOBUFS; 508 + goto out_unlock; 457 509 } 458 510 459 511 next_read_location = ··· 440 534 441 535 next_read_location = hv_copyfrom_ringbuffer(inring_info, 442 536 buffer, 443 - buflen, 537 + packetlen, 444 538 next_read_location); 445 539 446 540 next_read_location = hv_copyfrom_ringbuffer(inring_info, ··· 448 542 sizeof(u64), 449 543 next_read_location); 450 544 451 - /* Make sure all reads are done before we update the read index since */ 452 - /* the writer may start writing to the read area once the read index */ 453 - /*is updated */ 545 + /* 546 + * Make sure all reads are done before we update the read index since 547 + * the writer may start writing to the read area once the read index 548 + * is updated. 549 + */ 454 550 mb(); 455 551 456 552 /* Update the read index */ 457 553 hv_set_next_read_location(inring_info, next_read_location); 458 554 459 - spin_unlock_irqrestore(&inring_info->ring_lock, flags); 460 - 461 555 *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); 462 556 463 - return 0; 557 + out_unlock: 558 + spin_unlock_irqrestore(&inring_info->ring_lock, flags); 559 + return ret; 464 560 }
+49 -33
drivers/hv/vmbus_drv.c
··· 47 47 48 48 static struct tasklet_struct msg_dpc; 49 49 static struct completion probe_event; 50 - static int irq; 51 50 52 51 53 52 static void hyperv_report_panic(struct pt_regs *regs) ··· 530 531 531 532 static const uuid_le null_guid; 532 533 533 - static inline bool is_null_guid(const __u8 *guid) 534 + static inline bool is_null_guid(const uuid_le *guid) 534 535 { 535 - if (memcmp(guid, &null_guid, sizeof(uuid_le))) 536 + if (uuid_le_cmp(*guid, null_guid)) 536 537 return false; 537 538 return true; 538 539 } ··· 543 544 */ 544 545 static const struct hv_vmbus_device_id *hv_vmbus_get_id( 545 546 const struct hv_vmbus_device_id *id, 546 - const __u8 *guid) 547 + const uuid_le *guid) 547 548 { 548 - for (; !is_null_guid(id->guid); id++) 549 - if (!memcmp(&id->guid, guid, sizeof(uuid_le))) 549 + for (; !is_null_guid(&id->guid); id++) 550 + if (!uuid_le_cmp(id->guid, *guid)) 550 551 return id; 551 552 552 553 return NULL; ··· 562 563 struct hv_driver *drv = drv_to_hv_drv(driver); 563 564 struct hv_device *hv_dev = device_to_hv_device(device); 564 565 565 - if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b)) 566 + if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type)) 566 567 return 1; 567 568 568 569 return 0; ··· 579 580 struct hv_device *dev = device_to_hv_device(child_device); 580 581 const struct hv_vmbus_device_id *dev_id; 581 582 582 - dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b); 583 + dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type); 583 584 if (drv->probe) { 584 585 ret = drv->probe(dev, dev_id); 585 586 if (ret != 0) ··· 601 602 { 602 603 struct hv_driver *drv; 603 604 struct hv_device *dev = device_to_hv_device(child_device); 604 - u32 relid = dev->channel->offermsg.child_relid; 605 605 606 606 if (child_device->driver) { 607 607 drv = drv_to_hv_drv(child_device->driver); 608 608 if (drv->remove) 609 609 drv->remove(dev); 610 - else { 611 - hv_process_channel_removal(dev->channel, relid); 612 - pr_err("remove not set for driver 
%s\n", 613 - dev_name(child_device)); 614 - } 615 - } else { 616 - /* 617 - * We don't have a driver for this device; deal with the 618 - * rescind message by removing the channel. 619 - */ 620 - hv_process_channel_removal(dev->channel, relid); 621 610 } 622 611 623 612 return 0; ··· 640 653 static void vmbus_device_release(struct device *device) 641 654 { 642 655 struct hv_device *hv_dev = device_to_hv_device(device); 656 + struct vmbus_channel *channel = hv_dev->channel; 643 657 658 + hv_process_channel_removal(channel, 659 + channel->offermsg.child_relid); 644 660 kfree(hv_dev); 645 661 646 662 } ··· 825 835 * Here, we 826 836 * - initialize the vmbus driver context 827 837 * - invoke the vmbus hv main init routine 828 - * - get the irq resource 829 838 * - retrieve the channel offers 830 839 */ 831 - static int vmbus_bus_init(int irq) 840 + static int vmbus_bus_init(void) 832 841 { 833 842 int ret; 834 843 ··· 856 867 on_each_cpu(hv_synic_init, NULL, 1); 857 868 ret = vmbus_connect(); 858 869 if (ret) 859 - goto err_alloc; 870 + goto err_connect; 860 871 861 872 if (vmbus_proto_version > VERSION_WIN7) 862 873 cpu_hotplug_disable(); ··· 874 885 875 886 return 0; 876 887 888 + err_connect: 889 + on_each_cpu(hv_synic_cleanup, NULL, 1); 877 890 err_alloc: 878 891 hv_synic_free(); 879 892 hv_remove_vmbus_irq(); ··· 1022 1031 struct resource **prev_res = NULL; 1023 1032 1024 1033 switch (res->type) { 1025 - case ACPI_RESOURCE_TYPE_IRQ: 1026 - irq = res->data.irq.interrupts[0]; 1027 - return AE_OK; 1028 1034 1029 1035 /* 1030 1036 * "Address" descriptors are for bus windows. Ignore ··· 1063 1075 new_res->start = start; 1064 1076 new_res->end = end; 1065 1077 1078 + /* 1079 + * Stick ranges from higher in address space at the front of the list. 1080 + * If two ranges are adjacent, merge them. 
1081 + */ 1066 1082 do { 1067 1083 if (!*old_res) { 1068 1084 *old_res = new_res; 1085 + break; 1086 + } 1087 + 1088 + if (((*old_res)->end + 1) == new_res->start) { 1089 + (*old_res)->end = new_res->end; 1090 + kfree(new_res); 1091 + break; 1092 + } 1093 + 1094 + if ((*old_res)->start == new_res->end + 1) { 1095 + (*old_res)->start = new_res->start; 1096 + kfree(new_res); 1069 1097 break; 1070 1098 } 1071 1099 ··· 1195 1191 } 1196 1192 EXPORT_SYMBOL_GPL(vmbus_allocate_mmio); 1197 1193 1194 + /** 1195 + * vmbus_cpu_number_to_vp_number() - Map CPU to VP. 1196 + * @cpu_number: CPU number in Linux terms 1197 + * 1198 + * This function returns the mapping between the Linux processor 1199 + * number and the hypervisor's virtual processor number, useful 1200 + * in making hypercalls and such that talk about specific 1201 + * processors. 1202 + * 1203 + * Return: Virtual processor number in Hyper-V terms 1204 + */ 1205 + int vmbus_cpu_number_to_vp_number(int cpu_number) 1206 + { 1207 + return hv_context.vp_index[cpu_number]; 1208 + } 1209 + EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number); 1210 + 1198 1211 static int vmbus_acpi_add(struct acpi_device *device) 1199 1212 { 1200 1213 acpi_status result; ··· 1296 1275 init_completion(&probe_event); 1297 1276 1298 1277 /* 1299 - * Get irq resources first. 1278 + * Get ACPI resources first. 1300 1279 */ 1301 1280 ret = acpi_bus_register_driver(&vmbus_acpi_driver); 1302 1281 ··· 1309 1288 goto cleanup; 1310 1289 } 1311 1290 1312 - if (irq <= 0) { 1313 - ret = -ENODEV; 1314 - goto cleanup; 1315 - } 1316 - 1317 - ret = vmbus_bus_init(irq); 1291 + ret = vmbus_bus_init(); 1318 1292 if (ret) 1319 1293 goto cleanup; 1320 1294
+1 -1
drivers/hwtracing/coresight/Kconfig
··· 8 8 This framework provides a kernel interface for the CoreSight debug 9 9 and trace drivers to register themselves with. It's intended to build 10 10 a topological view of the CoreSight components based on a DT 11 - specification and configure the right serie of components when a 11 + specification and configure the right series of components when a 12 12 trace source gets enabled. 13 13 14 14 if CORESIGHT
+1 -1
drivers/hwtracing/coresight/coresight.c
··· 548 548 to_match = data; 549 549 i_csdev = to_coresight_device(dev); 550 550 551 - if (!strcmp(to_match, dev_name(&i_csdev->dev))) 551 + if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev))) 552 552 return 1; 553 553 554 554 return 0;
-10
drivers/input/serio/hyperv-keyboard.c
··· 412 412 return 0; 413 413 } 414 414 415 - /* 416 - * Keyboard GUID 417 - * {f912ad6d-2b17-48ea-bd65-f927a61c7684} 418 - */ 419 - #define HV_KBD_GUID \ 420 - .guid = { \ 421 - 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, \ 422 - 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 \ 423 - } 424 - 425 415 static const struct hv_vmbus_device_id id_table[] = { 426 416 /* Keyboard guid */ 427 417 { HV_KBD_GUID, },
+4 -2
drivers/misc/mei/main.c
··· 657 657 * @file: pointer to file structure 658 658 * @band: band bitmap 659 659 * 660 - * Return: poll mask 660 + * Return: negative on error, 661 + * 0 if it did no changes, 662 + * and positive a process was added or deleted 661 663 */ 662 664 static int mei_fasync(int fd, struct file *file, int band) 663 665 { ··· 667 665 struct mei_cl *cl = file->private_data; 668 666 669 667 if (!mei_cl_is_connected(cl)) 670 - return POLLERR; 668 + return -ENODEV; 671 669 672 670 return fasync_helper(fd, file, band, &cl->ev_async); 673 671 }
+126 -120
drivers/parport/share.c
··· 1 1 /* 2 2 * Parallel-port resource manager code. 3 - * 3 + * 4 4 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au> 5 5 * Tim Waugh <tim@cyberelk.demon.co.uk> 6 6 * Jose Renau <renau@acm.org> ··· 54 54 static DEFINE_MUTEX(registration_lock); 55 55 56 56 /* What you can do to a port that's gone away.. */ 57 - static void dead_write_lines (struct parport *p, unsigned char b){} 58 - static unsigned char dead_read_lines (struct parport *p) { return 0; } 59 - static unsigned char dead_frob_lines (struct parport *p, unsigned char b, 57 + static void dead_write_lines(struct parport *p, unsigned char b){} 58 + static unsigned char dead_read_lines(struct parport *p) { return 0; } 59 + static unsigned char dead_frob_lines(struct parport *p, unsigned char b, 60 60 unsigned char c) { return 0; } 61 - static void dead_onearg (struct parport *p){} 62 - static void dead_initstate (struct pardevice *d, struct parport_state *s) { } 63 - static void dead_state (struct parport *p, struct parport_state *s) { } 64 - static size_t dead_write (struct parport *p, const void *b, size_t l, int f) 61 + static void dead_onearg(struct parport *p){} 62 + static void dead_initstate(struct pardevice *d, struct parport_state *s) { } 63 + static void dead_state(struct parport *p, struct parport_state *s) { } 64 + static size_t dead_write(struct parport *p, const void *b, size_t l, int f) 65 65 { return 0; } 66 - static size_t dead_read (struct parport *p, void *b, size_t l, int f) 66 + static size_t dead_read(struct parport *p, void *b, size_t l, int f) 67 67 { return 0; } 68 68 static struct parport_operations dead_ops = { 69 69 .write_data = dead_write_lines, /* data */ ··· 93 93 .ecp_write_data = dead_write, /* ecp */ 94 94 .ecp_read_data = dead_read, 95 95 .ecp_write_addr = dead_write, 96 - 96 + 97 97 .compat_write_data = dead_write, /* compat */ 98 98 .nibble_read_data = dead_read, /* nibble */ 99 99 .byte_read_data = dead_read, /* byte */ ··· 148 148 /* 149 149 * iterates 
through all the drivers registered with the bus and sends the port 150 150 * details to the match_port callback of the driver, so that the driver can 151 - * know about the new port that just regsitered with the bus and decide if it 151 + * know about the new port that just registered with the bus and decide if it 152 152 * wants to use this new port. 153 153 */ 154 154 static int driver_check(struct device_driver *dev_drv, void *_port) ··· 194 194 struct parport_driver *drv; 195 195 /* caller has exclusive registration_lock */ 196 196 list_for_each_entry(drv, &drivers, list) 197 - drv->detach (port); 197 + drv->detach(port); 198 198 199 199 /* 200 200 * call the detach function of the drivers registered in ··· 205 205 } 206 206 207 207 /* Ask kmod for some lowlevel drivers. */ 208 - static void get_lowlevel_driver (void) 208 + static void get_lowlevel_driver(void) 209 209 { 210 - /* There is no actual module called this: you should set 211 - * up an alias for modutils. */ 212 - request_module ("parport_lowlevel"); 210 + /* 211 + * There is no actual module called this: you should set 212 + * up an alias for modutils. 213 + */ 214 + request_module("parport_lowlevel"); 213 215 } 214 216 215 217 /* ··· 267 265 const char *mod_name) 268 266 { 269 267 if (list_empty(&portlist)) 270 - get_lowlevel_driver (); 268 + get_lowlevel_driver(); 271 269 272 270 if (drv->devmodel) { 273 271 /* using device model */ ··· 330 328 * finished by the time this function returns. 331 329 **/ 332 330 333 - void parport_unregister_driver (struct parport_driver *drv) 331 + void parport_unregister_driver(struct parport_driver *drv) 334 332 { 335 333 struct parport *port; 336 334 ··· 345 343 } 346 344 mutex_unlock(&registration_lock); 347 345 } 346 + EXPORT_SYMBOL(parport_unregister_driver); 348 347 349 348 static void free_port(struct device *dev) 350 349 { ··· 375 372 * until the matching parport_put_port() call. 
376 373 **/ 377 374 378 - struct parport *parport_get_port (struct parport *port) 375 + struct parport *parport_get_port(struct parport *port) 379 376 { 380 377 struct device *dev = get_device(&port->bus_dev); 381 378 382 379 return to_parport_dev(dev); 383 380 } 381 + EXPORT_SYMBOL(parport_get_port); 384 382 385 383 void parport_del_port(struct parport *port) 386 384 { ··· 398 394 * zero (port is no longer used), free_port is called. 399 395 **/ 400 396 401 - void parport_put_port (struct parport *port) 397 + void parport_put_port(struct parport *port) 402 398 { 403 399 put_device(&port->bus_dev); 404 400 } 401 + EXPORT_SYMBOL(parport_put_port); 405 402 406 403 /** 407 404 * parport_register_port - register a parallel port ··· 444 439 int ret; 445 440 446 441 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL); 447 - if (!tmp) { 448 - printk(KERN_WARNING "parport: memory squeeze\n"); 442 + if (!tmp) 449 443 return NULL; 450 - } 451 444 452 445 /* Init our structure */ 453 446 tmp->base = base; ··· 453 450 tmp->dma = dma; 454 451 tmp->muxport = tmp->daisy = tmp->muxsel = -1; 455 452 tmp->modes = 0; 456 - INIT_LIST_HEAD(&tmp->list); 453 + INIT_LIST_HEAD(&tmp->list); 457 454 tmp->devices = tmp->cad = NULL; 458 455 tmp->flags = 0; 459 456 tmp->ops = ops; 460 457 tmp->physport = tmp; 461 - memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info)); 458 + memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info)); 462 459 rwlock_init(&tmp->cad_lock); 463 460 spin_lock_init(&tmp->waitlist_lock); 464 461 spin_lock_init(&tmp->pardevice_lock); ··· 466 463 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE; 467 464 sema_init(&tmp->ieee1284.irq, 0); 468 465 tmp->spintime = parport_default_spintime; 469 - atomic_set (&tmp->ref_count, 1); 466 + atomic_set(&tmp->ref_count, 1); 470 467 INIT_LIST_HEAD(&tmp->full_list); 471 468 472 469 name = kmalloc(15, GFP_KERNEL); 473 470 if (!name) { 474 - printk(KERN_ERR "parport: memory squeeze\n"); 475 471 kfree(tmp); 476 472 return 
NULL; 477 473 } ··· 510 508 511 509 return tmp; 512 510 } 511 + EXPORT_SYMBOL(parport_register_port); 513 512 514 513 /** 515 514 * parport_announce_port - tell device drivers about a parallel port ··· 524 521 * functions will be called, with @port as the parameter. 525 522 **/ 526 523 527 - void parport_announce_port (struct parport *port) 524 + void parport_announce_port(struct parport *port) 528 525 { 529 526 int i; 530 527 ··· 534 531 #endif 535 532 536 533 if (!port->dev) 537 - printk(KERN_WARNING "%s: fix this legacy " 538 - "no-device port driver!\n", 539 - port->name); 534 + printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n", 535 + port->name); 540 536 541 537 parport_proc_register(port); 542 538 mutex_lock(&registration_lock); ··· 549 547 spin_unlock_irq(&parportlist_lock); 550 548 551 549 /* Let drivers know that new port(s) has arrived. */ 552 - attach_driver_chain (port); 550 + attach_driver_chain(port); 553 551 for (i = 1; i < 3; i++) { 554 552 struct parport *slave = port->slaves[i-1]; 555 553 if (slave) ··· 557 555 } 558 556 mutex_unlock(&registration_lock); 559 557 } 558 + EXPORT_SYMBOL(parport_announce_port); 560 559 561 560 /** 562 561 * parport_remove_port - deregister a parallel port ··· 585 582 mutex_lock(&registration_lock); 586 583 587 584 /* Spread the word. */ 588 - detach_driver_chain (port); 585 + detach_driver_chain(port); 589 586 590 587 #ifdef CONFIG_PARPORT_1284 591 588 /* Forget the IEEE1284.3 topology of the port. 
*/ ··· 619 616 parport_put_port(slave); 620 617 } 621 618 } 619 + EXPORT_SYMBOL(parport_remove_port); 622 620 623 621 /** 624 622 * parport_register_device - register a device on a parallel port ··· 693 689 struct pardevice * 694 690 parport_register_device(struct parport *port, const char *name, 695 691 int (*pf)(void *), void (*kf)(void *), 696 - void (*irq_func)(void *), 692 + void (*irq_func)(void *), 697 693 int flags, void *handle) 698 694 { 699 695 struct pardevice *tmp; 700 696 701 697 if (port->physport->flags & PARPORT_FLAG_EXCL) { 702 698 /* An exclusive device is registered. */ 703 - printk (KERN_DEBUG "%s: no more devices allowed\n", 699 + printk(KERN_DEBUG "%s: no more devices allowed\n", 704 700 port->name); 705 701 return NULL; 706 702 } ··· 726 722 } 727 723 } 728 724 729 - /* We up our own module reference count, and that of the port 730 - on which a device is to be registered, to ensure that 731 - neither of us gets unloaded while we sleep in (e.g.) 732 - kmalloc. 733 - */ 734 - if (!try_module_get(port->ops->owner)) { 725 + /* 726 + * We up our own module reference count, and that of the port 727 + * on which a device is to be registered, to ensure that 728 + * neither of us gets unloaded while we sleep in (e.g.) 729 + * kmalloc. 
730 + */ 731 + if (!try_module_get(port->ops->owner)) 735 732 return NULL; 736 - } 737 - 738 - parport_get_port (port); 733 + 734 + parport_get_port(port); 739 735 740 736 tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL); 741 - if (tmp == NULL) { 742 - printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); 737 + if (!tmp) 743 738 goto out; 744 - } 745 739 746 740 tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL); 747 - if (tmp->state == NULL) { 748 - printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); 741 + if (!tmp->state) 749 742 goto out_free_pardevice; 750 - } 751 743 752 744 tmp->name = name; 753 745 tmp->port = port; ··· 767 767 768 768 if (flags & PARPORT_DEV_EXCL) { 769 769 if (port->physport->devices) { 770 - spin_unlock (&port->physport->pardevice_lock); 771 - printk (KERN_DEBUG 772 - "%s: cannot grant exclusive access for " 773 - "device %s\n", port->name, name); 770 + spin_unlock(&port->physport->pardevice_lock); 771 + printk(KERN_DEBUG 772 + "%s: cannot grant exclusive access for device %s\n", 773 + port->name, name); 774 774 goto out_free_all; 775 775 } 776 776 port->flags |= PARPORT_FLAG_EXCL; 777 777 } 778 778 779 779 tmp->next = port->physport->devices; 780 - wmb(); /* Make sure that tmp->next is written before it's 781 - added to the list; see comments marked 'no locking 782 - required' */ 780 + wmb(); /* 781 + * Make sure that tmp->next is written before it's 782 + * added to the list; see comments marked 'no locking 783 + * required' 784 + */ 783 785 if (port->physport->devices) 784 786 port->physport->devices->prev = tmp; 785 787 port->physport->devices = tmp; ··· 807 805 out_free_pardevice: 808 806 kfree(tmp); 809 807 out: 810 - parport_put_port (port); 808 + parport_put_port(port); 811 809 module_put(port->ops->owner); 812 810 813 811 return NULL; 814 812 } 813 + EXPORT_SYMBOL(parport_register_device); 815 814 816 815 static void free_pardevice(struct device 
*dev) 817 816 { ··· 971 968 struct parport *port; 972 969 973 970 #ifdef PARPORT_PARANOID 974 - if (dev == NULL) { 971 + if (!dev) { 975 972 printk(KERN_ERR "parport_unregister_device: passed NULL\n"); 976 973 return; 977 974 } ··· 988 985 if (port->cad == dev) { 989 986 printk(KERN_DEBUG "%s: %s forgot to release port\n", 990 987 port->name, dev->name); 991 - parport_release (dev); 988 + parport_release(dev); 992 989 } 993 990 994 991 spin_lock(&port->pardevice_lock); ··· 1004 1001 1005 1002 spin_unlock(&port->pardevice_lock); 1006 1003 1007 - /* Make sure we haven't left any pointers around in the wait 1008 - * list. */ 1004 + /* 1005 + * Make sure we haven't left any pointers around in the wait 1006 + * list. 1007 + */ 1009 1008 spin_lock_irq(&port->waitlist_lock); 1010 1009 if (dev->waitprev || dev->waitnext || port->waithead == dev) { 1011 1010 if (dev->waitprev) ··· 1028 1023 kfree(dev); 1029 1024 1030 1025 module_put(port->ops->owner); 1031 - parport_put_port (port); 1026 + parport_put_port(port); 1032 1027 } 1028 + EXPORT_SYMBOL(parport_unregister_device); 1033 1029 1034 1030 /** 1035 1031 * parport_find_number - find a parallel port by number ··· 1044 1038 * gives you, use parport_put_port(). 
1045 1039 */ 1046 1040 1047 - struct parport *parport_find_number (int number) 1041 + struct parport *parport_find_number(int number) 1048 1042 { 1049 1043 struct parport *port, *result = NULL; 1050 1044 1051 1045 if (list_empty(&portlist)) 1052 - get_lowlevel_driver (); 1046 + get_lowlevel_driver(); 1053 1047 1054 - spin_lock (&parportlist_lock); 1048 + spin_lock(&parportlist_lock); 1055 1049 list_for_each_entry(port, &portlist, list) { 1056 1050 if (port->number == number) { 1057 - result = parport_get_port (port); 1051 + result = parport_get_port(port); 1058 1052 break; 1059 1053 } 1060 1054 } 1061 - spin_unlock (&parportlist_lock); 1055 + spin_unlock(&parportlist_lock); 1062 1056 return result; 1063 1057 } 1058 + EXPORT_SYMBOL(parport_find_number); 1064 1059 1065 1060 /** 1066 1061 * parport_find_base - find a parallel port by base address ··· 1075 1068 * gives you, use parport_put_port(). 1076 1069 */ 1077 1070 1078 - struct parport *parport_find_base (unsigned long base) 1071 + struct parport *parport_find_base(unsigned long base) 1079 1072 { 1080 1073 struct parport *port, *result = NULL; 1081 1074 1082 1075 if (list_empty(&portlist)) 1083 - get_lowlevel_driver (); 1076 + get_lowlevel_driver(); 1084 1077 1085 - spin_lock (&parportlist_lock); 1078 + spin_lock(&parportlist_lock); 1086 1079 list_for_each_entry(port, &portlist, list) { 1087 1080 if (port->base == base) { 1088 - result = parport_get_port (port); 1081 + result = parport_get_port(port); 1089 1082 break; 1090 1083 } 1091 1084 } 1092 - spin_unlock (&parportlist_lock); 1085 + spin_unlock(&parportlist_lock); 1093 1086 return result; 1094 1087 } 1088 + EXPORT_SYMBOL(parport_find_base); 1095 1089 1096 1090 /** 1097 1091 * parport_claim - claim access to a parallel port device ··· 1119 1111 } 1120 1112 1121 1113 /* Preempt any current device */ 1122 - write_lock_irqsave (&port->cad_lock, flags); 1123 - if ((oldcad = port->cad) != NULL) { 1114 + write_lock_irqsave(&port->cad_lock, flags); 1115 + oldcad = 
port->cad; 1116 + if (oldcad) { 1124 1117 if (oldcad->preempt) { 1125 1118 if (oldcad->preempt(oldcad->private)) 1126 1119 goto blocked; ··· 1130 1121 goto blocked; 1131 1122 1132 1123 if (port->cad != oldcad) { 1133 - /* I think we'll actually deadlock rather than 1134 - get here, but just in case.. */ 1124 + /* 1125 + * I think we'll actually deadlock rather than 1126 + * get here, but just in case.. 1127 + */ 1135 1128 printk(KERN_WARNING 1136 1129 "%s: %s released port when preempted!\n", 1137 1130 port->name, oldcad->name); ··· 1147 1136 dev->waiting = 0; 1148 1137 1149 1138 /* Take ourselves out of the wait list again. */ 1150 - spin_lock_irq (&port->waitlist_lock); 1139 + spin_lock_irq(&port->waitlist_lock); 1151 1140 if (dev->waitprev) 1152 1141 dev->waitprev->waitnext = dev->waitnext; 1153 1142 else ··· 1156 1145 dev->waitnext->waitprev = dev->waitprev; 1157 1146 else 1158 1147 port->waittail = dev->waitprev; 1159 - spin_unlock_irq (&port->waitlist_lock); 1148 + spin_unlock_irq(&port->waitlist_lock); 1160 1149 dev->waitprev = dev->waitnext = NULL; 1161 1150 } 1162 1151 ··· 1173 1162 /* If it's a daisy chain device, select it. */ 1174 1163 if (dev->daisy >= 0) { 1175 1164 /* This could be lazier. */ 1176 - if (!parport_daisy_select (port, dev->daisy, 1165 + if (!parport_daisy_select(port, dev->daisy, 1177 1166 IEEE1284_MODE_COMPAT)) 1178 1167 port->daisy = dev->daisy; 1179 1168 } ··· 1186 1175 return 0; 1187 1176 1188 1177 blocked: 1189 - /* If this is the first time we tried to claim the port, register an 1190 - interest. This is only allowed for devices sleeping in 1191 - parport_claim_or_block(), or those with a wakeup function. */ 1178 + /* 1179 + * If this is the first time we tried to claim the port, register an 1180 + * interest. This is only allowed for devices sleeping in 1181 + * parport_claim_or_block(), or those with a wakeup function. 
1182 + */ 1192 1183 1193 1184 /* The cad_lock is still held for writing here */ 1194 1185 if (dev->waiting & 2 || dev->wakeup) { 1195 - spin_lock (&port->waitlist_lock); 1186 + spin_lock(&port->waitlist_lock); 1196 1187 if (test_and_set_bit(0, &dev->waiting) == 0) { 1197 1188 /* First add ourselves to the end of the wait list. */ 1198 1189 dev->waitnext = NULL; ··· 1205 1192 } else 1206 1193 port->waithead = port->waittail = dev; 1207 1194 } 1208 - spin_unlock (&port->waitlist_lock); 1195 + spin_unlock(&port->waitlist_lock); 1209 1196 } 1210 - write_unlock_irqrestore (&port->cad_lock, flags); 1197 + write_unlock_irqrestore(&port->cad_lock, flags); 1211 1198 return -EAGAIN; 1212 1199 } 1200 + EXPORT_SYMBOL(parport_claim); 1213 1201 1214 1202 /** 1215 1203 * parport_claim_or_block - claim access to a parallel port device ··· 1226 1212 { 1227 1213 int r; 1228 1214 1229 - /* Signal to parport_claim() that we can wait even without a 1230 - wakeup function. */ 1215 + /* 1216 + * Signal to parport_claim() that we can wait even without a 1217 + * wakeup function. 1218 + */ 1231 1219 dev->waiting = 2; 1232 1220 1233 1221 /* Try to claim the port. If this fails, we need to sleep. */ ··· 1247 1231 * See also parport_release() 1248 1232 */ 1249 1233 1250 - /* If dev->waiting is clear now, an interrupt 1251 - gave us the port and we would deadlock if we slept. */ 1234 + /* 1235 + * If dev->waiting is clear now, an interrupt 1236 + * gave us the port and we would deadlock if we slept. 
1237 + */ 1252 1238 if (dev->waiting) { 1253 1239 wait_event_interruptible(dev->wait_q, 1254 1240 !dev->waiting); 1255 - if (signal_pending (current)) { 1241 + if (signal_pending(current)) 1256 1242 return -EINTR; 1257 - } 1258 1243 r = 1; 1259 1244 } else { 1260 1245 r = 0; ··· 1267 1250 1268 1251 #ifdef PARPORT_DEBUG_SHARING 1269 1252 if (dev->port->physport->cad != dev) 1270 - printk(KERN_DEBUG "%s: exiting parport_claim_or_block " 1271 - "but %s owns port!\n", dev->name, 1272 - dev->port->physport->cad ? 1253 + printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n", 1254 + dev->name, dev->port->physport->cad ? 1273 1255 dev->port->physport->cad->name:"nobody"); 1274 1256 #endif 1275 1257 } 1276 1258 dev->waiting = 0; 1277 1259 return r; 1278 1260 } 1261 + EXPORT_SYMBOL(parport_claim_or_block); 1279 1262 1280 1263 /** 1281 1264 * parport_release - give up access to a parallel port device ··· 1295 1278 /* Make sure that dev is the current device */ 1296 1279 write_lock_irqsave(&port->cad_lock, flags); 1297 1280 if (port->cad != dev) { 1298 - write_unlock_irqrestore (&port->cad_lock, flags); 1299 - printk(KERN_WARNING "%s: %s tried to release parport " 1300 - "when not owner\n", port->name, dev->name); 1281 + write_unlock_irqrestore(&port->cad_lock, flags); 1282 + printk(KERN_WARNING "%s: %s tried to release parport when not owner\n", 1283 + port->name, dev->name); 1301 1284 return; 1302 1285 } 1303 1286 ··· 1310 1293 1311 1294 /* If this is a daisy device, deselect it. */ 1312 1295 if (dev->daisy >= 0) { 1313 - parport_daisy_deselect_all (port); 1296 + parport_daisy_deselect_all(port); 1314 1297 port->daisy = -1; 1315 1298 } 1316 1299 #endif ··· 1321 1304 /* Save control registers */ 1322 1305 port->ops->save_state(port, dev->state); 1323 1306 1324 - /* If anybody is waiting, find out who's been there longest and 1325 - then wake them up. 
(Note: no locking required) */ 1307 + /* 1308 + * If anybody is waiting, find out who's been there longest and 1309 + * then wake them up. (Note: no locking required) 1310 + */ 1326 1311 /* !!! LOCKING IS NEEDED HERE */ 1327 1312 for (pd = port->waithead; pd; pd = pd->waitnext) { 1328 1313 if (pd->waiting & 2) { /* sleeping in claim_or_block */ ··· 1341 1322 } 1342 1323 } 1343 1324 1344 - /* Nobody was waiting, so walk the list to see if anyone is 1345 - interested in being woken up. (Note: no locking required) */ 1325 + /* 1326 + * Nobody was waiting, so walk the list to see if anyone is 1327 + * interested in being woken up. (Note: no locking required) 1328 + */ 1346 1329 /* !!! LOCKING IS NEEDED HERE */ 1347 - for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) { 1330 + for (pd = port->devices; !port->cad && pd; pd = pd->next) { 1348 1331 if (pd->wakeup && pd != dev) 1349 1332 pd->wakeup(pd->private); 1350 1333 } 1351 1334 } 1335 + EXPORT_SYMBOL(parport_release); 1352 1336 1353 1337 irqreturn_t parport_irq_handler(int irq, void *dev_id) 1354 1338 { ··· 1361 1339 1362 1340 return IRQ_HANDLED; 1363 1341 } 1364 - 1365 - /* Exported symbols for modules. */ 1366 - 1367 - EXPORT_SYMBOL(parport_claim); 1368 - EXPORT_SYMBOL(parport_claim_or_block); 1369 - EXPORT_SYMBOL(parport_release); 1370 - EXPORT_SYMBOL(parport_register_port); 1371 - EXPORT_SYMBOL(parport_announce_port); 1372 - EXPORT_SYMBOL(parport_remove_port); 1373 - EXPORT_SYMBOL(parport_unregister_driver); 1374 - EXPORT_SYMBOL(parport_register_device); 1375 - EXPORT_SYMBOL(parport_unregister_device); 1376 - EXPORT_SYMBOL(parport_get_port); 1377 - EXPORT_SYMBOL(parport_put_port); 1378 - EXPORT_SYMBOL(parport_find_number); 1379 - EXPORT_SYMBOL(parport_find_base); 1380 1342 EXPORT_SYMBOL(parport_irq_handler); 1381 1343 1382 1344 MODULE_LICENSE("GPL");
+65 -68
include/linux/hyperv.h
··· 141 141 { 142 142 u32 read_loc, write_loc, dsize; 143 143 144 - smp_read_barrier_depends(); 145 - 146 144 /* Capture the read/write indices before they changed */ 147 145 read_loc = rbi->ring_buffer->read_index; 148 146 write_loc = rbi->ring_buffer->write_index; ··· 628 630 struct hv_input_signal_event event; 629 631 }; 630 632 633 + enum hv_signal_policy { 634 + HV_SIGNAL_POLICY_DEFAULT = 0, 635 + HV_SIGNAL_POLICY_EXPLICIT, 636 + }; 637 + 631 638 struct vmbus_channel { 632 639 /* Unique channel id */ 633 640 int id; ··· 760 757 * link up channels based on their CPU affinity. 761 758 */ 762 759 struct list_head percpu_list; 760 + /* 761 + * Host signaling policy: The default policy will be 762 + * based on the ring buffer state. We will also support 763 + * a policy where the client driver can have explicit 764 + * signaling control. 765 + */ 766 + enum hv_signal_policy signal_policy; 763 767 }; 768 + 769 + static inline void set_channel_signal_state(struct vmbus_channel *c, 770 + enum hv_signal_policy policy) 771 + { 772 + c->signal_policy = policy; 773 + } 764 774 765 775 static inline void set_channel_read_state(struct vmbus_channel *c, bool state) 766 776 { ··· 999 983 resource_size_t size, resource_size_t align, 1000 984 bool fb_overlap_ok); 1001 985 1002 - /** 1003 - * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device 1004 - * 1005 - * This macro is used to create a struct hv_vmbus_device_id that matches a 1006 - * specific device. 1007 - */ 1008 - #define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \ 1009 - g8, g9, ga, gb, gc, gd, ge, gf) \ 1010 - .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \ 1011 - g8, g9, ga, gb, gc, gd, ge, gf }, 986 + int vmbus_cpu_number_to_vp_number(int cpu_number); 987 + u64 hv_do_hypercall(u64 control, void *input, void *output); 1012 988 1013 989 /* 1014 990 * GUID definitions of various offer types - services offered to the guest. 
··· 1011 1003 * {f8615163-df3e-46c5-913f-f2d2f965ed0e} 1012 1004 */ 1013 1005 #define HV_NIC_GUID \ 1014 - .guid = { \ 1015 - 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \ 1016 - 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \ 1017 - } 1006 + .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ 1007 + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) 1018 1008 1019 1009 /* 1020 1010 * IDE GUID 1021 1011 * {32412632-86cb-44a2-9b5c-50d1417354f5} 1022 1012 */ 1023 1013 #define HV_IDE_GUID \ 1024 - .guid = { \ 1025 - 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \ 1026 - 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \ 1027 - } 1014 + .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ 1015 + 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) 1028 1016 1029 1017 /* 1030 1018 * SCSI GUID 1031 1019 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} 1032 1020 */ 1033 1021 #define HV_SCSI_GUID \ 1034 - .guid = { \ 1035 - 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \ 1036 - 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \ 1037 - } 1022 + .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ 1023 + 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) 1038 1024 1039 1025 /* 1040 1026 * Shutdown GUID 1041 1027 * {0e0b6031-5213-4934-818b-38d90ced39db} 1042 1028 */ 1043 1029 #define HV_SHUTDOWN_GUID \ 1044 - .guid = { \ 1045 - 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \ 1046 - 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \ 1047 - } 1030 + .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ 1031 + 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) 1048 1032 1049 1033 /* 1050 1034 * Time Synch GUID 1051 1035 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} 1052 1036 */ 1053 1037 #define HV_TS_GUID \ 1054 - .guid = { \ 1055 - 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \ 1056 - 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \ 1057 - } 1038 + .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ 1039 + 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) 1058 1040 1059 1041 /* 1060 1042 * Heartbeat GUID 1061 1043 * 
{57164f39-9115-4e78-ab55-382f3bd5422d} 1062 1044 */ 1063 1045 #define HV_HEART_BEAT_GUID \ 1064 - .guid = { \ 1065 - 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \ 1066 - 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \ 1067 - } 1046 + .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ 1047 + 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) 1068 1048 1069 1049 /* 1070 1050 * KVP GUID 1071 1051 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} 1072 1052 */ 1073 1053 #define HV_KVP_GUID \ 1074 - .guid = { \ 1075 - 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \ 1076 - 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \ 1077 - } 1054 + .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ 1055 + 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) 1078 1056 1079 1057 /* 1080 1058 * Dynamic memory GUID 1081 1059 * {525074dc-8985-46e2-8057-a307dc18a502} 1082 1060 */ 1083 1061 #define HV_DM_GUID \ 1084 - .guid = { \ 1085 - 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \ 1086 - 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \ 1087 - } 1062 + .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ 1063 + 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) 1088 1064 1089 1065 /* 1090 1066 * Mouse GUID 1091 1067 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} 1092 1068 */ 1093 1069 #define HV_MOUSE_GUID \ 1094 - .guid = { \ 1095 - 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \ 1096 - 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \ 1097 - } 1070 + .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ 1071 + 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) 1072 + 1073 + /* 1074 + * Keyboard GUID 1075 + * {f912ad6d-2b17-48ea-bd65-f927a61c7684} 1076 + */ 1077 + #define HV_KBD_GUID \ 1078 + .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ 1079 + 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) 1098 1080 1099 1081 /* 1100 1082 * VSS (Backup/Restore) GUID 1101 1083 */ 1102 1084 #define HV_VSS_GUID \ 1103 - .guid = { \ 1104 - 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \ 1105 - 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \ 1106 - 
} 1085 + .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ 1086 + 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) 1107 1087 /* 1108 1088 * Synthetic Video GUID 1109 1089 * {DA0A7802-E377-4aac-8E77-0558EB1073F8} 1110 1090 */ 1111 1091 #define HV_SYNTHVID_GUID \ 1112 - .guid = { \ 1113 - 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \ 1114 - 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \ 1115 - } 1092 + .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ 1093 + 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) 1116 1094 1117 1095 /* 1118 1096 * Synthetic FC GUID 1119 1097 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} 1120 1098 */ 1121 1099 #define HV_SYNTHFC_GUID \ 1122 - .guid = { \ 1123 - 0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \ 1124 - 0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \ 1125 - } 1100 + .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ 1101 + 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) 1126 1102 1127 1103 /* 1128 1104 * Guest File Copy Service ··· 1114 1122 */ 1115 1123 1116 1124 #define HV_FCOPY_GUID \ 1117 - .guid = { \ 1118 - 0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \ 1119 - 0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \ 1120 - } 1125 + .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ 1126 + 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) 1121 1127 1122 1128 /* 1123 1129 * NetworkDirect. This is the guest RDMA service. 
1124 1130 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} 1125 1131 */ 1126 1132 #define HV_ND_GUID \ 1127 - .guid = { \ 1128 - 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \ 1129 - 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \ 1130 - } 1133 + .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ 1134 + 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) 1135 + 1136 + /* 1137 + * PCI Express Pass Through 1138 + * {44C4F61D-4444-4400-9D52-802E27EDE19F} 1139 + */ 1140 + 1141 + #define HV_PCIE_GUID \ 1142 + .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ 1143 + 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) 1131 1144 1132 1145 /* 1133 1146 * Common header for Hyper-V ICs
+1 -1
include/linux/mod_devicetable.h
··· 404 404 * For Hyper-V devices we use the device guid as the id. 405 405 */ 406 406 struct hv_vmbus_device_id { 407 - __u8 guid[16]; 407 + uuid_le guid; 408 408 kernel_ulong_t driver_data; /* Data private to the driver */ 409 409 }; 410 410
+1
include/uapi/linux/hyperv.h
··· 313 313 #define HV_INVALIDARG 0x80070057 314 314 #define HV_GUID_NOTFOUND 0x80041002 315 315 #define HV_ERROR_ALREADY_EXISTS 0x80070050 316 + #define HV_ERROR_DISK_FULL 0x80070070 316 317 317 318 #define ADDR_FAMILY_NONE 0x00 318 319 #define ADDR_FAMILY_IPV4 0x01
+186 -48
scripts/checkkconfigsymbols.py
··· 8 8 # Licensed under the terms of the GNU GPL License version 2 9 9 10 10 11 + import difflib 11 12 import os 12 13 import re 14 + import signal 13 15 import sys 14 - from subprocess import Popen, PIPE, STDOUT 16 + from multiprocessing import Pool, cpu_count 15 17 from optparse import OptionParser 18 + from subprocess import Popen, PIPE, STDOUT 16 19 17 20 18 21 # regex expressions ··· 29 26 30 27 # regex objects 31 28 REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$") 32 - REGEX_FEATURE = re.compile(r'(?!\B"[^"]*)' + FEATURE + r'(?![^"]*"\B)') 29 + REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)') 33 30 REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE) 34 31 REGEX_KCONFIG_DEF = re.compile(DEF) 35 32 REGEX_KCONFIG_EXPR = re.compile(EXPR) ··· 37 34 REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$") 38 35 REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$") 39 36 REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") 37 + REGEX_QUOTES = re.compile("(\"(.*?)\")") 40 38 41 39 42 40 def parse_options(): ··· 74 70 help="Ignore files matching this pattern. Note that " 75 71 "the pattern needs to be a Python regex. 
To " 76 72 "ignore defconfigs, specify -i '.*defconfig'.") 73 + 74 + parser.add_option('-s', '--sim', dest='sim', action='store', default="", 75 + help="Print a list of maximum 10 string-similar symbols.") 77 76 78 77 parser.add_option('', '--force', dest='force', action='store_true', 79 78 default=False, ··· 116 109 """Main function of this module.""" 117 110 opts = parse_options() 118 111 112 + if opts.sim and not opts.commit and not opts.diff: 113 + sims = find_sims(opts.sim, opts.ignore) 114 + if sims: 115 + print "%s: %s" % (yel("Similar symbols"), ', '.join(sims)) 116 + else: 117 + print "%s: no similar symbols found" % yel("Similar symbols") 118 + sys.exit(0) 119 + 120 + # dictionary of (un)defined symbols 121 + defined = {} 122 + undefined = {} 123 + 119 124 if opts.commit or opts.diff: 120 125 head = get_head() 121 126 ··· 146 127 147 128 # get undefined items before the commit 148 129 execute("git reset --hard %s" % commit_a) 149 - undefined_a = check_symbols(opts.ignore) 130 + undefined_a, _ = check_symbols(opts.ignore) 150 131 151 132 # get undefined items for the commit 152 133 execute("git reset --hard %s" % commit_b) 153 - undefined_b = check_symbols(opts.ignore) 134 + undefined_b, defined = check_symbols(opts.ignore) 154 135 155 136 # report cases that are present for the commit but not before 156 137 for feature in sorted(undefined_b): 157 138 # feature has not been undefined before 158 139 if not feature in undefined_a: 159 140 files = sorted(undefined_b.get(feature)) 160 - print "%s\t%s" % (yel(feature), ", ".join(files)) 161 - if opts.find: 162 - commits = find_commits(feature, opts.diff) 163 - print red(commits) 141 + undefined[feature] = files 164 142 # check if there are new files that reference the undefined feature 165 143 else: 166 144 files = sorted(undefined_b.get(feature) - 167 145 undefined_a.get(feature)) 168 146 if files: 169 - print "%s\t%s" % (yel(feature), ", ".join(files)) 170 - if opts.find: 171 - commits = find_commits(feature, 
opts.diff) 172 - print red(commits) 147 + undefined[feature] = files 173 148 174 149 # reset to head 175 150 execute("git reset --hard %s" % head) 176 151 177 152 # default to check the entire tree 178 153 else: 179 - undefined = check_symbols(opts.ignore) 180 - for feature in sorted(undefined): 181 - files = sorted(undefined.get(feature)) 182 - print "%s\t%s" % (yel(feature), ", ".join(files)) 154 + undefined, defined = check_symbols(opts.ignore) 155 + 156 + # now print the output 157 + for feature in sorted(undefined): 158 + print red(feature) 159 + 160 + files = sorted(undefined.get(feature)) 161 + print "%s: %s" % (yel("Referencing files"), ", ".join(files)) 162 + 163 + sims = find_sims(feature, opts.ignore, defined) 164 + sims_out = yel("Similar symbols") 165 + if sims: 166 + print "%s: %s" % (sims_out, ', '.join(sims)) 167 + else: 168 + print "%s: %s" % (sims_out, "no similar symbols found") 169 + 170 + if opts.find: 171 + print "%s:" % yel("Commits changing symbol") 172 + commits = find_commits(feature, opts.diff) 173 + if commits: 174 + for commit in commits: 175 + commit = commit.split(" ", 1) 176 + print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1]) 177 + else: 178 + print "\t- no commit found" 179 + print # new line 183 180 184 181 185 182 def yel(string): ··· 225 190 """Find commits changing %symbol in the given range of %diff.""" 226 191 commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s" 227 192 % (symbol, diff)) 228 - return commits 193 + return [x for x in commits.split("\n") if x] 229 194 230 195 231 196 def tree_is_dirty(): ··· 244 209 return stdout.strip('\n') 245 210 246 211 247 - def check_symbols(ignore): 248 - """Find undefined Kconfig symbols and return a dict with the symbol as key 249 - and a list of referencing files as value. 
Files matching %ignore are not 250 - checked for undefined symbols.""" 251 - source_files = [] 252 - kconfig_files = [] 253 - defined_features = set() 254 - referenced_features = dict() # {feature: [files]} 212 + def partition(lst, size): 213 + """Partition list @lst into eveni-sized lists of size @size.""" 214 + return [lst[i::size] for i in xrange(size)] 255 215 216 + 217 + def init_worker(): 218 + """Set signal handler to ignore SIGINT.""" 219 + signal.signal(signal.SIGINT, signal.SIG_IGN) 220 + 221 + 222 + def find_sims(symbol, ignore, defined = []): 223 + """Return a list of max. ten Kconfig symbols that are string-similar to 224 + @symbol.""" 225 + if defined: 226 + return sorted(difflib.get_close_matches(symbol, set(defined), 10)) 227 + 228 + pool = Pool(cpu_count(), init_worker) 229 + kfiles = [] 230 + for gitfile in get_files(): 231 + if REGEX_FILE_KCONFIG.match(gitfile): 232 + kfiles.append(gitfile) 233 + 234 + arglist = [] 235 + for part in partition(kfiles, cpu_count()): 236 + arglist.append((part, ignore)) 237 + 238 + for res in pool.map(parse_kconfig_files, arglist): 239 + defined.extend(res[0]) 240 + 241 + return sorted(difflib.get_close_matches(symbol, set(defined), 10)) 242 + 243 + 244 + def get_files(): 245 + """Return a list of all files in the current git directory.""" 256 246 # use 'git ls-files' to get the worklist 257 247 stdout = execute("git ls-files") 258 248 if len(stdout) > 0 and stdout[-1] == "\n": 259 249 stdout = stdout[:-1] 260 250 251 + files = [] 261 252 for gitfile in stdout.rsplit("\n"): 262 253 if ".git" in gitfile or "ChangeLog" in gitfile or \ 263 254 ".log" in gitfile or os.path.isdir(gitfile) or \ 264 255 gitfile.startswith("tools/"): 265 256 continue 257 + files.append(gitfile) 258 + return files 259 + 260 + 261 + def check_symbols(ignore): 262 + """Find undefined Kconfig symbols and return a dict with the symbol as key 263 + and a list of referencing files as value. 
Files matching %ignore are not 264 + checked for undefined symbols.""" 265 + pool = Pool(cpu_count(), init_worker) 266 + try: 267 + return check_symbols_helper(pool, ignore) 268 + except KeyboardInterrupt: 269 + pool.terminate() 270 + pool.join() 271 + sys.exit(1) 272 + 273 + 274 + def check_symbols_helper(pool, ignore): 275 + """Helper method for check_symbols(). Used to catch keyboard interrupts in 276 + check_symbols() in order to properly terminate running worker processes.""" 277 + source_files = [] 278 + kconfig_files = [] 279 + defined_features = [] 280 + referenced_features = dict() # {file: [features]} 281 + 282 + for gitfile in get_files(): 266 283 if REGEX_FILE_KCONFIG.match(gitfile): 267 284 kconfig_files.append(gitfile) 268 285 else: 269 - # all non-Kconfig files are checked for consistency 286 + if ignore and not re.match(ignore, gitfile): 287 + continue 288 + # add source files that do not match the ignore pattern 270 289 source_files.append(gitfile) 271 290 272 - for sfile in source_files: 273 - if ignore and re.match(ignore, sfile): 274 - # do not check files matching %ignore 275 - continue 276 - parse_source_file(sfile, referenced_features) 291 + # parse source files 292 + arglist = partition(source_files, cpu_count()) 293 + for res in pool.map(parse_source_files, arglist): 294 + referenced_features.update(res) 277 295 278 - for kfile in kconfig_files: 279 - if ignore and re.match(ignore, kfile): 280 - # do not collect references for files matching %ignore 281 - parse_kconfig_file(kfile, defined_features, dict()) 282 - else: 283 - parse_kconfig_file(kfile, defined_features, referenced_features) 296 + 297 + # parse kconfig files 298 + arglist = [] 299 + for part in partition(kconfig_files, cpu_count()): 300 + arglist.append((part, ignore)) 301 + for res in pool.map(parse_kconfig_files, arglist): 302 + defined_features.extend(res[0]) 303 + referenced_features.update(res[1]) 304 + defined_features = set(defined_features) 305 + 306 + # inverse mapping 
of referenced_features to dict(feature: [files]) 307 + inv_map = dict() 308 + for _file, features in referenced_features.iteritems(): 309 + for feature in features: 310 + inv_map[feature] = inv_map.get(feature, set()) 311 + inv_map[feature].add(_file) 312 + referenced_features = inv_map 284 313 285 314 undefined = {} # {feature: [files]} 286 315 for feature in sorted(referenced_features): ··· 358 259 if feature[:-len("_MODULE")] in defined_features: 359 260 continue 360 261 undefined[feature] = referenced_features.get(feature) 361 - return undefined 262 + return undefined, defined_features 362 263 363 264 364 - def parse_source_file(sfile, referenced_features): 365 - """Parse @sfile for referenced Kconfig features.""" 265 + def parse_source_files(source_files): 266 + """Parse each source file in @source_files and return dictionary with source 267 + files as keys and lists of references Kconfig symbols as values.""" 268 + referenced_features = dict() 269 + for sfile in source_files: 270 + referenced_features[sfile] = parse_source_file(sfile) 271 + return referenced_features 272 + 273 + 274 + def parse_source_file(sfile): 275 + """Parse @sfile and return a list of referenced Kconfig features.""" 366 276 lines = [] 277 + references = [] 278 + 279 + if not os.path.exists(sfile): 280 + return references 281 + 367 282 with open(sfile, "r") as stream: 368 283 lines = stream.readlines() 369 284 ··· 388 275 for feature in features: 389 276 if not REGEX_FILTER_FEATURES.search(feature): 390 277 continue 391 - sfiles = referenced_features.get(feature, set()) 392 - sfiles.add(sfile) 393 - referenced_features[feature] = sfiles 278 + references.append(feature) 279 + 280 + return references 394 281 395 282 396 283 def get_features_in_line(line): ··· 398 285 return REGEX_FEATURE.findall(line) 399 286 400 287 401 - def parse_kconfig_file(kfile, defined_features, referenced_features): 288 + def parse_kconfig_files(args): 289 + """Parse kconfig files and return tuple of defined and 
references Kconfig 290 + symbols. Note, @args is a tuple of a list of files and the @ignore 291 + pattern.""" 292 + kconfig_files = args[0] 293 + ignore = args[1] 294 + defined_features = [] 295 + referenced_features = dict() 296 + 297 + for kfile in kconfig_files: 298 + defined, references = parse_kconfig_file(kfile) 299 + defined_features.extend(defined) 300 + if ignore and re.match(ignore, kfile): 301 + # do not collect references for files that match the ignore pattern 302 + continue 303 + referenced_features[kfile] = references 304 + return (defined_features, referenced_features) 305 + 306 + 307 + def parse_kconfig_file(kfile): 402 308 """Parse @kfile and update feature definitions and references.""" 403 309 lines = [] 310 + defined = [] 311 + references = [] 404 312 skip = False 313 + 314 + if not os.path.exists(kfile): 315 + return defined, references 405 316 406 317 with open(kfile, "r") as stream: 407 318 lines = stream.readlines() ··· 437 300 438 301 if REGEX_KCONFIG_DEF.match(line): 439 302 feature_def = REGEX_KCONFIG_DEF.findall(line) 440 - defined_features.add(feature_def[0]) 303 + defined.append(feature_def[0]) 441 304 skip = False 442 305 elif REGEX_KCONFIG_HELP.match(line): 443 306 skip = True ··· 445 308 # ignore content of help messages 446 309 pass 447 310 elif REGEX_KCONFIG_STMT.match(line): 311 + line = REGEX_QUOTES.sub("", line) 448 312 features = get_features_in_line(line) 449 313 # multi-line statements 450 314 while line.endswith("\\"): ··· 457 319 if REGEX_NUMERIC.match(feature): 458 320 # ignore numeric values 459 321 continue 460 - paths = referenced_features.get(feature, set()) 461 - paths.add(kfile) 462 - referenced_features[feature] = paths 322 + references.append(feature) 323 + 324 + return defined, references 463 325 464 326 465 327 if __name__ == "__main__":
+1 -1
scripts/mod/file2alias.c
··· 917 917 char guid_name[(sizeof(*guid) + 1) * 2]; 918 918 919 919 for (i = 0; i < (sizeof(*guid) * 2); i += 2) 920 - sprintf(&guid_name[i], "%02x", TO_NATIVE((*guid)[i/2])); 920 + sprintf(&guid_name[i], "%02x", TO_NATIVE((guid->b)[i/2])); 921 921 922 922 strcpy(alias, "vmbus:"); 923 923 strcat(alias, guid_name);
+19 -5
tools/hv/hv_fcopy_daemon.c
··· 37 37 38 38 static int target_fd; 39 39 static char target_fname[W_MAX_PATH]; 40 + static unsigned long long filesize; 40 41 41 42 static int hv_start_fcopy(struct hv_start_fcopy *smsg) 42 43 { 43 44 int error = HV_E_FAIL; 44 45 char *q, *p; 45 46 47 + filesize = 0; 46 48 p = (char *)smsg->path_name; 47 49 snprintf(target_fname, sizeof(target_fname), "%s/%s", 48 50 (char *)smsg->path_name, (char *)smsg->file_name); ··· 100 98 static int hv_copy_data(struct hv_do_fcopy *cpmsg) 101 99 { 102 100 ssize_t bytes_written; 101 + int ret = 0; 103 102 104 103 bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size, 105 104 cpmsg->offset); 106 105 107 - if (bytes_written != cpmsg->size) 108 - return HV_E_FAIL; 106 + filesize += cpmsg->size; 107 + if (bytes_written != cpmsg->size) { 108 + switch (errno) { 109 + case ENOSPC: 110 + ret = HV_ERROR_DISK_FULL; 111 + break; 112 + default: 113 + ret = HV_E_FAIL; 114 + break; 115 + } 116 + syslog(LOG_ERR, "pwrite failed to write %llu bytes: %ld (%s)", 117 + filesize, (long)bytes_written, strerror(errno)); 118 + } 109 119 110 - return 0; 120 + return ret; 111 121 } 112 122 113 123 static int hv_copy_finished(void) ··· 179 165 } 180 166 181 167 openlog("HV_FCOPY", 0, LOG_USER); 182 - syslog(LOG_INFO, "HV_FCOPY starting; pid is:%d", getpid()); 168 + syslog(LOG_INFO, "starting; pid is:%d", getpid()); 183 169 184 170 fcopy_fd = open("/dev/vmbus/hv_fcopy", O_RDWR); 185 171 ··· 215 201 } 216 202 kernel_modver = *(__u32 *)buffer; 217 203 in_handshake = 0; 218 - syslog(LOG_INFO, "HV_FCOPY: kernel module version: %d", 204 + syslog(LOG_INFO, "kernel module version: %d", 219 205 kernel_modver); 220 206 continue; 221 207 }
+1 -1
tools/hv/hv_vss_daemon.c
··· 254 254 syslog(LOG_ERR, "Illegal op:%d\n", op); 255 255 } 256 256 vss_msg->error = error; 257 - len = write(vss_fd, &error, sizeof(struct hv_vss_msg)); 257 + len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg)); 258 258 if (len != sizeof(struct hv_vss_msg)) { 259 259 syslog(LOG_ERR, "write failed; error: %d %s", errno, 260 260 strerror(errno));