Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'char-misc-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
"A smattering of different small fixes for some random driver
subsystems. Nothing all that major, just resolutions for reported
issues and bugs.

All have been in linux-next with no reported issues"

* tag 'char-misc-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (21 commits)
extcon: int3496: Set the id pin to direction-input if necessary
extcon: int3496: Use gpiod_get instead of gpiod_get_index
extcon: int3496: Add dependency on X86 as it's Intel specific
extcon: int3496: Add GPIO ACPI mapping table
extcon: int3496: Rename GPIO pins in accordance with binding
vmw_vmci: handle the return value from pci_alloc_irq_vectors correctly
ppdev: fix registering same device name
parport: fix attempt to write duplicate procfiles
auxdisplay: img-ascii-lcd: add missing sentinel entry in img_ascii_lcd_matches
Drivers: hv: vmbus: Don't leak memory when a channel is rescinded
Drivers: hv: vmbus: Don't leak channel ids
Drivers: hv: util: don't forget to init host_ts.lock
Drivers: hv: util: move waiting for release to hv_utils_transport itself
vmbus: remove hv_event_tasklet_disable/enable
vmbus: use rcu for per-cpu channel list
mei: don't wait for os version message reply
mei: fix deadlock on mei reset
intel_th: pci: Add Gemini Lake support
intel_th: pci: Add Denverton SOC support
intel_th: Don't leak module refcount on failure to activate
...

+111 -88
+5
Documentation/extcon/intel-int3496.txt
··· 20 Index 2: The output gpio for muxing of the data pins between the USB host and 21 the USB peripheral controller, write 1 to mux to the peripheral 22 controller
··· 20 Index 2: The output gpio for muxing of the data pins between the USB host and 21 the USB peripheral controller, write 1 to mux to the peripheral 22 controller 23 + 24 + There is a mapping between indices and GPIO connection IDs as follows 25 + id index 0 26 + vbus index 1 27 + mux index 2
+1
drivers/auxdisplay/img-ascii-lcd.c
··· 218 { .compatible = "img,boston-lcd", .data = &boston_config }, 219 { .compatible = "mti,malta-lcd", .data = &malta_config }, 220 { .compatible = "mti,sead3-lcd", .data = &sead3_config }, 221 }; 222 223 /**
··· 218 { .compatible = "img,boston-lcd", .data = &boston_config }, 219 { .compatible = "mti,malta-lcd", .data = &malta_config }, 220 { .compatible = "mti,sead3-lcd", .data = &sead3_config }, 221 + { /* sentinel */ } 222 }; 223 224 /**
+9 -2
drivers/char/ppdev.c
··· 84 struct ieee1284_info state; 85 struct ieee1284_info saved_state; 86 long default_inactivity; 87 }; 88 89 /* should we use PARDEVICE_MAX here? */ 90 static struct device *devices[PARPORT_MAX]; 91 92 /* pp_struct.flags bitfields */ 93 #define PP_CLAIMED (1<<0) ··· 293 struct pardevice *pdev = NULL; 294 char *name; 295 struct pardev_cb ppdev_cb; 296 - int rc = 0; 297 298 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); 299 if (name == NULL) ··· 306 goto err; 307 } 308 309 memset(&ppdev_cb, 0, sizeof(ppdev_cb)); 310 ppdev_cb.irq_func = pp_irq; 311 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; 312 ppdev_cb.private = pp; 313 - pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); 314 parport_put_port(port); 315 316 if (!pdev) { 317 pr_warn("%s: failed to register device!\n", name); 318 rc = -ENXIO; 319 goto err; 320 } 321 322 pp->pdev = pdev; 323 dev_dbg(&pdev->dev, "registered pardevice\n"); 324 err: 325 kfree(name); ··· 761 762 if (pp->pdev) { 763 parport_unregister_device(pp->pdev); 764 pp->pdev = NULL; 765 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); 766 }
··· 84 struct ieee1284_info state; 85 struct ieee1284_info saved_state; 86 long default_inactivity; 87 + int index; 88 }; 89 90 /* should we use PARDEVICE_MAX here? */ 91 static struct device *devices[PARPORT_MAX]; 92 + 93 + static DEFINE_IDA(ida_index); 94 95 /* pp_struct.flags bitfields */ 96 #define PP_CLAIMED (1<<0) ··· 290 struct pardevice *pdev = NULL; 291 char *name; 292 struct pardev_cb ppdev_cb; 293 + int rc = 0, index; 294 295 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); 296 if (name == NULL) ··· 303 goto err; 304 } 305 306 + index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); 307 memset(&ppdev_cb, 0, sizeof(ppdev_cb)); 308 ppdev_cb.irq_func = pp_irq; 309 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; 310 ppdev_cb.private = pp; 311 + pdev = parport_register_dev_model(port, name, &ppdev_cb, index); 312 parport_put_port(port); 313 314 if (!pdev) { 315 pr_warn("%s: failed to register device!\n", name); 316 rc = -ENXIO; 317 + ida_simple_remove(&ida_index, index); 318 goto err; 319 } 320 321 pp->pdev = pdev; 322 + pp->index = index; 323 dev_dbg(&pdev->dev, "registered pardevice\n"); 324 err: 325 kfree(name); ··· 755 756 if (pp->pdev) { 757 parport_unregister_device(pp->pdev); 758 + ida_simple_remove(&ida_index, pp->index); 759 pp->pdev = NULL; 760 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); 761 }
+1 -1
drivers/extcon/Kconfig
··· 44 45 config EXTCON_INTEL_INT3496 46 tristate "Intel INT3496 ACPI device extcon driver" 47 - depends on GPIOLIB && ACPI 48 help 49 Say Y here to enable extcon support for USB OTG ports controlled by 50 an Intel INT3496 ACPI device.
··· 44 45 config EXTCON_INTEL_INT3496 46 tristate "Intel INT3496 ACPI device extcon driver" 47 + depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST) 48 help 49 Say Y here to enable extcon support for USB OTG ports controlled by 50 an Intel INT3496 ACPI device.
+28 -11
drivers/extcon/extcon-intel-int3496.c
··· 45 EXTCON_NONE, 46 }; 47 48 static void int3496_do_usb_id(struct work_struct *work) 49 { 50 struct int3496_data *data = ··· 94 struct int3496_data *data; 95 int ret; 96 97 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 98 if (!data) 99 return -ENOMEM; ··· 108 data->dev = dev; 109 INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); 110 111 - data->gpio_usb_id = devm_gpiod_get_index(dev, "id", 112 - INT3496_GPIO_USB_ID, 113 - GPIOD_IN); 114 if (IS_ERR(data->gpio_usb_id)) { 115 ret = PTR_ERR(data->gpio_usb_id); 116 dev_err(dev, "can't request USB ID GPIO: %d\n", ret); 117 return ret; 118 } 119 120 data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); 121 - if (data->usb_id_irq <= 0) { 122 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); 123 - return -EINVAL; 124 } 125 126 - data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en", 127 - INT3496_GPIO_VBUS_EN, 128 - GPIOD_ASIS); 129 if (IS_ERR(data->gpio_vbus_en)) 130 dev_info(dev, "can't request VBUS EN GPIO\n"); 131 132 - data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux", 133 - INT3496_GPIO_USB_MUX, 134 - GPIOD_ASIS); 135 if (IS_ERR(data->gpio_usb_mux)) 136 dev_info(dev, "can't request USB MUX GPIO\n"); 137 ··· 168 169 devm_free_irq(&pdev->dev, data->usb_id_irq, data); 170 cancel_delayed_work_sync(&data->work); 171 172 return 0; 173 }
··· 45 EXTCON_NONE, 46 }; 47 48 + static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false }; 49 + static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false }; 50 + static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false }; 51 + 52 + static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = { 53 + { "id-gpios", &id_gpios, 1 }, 54 + { "vbus-gpios", &vbus_gpios, 1 }, 55 + { "mux-gpios", &mux_gpios, 1 }, 56 + { }, 57 + }; 58 + 59 static void int3496_do_usb_id(struct work_struct *work) 60 { 61 struct int3496_data *data = ··· 83 struct int3496_data *data; 84 int ret; 85 86 + ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), 87 + acpi_int3496_default_gpios); 88 + if (ret) { 89 + dev_err(dev, "can't add GPIO ACPI mapping\n"); 90 + return ret; 91 + } 92 + 93 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 94 if (!data) 95 return -ENOMEM; ··· 90 data->dev = dev; 91 INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); 92 93 + data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN); 94 if (IS_ERR(data->gpio_usb_id)) { 95 ret = PTR_ERR(data->gpio_usb_id); 96 dev_err(dev, "can't request USB ID GPIO: %d\n", ret); 97 return ret; 98 + } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) { 99 + dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n"); 100 + gpiod_direction_input(data->gpio_usb_id); 101 } 102 103 data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); 104 + if (data->usb_id_irq < 0) { 105 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); 106 + return data->usb_id_irq; 107 } 108 109 + data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS); 110 if (IS_ERR(data->gpio_vbus_en)) 111 dev_info(dev, "can't request VBUS EN GPIO\n"); 112 113 + data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS); 114 if (IS_ERR(data->gpio_usb_mux)) 115 dev_info(dev, "can't request USB MUX GPIO\n"); 116 ··· 153 154 devm_free_irq(&pdev->dev, data->usb_id_irq, data); 155 cancel_delayed_work_sync(&data->work); 156 + 157 + acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev)); 158 159 return 0; 160 }
+12 -13
drivers/hv/channel.c
··· 502 503 wait_for_completion(&info->waitevent); 504 505 - if (channel->rescind) { 506 - ret = -ENODEV; 507 - goto post_msg_err; 508 - } 509 - 510 post_msg_err: 511 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 512 list_del(&info->msglistentry); 513 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ··· 533 int ret; 534 535 /* 536 - * vmbus_on_event(), running in the tasklet, can race 537 * with vmbus_close_internal() in the case of SMP guest, e.g., when 538 * the former is accessing channel->inbound.ring_buffer, the latter 539 - * could be freeing the ring_buffer pages. 540 - * 541 - * To resolve the race, we can serialize them by disabling the 542 - * tasklet when the latter is running here. 543 */ 544 - hv_event_tasklet_disable(channel); 545 546 /* 547 * In case a device driver's probe() fails (e.g., ··· 606 get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); 607 608 out: 609 - hv_event_tasklet_enable(channel); 610 - 611 return ret; 612 } 613
··· 502 503 wait_for_completion(&info->waitevent); 504 505 post_msg_err: 506 + /* 507 + * If the channel has been rescinded; 508 + * we will be awakened by the rescind 509 + * handler; set the error code to zero so we don't leak memory. 510 + */ 511 + if (channel->rescind) 512 + ret = 0; 513 + 514 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 515 list_del(&info->msglistentry); 516 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ··· 530 int ret; 531 532 /* 533 + * vmbus_on_event(), running in the per-channel tasklet, can race 534 * with vmbus_close_internal() in the case of SMP guest, e.g., when 535 * the former is accessing channel->inbound.ring_buffer, the latter 536 + * could be freeing the ring_buffer pages, so here we must stop it 537 + * first. 538 */ 539 + tasklet_disable(&channel->callback_event); 540 541 /* 542 * In case a device driver's probe() fails (e.g., ··· 605 get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); 606 607 out: 608 return ret; 609 } 610
+5 -22
drivers/hv/channel_mgmt.c
··· 350 static void free_channel(struct vmbus_channel *channel) 351 { 352 tasklet_kill(&channel->callback_event); 353 - kfree(channel); 354 } 355 356 static void percpu_channel_enq(void *arg) ··· 360 struct hv_per_cpu_context *hv_cpu 361 = this_cpu_ptr(hv_context.cpu_context); 362 363 - list_add_tail(&channel->percpu_list, &hv_cpu->chan_list); 364 } 365 366 static void percpu_channel_deq(void *arg) 367 { 368 struct vmbus_channel *channel = arg; 369 370 - list_del(&channel->percpu_list); 371 } 372 373 ··· 382 true); 383 } 384 385 - void hv_event_tasklet_disable(struct vmbus_channel *channel) 386 - { 387 - tasklet_disable(&channel->callback_event); 388 - } 389 - 390 - void hv_event_tasklet_enable(struct vmbus_channel *channel) 391 - { 392 - tasklet_enable(&channel->callback_event); 393 - 394 - /* In case there is any pending event */ 395 - tasklet_schedule(&channel->callback_event); 396 - } 397 - 398 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 399 { 400 unsigned long flags; ··· 390 BUG_ON(!channel->rescind); 391 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 392 393 - hv_event_tasklet_disable(channel); 394 if (channel->target_cpu != get_cpu()) { 395 put_cpu(); 396 smp_call_function_single(channel->target_cpu, ··· 398 percpu_channel_deq(channel); 399 put_cpu(); 400 } 401 - hv_event_tasklet_enable(channel); 402 403 if (channel->primary_channel == NULL) { 404 list_del(&channel->listentry); ··· 491 492 init_vp_index(newchannel, dev_type); 493 494 - hv_event_tasklet_disable(newchannel); 495 if (newchannel->target_cpu != get_cpu()) { 496 put_cpu(); 497 smp_call_function_single(newchannel->target_cpu, ··· 500 percpu_channel_enq(newchannel); 501 put_cpu(); 502 } 503 - hv_event_tasklet_enable(newchannel); 504 505 /* 506 * This state is used to indicate a successful open ··· 549 list_del(&newchannel->listentry); 550 mutex_unlock(&vmbus_connection.channel_mutex); 551 552 - hv_event_tasklet_disable(newchannel); 553 if (newchannel->target_cpu != get_cpu()) { 554 put_cpu(); 555 smp_call_function_single(newchannel->target_cpu, ··· 557 percpu_channel_deq(newchannel); 558 put_cpu(); 559 } 560 - hv_event_tasklet_enable(newchannel); 561 562 vmbus_release_relid(newchannel->offermsg.child_relid); 563 ··· 796 /* Allocate the channel object and save this offer. */ 797 newchannel = alloc_channel(); 798 if (!newchannel) { 799 pr_err("Unable to allocate channel object\n"); 800 return; 801 }
··· 350 static void free_channel(struct vmbus_channel *channel) 351 { 352 tasklet_kill(&channel->callback_event); 353 + 354 + kfree_rcu(channel, rcu); 355 } 356 357 static void percpu_channel_enq(void *arg) ··· 359 struct hv_per_cpu_context *hv_cpu 360 = this_cpu_ptr(hv_context.cpu_context); 361 362 + list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list); 363 } 364 365 static void percpu_channel_deq(void *arg) 366 { 367 struct vmbus_channel *channel = arg; 368 369 + list_del_rcu(&channel->percpu_list); 370 } 371 372 ··· 381 true); 382 } 383 384 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 385 { 386 unsigned long flags; ··· 402 BUG_ON(!channel->rescind); 403 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 404 405 if (channel->target_cpu != get_cpu()) { 406 put_cpu(); 407 smp_call_function_single(channel->target_cpu, ··· 411 percpu_channel_deq(channel); 412 put_cpu(); 413 } 414 415 if (channel->primary_channel == NULL) { 416 list_del(&channel->listentry); ··· 505 506 init_vp_index(newchannel, dev_type); 507 508 if (newchannel->target_cpu != get_cpu()) { 509 put_cpu(); 510 smp_call_function_single(newchannel->target_cpu, ··· 515 percpu_channel_enq(newchannel); 516 put_cpu(); 517 } 518 519 /* 520 * This state is used to indicate a successful open ··· 565 list_del(&newchannel->listentry); 566 mutex_unlock(&vmbus_connection.channel_mutex); 567 568 if (newchannel->target_cpu != get_cpu()) { 569 put_cpu(); 570 smp_call_function_single(newchannel->target_cpu, ··· 574 percpu_channel_deq(newchannel); 575 put_cpu(); 576 } 577 578 vmbus_release_relid(newchannel->offermsg.child_relid); 579 ··· 814 /* Allocate the channel object and save this offer. */ 815 newchannel = alloc_channel(); 816 if (!newchannel) { 817 + vmbus_release_relid(offer->child_relid); 818 pr_err("Unable to allocate channel object\n"); 819 return; 820 }
-4
drivers/hv/hv_fcopy.c
··· 71 static const char fcopy_devname[] = "vmbus/hv_fcopy"; 72 static u8 *recv_buffer; 73 static struct hvutil_transport *hvt; 74 - static struct completion release_event; 75 /* 76 * This state maintains the version number registered by the daemon. 77 */ ··· 330 331 if (cancel_delayed_work_sync(&fcopy_timeout_work)) 332 fcopy_respond_to_host(HV_E_FAIL); 333 - complete(&release_event); 334 } 335 336 int hv_fcopy_init(struct hv_util_service *srv) ··· 337 recv_buffer = srv->recv_buffer; 338 fcopy_transaction.recv_channel = srv->channel; 339 340 - init_completion(&release_event); 341 /* 342 * When this driver loads, the user level daemon that 343 * processes the host requests may not yet be running. ··· 358 fcopy_transaction.state = HVUTIL_DEVICE_DYING; 359 cancel_delayed_work_sync(&fcopy_timeout_work); 360 hvutil_transport_destroy(hvt); 361 - wait_for_completion(&release_event); 362 }
··· 71 static const char fcopy_devname[] = "vmbus/hv_fcopy"; 72 static u8 *recv_buffer; 73 static struct hvutil_transport *hvt; 74 /* 75 * This state maintains the version number registered by the daemon. 76 */ ··· 331 332 if (cancel_delayed_work_sync(&fcopy_timeout_work)) 333 fcopy_respond_to_host(HV_E_FAIL); 334 } 335 336 int hv_fcopy_init(struct hv_util_service *srv) ··· 339 recv_buffer = srv->recv_buffer; 340 fcopy_transaction.recv_channel = srv->channel; 341 342 /* 343 * When this driver loads, the user level daemon that 344 * processes the host requests may not yet be running. ··· 361 fcopy_transaction.state = HVUTIL_DEVICE_DYING; 362 cancel_delayed_work_sync(&fcopy_timeout_work); 363 hvutil_transport_destroy(hvt); 364 }
-4
drivers/hv/hv_kvp.c
··· 101 static const char kvp_devname[] = "vmbus/hv_kvp"; 102 static u8 *recv_buffer; 103 static struct hvutil_transport *hvt; 104 - static struct completion release_event; 105 /* 106 * Register the kernel component with the user-level daemon. 107 * As part of this registration, pass the LIC version number. ··· 713 if (cancel_delayed_work_sync(&kvp_timeout_work)) 714 kvp_respond_to_host(NULL, HV_E_FAIL); 715 kvp_transaction.state = HVUTIL_DEVICE_INIT; 716 - complete(&release_event); 717 } 718 719 int ··· 721 recv_buffer = srv->recv_buffer; 722 kvp_transaction.recv_channel = srv->channel; 723 724 - init_completion(&release_event); 725 /* 726 * When this driver loads, the user level daemon that 727 * processes the host requests may not yet be running. ··· 744 cancel_delayed_work_sync(&kvp_timeout_work); 745 cancel_work_sync(&kvp_sendkey_work); 746 hvutil_transport_destroy(hvt); 747 - wait_for_completion(&release_event); 748 }
··· 101 static const char kvp_devname[] = "vmbus/hv_kvp"; 102 static u8 *recv_buffer; 103 static struct hvutil_transport *hvt; 104 /* 105 * Register the kernel component with the user-level daemon. 106 * As part of this registration, pass the LIC version number. ··· 714 if (cancel_delayed_work_sync(&kvp_timeout_work)) 715 kvp_respond_to_host(NULL, HV_E_FAIL); 716 kvp_transaction.state = HVUTIL_DEVICE_INIT; 717 } 718 719 int ··· 723 recv_buffer = srv->recv_buffer; 724 kvp_transaction.recv_channel = srv->channel; 725 726 /* 727 * When this driver loads, the user level daemon that 728 * processes the host requests may not yet be running. ··· 747 cancel_delayed_work_sync(&kvp_timeout_work); 748 cancel_work_sync(&kvp_sendkey_work); 749 hvutil_transport_destroy(hvt); 750 }
-4
drivers/hv/hv_snapshot.c
··· 79 static const char vss_devname[] = "vmbus/hv_vss"; 80 static __u8 *recv_buffer; 81 static struct hvutil_transport *hvt; 82 - static struct completion release_event; 83 84 static void vss_timeout_func(struct work_struct *dummy); 85 static void vss_handle_request(struct work_struct *dummy); ··· 360 if (cancel_delayed_work_sync(&vss_timeout_work)) 361 vss_respond_to_host(HV_E_FAIL); 362 vss_transaction.state = HVUTIL_DEVICE_INIT; 363 - complete(&release_event); 364 } 365 366 int 367 hv_vss_init(struct hv_util_service *srv) 368 { 369 - init_completion(&release_event); 370 if (vmbus_proto_version < VERSION_WIN8_1) { 371 pr_warn("Integration service 'Backup (volume snapshot)'" 372 " not supported on this host version.\n"); ··· 397 cancel_delayed_work_sync(&vss_timeout_work); 398 cancel_work_sync(&vss_handle_request_work); 399 hvutil_transport_destroy(hvt); 400 - wait_for_completion(&release_event); 401 }
··· 79 static const char vss_devname[] = "vmbus/hv_vss"; 80 static __u8 *recv_buffer; 81 static struct hvutil_transport *hvt; 82 83 static void vss_timeout_func(struct work_struct *dummy); 84 static void vss_handle_request(struct work_struct *dummy); ··· 361 if (cancel_delayed_work_sync(&vss_timeout_work)) 362 vss_respond_to_host(HV_E_FAIL); 363 vss_transaction.state = HVUTIL_DEVICE_INIT; 364 } 365 366 int 367 hv_vss_init(struct hv_util_service *srv) 368 { 369 if (vmbus_proto_version < VERSION_WIN8_1) { 370 pr_warn("Integration service 'Backup (volume snapshot)'" 371 " not supported on this host version.\n"); ··· 400 cancel_delayed_work_sync(&vss_timeout_work); 401 cancel_work_sync(&vss_handle_request_work); 402 hvutil_transport_destroy(hvt); 403 }
+2
drivers/hv/hv_util.c
··· 590 if (!hyperv_cs) 591 return -ENODEV; 592 593 INIT_WORK(&wrk.work, hv_set_host_time); 594 595 /*
··· 590 if (!hyperv_cs) 591 return -ENODEV; 592 593 + spin_lock_init(&host_ts.lock); 594 + 595 INIT_WORK(&wrk.work, hv_set_host_time); 596 597 /*
+8 -4
drivers/hv/hv_utils_transport.c
··· 182 * connects back. 183 */ 184 hvt_reset(hvt); 185 - mutex_unlock(&hvt->lock); 186 187 if (mode_old == HVUTIL_TRANSPORT_DESTROY) 188 - hvt_transport_free(hvt); 189 190 return 0; 191 } ··· 305 306 init_waitqueue_head(&hvt->outmsg_q); 307 mutex_init(&hvt->lock); 308 309 spin_lock(&hvt_list_lock); 310 list_add(&hvt->list, &hvt_list); ··· 353 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) 354 cn_del_callback(&hvt->cn_id); 355 356 - if (mode_old != HVUTIL_TRANSPORT_CHARDEV) 357 - hvt_transport_free(hvt); 358 }
··· 182 * connects back. 183 */ 184 hvt_reset(hvt); 185 186 if (mode_old == HVUTIL_TRANSPORT_DESTROY) 187 + complete(&hvt->release); 188 + 189 + mutex_unlock(&hvt->lock); 190 191 return 0; 192 } ··· 304 305 init_waitqueue_head(&hvt->outmsg_q); 306 mutex_init(&hvt->lock); 307 + init_completion(&hvt->release); 308 309 spin_lock(&hvt_list_lock); 310 list_add(&hvt->list, &hvt_list); ··· 351 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) 352 cn_del_callback(&hvt->cn_id); 353 354 + if (mode_old == HVUTIL_TRANSPORT_CHARDEV) 355 + wait_for_completion(&hvt->release); 356 + 357 + hvt_transport_free(hvt); 358 }
+1
drivers/hv/hv_utils_transport.h
··· 41 int outmsg_len; /* its length */ 42 wait_queue_head_t outmsg_q; /* poll/read wait queue */ 43 struct mutex lock; /* protects struct members */ 44 }; 45 46 struct hvutil_transport *hvutil_transport_init(const char *name,
··· 41 int outmsg_len; /* its length */ 42 wait_queue_head_t outmsg_q; /* poll/read wait queue */ 43 struct mutex lock; /* protects struct members */ 44 + struct completion release; /* synchronize with fd release */ 45 }; 46 47 struct hvutil_transport *hvutil_transport_init(const char *name,
+5 -1
drivers/hv/vmbus_drv.c
··· 939 if (relid == 0) 940 continue; 941 942 /* Find channel based on relid */ 943 - list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) { 944 if (channel->offermsg.child_relid != relid) 945 continue; 946 ··· 958 tasklet_schedule(&channel->callback_event); 959 } 960 } 961 } 962 } 963
··· 939 if (relid == 0) 940 continue; 941 942 + rcu_read_lock(); 943 + 944 /* Find channel based on relid */ 945 + list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) { 946 if (channel->offermsg.child_relid != relid) 947 continue; 948 ··· 956 tasklet_schedule(&channel->callback_event); 957 } 958 } 959 + 960 + rcu_read_unlock(); 961 } 962 } 963
+3 -1
drivers/hwtracing/intel_th/core.c
··· 221 else 222 intel_th_trace_enable(thdev); 223 224 - if (ret) 225 pm_runtime_put(&thdev->dev); 226 227 return ret; 228 }
··· 221 else 222 intel_th_trace_enable(thdev); 223 224 + if (ret) { 225 pm_runtime_put(&thdev->dev); 226 + module_put(thdrv->driver.owner); 227 + } 228 229 return ret; 230 }
+10
drivers/hwtracing/intel_th/pci.c
··· 85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), 86 .driver_data = (kernel_ulong_t)0, 87 }, 88 { 0 }, 89 }; 90
··· 85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), 86 .driver_data = (kernel_ulong_t)0, 87 }, 88 + { 89 + /* Denverton */ 90 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1), 91 + .driver_data = (kernel_ulong_t)0, 92 + }, 93 + { 94 + /* Gemini Lake */ 95 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), 96 + .driver_data = (kernel_ulong_t)0, 97 + }, 98 { 0 }, 99 }; 100
+2 -12
drivers/misc/mei/bus-fixup.c
··· 112 113 static int mei_osver(struct mei_cl_device *cldev) 114 { 115 - int ret; 116 const size_t size = sizeof(struct mkhi_msg_hdr) + 117 sizeof(struct mkhi_fwcaps) + 118 sizeof(struct mei_os_ver); 119 - size_t length = 8; 120 char buf[size]; 121 struct mkhi_msg *req; 122 struct mkhi_fwcaps *fwcaps; ··· 135 os_ver = (struct mei_os_ver *)fwcaps->data; 136 os_ver->os_type = OSTYPE_LINUX; 137 138 - ret = __mei_cl_send(cldev->cl, buf, size, mode); 139 - if (ret < 0) 140 - return ret; 141 - 142 - ret = __mei_cl_recv(cldev->cl, buf, length, 0); 143 - if (ret < 0) 144 - return ret; 145 - 146 - return 0; 147 } 148 149 static void mei_mkhi_fix(struct mei_cl_device *cldev) ··· 150 return; 151 152 ret = mei_osver(cldev); 153 - if (ret) 154 dev_err(&cldev->dev, "OS version command failed %d\n", ret); 155 156 mei_cldev_disable(cldev);
··· 112 113 static int mei_osver(struct mei_cl_device *cldev) 114 { 115 const size_t size = sizeof(struct mkhi_msg_hdr) + 116 sizeof(struct mkhi_fwcaps) + 117 sizeof(struct mei_os_ver); 118 char buf[size]; 119 struct mkhi_msg *req; 120 struct mkhi_fwcaps *fwcaps; ··· 137 os_ver = (struct mei_os_ver *)fwcaps->data; 138 os_ver->os_type = OSTYPE_LINUX; 139 140 + return __mei_cl_send(cldev->cl, buf, size, mode); 141 } 142 143 static void mei_mkhi_fix(struct mei_cl_device *cldev) ··· 160 return; 161 162 ret = mei_osver(cldev); 163 + if (ret < 0) 164 dev_err(&cldev->dev, "OS version command failed %d\n", ret); 165 166 mei_cldev_disable(cldev);
+6 -2
drivers/misc/mei/init.c
··· 124 125 mei_clear_interrupts(dev); 126 127 - mei_synchronize_irq(dev); 128 - 129 /* we're already in reset, cancel the init timer 130 * if the reset was called due the hbm protocol error 131 * we need to call it before hw start ··· 302 container_of(work, struct mei_device, reset_work); 303 int ret; 304 305 mutex_lock(&dev->device_lock); 306 307 ret = mei_reset(dev); ··· 328 mei_cl_bus_remove_devices(dev); 329 330 mei_cancel_work(dev); 331 332 mutex_lock(&dev->device_lock); 333
··· 124 125 mei_clear_interrupts(dev); 126 127 /* we're already in reset, cancel the init timer 128 * if the reset was called due the hbm protocol error 129 * we need to call it before hw start ··· 304 container_of(work, struct mei_device, reset_work); 305 int ret; 306 307 + mei_clear_interrupts(dev); 308 + mei_synchronize_irq(dev); 309 + 310 mutex_lock(&dev->device_lock); 311 312 ret = mei_reset(dev); ··· 327 mei_cl_bus_remove_devices(dev); 328 329 mei_cancel_work(dev); 330 + 331 + mei_clear_interrupts(dev); 332 + mei_synchronize_irq(dev); 333 334 mutex_lock(&dev->device_lock); 335
+2 -2
drivers/misc/vmw_vmci/vmci_guest.c
··· 566 */ 567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, 568 PCI_IRQ_MSIX); 569 - if (error) { 570 error = pci_alloc_irq_vectors(pdev, 1, 1, 571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); 572 - if (error) 573 goto err_remove_bitmap; 574 } else { 575 vmci_dev->exclusive_vectors = true;
··· 566 */ 567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, 568 PCI_IRQ_MSIX); 569 + if (error < 0) { 570 error = pci_alloc_irq_vectors(pdev, 1, 1, 571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); 572 + if (error < 0) 573 goto err_remove_bitmap; 574 } else { 575 vmci_dev->exclusive_vectors = true;
+4 -2
drivers/parport/share.c
··· 939 * pardevice fields. -arca 940 */ 941 port->ops->init_state(par_dev, par_dev->state); 942 - port->proc_device = par_dev; 943 - parport_device_proc_register(par_dev); 944 945 return par_dev; 946
··· 939 * pardevice fields. -arca 940 */ 941 port->ops->init_state(par_dev, par_dev->state); 942 + if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { 943 + port->proc_device = par_dev; 944 + parport_device_proc_register(par_dev); 945 + } 946 947 return par_dev; 948
+7 -3
include/linux/hyperv.h
··· 845 * link up channels based on their CPU affinity. 846 */ 847 struct list_head percpu_list; 848 /* 849 * For performance critical channels (storage, networking 850 * etc,), Hyper-V has a mechanism to enhance the throughput ··· 1436 const int *fw_version, int fw_vercnt, 1437 const int *srv_version, int srv_vercnt, 1438 int *nego_fw_version, int *nego_srv_version); 1439 - 1440 - void hv_event_tasklet_disable(struct vmbus_channel *channel); 1441 - void hv_event_tasklet_enable(struct vmbus_channel *channel); 1442 1443 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1444
··· 845 * link up channels based on their CPU affinity. 846 */ 847 struct list_head percpu_list; 848 + 849 + /* 850 + * Defer freeing channel until after all cpu's have 851 + * gone through grace period. 852 + */ 853 + struct rcu_head rcu; 854 + 855 /* 856 * For performance critical channels (storage, networking 857 * etc,), Hyper-V has a mechanism to enhance the throughput ··· 1429 const int *fw_version, int fw_vercnt, 1430 const int *srv_version, int srv_vercnt, 1431 int *nego_fw_version, int *nego_srv_version); 1432 1433 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1434