Merge tag 'char-misc-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
"A smattering of different small fixes for some random driver
subsystems. Nothing all that major, just resolutions for reported
issues and bugs.

All have been in linux-next with no reported issues"

* tag 'char-misc-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (21 commits)
extcon: int3496: Set the id pin to direction-input if necessary
extcon: int3496: Use gpiod_get instead of gpiod_get_index
extcon: int3496: Add dependency on X86 as it's Intel specific
extcon: int3496: Add GPIO ACPI mapping table
extcon: int3496: Rename GPIO pins in accordance with binding
vmw_vmci: handle the return value from pci_alloc_irq_vectors correctly
ppdev: fix registering same device name
parport: fix attempt to write duplicate procfiles
auxdisplay: img-ascii-lcd: add missing sentinel entry in img_ascii_lcd_matches
Drivers: hv: vmbus: Don't leak memory when a channel is rescinded
Drivers: hv: vmbus: Don't leak channel ids
Drivers: hv: util: don't forget to init host_ts.lock
Drivers: hv: util: move waiting for release to hv_utils_transport itself
vmbus: remove hv_event_tasklet_disable/enable
vmbus: use rcu for per-cpu channel list
mei: don't wait for os version message reply
mei: fix deadlock on mei reset
intel_th: pci: Add Gemini Lake support
intel_th: pci: Add Denverton SOC support
intel_th: Don't leak module refcount on failure to activate
...

+5
Documentation/extcon/intel-int3496.txt
···
 Index 2: The output gpio for muxing of the data pins between the USB host and
          the USB peripheral controller, write 1 to mux to the peripheral
          controller
+
+There is a mapping between indices and GPIO connection IDs as follows:
+        id      index 0
+        vbus    index 1
+        mux     index 2
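
For reference, once a mapping like this is registered (see the driver change below), a gpiod consumer looks these pins up by connection ID rather than by raw index. A minimal sketch, assuming a generic probe-path device pointer; names mirror the table above and error handling is trimmed:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int example_get_pins(struct device *dev)
    {
            struct gpio_desc *id, *vbus, *mux;

            /* "id", "vbus" and "mux" resolve via the index mapping above */
            id = devm_gpiod_get(dev, "id", GPIOD_IN);
            if (IS_ERR(id))
                    return PTR_ERR(id);

            vbus = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
            mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
            return 0;
    }
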
+1
drivers/auxdisplay/img-ascii-lcd.c
···
         { .compatible = "img,boston-lcd", .data = &boston_config },
         { .compatible = "mti,malta-lcd", .data = &malta_config },
         { .compatible = "mti,sead3-lcd", .data = &sead3_config },
+        { /* sentinel */ }
 };

 /**
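
The sentinel matters because the of_match_* helpers walk such a table until they hit an all-zero entry; without one the walk runs past the end of the array. The general pattern, with hypothetical compatibles:

    #include <linux/module.h>
    #include <linux/of.h>

    static const struct of_device_id example_matches[] = {
            { .compatible = "vendor,device-a" },
            { .compatible = "vendor,device-b" },
            { /* sentinel */ }      /* zeroed entry terminates the walk */
    };
    MODULE_DEVICE_TABLE(of, example_matches);
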
+9 -2
drivers/char/ppdev.c
···
         struct ieee1284_info state;
         struct ieee1284_info saved_state;
         long default_inactivity;
+        int index;
 };

 /* should we use PARDEVICE_MAX here? */
 static struct device *devices[PARPORT_MAX];
+
+static DEFINE_IDA(ida_index);

 /* pp_struct.flags bitfields */
 #define PP_CLAIMED (1<<0)
···
         struct pardevice *pdev = NULL;
         char *name;
         struct pardev_cb ppdev_cb;
-        int rc = 0;
+        int rc = 0, index;

         name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
         if (name == NULL)
···
                 goto err;
         }

+        index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
         memset(&ppdev_cb, 0, sizeof(ppdev_cb));
         ppdev_cb.irq_func = pp_irq;
         ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
         ppdev_cb.private = pp;
-        pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+        pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
         parport_put_port(port);

         if (!pdev) {
                 pr_warn("%s: failed to register device!\n", name);
                 rc = -ENXIO;
+                ida_simple_remove(&ida_index, index);
                 goto err;
         }

         pp->pdev = pdev;
+        pp->index = index;
         dev_dbg(&pdev->dev, "registered pardevice\n");
 err:
         kfree(name);
···

         if (pp->pdev) {
                 parport_unregister_device(pp->pdev);
+                ida_simple_remove(&ida_index, pp->index);
                 pp->pdev = NULL;
                 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
         }
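
For context, the IDA pattern the ppdev fix adopts: ida_simple_get() hands out the smallest unused ID (or a negative errno, which callers should check), and every failure and teardown path must return it via ida_simple_remove(). A minimal sketch of the lifecycle, using the same ida_simple_* interface the patch uses:

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_acquire_index(void)
    {
            int index = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);

            if (index < 0)
                    return index;           /* -ENOMEM, -ENOSPC, ... */

            /* use "index" to build a unique device name */
            return index;
    }

    static void example_release_index(int index)
    {
            ida_simple_remove(&example_ida, index);
    }
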
+1 -1
drivers/extcon/Kconfig
···

 config EXTCON_INTEL_INT3496
         tristate "Intel INT3496 ACPI device extcon driver"
-        depends on GPIOLIB && ACPI
+        depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
         help
           Say Y here to enable extcon support for USB OTG ports controlled by
           an Intel INT3496 ACPI device.
+28 -11
drivers/extcon/extcon-intel-int3496.c
···
         EXTCON_NONE,
 };

+static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
+static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
+static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
+        { "id-gpios", &id_gpios, 1 },
+        { "vbus-gpios", &vbus_gpios, 1 },
+        { "mux-gpios", &mux_gpios, 1 },
+        { },
+};
+
 static void int3496_do_usb_id(struct work_struct *work)
 {
         struct int3496_data *data =
···
         struct int3496_data *data;
         int ret;

+        ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+                                        acpi_int3496_default_gpios);
+        if (ret) {
+                dev_err(dev, "can't add GPIO ACPI mapping\n");
+                return ret;
+        }
+
         data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
         if (!data)
                 return -ENOMEM;
···
         data->dev = dev;
         INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);

-        data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
-                                                 INT3496_GPIO_USB_ID,
-                                                 GPIOD_IN);
+        data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
         if (IS_ERR(data->gpio_usb_id)) {
                 ret = PTR_ERR(data->gpio_usb_id);
                 dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
                 return ret;
+        } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
+                dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
+                gpiod_direction_input(data->gpio_usb_id);
         }

         data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
-        if (data->usb_id_irq <= 0) {
+        if (data->usb_id_irq < 0) {
                 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
-                return -EINVAL;
+                return data->usb_id_irq;
         }

-        data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
-                                                  INT3496_GPIO_VBUS_EN,
-                                                  GPIOD_ASIS);
+        data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
         if (IS_ERR(data->gpio_vbus_en))
                 dev_info(dev, "can't request VBUS EN GPIO\n");

-        data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
-                                                  INT3496_GPIO_USB_MUX,
-                                                  GPIOD_ASIS);
+        data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
         if (IS_ERR(data->gpio_usb_mux))
                 dev_info(dev, "can't request USB MUX GPIO\n");
···

         devm_free_irq(&pdev->dev, data->usb_id_irq, data);
         cancel_delayed_work_sync(&data->work);
+
+        acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));

         return 0;
 }
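
For readers new to these tables: struct acpi_gpio_params is { crs_entry_index, line_index, active_low }, i.e. which GpioIo/GpioInt resource in the device's _CRS to use, which line within that resource, and its polarity; the third field of each acpi_gpio_mapping entry is the number of GPIOs behind that connection ID. Annotated, as a generic sketch rather than the driver's exact table:

    #include <linux/acpi.h>

    /* line 0 of the first GpioIo/GpioInt _CRS entry, active high */
    static const struct acpi_gpio_params example_gpio = { 0, 0, false };

    static const struct acpi_gpio_mapping example_gpios[] = {
            { "example-gpios", &example_gpio, 1 },  /* 1 GPIO in this ID */
            { }                                     /* sentinel */
    };
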
+12 -13
drivers/hv/channel.c
···

         wait_for_completion(&info->waitevent);

-        if (channel->rescind) {
-                ret = -ENODEV;
-                goto post_msg_err;
-        }
-
 post_msg_err:
+        /*
+         * If the channel has been rescinded;
+         * we will be awakened by the rescind
+         * handler; set the error code to zero so we don't leak memory.
+         */
+        if (channel->rescind)
+                ret = 0;
+
         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
         list_del(&info->msglistentry);
         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
···
         int ret;

         /*
-         * vmbus_on_event(), running in the tasklet, can race
+         * vmbus_on_event(), running in the per-channel tasklet, can race
          * with vmbus_close_internal() in the case of SMP guest, e.g., when
          * the former is accessing channel->inbound.ring_buffer, the latter
-         * could be freeing the ring_buffer pages.
-         *
-         * To resolve the race, we can serialize them by disabling the
-         * tasklet when the latter is running here.
+         * could be freeing the ring_buffer pages, so here we must stop it
+         * first.
          */
-        hv_event_tasklet_disable(channel);
+        tasklet_disable(&channel->callback_event);

         /*
          * In case a device driver's probe() fails (e.g.,
···
                 get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

 out:
-        hv_event_tasklet_enable(channel);
-
         return ret;
 }
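
The close path can rely on tasklet_disable() as a synchronization point: it bumps the tasklet's disable count and then spin-waits until any currently running instance has finished, so once it returns the ring-buffer pages can be freed safely. In outline (hypothetical names; a sketch, not the driver's code):

    #include <linux/interrupt.h>

    static void example_teardown(struct tasklet_struct *event_tasklet)
    {
            /* waits for a running tasklet instance to complete */
            tasklet_disable(event_tasklet);

            /* the tasklet can no longer run: safe to free its data */
    }
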
+5 -22
drivers/hv/channel_mgmt.c
···
 static void free_channel(struct vmbus_channel *channel)
 {
         tasklet_kill(&channel->callback_event);
-        kfree(channel);
+
+        kfree_rcu(channel, rcu);
 }

 static void percpu_channel_enq(void *arg)
···
         struct hv_per_cpu_context *hv_cpu
                 = this_cpu_ptr(hv_context.cpu_context);

-        list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
+        list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
 }

 static void percpu_channel_deq(void *arg)
 {
         struct vmbus_channel *channel = arg;

-        list_del(&channel->percpu_list);
+        list_del_rcu(&channel->percpu_list);
 }

···
                           true);
 }

-void hv_event_tasklet_disable(struct vmbus_channel *channel)
-{
-        tasklet_disable(&channel->callback_event);
-}
-
-void hv_event_tasklet_enable(struct vmbus_channel *channel)
-{
-        tasklet_enable(&channel->callback_event);
-
-        /* In case there is any pending event */
-        tasklet_schedule(&channel->callback_event);
-}
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
 {
         unsigned long flags;
···
         BUG_ON(!channel->rescind);
         BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

-        hv_event_tasklet_disable(channel);
         if (channel->target_cpu != get_cpu()) {
                 put_cpu();
                 smp_call_function_single(channel->target_cpu,
···
                 percpu_channel_deq(channel);
                 put_cpu();
         }
-        hv_event_tasklet_enable(channel);

         if (channel->primary_channel == NULL) {
                 list_del(&channel->listentry);
···
         init_vp_index(newchannel, dev_type);

-        hv_event_tasklet_disable(newchannel);
         if (newchannel->target_cpu != get_cpu()) {
                 put_cpu();
                 smp_call_function_single(newchannel->target_cpu,
···
                 percpu_channel_enq(newchannel);
                 put_cpu();
         }
-        hv_event_tasklet_enable(newchannel);

         /*
          * This state is used to indicate a successful open
···
                 list_del(&newchannel->listentry);
                 mutex_unlock(&vmbus_connection.channel_mutex);

-                hv_event_tasklet_disable(newchannel);
                 if (newchannel->target_cpu != get_cpu()) {
                         put_cpu();
                         smp_call_function_single(newchannel->target_cpu,
···
                         percpu_channel_deq(newchannel);
                         put_cpu();
                 }
-                hv_event_tasklet_enable(newchannel);

                 vmbus_release_relid(newchannel->offermsg.child_relid);
···
         /* Allocate the channel object and save this offer. */
         newchannel = alloc_channel();
         if (!newchannel) {
+                vmbus_release_relid(offer->child_relid);
                 pr_err("Unable to allocate channel object\n");
                 return;
         }
-4
drivers/hv/hv_fcopy.c
···
 static const char fcopy_devname[] = "vmbus/hv_fcopy";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * This state maintains the version number registered by the daemon.
  */
···

         if (cancel_delayed_work_sync(&fcopy_timeout_work))
                 fcopy_respond_to_host(HV_E_FAIL);
-        complete(&release_event);
 }

 int hv_fcopy_init(struct hv_util_service *srv)
 {
         recv_buffer = srv->recv_buffer;
         fcopy_transaction.recv_channel = srv->channel;

-        init_completion(&release_event);
         /*
          * When this driver loads, the user level daemon that
          * processes the host requests may not yet be running.
···
         fcopy_transaction.state = HVUTIL_DEVICE_DYING;
         cancel_delayed_work_sync(&fcopy_timeout_work);
         hvutil_transport_destroy(hvt);
-        wait_for_completion(&release_event);
 }
-4
drivers/hv/hv_kvp.c
···
 static const char kvp_devname[] = "vmbus/hv_kvp";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * Register the kernel component with the user-level daemon.
  * As part of this registration, pass the LIC version number.
···
         if (cancel_delayed_work_sync(&kvp_timeout_work))
                 kvp_respond_to_host(NULL, HV_E_FAIL);
         kvp_transaction.state = HVUTIL_DEVICE_INIT;
-        complete(&release_event);
 }

 int
···
         recv_buffer = srv->recv_buffer;
         kvp_transaction.recv_channel = srv->channel;

-        init_completion(&release_event);
         /*
          * When this driver loads, the user level daemon that
          * processes the host requests may not yet be running.
···
         cancel_delayed_work_sync(&kvp_timeout_work);
         cancel_work_sync(&kvp_sendkey_work);
         hvutil_transport_destroy(hvt);
-        wait_for_completion(&release_event);
 }
-4
drivers/hv/hv_snapshot.c
···
 static const char vss_devname[] = "vmbus/hv_vss";
 static __u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;

 static void vss_timeout_func(struct work_struct *dummy);
 static void vss_handle_request(struct work_struct *dummy);
···
         if (cancel_delayed_work_sync(&vss_timeout_work))
                 vss_respond_to_host(HV_E_FAIL);
         vss_transaction.state = HVUTIL_DEVICE_INIT;
-        complete(&release_event);
 }

 int
 hv_vss_init(struct hv_util_service *srv)
 {
-        init_completion(&release_event);
         if (vmbus_proto_version < VERSION_WIN8_1) {
                 pr_warn("Integration service 'Backup (volume snapshot)'"
                         " not supported on this host version.\n");
···
         cancel_delayed_work_sync(&vss_timeout_work);
         cancel_work_sync(&vss_handle_request_work);
         hvutil_transport_destroy(hvt);
-        wait_for_completion(&release_event);
 }
+2
drivers/hv/hv_util.c
···
         if (!hyperv_cs)
                 return -ENODEV;

+        spin_lock_init(&host_ts.lock);
+
         INIT_WORK(&wrk.work, hv_set_host_time);

         /*
+8 -4
drivers/hv/hv_utils_transport.c
···
          * connects back.
          */
         hvt_reset(hvt);
-        mutex_unlock(&hvt->lock);

         if (mode_old == HVUTIL_TRANSPORT_DESTROY)
-                hvt_transport_free(hvt);
+                complete(&hvt->release);
+
+        mutex_unlock(&hvt->lock);

         return 0;
 }
···

         init_waitqueue_head(&hvt->outmsg_q);
         mutex_init(&hvt->lock);
+        init_completion(&hvt->release);

         spin_lock(&hvt_list_lock);
         list_add(&hvt->list, &hvt_list);
···
         if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
                 cn_del_callback(&hvt->cn_id);

-        if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
-                hvt_transport_free(hvt);
+        if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
+                wait_for_completion(&hvt->release);
+
+        hvt_transport_free(hvt);
 }
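
The reworked lifetime rule is a standard completion handshake: whoever still holds the file descriptor signals complete() on its final release, and the destroy path blocks in wait_for_completion() before freeing, instead of guessing which side frees. A condensed sketch under hypothetical names:

    #include <linux/completion.h>
    #include <linux/slab.h>

    struct example_transport {
            struct completion release;
            /* ... */
    };

    /* called from the chardev's final release */
    static void example_fd_release(struct example_transport *t)
    {
            complete(&t->release);
    }

    static void example_destroy(struct example_transport *t, bool fd_still_open)
    {
            if (fd_still_open)
                    wait_for_completion(&t->release);
            kfree(t);       /* no user can touch t past this point */
    }
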
+1
drivers/hv/hv_utils_transport.h
···
         int outmsg_len;                   /* its length        */
         wait_queue_head_t outmsg_q;       /* poll/read wait queue */
         struct mutex lock;                /* protects struct members */
+        struct completion release;        /* synchronize with fd release */
 };

 struct hvutil_transport *hvutil_transport_init(const char *name,
+5 -1
drivers/hv/vmbus_drv.c
···
                 if (relid == 0)
                         continue;

+                rcu_read_lock();
+
                 /* Find channel based on relid */
-                list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+                list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
                         if (channel->offermsg.child_relid != relid)
                                 continue;
···
                                 tasklet_schedule(&channel->callback_event);
                         }
                 }
+
+                rcu_read_unlock();
         }
 }
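
Together with the channel_mgmt.c hunks above, this is the canonical RCU list recipe: writers publish with list_add_tail_rcu(), unpublish with list_del_rcu() and defer the free with kfree_rcu(); readers bracket list_for_each_entry_rcu() in rcu_read_lock()/rcu_read_unlock(). A self-contained sketch with hypothetical types:

    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct example_chan {
            struct list_head node;
            struct rcu_head rcu;
            u32 relid;
    };

    static LIST_HEAD(example_list);

    /* reader: may run concurrently with removal */
    static bool example_signal(u32 relid)
    {
            struct example_chan *c;
            bool hit = false;

            rcu_read_lock();
            list_for_each_entry_rcu(c, &example_list, node) {
                    if (c->relid != relid)
                            continue;
                    hit = true;     /* c cannot be freed while we hold the lock */
                    break;
            }
            rcu_read_unlock();
            return hit;
    }

    /* writer: caller serializes against other writers */
    static void example_remove(struct example_chan *c)
    {
            list_del_rcu(&c->node);
            kfree_rcu(c, rcu);      /* freed only after a grace period */
    }
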
+3 -1
drivers/hwtracing/intel_th/core.c
···
         else
                 intel_th_trace_enable(thdev);

-        if (ret)
+        if (ret) {
                 pm_runtime_put(&thdev->dev);
+                module_put(thdrv->driver.owner);
+        }

         return ret;
 }
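
The leak here is the classic module-refcount asymmetry: if the activation path took a reference via try_module_get(), every failure exit must drop it with module_put(), or the subdevice driver's module can never be unloaded. Schematically (a sketch, not intel_th's code):

    #include <linux/module.h>

    static int example_activate(struct module *owner, int (*activate)(void))
    {
            int ret;

            if (!try_module_get(owner))
                    return -ENODEV;

            ret = activate();
            if (ret)
                    module_put(owner);      /* balance the get on failure */

            return ret;
    }
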
+10
drivers/hwtracing/intel_th/pci.c
···
                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
                 .driver_data = (kernel_ulong_t)0,
         },
+        {
+                /* Denverton */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
+                .driver_data = (kernel_ulong_t)0,
+        },
+        {
+                /* Gemini Lake */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+                .driver_data = (kernel_ulong_t)0,
+        },
         { 0 },
 };
+2 -12
drivers/misc/mei/bus-fixup.c
···

 static int mei_osver(struct mei_cl_device *cldev)
 {
-        int ret;
         const size_t size = sizeof(struct mkhi_msg_hdr) +
                             sizeof(struct mkhi_fwcaps) +
                             sizeof(struct mei_os_ver);
-        size_t length = 8;
         char buf[size];
         struct mkhi_msg *req;
         struct mkhi_fwcaps *fwcaps;
···
         os_ver = (struct mei_os_ver *)fwcaps->data;
         os_ver->os_type = OSTYPE_LINUX;

-        ret = __mei_cl_send(cldev->cl, buf, size, mode);
-        if (ret < 0)
-                return ret;
-
-        ret = __mei_cl_recv(cldev->cl, buf, length, 0);
-        if (ret < 0)
-                return ret;
-
-        return 0;
+        return __mei_cl_send(cldev->cl, buf, size, mode);
 }

 static void mei_mkhi_fix(struct mei_cl_device *cldev)
···
                 return;

         ret = mei_osver(cldev);
-        if (ret)
+        if (ret < 0)
                 dev_err(&cldev->dev, "OS version command failed %d\n", ret);

         mei_cldev_disable(cldev);
+6 -2
drivers/misc/mei/init.c
···

         mei_clear_interrupts(dev);

-        mei_synchronize_irq(dev);
-
         /* we're already in reset, cancel the init timer
          * if the reset was called due the hbm protocol error
          * we need to call it before hw start
···
                 container_of(work, struct mei_device, reset_work);
         int ret;

+        mei_clear_interrupts(dev);
+        mei_synchronize_irq(dev);
+
         mutex_lock(&dev->device_lock);

         ret = mei_reset(dev);
···
         mei_cl_bus_remove_devices(dev);

         mei_cancel_work(dev);
+
+        mei_clear_interrupts(dev);
+        mei_synchronize_irq(dev);

         mutex_lock(&dev->device_lock);
+2 -2
drivers/misc/vmw_vmci/vmci_guest.c
···
          */
         error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
                         PCI_IRQ_MSIX);
-        if (error) {
+        if (error < 0) {
                 error = pci_alloc_irq_vectors(pdev, 1, 1,
                                 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
-                if (error)
+                if (error < 0)
                         goto err_remove_bitmap;
         } else {
                 vmci_dev->exclusive_vectors = true;
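
The underlying contract: pci_alloc_irq_vectors() returns the number of vectors it allocated (a positive count of at least min_vecs) on success and a negative errno on failure, so a plain "if (error)" treats every success as a failure. Typical correct usage, as a sketch:

    #include <linux/pci.h>

    static int example_setup_irqs(struct pci_dev *pdev)
    {
            int nvecs;

            nvecs = pci_alloc_irq_vectors(pdev, 1, 4,
                            PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
            if (nvecs < 0)
                    return nvecs;   /* negative errno: no vectors */

            /* success: nvecs in [1, 4], retrieve with pci_irq_vector() */
            return 0;
    }
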
+4 -2
drivers/parport/share.c
···
          * pardevice fields. -arca
          */
         port->ops->init_state(par_dev, par_dev->state);
-        port->proc_device = par_dev;
-        parport_device_proc_register(par_dev);
+        if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+                port->proc_device = par_dev;
+                parport_device_proc_register(par_dev);
+        }

         return par_dev;
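
test_and_set_bit() sets the bit and returns its previous value atomically, so exactly one caller observes 0 and performs the registration; later devices on the same port skip the duplicate procfile. The shape of the idiom, with a hypothetical flag word:

    #include <linux/bitops.h>

    #define EXAMPLE_PROC_REGISTERED 0

    static unsigned long example_devflags;

    static void example_register_proc_once(void)
    {
            if (test_and_set_bit(EXAMPLE_PROC_REGISTERED, &example_devflags))
                    return;         /* another device already registered it */

            /* first caller only: create the proc entry here */
    }
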
+7 -3
include/linux/hyperv.h
···
          * link up channels based on their CPU affinity.
          */
         struct list_head percpu_list;
+
+        /*
+         * Defer freeing channel until after all cpu's have
+         * gone through grace period.
+         */
+        struct rcu_head rcu;
+
         /*
          * For performance critical channels (storage, networking
          * etc,), Hyper-V has a mechanism to enhance the throughput
···
                         const int *fw_version, int fw_vercnt,
                         const int *srv_version, int srv_vercnt,
                         int *nego_fw_version, int *nego_srv_version);
-
-void hv_event_tasklet_disable(struct vmbus_channel *channel);
-void hv_event_tasklet_enable(struct vmbus_channel *channel);

 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);