Merge tag 'char-misc-4.20-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
"Here are some small driver fixes for 4.20-rc6.

There is a hyperv fix that for some reason took forever to get into a
shape that could be applied to the tree properly, but resolves a much
reported issue. The others are some gnss patches, one a bugfix and the
two others updates to the MAINTAINERS file to properly match the gnss
files in the tree.

All have been in linux-next for a while with no reported issues"

* tag 'char-misc-4.20-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
MAINTAINERS: exclude gnss from SIRFPRIMA2 regex matching
MAINTAINERS: add gnss scm tree
gnss: sirf: fix activation retry handling
Drivers: hv: vmbus: Offload the handling of channels to two workqueues

Changed files
+166 -69
drivers
include
linux
+2
MAINTAINERS
··· 1472 1472 F: drivers/clocksource/timer-prima2.c 1473 1473 F: drivers/clocksource/timer-atlas7.c 1474 1474 N: [^a-z]sirf 1475 + X: drivers/gnss 1475 1476 1476 1477 ARM/EBSA110 MACHINE SUPPORT 1477 1478 M: Russell King <linux@armlinux.org.uk> ··· 6322 6321 6323 6322 GNSS SUBSYSTEM 6324 6323 M: Johan Hovold <johan@kernel.org> 6324 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git 6325 6325 S: Maintained 6326 6326 F: Documentation/ABI/testing/sysfs-class-gnss 6327 6327 F: Documentation/devicetree/bindings/gnss/
+3 -3
drivers/gnss/sirf.c
··· 168 168 else 169 169 timeout = SIRF_HIBERNATE_TIMEOUT; 170 170 171 - while (retries-- > 0) { 171 + do { 172 172 sirf_pulse_on_off(data); 173 173 ret = sirf_wait_for_power_state(data, active, timeout); 174 174 if (ret < 0) { ··· 179 179 } 180 180 181 181 break; 182 - } 182 + } while (retries--); 183 183 184 - if (retries == 0) 184 + if (retries < 0) 185 185 return -ETIMEDOUT; 186 186 187 187 return 0;
+126 -63
drivers/hv/channel_mgmt.c
··· 435 435 } 436 436 } 437 437 438 - /* 439 - * vmbus_process_offer - Process the offer by creating a channel/device 440 - * associated with this offer 441 - */ 442 - static void vmbus_process_offer(struct vmbus_channel *newchannel) 438 + /* Note: the function can run concurrently for primary/sub channels. */ 439 + static void vmbus_add_channel_work(struct work_struct *work) 443 440 { 444 - struct vmbus_channel *channel; 445 - bool fnew = true; 441 + struct vmbus_channel *newchannel = 442 + container_of(work, struct vmbus_channel, add_channel_work); 443 + struct vmbus_channel *primary_channel = newchannel->primary_channel; 446 444 unsigned long flags; 447 445 u16 dev_type; 448 446 int ret; 449 - 450 - /* Make sure this is a new offer */ 451 - mutex_lock(&vmbus_connection.channel_mutex); 452 - 453 - /* 454 - * Now that we have acquired the channel_mutex, 455 - * we can release the potentially racing rescind thread. 456 - */ 457 - atomic_dec(&vmbus_connection.offer_in_progress); 458 - 459 - list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 460 - if (!uuid_le_cmp(channel->offermsg.offer.if_type, 461 - newchannel->offermsg.offer.if_type) && 462 - !uuid_le_cmp(channel->offermsg.offer.if_instance, 463 - newchannel->offermsg.offer.if_instance)) { 464 - fnew = false; 465 - break; 466 - } 467 - } 468 - 469 - if (fnew) 470 - list_add_tail(&newchannel->listentry, 471 - &vmbus_connection.chn_list); 472 - 473 - mutex_unlock(&vmbus_connection.channel_mutex); 474 - 475 - if (!fnew) { 476 - /* 477 - * Check to see if this is a sub-channel. 478 - */ 479 - if (newchannel->offermsg.offer.sub_channel_index != 0) { 480 - /* 481 - * Process the sub-channel. 
482 - */ 483 - newchannel->primary_channel = channel; 484 - spin_lock_irqsave(&channel->lock, flags); 485 - list_add_tail(&newchannel->sc_list, &channel->sc_list); 486 - channel->num_sc++; 487 - spin_unlock_irqrestore(&channel->lock, flags); 488 - } else { 489 - goto err_free_chan; 490 - } 491 - } 492 447 493 448 dev_type = hv_get_dev_type(newchannel); 494 449 ··· 462 507 /* 463 508 * This state is used to indicate a successful open 464 509 * so that when we do close the channel normally, we 465 - * can cleanup properly 510 + * can cleanup properly. 466 511 */ 467 512 newchannel->state = CHANNEL_OPEN_STATE; 468 513 469 - if (!fnew) { 470 - struct hv_device *dev 471 - = newchannel->primary_channel->device_obj; 514 + if (primary_channel != NULL) { 515 + /* newchannel is a sub-channel. */ 516 + struct hv_device *dev = primary_channel->device_obj; 472 517 473 518 if (vmbus_add_channel_kobj(dev, newchannel)) 474 - goto err_free_chan; 519 + goto err_deq_chan; 475 520 476 - if (channel->sc_creation_callback != NULL) 477 - channel->sc_creation_callback(newchannel); 521 + if (primary_channel->sc_creation_callback != NULL) 522 + primary_channel->sc_creation_callback(newchannel); 523 + 478 524 newchannel->probe_done = true; 479 525 return; 480 526 } 481 527 482 528 /* 483 - * Start the process of binding this offer to the driver 484 - * We need to set the DeviceObject field before calling 485 - * vmbus_child_dev_add() 529 + * Start the process of binding the primary channel to the driver 486 530 */ 487 531 newchannel->device_obj = vmbus_device_create( 488 532 &newchannel->offermsg.offer.if_type, ··· 510 556 511 557 err_deq_chan: 512 558 mutex_lock(&vmbus_connection.channel_mutex); 513 - list_del(&newchannel->listentry); 559 + 560 + /* 561 + * We need to set the flag, otherwise 562 + * vmbus_onoffer_rescind() can be blocked. 
563 + */ 564 + newchannel->probe_done = true; 565 + 566 + if (primary_channel == NULL) { 567 + list_del(&newchannel->listentry); 568 + } else { 569 + spin_lock_irqsave(&primary_channel->lock, flags); 570 + list_del(&newchannel->sc_list); 571 + spin_unlock_irqrestore(&primary_channel->lock, flags); 572 + } 573 + 514 574 mutex_unlock(&vmbus_connection.channel_mutex); 515 575 516 576 if (newchannel->target_cpu != get_cpu()) { 517 577 put_cpu(); 518 578 smp_call_function_single(newchannel->target_cpu, 519 - percpu_channel_deq, newchannel, true); 579 + percpu_channel_deq, 580 + newchannel, true); 520 581 } else { 521 582 percpu_channel_deq(newchannel); 522 583 put_cpu(); ··· 539 570 540 571 vmbus_release_relid(newchannel->offermsg.child_relid); 541 572 542 - err_free_chan: 543 573 free_channel(newchannel); 574 + } 575 + 576 + /* 577 + * vmbus_process_offer - Process the offer by creating a channel/device 578 + * associated with this offer 579 + */ 580 + static void vmbus_process_offer(struct vmbus_channel *newchannel) 581 + { 582 + struct vmbus_channel *channel; 583 + struct workqueue_struct *wq; 584 + unsigned long flags; 585 + bool fnew = true; 586 + 587 + mutex_lock(&vmbus_connection.channel_mutex); 588 + 589 + /* 590 + * Now that we have acquired the channel_mutex, 591 + * we can release the potentially racing rescind thread. 592 + */ 593 + atomic_dec(&vmbus_connection.offer_in_progress); 594 + 595 + list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 596 + if (!uuid_le_cmp(channel->offermsg.offer.if_type, 597 + newchannel->offermsg.offer.if_type) && 598 + !uuid_le_cmp(channel->offermsg.offer.if_instance, 599 + newchannel->offermsg.offer.if_instance)) { 600 + fnew = false; 601 + break; 602 + } 603 + } 604 + 605 + if (fnew) 606 + list_add_tail(&newchannel->listentry, 607 + &vmbus_connection.chn_list); 608 + else { 609 + /* 610 + * Check to see if this is a valid sub-channel. 
611 + */ 612 + if (newchannel->offermsg.offer.sub_channel_index == 0) { 613 + mutex_unlock(&vmbus_connection.channel_mutex); 614 + /* 615 + * Don't call free_channel(), because newchannel->kobj 616 + * is not initialized yet. 617 + */ 618 + kfree(newchannel); 619 + WARN_ON_ONCE(1); 620 + return; 621 + } 622 + /* 623 + * Process the sub-channel. 624 + */ 625 + newchannel->primary_channel = channel; 626 + spin_lock_irqsave(&channel->lock, flags); 627 + list_add_tail(&newchannel->sc_list, &channel->sc_list); 628 + spin_unlock_irqrestore(&channel->lock, flags); 629 + } 630 + 631 + mutex_unlock(&vmbus_connection.channel_mutex); 632 + 633 + /* 634 + * vmbus_process_offer() mustn't call channel->sc_creation_callback() 635 + * directly for sub-channels, because sc_creation_callback() -> 636 + * vmbus_open() may never get the host's response to the 637 + * OPEN_CHANNEL message (the host may rescind a channel at any time, 638 + * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind() 639 + * may not wake up the vmbus_open() as it's blocked due to a non-zero 640 + * vmbus_connection.offer_in_progress, and finally we have a deadlock. 641 + * 642 + * The above is also true for primary channels, if the related device 643 + * drivers use sync probing mode by default. 644 + * 645 + * And, usually the handling of primary channels and sub-channels can 646 + * depend on each other, so we should offload them to different 647 + * workqueues to avoid possible deadlock, e.g. in sync-probing mode, 648 + * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() -> 649 + * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock 650 + * and waits for all the sub-channels to appear, but the latter 651 + * can't get the rtnl_lock and this blocks the handling of 652 + * sub-channels. 653 + */ 654 + INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work); 655 + wq = fnew ? 
vmbus_connection.handle_primary_chan_wq : 656 + vmbus_connection.handle_sub_chan_wq; 657 + queue_work(wq, &newchannel->add_channel_work); 544 658 } 545 659 546 660 /* 547 661 * We use this state to statically distribute the channel interrupt load. 548 662 */ 549 663 static int next_numa_node_id; 664 + /* 665 + * init_vp_index() accesses global variables like next_numa_node_id, and 666 + * it can run concurrently for primary channels and sub-channels: see 667 + * vmbus_process_offer(), so we need the lock to protect the global 668 + * variables. 669 + */ 670 + static DEFINE_SPINLOCK(bind_channel_to_cpu_lock); 550 671 551 672 /* 552 673 * Starting with Win8, we can statically distribute the incoming ··· 671 612 channel->target_vp = hv_cpu_number_to_vp_number(0); 672 613 return; 673 614 } 615 + 616 + spin_lock(&bind_channel_to_cpu_lock); 674 617 675 618 /* 676 619 * Based on the channel affinity policy, we will assign the NUMA ··· 755 694 756 695 channel->target_cpu = cur_cpu; 757 696 channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu); 697 + 698 + spin_unlock(&bind_channel_to_cpu_lock); 758 699 759 700 free_cpumask_var(available_mask); 760 701 }
+21 -3
drivers/hv/connection.c
··· 190 190 goto cleanup; 191 191 } 192 192 193 + vmbus_connection.handle_primary_chan_wq = 194 + create_workqueue("hv_pri_chan"); 195 + if (!vmbus_connection.handle_primary_chan_wq) { 196 + ret = -ENOMEM; 197 + goto cleanup; 198 + } 199 + 200 + vmbus_connection.handle_sub_chan_wq = 201 + create_workqueue("hv_sub_chan"); 202 + if (!vmbus_connection.handle_sub_chan_wq) { 203 + ret = -ENOMEM; 204 + goto cleanup; 205 + } 206 + 193 207 INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); 194 208 spin_lock_init(&vmbus_connection.channelmsg_lock); 195 209 ··· 294 280 */ 295 281 vmbus_initiate_unload(false); 296 282 297 - if (vmbus_connection.work_queue) { 298 - drain_workqueue(vmbus_connection.work_queue); 283 + if (vmbus_connection.handle_sub_chan_wq) 284 + destroy_workqueue(vmbus_connection.handle_sub_chan_wq); 285 + 286 + if (vmbus_connection.handle_primary_chan_wq) 287 + destroy_workqueue(vmbus_connection.handle_primary_chan_wq); 288 + 289 + if (vmbus_connection.work_queue) 299 290 destroy_workqueue(vmbus_connection.work_queue); 300 - } 301 291 302 292 if (vmbus_connection.int_page) { 303 293 free_pages((unsigned long)vmbus_connection.int_page, 0);
+7
drivers/hv/hyperv_vmbus.h
··· 335 335 struct list_head chn_list; 336 336 struct mutex channel_mutex; 337 337 338 + /* 339 + * An offer message is handled first on the work_queue, and then 340 + * is further handled on handle_primary_chan_wq or 341 + * handle_sub_chan_wq. 342 + */ 338 343 struct workqueue_struct *work_queue; 344 + struct workqueue_struct *handle_primary_chan_wq; 345 + struct workqueue_struct *handle_sub_chan_wq; 339 346 }; 340 347 341 348
+7
include/linux/hyperv.h
··· 905 905 906 906 bool probe_done; 907 907 908 + /* 909 + * We must offload the handling of the primary/sub channels 910 + * from the single-threaded vmbus_connection.work_queue to 911 + * two different workqueue, otherwise we can block 912 + * vmbus_connection.work_queue and hang: see vmbus_process_offer(). 913 + */ 914 + struct work_struct add_channel_work; 908 915 }; 909 916 910 917 static inline bool is_hvsock_channel(const struct vmbus_channel *c)