Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'char-misc-4.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc fixes from Greg KH:
"Here are a few char/misc driver fixes for 4.18-rc5.

The "largest" stuff here is fixes for the UIO changes in 4.18-rc1 that
caused breakages for some people. Thanks to Xiubo Li for fixing them
quickly. Other than that, minor fixes for thunderbolt, vmw_balloon,
nvmem, mei, and ibmasm drivers. There's also a MAINTAINERS update
where Rafael is offering to help out with reviewing driver core
patches.

All of these have been in linux-next with no reported issues"

* tag 'char-misc-4.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
nvmem: Don't let a NULL cell_id for nvmem_cell_get() crash us
thunderbolt: Notify userspace when boot_acl is changed
uio: fix crash after the device is unregistered
uio: change to use the mutex lock instead of the spin lock
uio: use request_threaded_irq instead
fpga: altera-cvp: Fix an error handling path in 'altera_cvp_probe()'
ibmasm: don't write out of bounds in read handler
MAINTAINERS: Add myself as driver core changes reviewer
mei: discard messages from not connected client during power down.
vmw_balloon: fix inflation with batching

+126 -66
+1
MAINTAINERS
··· 4460 4460 4461 4461 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS 4462 4462 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 4463 + R: "Rafael J. Wysocki" <rafael@kernel.org> 4463 4464 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git 4464 4465 S: Supported 4465 4466 F: Documentation/kobject.txt
+4 -2
drivers/fpga/altera-cvp.c
··· 455 455 456 456 mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, 457 457 &altera_cvp_ops, conf); 458 - if (!mgr) 459 - return -ENOMEM; 458 + if (!mgr) { 459 + ret = -ENOMEM; 460 + goto err_unmap; 461 + } 460 462 461 463 pci_set_drvdata(pdev, mgr); 462 464
+3 -24
drivers/misc/ibmasm/ibmasmfs.c
··· 507 507 static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 508 508 { 509 509 void __iomem *address = (void __iomem *)file->private_data; 510 - unsigned char *page; 511 - int retval; 512 510 int len = 0; 513 511 unsigned int value; 514 - 515 - if (*offset < 0) 516 - return -EINVAL; 517 - if (count == 0 || count > 1024) 518 - return 0; 519 - if (*offset != 0) 520 - return 0; 521 - 522 - page = (unsigned char *)__get_free_page(GFP_KERNEL); 523 - if (!page) 524 - return -ENOMEM; 512 + char lbuf[20]; 525 513 526 514 value = readl(address); 527 - len = sprintf(page, "%d\n", value); 515 + len = snprintf(lbuf, sizeof(lbuf), "%d\n", value); 528 516 529 - if (copy_to_user(buf, page, len)) { 530 - retval = -EFAULT; 531 - goto exit; 532 - } 533 - *offset += len; 534 - retval = len; 535 - 536 - exit: 537 - free_page((unsigned long)page); 538 - return retval; 517 + return simple_read_from_buffer(buf, count, offset, lbuf, len); 539 518 } 540 519 541 520 static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
+4 -1
drivers/misc/mei/interrupt.c
··· 310 310 if (&cl->link == &dev->file_list) { 311 311 /* A message for not connected fixed address clients 312 312 * should be silently discarded 313 + * On power down client may be force cleaned, 314 + * silently discard such messages 313 315 */ 314 - if (hdr_is_fixed(mei_hdr)) { 316 + if (hdr_is_fixed(mei_hdr) || 317 + dev->dev_state == MEI_DEV_POWER_DOWN) { 315 318 mei_irq_discard_msg(dev, mei_hdr); 316 319 ret = 0; 317 320 goto reset_slots;
+2 -2
drivers/misc/vmw_balloon.c
··· 467 467 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 468 468 { 469 469 unsigned long status; 470 - unsigned long pfn = page_to_pfn(b->page); 470 + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); 471 471 472 472 STATS_INC(b->stats.lock[is_2m_pages]); 473 473 ··· 515 515 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 516 516 { 517 517 unsigned long status; 518 - unsigned long pfn = page_to_pfn(b->page); 518 + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); 519 519 520 520 STATS_INC(b->stats.unlock[is_2m_pages]); 521 521
+4
drivers/nvmem/core.c
··· 936 936 return cell; 937 937 } 938 938 939 + /* NULL cell_id only allowed for device tree; invalid otherwise */ 940 + if (!cell_id) 941 + return ERR_PTR(-EINVAL); 942 + 939 943 return nvmem_cell_get_from_list(cell_id); 940 944 } 941 945 EXPORT_SYMBOL_GPL(nvmem_cell_get);
+4
drivers/thunderbolt/domain.c
··· 213 213 goto err_free_acl; 214 214 } 215 215 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); 216 + if (!ret) { 217 + /* Notify userspace about the change */ 218 + kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE); 219 + } 216 220 mutex_unlock(&tb->lock); 217 221 218 222 err_free_acl:
+103 -36
drivers/uio/uio.c
··· 215 215 struct device_attribute *attr, char *buf) 216 216 { 217 217 struct uio_device *idev = dev_get_drvdata(dev); 218 - return sprintf(buf, "%s\n", idev->info->name); 218 + int ret; 219 + 220 + mutex_lock(&idev->info_lock); 221 + if (!idev->info) { 222 + ret = -EINVAL; 223 + dev_err(dev, "the device has been unregistered\n"); 224 + goto out; 225 + } 226 + 227 + ret = sprintf(buf, "%s\n", idev->info->name); 228 + 229 + out: 230 + mutex_unlock(&idev->info_lock); 231 + return ret; 219 232 } 220 233 static DEVICE_ATTR_RO(name); 221 234 ··· 236 223 struct device_attribute *attr, char *buf) 237 224 { 238 225 struct uio_device *idev = dev_get_drvdata(dev); 239 - return sprintf(buf, "%s\n", idev->info->version); 226 + int ret; 227 + 228 + mutex_lock(&idev->info_lock); 229 + if (!idev->info) { 230 + ret = -EINVAL; 231 + dev_err(dev, "the device has been unregistered\n"); 232 + goto out; 233 + } 234 + 235 + ret = sprintf(buf, "%s\n", idev->info->version); 236 + 237 + out: 238 + mutex_unlock(&idev->info_lock); 239 + return ret; 240 240 } 241 241 static DEVICE_ATTR_RO(version); 242 242 ··· 441 415 static irqreturn_t uio_interrupt(int irq, void *dev_id) 442 416 { 443 417 struct uio_device *idev = (struct uio_device *)dev_id; 444 - irqreturn_t ret = idev->info->handler(irq, idev->info); 418 + irqreturn_t ret; 445 419 420 + mutex_lock(&idev->info_lock); 421 + 422 + ret = idev->info->handler(irq, idev->info); 446 423 if (ret == IRQ_HANDLED) 447 424 uio_event_notify(idev->info); 448 425 426 + mutex_unlock(&idev->info_lock); 449 427 return ret; 450 428 } 451 429 ··· 463 433 struct uio_device *idev; 464 434 struct uio_listener *listener; 465 435 int ret = 0; 466 - unsigned long flags; 467 436 468 437 mutex_lock(&minor_lock); 469 438 idev = idr_find(&uio_idr, iminor(inode)); ··· 489 460 listener->event_count = atomic_read(&idev->event); 490 461 filep->private_data = listener; 491 462 492 - spin_lock_irqsave(&idev->info_lock, flags); 463 + mutex_lock(&idev->info_lock); 464 + if 
(!idev->info) { 465 + mutex_unlock(&idev->info_lock); 466 + ret = -EINVAL; 467 + goto err_alloc_listener; 468 + } 469 + 493 470 if (idev->info && idev->info->open) 494 471 ret = idev->info->open(idev->info, inode); 495 - spin_unlock_irqrestore(&idev->info_lock, flags); 472 + mutex_unlock(&idev->info_lock); 496 473 if (ret) 497 474 goto err_infoopen; 498 475 ··· 530 495 int ret = 0; 531 496 struct uio_listener *listener = filep->private_data; 532 497 struct uio_device *idev = listener->dev; 533 - unsigned long flags; 534 498 535 - spin_lock_irqsave(&idev->info_lock, flags); 499 + mutex_lock(&idev->info_lock); 536 500 if (idev->info && idev->info->release) 537 501 ret = idev->info->release(idev->info, inode); 538 - spin_unlock_irqrestore(&idev->info_lock, flags); 502 + mutex_unlock(&idev->info_lock); 539 503 540 504 module_put(idev->owner); 541 505 kfree(listener); ··· 547 513 struct uio_listener *listener = filep->private_data; 548 514 struct uio_device *idev = listener->dev; 549 515 __poll_t ret = 0; 550 - unsigned long flags; 551 516 552 - spin_lock_irqsave(&idev->info_lock, flags); 517 + mutex_lock(&idev->info_lock); 553 518 if (!idev->info || !idev->info->irq) 554 519 ret = -EIO; 555 - spin_unlock_irqrestore(&idev->info_lock, flags); 520 + mutex_unlock(&idev->info_lock); 556 521 557 522 if (ret) 558 523 return ret; ··· 570 537 DECLARE_WAITQUEUE(wait, current); 571 538 ssize_t retval = 0; 572 539 s32 event_count; 573 - unsigned long flags; 574 540 575 - spin_lock_irqsave(&idev->info_lock, flags); 541 + mutex_lock(&idev->info_lock); 576 542 if (!idev->info || !idev->info->irq) 577 543 retval = -EIO; 578 - spin_unlock_irqrestore(&idev->info_lock, flags); 544 + mutex_unlock(&idev->info_lock); 579 545 580 546 if (retval) 581 547 return retval; ··· 624 592 struct uio_device *idev = listener->dev; 625 593 ssize_t retval; 626 594 s32 irq_on; 627 - unsigned long flags; 628 595 629 - spin_lock_irqsave(&idev->info_lock, flags); 596 + mutex_lock(&idev->info_lock); 597 + if 
(!idev->info) { 598 + retval = -EINVAL; 599 + goto out; 600 + } 601 + 630 602 if (!idev->info || !idev->info->irq) { 631 603 retval = -EIO; 632 604 goto out; ··· 654 618 retval = idev->info->irqcontrol(idev->info, irq_on); 655 619 656 620 out: 657 - spin_unlock_irqrestore(&idev->info_lock, flags); 621 + mutex_unlock(&idev->info_lock); 658 622 return retval ? retval : sizeof(s32); 659 623 } 660 624 ··· 676 640 struct page *page; 677 641 unsigned long offset; 678 642 void *addr; 643 + int ret = 0; 644 + int mi; 679 645 680 - int mi = uio_find_mem_index(vmf->vma); 681 - if (mi < 0) 682 - return VM_FAULT_SIGBUS; 646 + mutex_lock(&idev->info_lock); 647 + if (!idev->info) { 648 + ret = VM_FAULT_SIGBUS; 649 + goto out; 650 + } 651 + 652 + mi = uio_find_mem_index(vmf->vma); 653 + if (mi < 0) { 654 + ret = VM_FAULT_SIGBUS; 655 + goto out; 656 + } 683 657 684 658 /* 685 659 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE ··· 704 658 page = vmalloc_to_page(addr); 705 659 get_page(page); 706 660 vmf->page = page; 707 - return 0; 661 + 662 + out: 663 + mutex_unlock(&idev->info_lock); 664 + 665 + return ret; 708 666 } 709 667 710 668 static const struct vm_operations_struct uio_logical_vm_ops = { ··· 733 683 struct uio_device *idev = vma->vm_private_data; 734 684 int mi = uio_find_mem_index(vma); 735 685 struct uio_mem *mem; 686 + 736 687 if (mi < 0) 737 688 return -EINVAL; 738 689 mem = idev->info->mem + mi; ··· 775 724 776 725 vma->vm_private_data = idev; 777 726 727 + mutex_lock(&idev->info_lock); 728 + if (!idev->info) { 729 + ret = -EINVAL; 730 + goto out; 731 + } 732 + 778 733 mi = uio_find_mem_index(vma); 779 - if (mi < 0) 780 - return -EINVAL; 734 + if (mi < 0) { 735 + ret = -EINVAL; 736 + goto out; 737 + } 781 738 782 739 requested_pages = vma_pages(vma); 783 740 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) 784 741 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; 785 - if (requested_pages > actual_pages) 786 - return -EINVAL; 742 
+ if (requested_pages > actual_pages) { 743 + ret = -EINVAL; 744 + goto out; 745 + } 787 746 788 747 if (idev->info->mmap) { 789 748 ret = idev->info->mmap(idev->info, vma); 790 - return ret; 749 + goto out; 791 750 } 792 751 793 752 switch (idev->info->mem[mi].memtype) { 794 753 case UIO_MEM_PHYS: 795 - return uio_mmap_physical(vma); 754 + ret = uio_mmap_physical(vma); 755 + break; 796 756 case UIO_MEM_LOGICAL: 797 757 case UIO_MEM_VIRTUAL: 798 - return uio_mmap_logical(vma); 758 + ret = uio_mmap_logical(vma); 759 + break; 799 760 default: 800 - return -EINVAL; 761 + ret = -EINVAL; 801 762 } 763 + 764 + out: 765 + mutex_unlock(&idev->info_lock); 766 + return 0; 802 767 } 803 768 804 769 static const struct file_operations uio_fops = { ··· 932 865 933 866 idev->owner = owner; 934 867 idev->info = info; 935 - spin_lock_init(&idev->info_lock); 868 + mutex_init(&idev->info_lock); 936 869 init_waitqueue_head(&idev->wait); 937 870 atomic_set(&idev->event, 0); 938 871 ··· 969 902 * FDs at the time of unregister and therefore may not be 970 903 * freed until they are released. 
971 904 */ 972 - ret = request_irq(info->irq, uio_interrupt, 973 - info->irq_flags, info->name, idev); 905 + ret = request_threaded_irq(info->irq, NULL, uio_interrupt, 906 + info->irq_flags, info->name, idev); 907 + 974 908 if (ret) 975 909 goto err_request_irq; 976 910 } ··· 996 928 void uio_unregister_device(struct uio_info *info) 997 929 { 998 930 struct uio_device *idev; 999 - unsigned long flags; 1000 931 1001 932 if (!info || !info->uio_dev) 1002 933 return; ··· 1004 937 1005 938 uio_free_minor(idev); 1006 939 940 + mutex_lock(&idev->info_lock); 1007 941 uio_dev_del_attributes(idev); 1008 942 1009 943 if (info->irq && info->irq != UIO_IRQ_CUSTOM) 1010 944 free_irq(info->irq, idev); 1011 945 1012 - spin_lock_irqsave(&idev->info_lock, flags); 1013 946 idev->info = NULL; 1014 - spin_unlock_irqrestore(&idev->info_lock, flags); 947 + mutex_unlock(&idev->info_lock); 1015 948 1016 949 device_unregister(&idev->dev); 1017 950
+1 -1
include/linux/uio_driver.h
··· 75 75 struct fasync_struct *async_queue; 76 76 wait_queue_head_t wait; 77 77 struct uio_info *info; 78 - spinlock_t info_lock; 78 + struct mutex info_lock; 79 79 struct kobject *map_dir; 80 80 struct kobject *portio_dir; 81 81 };