Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge back earlier ACPI PM material for v4.3.

+2884 -869
+1 -1
drivers/acpi/Makefile
··· 24 24 # Power management related files 25 25 acpi-y += wakeup.o 26 26 acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o 27 - acpi-y += device_pm.o 27 + acpi-y += device_sysfs.o device_pm.o 28 28 acpi-$(CONFIG_ACPI_SLEEP) += proc.o 29 29 30 30
+410 -1
drivers/acpi/bus.c
··· 423 423 acpi_evaluate_ost(handle, type, ost_code, NULL); 424 424 } 425 425 426 + static void acpi_device_notify(acpi_handle handle, u32 event, void *data) 427 + { 428 + struct acpi_device *device = data; 429 + 430 + device->driver->ops.notify(device, event); 431 + } 432 + 433 + static void acpi_device_notify_fixed(void *data) 434 + { 435 + struct acpi_device *device = data; 436 + 437 + /* Fixed hardware devices have no handles */ 438 + acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); 439 + } 440 + 441 + static u32 acpi_device_fixed_event(void *data) 442 + { 443 + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); 444 + return ACPI_INTERRUPT_HANDLED; 445 + } 446 + 447 + static int acpi_device_install_notify_handler(struct acpi_device *device) 448 + { 449 + acpi_status status; 450 + 451 + if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) 452 + status = 453 + acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 454 + acpi_device_fixed_event, 455 + device); 456 + else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) 457 + status = 458 + acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 459 + acpi_device_fixed_event, 460 + device); 461 + else 462 + status = acpi_install_notify_handler(device->handle, 463 + ACPI_DEVICE_NOTIFY, 464 + acpi_device_notify, 465 + device); 466 + 467 + if (ACPI_FAILURE(status)) 468 + return -EINVAL; 469 + return 0; 470 + } 471 + 472 + static void acpi_device_remove_notify_handler(struct acpi_device *device) 473 + { 474 + if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) 475 + acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 476 + acpi_device_fixed_event); 477 + else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) 478 + acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 479 + acpi_device_fixed_event); 480 + else 481 + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, 482 + acpi_device_notify); 483 + } 484 + 485 + /* 
-------------------------------------------------------------------------- 486 + Device Matching 487 + -------------------------------------------------------------------------- */ 488 + 489 + /** 490 + * acpi_device_is_first_physical_node - Is given dev first physical node 491 + * @adev: ACPI companion device 492 + * @dev: Physical device to check 493 + * 494 + * Function checks if given @dev is the first physical devices attached to 495 + * the ACPI companion device. This distinction is needed in some cases 496 + * where the same companion device is shared between many physical devices. 497 + * 498 + * Note that the caller have to provide valid @adev pointer. 499 + */ 500 + bool acpi_device_is_first_physical_node(struct acpi_device *adev, 501 + const struct device *dev) 502 + { 503 + bool ret = false; 504 + 505 + mutex_lock(&adev->physical_node_lock); 506 + if (!list_empty(&adev->physical_node_list)) { 507 + const struct acpi_device_physical_node *node; 508 + 509 + node = list_first_entry(&adev->physical_node_list, 510 + struct acpi_device_physical_node, node); 511 + ret = node->dev == dev; 512 + } 513 + mutex_unlock(&adev->physical_node_lock); 514 + 515 + return ret; 516 + } 517 + 518 + /* 519 + * acpi_companion_match() - Can we match via ACPI companion device 520 + * @dev: Device in question 521 + * 522 + * Check if the given device has an ACPI companion and if that companion has 523 + * a valid list of PNP IDs, and if the device is the first (primary) physical 524 + * device associated with it. Return the companion pointer if that's the case 525 + * or NULL otherwise. 526 + * 527 + * If multiple physical devices are attached to a single ACPI companion, we need 528 + * to be careful. The usage scenario for this kind of relationship is that all 529 + * of the physical devices in question use resources provided by the ACPI 530 + * companion. A typical case is an MFD device where all the sub-devices share 531 + * the parent's ACPI companion. 
In such cases we can only allow the primary 532 + * (first) physical device to be matched with the help of the companion's PNP 533 + * IDs. 534 + * 535 + * Additional physical devices sharing the ACPI companion can still use 536 + * resources available from it but they will be matched normally using functions 537 + * provided by their bus types (and analogously for their modalias). 538 + */ 539 + struct acpi_device *acpi_companion_match(const struct device *dev) 540 + { 541 + struct acpi_device *adev; 542 + struct mutex *physical_node_lock; 543 + 544 + adev = ACPI_COMPANION(dev); 545 + if (!adev) 546 + return NULL; 547 + 548 + if (list_empty(&adev->pnp.ids)) 549 + return NULL; 550 + 551 + physical_node_lock = &adev->physical_node_lock; 552 + mutex_lock(physical_node_lock); 553 + if (list_empty(&adev->physical_node_list)) { 554 + adev = NULL; 555 + } else { 556 + const struct acpi_device_physical_node *node; 557 + 558 + node = list_first_entry(&adev->physical_node_list, 559 + struct acpi_device_physical_node, node); 560 + if (node->dev != dev) 561 + adev = NULL; 562 + } 563 + mutex_unlock(physical_node_lock); 564 + 565 + return adev; 566 + } 567 + 568 + /** 569 + * acpi_of_match_device - Match device object using the "compatible" property. 570 + * @adev: ACPI device object to match. 571 + * @of_match_table: List of device IDs to match against. 572 + * 573 + * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of 574 + * identifiers and a _DSD object with the "compatible" property, use that 575 + * property to match against the given list of identifiers. 
576 + */ 577 + static bool acpi_of_match_device(struct acpi_device *adev, 578 + const struct of_device_id *of_match_table) 579 + { 580 + const union acpi_object *of_compatible, *obj; 581 + int i, nval; 582 + 583 + if (!adev) 584 + return false; 585 + 586 + of_compatible = adev->data.of_compatible; 587 + if (!of_match_table || !of_compatible) 588 + return false; 589 + 590 + if (of_compatible->type == ACPI_TYPE_PACKAGE) { 591 + nval = of_compatible->package.count; 592 + obj = of_compatible->package.elements; 593 + } else { /* Must be ACPI_TYPE_STRING. */ 594 + nval = 1; 595 + obj = of_compatible; 596 + } 597 + /* Now we can look for the driver DT compatible strings */ 598 + for (i = 0; i < nval; i++, obj++) { 599 + const struct of_device_id *id; 600 + 601 + for (id = of_match_table; id->compatible[0]; id++) 602 + if (!strcasecmp(obj->string.pointer, id->compatible)) 603 + return true; 604 + } 605 + 606 + return false; 607 + } 608 + 609 + static bool __acpi_match_device_cls(const struct acpi_device_id *id, 610 + struct acpi_hardware_id *hwid) 611 + { 612 + int i, msk, byte_shift; 613 + char buf[3]; 614 + 615 + if (!id->cls) 616 + return false; 617 + 618 + /* Apply class-code bitmask, before checking each class-code byte */ 619 + for (i = 1; i <= 3; i++) { 620 + byte_shift = 8 * (3 - i); 621 + msk = (id->cls_msk >> byte_shift) & 0xFF; 622 + if (!msk) 623 + continue; 624 + 625 + sprintf(buf, "%02x", (id->cls >> byte_shift) & msk); 626 + if (strncmp(buf, &hwid->id[(i - 1) * 2], 2)) 627 + return false; 628 + } 629 + return true; 630 + } 631 + 632 + static const struct acpi_device_id *__acpi_match_device( 633 + struct acpi_device *device, 634 + const struct acpi_device_id *ids, 635 + const struct of_device_id *of_ids) 636 + { 637 + const struct acpi_device_id *id; 638 + struct acpi_hardware_id *hwid; 639 + 640 + /* 641 + * If the device is not present, it is unnecessary to load device 642 + * driver for it. 
643 + */ 644 + if (!device || !device->status.present) 645 + return NULL; 646 + 647 + list_for_each_entry(hwid, &device->pnp.ids, list) { 648 + /* First, check the ACPI/PNP IDs provided by the caller. */ 649 + for (id = ids; id->id[0] || id->cls; id++) { 650 + if (id->id[0] && !strcmp((char *) id->id, hwid->id)) 651 + return id; 652 + else if (id->cls && __acpi_match_device_cls(id, hwid)) 653 + return id; 654 + } 655 + 656 + /* 657 + * Next, check ACPI_DT_NAMESPACE_HID and try to match the 658 + * "compatible" property if found. 659 + * 660 + * The id returned by the below is not valid, but the only 661 + * caller passing non-NULL of_ids here is only interested in 662 + * whether or not the return value is NULL. 663 + */ 664 + if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) 665 + && acpi_of_match_device(device, of_ids)) 666 + return id; 667 + } 668 + return NULL; 669 + } 670 + 671 + /** 672 + * acpi_match_device - Match a struct device against a given list of ACPI IDs 673 + * @ids: Array of struct acpi_device_id object to match against. 674 + * @dev: The device structure to match. 675 + * 676 + * Check if @dev has a valid ACPI handle and if there is a struct acpi_device 677 + * object for that handle and use that object to match against a given list of 678 + * device IDs. 679 + * 680 + * Return a pointer to the first matching ID on success or %NULL on failure. 681 + */ 682 + const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 683 + const struct device *dev) 684 + { 685 + return __acpi_match_device(acpi_companion_match(dev), ids, NULL); 686 + } 687 + EXPORT_SYMBOL_GPL(acpi_match_device); 688 + 689 + int acpi_match_device_ids(struct acpi_device *device, 690 + const struct acpi_device_id *ids) 691 + { 692 + return __acpi_match_device(device, ids, NULL) ? 
0 : -ENOENT; 693 + } 694 + EXPORT_SYMBOL(acpi_match_device_ids); 695 + 696 + bool acpi_driver_match_device(struct device *dev, 697 + const struct device_driver *drv) 698 + { 699 + if (!drv->acpi_match_table) 700 + return acpi_of_match_device(ACPI_COMPANION(dev), 701 + drv->of_match_table); 702 + 703 + return !!__acpi_match_device(acpi_companion_match(dev), 704 + drv->acpi_match_table, drv->of_match_table); 705 + } 706 + EXPORT_SYMBOL_GPL(acpi_driver_match_device); 707 + 708 + /* -------------------------------------------------------------------------- 709 + ACPI Driver Management 710 + -------------------------------------------------------------------------- */ 711 + 712 + /** 713 + * acpi_bus_register_driver - register a driver with the ACPI bus 714 + * @driver: driver being registered 715 + * 716 + * Registers a driver with the ACPI bus. Searches the namespace for all 717 + * devices that match the driver's criteria and binds. Returns zero for 718 + * success or a negative error status for failure. 719 + */ 720 + int acpi_bus_register_driver(struct acpi_driver *driver) 721 + { 722 + int ret; 723 + 724 + if (acpi_disabled) 725 + return -ENODEV; 726 + driver->drv.name = driver->name; 727 + driver->drv.bus = &acpi_bus_type; 728 + driver->drv.owner = driver->owner; 729 + 730 + ret = driver_register(&driver->drv); 731 + return ret; 732 + } 733 + 734 + EXPORT_SYMBOL(acpi_bus_register_driver); 735 + 736 + /** 737 + * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus 738 + * @driver: driver to unregister 739 + * 740 + * Unregisters a driver with the ACPI bus. Searches the namespace for all 741 + * devices that match the driver's criteria and unbinds. 
742 + */ 743 + void acpi_bus_unregister_driver(struct acpi_driver *driver) 744 + { 745 + driver_unregister(&driver->drv); 746 + } 747 + 748 + EXPORT_SYMBOL(acpi_bus_unregister_driver); 749 + 750 + /* -------------------------------------------------------------------------- 751 + ACPI Bus operations 752 + -------------------------------------------------------------------------- */ 753 + 754 + static int acpi_bus_match(struct device *dev, struct device_driver *drv) 755 + { 756 + struct acpi_device *acpi_dev = to_acpi_device(dev); 757 + struct acpi_driver *acpi_drv = to_acpi_driver(drv); 758 + 759 + return acpi_dev->flags.match_driver 760 + && !acpi_match_device_ids(acpi_dev, acpi_drv->ids); 761 + } 762 + 763 + static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env) 764 + { 765 + return __acpi_device_uevent_modalias(to_acpi_device(dev), env); 766 + } 767 + 768 + static int acpi_device_probe(struct device *dev) 769 + { 770 + struct acpi_device *acpi_dev = to_acpi_device(dev); 771 + struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); 772 + int ret; 773 + 774 + if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev)) 775 + return -EINVAL; 776 + 777 + if (!acpi_drv->ops.add) 778 + return -ENOSYS; 779 + 780 + ret = acpi_drv->ops.add(acpi_dev); 781 + if (ret) 782 + return ret; 783 + 784 + acpi_dev->driver = acpi_drv; 785 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 786 + "Driver [%s] successfully bound to device [%s]\n", 787 + acpi_drv->name, acpi_dev->pnp.bus_id)); 788 + 789 + if (acpi_drv->ops.notify) { 790 + ret = acpi_device_install_notify_handler(acpi_dev); 791 + if (ret) { 792 + if (acpi_drv->ops.remove) 793 + acpi_drv->ops.remove(acpi_dev); 794 + 795 + acpi_dev->driver = NULL; 796 + acpi_dev->driver_data = NULL; 797 + return ret; 798 + } 799 + } 800 + 801 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n", 802 + acpi_drv->name, acpi_dev->pnp.bus_id)); 803 + get_device(dev); 804 + return 0; 805 + } 806 + 807 + static int 
acpi_device_remove(struct device * dev) 808 + { 809 + struct acpi_device *acpi_dev = to_acpi_device(dev); 810 + struct acpi_driver *acpi_drv = acpi_dev->driver; 811 + 812 + if (acpi_drv) { 813 + if (acpi_drv->ops.notify) 814 + acpi_device_remove_notify_handler(acpi_dev); 815 + if (acpi_drv->ops.remove) 816 + acpi_drv->ops.remove(acpi_dev); 817 + } 818 + acpi_dev->driver = NULL; 819 + acpi_dev->driver_data = NULL; 820 + 821 + put_device(dev); 822 + return 0; 823 + } 824 + 825 + struct bus_type acpi_bus_type = { 826 + .name = "acpi", 827 + .match = acpi_bus_match, 828 + .probe = acpi_device_probe, 829 + .remove = acpi_device_remove, 830 + .uevent = acpi_device_uevent, 831 + }; 832 + 426 833 /* -------------------------------------------------------------------------- 427 834 Initialization/Cleanup 428 835 -------------------------------------------------------------------------- */ ··· 1068 661 */ 1069 662 acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL); 1070 663 1071 - return 0; 664 + result = bus_register(&acpi_bus_type); 665 + if (!result) 666 + return 0; 1072 667 1073 668 /* Mimic structured exception handling */ 1074 669 error1:
+8
drivers/acpi/device_pm.c
··· 1123 1123 if (dev->pm_domain) 1124 1124 return -EEXIST; 1125 1125 1126 + /* 1127 + * Only attach the power domain to the first device if the 1128 + * companion is shared by multiple. This is to prevent doing power 1129 + * management twice. 1130 + */ 1131 + if (!acpi_device_is_first_physical_node(adev, dev)) 1132 + return -EBUSY; 1133 + 1126 1134 acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func); 1127 1135 dev->pm_domain = &acpi_general_pm_domain; 1128 1136 if (power_on) {
+521
drivers/acpi/device_sysfs.c
··· 1 + /* 2 + * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias. 3 + * 4 + * Copyright (C) 2015, Intel Corp. 5 + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 6 + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 7 + * 8 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as published 12 + * by the Free Software Foundation. 13 + * 14 + * This program is distributed in the hope that it will be useful, but 15 + * WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 + */ 21 + 22 + #include <linux/acpi.h> 23 + #include <linux/device.h> 24 + #include <linux/export.h> 25 + #include <linux/nls.h> 26 + 27 + #include "internal.h" 28 + 29 + /** 30 + * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent 31 + * @acpi_dev: ACPI device object. 32 + * @modalias: Buffer to print into. 33 + * @size: Size of the buffer. 34 + * 35 + * Creates hid/cid(s) string needed for modalias and uevent 36 + * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: 37 + * char *modalias: "acpi:IBM0001:ACPI0001" 38 + * Return: 0: no _HID and no _CID 39 + * -EINVAL: output error 40 + * -ENOMEM: output is truncated 41 + */ 42 + static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, 43 + int size) 44 + { 45 + int len; 46 + int count; 47 + struct acpi_hardware_id *id; 48 + 49 + /* 50 + * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should 51 + * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the 52 + * device's list. 
53 + */ 54 + count = 0; 55 + list_for_each_entry(id, &acpi_dev->pnp.ids, list) 56 + if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) 57 + count++; 58 + 59 + if (!count) 60 + return 0; 61 + 62 + len = snprintf(modalias, size, "acpi:"); 63 + if (len <= 0) 64 + return len; 65 + 66 + size -= len; 67 + 68 + list_for_each_entry(id, &acpi_dev->pnp.ids, list) { 69 + if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) 70 + continue; 71 + 72 + count = snprintf(&modalias[len], size, "%s:", id->id); 73 + if (count < 0) 74 + return -EINVAL; 75 + 76 + if (count >= size) 77 + return -ENOMEM; 78 + 79 + len += count; 80 + size -= count; 81 + } 82 + modalias[len] = '\0'; 83 + return len; 84 + } 85 + 86 + /** 87 + * create_of_modalias - Creates DT compatible string for modalias and uevent 88 + * @acpi_dev: ACPI device object. 89 + * @modalias: Buffer to print into. 90 + * @size: Size of the buffer. 91 + * 92 + * Expose DT compatible modalias as of:NnameTCcompatible. This function should 93 + * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of 94 + * ACPI/PNP IDs. 95 + */ 96 + static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, 97 + int size) 98 + { 99 + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; 100 + const union acpi_object *of_compatible, *obj; 101 + int len, count; 102 + int i, nval; 103 + char *c; 104 + 105 + acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); 106 + /* DT strings are all in lower case */ 107 + for (c = buf.pointer; *c != '\0'; c++) 108 + *c = tolower(*c); 109 + 110 + len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); 111 + ACPI_FREE(buf.pointer); 112 + 113 + if (len <= 0) 114 + return len; 115 + 116 + of_compatible = acpi_dev->data.of_compatible; 117 + if (of_compatible->type == ACPI_TYPE_PACKAGE) { 118 + nval = of_compatible->package.count; 119 + obj = of_compatible->package.elements; 120 + } else { /* Must be ACPI_TYPE_STRING. 
*/ 121 + nval = 1; 122 + obj = of_compatible; 123 + } 124 + for (i = 0; i < nval; i++, obj++) { 125 + count = snprintf(&modalias[len], size, "C%s", 126 + obj->string.pointer); 127 + if (count < 0) 128 + return -EINVAL; 129 + 130 + if (count >= size) 131 + return -ENOMEM; 132 + 133 + len += count; 134 + size -= count; 135 + } 136 + modalias[len] = '\0'; 137 + return len; 138 + } 139 + 140 + int __acpi_device_uevent_modalias(struct acpi_device *adev, 141 + struct kobj_uevent_env *env) 142 + { 143 + int len; 144 + 145 + if (!adev) 146 + return -ENODEV; 147 + 148 + if (list_empty(&adev->pnp.ids)) 149 + return 0; 150 + 151 + if (add_uevent_var(env, "MODALIAS=")) 152 + return -ENOMEM; 153 + 154 + len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], 155 + sizeof(env->buf) - env->buflen); 156 + if (len < 0) 157 + return len; 158 + 159 + env->buflen += len; 160 + if (!adev->data.of_compatible) 161 + return 0; 162 + 163 + if (len > 0 && add_uevent_var(env, "MODALIAS=")) 164 + return -ENOMEM; 165 + 166 + len = create_of_modalias(adev, &env->buf[env->buflen - 1], 167 + sizeof(env->buf) - env->buflen); 168 + if (len < 0) 169 + return len; 170 + 171 + env->buflen += len; 172 + 173 + return 0; 174 + } 175 + 176 + /** 177 + * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices. 178 + * 179 + * Create the uevent modalias field for ACPI-enumerated devices. 180 + * 181 + * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with 182 + * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". 
183 + */ 184 + int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) 185 + { 186 + return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); 187 + } 188 + EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); 189 + 190 + static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size) 191 + { 192 + int len, count; 193 + 194 + if (!adev) 195 + return -ENODEV; 196 + 197 + if (list_empty(&adev->pnp.ids)) 198 + return 0; 199 + 200 + len = create_pnp_modalias(adev, buf, size - 1); 201 + if (len < 0) { 202 + return len; 203 + } else if (len > 0) { 204 + buf[len++] = '\n'; 205 + size -= len; 206 + } 207 + if (!adev->data.of_compatible) 208 + return len; 209 + 210 + count = create_of_modalias(adev, buf + len, size - 1); 211 + if (count < 0) { 212 + return count; 213 + } else if (count > 0) { 214 + len += count; 215 + buf[len++] = '\n'; 216 + } 217 + 218 + return len; 219 + } 220 + 221 + /** 222 + * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices. 223 + * 224 + * Create the modalias sysfs attribute for ACPI-enumerated devices. 225 + * 226 + * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with 227 + * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". 
228 + */ 229 + int acpi_device_modalias(struct device *dev, char *buf, int size) 230 + { 231 + return __acpi_device_modalias(acpi_companion_match(dev), buf, size); 232 + } 233 + EXPORT_SYMBOL_GPL(acpi_device_modalias); 234 + 235 + static ssize_t 236 + acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { 237 + return __acpi_device_modalias(to_acpi_device(dev), buf, 1024); 238 + } 239 + static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); 240 + 241 + static ssize_t real_power_state_show(struct device *dev, 242 + struct device_attribute *attr, char *buf) 243 + { 244 + struct acpi_device *adev = to_acpi_device(dev); 245 + int state; 246 + int ret; 247 + 248 + ret = acpi_device_get_power(adev, &state); 249 + if (ret) 250 + return ret; 251 + 252 + return sprintf(buf, "%s\n", acpi_power_state_string(state)); 253 + } 254 + 255 + static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL); 256 + 257 + static ssize_t power_state_show(struct device *dev, 258 + struct device_attribute *attr, char *buf) 259 + { 260 + struct acpi_device *adev = to_acpi_device(dev); 261 + 262 + return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state)); 263 + } 264 + 265 + static DEVICE_ATTR(power_state, 0444, power_state_show, NULL); 266 + 267 + static ssize_t 268 + acpi_eject_store(struct device *d, struct device_attribute *attr, 269 + const char *buf, size_t count) 270 + { 271 + struct acpi_device *acpi_device = to_acpi_device(d); 272 + acpi_object_type not_used; 273 + acpi_status status; 274 + 275 + if (!count || buf[0] != '1') 276 + return -EINVAL; 277 + 278 + if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) 279 + && !acpi_device->driver) 280 + return -ENODEV; 281 + 282 + status = acpi_get_type(acpi_device->handle, &not_used); 283 + if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) 284 + return -ENODEV; 285 + 286 + get_device(&acpi_device->dev); 287 + status = 
acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT); 288 + if (ACPI_SUCCESS(status)) 289 + return count; 290 + 291 + put_device(&acpi_device->dev); 292 + acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, 293 + ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); 294 + return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN; 295 + } 296 + 297 + static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); 298 + 299 + static ssize_t 300 + acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { 301 + struct acpi_device *acpi_dev = to_acpi_device(dev); 302 + 303 + return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); 304 + } 305 + static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); 306 + 307 + static ssize_t acpi_device_uid_show(struct device *dev, 308 + struct device_attribute *attr, char *buf) 309 + { 310 + struct acpi_device *acpi_dev = to_acpi_device(dev); 311 + 312 + return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); 313 + } 314 + static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); 315 + 316 + static ssize_t acpi_device_adr_show(struct device *dev, 317 + struct device_attribute *attr, char *buf) 318 + { 319 + struct acpi_device *acpi_dev = to_acpi_device(dev); 320 + 321 + return sprintf(buf, "0x%08x\n", 322 + (unsigned int)(acpi_dev->pnp.bus_address)); 323 + } 324 + static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); 325 + 326 + static ssize_t 327 + acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { 328 + struct acpi_device *acpi_dev = to_acpi_device(dev); 329 + struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; 330 + int result; 331 + 332 + result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); 333 + if (result) 334 + goto end; 335 + 336 + result = sprintf(buf, "%s\n", (char*)path.pointer); 337 + kfree(path.pointer); 338 + end: 339 + return result; 340 + } 341 + static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); 342 + 343 + /* sysfs file that shows 
description text from the ACPI _STR method */ 344 + static ssize_t description_show(struct device *dev, 345 + struct device_attribute *attr, 346 + char *buf) { 347 + struct acpi_device *acpi_dev = to_acpi_device(dev); 348 + int result; 349 + 350 + if (acpi_dev->pnp.str_obj == NULL) 351 + return 0; 352 + 353 + /* 354 + * The _STR object contains a Unicode identifier for a device. 355 + * We need to convert to utf-8 so it can be displayed. 356 + */ 357 + result = utf16s_to_utf8s( 358 + (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, 359 + acpi_dev->pnp.str_obj->buffer.length, 360 + UTF16_LITTLE_ENDIAN, buf, 361 + PAGE_SIZE); 362 + 363 + buf[result++] = '\n'; 364 + 365 + return result; 366 + } 367 + static DEVICE_ATTR(description, 0444, description_show, NULL); 368 + 369 + static ssize_t 370 + acpi_device_sun_show(struct device *dev, struct device_attribute *attr, 371 + char *buf) { 372 + struct acpi_device *acpi_dev = to_acpi_device(dev); 373 + acpi_status status; 374 + unsigned long long sun; 375 + 376 + status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); 377 + if (ACPI_FAILURE(status)) 378 + return -ENODEV; 379 + 380 + return sprintf(buf, "%llu\n", sun); 381 + } 382 + static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); 383 + 384 + static ssize_t status_show(struct device *dev, struct device_attribute *attr, 385 + char *buf) { 386 + struct acpi_device *acpi_dev = to_acpi_device(dev); 387 + acpi_status status; 388 + unsigned long long sta; 389 + 390 + status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta); 391 + if (ACPI_FAILURE(status)) 392 + return -ENODEV; 393 + 394 + return sprintf(buf, "%llu\n", sta); 395 + } 396 + static DEVICE_ATTR_RO(status); 397 + 398 + /** 399 + * acpi_device_setup_files - Create sysfs attributes of an ACPI device. 400 + * @dev: ACPI device object. 
401 + */ 402 + int acpi_device_setup_files(struct acpi_device *dev) 403 + { 404 + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 405 + acpi_status status; 406 + int result = 0; 407 + 408 + /* 409 + * Devices gotten from FADT don't have a "path" attribute 410 + */ 411 + if (dev->handle) { 412 + result = device_create_file(&dev->dev, &dev_attr_path); 413 + if (result) 414 + goto end; 415 + } 416 + 417 + if (!list_empty(&dev->pnp.ids)) { 418 + result = device_create_file(&dev->dev, &dev_attr_hid); 419 + if (result) 420 + goto end; 421 + 422 + result = device_create_file(&dev->dev, &dev_attr_modalias); 423 + if (result) 424 + goto end; 425 + } 426 + 427 + /* 428 + * If device has _STR, 'description' file is created 429 + */ 430 + if (acpi_has_method(dev->handle, "_STR")) { 431 + status = acpi_evaluate_object(dev->handle, "_STR", 432 + NULL, &buffer); 433 + if (ACPI_FAILURE(status)) 434 + buffer.pointer = NULL; 435 + dev->pnp.str_obj = buffer.pointer; 436 + result = device_create_file(&dev->dev, &dev_attr_description); 437 + if (result) 438 + goto end; 439 + } 440 + 441 + if (dev->pnp.type.bus_address) 442 + result = device_create_file(&dev->dev, &dev_attr_adr); 443 + if (dev->pnp.unique_id) 444 + result = device_create_file(&dev->dev, &dev_attr_uid); 445 + 446 + if (acpi_has_method(dev->handle, "_SUN")) { 447 + result = device_create_file(&dev->dev, &dev_attr_sun); 448 + if (result) 449 + goto end; 450 + } 451 + 452 + if (acpi_has_method(dev->handle, "_STA")) { 453 + result = device_create_file(&dev->dev, &dev_attr_status); 454 + if (result) 455 + goto end; 456 + } 457 + 458 + /* 459 + * If device has _EJ0, 'eject' file is created that is used to trigger 460 + * hot-removal function from userland. 
461 + */ 462 + if (acpi_has_method(dev->handle, "_EJ0")) { 463 + result = device_create_file(&dev->dev, &dev_attr_eject); 464 + if (result) 465 + return result; 466 + } 467 + 468 + if (dev->flags.power_manageable) { 469 + result = device_create_file(&dev->dev, &dev_attr_power_state); 470 + if (result) 471 + return result; 472 + 473 + if (dev->power.flags.power_resources) 474 + result = device_create_file(&dev->dev, 475 + &dev_attr_real_power_state); 476 + } 477 + 478 + end: 479 + return result; 480 + } 481 + 482 + /** 483 + * acpi_device_remove_files - Remove sysfs attributes of an ACPI device. 484 + * @dev: ACPI device object. 485 + */ 486 + void acpi_device_remove_files(struct acpi_device *dev) 487 + { 488 + if (dev->flags.power_manageable) { 489 + device_remove_file(&dev->dev, &dev_attr_power_state); 490 + if (dev->power.flags.power_resources) 491 + device_remove_file(&dev->dev, 492 + &dev_attr_real_power_state); 493 + } 494 + 495 + /* 496 + * If device has _STR, remove 'description' file 497 + */ 498 + if (acpi_has_method(dev->handle, "_STR")) { 499 + kfree(dev->pnp.str_obj); 500 + device_remove_file(&dev->dev, &dev_attr_description); 501 + } 502 + /* 503 + * If device has _EJ0, remove 'eject' file. 504 + */ 505 + if (acpi_has_method(dev->handle, "_EJ0")) 506 + device_remove_file(&dev->dev, &dev_attr_eject); 507 + 508 + if (acpi_has_method(dev->handle, "_SUN")) 509 + device_remove_file(&dev->dev, &dev_attr_sun); 510 + 511 + if (dev->pnp.unique_id) 512 + device_remove_file(&dev->dev, &dev_attr_uid); 513 + if (dev->pnp.type.bus_address) 514 + device_remove_file(&dev->dev, &dev_attr_adr); 515 + device_remove_file(&dev->dev, &dev_attr_modalias); 516 + device_remove_file(&dev->dev, &dev_attr_hid); 517 + if (acpi_has_method(dev->handle, "_STA")) 518 + device_remove_file(&dev->dev, &dev_attr_status); 519 + if (dev->handle) 520 + device_remove_file(&dev->dev, &dev_attr_path); 521 + }
+11
drivers/acpi/internal.h
··· 93 93 void (*release)(struct device *)); 94 94 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 95 95 int type, unsigned long long sta); 96 + int acpi_device_setup_files(struct acpi_device *dev); 97 + void acpi_device_remove_files(struct acpi_device *dev); 96 98 void acpi_device_add_finalize(struct acpi_device *device); 97 99 void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); 98 100 bool acpi_device_is_present(struct acpi_device *adev); 99 101 bool acpi_device_is_battery(struct acpi_device *adev); 102 + bool acpi_device_is_first_physical_node(struct acpi_device *adev, 103 + const struct device *dev); 104 + 105 + /* -------------------------------------------------------------------------- 106 + Device Matching and Notification 107 + -------------------------------------------------------------------------- */ 108 + struct acpi_device *acpi_companion_match(const struct device *dev); 109 + int __acpi_device_uevent_modalias(struct acpi_device *adev, 110 + struct kobj_uevent_env *env); 100 111 101 112 /* -------------------------------------------------------------------------- 102 113 Power Resource
+9 -6
drivers/acpi/power.c
··· 1 1 /* 2 - * acpi_power.c - ACPI Bus Power Management ($Revision: 39 $) 2 + * drivers/acpi/power.c - ACPI Power Resources management. 3 3 * 4 - * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 - * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 4 + * Copyright (C) 2001 - 2015 Intel Corp. 5 + * Author: Andy Grover <andrew.grover@intel.com> 6 + * Author: Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6 8 * 7 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 10 * ··· 29 27 * ACPI power-managed devices may be controlled in two ways: 30 28 * 1. via "Device Specific (D-State) Control" 31 29 * 2. via "Power Resource Control". 32 - * This module is used to manage devices relying on Power Resource Control. 30 + * The code below deals with ACPI Power Resources control. 33 31 * 34 - * An ACPI "power resource object" describes a software controllable power 35 - * plane, clock plane, or other resource used by a power managed device. 32 + * An ACPI "power resource object" represents a software controllable power 33 + * plane, clock plane, or other resource depended on by a device. 34 + * 36 35 * A device may rely on multiple power resources, and a power resource 37 36 * may be shared by multiple devices. 38 37 */
-860
drivers/acpi/scan.c
··· 115 115 return 0; 116 116 } 117 117 118 - /** 119 - * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent 120 - * @acpi_dev: ACPI device object. 121 - * @modalias: Buffer to print into. 122 - * @size: Size of the buffer. 123 - * 124 - * Creates hid/cid(s) string needed for modalias and uevent 125 - * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: 126 - * char *modalias: "acpi:IBM0001:ACPI0001" 127 - * Return: 0: no _HID and no _CID 128 - * -EINVAL: output error 129 - * -ENOMEM: output is truncated 130 - */ 131 - static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, 132 - int size) 133 - { 134 - int len; 135 - int count; 136 - struct acpi_hardware_id *id; 137 - 138 - /* 139 - * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should 140 - * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the 141 - * device's list. 142 - */ 143 - count = 0; 144 - list_for_each_entry(id, &acpi_dev->pnp.ids, list) 145 - if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) 146 - count++; 147 - 148 - if (!count) 149 - return 0; 150 - 151 - len = snprintf(modalias, size, "acpi:"); 152 - if (len <= 0) 153 - return len; 154 - 155 - size -= len; 156 - 157 - list_for_each_entry(id, &acpi_dev->pnp.ids, list) { 158 - if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) 159 - continue; 160 - 161 - count = snprintf(&modalias[len], size, "%s:", id->id); 162 - if (count < 0) 163 - return -EINVAL; 164 - 165 - if (count >= size) 166 - return -ENOMEM; 167 - 168 - len += count; 169 - size -= count; 170 - } 171 - modalias[len] = '\0'; 172 - return len; 173 - } 174 - 175 - /** 176 - * create_of_modalias - Creates DT compatible string for modalias and uevent 177 - * @acpi_dev: ACPI device object. 178 - * @modalias: Buffer to print into. 179 - * @size: Size of the buffer. 180 - * 181 - * Expose DT compatible modalias as of:NnameTCcompatible. 
This function should 182 - * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of 183 - * ACPI/PNP IDs. 184 - */ 185 - static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, 186 - int size) 187 - { 188 - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; 189 - const union acpi_object *of_compatible, *obj; 190 - int len, count; 191 - int i, nval; 192 - char *c; 193 - 194 - acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); 195 - /* DT strings are all in lower case */ 196 - for (c = buf.pointer; *c != '\0'; c++) 197 - *c = tolower(*c); 198 - 199 - len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); 200 - ACPI_FREE(buf.pointer); 201 - 202 - if (len <= 0) 203 - return len; 204 - 205 - of_compatible = acpi_dev->data.of_compatible; 206 - if (of_compatible->type == ACPI_TYPE_PACKAGE) { 207 - nval = of_compatible->package.count; 208 - obj = of_compatible->package.elements; 209 - } else { /* Must be ACPI_TYPE_STRING. */ 210 - nval = 1; 211 - obj = of_compatible; 212 - } 213 - for (i = 0; i < nval; i++, obj++) { 214 - count = snprintf(&modalias[len], size, "C%s", 215 - obj->string.pointer); 216 - if (count < 0) 217 - return -EINVAL; 218 - 219 - if (count >= size) 220 - return -ENOMEM; 221 - 222 - len += count; 223 - size -= count; 224 - } 225 - modalias[len] = '\0'; 226 - return len; 227 - } 228 - 229 - /* 230 - * acpi_companion_match() - Can we match via ACPI companion device 231 - * @dev: Device in question 232 - * 233 - * Check if the given device has an ACPI companion and if that companion has 234 - * a valid list of PNP IDs, and if the device is the first (primary) physical 235 - * device associated with it. Return the companion pointer if that's the case 236 - * or NULL otherwise. 237 - * 238 - * If multiple physical devices are attached to a single ACPI companion, we need 239 - * to be careful. 
The usage scenario for this kind of relationship is that all 240 - * of the physical devices in question use resources provided by the ACPI 241 - * companion. A typical case is an MFD device where all the sub-devices share 242 - * the parent's ACPI companion. In such cases we can only allow the primary 243 - * (first) physical device to be matched with the help of the companion's PNP 244 - * IDs. 245 - * 246 - * Additional physical devices sharing the ACPI companion can still use 247 - * resources available from it but they will be matched normally using functions 248 - * provided by their bus types (and analogously for their modalias). 249 - */ 250 - static struct acpi_device *acpi_companion_match(const struct device *dev) 251 - { 252 - struct acpi_device *adev; 253 - struct mutex *physical_node_lock; 254 - 255 - adev = ACPI_COMPANION(dev); 256 - if (!adev) 257 - return NULL; 258 - 259 - if (list_empty(&adev->pnp.ids)) 260 - return NULL; 261 - 262 - physical_node_lock = &adev->physical_node_lock; 263 - mutex_lock(physical_node_lock); 264 - if (list_empty(&adev->physical_node_list)) { 265 - adev = NULL; 266 - } else { 267 - const struct acpi_device_physical_node *node; 268 - 269 - node = list_first_entry(&adev->physical_node_list, 270 - struct acpi_device_physical_node, node); 271 - if (node->dev != dev) 272 - adev = NULL; 273 - } 274 - mutex_unlock(physical_node_lock); 275 - 276 - return adev; 277 - } 278 - 279 - static int __acpi_device_uevent_modalias(struct acpi_device *adev, 280 - struct kobj_uevent_env *env) 281 - { 282 - int len; 283 - 284 - if (!adev) 285 - return -ENODEV; 286 - 287 - if (list_empty(&adev->pnp.ids)) 288 - return 0; 289 - 290 - if (add_uevent_var(env, "MODALIAS=")) 291 - return -ENOMEM; 292 - 293 - len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], 294 - sizeof(env->buf) - env->buflen); 295 - if (len < 0) 296 - return len; 297 - 298 - env->buflen += len; 299 - if (!adev->data.of_compatible) 300 - return 0; 301 - 302 - if (len > 0 && 
add_uevent_var(env, "MODALIAS=")) 303 - return -ENOMEM; 304 - 305 - len = create_of_modalias(adev, &env->buf[env->buflen - 1], 306 - sizeof(env->buf) - env->buflen); 307 - if (len < 0) 308 - return len; 309 - 310 - env->buflen += len; 311 - 312 - return 0; 313 - } 314 - 315 - /* 316 - * Creates uevent modalias field for ACPI enumerated devices. 317 - * Because the other buses does not support ACPI HIDs & CIDs. 318 - * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get: 319 - * "acpi:IBM0001:ACPI0001" 320 - */ 321 - int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) 322 - { 323 - return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); 324 - } 325 - EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); 326 - 327 - static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size) 328 - { 329 - int len, count; 330 - 331 - if (!adev) 332 - return -ENODEV; 333 - 334 - if (list_empty(&adev->pnp.ids)) 335 - return 0; 336 - 337 - len = create_pnp_modalias(adev, buf, size - 1); 338 - if (len < 0) { 339 - return len; 340 - } else if (len > 0) { 341 - buf[len++] = '\n'; 342 - size -= len; 343 - } 344 - if (!adev->data.of_compatible) 345 - return len; 346 - 347 - count = create_of_modalias(adev, buf + len, size - 1); 348 - if (count < 0) { 349 - return count; 350 - } else if (count > 0) { 351 - len += count; 352 - buf[len++] = '\n'; 353 - } 354 - 355 - return len; 356 - } 357 - 358 - /* 359 - * Creates modalias sysfs attribute for ACPI enumerated devices. 360 - * Because the other buses does not support ACPI HIDs & CIDs. 361 - * e.g. 
for a device with hid:IBM0001 and cid:ACPI0001 you get: 362 - * "acpi:IBM0001:ACPI0001" 363 - */ 364 - int acpi_device_modalias(struct device *dev, char *buf, int size) 365 - { 366 - return __acpi_device_modalias(acpi_companion_match(dev), buf, size); 367 - } 368 - EXPORT_SYMBOL_GPL(acpi_device_modalias); 369 - 370 - static ssize_t 371 - acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { 372 - return __acpi_device_modalias(to_acpi_device(dev), buf, 1024); 373 - } 374 - static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); 375 - 376 118 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) 377 119 { 378 120 struct acpi_device_physical_node *pn; ··· 443 701 unlock_device_hotplug(); 444 702 } 445 703 446 - static ssize_t real_power_state_show(struct device *dev, 447 - struct device_attribute *attr, char *buf) 448 - { 449 - struct acpi_device *adev = to_acpi_device(dev); 450 - int state; 451 - int ret; 452 - 453 - ret = acpi_device_get_power(adev, &state); 454 - if (ret) 455 - return ret; 456 - 457 - return sprintf(buf, "%s\n", acpi_power_state_string(state)); 458 - } 459 - 460 - static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL); 461 - 462 - static ssize_t power_state_show(struct device *dev, 463 - struct device_attribute *attr, char *buf) 464 - { 465 - struct acpi_device *adev = to_acpi_device(dev); 466 - 467 - return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state)); 468 - } 469 - 470 - static DEVICE_ATTR(power_state, 0444, power_state_show, NULL); 471 - 472 - static ssize_t 473 - acpi_eject_store(struct device *d, struct device_attribute *attr, 474 - const char *buf, size_t count) 475 - { 476 - struct acpi_device *acpi_device = to_acpi_device(d); 477 - acpi_object_type not_used; 478 - acpi_status status; 479 - 480 - if (!count || buf[0] != '1') 481 - return -EINVAL; 482 - 483 - if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) 484 - && 
!acpi_device->driver) 485 - return -ENODEV; 486 - 487 - status = acpi_get_type(acpi_device->handle, &not_used); 488 - if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) 489 - return -ENODEV; 490 - 491 - get_device(&acpi_device->dev); 492 - status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT); 493 - if (ACPI_SUCCESS(status)) 494 - return count; 495 - 496 - put_device(&acpi_device->dev); 497 - acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, 498 - ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); 499 - return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN; 500 - } 501 - 502 - static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); 503 - 504 - static ssize_t 505 - acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { 506 - struct acpi_device *acpi_dev = to_acpi_device(dev); 507 - 508 - return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); 509 - } 510 - static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); 511 - 512 - static ssize_t acpi_device_uid_show(struct device *dev, 513 - struct device_attribute *attr, char *buf) 514 - { 515 - struct acpi_device *acpi_dev = to_acpi_device(dev); 516 - 517 - return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); 518 - } 519 - static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); 520 - 521 - static ssize_t acpi_device_adr_show(struct device *dev, 522 - struct device_attribute *attr, char *buf) 523 - { 524 - struct acpi_device *acpi_dev = to_acpi_device(dev); 525 - 526 - return sprintf(buf, "0x%08x\n", 527 - (unsigned int)(acpi_dev->pnp.bus_address)); 528 - } 529 - static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); 530 - 531 - static ssize_t 532 - acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { 533 - struct acpi_device *acpi_dev = to_acpi_device(dev); 534 - struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; 535 - int result; 536 - 537 - result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); 538 - if 
(result) 539 - goto end; 540 - 541 - result = sprintf(buf, "%s\n", (char*)path.pointer); 542 - kfree(path.pointer); 543 - end: 544 - return result; 545 - } 546 - static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); 547 - 548 - /* sysfs file that shows description text from the ACPI _STR method */ 549 - static ssize_t description_show(struct device *dev, 550 - struct device_attribute *attr, 551 - char *buf) { 552 - struct acpi_device *acpi_dev = to_acpi_device(dev); 553 - int result; 554 - 555 - if (acpi_dev->pnp.str_obj == NULL) 556 - return 0; 557 - 558 - /* 559 - * The _STR object contains a Unicode identifier for a device. 560 - * We need to convert to utf-8 so it can be displayed. 561 - */ 562 - result = utf16s_to_utf8s( 563 - (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, 564 - acpi_dev->pnp.str_obj->buffer.length, 565 - UTF16_LITTLE_ENDIAN, buf, 566 - PAGE_SIZE); 567 - 568 - buf[result++] = '\n'; 569 - 570 - return result; 571 - } 572 - static DEVICE_ATTR(description, 0444, description_show, NULL); 573 - 574 - static ssize_t 575 - acpi_device_sun_show(struct device *dev, struct device_attribute *attr, 576 - char *buf) { 577 - struct acpi_device *acpi_dev = to_acpi_device(dev); 578 - acpi_status status; 579 - unsigned long long sun; 580 - 581 - status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); 582 - if (ACPI_FAILURE(status)) 583 - return -ENODEV; 584 - 585 - return sprintf(buf, "%llu\n", sun); 586 - } 587 - static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); 588 - 589 - static ssize_t status_show(struct device *dev, struct device_attribute *attr, 590 - char *buf) { 591 - struct acpi_device *acpi_dev = to_acpi_device(dev); 592 - acpi_status status; 593 - unsigned long long sta; 594 - 595 - status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta); 596 - if (ACPI_FAILURE(status)) 597 - return -ENODEV; 598 - 599 - return sprintf(buf, "%llu\n", sta); 600 - } 601 - static DEVICE_ATTR_RO(status); 602 - 603 - static 
int acpi_device_setup_files(struct acpi_device *dev) 604 - { 605 - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 606 - acpi_status status; 607 - int result = 0; 608 - 609 - /* 610 - * Devices gotten from FADT don't have a "path" attribute 611 - */ 612 - if (dev->handle) { 613 - result = device_create_file(&dev->dev, &dev_attr_path); 614 - if (result) 615 - goto end; 616 - } 617 - 618 - if (!list_empty(&dev->pnp.ids)) { 619 - result = device_create_file(&dev->dev, &dev_attr_hid); 620 - if (result) 621 - goto end; 622 - 623 - result = device_create_file(&dev->dev, &dev_attr_modalias); 624 - if (result) 625 - goto end; 626 - } 627 - 628 - /* 629 - * If device has _STR, 'description' file is created 630 - */ 631 - if (acpi_has_method(dev->handle, "_STR")) { 632 - status = acpi_evaluate_object(dev->handle, "_STR", 633 - NULL, &buffer); 634 - if (ACPI_FAILURE(status)) 635 - buffer.pointer = NULL; 636 - dev->pnp.str_obj = buffer.pointer; 637 - result = device_create_file(&dev->dev, &dev_attr_description); 638 - if (result) 639 - goto end; 640 - } 641 - 642 - if (dev->pnp.type.bus_address) 643 - result = device_create_file(&dev->dev, &dev_attr_adr); 644 - if (dev->pnp.unique_id) 645 - result = device_create_file(&dev->dev, &dev_attr_uid); 646 - 647 - if (acpi_has_method(dev->handle, "_SUN")) { 648 - result = device_create_file(&dev->dev, &dev_attr_sun); 649 - if (result) 650 - goto end; 651 - } 652 - 653 - if (acpi_has_method(dev->handle, "_STA")) { 654 - result = device_create_file(&dev->dev, &dev_attr_status); 655 - if (result) 656 - goto end; 657 - } 658 - 659 - /* 660 - * If device has _EJ0, 'eject' file is created that is used to trigger 661 - * hot-removal function from userland. 
662 - */ 663 - if (acpi_has_method(dev->handle, "_EJ0")) { 664 - result = device_create_file(&dev->dev, &dev_attr_eject); 665 - if (result) 666 - return result; 667 - } 668 - 669 - if (dev->flags.power_manageable) { 670 - result = device_create_file(&dev->dev, &dev_attr_power_state); 671 - if (result) 672 - return result; 673 - 674 - if (dev->power.flags.power_resources) 675 - result = device_create_file(&dev->dev, 676 - &dev_attr_real_power_state); 677 - } 678 - 679 - end: 680 - return result; 681 - } 682 - 683 - static void acpi_device_remove_files(struct acpi_device *dev) 684 - { 685 - if (dev->flags.power_manageable) { 686 - device_remove_file(&dev->dev, &dev_attr_power_state); 687 - if (dev->power.flags.power_resources) 688 - device_remove_file(&dev->dev, 689 - &dev_attr_real_power_state); 690 - } 691 - 692 - /* 693 - * If device has _STR, remove 'description' file 694 - */ 695 - if (acpi_has_method(dev->handle, "_STR")) { 696 - kfree(dev->pnp.str_obj); 697 - device_remove_file(&dev->dev, &dev_attr_description); 698 - } 699 - /* 700 - * If device has _EJ0, remove 'eject' file. 
701 - */ 702 - if (acpi_has_method(dev->handle, "_EJ0")) 703 - device_remove_file(&dev->dev, &dev_attr_eject); 704 - 705 - if (acpi_has_method(dev->handle, "_SUN")) 706 - device_remove_file(&dev->dev, &dev_attr_sun); 707 - 708 - if (dev->pnp.unique_id) 709 - device_remove_file(&dev->dev, &dev_attr_uid); 710 - if (dev->pnp.type.bus_address) 711 - device_remove_file(&dev->dev, &dev_attr_adr); 712 - device_remove_file(&dev->dev, &dev_attr_modalias); 713 - device_remove_file(&dev->dev, &dev_attr_hid); 714 - if (acpi_has_method(dev->handle, "_STA")) 715 - device_remove_file(&dev->dev, &dev_attr_status); 716 - if (dev->handle) 717 - device_remove_file(&dev->dev, &dev_attr_path); 718 - } 719 - /* -------------------------------------------------------------------------- 720 - ACPI Bus operations 721 - -------------------------------------------------------------------------- */ 722 - 723 - /** 724 - * acpi_of_match_device - Match device object using the "compatible" property. 725 - * @adev: ACPI device object to match. 726 - * @of_match_table: List of device IDs to match against. 727 - * 728 - * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of 729 - * identifiers and a _DSD object with the "compatible" property, use that 730 - * property to match against the given list of identifiers. 731 - */ 732 - static bool acpi_of_match_device(struct acpi_device *adev, 733 - const struct of_device_id *of_match_table) 734 - { 735 - const union acpi_object *of_compatible, *obj; 736 - int i, nval; 737 - 738 - if (!adev) 739 - return false; 740 - 741 - of_compatible = adev->data.of_compatible; 742 - if (!of_match_table || !of_compatible) 743 - return false; 744 - 745 - if (of_compatible->type == ACPI_TYPE_PACKAGE) { 746 - nval = of_compatible->package.count; 747 - obj = of_compatible->package.elements; 748 - } else { /* Must be ACPI_TYPE_STRING. 
*/ 749 - nval = 1; 750 - obj = of_compatible; 751 - } 752 - /* Now we can look for the driver DT compatible strings */ 753 - for (i = 0; i < nval; i++, obj++) { 754 - const struct of_device_id *id; 755 - 756 - for (id = of_match_table; id->compatible[0]; id++) 757 - if (!strcasecmp(obj->string.pointer, id->compatible)) 758 - return true; 759 - } 760 - 761 - return false; 762 - } 763 - 764 - static bool __acpi_match_device_cls(const struct acpi_device_id *id, 765 - struct acpi_hardware_id *hwid) 766 - { 767 - int i, msk, byte_shift; 768 - char buf[3]; 769 - 770 - if (!id->cls) 771 - return false; 772 - 773 - /* Apply class-code bitmask, before checking each class-code byte */ 774 - for (i = 1; i <= 3; i++) { 775 - byte_shift = 8 * (3 - i); 776 - msk = (id->cls_msk >> byte_shift) & 0xFF; 777 - if (!msk) 778 - continue; 779 - 780 - sprintf(buf, "%02x", (id->cls >> byte_shift) & msk); 781 - if (strncmp(buf, &hwid->id[(i - 1) * 2], 2)) 782 - return false; 783 - } 784 - return true; 785 - } 786 - 787 - static const struct acpi_device_id *__acpi_match_device( 788 - struct acpi_device *device, 789 - const struct acpi_device_id *ids, 790 - const struct of_device_id *of_ids) 791 - { 792 - const struct acpi_device_id *id; 793 - struct acpi_hardware_id *hwid; 794 - 795 - /* 796 - * If the device is not present, it is unnecessary to load device 797 - * driver for it. 798 - */ 799 - if (!device || !device->status.present) 800 - return NULL; 801 - 802 - list_for_each_entry(hwid, &device->pnp.ids, list) { 803 - /* First, check the ACPI/PNP IDs provided by the caller. */ 804 - for (id = ids; id->id[0] || id->cls; id++) { 805 - if (id->id[0] && !strcmp((char *) id->id, hwid->id)) 806 - return id; 807 - else if (id->cls && __acpi_match_device_cls(id, hwid)) 808 - return id; 809 - } 810 - 811 - /* 812 - * Next, check ACPI_DT_NAMESPACE_HID and try to match the 813 - * "compatible" property if found. 
814 - * 815 - * The id returned by the below is not valid, but the only 816 - * caller passing non-NULL of_ids here is only interested in 817 - * whether or not the return value is NULL. 818 - */ 819 - if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) 820 - && acpi_of_match_device(device, of_ids)) 821 - return id; 822 - } 823 - return NULL; 824 - } 825 - 826 - /** 827 - * acpi_match_device - Match a struct device against a given list of ACPI IDs 828 - * @ids: Array of struct acpi_device_id object to match against. 829 - * @dev: The device structure to match. 830 - * 831 - * Check if @dev has a valid ACPI handle and if there is a struct acpi_device 832 - * object for that handle and use that object to match against a given list of 833 - * device IDs. 834 - * 835 - * Return a pointer to the first matching ID on success or %NULL on failure. 836 - */ 837 - const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 838 - const struct device *dev) 839 - { 840 - return __acpi_match_device(acpi_companion_match(dev), ids, NULL); 841 - } 842 - EXPORT_SYMBOL_GPL(acpi_match_device); 843 - 844 - int acpi_match_device_ids(struct acpi_device *device, 845 - const struct acpi_device_id *ids) 846 - { 847 - return __acpi_match_device(device, ids, NULL) ? 
0 : -ENOENT; 848 - } 849 - EXPORT_SYMBOL(acpi_match_device_ids); 850 - 851 - bool acpi_driver_match_device(struct device *dev, 852 - const struct device_driver *drv) 853 - { 854 - if (!drv->acpi_match_table) 855 - return acpi_of_match_device(ACPI_COMPANION(dev), 856 - drv->of_match_table); 857 - 858 - return !!__acpi_match_device(acpi_companion_match(dev), 859 - drv->acpi_match_table, drv->of_match_table); 860 - } 861 - EXPORT_SYMBOL_GPL(acpi_driver_match_device); 862 - 863 704 static void acpi_free_power_resources_lists(struct acpi_device *device) 864 705 { 865 706 int i; ··· 468 1143 acpi_free_power_resources_lists(acpi_dev); 469 1144 kfree(acpi_dev); 470 1145 } 471 - 472 - static int acpi_bus_match(struct device *dev, struct device_driver *drv) 473 - { 474 - struct acpi_device *acpi_dev = to_acpi_device(dev); 475 - struct acpi_driver *acpi_drv = to_acpi_driver(drv); 476 - 477 - return acpi_dev->flags.match_driver 478 - && !acpi_match_device_ids(acpi_dev, acpi_drv->ids); 479 - } 480 - 481 - static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env) 482 - { 483 - return __acpi_device_uevent_modalias(to_acpi_device(dev), env); 484 - } 485 - 486 - static void acpi_device_notify(acpi_handle handle, u32 event, void *data) 487 - { 488 - struct acpi_device *device = data; 489 - 490 - device->driver->ops.notify(device, event); 491 - } 492 - 493 - static void acpi_device_notify_fixed(void *data) 494 - { 495 - struct acpi_device *device = data; 496 - 497 - /* Fixed hardware devices have no handles */ 498 - acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); 499 - } 500 - 501 - static u32 acpi_device_fixed_event(void *data) 502 - { 503 - acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); 504 - return ACPI_INTERRUPT_HANDLED; 505 - } 506 - 507 - static int acpi_device_install_notify_handler(struct acpi_device *device) 508 - { 509 - acpi_status status; 510 - 511 - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) 512 - status 
= 513 - acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 514 - acpi_device_fixed_event, 515 - device); 516 - else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) 517 - status = 518 - acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 519 - acpi_device_fixed_event, 520 - device); 521 - else 522 - status = acpi_install_notify_handler(device->handle, 523 - ACPI_DEVICE_NOTIFY, 524 - acpi_device_notify, 525 - device); 526 - 527 - if (ACPI_FAILURE(status)) 528 - return -EINVAL; 529 - return 0; 530 - } 531 - 532 - static void acpi_device_remove_notify_handler(struct acpi_device *device) 533 - { 534 - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) 535 - acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 536 - acpi_device_fixed_event); 537 - else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) 538 - acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 539 - acpi_device_fixed_event); 540 - else 541 - acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, 542 - acpi_device_notify); 543 - } 544 - 545 - static int acpi_device_probe(struct device *dev) 546 - { 547 - struct acpi_device *acpi_dev = to_acpi_device(dev); 548 - struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); 549 - int ret; 550 - 551 - if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev)) 552 - return -EINVAL; 553 - 554 - if (!acpi_drv->ops.add) 555 - return -ENOSYS; 556 - 557 - ret = acpi_drv->ops.add(acpi_dev); 558 - if (ret) 559 - return ret; 560 - 561 - acpi_dev->driver = acpi_drv; 562 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 563 - "Driver [%s] successfully bound to device [%s]\n", 564 - acpi_drv->name, acpi_dev->pnp.bus_id)); 565 - 566 - if (acpi_drv->ops.notify) { 567 - ret = acpi_device_install_notify_handler(acpi_dev); 568 - if (ret) { 569 - if (acpi_drv->ops.remove) 570 - acpi_drv->ops.remove(acpi_dev); 571 - 572 - acpi_dev->driver = NULL; 573 - acpi_dev->driver_data = NULL; 574 - return ret; 575 - } 576 - } 577 - 578 - 
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n", 579 - acpi_drv->name, acpi_dev->pnp.bus_id)); 580 - get_device(dev); 581 - return 0; 582 - } 583 - 584 - static int acpi_device_remove(struct device * dev) 585 - { 586 - struct acpi_device *acpi_dev = to_acpi_device(dev); 587 - struct acpi_driver *acpi_drv = acpi_dev->driver; 588 - 589 - if (acpi_drv) { 590 - if (acpi_drv->ops.notify) 591 - acpi_device_remove_notify_handler(acpi_dev); 592 - if (acpi_drv->ops.remove) 593 - acpi_drv->ops.remove(acpi_dev); 594 - } 595 - acpi_dev->driver = NULL; 596 - acpi_dev->driver_data = NULL; 597 - 598 - put_device(dev); 599 - return 0; 600 - } 601 - 602 - struct bus_type acpi_bus_type = { 603 - .name = "acpi", 604 - .match = acpi_bus_match, 605 - .probe = acpi_device_probe, 606 - .remove = acpi_device_remove, 607 - .uevent = acpi_device_uevent, 608 - }; 609 1146 610 1147 static void acpi_device_del(struct acpi_device *device) 611 1148 { ··· 714 1527 next = child->node.next; 715 1528 return next == head ? NULL : list_entry(next, struct acpi_device, node); 716 1529 } 717 - 718 - /* -------------------------------------------------------------------------- 719 - Driver Management 720 - -------------------------------------------------------------------------- */ 721 - /** 722 - * acpi_bus_register_driver - register a driver with the ACPI bus 723 - * @driver: driver being registered 724 - * 725 - * Registers a driver with the ACPI bus. Searches the namespace for all 726 - * devices that match the driver's criteria and binds. Returns zero for 727 - * success or a negative error status for failure. 
728 - */ 729 - int acpi_bus_register_driver(struct acpi_driver *driver) 730 - { 731 - int ret; 732 - 733 - if (acpi_disabled) 734 - return -ENODEV; 735 - driver->drv.name = driver->name; 736 - driver->drv.bus = &acpi_bus_type; 737 - driver->drv.owner = driver->owner; 738 - 739 - ret = driver_register(&driver->drv); 740 - return ret; 741 - } 742 - 743 - EXPORT_SYMBOL(acpi_bus_register_driver); 744 - 745 - /** 746 - * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus 747 - * @driver: driver to unregister 748 - * 749 - * Unregisters a driver with the ACPI bus. Searches the namespace for all 750 - * devices that match the driver's criteria and unbinds. 751 - */ 752 - void acpi_bus_unregister_driver(struct acpi_driver *driver) 753 - { 754 - driver_unregister(&driver->drv); 755 - } 756 - 757 - EXPORT_SYMBOL(acpi_bus_unregister_driver); 758 1530 759 1531 /* -------------------------------------------------------------------------- 760 1532 Device Enumeration ··· 1889 2743 int __init acpi_scan_init(void) 1890 2744 { 1891 2745 int result; 1892 - 1893 - result = bus_register(&acpi_bus_type); 1894 - if (result) { 1895 - /* We don't want to quit even if we failed to add suspend/resume */ 1896 - printk(KERN_ERR PREFIX "Could not register bus type\n"); 1897 - } 1898 2746 1899 2747 acpi_pci_root_init(); 1900 2748 acpi_pci_link_init();
+43
drivers/base/core.c
··· 1252 1252 } 1253 1253 EXPORT_SYMBOL_GPL(device_unregister); 1254 1254 1255 + static struct device *prev_device(struct klist_iter *i) 1256 + { 1257 + struct klist_node *n = klist_prev(i); 1258 + struct device *dev = NULL; 1259 + struct device_private *p; 1260 + 1261 + if (n) { 1262 + p = to_device_private_parent(n); 1263 + dev = p->device; 1264 + } 1265 + return dev; 1266 + } 1267 + 1255 1268 static struct device *next_device(struct klist_iter *i) 1256 1269 { 1257 1270 struct klist_node *n = klist_next(i); ··· 1352 1339 return error; 1353 1340 } 1354 1341 EXPORT_SYMBOL_GPL(device_for_each_child); 1342 + 1343 + /** 1344 + * device_for_each_child_reverse - device child iterator in reversed order. 1345 + * @parent: parent struct device. 1346 + * @fn: function to be called for each device. 1347 + * @data: data for the callback. 1348 + * 1349 + * Iterate over @parent's child devices, and call @fn for each, 1350 + * passing it @data. 1351 + * 1352 + * We check the return of @fn each time. If it returns anything 1353 + * other than 0, we break out and return that value. 1354 + */ 1355 + int device_for_each_child_reverse(struct device *parent, void *data, 1356 + int (*fn)(struct device *dev, void *data)) 1357 + { 1358 + struct klist_iter i; 1359 + struct device *child; 1360 + int error = 0; 1361 + 1362 + if (!parent->p) 1363 + return 0; 1364 + 1365 + klist_iter_init(&parent->p->klist_children, &i); 1366 + while ((child = prev_device(&i)) && !error) 1367 + error = fn(child, data); 1368 + klist_iter_exit(&i); 1369 + return error; 1370 + } 1371 + EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 1355 1372 1356 1373 /** 1357 1374 * device_find_child - device iterator for locating a particular device.
+20
drivers/base/dd.c
··· 399 399 * 400 400 * This function must be called with @dev lock held. When called for a 401 401 * USB interface, @dev->parent lock must be held as well. 402 + * 403 + * If the device has a parent, runtime-resume the parent before driver probing. 402 404 */ 403 405 int driver_probe_device(struct device_driver *drv, struct device *dev) 404 406 { ··· 412 410 pr_debug("bus: '%s': %s: matched device %s with driver %s\n", 413 411 drv->bus->name, __func__, dev_name(dev), drv->name); 414 412 413 + if (dev->parent) 414 + pm_runtime_get_sync(dev->parent); 415 + 415 416 pm_runtime_barrier(dev); 416 417 ret = really_probe(dev, drv); 417 418 pm_request_idle(dev); 419 + 420 + if (dev->parent) 421 + pm_runtime_put(dev->parent); 418 422 419 423 return ret; 420 424 } ··· 515 507 516 508 device_lock(dev); 517 509 510 + if (dev->parent) 511 + pm_runtime_get_sync(dev->parent); 512 + 518 513 bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); 519 514 dev_dbg(dev, "async probe completed\n"); 520 515 521 516 pm_request_idle(dev); 517 + 518 + if (dev->parent) 519 + pm_runtime_put(dev->parent); 522 520 523 521 device_unlock(dev); 524 522 ··· 555 541 .want_async = false, 556 542 }; 557 543 544 + if (dev->parent) 545 + pm_runtime_get_sync(dev->parent); 546 + 558 547 ret = bus_for_each_drv(dev->bus, NULL, &data, 559 548 __device_attach_driver); 560 549 if (!ret && allow_async && data.have_async) { ··· 574 557 } else { 575 558 pm_request_idle(dev); 576 559 } 560 + 561 + if (dev->parent) 562 + pm_runtime_put(dev->parent); 577 563 } 578 564 out_unlock: 579 565 device_unlock(dev);
+2
drivers/base/power/power.h
··· 73 73 extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); 74 74 extern int pm_qos_sysfs_add_flags(struct device *dev); 75 75 extern void pm_qos_sysfs_remove_flags(struct device *dev); 76 + extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev); 77 + extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev); 76 78 77 79 #else /* CONFIG_PM */ 78 80
+37
drivers/base/power/qos.c
··· 883 883 mutex_unlock(&dev_pm_qos_mtx); 884 884 return ret; 885 885 } 886 + 887 + /** 888 + * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace 889 + * @dev: Device whose latency tolerance to expose 890 + */ 891 + int dev_pm_qos_expose_latency_tolerance(struct device *dev) 892 + { 893 + int ret; 894 + 895 + if (!dev->power.set_latency_tolerance) 896 + return -EINVAL; 897 + 898 + mutex_lock(&dev_pm_qos_sysfs_mtx); 899 + ret = pm_qos_sysfs_add_latency_tolerance(dev); 900 + mutex_unlock(&dev_pm_qos_sysfs_mtx); 901 + 902 + return ret; 903 + } 904 + EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance); 905 + 906 + /** 907 + * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace 908 + * @dev: Device whose latency tolerance to hide 909 + */ 910 + void dev_pm_qos_hide_latency_tolerance(struct device *dev) 911 + { 912 + mutex_lock(&dev_pm_qos_sysfs_mtx); 913 + pm_qos_sysfs_remove_latency_tolerance(dev); 914 + mutex_unlock(&dev_pm_qos_sysfs_mtx); 915 + 916 + /* Remove the request from user space now */ 917 + pm_runtime_get_sync(dev); 918 + dev_pm_qos_update_user_latency_tolerance(dev, 919 + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT); 920 + pm_runtime_put(dev); 921 + } 922 + EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
+11
drivers/base/power/sysfs.c
··· 738 738 sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); 739 739 } 740 740 741 + int pm_qos_sysfs_add_latency_tolerance(struct device *dev) 742 + { 743 + return sysfs_merge_group(&dev->kobj, 744 + &pm_qos_latency_tolerance_attr_group); 745 + } 746 + 747 + void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) 748 + { 749 + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 750 + } 751 + 741 752 void rpm_sysfs_remove(struct device *dev) 742 753 { 743 754 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+8
drivers/dma/Kconfig
··· 85 85 help 86 86 Enable support for the Intel(R) IOP Series RAID engines. 87 87 88 + config IDMA64 89 + tristate "Intel integrated DMA 64-bit support" 90 + select DMA_ENGINE 91 + select DMA_VIRTUAL_CHANNELS 92 + help 93 + Enable DMA support for Intel Low Power Subsystem such as found on 94 + Intel Skylake PCH. 95 + 88 96 source "drivers/dma/dw/Kconfig" 89 97 90 98 config AT_HDMAC
+1
drivers/dma/Makefile
··· 14 14 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o 15 15 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ 16 16 obj-$(CONFIG_MV_XOR) += mv_xor.o 17 + obj-$(CONFIG_IDMA64) += idma64.o 17 18 obj-$(CONFIG_DW_DMAC_CORE) += dw/ 18 19 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 19 20 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
+710
drivers/dma/idma64.c
··· 1 + /* 2 + * Core driver for the Intel integrated DMA 64-bit 3 + * 4 + * Copyright (C) 2015 Intel Corporation 5 + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/bitops.h> 13 + #include <linux/delay.h> 14 + #include <linux/dmaengine.h> 15 + #include <linux/dma-mapping.h> 16 + #include <linux/dmapool.h> 17 + #include <linux/init.h> 18 + #include <linux/module.h> 19 + #include <linux/platform_device.h> 20 + #include <linux/slab.h> 21 + 22 + #include "idma64.h" 23 + 24 + /* Platform driver name */ 25 + #define DRV_NAME "idma64" 26 + 27 + /* For now we support only two channels */ 28 + #define IDMA64_NR_CHAN 2 29 + 30 + /* ---------------------------------------------------------------------- */ 31 + 32 + static struct device *chan2dev(struct dma_chan *chan) 33 + { 34 + return &chan->dev->device; 35 + } 36 + 37 + /* ---------------------------------------------------------------------- */ 38 + 39 + static void idma64_off(struct idma64 *idma64) 40 + { 41 + unsigned short count = 100; 42 + 43 + dma_writel(idma64, CFG, 0); 44 + 45 + channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask); 46 + channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask); 47 + channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask); 48 + channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask); 49 + channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask); 50 + 51 + do { 52 + cpu_relax(); 53 + } while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count); 54 + } 55 + 56 + static void idma64_on(struct idma64 *idma64) 57 + { 58 + dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN); 59 + } 60 + 61 + /* ---------------------------------------------------------------------- */ 62 + 63 + static void idma64_chan_init(struct 
idma64 *idma64, struct idma64_chan *idma64c) 64 + { 65 + u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0); 66 + u32 cfglo = 0; 67 + 68 + /* Enforce FIFO drain when channel is suspended */ 69 + cfglo |= IDMA64C_CFGL_CH_DRAIN; 70 + 71 + /* Set default burst alignment */ 72 + cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN; 73 + 74 + channel_writel(idma64c, CFG_LO, cfglo); 75 + channel_writel(idma64c, CFG_HI, cfghi); 76 + 77 + /* Enable interrupts */ 78 + channel_set_bit(idma64, MASK(XFER), idma64c->mask); 79 + channel_set_bit(idma64, MASK(ERROR), idma64c->mask); 80 + 81 + /* 82 + * Enforce the controller to be turned on. 83 + * 84 + * The iDMA is turned off in ->probe() and loses context during system 85 + * suspend / resume cycle. That's why we have to enable it each time we 86 + * use it. 87 + */ 88 + idma64_on(idma64); 89 + } 90 + 91 + static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c) 92 + { 93 + channel_clear_bit(idma64, CH_EN, idma64c->mask); 94 + } 95 + 96 + static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c) 97 + { 98 + struct idma64_desc *desc = idma64c->desc; 99 + struct idma64_hw_desc *hw = &desc->hw[0]; 100 + 101 + channel_writeq(idma64c, SAR, 0); 102 + channel_writeq(idma64c, DAR, 0); 103 + 104 + channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL)); 105 + channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN); 106 + 107 + channel_writeq(idma64c, LLP, hw->llp); 108 + 109 + channel_set_bit(idma64, CH_EN, idma64c->mask); 110 + } 111 + 112 + static void idma64_stop_transfer(struct idma64_chan *idma64c) 113 + { 114 + struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device); 115 + 116 + idma64_chan_stop(idma64, idma64c); 117 + } 118 + 119 + static void idma64_start_transfer(struct idma64_chan *idma64c) 120 + { 121 + struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device); 122 + struct virt_dma_desc *vdesc; 123 + 124 + /* Get
the next descriptor */ 125 + vdesc = vchan_next_desc(&idma64c->vchan); 126 + if (!vdesc) { 127 + idma64c->desc = NULL; 128 + return; 129 + } 130 + 131 + list_del(&vdesc->node); 132 + idma64c->desc = to_idma64_desc(vdesc); 133 + 134 + /* Configure the channel */ 135 + idma64_chan_init(idma64, idma64c); 136 + 137 + /* Start the channel with a new descriptor */ 138 + idma64_chan_start(idma64, idma64c); 139 + } 140 + 141 + /* ---------------------------------------------------------------------- */ 142 + 143 + static void idma64_chan_irq(struct idma64 *idma64, unsigned short c, 144 + u32 status_err, u32 status_xfer) 145 + { 146 + struct idma64_chan *idma64c = &idma64->chan[c]; 147 + struct idma64_desc *desc; 148 + unsigned long flags; 149 + 150 + spin_lock_irqsave(&idma64c->vchan.lock, flags); 151 + desc = idma64c->desc; 152 + if (desc) { 153 + if (status_err & (1 << c)) { 154 + dma_writel(idma64, CLEAR(ERROR), idma64c->mask); 155 + desc->status = DMA_ERROR; 156 + } else if (status_xfer & (1 << c)) { 157 + dma_writel(idma64, CLEAR(XFER), idma64c->mask); 158 + desc->status = DMA_COMPLETE; 159 + vchan_cookie_complete(&desc->vdesc); 160 + idma64_start_transfer(idma64c); 161 + } 162 + 163 + /* idma64_start_transfer() updates idma64c->desc */ 164 + if (idma64c->desc == NULL || desc->status == DMA_ERROR) 165 + idma64_stop_transfer(idma64c); 166 + } 167 + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); 168 + } 169 + 170 + static irqreturn_t idma64_irq(int irq, void *dev) 171 + { 172 + struct idma64 *idma64 = dev; 173 + u32 status = dma_readl(idma64, STATUS_INT); 174 + u32 status_xfer; 175 + u32 status_err; 176 + unsigned short i; 177 + 178 + dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status); 179 + 180 + /* Check if we have any interrupt from the DMA controller */ 181 + if (!status) 182 + return IRQ_NONE; 183 + 184 + /* Disable interrupts */ 185 + channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask); 186 + channel_clear_bit(idma64, MASK(ERROR), 
idma64->all_chan_mask); 187 + 188 + status_xfer = dma_readl(idma64, RAW(XFER)); 189 + status_err = dma_readl(idma64, RAW(ERROR)); 190 + 191 + for (i = 0; i < idma64->dma.chancnt; i++) 192 + idma64_chan_irq(idma64, i, status_err, status_xfer); 193 + 194 + /* Re-enable interrupts */ 195 + channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask); 196 + channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask); 197 + 198 + return IRQ_HANDLED; 199 + } 200 + 201 + /* ---------------------------------------------------------------------- */ 202 + 203 + static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc) 204 + { 205 + struct idma64_desc *desc; 206 + 207 + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); 208 + if (!desc) 209 + return NULL; 210 + 211 + desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT); 212 + if (!desc->hw) { 213 + kfree(desc); 214 + return NULL; 215 + } 216 + 217 + return desc; 218 + } 219 + 220 + static void idma64_desc_free(struct idma64_chan *idma64c, 221 + struct idma64_desc *desc) 222 + { 223 + struct idma64_hw_desc *hw; 224 + 225 + if (desc->ndesc) { 226 + unsigned int i = desc->ndesc; 227 + 228 + do { 229 + hw = &desc->hw[--i]; 230 + dma_pool_free(idma64c->pool, hw->lli, hw->llp); 231 + } while (i); 232 + } 233 + 234 + kfree(desc->hw); 235 + kfree(desc); 236 + } 237 + 238 + static void idma64_vdesc_free(struct virt_dma_desc *vdesc) 239 + { 240 + struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan); 241 + 242 + idma64_desc_free(idma64c, to_idma64_desc(vdesc)); 243 + } 244 + 245 + static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw, 246 + struct dma_slave_config *config, 247 + enum dma_transfer_direction direction, u64 llp) 248 + { 249 + struct idma64_lli *lli = hw->lli; 250 + u64 sar, dar; 251 + u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len); 252 + u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN; 253 + u32 src_width, dst_width; 254 + 255 + if (direction == DMA_MEM_TO_DEV) { 256 + sar = hw->phys; 257 + dar = 
config->dst_addr; 258 + ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC | 259 + IDMA64C_CTLL_FC_M2P; 260 + src_width = min_t(u32, 2, __fls(sar | hw->len)); 261 + dst_width = __fls(config->dst_addr_width); 262 + } else { /* DMA_DEV_TO_MEM */ 263 + sar = config->src_addr; 264 + dar = hw->phys; 265 + ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX | 266 + IDMA64C_CTLL_FC_P2M; 267 + src_width = __fls(config->src_addr_width); 268 + dst_width = min_t(u32, 2, __fls(dar | hw->len)); 269 + } 270 + 271 + lli->sar = sar; 272 + lli->dar = dar; 273 + 274 + lli->ctlhi = ctlhi; 275 + lli->ctllo = ctllo | 276 + IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) | 277 + IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) | 278 + IDMA64C_CTLL_DST_WIDTH(dst_width) | 279 + IDMA64C_CTLL_SRC_WIDTH(src_width); 280 + 281 + lli->llp = llp; 282 + return hw->llp; 283 + } 284 + 285 + static void idma64_desc_fill(struct idma64_chan *idma64c, 286 + struct idma64_desc *desc) 287 + { 288 + struct dma_slave_config *config = &idma64c->config; 289 + struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1]; 290 + struct idma64_lli *lli = hw->lli; 291 + u64 llp = 0; 292 + unsigned int i = desc->ndesc; 293 + 294 + /* Fill the hardware descriptors and link them to a list */ 295 + do { 296 + hw = &desc->hw[--i]; 297 + llp = idma64_hw_desc_fill(hw, config, desc->direction, llp); 298 + desc->length += hw->len; 299 + } while (i); 300 + 301 + /* Trigger interrupt after last block */ 302 + lli->ctllo |= IDMA64C_CTLL_INT_EN; 303 + } 304 + 305 + static struct dma_async_tx_descriptor *idma64_prep_slave_sg( 306 + struct dma_chan *chan, struct scatterlist *sgl, 307 + unsigned int sg_len, enum dma_transfer_direction direction, 308 + unsigned long flags, void *context) 309 + { 310 + struct idma64_chan *idma64c = to_idma64_chan(chan); 311 + struct idma64_desc *desc; 312 + struct scatterlist *sg; 313 + unsigned int i; 314 + 315 + desc = idma64_alloc_desc(sg_len); 316 + if (!desc) 317 + return NULL; 318 + 319 + 
for_each_sg(sgl, sg, sg_len, i) { 320 + struct idma64_hw_desc *hw = &desc->hw[i]; 321 + 322 + /* Allocate DMA capable memory for hardware descriptor */ 323 + hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp); 324 + if (!hw->lli) { 325 + desc->ndesc = i; 326 + idma64_desc_free(idma64c, desc); 327 + return NULL; 328 + } 329 + 330 + hw->phys = sg_dma_address(sg); 331 + hw->len = sg_dma_len(sg); 332 + } 333 + 334 + desc->ndesc = sg_len; 335 + desc->direction = direction; 336 + desc->status = DMA_IN_PROGRESS; 337 + 338 + idma64_desc_fill(idma64c, desc); 339 + return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags); 340 + } 341 + 342 + static void idma64_issue_pending(struct dma_chan *chan) 343 + { 344 + struct idma64_chan *idma64c = to_idma64_chan(chan); 345 + unsigned long flags; 346 + 347 + spin_lock_irqsave(&idma64c->vchan.lock, flags); 348 + if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc) 349 + idma64_start_transfer(idma64c); 350 + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); 351 + } 352 + 353 + static size_t idma64_active_desc_size(struct idma64_chan *idma64c) 354 + { 355 + struct idma64_desc *desc = idma64c->desc; 356 + struct idma64_hw_desc *hw; 357 + size_t bytes = desc->length; 358 + u64 llp; 359 + u32 ctlhi; 360 + unsigned int i = 0; 361 + 362 + llp = channel_readq(idma64c, LLP); 363 + do { 364 + hw = &desc->hw[i]; 365 + } while ((hw->llp != llp) && (++i < desc->ndesc)); 366 + 367 + if (!i) 368 + return bytes; 369 + 370 + do { 371 + bytes -= desc->hw[--i].len; 372 + } while (i); 373 + 374 + ctlhi = channel_readl(idma64c, CTL_HI); 375 + return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); 376 + } 377 + 378 + static enum dma_status idma64_tx_status(struct dma_chan *chan, 379 + dma_cookie_t cookie, struct dma_tx_state *state) 380 + { 381 + struct idma64_chan *idma64c = to_idma64_chan(chan); 382 + struct virt_dma_desc *vdesc; 383 + enum dma_status status; 384 + size_t bytes; 385 + unsigned long flags; 386 + 387 + status = 
dma_cookie_status(chan, cookie, state); 388 + if (status == DMA_COMPLETE) 389 + return status; 390 + 391 + spin_lock_irqsave(&idma64c->vchan.lock, flags); 392 + vdesc = vchan_find_desc(&idma64c->vchan, cookie); 393 + if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) { 394 + bytes = idma64_active_desc_size(idma64c); 395 + dma_set_residue(state, bytes); 396 + status = idma64c->desc->status; 397 + } else if (vdesc) { 398 + bytes = to_idma64_desc(vdesc)->length; 399 + dma_set_residue(state, bytes); 400 + } 401 + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); 402 + 403 + return status; 404 + } 405 + 406 + static void convert_burst(u32 *maxburst) 407 + { 408 + if (*maxburst) 409 + *maxburst = __fls(*maxburst); 410 + else 411 + *maxburst = 0; 412 + } 413 + 414 + static int idma64_slave_config(struct dma_chan *chan, 415 + struct dma_slave_config *config) 416 + { 417 + struct idma64_chan *idma64c = to_idma64_chan(chan); 418 + 419 + /* Check if chan will be configured for slave transfers */ 420 + if (!is_slave_direction(config->direction)) 421 + return -EINVAL; 422 + 423 + memcpy(&idma64c->config, config, sizeof(idma64c->config)); 424 + 425 + convert_burst(&idma64c->config.src_maxburst); 426 + convert_burst(&idma64c->config.dst_maxburst); 427 + 428 + return 0; 429 + } 430 + 431 + static void idma64_chan_deactivate(struct idma64_chan *idma64c) 432 + { 433 + unsigned short count = 100; 434 + u32 cfglo; 435 + 436 + cfglo = channel_readl(idma64c, CFG_LO); 437 + channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP); 438 + do { 439 + udelay(1); 440 + cfglo = channel_readl(idma64c, CFG_LO); 441 + } while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count); 442 + } 443 + 444 + static void idma64_chan_activate(struct idma64_chan *idma64c) 445 + { 446 + u32 cfglo; 447 + 448 + cfglo = channel_readl(idma64c, CFG_LO); 449 + channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP); 450 + } 451 + 452 + static int idma64_pause(struct dma_chan *chan) 453 + { 454 
+ struct idma64_chan *idma64c = to_idma64_chan(chan); 455 + unsigned long flags; 456 + 457 + spin_lock_irqsave(&idma64c->vchan.lock, flags); 458 + if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) { 459 + idma64_chan_deactivate(idma64c); 460 + idma64c->desc->status = DMA_PAUSED; 461 + } 462 + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); 463 + 464 + return 0; 465 + } 466 + 467 + static int idma64_resume(struct dma_chan *chan) 468 + { 469 + struct idma64_chan *idma64c = to_idma64_chan(chan); 470 + unsigned long flags; 471 + 472 + spin_lock_irqsave(&idma64c->vchan.lock, flags); 473 + if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) { 474 + idma64c->desc->status = DMA_IN_PROGRESS; 475 + idma64_chan_activate(idma64c); 476 + } 477 + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); 478 + 479 + return 0; 480 + } 481 + 482 + static int idma64_terminate_all(struct dma_chan *chan) 483 + { 484 + struct idma64_chan *idma64c = to_idma64_chan(chan); 485 + unsigned long flags; 486 + LIST_HEAD(head); 487 + 488 + spin_lock_irqsave(&idma64c->vchan.lock, flags); 489 + idma64_chan_deactivate(idma64c); 490 + idma64_stop_transfer(idma64c); 491 + if (idma64c->desc) { 492 + idma64_vdesc_free(&idma64c->desc->vdesc); 493 + idma64c->desc = NULL; 494 + } 495 + vchan_get_all_descriptors(&idma64c->vchan, &head); 496 + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); 497 + 498 + vchan_dma_desc_free_list(&idma64c->vchan, &head); 499 + return 0; 500 + } 501 + 502 + static int idma64_alloc_chan_resources(struct dma_chan *chan) 503 + { 504 + struct idma64_chan *idma64c = to_idma64_chan(chan); 505 + 506 + /* Create a pool of consistent memory blocks for hardware descriptors */ 507 + idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)), 508 + chan->device->dev, 509 + sizeof(struct idma64_lli), 8, 0); 510 + if (!idma64c->pool) { 511 + dev_err(chan2dev(chan), "No memory for descriptors\n"); 512 + return -ENOMEM; 513 + } 514 + 515 + return 0; 516 + } 517 + 518 + 
static void idma64_free_chan_resources(struct dma_chan *chan) 519 + { 520 + struct idma64_chan *idma64c = to_idma64_chan(chan); 521 + 522 + vchan_free_chan_resources(to_virt_chan(chan)); 523 + dma_pool_destroy(idma64c->pool); 524 + idma64c->pool = NULL; 525 + } 526 + 527 + /* ---------------------------------------------------------------------- */ 528 + 529 + #define IDMA64_BUSWIDTHS \ 530 + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 531 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 532 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 533 + 534 + static int idma64_probe(struct idma64_chip *chip) 535 + { 536 + struct idma64 *idma64; 537 + unsigned short nr_chan = IDMA64_NR_CHAN; 538 + unsigned short i; 539 + int ret; 540 + 541 + idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL); 542 + if (!idma64) 543 + return -ENOMEM; 544 + 545 + idma64->regs = chip->regs; 546 + chip->idma64 = idma64; 547 + 548 + idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan), 549 + GFP_KERNEL); 550 + if (!idma64->chan) 551 + return -ENOMEM; 552 + 553 + idma64->all_chan_mask = (1 << nr_chan) - 1; 554 + 555 + /* Turn off iDMA controller */ 556 + idma64_off(idma64); 557 + 558 + ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED, 559 + dev_name(chip->dev), idma64); 560 + if (ret) 561 + return ret; 562 + 563 + INIT_LIST_HEAD(&idma64->dma.channels); 564 + for (i = 0; i < nr_chan; i++) { 565 + struct idma64_chan *idma64c = &idma64->chan[i]; 566 + 567 + idma64c->vchan.desc_free = idma64_vdesc_free; 568 + vchan_init(&idma64c->vchan, &idma64->dma); 569 + 570 + idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH; 571 + idma64c->mask = BIT(i); 572 + } 573 + 574 + dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask); 575 + dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask); 576 + 577 + idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources; 578 + idma64->dma.device_free_chan_resources = idma64_free_chan_resources; 579 + 580 + idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg; 
581 + 582 + idma64->dma.device_issue_pending = idma64_issue_pending; 583 + idma64->dma.device_tx_status = idma64_tx_status; 584 + 585 + idma64->dma.device_config = idma64_slave_config; 586 + idma64->dma.device_pause = idma64_pause; 587 + idma64->dma.device_resume = idma64_resume; 588 + idma64->dma.device_terminate_all = idma64_terminate_all; 589 + 590 + idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS; 591 + idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS; 592 + idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 593 + idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 594 + 595 + idma64->dma.dev = chip->dev; 596 + 597 + ret = dma_async_device_register(&idma64->dma); 598 + if (ret) 599 + return ret; 600 + 601 + dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n"); 602 + return 0; 603 + } 604 + 605 + static int idma64_remove(struct idma64_chip *chip) 606 + { 607 + struct idma64 *idma64 = chip->idma64; 608 + unsigned short i; 609 + 610 + dma_async_device_unregister(&idma64->dma); 611 + 612 + /* 613 + * Explicitly call devm_free_irq() to avoid the side effects with 614 + * the scheduled tasklets.
615 + */ 616 + devm_free_irq(chip->dev, chip->irq, idma64); 617 + 618 + for (i = 0; i < idma64->dma.chancnt; i++) { 619 + struct idma64_chan *idma64c = &idma64->chan[i]; 620 + 621 + tasklet_kill(&idma64c->vchan.task); 622 + } 623 + 624 + return 0; 625 + } 626 + 627 + /* ---------------------------------------------------------------------- */ 628 + 629 + static int idma64_platform_probe(struct platform_device *pdev) 630 + { 631 + struct idma64_chip *chip; 632 + struct device *dev = &pdev->dev; 633 + struct resource *mem; 634 + int ret; 635 + 636 + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); 637 + if (!chip) 638 + return -ENOMEM; 639 + 640 + chip->irq = platform_get_irq(pdev, 0); 641 + if (chip->irq < 0) 642 + return chip->irq; 643 + 644 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 645 + chip->regs = devm_ioremap_resource(dev, mem); 646 + if (IS_ERR(chip->regs)) 647 + return PTR_ERR(chip->regs); 648 + 649 + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 650 + if (ret) 651 + return ret; 652 + 653 + chip->dev = dev; 654 + 655 + ret = idma64_probe(chip); 656 + if (ret) 657 + return ret; 658 + 659 + platform_set_drvdata(pdev, chip); 660 + return 0; 661 + } 662 + 663 + static int idma64_platform_remove(struct platform_device *pdev) 664 + { 665 + struct idma64_chip *chip = platform_get_drvdata(pdev); 666 + 667 + return idma64_remove(chip); 668 + } 669 + 670 + #ifdef CONFIG_PM_SLEEP 671 + 672 + static int idma64_pm_suspend(struct device *dev) 673 + { 674 + struct platform_device *pdev = to_platform_device(dev); 675 + struct idma64_chip *chip = platform_get_drvdata(pdev); 676 + 677 + idma64_off(chip->idma64); 678 + return 0; 679 + } 680 + 681 + static int idma64_pm_resume(struct device *dev) 682 + { 683 + struct platform_device *pdev = to_platform_device(dev); 684 + struct idma64_chip *chip = platform_get_drvdata(pdev); 685 + 686 + idma64_on(chip->idma64); 687 + return 0; 688 + } 689 + 690 + #endif /* CONFIG_PM_SLEEP */ 691 + 692 + 
static const struct dev_pm_ops idma64_dev_pm_ops = { 693 + SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume) 694 + }; 695 + 696 + static struct platform_driver idma64_platform_driver = { 697 + .probe = idma64_platform_probe, 698 + .remove = idma64_platform_remove, 699 + .driver = { 700 + .name = DRV_NAME, 701 + .pm = &idma64_dev_pm_ops, 702 + }, 703 + }; 704 + 705 + module_platform_driver(idma64_platform_driver); 706 + 707 + MODULE_LICENSE("GPL v2"); 708 + MODULE_DESCRIPTION("iDMA64 core driver"); 709 + MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); 710 + MODULE_ALIAS("platform:" DRV_NAME);
+233
drivers/dma/idma64.h
··· 1 + /* 2 + * Driver for the Intel integrated DMA 64-bit 3 + * 4 + * Copyright (C) 2015 Intel Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #ifndef __DMA_IDMA64_H__ 12 + #define __DMA_IDMA64_H__ 13 + 14 + #include <linux/device.h> 15 + #include <linux/io.h> 16 + #include <linux/spinlock.h> 17 + #include <linux/types.h> 18 + 19 + #include "virt-dma.h" 20 + 21 + /* Channel registers */ 22 + 23 + #define IDMA64_CH_SAR 0x00 /* Source Address Register */ 24 + #define IDMA64_CH_DAR 0x08 /* Destination Address Register */ 25 + #define IDMA64_CH_LLP 0x10 /* Linked List Pointer */ 26 + #define IDMA64_CH_CTL_LO 0x18 /* Control Register Low */ 27 + #define IDMA64_CH_CTL_HI 0x1c /* Control Register High */ 28 + #define IDMA64_CH_SSTAT 0x20 29 + #define IDMA64_CH_DSTAT 0x28 30 + #define IDMA64_CH_SSTATAR 0x30 31 + #define IDMA64_CH_DSTATAR 0x38 32 + #define IDMA64_CH_CFG_LO 0x40 /* Configuration Register Low */ 33 + #define IDMA64_CH_CFG_HI 0x44 /* Configuration Register High */ 34 + #define IDMA64_CH_SGR 0x48 35 + #define IDMA64_CH_DSR 0x50 36 + 37 + #define IDMA64_CH_LENGTH 0x58 38 + 39 + /* Bitfields in CTL_LO */ 40 + #define IDMA64C_CTLL_INT_EN (1 << 0) /* irqs enabled? 
*/ 41 + #define IDMA64C_CTLL_DST_WIDTH(x) ((x) << 1) /* bytes per element */ 42 + #define IDMA64C_CTLL_SRC_WIDTH(x) ((x) << 4) 43 + #define IDMA64C_CTLL_DST_INC (0 << 8) /* DAR update/not */ 44 + #define IDMA64C_CTLL_DST_FIX (1 << 8) 45 + #define IDMA64C_CTLL_SRC_INC (0 << 10) /* SAR update/not */ 46 + #define IDMA64C_CTLL_SRC_FIX (1 << 10) 47 + #define IDMA64C_CTLL_DST_MSIZE(x) ((x) << 11) /* burst, #elements */ 48 + #define IDMA64C_CTLL_SRC_MSIZE(x) ((x) << 14) 49 + #define IDMA64C_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ 50 + #define IDMA64C_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ 51 + #define IDMA64C_CTLL_LLP_D_EN (1 << 27) /* dest block chain */ 52 + #define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */ 53 + 54 + /* Bitfields in CTL_HI */ 55 + #define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1)) 56 + #define IDMA64C_CTLH_DONE (1 << 17) 57 + 58 + /* Bitfields in CFG_LO */ 59 + #define IDMA64C_CFGL_DST_BURST_ALIGN (1 << 0) /* dst burst align */ 60 + #define IDMA64C_CFGL_SRC_BURST_ALIGN (1 << 1) /* src burst align */ 61 + #define IDMA64C_CFGL_CH_SUSP (1 << 8) 62 + #define IDMA64C_CFGL_FIFO_EMPTY (1 << 9) 63 + #define IDMA64C_CFGL_CH_DRAIN (1 << 10) /* drain FIFO */ 64 + #define IDMA64C_CFGL_DST_OPT_BL (1 << 20) /* optimize dst burst length */ 65 + #define IDMA64C_CFGL_SRC_OPT_BL (1 << 21) /* optimize src burst length */ 66 + 67 + /* Bitfields in CFG_HI */ 68 + #define IDMA64C_CFGH_SRC_PER(x) ((x) << 0) /* src peripheral */ 69 + #define IDMA64C_CFGH_DST_PER(x) ((x) << 4) /* dst peripheral */ 70 + #define IDMA64C_CFGH_RD_ISSUE_THD(x) ((x) << 8) 71 + #define IDMA64C_CFGH_RW_ISSUE_THD(x) ((x) << 18) 72 + 73 + /* Interrupt registers */ 74 + 75 + #define IDMA64_INT_XFER 0x00 76 + #define IDMA64_INT_BLOCK 0x08 77 + #define IDMA64_INT_SRC_TRAN 0x10 78 + #define IDMA64_INT_DST_TRAN 0x18 79 + #define IDMA64_INT_ERROR 0x20 80 + 81 + #define IDMA64_RAW(x) (0x2c0 + IDMA64_INT_##x) /* r */ 82 + #define IDMA64_STATUS(x) (0x2e8 + IDMA64_INT_##x) /* r (raw & mask) 
*/ 83 + #define IDMA64_MASK(x) (0x310 + IDMA64_INT_##x) /* rw (set = irq enabled) */ 84 + #define IDMA64_CLEAR(x) (0x338 + IDMA64_INT_##x) /* w (ack, affects "raw") */ 85 + 86 + /* Common registers */ 87 + 88 + #define IDMA64_STATUS_INT 0x360 /* r */ 89 + #define IDMA64_CFG 0x398 90 + #define IDMA64_CH_EN 0x3a0 91 + 92 + /* Bitfields in CFG */ 93 + #define IDMA64_CFG_DMA_EN (1 << 0) 94 + 95 + /* Hardware descriptor for Linked List transfers */ 96 + struct idma64_lli { 97 + u64 sar; 98 + u64 dar; 99 + u64 llp; 100 + u32 ctllo; 101 + u32 ctlhi; 102 + u32 sstat; 103 + u32 dstat; 104 + }; 105 + 106 + struct idma64_hw_desc { 107 + struct idma64_lli *lli; 108 + dma_addr_t llp; 109 + dma_addr_t phys; 110 + unsigned int len; 111 + }; 112 + 113 + struct idma64_desc { 114 + struct virt_dma_desc vdesc; 115 + enum dma_transfer_direction direction; 116 + struct idma64_hw_desc *hw; 117 + unsigned int ndesc; 118 + size_t length; 119 + enum dma_status status; 120 + }; 121 + 122 + static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc) 123 + { 124 + return container_of(vdesc, struct idma64_desc, vdesc); 125 + } 126 + 127 + struct idma64_chan { 128 + struct virt_dma_chan vchan; 129 + 130 + void __iomem *regs; 131 + 132 + /* hardware configuration */ 133 + enum dma_transfer_direction direction; 134 + unsigned int mask; 135 + struct dma_slave_config config; 136 + 137 + void *pool; 138 + struct idma64_desc *desc; 139 + }; 140 + 141 + static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan) 142 + { 143 + return container_of(chan, struct idma64_chan, vchan.chan); 144 + } 145 + 146 + #define channel_set_bit(idma64, reg, mask) \ 147 + dma_writel(idma64, reg, ((mask) << 8) | (mask)) 148 + #define channel_clear_bit(idma64, reg, mask) \ 149 + dma_writel(idma64, reg, ((mask) << 8) | 0) 150 + 151 + static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset) 152 + { 153 + return readl(idma64c->regs + offset); 154 + } 155 + 156 + static inline
void idma64c_writel(struct idma64_chan *idma64c, int offset, 157 + u32 value) 158 + { 159 + writel(value, idma64c->regs + offset); 160 + } 161 + 162 + #define channel_readl(idma64c, reg) \ 163 + idma64c_readl(idma64c, IDMA64_CH_##reg) 164 + #define channel_writel(idma64c, reg, value) \ 165 + idma64c_writel(idma64c, IDMA64_CH_##reg, (value)) 166 + 167 + static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset) 168 + { 169 + u64 l, h; 170 + 171 + l = idma64c_readl(idma64c, offset); 172 + h = idma64c_readl(idma64c, offset + 4); 173 + 174 + return l | (h << 32); 175 + } 176 + 177 + static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset, 178 + u64 value) 179 + { 180 + idma64c_writel(idma64c, offset, value); 181 + idma64c_writel(idma64c, offset + 4, value >> 32); 182 + } 183 + 184 + #define channel_readq(idma64c, reg) \ 185 + idma64c_readq(idma64c, IDMA64_CH_##reg) 186 + #define channel_writeq(idma64c, reg, value) \ 187 + idma64c_writeq(idma64c, IDMA64_CH_##reg, (value)) 188 + 189 + struct idma64 { 190 + struct dma_device dma; 191 + 192 + void __iomem *regs; 193 + 194 + /* channels */ 195 + unsigned short all_chan_mask; 196 + struct idma64_chan *chan; 197 + }; 198 + 199 + static inline struct idma64 *to_idma64(struct dma_device *ddev) 200 + { 201 + return container_of(ddev, struct idma64, dma); 202 + } 203 + 204 + static inline u32 idma64_readl(struct idma64 *idma64, int offset) 205 + { 206 + return readl(idma64->regs + offset); 207 + } 208 + 209 + static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) 210 + { 211 + writel(value, idma64->regs + offset); 212 + } 213 + 214 + #define dma_readl(idma64, reg) \ 215 + idma64_readl(idma64, IDMA64_##reg) 216 + #define dma_writel(idma64, reg, value) \ 217 + idma64_writel(idma64, IDMA64_##reg, (value)) 218 + 219 + /** 220 + * struct idma64_chip - representation of DesignWare DMA controller hardware 221 + * @dev: struct device of the DMA controller 222 + * @irq: irq line 223 
+ * @regs: memory mapped I/O space 224 + * @idma64: struct idma64 that is filled by idma64_probe() 225 + */ 226 + struct idma64_chip { 227 + struct device *dev; 228 + int irq; 229 + void __iomem *regs; 230 + struct idma64 *idma64; 231 + }; 232 + 233 + #endif /* __DMA_IDMA64_H__ */
+23
drivers/mfd/Kconfig
··· 328 328 thermal, charger and related power management functions 329 329 on these systems. 330 330 331 + config MFD_INTEL_LPSS 332 + tristate 333 + select COMMON_CLK 334 + select MFD_CORE 335 + 336 + config MFD_INTEL_LPSS_ACPI 337 + tristate "Intel Low Power Subsystem support in ACPI mode" 338 + select MFD_INTEL_LPSS 339 + depends on X86 && ACPI 340 + help 341 + This driver supports Intel Low Power Subsystem (LPSS) devices such as 342 + I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake 343 + PCH) in ACPI mode. 344 + 345 + config MFD_INTEL_LPSS_PCI 346 + tristate "Intel Low Power Subsystem support in PCI mode" 347 + select MFD_INTEL_LPSS 348 + depends on X86 && PCI 349 + help 350 + This driver supports Intel Low Power Subsystem (LPSS) devices such as 351 + I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake 352 + PCH) in PCI mode. 353 + 331 354 config MFD_INTEL_MSIC 332 355 bool "Intel MSIC" 333 356 depends on INTEL_SCU_IPC
+3
drivers/mfd/Makefile
··· 161 161 obj-$(CONFIG_MFD_TPS65090) += tps65090.o 162 162 obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o 163 163 obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o 164 + obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o 165 + obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o 166 + obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o 164 167 obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o 165 168 obj-$(CONFIG_MFD_PALMAS) += palmas.o 166 169 obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
+84
drivers/mfd/intel-lpss-acpi.c
··· 1 + /* 2 + * Intel LPSS ACPI support. 3 + * 4 + * Copyright (C) 2015, Intel Corporation 5 + * 6 + * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7 + * Mika Westerberg <mika.westerberg@linux.intel.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/acpi.h> 15 + #include <linux/ioport.h> 16 + #include <linux/kernel.h> 17 + #include <linux/module.h> 18 + #include <linux/pm.h> 19 + #include <linux/pm_runtime.h> 20 + #include <linux/platform_device.h> 21 + 22 + #include "intel-lpss.h" 23 + 24 + static const struct intel_lpss_platform_info spt_info = { 25 + .clk_rate = 120000000, 26 + }; 27 + 28 + static const struct acpi_device_id intel_lpss_acpi_ids[] = { 29 + /* SPT */ 30 + { "INT3446", (kernel_ulong_t)&spt_info }, 31 + { "INT3447", (kernel_ulong_t)&spt_info }, 32 + { } 33 + }; 34 + MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids); 35 + 36 + static int intel_lpss_acpi_probe(struct platform_device *pdev) 37 + { 38 + struct intel_lpss_platform_info *info; 39 + const struct acpi_device_id *id; 40 + 41 + id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev); 42 + if (!id) 43 + return -ENODEV; 44 + 45 + info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), 46 + GFP_KERNEL); 47 + if (!info) 48 + return -ENOMEM; 49 + 50 + info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 51 + info->irq = platform_get_irq(pdev, 0); 52 + 53 + pm_runtime_set_active(&pdev->dev); 54 + pm_runtime_enable(&pdev->dev); 55 + 56 + return intel_lpss_probe(&pdev->dev, info); 57 + } 58 + 59 + static int intel_lpss_acpi_remove(struct platform_device *pdev) 60 + { 61 + intel_lpss_remove(&pdev->dev); 62 + pm_runtime_disable(&pdev->dev); 63 + 64 + return 0; 65 + } 66 + 67 + static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops); 68 + 69 + static struct platform_driver 
intel_lpss_acpi_driver = { 70 + .probe = intel_lpss_acpi_probe, 71 + .remove = intel_lpss_acpi_remove, 72 + .driver = { 73 + .name = "intel-lpss", 74 + .acpi_match_table = intel_lpss_acpi_ids, 75 + .pm = &intel_lpss_acpi_pm_ops, 76 + }, 77 + }; 78 + 79 + module_platform_driver(intel_lpss_acpi_driver); 80 + 81 + MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); 82 + MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 83 + MODULE_DESCRIPTION("Intel LPSS ACPI driver"); 84 + MODULE_LICENSE("GPL v2");
+113
drivers/mfd/intel-lpss-pci.c
··· 1 + /* 2 + * Intel LPSS PCI support. 3 + * 4 + * Copyright (C) 2015, Intel Corporation 5 + * 6 + * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7 + * Mika Westerberg <mika.westerberg@linux.intel.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/ioport.h> 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/pci.h> 18 + #include <linux/pm.h> 19 + #include <linux/pm_runtime.h> 20 + 21 + #include "intel-lpss.h" 22 + 23 + static int intel_lpss_pci_probe(struct pci_dev *pdev, 24 + const struct pci_device_id *id) 25 + { 26 + struct intel_lpss_platform_info *info; 27 + int ret; 28 + 29 + ret = pcim_enable_device(pdev); 30 + if (ret) 31 + return ret; 32 + 33 + info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), 34 + GFP_KERNEL); 35 + if (!info) 36 + return -ENOMEM; 37 + 38 + info->mem = &pdev->resource[0]; 39 + info->irq = pdev->irq; 40 + 41 + /* Probably it is enough to set this for iDMA capable devices only */ 42 + pci_set_master(pdev); 43 + 44 + ret = intel_lpss_probe(&pdev->dev, info); 45 + if (ret) 46 + return ret; 47 + 48 + pm_runtime_put(&pdev->dev); 49 + pm_runtime_allow(&pdev->dev); 50 + 51 + return 0; 52 + } 53 + 54 + static void intel_lpss_pci_remove(struct pci_dev *pdev) 55 + { 56 + pm_runtime_forbid(&pdev->dev); 57 + pm_runtime_get_sync(&pdev->dev); 58 + 59 + intel_lpss_remove(&pdev->dev); 60 + } 61 + 62 + static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops); 63 + 64 + static const struct intel_lpss_platform_info spt_info = { 65 + .clk_rate = 120000000, 66 + }; 67 + 68 + static const struct intel_lpss_platform_info spt_uart_info = { 69 + .clk_rate = 120000000, 70 + .clk_con_id = "baudclk", 71 + }; 72 + 73 + static const struct pci_device_id intel_lpss_pci_ids[] = { 74 + /* SPT-LP */ 75 + { PCI_VDEVICE(INTEL, 
0x9d27), (kernel_ulong_t)&spt_uart_info }, 76 + { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info }, 77 + { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info }, 78 + { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info }, 79 + { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_info }, 80 + { PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_info }, 81 + { PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_info }, 82 + { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_info }, 83 + { PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_info }, 84 + { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_info }, 85 + { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info }, 86 + /* SPT-H */ 87 + { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, 88 + { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info }, 89 + { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info }, 90 + { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info }, 91 + { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_info }, 92 + { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_info }, 93 + { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, 94 + { } 95 + }; 96 + MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); 97 + 98 + static struct pci_driver intel_lpss_pci_driver = { 99 + .name = "intel-lpss", 100 + .id_table = intel_lpss_pci_ids, 101 + .probe = intel_lpss_pci_probe, 102 + .remove = intel_lpss_pci_remove, 103 + .driver = { 104 + .pm = &intel_lpss_pci_pm_ops, 105 + }, 106 + }; 107 + 108 + module_pci_driver(intel_lpss_pci_driver); 109 + 110 + MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); 111 + MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 112 + MODULE_DESCRIPTION("Intel LPSS PCI driver"); 113 + MODULE_LICENSE("GPL v2");
+524
drivers/mfd/intel-lpss.c
··· 1 + /* 2 + * Intel Sunrisepoint LPSS core support. 3 + * 4 + * Copyright (C) 2015, Intel Corporation 5 + * 6 + * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7 + * Mika Westerberg <mika.westerberg@linux.intel.com> 8 + * Heikki Krogerus <heikki.krogerus@linux.intel.com> 9 + * Jarkko Nikula <jarkko.nikula@linux.intel.com> 10 + * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License version 2 as 13 + * published by the Free Software Foundation. 14 + */ 15 + 16 + #include <linux/clk.h> 17 + #include <linux/clkdev.h> 18 + #include <linux/clk-provider.h> 19 + #include <linux/debugfs.h> 20 + #include <linux/idr.h> 21 + #include <linux/ioport.h> 22 + #include <linux/kernel.h> 23 + #include <linux/module.h> 24 + #include <linux/mfd/core.h> 25 + #include <linux/pm_qos.h> 26 + #include <linux/pm_runtime.h> 27 + #include <linux/seq_file.h> 28 + 29 + #include "intel-lpss.h" 30 + 31 + #define LPSS_DEV_OFFSET 0x000 32 + #define LPSS_DEV_SIZE 0x200 33 + #define LPSS_PRIV_OFFSET 0x200 34 + #define LPSS_PRIV_SIZE 0x100 35 + #define LPSS_IDMA64_OFFSET 0x800 36 + #define LPSS_IDMA64_SIZE 0x800 37 + 38 + /* Offsets from lpss->priv */ 39 + #define LPSS_PRIV_RESETS 0x04 40 + #define LPSS_PRIV_RESETS_FUNC BIT(2) 41 + #define LPSS_PRIV_RESETS_IDMA 0x3 42 + 43 + #define LPSS_PRIV_ACTIVELTR 0x10 44 + #define LPSS_PRIV_IDLELTR 0x14 45 + 46 + #define LPSS_PRIV_LTR_REQ BIT(15) 47 + #define LPSS_PRIV_LTR_SCALE_MASK 0xc00 48 + #define LPSS_PRIV_LTR_SCALE_1US 0x800 49 + #define LPSS_PRIV_LTR_SCALE_32US 0xc00 50 + #define LPSS_PRIV_LTR_VALUE_MASK 0x3ff 51 + 52 + #define LPSS_PRIV_SSP_REG 0x20 53 + #define LPSS_PRIV_SSP_REG_DIS_DMA_FIN BIT(0) 54 + 55 + #define LPSS_PRIV_REMAP_ADDR_LO 0x40 56 + #define LPSS_PRIV_REMAP_ADDR_HI 0x44 57 + 58 + #define LPSS_PRIV_CAPS 0xfc 59 + #define LPSS_PRIV_CAPS_NO_IDMA BIT(8) 60 + #define LPSS_PRIV_CAPS_TYPE_SHIFT 4 61 + #define LPSS_PRIV_CAPS_TYPE_MASK (0xf << 
LPSS_PRIV_CAPS_TYPE_SHIFT) 62 + 63 + /* This matches the type field in CAPS register */ 64 + enum intel_lpss_dev_type { 65 + LPSS_DEV_I2C = 0, 66 + LPSS_DEV_UART, 67 + LPSS_DEV_SPI, 68 + }; 69 + 70 + struct intel_lpss { 71 + const struct intel_lpss_platform_info *info; 72 + enum intel_lpss_dev_type type; 73 + struct clk *clk; 74 + struct clk_lookup *clock; 75 + const struct mfd_cell *cell; 76 + struct device *dev; 77 + void __iomem *priv; 78 + int devid; 79 + u32 caps; 80 + u32 active_ltr; 81 + u32 idle_ltr; 82 + struct dentry *debugfs; 83 + }; 84 + 85 + static const struct resource intel_lpss_dev_resources[] = { 86 + DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"), 87 + DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"), 88 + DEFINE_RES_IRQ(0), 89 + }; 90 + 91 + static const struct resource intel_lpss_idma64_resources[] = { 92 + DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE), 93 + DEFINE_RES_IRQ(0), 94 + }; 95 + 96 + #define LPSS_IDMA64_DRIVER_NAME "idma64" 97 + 98 + /* 99 + * Cells needs to be ordered so that the iDMA is created first. This is 100 + * because we need to be sure the DMA is available when the host controller 101 + * driver is probed. 
102 + */ 103 + static const struct mfd_cell intel_lpss_idma64_cell = { 104 + .name = LPSS_IDMA64_DRIVER_NAME, 105 + .num_resources = ARRAY_SIZE(intel_lpss_idma64_resources), 106 + .resources = intel_lpss_idma64_resources, 107 + }; 108 + 109 + static const struct mfd_cell intel_lpss_i2c_cell = { 110 + .name = "i2c_designware", 111 + .num_resources = ARRAY_SIZE(intel_lpss_dev_resources), 112 + .resources = intel_lpss_dev_resources, 113 + }; 114 + 115 + static const struct mfd_cell intel_lpss_uart_cell = { 116 + .name = "dw-apb-uart", 117 + .num_resources = ARRAY_SIZE(intel_lpss_dev_resources), 118 + .resources = intel_lpss_dev_resources, 119 + }; 120 + 121 + static const struct mfd_cell intel_lpss_spi_cell = { 122 + .name = "pxa2xx-spi", 123 + .num_resources = ARRAY_SIZE(intel_lpss_dev_resources), 124 + .resources = intel_lpss_dev_resources, 125 + }; 126 + 127 + static DEFINE_IDA(intel_lpss_devid_ida); 128 + static struct dentry *intel_lpss_debugfs; 129 + 130 + static int intel_lpss_request_dma_module(const char *name) 131 + { 132 + static bool intel_lpss_dma_requested; 133 + 134 + if (intel_lpss_dma_requested) 135 + return 0; 136 + 137 + intel_lpss_dma_requested = true; 138 + return request_module("%s", name); 139 + } 140 + 141 + static void intel_lpss_cache_ltr(struct intel_lpss *lpss) 142 + { 143 + lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR); 144 + lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR); 145 + } 146 + 147 + static int intel_lpss_debugfs_add(struct intel_lpss *lpss) 148 + { 149 + struct dentry *dir; 150 + 151 + dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs); 152 + if (IS_ERR(dir)) 153 + return PTR_ERR(dir); 154 + 155 + /* Cache the values into lpss structure */ 156 + intel_lpss_cache_ltr(lpss); 157 + 158 + debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps); 159 + debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr); 160 + debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr); 161 + 
162 + lpss->debugfs = dir; 163 + return 0; 164 + } 165 + 166 + static void intel_lpss_debugfs_remove(struct intel_lpss *lpss) 167 + { 168 + debugfs_remove_recursive(lpss->debugfs); 169 + } 170 + 171 + static void intel_lpss_ltr_set(struct device *dev, s32 val) 172 + { 173 + struct intel_lpss *lpss = dev_get_drvdata(dev); 174 + u32 ltr; 175 + 176 + /* 177 + * Program latency tolerance (LTR) accordingly what has been asked 178 + * by the PM QoS layer or disable it in case we were passed 179 + * negative value or PM_QOS_LATENCY_ANY. 180 + */ 181 + ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR); 182 + 183 + if (val == PM_QOS_LATENCY_ANY || val < 0) { 184 + ltr &= ~LPSS_PRIV_LTR_REQ; 185 + } else { 186 + ltr |= LPSS_PRIV_LTR_REQ; 187 + ltr &= ~LPSS_PRIV_LTR_SCALE_MASK; 188 + ltr &= ~LPSS_PRIV_LTR_VALUE_MASK; 189 + 190 + if (val > LPSS_PRIV_LTR_VALUE_MASK) 191 + ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5; 192 + else 193 + ltr |= LPSS_PRIV_LTR_SCALE_1US | val; 194 + } 195 + 196 + if (ltr == lpss->active_ltr) 197 + return; 198 + 199 + writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR); 200 + writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR); 201 + 202 + /* Cache the values into lpss structure */ 203 + intel_lpss_cache_ltr(lpss); 204 + } 205 + 206 + static void intel_lpss_ltr_expose(struct intel_lpss *lpss) 207 + { 208 + lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set; 209 + dev_pm_qos_expose_latency_tolerance(lpss->dev); 210 + } 211 + 212 + static void intel_lpss_ltr_hide(struct intel_lpss *lpss) 213 + { 214 + dev_pm_qos_hide_latency_tolerance(lpss->dev); 215 + lpss->dev->power.set_latency_tolerance = NULL; 216 + } 217 + 218 + static int intel_lpss_assign_devs(struct intel_lpss *lpss) 219 + { 220 + unsigned int type; 221 + 222 + type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK; 223 + type >>= LPSS_PRIV_CAPS_TYPE_SHIFT; 224 + 225 + switch (type) { 226 + case LPSS_DEV_I2C: 227 + lpss->cell = &intel_lpss_i2c_cell; 228 + break; 229 + case LPSS_DEV_UART: 230 + lpss->cell = 
&intel_lpss_uart_cell; 231 + break; 232 + case LPSS_DEV_SPI: 233 + lpss->cell = &intel_lpss_spi_cell; 234 + break; 235 + default: 236 + return -ENODEV; 237 + } 238 + 239 + lpss->type = type; 240 + 241 + return 0; 242 + } 243 + 244 + static bool intel_lpss_has_idma(const struct intel_lpss *lpss) 245 + { 246 + return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0; 247 + } 248 + 249 + static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss) 250 + { 251 + resource_size_t addr = lpss->info->mem->start; 252 + 253 + writel(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR_LO); 254 + #if BITS_PER_LONG > 32 255 + writel(addr >> 32, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI); 256 + #else 257 + writel(0, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI); 258 + #endif 259 + } 260 + 261 + static void intel_lpss_deassert_reset(const struct intel_lpss *lpss) 262 + { 263 + u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA; 264 + 265 + /* Bring out the device from reset */ 266 + writel(value, lpss->priv + LPSS_PRIV_RESETS); 267 + } 268 + 269 + static void intel_lpss_init_dev(const struct intel_lpss *lpss) 270 + { 271 + u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN; 272 + 273 + intel_lpss_deassert_reset(lpss); 274 + 275 + if (!intel_lpss_has_idma(lpss)) 276 + return; 277 + 278 + intel_lpss_set_remap_addr(lpss); 279 + 280 + /* Make sure that SPI multiblock DMA transfers are re-enabled */ 281 + if (lpss->type == LPSS_DEV_SPI) 282 + writel(value, lpss->priv + LPSS_PRIV_SSP_REG); 283 + } 284 + 285 + static void intel_lpss_unregister_clock_tree(struct clk *clk) 286 + { 287 + struct clk *parent; 288 + 289 + while (clk) { 290 + parent = clk_get_parent(clk); 291 + clk_unregister(clk); 292 + clk = parent; 293 + } 294 + } 295 + 296 + static int intel_lpss_register_clock_divider(struct intel_lpss *lpss, 297 + const char *devname, 298 + struct clk **clk) 299 + { 300 + char name[32]; 301 + struct clk *tmp = *clk; 302 + 303 + snprintf(name, sizeof(name), "%s-enable", devname); 304 + tmp = 
clk_register_gate(NULL, name, __clk_get_name(tmp), 0, 305 + lpss->priv, 0, 0, NULL); 306 + if (IS_ERR(tmp)) 307 + return PTR_ERR(tmp); 308 + 309 + snprintf(name, sizeof(name), "%s-div", devname); 310 + tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp), 311 + 0, lpss->priv, 1, 15, 16, 15, 0, 312 + NULL); 313 + if (IS_ERR(tmp)) 314 + return PTR_ERR(tmp); 315 + *clk = tmp; 316 + 317 + snprintf(name, sizeof(name), "%s-update", devname); 318 + tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 319 + CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL); 320 + if (IS_ERR(tmp)) 321 + return PTR_ERR(tmp); 322 + *clk = tmp; 323 + 324 + return 0; 325 + } 326 + 327 + static int intel_lpss_register_clock(struct intel_lpss *lpss) 328 + { 329 + const struct mfd_cell *cell = lpss->cell; 330 + struct clk *clk; 331 + char devname[24]; 332 + int ret; 333 + 334 + if (!lpss->info->clk_rate) 335 + return 0; 336 + 337 + /* Root clock */ 338 + clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL, 339 + CLK_IS_ROOT, lpss->info->clk_rate); 340 + if (IS_ERR(clk)) 341 + return PTR_ERR(clk); 342 + 343 + snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid); 344 + 345 + /* 346 + * Support for clock divider only if it has some preset value. 347 + * Otherwise we assume that the divider is not used. 
348 + */ 349 + if (lpss->type != LPSS_DEV_I2C) { 350 + ret = intel_lpss_register_clock_divider(lpss, devname, &clk); 351 + if (ret) 352 + goto err_clk_register; 353 + } 354 + 355 + ret = -ENOMEM; 356 + 357 + /* Clock for the host controller */ 358 + lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname); 359 + if (!lpss->clock) 360 + goto err_clk_register; 361 + 362 + lpss->clk = clk; 363 + 364 + return 0; 365 + 366 + err_clk_register: 367 + intel_lpss_unregister_clock_tree(clk); 368 + 369 + return ret; 370 + } 371 + 372 + static void intel_lpss_unregister_clock(struct intel_lpss *lpss) 373 + { 374 + if (IS_ERR_OR_NULL(lpss->clk)) 375 + return; 376 + 377 + clkdev_drop(lpss->clock); 378 + intel_lpss_unregister_clock_tree(lpss->clk); 379 + } 380 + 381 + int intel_lpss_probe(struct device *dev, 382 + const struct intel_lpss_platform_info *info) 383 + { 384 + struct intel_lpss *lpss; 385 + int ret; 386 + 387 + if (!info || !info->mem || info->irq <= 0) 388 + return -EINVAL; 389 + 390 + lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL); 391 + if (!lpss) 392 + return -ENOMEM; 393 + 394 + lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET, 395 + LPSS_PRIV_SIZE); 396 + if (!lpss->priv) 397 + return -ENOMEM; 398 + 399 + lpss->info = info; 400 + lpss->dev = dev; 401 + lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS); 402 + 403 + dev_set_drvdata(dev, lpss); 404 + 405 + ret = intel_lpss_assign_devs(lpss); 406 + if (ret) 407 + return ret; 408 + 409 + intel_lpss_init_dev(lpss); 410 + 411 + lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL); 412 + if (lpss->devid < 0) 413 + return lpss->devid; 414 + 415 + ret = intel_lpss_register_clock(lpss); 416 + if (ret) 417 + goto err_clk_register; 418 + 419 + intel_lpss_ltr_expose(lpss); 420 + 421 + ret = intel_lpss_debugfs_add(lpss); 422 + if (ret) 423 + dev_warn(dev, "Failed to create debugfs entries\n"); 424 + 425 + if (intel_lpss_has_idma(lpss)) { 426 + /* 427 + * Ensure the DMA 
driver is loaded before the host 428 + * controller device appears, so that the host controller 429 + * driver can request its DMA channels as early as 430 + * possible. 431 + * 432 + * If the DMA module is not there that's OK as well. 433 + */ 434 + intel_lpss_request_dma_module(LPSS_IDMA64_DRIVER_NAME); 435 + 436 + ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell, 437 + 1, info->mem, info->irq, NULL); 438 + if (ret) 439 + dev_warn(dev, "Failed to add %s, fallback to PIO\n", 440 + LPSS_IDMA64_DRIVER_NAME); 441 + } 442 + 443 + ret = mfd_add_devices(dev, lpss->devid, lpss->cell, 444 + 1, info->mem, info->irq, NULL); 445 + if (ret) 446 + goto err_remove_ltr; 447 + 448 + return 0; 449 + 450 + err_remove_ltr: 451 + intel_lpss_debugfs_remove(lpss); 452 + intel_lpss_ltr_hide(lpss); 453 + 454 + err_clk_register: 455 + ida_simple_remove(&intel_lpss_devid_ida, lpss->devid); 456 + 457 + return ret; 458 + } 459 + EXPORT_SYMBOL_GPL(intel_lpss_probe); 460 + 461 + void intel_lpss_remove(struct device *dev) 462 + { 463 + struct intel_lpss *lpss = dev_get_drvdata(dev); 464 + 465 + mfd_remove_devices(dev); 466 + intel_lpss_debugfs_remove(lpss); 467 + intel_lpss_ltr_hide(lpss); 468 + intel_lpss_unregister_clock(lpss); 469 + ida_simple_remove(&intel_lpss_devid_ida, lpss->devid); 470 + } 471 + EXPORT_SYMBOL_GPL(intel_lpss_remove); 472 + 473 + static int resume_lpss_device(struct device *dev, void *data) 474 + { 475 + pm_runtime_resume(dev); 476 + return 0; 477 + } 478 + 479 + int intel_lpss_prepare(struct device *dev) 480 + { 481 + /* 482 + * Resume both child devices before entering system sleep. This 483 + * ensures that they are in proper state before they get suspended. 
484 + */ 485 + device_for_each_child_reverse(dev, NULL, resume_lpss_device); 486 + return 0; 487 + } 488 + EXPORT_SYMBOL_GPL(intel_lpss_prepare); 489 + 490 + int intel_lpss_suspend(struct device *dev) 491 + { 492 + return 0; 493 + } 494 + EXPORT_SYMBOL_GPL(intel_lpss_suspend); 495 + 496 + int intel_lpss_resume(struct device *dev) 497 + { 498 + struct intel_lpss *lpss = dev_get_drvdata(dev); 499 + 500 + intel_lpss_init_dev(lpss); 501 + 502 + return 0; 503 + } 504 + EXPORT_SYMBOL_GPL(intel_lpss_resume); 505 + 506 + static int __init intel_lpss_init(void) 507 + { 508 + intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL); 509 + return 0; 510 + } 511 + module_init(intel_lpss_init); 512 + 513 + static void __exit intel_lpss_exit(void) 514 + { 515 + debugfs_remove(intel_lpss_debugfs); 516 + } 517 + module_exit(intel_lpss_exit); 518 + 519 + MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); 520 + MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 521 + MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>"); 522 + MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>"); 523 + MODULE_DESCRIPTION("Intel LPSS core driver"); 524 + MODULE_LICENSE("GPL v2");
+62
drivers/mfd/intel-lpss.h
··· 1 + /* 2 + * Intel LPSS core support. 3 + * 4 + * Copyright (C) 2015, Intel Corporation 5 + * 6 + * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7 + * Mika Westerberg <mika.westerberg@linux.intel.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #ifndef __MFD_INTEL_LPSS_H 15 + #define __MFD_INTEL_LPSS_H 16 + 17 + struct device; 18 + struct resource; 19 + 20 + struct intel_lpss_platform_info { 21 + struct resource *mem; 22 + int irq; 23 + unsigned long clk_rate; 24 + const char *clk_con_id; 25 + }; 26 + 27 + int intel_lpss_probe(struct device *dev, 28 + const struct intel_lpss_platform_info *info); 29 + void intel_lpss_remove(struct device *dev); 30 + 31 + #ifdef CONFIG_PM 32 + int intel_lpss_prepare(struct device *dev); 33 + int intel_lpss_suspend(struct device *dev); 34 + int intel_lpss_resume(struct device *dev); 35 + 36 + #ifdef CONFIG_PM_SLEEP 37 + #define INTEL_LPSS_SLEEP_PM_OPS \ 38 + .prepare = intel_lpss_prepare, \ 39 + .suspend = intel_lpss_suspend, \ 40 + .resume = intel_lpss_resume, \ 41 + .freeze = intel_lpss_suspend, \ 42 + .thaw = intel_lpss_resume, \ 43 + .poweroff = intel_lpss_suspend, \ 44 + .restore = intel_lpss_resume, 45 + #endif 46 + 47 + #define INTEL_LPSS_RUNTIME_PM_OPS \ 48 + .runtime_suspend = intel_lpss_suspend, \ 49 + .runtime_resume = intel_lpss_resume, 50 + 51 + #else /* !CONFIG_PM */ 52 + #define INTEL_LPSS_SLEEP_PM_OPS 53 + #define INTEL_LPSS_RUNTIME_PM_OPS 54 + #endif /* CONFIG_PM */ 55 + 56 + #define INTEL_LPSS_PM_OPS(name) \ 57 + const struct dev_pm_ops name = { \ 58 + INTEL_LPSS_SLEEP_PM_OPS \ 59 + INTEL_LPSS_RUNTIME_PM_OPS \ 60 + } 61 + 62 + #endif /* __MFD_INTEL_LPSS_H */
+1 -1
drivers/mfd/mfd-core.c
··· 302 302 { 303 303 atomic_t *cnts = NULL; 304 304 305 - device_for_each_child(parent, &cnts, mfd_remove_devices_fn); 305 + device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn); 306 306 kfree(cnts); 307 307 } 308 308 EXPORT_SYMBOL(mfd_remove_devices);
+2
include/linux/device.h
··· 959 959 extern void device_del(struct device *dev); 960 960 extern int device_for_each_child(struct device *dev, void *data, 961 961 int (*fn)(struct device *dev, void *data)); 962 + extern int device_for_each_child_reverse(struct device *dev, void *data, 963 + int (*fn)(struct device *dev, void *data)); 962 964 extern struct device *device_find_child(struct device *dev, void *data, 963 965 int (*match)(struct device *dev, void *data)); 964 966 extern int device_rename(struct device *dev, const char *new_name);
+1
include/linux/klist.h
··· 63 63 extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, 64 64 struct klist_node *n); 65 65 extern void klist_iter_exit(struct klist_iter *i); 66 + extern struct klist_node *klist_prev(struct klist_iter *i); 66 67 extern struct klist_node *klist_next(struct klist_iter *i); 67 68 68 69 #endif
+5
include/linux/pm_qos.h
··· 161 161 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); 162 162 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); 163 163 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); 164 + int dev_pm_qos_expose_latency_tolerance(struct device *dev); 165 + void dev_pm_qos_hide_latency_tolerance(struct device *dev); 164 166 165 167 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) 166 168 { ··· 231 229 { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } 232 230 static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) 233 231 { return 0; } 232 + static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) 233 + { return 0; } 234 + static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} 234 235 235 236 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } 236 237 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
+41
lib/klist.c
··· 324 324 } 325 325 326 326 /** 327 + * klist_prev - Ante up prev node in list. 328 + * @i: Iterator structure. 329 + * 330 + * First grab list lock. Decrement the reference count of the previous 331 + * node, if there was one. Grab the prev node, increment its reference 332 + * count, drop the lock, and return that prev node. 333 + */ 334 + struct klist_node *klist_prev(struct klist_iter *i) 335 + { 336 + void (*put)(struct klist_node *) = i->i_klist->put; 337 + struct klist_node *last = i->i_cur; 338 + struct klist_node *prev; 339 + 340 + spin_lock(&i->i_klist->k_lock); 341 + 342 + if (last) { 343 + prev = to_klist_node(last->n_node.prev); 344 + if (!klist_dec_and_del(last)) 345 + put = NULL; 346 + } else 347 + prev = to_klist_node(i->i_klist->k_list.prev); 348 + 349 + i->i_cur = NULL; 350 + while (prev != to_klist_node(&i->i_klist->k_list)) { 351 + if (likely(!knode_dead(prev))) { 352 + kref_get(&prev->n_ref); 353 + i->i_cur = prev; 354 + break; 355 + } 356 + prev = to_klist_node(prev->n_node.prev); 357 + } 358 + 359 + spin_unlock(&i->i_klist->k_lock); 360 + 361 + if (put && last) 362 + put(last); 363 + return i->i_cur; 364 + } 365 + EXPORT_SYMBOL_GPL(klist_prev); 366 + 367 + /** 327 368 * klist_next - Ante up next node in list. 328 369 * @i: Iterator structure. 329 370 *