Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'driver-core-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH:
"Here are the big driver core updates for 5.11-rc1

This time there was a lot of different work happening here for some
reason:

- redo of the fwnode link logic, speeding it up greatly

- auxiliary bus added (this was a tag that will be pulled in from
other trees/maintainers this merge window as well, as driver
subsystems started to rely on it)

- platform driver core cleanups on the way to fixing some long-time
api updates in future releases

- minor fixes and tweaks.

All have been in linux-next with no (finally) reported issues. Testing
there did help in shaking issues out a lot :)"

* tag 'driver-core-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (39 commits)
driver core: platform: don't oops in platform_shutdown() on unbound devices
ACPI: Use fwnode_init() to set up fwnode
misc: pvpanic: Replace OF headers by mod_devicetable.h
misc: pvpanic: Combine ACPI and platform drivers
usb: host: sl811: Switch to use platform_get_mem_or_io()
vfio: platform: Switch to use platform_get_mem_or_io()
driver core: platform: Introduce platform_get_mem_or_io()
dyndbg: fix use before null check
soc: fix comment for freeing soc_dev_attr
driver core: platform: use bus_type functions
driver core: platform: change logic implementing platform_driver_probe
driver core: platform: reorder functions
driver core: make driver_probe_device() static
driver core: Fix a couple of typos
driver core: Reorder devices on successful probe
driver core: Delete pointless parameter in fwnode_operations.add_links
driver core: Refactor fw_devlink feature
efi: Update implementation of add_links() to create fwnode links
of: property: Update implementation of add_links() to create fwnode links
driver core: Use device's fwnode to check if it is waiting for suppliers
...

+853 -796
+1 -1
drivers/acpi/property.c
··· 76 76 return false; 77 77 78 78 dn->name = link->package.elements[0].string.pointer; 79 - dn->fwnode.ops = &acpi_data_fwnode_ops; 79 + fwnode_init(&dn->fwnode, &acpi_data_fwnode_ops); 80 80 dn->parent = parent; 81 81 INIT_LIST_HEAD(&dn->data.properties); 82 82 INIT_LIST_HEAD(&dn->data.subnodes);
+1 -1
drivers/acpi/scan.c
··· 1589 1589 device->device_type = type; 1590 1590 device->handle = handle; 1591 1591 device->parent = acpi_bus_get_parent(handle); 1592 - device->fwnode.ops = &acpi_device_fwnode_ops; 1592 + fwnode_init(&device->fwnode, &acpi_device_fwnode_ops); 1593 1593 acpi_set_device_status(device, sta); 1594 1594 acpi_device_get_busid(device); 1595 1595 acpi_set_pnp_ids(handle, &device->pnp, type);
+8 -3
drivers/base/auxiliary.c
··· 92 92 93 93 static void auxiliary_bus_shutdown(struct device *dev) 94 94 { 95 - struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); 96 - struct auxiliary_device *auxdev = to_auxiliary_dev(dev); 95 + struct auxiliary_driver *auxdrv = NULL; 96 + struct auxiliary_device *auxdev; 97 97 98 - if (auxdrv->shutdown) 98 + if (dev->driver) { 99 + auxdrv = to_auxiliary_drv(dev->driver); 100 + auxdev = to_auxiliary_dev(dev); 101 + } 102 + 103 + if (auxdrv && auxdrv->shutdown) 99 104 auxdrv->shutdown(auxdev); 100 105 } 101 106
-1
drivers/base/base.h
··· 133 133 struct device *parent); 134 134 135 135 extern void driver_detach(struct device_driver *drv); 136 - extern int driver_probe_device(struct device_driver *drv, struct device *dev); 137 136 extern void driver_deferred_probe_del(struct device *dev); 138 137 extern void device_set_deferred_probe_reason(const struct device *dev, 139 138 struct va_format *vaf);
+1 -1
drivers/base/class.c
··· 210 210 } 211 211 212 212 /** 213 - * class_create - create a struct class structure 213 + * __class_create - create a struct class structure 214 214 * @owner: pointer to the module that is to "own" this struct class 215 215 * @name: pointer to a string for the name of this class. 216 216 * @key: the lock_class_key for this class; used by mutex lock debugging
+383 -228
drivers/base/core.c
··· 46 46 #endif 47 47 48 48 /* Device links support. */ 49 - static LIST_HEAD(wait_for_suppliers); 50 - static DEFINE_MUTEX(wfs_lock); 51 49 static LIST_HEAD(deferred_sync); 52 50 static unsigned int defer_sync_state_count = 1; 53 - static unsigned int defer_fw_devlink_count; 54 - static LIST_HEAD(deferred_fw_devlink); 55 - static DEFINE_MUTEX(defer_fw_devlink_lock); 51 + static DEFINE_MUTEX(fwnode_link_lock); 56 52 static bool fw_devlink_is_permissive(void); 53 + 54 + /** 55 + * fwnode_link_add - Create a link between two fwnode_handles. 56 + * @con: Consumer end of the link. 57 + * @sup: Supplier end of the link. 58 + * 59 + * Create a fwnode link between fwnode handles @con and @sup. The fwnode link 60 + * represents the detail that the firmware lists @sup fwnode as supplying a 61 + * resource to @con. 62 + * 63 + * The driver core will use the fwnode link to create a device link between the 64 + * two device objects corresponding to @con and @sup when they are created. The 65 + * driver core will automatically delete the fwnode link between @con and @sup 66 + * after doing that. 67 + * 68 + * Attempts to create duplicate links between the same pair of fwnode handles 69 + * are ignored and there is no reference counting. 
70 + */ 71 + int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup) 72 + { 73 + struct fwnode_link *link; 74 + int ret = 0; 75 + 76 + mutex_lock(&fwnode_link_lock); 77 + 78 + list_for_each_entry(link, &sup->consumers, s_hook) 79 + if (link->consumer == con) 80 + goto out; 81 + 82 + link = kzalloc(sizeof(*link), GFP_KERNEL); 83 + if (!link) { 84 + ret = -ENOMEM; 85 + goto out; 86 + } 87 + 88 + link->supplier = sup; 89 + INIT_LIST_HEAD(&link->s_hook); 90 + link->consumer = con; 91 + INIT_LIST_HEAD(&link->c_hook); 92 + 93 + list_add(&link->s_hook, &sup->consumers); 94 + list_add(&link->c_hook, &con->suppliers); 95 + out: 96 + mutex_unlock(&fwnode_link_lock); 97 + 98 + return ret; 99 + } 100 + 101 + /** 102 + * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle. 103 + * @fwnode: fwnode whose supplier links need to be deleted 104 + * 105 + * Deletes all supplier links connecting directly to @fwnode. 106 + */ 107 + static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode) 108 + { 109 + struct fwnode_link *link, *tmp; 110 + 111 + mutex_lock(&fwnode_link_lock); 112 + list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { 113 + list_del(&link->s_hook); 114 + list_del(&link->c_hook); 115 + kfree(link); 116 + } 117 + mutex_unlock(&fwnode_link_lock); 118 + } 119 + 120 + /** 121 + * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle. 122 + * @fwnode: fwnode whose consumer links need to be deleted 123 + * 124 + * Deletes all consumer links connecting directly to @fwnode. 
125 + */ 126 + static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode) 127 + { 128 + struct fwnode_link *link, *tmp; 129 + 130 + mutex_lock(&fwnode_link_lock); 131 + list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { 132 + list_del(&link->s_hook); 133 + list_del(&link->c_hook); 134 + kfree(link); 135 + } 136 + mutex_unlock(&fwnode_link_lock); 137 + } 138 + 139 + /** 140 + * fwnode_links_purge - Delete all links connected to a fwnode_handle. 141 + * @fwnode: fwnode whose links needs to be deleted 142 + * 143 + * Deletes all links connecting directly to a fwnode. 144 + */ 145 + void fwnode_links_purge(struct fwnode_handle *fwnode) 146 + { 147 + fwnode_links_purge_suppliers(fwnode); 148 + fwnode_links_purge_consumers(fwnode); 149 + } 57 150 58 151 #ifdef CONFIG_SRCU 59 152 static DEFINE_MUTEX(device_links_lock); ··· 561 468 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the 562 469 * runtime PM framework to take the link into account. Second, if the 563 470 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will 564 - * be forced into the active metastate and reference-counted upon the creation 471 + * be forced into the active meta state and reference-counted upon the creation 565 472 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be 566 473 * ignored. 567 474 * ··· 584 491 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and 585 492 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent 586 493 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can 587 - * be used to request the driver core to automaticall probe for a consmer 494 + * be used to request the driver core to automatically probe for a consumer 588 495 * driver after successfully binding a driver to the supplier device. 
589 496 * 590 497 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER, ··· 644 551 if (!device_pm_initialized(supplier) 645 552 || (!(flags & DL_FLAG_SYNC_STATE_ONLY) && 646 553 device_is_dependent(consumer, supplier))) { 554 + link = NULL; 555 + goto out; 556 + } 557 + 558 + /* 559 + * SYNC_STATE_ONLY links are useless once a consumer device has probed. 560 + * So, only create it if the consumer hasn't probed yet. 561 + */ 562 + if (flags & DL_FLAG_SYNC_STATE_ONLY && 563 + consumer->links.status != DL_DEV_NO_DRIVER && 564 + consumer->links.status != DL_DEV_PROBING) { 647 565 link = NULL; 648 566 goto out; 649 567 } ··· 801 697 } 802 698 EXPORT_SYMBOL_GPL(device_link_add); 803 699 804 - /** 805 - * device_link_wait_for_supplier - Add device to wait_for_suppliers list 806 - * @consumer: Consumer device 807 - * 808 - * Marks the @consumer device as waiting for suppliers to become available by 809 - * adding it to the wait_for_suppliers list. The consumer device will never be 810 - * probed until it's removed from the wait_for_suppliers list. 811 - * 812 - * The caller is responsible for adding the links to the supplier devices once 813 - * they are available and removing the @consumer device from the 814 - * wait_for_suppliers list once links to all the suppliers have been created. 815 - * 816 - * This function is NOT meant to be called from the probe function of the 817 - * consumer but rather from code that creates/adds the consumer device. 
818 - */ 819 - static void device_link_wait_for_supplier(struct device *consumer, 820 - bool need_for_probe) 821 - { 822 - mutex_lock(&wfs_lock); 823 - list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers); 824 - consumer->links.need_for_probe = need_for_probe; 825 - mutex_unlock(&wfs_lock); 826 - } 827 - 828 - static void device_link_wait_for_mandatory_supplier(struct device *consumer) 829 - { 830 - device_link_wait_for_supplier(consumer, true); 831 - } 832 - 833 - static void device_link_wait_for_optional_supplier(struct device *consumer) 834 - { 835 - device_link_wait_for_supplier(consumer, false); 836 - } 837 - 838 - /** 839 - * device_link_add_missing_supplier_links - Add links from consumer devices to 840 - * supplier devices, leaving any 841 - * consumer with inactive suppliers on 842 - * the wait_for_suppliers list 843 - * 844 - * Loops through all consumers waiting on suppliers and tries to add all their 845 - * supplier links. If that succeeds, the consumer device is removed from 846 - * wait_for_suppliers list. Otherwise, they are left in the wait_for_suppliers 847 - * list. Devices left on the wait_for_suppliers list will not be probed. 848 - * 849 - * The fwnode add_links callback is expected to return 0 if it has found and 850 - * added all the supplier links for the consumer device. It should return an 851 - * error if it isn't able to do so. 852 - * 853 - * The caller of device_link_wait_for_supplier() is expected to call this once 854 - * it's aware of potential suppliers becoming available. 
855 - */ 856 - static void device_link_add_missing_supplier_links(void) 857 - { 858 - struct device *dev, *tmp; 859 - 860 - mutex_lock(&wfs_lock); 861 - list_for_each_entry_safe(dev, tmp, &wait_for_suppliers, 862 - links.needs_suppliers) { 863 - int ret = fwnode_call_int_op(dev->fwnode, add_links, dev); 864 - if (!ret) 865 - list_del_init(&dev->links.needs_suppliers); 866 - else if (ret != -ENODEV || fw_devlink_is_permissive()) 867 - dev->links.need_for_probe = false; 868 - } 869 - mutex_unlock(&wfs_lock); 870 - } 871 - 872 700 #ifdef CONFIG_SRCU 873 701 static void __device_link_del(struct kref *kref) 874 702 { ··· 926 890 * Device waiting for supplier to become available is not allowed to 927 891 * probe. 928 892 */ 929 - mutex_lock(&wfs_lock); 930 - if (!list_empty(&dev->links.needs_suppliers) && 931 - dev->links.need_for_probe) { 932 - mutex_unlock(&wfs_lock); 893 + mutex_lock(&fwnode_link_lock); 894 + if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) && 895 + !fw_devlink_is_permissive()) { 896 + mutex_unlock(&fwnode_link_lock); 933 897 return -EPROBE_DEFER; 934 898 } 935 - mutex_unlock(&wfs_lock); 899 + mutex_unlock(&fwnode_link_lock); 936 900 937 901 device_links_write_lock(); 938 902 ··· 996 960 */ 997 961 dev->state_synced = true; 998 962 999 - if (WARN_ON(!list_empty(&dev->links.defer_hook))) 963 + if (WARN_ON(!list_empty(&dev->links.defer_sync))) 1000 964 return; 1001 965 1002 966 get_device(dev); 1003 - list_add_tail(&dev->links.defer_hook, list); 967 + list_add_tail(&dev->links.defer_sync, list); 1004 968 } 1005 969 1006 970 /** ··· 1018 982 { 1019 983 struct device *dev, *tmp; 1020 984 1021 - list_for_each_entry_safe(dev, tmp, list, links.defer_hook) { 1022 - list_del_init(&dev->links.defer_hook); 985 + list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { 986 + list_del_init(&dev->links.defer_sync); 1023 987 1024 988 if (dev != dont_lock_dev) 1025 989 device_lock(dev); ··· 1057 1021 if (defer_sync_state_count) 1058 1022 goto out; 1059 
1023 1060 - list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) { 1024 + list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { 1061 1025 /* 1062 1026 * Delete from deferred_sync list before queuing it to 1063 - * sync_list because defer_hook is used for both lists. 1027 + * sync_list because defer_sync is used for both lists. 1064 1028 */ 1065 - list_del_init(&dev->links.defer_hook); 1029 + list_del_init(&dev->links.defer_sync); 1066 1030 __device_links_queue_sync_state(dev, &sync_list); 1067 1031 } 1068 1032 out: ··· 1080 1044 1081 1045 static void __device_links_supplier_defer_sync(struct device *sup) 1082 1046 { 1083 - if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup)) 1084 - list_add_tail(&sup->links.defer_hook, &deferred_sync); 1047 + if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) 1048 + list_add_tail(&sup->links.defer_sync, &deferred_sync); 1085 1049 } 1086 1050 1087 1051 static void device_link_drop_managed(struct device_link *link) ··· 1098 1062 bool val; 1099 1063 1100 1064 device_lock(dev); 1101 - mutex_lock(&wfs_lock); 1102 - val = !list_empty(&dev->links.needs_suppliers) 1103 - && dev->links.need_for_probe; 1104 - mutex_unlock(&wfs_lock); 1065 + val = !list_empty(&dev->fwnode->suppliers); 1105 1066 device_unlock(dev); 1106 1067 return sysfs_emit(buf, "%u\n", val); 1107 1068 } ··· 1125 1092 * the device links it needs to or make new device links as it needs 1126 1093 * them. So, it no longer needs to wait on any suppliers. 
1127 1094 */ 1128 - mutex_lock(&wfs_lock); 1129 - list_del_init(&dev->links.needs_suppliers); 1130 - mutex_unlock(&wfs_lock); 1095 + if (dev->fwnode && dev->fwnode->dev == dev) 1096 + fwnode_links_purge_suppliers(dev->fwnode); 1131 1097 device_remove_file(dev, &dev_attr_waiting_for_supplier); 1132 1098 1133 1099 device_links_write_lock(); ··· 1307 1275 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1308 1276 } 1309 1277 1310 - list_del_init(&dev->links.defer_hook); 1278 + list_del_init(&dev->links.defer_sync); 1311 1279 __device_links_no_driver(dev); 1312 1280 1313 1281 device_links_write_unlock(); ··· 1417 1385 if (dev->class == &devlink_class) 1418 1386 return; 1419 1387 1420 - mutex_lock(&wfs_lock); 1421 - list_del(&dev->links.needs_suppliers); 1422 - mutex_unlock(&wfs_lock); 1423 - 1424 1388 /* 1425 1389 * Delete all of the remaining links from this device to any other 1426 1390 * devices (either consumers or suppliers). ··· 1467 1439 return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY; 1468 1440 } 1469 1441 1442 + static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode) 1443 + { 1444 + if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED) 1445 + return; 1446 + 1447 + fwnode_call_int_op(fwnode, add_links); 1448 + fwnode->flags |= FWNODE_FLAG_LINKS_ADDED; 1449 + } 1450 + 1451 + static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode) 1452 + { 1453 + struct fwnode_handle *child = NULL; 1454 + 1455 + fw_devlink_parse_fwnode(fwnode); 1456 + 1457 + while ((child = fwnode_get_next_available_child_node(fwnode, child))) 1458 + fw_devlink_parse_fwtree(child); 1459 + } 1460 + 1461 + /** 1462 + * fw_devlink_create_devlink - Create a device link from a consumer to fwnode 1463 + * @con - Consumer device for the device link 1464 + * @sup_handle - fwnode handle of supplier 1465 + * 1466 + * This function will try to create a device link between the consumer device 1467 + * @con and the supplier device represented by @sup_handle. 
1468 + * 1469 + * The supplier has to be provided as a fwnode because incorrect cycles in 1470 + * fwnode links can sometimes cause the supplier device to never be created. 1471 + * This function detects such cases and returns an error if it cannot create a 1472 + * device link from the consumer to a missing supplier. 1473 + * 1474 + * Returns, 1475 + * 0 on successfully creating a device link 1476 + * -EINVAL if the device link cannot be created as expected 1477 + * -EAGAIN if the device link cannot be created right now, but it may be 1478 + * possible to do that in the future 1479 + */ 1480 + static int fw_devlink_create_devlink(struct device *con, 1481 + struct fwnode_handle *sup_handle, u32 flags) 1482 + { 1483 + struct device *sup_dev; 1484 + int ret = 0; 1485 + 1486 + sup_dev = get_dev_from_fwnode(sup_handle); 1487 + if (sup_dev) { 1488 + /* 1489 + * If this fails, it is due to cycles in device links. Just 1490 + * give up on this link and treat it as invalid. 1491 + */ 1492 + if (!device_link_add(con, sup_dev, flags)) 1493 + ret = -EINVAL; 1494 + 1495 + goto out; 1496 + } 1497 + 1498 + /* 1499 + * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports 1500 + * cycles. So cycle detection isn't necessary and shouldn't be 1501 + * done. 1502 + */ 1503 + if (flags & DL_FLAG_SYNC_STATE_ONLY) 1504 + return -EAGAIN; 1505 + 1506 + /* 1507 + * If we can't find the supplier device from its fwnode, it might be 1508 + * due to a cyclic dependency between fwnodes. Some of these cycles can 1509 + * be broken by applying logic. Check for these types of cycles and 1510 + * break them so that devices in the cycle probe properly. 1511 + * 1512 + * If the supplier's parent is dependent on the consumer, then 1513 + * the consumer-supplier dependency is a false dependency. So, 1514 + * treat it as an invalid link. 
1515 + */ 1516 + sup_dev = fwnode_get_next_parent_dev(sup_handle); 1517 + if (sup_dev && device_is_dependent(con, sup_dev)) { 1518 + dev_dbg(con, "Not linking to %pfwP - False link\n", 1519 + sup_handle); 1520 + ret = -EINVAL; 1521 + } else { 1522 + /* 1523 + * Can't check for cycles or no cycles. So let's try 1524 + * again later. 1525 + */ 1526 + ret = -EAGAIN; 1527 + } 1528 + 1529 + out: 1530 + put_device(sup_dev); 1531 + return ret; 1532 + } 1533 + 1534 + /** 1535 + * __fw_devlink_link_to_consumers - Create device links to consumers of a device 1536 + * @dev - Device that needs to be linked to its consumers 1537 + * 1538 + * This function looks at all the consumer fwnodes of @dev and creates device 1539 + * links between the consumer device and @dev (supplier). 1540 + * 1541 + * If the consumer device has not been added yet, then this function creates a 1542 + * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device 1543 + * of the consumer fwnode. This is necessary to make sure @dev doesn't get a 1544 + * sync_state() callback before the real consumer device gets to be added and 1545 + * then probed. 1546 + * 1547 + * Once device links are created from the real consumer to @dev (supplier), the 1548 + * fwnode links are deleted. 1549 + */ 1550 + static void __fw_devlink_link_to_consumers(struct device *dev) 1551 + { 1552 + struct fwnode_handle *fwnode = dev->fwnode; 1553 + struct fwnode_link *link, *tmp; 1554 + 1555 + list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { 1556 + u32 dl_flags = fw_devlink_get_flags(); 1557 + struct device *con_dev; 1558 + bool own_link = true; 1559 + int ret; 1560 + 1561 + con_dev = get_dev_from_fwnode(link->consumer); 1562 + /* 1563 + * If consumer device is not available yet, make a "proxy" 1564 + * SYNC_STATE_ONLY link from the consumer's parent device to 1565 + * the supplier device. 
This is necessary to make sure the 1566 + * supplier doesn't get a sync_state() callback before the real 1567 + * consumer can create a device link to the supplier. 1568 + * 1569 + * This proxy link step is needed to handle the case where the 1570 + * consumer's parent device is added before the supplier. 1571 + */ 1572 + if (!con_dev) { 1573 + con_dev = fwnode_get_next_parent_dev(link->consumer); 1574 + /* 1575 + * However, if the consumer's parent device is also the 1576 + * parent of the supplier, don't create a 1577 + * consumer-supplier link from the parent to its child 1578 + * device. Such a dependency is impossible. 1579 + */ 1580 + if (con_dev && 1581 + fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) { 1582 + put_device(con_dev); 1583 + con_dev = NULL; 1584 + } else { 1585 + own_link = false; 1586 + dl_flags = DL_FLAG_SYNC_STATE_ONLY; 1587 + } 1588 + } 1589 + 1590 + if (!con_dev) 1591 + continue; 1592 + 1593 + ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags); 1594 + put_device(con_dev); 1595 + if (!own_link || ret == -EAGAIN) 1596 + continue; 1597 + 1598 + list_del(&link->s_hook); 1599 + list_del(&link->c_hook); 1600 + kfree(link); 1601 + } 1602 + } 1603 + 1604 + /** 1605 + * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device 1606 + * @dev - The consumer device that needs to be linked to its suppliers 1607 + * @fwnode - Root of the fwnode tree that is used to create device links 1608 + * 1609 + * This function looks at all the supplier fwnodes of fwnode tree rooted at 1610 + * @fwnode and creates device links between @dev (consumer) and all the 1611 + * supplier devices of the entire fwnode tree at @fwnode. 1612 + * 1613 + * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev 1614 + * and the real suppliers of @dev. Once these device links are created, the 1615 + * fwnode links are deleted. 
When such device links are successfully created, 1616 + * this function is called recursively on those supplier devices. This is 1617 + * needed to detect and break some invalid cycles in fwnode links. See 1618 + * fw_devlink_create_devlink() for more details. 1619 + * 1620 + * In addition, it also looks at all the suppliers of the entire fwnode tree 1621 + * because some of the child devices of @dev that have not been added yet 1622 + * (because @dev hasn't probed) might already have their suppliers added to 1623 + * driver core. So, this function creates SYNC_STATE_ONLY device links between 1624 + * @dev (consumer) and these suppliers to make sure they don't execute their 1625 + * sync_state() callbacks before these child devices have a chance to create 1626 + * their device links. The fwnode links that correspond to the child devices 1627 + * aren't delete because they are needed later to create the device links 1628 + * between the real consumer and supplier devices. 1629 + */ 1630 + static void __fw_devlink_link_to_suppliers(struct device *dev, 1631 + struct fwnode_handle *fwnode) 1632 + { 1633 + bool own_link = (dev->fwnode == fwnode); 1634 + struct fwnode_link *link, *tmp; 1635 + struct fwnode_handle *child = NULL; 1636 + u32 dl_flags; 1637 + 1638 + if (own_link) 1639 + dl_flags = fw_devlink_get_flags(); 1640 + else 1641 + dl_flags = DL_FLAG_SYNC_STATE_ONLY; 1642 + 1643 + list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { 1644 + int ret; 1645 + struct device *sup_dev; 1646 + struct fwnode_handle *sup = link->supplier; 1647 + 1648 + ret = fw_devlink_create_devlink(dev, sup, dl_flags); 1649 + if (!own_link || ret == -EAGAIN) 1650 + continue; 1651 + 1652 + list_del(&link->s_hook); 1653 + list_del(&link->c_hook); 1654 + kfree(link); 1655 + 1656 + /* If no device link was created, nothing more to do. 
*/ 1657 + if (ret) 1658 + continue; 1659 + 1660 + /* 1661 + * If a device link was successfully created to a supplier, we 1662 + * now need to try and link the supplier to all its suppliers. 1663 + * 1664 + * This is needed to detect and delete false dependencies in 1665 + * fwnode links that haven't been converted to a device link 1666 + * yet. See comments in fw_devlink_create_devlink() for more 1667 + * details on the false dependency. 1668 + * 1669 + * Without deleting these false dependencies, some devices will 1670 + * never probe because they'll keep waiting for their false 1671 + * dependency fwnode links to be converted to device links. 1672 + */ 1673 + sup_dev = get_dev_from_fwnode(sup); 1674 + __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode); 1675 + put_device(sup_dev); 1676 + } 1677 + 1678 + /* 1679 + * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of 1680 + * all the descendants. This proxy link step is needed to handle the 1681 + * case where the supplier is added before the consumer's parent device 1682 + * (@dev). 1683 + */ 1684 + while ((child = fwnode_get_next_available_child_node(fwnode, child))) 1685 + __fw_devlink_link_to_suppliers(dev, child); 1686 + } 1687 + 1470 1688 static void fw_devlink_link_device(struct device *dev) 1471 1689 { 1472 - int fw_ret; 1690 + struct fwnode_handle *fwnode = dev->fwnode; 1473 1691 1474 1692 if (!fw_devlink_flags) 1475 1693 return; 1476 1694 1477 - mutex_lock(&defer_fw_devlink_lock); 1478 - if (!defer_fw_devlink_count) 1479 - device_link_add_missing_supplier_links(); 1695 + fw_devlink_parse_fwtree(fwnode); 1480 1696 1481 - /* 1482 - * The device's fwnode not having add_links() doesn't affect if other 1483 - * consumers can find this device as a supplier. So, this check is 1484 - * intentionally placed after device_link_add_missing_supplier_links(). 
1485 - */ 1486 - if (!fwnode_has_op(dev->fwnode, add_links)) 1487 - goto out; 1488 - 1489 - /* 1490 - * If fw_devlink is being deferred, assume all devices have mandatory 1491 - * suppliers they need to link to later. Then, when the fw_devlink is 1492 - * resumed, all these devices will get a chance to try and link to any 1493 - * suppliers they have. 1494 - */ 1495 - if (!defer_fw_devlink_count) { 1496 - fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev); 1497 - if (fw_ret == -ENODEV && fw_devlink_is_permissive()) 1498 - fw_ret = -EAGAIN; 1499 - } else { 1500 - fw_ret = -ENODEV; 1501 - /* 1502 - * defer_hook is not used to add device to deferred_sync list 1503 - * until device is bound. Since deferred fw devlink also blocks 1504 - * probing, same list hook can be used for deferred_fw_devlink. 1505 - */ 1506 - list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink); 1507 - } 1508 - 1509 - if (fw_ret == -ENODEV) 1510 - device_link_wait_for_mandatory_supplier(dev); 1511 - else if (fw_ret) 1512 - device_link_wait_for_optional_supplier(dev); 1513 - 1514 - out: 1515 - mutex_unlock(&defer_fw_devlink_lock); 1697 + mutex_lock(&fwnode_link_lock); 1698 + __fw_devlink_link_to_consumers(dev); 1699 + __fw_devlink_link_to_suppliers(dev, fwnode); 1700 + mutex_unlock(&fwnode_link_lock); 1516 1701 } 1517 1702 1518 - /** 1519 - * fw_devlink_pause - Pause parsing of fwnode to create device links 1520 - * 1521 - * Calling this function defers any fwnode parsing to create device links until 1522 - * fw_devlink_resume() is called. Both these functions are ref counted and the 1523 - * caller needs to match the calls. 1524 - * 1525 - * While fw_devlink is paused: 1526 - * - Any device that is added won't have its fwnode parsed to create device 1527 - * links. 1528 - * - The probe of the device will also be deferred during this period. 1529 - * - Any devices that were already added, but waiting for suppliers won't be 1530 - * able to link to newly added devices. 
1531 - * 1532 - * Once fw_devlink_resume(): 1533 - * - All the fwnodes that was not parsed will be parsed. 1534 - * - All the devices that were deferred probing will be reattempted if they 1535 - * aren't waiting for any more suppliers. 1536 - * 1537 - * This pair of functions, is mainly meant to optimize the parsing of fwnodes 1538 - * when a lot of devices that need to link to each other are added in a short 1539 - * interval of time. For example, adding all the top level devices in a system. 1540 - * 1541 - * For example, if N devices are added and: 1542 - * - All the consumers are added before their suppliers 1543 - * - All the suppliers of the N devices are part of the N devices 1544 - * 1545 - * Then: 1546 - * 1547 - * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device 1548 - * will only need one parsing of its fwnode because it is guaranteed to find 1549 - * all the supplier devices already registered and ready to link to. It won't 1550 - * have to do another pass later to find one or more suppliers it couldn't 1551 - * find in the first parse of the fwnode. So, we'll only need O(N) fwnode 1552 - * parses. 1553 - * 1554 - * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would 1555 - * end up doing O(N^2) parses of fwnodes because every device that's added is 1556 - * guaranteed to trigger a parse of the fwnode of every device added before 1557 - * it. This O(N^2) parse is made worse by the fact that when a fwnode of a 1558 - * device is parsed, all it descendant devices might need to have their 1559 - * fwnodes parsed too (even if the devices themselves aren't added). 
1560 - */ 1561 - void fw_devlink_pause(void) 1562 - { 1563 - mutex_lock(&defer_fw_devlink_lock); 1564 - defer_fw_devlink_count++; 1565 - mutex_unlock(&defer_fw_devlink_lock); 1566 - } 1567 - 1568 - /** fw_devlink_resume - Resume parsing of fwnode to create device links 1569 - * 1570 - * This function is used in conjunction with fw_devlink_pause() and is ref 1571 - * counted. See documentation for fw_devlink_pause() for more details. 1572 - */ 1573 - void fw_devlink_resume(void) 1574 - { 1575 - struct device *dev, *tmp; 1576 - LIST_HEAD(probe_list); 1577 - 1578 - mutex_lock(&defer_fw_devlink_lock); 1579 - if (!defer_fw_devlink_count) { 1580 - WARN(true, "Unmatched fw_devlink pause/resume!"); 1581 - goto out; 1582 - } 1583 - 1584 - defer_fw_devlink_count--; 1585 - if (defer_fw_devlink_count) 1586 - goto out; 1587 - 1588 - device_link_add_missing_supplier_links(); 1589 - list_splice_tail_init(&deferred_fw_devlink, &probe_list); 1590 - out: 1591 - mutex_unlock(&defer_fw_devlink_lock); 1592 - 1593 - /* 1594 - * bus_probe_device() can cause new devices to get added and they'll 1595 - * try to grab defer_fw_devlink_lock. So, this needs to be done outside 1596 - * the defer_fw_devlink_lock. 1597 - */ 1598 - list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) { 1599 - list_del_init(&dev->links.defer_hook); 1600 - bus_probe_device(dev); 1601 - } 1602 - } 1603 1703 /* Device links support end. 
*/ 1604 1704 1605 1705 int (*platform_notify)(struct device *dev) = NULL; ··· 2352 2196 goto err_remove_dev_groups; 2353 2197 } 2354 2198 2355 - if (fw_devlink_flags && !fw_devlink_is_permissive()) { 2199 + if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) { 2356 2200 error = device_create_file(dev, &dev_attr_waiting_for_supplier); 2357 2201 if (error) 2358 2202 goto err_remove_dev_online; ··· 2583 2427 #endif 2584 2428 INIT_LIST_HEAD(&dev->links.consumers); 2585 2429 INIT_LIST_HEAD(&dev->links.suppliers); 2586 - INIT_LIST_HEAD(&dev->links.needs_suppliers); 2587 - INIT_LIST_HEAD(&dev->links.defer_hook); 2430 + INIT_LIST_HEAD(&dev->links.defer_sync); 2588 2431 dev->links.status = DL_DEV_NO_DRIVER; 2589 2432 } 2590 2433 EXPORT_SYMBOL_GPL(device_initialize);
+8 -1
drivers/base/dd.c
··· 371 371 device_pm_check_callbacks(dev); 372 372 373 373 /* 374 + * Reorder successfully probed devices to the end of the device list. 375 + * This ensures that suspend/resume order matches probe order, which 376 + * is usually what drivers rely on. 377 + */ 378 + device_pm_move_to_tail(dev); 379 + 380 + /* 374 381 * Make sure the device is no longer in one of the deferred lists and 375 382 * kick off retrying all pending devices 376 383 */ ··· 724 717 * 725 718 * If the device has a parent, runtime-resume the parent before driver probing. 726 719 */ 727 - int driver_probe_device(struct device_driver *drv, struct device *dev) 720 + static int driver_probe_device(struct device_driver *drv, struct device *dev) 728 721 { 729 722 int ret = 0; 730 723
+1 -1
drivers/base/devres.c
··· 149 149 EXPORT_SYMBOL_GPL(__devres_alloc_node); 150 150 #else 151 151 /** 152 - * devres_alloc - Allocate device resource data 152 + * devres_alloc_node - Allocate device resource data 153 153 * @release: Release function devres will be associated with 154 154 * @size: Allocation size 155 155 * @gfp: Allocation flags
+1 -1
drivers/base/firmware_loader/fallback.c
··· 128 128 } 129 129 130 130 /** 131 - * firmware_timeout_store() - set number of seconds to wait for firmware 131 + * timeout_store() - set number of seconds to wait for firmware 132 132 * @class: device class pointer 133 133 * @attr: device attribute pointer 134 134 * @buf: buffer to scan for timeout value
+252 -222
drivers/base/platform.c
··· 63 63 } 64 64 EXPORT_SYMBOL_GPL(platform_get_resource); 65 65 66 + struct resource *platform_get_mem_or_io(struct platform_device *dev, 67 + unsigned int num) 68 + { 69 + u32 i; 70 + 71 + for (i = 0; i < dev->num_resources; i++) { 72 + struct resource *r = &dev->resource[i]; 73 + 74 + if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) 75 + return r; 76 + } 77 + return NULL; 78 + } 79 + EXPORT_SYMBOL_GPL(platform_get_mem_or_io); 80 + 66 81 #ifdef CONFIG_HAS_IOMEM 67 82 /** 68 83 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a ··· 758 743 } 759 744 EXPORT_SYMBOL_GPL(platform_device_register_full); 760 745 761 - static int platform_drv_probe(struct device *_dev) 762 - { 763 - struct platform_driver *drv = to_platform_driver(_dev->driver); 764 - struct platform_device *dev = to_platform_device(_dev); 765 - int ret; 766 - 767 - ret = of_clk_set_defaults(_dev->of_node, false); 768 - if (ret < 0) 769 - return ret; 770 - 771 - ret = dev_pm_domain_attach(_dev, true); 772 - if (ret) 773 - goto out; 774 - 775 - if (drv->probe) { 776 - ret = drv->probe(dev); 777 - if (ret) 778 - dev_pm_domain_detach(_dev, true); 779 - } 780 - 781 - out: 782 - if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { 783 - dev_warn(_dev, "probe deferral not supported\n"); 784 - ret = -ENXIO; 785 - } 786 - 787 - return ret; 788 - } 789 - 790 - static int platform_drv_probe_fail(struct device *_dev) 791 - { 792 - return -ENXIO; 793 - } 794 - 795 - static int platform_drv_remove(struct device *_dev) 796 - { 797 - struct platform_driver *drv = to_platform_driver(_dev->driver); 798 - struct platform_device *dev = to_platform_device(_dev); 799 - int ret = 0; 800 - 801 - if (drv->remove) 802 - ret = drv->remove(dev); 803 - dev_pm_domain_detach(_dev, true); 804 - 805 - return ret; 806 - } 807 - 808 - static void platform_drv_shutdown(struct device *_dev) 809 - { 810 - struct platform_driver *drv = to_platform_driver(_dev->driver); 811 - struct 
platform_device *dev = to_platform_device(_dev); 812 - 813 - if (drv->shutdown) 814 - drv->shutdown(dev); 815 - } 816 - 817 746 /** 818 747 * __platform_driver_register - register a driver for platform-level devices 819 748 * @drv: platform driver structure ··· 768 809 { 769 810 drv->driver.owner = owner; 770 811 drv->driver.bus = &platform_bus_type; 771 - drv->driver.probe = platform_drv_probe; 772 - drv->driver.remove = platform_drv_remove; 773 - drv->driver.shutdown = platform_drv_shutdown; 774 812 775 813 return driver_register(&drv->driver); 776 814 } ··· 782 826 driver_unregister(&drv->driver); 783 827 } 784 828 EXPORT_SYMBOL_GPL(platform_driver_unregister); 829 + 830 + static int platform_probe_fail(struct platform_device *pdev) 831 + { 832 + return -ENXIO; 833 + } 785 834 786 835 /** 787 836 * __platform_driver_probe - register driver for non-hotpluggable device ··· 848 887 * new devices fail. 849 888 */ 850 889 spin_lock(&drv->driver.bus->p->klist_drivers.k_lock); 851 - drv->probe = NULL; 890 + drv->probe = platform_probe_fail; 852 891 if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) 853 892 retval = -ENODEV; 854 - drv->driver.probe = platform_drv_probe_fail; 855 893 spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock); 856 894 857 895 if (code != retval) ··· 977 1017 } 978 1018 EXPORT_SYMBOL_GPL(platform_unregister_drivers); 979 1019 980 - /* modalias support enables more hands-off userspace setup: 981 - * (a) environment variable lets new-style hotplug events work once system is 982 - * fully running: "modprobe $MODALIAS" 983 - * (b) sysfs attribute lets new-style coldplug recover from hotplug events 984 - * mishandled before system is fully running: "modprobe $(cat modalias)" 985 - */ 986 - static ssize_t modalias_show(struct device *dev, 987 - struct device_attribute *attr, char *buf) 988 - { 989 - struct platform_device *pdev = to_platform_device(dev); 990 - int len; 991 - 992 - len = of_device_modalias(dev, buf, PAGE_SIZE); 993 - 
if (len != -ENODEV) 994 - return len; 995 - 996 - len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 997 - if (len != -ENODEV) 998 - return len; 999 - 1000 - return sysfs_emit(buf, "platform:%s\n", pdev->name); 1001 - } 1002 - static DEVICE_ATTR_RO(modalias); 1003 - 1004 - static ssize_t driver_override_store(struct device *dev, 1005 - struct device_attribute *attr, 1006 - const char *buf, size_t count) 1007 - { 1008 - struct platform_device *pdev = to_platform_device(dev); 1009 - char *driver_override, *old, *cp; 1010 - 1011 - /* We need to keep extra room for a newline */ 1012 - if (count >= (PAGE_SIZE - 1)) 1013 - return -EINVAL; 1014 - 1015 - driver_override = kstrndup(buf, count, GFP_KERNEL); 1016 - if (!driver_override) 1017 - return -ENOMEM; 1018 - 1019 - cp = strchr(driver_override, '\n'); 1020 - if (cp) 1021 - *cp = '\0'; 1022 - 1023 - device_lock(dev); 1024 - old = pdev->driver_override; 1025 - if (strlen(driver_override)) { 1026 - pdev->driver_override = driver_override; 1027 - } else { 1028 - kfree(driver_override); 1029 - pdev->driver_override = NULL; 1030 - } 1031 - device_unlock(dev); 1032 - 1033 - kfree(old); 1034 - 1035 - return count; 1036 - } 1037 - 1038 - static ssize_t driver_override_show(struct device *dev, 1039 - struct device_attribute *attr, char *buf) 1040 - { 1041 - struct platform_device *pdev = to_platform_device(dev); 1042 - ssize_t len; 1043 - 1044 - device_lock(dev); 1045 - len = sysfs_emit(buf, "%s\n", pdev->driver_override); 1046 - device_unlock(dev); 1047 - 1048 - return len; 1049 - } 1050 - static DEVICE_ATTR_RW(driver_override); 1051 - 1052 - static ssize_t numa_node_show(struct device *dev, 1053 - struct device_attribute *attr, char *buf) 1054 - { 1055 - return sysfs_emit(buf, "%d\n", dev_to_node(dev)); 1056 - } 1057 - static DEVICE_ATTR_RO(numa_node); 1058 - 1059 - static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, 1060 - int n) 1061 - { 1062 - struct device *dev = container_of(kobj, 
typeof(*dev), kobj); 1063 - 1064 - if (a == &dev_attr_numa_node.attr && 1065 - dev_to_node(dev) == NUMA_NO_NODE) 1066 - return 0; 1067 - 1068 - return a->mode; 1069 - } 1070 - 1071 - static struct attribute *platform_dev_attrs[] = { 1072 - &dev_attr_modalias.attr, 1073 - &dev_attr_numa_node.attr, 1074 - &dev_attr_driver_override.attr, 1075 - NULL, 1076 - }; 1077 - 1078 - static struct attribute_group platform_dev_group = { 1079 - .attrs = platform_dev_attrs, 1080 - .is_visible = platform_dev_attrs_visible, 1081 - }; 1082 - __ATTRIBUTE_GROUPS(platform_dev); 1083 - 1084 - static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) 1085 - { 1086 - struct platform_device *pdev = to_platform_device(dev); 1087 - int rc; 1088 - 1089 - /* Some devices have extra OF data and an OF-style MODALIAS */ 1090 - rc = of_device_uevent_modalias(dev, env); 1091 - if (rc != -ENODEV) 1092 - return rc; 1093 - 1094 - rc = acpi_device_uevent_modalias(dev, env); 1095 - if (rc != -ENODEV) 1096 - return rc; 1097 - 1098 - add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, 1099 - pdev->name); 1100 - return 0; 1101 - } 1102 - 1103 1020 static const struct platform_device_id *platform_match_id( 1104 1021 const struct platform_device_id *id, 1105 1022 struct platform_device *pdev) ··· 989 1152 id++; 990 1153 } 991 1154 return NULL; 992 - } 993 - 994 - /** 995 - * platform_match - bind platform device to platform driver. 996 - * @dev: device. 997 - * @drv: driver. 998 - * 999 - * Platform device IDs are assumed to be encoded like this: 1000 - * "<name><instance>", where <name> is a short description of the type of 1001 - * device, like "pci" or "floppy", and <instance> is the enumerated 1002 - * instance of the device, like '0' or '42'. Driver IDs are simply 1003 - * "<name>". So, extract the <name> from the platform_device structure, 1004 - * and compare it against the name of the driver. Return whether they match 1005 - * or not. 
1006 - */ 1007 - static int platform_match(struct device *dev, struct device_driver *drv) 1008 - { 1009 - struct platform_device *pdev = to_platform_device(dev); 1010 - struct platform_driver *pdrv = to_platform_driver(drv); 1011 - 1012 - /* When driver_override is set, only bind to the matching driver */ 1013 - if (pdev->driver_override) 1014 - return !strcmp(pdev->driver_override, drv->name); 1015 - 1016 - /* Attempt an OF style match first */ 1017 - if (of_driver_match_device(dev, drv)) 1018 - return 1; 1019 - 1020 - /* Then try ACPI style match */ 1021 - if (acpi_driver_match_device(dev, drv)) 1022 - return 1; 1023 - 1024 - /* Then try to match against the id table */ 1025 - if (pdrv->id_table) 1026 - return platform_match_id(pdrv->id_table, pdev) != NULL; 1027 - 1028 - /* fall-back to driver name match */ 1029 - return (strcmp(pdev->name, drv->name) == 0); 1030 1155 } 1031 1156 1032 1157 #ifdef CONFIG_PM_SLEEP ··· 1135 1336 1136 1337 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 1137 1338 1339 + /* modalias support enables more hands-off userspace setup: 1340 + * (a) environment variable lets new-style hotplug events work once system is 1341 + * fully running: "modprobe $MODALIAS" 1342 + * (b) sysfs attribute lets new-style coldplug recover from hotplug events 1343 + * mishandled before system is fully running: "modprobe $(cat modalias)" 1344 + */ 1345 + static ssize_t modalias_show(struct device *dev, 1346 + struct device_attribute *attr, char *buf) 1347 + { 1348 + struct platform_device *pdev = to_platform_device(dev); 1349 + int len; 1350 + 1351 + len = of_device_modalias(dev, buf, PAGE_SIZE); 1352 + if (len != -ENODEV) 1353 + return len; 1354 + 1355 + len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 1356 + if (len != -ENODEV) 1357 + return len; 1358 + 1359 + return sysfs_emit(buf, "platform:%s\n", pdev->name); 1360 + } 1361 + static DEVICE_ATTR_RO(modalias); 1362 + 1363 + static ssize_t numa_node_show(struct device *dev, 1364 + struct device_attribute *attr, 
char *buf) 1365 + { 1366 + return sysfs_emit(buf, "%d\n", dev_to_node(dev)); 1367 + } 1368 + static DEVICE_ATTR_RO(numa_node); 1369 + 1370 + static ssize_t driver_override_show(struct device *dev, 1371 + struct device_attribute *attr, char *buf) 1372 + { 1373 + struct platform_device *pdev = to_platform_device(dev); 1374 + ssize_t len; 1375 + 1376 + device_lock(dev); 1377 + len = sysfs_emit(buf, "%s\n", pdev->driver_override); 1378 + device_unlock(dev); 1379 + 1380 + return len; 1381 + } 1382 + 1383 + static ssize_t driver_override_store(struct device *dev, 1384 + struct device_attribute *attr, 1385 + const char *buf, size_t count) 1386 + { 1387 + struct platform_device *pdev = to_platform_device(dev); 1388 + char *driver_override, *old, *cp; 1389 + 1390 + /* We need to keep extra room for a newline */ 1391 + if (count >= (PAGE_SIZE - 1)) 1392 + return -EINVAL; 1393 + 1394 + driver_override = kstrndup(buf, count, GFP_KERNEL); 1395 + if (!driver_override) 1396 + return -ENOMEM; 1397 + 1398 + cp = strchr(driver_override, '\n'); 1399 + if (cp) 1400 + *cp = '\0'; 1401 + 1402 + device_lock(dev); 1403 + old = pdev->driver_override; 1404 + if (strlen(driver_override)) { 1405 + pdev->driver_override = driver_override; 1406 + } else { 1407 + kfree(driver_override); 1408 + pdev->driver_override = NULL; 1409 + } 1410 + device_unlock(dev); 1411 + 1412 + kfree(old); 1413 + 1414 + return count; 1415 + } 1416 + static DEVICE_ATTR_RW(driver_override); 1417 + 1418 + static struct attribute *platform_dev_attrs[] = { 1419 + &dev_attr_modalias.attr, 1420 + &dev_attr_numa_node.attr, 1421 + &dev_attr_driver_override.attr, 1422 + NULL, 1423 + }; 1424 + 1425 + static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, 1426 + int n) 1427 + { 1428 + struct device *dev = container_of(kobj, typeof(*dev), kobj); 1429 + 1430 + if (a == &dev_attr_numa_node.attr && 1431 + dev_to_node(dev) == NUMA_NO_NODE) 1432 + return 0; 1433 + 1434 + return a->mode; 1435 + } 1436 + 
1437 + static struct attribute_group platform_dev_group = { 1438 + .attrs = platform_dev_attrs, 1439 + .is_visible = platform_dev_attrs_visible, 1440 + }; 1441 + __ATTRIBUTE_GROUPS(platform_dev); 1442 + 1443 + 1444 + /** 1445 + * platform_match - bind platform device to platform driver. 1446 + * @dev: device. 1447 + * @drv: driver. 1448 + * 1449 + * Platform device IDs are assumed to be encoded like this: 1450 + * "<name><instance>", where <name> is a short description of the type of 1451 + * device, like "pci" or "floppy", and <instance> is the enumerated 1452 + * instance of the device, like '0' or '42'. Driver IDs are simply 1453 + * "<name>". So, extract the <name> from the platform_device structure, 1454 + * and compare it against the name of the driver. Return whether they match 1455 + * or not. 1456 + */ 1457 + static int platform_match(struct device *dev, struct device_driver *drv) 1458 + { 1459 + struct platform_device *pdev = to_platform_device(dev); 1460 + struct platform_driver *pdrv = to_platform_driver(drv); 1461 + 1462 + /* When driver_override is set, only bind to the matching driver */ 1463 + if (pdev->driver_override) 1464 + return !strcmp(pdev->driver_override, drv->name); 1465 + 1466 + /* Attempt an OF style match first */ 1467 + if (of_driver_match_device(dev, drv)) 1468 + return 1; 1469 + 1470 + /* Then try ACPI style match */ 1471 + if (acpi_driver_match_device(dev, drv)) 1472 + return 1; 1473 + 1474 + /* Then try to match against the id table */ 1475 + if (pdrv->id_table) 1476 + return platform_match_id(pdrv->id_table, pdev) != NULL; 1477 + 1478 + /* fall-back to driver name match */ 1479 + return (strcmp(pdev->name, drv->name) == 0); 1480 + } 1481 + 1482 + static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) 1483 + { 1484 + struct platform_device *pdev = to_platform_device(dev); 1485 + int rc; 1486 + 1487 + /* Some devices have extra OF data and an OF-style MODALIAS */ 1488 + rc = of_device_uevent_modalias(dev, env); 
1489 + if (rc != -ENODEV) 1490 + return rc; 1491 + 1492 + rc = acpi_device_uevent_modalias(dev, env); 1493 + if (rc != -ENODEV) 1494 + return rc; 1495 + 1496 + add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, 1497 + pdev->name); 1498 + return 0; 1499 + } 1500 + 1501 + static int platform_probe(struct device *_dev) 1502 + { 1503 + struct platform_driver *drv = to_platform_driver(_dev->driver); 1504 + struct platform_device *dev = to_platform_device(_dev); 1505 + int ret; 1506 + 1507 + /* 1508 + * A driver registered using platform_driver_probe() cannot be bound 1509 + * again later because the probe function usually lives in __init code 1510 + * and so is gone. For these drivers .probe is set to 1511 + * platform_probe_fail in __platform_driver_probe(). Don't even prepare 1512 + * clocks and PM domains for these to match the traditional behaviour. 1513 + */ 1514 + if (unlikely(drv->probe == platform_probe_fail)) 1515 + return -ENXIO; 1516 + 1517 + ret = of_clk_set_defaults(_dev->of_node, false); 1518 + if (ret < 0) 1519 + return ret; 1520 + 1521 + ret = dev_pm_domain_attach(_dev, true); 1522 + if (ret) 1523 + goto out; 1524 + 1525 + if (drv->probe) { 1526 + ret = drv->probe(dev); 1527 + if (ret) 1528 + dev_pm_domain_detach(_dev, true); 1529 + } 1530 + 1531 + out: 1532 + if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { 1533 + dev_warn(_dev, "probe deferral not supported\n"); 1534 + ret = -ENXIO; 1535 + } 1536 + 1537 + return ret; 1538 + } 1539 + 1540 + static int platform_remove(struct device *_dev) 1541 + { 1542 + struct platform_driver *drv = to_platform_driver(_dev->driver); 1543 + struct platform_device *dev = to_platform_device(_dev); 1544 + int ret = 0; 1545 + 1546 + if (drv->remove) 1547 + ret = drv->remove(dev); 1548 + dev_pm_domain_detach(_dev, true); 1549 + 1550 + return ret; 1551 + } 1552 + 1553 + static void platform_shutdown(struct device *_dev) 1554 + { 1555 + struct platform_device *dev = to_platform_device(_dev); 1556 + struct 
platform_driver *drv; 1557 + 1558 + if (!_dev->driver) 1559 + return; 1560 + 1561 + drv = to_platform_driver(_dev->driver); 1562 + if (drv->shutdown) 1563 + drv->shutdown(dev); 1564 + } 1565 + 1566 + 1138 1567 int platform_dma_configure(struct device *dev) 1139 1568 { 1140 1569 enum dev_dma_attr attr; ··· 1389 1362 .dev_groups = platform_dev_groups, 1390 1363 .match = platform_match, 1391 1364 .uevent = platform_uevent, 1365 + .probe = platform_probe, 1366 + .remove = platform_remove, 1367 + .shutdown = platform_shutdown, 1392 1368 .dma_configure = platform_dma_configure, 1393 1369 .pm = &platform_dev_pm_ops, 1394 1370 };
+52
drivers/base/property.c
··· 615 615 EXPORT_SYMBOL_GPL(fwnode_get_next_parent); 616 616 617 617 /** 618 + * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode 619 + * @fwnode: firmware node 620 + * 621 + * Given a firmware node (@fwnode), this function finds its closest ancestor 622 + * firmware node that has a corresponding struct device and returns that struct 623 + * device. 624 + * 625 + * The caller of this function is expected to call put_device() on the returned 626 + * device when they are done. 627 + */ 628 + struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode) 629 + { 630 + struct device *dev = NULL; 631 + 632 + fwnode_handle_get(fwnode); 633 + do { 634 + fwnode = fwnode_get_next_parent(fwnode); 635 + if (fwnode) 636 + dev = get_dev_from_fwnode(fwnode); 637 + } while (fwnode && !dev); 638 + fwnode_handle_put(fwnode); 639 + return dev; 640 + } 641 + 642 + /** 618 643 * fwnode_count_parents - Return the number of parents a node has 619 644 * @fwnode: The node the parents of which are to be counted 620 645 * ··· 684 659 return fwnode; 685 660 } 686 661 EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); 662 + 663 + /** 664 + * fwnode_is_ancestor_of - Test if @test_ancestor is ancestor of @test_child 665 + * @test_ancestor: Firmware which is tested for being an ancestor 666 + * @test_child: Firmware which is tested for being the child 667 + * 668 + * A node is considered an ancestor of itself too. 669 + * 670 + * Returns true if @test_ancestor is an ancestor of @test_child. 671 + * Otherwise, returns false. 
672 + */ 673 + bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor, 674 + struct fwnode_handle *test_child) 675 + { 676 + if (!test_ancestor) 677 + return false; 678 + 679 + fwnode_handle_get(test_child); 680 + while (test_child) { 681 + if (test_child == test_ancestor) { 682 + fwnode_handle_put(test_child); 683 + return true; 684 + } 685 + test_child = fwnode_get_next_parent(test_child); 686 + } 687 + return false; 688 + } 687 689 688 690 /** 689 691 * fwnode_get_next_child_node - Return the next child node handle for a node
+1 -1
drivers/base/soc.c
··· 168 168 } 169 169 EXPORT_SYMBOL_GPL(soc_device_register); 170 170 171 - /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */ 171 + /* Ensure soc_dev->attr is freed after calling soc_device_unregister. */ 172 172 void soc_device_unregister(struct soc_device *soc_dev) 173 173 { 174 174 device_unregister(&soc_dev->dev);
+1 -1
drivers/base/swnode.c
··· 653 653 swnode->parent = parent; 654 654 swnode->allocated = allocated; 655 655 swnode->kobj.kset = swnode_kset; 656 - swnode->fwnode.ops = &software_node_ops; 656 + fwnode_init(&swnode->fwnode, &software_node_ops); 657 657 658 658 ida_init(&swnode->child_ids); 659 659 INIT_LIST_HEAD(&swnode->entry);
+6 -26
drivers/firmware/efi/efi-init.c
··· 316 316 * resource reservation conflict on the memory window that the efifb 317 317 * framebuffer steals from the PCIe host bridge. 318 318 */ 319 - static int efifb_add_links(const struct fwnode_handle *fwnode, 320 - struct device *dev) 319 + static int efifb_add_links(struct fwnode_handle *fwnode) 321 320 { 322 321 struct device_node *sup_np; 323 - struct device *sup_dev; 324 322 325 323 sup_np = find_pci_overlap_node(); 326 324 ··· 329 331 if (!sup_np) 330 332 return 0; 331 333 332 - sup_dev = get_dev_from_fwnode(&sup_np->fwnode); 334 + fwnode_link_add(fwnode, of_fwnode_handle(sup_np)); 333 335 of_node_put(sup_np); 334 - 335 - /* 336 - * Return -ENODEV if the PCI graphics controller device hasn't been 337 - * registered yet. This ensures that efifb isn't allowed to probe 338 - * and this function is retried again when new devices are 339 - * registered. 340 - */ 341 - if (!sup_dev) 342 - return -ENODEV; 343 - 344 - /* 345 - * If this fails, retrying this function at a later point won't 346 - * change anything. So, don't return an error after this. 347 - */ 348 - if (!device_link_add(dev, sup_dev, fw_devlink_get_flags())) 349 - dev_warn(dev, "device_link_add() failed\n"); 350 - 351 - put_device(sup_dev); 352 336 353 337 return 0; 354 338 } ··· 339 359 .add_links = efifb_add_links, 340 360 }; 341 361 342 - static struct fwnode_handle efifb_fwnode = { 343 - .ops = &efifb_fwnode_ops, 344 - }; 362 + static struct fwnode_handle efifb_fwnode; 345 363 346 364 static int __init register_gop_device(void) 347 365 { ··· 353 375 if (!pd) 354 376 return -ENOMEM; 355 377 356 - if (IS_ENABLED(CONFIG_PCI)) 378 + if (IS_ENABLED(CONFIG_PCI)) { 379 + fwnode_init(&efifb_fwnode, &efifb_fwnode_ops); 357 380 pd->dev.fwnode = &efifb_fwnode; 381 + } 358 382 359 383 err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); 360 384 if (err)
+19 -115
drivers/misc/pvpanic.c
··· 8 8 9 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 10 11 - #include <linux/acpi.h> 11 + #include <linux/io.h> 12 12 #include <linux/kernel.h> 13 13 #include <linux/kexec.h> 14 + #include <linux/mod_devicetable.h> 14 15 #include <linux/module.h> 15 - #include <linux/of.h> 16 - #include <linux/of_address.h> 17 16 #include <linux/platform_device.h> 18 17 #include <linux/types.h> 18 + 19 19 #include <uapi/misc/pvpanic.h> 20 20 21 21 static void __iomem *base; ··· 49 49 .priority = 1, /* let this called before broken drm_fb_helper */ 50 50 }; 51 51 52 - #ifdef CONFIG_ACPI 53 - static int pvpanic_add(struct acpi_device *device); 54 - static int pvpanic_remove(struct acpi_device *device); 55 - 56 - static const struct acpi_device_id pvpanic_device_ids[] = { 57 - { "QEMU0001", 0 }, 58 - { "", 0 } 59 - }; 60 - MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); 61 - 62 - static struct acpi_driver pvpanic_driver = { 63 - .name = "pvpanic", 64 - .class = "QEMU", 65 - .ids = pvpanic_device_ids, 66 - .ops = { 67 - .add = pvpanic_add, 68 - .remove = pvpanic_remove, 69 - }, 70 - .owner = THIS_MODULE, 71 - }; 72 - 73 - static acpi_status 74 - pvpanic_walk_resources(struct acpi_resource *res, void *context) 75 - { 76 - struct resource r; 77 - 78 - if (acpi_dev_resource_io(res, &r)) { 79 - #ifdef CONFIG_HAS_IOPORT_MAP 80 - base = ioport_map(r.start, resource_size(&r)); 81 - return AE_OK; 82 - #else 83 - return AE_ERROR; 84 - #endif 85 - } else if (acpi_dev_resource_memory(res, &r)) { 86 - base = ioremap(r.start, resource_size(&r)); 87 - return AE_OK; 88 - } 89 - 90 - return AE_ERROR; 91 - } 92 - 93 - static int pvpanic_add(struct acpi_device *device) 94 - { 95 - int ret; 96 - 97 - ret = acpi_bus_get_status(device); 98 - if (ret < 0) 99 - return ret; 100 - 101 - if (!device->status.enabled || !device->status.functional) 102 - return -ENODEV; 103 - 104 - acpi_walk_resources(device->handle, METHOD_NAME__CRS, 105 - pvpanic_walk_resources, NULL); 106 - 107 - if (!base) 108 - return 
-ENODEV; 109 - 110 - atomic_notifier_chain_register(&panic_notifier_list, 111 - &pvpanic_panic_nb); 112 - 113 - return 0; 114 - } 115 - 116 - static int pvpanic_remove(struct acpi_device *device) 117 - { 118 - 119 - atomic_notifier_chain_unregister(&panic_notifier_list, 120 - &pvpanic_panic_nb); 121 - iounmap(base); 122 - 123 - return 0; 124 - } 125 - 126 - static int pvpanic_register_acpi_driver(void) 127 - { 128 - return acpi_bus_register_driver(&pvpanic_driver); 129 - } 130 - 131 - static void pvpanic_unregister_acpi_driver(void) 132 - { 133 - acpi_bus_unregister_driver(&pvpanic_driver); 134 - } 135 - #else 136 - static int pvpanic_register_acpi_driver(void) 137 - { 138 - return -ENODEV; 139 - } 140 - 141 - static void pvpanic_unregister_acpi_driver(void) {} 142 - #endif 143 - 144 52 static int pvpanic_mmio_probe(struct platform_device *pdev) 145 53 { 146 - base = devm_platform_ioremap_resource(pdev, 0); 54 + struct device *dev = &pdev->dev; 55 + struct resource *res; 56 + 57 + res = platform_get_mem_or_io(pdev, 0); 58 + if (res && resource_type(res) == IORESOURCE_IO) 59 + base = devm_ioport_map(dev, res->start, resource_size(res)); 60 + else 61 + base = devm_ioremap_resource(dev, res); 147 62 if (IS_ERR(base)) 148 63 return PTR_ERR(base); 149 64 ··· 82 167 {} 83 168 }; 84 169 170 + static const struct acpi_device_id pvpanic_device_ids[] = { 171 + { "QEMU0001", 0 }, 172 + { "", 0 } 173 + }; 174 + MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); 175 + 85 176 static struct platform_driver pvpanic_mmio_driver = { 86 177 .driver = { 87 178 .name = "pvpanic-mmio", 88 179 .of_match_table = pvpanic_mmio_match, 180 + .acpi_match_table = pvpanic_device_ids, 89 181 }, 90 182 .probe = pvpanic_mmio_probe, 91 183 .remove = pvpanic_mmio_remove, 92 184 }; 93 - 94 - static int __init pvpanic_mmio_init(void) 95 - { 96 - if (acpi_disabled) 97 - return platform_driver_register(&pvpanic_mmio_driver); 98 - else 99 - return pvpanic_register_acpi_driver(); 100 - } 101 - 102 - static 
void __exit pvpanic_mmio_exit(void) 103 - { 104 - if (acpi_disabled) 105 - platform_driver_unregister(&pvpanic_mmio_driver); 106 - else 107 - pvpanic_unregister_acpi_driver(); 108 - } 109 - 110 - module_init(pvpanic_mmio_init); 111 - module_exit(pvpanic_mmio_exit); 185 + module_platform_driver(pvpanic_mmio_driver);
+1
drivers/of/dynamic.c
··· 356 356 357 357 property_list_free(node->properties); 358 358 property_list_free(node->deadprops); 359 + fwnode_links_purge(of_fwnode_handle(node)); 359 360 360 361 kfree(node->full_name); 361 362 kfree(node->data);
-2
drivers/of/platform.c
··· 538 538 } 539 539 540 540 /* Populate everything else. */ 541 - fw_devlink_pause(); 542 541 of_platform_default_populate(NULL, NULL, NULL); 543 - fw_devlink_resume(); 544 542 545 543 return 0; 546 544 }
+40 -109
drivers/of/property.c
··· 1038 1038 } 1039 1039 1040 1040 /** 1041 - * of_get_next_parent_dev - Add device link to supplier from supplier phandle 1042 - * @np: device tree node 1043 - * 1044 - * Given a device tree node (@np), this function finds its closest ancestor 1045 - * device tree node that has a corresponding struct device. 1046 - * 1047 - * The caller of this function is expected to call put_device() on the returned 1048 - * device when they are done. 1049 - */ 1050 - static struct device *of_get_next_parent_dev(struct device_node *np) 1051 - { 1052 - struct device *dev = NULL; 1053 - 1054 - of_node_get(np); 1055 - do { 1056 - np = of_get_next_parent(np); 1057 - if (np) 1058 - dev = get_dev_from_fwnode(&np->fwnode); 1059 - } while (np && !dev); 1060 - of_node_put(np); 1061 - return dev; 1062 - } 1063 - 1064 - /** 1065 - * of_link_to_phandle - Add device link to supplier from supplier phandle 1066 - * @dev: consumer device 1067 - * @sup_np: phandle to supplier device tree node 1041 + * of_link_to_phandle - Add fwnode link to supplier from supplier phandle 1042 + * @con_np: consumer device tree node 1043 + * @sup_np: supplier device tree node 1068 1044 * 1069 1045 * Given a phandle to a supplier device tree node (@sup_np), this function 1070 1046 * finds the device that owns the supplier device tree node and creates a ··· 1050 1074 * cases, it returns an error. 
1051 1075 * 1052 1076 * Returns: 1053 - * - 0 if link successfully created to supplier 1054 - * - -EAGAIN if linking to the supplier should be reattempted 1077 + * - 0 if fwnode link successfully created to supplier 1055 1078 * - -EINVAL if the supplier link is invalid and should not be created 1056 - * - -ENODEV if there is no device that corresponds to the supplier phandle 1079 + * - -ENODEV if struct device will never be create for supplier 1057 1080 */ 1058 - static int of_link_to_phandle(struct device *dev, struct device_node *sup_np, 1059 - u32 dl_flags) 1081 + static int of_link_to_phandle(struct device_node *con_np, 1082 + struct device_node *sup_np) 1060 1083 { 1061 - struct device *sup_dev, *sup_par_dev; 1062 - int ret = 0; 1084 + struct device *sup_dev; 1063 1085 struct device_node *tmp_np = sup_np; 1064 1086 1065 1087 of_node_get(sup_np); ··· 1080 1106 } 1081 1107 1082 1108 if (!sup_np) { 1083 - dev_dbg(dev, "Not linking to %pOFP - No device\n", tmp_np); 1109 + pr_debug("Not linking %pOFP to %pOFP - No device\n", 1110 + con_np, tmp_np); 1084 1111 return -ENODEV; 1085 1112 } 1086 1113 ··· 1090 1115 * descendant nodes. By definition, a child node can't be a functional 1091 1116 * dependency for the parent node. 1092 1117 */ 1093 - if (of_is_ancestor_of(dev->of_node, sup_np)) { 1094 - dev_dbg(dev, "Not linking to %pOFP - is descendant\n", sup_np); 1118 + if (of_is_ancestor_of(con_np, sup_np)) { 1119 + pr_debug("Not linking %pOFP to %pOFP - is descendant\n", 1120 + con_np, sup_np); 1095 1121 of_node_put(sup_np); 1096 1122 return -EINVAL; 1097 1123 } 1124 + 1125 + /* 1126 + * Don't create links to "early devices" that won't have struct devices 1127 + * created for them. 1128 + */ 1098 1129 sup_dev = get_dev_from_fwnode(&sup_np->fwnode); 1099 1130 if (!sup_dev && of_node_check_flag(sup_np, OF_POPULATED)) { 1100 - /* Early device without struct device. 
*/ 1101 - dev_dbg(dev, "Not linking to %pOFP - No struct device\n", 1102 - sup_np); 1131 + pr_debug("Not linking %pOFP to %pOFP - No struct device\n", 1132 + con_np, sup_np); 1103 1133 of_node_put(sup_np); 1104 1134 return -ENODEV; 1105 - } else if (!sup_dev) { 1106 - /* 1107 - * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports 1108 - * cycles. So cycle detection isn't necessary and shouldn't be 1109 - * done. 1110 - */ 1111 - if (dl_flags & DL_FLAG_SYNC_STATE_ONLY) { 1112 - of_node_put(sup_np); 1113 - return -EAGAIN; 1114 - } 1115 - 1116 - sup_par_dev = of_get_next_parent_dev(sup_np); 1117 - 1118 - if (sup_par_dev && device_is_dependent(dev, sup_par_dev)) { 1119 - /* Cyclic dependency detected, don't try to link */ 1120 - dev_dbg(dev, "Not linking to %pOFP - cycle detected\n", 1121 - sup_np); 1122 - ret = -EINVAL; 1123 - } else { 1124 - /* 1125 - * Can't check for cycles or no cycles. So let's try 1126 - * again later. 1127 - */ 1128 - ret = -EAGAIN; 1129 - } 1130 - 1131 - of_node_put(sup_np); 1132 - put_device(sup_par_dev); 1133 - return ret; 1134 1135 } 1135 - of_node_put(sup_np); 1136 - if (!device_link_add(dev, sup_dev, dl_flags)) 1137 - ret = -EINVAL; 1138 1136 put_device(sup_dev); 1139 - return ret; 1137 + 1138 + fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np)); 1139 + of_node_put(sup_np); 1140 + 1141 + return 0; 1140 1142 } 1141 1143 1142 1144 /** ··· 1313 1361 * that list phandles to suppliers. If @prop_name isn't one, this function 1314 1362 * doesn't do anything. 1315 1363 * 1316 - * If @prop_name is one, this function attempts to create device links from the 1317 - * consumer device @dev to all the devices of the suppliers listed in 1318 - * @prop_name. 1364 + * If @prop_name is one, this function attempts to create fwnode links from the 1365 + * consumer device tree node @con_np to all the suppliers device tree nodes 1366 + * listed in @prop_name. 
1319 1367 * 1320 - * Any failed attempt to create a device link will NOT result in an immediate 1368 + * Any failed attempt to create a fwnode link will NOT result in an immediate 1321 1369 * return. of_link_property() must create links to all the available supplier 1322 - * devices even when attempts to create a link to one or more suppliers fail. 1370 + * device tree nodes even when attempts to create a link to one or more 1371 + * suppliers fail. 1323 1372 */ 1324 - static int of_link_property(struct device *dev, struct device_node *con_np, 1325 - const char *prop_name) 1373 + static int of_link_property(struct device_node *con_np, const char *prop_name) 1326 1374 { 1327 1375 struct device_node *phandle; 1328 1376 const struct supplier_bindings *s = of_supplier_bindings; 1329 1377 unsigned int i = 0; 1330 1378 bool matched = false; 1331 1379 int ret = 0; 1332 - u32 dl_flags; 1333 - 1334 - if (dev->of_node == con_np) 1335 - dl_flags = fw_devlink_get_flags(); 1336 - else 1337 - dl_flags = DL_FLAG_SYNC_STATE_ONLY; 1338 1380 1339 1381 /* Do not stop at first failed link, link all available suppliers. 
*/ 1340 1382 while (!matched && s->parse_prop) { 1341 1383 while ((phandle = s->parse_prop(con_np, prop_name, i))) { 1342 1384 matched = true; 1343 1385 i++; 1344 - if (of_link_to_phandle(dev, phandle, dl_flags) 1345 - == -EAGAIN) 1346 - ret = -EAGAIN; 1386 + of_link_to_phandle(con_np, phandle); 1347 1387 of_node_put(phandle); 1348 1388 } 1349 1389 s++; ··· 1343 1399 return ret; 1344 1400 } 1345 1401 1346 - static int of_link_to_suppliers(struct device *dev, 1347 - struct device_node *con_np) 1402 + static int of_fwnode_add_links(struct fwnode_handle *fwnode) 1348 1403 { 1349 - struct device_node *child; 1350 1404 struct property *p; 1351 - int ret = 0; 1405 + struct device_node *con_np = to_of_node(fwnode); 1406 + 1407 + if (!con_np) 1408 + return -EINVAL; 1352 1409 1353 1410 for_each_property_of_node(con_np, p) 1354 - if (of_link_property(dev, con_np, p->name)) 1355 - ret = -ENODEV; 1411 + of_link_property(con_np, p->name); 1356 1412 1357 - for_each_available_child_of_node(con_np, child) 1358 - if (of_link_to_suppliers(dev, child) && !ret) 1359 - ret = -EAGAIN; 1360 - 1361 - return ret; 1362 - } 1363 - 1364 - static int of_fwnode_add_links(const struct fwnode_handle *fwnode, 1365 - struct device *dev) 1366 - { 1367 - if (unlikely(!is_of_node(fwnode))) 1368 - return 0; 1369 - 1370 - return of_link_to_suppliers(dev, to_of_node(fwnode)); 1413 + return 0; 1371 1414 } 1372 1415 1373 1416 const struct fwnode_operations of_fwnode_ops = {
+9 -11
drivers/usb/host/sl811-hcd.c
··· 1614 1614 void __iomem *addr_reg; 1615 1615 void __iomem *data_reg; 1616 1616 int retval; 1617 - u8 tmp, ioaddr = 0; 1617 + u8 tmp, ioaddr; 1618 1618 unsigned long irqflags; 1619 1619 1620 1620 if (usb_disabled()) 1621 + return -ENODEV; 1622 + 1623 + /* the chip may be wired for either kind of addressing */ 1624 + addr = platform_get_mem_or_io(dev, 0); 1625 + data = platform_get_mem_or_io(dev, 1); 1626 + if (!addr || !data || resource_type(addr) != resource_type(data)) 1621 1627 return -ENODEV; 1622 1628 1623 1629 /* basic sanity checks first. board-specific init logic should ··· 1638 1632 irq = ires->start; 1639 1633 irqflags = ires->flags & IRQF_TRIGGER_MASK; 1640 1634 1641 - /* the chip may be wired for either kind of addressing */ 1642 - addr = platform_get_resource(dev, IORESOURCE_MEM, 0); 1643 - data = platform_get_resource(dev, IORESOURCE_MEM, 1); 1644 - retval = -EBUSY; 1645 - if (!addr || !data) { 1646 - addr = platform_get_resource(dev, IORESOURCE_IO, 0); 1647 - data = platform_get_resource(dev, IORESOURCE_IO, 1); 1648 - if (!addr || !data) 1649 - return -ENODEV; 1650 - ioaddr = 1; 1635 + ioaddr = resource_type(addr) == IORESOURCE_IO; 1636 + if (ioaddr) { 1651 1637 /* 1652 1638 * NOTE: 64-bit resource->start is getting truncated 1653 1639 * to avoid compiler warning, assuming that ->start
+1 -12
drivers/vfio/platform/vfio_platform.c
··· 25 25 int num) 26 26 { 27 27 struct platform_device *dev = (struct platform_device *) vdev->opaque; 28 - int i; 29 28 30 - for (i = 0; i < dev->num_resources; i++) { 31 - struct resource *r = &dev->resource[i]; 32 - 33 - if (resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) { 34 - if (!num) 35 - return r; 36 - 37 - num--; 38 - } 39 - } 40 - return NULL; 29 + return platform_get_mem_or_io(dev, num); 41 30 } 42 31 43 32 static int get_platform_irq(struct vfio_platform_device *vdev, int i)
+2 -3
fs/kernfs/dir.c
··· 604 604 */ 605 605 struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) 606 606 { 607 - if (dentry->d_sb->s_op == &kernfs_sops && 608 - !d_really_is_negative(dentry)) 607 + if (dentry->d_sb->s_op == &kernfs_sops) 609 608 return kernfs_dentry_node(dentry); 610 609 return NULL; 611 610 } ··· 1598 1599 return error; 1599 1600 } 1600 1601 1601 - /* Relationship between s_mode and the DT_xxx types */ 1602 + /* Relationship between mode and the DT_xxx types */ 1602 1603 static inline unsigned char dt_type(struct kernfs_node *kn) 1603 1604 { 1604 1605 return (kn->mode >> 12) & 15;
+1 -1
include/linux/acpi.h
··· 55 55 if (!fwnode) 56 56 return NULL; 57 57 58 - fwnode->ops = &acpi_static_fwnode_ops; 58 + fwnode_init(fwnode, &acpi_static_fwnode_ops); 59 59 60 60 return fwnode; 61 61 }
+2 -8
include/linux/device.h
··· 351 351 * struct dev_links_info - Device data related to device links. 352 352 * @suppliers: List of links to supplier devices. 353 353 * @consumers: List of links to consumer devices. 354 - * @needs_suppliers: Hook to global list of devices waiting for suppliers. 355 - * @defer_hook: Hook to global list of devices that have deferred sync_state or 356 - * deferred fw_devlink. 357 - * @need_for_probe: If needs_suppliers is on a list, this indicates if the 358 - * suppliers are needed for probe or not. 354 + * @defer_sync: Hook to global list of devices that have deferred sync_state. 359 355 * @status: Driver status information. 360 356 */ 361 357 struct dev_links_info { 362 358 struct list_head suppliers; 363 359 struct list_head consumers; 364 - struct list_head needs_suppliers; 365 - struct list_head defer_hook; 366 - bool need_for_probe; 360 + struct list_head defer_sync; 367 361 enum dl_dev_state status; 368 362 }; 369 363
+14
include/linux/device/class.h
··· 256 256 257 257 /* This is a #define to keep the compiler from merging different 258 258 * instances of the __key variable */ 259 + 260 + /** 261 + * class_create - create a struct class structure 262 + * @owner: pointer to the module that is to "own" this struct class 263 + * @name: pointer to a string for the name of this class. 264 + * 265 + * This is used to create a struct class pointer that can then be used 266 + * in calls to device_create(). 267 + * 268 + * Returns &struct class pointer on success, or ERR_PTR() on error. 269 + * 270 + * Note, the pointer created here is to be destroyed when finished by 271 + * making a call to class_destroy(). 272 + */ 259 273 #define class_create(owner, name) \ 260 274 ({ \ 261 275 static struct lock_class_key __key; \
+31 -42
include/linux/fwnode.h
··· 10 10 #define _LINUX_FWNODE_H_ 11 11 12 12 #include <linux/types.h> 13 + #include <linux/list.h> 13 14 14 15 struct fwnode_operations; 15 16 struct device; 17 + 18 + /* 19 + * fwnode link flags 20 + * 21 + * LINKS_ADDED: The fwnode has already be parsed to add fwnode links. 22 + */ 23 + #define FWNODE_FLAG_LINKS_ADDED BIT(0) 16 24 17 25 struct fwnode_handle { 18 26 struct fwnode_handle *secondary; 19 27 const struct fwnode_operations *ops; 20 28 struct device *dev; 29 + struct list_head suppliers; 30 + struct list_head consumers; 31 + u8 flags; 32 + }; 33 + 34 + struct fwnode_link { 35 + struct fwnode_handle *supplier; 36 + struct list_head s_hook; 37 + struct fwnode_handle *consumer; 38 + struct list_head c_hook; 21 39 }; 22 40 23 41 /** ··· 86 68 * endpoint node. 87 69 * @graph_get_port_parent: Return the parent node of a port node. 88 70 * @graph_parse_endpoint: Parse endpoint for port and endpoint id. 89 - * @add_links: Called after the device corresponding to the fwnode is added 90 - * using device_add(). The function is expected to create device 91 - * links to all the suppliers of the device that are available at 92 - * the time this function is called. The function must NOT stop 93 - * at the first failed device link if other unlinked supplier 94 - * devices are present in the system. This is necessary for the 95 - * driver/bus sync_state() callbacks to work correctly. 96 - * 97 - * For example, say Device-C depends on suppliers Device-S1 and 98 - * Device-S2 and the dependency is listed in that order in the 99 - * firmware. Say, S1 gets populated from the firmware after 100 - * late_initcall_sync(). Say S2 is populated and probed way 101 - * before that in device_initcall(). When C is populated, if this 102 - * add_links() function doesn't continue past a "failed linking to 103 - * S1" and continue linking C to S2, then S2 will get a 104 - * sync_state() callback before C is probed. 
This is because from 105 - * the perspective of S2, C was never a consumer when its 106 - * sync_state() evaluation is done. To avoid this, the add_links() 107 - * function has to go through all available suppliers of the 108 - * device (that corresponds to this fwnode) and link to them 109 - * before returning. 110 - * 111 - * If some suppliers are not yet available (indicated by an error 112 - * return value), this function will be called again when other 113 - * devices are added to allow creating device links to any newly 114 - * available suppliers. 115 - * 116 - * Return 0 if device links have been successfully created to all 117 - * the known suppliers of this device or if the supplier 118 - * information is not known. 119 - * 120 - * Return -ENODEV if the suppliers needed for probing this device 121 - * have not been registered yet (because device links can only be 122 - * created to devices registered with the driver core). 123 - * 124 - * Return -EAGAIN if some of the suppliers of this device have not 125 - * been registered yet, but none of those suppliers are necessary 126 - * for probing the device. 71 + * @add_links: Create fwnode links to all the suppliers of the fwnode. Return 72 + * zero on success, a negative error code otherwise. 
127 73 */ 128 74 struct fwnode_operations { 129 75 struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); ··· 127 145 (*graph_get_port_parent)(struct fwnode_handle *fwnode); 128 146 int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode, 129 147 struct fwnode_endpoint *endpoint); 130 - int (*add_links)(const struct fwnode_handle *fwnode, 131 - struct device *dev); 148 + int (*add_links)(struct fwnode_handle *fwnode); 132 149 }; 133 150 134 151 #define fwnode_has_op(fwnode, op) \ ··· 151 170 } while (false) 152 171 #define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) 153 172 173 + static inline void fwnode_init(struct fwnode_handle *fwnode, 174 + const struct fwnode_operations *ops) 175 + { 176 + fwnode->ops = ops; 177 + INIT_LIST_HEAD(&fwnode->consumers); 178 + INIT_LIST_HEAD(&fwnode->suppliers); 179 + } 180 + 154 181 extern u32 fw_devlink_get_flags(void); 155 - void fw_devlink_pause(void); 156 - void fw_devlink_resume(void); 182 + int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup); 183 + void fwnode_links_purge(struct fwnode_handle *fwnode); 157 184 158 185 #endif
+1 -1
include/linux/kernfs.h
··· 116 116 * kernfs node is represented by single kernfs_node. Most fields are 117 117 * private to kernfs and shouldn't be accessed directly by kernfs users. 118 118 * 119 - * As long as s_count reference is held, the kernfs_node itself is 119 + * As long as count reference is held, the kernfs_node itself is 120 120 * accessible. Dereferencing elem or any other outer entity requires 121 121 * active reference. 122 122 */
+2 -1
include/linux/of.h
··· 108 108 #if defined(CONFIG_OF_KOBJ) 109 109 kobject_init(&node->kobj, &of_node_ktype); 110 110 #endif 111 - node->fwnode.ops = &of_fwnode_ops; 111 + fwnode_init(&node->fwnode, &of_fwnode_ops); 112 112 } 113 113 114 114 #if defined(CONFIG_OF_KOBJ) ··· 1307 1307 #define _OF_DECLARE(table, name, compat, fn, fn_type) \ 1308 1308 static const struct of_device_id __of_table_##name \ 1309 1309 __used __section("__" #table "_of_table") \ 1310 + __aligned(__alignof__(struct of_device_id)) \ 1310 1311 = { .compatible = compat, \ 1311 1312 .data = (fn == (fn_type)NULL) ? fn : fn } 1312 1313 #else
+3
include/linux/platform_device.h
··· 52 52 53 53 extern struct resource *platform_get_resource(struct platform_device *, 54 54 unsigned int, unsigned int); 55 + extern struct resource *platform_get_mem_or_io(struct platform_device *, 56 + unsigned int); 57 + 55 58 extern struct device * 56 59 platform_find_device_by_driver(struct device *start, 57 60 const struct device_driver *drv);
+3
include/linux/property.h
··· 85 85 struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); 86 86 struct fwnode_handle *fwnode_get_next_parent( 87 87 struct fwnode_handle *fwnode); 88 + struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode); 88 89 unsigned int fwnode_count_parents(const struct fwnode_handle *fwn); 89 90 struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn, 90 91 unsigned int depth); 92 + bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor, 93 + struct fwnode_handle *test_child); 91 94 struct fwnode_handle *fwnode_get_next_child_node( 92 95 const struct fwnode_handle *fwnode, struct fwnode_handle *child); 93 96 struct fwnode_handle *fwnode_get_next_available_child_node(
+1 -1
kernel/irq/irqdomain.c
··· 100 100 fwid->type = type; 101 101 fwid->name = n; 102 102 fwid->pa = pa; 103 - fwid->fwnode.ops = &irqchip_fwnode_ops; 103 + fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops); 104 104 return &fwid->fwnode; 105 105 } 106 106 EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
+7 -2
lib/dynamic_debug.c
··· 561 561 int dynamic_debug_exec_queries(const char *query, const char *modname) 562 562 { 563 563 int rc; 564 - char *qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL); 564 + char *qry; /* writable copy of query */ 565 565 566 - if (!query) 566 + if (!query) { 567 + pr_err("non-null query/command string expected\n"); 568 + return -EINVAL; 569 + } 570 + qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL); 571 + if (!qry) 567 572 return -ENOMEM; 568 573 569 574 rc = ddebug_exec_queries(qry, modname);