What:		/sys/devices/.../deferred_probe
Date:		August 2016
Contact:	Ben Hutchings <ben.hutchings@codethink.co.uk>
Description:
		The /sys/devices/.../deferred_probe attribute is
		present for all devices. If a driver detects during
		probing a device that a related device is not yet
		ready, it may defer probing of the first device. The
		kernel will retry probing the first device after any
		other device is successfully probed. This attribute
		reads as 1 if probing of this device is currently
		deferred, or 0 otherwise.
/**
 * acpi_dma_configure - Set-up DMA configuration for the device.
 * @dev: The pointer to the device
 * @attr: device dma attributes
 *
 * Queries the IORT table to set up the device's DMA mask and to look up
 * the IOMMU ops for it, then installs the architecture DMA ops based on
 * the firmware-reported coherency attribute.
 */
void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
	const struct iommu_ops *iommu;

	iort_set_dma_mask(dev);

	iommu = iort_iommu_configure(dev);

	/*
	 * Assume dma valid range starts at 0 and covers the whole
	 * coherent_dma_mask.
	 */
	arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
			   attr == DEV_DMA_COHERENT);
}
EXPORT_SYMBOL_GPL(acpi_dma_configure);

/**
 * acpi_dma_deconfigure - Tear-down DMA configuration for the device.
 * @dev: The pointer to the device
 *
 * Reverses acpi_dma_configure() by tearing down the architecture DMA ops.
 */
void acpi_dma_deconfigure(struct device *dev)
{
	arch_teardown_dma_ops(dev);
}
EXPORT_SYMBOL_GPL(acpi_dma_deconfigure);
+2
drivers/base/Kconfig
···224224 unusable. You should say N here unless you are explicitly looking to225225 test this functionality.226226227227+source "drivers/base/test/Kconfig"228228+227229config SYS_HYPERVISOR228230 bool229231 default n
···1616 * You should have received a copy of the GNU General Public License1717 * along with this program. If not, see <http://www.gnu.org/licenses/>.1818 */1919+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt2020+2121+#include <linux/acpi.h>1922#include <linux/bitops.h>2023#include <linux/cacheinfo.h>2124#include <linux/compiler.h>···8885{8986 return sib_leaf->of_node == this_leaf->of_node;9087}8888+8989+/* OF properties to query for a given cache type */9090+struct cache_type_info {9191+ const char *size_prop;9292+ const char *line_size_props[2];9393+ const char *nr_sets_prop;9494+};9595+9696+static const struct cache_type_info cache_type_info[] = {9797+ {9898+ .size_prop = "cache-size",9999+ .line_size_props = { "cache-line-size",100100+ "cache-block-size", },101101+ .nr_sets_prop = "cache-sets",102102+ }, {103103+ .size_prop = "i-cache-size",104104+ .line_size_props = { "i-cache-line-size",105105+ "i-cache-block-size", },106106+ .nr_sets_prop = "i-cache-sets",107107+ }, {108108+ .size_prop = "d-cache-size",109109+ .line_size_props = { "d-cache-line-size",110110+ "d-cache-block-size", },111111+ .nr_sets_prop = "d-cache-sets",112112+ },113113+};114114+115115+static inline int get_cacheinfo_idx(enum cache_type type)116116+{117117+ if (type == CACHE_TYPE_UNIFIED)118118+ return 0;119119+ return type;120120+}121121+122122+static void cache_size(struct cacheinfo *this_leaf)123123+{124124+ const char *propname;125125+ const __be32 *cache_size;126126+ int ct_idx;127127+128128+ ct_idx = get_cacheinfo_idx(this_leaf->type);129129+ propname = cache_type_info[ct_idx].size_prop;130130+131131+ cache_size = of_get_property(this_leaf->of_node, propname, NULL);132132+ if (cache_size)133133+ this_leaf->size = of_read_number(cache_size, 1);134134+}135135+136136+/* not cache_line_size() because that's a macro in include/linux/cache.h */137137+static void cache_get_line_size(struct cacheinfo *this_leaf)138138+{139139+ const __be32 *line_size;140140+ int i, lim, ct_idx;141141+142142+ 
ct_idx = get_cacheinfo_idx(this_leaf->type);143143+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);144144+145145+ for (i = 0; i < lim; i++) {146146+ const char *propname;147147+148148+ propname = cache_type_info[ct_idx].line_size_props[i];149149+ line_size = of_get_property(this_leaf->of_node, propname, NULL);150150+ if (line_size)151151+ break;152152+ }153153+154154+ if (line_size)155155+ this_leaf->coherency_line_size = of_read_number(line_size, 1);156156+}157157+158158+static void cache_nr_sets(struct cacheinfo *this_leaf)159159+{160160+ const char *propname;161161+ const __be32 *nr_sets;162162+ int ct_idx;163163+164164+ ct_idx = get_cacheinfo_idx(this_leaf->type);165165+ propname = cache_type_info[ct_idx].nr_sets_prop;166166+167167+ nr_sets = of_get_property(this_leaf->of_node, propname, NULL);168168+ if (nr_sets)169169+ this_leaf->number_of_sets = of_read_number(nr_sets, 1);170170+}171171+172172+static void cache_associativity(struct cacheinfo *this_leaf)173173+{174174+ unsigned int line_size = this_leaf->coherency_line_size;175175+ unsigned int nr_sets = this_leaf->number_of_sets;176176+ unsigned int size = this_leaf->size;177177+178178+ /*179179+ * If the cache is fully associative, there is no need to180180+ * check the other properties.181181+ */182182+ if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))183183+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;184184+}185185+186186+static void cache_of_override_properties(unsigned int cpu)187187+{188188+ int index;189189+ struct cacheinfo *this_leaf;190190+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);191191+192192+ for (index = 0; index < cache_leaves(cpu); index++) {193193+ this_leaf = this_cpu_ci->info_list + index;194194+ cache_size(this_leaf);195195+ cache_get_line_size(this_leaf);196196+ cache_nr_sets(this_leaf);197197+ cache_associativity(this_leaf);198198+ }199199+}91200#else201201+static void cache_of_override_properties(unsigned int cpu) { 
/*
 * Apply firmware-provided cache property overrides for @cpu.
 * Only device-tree overrides exist so far; on a non-DT system
 * this is a no-op.
 */
static void cache_override_properties(unsigned int cpu)
{
	if (!of_have_populated_dt())
		return;

	cache_of_override_properties(cpu);
}
+578
drivers/base/core.c
/* Device links support. */

#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

/* Writers serialize on the mutex; readers go through SRCU. */
static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx)
{
	srcu_read_unlock(&device_links_srcu, idx);
}
#else /* !CONFIG_SRCU */
/* Without SRCU fall back to an rwsem; the read-lock token is unused. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}
#endif /* !CONFIG_SRCU */

/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
static int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (WARN_ON(dev == target))
		return 1;

	/* Recurse down the child tree first, then the consumer links. */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (WARN_ON(link->consumer == target))
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

/*
 * Move @dev, its children and all of its consumers to the ends of the
 * devices_kset list and dpm_list, so suspend ordering follows the new
 * dependency graph.  Always returns 0 (device_for_each_child callback).
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	struct device_link *link;

	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_reorder_to_tail(link->consumer, NULL);

	return 0;
}

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM.  First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account.  Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
 * when the consumer device driver unbinds from it.  The combination of both
 * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
 * to be returned.
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case.  The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	if (!consumer || !supplier ||
	    ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
		return NULL;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse dependency between the consumer and the supplier already in
	 * the graph, return NULL.
	 */
	if (!device_pm_initialized(supplier)
	    || device_is_dependent(consumer, supplier)) {
		link = NULL;
		goto out;
	}

	/* An existing link between the two devices is returned as-is. */
	list_for_each_entry(link, &supplier->links.consumers, s_node)
		if (link->consumer == consumer)
			goto out;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE) {
			if (pm_runtime_get_sync(supplier) < 0) {
				pm_runtime_put_noidle(supplier);
				kfree(link);
				link = NULL;
				goto out;
			}
			link->rpm_active = true;
		}
		pm_runtime_new_link(consumer);
	}
	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS) {
		link->status = DL_STATE_NONE;
	} else {
		switch (supplier->links.status) {
		case DL_DEV_DRIVER_BOUND:
			switch (consumer->links.status) {
			case DL_DEV_PROBING:
				/*
				 * Balance the decrementation of the supplier's
				 * runtime PM usage counter after consumer probe
				 * in driver_probe_device().
				 */
				if (flags & DL_FLAG_PM_RUNTIME)
					pm_runtime_get_sync(supplier);

				link->status = DL_STATE_CONSUMER_PROBE;
				break;
			case DL_DEV_DRIVER_BOUND:
				link->status = DL_STATE_ACTIVE;
				break;
			default:
				link->status = DL_STATE_AVAILABLE;
				break;
			}
			break;
		case DL_DEV_UNBINDING:
			link->status = DL_STATE_SUPPLIER_UNBIND;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
	}

	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

 out:
	device_pm_unlock();
	device_links_write_unlock();
	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);

/* Drop the device references taken in device_link_add() and free the link. */
static void device_link_free(struct device_link *link)
{
	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

#ifdef CONFIG_SRCU
static void __device_link_free_srcu(struct rcu_head *rhead)
{
	device_link_free(container_of(rhead, struct device_link, rcu_head));
}

/*
 * Unlink @link from both ends and free it after an SRCU grace period.
 * Caller must hold the device links write lock.
 */
static void __device_link_del(struct device_link *link)
{
	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	if (link->flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_drop_link(link->consumer);

	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else /* !CONFIG_SRCU */
static void __device_link_del(struct device_link *link)
{
	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	list_del(&link->s_node);
	list_del(&link->c_node);
	device_link_free(link);
}
#endif /* !CONFIG_SRCU */

/**
 * device_link_del - Delete a link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_pm_lock();
	__device_link_del(link);
	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/*
 * Roll back the "consumer probe" state of @dev's supplier links when a
 * supplier turned out to be unavailable during probe.
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node)
		if (link->status == DL_STATE_CONSUMER_PROBE)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers.  Walk the list of the device's
 * links to suppliers and see if all of them are available.  If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here.  It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers.  This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}

/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE)
			__device_link_del(link);
		else if (link->status != DL_STATE_SUPPLIER_UNBIND)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

/* Locked wrapper around __device_links_no_driver(). */
void device_links_no_driver(struct device *dev)
{
	device_links_write_lock();
	__device_links_no_driver(dev);
	device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present).  Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}

/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state.  If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (link->flags & DL_FLAG_STATELESS)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}

/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(link);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(link);
	}

	device_links_write_unlock();
}

/* Device links support end. */
+66-13
drivers/base/dd.c
···5353static LIST_HEAD(deferred_probe_active_list);5454static atomic_t deferred_trigger_count = ATOMIC_INIT(0);55555656+static ssize_t deferred_probe_show(struct device *dev,5757+ struct device_attribute *attr, char *buf)5858+{5959+ bool value;6060+6161+ mutex_lock(&deferred_probe_mutex);6262+ value = !list_empty(&dev->p->deferred_probe);6363+ mutex_unlock(&deferred_probe_mutex);6464+6565+ return sprintf(buf, "%d\n", value);6666+}6767+DEVICE_ATTR_RO(deferred_probe);6868+5669/*5770 * In some cases, like suspend to RAM or hibernation, It might be reasonable5871 * to prohibit probing of devices as it could be unsafe.···257244 __func__, dev_name(dev));258245259246 klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);247247+ device_links_driver_bound(dev);260248261249 device_pm_check_callbacks(dev);262250···352338 return ret;353339 }354340341341+ ret = device_links_check_suppliers(dev);342342+ if (ret)343343+ return ret;344344+355345 atomic_inc(&probe_count);356346 pr_debug("bus: '%s': %s: probing driver %s with device %s\n",357347 drv->bus->name, __func__, drv->name, dev_name(dev));···434416 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,435417 BUS_NOTIFY_DRIVER_NOT_BOUND, dev);436418pinctrl_bind_failed:419419+ device_links_no_driver(dev);437420 devres_release_all(dev);438421 driver_sysfs_remove(dev);439422 dev->driver = NULL;···527508 pr_debug("bus: '%s': %s: matched device %s with driver %s\n",528509 drv->bus->name, __func__, dev_name(dev), drv->name);529510511511+ pm_runtime_get_suppliers(dev);530512 if (dev->parent)531513 pm_runtime_get_sync(dev->parent);532514···538518 if (dev->parent)539519 pm_runtime_put(dev->parent);540520521521+ pm_runtime_put_suppliers(dev);541522 return ret;542523}543524···793772 * __device_release_driver() must be called with @dev lock held.794773 * When called for a USB interface, @dev->parent lock must be held as well.795774 */796796-static void __device_release_driver(struct device *dev)775775+static void 
/*
 * Lock @parent (if given, needed for USB interfaces), then @dev, and release
 * the driver via __device_release_driver().  If @drv is non-NULL, the release
 * only happens when @drv is the driver currently bound to @dev.
 */
void device_release_driver_internal(struct device *dev,
				    struct device_driver *drv,
				    struct device *parent)
{
	if (parent)
		device_lock(parent);

	device_lock(dev);
	if (!drv || drv == dev->driver)
		__device_release_driver(dev, parent);

	device_unlock(dev);
	if (parent)
		device_unlock(parent);
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 *
 * If this function is to be called with @dev->parent lock held, ensure that
 * the device's consumers are unbound in advance or that their locks can be
 * acquired under the @dev->parent lock.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
+83-4
drivers/base/power/main.c
/* Wait for all suppliers of @dev to complete their PM transitions. */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

/* Wait for everything @dev depends on: its parent and its suppliers. */
static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}

/* Wait for all consumers of @dev to complete their PM transitions. */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

/* Wait for everything that depends on @dev: its children and its consumers. */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/*
 * Clear the direct_complete flag of all suppliers of @dev, so they go
 * through the full suspend/resume callbacks rather than the fast path.
 */
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
···1212#include <linux/pm_runtime.h>1313#include <linux/pm_wakeirq.h>1414#include <trace/events/rpm.h>1515+1616+#include "../base.h"1517#include "power.h"16181719typedef int (*pm_callback_t)(struct device *);···260258 return retval;261259}262260261261+static int rpm_get_suppliers(struct device *dev)262262+{263263+ struct device_link *link;264264+265265+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {266266+ int retval;267267+268268+ if (!(link->flags & DL_FLAG_PM_RUNTIME))269269+ continue;270270+271271+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||272272+ link->rpm_active)273273+ continue;274274+275275+ retval = pm_runtime_get_sync(link->supplier);276276+ if (retval < 0) {277277+ pm_runtime_put_noidle(link->supplier);278278+ return retval;279279+ }280280+ link->rpm_active = true;281281+ }282282+ return 0;283283+}284284+285285+static void rpm_put_suppliers(struct device *dev)286286+{287287+ struct device_link *link;288288+289289+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)290290+ if (link->rpm_active &&291291+ READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {292292+ pm_runtime_put(link->supplier);293293+ link->rpm_active = false;294294+ }295295+}296296+263297/**264298 * __rpm_callback - Run a given runtime PM callback for a given device.265299 * @cb: Runtime PM callback to run.···304266static int __rpm_callback(int (*cb)(struct device *), struct device *dev)305267 __releases(&dev->power.lock) __acquires(&dev->power.lock)306268{307307- int retval;269269+ int retval, idx;270270+ bool use_links = dev->power.links_count > 0;308271309309- if (dev->power.irq_safe)272272+ if (dev->power.irq_safe) {310273 spin_unlock(&dev->power.lock);311311- else274274+ } else {312275 spin_unlock_irq(&dev->power.lock);276276+277277+ /*278278+ * Resume suppliers if necessary.279279+ *280280+ * The device's runtime PM status cannot change until this281281+ * routine returns, so it is safe to read the status outside of282282+ * the 
lock.283283+ */284284+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {285285+ idx = device_links_read_lock();286286+287287+ retval = rpm_get_suppliers(dev);288288+ if (retval)289289+ goto fail;290290+291291+ device_links_read_unlock(idx);292292+ }293293+ }313294314295 retval = cb(dev);315296316316- if (dev->power.irq_safe)297297+ if (dev->power.irq_safe) {317298 spin_lock(&dev->power.lock);318318- else299299+ } else {300300+ /*301301+ * If the device is suspending and the callback has returned302302+ * success, drop the usage counters of the suppliers that have303303+ * been reference counted on its resume.304304+ *305305+ * Do that if resume fails too.306306+ */307307+ if (use_links308308+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)309309+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {310310+ idx = device_links_read_lock();311311+312312+ fail:313313+ rpm_put_suppliers(dev);314314+315315+ device_links_read_unlock(idx);316316+ }317317+319318 spin_lock_irq(&dev->power.lock);319319+ }320320321321 return retval;322322}···15201444{15211445 __pm_runtime_disable(dev, false);15221446 pm_runtime_reinit(dev);14471447+}14481448+14491449+/**14501450+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.14511451+ * @dev: Device whose driver is going to be removed.14521452+ *14531453+ * Check links from this device to any consumers and if any of them have active14541454+ * runtime PM references to the device, drop the usage counter of the device14551455+ * (once per link).14561456+ *14571457+ * Links with the DL_FLAG_STATELESS flag set are ignored.14581458+ *14591459+ * Since the device is guaranteed to be runtime-active at the point this is14601460+ * called, nothing else needs to be done here.14611461+ *14621462+ * Moreover, this is called after device_links_busy() has returned 'false', so14631463+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and14641464+ * therefore rpm_active can't 
be manipulated concurrently.14651465+ */14661466+void pm_runtime_clean_up_links(struct device *dev)14671467+{14681468+ struct device_link *link;14691469+ int idx;14701470+14711471+ idx = device_links_read_lock();14721472+14731473+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {14741474+ if (link->flags & DL_FLAG_STATELESS)14751475+ continue;14761476+14771477+ if (link->rpm_active) {14781478+ pm_runtime_put_noidle(dev);14791479+ link->rpm_active = false;14801480+ }14811481+ }14821482+14831483+ device_links_read_unlock(idx);14841484+}14851485+14861486+/**14871487+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.14881488+ * @dev: Consumer device.14891489+ */14901490+void pm_runtime_get_suppliers(struct device *dev)14911491+{14921492+ struct device_link *link;14931493+ int idx;14941494+14951495+ idx = device_links_read_lock();14961496+14971497+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)14981498+ if (link->flags & DL_FLAG_PM_RUNTIME)14991499+ pm_runtime_get_sync(link->supplier);15001500+15011501+ device_links_read_unlock(idx);15021502+}15031503+15041504+/**15051505+ * pm_runtime_put_suppliers - Drop references to supplier devices.15061506+ * @dev: Consumer device.15071507+ */15081508+void pm_runtime_put_suppliers(struct device *dev)15091509+{15101510+ struct device_link *link;15111511+ int idx;15121512+15131513+ idx = device_links_read_lock();15141514+15151515+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)15161516+ if (link->flags & DL_FLAG_PM_RUNTIME)15171517+ pm_runtime_put(link->supplier);15181518+15191519+ device_links_read_unlock(idx);15201520+}15211521+15221522+void pm_runtime_new_link(struct device *dev)15231523+{15241524+ spin_lock_irq(&dev->power.lock);15251525+ dev->power.links_count++;15261526+ spin_unlock_irq(&dev->power.lock);15271527+}15281528+15291529+void pm_runtime_drop_link(struct device *dev)15301530+{15311531+ spin_lock_irq(&dev->power.lock);15321532+ 
WARN_ON(dev->power.links_count == 0);15331533+ dev->power.links_count--;15341534+ spin_unlock_irq(&dev->power.lock);15231535}1524153615251537/**
+9
drivers/base/test/Kconfig
···11+config TEST_ASYNC_DRIVER_PROBE22+ tristate "Build kernel module to test asynchronous driver probing"33+ depends on m44+ help55+ Enabling this option produces a kernel module that allows66+ testing asynchronous driver probing by the device core.77+ The module name will be test_async_driver_probe.ko88+99+ If unsure say N.
···805805 goto out_free_domain;806806807807 group = iommu_group_get(&pdev->dev);808808- if (!group)808808+ if (!group) {809809+ ret = -EINVAL;809810 goto out_free_domain;811811+ }810812811813 ret = iommu_attach_group(dev_state->domain, group);812814 if (ret != 0)
+82-22
drivers/iommu/arm-smmu-v3.c
···2020 * This driver is powered by bad coffee and bombay mix.2121 */22222323+#include <linux/acpi.h>2424+#include <linux/acpi_iort.h>2325#include <linux/delay.h>2426#include <linux/dma-iommu.h>2527#include <linux/err.h>···13601358 } while (size -= granule);13611359}1362136013631363-static struct iommu_gather_ops arm_smmu_gather_ops = {13611361+static const struct iommu_gather_ops arm_smmu_gather_ops = {13641362 .tlb_flush_all = arm_smmu_tlb_inv_context,13651363 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,13661364 .tlb_sync = arm_smmu_tlb_sync,···1725172317261724static int arm_smmu_match_node(struct device *dev, void *data)17271725{17281728- return dev->of_node == data;17261726+ return dev->fwnode == data;17291727}1730172817311731-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)17291729+static17301730+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)17321731{17331732 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,17341734- np, arm_smmu_match_node);17331733+ fwnode, arm_smmu_match_node);17351734 put_device(dev);17361735 return dev ? 
dev_get_drvdata(dev) : NULL;17371736}···17681765 master = fwspec->iommu_priv;17691766 smmu = master->smmu;17701767 } else {17711771- smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));17681768+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);17721769 if (!smmu)17731770 return -ENODEV;17741771 master = kzalloc(sizeof(*master), GFP_KERNEL);···23832380 return 0;23842381}2385238223862386-static int arm_smmu_device_probe(struct arm_smmu_device *smmu)23832383+static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)23872384{23882385 u32 reg;23892389- bool coherent;23862386+ bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;2390238723912388 /* IDR0 */23922389 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);···24382435 smmu->features |= ARM_SMMU_FEAT_HYP;2439243624402437 /*24412441- * The dma-coherent property is used in preference to the ID24382438+ * The coherency feature as set by FW is used in preference to the ID24422439 * register, but warn on mismatch.24432440 */24442444- coherent = of_dma_is_coherent(smmu->dev->of_node);24452445- if (coherent)24462446- smmu->features |= ARM_SMMU_FEAT_COHERENCY;24472447-24482441 if (!!(reg & IDR0_COHACC) != coherent)24492442 dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",24502443 coherent ? 
"true" : "false");···25612562 return 0;25622563}2563256425642564-static int arm_smmu_device_dt_probe(struct platform_device *pdev)25652565+#ifdef CONFIG_ACPI25662566+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,25672567+ struct arm_smmu_device *smmu)25652568{25662566- int irq, ret;25672567- struct resource *res;25682568- struct arm_smmu_device *smmu;25692569+ struct acpi_iort_smmu_v3 *iort_smmu;25702570+ struct device *dev = smmu->dev;25712571+ struct acpi_iort_node *node;25722572+25732573+ node = *(struct acpi_iort_node **)dev_get_platdata(dev);25742574+25752575+ /* Retrieve SMMUv3 specific data */25762576+ iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;25772577+25782578+ if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)25792579+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;25802580+25812581+ return 0;25822582+}25832583+#else25842584+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,25852585+ struct arm_smmu_device *smmu)25862586+{25872587+ return -ENODEV;25882588+}25892589+#endif25902590+25912591+static int arm_smmu_device_dt_probe(struct platform_device *pdev,25922592+ struct arm_smmu_device *smmu)25932593+{25692594 struct device *dev = &pdev->dev;25702570- bool bypass = true;25712595 u32 cells;25962596+ int ret = -EINVAL;2572259725732598 if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))25742599 dev_err(dev, "missing #iommu-cells property\n");25752600 else if (cells != 1)25762601 dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);25772602 else25782578- bypass = false;26032603+ ret = 0;26042604+26052605+ parse_driver_options(smmu);26062606+26072607+ if (of_dma_is_coherent(dev->of_node))26082608+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;26092609+26102610+ return ret;26112611+}26122612+26132613+static int arm_smmu_device_probe(struct platform_device *pdev)26142614+{26152615+ int irq, ret;26162616+ struct resource *res;26172617+ struct arm_smmu_device *smmu;26182618+ struct device *dev 
= &pdev->dev;26192619+ bool bypass;2579262025802621 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);25812622 if (!smmu) {···26522613 if (irq > 0)26532614 smmu->gerr_irq = irq;2654261526552655- parse_driver_options(smmu);26162616+ if (dev->of_node) {26172617+ ret = arm_smmu_device_dt_probe(pdev, smmu);26182618+ } else {26192619+ ret = arm_smmu_device_acpi_probe(pdev, smmu);26202620+ if (ret == -ENODEV)26212621+ return ret;26222622+ }26232623+26242624+ /* Set bypass mode according to firmware probing result */26252625+ bypass = !!ret;2656262626572627 /* Probe the h/w */26582658- ret = arm_smmu_device_probe(smmu);26282628+ ret = arm_smmu_device_hw_probe(smmu);26592629 if (ret)26602630 return ret;26612631···26822634 return ret;2683263526842636 /* And we're up. Go go go! */26852685- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);26372637+ iommu_register_instance(dev->fwnode, &arm_smmu_ops);26382638+26862639#ifdef CONFIG_PCI26872640 if (pci_bus_type.iommu_ops != &arm_smmu_ops) {26882641 pci_request_acs();···27262677 .name = "arm-smmu-v3",27272678 .of_match_table = of_match_ptr(arm_smmu_of_match),27282679 },27292729- .probe = arm_smmu_device_dt_probe,26802680+ .probe = arm_smmu_device_probe,27302681 .remove = arm_smmu_device_remove,27312682};27322683···27632714 return 0;27642715}27652716IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);27172717+27182718+#ifdef CONFIG_ACPI27192719+static int __init acpi_smmu_v3_init(struct acpi_table_header *table)27202720+{27212721+ if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))27222722+ return arm_smmu_init();27232723+27242724+ return 0;27252725+}27262726+IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);27272727+#endif2766272827672729MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");27682730MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+136-41
drivers/iommu/arm-smmu.c
···28282929#define pr_fmt(fmt) "arm-smmu: " fmt30303131+#include <linux/acpi.h>3232+#include <linux/acpi_iort.h>3133#include <linux/atomic.h>3234#include <linux/delay.h>3335#include <linux/dma-iommu.h>···249247#define ARM_MMU500_ACTLR_CPRE (1 << 1)250248251249#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)250250+#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)252251253252#define CB_PAR_F (1 << 0)254253···645642 }646643}647644648648-static struct iommu_gather_ops arm_smmu_gather_ops = {645645+static const struct iommu_gather_ops arm_smmu_gather_ops = {649646 .tlb_flush_all = arm_smmu_tlb_inv_context,650647 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,651648 .tlb_sync = arm_smmu_tlb_sync,···1382137913831380static int arm_smmu_match_node(struct device *dev, void *data)13841381{13851385- return dev->of_node == data;13821382+ return dev->fwnode == data;13861383}1387138413881388-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)13851385+static13861386+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)13891387{13901388 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,13911391- np, arm_smmu_match_node);13891389+ fwnode, arm_smmu_match_node);13921390 put_device(dev);13931391 return dev ? dev_get_drvdata(dev) : NULL;13941392}···14071403 if (ret)14081404 goto out_free;14091405 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {14101410- smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));14061406+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);14111407 } else {14121408 return -ENODEV;14131409 }···14821478 }1483147914841480 if (group)14851485- return group;14811481+ return iommu_group_ref_get(group);1486148214871483 if (dev_is_pci(dev))14881484 group = pci_device_group(dev);···15851581 for (i = 0; i < smmu->num_mapping_groups; ++i)15861582 arm_smmu_write_sme(smmu, i);1587158315881588- /*15891589- * Before clearing ARM_MMU500_ACTLR_CPRE, need to15901590- * clear CACHE_LOCK bit of ACR first. 
And, CACHE_LOCK15911591- * bit is only present in MMU-500r2 onwards.15921592- */15931593- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);15941594- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;15951595- if ((smmu->model == ARM_MMU500) && (major >= 2)) {15841584+ if (smmu->model == ARM_MMU500) {15851585+ /*15861586+ * Before clearing ARM_MMU500_ACTLR_CPRE, need to15871587+ * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK15881588+ * bit is only present in MMU-500r2 onwards.15891589+ */15901590+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);15911591+ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;15961592 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);15971597- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;15931593+ if (major >= 2)15941594+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;15951595+ /*15961596+ * Allow unmatched Stream IDs to allocate bypass15971597+ * TLB entries for reduced latency.15981598+ */15991599+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;15981600 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);15991601 }16001602···16771667 unsigned long size;16781668 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);16791669 u32 id;16801680- bool cttw_dt, cttw_reg;16701670+ bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;16811671 int i;1682167216831673 dev_notice(smmu->dev, "probing hardware configuration...\n");···1722171217231713 /*17241714 * In order for DMA API calls to work properly, we must defer to what17251725- * the DT says about coherency, regardless of what the hardware claims.17151715+ * the FW says about coherency, regardless of what the hardware claims.17261716 * Fortunately, this also opens up a workaround for systems where the17271717 * ID register value has ended up configured incorrectly.17281718 */17291729- cttw_dt = of_dma_is_coherent(smmu->dev->of_node);17301719 cttw_reg = !!(id & ID0_CTTW);17311731- if (cttw_dt)17321732- smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;17331733- if (cttw_dt || cttw_reg)17201720+ if (cttw_fw || 
cttw_reg)17341721 dev_notice(smmu->dev, "\t%scoherent table walk\n",17351735- cttw_dt ? "" : "non-");17361736- if (cttw_dt != cttw_reg)17221722+ cttw_fw ? "" : "non-");17231723+ if (cttw_fw != cttw_reg)17371724 dev_notice(smmu->dev,17381738- "\t(IDR0.CTTW overridden by dma-coherent property)\n");17251725+ "\t(IDR0.CTTW overridden by FW configuration)\n");1739172617401727 /* Max. number of entries we have for stream matching/indexing */17411728 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);···19131906};19141907MODULE_DEVICE_TABLE(of, arm_smmu_of_match);1915190819161916-static int arm_smmu_device_dt_probe(struct platform_device *pdev)19091909+#ifdef CONFIG_ACPI19101910+static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)19111911+{19121912+ int ret = 0;19131913+19141914+ switch (model) {19151915+ case ACPI_IORT_SMMU_V1:19161916+ case ACPI_IORT_SMMU_CORELINK_MMU400:19171917+ smmu->version = ARM_SMMU_V1;19181918+ smmu->model = GENERIC_SMMU;19191919+ break;19201920+ case ACPI_IORT_SMMU_V2:19211921+ smmu->version = ARM_SMMU_V2;19221922+ smmu->model = GENERIC_SMMU;19231923+ break;19241924+ case ACPI_IORT_SMMU_CORELINK_MMU500:19251925+ smmu->version = ARM_SMMU_V2;19261926+ smmu->model = ARM_MMU500;19271927+ break;19281928+ default:19291929+ ret = -ENODEV;19301930+ }19311931+19321932+ return ret;19331933+}19341934+19351935+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,19361936+ struct arm_smmu_device *smmu)19371937+{19381938+ struct device *dev = smmu->dev;19391939+ struct acpi_iort_node *node =19401940+ *(struct acpi_iort_node **)dev_get_platdata(dev);19411941+ struct acpi_iort_smmu *iort_smmu;19421942+ int ret;19431943+19441944+ /* Retrieve SMMU1/2 specific data */19451945+ iort_smmu = (struct acpi_iort_smmu *)node->node_data;19461946+19471947+ ret = acpi_smmu_get_data(iort_smmu->model, smmu);19481948+ if (ret < 0)19491949+ return ret;19501950+19511951+ /* Ignore the configuration access interrupt */19521952+ 
smmu->num_global_irqs = 1;19531953+19541954+ if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)19551955+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;19561956+19571957+ return 0;19581958+}19591959+#else19601960+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,19611961+ struct arm_smmu_device *smmu)19621962+{19631963+ return -ENODEV;19641964+}19651965+#endif19661966+19671967+static int arm_smmu_device_dt_probe(struct platform_device *pdev,19681968+ struct arm_smmu_device *smmu)19171969{19181970 const struct arm_smmu_match_data *data;19191919- struct resource *res;19201920- struct arm_smmu_device *smmu;19211971 struct device *dev = &pdev->dev;19221922- int num_irqs, i, err;19231972 bool legacy_binding;19731973+19741974+ if (of_property_read_u32(dev->of_node, "#global-interrupts",19751975+ &smmu->num_global_irqs)) {19761976+ dev_err(dev, "missing #global-interrupts property\n");19771977+ return -ENODEV;19781978+ }19791979+19801980+ data = of_device_get_match_data(dev);19811981+ smmu->version = data->version;19821982+ smmu->model = data->model;19831983+19841984+ parse_driver_options(smmu);1924198519251986 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);19261987 if (legacy_binding && !using_generic_binding) {···20021927 return -ENODEV;20031928 }2004192919301930+ if (of_dma_is_coherent(dev->of_node))19311931+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;19321932+19331933+ return 0;19341934+}19351935+19361936+static int arm_smmu_device_probe(struct platform_device *pdev)19371937+{19381938+ struct resource *res;19391939+ struct arm_smmu_device *smmu;19401940+ struct device *dev = &pdev->dev;19411941+ int num_irqs, i, err;19421942+20051943 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);20061944 if (!smmu) {20071945 dev_err(dev, "failed to allocate arm_smmu_device\n");···20221934 }20231935 smmu->dev = dev;2024193620252025- data = of_device_get_match_data(dev);20262026- smmu->version = data->version;20272027- smmu->model 
= data->model;19371937+ if (dev->of_node)19381938+ err = arm_smmu_device_dt_probe(pdev, smmu);19391939+ else19401940+ err = arm_smmu_device_acpi_probe(pdev, smmu);19411941+19421942+ if (err)19431943+ return err;2028194420291945 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);20301946 smmu->base = devm_ioremap_resource(dev, res);20311947 if (IS_ERR(smmu->base))20321948 return PTR_ERR(smmu->base);20331949 smmu->size = resource_size(res);20342034-20352035- if (of_property_read_u32(dev->of_node, "#global-interrupts",20362036- &smmu->num_global_irqs)) {20372037- dev_err(dev, "missing #global-interrupts property\n");20382038- return -ENODEV;20392039- }2040195020411951 num_irqs = 0;20421952 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {···20701984 if (err)20711985 return err;2072198620732073- parse_driver_options(smmu);20742074-20751987 if (smmu->version == ARM_SMMU_V2 &&20761988 smmu->num_context_banks != smmu->num_context_irqs) {20771989 dev_err(dev,···20912007 }20922008 }2093200920942094- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);20102010+ iommu_register_instance(dev->fwnode, &arm_smmu_ops);20952011 platform_set_drvdata(pdev, smmu);20962012 arm_smmu_device_reset(smmu);20972013···21312047 .name = "arm-smmu",21322048 .of_match_table = of_match_ptr(arm_smmu_of_match),21332049 },21342134- .probe = arm_smmu_device_dt_probe,20502050+ .probe = arm_smmu_device_probe,21352051 .remove = arm_smmu_device_remove,21362052};21372053···21732089IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);21742090IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);21752091IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);20922092+20932093+#ifdef CONFIG_ACPI20942094+static int __init arm_smmu_acpi_init(struct acpi_table_header *table)20952095+{20962096+ if (iort_node_match(ACPI_IORT_NODE_SMMU))20972097+ return arm_smmu_init();20982098+20992099+ return 0;21002100+}21012101+IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, 
arm_smmu_acpi_init);21022102+#endif2176210321772104MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");21782105MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
···708708};709709710710/**711711+ * enum device_link_state - Device link states.712712+ * @DL_STATE_NONE: The presence of the drivers is not being tracked.713713+ * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.714714+ * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.715715+ * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).716716+ * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.717717+ * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.718718+ */719719+enum device_link_state {720720+ DL_STATE_NONE = -1,721721+ DL_STATE_DORMANT = 0,722722+ DL_STATE_AVAILABLE,723723+ DL_STATE_CONSUMER_PROBE,724724+ DL_STATE_ACTIVE,725725+ DL_STATE_SUPPLIER_UNBIND,726726+};727727+728728+/*729729+ * Device link flags.730730+ *731731+ * STATELESS: The core won't track the presence of supplier/consumer drivers.732732+ * AUTOREMOVE: Remove this link automatically on consumer driver unbind.733733+ * PM_RUNTIME: If set, the runtime PM framework will use this link.734734+ * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.735735+ */736736+#define DL_FLAG_STATELESS BIT(0)737737+#define DL_FLAG_AUTOREMOVE BIT(1)738738+#define DL_FLAG_PM_RUNTIME BIT(2)739739+#define DL_FLAG_RPM_ACTIVE BIT(3)740740+741741+/**742742+ * struct device_link - Device link representation.743743+ * @supplier: The device on the supplier end of the link.744744+ * @s_node: Hook to the supplier device's list of links to consumers.745745+ * @consumer: The device on the consumer end of the link.746746+ * @c_node: Hook to the consumer device's list of links to suppliers.747747+ * @status: The state of the link (with respect to the presence of drivers).748748+ * @flags: Link flags.749749+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.750750+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.751751+ */752752+struct device_link {753753+ struct 
device *supplier;754754+ struct list_head s_node;755755+ struct device *consumer;756756+ struct list_head c_node;757757+ enum device_link_state status;758758+ u32 flags;759759+ bool rpm_active;760760+#ifdef CONFIG_SRCU761761+ struct rcu_head rcu_head;762762+#endif763763+};764764+765765+/**766766+ * enum dl_dev_state - Device driver presence tracking information.767767+ * @DL_DEV_NO_DRIVER: There is no driver attached to the device.768768+ * @DL_DEV_PROBING: A driver is probing.769769+ * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.770770+ * @DL_DEV_UNBINDING: The driver is unbinding from the device.771771+ */772772+enum dl_dev_state {773773+ DL_DEV_NO_DRIVER = 0,774774+ DL_DEV_PROBING,775775+ DL_DEV_DRIVER_BOUND,776776+ DL_DEV_UNBINDING,777777+};778778+779779+/**780780+ * struct dev_links_info - Device data related to device links.781781+ * @suppliers: List of links to supplier devices.782782+ * @consumers: List of links to consumer devices.783783+ * @status: Driver status information.784784+ */785785+struct dev_links_info {786786+ struct list_head suppliers;787787+ struct list_head consumers;788788+ enum dl_dev_state status;789789+};790790+791791+/**711792 * struct device - The basic device structure712793 * @parent: The device's "parent" device, the device to which it is attached.713794 * In most cases, a parent device is some sort of bus or host···880799 core doesn't touch it */881800 void *driver_data; /* Driver data, set and get with882801 dev_set/get_drvdata */802802+ struct dev_links_info links;883803 struct dev_pm_info power;884804 struct dev_pm_domain *pm_domain;885805···11981116/* debugging and troubleshooting/diagnostic helpers. */11991117extern const char *dev_driver_string(const struct device *dev);1200111811191119+/* Device links interface. 
*/11201120+struct device_link *device_link_add(struct device *consumer,11211121+ struct device *supplier, u32 flags);11221122+void device_link_del(struct device_link *link);1201112312021124#ifdef CONFIG_PRINTK12031125
···559559 pm_message_t power_state;560560 unsigned int can_wakeup:1;561561 unsigned int async_suspend:1;562562+ bool in_dpm_list:1; /* Owned by the PM core */562563 bool is_prepared:1; /* Owned by the PM core */563564 bool is_suspended:1; /* Ditto */564565 bool is_noirq_suspended:1;···597596 unsigned int use_autosuspend:1;598597 unsigned int timer_autosuspends:1;599598 unsigned int memalloc_noio:1;599599+ unsigned int links_count;600600 enum rpm_request request;601601 enum rpm_status runtime_status;602602 int runtime_error;
···5656 * kobject_action_type - translate action string to numeric type5757 *5858 * @buf: buffer containing the action string, newline is ignored5959- * @len: length of buffer5959+ * @count: length of buffer6060 * @type: pointer to the location to store the action type6161 *6262 * Returns 0 if the action string was recognized.···154154/**155155 * kobject_uevent_env - send an uevent with environmental data156156 *157157- * @action: action that is happening158157 * @kobj: struct kobject that the action is happening to158158+ * @action: action that is happening159159 * @envp_ext: pointer to environmental data160160 *161161 * Returns 0 if kobject_uevent_env() is completed with success or the···363363/**364364 * kobject_uevent - notify userspace by sending an uevent365365 *366366- * @action: action that is happening367366 * @kobj: struct kobject that the action is happening to367367+ * @action: action that is happening368368 *369369 * Returns 0 if kobject_uevent() is completed with success or the370370 * corresponding error when it fails.