Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'driver-core-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH:
"Here is the large set of driver core changes for 6.3-rc1.

There's a lot of changes this development cycle, most of the work
falls into two different categories:

- fw_devlink fixes and updates. This has gone through numerous review
cycles and lots of review and testing by lots of different devices.
Hopefully all should be good now, and Saravana will be keeping a
watch for any potential regression on odd embedded systems.

- driver core changes to work to make struct bus_type able to be
moved into read-only memory (i.e. const) The recent work with Rust
has pointed out a number of areas in the driver core where we are
passing around and working with structures that really do not have
to be dynamic at all, and they should be able to be read-only
making things safer overall. This is the continuation of that work
(started last release with kobject changes) in moving struct
bus_type to be constant. We didn't quite make it for this release,
but the remaining patches will be finished up for the release after
this one, but the groundwork has been laid for this effort.

Other than that we have in here:

- debugfs memory leak fixes in some subsystems

- error path cleanups and fixes for some never-able-to-be-hit
codepaths.

- cacheinfo rework and fixes

- Other tiny fixes, full details are in the shortlog

All of these have been in linux-next for a while with no reported
problems"

[ Geert Uytterhoeven points out that that last sentence isn't true, and
that there's a pending report that has a fix that is queued up - Linus ]

* tag 'driver-core-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (124 commits)
debugfs: drop inline constant formatting for ERR_PTR(-ERROR)
OPP: fix error checking in opp_migrate_dentry()
debugfs: update comment of debugfs_rename()
i3c: fix device.h kernel-doc warnings
dma-mapping: no need to pass a bus_type into get_arch_dma_ops()
driver core: class: move EXPORT_SYMBOL_GPL() lines to the correct place
Revert "driver core: add error handling for devtmpfs_create_node()"
Revert "devtmpfs: add debug info to handle()"
Revert "devtmpfs: remove return value of devtmpfs_delete_node()"
driver core: cpu: don't hand-override the uevent bus_type callback.
devtmpfs: remove return value of devtmpfs_delete_node()
devtmpfs: add debug info to handle()
driver core: add error handling for devtmpfs_create_node()
driver core: bus: update my copyright notice
driver core: bus: add bus_get_dev_root() function
driver core: bus: constify bus_unregister()
driver core: bus: constify some internal functions
driver core: bus: constify bus_get_kset()
driver core: bus: constify bus_register/unregister_notifier()
driver core: remove private pointer from struct bus_type
...

+1541 -1195
+10
Documentation/ABI/testing/sysfs-kernel-address_bits
··· 1 + What: /sys/kernel/address_bit 2 + Date: May 2023 3 + KernelVersion: 6.3 4 + Contact: Thomas Weißschuh <linux@weissschuh.net> 5 + Description: 6 + The address size of the running kernel in bits. 7 + 8 + Access: Read 9 + 10 + Users: util-linux
+1
Documentation/process/embargoed-hardware-issues.rst
··· 251 251 IBM Z Christian Borntraeger <borntraeger@de.ibm.com> 252 252 Intel Tony Luck <tony.luck@intel.com> 253 253 Qualcomm Trilok Soni <tsoni@codeaurora.org> 254 + Samsung Javier González <javier.gonz@samsung.com> 254 255 255 256 Microsoft James Morris <jamorris@linux.microsoft.com> 256 257 VMware
+1 -1
arch/alpha/include/asm/dma-mapping.h
··· 4 4 5 5 extern const struct dma_map_ops alpha_pci_ops; 6 6 7 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 7 + static inline const struct dma_map_ops *get_arch_dma_ops(void) 8 8 { 9 9 #ifdef CONFIG_ALPHA_JENSEN 10 10 return NULL;
+7 -7
arch/arm64/kernel/cacheinfo.c
··· 46 46 int init_cache_level(unsigned int cpu) 47 47 { 48 48 unsigned int ctype, level, leaves; 49 - int fw_level; 49 + int fw_level, ret; 50 50 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 51 51 52 52 for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { ··· 59 59 leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; 60 60 } 61 61 62 - if (acpi_disabled) 62 + if (acpi_disabled) { 63 63 fw_level = of_find_last_cache_level(cpu); 64 - else 65 - fw_level = acpi_find_last_cache_level(cpu); 66 - 67 - if (fw_level < 0) 68 - return fw_level; 64 + } else { 65 + ret = acpi_get_cache_info(cpu, &fw_level, NULL); 66 + if (ret < 0) 67 + fw_level = 0; 68 + } 69 69 70 70 if (level < fw_level) { 71 71 /*
+1 -1
arch/ia64/include/asm/dma-mapping.h
··· 8 8 */ 9 9 extern const struct dma_map_ops *dma_ops; 10 10 11 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 11 + static inline const struct dma_map_ops *get_arch_dma_ops(void) 12 12 { 13 13 return dma_ops; 14 14 }
+1 -1
arch/mips/include/asm/dma-mapping.h
··· 6 6 7 7 extern const struct dma_map_ops jazz_dma_ops; 8 8 9 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 9 + static inline const struct dma_map_ops *get_arch_dma_ops(void) 10 10 { 11 11 #if defined(CONFIG_MACH_JAZZ) 12 12 return &jazz_dma_ops;
+2 -2
arch/mips/sgi-ip22/ip22-gio.c
··· 199 199 }; 200 200 ATTRIBUTE_GROUPS(gio_dev); 201 201 202 - static int gio_device_uevent(struct device *dev, struct kobj_uevent_env *env) 202 + static int gio_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 203 203 { 204 - struct gio_device *gio_dev = to_gio_device(dev); 204 + const struct gio_device *gio_dev = to_gio_device(dev); 205 205 206 206 add_uevent_var(env, "MODALIAS=gio:%x", gio_dev->id.id); 207 207 return 0;
+1 -1
arch/parisc/include/asm/dma-mapping.h
··· 21 21 22 22 extern const struct dma_map_ops *hppa_dma_ops; 23 23 24 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 24 + static inline const struct dma_map_ops *get_arch_dma_ops(void) 25 25 { 26 26 return hppa_dma_ops; 27 27 }
+2 -2
arch/parisc/kernel/drivers.c
··· 552 552 return match_device(to_parisc_driver(drv), to_parisc_device(dev)); 553 553 } 554 554 555 - static ssize_t make_modalias(struct device *dev, char *buf) 555 + static ssize_t make_modalias(const struct device *dev, char *buf) 556 556 { 557 557 const struct parisc_device *padev = to_parisc_device(dev); 558 558 const struct parisc_device_id *id = &padev->id; ··· 562 562 (u32)id->sversion); 563 563 } 564 564 565 - static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env) 565 + static int parisc_uevent(const struct device *dev, struct kobj_uevent_env *env) 566 566 { 567 567 const struct parisc_device *padev; 568 568 char modalias[40];
+1 -1
arch/powerpc/include/asm/ps3.h
··· 396 396 return container_of(_drv, struct ps3_system_bus_driver, core); 397 397 } 398 398 static inline struct ps3_system_bus_device *ps3_dev_to_system_bus_dev( 399 - struct device *_dev) 399 + const struct device *_dev) 400 400 { 401 401 return container_of(_dev, struct ps3_system_bus_device, core); 402 402 }
+1 -4
arch/powerpc/include/asm/vio.h
··· 161 161 return container_of(drv, struct vio_driver, driver); 162 162 } 163 163 164 - static inline struct vio_dev *to_vio_dev(struct device *dev) 165 - { 166 - return container_of(dev, struct vio_dev, dev); 167 - } 164 + #define to_vio_dev(__dev) container_of_const(__dev, struct vio_dev, dev) 168 165 169 166 #endif /* __KERNEL__ */ 170 167 #endif /* _ASM_POWERPC_VIO_H */
+1 -1
arch/powerpc/platforms/ps3/system-bus.c
··· 439 439 dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__); 440 440 } 441 441 442 - static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *env) 442 + static int ps3_system_bus_uevent(const struct device *_dev, struct kobj_uevent_env *env) 443 443 { 444 444 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 445 445
+6 -1
arch/powerpc/platforms/pseries/ibmebus.c
··· 426 426 }; 427 427 ATTRIBUTE_GROUPS(ibmebus_bus_device); 428 428 429 + static int ibmebus_bus_modalias(const struct device *dev, struct kobj_uevent_env *env) 430 + { 431 + return of_device_uevent_modalias(dev, env); 432 + } 433 + 429 434 struct bus_type ibmebus_bus_type = { 430 435 .name = "ibmebus", 431 - .uevent = of_device_uevent_modalias, 436 + .uevent = ibmebus_bus_modalias, 432 437 .bus_groups = ibmbus_bus_groups, 433 438 .match = ibmebus_bus_bus_match, 434 439 .probe = ibmebus_bus_device_probe,
+2 -2
arch/powerpc/platforms/pseries/vio.c
··· 1609 1609 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); 1610 1610 } 1611 1611 1612 - static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) 1612 + static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env) 1613 1613 { 1614 1614 const struct vio_dev *vio_dev = to_vio_dev(dev); 1615 - struct device_node *dn; 1615 + const struct device_node *dn; 1616 1616 const char *cp; 1617 1617 1618 1618 dn = dev->of_node;
-42
arch/riscv/kernel/cacheinfo.c
··· 113 113 } 114 114 } 115 115 116 - int init_cache_level(unsigned int cpu) 117 - { 118 - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 119 - struct device_node *np = of_cpu_device_node_get(cpu); 120 - struct device_node *prev = NULL; 121 - int levels = 0, leaves = 0, level; 122 - 123 - if (of_property_read_bool(np, "cache-size")) 124 - ++leaves; 125 - if (of_property_read_bool(np, "i-cache-size")) 126 - ++leaves; 127 - if (of_property_read_bool(np, "d-cache-size")) 128 - ++leaves; 129 - if (leaves > 0) 130 - levels = 1; 131 - 132 - prev = np; 133 - while ((np = of_find_next_cache_node(np))) { 134 - of_node_put(prev); 135 - prev = np; 136 - if (!of_device_is_compatible(np, "cache")) 137 - break; 138 - if (of_property_read_u32(np, "cache-level", &level)) 139 - break; 140 - if (level <= levels) 141 - break; 142 - if (of_property_read_bool(np, "cache-size")) 143 - ++leaves; 144 - if (of_property_read_bool(np, "i-cache-size")) 145 - ++leaves; 146 - if (of_property_read_bool(np, "d-cache-size")) 147 - ++leaves; 148 - levels = level; 149 - } 150 - 151 - of_node_put(np); 152 - this_cpu_ci->num_levels = levels; 153 - this_cpu_ci->num_leaves = leaves; 154 - 155 - return 0; 156 - } 157 - 158 116 int populate_cache_leaves(unsigned int cpu) 159 117 { 160 118 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+1 -1
arch/sparc/include/asm/dma-mapping.h
··· 4 4 5 5 extern const struct dma_map_ops *dma_ops; 6 6 7 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 7 + static inline const struct dma_map_ops *get_arch_dma_ops(void) 8 8 { 9 9 /* sparc32 uses per-device dma_ops */ 10 10 return IS_ENABLED(CONFIG_SPARC64) ? dma_ops : NULL;
+1 -4
arch/sparc/include/asm/vio.h
··· 488 488 return container_of(drv, struct vio_driver, driver); 489 489 } 490 490 491 - static inline struct vio_dev *to_vio_dev(struct device *dev) 492 - { 493 - return container_of(dev, struct vio_dev, dev); 494 - } 491 + #define to_vio_dev(__dev) container_of_const(__dev, struct vio_dev, dev) 495 492 496 493 int vio_ldc_send(struct vio_driver_state *vio, void *data, int len); 497 494 void vio_link_state_change(struct vio_driver_state *vio, int event);
+1 -1
arch/sparc/kernel/vio.c
··· 46 46 return NULL; 47 47 } 48 48 49 - static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) 49 + static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env) 50 50 { 51 51 const struct vio_dev *vio_dev = to_vio_dev(dev); 52 52
+1 -1
arch/x86/include/asm/dma-mapping.h
··· 4 4 5 5 extern const struct dma_map_ops *dma_ops; 6 6 7 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 7 + static inline const struct dma_map_ops *get_arch_dma_ops(void) 8 8 { 9 9 return dma_ops; 10 10 }
+1 -1
block/genhd.c
··· 1200 1200 .dev_uevent = block_uevent, 1201 1201 }; 1202 1202 1203 - static char *block_devnode(struct device *dev, umode_t *mode, 1203 + static char *block_devnode(const struct device *dev, umode_t *mode, 1204 1204 kuid_t *uid, kgid_t *gid) 1205 1205 { 1206 1206 struct gendisk *disk = dev_to_disk(dev);
+2 -2
block/partitions/core.c
··· 254 254 iput(dev_to_bdev(dev)->bd_inode); 255 255 } 256 256 257 - static int part_uevent(struct device *dev, struct kobj_uevent_env *env) 257 + static int part_uevent(const struct device *dev, struct kobj_uevent_env *env) 258 258 { 259 - struct block_device *part = dev_to_bdev(dev); 259 + const struct block_device *part = dev_to_bdev(dev); 260 260 261 261 add_uevent_var(env, "PARTN=%u", part->bd_partno); 262 262 if (part->bd_meta_info && part->bd_meta_info->volname[0])
+1 -1
drivers/acpi/bus.c
··· 1014 1014 && !acpi_match_device_ids(acpi_dev, acpi_drv->ids); 1015 1015 } 1016 1016 1017 - static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env) 1017 + static int acpi_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 1018 1018 { 1019 1019 return __acpi_device_uevent_modalias(to_acpi_device(dev), env); 1020 1020 }
+4 -4
drivers/acpi/device_sysfs.c
··· 133 133 * -EINVAL: output error 134 134 * -ENOMEM: output is truncated 135 135 */ 136 - static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, 136 + static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias, 137 137 int size) 138 138 { 139 139 int len; ··· 191 191 * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of 192 192 * ACPI/PNP IDs. 193 193 */ 194 - static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, 194 + static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias, 195 195 int size) 196 196 { 197 197 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; ··· 239 239 return len; 240 240 } 241 241 242 - int __acpi_device_uevent_modalias(struct acpi_device *adev, 242 + int __acpi_device_uevent_modalias(const struct acpi_device *adev, 243 243 struct kobj_uevent_env *env) 244 244 { 245 245 int len; ··· 277 277 * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with 278 278 * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". 279 279 */ 280 - int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) 280 + int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) 281 281 { 282 282 return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); 283 283 }
+1 -1
drivers/acpi/internal.h
··· 120 120 Device Matching and Notification 121 121 -------------------------------------------------------------------------- */ 122 122 struct acpi_device *acpi_companion_match(const struct device *dev); 123 - int __acpi_device_uevent_modalias(struct acpi_device *adev, 123 + int __acpi_device_uevent_modalias(const struct acpi_device *adev, 124 124 struct kobj_uevent_env *env); 125 125 126 126 /* --------------------------------------------------------------------------
+54 -39
drivers/acpi/pptt.c
··· 81 81 * acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache 82 82 * @table_hdr: Pointer to the head of the PPTT table 83 83 * @local_level: passed res reflects this cache level 84 + * @split_levels: Number of split cache levels (data/instruction). 84 85 * @res: cache resource in the PPTT we want to walk 85 86 * @found: returns a pointer to the requested level if found 86 87 * @level: the requested cache level ··· 101 100 */ 102 101 static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, 103 102 unsigned int local_level, 103 + unsigned int *split_levels, 104 104 struct acpi_subtable_header *res, 105 105 struct acpi_pptt_cache **found, 106 106 unsigned int level, int type) ··· 115 113 while (cache) { 116 114 local_level++; 117 115 116 + if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) { 117 + cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache); 118 + continue; 119 + } 120 + 121 + if (split_levels && 122 + (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) || 123 + acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR))) 124 + *split_levels = local_level; 125 + 118 126 if (local_level == level && 119 - cache->flags & ACPI_PPTT_CACHE_TYPE_VALID && 120 127 acpi_pptt_match_type(cache->attributes, type)) { 121 128 if (*found != NULL && cache != *found) 122 129 pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); ··· 146 135 static struct acpi_pptt_cache * 147 136 acpi_find_cache_level(struct acpi_table_header *table_hdr, 148 137 struct acpi_pptt_processor *cpu_node, 149 - unsigned int *starting_level, unsigned int level, 150 - int type) 138 + unsigned int *starting_level, unsigned int *split_levels, 139 + unsigned int level, int type) 151 140 { 152 141 struct acpi_subtable_header *res; 153 142 unsigned int number_of_levels = *starting_level; ··· 160 149 resource++; 161 150 162 151 local_level = acpi_pptt_walk_cache(table_hdr, *starting_level, 163 - res, &ret, 
level, type); 152 + split_levels, res, &ret, 153 + level, type); 164 154 /* 165 155 * we are looking for the max depth. Since its potentially 166 156 * possible for a given node to have resources with differing ··· 177 165 } 178 166 179 167 /** 180 - * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches 168 + * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache 169 + * levels and split cache levels (data/instruction). 181 170 * @table_hdr: Pointer to the head of the PPTT table 182 171 * @cpu_node: processor node we wish to count caches for 172 + * @levels: Number of levels if success. 173 + * @split_levels: Number of split cache levels (data/instruction) if 174 + * success. Can by NULL. 183 175 * 184 176 * Given a processor node containing a processing unit, walk into it and count 185 177 * how many levels exist solely for it, and then walk up each level until we hit 186 178 * the root node (ignore the package level because it may be possible to have 187 - * caches that exist across packages). Count the number of cache levels that 188 - * exist at each level on the way up. 189 - * 190 - * Return: Total number of levels found. 179 + * caches that exist across packages). Count the number of cache levels and 180 + * split cache levels (data/instruction) that exist at each level on the way 181 + * up. 
191 182 */ 192 - static int acpi_count_levels(struct acpi_table_header *table_hdr, 193 - struct acpi_pptt_processor *cpu_node) 183 + static void acpi_count_levels(struct acpi_table_header *table_hdr, 184 + struct acpi_pptt_processor *cpu_node, 185 + unsigned int *levels, unsigned int *split_levels) 194 186 { 195 - int total_levels = 0; 196 - 197 187 do { 198 - acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0); 188 + acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0); 199 189 cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); 200 190 } while (cpu_node); 201 - 202 - return total_levels; 203 191 } 204 192 205 193 /** ··· 293 281 return NULL; 294 282 } 295 283 296 - static int acpi_find_cache_levels(struct acpi_table_header *table_hdr, 297 - u32 acpi_cpu_id) 298 - { 299 - int number_of_levels = 0; 300 - struct acpi_pptt_processor *cpu; 301 - 302 - cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id); 303 - if (cpu) 304 - number_of_levels = acpi_count_levels(table_hdr, cpu); 305 - 306 - return number_of_levels; 307 - } 308 - 309 284 static u8 acpi_cache_type(enum cache_type type) 310 285 { 311 286 switch (type) { ··· 333 334 334 335 while (cpu_node && !found) { 335 336 found = acpi_find_cache_level(table_hdr, cpu_node, 336 - &total_levels, level, acpi_type); 337 + &total_levels, NULL, level, acpi_type); 337 338 *node = cpu_node; 338 339 cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); 339 340 } ··· 601 602 } 602 603 603 604 /** 604 - * acpi_find_last_cache_level() - Determines the number of cache levels for a PE 605 + * acpi_get_cache_info() - Determine the number of cache levels and 606 + * split cache levels (data/instruction) and for a PE. 605 607 * @cpu: Kernel logical CPU number 608 + * @levels: Number of levels if success. 609 + * @split_levels: Number of levels being split (i.e. data/instruction) 610 + * if success. Can by NULL. 
606 611 * 607 612 * Given a logical CPU number, returns the number of levels of cache represented 608 613 * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0 609 614 * indicating we didn't find any cache levels. 610 615 * 611 - * Return: Cache levels visible to this core. 616 + * Return: -ENOENT if no PPTT table or no PPTT processor struct found. 617 + * 0 on success. 612 618 */ 613 - int acpi_find_last_cache_level(unsigned int cpu) 619 + int acpi_get_cache_info(unsigned int cpu, unsigned int *levels, 620 + unsigned int *split_levels) 614 621 { 615 - u32 acpi_cpu_id; 622 + struct acpi_pptt_processor *cpu_node; 616 623 struct acpi_table_header *table; 617 - int number_of_levels = 0; 624 + u32 acpi_cpu_id; 625 + 626 + *levels = 0; 627 + if (split_levels) 628 + *split_levels = 0; 618 629 619 630 table = acpi_get_pptt(); 620 631 if (!table) 621 632 return -ENOENT; 622 633 623 - pr_debug("Cache Setup find last level CPU=%d\n", cpu); 634 + pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu); 624 635 625 636 acpi_cpu_id = get_acpi_id_for_cpu(cpu); 626 - number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id); 627 - pr_debug("Cache Setup find last level level=%d\n", number_of_levels); 637 + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); 638 + if (!cpu_node) 639 + return -ENOENT; 628 640 629 - return number_of_levels; 641 + acpi_count_levels(table, cpu_node, levels, split_levels); 642 + 643 + pr_debug("Cache Setup: last_level=%d split_levels=%d\n", 644 + *levels, split_levels ? *split_levels : -1); 645 + 646 + return 0; 630 647 } 631 648 632 649 /**
+2 -2
drivers/amba/bus.c
··· 235 235 return amba_lookup(pcdrv->id_table, pcdev) != NULL; 236 236 } 237 237 238 - static int amba_uevent(struct device *dev, struct kobj_uevent_env *env) 238 + static int amba_uevent(const struct device *dev, struct kobj_uevent_env *env) 239 239 { 240 - struct amba_device *pcdev = to_amba_device(dev); 240 + const struct amba_device *pcdev = to_amba_device(dev); 241 241 int retval = 0; 242 242 243 243 retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
+10 -2
drivers/base/arch_topology.c
··· 736 736 737 737 ret = detect_cache_attributes(cpuid); 738 738 if (ret && ret != -ENOENT) 739 - pr_info("Early cacheinfo failed, ret = %d\n", ret); 739 + pr_info("Early cacheinfo allocation failed, ret = %d\n", ret); 740 740 741 741 /* update core and thread sibling masks */ 742 742 for_each_online_cpu(cpu) { ··· 825 825 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) 826 826 void __init init_cpu_topology(void) 827 827 { 828 - int ret; 828 + int cpu, ret; 829 829 830 830 reset_cpu_topology(); 831 831 ret = parse_acpi_topology(); ··· 839 839 */ 840 840 reset_cpu_topology(); 841 841 return; 842 + } 843 + 844 + for_each_possible_cpu(cpu) { 845 + ret = fetch_cache_info(cpu); 846 + if (ret) { 847 + pr_err("Early cacheinfo failed, ret = %d\n", ret); 848 + break; 849 + } 842 850 } 843 851 } 844 852
+1 -1
drivers/base/auxiliary.c
··· 185 185 return !!auxiliary_match_id(auxdrv->id_table, auxdev); 186 186 } 187 187 188 - static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env) 188 + static int auxiliary_uevent(const struct device *dev, struct kobj_uevent_env *env) 189 189 { 190 190 const char *name, *p; 191 191
+20 -1
drivers/base/base.h
··· 52 52 53 53 struct kset glue_dirs; 54 54 struct class *class; 55 + 56 + struct lock_class_key lock_key; 55 57 }; 56 - #define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj) 58 + #define to_subsys_private(obj) container_of_const(obj, struct subsys_private, subsys.kobj) 59 + 60 + static inline struct subsys_private *subsys_get(struct subsys_private *sp) 61 + { 62 + if (sp) 63 + kset_get(&sp->subsys); 64 + return sp; 65 + } 66 + 67 + static inline void subsys_put(struct subsys_private *sp) 68 + { 69 + if (sp) 70 + kset_put(&sp->subsys); 71 + } 57 72 58 73 struct driver_private { 59 74 struct kobject kobj; ··· 145 130 extern int bus_add_device(struct device *dev); 146 131 extern void bus_probe_device(struct device *dev); 147 132 extern void bus_remove_device(struct device *dev); 133 + void bus_notify(struct device *dev, enum bus_notifier_event value); 134 + bool bus_is_registered(const struct bus_type *bus); 148 135 149 136 extern int bus_add_driver(struct device_driver *drv); 150 137 extern void bus_remove_driver(struct device_driver *drv); ··· 175 158 extern void device_unblock_probing(void); 176 159 extern void deferred_probe_extend_timeout(void); 177 160 extern void driver_deferred_probe_trigger(void); 161 + const char *device_get_devnode(const struct device *dev, umode_t *mode, 162 + kuid_t *uid, kgid_t *gid, const char **tmp); 178 163 179 164 /* /sys/devices directory */ 180 165 extern struct kset *devices_kset;
+383 -194
drivers/base/bus.c
··· 6 6 * Copyright (c) 2002-3 Open Source Development Labs 7 7 * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> 8 8 * Copyright (c) 2007 Novell Inc. 9 + * Copyright (c) 2023 Greg Kroah-Hartman <gregkh@linuxfoundation.org> 9 10 */ 10 11 11 12 #include <linux/async.h> ··· 25 24 /* /sys/devices/system */ 26 25 static struct kset *system_kset; 27 26 27 + /* /sys/bus */ 28 + static struct kset *bus_kset; 29 + 28 30 #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) 29 31 30 32 /* ··· 43 39 static int __must_check bus_rescan_devices_helper(struct device *dev, 44 40 void *data); 45 41 42 + /** 43 + * bus_to_subsys - Turn a struct bus_type into a struct subsys_private 44 + * 45 + * @bus: pointer to the struct bus_type to look up 46 + * 47 + * The driver core internals needs to work on the subsys_private structure, not 48 + * the external struct bus_type pointer. This function walks the list of 49 + * registered busses in the system and finds the matching one and returns the 50 + * internal struct subsys_private that relates to that bus. 51 + * 52 + * Note, the reference count of the return value is INCREMENTED if it is not 53 + * NULL. A call to subsys_put() must be done when finished with the pointer in 54 + * order for it to be properly freed. 
55 + */ 56 + static struct subsys_private *bus_to_subsys(const struct bus_type *bus) 57 + { 58 + struct subsys_private *sp = NULL; 59 + struct kobject *kobj; 60 + 61 + if (!bus) 62 + return NULL; 63 + 64 + spin_lock(&bus_kset->list_lock); 65 + 66 + if (list_empty(&bus_kset->list)) 67 + goto done; 68 + 69 + list_for_each_entry(kobj, &bus_kset->list, entry) { 70 + struct kset *kset = container_of(kobj, struct kset, kobj); 71 + 72 + sp = container_of_const(kset, struct subsys_private, subsys); 73 + if (sp->bus == bus) 74 + goto done; 75 + } 76 + sp = NULL; 77 + done: 78 + sp = subsys_get(sp); 79 + spin_unlock(&bus_kset->list_lock); 80 + return sp; 81 + } 82 + 46 83 static struct bus_type *bus_get(struct bus_type *bus) 47 84 { 48 - if (bus) { 49 - kset_get(&bus->p->subsys); 85 + struct subsys_private *sp = bus_to_subsys(bus); 86 + 87 + if (sp) 50 88 return bus; 51 - } 52 89 return NULL; 53 90 } 54 91 55 - static void bus_put(struct bus_type *bus) 92 + static void bus_put(const struct bus_type *bus) 56 93 { 57 - if (bus) 58 - kset_put(&bus->p->subsys); 94 + struct subsys_private *sp = bus_to_subsys(bus); 95 + 96 + /* two puts are required as the call to bus_to_subsys incremented it again */ 97 + subsys_put(sp); 98 + subsys_put(sp); 59 99 } 60 100 61 101 static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr, ··· 139 91 kfree(drv_priv); 140 92 } 141 93 142 - static struct kobj_type driver_ktype = { 94 + static const struct kobj_type driver_ktype = { 143 95 .sysfs_ops = &driver_sysfs_ops, 144 96 .release = driver_release, 145 97 }; ··· 176 128 .store = bus_attr_store, 177 129 }; 178 130 179 - int bus_create_file(struct bus_type *bus, struct bus_attribute *attr) 131 + int bus_create_file(const struct bus_type *bus, struct bus_attribute *attr) 180 132 { 133 + struct subsys_private *sp = bus_to_subsys(bus); 181 134 int error; 182 - if (bus_get(bus)) { 183 - error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr); 184 - bus_put(bus); 185 - } else 186 - 
error = -EINVAL; 135 + 136 + if (!sp) 137 + return -EINVAL; 138 + 139 + error = sysfs_create_file(&sp->subsys.kobj, &attr->attr); 140 + 141 + subsys_put(sp); 187 142 return error; 188 143 } 189 144 EXPORT_SYMBOL_GPL(bus_create_file); 190 145 191 - void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr) 146 + void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr) 192 147 { 193 - if (bus_get(bus)) { 194 - sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr); 195 - bus_put(bus); 196 - } 148 + struct subsys_private *sp = bus_to_subsys(bus); 149 + 150 + if (!sp) 151 + return; 152 + 153 + sysfs_remove_file(&sp->subsys.kobj, &attr->attr); 154 + subsys_put(sp); 197 155 } 198 156 EXPORT_SYMBOL_GPL(bus_remove_file); 199 157 200 158 static void bus_release(struct kobject *kobj) 201 159 { 202 160 struct subsys_private *priv = to_subsys_private(kobj); 203 - struct bus_type *bus = priv->bus; 204 161 162 + lockdep_unregister_key(&priv->lock_key); 205 163 kfree(priv); 206 - bus->p = NULL; 207 164 } 208 165 209 - static struct kobj_type bus_ktype = { 166 + static const struct kobj_type bus_ktype = { 210 167 .sysfs_ops = &bus_sysfs_ops, 211 168 .release = bus_release, 212 169 }; ··· 228 175 static const struct kset_uevent_ops bus_uevent_ops = { 229 176 .filter = bus_uevent_filter, 230 177 }; 231 - 232 - static struct kset *bus_kset; 233 178 234 179 /* Manually detach a device from its associated driver. 
*/ 235 180 static ssize_t unbind_store(struct device_driver *drv, const char *buf, ··· 276 225 277 226 static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf) 278 227 { 279 - return sysfs_emit(buf, "%d\n", bus->p->drivers_autoprobe); 228 + struct subsys_private *sp = bus_to_subsys(bus); 229 + int ret; 230 + 231 + if (!sp) 232 + return -EINVAL; 233 + 234 + ret = sysfs_emit(buf, "%d\n", sp->drivers_autoprobe); 235 + subsys_put(sp); 236 + return ret; 280 237 } 281 238 282 239 static ssize_t drivers_autoprobe_store(struct bus_type *bus, 283 240 const char *buf, size_t count) 284 241 { 242 + struct subsys_private *sp = bus_to_subsys(bus); 243 + 244 + if (!sp) 245 + return -EINVAL; 246 + 285 247 if (buf[0] == '0') 286 - bus->p->drivers_autoprobe = 0; 248 + sp->drivers_autoprobe = 0; 287 249 else 288 - bus->p->drivers_autoprobe = 1; 250 + sp->drivers_autoprobe = 1; 251 + 252 + subsys_put(sp); 289 253 return count; 290 254 } 291 255 ··· 351 285 * to retain this data, it should do so, and increment the reference 352 286 * count in the supplied callback. 353 287 */ 354 - int bus_for_each_dev(struct bus_type *bus, struct device *start, 288 + int bus_for_each_dev(const struct bus_type *bus, struct device *start, 355 289 void *data, int (*fn)(struct device *, void *)) 356 290 { 291 + struct subsys_private *sp = bus_to_subsys(bus); 357 292 struct klist_iter i; 358 293 struct device *dev; 359 294 int error = 0; 360 295 361 - if (!bus || !bus->p) 296 + if (!sp) 362 297 return -EINVAL; 363 298 364 - klist_iter_init_node(&bus->p->klist_devices, &i, 299 + klist_iter_init_node(&sp->klist_devices, &i, 365 300 (start ? &start->p->knode_bus : NULL)); 366 301 while (!error && (dev = next_device(&i))) 367 302 error = fn(dev, data); 368 303 klist_iter_exit(&i); 304 + subsys_put(sp); 369 305 return error; 370 306 } 371 307 EXPORT_SYMBOL_GPL(bus_for_each_dev); ··· 387 319 * if it does. 
If the callback returns non-zero, this function will 388 320 * return to the caller and not iterate over any more devices. 389 321 */ 390 - struct device *bus_find_device(struct bus_type *bus, 322 + struct device *bus_find_device(const struct bus_type *bus, 391 323 struct device *start, const void *data, 392 324 int (*match)(struct device *dev, const void *data)) 393 325 { 326 + struct subsys_private *sp = bus_to_subsys(bus); 394 327 struct klist_iter i; 395 328 struct device *dev; 396 329 397 - if (!bus || !bus->p) 330 + if (!sp) 398 331 return NULL; 399 332 400 - klist_iter_init_node(&bus->p->klist_devices, &i, 333 + klist_iter_init_node(&sp->klist_devices, &i, 401 334 (start ? &start->p->knode_bus : NULL)); 402 335 while ((dev = next_device(&i))) 403 336 if (match(dev, data) && get_device(dev)) 404 337 break; 405 338 klist_iter_exit(&i); 339 + subsys_put(sp); 406 340 return dev; 407 341 } 408 342 EXPORT_SYMBOL_GPL(bus_find_device); 409 - 410 - /** 411 - * subsys_find_device_by_id - find a device with a specific enumeration number 412 - * @subsys: subsystem 413 - * @id: index 'id' in struct device 414 - * @hint: device to check first 415 - * 416 - * Check the hint's next object and if it is a match return it directly, 417 - * otherwise, fall back to a full list search. Either way a reference for 418 - * the returned object is taken. 
419 - */ 420 - struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id, 421 - struct device *hint) 422 - { 423 - struct klist_iter i; 424 - struct device *dev; 425 - 426 - if (!subsys) 427 - return NULL; 428 - 429 - if (hint) { 430 - klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus); 431 - dev = next_device(&i); 432 - if (dev && dev->id == id && get_device(dev)) { 433 - klist_iter_exit(&i); 434 - return dev; 435 - } 436 - klist_iter_exit(&i); 437 - } 438 - 439 - klist_iter_init_node(&subsys->p->klist_devices, &i, NULL); 440 - while ((dev = next_device(&i))) { 441 - if (dev->id == id && get_device(dev)) { 442 - klist_iter_exit(&i); 443 - return dev; 444 - } 445 - } 446 - klist_iter_exit(&i); 447 - return NULL; 448 - } 449 - EXPORT_SYMBOL_GPL(subsys_find_device_by_id); 450 343 451 344 static struct device_driver *next_driver(struct klist_iter *i) 452 345 { ··· 440 411 * in the callback. It must also be sure to increment the refcount 441 412 * so it doesn't disappear before returning to the caller. 442 413 */ 443 - int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, 414 + int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start, 444 415 void *data, int (*fn)(struct device_driver *, void *)) 445 416 { 417 + struct subsys_private *sp = bus_to_subsys(bus); 446 418 struct klist_iter i; 447 419 struct device_driver *drv; 448 420 int error = 0; 449 421 450 - if (!bus) 422 + if (!sp) 451 423 return -EINVAL; 452 424 453 - klist_iter_init_node(&bus->p->klist_drivers, &i, 425 + klist_iter_init_node(&sp->klist_drivers, &i, 454 426 start ? 
&start->p->knode_bus : NULL); 455 427 while ((drv = next_driver(&i)) && !error) 456 428 error = fn(drv, data); 457 429 klist_iter_exit(&i); 430 + subsys_put(sp); 458 431 return error; 459 432 } 460 433 EXPORT_SYMBOL_GPL(bus_for_each_drv); ··· 471 440 */ 472 441 int bus_add_device(struct device *dev) 473 442 { 474 - struct bus_type *bus = bus_get(dev->bus); 475 - int error = 0; 443 + struct subsys_private *sp = bus_to_subsys(dev->bus); 444 + int error; 476 445 477 - if (bus) { 478 - pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev)); 479 - error = device_add_groups(dev, bus->dev_groups); 480 - if (error) 481 - goto out_put; 482 - error = sysfs_create_link(&bus->p->devices_kset->kobj, 483 - &dev->kobj, dev_name(dev)); 484 - if (error) 485 - goto out_groups; 486 - error = sysfs_create_link(&dev->kobj, 487 - &dev->bus->p->subsys.kobj, "subsystem"); 488 - if (error) 489 - goto out_subsys; 490 - klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices); 446 + if (!sp) { 447 + /* 448 + * This is a normal operation for many devices that do not 449 + * have a bus assigned to them, just say that all went 450 + * well. 
451 + */ 452 + return 0; 491 453 } 454 + 455 + /* 456 + * Reference in sp is now incremented and will be dropped when 457 + * the device is removed from the bus 458 + */ 459 + 460 + pr_debug("bus: '%s': add device %s\n", sp->bus->name, dev_name(dev)); 461 + 462 + error = device_add_groups(dev, sp->bus->dev_groups); 463 + if (error) 464 + goto out_put; 465 + 466 + error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev)); 467 + if (error) 468 + goto out_groups; 469 + 470 + error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem"); 471 + if (error) 472 + goto out_subsys; 473 + 474 + klist_add_tail(&dev->p->knode_bus, &sp->klist_devices); 492 475 return 0; 493 476 494 477 out_subsys: 495 - sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); 478 + sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); 496 479 out_groups: 497 - device_remove_groups(dev, bus->dev_groups); 480 + device_remove_groups(dev, sp->bus->dev_groups); 498 481 out_put: 499 - bus_put(dev->bus); 482 + subsys_put(sp); 500 483 return error; 501 484 } 502 485 ··· 522 477 */ 523 478 void bus_probe_device(struct device *dev) 524 479 { 525 - struct bus_type *bus = dev->bus; 480 + struct subsys_private *sp = bus_to_subsys(dev->bus); 526 481 struct subsys_interface *sif; 527 482 528 - if (!bus) 483 + if (!sp) 529 484 return; 530 485 531 - if (bus->p->drivers_autoprobe) 486 + if (sp->drivers_autoprobe) 532 487 device_initial_probe(dev); 533 488 534 - mutex_lock(&bus->p->mutex); 535 - list_for_each_entry(sif, &bus->p->interfaces, node) 489 + mutex_lock(&sp->mutex); 490 + list_for_each_entry(sif, &sp->interfaces, node) 536 491 if (sif->add_dev) 537 492 sif->add_dev(dev, sif); 538 - mutex_unlock(&bus->p->mutex); 493 + mutex_unlock(&sp->mutex); 494 + subsys_put(sp); 539 495 } 540 496 541 497 /** ··· 551 505 */ 552 506 void bus_remove_device(struct device *dev) 553 507 { 554 - struct bus_type *bus = dev->bus; 508 + struct subsys_private *sp = bus_to_subsys(dev->bus); 
555 509 struct subsys_interface *sif; 556 510 557 - if (!bus) 511 + if (!sp) 558 512 return; 559 513 560 - mutex_lock(&bus->p->mutex); 561 - list_for_each_entry(sif, &bus->p->interfaces, node) 514 + mutex_lock(&sp->mutex); 515 + list_for_each_entry(sif, &sp->interfaces, node) 562 516 if (sif->remove_dev) 563 517 sif->remove_dev(dev, sif); 564 - mutex_unlock(&bus->p->mutex); 518 + mutex_unlock(&sp->mutex); 565 519 566 520 sysfs_remove_link(&dev->kobj, "subsystem"); 567 - sysfs_remove_link(&dev->bus->p->devices_kset->kobj, 568 - dev_name(dev)); 521 + sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); 569 522 device_remove_groups(dev, dev->bus->dev_groups); 570 523 if (klist_node_attached(&dev->p->knode_bus)) 571 524 klist_del(&dev->p->knode_bus); ··· 572 527 pr_debug("bus: '%s': remove device %s\n", 573 528 dev->bus->name, dev_name(dev)); 574 529 device_release_driver(dev); 575 - bus_put(dev->bus); 530 + 531 + /* 532 + * Decrement the reference count twice, once for the bus_to_subsys() 533 + * call in the start of this function, and the second one from the 534 + * reference increment in bus_add_device() 535 + */ 536 + subsys_put(sp); 537 + subsys_put(sp); 576 538 } 577 539 578 540 static int __must_check add_bind_files(struct device_driver *drv) ··· 604 552 static BUS_ATTR_WO(drivers_probe); 605 553 static BUS_ATTR_RW(drivers_autoprobe); 606 554 607 - static int add_probe_files(struct bus_type *bus) 555 + static int add_probe_files(const struct bus_type *bus) 608 556 { 609 557 int retval; 610 558 ··· 619 567 return retval; 620 568 } 621 569 622 - static void remove_probe_files(struct bus_type *bus) 570 + static void remove_probe_files(const struct bus_type *bus) 623 571 { 624 572 bus_remove_file(bus, &bus_attr_drivers_autoprobe); 625 573 bus_remove_file(bus, &bus_attr_drivers_probe); ··· 641 589 */ 642 590 int bus_add_driver(struct device_driver *drv) 643 591 { 644 - struct bus_type *bus; 592 + struct subsys_private *sp = bus_to_subsys(drv->bus); 645 593 
struct driver_private *priv; 646 594 int error = 0; 647 595 648 - bus = bus_get(drv->bus); 649 - if (!bus) 596 + if (!sp) 650 597 return -EINVAL; 651 598 652 - pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name); 599 + /* 600 + * Reference in sp is now incremented and will be dropped when 601 + * the driver is removed from the bus 602 + */ 603 + pr_debug("bus: '%s': add driver %s\n", sp->bus->name, drv->name); 653 604 654 605 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 655 606 if (!priv) { ··· 662 607 klist_init(&priv->klist_devices, NULL, NULL); 663 608 priv->driver = drv; 664 609 drv->p = priv; 665 - priv->kobj.kset = bus->p->drivers_kset; 610 + priv->kobj.kset = sp->drivers_kset; 666 611 error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL, 667 612 "%s", drv->name); 668 613 if (error) 669 614 goto out_unregister; 670 615 671 - klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); 672 - if (drv->bus->p->drivers_autoprobe) { 616 + klist_add_tail(&priv->knode_bus, &sp->klist_drivers); 617 + if (sp->drivers_autoprobe) { 673 618 error = driver_attach(drv); 674 619 if (error) 675 620 goto out_del_list; ··· 681 626 printk(KERN_ERR "%s: uevent attr (%s) failed\n", 682 627 __func__, drv->name); 683 628 } 684 - error = driver_add_groups(drv, bus->drv_groups); 629 + error = driver_add_groups(drv, sp->bus->drv_groups); 685 630 if (error) { 686 631 /* How the hell do we get out of this pickle? 
Give up */ 687 632 printk(KERN_ERR "%s: driver_add_groups(%s) failed\n", ··· 706 651 /* drv->p is freed in driver_release() */ 707 652 drv->p = NULL; 708 653 out_put_bus: 709 - bus_put(bus); 654 + subsys_put(sp); 710 655 return error; 711 656 } 712 657 ··· 720 665 */ 721 666 void bus_remove_driver(struct device_driver *drv) 722 667 { 723 - if (!drv->bus) 668 + struct subsys_private *sp = bus_to_subsys(drv->bus); 669 + 670 + if (!sp) 724 671 return; 672 + 673 + pr_debug("bus: '%s': remove driver %s\n", sp->bus->name, drv->name); 725 674 726 675 if (!drv->suppress_bind_attrs) 727 676 remove_bind_files(drv); 728 - driver_remove_groups(drv, drv->bus->drv_groups); 677 + driver_remove_groups(drv, sp->bus->drv_groups); 729 678 driver_remove_file(drv, &driver_attr_uevent); 730 679 klist_remove(&drv->p->knode_bus); 731 - pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name); 732 680 driver_detach(drv); 733 681 module_remove_driver(drv); 734 682 kobject_put(&drv->p->kobj); 735 - bus_put(drv->bus); 683 + 684 + /* 685 + * Decrement the reference count twice, once for the bus_to_subsys() 686 + * call in the start of this function, and the second one from the 687 + * reference increment in bus_add_driver() 688 + */ 689 + subsys_put(sp); 690 + subsys_put(sp); 736 691 } 737 692 738 693 /* Helper for bus_rescan_devices's iter */ ··· 792 727 } 793 728 EXPORT_SYMBOL_GPL(device_reprobe); 794 729 795 - static int bus_add_groups(struct bus_type *bus, 796 - const struct attribute_group **groups) 797 - { 798 - return sysfs_create_groups(&bus->p->subsys.kobj, groups); 799 - } 800 - 801 - static void bus_remove_groups(struct bus_type *bus, 802 - const struct attribute_group **groups) 803 - { 804 - sysfs_remove_groups(&bus->p->subsys.kobj, groups); 805 - } 806 - 807 730 static void klist_devices_get(struct klist_node *n) 808 731 { 809 732 struct device_private *dev_prv = to_device_private_bus(n); ··· 811 758 static ssize_t bus_uevent_store(struct bus_type *bus, 812 759 const 
char *buf, size_t count) 813 760 { 814 - int rc; 761 + struct subsys_private *sp = bus_to_subsys(bus); 762 + int ret; 815 763 816 - rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); 817 - return rc ? rc : count; 764 + if (!sp) 765 + return -EINVAL; 766 + 767 + ret = kobject_synth_uevent(&sp->subsys.kobj, buf, count); 768 + subsys_put(sp); 769 + 770 + if (ret) 771 + return ret; 772 + return count; 818 773 } 819 774 /* 820 775 * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO() ··· 845 784 { 846 785 int retval; 847 786 struct subsys_private *priv; 848 - struct lock_class_key *key = &bus->lock_key; 787 + struct kobject *bus_kobj; 788 + struct lock_class_key *key; 849 789 850 790 priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL); 851 791 if (!priv) 852 792 return -ENOMEM; 853 793 854 794 priv->bus = bus; 855 - bus->p = priv; 856 795 857 796 BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier); 858 797 859 - retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name); 798 + bus_kobj = &priv->subsys.kobj; 799 + retval = kobject_set_name(bus_kobj, "%s", bus->name); 860 800 if (retval) 861 801 goto out; 862 802 863 - priv->subsys.kobj.kset = bus_kset; 864 - priv->subsys.kobj.ktype = &bus_ktype; 803 + bus_kobj->kset = bus_kset; 804 + bus_kobj->ktype = &bus_ktype; 865 805 priv->drivers_autoprobe = 1; 866 806 867 807 retval = kset_register(&priv->subsys); ··· 873 811 if (retval) 874 812 goto bus_uevent_fail; 875 813 876 - priv->devices_kset = kset_create_and_add("devices", NULL, 877 - &priv->subsys.kobj); 814 + priv->devices_kset = kset_create_and_add("devices", NULL, bus_kobj); 878 815 if (!priv->devices_kset) { 879 816 retval = -ENOMEM; 880 817 goto bus_devices_fail; 881 818 } 882 819 883 - priv->drivers_kset = kset_create_and_add("drivers", NULL, 884 - &priv->subsys.kobj); 820 + priv->drivers_kset = kset_create_and_add("drivers", NULL, bus_kobj); 885 821 if (!priv->drivers_kset) { 886 822 retval = -ENOMEM; 887 823 goto 
bus_drivers_fail; 888 824 } 889 825 890 826 INIT_LIST_HEAD(&priv->interfaces); 827 + key = &priv->lock_key; 828 + lockdep_register_key(key); 891 829 __mutex_init(&priv->mutex, "subsys mutex", key); 892 830 klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put); 893 831 klist_init(&priv->klist_drivers, NULL, NULL); ··· 896 834 if (retval) 897 835 goto bus_probe_files_fail; 898 836 899 - retval = bus_add_groups(bus, bus->bus_groups); 837 + retval = sysfs_create_groups(bus_kobj, bus->bus_groups); 900 838 if (retval) 901 839 goto bus_groups_fail; 902 840 ··· 906 844 bus_groups_fail: 907 845 remove_probe_files(bus); 908 846 bus_probe_files_fail: 909 - kset_unregister(bus->p->drivers_kset); 847 + kset_unregister(priv->drivers_kset); 910 848 bus_drivers_fail: 911 - kset_unregister(bus->p->devices_kset); 849 + kset_unregister(priv->devices_kset); 912 850 bus_devices_fail: 913 851 bus_remove_file(bus, &bus_attr_uevent); 914 852 bus_uevent_fail: 915 - kset_unregister(&bus->p->subsys); 853 + kset_unregister(&priv->subsys); 916 854 out: 917 - kfree(bus->p); 918 - bus->p = NULL; 855 + kfree(priv); 919 856 return retval; 920 857 } 921 858 EXPORT_SYMBOL_GPL(bus_register); ··· 926 865 * Unregister the child subsystems and the bus itself. 
927 866 * Finally, we call bus_put() to release the refcount 928 867 */ 929 - void bus_unregister(struct bus_type *bus) 868 + void bus_unregister(const struct bus_type *bus) 930 869 { 870 + struct subsys_private *sp = bus_to_subsys(bus); 871 + struct kobject *bus_kobj; 872 + 873 + if (!sp) 874 + return; 875 + 931 876 pr_debug("bus: '%s': unregistering\n", bus->name); 932 877 if (bus->dev_root) 933 878 device_unregister(bus->dev_root); 934 - bus_remove_groups(bus, bus->bus_groups); 879 + 880 + bus_kobj = &sp->subsys.kobj; 881 + sysfs_remove_groups(bus_kobj, bus->bus_groups); 935 882 remove_probe_files(bus); 936 - kset_unregister(bus->p->drivers_kset); 937 - kset_unregister(bus->p->devices_kset); 938 883 bus_remove_file(bus, &bus_attr_uevent); 939 - kset_unregister(&bus->p->subsys); 884 + 885 + kset_unregister(sp->drivers_kset); 886 + kset_unregister(sp->devices_kset); 887 + kset_unregister(&sp->subsys); 888 + subsys_put(sp); 940 889 } 941 890 EXPORT_SYMBOL_GPL(bus_unregister); 942 891 943 - int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb) 892 + int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb) 944 893 { 945 - return blocking_notifier_chain_register(&bus->p->bus_notifier, nb); 894 + struct subsys_private *sp = bus_to_subsys(bus); 895 + int retval; 896 + 897 + if (!sp) 898 + return -EINVAL; 899 + 900 + retval = blocking_notifier_chain_register(&sp->bus_notifier, nb); 901 + subsys_put(sp); 902 + return retval; 946 903 } 947 904 EXPORT_SYMBOL_GPL(bus_register_notifier); 948 905 949 - int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb) 906 + int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb) 950 907 { 951 - return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb); 908 + struct subsys_private *sp = bus_to_subsys(bus); 909 + int retval; 910 + 911 + if (!sp) 912 + return -EINVAL; 913 + retval = blocking_notifier_chain_unregister(&sp->bus_notifier, 
nb); 914 + subsys_put(sp); 915 + return retval; 952 916 } 953 917 EXPORT_SYMBOL_GPL(bus_unregister_notifier); 954 918 955 - struct kset *bus_get_kset(struct bus_type *bus) 919 + void bus_notify(struct device *dev, enum bus_notifier_event value) 956 920 { 957 - return &bus->p->subsys; 921 + struct subsys_private *sp = bus_to_subsys(dev->bus); 922 + 923 + if (!sp) 924 + return; 925 + 926 + blocking_notifier_call_chain(&sp->bus_notifier, value, dev); 927 + subsys_put(sp); 928 + } 929 + 930 + struct kset *bus_get_kset(const struct bus_type *bus) 931 + { 932 + struct subsys_private *sp = bus_to_subsys(bus); 933 + struct kset *kset; 934 + 935 + if (!sp) 936 + return NULL; 937 + 938 + kset = &sp->subsys; 939 + subsys_put(sp); 940 + 941 + return kset; 958 942 } 959 943 EXPORT_SYMBOL_GPL(bus_get_kset); 960 - 961 - struct klist *bus_get_device_klist(struct bus_type *bus) 962 - { 963 - return &bus->p->klist_devices; 964 - } 965 - EXPORT_SYMBOL_GPL(bus_get_device_klist); 966 944 967 945 /* 968 946 * Yes, this forcibly breaks the klist abstraction temporarily. 
It ··· 1034 934 int (*compare)(const struct device *a, 1035 935 const struct device *b)) 1036 936 { 937 + struct subsys_private *sp = bus_to_subsys(bus); 1037 938 LIST_HEAD(sorted_devices); 1038 939 struct klist_node *n, *tmp; 1039 940 struct device_private *dev_prv; 1040 941 struct device *dev; 1041 942 struct klist *device_klist; 1042 943 1043 - device_klist = bus_get_device_klist(bus); 944 + if (!sp) 945 + return; 946 + device_klist = &sp->klist_devices; 1044 947 1045 948 spin_lock(&device_klist->k_lock); 1046 949 list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) { ··· 1053 950 } 1054 951 list_splice(&sorted_devices, &device_klist->k_list); 1055 952 spin_unlock(&device_klist->k_lock); 953 + subsys_put(sp); 1056 954 } 1057 955 EXPORT_SYMBOL_GPL(bus_sort_breadthfirst); 956 + 957 + struct subsys_dev_iter { 958 + struct klist_iter ki; 959 + const struct device_type *type; 960 + }; 1058 961 1059 962 /** 1060 963 * subsys_dev_iter_init - initialize subsys device iterator 1061 964 * @iter: subsys iterator to initialize 1062 - * @subsys: the subsys we wanna iterate over 965 + * @sp: the subsys private (i.e. bus) we wanna iterate over 1063 966 * @start: the device to start iterating from, if any 1064 967 * @type: device_type of the devices to iterate over, NULL for all 1065 968 * ··· 1074 965 * otherwise if it is NULL, the iteration starts at the beginning of 1075 966 * the list. 
1076 967 */ 1077 - void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys, 1078 - struct device *start, const struct device_type *type) 968 + static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp, 969 + struct device *start, const struct device_type *type) 1079 970 { 1080 971 struct klist_node *start_knode = NULL; 1081 972 1082 973 if (start) 1083 974 start_knode = &start->p->knode_bus; 1084 - klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode); 975 + klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode); 1085 976 iter->type = type; 1086 977 } 1087 - EXPORT_SYMBOL_GPL(subsys_dev_iter_init); 1088 978 1089 979 /** 1090 980 * subsys_dev_iter_next - iterate to the next device ··· 1097 989 * free to do whatever it wants to do with the device including 1098 990 * calling back into subsys code. 1099 991 */ 1100 - struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) 992 + static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) 1101 993 { 1102 994 struct klist_node *knode; 1103 995 struct device *dev; ··· 1111 1003 return dev; 1112 1004 } 1113 1005 } 1114 - EXPORT_SYMBOL_GPL(subsys_dev_iter_next); 1115 1006 1116 1007 /** 1117 1008 * subsys_dev_iter_exit - finish iteration ··· 1119 1012 * Finish an iteration. Always call this function after iteration is 1120 1013 * complete whether the iteration ran till the end or not. 
1121 1014 */ 1122 - void subsys_dev_iter_exit(struct subsys_dev_iter *iter) 1015 + static void subsys_dev_iter_exit(struct subsys_dev_iter *iter) 1123 1016 { 1124 1017 klist_iter_exit(&iter->ki); 1125 1018 } 1126 - EXPORT_SYMBOL_GPL(subsys_dev_iter_exit); 1127 1019 1128 1020 int subsys_interface_register(struct subsys_interface *sif) 1129 1021 { 1130 - struct bus_type *subsys; 1022 + struct subsys_private *sp; 1131 1023 struct subsys_dev_iter iter; 1132 1024 struct device *dev; 1133 1025 1134 1026 if (!sif || !sif->subsys) 1135 1027 return -ENODEV; 1136 1028 1137 - subsys = bus_get(sif->subsys); 1138 - if (!subsys) 1029 + sp = bus_to_subsys(sif->subsys); 1030 + if (!sp) 1139 1031 return -EINVAL; 1140 1032 1141 - mutex_lock(&subsys->p->mutex); 1142 - list_add_tail(&sif->node, &subsys->p->interfaces); 1033 + /* 1034 + * Reference in sp is now incremented and will be dropped when 1035 + * the interface is removed from the bus 1036 + */ 1037 + 1038 + mutex_lock(&sp->mutex); 1039 + list_add_tail(&sif->node, &sp->interfaces); 1143 1040 if (sif->add_dev) { 1144 - subsys_dev_iter_init(&iter, subsys, NULL, NULL); 1041 + subsys_dev_iter_init(&iter, sp, NULL, NULL); 1145 1042 while ((dev = subsys_dev_iter_next(&iter))) 1146 1043 sif->add_dev(dev, sif); 1147 1044 subsys_dev_iter_exit(&iter); 1148 1045 } 1149 - mutex_unlock(&subsys->p->mutex); 1046 + mutex_unlock(&sp->mutex); 1150 1047 1151 1048 return 0; 1152 1049 } ··· 1158 1047 1159 1048 void subsys_interface_unregister(struct subsys_interface *sif) 1160 1049 { 1161 - struct bus_type *subsys; 1050 + struct subsys_private *sp; 1162 1051 struct subsys_dev_iter iter; 1163 1052 struct device *dev; 1164 1053 1165 1054 if (!sif || !sif->subsys) 1166 1055 return; 1167 1056 1168 - subsys = sif->subsys; 1057 + sp = bus_to_subsys(sif->subsys); 1058 + if (!sp) 1059 + return; 1169 1060 1170 - mutex_lock(&subsys->p->mutex); 1061 + mutex_lock(&sp->mutex); 1171 1062 list_del_init(&sif->node); 1172 1063 if (sif->remove_dev) { 1173 - 
subsys_dev_iter_init(&iter, subsys, NULL, NULL); 1064 + subsys_dev_iter_init(&iter, sp, NULL, NULL); 1174 1065 while ((dev = subsys_dev_iter_next(&iter))) 1175 1066 sif->remove_dev(dev, sif); 1176 1067 subsys_dev_iter_exit(&iter); 1177 1068 } 1178 - mutex_unlock(&subsys->p->mutex); 1069 + mutex_unlock(&sp->mutex); 1179 1070 1180 - bus_put(subsys); 1071 + /* 1072 + * Decrement the reference count twice, once for the bus_to_subsys() 1073 + * call in the start of this function, and the second one from the 1074 + * reference increment in subsys_interface_register() 1075 + */ 1076 + subsys_put(sp); 1077 + subsys_put(sp); 1181 1078 } 1182 1079 EXPORT_SYMBOL_GPL(subsys_interface_unregister); 1183 1080 ··· 1284 1165 return subsys_register(subsys, groups, virtual_dir); 1285 1166 } 1286 1167 EXPORT_SYMBOL_GPL(subsys_virtual_register); 1168 + 1169 + /** 1170 + * driver_find - locate driver on a bus by its name. 1171 + * @name: name of the driver. 1172 + * @bus: bus to scan for the driver. 1173 + * 1174 + * Call kset_find_obj() to iterate over list of drivers on 1175 + * a bus to find driver by name. Return driver if found. 1176 + * 1177 + * This routine provides no locking to prevent the driver it returns 1178 + * from being unregistered or unloaded while the caller is using it. 1179 + * The caller is responsible for preventing this. 
1180 + */ 1181 + struct device_driver *driver_find(const char *name, struct bus_type *bus) 1182 + { 1183 + struct subsys_private *sp = bus_to_subsys(bus); 1184 + struct kobject *k; 1185 + struct driver_private *priv; 1186 + 1187 + if (!sp) 1188 + return NULL; 1189 + 1190 + k = kset_find_obj(sp->drivers_kset, name); 1191 + subsys_put(sp); 1192 + if (!k) 1193 + return NULL; 1194 + 1195 + priv = to_driver(k); 1196 + 1197 + /* Drop reference added by kset_find_obj() */ 1198 + kobject_put(k); 1199 + return priv->driver; 1200 + } 1201 + EXPORT_SYMBOL_GPL(driver_find); 1202 + 1203 + /* 1204 + * Warning, the value could go to "removed" instantly after calling this function, so be very 1205 + * careful when calling it... 1206 + */ 1207 + bool bus_is_registered(const struct bus_type *bus) 1208 + { 1209 + struct subsys_private *sp = bus_to_subsys(bus); 1210 + bool is_initialized = false; 1211 + 1212 + if (sp) { 1213 + is_initialized = true; 1214 + subsys_put(sp); 1215 + } 1216 + return is_initialized; 1217 + } 1218 + 1219 + /** 1220 + * bus_get_dev_root - return a pointer to the "device root" of a bus 1221 + * @bus: bus to return the device root of. 1222 + * 1223 + * If a bus has a "device root" structure, return it, WITH THE REFERENCE 1224 + * COUNT INCREMENTED. 1225 + * 1226 + * Note, when finished with the device, a call to put_device() is required. 1227 + * 1228 + * If the device root is not present (or bus is not a valid pointer), NULL 1229 + * will be returned. 1230 + */ 1231 + struct device *bus_get_dev_root(const struct bus_type *bus) 1232 + { 1233 + if (bus) 1234 + return get_device(bus->dev_root); 1235 + return NULL; 1236 + } 1237 + EXPORT_SYMBOL_GPL(bus_get_dev_root); 1287 1238 1288 1239 int __init buses_init(void) 1289 1240 {
+138 -31
drivers/base/cacheinfo.c
··· 229 229 230 230 return 0; 231 231 } 232 + 233 + static int of_count_cache_leaves(struct device_node *np) 234 + { 235 + unsigned int leaves = 0; 236 + 237 + if (of_property_read_bool(np, "cache-size")) 238 + ++leaves; 239 + if (of_property_read_bool(np, "i-cache-size")) 240 + ++leaves; 241 + if (of_property_read_bool(np, "d-cache-size")) 242 + ++leaves; 243 + 244 + if (!leaves) { 245 + /* The '[i-|d-|]cache-size' property is required, but 246 + * if absent, fallback on the 'cache-unified' property. 247 + */ 248 + if (of_property_read_bool(np, "cache-unified")) 249 + return 1; 250 + else 251 + return 2; 252 + } 253 + 254 + return leaves; 255 + } 256 + 257 + int init_of_cache_level(unsigned int cpu) 258 + { 259 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 260 + struct device_node *np = of_cpu_device_node_get(cpu); 261 + struct device_node *prev = NULL; 262 + unsigned int levels = 0, leaves, level; 263 + 264 + leaves = of_count_cache_leaves(np); 265 + if (leaves > 0) 266 + levels = 1; 267 + 268 + prev = np; 269 + while ((np = of_find_next_cache_node(np))) { 270 + of_node_put(prev); 271 + prev = np; 272 + if (!of_device_is_compatible(np, "cache")) 273 + goto err_out; 274 + if (of_property_read_u32(np, "cache-level", &level)) 275 + goto err_out; 276 + if (level <= levels) 277 + goto err_out; 278 + 279 + leaves += of_count_cache_leaves(np); 280 + levels = level; 281 + } 282 + 283 + of_node_put(np); 284 + this_cpu_ci->num_levels = levels; 285 + this_cpu_ci->num_leaves = leaves; 286 + 287 + return 0; 288 + 289 + err_out: 290 + of_node_put(np); 291 + return -EINVAL; 292 + } 293 + 232 294 #else 233 295 static inline int cache_setup_of_node(unsigned int cpu) { return 0; } 296 + int init_of_cache_level(unsigned int cpu) { return 0; } 234 297 #endif 235 298 236 299 int __weak cache_setup_acpi(unsigned int cpu) ··· 319 256 { 320 257 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 321 258 struct cacheinfo *this_leaf, *sib_leaf; 322 - unsigned int 
index; 259 + unsigned int index, sib_index; 323 260 int ret = 0; 324 261 325 262 if (this_cpu_ci->cpu_map_populated) ··· 347 284 348 285 if (i == cpu || !sib_cpu_ci->info_list) 349 286 continue;/* skip if itself or no cacheinfo */ 350 - 351 - sib_leaf = per_cpu_cacheinfo_idx(i, index); 352 - if (cache_leaves_are_shared(this_leaf, sib_leaf)) { 353 - cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); 354 - cpumask_set_cpu(i, &this_leaf->shared_cpu_map); 287 + for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) { 288 + sib_leaf = per_cpu_cacheinfo_idx(i, sib_index); 289 + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { 290 + cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); 291 + cpumask_set_cpu(i, &this_leaf->shared_cpu_map); 292 + break; 293 + } 355 294 } 356 295 } 357 296 /* record the maximum cache line size */ ··· 367 302 static void cache_shared_cpu_map_remove(unsigned int cpu) 368 303 { 369 304 struct cacheinfo *this_leaf, *sib_leaf; 370 - unsigned int sibling, index; 305 + unsigned int sibling, index, sib_index; 371 306 372 307 for (index = 0; index < cache_leaves(cpu); index++) { 373 308 this_leaf = per_cpu_cacheinfo_idx(cpu, index); ··· 378 313 if (sibling == cpu || !sib_cpu_ci->info_list) 379 314 continue;/* skip if itself or no cacheinfo */ 380 315 381 - sib_leaf = per_cpu_cacheinfo_idx(sibling, index); 382 - cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); 383 - cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); 316 + for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) { 317 + sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index); 318 + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { 319 + cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); 320 + cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); 321 + break; 322 + } 323 + } 384 324 } 385 325 } 386 326 } ··· 396 326 return; 397 327 398 328 cache_shared_cpu_map_remove(cpu); 399 - 400 - kfree(per_cpu_cacheinfo(cpu)); 401 - per_cpu_cacheinfo(cpu) = NULL; 402 - 
cache_leaves(cpu) = 0; 403 329 } 404 330 405 331 int __weak init_cache_level(unsigned int cpu) ··· 408 342 return -ENOENT; 409 343 } 410 344 411 - int detect_cache_attributes(unsigned int cpu) 345 + static inline 346 + int allocate_cache_info(int cpu) 412 347 { 413 - int ret; 414 - 415 - /* Since early detection of the cacheinfo is allowed via this 416 - * function and this also gets called as CPU hotplug callbacks via 417 - * cacheinfo_cpu_online, the initialisation can be skipped and only 418 - * CPU maps can be updated as the CPU online status would be update 419 - * if called via cacheinfo_cpu_online path. 420 - */ 421 - if (per_cpu_cacheinfo(cpu)) 422 - goto update_cpu_map; 423 - 424 - if (init_cache_level(cpu) || !cache_leaves(cpu)) 425 - return -ENOENT; 426 - 427 348 per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), 428 349 sizeof(struct cacheinfo), GFP_ATOMIC); 429 - if (per_cpu_cacheinfo(cpu) == NULL) { 350 + if (!per_cpu_cacheinfo(cpu)) { 430 351 cache_leaves(cpu) = 0; 431 352 return -ENOMEM; 432 353 } 433 354 355 + return 0; 356 + } 357 + 358 + int fetch_cache_info(unsigned int cpu) 359 + { 360 + struct cpu_cacheinfo *this_cpu_ci; 361 + unsigned int levels = 0, split_levels = 0; 362 + int ret; 363 + 364 + if (acpi_disabled) { 365 + ret = init_of_cache_level(cpu); 366 + if (ret < 0) 367 + return ret; 368 + } else { 369 + ret = acpi_get_cache_info(cpu, &levels, &split_levels); 370 + if (ret < 0) 371 + return ret; 372 + 373 + this_cpu_ci = get_cpu_cacheinfo(cpu); 374 + this_cpu_ci->num_levels = levels; 375 + /* 376 + * This assumes that: 377 + * - there cannot be any split caches (data/instruction) 378 + * above a unified cache 379 + * - data/instruction caches come by pair 380 + */ 381 + this_cpu_ci->num_leaves = levels + split_levels; 382 + } 383 + if (!cache_leaves(cpu)) 384 + return -ENOENT; 385 + 386 + return allocate_cache_info(cpu); 387 + } 388 + 389 + int detect_cache_attributes(unsigned int cpu) 390 + { 391 + int ret; 392 + 393 + /* Since early 
initialization/allocation of the cacheinfo is allowed 394 + * via fetch_cache_info() and this also gets called as CPU hotplug 395 + * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped 396 + * as it will happen only once (the cacheinfo memory is never freed). 397 + * Just populate the cacheinfo. 398 + */ 399 + if (per_cpu_cacheinfo(cpu)) 400 + goto populate_leaves; 401 + 402 + if (init_cache_level(cpu) || !cache_leaves(cpu)) 403 + return -ENOENT; 404 + 405 + ret = allocate_cache_info(cpu); 406 + if (ret) 407 + return ret; 408 + 409 + populate_leaves: 434 410 /* 435 411 * populate_cache_leaves() may completely setup the cache leaves and 436 412 * shared_cpu_map or it may leave it partially setup. ··· 481 373 if (ret) 482 374 goto free_ci; 483 375 484 - update_cpu_map: 485 376 /* 486 377 * For systems using DT for cache hierarchy, fw_token 487 378 * and shared_cpu_map will be set up here only if they are
+19 -15
drivers/base/class.c
··· 53 53 54 54 pr_debug("class '%s': release.\n", class->name); 55 55 56 + class->p = NULL; 57 + 56 58 if (class->class_release) 57 59 class->class_release(class); 58 60 else ··· 66 64 67 65 static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj) 68 66 { 69 - struct subsys_private *cp = to_subsys_private(kobj); 67 + const struct subsys_private *cp = to_subsys_private(kobj); 70 68 struct class *class = cp->class; 71 69 72 70 return class->ns_type; ··· 77 75 .store = class_attr_store, 78 76 }; 79 77 80 - static struct kobj_type class_ktype = { 78 + static const struct kobj_type class_ktype = { 81 79 .sysfs_ops = &class_sysfs_ops, 82 80 .release = class_release, 83 81 .child_ns_type = class_child_ns_type, ··· 99 97 error = -EINVAL; 100 98 return error; 101 99 } 100 + EXPORT_SYMBOL_GPL(class_create_file_ns); 102 101 103 102 void class_remove_file_ns(struct class *cls, const struct class_attribute *attr, 104 103 const void *ns) ··· 107 104 if (cls) 108 105 sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns); 109 106 } 107 + EXPORT_SYMBOL_GPL(class_remove_file_ns); 110 108 111 109 static struct class *class_get(struct class *cls) 112 110 { ··· 190 186 cls->p = cp; 191 187 192 188 error = kset_register(&cp->subsys); 193 - if (error) { 194 - kfree(cp); 195 - return error; 196 - } 189 + if (error) 190 + goto err_out; 191 + 197 192 error = class_add_groups(class_get(cls), cls->class_groups); 198 193 class_put(cls); 199 194 if (error) { 200 195 kobject_del(&cp->subsys.kobj); 201 196 kfree_const(cp->subsys.kobj.name); 202 - kfree(cp); 197 + goto err_out; 203 198 } 199 + return 0; 200 + 201 + err_out: 202 + kfree(cp); 203 + cls->p = NULL; 204 204 return error; 205 205 } 206 206 EXPORT_SYMBOL_GPL(__class_register); ··· 215 207 class_remove_groups(cls, cls->class_groups); 216 208 kset_unregister(&cls->p->subsys); 217 209 } 210 + EXPORT_SYMBOL_GPL(class_unregister); 218 211 219 212 static void class_create_release(struct class *cls) 
220 213 { ··· 279 270 280 271 class_unregister(cls); 281 272 } 273 + EXPORT_SYMBOL_GPL(class_destroy); 282 274 283 275 /** 284 276 * class_dev_iter_init - initialize class device iterator ··· 464 454 465 455 return 0; 466 456 } 457 + EXPORT_SYMBOL_GPL(class_interface_register); 467 458 468 459 void class_interface_unregister(struct class_interface *class_intf) 469 460 { ··· 487 476 488 477 class_put(parent); 489 478 } 479 + EXPORT_SYMBOL_GPL(class_interface_unregister); 490 480 491 481 ssize_t show_class_attr_string(struct class *class, 492 482 struct class_attribute *attr, char *buf) ··· 594 582 return -ENOMEM; 595 583 return 0; 596 584 } 597 - 598 - EXPORT_SYMBOL_GPL(class_create_file_ns); 599 - EXPORT_SYMBOL_GPL(class_remove_file_ns); 600 - EXPORT_SYMBOL_GPL(class_unregister); 601 - EXPORT_SYMBOL_GPL(class_destroy); 602 - 603 - EXPORT_SYMBOL_GPL(class_interface_register); 604 - EXPORT_SYMBOL_GPL(class_interface_unregister);
+1 -1
drivers/base/component.c
··· 125 125 126 126 static void component_debugfs_del(struct aggregate_device *m) 127 127 { 128 - debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir)); 128 + debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir); 129 129 } 130 130 131 131 #else
+323 -174
drivers/base/core.c
··· 54 54 static unsigned int defer_sync_state_count = 1; 55 55 static DEFINE_MUTEX(fwnode_link_lock); 56 56 static bool fw_devlink_is_permissive(void); 57 + static void __fw_devlink_link_to_consumers(struct device *dev); 57 58 static bool fw_devlink_drv_reg_done; 58 59 static bool fw_devlink_best_effort; 59 60 60 61 /** 61 - * fwnode_link_add - Create a link between two fwnode_handles. 62 + * __fwnode_link_add - Create a link between two fwnode_handles. 62 63 * @con: Consumer end of the link. 63 64 * @sup: Supplier end of the link. 64 65 * ··· 75 74 * Attempts to create duplicate links between the same pair of fwnode handles 76 75 * are ignored and there is no reference counting. 77 76 */ 78 - int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup) 77 + static int __fwnode_link_add(struct fwnode_handle *con, 78 + struct fwnode_handle *sup, u8 flags) 79 79 { 80 80 struct fwnode_link *link; 81 - int ret = 0; 82 - 83 - mutex_lock(&fwnode_link_lock); 84 81 85 82 list_for_each_entry(link, &sup->consumers, s_hook) 86 - if (link->consumer == con) 87 - goto out; 83 + if (link->consumer == con) { 84 + link->flags |= flags; 85 + return 0; 86 + } 88 87 89 88 link = kzalloc(sizeof(*link), GFP_KERNEL); 90 - if (!link) { 91 - ret = -ENOMEM; 92 - goto out; 93 - } 89 + if (!link) 90 + return -ENOMEM; 94 91 95 92 link->supplier = sup; 96 93 INIT_LIST_HEAD(&link->s_hook); 97 94 link->consumer = con; 98 95 INIT_LIST_HEAD(&link->c_hook); 96 + link->flags = flags; 99 97 100 98 list_add(&link->s_hook, &sup->consumers); 101 99 list_add(&link->c_hook, &con->suppliers); 102 100 pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n", 103 101 con, sup); 104 - out: 105 - mutex_unlock(&fwnode_link_lock); 106 102 103 + return 0; 104 + } 105 + 106 + int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup) 107 + { 108 + int ret; 109 + 110 + mutex_lock(&fwnode_link_lock); 111 + ret = __fwnode_link_add(con, sup, 0); 112 + mutex_unlock(&fwnode_link_lock); 107 113 
return ret; 108 114 } 109 115 ··· 127 119 list_del(&link->s_hook); 128 120 list_del(&link->c_hook); 129 121 kfree(link); 122 + } 123 + 124 + /** 125 + * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle. 126 + * @link: the fwnode_link to be marked 127 + * 128 + * The fwnode_link_lock needs to be held when this function is called. 129 + */ 130 + static void __fwnode_link_cycle(struct fwnode_link *link) 131 + { 132 + pr_debug("%pfwf: Relaxing link with %pfwf\n", 133 + link->consumer, link->supplier); 134 + link->flags |= FWLINK_FLAG_CYCLE; 130 135 } 131 136 132 137 /** ··· 202 181 } 203 182 EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers); 204 183 184 + /** 185 + * __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle 186 + * @from: move consumers away from this fwnode 187 + * @to: move consumers to this fwnode 188 + * 189 + * Move all consumer links from @from fwnode to @to fwnode. 190 + */ 191 + static void __fwnode_links_move_consumers(struct fwnode_handle *from, 192 + struct fwnode_handle *to) 193 + { 194 + struct fwnode_link *link, *tmp; 195 + 196 + list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) { 197 + __fwnode_link_add(link->consumer, to, link->flags); 198 + __fwnode_link_del(link); 199 + } 200 + } 201 + 202 + /** 203 + * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers 204 + * @fwnode: fwnode from which to pick up dangling consumers 205 + * @new_sup: fwnode of new supplier 206 + * 207 + * If the @fwnode has a corresponding struct device and the device supports 208 + * probing (that is, added to a bus), then we want to let fw_devlink create 209 + * MANAGED device links to this device, so leave @fwnode and its descendant's 210 + * fwnode links alone. 211 + * 212 + * Otherwise, move its consumers to the new supplier @new_sup. 
213 + */ 214 + static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode, 215 + struct fwnode_handle *new_sup) 216 + { 217 + struct fwnode_handle *child; 218 + 219 + if (fwnode->dev && fwnode->dev->bus) 220 + return; 221 + 222 + fwnode->flags |= FWNODE_FLAG_NOT_DEVICE; 223 + __fwnode_links_move_consumers(fwnode, new_sup); 224 + 225 + fwnode_for_each_available_child_node(fwnode, child) 226 + __fw_devlink_pickup_dangling_consumers(child, new_sup); 227 + } 228 + 205 229 static DEFINE_MUTEX(device_links_lock); 206 230 DEFINE_STATIC_SRCU(device_links_srcu); 207 231 ··· 296 230 return false; 297 231 } 298 232 233 + static inline bool device_link_flag_is_sync_state_only(u32 flags) 234 + { 235 + return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) == 236 + (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED); 237 + } 238 + 299 239 /** 300 240 * device_is_dependent - Check if one device depends on another one 301 241 * @dev: Device to check dependencies for. ··· 328 256 return ret; 329 257 330 258 list_for_each_entry(link, &dev->links.consumers, s_node) { 331 - if ((link->flags & ~DL_FLAG_INFERRED) == 332 - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) 259 + if (device_link_flag_is_sync_state_only(link->flags)) 333 260 continue; 334 261 335 262 if (link->consumer == target) ··· 401 330 402 331 device_for_each_child(dev, NULL, device_reorder_to_tail); 403 332 list_for_each_entry(link, &dev->links.consumers, s_node) { 404 - if ((link->flags & ~DL_FLAG_INFERRED) == 405 - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) 333 + if (device_link_flag_is_sync_state_only(link->flags)) 406 334 continue; 407 335 device_reorder_to_tail(link->consumer, NULL); 408 336 } ··· 662 592 DL_FLAG_AUTOREMOVE_SUPPLIER | \ 663 593 DL_FLAG_AUTOPROBE_CONSUMER | \ 664 594 DL_FLAG_SYNC_STATE_ONLY | \ 665 - DL_FLAG_INFERRED) 595 + DL_FLAG_INFERRED | \ 596 + DL_FLAG_CYCLE) 666 597 667 598 #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \ 668 599 DL_FLAG_PM_RUNTIME | 
DL_FLAG_RPM_ACTIVE) ··· 732 661 if (!consumer || !supplier || consumer == supplier || 733 662 flags & ~DL_ADD_VALID_FLAGS || 734 663 (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) || 735 - (flags & DL_FLAG_SYNC_STATE_ONLY && 736 - (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) || 737 664 (flags & DL_FLAG_AUTOPROBE_CONSUMER && 738 665 flags & (DL_FLAG_AUTOREMOVE_CONSUMER | 739 666 DL_FLAG_AUTOREMOVE_SUPPLIER))) ··· 746 677 747 678 if (!(flags & DL_FLAG_STATELESS)) 748 679 flags |= DL_FLAG_MANAGED; 680 + 681 + if (flags & DL_FLAG_SYNC_STATE_ONLY && 682 + !device_link_flag_is_sync_state_only(flags)) 683 + return NULL; 749 684 750 685 device_links_write_lock(); 751 686 device_pm_lock(); ··· 1015 942 (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT)); 1016 943 } 1017 944 945 + static struct fwnode_handle *fwnode_links_check_suppliers( 946 + struct fwnode_handle *fwnode) 947 + { 948 + struct fwnode_link *link; 949 + 950 + if (!fwnode || fw_devlink_is_permissive()) 951 + return NULL; 952 + 953 + list_for_each_entry(link, &fwnode->suppliers, c_hook) 954 + if (!(link->flags & FWLINK_FLAG_CYCLE)) 955 + return link->supplier; 956 + 957 + return NULL; 958 + } 959 + 1018 960 /** 1019 961 * device_links_check_suppliers - Check presence of supplier drivers. 1020 962 * @dev: Consumer device. ··· 1057 969 * probe. 
1058 970 */ 1059 971 mutex_lock(&fwnode_link_lock); 1060 - if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) && 1061 - !fw_devlink_is_permissive()) { 1062 - sup_fw = list_first_entry(&dev->fwnode->suppliers, 1063 - struct fwnode_link, 1064 - c_hook)->supplier; 972 + sup_fw = fwnode_links_check_suppliers(dev->fwnode); 973 + if (sup_fw) { 1065 974 if (!dev_is_best_effort(dev)) { 1066 975 fwnode_ret = -EPROBE_DEFER; 1067 976 dev_err_probe(dev, -EPROBE_DEFER, ··· 1247 1162 bool val; 1248 1163 1249 1164 device_lock(dev); 1250 - val = !list_empty(&dev->fwnode->suppliers); 1165 + mutex_lock(&fwnode_link_lock); 1166 + val = !!fwnode_links_check_suppliers(dev->fwnode); 1167 + mutex_unlock(&fwnode_link_lock); 1251 1168 device_unlock(dev); 1252 1169 return sysfs_emit(buf, "%u\n", val); 1253 1170 } ··· 1312 1225 * them. So, fw_devlink no longer needs to create device links to any 1313 1226 * of the device's suppliers. 1314 1227 * 1315 - * Also, if a child firmware node of this bound device is not added as 1316 - * a device by now, assume it is never going to be added and make sure 1317 - * other devices don't defer probe indefinitely by waiting for such a 1318 - * child device. 1228 + * Also, if a child firmware node of this bound device is not added as a 1229 + * device by now, assume it is never going to be added. Make this bound 1230 + * device the fallback supplier to the dangling consumers of the child 1231 + * firmware node because this bound device is probably implementing the 1232 + * child firmware node functionality and we don't want the dangling 1233 + * consumers to defer probe indefinitely waiting for a device for the 1234 + * child firmware node. 
1319 1235 */ 1320 1236 if (dev->fwnode && dev->fwnode->dev == dev) { 1321 1237 struct fwnode_handle *child; 1322 1238 fwnode_links_purge_suppliers(dev->fwnode); 1239 + mutex_lock(&fwnode_link_lock); 1323 1240 fwnode_for_each_available_child_node(dev->fwnode, child) 1324 - fw_devlink_purge_absent_suppliers(child); 1241 + __fw_devlink_pickup_dangling_consumers(child, 1242 + dev->fwnode); 1243 + __fw_devlink_link_to_consumers(dev); 1244 + mutex_unlock(&fwnode_link_lock); 1325 1245 } 1326 1246 device_remove_file(dev, &dev_attr_waiting_for_supplier); 1327 1247 ··· 1685 1591 } 1686 1592 early_param("fw_devlink.strict", fw_devlink_strict_setup); 1687 1593 1688 - u32 fw_devlink_get_flags(void) 1594 + static inline u32 fw_devlink_get_flags(u8 fwlink_flags) 1689 1595 { 1596 + if (fwlink_flags & FWLINK_FLAG_CYCLE) 1597 + return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE; 1598 + 1690 1599 return fw_devlink_flags; 1691 1600 } 1692 1601 ··· 1727 1630 if (!(link->flags & DL_FLAG_INFERRED)) 1728 1631 return; 1729 1632 1730 - if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE)) 1633 + if (device_link_flag_is_sync_state_only(link->flags)) 1731 1634 return; 1732 1635 1733 1636 pm_runtime_drop_link(link); ··· 1824 1727 device_links_write_unlock(); 1825 1728 } 1826 1729 1827 - /** 1828 - * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links 1829 - * @con: Device to check dependencies for. 1830 - * @sup: Device to check against. 1831 - * 1832 - * Check if @sup depends on @con or any device dependent on it (its child or 1833 - * its consumer etc). When such a cyclic dependency is found, convert all 1834 - * device links created solely by fw_devlink into SYNC_STATE_ONLY device links. 1835 - * This is the equivalent of doing fw_devlink=permissive just between the 1836 - * devices in the cycle. We need to do this because, at this point, fw_devlink 1837 - * can't tell which of these dependencies is not a real dependency. 
1838 - * 1839 - * Return 1 if a cycle is found. Otherwise, return 0. 1840 - */ 1841 - static int fw_devlink_relax_cycle(struct device *con, void *sup) 1730 + 1731 + static bool fwnode_init_without_drv(struct fwnode_handle *fwnode) 1842 1732 { 1843 - struct device_link *link; 1844 - int ret; 1733 + struct device *dev; 1734 + bool ret; 1845 1735 1846 - if (con == sup) 1847 - return 1; 1736 + if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED)) 1737 + return false; 1848 1738 1849 - ret = device_for_each_child(con, sup, fw_devlink_relax_cycle); 1850 - if (ret) 1851 - return ret; 1739 + dev = get_dev_from_fwnode(fwnode); 1740 + ret = !dev || dev->links.status == DL_DEV_NO_DRIVER; 1741 + put_device(dev); 1852 1742 1853 - list_for_each_entry(link, &con->links.consumers, s_node) { 1854 - if ((link->flags & ~DL_FLAG_INFERRED) == 1855 - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) 1856 - continue; 1743 + return ret; 1744 + } 1857 1745 1858 - if (!fw_devlink_relax_cycle(link->consumer, sup)) 1859 - continue; 1746 + static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode) 1747 + { 1748 + struct fwnode_handle *parent; 1860 1749 1861 - ret = 1; 1862 - 1863 - fw_devlink_relax_link(link); 1750 + fwnode_for_each_parent_node(fwnode, parent) { 1751 + if (fwnode_init_without_drv(parent)) { 1752 + fwnode_handle_put(parent); 1753 + return true; 1754 + } 1864 1755 } 1756 + 1757 + return false; 1758 + } 1759 + 1760 + /** 1761 + * __fw_devlink_relax_cycles - Relax and mark dependency cycles. 1762 + * @con: Potential consumer device. 1763 + * @sup_handle: Potential supplier's fwnode. 1764 + * 1765 + * Needs to be called with fwnode_lock and device link lock held. 1766 + * 1767 + * Check if @sup_handle or any of its ancestors or suppliers direct/indirectly 1768 + * depend on @con. This function can detect multiple cyles between @sup_handle 1769 + * and @con. 
When such dependency cycles are found, convert all device links 1770 + * created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark 1771 + * all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are 1772 + * converted into a device link in the future, they are created as 1773 + * SYNC_STATE_ONLY device links. This is the equivalent of doing 1774 + * fw_devlink=permissive just between the devices in the cycle. We need to do 1775 + * this because, at this point, fw_devlink can't tell which of these 1776 + * dependencies is not a real dependency. 1777 + * 1778 + * Return true if one or more cycles were found. Otherwise, return false. 1779 + */ 1780 + static bool __fw_devlink_relax_cycles(struct device *con, 1781 + struct fwnode_handle *sup_handle) 1782 + { 1783 + struct device *sup_dev = NULL, *par_dev = NULL; 1784 + struct fwnode_link *link; 1785 + struct device_link *dev_link; 1786 + bool ret = false; 1787 + 1788 + if (!sup_handle) 1789 + return false; 1790 + 1791 + /* 1792 + * We aren't trying to find all cycles. Just a cycle between con and 1793 + * sup_handle. 1794 + */ 1795 + if (sup_handle->flags & FWNODE_FLAG_VISITED) 1796 + return false; 1797 + 1798 + sup_handle->flags |= FWNODE_FLAG_VISITED; 1799 + 1800 + sup_dev = get_dev_from_fwnode(sup_handle); 1801 + 1802 + /* Termination condition. */ 1803 + if (sup_dev == con) { 1804 + ret = true; 1805 + goto out; 1806 + } 1807 + 1808 + /* 1809 + * If sup_dev is bound to a driver and @con hasn't started binding to a 1810 + * driver, sup_dev can't be a consumer of @con. So, no need to check 1811 + * further. 
1812 + */ 1813 + if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND && 1814 + con->links.status == DL_DEV_NO_DRIVER) { 1815 + ret = false; 1816 + goto out; 1817 + } 1818 + 1819 + list_for_each_entry(link, &sup_handle->suppliers, c_hook) { 1820 + if (__fw_devlink_relax_cycles(con, link->supplier)) { 1821 + __fwnode_link_cycle(link); 1822 + ret = true; 1823 + } 1824 + } 1825 + 1826 + /* 1827 + * Give priority to device parent over fwnode parent to account for any 1828 + * quirks in how fwnodes are converted to devices. 1829 + */ 1830 + if (sup_dev) 1831 + par_dev = get_device(sup_dev->parent); 1832 + else 1833 + par_dev = fwnode_get_next_parent_dev(sup_handle); 1834 + 1835 + if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) 1836 + ret = true; 1837 + 1838 + if (!sup_dev) 1839 + goto out; 1840 + 1841 + list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) { 1842 + /* 1843 + * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as 1844 + * such due to a cycle. 1845 + */ 1846 + if (device_link_flag_is_sync_state_only(dev_link->flags) && 1847 + !(dev_link->flags & DL_FLAG_CYCLE)) 1848 + continue; 1849 + 1850 + if (__fw_devlink_relax_cycles(con, 1851 + dev_link->supplier->fwnode)) { 1852 + fw_devlink_relax_link(dev_link); 1853 + dev_link->flags |= DL_FLAG_CYCLE; 1854 + ret = true; 1855 + } 1856 + } 1857 + 1858 + out: 1859 + sup_handle->flags &= ~FWNODE_FLAG_VISITED; 1860 + put_device(sup_dev); 1861 + put_device(par_dev); 1865 1862 return ret; 1866 1863 } 1867 1864 ··· 1963 1772 * fw_devlink_create_devlink - Create a device link from a consumer to fwnode 1964 1773 * @con: consumer device for the device link 1965 1774 * @sup_handle: fwnode handle of supplier 1966 - * @flags: devlink flags 1775 + * @link: fwnode link that's being converted to a device link 1967 1776 * 1968 1777 * This function will try to create a device link between the consumer device 1969 1778 * @con and the supplier device represented by @sup_handle. 
··· 1980 1789 * possible to do that in the future 1981 1790 */ 1982 1791 static int fw_devlink_create_devlink(struct device *con, 1983 - struct fwnode_handle *sup_handle, u32 flags) 1792 + struct fwnode_handle *sup_handle, 1793 + struct fwnode_link *link) 1984 1794 { 1985 1795 struct device *sup_dev; 1986 1796 int ret = 0; 1797 + u32 flags; 1798 + 1799 + if (con->fwnode == link->consumer) 1800 + flags = fw_devlink_get_flags(link->flags); 1801 + else 1802 + flags = FW_DEVLINK_FLAGS_PERMISSIVE; 1987 1803 1988 1804 /* 1989 1805 * In some cases, a device P might also be a supplier to its child node ··· 2011 1813 fwnode_is_ancestor_of(sup_handle, con->fwnode)) 2012 1814 return -EINVAL; 2013 1815 2014 - sup_dev = get_dev_from_fwnode(sup_handle); 1816 + /* 1817 + * SYNC_STATE_ONLY device links don't block probing and supports cycles. 1818 + * So cycle detection isn't necessary and shouldn't be done. 1819 + */ 1820 + if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) { 1821 + device_links_write_lock(); 1822 + if (__fw_devlink_relax_cycles(con, sup_handle)) { 1823 + __fwnode_link_cycle(link); 1824 + flags = fw_devlink_get_flags(link->flags); 1825 + dev_info(con, "Fixed dependency cycle(s) with %pfwf\n", 1826 + sup_handle); 1827 + } 1828 + device_links_write_unlock(); 1829 + } 1830 + 1831 + if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE) 1832 + sup_dev = fwnode_get_next_parent_dev(sup_handle); 1833 + else 1834 + sup_dev = get_dev_from_fwnode(sup_handle); 1835 + 2015 1836 if (sup_dev) { 2016 1837 /* 2017 1838 * If it's one of those drivers that don't actually bind to ··· 2039 1822 */ 2040 1823 if (sup_dev->links.status == DL_DEV_NO_DRIVER && 2041 1824 sup_handle->flags & FWNODE_FLAG_INITIALIZED) { 1825 + dev_dbg(con, 1826 + "Not linking %pfwf - dev might never probe\n", 1827 + sup_handle); 2042 1828 ret = -EINVAL; 2043 1829 goto out; 2044 1830 } 2045 1831 2046 - /* 2047 - * If this fails, it is due to cycles in device links. Just 2048 - * give up on this link and treat it as invalid. 
2049 - */ 2050 - if (!device_link_add(con, sup_dev, flags) && 2051 - !(flags & DL_FLAG_SYNC_STATE_ONLY)) { 2052 - dev_info(con, "Fixing up cyclic dependency with %s\n", 2053 - dev_name(sup_dev)); 2054 - device_links_write_lock(); 2055 - fw_devlink_relax_cycle(con, sup_dev); 2056 - device_links_write_unlock(); 2057 - device_link_add(con, sup_dev, 2058 - FW_DEVLINK_FLAGS_PERMISSIVE); 1832 + if (!device_link_add(con, sup_dev, flags)) { 1833 + dev_err(con, "Failed to create device link with %s\n", 1834 + dev_name(sup_dev)); 2059 1835 ret = -EINVAL; 2060 1836 } 2061 1837 2062 1838 goto out; 2063 1839 } 2064 1840 2065 - /* Supplier that's already initialized without a struct device. */ 2066 - if (sup_handle->flags & FWNODE_FLAG_INITIALIZED) 1841 + /* 1842 + * Supplier or supplier's ancestor already initialized without a struct 1843 + * device or being probed by a driver. 1844 + */ 1845 + if (fwnode_init_without_drv(sup_handle) || 1846 + fwnode_ancestor_init_without_drv(sup_handle)) { 1847 + dev_dbg(con, "Not linking %pfwf - might never become dev\n", 1848 + sup_handle); 2067 1849 return -EINVAL; 2068 - 2069 - /* 2070 - * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports 2071 - * cycles. So cycle detection isn't necessary and shouldn't be 2072 - * done. 2073 - */ 2074 - if (flags & DL_FLAG_SYNC_STATE_ONLY) 2075 - return -EAGAIN; 2076 - 2077 - /* 2078 - * If we can't find the supplier device from its fwnode, it might be 2079 - * due to a cyclic dependency between fwnodes. Some of these cycles can 2080 - * be broken by applying logic. Check for these types of cycles and 2081 - * break them so that devices in the cycle probe properly. 2082 - * 2083 - * If the supplier's parent is dependent on the consumer, then the 2084 - * consumer and supplier have a cyclic dependency. Since fw_devlink 2085 - * can't tell which of the inferred dependencies are incorrect, don't 2086 - * enforce probe ordering between any of the devices in this cyclic 2087 - * dependency. 
Do this by relaxing all the fw_devlink device links in 2088 - * this cycle and by treating the fwnode link between the consumer and 2089 - * the supplier as an invalid dependency. 2090 - */ 2091 - sup_dev = fwnode_get_next_parent_dev(sup_handle); 2092 - if (sup_dev && device_is_dependent(con, sup_dev)) { 2093 - dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n", 2094 - sup_handle, dev_name(sup_dev)); 2095 - device_links_write_lock(); 2096 - fw_devlink_relax_cycle(con, sup_dev); 2097 - device_links_write_unlock(); 2098 - ret = -EINVAL; 2099 - } else { 2100 - /* 2101 - * Can't check for cycles or no cycles. So let's try 2102 - * again later. 2103 - */ 2104 - ret = -EAGAIN; 2105 1850 } 2106 1851 1852 + ret = -EAGAIN; 2107 1853 out: 2108 1854 put_device(sup_dev); 2109 1855 return ret; ··· 2094 1914 struct fwnode_link *link, *tmp; 2095 1915 2096 1916 list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { 2097 - u32 dl_flags = fw_devlink_get_flags(); 2098 1917 struct device *con_dev; 2099 1918 bool own_link = true; 2100 1919 int ret; ··· 2123 1944 con_dev = NULL; 2124 1945 } else { 2125 1946 own_link = false; 2126 - dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; 2127 1947 } 2128 1948 } 2129 1949 2130 1950 if (!con_dev) 2131 1951 continue; 2132 1952 2133 - ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags); 1953 + ret = fw_devlink_create_devlink(con_dev, fwnode, link); 2134 1954 put_device(con_dev); 2135 1955 if (!own_link || ret == -EAGAIN) 2136 1956 continue; ··· 2149 1971 * 2150 1972 * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev 2151 1973 * and the real suppliers of @dev. Once these device links are created, the 2152 - * fwnode links are deleted. When such device links are successfully created, 2153 - * this function is called recursively on those supplier devices. This is 2154 - * needed to detect and break some invalid cycles in fwnode links. See 2155 - * fw_devlink_create_devlink() for more details. 
1974 + * fwnode links are deleted. 2156 1975 * 2157 1976 * In addition, it also looks at all the suppliers of the entire fwnode tree 2158 1977 * because some of the child devices of @dev that have not been added yet ··· 2167 1992 bool own_link = (dev->fwnode == fwnode); 2168 1993 struct fwnode_link *link, *tmp; 2169 1994 struct fwnode_handle *child = NULL; 2170 - u32 dl_flags; 2171 - 2172 - if (own_link) 2173 - dl_flags = fw_devlink_get_flags(); 2174 - else 2175 - dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; 2176 1995 2177 1996 list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { 2178 1997 int ret; 2179 - struct device *sup_dev; 2180 1998 struct fwnode_handle *sup = link->supplier; 2181 1999 2182 - ret = fw_devlink_create_devlink(dev, sup, dl_flags); 2000 + ret = fw_devlink_create_devlink(dev, sup, link); 2183 2001 if (!own_link || ret == -EAGAIN) 2184 2002 continue; 2185 2003 2186 2004 __fwnode_link_del(link); 2187 - 2188 - /* If no device link was created, nothing more to do. */ 2189 - if (ret) 2190 - continue; 2191 - 2192 - /* 2193 - * If a device link was successfully created to a supplier, we 2194 - * now need to try and link the supplier to all its suppliers. 2195 - * 2196 - * This is needed to detect and delete false dependencies in 2197 - * fwnode links that haven't been converted to a device link 2198 - * yet. See comments in fw_devlink_create_devlink() for more 2199 - * details on the false dependency. 2200 - * 2201 - * Without deleting these false dependencies, some devices will 2202 - * never probe because they'll keep waiting for their false 2203 - * dependency fwnode links to be converted to device links. 
2204 - */ 2205 - sup_dev = get_dev_from_fwnode(sup); 2206 - __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode); 2207 - put_device(sup_dev); 2208 2005 } 2209 2006 2210 2007 /* ··· 2459 2312 dev->class->get_ownership(dev, uid, gid); 2460 2313 } 2461 2314 2462 - static struct kobj_type device_ktype = { 2315 + static const struct kobj_type device_ktype = { 2463 2316 .release = device_release, 2464 2317 .sysfs_ops = &dev_sysfs_ops, 2465 2318 .namespace = device_namespace, ··· 2492 2345 return NULL; 2493 2346 } 2494 2347 2495 - static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env) 2348 + static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 2496 2349 { 2497 - struct device *dev = kobj_to_dev(kobj); 2350 + const struct device *dev = kobj_to_dev(kobj); 2498 2351 int retval = 0; 2499 2352 2500 2353 /* add device node properties if present */ ··· 3097 2950 return dir->class->ns_type; 3098 2951 } 3099 2952 3100 - static struct kobj_type class_dir_ktype = { 2953 + static const struct kobj_type class_dir_ktype = { 3101 2954 .release = class_dir_release, 3102 2955 .sysfs_ops = &kobj_sysfs_ops, 3103 2956 .child_ns_type = class_dir_child_ns_type ··· 3131 2984 static struct kobject *get_device_parent(struct device *dev, 3132 2985 struct device *parent) 3133 2986 { 2987 + struct kobject *kobj = NULL; 2988 + 3134 2989 if (dev->class) { 3135 - struct kobject *kobj = NULL; 3136 2990 struct kobject *parent_kobj; 3137 2991 struct kobject *k; 3138 2992 ··· 3181 3033 } 3182 3034 3183 3035 /* subsystems can specify a default root directory for their devices */ 3184 - if (!parent && dev->bus && dev->bus->dev_root) 3185 - return &dev->bus->dev_root->kobj; 3036 + if (!parent && dev->bus) { 3037 + struct device *dev_root = bus_get_dev_root(dev->bus); 3038 + 3039 + if (dev_root) { 3040 + kobj = &dev_root->kobj; 3041 + put_device(dev_root); 3042 + return kobj; 3043 + } 3044 + } 3186 3045 3187 3046 if (parent) 3188 3047 return &parent->kobj; ··· 
3526 3371 /* we require the name to be set before, and pass NULL */ 3527 3372 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); 3528 3373 if (error) { 3529 - glue_dir = get_glue_dir(dev); 3374 + glue_dir = kobj; 3530 3375 goto Error; 3531 3376 } 3532 3377 ··· 3566 3411 /* Notify clients of device addition. This call must come 3567 3412 * after dpm_sysfs_add() and before kobject_uevent(). 3568 3413 */ 3569 - if (dev->bus) 3570 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 3571 - BUS_NOTIFY_ADD_DEVICE, dev); 3572 - 3414 + bus_notify(dev, BUS_NOTIFY_ADD_DEVICE); 3573 3415 kobject_uevent(&dev->kobj, KOBJ_ADD); 3574 3416 3575 3417 /* ··· 3623 3471 device_pm_remove(dev); 3624 3472 dpm_sysfs_remove(dev); 3625 3473 DPMError: 3474 + dev->driver = NULL; 3626 3475 bus_remove_device(dev); 3627 3476 BusError: 3628 3477 device_remove_attrs(dev); ··· 3747 3594 * before dpm_sysfs_remove(). 3748 3595 */ 3749 3596 noio_flag = memalloc_noio_save(); 3750 - if (dev->bus) 3751 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 3752 - BUS_NOTIFY_DEL_DEVICE, dev); 3597 + bus_notify(dev, BUS_NOTIFY_DEL_DEVICE); 3753 3598 3754 3599 dpm_sysfs_remove(dev); 3755 3600 if (parent) ··· 3778 3627 device_platform_notify_remove(dev); 3779 3628 device_links_purge(dev); 3780 3629 3781 - if (dev->bus) 3782 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 3783 - BUS_NOTIFY_REMOVED_DEVICE, dev); 3630 + bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE); 3784 3631 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 3785 3632 glue_dir = get_glue_dir(dev); 3786 3633 kobject_del(&dev->kobj); ··· 3846 3697 * a name. This memory is returned in tmp and needs to be 3847 3698 * freed by the caller. 3848 3699 */ 3849 - const char *device_get_devnode(struct device *dev, 3700 + const char *device_get_devnode(const struct device *dev, 3850 3701 umode_t *mode, kuid_t *uid, kgid_t *gid, 3851 3702 const char **tmp) 3852 3703 {
+22 -18
drivers/base/cpu.c
··· 125 125 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ 126 126 #endif /* CONFIG_HOTPLUG_CPU */ 127 127 128 - struct bus_type cpu_subsys = { 129 - .name = "cpu", 130 - .dev_name = "cpu", 131 - .match = cpu_subsys_match, 132 - #ifdef CONFIG_HOTPLUG_CPU 133 - .online = cpu_subsys_online, 134 - .offline = cpu_subsys_offline, 135 - #endif 136 - }; 137 - EXPORT_SYMBOL_GPL(cpu_subsys); 138 - 139 128 #ifdef CONFIG_KEXEC 140 129 #include <linux/kexec.h> 141 130 ··· 325 336 return len; 326 337 } 327 338 328 - static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env) 339 + static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env) 329 340 { 330 341 char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 331 342 if (buf) { ··· 336 347 return 0; 337 348 } 338 349 #endif 350 + 351 + struct bus_type cpu_subsys = { 352 + .name = "cpu", 353 + .dev_name = "cpu", 354 + .match = cpu_subsys_match, 355 + #ifdef CONFIG_HOTPLUG_CPU 356 + .online = cpu_subsys_online, 357 + .offline = cpu_subsys_offline, 358 + #endif 359 + #ifdef CONFIG_GENERIC_CPU_AUTOPROBE 360 + .uevent = cpu_uevent, 361 + #endif 362 + }; 363 + EXPORT_SYMBOL_GPL(cpu_subsys); 339 364 340 365 /* 341 366 * register_cpu - Setup a sysfs device for a CPU. 
··· 371 368 cpu->dev.offline_disabled = !cpu->hotpluggable; 372 369 cpu->dev.offline = !cpu_online(num); 373 370 cpu->dev.of_node = of_get_cpu_node(num, NULL); 374 - #ifdef CONFIG_GENERIC_CPU_AUTOPROBE 375 - cpu->dev.bus->uevent = cpu_uevent; 376 - #endif 377 371 cpu->dev.groups = common_cpu_attr_groups; 378 372 if (cpu->hotpluggable) 379 373 cpu->dev.groups = hotplugable_cpu_attr_groups; ··· 610 610 611 611 static void __init cpu_register_vulnerabilities(void) 612 612 { 613 - if (sysfs_create_group(&cpu_subsys.dev_root->kobj, 614 - &cpu_root_vulnerabilities_group)) 615 - pr_err("Unable to register CPU vulnerabilities\n"); 613 + struct device *dev = bus_get_dev_root(&cpu_subsys); 614 + 615 + if (dev) { 616 + if (sysfs_create_group(&dev->kobj, &cpu_root_vulnerabilities_group)) 617 + pr_err("Unable to register CPU vulnerabilities\n"); 618 + put_device(dev); 619 + } 616 620 } 617 621 618 622 #else
+10 -26
drivers/base/dd.c
··· 257 257 DEFINE_SHOW_ATTRIBUTE(deferred_devs); 258 258 259 259 #ifdef CONFIG_MODULES 260 - int driver_deferred_probe_timeout = 10; 260 + static int driver_deferred_probe_timeout = 10; 261 261 #else 262 - int driver_deferred_probe_timeout; 262 + static int driver_deferred_probe_timeout; 263 263 #endif 264 - 265 - EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout); 266 264 267 265 static int __init deferred_probe_timeout_setup(char *str) 268 266 { ··· 370 372 371 373 static void __exit deferred_probe_exit(void) 372 374 { 373 - debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL)); 375 + debugfs_lookup_and_remove("devices_deferred", NULL); 374 376 } 375 377 __exitcall(deferred_probe_exit); 376 378 ··· 411 413 driver_deferred_probe_del(dev); 412 414 driver_deferred_probe_trigger(); 413 415 414 - if (dev->bus) 415 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 416 - BUS_NOTIFY_BOUND_DRIVER, dev); 417 - 416 + bus_notify(dev, BUS_NOTIFY_BOUND_DRIVER); 418 417 kobject_uevent(&dev->kobj, KOBJ_BIND); 419 418 } 420 419 ··· 430 435 { 431 436 int ret; 432 437 433 - if (dev->bus) 434 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 435 - BUS_NOTIFY_BIND_DRIVER, dev); 438 + bus_notify(dev, BUS_NOTIFY_BIND_DRIVER); 436 439 437 440 ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj, 438 441 kobject_name(&dev->kobj)); ··· 495 502 device_links_force_bind(dev); 496 503 driver_bound(dev); 497 504 } 498 - else if (dev->bus) 499 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 500 - BUS_NOTIFY_DRIVER_NOT_BOUND, dev); 505 + else 506 + bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND); 501 507 return ret; 502 508 } 503 509 EXPORT_SYMBOL_GPL(device_bind_driver); ··· 687 695 probe_failed: 688 696 driver_sysfs_remove(dev); 689 697 sysfs_failed: 690 - if (dev->bus) 691 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 692 - BUS_NOTIFY_DRIVER_NOT_BOUND, dev); 698 + bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND); 693 699 if (dev->bus && 
dev->bus->dma_cleanup) 694 700 dev->bus->dma_cleanup(dev); 695 701 pinctrl_bind_failed: ··· 1233 1243 1234 1244 driver_sysfs_remove(dev); 1235 1245 1236 - if (dev->bus) 1237 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 1238 - BUS_NOTIFY_UNBIND_DRIVER, 1239 - dev); 1246 + bus_notify(dev, BUS_NOTIFY_UNBIND_DRIVER); 1240 1247 1241 1248 pm_runtime_put_sync(dev); 1242 1249 ··· 1247 1260 1248 1261 klist_remove(&dev->p->knode_driver); 1249 1262 device_pm_check_callbacks(dev); 1250 - if (dev->bus) 1251 - blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 1252 - BUS_NOTIFY_UNBOUND_DRIVER, 1253 - dev); 1254 1263 1264 + bus_notify(dev, BUS_NOTIFY_UNBOUND_DRIVER); 1255 1265 kobject_uevent(&dev->kobj, KOBJ_UNBIND); 1256 1266 } 1257 1267 }
+8 -8
drivers/base/devtmpfs.c
··· 13 13 * overwrite the default setting if needed. 14 14 */ 15 15 16 + #define pr_fmt(fmt) "devtmpfs: " fmt 17 + 16 18 #include <linux/kernel.h> 17 19 #include <linux/syscalls.h> 18 20 #include <linux/mount.h> ··· 378 376 379 377 err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL); 380 378 if (err) 381 - printk(KERN_INFO "devtmpfs: error mounting %i\n", err); 379 + pr_info("error mounting %d\n", err); 382 380 else 383 - printk(KERN_INFO "devtmpfs: mounted\n"); 381 + pr_info("mounted\n"); 384 382 return err; 385 383 } 386 384 ··· 462 460 463 461 mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts); 464 462 if (IS_ERR(mnt)) { 465 - printk(KERN_ERR "devtmpfs: unable to create devtmpfs %ld\n", 466 - PTR_ERR(mnt)); 463 + pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt)); 467 464 return PTR_ERR(mnt); 468 465 } 469 466 err = register_filesystem(&dev_fs_type); 470 467 if (err) { 471 - printk(KERN_ERR "devtmpfs: unable to register devtmpfs " 472 - "type %i\n", err); 468 + pr_err("unable to register devtmpfs type %d\n", err); 473 469 return err; 474 470 } 475 471 ··· 480 480 } 481 481 482 482 if (err) { 483 - printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); 483 + pr_err("unable to create devtmpfs %d\n", err); 484 484 unregister_filesystem(&dev_fs_type); 485 485 thread = NULL; 486 486 return err; 487 487 } 488 488 489 - printk(KERN_INFO "devtmpfs: initialized\n"); 489 + pr_info("initialized\n"); 490 490 return 0; 491 491 }
+1 -28
drivers/base/driver.c
··· 224 224 int ret; 225 225 struct device_driver *other; 226 226 227 - if (!drv->bus->p) { 227 + if (!bus_is_registered(drv->bus)) { 228 228 pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n", 229 229 drv->name, drv->bus->name); 230 230 return -EINVAL; ··· 274 274 bus_remove_driver(drv); 275 275 } 276 276 EXPORT_SYMBOL_GPL(driver_unregister); 277 - 278 - /** 279 - * driver_find - locate driver on a bus by its name. 280 - * @name: name of the driver. 281 - * @bus: bus to scan for the driver. 282 - * 283 - * Call kset_find_obj() to iterate over list of drivers on 284 - * a bus to find driver by name. Return driver if found. 285 - * 286 - * This routine provides no locking to prevent the driver it returns 287 - * from being unregistered or unloaded while the caller is using it. 288 - * The caller is responsible for preventing this. 289 - */ 290 - struct device_driver *driver_find(const char *name, struct bus_type *bus) 291 - { 292 - struct kobject *k = kset_find_obj(bus->p->drivers_kset, name); 293 - struct driver_private *priv; 294 - 295 - if (k) { 296 - /* Drop reference added by kset_find_obj() */ 297 - kobject_put(k); 298 - priv = to_driver(k); 299 - return priv->driver; 300 - } 301 - return NULL; 302 - } 303 - EXPORT_SYMBOL_GPL(driver_find);
+2 -7
drivers/base/memory.c
··· 115 115 } 116 116 EXPORT_SYMBOL_GPL(memory_block_size_bytes); 117 117 118 - /* 119 - * Show the first physical section index (number) of this memory block. 120 - */ 118 + /* Show the memory block ID, relative to the memory block size */ 121 119 static ssize_t phys_index_show(struct device *dev, 122 120 struct device_attribute *attr, char *buf) 123 121 { 124 122 struct memory_block *mem = to_memory_block(dev); 125 - unsigned long phys_index; 126 123 127 - phys_index = mem->start_section_nr / sections_per_block; 128 - 129 - return sysfs_emit(buf, "%08lx\n", phys_index); 124 + return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr)); 130 125 } 131 126 132 127 /*
+4 -1
drivers/base/physical_location.c
··· 24 24 25 25 dev->physical_location = 26 26 kzalloc(sizeof(*dev->physical_location), GFP_KERNEL); 27 - if (!dev->physical_location) 27 + if (!dev->physical_location) { 28 + ACPI_FREE(pld); 28 29 return false; 30 + } 31 + 29 32 dev->physical_location->panel = pld->panel; 30 33 dev->physical_location->vertical_position = pld->vertical_position; 31 34 dev->physical_location->horizontal_position = pld->horizontal_position;
+27 -21
drivers/base/platform.c
··· 441 441 struct resource *r; 442 442 int ret; 443 443 444 - if (!dev->dev.of_node || IS_ENABLED(CONFIG_OF_IRQ)) { 445 - ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name); 446 - if (ret > 0 || ret == -EPROBE_DEFER) 447 - return ret; 448 - } 444 + ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name); 445 + if (ret > 0 || ret == -EPROBE_DEFER) 446 + return ret; 449 447 450 448 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); 451 449 if (r) { ··· 497 499 * platform_add_devices - add a numbers of platform devices 498 500 * @devs: array of platform devices to add 499 501 * @num: number of platform devices in array 502 + * 503 + * Return: 0 on success, negative error number on failure. 500 504 */ 501 505 int platform_add_devices(struct platform_device **devs, int num) 502 506 { ··· 883 883 return -ENXIO; 884 884 } 885 885 886 + static int is_bound_to_driver(struct device *dev, void *driver) 887 + { 888 + if (dev->driver == driver) 889 + return 1; 890 + return 0; 891 + } 892 + 886 893 /** 887 894 * __platform_driver_probe - register driver for non-hotpluggable device 888 895 * @drv: platform driver structure ··· 913 906 int __init_or_module __platform_driver_probe(struct platform_driver *drv, 914 907 int (*probe)(struct platform_device *), struct module *module) 915 908 { 916 - int retval, code; 909 + int retval; 917 910 918 911 if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) { 919 912 pr_err("%s: drivers registered with %s can not be probed asynchronously\n", ··· 939 932 940 933 /* temporary section violation during probe() */ 941 934 drv->probe = probe; 942 - retval = code = __platform_driver_register(drv, module); 935 + retval = __platform_driver_register(drv, module); 943 936 if (retval) 944 937 return retval; 945 938 946 - /* 947 - * Fixup that section violation, being paranoid about code scanning 948 - * the list of drivers in order to probe new devices. 
Check to see 949 - * if the probe was successful, and make sure any forced probes of 950 - * new devices fail. 951 - */ 952 - spin_lock(&drv->driver.bus->p->klist_drivers.k_lock); 939 + /* Force all new probes of this driver to fail */ 953 940 drv->probe = platform_probe_fail; 954 - if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) 955 - retval = -ENODEV; 956 - spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock); 957 941 958 - if (code != retval) 942 + /* Walk all platform devices and see if any actually bound to this driver. 943 + * If not, return an error as the device should have done so by now. 944 + */ 945 + if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) { 946 + retval = -ENODEV; 959 947 platform_driver_unregister(drv); 948 + } 949 + 960 950 return retval; 961 951 } 962 952 EXPORT_SYMBOL_GPL(__platform_driver_probe); ··· 1357 1353 return (strcmp(pdev->name, drv->name) == 0); 1358 1354 } 1359 1355 1360 - static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) 1356 + static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env) 1361 1357 { 1362 - struct platform_device *pdev = to_platform_device(dev); 1358 + const struct platform_device *pdev = to_platform_device(dev); 1363 1359 int rc; 1364 1360 1365 1361 /* Some devices have extra OF data and an OF-style MODALIAS */ ··· 1420 1416 struct platform_driver *drv = to_platform_driver(_dev->driver); 1421 1417 struct platform_device *dev = to_platform_device(_dev); 1422 1418 1423 - if (drv->remove) { 1419 + if (drv->remove_new) { 1420 + drv->remove_new(dev); 1421 + } else if (drv->remove) { 1424 1422 int ret = drv->remove(dev); 1425 1423 1426 1424 if (ret)
+3 -1
drivers/base/soc.c
··· 30 30 static struct bus_type soc_bus_type = { 31 31 .name = "soc", 32 32 }; 33 + static bool soc_bus_registered; 33 34 34 35 static DEVICE_ATTR(machine, 0444, soc_info_show, NULL); 35 36 static DEVICE_ATTR(family, 0444, soc_info_show, NULL); ··· 118 117 const struct attribute_group **soc_attr_groups; 119 118 int ret; 120 119 121 - if (!soc_bus_type.p) { 120 + if (!soc_bus_registered) { 122 121 if (early_soc_dev_attr) 123 122 return ERR_PTR(-EBUSY); 124 123 early_soc_dev_attr = soc_dev_attr; ··· 184 183 ret = bus_register(&soc_bus_type); 185 184 if (ret) 186 185 return ret; 186 + soc_bus_registered = true; 187 187 188 188 if (early_soc_dev_attr) 189 189 return PTR_ERR(soc_device_register(early_soc_dev_attr));
+1 -62
drivers/base/swnode.c
··· 760 760 kfree(swnode); 761 761 } 762 762 763 - static struct kobj_type software_node_type = { 763 + static const struct kobj_type software_node_type = { 764 764 .release = software_node_release, 765 765 .sysfs_ops = &kobj_sysfs_ops, 766 766 }; ··· 818 818 kobject_uevent(&swnode->kobj, KOBJ_ADD); 819 819 return &swnode->fwnode; 820 820 } 821 - 822 - /** 823 - * software_node_register_nodes - Register an array of software nodes 824 - * @nodes: Zero terminated array of software nodes to be registered 825 - * 826 - * Register multiple software nodes at once. If any node in the array 827 - * has its .parent pointer set (which can only be to another software_node), 828 - * then its parent **must** have been registered before it is; either outside 829 - * of this function or by ordering the array such that parent comes before 830 - * child. 831 - */ 832 - int software_node_register_nodes(const struct software_node *nodes) 833 - { 834 - int ret; 835 - int i; 836 - 837 - for (i = 0; nodes[i].name; i++) { 838 - const struct software_node *parent = nodes[i].parent; 839 - 840 - if (parent && !software_node_to_swnode(parent)) { 841 - ret = -EINVAL; 842 - goto err_unregister_nodes; 843 - } 844 - 845 - ret = software_node_register(&nodes[i]); 846 - if (ret) 847 - goto err_unregister_nodes; 848 - } 849 - 850 - return 0; 851 - 852 - err_unregister_nodes: 853 - software_node_unregister_nodes(nodes); 854 - return ret; 855 - } 856 - EXPORT_SYMBOL_GPL(software_node_register_nodes); 857 - 858 - /** 859 - * software_node_unregister_nodes - Unregister an array of software nodes 860 - * @nodes: Zero terminated array of software nodes to be unregistered 861 - * 862 - * Unregister multiple software nodes at once. If parent pointers are set up 863 - * in any of the software nodes then the array **must** be ordered such that 864 - * parents come before their children. 
865 - * 866 - * NOTE: If you are uncertain whether the array is ordered such that 867 - * parents will be unregistered before their children, it is wiser to 868 - * remove the nodes individually, in the correct order (child before 869 - * parent). 870 - */ 871 - void software_node_unregister_nodes(const struct software_node *nodes) 872 - { 873 - unsigned int i = 0; 874 - 875 - while (nodes[i].name) 876 - i++; 877 - 878 - while (i--) 879 - software_node_unregister(&nodes[i]); 880 - } 881 - EXPORT_SYMBOL_GPL(software_node_unregister_nodes); 882 821 883 822 /** 884 823 * software_node_register_node_group - Register a group of software nodes
+14 -16
drivers/base/test/property-entry-test.c
··· 405 405 /* Handling of reference properties */ 406 406 static void pe_test_reference(struct kunit *test) 407 407 { 408 - static const struct software_node nodes[] = { 409 - { .name = "1", }, 410 - { .name = "2", }, 411 - { } 412 - }; 408 + static const struct software_node node1 = { .name = "1" }; 409 + static const struct software_node node2 = { .name = "2" }; 410 + static const struct software_node *group[] = { &node1, &node2, NULL }; 413 411 414 412 static const struct software_node_ref_args refs[] = { 415 - SOFTWARE_NODE_REFERENCE(&nodes[0]), 416 - SOFTWARE_NODE_REFERENCE(&nodes[1], 3, 4), 413 + SOFTWARE_NODE_REFERENCE(&node1), 414 + SOFTWARE_NODE_REFERENCE(&node2, 3, 4), 417 415 }; 418 416 419 417 const struct property_entry entries[] = { 420 - PROPERTY_ENTRY_REF("ref-1", &nodes[0]), 421 - PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2), 418 + PROPERTY_ENTRY_REF("ref-1", &node1), 419 + PROPERTY_ENTRY_REF("ref-2", &node2, 1, 2), 422 420 PROPERTY_ENTRY_REF_ARRAY("ref-3", refs), 423 421 { } 424 422 }; ··· 425 427 struct fwnode_reference_args ref; 426 428 int error; 427 429 428 - error = software_node_register_nodes(nodes); 430 + error = software_node_register_node_group(group); 429 431 KUNIT_ASSERT_EQ(test, error, 0); 430 432 431 433 node = fwnode_create_software_node(entries, NULL); ··· 434 436 error = fwnode_property_get_reference_args(node, "ref-1", NULL, 435 437 0, 0, &ref); 436 438 KUNIT_ASSERT_EQ(test, error, 0); 437 - KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); 439 + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1); 438 440 KUNIT_EXPECT_EQ(test, ref.nargs, 0U); 439 441 440 442 /* wrong index */ ··· 445 447 error = fwnode_property_get_reference_args(node, "ref-2", NULL, 446 448 1, 0, &ref); 447 449 KUNIT_ASSERT_EQ(test, error, 0); 448 - KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); 450 + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2); 449 451 KUNIT_EXPECT_EQ(test, ref.nargs, 1U); 
450 452 KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); 451 453 ··· 453 455 error = fwnode_property_get_reference_args(node, "ref-2", NULL, 454 456 3, 0, &ref); 455 457 KUNIT_ASSERT_EQ(test, error, 0); 456 - KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); 458 + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2); 457 459 KUNIT_EXPECT_EQ(test, ref.nargs, 3U); 458 460 KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); 459 461 KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU); ··· 468 470 error = fwnode_property_get_reference_args(node, "ref-3", NULL, 469 471 0, 0, &ref); 470 472 KUNIT_ASSERT_EQ(test, error, 0); 471 - KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); 473 + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1); 472 474 KUNIT_EXPECT_EQ(test, ref.nargs, 0U); 473 475 474 476 /* second reference in the array */ 475 477 error = fwnode_property_get_reference_args(node, "ref-3", NULL, 476 478 2, 1, &ref); 477 479 KUNIT_ASSERT_EQ(test, error, 0); 478 - KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); 480 + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2); 479 481 KUNIT_EXPECT_EQ(test, ref.nargs, 2U); 480 482 KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU); 481 483 KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU); ··· 486 488 KUNIT_EXPECT_NE(test, error, 0); 487 489 488 490 fwnode_remove_software_node(node); 489 - software_node_unregister_nodes(nodes); 491 + software_node_unregister_node_group(group); 490 492 } 491 493 492 494 static struct kunit_case property_entry_test_cases[] = {
+16 -1
drivers/base/transport_class.c
··· 155 155 struct device *dev, 156 156 struct device *classdev) 157 157 { 158 + struct transport_class *tclass = class_to_transport_class(cont->class); 158 159 int error = attribute_container_add_class_device(classdev); 159 160 struct transport_container *tcont = 160 161 attribute_container_to_transport_container(cont); 161 162 162 - if (!error && tcont->statistics) 163 + if (error) 164 + goto err_remove; 165 + 166 + if (tcont->statistics) { 163 167 error = sysfs_create_group(&classdev->kobj, tcont->statistics); 168 + if (error) 169 + goto err_del; 170 + } 171 + 172 + return 0; 173 + 174 + err_del: 175 + attribute_container_class_device_del(classdev); 176 + err_remove: 177 + if (tclass->remove) 178 + tclass->remove(tcont, dev, classdev); 164 179 165 180 return error; 166 181 }
+3 -3
drivers/bcma/main.c
··· 28 28 static int bcma_bus_match(struct device *dev, struct device_driver *drv); 29 29 static int bcma_device_probe(struct device *dev); 30 30 static void bcma_device_remove(struct device *dev); 31 - static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env); 31 + static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env); 32 32 33 33 static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf) 34 34 { ··· 627 627 put_device(dev); 628 628 } 629 629 630 - static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env) 630 + static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 631 631 { 632 - struct bcma_device *core = container_of(dev, struct bcma_device, dev); 632 + const struct bcma_device *core = container_of_const(dev, struct bcma_device, dev); 633 633 634 634 return add_uevent_var(env, 635 635 "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
+2 -2
drivers/bus/fsl-mc/fsl-mc-bus.c
··· 124 124 /* 125 125 * fsl_mc_bus_uevent - callback invoked when a device is added 126 126 */ 127 - static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 127 + static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 128 128 { 129 - struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); 129 + const struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); 130 130 131 131 if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s", 132 132 mc_dev->obj_desc.vendor,
+2 -2
drivers/bus/mhi/ep/main.c
··· 1550 1550 } 1551 1551 EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); 1552 1552 1553 - static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) 1553 + static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env) 1554 1554 { 1555 - struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); 1555 + const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); 1556 1556 1557 1557 return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, 1558 1558 mhi_dev->name);
+2 -2
drivers/bus/mhi/host/init.c
··· 1395 1395 } 1396 1396 EXPORT_SYMBOL_GPL(mhi_driver_unregister); 1397 1397 1398 - static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) 1398 + static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env) 1399 1399 { 1400 - struct mhi_device *mhi_dev = to_mhi_device(dev); 1400 + const struct mhi_device *mhi_dev = to_mhi_device(dev); 1401 1401 1402 1402 return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, 1403 1403 mhi_dev->name);
+2 -2
drivers/bus/mips_cdmm.c
··· 67 67 return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL; 68 68 } 69 69 70 - static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env) 70 + static int mips_cdmm_uevent(const struct device *dev, struct kobj_uevent_env *env) 71 71 { 72 - struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); 72 + const struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); 73 73 int retval = 0; 74 74 75 75 retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
+6 -1
drivers/bus/sunxi-rsb.c
··· 172 172 drv->remove(to_sunxi_rsb_device(dev)); 173 173 } 174 174 175 + static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_uevent_env *env) 176 + { 177 + return of_device_uevent_modalias(dev, env); 178 + } 179 + 175 180 static struct bus_type sunxi_rsb_bus = { 176 181 .name = RSB_CTRL_NAME, 177 182 .match = sunxi_rsb_device_match, 178 183 .probe = sunxi_rsb_device_probe, 179 184 .remove = sunxi_rsb_device_remove, 180 - .uevent = of_device_uevent_modalias, 185 + .uevent = sunxi_rsb_device_modalias, 181 186 }; 182 187 183 188 static void sunxi_rsb_dev_release(struct device *dev)
+2 -2
drivers/cxl/core/memdev.c
··· 27 27 kfree(cxlmd); 28 28 } 29 29 30 - static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid, 30 + static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid, 31 31 kgid_t *gid) 32 32 { 33 33 return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); ··· 162 162 .groups = cxl_memdev_attribute_groups, 163 163 }; 164 164 165 - bool is_cxl_memdev(struct device *dev) 165 + bool is_cxl_memdev(const struct device *dev) 166 166 { 167 167 return dev->type == &cxl_memdev_type; 168 168 }
+4 -4
drivers/cxl/core/port.c
··· 38 38 } 39 39 static DEVICE_ATTR_RO(devtype); 40 40 41 - static int cxl_device_id(struct device *dev) 41 + static int cxl_device_id(const struct device *dev) 42 42 { 43 43 if (dev->type == &cxl_nvdimm_bridge_type) 44 44 return CXL_DEVICE_NVDIMM_BRIDGE; ··· 523 523 .groups = cxl_port_attribute_groups, 524 524 }; 525 525 526 - bool is_cxl_port(struct device *dev) 526 + bool is_cxl_port(const struct device *dev) 527 527 { 528 528 return dev->type == &cxl_port_type; 529 529 } 530 530 EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL); 531 531 532 - struct cxl_port *to_cxl_port(struct device *dev) 532 + struct cxl_port *to_cxl_port(const struct device *dev) 533 533 { 534 534 if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type, 535 535 "not a cxl_port device\n")) ··· 1826 1826 } 1827 1827 EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL); 1828 1828 1829 - static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 1829 + static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 1830 1830 { 1831 1831 return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT, 1832 1832 cxl_device_id(dev));
+2 -2
drivers/cxl/cxl.h
··· 588 588 return port->uport == port->dev.parent; 589 589 } 590 590 591 - bool is_cxl_port(struct device *dev); 592 - struct cxl_port *to_cxl_port(struct device *dev); 591 + bool is_cxl_port(const struct device *dev); 592 + struct cxl_port *to_cxl_port(const struct device *dev); 593 593 struct pci_bus; 594 594 int devm_cxl_register_pci_bus(struct device *host, struct device *uport, 595 595 struct pci_bus *bus);
+1 -1
drivers/cxl/cxlmem.h
··· 72 72 return to_cxl_memdev(port->uport); 73 73 } 74 74 75 - bool is_cxl_memdev(struct device *dev); 75 + bool is_cxl_memdev(const struct device *dev); 76 76 static inline bool is_cxl_endpoint(struct cxl_port *port) 77 77 { 78 78 return is_cxl_memdev(port->uport);
+1 -1
drivers/dax/bus.c
··· 18 18 char dev_name[DAX_NAME_LEN]; 19 19 }; 20 20 21 - static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 21 + static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 22 22 { 23 23 /* 24 24 * We only ever expect to handle device-dax instances, i.e. the
+2 -2
drivers/eisa/eisa-bus.c
··· 127 127 return 0; 128 128 } 129 129 130 - static int eisa_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 130 + static int eisa_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 131 131 { 132 - struct eisa_device *edev = to_eisa_device(dev); 132 + const struct eisa_device *edev = to_eisa_device(dev); 133 133 134 134 add_uevent_var(env, "MODALIAS=" EISA_DEVICE_MODALIAS_FMT, edev->id.sig); 135 135 return 0;
+4 -4
drivers/firewire/core-device.c
··· 133 133 } 134 134 } 135 135 136 - static void get_modalias_ids(struct fw_unit *unit, int *id) 136 + static void get_modalias_ids(const struct fw_unit *unit, int *id) 137 137 { 138 138 get_ids(&fw_parent_device(unit)->config_rom[5], id); 139 139 get_ids(unit->directory, id); ··· 195 195 driver->remove(fw_unit(dev)); 196 196 } 197 197 198 - static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) 198 + static int get_modalias(const struct fw_unit *unit, char *buffer, size_t buffer_size) 199 199 { 200 200 int id[] = {0, 0, 0, 0}; 201 201 ··· 206 206 id[0], id[1], id[2], id[3]); 207 207 } 208 208 209 - static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) 209 + static int fw_unit_uevent(const struct device *dev, struct kobj_uevent_env *env) 210 210 { 211 - struct fw_unit *unit = fw_unit(dev); 211 + const struct fw_unit *unit = fw_unit(dev); 212 212 char modalias[64]; 213 213 214 214 get_modalias(unit, modalias, sizeof(modalias));
+2 -2
drivers/firmware/arm_ffa/bus.c
··· 56 56 ffa_drv->remove(to_ffa_dev(dev)); 57 57 } 58 58 59 - static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env) 59 + static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 60 60 { 61 - struct ffa_device *ffa_dev = to_ffa_dev(dev); 61 + const struct ffa_device *ffa_dev = to_ffa_dev(dev); 62 62 63 63 return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb", 64 64 ffa_dev->vm_id, &ffa_dev->uuid);
+2 -1
drivers/firmware/arm_scmi/bus.c
··· 12 12 #include <linux/kernel.h> 13 13 #include <linux/slab.h> 14 14 #include <linux/device.h> 15 + #include <linux/of.h> 15 16 16 17 #include "common.h" 17 18 ··· 192 191 scmi_dev->id = id; 193 192 scmi_dev->protocol_id = protocol; 194 193 scmi_dev->dev.parent = parent; 195 - scmi_dev->dev.of_node = np; 194 + device_set_node(&scmi_dev->dev, of_fwnode_handle(np)); 196 195 scmi_dev->dev.bus = &scmi_bus_type; 197 196 scmi_dev->dev.release = scmi_device_release; 198 197 dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
+2 -2
drivers/fpga/dfl.c
··· 294 294 ddrv->remove(ddev); 295 295 } 296 296 297 - static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 297 + static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 298 298 { 299 - struct dfl_device *ddev = to_dfl_dev(dev); 299 + const struct dfl_device *ddev = to_dfl_dev(dev); 300 300 301 301 return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X", 302 302 ddev->type, ddev->feature_id);
+3 -3
drivers/fsi/fsi-core.c
··· 897 897 NULL, 898 898 }; 899 899 900 - static char *cfam_devnode(struct device *dev, umode_t *mode, 900 + static char *cfam_devnode(const struct device *dev, umode_t *mode, 901 901 kuid_t *uid, kgid_t *gid) 902 902 { 903 - struct fsi_slave *slave = to_fsi_slave(dev); 903 + const struct fsi_slave *slave = to_fsi_slave(dev); 904 904 905 905 #ifdef CONFIG_FSI_NEW_DEV_NODE 906 906 return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx); ··· 915 915 .groups = cfam_attr_groups 916 916 }; 917 917 918 - static char *fsi_cdev_devnode(struct device *dev, umode_t *mode, 918 + static char *fsi_cdev_devnode(const struct device *dev, umode_t *mode, 919 919 kuid_t *uid, kgid_t *gid) 920 920 { 921 921 #ifdef CONFIG_FSI_NEW_DEV_NODE
+7
drivers/gpio/gpiolib.c
··· 587 587 { 588 588 int ret; 589 589 590 + /* 591 + * If fwnode doesn't belong to another device, it's safe to clear its 592 + * initialized flag. 593 + */ 594 + if (gdev->dev.fwnode && !gdev->dev.fwnode->dev) 595 + fwnode_dev_initialized(gdev->dev.fwnode, false); 596 + 590 597 ret = gcdev_register(gdev, gpio_devt); 591 598 if (ret) 592 599 return ret;
+6 -1
drivers/gpu/drm/display/drm_dp_aux_bus.c
··· 161 161 kfree(aux_ep_with_data); 162 162 } 163 163 164 + static int dp_aux_ep_dev_modalias(const struct device *dev, struct kobj_uevent_env *env) 165 + { 166 + return of_device_uevent_modalias(dev, env); 167 + } 168 + 164 169 static struct device_type dp_aux_device_type_type = { 165 170 .groups = dp_aux_ep_dev_groups, 166 - .uevent = of_device_uevent_modalias, 171 + .uevent = dp_aux_ep_dev_modalias, 167 172 .release = dp_aux_ep_dev_release, 168 173 }; 169 174
+2 -2
drivers/gpu/drm/drm_mipi_dsi.c
··· 62 62 return 0; 63 63 } 64 64 65 - static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env) 65 + static int mipi_dsi_uevent(const struct device *dev, struct kobj_uevent_env *env) 66 66 { 67 - struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); 67 + const struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); 68 68 int err; 69 69 70 70 err = of_device_uevent_modalias(dev, env);
+1 -1
drivers/gpu/host1x/bus.c
··· 338 338 return strcmp(dev_name(dev), drv->name) == 0; 339 339 } 340 340 341 - static int host1x_device_uevent(struct device *dev, 341 + static int host1x_device_uevent(const struct device *dev, 342 342 struct kobj_uevent_env *env) 343 343 { 344 344 struct device_node *np = dev->parent->of_node;
+7 -7
drivers/greybus/core.c
··· 78 78 return 0; 79 79 } 80 80 81 - static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env) 81 + static int greybus_uevent(const struct device *dev, struct kobj_uevent_env *env) 82 82 { 83 - struct gb_host_device *hd; 84 - struct gb_module *module = NULL; 85 - struct gb_interface *intf = NULL; 86 - struct gb_control *control = NULL; 87 - struct gb_bundle *bundle = NULL; 88 - struct gb_svc *svc = NULL; 83 + const struct gb_host_device *hd; 84 + const struct gb_module *module = NULL; 85 + const struct gb_interface *intf = NULL; 86 + const struct gb_control *control = NULL; 87 + const struct gb_bundle *bundle = NULL; 88 + const struct gb_svc *svc = NULL; 89 89 90 90 if (is_gb_host_device(dev)) { 91 91 hd = to_gb_host_device(dev);
+2 -2
drivers/hid/hid-core.c
··· 2676 2676 }; 2677 2677 __ATTRIBUTE_GROUPS(hid_dev); 2678 2678 2679 - static int hid_uevent(struct device *dev, struct kobj_uevent_env *env) 2679 + static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env) 2680 2680 { 2681 - struct hid_device *hdev = to_hid_device(dev); 2681 + const struct hid_device *hdev = to_hid_device(dev); 2682 2682 2683 2683 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X", 2684 2684 hdev->bus, hdev->vendor, hdev->product))
+1 -1
drivers/hid/intel-ish-hid/ishtp/bus.c
··· 361 361 }; 362 362 ATTRIBUTE_GROUPS(ishtp_cl_dev); 363 363 364 - static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env) 364 + static int ishtp_cl_uevent(const struct device *dev, struct kobj_uevent_env *env) 365 365 { 366 366 if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev))) 367 367 return -ENOMEM;
+1 -1
drivers/hsi/hsi_core.c
··· 30 30 }; 31 31 ATTRIBUTE_GROUPS(hsi_bus_dev); 32 32 33 - static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 33 + static int hsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 34 34 { 35 35 add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); 36 36
+2 -2
drivers/hv/vmbus_drv.c
··· 711 711 * representation of the device guid (each byte of the guid will be 712 712 * represented with two hex characters. 713 713 */ 714 - static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) 714 + static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env) 715 715 { 716 - struct hv_device *dev = device_to_hv_device(device); 716 + const struct hv_device *dev = device_to_hv_device(device); 717 717 const char *format = "MODALIAS=vmbus:%*phN"; 718 718 719 719 return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
+3 -3
drivers/hwtracing/intel_th/core.c
··· 185 185 .release = intel_th_device_release, 186 186 }; 187 187 188 - static char *intel_th_output_devnode(struct device *dev, umode_t *mode, 188 + static char *intel_th_output_devnode(const struct device *dev, umode_t *mode, 189 189 kuid_t *uid, kgid_t *gid) 190 190 { 191 - struct intel_th_device *thdev = to_intel_th_device(dev); 192 - struct intel_th *th = to_intel_th(thdev); 191 + const struct intel_th_device *thdev = to_intel_th_device(dev); 192 + const struct intel_th *th = to_intel_th(thdev); 193 193 char *node; 194 194 195 195 if (thdev->id >= 0)
+2 -2
drivers/hwtracing/intel_th/intel_th.h
··· 205 205 * INTEL_TH_SWITCH and INTEL_TH_SOURCE are children of the intel_th device. 206 206 */ 207 207 static inline struct intel_th_device * 208 - to_intel_th_parent(struct intel_th_device *thdev) 208 + to_intel_th_parent(const struct intel_th_device *thdev) 209 209 { 210 210 struct device *parent = thdev->dev.parent; 211 211 ··· 215 215 return to_intel_th_device(parent); 216 216 } 217 217 218 - static inline struct intel_th *to_intel_th(struct intel_th_device *thdev) 218 + static inline struct intel_th *to_intel_th(const struct intel_th_device *thdev) 219 219 { 220 220 if (thdev->type == INTEL_TH_OUTPUT) 221 221 thdev = to_intel_th_parent(thdev);
+2 -2
drivers/i2c/i2c-core-base.c
··· 136 136 return 0; 137 137 } 138 138 139 - static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env) 139 + static int i2c_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 140 140 { 141 - struct i2c_client *client = to_i2c_client(dev); 141 + const struct i2c_client *client = to_i2c_client(dev); 142 142 int rc; 143 143 144 144 rc = of_device_uevent_modalias(dev, env);
+1 -13
drivers/i3c/device.c
··· 78 78 * 79 79 * Retrieve I3C dev info. 80 80 */ 81 - void i3c_device_get_info(struct i3c_device *dev, 81 + void i3c_device_get_info(const struct i3c_device *dev, 82 82 struct i3c_device_info *info) 83 83 { 84 84 if (!info) ··· 207 207 return &i3cdev->dev; 208 208 } 209 209 EXPORT_SYMBOL_GPL(i3cdev_to_dev); 210 - 211 - /** 212 - * dev_to_i3cdev() - Returns the I3C device containing @dev 213 - * @dev: device object 214 - * 215 - * Return: a pointer to an I3C device object. 216 - */ 217 - struct i3c_device *dev_to_i3cdev(struct device *dev) 218 - { 219 - return container_of(dev, struct i3c_device, dev); 220 - } 221 - EXPORT_SYMBOL_GPL(dev_to_i3cdev); 222 210 223 211 /** 224 212 * i3c_device_match_id() - Returns the i3c_device_id entry matching @i3cdev
+2 -2
drivers/i3c/master.c
··· 273 273 }; 274 274 ATTRIBUTE_GROUPS(i3c_device); 275 275 276 - static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env) 276 + static int i3c_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 277 277 { 278 - struct i3c_device *i3cdev = dev_to_i3cdev(dev); 278 + const struct i3c_device *i3cdev = dev_to_i3cdev(dev); 279 279 struct i3c_device_info devinfo; 280 280 u16 manuf, part, ext; 281 281
+8 -8
drivers/input/input.c
··· 1372 1372 INPUT_DEV_STRING_ATTR_SHOW(uniq); 1373 1373 1374 1374 static int input_print_modalias_bits(char *buf, int size, 1375 - char name, unsigned long *bm, 1375 + char name, const unsigned long *bm, 1376 1376 unsigned int min_bit, unsigned int max_bit) 1377 1377 { 1378 1378 int len = 0, i; ··· 1384 1384 return len; 1385 1385 } 1386 1386 1387 - static int input_print_modalias(char *buf, int size, struct input_dev *id, 1387 + static int input_print_modalias(char *buf, int size, const struct input_dev *id, 1388 1388 int add_cr) 1389 1389 { 1390 1390 int len; ··· 1432 1432 } 1433 1433 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); 1434 1434 1435 - static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1435 + static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap, 1436 1436 int max, int add_cr); 1437 1437 1438 1438 static ssize_t input_dev_show_properties(struct device *dev, ··· 1524 1524 .attrs = input_dev_id_attrs, 1525 1525 }; 1526 1526 1527 - static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1527 + static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap, 1528 1528 int max, int add_cr) 1529 1529 { 1530 1530 int i; ··· 1621 1621 * device bitfields. 
1622 1622 */ 1623 1623 static int input_add_uevent_bm_var(struct kobj_uevent_env *env, 1624 - const char *name, unsigned long *bitmap, int max) 1624 + const char *name, const unsigned long *bitmap, int max) 1625 1625 { 1626 1626 int len; 1627 1627 ··· 1639 1639 } 1640 1640 1641 1641 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, 1642 - struct input_dev *dev) 1642 + const struct input_dev *dev) 1643 1643 { 1644 1644 int len; 1645 1645 ··· 1677 1677 return err; \ 1678 1678 } while (0) 1679 1679 1680 - static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) 1680 + static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env) 1681 1681 { 1682 - struct input_dev *dev = to_input_dev(device); 1682 + const struct input_dev *dev = to_input_dev(device); 1683 1683 1684 1684 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", 1685 1685 dev->id.bustype, dev->id.vendor,
+2 -2
drivers/input/serio/serio.c
··· 895 895 return err; \ 896 896 } while (0) 897 897 898 - static int serio_uevent(struct device *dev, struct kobj_uevent_env *env) 898 + static int serio_uevent(const struct device *dev, struct kobj_uevent_env *env) 899 899 { 900 - struct serio *serio; 900 + const struct serio *serio; 901 901 902 902 if (!dev) 903 903 return -ENODEV;
+2 -2
drivers/ipack/ipack.c
··· 76 76 drv->ops->remove(dev); 77 77 } 78 78 79 - static int ipack_uevent(struct device *dev, struct kobj_uevent_env *env) 79 + static int ipack_uevent(const struct device *dev, struct kobj_uevent_env *env) 80 80 { 81 - struct ipack_device *idev; 81 + const struct ipack_device *idev; 82 82 83 83 if (!dev) 84 84 return -ENODEV;
+1
drivers/irqchip/irq-imx-gpcv2.c
··· 283 283 * later the GPC power domain driver will not be skipped. 284 284 */ 285 285 of_node_clear_flag(node, OF_POPULATED); 286 + fwnode_dev_initialized(domain->fwnode, false); 286 287 return 0; 287 288 } 288 289
+6 -1
drivers/macintosh/macio_asic.c
··· 128 128 return 0; 129 129 } 130 130 131 + static int macio_device_modalias(const struct device *dev, struct kobj_uevent_env *env) 132 + { 133 + return of_device_uevent_modalias(dev, env); 134 + } 135 + 131 136 extern const struct attribute_group *macio_dev_groups[]; 132 137 133 138 struct bus_type macio_bus_type = { 134 139 .name = "macio", 135 140 .match = macio_bus_match, 136 - .uevent = of_device_uevent_modalias, 141 + .uevent = macio_device_modalias, 137 142 .probe = macio_device_probe, 138 143 .remove = macio_device_remove, 139 144 .shutdown = macio_device_shutdown,
+2 -2
drivers/mcb/mcb-core.c
··· 41 41 return 0; 42 42 } 43 43 44 - static int mcb_uevent(struct device *dev, struct kobj_uevent_env *env) 44 + static int mcb_uevent(const struct device *dev, struct kobj_uevent_env *env) 45 45 { 46 - struct mcb_device *mdev = to_mcb_device(dev); 46 + const struct mcb_device *mdev = to_mcb_device(dev); 47 47 int ret; 48 48 49 49 ret = add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id);
+22 -5
drivers/media/pci/intel/ipu3/cio2-bridge.c
··· 195 195 SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */ 196 196 } 197 197 198 + static void cio2_bridge_init_swnode_group(struct cio2_sensor *sensor) 199 + { 200 + struct software_node *nodes = sensor->swnodes; 201 + 202 + sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID]; 203 + sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT]; 204 + sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT]; 205 + sensor->group[SWNODE_CIO2_PORT] = &nodes[SWNODE_CIO2_PORT]; 206 + sensor->group[SWNODE_CIO2_ENDPOINT] = &nodes[SWNODE_CIO2_ENDPOINT]; 207 + if (sensor->ssdb.vcmtype) 208 + sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM]; 209 + } 210 + 198 211 static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge, 199 212 struct cio2_sensor *sensor) 200 213 { ··· 232 219 if (sensor->ssdb.vcmtype) 233 220 nodes[SWNODE_VCM] = 234 221 NODE_VCM(cio2_vcm_types[sensor->ssdb.vcmtype - 1]); 222 + 223 + cio2_bridge_init_swnode_group(sensor); 235 224 } 236 225 237 226 static void cio2_bridge_instantiate_vcm_i2c_client(struct cio2_sensor *sensor) ··· 267 252 268 253 for (i = 0; i < bridge->n_sensors; i++) { 269 254 sensor = &bridge->sensors[i]; 270 - software_node_unregister_nodes(sensor->swnodes); 255 + software_node_unregister_node_group(sensor->group); 271 256 ACPI_FREE(sensor->pld); 272 257 acpi_dev_put(sensor->adev); 273 258 i2c_unregister_device(sensor->vcm_i2c_client); ··· 278 263 struct cio2_bridge *bridge, 279 264 struct pci_dev *cio2) 280 265 { 281 - struct fwnode_handle *fwnode; 266 + struct fwnode_handle *fwnode, *primary; 282 267 struct cio2_sensor *sensor; 283 268 struct acpi_device *adev; 284 269 acpi_status status; ··· 325 310 cio2_bridge_create_fwnode_properties(sensor, bridge, cfg); 326 311 cio2_bridge_create_connection_swnodes(bridge, sensor); 327 312 328 - ret = software_node_register_nodes(sensor->swnodes); 313 + ret = software_node_register_node_group(sensor->group); 329 314 if (ret) 330 315 goto 
err_free_pld; 331 316 ··· 337 322 } 338 323 339 324 sensor->adev = acpi_dev_get(adev); 340 - adev->fwnode.secondary = fwnode; 325 + 326 + primary = acpi_fwnode_handle(adev); 327 + primary->secondary = fwnode; 341 328 342 329 cio2_bridge_instantiate_vcm_i2c_client(sensor); 343 330 ··· 352 335 return 0; 353 336 354 337 err_free_swnodes: 355 - software_node_unregister_nodes(sensor->swnodes); 338 + software_node_unregister_node_group(sensor->group); 356 339 err_free_pld: 357 340 ACPI_FREE(sensor->pld); 358 341 err_put_adev:
+3 -2
drivers/media/pci/intel/ipu3/cio2-bridge.h
··· 117 117 struct acpi_device *adev; 118 118 struct i2c_client *vcm_i2c_client; 119 119 120 - /* SWNODE_COUNT + 1 for terminating empty node */ 121 - struct software_node swnodes[SWNODE_COUNT + 1]; 120 + /* SWNODE_COUNT + 1 for terminating NULL */ 121 + const struct software_node *group[SWNODE_COUNT + 1]; 122 + struct software_node swnodes[SWNODE_COUNT]; 122 123 struct cio2_node_names node_names; 123 124 124 125 struct cio2_sensor_ssdb ssdb;
+1 -1
drivers/media/rc/rc-main.c
··· 1614 1614 kfree(dev); 1615 1615 } 1616 1616 1617 - static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) 1617 + static int rc_dev_uevent(const struct device *device, struct kobj_uevent_env *env) 1618 1618 { 1619 1619 struct rc_dev *dev = to_rc_dev(device); 1620 1620 int ret = 0;
+3 -3
drivers/memstick/core/memstick.c
··· 57 57 return 0; 58 58 } 59 59 60 - static int memstick_uevent(struct device *dev, struct kobj_uevent_env *env) 60 + static int memstick_uevent(const struct device *dev, struct kobj_uevent_env *env) 61 61 { 62 - struct memstick_dev *card = container_of(dev, struct memstick_dev, 63 - dev); 62 + const struct memstick_dev *card = container_of_const(dev, struct memstick_dev, 63 + dev); 64 64 65 65 if (add_uevent_var(env, "MEMSTICK_TYPE=%02X", card->id.type)) 66 66 return -ENOMEM;
+2 -2
drivers/misc/mei/bus.c
··· 1227 1227 * 1228 1228 * Return: 0 on success -ENOMEM on when add_uevent_var fails 1229 1229 */ 1230 - static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env) 1230 + static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 1231 1231 { 1232 - struct mei_cl_device *cldev = to_mei_cl_device(dev); 1232 + const struct mei_cl_device *cldev = to_mei_cl_device(dev); 1233 1233 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 1234 1234 u8 version = mei_me_cl_ver(cldev->me_cl); 1235 1235
+2 -2
drivers/misc/tifm_core.c
··· 55 55 return 0; 56 56 } 57 57 58 - static int tifm_uevent(struct device *dev, struct kobj_uevent_env *env) 58 + static int tifm_uevent(const struct device *dev, struct kobj_uevent_env *env) 59 59 { 60 - struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); 60 + const struct tifm_dev *sock = container_of_const(dev, struct tifm_dev, dev); 61 61 62 62 if (add_uevent_var(env, "TIFM_CARD_TYPE=%s", tifm_media_type_name(sock->type, 1))) 63 63 return -ENOMEM;
+2 -2
drivers/mmc/core/bus.c
··· 55 55 ATTRIBUTE_GROUPS(mmc_dev); 56 56 57 57 static int 58 - mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 58 + mmc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 59 59 { 60 - struct mmc_card *card = mmc_dev_to_card(dev); 60 + const struct mmc_card *card = mmc_dev_to_card(dev); 61 61 const char *type; 62 62 unsigned int i; 63 63 int retval = 0;
+2 -2
drivers/mmc/core/sdio_bus.c
··· 120 120 } 121 121 122 122 static int 123 - sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 123 + sdio_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 124 124 { 125 - struct sdio_func *func = dev_to_sdio_func(dev); 125 + const struct sdio_func *func = dev_to_sdio_func(dev); 126 126 unsigned int i; 127 127 128 128 if (add_uevent_var(env,
+10
drivers/mtd/mtdpart.c
··· 577 577 { 578 578 struct mtd_part_parser *parser; 579 579 struct device_node *np; 580 + struct device_node *child; 580 581 struct property *prop; 581 582 struct device *dev; 582 583 const char *compat; ··· 594 593 of_node_get(np); 595 594 else 596 595 np = of_get_child_by_name(np, "partitions"); 596 + 597 + /* 598 + * Don't create devices that are added to a bus but will never get 599 + * probed. That'll cause fw_devlink to block probing of consumers of 600 + * this partition until the partition device is probed. 601 + */ 602 + for_each_child_of_node(np, child) 603 + if (of_device_is_compatible(child, "nvmem-cells")) 604 + of_node_set_flag(child, OF_POPULATED); 597 605 598 606 of_property_for_each_string(np, "compatible", prop, compat) { 599 607 parser = mtd_part_get_compatible_parser(compat);
+1 -1
drivers/net/phy/mdio_bus.c
··· 1330 1330 return 0; 1331 1331 } 1332 1332 1333 - static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) 1333 + static int mdio_uevent(const struct device *dev, struct kobj_uevent_env *env) 1334 1334 { 1335 1335 int rc; 1336 1336
+1 -1
drivers/net/xen-netback/xenbus.c
··· 200 200 * and vif variables to the environment, for the benefit of the vif-* hotplug 201 201 * scripts. 202 202 */ 203 - static int netback_uevent(struct xenbus_device *xdev, 203 + static int netback_uevent(const struct xenbus_device *xdev, 204 204 struct kobj_uevent_env *env) 205 205 { 206 206 struct backend_info *be = dev_get_drvdata(&xdev->dev);
+2 -2
drivers/nvdimm/bus.c
··· 28 28 struct class *nd_class; 29 29 static DEFINE_IDA(nd_ida); 30 30 31 - static int to_nd_device_type(struct device *dev) 31 + static int to_nd_device_type(const struct device *dev) 32 32 { 33 33 if (is_nvdimm(dev)) 34 34 return ND_DEVICE_DIMM; ··· 42 42 return 0; 43 43 } 44 44 45 - static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 45 + static int nvdimm_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 46 46 { 47 47 return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT, 48 48 to_nd_device_type(dev));
+1 -1
drivers/nvdimm/dax_devs.c
··· 38 38 .groups = nd_pfn_attribute_groups, 39 39 }; 40 40 41 - bool is_nd_dax(struct device *dev) 41 + bool is_nd_dax(const struct device *dev) 42 42 { 43 43 return dev ? dev->type == &nd_dax_device_type : false; 44 44 }
+1 -1
drivers/nvdimm/dimm_devs.c
··· 572 572 .groups = nvdimm_attribute_groups, 573 573 }; 574 574 575 - bool is_nvdimm(struct device *dev) 575 + bool is_nvdimm(const struct device *dev) 576 576 { 577 577 return dev->type == &nvdimm_device_type; 578 578 }
+5 -5
drivers/nvdimm/nd-core.h
··· 82 82 } 83 83 #endif 84 84 85 - bool is_nvdimm(struct device *dev); 86 - bool is_nd_pmem(struct device *dev); 87 - bool is_nd_volatile(struct device *dev); 88 - static inline bool is_nd_region(struct device *dev) 85 + bool is_nvdimm(const struct device *dev); 86 + bool is_nd_pmem(const struct device *dev); 87 + bool is_nd_volatile(const struct device *dev); 88 + static inline bool is_nd_region(const struct device *dev) 89 89 { 90 90 return is_nd_pmem(dev) || is_nd_volatile(dev); 91 91 } 92 - static inline bool is_memory(struct device *dev) 92 + static inline bool is_memory(const struct device *dev) 93 93 { 94 94 return is_nd_pmem(dev) || is_nd_volatile(dev); 95 95 }
+2 -2
drivers/nvdimm/nd.h
··· 599 599 struct nd_dax *to_nd_dax(struct device *dev); 600 600 #if IS_ENABLED(CONFIG_NVDIMM_DAX) 601 601 int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns); 602 - bool is_nd_dax(struct device *dev); 602 + bool is_nd_dax(const struct device *dev); 603 603 struct device *nd_dax_create(struct nd_region *nd_region); 604 604 #else 605 605 static inline int nd_dax_probe(struct device *dev, ··· 608 608 return -ENODEV; 609 609 } 610 610 611 - static inline bool is_nd_dax(struct device *dev) 611 + static inline bool is_nd_dax(const struct device *dev) 612 612 { 613 613 return false; 614 614 }
+2 -2
drivers/nvdimm/region_devs.c
··· 839 839 .groups = nd_region_attribute_groups, 840 840 }; 841 841 842 - bool is_nd_pmem(struct device *dev) 842 + bool is_nd_pmem(const struct device *dev) 843 843 { 844 844 return dev ? dev->type == &nd_pmem_device_type : false; 845 845 } 846 846 847 - bool is_nd_volatile(struct device *dev) 847 + bool is_nd_volatile(const struct device *dev) 848 848 { 849 849 return dev ? dev->type == &nd_volatile_device_type : false; 850 850 }
+2 -2
drivers/of/device.c
··· 248 248 } 249 249 EXPORT_SYMBOL(of_device_get_match_data); 250 250 251 - static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) 251 + static ssize_t of_device_get_modalias(const struct device *dev, char *str, ssize_t len) 252 252 { 253 253 const char *compat; 254 254 char *c; ··· 372 372 mutex_unlock(&of_mutex); 373 373 } 374 374 375 - int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) 375 + int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) 376 376 { 377 377 int sl; 378 378
+13 -71
drivers/of/property.c
··· 1062 1062 return of_device_get_match_data(dev); 1063 1063 } 1064 1064 1065 - static bool of_is_ancestor_of(struct device_node *test_ancestor, 1066 - struct device_node *child) 1067 - { 1068 - of_node_get(child); 1069 - while (child) { 1070 - if (child == test_ancestor) { 1071 - of_node_put(child); 1072 - return true; 1073 - } 1074 - child = of_get_next_parent(child); 1075 - } 1076 - return false; 1077 - } 1078 - 1079 1065 static struct device_node *of_get_compat_node(struct device_node *np) 1080 1066 { 1081 1067 of_node_get(np); ··· 1092 1106 return node; 1093 1107 } 1094 1108 1095 - /** 1096 - * of_link_to_phandle - Add fwnode link to supplier from supplier phandle 1097 - * @con_np: consumer device tree node 1098 - * @sup_np: supplier device tree node 1099 - * 1100 - * Given a phandle to a supplier device tree node (@sup_np), this function 1101 - * finds the device that owns the supplier device tree node and creates a 1102 - * device link from @dev consumer device to the supplier device. This function 1103 - * doesn't create device links for invalid scenarios such as trying to create a 1104 - * link with a parent device as the consumer of its child device. In such 1105 - * cases, it returns an error. 1106 - * 1107 - * Returns: 1108 - * - 0 if fwnode link successfully created to supplier 1109 - * - -EINVAL if the supplier link is invalid and should not be created 1110 - * - -ENODEV if struct device will never be create for supplier 1111 - */ 1112 - static int of_link_to_phandle(struct device_node *con_np, 1109 + static void of_link_to_phandle(struct device_node *con_np, 1113 1110 struct device_node *sup_np) 1114 1111 { 1115 - struct device *sup_dev; 1116 - struct device_node *tmp_np = sup_np; 1112 + struct device_node *tmp_np = of_node_get(sup_np); 1117 1113 1118 - /* 1119 - * Find the device node that contains the supplier phandle. It may be 1120 - * @sup_np or it may be an ancestor of @sup_np. 
1121 - */ 1122 - sup_np = of_get_compat_node(sup_np); 1123 - if (!sup_np) { 1124 - pr_debug("Not linking %pOFP to %pOFP - No device\n", 1125 - con_np, tmp_np); 1126 - return -ENODEV; 1127 - } 1114 + /* Check that sup_np and its ancestors are available. */ 1115 + while (tmp_np) { 1116 + if (of_fwnode_handle(tmp_np)->dev) { 1117 + of_node_put(tmp_np); 1118 + break; 1119 + } 1128 1120 1129 - /* 1130 - * Don't allow linking a device node as a consumer of one of its 1131 - * descendant nodes. By definition, a child node can't be a functional 1132 - * dependency for the parent node. 1133 - */ 1134 - if (of_is_ancestor_of(con_np, sup_np)) { 1135 - pr_debug("Not linking %pOFP to %pOFP - is descendant\n", 1136 - con_np, sup_np); 1137 - of_node_put(sup_np); 1138 - return -EINVAL; 1139 - } 1121 + if (!of_device_is_available(tmp_np)) { 1122 + of_node_put(tmp_np); 1123 + return; 1124 + } 1140 1125 1141 - /* 1142 - * Don't create links to "early devices" that won't have struct devices 1143 - * created for them. 1144 - */ 1145 - sup_dev = get_dev_from_fwnode(&sup_np->fwnode); 1146 - if (!sup_dev && 1147 - (of_node_check_flag(sup_np, OF_POPULATED) || 1148 - sup_np->fwnode.flags & FWNODE_FLAG_NOT_DEVICE)) { 1149 - pr_debug("Not linking %pOFP to %pOFP - No struct device\n", 1150 - con_np, sup_np); 1151 - of_node_put(sup_np); 1152 - return -ENODEV; 1126 + tmp_np = of_get_next_parent(tmp_np); 1153 1127 } 1154 - put_device(sup_dev); 1155 1128 1156 1129 fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np)); 1157 - of_node_put(sup_np); 1158 - 1159 - return 0; 1160 1130 } 1161 1131 1162 1132 /**
+2 -2
drivers/pci/pci-driver.c
··· 1545 1545 } 1546 1546 EXPORT_SYMBOL(pci_dev_put); 1547 1547 1548 - static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) 1548 + static int pci_uevent(const struct device *dev, struct kobj_uevent_env *env) 1549 1549 { 1550 - struct pci_dev *pdev; 1550 + const struct pci_dev *pdev; 1551 1551 1552 1552 if (!dev) 1553 1553 return -ENODEV;
+2 -2
drivers/pcmcia/ds.c
··· 927 927 return 0; 928 928 } 929 929 930 - static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 930 + static int pcmcia_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 931 931 { 932 - struct pcmcia_device *p_dev; 932 + const struct pcmcia_device *p_dev; 933 933 int i; 934 934 u32 hash[4] = { 0, 0, 0, 0}; 935 935
+2 -2
drivers/platform/surface/aggregator/bus.c
··· 35 35 }; 36 36 ATTRIBUTE_GROUPS(ssam_device); 37 37 38 - static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env) 38 + static int ssam_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 39 39 { 40 - struct ssam_device *sdev = to_ssam_device(dev); 40 + const struct ssam_device *sdev = to_ssam_device(dev); 41 41 42 42 return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X", 43 43 sdev->uid.domain, sdev->uid.category,
+4 -11
drivers/platform/x86/wmi.c
··· 693 693 } 694 694 EXPORT_SYMBOL_GPL(wmi_get_acpi_device_uid); 695 695 696 - static struct wmi_block *dev_to_wblock(struct device *dev) 697 - { 698 - return container_of(dev, struct wmi_block, dev.dev); 699 - } 700 - 701 - static struct wmi_device *dev_to_wdev(struct device *dev) 702 - { 703 - return container_of(dev, struct wmi_device, dev); 704 - } 696 + #define dev_to_wblock(__dev) container_of_const(__dev, struct wmi_block, dev.dev) 697 + #define dev_to_wdev(__dev) container_of_const(__dev, struct wmi_device, dev) 705 698 706 699 static inline struct wmi_driver *drv_to_wdrv(struct device_driver *drv) 707 700 { ··· 797 804 }; 798 805 ATTRIBUTE_GROUPS(wmi_method); 799 806 800 - static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env) 807 + static int wmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) 801 808 { 802 - struct wmi_block *wblock = dev_to_wblock(dev); 809 + const struct wmi_block *wblock = dev_to_wblock(dev); 803 810 804 811 if (add_uevent_var(env, "MODALIAS=wmi:%pUL", &wblock->gblock.guid)) 805 812 return -ENOMEM;
+2 -2
drivers/rapidio/rio-driver.c
··· 204 204 out:return 0; 205 205 } 206 206 207 - static int rio_uevent(struct device *dev, struct kobj_uevent_env *env) 207 + static int rio_uevent(const struct device *dev, struct kobj_uevent_env *env) 208 208 { 209 - struct rio_dev *rdev; 209 + const struct rio_dev *rdev; 210 210 211 211 if (!dev) 212 212 return -ENODEV;
+2 -2
drivers/rpmsg/rpmsg_core.c
··· 492 492 return of_driver_match_device(dev, drv); 493 493 } 494 494 495 - static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env) 495 + static int rpmsg_uevent(const struct device *dev, struct kobj_uevent_env *env) 496 496 { 497 - struct rpmsg_device *rpdev = to_rpmsg_device(dev); 497 + const struct rpmsg_device *rpdev = to_rpmsg_device(dev); 498 498 int ret; 499 499 500 500 ret = of_device_uevent_modalias(dev, env);
+2 -2
drivers/s390/cio/css.c
··· 1411 1411 sch->driver->shutdown(sch); 1412 1412 } 1413 1413 1414 - static int css_uevent(struct device *dev, struct kobj_uevent_env *env) 1414 + static int css_uevent(const struct device *dev, struct kobj_uevent_env *env) 1415 1415 { 1416 - struct subchannel *sch = to_subchannel(dev); 1416 + const struct subchannel *sch = to_subchannel(dev); 1417 1417 int ret; 1418 1418 1419 1419 ret = add_uevent_var(env, "ST=%01X", sch->st);
+4 -4
drivers/s390/cio/device.c
··· 80 80 * specified size. Return length of resulting string (excluding trailing '\0') 81 81 * even if string doesn't fit buffer (snprintf semantics). */ 82 82 static int snprint_alias(char *buf, size_t size, 83 - struct ccw_device_id *id, const char *suffix) 83 + const struct ccw_device_id *id, const char *suffix) 84 84 { 85 85 int len; 86 86 ··· 101 101 102 102 /* Set up environment variables for ccw device uevent. Return 0 on success, 103 103 * non-zero otherwise. */ 104 - static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) 104 + static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env) 105 105 { 106 - struct ccw_device *cdev = to_ccwdev(dev); 107 - struct ccw_device_id *id = &(cdev->id); 106 + const struct ccw_device *cdev = to_ccwdev(dev); 107 + const struct ccw_device_id *id = &(cdev->id); 108 108 int ret; 109 109 char modalias_buf[30]; 110 110
+1 -1
drivers/s390/cio/scm.c
··· 37 37 scmdrv->remove(scmdev); 38 38 } 39 39 40 - static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env) 40 + static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env) 41 41 { 42 42 return add_uevent_var(env, "MODALIAS=scm:scmdev"); 43 43 }
+2 -2
drivers/s390/crypto/ap_bus.c
··· 613 613 * It sets up a single environment variable DEV_TYPE which contains the 614 614 * hardware device type. 615 615 */ 616 - static int ap_uevent(struct device *dev, struct kobj_uevent_env *env) 616 + static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env) 617 617 { 618 618 int rc = 0; 619 - struct ap_device *ap_dev = to_ap_dev(dev); 619 + const struct ap_device *ap_dev = to_ap_dev(dev); 620 620 621 621 /* Uevents from ap bus core don't need extensions to the env */ 622 622 if (dev == ap_root_device)
+2 -2
drivers/scsi/scsi_sysfs.c
··· 536 536 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 537 537 } 538 538 539 - static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 539 + static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 540 540 { 541 - struct scsi_device *sdev; 541 + const struct scsi_device *sdev; 542 542 543 543 if (dev->type != &scsi_dev_type) 544 544 return 0;
-7
drivers/sh/maple/maple.c
··· 760 760 return 0; 761 761 } 762 762 763 - static int maple_bus_uevent(struct device *dev, 764 - struct kobj_uevent_env *env) 765 - { 766 - return 0; 767 - } 768 - 769 763 static void maple_bus_release(struct device *dev) 770 764 { 771 765 } ··· 776 782 struct bus_type maple_bus_type = { 777 783 .name = "maple", 778 784 .match = maple_match_bus_driver, 779 - .uevent = maple_bus_uevent, 780 785 }; 781 786 EXPORT_SYMBOL_GPL(maple_bus_type); 782 787
+2 -2
drivers/slimbus/core.c
··· 93 93 } 94 94 } 95 95 96 - static int slim_device_uevent(struct device *dev, struct kobj_uevent_env *env) 96 + static int slim_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 97 97 { 98 - struct slim_device *sbdev = to_slim_device(dev); 98 + const struct slim_device *sbdev = to_slim_device(dev); 99 99 100 100 return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev)); 101 101 }
+1 -1
drivers/soc/imx/gpcv2.c
··· 1518 1518 domain->genpd.power_off = imx_pgc_power_down; 1519 1519 1520 1520 pd_pdev->dev.parent = dev; 1521 - pd_pdev->dev.of_node = np; 1521 + device_set_node(&pd_pdev->dev, of_fwnode_handle(np)); 1522 1522 1523 1523 ret = platform_device_add(pd_pdev); 1524 1524 if (ret) {
+2 -2
drivers/soc/qcom/apr.c
··· 387 387 spin_unlock(&apr->svcs_lock); 388 388 } 389 389 390 - static int apr_uevent(struct device *dev, struct kobj_uevent_env *env) 390 + static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env) 391 391 { 392 - struct apr_device *adev = to_apr_device(dev); 392 + const struct apr_device *adev = to_apr_device(dev); 393 393 int ret; 394 394 395 395 ret = of_device_uevent_modalias(dev, env);
+1 -1
drivers/soc/renesas/rcar-sysc.c
··· 437 437 438 438 error = of_genpd_add_provider_onecell(np, &domains->onecell_data); 439 439 if (!error) 440 - of_node_set_flag(np, OF_POPULATED); 440 + fwnode_dev_initialized(of_fwnode_handle(np), true); 441 441 442 442 out_put: 443 443 of_node_put(np);
+2 -2
drivers/soundwire/bus_type.c
··· 58 58 slave->id.sdw_version, slave->id.class_id); 59 59 } 60 60 61 - int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env) 61 + int sdw_slave_uevent(const struct device *dev, struct kobj_uevent_env *env) 62 62 { 63 - struct sdw_slave *slave = dev_to_sdw_dev(dev); 63 + const struct sdw_slave *slave = dev_to_sdw_dev(dev); 64 64 char modalias[32]; 65 65 66 66 sdw_slave_modalias(slave, modalias, sizeof(modalias));
+1 -1
drivers/spi/spi.c
··· 395 395 return strcmp(spi->modalias, drv->name) == 0; 396 396 } 397 397 398 - static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 398 + static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env) 399 399 { 400 400 const struct spi_device *spi = to_spi_device(dev); 401 401 int rc;
+1 -1
drivers/spmi/spmi.c
··· 366 366 sdrv->shutdown(to_spmi_device(dev)); 367 367 } 368 368 369 - static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env) 369 + static int spmi_drv_uevent(const struct device *dev, struct kobj_uevent_env *env) 370 370 { 371 371 int ret; 372 372
+2 -2
drivers/ssb/main.c
··· 339 339 return 0; 340 340 } 341 341 342 - static int ssb_device_uevent(struct device *dev, struct kobj_uevent_env *env) 342 + static int ssb_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 343 343 { 344 - struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); 344 + const struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); 345 345 346 346 if (!dev) 347 347 return -ENODEV;
-6
drivers/staging/greybus/audio_codec.c
··· 1075 1075 gbaudio_dai, ARRAY_SIZE(gbaudio_dai)); 1076 1076 } 1077 1077 1078 - static int gbaudio_codec_remove(struct platform_device *pdev) 1079 - { 1080 - return 0; 1081 - } 1082 - 1083 1078 static const struct of_device_id greybus_asoc_machine_of_match[] = { 1084 1079 { .compatible = "toshiba,apb-dummy-codec", }, 1085 1080 {}, ··· 1089 1094 .of_match_table = greybus_asoc_machine_of_match, 1090 1095 }, 1091 1096 .probe = gbaudio_codec_probe, 1092 - .remove = gbaudio_codec_remove, 1093 1097 }; 1094 1098 module_platform_driver(gbaudio_codec_driver); 1095 1099
+7 -7
drivers/staging/greybus/gbphy.c
··· 71 71 .pm = &gb_gbphy_pm_ops, 72 72 }; 73 73 74 - static int gbphy_dev_uevent(struct device *dev, struct kobj_uevent_env *env) 74 + static int gbphy_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) 75 75 { 76 - struct gbphy_device *gbphy_dev = to_gbphy_dev(dev); 77 - struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc; 78 - struct gb_bundle *bundle = gbphy_dev->bundle; 79 - struct gb_interface *intf = bundle->intf; 80 - struct gb_module *module = intf->module; 81 - struct gb_host_device *hd = intf->hd; 76 + const struct gbphy_device *gbphy_dev = to_gbphy_dev(dev); 77 + const struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc; 78 + const struct gb_bundle *bundle = gbphy_dev->bundle; 79 + const struct gb_interface *intf = bundle->intf; 80 + const struct gb_module *module = intf->module; 81 + const struct gb_host_device *hd = intf->hd; 82 82 83 83 if (add_uevent_var(env, "BUS=%u", hd->bus_id)) 84 84 return -ENOMEM;
+1 -1
drivers/tee/tee_core.c
··· 1207 1207 return 0; 1208 1208 } 1209 1209 1210 - static int tee_client_device_uevent(struct device *dev, 1210 + static int tee_client_device_uevent(const struct device *dev, 1211 1211 struct kobj_uevent_env *env) 1212 1212 { 1213 1213 uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
+2 -2
drivers/thunderbolt/switch.c
··· 2184 2184 kfree(sw); 2185 2185 } 2186 2186 2187 - static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env) 2187 + static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) 2188 2188 { 2189 - struct tb_switch *sw = tb_to_switch(dev); 2189 + const struct tb_switch *sw = tb_to_switch(dev); 2190 2190 const char *type; 2191 2191 2192 2192 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
+1 -1
drivers/thunderbolt/tb.h
··· 837 837 return dev->type == &tb_switch_type; 838 838 } 839 839 840 - static inline struct tb_switch *tb_to_switch(struct device *dev) 840 + static inline struct tb_switch *tb_to_switch(const struct device *dev) 841 841 { 842 842 if (tb_is_switch(dev)) 843 843 return container_of(dev, struct tb_switch, dev);
+3 -3
drivers/thunderbolt/xdomain.c
··· 881 881 } 882 882 static DEVICE_ATTR_RO(key); 883 883 884 - static int get_modalias(struct tb_service *svc, char *buf, size_t size) 884 + static int get_modalias(const struct tb_service *svc, char *buf, size_t size) 885 885 { 886 886 return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key, 887 887 svc->prtcid, svc->prtcvers, svc->prtcrevs); ··· 953 953 NULL, 954 954 }; 955 955 956 - static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env) 956 + static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env) 957 957 { 958 - struct tb_service *svc = container_of(dev, struct tb_service, dev); 958 + const struct tb_service *svc = container_of_const(dev, struct tb_service, dev); 959 959 char modalias[64]; 960 960 961 961 get_modalias(svc, modalias, sizeof(modalias));
+1 -1
drivers/tty/serdev/core.c
··· 42 42 }; 43 43 ATTRIBUTE_GROUPS(serdev_device); 44 44 45 - static int serdev_device_uevent(struct device *dev, struct kobj_uevent_env *env) 45 + static int serdev_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 46 46 { 47 47 int rc; 48 48
-7
drivers/tty/serial/arc_uart.c
··· 631 631 return uart_add_one_port(&arc_uart_driver, &arc_uart_ports[dev_id].port); 632 632 } 633 633 634 - static int arc_serial_remove(struct platform_device *pdev) 635 - { 636 - /* This will never be called */ 637 - return 0; 638 - } 639 - 640 634 static const struct of_device_id arc_uart_dt_ids[] = { 641 635 { .compatible = "snps,arc-uart" }, 642 636 { /* Sentinel */ } ··· 639 645 640 646 static struct platform_driver arc_platform_driver = { 641 647 .probe = arc_serial_probe, 642 - .remove = arc_serial_remove, 643 648 .driver = { 644 649 .name = DRIVER_NAME, 645 650 .of_match_table = arc_uart_dt_ids,
+2 -2
drivers/usb/common/ulpi.c
··· 55 55 return 0; 56 56 } 57 57 58 - static int ulpi_uevent(struct device *dev, struct kobj_uevent_env *env) 58 + static int ulpi_uevent(const struct device *dev, struct kobj_uevent_env *env) 59 59 { 60 - struct ulpi *ulpi = to_ulpi_dev(dev); 60 + const struct ulpi *ulpi = to_ulpi_dev(dev); 61 61 int ret; 62 62 63 63 ret = of_device_uevent_modalias(dev, env);
+3 -3
drivers/usb/core/driver.c
··· 899 899 return 0; 900 900 } 901 901 902 - static int usb_uevent(struct device *dev, struct kobj_uevent_env *env) 902 + static int usb_uevent(const struct device *dev, struct kobj_uevent_env *env) 903 903 { 904 - struct usb_device *usb_dev; 904 + const struct usb_device *usb_dev; 905 905 906 906 if (is_usb_device(dev)) { 907 907 usb_dev = to_usb_device(dev); 908 908 } else if (is_usb_interface(dev)) { 909 - struct usb_interface *intf = to_usb_interface(dev); 909 + const struct usb_interface *intf = to_usb_interface(dev); 910 910 911 911 usb_dev = interface_to_usbdev(intf); 912 912 } else {
+4 -4
drivers/usb/core/message.c
··· 1819 1819 } 1820 1820 } 1821 1821 1822 - static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env) 1822 + static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env) 1823 1823 { 1824 - struct usb_device *usb_dev; 1825 - struct usb_interface *intf; 1826 - struct usb_host_interface *alt; 1824 + const struct usb_device *usb_dev; 1825 + const struct usb_interface *intf; 1826 + const struct usb_host_interface *alt; 1827 1827 1828 1828 intf = to_usb_interface(dev); 1829 1829 usb_dev = interface_to_usbdev(intf);
+4 -4
drivers/usb/core/usb.c
··· 423 423 kfree(udev); 424 424 } 425 425 426 - static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) 426 + static int usb_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) 427 427 { 428 - struct usb_device *usb_dev; 428 + const struct usb_device *usb_dev; 429 429 430 430 usb_dev = to_usb_device(dev); 431 431 ··· 505 505 #endif /* CONFIG_PM */ 506 506 507 507 508 - static char *usb_devnode(struct device *dev, 508 + static char *usb_devnode(const struct device *dev, 509 509 umode_t *mode, kuid_t *uid, kgid_t *gid) 510 510 { 511 - struct usb_device *usb_dev; 511 + const struct usb_device *usb_dev; 512 512 513 513 usb_dev = to_usb_device(dev); 514 514 return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
+3 -3
drivers/usb/phy/phy.c
··· 80 80 return ERR_PTR(-EPROBE_DEFER); 81 81 } 82 82 83 - static struct usb_phy *__device_to_usb_phy(struct device *dev) 83 + static struct usb_phy *__device_to_usb_phy(const struct device *dev) 84 84 { 85 85 struct usb_phy *usb_phy; 86 86 ··· 145 145 kobject_uevent(&usb_phy->dev->kobj, KOBJ_CHANGE); 146 146 } 147 147 148 - static int usb_phy_uevent(struct device *dev, struct kobj_uevent_env *env) 148 + static int usb_phy_uevent(const struct device *dev, struct kobj_uevent_env *env) 149 149 { 150 - struct usb_phy *usb_phy; 150 + const struct usb_phy *usb_phy; 151 151 char uchger_state[50] = { 0 }; 152 152 char uchger_type[50] = { 0 }; 153 153 unsigned long flags;
+1 -2
drivers/usb/roles/class.c
··· 274 274 NULL, 275 275 }; 276 276 277 - static int 278 - usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env) 277 + static int usb_role_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) 279 278 { 280 279 int ret; 281 280
+2 -2
drivers/usb/typec/bus.c
··· 350 350 return 0; 351 351 } 352 352 353 - static int typec_uevent(struct device *dev, struct kobj_uevent_env *env) 353 + static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env) 354 354 { 355 - struct typec_altmode *altmode = to_typec_altmode(dev); 355 + const struct typec_altmode *altmode = to_typec_altmode(dev); 356 356 357 357 if (add_uevent_var(env, "SVID=%04X", altmode->svid)) 358 358 return -ENOMEM;
+1 -1
drivers/usb/typec/class.c
··· 1738 1738 NULL 1739 1739 }; 1740 1740 1741 - static int typec_uevent(struct device *dev, struct kobj_uevent_env *env) 1741 + static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env) 1742 1742 { 1743 1743 int ret; 1744 1744
+2 -2
drivers/virtio/virtio.c
··· 95 95 return 0; 96 96 } 97 97 98 - static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) 98 + static int virtio_uevent(const struct device *_dv, struct kobj_uevent_env *env) 99 99 { 100 - struct virtio_device *dev = dev_to_virtio(_dv); 100 + const struct virtio_device *dev = dev_to_virtio(_dv); 101 101 102 102 return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", 103 103 dev->id.device, dev->id.vendor);
+5 -5
drivers/w1/w1.c
··· 170 170 .fops = &w1_default_fops, 171 171 }; 172 172 173 - static int w1_uevent(struct device *dev, struct kobj_uevent_env *env); 173 + static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env); 174 174 175 175 static struct bus_type w1_bus_type = { 176 176 .name = "w1", ··· 577 577 sysfs_remove_group(&master->dev.kobj, &w1_master_defattr_group); 578 578 } 579 579 580 - static int w1_uevent(struct device *dev, struct kobj_uevent_env *env) 580 + static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env) 581 581 { 582 - struct w1_master *md = NULL; 583 - struct w1_slave *sl = NULL; 584 - char *event_owner, *name; 582 + const struct w1_master *md = NULL; 583 + const struct w1_slave *sl = NULL; 584 + const char *event_owner, *name; 585 585 int err = 0; 586 586 587 587 if (dev->driver == &w1_master_driver) {
+1 -1
drivers/xen/pvcalls-back.c
··· 1191 1191 { 1192 1192 } 1193 1193 1194 - static int pvcalls_back_uevent(struct xenbus_device *xdev, 1194 + static int pvcalls_back_uevent(const struct xenbus_device *xdev, 1195 1195 struct kobj_uevent_env *env) 1196 1196 { 1197 1197 return 0;
+4 -4
drivers/xen/xenbus/xenbus_probe_backend.c
··· 92 92 return 0; 93 93 } 94 94 95 - static int xenbus_uevent_backend(struct device *dev, 95 + static int xenbus_uevent_backend(const struct device *dev, 96 96 struct kobj_uevent_env *env) 97 97 { 98 - struct xenbus_device *xdev; 99 - struct xenbus_driver *drv; 100 - struct xen_bus_type *bus; 98 + const struct xenbus_device *xdev; 99 + const struct xenbus_driver *drv; 100 + const struct xen_bus_type *bus; 101 101 102 102 DPRINTK(""); 103 103
+2 -2
drivers/xen/xenbus/xenbus_probe_frontend.c
··· 73 73 return err; 74 74 } 75 75 76 - static int xenbus_uevent_frontend(struct device *_dev, 76 + static int xenbus_uevent_frontend(const struct device *_dev, 77 77 struct kobj_uevent_env *env) 78 78 { 79 - struct xenbus_device *dev = to_xenbus_device(_dev); 79 + const struct xenbus_device *dev = to_xenbus_device(_dev); 80 80 81 81 if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) 82 82 return -ENOMEM;
+2 -2
drivers/zorro/zorro-driver.c
··· 130 130 return !!zorro_match_device(ids, z); 131 131 } 132 132 133 - static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env) 133 + static int zorro_uevent(const struct device *dev, struct kobj_uevent_env *env) 134 134 { 135 - struct zorro_dev *z; 135 + const struct zorro_dev *z; 136 136 137 137 if (!dev) 138 138 return -ENODEV;
+2 -2
fs/debugfs/inode.c
··· 802 802 * exist for rename to succeed. 803 803 * 804 804 * This function will return a pointer to old_dentry (which is updated to 805 - * reflect renaming) if it succeeds. If an error occurs, %NULL will be 806 - * returned. 805 + * reflect renaming) if it succeeds. If an error occurs, ERR_PTR(-ERROR) 806 + * will be returned. 807 807 * 808 808 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 809 809 * returned.
+2 -2
fs/dlm/lockspace.c
··· 215 215 return ls->ls_uevent_result; 216 216 } 217 217 218 - static int dlm_uevent(struct kobject *kobj, struct kobj_uevent_env *env) 218 + static int dlm_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 219 219 { 220 - struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); 220 + const struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); 221 221 222 222 add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name); 223 223 return 0;
+3 -3
fs/gfs2/sys.c
··· 769 769 wait_for_completion(&sdp->sd_kobj_unregister); 770 770 } 771 771 772 - static int gfs2_uevent(struct kobject *kobj, struct kobj_uevent_env *env) 772 + static int gfs2_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 773 773 { 774 - struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); 775 - struct super_block *s = sdp->sd_vfs; 774 + const struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); 775 + const struct super_block *s = sdp->sd_vfs; 776 776 777 777 add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); 778 778 add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
-3
fs/kernfs/dir.c
··· 149 149 if (kn_from == kn_to) 150 150 return strlcpy(buf, "/", buflen); 151 151 152 - if (!buf) 153 - return -EINVAL; 154 - 155 152 common = kernfs_common_ancestor(kn_from, kn_to); 156 153 if (WARN_ON(!common)) 157 154 return -EINVAL;
+1 -1
include/asm-generic/dma-mapping.h
··· 2 2 #ifndef _ASM_GENERIC_DMA_MAPPING_H 3 3 #define _ASM_GENERIC_DMA_MAPPING_H 4 4 5 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 5 + static inline const struct dma_map_ops *get_arch_dma_ops(void) 6 6 { 7 7 return NULL; 8 8 }
+1 -4
include/drm/drm_mipi_dsi.h
··· 197 197 198 198 #define MIPI_DSI_MODULE_PREFIX "mipi-dsi:" 199 199 200 - static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) 201 - { 202 - return container_of(dev, struct mipi_dsi_device, dev); 203 - } 200 + #define to_mipi_dsi_device(__dev) container_of_const(__dev, struct mipi_dsi_device, dev) 204 201 205 202 /** 206 203 * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
+2 -2
include/linux/acpi.h
··· 723 723 const void *acpi_device_get_match_data(const struct device *dev); 724 724 extern bool acpi_driver_match_device(struct device *dev, 725 725 const struct device_driver *drv); 726 - int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); 726 + int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *); 727 727 int acpi_device_modalias(struct device *, char *, int); 728 728 729 729 struct platform_device *acpi_create_platform_device(struct acpi_device *, ··· 973 973 return NULL; 974 974 } 975 975 976 - static inline int acpi_device_uevent_modalias(struct device *dev, 976 + static inline int acpi_device_uevent_modalias(const struct device *dev, 977 977 struct kobj_uevent_env *env) 978 978 { 979 979 return -ENODEV;
+9 -4
include/linux/cacheinfo.h
··· 80 80 81 81 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); 82 82 int init_cache_level(unsigned int cpu); 83 + int init_of_cache_level(unsigned int cpu); 83 84 int populate_cache_leaves(unsigned int cpu); 84 85 int cache_setup_acpi(unsigned int cpu); 85 86 bool last_level_cache_is_valid(unsigned int cpu); 86 87 bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y); 88 + int fetch_cache_info(unsigned int cpu); 87 89 int detect_cache_attributes(unsigned int cpu); 88 90 #ifndef CONFIG_ACPI_PPTT 89 91 /* 90 - * acpi_find_last_cache_level is only called on ACPI enabled 92 + * acpi_get_cache_info() is only called on ACPI enabled 91 93 * platforms using the PPTT for topology. This means that if 92 94 * the platform supports other firmware configuration methods 93 95 * we need to stub out the call when ACPI is disabled. 94 96 * ACPI enabled platforms not using PPTT won't be making calls 95 97 * to this function so we need not worry about them. 96 98 */ 97 - static inline int acpi_find_last_cache_level(unsigned int cpu) 99 + static inline 100 + int acpi_get_cache_info(unsigned int cpu, 101 + unsigned int *levels, unsigned int *split_levels) 98 102 { 99 - return 0; 103 + return -ENOENT; 100 104 } 101 105 #else 102 - int acpi_find_last_cache_level(unsigned int cpu); 106 + int acpi_get_cache_info(unsigned int cpu, 107 + unsigned int *levels, unsigned int *split_levels); 103 108 #endif 104 109 105 110 const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+1 -1
include/linux/container_of.h
··· 3 3 #define _LINUX_CONTAINER_OF_H 4 4 5 5 #include <linux/build_bug.h> 6 - #include <linux/err.h> 6 + #include <linux/stddef.h> 7 7 8 8 #define typeof_member(T, m) typeof(((T*)0)->m) 9 9
+3 -4
include/linux/device.h
··· 88 88 struct device_type { 89 89 const char *name; 90 90 const struct attribute_group **groups; 91 - int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 92 - char *(*devnode)(struct device *dev, umode_t *mode, 91 + int (*uevent)(const struct device *dev, struct kobj_uevent_env *env); 92 + char *(*devnode)(const struct device *dev, umode_t *mode, 93 93 kuid_t *uid, kgid_t *gid); 94 94 void (*release)(struct device *dev); 95 95 ··· 328 328 #define DL_FLAG_MANAGED BIT(6) 329 329 #define DL_FLAG_SYNC_STATE_ONLY BIT(7) 330 330 #define DL_FLAG_INFERRED BIT(8) 331 + #define DL_FLAG_CYCLE BIT(9) 331 332 332 333 /** 333 334 * enum dl_dev_state - Device driver presence tracking information. ··· 908 907 int device_move(struct device *dev, struct device *new_parent, 909 908 enum dpm_order dpm_order); 910 909 int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); 911 - const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, 912 - kgid_t *gid, const char **tmp); 913 910 int device_is_dependent(struct device *dev, void *target); 914 911 915 912 static inline bool device_supports_offline(struct device *dev)
+46 -51
include/linux/device/bus.h
··· 66 66 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU 67 67 * driver implementations to a bus and allow the driver to do 68 68 * bus-specific setup 69 - * @p: The private data of the driver core, only the driver core can 70 - * touch this. 71 69 * @lock_key: Lock class key for use by the lock validator 72 70 * @need_parent_lock: When probing or removing a device on this bus, the 73 71 * device core should lock the device's parent. ··· 88 90 const struct attribute_group **drv_groups; 89 91 90 92 int (*match)(struct device *dev, struct device_driver *drv); 91 - int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 93 + int (*uevent)(const struct device *dev, struct kobj_uevent_env *env); 92 94 int (*probe)(struct device *dev); 93 95 void (*sync_state)(struct device *dev); 94 96 void (*remove)(struct device *dev); ··· 109 111 110 112 const struct iommu_ops *iommu_ops; 111 113 112 - struct subsys_private *p; 113 - struct lock_class_key lock_key; 114 - 115 114 bool need_parent_lock; 116 115 }; 117 116 118 117 extern int __must_check bus_register(struct bus_type *bus); 119 118 120 - extern void bus_unregister(struct bus_type *bus); 119 + extern void bus_unregister(const struct bus_type *bus); 121 120 122 121 extern int __must_check bus_rescan_devices(struct bus_type *bus); 123 122 ··· 131 136 #define BUS_ATTR_WO(_name) \ 132 137 struct bus_attribute bus_attr_##_name = __ATTR_WO(_name) 133 138 134 - extern int __must_check bus_create_file(struct bus_type *, 135 - struct bus_attribute *); 136 - extern void bus_remove_file(struct bus_type *, struct bus_attribute *); 139 + int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr); 140 + void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr); 137 141 138 142 /* Generic device matching functions that all busses can use to match with */ 139 143 int device_match_name(struct device *dev, const void *name); ··· 144 150 int 
device_match_any(struct device *dev, const void *unused); 145 151 146 152 /* iterator helpers for buses */ 147 - struct subsys_dev_iter { 148 - struct klist_iter ki; 149 - const struct device_type *type; 150 - }; 151 - void subsys_dev_iter_init(struct subsys_dev_iter *iter, 152 - struct bus_type *subsys, 153 - struct device *start, 154 - const struct device_type *type); 155 - struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); 156 - void subsys_dev_iter_exit(struct subsys_dev_iter *iter); 157 - 158 - int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, 153 + int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data, 159 154 int (*fn)(struct device *dev, void *data)); 160 - struct device *bus_find_device(struct bus_type *bus, struct device *start, 155 + struct device *bus_find_device(const struct bus_type *bus, struct device *start, 161 156 const void *data, 162 157 int (*match)(struct device *dev, const void *data)); 163 158 /** ··· 156 173 * @start: Device to begin with 157 174 * @name: name of the device to match 158 175 */ 159 - static inline struct device *bus_find_device_by_name(struct bus_type *bus, 176 + static inline struct device *bus_find_device_by_name(const struct bus_type *bus, 160 177 struct device *start, 161 178 const char *name) 162 179 { ··· 170 187 * @np: of_node of the device to match. 171 188 */ 172 189 static inline struct device * 173 - bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np) 190 + bus_find_device_by_of_node(const struct bus_type *bus, const struct device_node *np) 174 191 { 175 192 return bus_find_device(bus, NULL, np, device_match_of_node); 176 193 } ··· 182 199 * @fwnode: fwnode of the device to match. 
183 200 */ 184 201 static inline struct device * 185 - bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode) 202 + bus_find_device_by_fwnode(const struct bus_type *bus, const struct fwnode_handle *fwnode) 186 203 { 187 204 return bus_find_device(bus, NULL, fwnode, device_match_fwnode); 188 205 } ··· 193 210 * @bus: bus type 194 211 * @devt: device type of the device to match. 195 212 */ 196 - static inline struct device *bus_find_device_by_devt(struct bus_type *bus, 213 + static inline struct device *bus_find_device_by_devt(const struct bus_type *bus, 197 214 dev_t devt) 198 215 { 199 216 return bus_find_device(bus, NULL, &devt, device_match_devt); ··· 206 223 * @cur: device to begin the search with. 207 224 */ 208 225 static inline struct device * 209 - bus_find_next_device(struct bus_type *bus,struct device *cur) 226 + bus_find_next_device(const struct bus_type *bus,struct device *cur) 210 227 { 211 228 return bus_find_device(bus, cur, NULL, device_match_any); 212 229 } ··· 221 238 * @adev: ACPI COMPANION device to match. 
222 239 */ 223 240 static inline struct device * 224 - bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev) 241 + bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev) 225 242 { 226 243 return bus_find_device(bus, NULL, adev, device_match_acpi_dev); 227 244 } 228 245 #else 229 246 static inline struct device * 230 - bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev) 247 + bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev) 231 248 { 232 249 return NULL; 233 250 } 234 251 #endif 235 252 236 - struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, 237 - struct device *hint); 238 - int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, 253 + int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start, 239 254 void *data, int (*fn)(struct device_driver *, void *)); 240 255 void bus_sort_breadthfirst(struct bus_type *bus, 241 256 int (*compare)(const struct device *a, ··· 246 265 */ 247 266 struct notifier_block; 248 267 249 - extern int bus_register_notifier(struct bus_type *bus, 268 + extern int bus_register_notifier(const struct bus_type *bus, 250 269 struct notifier_block *nb); 251 - extern int bus_unregister_notifier(struct bus_type *bus, 270 + extern int bus_unregister_notifier(const struct bus_type *bus, 252 271 struct notifier_block *nb); 253 272 254 - /* All 4 notifers below get called with the target struct device * 255 - * as an argument. Note that those functions are likely to be called 256 - * with the device lock held in the core, so be careful. 
273 + /** 274 + * enum bus_notifier_event - Bus Notifier events that have happened 275 + * @BUS_NOTIFY_ADD_DEVICE: device is added to this bus 276 + * @BUS_NOTIFY_DEL_DEVICE: device is about to be removed from this bus 277 + * @BUS_NOTIFY_REMOVED_DEVICE: device is successfully removed from this bus 278 + * @BUS_NOTIFY_BIND_DRIVER: a driver is about to be bound to this device on this bus 279 + * @BUS_NOTIFY_BOUND_DRIVER: a driver is successfully bound to this device on this bus 280 + * @BUS_NOTIFY_UNBIND_DRIVER: a driver is about to be unbound from this device on this bus 281 + * @BUS_NOTIFY_UNBOUND_DRIVER: a driver is successfully unbound from this device on this bus 282 + * @BUS_NOTIFY_DRIVER_NOT_BOUND: a driver failed to be bound to this device on this bus 283 + * 284 + * These are the value passed to a bus notifier when a specific event happens. 285 + * 286 + * Note that bus notifiers are likely to be called with the device lock already 287 + * held by the driver core, so be careful in any notifier callback as to what 288 + * you do with the device structure. 289 + * 290 + * All bus notifiers are called with the target struct device * as an argument. 
257 291 */ 258 - #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ 259 - #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ 260 - #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ 261 - #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be 262 - bound */ 263 - #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ 264 - #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be 265 - unbound */ 266 - #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound 267 - from the device */ 268 - #define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ 292 + enum bus_notifier_event { 293 + BUS_NOTIFY_ADD_DEVICE, 294 + BUS_NOTIFY_DEL_DEVICE, 295 + BUS_NOTIFY_REMOVED_DEVICE, 296 + BUS_NOTIFY_BIND_DRIVER, 297 + BUS_NOTIFY_BOUND_DRIVER, 298 + BUS_NOTIFY_UNBIND_DRIVER, 299 + BUS_NOTIFY_UNBOUND_DRIVER, 300 + BUS_NOTIFY_DRIVER_NOT_BOUND, 301 + }; 269 302 270 - extern struct kset *bus_get_kset(struct bus_type *bus); 271 - extern struct klist *bus_get_device_klist(struct bus_type *bus); 303 + extern struct kset *bus_get_kset(const struct bus_type *bus); 304 + struct device *bus_get_dev_root(const struct bus_type *bus); 272 305 273 306 #endif
-1
include/linux/device/driver.h
··· 240 240 } 241 241 #endif 242 242 243 - extern int driver_deferred_probe_timeout; 244 243 void driver_deferred_probe_add(struct device *dev); 245 244 int driver_deferred_probe_check_state(struct device *dev); 246 245 void driver_init(void);
+1 -1
include/linux/dma-map-ops.h
··· 90 90 { 91 91 if (dev->dma_ops) 92 92 return dev->dma_ops; 93 - return get_arch_dma_ops(dev->bus); 93 + return get_arch_dma_ops(); 94 94 } 95 95 96 96 static inline void set_dma_ops(struct device *dev,
+3 -12
include/linux/firewire.h
··· 208 208 struct fw_attribute_group attribute_group; 209 209 }; 210 210 211 - static inline struct fw_device *fw_device(struct device *dev) 212 - { 213 - return container_of(dev, struct fw_device, device); 214 - } 211 + #define fw_device(dev) container_of_const(dev, struct fw_device, device) 215 212 216 213 static inline int fw_device_is_shutdown(struct fw_device *device) 217 214 { ··· 226 229 struct fw_attribute_group attribute_group; 227 230 }; 228 231 229 - static inline struct fw_unit *fw_unit(struct device *dev) 230 - { 231 - return container_of(dev, struct fw_unit, device); 232 - } 232 + #define fw_unit(dev) container_of_const(dev, struct fw_unit, device) 233 233 234 234 static inline struct fw_unit *fw_unit_get(struct fw_unit *unit) 235 235 { ··· 240 246 put_device(&unit->device); 241 247 } 242 248 243 - static inline struct fw_device *fw_parent_device(struct fw_unit *unit) 244 - { 245 - return fw_device(unit->device.parent); 246 - } 249 + #define fw_parent_device(unit) fw_device(unit->device.parent) 247 250 248 251 struct ieee1394_device_id; 249 252
+10 -2
include/linux/fwnode.h
··· 18 18 struct device; 19 19 20 20 /* 21 - * fwnode link flags 21 + * fwnode flags 22 22 * 23 23 * LINKS_ADDED: The fwnode has already be parsed to add fwnode links. 24 24 * NOT_DEVICE: The fwnode will never be populated as a struct device. ··· 36 36 #define FWNODE_FLAG_INITIALIZED BIT(2) 37 37 #define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3) 38 38 #define FWNODE_FLAG_BEST_EFFORT BIT(4) 39 + #define FWNODE_FLAG_VISITED BIT(5) 39 40 40 41 struct fwnode_handle { 41 42 struct fwnode_handle *secondary; ··· 47 46 u8 flags; 48 47 }; 49 48 49 + /* 50 + * fwnode link flags 51 + * 52 + * CYCLE: The fwnode link is part of a cycle. Don't defer probe. 53 + */ 54 + #define FWLINK_FLAG_CYCLE BIT(0) 55 + 50 56 struct fwnode_link { 51 57 struct fwnode_handle *supplier; 52 58 struct list_head s_hook; 53 59 struct fwnode_handle *consumer; 54 60 struct list_head c_hook; 61 + u8 flags; 55 62 }; 56 63 57 64 /** ··· 207 198 fwnode->flags &= ~FWNODE_FLAG_INITIALIZED; 208 199 } 209 200 210 - extern u32 fw_devlink_get_flags(void); 211 201 extern bool fw_devlink_is_strict(void); 212 202 int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup); 213 203 void fwnode_links_purge(struct fwnode_handle *fwnode);
+1 -4
include/linux/hyperv.h
··· 1309 1309 }; 1310 1310 1311 1311 1312 - static inline struct hv_device *device_to_hv_device(struct device *d) 1313 - { 1314 - return container_of(d, struct hv_device, device); 1315 - } 1312 + #define device_to_hv_device(d) container_of_const(d, struct hv_device, device) 1316 1313 1317 1314 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d) 1318 1315 {
+15 -7
include/linux/i3c/device.h
··· 18 18 /** 19 19 * enum i3c_error_code - I3C error codes 20 20 * 21 + * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C 22 + * related 23 + * @I3C_ERROR_M0: M0 error 24 + * @I3C_ERROR_M1: M1 error 25 + * @I3C_ERROR_M2: M2 error 26 + * 21 27 * These are the standard error codes as defined by the I3C specification. 22 28 * When -EIO is returned by the i3c_device_do_priv_xfers() or 23 29 * i3c_device_send_hdr_cmds() one can check the error code in 24 30 * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of 25 31 * what went wrong. 26 32 * 27 - * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C 28 - * related 29 - * @I3C_ERROR_M0: M0 error 30 - * @I3C_ERROR_M1: M1 error 31 - * @I3C_ERROR_M2: M2 error 32 33 */ 33 34 enum i3c_error_code { 34 35 I3C_ERROR_UNKNOWN = 0, ··· 187 186 } 188 187 189 188 struct device *i3cdev_to_dev(struct i3c_device *i3cdev); 190 - struct i3c_device *dev_to_i3cdev(struct device *dev); 189 + 190 + /** 191 + * dev_to_i3cdev() - Returns the I3C device containing @dev 192 + * @__dev: device object 193 + * 194 + * Return: a pointer to an I3C device object. 195 + */ 196 + #define dev_to_i3cdev(__dev) container_of_const(__dev, struct i3c_device, dev) 191 197 192 198 const struct i3c_device_id * 193 199 i3c_device_match_id(struct i3c_device *i3cdev, ··· 304 296 305 297 int i3c_device_do_setdasa(struct i3c_device *dev); 306 298 307 - void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info); 299 + void i3c_device_get_info(const struct i3c_device *dev, struct i3c_device_info *info); 308 300 309 301 struct i3c_ibi_payload { 310 302 unsigned int len;
+1 -1
include/linux/kobject.h
··· 137 137 struct kset_uevent_ops { 138 138 int (* const filter)(const struct kobject *kobj); 139 139 const char *(* const name)(const struct kobject *kobj); 140 - int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env); 140 + int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env); 141 141 }; 142 142 143 143 struct kobj_attribute {
+1 -4
include/linux/mcb.h
··· 76 76 struct device *dma_dev; 77 77 }; 78 78 79 - static inline struct mcb_device *to_mcb_device(struct device *dev) 80 - { 81 - return container_of(dev, struct mcb_device, dev); 82 - } 79 + #define to_mcb_device(__dev) container_of_const(__dev, struct mcb_device, dev) 83 80 84 81 /** 85 82 * struct mcb_driver - MEN Chameleon Bus device driver
+2 -2
include/linux/of_device.h
··· 36 36 extern int of_device_request_module(struct device *dev); 37 37 38 38 extern void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env); 39 - extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); 39 + extern int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env); 40 40 41 41 static inline struct device_node *of_cpu_device_node_get(int cpu) 42 42 { ··· 83 83 return -ENODEV; 84 84 } 85 85 86 - static inline int of_device_uevent_modalias(struct device *dev, 86 + static inline int of_device_uevent_modalias(const struct device *dev, 87 87 struct kobj_uevent_env *env) 88 88 { 89 89 return -ENODEV;
+11
include/linux/platform_device.h
··· 207 207 208 208 struct platform_driver { 209 209 int (*probe)(struct platform_device *); 210 + 211 + /* 212 + * Traditionally the remove callback returned an int which however is 213 + * ignored by the driver core. This led to wrong expectations by driver 214 + * authors who thought returning an error code was a valid error 215 + * handling strategy. To convert to a callback returning void, new 216 + * drivers should implement .remove_new() until the conversion it done 217 + * that eventually makes .remove() return void. 218 + */ 210 219 int (*remove)(struct platform_device *); 220 + void (*remove_new)(struct platform_device *); 221 + 211 222 void (*shutdown)(struct platform_device *); 212 223 int (*suspend)(struct platform_device *, pm_message_t state); 213 224 int (*resume)(struct platform_device *);
+3 -6
include/linux/property.h
··· 436 436 unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode, 437 437 unsigned long flags); 438 438 439 - #define fwnode_graph_for_each_endpoint(fwnode, child) \ 440 - for (child = NULL; \ 441 - (child = fwnode_graph_get_next_endpoint(fwnode, child)); ) 439 + #define fwnode_graph_for_each_endpoint(fwnode, child) \ 440 + for (child = fwnode_graph_get_next_endpoint(fwnode, NULL); child; \ 441 + child = fwnode_graph_get_next_endpoint(fwnode, child)) 442 442 443 443 int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, 444 444 struct fwnode_endpoint *endpoint); ··· 485 485 const struct software_node * 486 486 software_node_find_by_name(const struct software_node *parent, 487 487 const char *name); 488 - 489 - int software_node_register_nodes(const struct software_node *nodes); 490 - void software_node_unregister_nodes(const struct software_node *nodes); 491 488 492 489 int software_node_register_node_group(const struct software_node **node_group); 493 490 void software_node_unregister_node_group(const struct software_node **node_group);
+1 -1
include/linux/soundwire/sdw_type.h
··· 21 21 int __sdw_register_driver(struct sdw_driver *drv, struct module *owner); 22 22 void sdw_unregister_driver(struct sdw_driver *drv); 23 23 24 - int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env); 24 + int sdw_slave_uevent(const struct device *dev, struct kobj_uevent_env *env); 25 25 26 26 /** 27 27 * module_sdw_driver() - Helper macro for registering a Soundwire driver
+1 -1
include/linux/spi/spi.h
··· 226 226 static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0, 227 227 "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap"); 228 228 229 - static inline struct spi_device *to_spi_device(struct device *dev) 229 + static inline struct spi_device *to_spi_device(const struct device *dev) 230 230 { 231 231 return dev ? container_of(dev, struct spi_device, dev) : NULL; 232 232 }
+1 -1
include/linux/ssb/ssb.h
··· 285 285 286 286 /* Go from struct device to struct ssb_device. */ 287 287 static inline 288 - struct ssb_device * dev_to_ssb_dev(struct device *dev) 288 + struct ssb_device * dev_to_ssb_dev(const struct device *dev) 289 289 { 290 290 struct __ssb_dev_wrapper *wrap; 291 291 wrap = container_of(dev, struct __ssb_dev_wrapper, dev);
+1 -4
include/linux/surface_aggregator/device.h
··· 229 229 * Return: Returns a pointer to the &struct ssam_device wrapping the given 230 230 * device @d. 231 231 */ 232 - static inline struct ssam_device *to_ssam_device(struct device *d) 233 - { 234 - return container_of(d, struct ssam_device, dev); 235 - } 232 + #define to_ssam_device(d) container_of_const(d, struct ssam_device, dev) 236 233 237 234 /** 238 235 * to_ssam_device_driver() - Casts the given device driver to a SSAM client
+7 -1
include/linux/transport_class.h
··· 70 70 static inline int 71 71 transport_register_device(struct device *dev) 72 72 { 73 + int ret; 74 + 73 75 transport_setup_device(dev); 74 - return transport_add_device(dev); 76 + ret = transport_add_device(dev); 77 + if (ret) 78 + transport_destroy_device(dev); 79 + 80 + return ret; 75 81 } 76 82 77 83 static inline void
+1 -4
include/linux/virtio.h
··· 127 127 void *priv; 128 128 }; 129 129 130 - static inline struct virtio_device *dev_to_virtio(struct device *_dev) 131 - { 132 - return container_of(_dev, struct virtio_device, dev); 133 - } 130 + #define dev_to_virtio(_dev) container_of_const(_dev, struct virtio_device, dev) 134 131 135 132 void virtio_add_status(struct virtio_device *dev, unsigned int status); 136 133 int register_virtio_device(struct virtio_device *dev);
+1 -1
include/sound/hdaudio.h
··· 123 123 int snd_hdac_device_register(struct hdac_device *codec); 124 124 void snd_hdac_device_unregister(struct hdac_device *codec); 125 125 int snd_hdac_device_set_chip_name(struct hdac_device *codec, const char *name); 126 - int snd_hdac_codec_modalias(struct hdac_device *hdac, char *buf, size_t size); 126 + int snd_hdac_codec_modalias(const struct hdac_device *hdac, char *buf, size_t size); 127 127 128 128 int snd_hdac_refresh_widgets(struct hdac_device *codec); 129 129
+2 -5
include/xen/xenbus.h
··· 96 96 unsigned int spurious_threshold; 97 97 }; 98 98 99 - static inline struct xenbus_device *to_xenbus_device(struct device *dev) 100 - { 101 - return container_of(dev, struct xenbus_device, dev); 102 - } 99 + #define to_xenbus_device(__dev) container_of_const(__dev, struct xenbus_device, dev) 103 100 104 101 struct xenbus_device_id 105 102 { ··· 117 120 void (*remove)(struct xenbus_device *dev); 118 121 int (*suspend)(struct xenbus_device *dev); 119 122 int (*resume)(struct xenbus_device *dev); 120 - int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); 123 + int (*uevent)(const struct xenbus_device *, struct kobj_uevent_env *); 121 124 struct device_driver driver; 122 125 int (*read_otherend_details)(struct xenbus_device *dev); 123 126 int (*is_ready)(struct xenbus_device *dev);
+1 -4
kernel/fail_function.c
··· 163 163 164 164 static void fei_debugfs_remove_attr(struct fei_attr *attr) 165 165 { 166 - struct dentry *dir; 167 - 168 - dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir); 169 - debugfs_remove_recursive(dir); 166 + debugfs_lookup_and_remove(attr->kp.symbol_name, fei_debugfs_dir); 170 167 } 171 168 172 169 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+9
kernel/ksysfs.c
··· 51 51 } 52 52 KERNEL_ATTR_RO(cpu_byteorder); 53 53 54 + /* address bits */ 55 + static ssize_t address_bits_show(struct kobject *kobj, 56 + struct kobj_attribute *attr, char *buf) 57 + { 58 + return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */); 59 + } 60 + KERNEL_ATTR_RO(address_bits); 61 + 54 62 #ifdef CONFIG_UEVENT_HELPER 55 63 /* uevent helper program, used during early boot */ 56 64 static ssize_t uevent_helper_show(struct kobject *kobj, ··· 241 233 &fscaps_attr.attr, 242 234 &uevent_seqnum_attr.attr, 243 235 &cpu_byteorder_attr.attr, 236 + &address_bits_attr.attr, 244 237 #ifdef CONFIG_UEVENT_HELPER 245 238 &uevent_helper_attr.attr, 246 239 #endif
+12 -4
lib/kobject.c
··· 112 112 return length; 113 113 } 114 114 115 - static void fill_kobj_path(const struct kobject *kobj, char *path, int length) 115 + static int fill_kobj_path(const struct kobject *kobj, char *path, int length) 116 116 { 117 117 const struct kobject *parent; 118 118 ··· 121 121 int cur = strlen(kobject_name(parent)); 122 122 /* back up enough to print this name with '/' */ 123 123 length -= cur; 124 + if (length <= 0) 125 + return -EINVAL; 124 126 memcpy(path + length, kobject_name(parent), cur); 125 127 *(path + --length) = '/'; 126 128 } 127 129 128 130 pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), 129 131 kobj, __func__, path); 132 + 133 + return 0; 130 134 } 131 135 132 136 /** ··· 145 141 char *path; 146 142 int len; 147 143 144 + retry: 148 145 len = get_kobj_path_length(kobj); 149 146 if (len == 0) 150 147 return NULL; 151 148 path = kzalloc(len, gfp_mask); 152 149 if (!path) 153 150 return NULL; 154 - fill_kobj_path(kobj, path, len); 151 + if (fill_kobj_path(kobj, path, len)) { 152 + kfree(path); 153 + goto retry; 154 + } 155 155 156 156 return path; 157 157 } ··· 737 729 kfree(kobj); 738 730 } 739 731 740 - static struct kobj_type dynamic_kobj_ktype = { 732 + static const struct kobj_type dynamic_kobj_ktype = { 741 733 .release = dynamic_kobj_release, 742 734 .sysfs_ops = &kobj_sysfs_ops, 743 735 }; ··· 921 913 kobject_get_ownership(kobj->parent, uid, gid); 922 914 } 923 915 924 - static struct kobj_type kset_ktype = { 916 + static const struct kobj_type kset_ktype = { 925 917 .sysfs_ops = &kobj_sysfs_ops, 926 918 .release = kset_release, 927 919 .get_ownership = kset_get_ownership,
+3 -2
lib/test_firmware.c
··· 22 22 #include <linux/slab.h> 23 23 #include <linux/uaccess.h> 24 24 #include <linux/delay.h> 25 + #include <linux/kstrtox.h> 25 26 #include <linux/kthread.h> 26 27 #include <linux/vmalloc.h> 27 28 #include <linux/efi_embedded_fw.h> ··· 51 50 }; 52 51 53 52 /** 54 - * test_config - represents configuration for the test for different triggers 53 + * struct test_config - represents configuration for the test for different triggers 55 54 * 56 55 * @name: the name of the firmware file to look for 57 56 * @into_buf: when the into_buf is used if this is true ··· 359 358 int ret; 360 359 361 360 mutex_lock(&test_fw_mutex); 362 - if (strtobool(buf, cfg) < 0) 361 + if (kstrtobool(buf, cfg) < 0) 363 362 ret = -EINVAL; 364 363 else 365 364 ret = size;
+1 -1
samples/kobject/kset-example.c
··· 185 185 * release function, and the set of default attributes we want created 186 186 * whenever a kobject of this type is registered with the kernel. 187 187 */ 188 - static struct kobj_type foo_ktype = { 188 + static const struct kobj_type foo_ktype = { 189 189 .sysfs_ops = &foo_sysfs_ops, 190 190 .release = foo_release, 191 191 .default_groups = foo_default_groups,
+1
scripts/const_structs.checkpatch
··· 35 35 kernel_param_ops 36 36 kgdb_arch 37 37 kgdb_io 38 + kobj_type 38 39 kset_uevent_ops 39 40 lock_manager_operations 40 41 machine_desc
+3 -3
sound/aoa/soundbus/core.c
··· 55 55 } 56 56 57 57 58 - static int soundbus_uevent(struct device *dev, struct kobj_uevent_env *env) 58 + static int soundbus_uevent(const struct device *dev, struct kobj_uevent_env *env) 59 59 { 60 - struct soundbus_dev * soundbus_dev; 61 - struct platform_device * of; 60 + const struct soundbus_dev * soundbus_dev; 61 + const struct platform_device * of; 62 62 const char *compat; 63 63 int retval = 0; 64 64 int cplen, seen = 0;
+1 -1
sound/hda/hda_bus_type.c
··· 65 65 return 1; 66 66 } 67 67 68 - static int hda_uevent(struct device *dev, struct kobj_uevent_env *env) 68 + static int hda_uevent(const struct device *dev, struct kobj_uevent_env *env) 69 69 { 70 70 char modalias[32]; 71 71
+1 -1
sound/hda/hdac_device.c
··· 204 204 * 205 205 * Returns the size of string, like snprintf(), or a negative error code. 206 206 */ 207 - int snd_hdac_codec_modalias(struct hdac_device *codec, char *buf, size_t size) 207 + int snd_hdac_codec_modalias(const struct hdac_device *codec, char *buf, size_t size) 208 208 { 209 209 return scnprintf(buf, size, "hdaudio:v%08Xr%08Xa%02X\n", 210 210 codec->vendor_id, codec->revision_id, codec->type);