Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'driver-core-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH:
"Here is the set of driver core and kernfs changes for 6.2-rc1.

The "big" change in here is the addition of a new macro,
container_of_const() that will preserve the "const-ness" of a pointer
passed into it.

The "problem" of the current container_of() macro is that if you pass
in a "const *", out of it can come a non-const pointer unless you
specifically ask for it. For many usages, we want to preserve the
"const" attribute by using the same call. For a specific example, this
series changes the kobj_to_dev() macro to use it, allowing it to be
used no matter what the const value is. This prevents every subsystem
from having to declare 2 different individual macros (i.e.
kobj_const_to_dev() and kobj_to_dev()) and having the compiler enforce
the const value at build time, which having 2 macros would not do
either.

The driver for all of this has been discussions with the Rust kernel
developers as to how to properly mark driver core, and kobject,
objects as being "non-mutable". The changes to the kobject and driver
core in this pull request are the result of that, as there are lots of
paths where kobjects and device pointers are not modified at all, so
marking them as "const" allows the compiler to enforce this.

So, a nice side effect of the Rust development effort has already been
to clean up the driver core code to be more obvious about object
rules.

All of this has been bike-shedded in quite a lot of detail on lkml
with different names and implementations resulting in the tiny version
we have in here, much better than my original proposal. Lots of
subsystem maintainers have acked the changes as well.

Other than this change, included in here are smaller stuff like:

- kernfs fixes and updates to handle lock contention better

- vmlinux.lds.h fixes and updates

- sysfs and debugfs documentation updates

- device property updates

All of these have been in the linux-next tree for quite a while with
no problems"

* tag 'driver-core-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (58 commits)
device property: Fix documentation for fwnode_get_next_parent()
firmware_loader: fix up to_fw_sysfs() to preserve const
usb.h: take advantage of container_of_const()
device.h: move kobj_to_dev() to use container_of_const()
container_of: add container_of_const() that preserves const-ness of the pointer
driver core: fix up missed drivers/s390/char/hmcdrv_dev.c class.devnode() conversion.
driver core: fix up missed scsi/cxlflash class.devnode() conversion.
driver core: fix up some missing class.devnode() conversions.
driver core: make struct class.devnode() take a const *
driver core: make struct class.dev_uevent() take a const *
cacheinfo: Remove of_node_put() for fw_token
device property: Add a blank line in Kconfig of tests
device property: Rename goto label to be more precise
device property: Move PROPERTY_ENTRY_BOOL() a bit down
device property: Get rid of __PROPERTY_ENTRY_ARRAY_EL*SIZE*()
kernfs: fix all kernel-doc warnings and multiple typos
driver core: pass a const * into of_device_uevent()
kobject: kset_uevent_ops: make name() callback take a const *
kobject: kset_uevent_ops: make filter() callback take a const *
kobject: make kobject_namespace take a const *
...

+499 -505
+12
Documentation/ABI/testing/sysfs-kernel-cpu_byteorder
··· 1 + What: /sys/kernel/cpu_byteorder 2 + Date: February 2023 3 + KernelVersion: 6.2 4 + Contact: Thomas Weißschuh <linux@weissschuh.net> 5 + Description: 6 + The endianness of the running kernel. 7 + 8 + Access: Read 9 + 10 + Valid values: 11 + "little", "big" 12 + Users: util-linux
+1
Documentation/driver-api/driver-model/devres.rst
··· 365 365 devm_kmemdup() 366 366 devm_krealloc() 367 367 devm_kstrdup() 368 + devm_kstrdup_const() 368 369 devm_kvasprintf() 369 370 devm_kzalloc() 370 371
+1 -1
arch/powerpc/platforms/book3s/vas-api.c
··· 53 53 struct vas_window *txwin; 54 54 }; 55 55 56 - static char *coproc_devnode(struct device *dev, umode_t *mode) 56 + static char *coproc_devnode(const struct device *dev, umode_t *mode) 57 57 { 58 58 return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev)); 59 59 }
+2 -2
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
··· 1560 1560 .mmap = pseudo_lock_dev_mmap, 1561 1561 }; 1562 1562 1563 - static char *pseudo_lock_devnode(struct device *dev, umode_t *mode) 1563 + static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) 1564 1564 { 1565 - struct rdtgroup *rdtgrp; 1565 + const struct rdtgroup *rdtgrp; 1566 1566 1567 1567 rdtgrp = dev_get_drvdata(dev); 1568 1568 if (mode)
+1 -1
arch/x86/kernel/cpuid.c
··· 139 139 return 0; 140 140 } 141 141 142 - static char *cpuid_devnode(struct device *dev, umode_t *mode) 142 + static char *cpuid_devnode(const struct device *dev, umode_t *mode) 143 143 { 144 144 return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt)); 145 145 }
+1 -1
arch/x86/kernel/msr.c
··· 250 250 return 0; 251 251 } 252 252 253 - static char *msr_devnode(struct device *dev, umode_t *mode) 253 + static char *msr_devnode(const struct device *dev, umode_t *mode) 254 254 { 255 255 return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt)); 256 256 }
+1 -1
block/bsg.c
··· 235 235 } 236 236 EXPORT_SYMBOL_GPL(bsg_register_queue); 237 237 238 - static char *bsg_devnode(struct device *dev, umode_t *mode) 238 + static char *bsg_devnode(const struct device *dev, umode_t *mode) 239 239 { 240 240 return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); 241 241 }
+2 -2
block/genhd.c
··· 1189 1189 iput(disk->part0->bd_inode); /* frees the disk */ 1190 1190 } 1191 1191 1192 - static int block_uevent(struct device *dev, struct kobj_uevent_env *env) 1192 + static int block_uevent(const struct device *dev, struct kobj_uevent_env *env) 1193 1193 { 1194 - struct gendisk *disk = dev_to_disk(dev); 1194 + const struct gendisk *disk = dev_to_disk(dev); 1195 1195 1196 1196 return add_uevent_var(env, "DISKSEQ=%llu", disk->diskseq); 1197 1197 }
-1
drivers/base/base.h
··· 146 146 { 147 147 return drv->bus->match ? drv->bus->match(dev, drv) : 1; 148 148 } 149 - extern bool driver_allows_async_probing(struct device_driver *drv); 150 149 151 150 extern int driver_add_groups(struct device_driver *drv, 152 151 const struct attribute_group **groups);
+1 -1
drivers/base/bus.c
··· 163 163 .release = bus_release, 164 164 }; 165 165 166 - static int bus_uevent_filter(struct kobject *kobj) 166 + static int bus_uevent_filter(const struct kobject *kobj) 167 167 { 168 168 const struct kobj_type *ktype = get_ktype(kobj); 169 169
+11 -8
drivers/base/cacheinfo.c
··· 196 196 197 197 static int cache_setup_of_node(unsigned int cpu) 198 198 { 199 - struct device_node *np; 199 + struct device_node *np, *prev; 200 200 struct cacheinfo *this_leaf; 201 201 unsigned int index = 0; 202 202 ··· 206 206 return -ENOENT; 207 207 } 208 208 209 + prev = np; 210 + 209 211 while (index < cache_leaves(cpu)) { 210 212 this_leaf = per_cpu_cacheinfo_idx(cpu, index); 211 - if (this_leaf->level != 1) 213 + if (this_leaf->level != 1) { 212 214 np = of_find_next_cache_node(np); 213 - else 214 - np = of_node_get(np);/* cpu node itself */ 215 - if (!np) 216 - break; 215 + of_node_put(prev); 216 + prev = np; 217 + if (!np) 218 + break; 219 + } 217 220 cache_of_set_props(this_leaf, np); 218 221 this_leaf->fw_token = np; 219 222 index++; 220 223 } 224 + 225 + of_node_put(np); 221 226 222 227 if (index != cache_leaves(cpu)) /* not all OF nodes populated */ 223 228 return -ENOENT; ··· 317 312 cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); 318 313 cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); 319 314 } 320 - if (of_have_populated_dt()) 321 - of_node_put(this_leaf->fw_token); 322 315 } 323 316 } 324 317
+6 -1
drivers/base/class.c
··· 62 62 kfree(cp); 63 63 } 64 64 65 - static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj) 65 + static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj) 66 66 { 67 67 struct subsys_private *cp = to_subsys_private(kobj); 68 68 struct class *class = cp->class; ··· 192 192 } 193 193 error = class_add_groups(class_get(cls), cls->class_groups); 194 194 class_put(cls); 195 + if (error) { 196 + kobject_del(&cp->subsys.kobj); 197 + kfree_const(cp->subsys.kobj.name); 198 + kfree(cp); 199 + } 195 200 return error; 196 201 } 197 202 EXPORT_SYMBOL_GPL(__class_register);
+14 -52
drivers/base/core.c
··· 14 14 #include <linux/err.h> 15 15 #include <linux/fwnode.h> 16 16 #include <linux/init.h> 17 + #include <linux/kstrtox.h> 17 18 #include <linux/module.h> 18 19 #include <linux/slab.h> 19 20 #include <linux/string.h> ··· 1629 1628 static bool fw_devlink_strict; 1630 1629 static int __init fw_devlink_strict_setup(char *arg) 1631 1630 { 1632 - return strtobool(arg, &fw_devlink_strict); 1631 + return kstrtobool(arg, &fw_devlink_strict); 1633 1632 } 1634 1633 early_param("fw_devlink.strict", fw_devlink_strict_setup); 1635 1634 ··· 2281 2280 { 2282 2281 struct dev_ext_attribute *ea = to_ext_attr(attr); 2283 2282 2284 - if (strtobool(buf, ea->var) < 0) 2283 + if (kstrtobool(buf, ea->var) < 0) 2285 2284 return -EINVAL; 2286 2285 2287 2286 return size; ··· 2335 2334 kfree(p); 2336 2335 } 2337 2336 2338 - static const void *device_namespace(struct kobject *kobj) 2337 + static const void *device_namespace(const struct kobject *kobj) 2339 2338 { 2340 - struct device *dev = kobj_to_dev(kobj); 2339 + const struct device *dev = kobj_to_dev(kobj); 2341 2340 const void *ns = NULL; 2342 2341 2343 2342 if (dev->class && dev->class->ns_type) ··· 2346 2345 return ns; 2347 2346 } 2348 2347 2349 - static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) 2348 + static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) 2350 2349 { 2351 - struct device *dev = kobj_to_dev(kobj); 2350 + const struct device *dev = kobj_to_dev(kobj); 2352 2351 2353 2352 if (dev->class && dev->class->get_ownership) 2354 2353 dev->class->get_ownership(dev, uid, gid); ··· 2362 2361 }; 2363 2362 2364 2363 2365 - static int dev_uevent_filter(struct kobject *kobj) 2364 + static int dev_uevent_filter(const struct kobject *kobj) 2366 2365 { 2367 2366 const struct kobj_type *ktype = get_ktype(kobj); 2368 2367 2369 2368 if (ktype == &device_ktype) { 2370 - struct device *dev = kobj_to_dev(kobj); 2369 + const struct device *dev = kobj_to_dev(kobj); 2371 2370 if 
(dev->bus) 2372 2371 return 1; 2373 2372 if (dev->class) ··· 2376 2375 return 0; 2377 2376 } 2378 2377 2379 - static const char *dev_uevent_name(struct kobject *kobj) 2378 + static const char *dev_uevent_name(const struct kobject *kobj) 2380 2379 { 2381 - struct device *dev = kobj_to_dev(kobj); 2380 + const struct device *dev = kobj_to_dev(kobj); 2382 2381 2383 2382 if (dev->bus) 2384 2383 return dev->bus->name; ··· 2535 2534 bool val; 2536 2535 int ret; 2537 2536 2538 - ret = strtobool(buf, &val); 2537 + ret = kstrtobool(buf, &val); 2539 2538 if (ret < 0) 2540 2539 return ret; 2541 2540 ··· 2585 2584 const struct attribute_group *group; 2586 2585 const struct attribute_group **groups; 2587 2586 }; 2588 - 2589 - static int devm_attr_group_match(struct device *dev, void *res, void *data) 2590 - { 2591 - return ((union device_attr_group_devres *)res)->group == data; 2592 - } 2593 2587 2594 2588 static void devm_attr_group_remove(struct device *dev, void *res) 2595 2589 { ··· 2637 2641 EXPORT_SYMBOL_GPL(devm_device_add_group); 2638 2642 2639 2643 /** 2640 - * devm_device_remove_group: remove a managed group from a device 2641 - * @dev: device to remove the group from 2642 - * @grp: group to remove 2643 - * 2644 - * This function removes a group of attributes from a device. The attributes 2645 - * previously have to have been created for this group, otherwise it will fail. 
2646 - */ 2647 - void devm_device_remove_group(struct device *dev, 2648 - const struct attribute_group *grp) 2649 - { 2650 - WARN_ON(devres_release(dev, devm_attr_group_remove, 2651 - devm_attr_group_match, 2652 - /* cast away const */ (void *)grp)); 2653 - } 2654 - EXPORT_SYMBOL_GPL(devm_device_remove_group); 2655 - 2656 - /** 2657 2644 * devm_device_add_groups - create a bunch of managed attribute groups 2658 2645 * @dev: The device to create the group for 2659 2646 * @groups: The attribute groups to create, NULL terminated ··· 2671 2692 return 0; 2672 2693 } 2673 2694 EXPORT_SYMBOL_GPL(devm_device_add_groups); 2674 - 2675 - /** 2676 - * devm_device_remove_groups - remove a list of managed groups 2677 - * 2678 - * @dev: The device for the groups to be removed from 2679 - * @groups: NULL terminated list of groups to be removed 2680 - * 2681 - * If groups is not NULL, remove the specified groups from the device. 2682 - */ 2683 - void devm_device_remove_groups(struct device *dev, 2684 - const struct attribute_group **groups) 2685 - { 2686 - WARN_ON(devres_release(dev, devm_attr_groups_remove, 2687 - devm_attr_group_match, 2688 - /* cast away const */ (void *)groups)); 2689 - } 2690 - EXPORT_SYMBOL_GPL(devm_device_remove_groups); 2691 2695 2692 2696 static int device_add_attrs(struct device *dev) 2693 2697 { ··· 2986 3024 } 2987 3025 2988 3026 static const 2989 - struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) 3027 + struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *kobj) 2990 3028 { 2991 - struct class_dir *dir = to_class_dir(kobj); 3029 + const struct class_dir *dir = to_class_dir(kobj); 2992 3030 return dir->class->ns_type; 2993 3031 } 2994 3032
+6 -2
drivers/base/dd.c
··· 843 843 } 844 844 __setup("driver_async_probe=", save_async_options); 845 845 846 - bool driver_allows_async_probing(struct device_driver *drv) 846 + static bool driver_allows_async_probing(struct device_driver *drv) 847 847 { 848 848 switch (drv->probe_type) { 849 849 case PROBE_PREFER_ASYNCHRONOUS: ··· 1162 1162 return 0; 1163 1163 } else if (ret < 0) { 1164 1164 dev_dbg(dev, "Bus failed to match device: %d\n", ret); 1165 - return ret; 1165 + /* 1166 + * Driver could not match with device, but may match with 1167 + * another device on the bus. 1168 + */ 1169 + return 0; 1166 1170 } /* ret > 0 means positive match */ 1167 1171 1168 1172 if (driver_allows_async_probing(drv)) {
+3
drivers/base/devres.c
··· 101 101 size, tot_size))) 102 102 return false; 103 103 104 + /* Actually allocate the full kmalloc bucket size. */ 105 + *tot_size = kmalloc_size_roundup(*tot_size); 106 + 104 107 return true; 105 108 } 106 109
+3 -3
drivers/base/firmware_loader/sysfs.c
··· 64 64 }; 65 65 ATTRIBUTE_GROUPS(firmware_class); 66 66 67 - static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env) 67 + static int do_firmware_uevent(const struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env) 68 68 { 69 69 if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name)) 70 70 return -ENOMEM; ··· 76 76 return 0; 77 77 } 78 78 79 - static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) 79 + static int firmware_uevent(const struct device *dev, struct kobj_uevent_env *env) 80 80 { 81 - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); 81 + const struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); 82 82 int err = 0; 83 83 84 84 mutex_lock(&fw_lock);
+1 -5
drivers/base/firmware_loader/sysfs.h
··· 80 80 struct firmware *fw; 81 81 void *fw_upload_priv; 82 82 }; 83 - 84 - static inline struct fw_sysfs *to_fw_sysfs(struct device *dev) 85 - { 86 - return container_of(dev, struct fw_sysfs, dev); 87 - } 83 + #define to_fw_sysfs(__dev) container_of_const(__dev, struct fw_sysfs, dev) 88 84 89 85 void __fw_load_abort(struct fw_priv *fw_priv); 90 86
+2 -2
drivers/base/platform.c
··· 441 441 struct resource *r; 442 442 int ret; 443 443 444 - if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { 445 - ret = of_irq_get_byname(dev->dev.of_node, name); 444 + if (!dev->dev.of_node || IS_ENABLED(CONFIG_OF_IRQ)) { 445 + ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name); 446 446 if (ret > 0 || ret == -EPROBE_DEFER) 447 447 return ret; 448 448 }
+22 -14
drivers/base/property.c
··· 17 17 #include <linux/property.h> 18 18 #include <linux/phy.h> 19 19 20 - struct fwnode_handle *dev_fwnode(const struct device *dev) 20 + struct fwnode_handle *__dev_fwnode(struct device *dev) 21 21 { 22 22 return IS_ENABLED(CONFIG_OF) && dev->of_node ? 23 23 of_fwnode_handle(dev->of_node) : dev->fwnode; 24 24 } 25 - EXPORT_SYMBOL_GPL(dev_fwnode); 25 + EXPORT_SYMBOL_GPL(__dev_fwnode); 26 + 27 + const struct fwnode_handle *__dev_fwnode_const(const struct device *dev) 28 + { 29 + return IS_ENABLED(CONFIG_OF) && dev->of_node ? 30 + of_fwnode_handle(dev->of_node) : dev->fwnode; 31 + } 32 + EXPORT_SYMBOL_GPL(__dev_fwnode_const); 26 33 27 34 /** 28 35 * device_property_present - check if a property of a device is present ··· 482 475 483 476 ret = fwnode_property_read_string_array(fwnode, propname, values, nval); 484 477 if (ret < 0) 485 - goto out; 478 + goto out_free; 486 479 487 480 ret = match_string(values, nval, string); 488 481 if (ret < 0) 489 482 ret = -ENODATA; 490 - out: 483 + 484 + out_free: 491 485 kfree(values); 492 486 return ret; 493 487 } ··· 609 601 * node's parents. 610 602 * 611 603 * Returns a node pointer with refcount incremented, use 612 - * fwnode_handle_node() on it when done. 604 + * fwnode_handle_put() on it when done. 613 605 */ 614 606 struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode) 615 607 { ··· 764 756 * @dev: Device to find the next child node for. 765 757 * @child: Handle to one of the device's child nodes or a null handle. 766 758 */ 767 - struct fwnode_handle *device_get_next_child_node(struct device *dev, 759 + struct fwnode_handle *device_get_next_child_node(const struct device *dev, 768 760 struct fwnode_handle *child) 769 761 { 770 762 const struct fwnode_handle *fwnode = dev_fwnode(dev); ··· 801 793 * @dev: Device to find the named child node for. 802 794 * @childname: String to match child node name against. 
803 795 */ 804 - struct fwnode_handle *device_get_named_child_node(struct device *dev, 796 + struct fwnode_handle *device_get_named_child_node(const struct device *dev, 805 797 const char *childname) 806 798 { 807 799 return fwnode_get_named_child_node(dev_fwnode(dev), childname); ··· 860 852 * device_get_child_node_count - return the number of child nodes for device 861 853 * @dev: Device to cound the child nodes for 862 854 */ 863 - unsigned int device_get_child_node_count(struct device *dev) 855 + unsigned int device_get_child_node_count(const struct device *dev) 864 856 { 865 857 struct fwnode_handle *child; 866 858 unsigned int count = 0; ··· 872 864 } 873 865 EXPORT_SYMBOL_GPL(device_get_child_node_count); 874 866 875 - bool device_dma_supported(struct device *dev) 867 + bool device_dma_supported(const struct device *dev) 876 868 { 877 869 return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported); 878 870 } 879 871 EXPORT_SYMBOL_GPL(device_dma_supported); 880 872 881 - enum dev_dma_attr device_get_dma_attr(struct device *dev) 873 + enum dev_dma_attr device_get_dma_attr(const struct device *dev) 882 874 { 883 875 if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr)) 884 876 return DEV_DMA_NOT_SUPPORTED; ··· 1214 1206 } 1215 1207 EXPORT_SYMBOL_GPL(device_get_match_data); 1216 1208 1217 - static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode, 1209 + static unsigned int fwnode_graph_devcon_matches(const struct fwnode_handle *fwnode, 1218 1210 const char *con_id, void *data, 1219 1211 devcon_match_fn_t match, 1220 1212 void **matches, ··· 1248 1240 return count; 1249 1241 } 1250 1242 1251 - static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode, 1243 + static unsigned int fwnode_devcon_matches(const struct fwnode_handle *fwnode, 1252 1244 const char *con_id, void *data, 1253 1245 devcon_match_fn_t match, 1254 1246 void **matches, ··· 1290 1282 * device node. 
@match will be used to convert the connection description to 1291 1283 * data the caller is expecting to be returned. 1292 1284 */ 1293 - void *fwnode_connection_find_match(struct fwnode_handle *fwnode, 1285 + void *fwnode_connection_find_match(const struct fwnode_handle *fwnode, 1294 1286 const char *con_id, void *data, 1295 1287 devcon_match_fn_t match) 1296 1288 { ··· 1327 1319 * 1328 1320 * Return: Number of matches resolved, or negative errno. 1329 1321 */ 1330 - int fwnode_connection_find_matches(struct fwnode_handle *fwnode, 1322 + int fwnode_connection_find_matches(const struct fwnode_handle *fwnode, 1331 1323 const char *con_id, void *data, 1332 1324 devcon_match_fn_t match, 1333 1325 void **matches, unsigned int matches_len)
+1
drivers/base/test/Kconfig
··· 8 8 The module name will be test_async_driver_probe.ko 9 9 10 10 If unsure say N. 11 + 11 12 config DRIVER_PE_KUNIT_TEST 12 13 bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS 13 14 depends on KUNIT=y
+1 -1
drivers/block/aoe/aoechr.c
··· 273 273 .llseek = noop_llseek, 274 274 }; 275 275 276 - static char *aoe_devnode(struct device *dev, umode_t *mode) 276 + static char *aoe_devnode(const struct device *dev, umode_t *mode) 277 277 { 278 278 return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev)); 279 279 }
+1 -1
drivers/char/mem.c
··· 746 746 .llseek = noop_llseek, 747 747 }; 748 748 749 - static char *mem_devnode(struct device *dev, umode_t *mode) 749 + static char *mem_devnode(const struct device *dev, umode_t *mode) 750 750 { 751 751 if (mode && devlist[MINOR(dev->devt)].mode) 752 752 *mode = devlist[MINOR(dev->devt)].mode;
+2 -2
drivers/char/misc.c
··· 269 269 } 270 270 EXPORT_SYMBOL(misc_deregister); 271 271 272 - static char *misc_devnode(struct device *dev, umode_t *mode) 272 + static char *misc_devnode(const struct device *dev, umode_t *mode) 273 273 { 274 - struct miscdevice *c = dev_get_drvdata(dev); 274 + const struct miscdevice *c = dev_get_drvdata(dev); 275 275 276 276 if (mode && c->mode) 277 277 *mode = c->mode;
+1 -1
drivers/dma-buf/dma-buf-sysfs-stats.c
··· 132 132 133 133 134 134 /* Statistics files do not need to send uevents. */ 135 - static int dmabuf_sysfs_uevent_filter(struct kobject *kobj) 135 + static int dmabuf_sysfs_uevent_filter(const struct kobject *kobj) 136 136 { 137 137 return 0; 138 138 }
+1 -1
drivers/dma-buf/dma-heap.c
··· 301 301 return err_ret; 302 302 } 303 303 304 - static char *dma_heap_devnode(struct device *dev, umode_t *mode) 304 + static char *dma_heap_devnode(const struct device *dev, umode_t *mode) 305 305 { 306 306 return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev)); 307 307 }
+1 -1
drivers/firmware/dmi-id.c
··· 155 155 NULL 156 156 }; 157 157 158 - static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env) 158 + static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) 159 159 { 160 160 ssize_t len; 161 161
+3 -3
drivers/gnss/core.c
··· 337 337 [GNSS_TYPE_MTK] = "MTK", 338 338 }; 339 339 340 - static const char *gnss_type_name(struct gnss_device *gdev) 340 + static const char *gnss_type_name(const struct gnss_device *gdev) 341 341 { 342 342 const char *name = NULL; 343 343 ··· 365 365 }; 366 366 ATTRIBUTE_GROUPS(gnss); 367 367 368 - static int gnss_uevent(struct device *dev, struct kobj_uevent_env *env) 368 + static int gnss_uevent(const struct device *dev, struct kobj_uevent_env *env) 369 369 { 370 - struct gnss_device *gdev = to_gnss_device(dev); 370 + const struct gnss_device *gdev = to_gnss_device(dev); 371 371 int ret; 372 372 373 373 ret = add_uevent_var(env, "GNSS_TYPE=%s", gnss_type_name(gdev));
+1 -1
drivers/gpu/drm/drm_sysfs.c
··· 91 91 static void drm_sysfs_acpi_unregister(void) { } 92 92 #endif 93 93 94 - static char *drm_devnode(struct device *dev, umode_t *mode) 94 + static char *drm_devnode(const struct device *dev, umode_t *mode) 95 95 { 96 96 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); 97 97 }
+1 -1
drivers/hid/usbhid/hiddev.c
··· 857 857 .llseek = noop_llseek, 858 858 }; 859 859 860 - static char *hiddev_devnode(struct device *dev, umode_t *mode) 860 + static char *hiddev_devnode(const struct device *dev, umode_t *mode) 861 861 { 862 862 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); 863 863 }
+3 -3
drivers/infiniband/core/device.c
··· 511 511 kfree_rcu(dev, rcu_head); 512 512 } 513 513 514 - static int ib_device_uevent(struct device *device, 514 + static int ib_device_uevent(const struct device *device, 515 515 struct kobj_uevent_env *env) 516 516 { 517 517 if (add_uevent_var(env, "NAME=%s", dev_name(device))) ··· 524 524 return 0; 525 525 } 526 526 527 - static const void *net_namespace(struct device *d) 527 + static const void *net_namespace(const struct device *d) 528 528 { 529 - struct ib_core_device *coredev = 529 + const struct ib_core_device *coredev = 530 530 container_of(d, struct ib_core_device, dev); 531 531 532 532 return read_pnet(&coredev->rdma_net);
+1 -1
drivers/infiniband/core/user_mad.c
··· 1224 1224 }; 1225 1225 ATTRIBUTE_GROUPS(umad_class_dev); 1226 1226 1227 - static char *umad_devnode(struct device *dev, umode_t *mode) 1227 + static char *umad_devnode(const struct device *dev, umode_t *mode) 1228 1228 { 1229 1229 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); 1230 1230 }
+1 -1
drivers/infiniband/core/uverbs_main.c
··· 1237 1237 put_device(&uverbs_dev->dev); 1238 1238 } 1239 1239 1240 - static char *uverbs_devnode(struct device *dev, umode_t *mode) 1240 + static char *uverbs_devnode(const struct device *dev, umode_t *mode) 1241 1241 { 1242 1242 if (mode) 1243 1243 *mode = 0666;
+2 -2
drivers/infiniband/hw/hfi1/device.c
··· 72 72 return hfi1_class_name; 73 73 } 74 74 75 - static char *hfi1_devnode(struct device *dev, umode_t *mode) 75 + static char *hfi1_devnode(const struct device *dev, umode_t *mode) 76 76 { 77 77 if (mode) 78 78 *mode = 0600; ··· 85 85 return hfi1_class_name_user; 86 86 } 87 87 88 - static char *hfi1_user_devnode(struct device *dev, umode_t *mode) 88 + static char *hfi1_user_devnode(const struct device *dev, umode_t *mode) 89 89 { 90 90 if (mode) 91 91 *mode = 0666;
+1 -1
drivers/input/input.c
··· 1914 1914 #endif 1915 1915 }; 1916 1916 1917 - static char *input_devnode(struct device *dev, umode_t *mode) 1917 + static char *input_devnode(const struct device *dev, umode_t *mode) 1918 1918 { 1919 1919 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); 1920 1920 }
+2 -2
drivers/isdn/mISDN/core.c
··· 139 139 }; 140 140 ATTRIBUTE_GROUPS(mISDN); 141 141 142 - static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env) 142 + static int mISDN_uevent(const struct device *dev, struct kobj_uevent_env *env) 143 143 { 144 - struct mISDNdevice *mdev = dev_to_mISDN(dev); 144 + const struct mISDNdevice *mdev = dev_to_mISDN(dev); 145 145 146 146 if (!mdev) 147 147 return 0;
+4 -4
drivers/media/dvb-core/dvbdev.c
··· 1028 1028 EXPORT_SYMBOL_GPL(dvb_module_release); 1029 1029 #endif 1030 1030 1031 - static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env) 1031 + static int dvb_uevent(const struct device *dev, struct kobj_uevent_env *env) 1032 1032 { 1033 - struct dvb_device *dvbdev = dev_get_drvdata(dev); 1033 + const struct dvb_device *dvbdev = dev_get_drvdata(dev); 1034 1034 1035 1035 add_uevent_var(env, "DVB_ADAPTER_NUM=%d", dvbdev->adapter->num); 1036 1036 add_uevent_var(env, "DVB_DEVICE_TYPE=%s", dnames[dvbdev->type]); ··· 1038 1038 return 0; 1039 1039 } 1040 1040 1041 - static char *dvb_devnode(struct device *dev, umode_t *mode) 1041 + static char *dvb_devnode(const struct device *dev, umode_t *mode) 1042 1042 { 1043 - struct dvb_device *dvbdev = dev_get_drvdata(dev); 1043 + const struct dvb_device *dvbdev = dev_get_drvdata(dev); 1044 1044 1045 1045 return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d", 1046 1046 dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id);
+2 -2
drivers/media/pci/ddbridge/ddbridge-core.c
··· 2716 2716 .release = ddb_release, 2717 2717 }; 2718 2718 2719 - static char *ddb_devnode(struct device *device, umode_t *mode) 2719 + static char *ddb_devnode(const struct device *device, umode_t *mode) 2720 2720 { 2721 - struct ddb *dev = dev_get_drvdata(device); 2721 + const struct ddb *dev = dev_get_drvdata(device); 2722 2722 2723 2723 return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr); 2724 2724 }
+1 -1
drivers/media/rc/rc-main.c
··· 1017 1017 } 1018 1018 1019 1019 /* class for /sys/class/rc */ 1020 - static char *rc_devnode(struct device *dev, umode_t *mode) 1020 + static char *rc_devnode(const struct device *dev, umode_t *mode) 1021 1021 { 1022 1022 return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev)); 1023 1023 }
+4 -12
drivers/mfd/vexpress-sysreg.c
··· 61 61 .name = "basic-mmio-gpio", 62 62 .of_compatible = "arm,vexpress-sysreg,sys_led", 63 63 .num_resources = 1, 64 - .resources = (struct resource []) { 65 - DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"), 66 - }, 64 + .resources = &DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"), 67 65 .platform_data = &vexpress_sysreg_sys_led_pdata, 68 66 .pdata_size = sizeof(vexpress_sysreg_sys_led_pdata), 69 67 }, { 70 68 .name = "basic-mmio-gpio", 71 69 .of_compatible = "arm,vexpress-sysreg,sys_mci", 72 70 .num_resources = 1, 73 - .resources = (struct resource []) { 74 - DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"), 75 - }, 71 + .resources = &DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"), 76 72 .platform_data = &vexpress_sysreg_sys_mci_pdata, 77 73 .pdata_size = sizeof(vexpress_sysreg_sys_mci_pdata), 78 74 }, { 79 75 .name = "basic-mmio-gpio", 80 76 .of_compatible = "arm,vexpress-sysreg,sys_flash", 81 77 .num_resources = 1, 82 - .resources = (struct resource []) { 83 - DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"), 84 - }, 78 + .resources = &DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"), 85 79 .platform_data = &vexpress_sysreg_sys_flash_pdata, 86 80 .pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata), 87 81 }, { 88 82 .name = "vexpress-syscfg", 89 83 .num_resources = 1, 90 - .resources = (struct resource []) { 91 - DEFINE_RES_MEM(SYS_MISC, 0x4c), 92 - }, 84 + .resources = &DEFINE_RES_MEM(SYS_MISC, 0x4c), 93 85 } 94 86 }; 95 87
+1 -1
drivers/misc/cxl/file.c
··· 546 546 }; 547 547 548 548 549 - static char *cxl_devnode(struct device *dev, umode_t *mode) 549 + static char *cxl_devnode(const struct device *dev, umode_t *mode) 550 550 { 551 551 if (cpu_has_feature(CPU_FTR_HVMODE) && 552 552 CXL_DEVT_IS_CARD(dev->devt)) {
+1 -1
drivers/misc/genwqe/card_base.c
··· 1349 1349 * Default mode should be rw for everybody. Do not change default 1350 1350 * device name. 1351 1351 */ 1352 - static char *genwqe_devnode(struct device *dev, umode_t *mode) 1352 + static char *genwqe_devnode(const struct device *dev, umode_t *mode) 1353 1353 { 1354 1354 if (mode) 1355 1355 *mode = 0666;
+1 -1
drivers/misc/ocxl/file.c
··· 584 584 device_unregister(&info->dev); 585 585 } 586 586 587 - static char *ocxl_devnode(struct device *dev, umode_t *mode) 587 + static char *ocxl_devnode(const struct device *dev, umode_t *mode) 588 588 { 589 589 return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev)); 590 590 }
+2 -2
drivers/net/ipvlan/ipvtap.c
··· 30 30 static dev_t ipvtap_major; 31 31 static struct cdev ipvtap_cdev; 32 32 33 - static const void *ipvtap_net_namespace(struct device *d) 33 + static const void *ipvtap_net_namespace(const struct device *d) 34 34 { 35 - struct net_device *dev = to_net_dev(d->parent); 35 + const struct net_device *dev = to_net_dev(d->parent); 36 36 return dev_net(dev); 37 37 } 38 38
+2 -2
drivers/net/macvtap.c
··· 35 35 */ 36 36 static dev_t macvtap_major; 37 37 38 - static const void *macvtap_net_namespace(struct device *d) 38 + static const void *macvtap_net_namespace(const struct device *d) 39 39 { 40 - struct net_device *dev = to_net_dev(d->parent); 40 + const struct net_device *dev = to_net_dev(d->parent); 41 41 return dev_net(dev); 42 42 } 43 43
+2 -2
drivers/nvme/host/core.c
··· 4599 4599 } 4600 4600 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4601 4601 4602 - static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) 4602 + static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) 4603 4603 { 4604 - struct nvme_ctrl *ctrl = 4604 + const struct nvme_ctrl *ctrl = 4605 4605 container_of(dev, struct nvme_ctrl, ctrl_device); 4606 4606 struct nvmf_ctrl_options *opts = ctrl->opts; 4607 4607 int ret;
+3 -3
drivers/of/device.c
··· 332 332 333 333 /** 334 334 * of_device_uevent - Display OF related uevent information 335 - * @dev: Device to apply DMA configuration 336 - * @env: Kernel object's userspace event reference 335 + * @dev: Device to display the uevent information for 336 + * @env: Kernel object's userspace event reference to fill up 337 337 */ 338 - void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) 338 + void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 339 339 { 340 340 const char *compat, *type; 341 341 struct alias_prop *app;
+2 -2
drivers/pcmcia/cs.c
··· 810 810 EXPORT_SYMBOL(pcmcia_reset_card); 811 811 812 812 813 - static int pcmcia_socket_uevent(struct device *dev, 813 + static int pcmcia_socket_uevent(const struct device *dev, 814 814 struct kobj_uevent_env *env) 815 815 { 816 - struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); 816 + const struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); 817 817 818 818 if (add_uevent_var(env, "SOCKET_NO=%u", s->sock)) 819 819 return -ENOMEM;
+1 -1
drivers/power/supply/power_supply.h
··· 16 16 #ifdef CONFIG_SYSFS 17 17 18 18 extern void power_supply_init_attrs(struct device_type *dev_type); 19 - extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env); 19 + extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env); 20 20 21 21 #else 22 22
+4 -4
drivers/power/supply/power_supply_sysfs.c
··· 427 427 } 428 428 } 429 429 430 - static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env, 430 + static int add_prop_uevent(const struct device *dev, struct kobj_uevent_env *env, 431 431 enum power_supply_property prop, char *prop_buf) 432 432 { 433 433 int ret = 0; ··· 438 438 pwr_attr = &power_supply_attrs[prop]; 439 439 dev_attr = &pwr_attr->dev_attr; 440 440 441 - ret = power_supply_show_property(dev, dev_attr, prop_buf); 441 + ret = power_supply_show_property((struct device *)dev, dev_attr, prop_buf); 442 442 if (ret == -ENODEV || ret == -ENODATA) { 443 443 /* 444 444 * When a battery is absent, we expect -ENODEV. Don't abort; ··· 458 458 pwr_attr->prop_name, prop_buf); 459 459 } 460 460 461 - int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) 461 + int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env) 462 462 { 463 - struct power_supply *psy = dev_get_drvdata(dev); 463 + const struct power_supply *psy = dev_get_drvdata(dev); 464 464 int ret = 0, j; 465 465 char *prop_buf; 466 466
+1 -1
drivers/s390/char/hmcdrv_dev.c
··· 90 90 * 91 91 * Return: recommended device file name in /dev 92 92 */ 93 - static char *hmcdrv_dev_name(struct device *dev, umode_t *mode) 93 + static char *hmcdrv_dev_name(const struct device *dev, umode_t *mode) 94 94 { 95 95 char *nodename = NULL; 96 96 const char *devname = dev_name(dev); /* kernel device name */
+1 -1
drivers/scsi/cxlflash/main.c
··· 3857 3857 * 3858 3858 * Return: Allocated string describing the devtmpfs structure. 3859 3859 */ 3860 - static char *cxlflash_devnode(struct device *dev, umode_t *mode) 3860 + static char *cxlflash_devnode(const struct device *dev, umode_t *mode) 3861 3861 { 3862 3862 return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev)); 3863 3863 }
+1 -1
drivers/tty/tty_io.c
··· 3498 3498 *fops = tty_fops; 3499 3499 } 3500 3500 3501 - static char *tty_devnode(struct device *dev, umode_t *mode) 3501 + static char *tty_devnode(const struct device *dev, umode_t *mode) 3502 3502 { 3503 3503 if (!mode) 3504 3504 return NULL;
+1 -1
drivers/usb/class/usblp.c
··· 1090 1090 .llseek = noop_llseek, 1091 1091 }; 1092 1092 1093 - static char *usblp_devnode(struct device *dev, umode_t *mode) 1093 + static char *usblp_devnode(const struct device *dev, umode_t *mode) 1094 1094 { 1095 1095 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); 1096 1096 }
+1 -1
drivers/usb/core/file.c
··· 62 62 struct class *class; 63 63 } *usb_class; 64 64 65 - static char *usb_devnode(struct device *dev, umode_t *mode) 65 + static char *usb_devnode(const struct device *dev, umode_t *mode) 66 66 { 67 67 struct usb_class_driver *drv; 68 68
+2 -2
drivers/usb/gadget/udc/core.c
··· 1723 1723 NULL, 1724 1724 }; 1725 1725 1726 - static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env) 1726 + static int usb_udc_uevent(const struct device *dev, struct kobj_uevent_env *env) 1727 1727 { 1728 - struct usb_udc *udc = container_of(dev, struct usb_udc, dev); 1728 + const struct usb_udc *udc = container_of(dev, struct usb_udc, dev); 1729 1729 int ret; 1730 1730 1731 1731 ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
+1 -1
drivers/usb/misc/iowarrior.c
··· 717 717 .llseek = noop_llseek, 718 718 }; 719 719 720 - static char *iowarrior_devnode(struct device *dev, umode_t *mode) 720 + static char *iowarrior_devnode(const struct device *dev, umode_t *mode) 721 721 { 722 722 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); 723 723 }
+1 -1
drivers/usb/misc/legousbtower.c
··· 245 245 .llseek = tower_llseek, 246 246 }; 247 247 248 - static char *legousbtower_devnode(struct device *dev, umode_t *mode) 248 + static char *legousbtower_devnode(const struct device *dev, umode_t *mode) 249 249 { 250 250 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); 251 251 }
+1 -1
drivers/usb/roles/class.c
··· 87 87 } 88 88 EXPORT_SYMBOL_GPL(usb_role_switch_get_role); 89 89 90 - static void *usb_role_switch_match(struct fwnode_handle *fwnode, const char *id, 90 + static void *usb_role_switch_match(const struct fwnode_handle *fwnode, const char *id, 91 91 void *data) 92 92 { 93 93 struct device *dev;
+4 -4
drivers/usb/typec/mux.c
··· 32 32 return device_match_fwnode(dev, fwnode); 33 33 } 34 34 35 - static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id, 36 - void *data) 35 + static void *typec_switch_match(const struct fwnode_handle *fwnode, 36 + const char *id, void *data) 37 37 { 38 38 struct device *dev; 39 39 ··· 262 262 return device_match_fwnode(dev, fwnode); 263 263 } 264 264 265 - static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id, 266 - void *data) 265 + static void *typec_mux_match(const struct fwnode_handle *fwnode, 266 + const char *id, void *data) 267 267 { 268 268 const struct typec_altmode_desc *desc = data; 269 269 struct device *dev;
+1 -1
drivers/usb/typec/retimer.c
··· 22 22 return is_typec_retimer(dev) && device_match_fwnode(dev, fwnode); 23 23 } 24 24 25 - static void *typec_retimer_match(struct fwnode_handle *fwnode, const char *id, void *data) 25 + static void *typec_retimer_match(const struct fwnode_handle *fwnode, const char *id, void *data) 26 26 { 27 27 struct device *dev; 28 28
+1 -1
drivers/vdpa/vdpa_user/vduse_dev.c
··· 1656 1656 .llseek = noop_llseek, 1657 1657 }; 1658 1658 1659 - static char *vduse_devnode(struct device *dev, umode_t *mode) 1659 + static char *vduse_devnode(const struct device *dev, umode_t *mode) 1660 1660 { 1661 1661 return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev)); 1662 1662 }
+1 -1
drivers/vfio/group.c
··· 827 827 } 828 828 EXPORT_SYMBOL_GPL(vfio_file_has_dev); 829 829 830 - static char *vfio_devnode(struct device *dev, umode_t *mode) 830 + static char *vfio_devnode(const struct device *dev, umode_t *mode) 831 831 { 832 832 return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); 833 833 }
+67 -39
fs/kernfs/dir.c
··· 125 125 * kn_to: /n1/n2/n3 [depth=3] 126 126 * result: /../.. 127 127 * 128 - * [3] when @kn_to is NULL result will be "(null)" 128 + * [3] when @kn_to is %NULL result will be "(null)" 129 129 * 130 - * Returns the length of the full path. If the full length is equal to or 130 + * Return: the length of the full path. If the full length is equal to or 131 131 * greater than @buflen, @buf contains the truncated path with the trailing 132 132 * '\0'. On error, -errno is returned. 133 133 */ ··· 185 185 * @buflen: size of @buf 186 186 * 187 187 * Copies the name of @kn into @buf of @buflen bytes. The behavior is 188 - * similar to strlcpy(). It returns the length of @kn's name and if @buf 189 - * isn't long enough, it's filled upto @buflen-1 and nul terminated. 188 + * similar to strlcpy(). 190 189 * 191 - * Fills buffer with "(null)" if @kn is NULL. 190 + * Fills buffer with "(null)" if @kn is %NULL. 191 + * 192 + * Return: the length of @kn's name and if @buf isn't long enough, 193 + * it's filled up to @buflen-1 and nul terminated. 192 194 * 193 195 * This function can be called from any context. 194 196 */ ··· 217 215 * path (which includes '..'s) as needed to reach from @from to @to is 218 216 * returned. 219 217 * 220 - * Returns the length of the full path. If the full length is equal to or 218 + * Return: the length of the full path. If the full length is equal to or 221 219 * greater than @buflen, @buf contains the truncated path with the trailing 222 220 * '\0'. On error, -errno is returned. 223 221 */ ··· 289 287 * 290 288 * Determines @kn's parent, pins and returns it. This function can be 291 289 * called from any context. 
290 + * 291 + * Return: parent node of @kn 292 292 */ 293 293 struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn) 294 294 { ··· 306 302 } 307 303 308 304 /** 309 - * kernfs_name_hash 305 + * kernfs_name_hash - calculate hash of @ns + @name 310 306 * @name: Null terminated string to hash 311 307 * @ns: Namespace tag to hash 312 308 * 313 - * Returns 31 bit hash of ns + name (so it fits in an off_t ) 309 + * Return: 31-bit hash of ns + name (so it fits in an off_t) 314 310 */ 315 311 static unsigned int kernfs_name_hash(const char *name, const void *ns) 316 312 { ··· 358 354 * Locking: 359 355 * kernfs_rwsem held exclusive 360 356 * 361 - * RETURNS: 362 - * 0 on susccess -EEXIST on failure. 357 + * Return: 358 + * %0 on success, -EEXIST on failure. 363 359 */ 364 360 static int kernfs_link_sibling(struct kernfs_node *kn) 365 361 { ··· 398 394 * @kn: kernfs_node of interest 399 395 * 400 396 * Try to unlink @kn from its sibling rbtree which starts from 401 - * kn->parent->dir.children. Returns %true if @kn was actually 402 - * removed, %false if @kn wasn't on the rbtree. 397 + * kn->parent->dir.children. 398 + * 399 + * Return: %true if @kn was actually removed, 400 + * %false if @kn wasn't on the rbtree. 403 401 * 404 402 * Locking: 405 403 * kernfs_rwsem held exclusive ··· 425 419 * @kn: kernfs_node to get an active reference to 426 420 * 427 421 * Get an active reference of @kn. This function is noop if @kn 428 - * is NULL. 422 + * is %NULL. 429 423 * 430 - * RETURNS: 431 - * Pointer to @kn on success, NULL on failure. 424 + * Return: 425 + * Pointer to @kn on success, %NULL on failure. 432 426 */ 433 427 struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) 434 428 { ··· 448 442 * @kn: kernfs_node to put an active reference to 449 443 * 450 444 * Put an active reference to @kn. This function is noop if @kn 451 - * is NULL. 445 + * is %NULL. 
452 446 */ 453 447 void kernfs_put_active(struct kernfs_node *kn) 454 448 { ··· 470 464 * kernfs_drain - drain kernfs_node 471 465 * @kn: kernfs_node to drain 472 466 * 473 - * Drain existing usages and nuke all existing mmaps of @kn. Mutiple 467 + * Drain existing usages and nuke all existing mmaps of @kn. Multiple 474 468 * removers may invoke this function concurrently on @kn and all will 475 469 * return after draining is complete. 476 470 */ ··· 583 577 * kernfs_node_from_dentry - determine kernfs_node associated with a dentry 584 578 * @dentry: the dentry in question 585 579 * 586 - * Return the kernfs_node associated with @dentry. If @dentry is not a 580 + * Return: the kernfs_node associated with @dentry. If @dentry is not a 587 581 * kernfs one, %NULL is returned. 588 582 * 589 583 * While the returned kernfs_node will stay accessible as long as @dentry ··· 690 684 * @id's lower 32bits encode ino and upper gen. If the gen portion is 691 685 * zero, all generations are matched. 692 686 * 693 - * RETURNS: 694 - * NULL on failure. Return a kernfs node with reference counter incremented 687 + * Return: %NULL on failure, 688 + * otherwise a kernfs node with reference counter incremented. 695 689 */ 696 690 struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root, 697 691 u64 id) ··· 739 733 * function increments nlink of the parent's inode if @kn is a 740 734 * directory and link into the children list of the parent. 741 735 * 742 - * RETURNS: 743 - * 0 on success, -EEXIST if entry with the given name already 736 + * Return: 737 + * %0 on success, -EEXIST if entry with the given name already 744 738 * exists. 745 739 */ 746 740 int kernfs_add_one(struct kernfs_node *kn) ··· 803 797 * @name: name to look for 804 798 * @ns: the namespace tag to use 805 799 * 806 - * Look for kernfs_node with name @name under @parent. Returns pointer to 807 - * the found kernfs_node on success, %NULL on failure. 
800 + * Look for kernfs_node with name @name under @parent. 801 + * 802 + * Return: pointer to the found kernfs_node on success, %NULL on failure. 808 803 */ 809 804 static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent, 810 805 const unsigned char *name, ··· 878 871 * @ns: the namespace tag to use 879 872 * 880 873 * Look for kernfs_node with name @name under @parent and get a reference 881 - * if found. This function may sleep and returns pointer to the found 882 - * kernfs_node on success, %NULL on failure. 874 + * if found. This function may sleep. 875 + * 876 + * Return: pointer to the found kernfs_node on success, %NULL on failure. 883 877 */ 884 878 struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, 885 879 const char *name, const void *ns) ··· 904 896 * @ns: the namespace tag to use 905 897 * 906 898 * Look for kernfs_node with path @path under @parent and get a reference 907 - * if found. This function may sleep and returns pointer to the found 908 - * kernfs_node on success, %NULL on failure. 899 + * if found. This function may sleep. 900 + * 901 + * Return: pointer to the found kernfs_node on success, %NULL on failure. 909 902 */ 910 903 struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent, 911 904 const char *path, const void *ns) ··· 928 919 * @flags: KERNFS_ROOT_* flags 929 920 * @priv: opaque data associated with the new directory 930 921 * 931 - * Returns the root of the new hierarchy on success, ERR_PTR() value on 922 + * Return: the root of the new hierarchy on success, ERR_PTR() value on 932 923 * failure. 
933 924 */ 934 925 struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, ··· 1000 991 /** 1001 992 * kernfs_root_to_node - return the kernfs_node associated with a kernfs_root 1002 993 * @root: root to use to lookup 994 + * 995 + * Return: @root's kernfs_node 1003 996 */ 1004 997 struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root) 1005 998 { ··· 1018 1007 * @priv: opaque data associated with the new directory 1019 1008 * @ns: optional namespace tag of the directory 1020 1009 * 1021 - * Returns the created node on success, ERR_PTR() value on failure. 1010 + * Return: the created node on success, ERR_PTR() value on failure. 1022 1011 */ 1023 1012 struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, 1024 1013 const char *name, umode_t mode, ··· 1052 1041 * @parent: parent in which to create a new directory 1053 1042 * @name: name of the new directory 1054 1043 * 1055 - * Returns the created node on success, ERR_PTR() value on failure. 1044 + * Return: the created node on success, ERR_PTR() value on failure. 1056 1045 */ 1057 1046 struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, 1058 1047 const char *name) ··· 1094 1083 1095 1084 /* If the kernfs parent node has changed discard and 1096 1085 * proceed to ->lookup. 1086 + * 1087 + * There's nothing special needed here when getting the 1088 + * dentry parent, even if a concurrent rename is in 1089 + * progress. That's because the dentry is negative so 1090 + * it can only be the target of the rename and it will 1091 + * be doing a d_move() not a replace. Consequently the 1092 + * dentry d_parent won't change over the d_move(). 1093 + * 1094 + * Also kernfs negative dentries transitioning from 1095 + * negative to positive during revalidate won't happen 1096 + * because they are invalidated on containing directory 1097 + * changes and the lookup re-done so that a new positive 1098 + * dentry can be properly created. 
1097 1099 */ 1098 - spin_lock(&dentry->d_lock); 1100 + root = kernfs_root_from_sb(dentry->d_sb); 1101 + down_read(&root->kernfs_rwsem); 1099 1102 parent = kernfs_dentry_node(dentry->d_parent); 1100 1103 if (parent) { 1101 - spin_unlock(&dentry->d_lock); 1102 - root = kernfs_root(parent); 1103 - down_read(&root->kernfs_rwsem); 1104 1104 if (kernfs_dir_changed(parent, dentry)) { 1105 1105 up_read(&root->kernfs_rwsem); 1106 1106 return 0; 1107 1107 } 1108 - up_read(&root->kernfs_rwsem); 1109 - } else 1110 - spin_unlock(&dentry->d_lock); 1108 + } 1109 + up_read(&root->kernfs_rwsem); 1111 1110 1112 1111 /* The kernfs parent node hasn't changed, leave the 1113 1112 * dentry negative and return success. ··· 1311 1290 * Find the next descendant to visit for post-order traversal of @root's 1312 1291 * descendants. @root is included in the iteration and the last node to be 1313 1292 * visited. 1293 + * 1294 + * Return: the next descendant to visit or %NULL when done. 1314 1295 */ 1315 1296 static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos, 1316 1297 struct kernfs_node *root) ··· 1576 1553 * the whole kernfs_ops which won the arbitration. This can be used to 1577 1554 * guarantee, for example, all concurrent writes to a "delete" file to 1578 1555 * finish only after the whole operation is complete. 1556 + * 1557 + * Return: %true if @kn is removed by this call, otherwise %false. 1579 1558 */ 1580 1559 bool kernfs_remove_self(struct kernfs_node *kn) 1581 1560 { ··· 1638 1613 * @ns: namespace tag of the kernfs_node to remove 1639 1614 * 1640 1615 * Look for the kernfs_node with @name and @ns under @parent and remove it. 1641 - * Returns 0 on success, -ENOENT if such entry doesn't exist. 1616 + * 1617 + * Return: %0 on success, -ENOENT if such entry doesn't exist. 
1642 1618 */ 1643 1619 int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, 1644 1620 const void *ns) ··· 1677 1651 * @new_parent: new parent to put @sd under 1678 1652 * @new_name: new name 1679 1653 * @new_ns: new namespace tag 1654 + * 1655 + * Return: %0 on success, -errno on failure. 1680 1656 */ 1681 1657 int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, 1682 1658 const char *new_name, const void *new_ns)
+11 -7
fs/kernfs/file.c
··· 33 33 * pending queue is implemented as a singly linked list of kernfs_nodes. 34 34 * The list is terminated with the self pointer so that whether a 35 35 * kernfs_node is on the list or not can be determined by testing the next 36 - * pointer for NULL. 36 + * pointer for %NULL. 37 37 */ 38 38 #define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list) 39 39 ··· 59 59 } 60 60 61 61 /** 62 - * of_on - Return the kernfs_open_node of the specified kernfs_open_file 63 - * @of: taret kernfs_open_file 62 + * of_on - Get the kernfs_open_node of the specified kernfs_open_file 63 + * @of: target kernfs_open_file 64 + * 65 + * Return: the kernfs_open_node of the kernfs_open_file 64 66 */ 65 67 static struct kernfs_open_node *of_on(struct kernfs_open_file *of) 66 68 { ··· 84 82 * outside RCU read-side critical section. 85 83 * 86 84 * The caller needs to make sure that kernfs_open_file_mutex is held. 85 + * 86 + * Return: @kn->attr.open when kernfs_open_file_mutex is held. 87 87 */ 88 88 static struct kernfs_open_node * 89 89 kernfs_deref_open_node_locked(struct kernfs_node *kn) ··· 552 548 * If @kn->attr.open exists, increment its reference count; otherwise, 553 549 * create one. @of is chained to the files list. 554 550 * 555 - * LOCKING: 551 + * Locking: 556 552 * Kernel thread context (may sleep). 557 553 * 558 - * RETURNS: 559 - * 0 on success, -errno on failure. 554 + * Return: 555 + * %0 on success, -errno on failure. 560 556 */ 561 557 static int kernfs_get_open_node(struct kernfs_node *kn, 562 558 struct kernfs_open_file *of) ··· 1028 1024 * @ns: optional namespace tag of the file 1029 1025 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep 1030 1026 * 1031 - * Returns the created node on success, ERR_PTR() value on error. 1027 + * Return: the created node on success, ERR_PTR() value on error. 1032 1028 */ 1033 1029 struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, 1034 1030 const char *name,
+4 -8
fs/kernfs/inode.c
··· 94 94 * @kn: target node 95 95 * @iattr: iattr to set 96 96 * 97 - * Returns 0 on success, -errno on failure. 97 + * Return: %0 on success, -errno on failure. 98 98 */ 99 99 int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr) 100 100 { ··· 190 190 struct kernfs_root *root = kernfs_root(kn); 191 191 192 192 down_read(&root->kernfs_rwsem); 193 - spin_lock(&inode->i_lock); 194 193 kernfs_refresh_inode(kn, inode); 195 194 generic_fillattr(&init_user_ns, inode, stat); 196 - spin_unlock(&inode->i_lock); 197 195 up_read(&root->kernfs_rwsem); 198 196 199 197 return 0; ··· 239 241 * allocated and basics are initialized. New inode is returned 240 242 * locked. 241 243 * 242 - * LOCKING: 244 + * Locking: 243 245 * Kernel thread context (may sleep). 244 246 * 245 - * RETURNS: 246 - * Pointer to allocated inode on success, NULL on failure. 247 + * Return: 248 + * Pointer to allocated inode on success, %NULL on failure. 247 249 */ 248 250 struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn) 249 251 { ··· 286 288 root = kernfs_root(kn); 287 289 288 290 down_read(&root->kernfs_rwsem); 289 - spin_lock(&inode->i_lock); 290 291 kernfs_refresh_inode(kn, inode); 291 292 ret = generic_permission(&init_user_ns, inode, mask); 292 - spin_unlock(&inode->i_lock); 293 293 up_read(&root->kernfs_rwsem); 294 294 295 295 return ret;
+1 -1
fs/kernfs/kernfs-internal.h
··· 58 58 * kernfs_root - find out the kernfs_root a kernfs_node belongs to 59 59 * @kn: kernfs_node of interest 60 60 * 61 - * Return the kernfs_root @kn belongs to. 61 + * Return: the kernfs_root @kn belongs to. 62 62 */ 63 63 static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn) 64 64 {
+7 -3
fs/kernfs/mount.c
··· 153 153 * kernfs_root_from_sb - determine kernfs_root associated with a super_block 154 154 * @sb: the super_block in question 155 155 * 156 - * Return the kernfs_root associated with @sb. If @sb is not a kernfs one, 156 + * Return: the kernfs_root associated with @sb. If @sb is not a kernfs one, 157 157 * %NULL is returned. 158 158 */ 159 159 struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) ··· 167 167 * find the next ancestor in the path down to @child, where @parent was the 168 168 * ancestor whose descendant we want to find. 169 169 * 170 - * Say the path is /a/b/c/d. @child is d, @parent is NULL. We return the root 170 + * Say the path is /a/b/c/d. @child is d, @parent is %NULL. We return the root 171 171 * node. If @parent is b, then we return the node for c. 172 172 * Passing in d as @parent is not ok. 173 173 */ ··· 192 192 * kernfs_node_dentry - get a dentry for the given kernfs_node 193 193 * @kn: kernfs_node for which a dentry is needed 194 194 * @sb: the kernfs super_block 195 + * 196 + * Return: the dentry pointer 195 197 */ 196 198 struct dentry *kernfs_node_dentry(struct kernfs_node *kn, 197 199 struct super_block *sb) ··· 298 296 * kernfs_super_ns - determine the namespace tag of a kernfs super_block 299 297 * @sb: super_block of interest 300 298 * 301 - * Return the namespace tag associated with kernfs super_block @sb. 299 + * Return: the namespace tag associated with kernfs super_block @sb. 302 300 */ 303 301 const void *kernfs_super_ns(struct super_block *sb) 304 302 { ··· 315 313 * implementation, which should set the specified ->@fs_type and ->@flags, and 316 314 * specify the hierarchy and namespace tag to mount via ->@root and ->@ns, 317 315 * respectively. 316 + * 317 + * Return: %0 on success, -errno on failure. 318 318 */ 319 319 int kernfs_get_tree(struct fs_context *fc) 320 320 {
+1 -1
fs/kernfs/symlink.c
··· 19 19 * @name: name of the symlink 20 20 * @target: target node for the symlink to point to 21 21 * 22 - * Returns the created node on success, ERR_PTR() value on error. 22 + * Return: the created node on success, ERR_PTR() value on error. 23 23 * Ownership of the link matches ownership of the target. 24 24 */ 25 25 struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
+2 -2
fs/nfs/sysfs.c
··· 26 26 } 27 27 28 28 static const struct kobj_ns_type_operations *nfs_netns_object_child_ns_type( 29 - struct kobject *kobj) 29 + const struct kobject *kobj) 30 30 { 31 31 return &net_ns_type_operations; 32 32 } ··· 130 130 kfree(c); 131 131 } 132 132 133 - static const void *nfs_netns_client_namespace(struct kobject *kobj) 133 + static const void *nfs_netns_client_namespace(const struct kobject *kobj) 134 134 { 135 135 return container_of(kobj, struct nfs_netns_client, kobject)->net; 136 136 }
+1 -1
fs/pstore/pmsg.c
··· 46 46 #undef pr_fmt 47 47 #define pr_fmt(fmt) PMSG_NAME ": " fmt 48 48 49 - static char *pmsg_devnode(struct device *dev, umode_t *mode) 49 + static char *pmsg_devnode(const struct device *dev, umode_t *mode) 50 50 { 51 51 if (mode) 52 52 *mode = 0220;
+94 -140
include/asm-generic/vmlinux.lds.h
··· 199 199 # endif 200 200 #endif 201 201 202 + #define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) \ 203 + _BEGIN_##_label_ = .; \ 204 + KEEP(*(_sec_)) \ 205 + _END_##_label_ = .; 206 + 207 + #define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) \ 208 + _label_##_BEGIN_ = .; \ 209 + KEEP(*(_sec_)) \ 210 + _label_##_END_ = .; 211 + 212 + #define BOUNDED_SECTION_BY(_sec_, _label_) \ 213 + BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop) 214 + 215 + #define BOUNDED_SECTION(_sec) BOUNDED_SECTION_BY(_sec, _sec) 216 + 217 + #define HEADERED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \ 218 + _HDR_##_label_ = .; \ 219 + KEEP(*(.gnu.linkonce.##_sec_)) \ 220 + BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) 221 + 222 + #define HEADERED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \ 223 + _label_##_HDR_ = .; \ 224 + KEEP(*(.gnu.linkonce.##_sec_)) \ 225 + BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) 226 + 227 + #define HEADERED_SECTION_BY(_sec_, _label_) \ 228 + HEADERED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop) 229 + 230 + #define HEADERED_SECTION(_sec) HEADERED_SECTION_BY(_sec, _sec) 231 + 202 232 #ifdef CONFIG_TRACE_BRANCH_PROFILING 203 - #define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ 204 - KEEP(*(_ftrace_annotated_branch)) \ 205 - __stop_annotated_branch_profile = .; 233 + #define LIKELY_PROFILE() \ 234 + BOUNDED_SECTION_BY(_ftrace_annotated_branch, _annotated_branch_profile) 206 235 #else 207 236 #define LIKELY_PROFILE() 208 237 #endif 209 238 210 239 #ifdef CONFIG_PROFILE_ALL_BRANCHES 211 - #define BRANCH_PROFILE() __start_branch_profile = .; \ 212 - KEEP(*(_ftrace_branch)) \ 213 - __stop_branch_profile = .; 240 + #define BRANCH_PROFILE() \ 241 + BOUNDED_SECTION_BY(_ftrace_branch, _branch_profile) 214 242 #else 215 243 #define BRANCH_PROFILE() 216 244 #endif 217 245 218 246 #ifdef CONFIG_KPROBES 219 - #define KPROBE_BLACKLIST() . 
= ALIGN(8); \ 220 - __start_kprobe_blacklist = .; \ 221 - KEEP(*(_kprobe_blacklist)) \ 222 - __stop_kprobe_blacklist = .; 247 + #define KPROBE_BLACKLIST() \ 248 + . = ALIGN(8); \ 249 + BOUNDED_SECTION(_kprobe_blacklist) 223 250 #else 224 251 #define KPROBE_BLACKLIST() 225 252 #endif 226 253 227 254 #ifdef CONFIG_FUNCTION_ERROR_INJECTION 228 - #define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ 229 - __start_error_injection_whitelist = .; \ 230 - KEEP(*(_error_injection_whitelist)) \ 231 - __stop_error_injection_whitelist = .; 255 + #define ERROR_INJECT_WHITELIST() \ 256 + STRUCT_ALIGN(); \ 257 + BOUNDED_SECTION(_error_injection_whitelist) 232 258 #else 233 259 #define ERROR_INJECT_WHITELIST() 234 260 #endif 235 261 236 262 #ifdef CONFIG_EVENT_TRACING 237 - #define FTRACE_EVENTS() . = ALIGN(8); \ 238 - __start_ftrace_events = .; \ 239 - KEEP(*(_ftrace_events)) \ 240 - __stop_ftrace_events = .; \ 241 - __start_ftrace_eval_maps = .; \ 242 - KEEP(*(_ftrace_eval_map)) \ 243 - __stop_ftrace_eval_maps = .; 263 + #define FTRACE_EVENTS() \ 264 + . = ALIGN(8); \ 265 + BOUNDED_SECTION(_ftrace_events) \ 266 + BOUNDED_SECTION_BY(_ftrace_eval_map, _ftrace_eval_maps) 244 267 #else 245 268 #define FTRACE_EVENTS() 246 269 #endif 247 270 248 271 #ifdef CONFIG_TRACING 249 - #define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \ 250 - KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \ 251 - __stop___trace_bprintk_fmt = .; 252 - #define TRACEPOINT_STR() __start___tracepoint_str = .; \ 253 - KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \ 254 - __stop___tracepoint_str = .; 272 + #define TRACE_PRINTKS() BOUNDED_SECTION_BY(__trace_printk_fmt, ___trace_bprintk_fmt) 273 + #define TRACEPOINT_STR() BOUNDED_SECTION_BY(__tracepoint_str, ___tracepoint_str) 255 274 #else 256 275 #define TRACE_PRINTKS() 257 276 #define TRACEPOINT_STR() 258 277 #endif 259 278 260 279 #ifdef CONFIG_FTRACE_SYSCALLS 261 - #define TRACE_SYSCALLS() . 
= ALIGN(8); \ 262 - __start_syscalls_metadata = .; \ 263 - KEEP(*(__syscalls_metadata)) \ 264 - __stop_syscalls_metadata = .; 280 + #define TRACE_SYSCALLS() \ 281 + . = ALIGN(8); \ 282 + BOUNDED_SECTION_BY(__syscalls_metadata, _syscalls_metadata) 265 283 #else 266 284 #define TRACE_SYSCALLS() 267 285 #endif 268 286 269 287 #ifdef CONFIG_BPF_EVENTS 270 - #define BPF_RAW_TP() STRUCT_ALIGN(); \ 271 - __start__bpf_raw_tp = .; \ 272 - KEEP(*(__bpf_raw_tp_map)) \ 273 - __stop__bpf_raw_tp = .; 288 + #define BPF_RAW_TP() STRUCT_ALIGN(); \ 289 + BOUNDED_SECTION_BY(__bpf_raw_tp_map, __bpf_raw_tp) 274 290 #else 275 291 #define BPF_RAW_TP() 276 292 #endif 277 293 278 294 #ifdef CONFIG_SERIAL_EARLYCON 279 - #define EARLYCON_TABLE() . = ALIGN(8); \ 280 - __earlycon_table = .; \ 281 - KEEP(*(__earlycon_table)) \ 282 - __earlycon_table_end = .; 295 + #define EARLYCON_TABLE() \ 296 + . = ALIGN(8); \ 297 + BOUNDED_SECTION_POST_LABEL(__earlycon_table, __earlycon_table, , _end) 283 298 #else 284 299 #define EARLYCON_TABLE() 285 300 #endif 286 301 287 302 #ifdef CONFIG_SECURITY 288 - #define LSM_TABLE() . = ALIGN(8); \ 289 - __start_lsm_info = .; \ 290 - KEEP(*(.lsm_info.init)) \ 291 - __end_lsm_info = .; 292 - #define EARLY_LSM_TABLE() . = ALIGN(8); \ 293 - __start_early_lsm_info = .; \ 294 - KEEP(*(.early_lsm_info.init)) \ 295 - __end_early_lsm_info = .; 303 + #define LSM_TABLE() \ 304 + . = ALIGN(8); \ 305 + BOUNDED_SECTION_PRE_LABEL(.lsm_info.init, _lsm_info, __start, __end) 306 + 307 + #define EARLY_LSM_TABLE() \ 308 + . = ALIGN(8); \ 309 + BOUNDED_SECTION_PRE_LABEL(.early_lsm_info.init, _early_lsm_info, __start, __end) 296 310 #else 297 311 #define LSM_TABLE() 298 312 #define EARLY_LSM_TABLE() ··· 332 318 #ifdef CONFIG_ACPI 333 319 #define ACPI_PROBE_TABLE(name) \ 334 320 . 
= ALIGN(8); \ 335 - __##name##_acpi_probe_table = .; \ 336 - KEEP(*(__##name##_acpi_probe_table)) \ 337 - __##name##_acpi_probe_table_end = .; 321 + BOUNDED_SECTION_POST_LABEL(__##name##_acpi_probe_table, \ 322 + __##name##_acpi_probe_table,, _end) 338 323 #else 339 324 #define ACPI_PROBE_TABLE(name) 340 325 #endif ··· 341 328 #ifdef CONFIG_THERMAL 342 329 #define THERMAL_TABLE(name) \ 343 330 . = ALIGN(8); \ 344 - __##name##_thermal_table = .; \ 345 - KEEP(*(__##name##_thermal_table)) \ 346 - __##name##_thermal_table_end = .; 331 + BOUNDED_SECTION_POST_LABEL(__##name##_thermal_table, \ 332 + __##name##_thermal_table,, _end) 347 333 #else 348 334 #define THERMAL_TABLE(name) 349 335 #endif ··· 372 360 *(__tracepoints) \ 373 361 /* implement dynamic printk debug */ \ 374 362 . = ALIGN(8); \ 375 - __start___dyndbg_classes = .; \ 376 - KEEP(*(__dyndbg_classes)) \ 377 - __stop___dyndbg_classes = .; \ 378 - __start___dyndbg = .; \ 379 - KEEP(*(__dyndbg)) \ 380 - __stop___dyndbg = .; \ 363 + BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes) \ 364 + BOUNDED_SECTION_BY(__dyndbg, ___dyndbg) \ 381 365 LIKELY_PROFILE() \ 382 366 BRANCH_PROFILE() \ 383 367 TRACE_PRINTKS() \ ··· 416 408 417 409 #define JUMP_TABLE_DATA \ 418 410 . = ALIGN(8); \ 419 - __start___jump_table = .; \ 420 - KEEP(*(__jump_table)) \ 421 - __stop___jump_table = .; 411 + BOUNDED_SECTION_BY(__jump_table, ___jump_table) 422 412 423 413 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE 424 414 #define STATIC_CALL_DATA \ 425 415 . 
= ALIGN(8); \ 426 - __start_static_call_sites = .; \ 427 - KEEP(*(.static_call_sites)) \ 428 - __stop_static_call_sites = .; \ 429 - __start_static_call_tramp_key = .; \ 430 - KEEP(*(.static_call_tramp_key)) \ 431 - __stop_static_call_tramp_key = .; 416 + BOUNDED_SECTION_BY(.static_call_sites, _static_call_sites) \ 417 + BOUNDED_SECTION_BY(.static_call_tramp_key, _static_call_tramp_key) 432 418 #else 433 419 #define STATIC_CALL_DATA 434 420 #endif ··· 448 446 #ifdef CONFIG_ARCH_USES_CFI_TRAPS 449 447 #define KCFI_TRAPS \ 450 448 __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \ 451 - __start___kcfi_traps = .; \ 452 - KEEP(*(.kcfi_traps)) \ 453 - __stop___kcfi_traps = .; \ 449 + BOUNDED_SECTION_BY(.kcfi_traps, ___kcfi_traps) \ 454 450 } 455 451 #else 456 452 #define KCFI_TRAPS ··· 466 466 SCHED_DATA \ 467 467 RO_AFTER_INIT_DATA /* Read only after init */ \ 468 468 . = ALIGN(8); \ 469 - __start___tracepoints_ptrs = .; \ 470 - KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ 471 - __stop___tracepoints_ptrs = .; \ 469 + BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \ 472 470 *(__tracepoints_strings)/* Tracepoints: strings */ \ 473 471 } \ 474 472 \ ··· 476 478 \ 477 479 /* PCI quirks */ \ 478 480 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ 479 - __start_pci_fixups_early = .; \ 480 - KEEP(*(.pci_fixup_early)) \ 481 - __end_pci_fixups_early = .; \ 482 - __start_pci_fixups_header = .; \ 483 - KEEP(*(.pci_fixup_header)) \ 484 - __end_pci_fixups_header = .; \ 485 - __start_pci_fixups_final = .; \ 486 - KEEP(*(.pci_fixup_final)) \ 487 - __end_pci_fixups_final = .; \ 488 - __start_pci_fixups_enable = .; \ 489 - KEEP(*(.pci_fixup_enable)) \ 490 - __end_pci_fixups_enable = .; \ 491 - __start_pci_fixups_resume = .; \ 492 - KEEP(*(.pci_fixup_resume)) \ 493 - __end_pci_fixups_resume = .; \ 494 - __start_pci_fixups_resume_early = .; \ 495 - KEEP(*(.pci_fixup_resume_early)) \ 496 - __end_pci_fixups_resume_early = .; \ 497 - 
__start_pci_fixups_suspend = .; \ 498 - KEEP(*(.pci_fixup_suspend)) \ 499 - __end_pci_fixups_suspend = .; \ 500 - __start_pci_fixups_suspend_late = .; \ 501 - KEEP(*(.pci_fixup_suspend_late)) \ 502 - __end_pci_fixups_suspend_late = .; \ 481 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early, _pci_fixups_early, __start, __end) \ 482 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \ 483 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_final, _pci_fixups_final, __start, __end) \ 484 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_enable, _pci_fixups_enable, __start, __end) \ 485 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume, _pci_fixups_resume, __start, __end) \ 486 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend, _pci_fixups_suspend, __start, __end) \ 487 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume_early, _pci_fixups_resume_early, __start, __end) \ 488 + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend_late, _pci_fixups_suspend_late, __start, __end) \ 503 489 } \ 504 490 \ 505 491 FW_LOADER_BUILT_IN_DATA \ ··· 533 551 \ 534 552 /* Built-in module parameters. */ \ 535 553 __param : AT(ADDR(__param) - LOAD_OFFSET) { \ 536 - __start___param = .; \ 537 - KEEP(*(__param)) \ 538 - __stop___param = .; \ 554 + BOUNDED_SECTION_BY(__param, ___param) \ 539 555 } \ 540 556 \ 541 557 /* Built-in module versions. */ \ 542 558 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ 543 - __start___modver = .; \ 544 - KEEP(*(__modver)) \ 545 - __stop___modver = .; \ 559 + BOUNDED_SECTION_BY(__modver, ___modver) \ 546 560 } \ 547 561 \ 548 562 KCFI_TRAPS \ ··· 648 670 #define EXCEPTION_TABLE(align) \ 649 671 . 
= ALIGN(align); \ 650 672 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ 651 - __start___ex_table = .; \ 652 - KEEP(*(__ex_table)) \ 653 - __stop___ex_table = .; \ 673 + BOUNDED_SECTION_BY(__ex_table, ___ex_table) \ 654 674 } 655 675 656 676 /* ··· 657 681 #ifdef CONFIG_DEBUG_INFO_BTF 658 682 #define BTF \ 659 683 .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ 660 - __start_BTF = .; \ 661 - KEEP(*(.BTF)) \ 662 - __stop_BTF = .; \ 684 + BOUNDED_SECTION_BY(.BTF, _BTF) \ 663 685 } \ 664 686 . = ALIGN(4); \ 665 687 .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ ··· 834 860 #define BUG_TABLE \ 835 861 . = ALIGN(8); \ 836 862 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ 837 - __start___bug_table = .; \ 838 - KEEP(*(__bug_table)) \ 839 - __stop___bug_table = .; \ 863 + BOUNDED_SECTION_BY(__bug_table, ___bug_table) \ 840 864 } 841 865 #else 842 866 #define BUG_TABLE ··· 844 872 #define ORC_UNWIND_TABLE \ 845 873 . = ALIGN(4); \ 846 874 .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ 847 - __start_orc_unwind_ip = .; \ 848 - KEEP(*(.orc_unwind_ip)) \ 849 - __stop_orc_unwind_ip = .; \ 875 + BOUNDED_SECTION_BY(.orc_unwind_ip, _orc_unwind_ip) \ 850 876 } \ 851 877 . = ALIGN(2); \ 852 878 .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ 853 - __start_orc_unwind = .; \ 854 - KEEP(*(.orc_unwind)) \ 855 - __stop_orc_unwind = .; \ 879 + BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind) \ 856 880 } \ 857 881 text_size = _etext - _stext; \ 858 882 . = ALIGN(4); \ ··· 866 898 #ifdef CONFIG_FW_LOADER 867 899 #define FW_LOADER_BUILT_IN_DATA \ 868 900 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ 869 - __start_builtin_fw = .; \ 870 - KEEP(*(.builtin_fw)) \ 871 - __end_builtin_fw = .; \ 901 + BOUNDED_SECTION_PRE_LABEL(.builtin_fw, _builtin_fw, __start, __end) \ 872 902 } 873 903 #else 874 904 #define FW_LOADER_BUILT_IN_DATA ··· 876 910 #define TRACEDATA \ 877 911 . 
= ALIGN(4); \ 878 912 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ 879 - __tracedata_start = .; \ 880 - KEEP(*(.tracedata)) \ 881 - __tracedata_end = .; \ 913 + BOUNDED_SECTION_POST_LABEL(.tracedata, __tracedata, _start, _end) \ 882 914 } 883 915 #else 884 916 #define TRACEDATA ··· 885 921 #ifdef CONFIG_PRINTK_INDEX 886 922 #define PRINTK_INDEX \ 887 923 .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \ 888 - __start_printk_index = .; \ 889 - *(.printk_index) \ 890 - __stop_printk_index = .; \ 924 + BOUNDED_SECTION_BY(.printk_index, _printk_index) \ 891 925 } 892 926 #else 893 927 #define PRINTK_INDEX ··· 893 931 894 932 #define NOTES \ 895 933 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ 896 - __start_notes = .; \ 897 - KEEP(*(.note.*)) \ 898 - __stop_notes = .; \ 934 + BOUNDED_SECTION_BY(.note.*, _notes) \ 899 935 } NOTES_HEADERS \ 900 936 NOTES_HEADERS_RESTORE 901 937 902 938 #define INIT_SETUP(initsetup_align) \ 903 939 . = ALIGN(initsetup_align); \ 904 - __setup_start = .; \ 905 - KEEP(*(.init.setup)) \ 906 - __setup_end = .; 940 + BOUNDED_SECTION_POST_LABEL(.init.setup, __setup, _start, _end) 907 941 908 942 #define INIT_CALLS_LEVEL(level) \ 909 943 __initcall##level##_start = .; \ ··· 921 963 __initcall_end = .; 922 964 923 965 #define CON_INITCALL \ 924 - __con_initcall_start = .; \ 925 - KEEP(*(.con_initcall.init)) \ 926 - __con_initcall_end = .; 966 + BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end) 927 967 928 968 /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ 929 969 #define KUNIT_TABLE() \ 930 970 . = ALIGN(8); \ 931 - __kunit_suites_start = .; \ 932 - KEEP(*(.kunit_test_suites)) \ 933 - __kunit_suites_end = .; 971 + BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end) 934 972 935 973 #ifdef CONFIG_BLK_DEV_INITRD 936 974 #define INIT_RAM_FS \
+11 -13
include/linux/container_of.h
··· 13 13 * @type: the type of the container struct this is embedded in. 14 14 * @member: the name of the member within the struct. 15 15 * 16 + * WARNING: any const qualifier of @ptr is lost. 16 17 */ 17 18 #define container_of(ptr, type, member) ({ \ 18 19 void *__mptr = (void *)(ptr); \ ··· 23 22 ((type *)(__mptr - offsetof(type, member))); }) 24 23 25 24 /** 26 - * container_of_safe - cast a member of a structure out to the containing structure 27 - * @ptr: the pointer to the member. 28 - * @type: the type of the container struct this is embedded in. 29 - * @member: the name of the member within the struct. 30 - * 31 - * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. 25 + * container_of_const - cast a member of a structure out to the containing 26 + * structure and preserve the const-ness of the pointer 27 + * @ptr: the pointer to the member 28 + * @type: the type of the container struct this is embedded in. 29 + * @member: the name of the member within the struct. 32 30 */ 33 - #define container_of_safe(ptr, type, member) ({ \ 34 - void *__mptr = (void *)(ptr); \ 35 - static_assert(__same_type(*(ptr), ((type *)0)->member) || \ 36 - __same_type(*(ptr), void), \ 37 - "pointer type mismatch in container_of_safe()"); \ 38 - IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \ 39 - ((type *)(__mptr - offsetof(type, member))); }) 31 + #define container_of_const(ptr, type, member) \ 32 + _Generic(ptr, \ 33 + const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)),\ 34 + default: ((type *)container_of(ptr, type, member)) \ 35 + ) 40 36 41 37 #endif /* _LINUX_CONTAINER_OF_H */
+1 -8
include/linux/device.h
··· 679 679 bool supplier_preactivated; /* Owned by consumer probe. */ 680 680 }; 681 681 682 - static inline struct device *kobj_to_dev(struct kobject *kobj) 683 - { 684 - return container_of(kobj, struct device, kobj); 685 - } 682 + #define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj) 686 683 687 684 /** 688 685 * device_iommu_mapped - Returns true when the device DMA is translated ··· 1042 1045 1043 1046 int __must_check devm_device_add_groups(struct device *dev, 1044 1047 const struct attribute_group **groups); 1045 - void devm_device_remove_groups(struct device *dev, 1046 - const struct attribute_group **groups); 1047 1048 int __must_check devm_device_add_group(struct device *dev, 1048 1049 const struct attribute_group *grp); 1049 - void devm_device_remove_group(struct device *dev, 1050 - const struct attribute_group *grp); 1051 1050 1052 1051 /* 1053 1052 * Platform "fixup" functions - allow the platform to have their say
+4 -4
include/linux/device/class.h
··· 59 59 const struct attribute_group **dev_groups; 60 60 struct kobject *dev_kobj; 61 61 62 - int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); 63 - char *(*devnode)(struct device *dev, umode_t *mode); 62 + int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env); 63 + char *(*devnode)(const struct device *dev, umode_t *mode); 64 64 65 65 void (*class_release)(struct class *class); 66 66 void (*dev_release)(struct device *dev); ··· 68 68 int (*shutdown_pre)(struct device *dev); 69 69 70 70 const struct kobj_ns_type_operations *ns_type; 71 - const void *(*namespace)(struct device *dev); 71 + const void *(*namespace)(const struct device *dev); 72 72 73 - void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid); 73 + void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid); 74 74 75 75 const struct dev_pm_ops *pm; 76 76
+1 -1
include/linux/ioport.h
··· 155 155 156 156 /* helpers to define resources */ 157 157 #define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ 158 - { \ 158 + (struct resource) { \ 159 159 .start = (_start), \ 160 160 .end = (_start) + (_size) - 1, \ 161 161 .name = (_name), \
+9 -9
include/linux/kobject.h
··· 112 112 struct kobject *kobj); 113 113 extern void kobject_put(struct kobject *kobj); 114 114 115 - extern const void *kobject_namespace(struct kobject *kobj); 116 - extern void kobject_get_ownership(struct kobject *kobj, 115 + extern const void *kobject_namespace(const struct kobject *kobj); 116 + extern void kobject_get_ownership(const struct kobject *kobj, 117 117 kuid_t *uid, kgid_t *gid); 118 - extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); 118 + extern char *kobject_get_path(const struct kobject *kobj, gfp_t flag); 119 119 120 120 struct kobj_type { 121 121 void (*release)(struct kobject *kobj); 122 122 const struct sysfs_ops *sysfs_ops; 123 123 const struct attribute_group **default_groups; 124 - const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); 125 - const void *(*namespace)(struct kobject *kobj); 126 - void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); 124 + const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj); 125 + const void *(*namespace)(const struct kobject *kobj); 126 + void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid); 127 127 }; 128 128 129 129 struct kobj_uevent_env { ··· 135 135 }; 136 136 137 137 struct kset_uevent_ops { 138 - int (* const filter)(struct kobject *kobj); 139 - const char *(* const name)(struct kobject *kobj); 138 + int (* const filter)(const struct kobject *kobj); 139 + const char *(* const name)(const struct kobject *kobj); 140 140 int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env); 141 141 }; 142 142 ··· 198 198 kobject_put(&k->kobj); 199 199 } 200 200 201 - static inline const struct kobj_type *get_ktype(struct kobject *kobj) 201 + static inline const struct kobj_type *get_ktype(const struct kobject *kobj) 202 202 { 203 203 return kobj->ktype; 204 204 }
+2 -2
include/linux/kobject_ns.h
··· 47 47 48 48 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); 49 49 int kobj_ns_type_registered(enum kobj_ns_type type); 50 - const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); 51 - const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); 50 + const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent); 51 + const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj); 52 52 53 53 bool kobj_ns_current_may_mount(enum kobj_ns_type type); 54 54 void *kobj_ns_grab_current(enum kobj_ns_type type);
+1 -1
include/linux/mISDNif.h
··· 586 586 void *); 587 587 extern void mISDN_unregister_clock(struct mISDNclock *); 588 588 589 - static inline struct mISDNdevice *dev_to_mISDN(struct device *dev) 589 + static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev) 590 590 { 591 591 if (dev) 592 592 return dev_get_drvdata(dev);
+2 -2
include/linux/of_device.h
··· 35 35 extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len); 36 36 extern int of_device_request_module(struct device *dev); 37 37 38 - extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); 38 + extern void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env); 39 39 extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); 40 40 41 41 static inline struct device_node *of_cpu_device_node_get(int cpu) ··· 64 64 return 0; 65 65 } 66 66 67 - static inline void of_device_uevent(struct device *dev, 67 + static inline void of_device_uevent(const struct device *dev, 68 68 struct kobj_uevent_env *env) { } 69 69 70 70 static inline const void *of_device_get_match_data(const struct device *dev)
+41 -43
include/linux/property.h
··· 12 12 13 13 #include <linux/bits.h> 14 14 #include <linux/fwnode.h> 15 + #include <linux/stddef.h> 15 16 #include <linux/types.h> 16 17 17 18 struct device; ··· 33 32 DEV_DMA_COHERENT, 34 33 }; 35 34 36 - struct fwnode_handle *dev_fwnode(const struct device *dev); 35 + const struct fwnode_handle *__dev_fwnode_const(const struct device *dev); 36 + struct fwnode_handle *__dev_fwnode(struct device *dev); 37 + #define dev_fwnode(dev) \ 38 + _Generic((dev), \ 39 + const struct device *: __dev_fwnode_const, \ 40 + struct device *: __dev_fwnode)(dev) 37 41 38 42 bool device_property_present(struct device *dev, const char *propname); 39 43 int device_property_read_u8_array(struct device *dev, const char *propname, ··· 123 117 for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ 124 118 child = fwnode_get_next_available_child_node(fwnode, child)) 125 119 126 - struct fwnode_handle *device_get_next_child_node( 127 - struct device *dev, struct fwnode_handle *child); 120 + struct fwnode_handle *device_get_next_child_node(const struct device *dev, 121 + struct fwnode_handle *child); 128 122 129 123 #define device_for_each_child_node(dev, child) \ 130 124 for (child = device_get_next_child_node(dev, NULL); child; \ 131 125 child = device_get_next_child_node(dev, child)) 132 126 133 - struct fwnode_handle *fwnode_get_named_child_node( 134 - const struct fwnode_handle *fwnode, const char *childname); 135 - struct fwnode_handle *device_get_named_child_node(struct device *dev, 127 + struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode, 128 + const char *childname); 129 + struct fwnode_handle *device_get_named_child_node(const struct device *dev, 136 130 const char *childname); 137 131 138 132 struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); ··· 141 135 int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); 142 136 int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char 
*name); 143 137 144 - unsigned int device_get_child_node_count(struct device *dev); 138 + unsigned int device_get_child_node_count(const struct device *dev); 145 139 146 140 static inline bool device_property_read_bool(struct device *dev, 147 141 const char *propname) ··· 312 306 * crafted to avoid gcc-4.4.4's problems with initialization of anon unions 313 307 * and structs. 314 308 */ 315 - 316 - #define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \ 317 - sizeof(((struct property_entry *)NULL)->value._elem_[0]) 318 - 319 - #define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \ 320 - _val_, _len_) \ 321 - (struct property_entry) { \ 322 - .name = _name_, \ 323 - .length = (_len_) * (_elsize_), \ 324 - .type = DEV_PROP_##_Type_, \ 325 - { .pointer = _val_ }, \ 309 + #define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_) \ 310 + (struct property_entry) { \ 311 + .name = _name_, \ 312 + .length = (_len_) * sizeof_field(struct property_entry, value._elem_[0]), \ 313 + .type = DEV_PROP_##_Type_, \ 314 + { .pointer = _val_ }, \ 326 315 } 327 - 328 - #define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\ 329 - __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ 330 - __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ 331 - _Type_, _val_, _len_) 332 316 333 317 #define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \ 334 318 __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_) ··· 330 334 __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_) 331 335 #define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \ 332 336 __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_) 337 + 333 338 #define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \ 334 - __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ 335 - sizeof(struct software_node_ref_args), \ 336 - REF, _val_, _len_) 339 + (struct property_entry) { \ 340 + .name = _name_, \ 341 + .length = (_len_) * sizeof(struct software_node_ref_args), \ 342 + .type = 
DEV_PROP_REF, \ 343 + { .pointer = _val_ }, \ 344 + } 337 345 338 346 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ 339 347 PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) ··· 349 349 PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) 350 350 #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ 351 351 PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) 352 - #define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \ 352 + #define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \ 353 353 PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) 354 354 355 355 #define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \ 356 356 (struct property_entry) { \ 357 357 .name = _name_, \ 358 - .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ 358 + .length = sizeof_field(struct property_entry, value._elem_[0]), \ 359 359 .is_inline = true, \ 360 360 .type = DEV_PROP_##_Type_, \ 361 361 { .value = { ._elem_[0] = _val_ } }, \ ··· 372 372 #define PROPERTY_ENTRY_STRING(_name_, _val_) \ 373 373 __PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_) 374 374 375 - #define PROPERTY_ENTRY_BOOL(_name_) \ 376 - (struct property_entry) { \ 377 - .name = _name_, \ 378 - .is_inline = true, \ 379 - } 380 - 381 375 #define PROPERTY_ENTRY_REF(_name_, _ref_, ...) 
\ 382 376 (struct property_entry) { \ 383 377 .name = _name_, \ ··· 380 386 { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \ 381 387 } 382 388 389 + #define PROPERTY_ENTRY_BOOL(_name_) \ 390 + (struct property_entry) { \ 391 + .name = _name_, \ 392 + .is_inline = true, \ 393 + } 394 + 383 395 struct property_entry * 384 396 property_entries_dup(const struct property_entry *properties); 385 - 386 397 void property_entries_free(const struct property_entry *properties); 387 398 388 - bool device_dma_supported(struct device *dev); 389 - 390 - enum dev_dma_attr device_get_dma_attr(struct device *dev); 399 + bool device_dma_supported(const struct device *dev); 400 + enum dev_dma_attr device_get_dma_attr(const struct device *dev); 391 401 392 402 const void *device_get_match_data(const struct device *dev); 393 403 ··· 411 413 struct fwnode_handle *fwnode_graph_get_remote_endpoint( 412 414 const struct fwnode_handle *fwnode); 413 415 414 - static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode) 416 + static inline bool fwnode_graph_is_endpoint(const struct fwnode_handle *fwnode) 415 417 { 416 418 return fwnode_property_present(fwnode, "remote-endpoint"); 417 419 } ··· 443 445 int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, 444 446 struct fwnode_endpoint *endpoint); 445 447 446 - typedef void *(*devcon_match_fn_t)(struct fwnode_handle *fwnode, const char *id, 448 + typedef void *(*devcon_match_fn_t)(const struct fwnode_handle *fwnode, const char *id, 447 449 void *data); 448 450 449 - void *fwnode_connection_find_match(struct fwnode_handle *fwnode, 451 + void *fwnode_connection_find_match(const struct fwnode_handle *fwnode, 450 452 const char *con_id, void *data, 451 453 devcon_match_fn_t match); 452 454 453 - static inline void *device_connection_find_match(struct device *dev, 455 + static inline void *device_connection_find_match(const struct device *dev, 454 456 const char *con_id, void *data, 455 457 
devcon_match_fn_t match) 456 458 { 457 459 return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match); 458 460 } 459 461 460 - int fwnode_connection_find_matches(struct fwnode_handle *fwnode, 462 + int fwnode_connection_find_matches(const struct fwnode_handle *fwnode, 461 463 const char *con_id, void *data, 462 464 devcon_match_fn_t match, 463 465 void **matches, unsigned int matches_len);
+15 -4
include/linux/usb.h
··· 258 258 struct device *usb_dev; 259 259 struct work_struct reset_ws; /* for resets in atomic context */ 260 260 }; 261 - #define to_usb_interface(d) container_of(d, struct usb_interface, dev) 261 + 262 + #define to_usb_interface(__dev) container_of_const(__dev, struct usb_interface, dev) 262 263 263 264 static inline void *usb_get_intfdata(struct usb_interface *intf) 264 265 { ··· 722 721 u16 hub_delay; 723 722 unsigned use_generic_driver:1; 724 723 }; 725 - #define to_usb_device(d) container_of(d, struct usb_device, dev) 726 724 727 - static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf) 725 + #define to_usb_device(__dev) container_of_const(__dev, struct usb_device, dev) 726 + 727 + static inline struct usb_device *__intf_to_usbdev(struct usb_interface *intf) 728 728 { 729 729 return to_usb_device(intf->dev.parent); 730 730 } 731 + static inline const struct usb_device *__intf_to_usbdev_const(const struct usb_interface *intf) 732 + { 733 + return to_usb_device((const struct device *)intf->dev.parent); 734 + } 735 + 736 + #define interface_to_usbdev(intf) \ 737 + _Generic((intf), \ 738 + const struct usb_interface *: __intf_to_usbdev_const, \ 739 + struct usb_interface *: __intf_to_usbdev)(intf) 731 740 732 741 extern struct usb_device *usb_get_dev(struct usb_device *dev); 733 742 extern void usb_put_dev(struct usb_device *dev); ··· 1295 1284 */ 1296 1285 struct usb_class_driver { 1297 1286 char *name; 1298 - char *(*devnode)(struct device *dev, umode_t *mode); 1287 + char *(*devnode)(const struct device *dev, umode_t *mode); 1299 1288 const struct file_operations *fops; 1300 1289 int minor_base; 1301 1290 };
+18
kernel/ksysfs.c
··· 6 6 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org> 7 7 */ 8 8 9 + #include <asm/byteorder.h> 9 10 #include <linux/kobject.h> 10 11 #include <linux/string.h> 11 12 #include <linux/sysfs.h> ··· 21 20 22 21 #include <linux/rcupdate.h> /* rcu_expedited and rcu_normal */ 23 22 23 + #if defined(__LITTLE_ENDIAN) 24 + #define CPU_BYTEORDER_STRING "little" 25 + #elif defined(__BIG_ENDIAN) 26 + #define CPU_BYTEORDER_STRING "big" 27 + #else 28 + #error Unknown byteorder 29 + #endif 30 + 24 31 #define KERNEL_ATTR_RO(_name) \ 25 32 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 26 33 ··· 42 33 return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum); 43 34 } 44 35 KERNEL_ATTR_RO(uevent_seqnum); 36 + 37 + /* cpu byteorder */ 38 + static ssize_t cpu_byteorder_show(struct kobject *kobj, 39 + struct kobj_attribute *attr, char *buf) 40 + { 41 + return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING); 42 + } 43 + KERNEL_ATTR_RO(cpu_byteorder); 45 44 46 45 #ifdef CONFIG_UEVENT_HELPER 47 46 /* uevent helper program, used during early boot */ ··· 232 215 static struct attribute * kernel_attrs[] = { 233 216 &fscaps_attr.attr, 234 217 &uevent_seqnum_attr.attr, 218 + &cpu_byteorder_attr.attr, 235 219 #ifdef CONFIG_UEVENT_HELPER 236 220 &uevent_helper_attr.attr, 237 221 #endif
+1 -1
kernel/params.c
··· 926 926 .store = module_attr_store, 927 927 }; 928 928 929 - static int uevent_filter(struct kobject *kobj) 929 + static int uevent_filter(const struct kobject *kobj) 930 930 { 931 931 const struct kobj_type *ktype = get_ktype(kobj); 932 932
+7 -10
kernel/resource.c
··· 888 888 if (conflict->end > new->end) 889 889 new->end = conflict->end; 890 890 891 - printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); 891 + pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); 892 892 } 893 893 write_unlock(&resource_lock); 894 894 } ··· 1283 1283 1284 1284 write_unlock(&resource_lock); 1285 1285 1286 - printk(KERN_WARNING "Trying to free nonexistent resource " 1287 - "<%016llx-%016llx>\n", (unsigned long long)start, 1288 - (unsigned long long)end); 1286 + pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end); 1289 1287 } 1290 1288 EXPORT_SYMBOL(__release_region); 1291 1289 ··· 1656 1658 int iomem_map_sanity_check(resource_size_t addr, unsigned long size) 1657 1659 { 1658 1660 struct resource *p = &iomem_resource; 1661 + resource_size_t end = addr + size - 1; 1659 1662 int err = 0; 1660 1663 loff_t l; 1661 1664 ··· 1666 1667 * We can probably skip the resources without 1667 1668 * IORESOURCE_IO attribute? 1668 1669 */ 1669 - if (p->start >= addr + size) 1670 + if (p->start > end) 1670 1671 continue; 1671 1672 if (p->end < addr) 1672 1673 continue; 1673 1674 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && 1674 - PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) 1675 + PFN_DOWN(p->end) >= PFN_DOWN(end)) 1675 1676 continue; 1676 1677 /* 1677 1678 * if a resource is "BUSY", it's not a hardware resource ··· 1682 1683 if (p->flags & IORESOURCE_BUSY) 1683 1684 continue; 1684 1685 1685 - printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n", 1686 - (unsigned long long)addr, 1687 - (unsigned long long)(addr + size - 1), 1688 - p->name, p); 1686 + pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n", 1687 + &addr, &end, p->name, p); 1689 1688 err = -1; 1690 1689 break; 1691 1690 }
+18 -11
lib/kobject.c
··· 25 25 * and thus @kobj should have a namespace tag associated with it. Returns 26 26 * %NULL otherwise. 27 27 */ 28 - const void *kobject_namespace(struct kobject *kobj) 28 + const void *kobject_namespace(const struct kobject *kobj) 29 29 { 30 30 const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj); 31 31 ··· 45 45 * representation of given kobject. Normally used to adjust ownership of 46 46 * objects in a container. 47 47 */ 48 - void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) 48 + void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) 49 49 { 50 50 *uid = GLOBAL_ROOT_UID; 51 51 *gid = GLOBAL_ROOT_GID; ··· 94 94 return 0; 95 95 } 96 96 97 - static int get_kobj_path_length(struct kobject *kobj) 97 + static int get_kobj_path_length(const struct kobject *kobj) 98 98 { 99 99 int length = 1; 100 - struct kobject *parent = kobj; 100 + const struct kobject *parent = kobj; 101 101 102 102 /* walk up the ancestors until we hit the one pointing to the 103 103 * root. ··· 112 112 return length; 113 113 } 114 114 115 - static void fill_kobj_path(struct kobject *kobj, char *path, int length) 115 + static void fill_kobj_path(const struct kobject *kobj, char *path, int length) 116 116 { 117 - struct kobject *parent; 117 + const struct kobject *parent; 118 118 119 119 --length; 120 120 for (parent = kobj; parent; parent = parent->parent) { ··· 136 136 * 137 137 * Return: The newly allocated memory, caller must free with kfree(). 138 138 */ 139 - char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) 139 + char *kobject_get_path(const struct kobject *kobj, gfp_t gfp_mask) 140 140 { 141 141 char *path; 142 142 int len; ··· 834 834 /** 835 835 * kset_register() - Initialize and add a kset. 836 836 * @k: kset. 837 + * 838 + * NOTE: On error, the kset.kobj.name allocated by() kobj_set_name() 839 + * is freed, it can not be used any more. 
837 840 */ 838 841 int kset_register(struct kset *k) 839 842 { ··· 847 844 848 845 kset_init(k); 849 846 err = kobject_add_internal(&k->kobj); 850 - if (err) 847 + if (err) { 848 + kfree_const(k->kobj.name); 849 + /* Set it to NULL to avoid accessing bad pointer in callers. */ 850 + k->kobj.name = NULL; 851 851 return err; 852 + } 852 853 kobject_uevent(&k->kobj, KOBJ_ADD); 853 854 return 0; 854 855 } ··· 907 900 kfree(kset); 908 901 } 909 902 910 - static void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) 903 + static void kset_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) 911 904 { 912 905 if (kobj->parent) 913 906 kobject_get_ownership(kobj->parent, uid, gid); ··· 1039 1032 return registered; 1040 1033 } 1041 1034 1042 - const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent) 1035 + const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent) 1043 1036 { 1044 1037 const struct kobj_ns_type_operations *ops = NULL; 1045 1038 ··· 1049 1042 return ops; 1050 1043 } 1051 1044 1052 - const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj) 1045 + const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj) 1053 1046 { 1054 1047 return kobj_child_ns_ops(kobj->parent); 1055 1048 }
+2 -2
net/atm/atm_sysfs.c
··· 108 108 }; 109 109 110 110 111 - static int atm_uevent(struct device *cdev, struct kobj_uevent_env *env) 111 + static int atm_uevent(const struct device *cdev, struct kobj_uevent_env *env) 112 112 { 113 - struct atm_dev *adev; 113 + const struct atm_dev *adev; 114 114 115 115 if (!cdev) 116 116 return -ENODEV;
+1 -1
net/bridge/br_if.c
··· 262 262 kfree(p); 263 263 } 264 264 265 - static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) 265 + static void brport_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) 266 266 { 267 267 struct net_bridge_port *p = kobj_to_brport(kobj); 268 268
+10 -10
net/core/net-sysfs.c
··· 1020 1020 netdev_put(queue->dev, &queue->dev_tracker); 1021 1021 } 1022 1022 1023 - static const void *rx_queue_namespace(struct kobject *kobj) 1023 + static const void *rx_queue_namespace(const struct kobject *kobj) 1024 1024 { 1025 1025 struct netdev_rx_queue *queue = to_rx_queue(kobj); 1026 1026 struct device *dev = &queue->dev->dev; ··· 1032 1032 return ns; 1033 1033 } 1034 1034 1035 - static void rx_queue_get_ownership(struct kobject *kobj, 1035 + static void rx_queue_get_ownership(const struct kobject *kobj, 1036 1036 kuid_t *uid, kgid_t *gid) 1037 1037 { 1038 1038 const struct net *net = rx_queue_namespace(kobj); ··· 1623 1623 netdev_put(queue->dev, &queue->dev_tracker); 1624 1624 } 1625 1625 1626 - static const void *netdev_queue_namespace(struct kobject *kobj) 1626 + static const void *netdev_queue_namespace(const struct kobject *kobj) 1627 1627 { 1628 1628 struct netdev_queue *queue = to_netdev_queue(kobj); 1629 1629 struct device *dev = &queue->dev->dev; ··· 1635 1635 return ns; 1636 1636 } 1637 1637 1638 - static void netdev_queue_get_ownership(struct kobject *kobj, 1638 + static void netdev_queue_get_ownership(const struct kobject *kobj, 1639 1639 kuid_t *uid, kgid_t *gid) 1640 1640 { 1641 1641 const struct net *net = netdev_queue_namespace(kobj); ··· 1873 1873 }; 1874 1874 EXPORT_SYMBOL_GPL(net_ns_type_operations); 1875 1875 1876 - static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 1876 + static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env) 1877 1877 { 1878 - struct net_device *dev = to_net_dev(d); 1878 + const struct net_device *dev = to_net_dev(d); 1879 1879 int retval; 1880 1880 1881 1881 /* pass interface to uevent. 
*/ ··· 1910 1910 netdev_freemem(dev); 1911 1911 } 1912 1912 1913 - static const void *net_namespace(struct device *d) 1913 + static const void *net_namespace(const struct device *d) 1914 1914 { 1915 - struct net_device *dev = to_net_dev(d); 1915 + const struct net_device *dev = to_net_dev(d); 1916 1916 1917 1917 return dev_net(dev); 1918 1918 } 1919 1919 1920 - static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid) 1920 + static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid) 1921 1921 { 1922 - struct net_device *dev = to_net_dev(d); 1922 + const struct net_device *dev = to_net_dev(d); 1923 1923 const struct net *net = dev_net(dev); 1924 1924 1925 1925 net_ns_get_ownership(net, uid, gid);
+1 -1
net/rfkill/core.c
··· 832 832 kfree(rfkill); 833 833 } 834 834 835 - static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) 835 + static int rfkill_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) 836 836 { 837 837 struct rfkill *rfkill = to_rfkill(dev); 838 838 unsigned long flags;
+4 -4
net/sunrpc/sysfs.c
··· 31 31 } 32 32 33 33 static const struct kobj_ns_type_operations * 34 - rpc_sysfs_object_child_ns_type(struct kobject *kobj) 34 + rpc_sysfs_object_child_ns_type(const struct kobject *kobj) 35 35 { 36 36 return &net_ns_type_operations; 37 37 } ··· 381 381 kfree(xprt); 382 382 } 383 383 384 - static const void *rpc_sysfs_client_namespace(struct kobject *kobj) 384 + static const void *rpc_sysfs_client_namespace(const struct kobject *kobj) 385 385 { 386 386 return container_of(kobj, struct rpc_sysfs_client, kobject)->net; 387 387 } 388 388 389 - static const void *rpc_sysfs_xprt_switch_namespace(struct kobject *kobj) 389 + static const void *rpc_sysfs_xprt_switch_namespace(const struct kobject *kobj) 390 390 { 391 391 return container_of(kobj, struct rpc_sysfs_xprt_switch, kobject)->net; 392 392 } 393 393 394 - static const void *rpc_sysfs_xprt_namespace(struct kobject *kobj) 394 + static const void *rpc_sysfs_xprt_namespace(const struct kobject *kobj) 395 395 { 396 396 return container_of(kobj, struct rpc_sysfs_xprt, 397 397 kobject)->xprt->xprt_net;
+1 -1
net/wireless/sysfs.c
··· 148 148 #define WIPHY_PM_OPS NULL 149 149 #endif 150 150 151 - static const void *wiphy_namespace(struct device *d) 151 + static const void *wiphy_namespace(const struct device *d) 152 152 { 153 153 struct wiphy *wiphy = container_of(d, struct wiphy, dev); 154 154
+1 -1
sound/sound_core.c
··· 30 30 MODULE_AUTHOR("Alan Cox"); 31 31 MODULE_LICENSE("GPL"); 32 32 33 - static char *sound_devnode(struct device *dev, umode_t *mode) 33 + static char *sound_devnode(const struct device *dev, umode_t *mode) 34 34 { 35 35 if (MAJOR(dev->devt) == SOUND_MAJOR) 36 36 return NULL;