Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'driver-core-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH:
"Here are the driver core / firmware changes for 4.2-rc1.

A number of small changes all over the place in the driver core, and
in the firmware subsystem. Nothing really major, full details in the
shortlog. Some of it is a bit of churn, given that the platform
driver probing changes were found to not work well, so they were
reverted.

All of these have been in linux-next for a while with no reported
issues"

* tag 'driver-core-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (31 commits)
Revert "base/platform: Only insert MEM and IO resources"
Revert "base/platform: Continue on insert_resource() error"
Revert "of/platform: Use platform_device interface"
Revert "base/platform: Remove code duplication"
firmware: add missing kfree for work on async call
fs: sysfs: don't pass count == 0 to bin file readers
base:dd - Fix for typo in comment to function driver_deferred_probe_trigger().
base/platform: Remove code duplication
of/platform: Use platform_device interface
base/platform: Continue on insert_resource() error
base/platform: Only insert MEM and IO resources
firmware: use const for remaining firmware names
firmware: fix possible use after free on name on asynchronous request
firmware: check for file truncation on direct firmware loading
firmware: fix __getname() missing failure check
drivers: of/base: move of_init to driver_init
drivers/base: cacheinfo: fix annoying typo when DT nodes are absent
sysfs: disambiguate between "error code" and "failure" in comments
driver-core: fix build for !CONFIG_MODULES
driver-core: make __device_attach() static
...

+372 -84
+1 -1
Documentation/ABI/testing/sysfs-devices-system-cpu
··· 243 243 coherency_line_size: the minimum amount of data in bytes that gets 244 244 transferred from memory to cache 245 245 246 - level: the cache hierarcy in the multi-level cache configuration 246 + level: the cache hierarchy in the multi-level cache configuration 247 247 248 248 number_of_sets: total number of sets in the cache, a set is a 249 249 collection of cache lines with the same cache index
+3
Documentation/kernel-parameters.txt
··· 953 953 auto selects the default scheme, which automatically 954 954 enables eagerfpu restore for xsaveopt. 955 955 956 + module.async_probe [KNL] 957 + Enable asynchronous probe on this module. 958 + 956 959 early_ioremap_debug [KNL] 957 960 Enable debug messages in early_ioremap support. This 958 961 is useful for tracking down temporary early mappings
+4 -3
MAINTAINERS
··· 3450 3450 F: lib/lru_cache.c 3451 3451 F: Documentation/blockdev/drbd/ 3452 3452 3453 - DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS 3453 + DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS 3454 3454 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 3455 3455 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git 3456 3456 S: Supported 3457 3457 F: Documentation/kobject.txt 3458 3458 F: drivers/base/ 3459 - F: fs/sysfs/ 3460 3459 F: fs/debugfs/ 3461 - F: include/linux/kobj* 3460 + F: fs/kernfs/ 3461 + F: fs/sysfs/ 3462 3462 F: include/linux/debugfs.h 3463 + F: include/linux/kobj* 3463 3464 F: lib/kobj* 3464 3465 3465 3466 DRM DRIVERS
+2 -2
arch/powerpc/mm/hugetlbpage.c
··· 336 336 unsigned long gpage_npages[MMU_PAGE_COUNT]; 337 337 338 338 static int __init do_gpage_early_setup(char *param, char *val, 339 - const char *unused) 339 + const char *unused, void *arg) 340 340 { 341 341 static phys_addr_t size; 342 342 unsigned long npages; ··· 385 385 386 386 strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE); 387 387 parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0, 388 - &do_gpage_early_setup); 388 + NULL, &do_gpage_early_setup); 389 389 390 390 /* 391 391 * Walk gpage list in reverse, allocating larger page sizes first.
+1
drivers/base/base.h
··· 116 116 { 117 117 return drv->bus->match ? drv->bus->match(dev, drv) : 1; 118 118 } 119 + extern bool driver_allows_async_probing(struct device_driver *drv); 119 120 120 121 extern int driver_add_groups(struct device_driver *drv, 121 122 const struct attribute_group **groups);
+23 -8
drivers/base/bus.c
··· 10 10 * 11 11 */ 12 12 13 + #include <linux/async.h> 13 14 #include <linux/device.h> 14 15 #include <linux/module.h> 15 16 #include <linux/errno.h> ··· 550 549 { 551 550 struct bus_type *bus = dev->bus; 552 551 struct subsys_interface *sif; 553 - int ret; 554 552 555 553 if (!bus) 556 554 return; 557 555 558 - if (bus->p->drivers_autoprobe) { 559 - ret = device_attach(dev); 560 - WARN_ON(ret < 0); 561 - } 556 + if (bus->p->drivers_autoprobe) 557 + device_initial_probe(dev); 562 558 563 559 mutex_lock(&bus->p->mutex); 564 560 list_for_each_entry(sif, &bus->p->interfaces, node) ··· 657 659 } 658 660 static DRIVER_ATTR_WO(uevent); 659 661 662 + static void driver_attach_async(void *_drv, async_cookie_t cookie) 663 + { 664 + struct device_driver *drv = _drv; 665 + int ret; 666 + 667 + ret = driver_attach(drv); 668 + 669 + pr_debug("bus: '%s': driver %s async attach completed: %d\n", 670 + drv->bus->name, drv->name, ret); 671 + } 672 + 660 673 /** 661 674 * bus_add_driver - Add a driver to the bus. 662 675 * @drv: driver. ··· 700 691 701 692 klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); 702 693 if (drv->bus->p->drivers_autoprobe) { 703 - error = driver_attach(drv); 704 - if (error) 705 - goto out_unregister; 694 + if (driver_allows_async_probing(drv)) { 695 + pr_debug("bus: '%s': probing driver %s asynchronously\n", 696 + drv->bus->name, drv->name); 697 + async_schedule(driver_attach_async, drv); 698 + } else { 699 + error = driver_attach(drv); 700 + if (error) 701 + goto out_unregister; 702 + } 706 703 } 707 704 module_add_driver(drv->owner, drv); 708 705
+2 -2
drivers/base/cacheinfo.c
··· 191 191 if (ret) 192 192 goto free_ci; 193 193 /* 194 - * For systems using DT for cache hierarcy, of_node and shared_cpu_map 194 + * For systems using DT for cache hierarchy, of_node and shared_cpu_map 195 195 * will be set up here only if they are not populated already 196 196 */ 197 197 ret = cache_shared_cpu_map_setup(cpu); 198 198 if (ret) { 199 - pr_warn("Unable to detect cache hierarcy from DT for CPU %d\n", 199 + pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n", 200 200 cpu); 201 201 goto free_ci; 202 202 }
+29
drivers/base/cpu.c
··· 16 16 #include <linux/acpi.h> 17 17 #include <linux/of.h> 18 18 #include <linux/cpufeature.h> 19 + #include <linux/tick.h> 19 20 20 21 #include "base.h" 21 22 ··· 266 265 } 267 266 static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); 268 267 268 + static ssize_t print_cpus_isolated(struct device *dev, 269 + struct device_attribute *attr, char *buf) 270 + { 271 + int n = 0, len = PAGE_SIZE-2; 272 + 273 + n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(cpu_isolated_map)); 274 + 275 + return n; 276 + } 277 + static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL); 278 + 279 + #ifdef CONFIG_NO_HZ_FULL 280 + static ssize_t print_cpus_nohz_full(struct device *dev, 281 + struct device_attribute *attr, char *buf) 282 + { 283 + int n = 0, len = PAGE_SIZE-2; 284 + 285 + n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); 286 + 287 + return n; 288 + } 289 + static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL); 290 + #endif 291 + 269 292 static void cpu_device_release(struct device *dev) 270 293 { 271 294 /* ··· 456 431 &cpu_attrs[2].attr.attr, 457 432 &dev_attr_kernel_max.attr, 458 433 &dev_attr_offline.attr, 434 + &dev_attr_isolated.attr, 435 + #ifdef CONFIG_NO_HZ_FULL 436 + &dev_attr_nohz_full.attr, 437 + #endif 459 438 #ifdef CONFIG_GENERIC_CPU_AUTOPROBE 460 439 &dev_attr_modalias.attr, 461 440 #endif
+148 -25
drivers/base/dd.c
··· 141 141 * more than one device is probing at the same time, it is possible for one 142 142 * probe to complete successfully while another is about to defer. If the second 143 143 * depends on the first, then it will get put on the pending list after the 144 - * trigger event has already occured and will be stuck there. 144 + * trigger event has already occurred and will be stuck there. 145 145 * 146 146 * The atomic 'deferred_trigger_count' is used to determine if a successful 147 147 * trigger has occurred in the midst of probing a driver. If the trigger count ··· 417 417 return ret; 418 418 } 419 419 420 - static int __device_attach(struct device_driver *drv, void *data) 420 + bool driver_allows_async_probing(struct device_driver *drv) 421 421 { 422 - struct device *dev = data; 422 + switch (drv->probe_type) { 423 + case PROBE_PREFER_ASYNCHRONOUS: 424 + return true; 425 + 426 + case PROBE_FORCE_SYNCHRONOUS: 427 + return false; 428 + 429 + default: 430 + if (module_requested_async_probing(drv->owner)) 431 + return true; 432 + 433 + return false; 434 + } 435 + } 436 + 437 + struct device_attach_data { 438 + struct device *dev; 439 + 440 + /* 441 + * Indicates whether we are are considering asynchronous probing or 442 + * not. Only initial binding after device or driver registration 443 + * (including deferral processing) may be done asynchronously, the 444 + * rest is always synchronous, as we expect it is being done by 445 + * request from userspace. 446 + */ 447 + bool check_async; 448 + 449 + /* 450 + * Indicates if we are binding synchronous or asynchronous drivers. 451 + * When asynchronous probing is enabled we'll execute 2 passes 452 + * over drivers: first pass doing synchronous probing and second 453 + * doing asynchronous probing (if synchronous did not succeed - 454 + * most likely because there was no driver requiring synchronous 455 + * probing - and we found asynchronous driver during first pass). 
456 + * The 2 passes are done because we can't shoot asynchronous 457 + * probe for given device and driver from bus_for_each_drv() since 458 + * driver pointer is not guaranteed to stay valid once 459 + * bus_for_each_drv() iterates to the next driver on the bus. 460 + */ 461 + bool want_async; 462 + 463 + /* 464 + * We'll set have_async to 'true' if, while scanning for matching 465 + * driver, we'll encounter one that requests asynchronous probing. 466 + */ 467 + bool have_async; 468 + }; 469 + 470 + static int __device_attach_driver(struct device_driver *drv, void *_data) 471 + { 472 + struct device_attach_data *data = _data; 473 + struct device *dev = data->dev; 474 + bool async_allowed; 475 + 476 + /* 477 + * Check if device has already been claimed. This may 478 + * happen with driver loading, device discovery/registration, 479 + * and deferred probe processing happens all at once with 480 + * multiple threads. 481 + */ 482 + if (dev->driver) 483 + return -EBUSY; 423 484 424 485 if (!driver_match_device(drv, dev)) 425 486 return 0; 426 487 488 + async_allowed = driver_allows_async_probing(drv); 489 + 490 + if (async_allowed) 491 + data->have_async = true; 492 + 493 + if (data->check_async && async_allowed != data->want_async) 494 + return 0; 495 + 427 496 return driver_probe_device(drv, dev); 497 + } 498 + 499 + static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) 500 + { 501 + struct device *dev = _dev; 502 + struct device_attach_data data = { 503 + .dev = dev, 504 + .check_async = true, 505 + .want_async = true, 506 + }; 507 + 508 + device_lock(dev); 509 + 510 + bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); 511 + dev_dbg(dev, "async probe completed\n"); 512 + 513 + pm_request_idle(dev); 514 + 515 + device_unlock(dev); 516 + 517 + put_device(dev); 518 + } 519 + 520 + static int __device_attach(struct device *dev, bool allow_async) 521 + { 522 + int ret = 0; 523 + 524 + device_lock(dev); 525 + if (dev->driver) { 526 + 
if (klist_node_attached(&dev->p->knode_driver)) { 527 + ret = 1; 528 + goto out_unlock; 529 + } 530 + ret = device_bind_driver(dev); 531 + if (ret == 0) 532 + ret = 1; 533 + else { 534 + dev->driver = NULL; 535 + ret = 0; 536 + } 537 + } else { 538 + struct device_attach_data data = { 539 + .dev = dev, 540 + .check_async = allow_async, 541 + .want_async = false, 542 + }; 543 + 544 + ret = bus_for_each_drv(dev->bus, NULL, &data, 545 + __device_attach_driver); 546 + if (!ret && allow_async && data.have_async) { 547 + /* 548 + * If we could not find appropriate driver 549 + * synchronously and we are allowed to do 550 + * async probes and there are drivers that 551 + * want to probe asynchronously, we'll 552 + * try them. 553 + */ 554 + dev_dbg(dev, "scheduling asynchronous probe\n"); 555 + get_device(dev); 556 + async_schedule(__device_attach_async_helper, dev); 557 + } else { 558 + pm_request_idle(dev); 559 + } 560 + } 561 + out_unlock: 562 + device_unlock(dev); 563 + return ret; 428 564 } 429 565 430 566 /** ··· 579 443 */ 580 444 int device_attach(struct device *dev) 581 445 { 582 - int ret = 0; 583 - 584 - device_lock(dev); 585 - if (dev->driver) { 586 - if (klist_node_attached(&dev->p->knode_driver)) { 587 - ret = 1; 588 - goto out_unlock; 589 - } 590 - ret = device_bind_driver(dev); 591 - if (ret == 0) 592 - ret = 1; 593 - else { 594 - dev->driver = NULL; 595 - ret = 0; 596 - } 597 - } else { 598 - ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); 599 - pm_request_idle(dev); 600 - } 601 - out_unlock: 602 - device_unlock(dev); 603 - return ret; 446 + return __device_attach(dev, false); 604 447 } 605 448 EXPORT_SYMBOL_GPL(device_attach); 449 + 450 + void device_initial_probe(struct device *dev) 451 + { 452 + __device_attach(dev, true); 453 + } 606 454 607 455 static int __driver_attach(struct device *dev, void *data) 608 456 { ··· 642 522 643 523 drv = dev->driver; 644 524 if (drv) { 525 + if (driver_allows_async_probing(drv)) 526 + 
async_synchronize_full(); 527 + 645 528 pm_runtime_get_sync(dev); 646 529 647 530 driver_sysfs_remove(dev);
+47 -16
drivers/base/firmware_class.c
··· 150 150 int page_array_size; 151 151 struct list_head pending_list; 152 152 #endif 153 - char fw_id[]; 153 + const char *fw_id; 154 154 }; 155 155 156 156 struct fw_cache_entry { 157 157 struct list_head list; 158 - char name[]; 158 + const char *name; 159 159 }; 160 160 161 161 struct fw_name_devm { 162 162 unsigned long magic; 163 - char name[]; 163 + const char *name; 164 164 }; 165 165 166 166 #define to_fwbuf(d) container_of(d, struct firmware_buf, ref) ··· 181 181 { 182 182 struct firmware_buf *buf; 183 183 184 - buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1, GFP_ATOMIC); 185 - 184 + buf = kzalloc(sizeof(*buf), GFP_ATOMIC); 186 185 if (!buf) 187 - return buf; 186 + return NULL; 187 + 188 + buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC); 189 + if (!buf->fw_id) { 190 + kfree(buf); 191 + return NULL; 192 + } 188 193 189 194 kref_init(&buf->ref); 190 - strcpy(buf->fw_id, fw_name); 191 195 buf->fwc = fwc; 192 196 init_completion(&buf->completion); 193 197 #ifdef CONFIG_FW_LOADER_USER_HELPER ··· 261 257 } else 262 258 #endif 263 259 vfree(buf->data); 260 + kfree_const(buf->fw_id); 264 261 kfree(buf); 265 262 } 266 263 ··· 325 320 static int fw_get_filesystem_firmware(struct device *device, 326 321 struct firmware_buf *buf) 327 322 { 328 - int i; 323 + int i, len; 329 324 int rc = -ENOENT; 330 - char *path = __getname(); 325 + char *path; 326 + 327 + path = __getname(); 328 + if (!path) 329 + return -ENOMEM; 331 330 332 331 for (i = 0; i < ARRAY_SIZE(fw_path); i++) { 333 332 struct file *file; ··· 340 331 if (!fw_path[i][0]) 341 332 continue; 342 333 343 - snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id); 334 + len = snprintf(path, PATH_MAX, "%s/%s", 335 + fw_path[i], buf->fw_id); 336 + if (len >= PATH_MAX) { 337 + rc = -ENAMETOOLONG; 338 + break; 339 + } 344 340 345 341 file = filp_open(path, O_RDONLY, 0); 346 342 if (IS_ERR(file)) ··· 406 392 if (fwn->magic == (unsigned long)&fw_cache) 407 393 pr_debug("%s: fw_name-%s devm-%p released\n", 408 394 
__func__, fwn->name, res); 395 + kfree_const(fwn->name); 409 396 } 410 397 411 398 static int fw_devm_match(struct device *dev, void *res, ··· 437 422 if (fwn) 438 423 return 1; 439 424 440 - fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) + 441 - strlen(name) + 1, GFP_KERNEL); 425 + fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm), 426 + GFP_KERNEL); 442 427 if (!fwn) 443 428 return -ENOMEM; 429 + fwn->name = kstrdup_const(name, GFP_KERNEL); 430 + if (!fwn->name) { 431 + kfree(fwn); 432 + return -ENOMEM; 433 + } 444 434 445 435 fwn->magic = (unsigned long)&fw_cache; 446 - strcpy(fwn->name, name); 447 436 devres_add(dev, fwn); 448 437 449 438 return 0; ··· 1266 1247 put_device(fw_work->device); /* taken in request_firmware_nowait() */ 1267 1248 1268 1249 module_put(fw_work->module); 1250 + kfree_const(fw_work->name); 1269 1251 kfree(fw_work); 1270 1252 } 1271 1253 ··· 1306 1286 return -ENOMEM; 1307 1287 1308 1288 fw_work->module = module; 1309 - fw_work->name = name; 1289 + fw_work->name = kstrdup_const(name, gfp); 1290 + if (!fw_work->name) { 1291 + kfree(fw_work); 1292 + return -ENOMEM; 1293 + } 1310 1294 fw_work->device = device; 1311 1295 fw_work->context = context; 1312 1296 fw_work->cont = cont; ··· 1318 1294 (uevent ? 
FW_OPT_UEVENT : FW_OPT_USERHELPER); 1319 1295 1320 1296 if (!try_module_get(module)) { 1297 + kfree_const(fw_work->name); 1321 1298 kfree(fw_work); 1322 1299 return -EFAULT; 1323 1300 } ··· 1409 1384 { 1410 1385 struct fw_cache_entry *fce; 1411 1386 1412 - fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC); 1387 + fce = kzalloc(sizeof(*fce), GFP_ATOMIC); 1413 1388 if (!fce) 1414 1389 goto exit; 1415 1390 1416 - strcpy(fce->name, name); 1391 + fce->name = kstrdup_const(name, GFP_ATOMIC); 1392 + if (!fce->name) { 1393 + kfree(fce); 1394 + fce = NULL; 1395 + goto exit; 1396 + } 1417 1397 exit: 1418 1398 return fce; 1419 1399 } ··· 1458 1428 1459 1429 static void free_fw_cache_entry(struct fw_cache_entry *fce) 1460 1430 { 1431 + kfree_const(fce->name); 1461 1432 kfree(fce); 1462 1433 } 1463 1434
+13
drivers/base/platform.c
··· 613 613 { 614 614 int retval, code; 615 615 616 + if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) { 617 + pr_err("%s: drivers registered with %s can not be probed asynchronously\n", 618 + drv->driver.name, __func__); 619 + return -EINVAL; 620 + } 621 + 622 + /* 623 + * We have to run our probes synchronously because we check if 624 + * we find any devices to bind to and exit with error if there 625 + * are any. 626 + */ 627 + drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; 628 + 616 629 /* 617 630 * Prevent driver from requesting probe deferral to avoid further 618 631 * futile probe attempts.
+1
drivers/edac/amd64_edac.c
··· 2964 2964 .probe = probe_one_instance, 2965 2965 .remove = remove_one_instance, 2966 2966 .id_table = amd64_pci_table, 2967 + .driver.probe_type = PROBE_FORCE_SYNCHRONOUS, 2967 2968 }; 2968 2969 2969 2970 static void setup_pci_device(void)
-1
fs/kernfs/file.c
··· 785 785 struct kernfs_node *kn = filp->f_path.dentry->d_fsdata; 786 786 struct kernfs_open_node *on = kn->attr.open; 787 787 788 - /* need parent for the kobj, grab both */ 789 788 if (!kernfs_get_active(kn)) 790 789 goto trigger; 791 790
+1 -1
fs/sysfs/file.c
··· 90 90 return 0; 91 91 92 92 if (size) { 93 - if (pos > size) 93 + if (pos >= size) 94 94 return 0; 95 95 if (pos + count > size) 96 96 count = size - pos;
+3 -3
fs/sysfs/group.c
··· 135 135 * This function creates a group for the first time. It will explicitly 136 136 * warn and error if any of the attribute files being created already exist. 137 137 * 138 - * Returns 0 on success or error. 138 + * Returns 0 on success or error code on failure. 139 139 */ 140 140 int sysfs_create_group(struct kobject *kobj, 141 141 const struct attribute_group *grp) ··· 155 155 * It will explicitly warn and error if any of the attribute files being 156 156 * created already exist. 157 157 * 158 - * Returns 0 on success or error code from sysfs_create_group on error. 158 + * Returns 0 on success or error code from sysfs_create_group on failure. 159 159 */ 160 160 int sysfs_create_groups(struct kobject *kobj, 161 161 const struct attribute_group **groups) ··· 193 193 * The primary use for this function is to call it after making a change 194 194 * that affects group visibility. 195 195 * 196 - * Returns 0 on success or error. 196 + * Returns 0 on success or error code on failure. 197 197 */ 198 198 int sysfs_update_group(struct kobject *kobj, 199 199 const struct attribute_group *grp)
+1 -1
include/linux/cacheinfo.h
··· 19 19 /** 20 20 * struct cacheinfo - represent a cache leaf node 21 21 * @type: type of the cache - data, inst or unified 22 - * @level: represents the hierarcy in the multi-level cache 22 + * @level: represents the hierarchy in the multi-level cache 23 23 * @coherency_line_size: size of each cache line usually representing 24 24 * the minimum amount of data that gets transferred from memory 25 25 * @number_of_sets: total number of sets, a set is a collection of cache
+31
include/linux/device.h
··· 196 196 extern struct klist *bus_get_device_klist(struct bus_type *bus); 197 197 198 198 /** 199 + * enum probe_type - device driver probe type to try 200 + * Device drivers may opt in for special handling of their 201 + * respective probe routines. This tells the core what to 202 + * expect and prefer. 203 + * 204 + * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well 205 + * whether probed synchronously or asynchronously. 206 + * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which 207 + * probing order is not essential for booting the system may 208 + * opt into executing their probes asynchronously. 209 + * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need 210 + * their probe routines to run synchronously with driver and 211 + * device registration (with the exception of -EPROBE_DEFER 212 + * handling - re-probing always ends up being done asynchronously). 213 + * 214 + * Note that the end goal is to switch the kernel to use asynchronous 215 + * probing by default, so annotating drivers with 216 + * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us 217 + * to speed up boot process while we are validating the rest of the 218 + * drivers. 219 + */ 220 + enum probe_type { 221 + PROBE_DEFAULT_STRATEGY, 222 + PROBE_PREFER_ASYNCHRONOUS, 223 + PROBE_FORCE_SYNCHRONOUS, 224 + }; 225 + 226 + /** 199 227 * struct device_driver - The basic device driver structure 200 228 * @name: Name of the device driver. 201 229 * @bus: The bus which the device of this driver belongs to. 202 230 * @owner: The module owner. 203 231 * @mod_name: Used for built-in modules. 204 232 * @suppress_bind_attrs: Disables bind/unbind via sysfs. 233 + * @probe_type: Type of the probe (synchronous or asynchronous) to use. 205 234 * @of_match_table: The open firmware table. 206 235 * @acpi_match_table: The ACPI match table. 
207 236 * @probe: Called to query the existence of a specific device, ··· 264 235 const char *mod_name; /* used for built-in modules */ 265 236 266 237 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ 238 + enum probe_type probe_type; 267 239 268 240 const struct of_device_id *of_match_table; 269 241 const struct acpi_device_id *acpi_match_table; ··· 1005 975 extern void device_release_driver(struct device *dev); 1006 976 extern int __must_check device_attach(struct device *dev); 1007 977 extern int __must_check driver_attach(struct device_driver *drv); 978 + extern void device_initial_probe(struct device *dev); 1008 979 extern int __must_check device_reprobe(struct device *dev); 1009 980 1010 981 /*
+13
include/linux/module.h
··· 257 257 bool sig_ok; 258 258 #endif 259 259 260 + bool async_probe_requested; 261 + 260 262 /* symbols that will be GPL-only in the near future. */ 261 263 const struct kernel_symbol *gpl_future_syms; 262 264 const unsigned long *gpl_future_crcs; ··· 510 508 511 509 extern void print_modules(void); 512 510 511 + static inline bool module_requested_async_probing(struct module *module) 512 + { 513 + return module && module->async_probe_requested; 514 + } 515 + 513 516 #else /* !CONFIG_MODULES... */ 514 517 515 518 /* Given an address, look for it in the exception tables. */ ··· 625 618 static inline void print_modules(void) 626 619 { 627 620 } 621 + 622 + static inline bool module_requested_async_probing(struct module *module) 623 + { 624 + return false; 625 + } 626 + 628 627 #endif /* CONFIG_MODULES */ 629 628 630 629 #ifdef CONFIG_SYSFS
+11 -1
include/linux/moduleparam.h
··· 310 310 #define core_param(name, var, type, perm) \ 311 311 param_check_##type(name, &(var)); \ 312 312 __module_param_call("", name, &param_ops_##type, &var, perm, -1, 0) 313 + 314 + /** 315 + * core_param_unsafe - same as core_param but taints kernel 316 + */ 317 + #define core_param_unsafe(name, var, type, perm) \ 318 + param_check_##type(name, &(var)); \ 319 + __module_param_call("", name, &param_ops_##type, &var, perm, \ 320 + -1, KERNEL_PARAM_FL_UNSAFE) 321 + 313 322 #endif /* !MODULE */ 314 323 315 324 /** ··· 366 357 unsigned num, 367 358 s16 level_min, 368 359 s16 level_max, 360 + void *arg, 369 361 int (*unknown)(char *param, char *val, 370 - const char *doing)); 362 + const char *doing, void *arg)); 371 363 372 364 /* Called by module remove. */ 373 365 #ifdef CONFIG_SYSFS
+15 -10
init/main.c
··· 235 235 early_param("loglevel", loglevel); 236 236 237 237 /* Change NUL term back to "=", to make "param" the whole string. */ 238 - static int __init repair_env_string(char *param, char *val, const char *unused) 238 + static int __init repair_env_string(char *param, char *val, 239 + const char *unused, void *arg) 239 240 { 240 241 if (val) { 241 242 /* param=val or param="val"? */ ··· 253 252 } 254 253 255 254 /* Anything after -- gets handed straight to init. */ 256 - static int __init set_init_arg(char *param, char *val, const char *unused) 255 + static int __init set_init_arg(char *param, char *val, 256 + const char *unused, void *arg) 257 257 { 258 258 unsigned int i; 259 259 260 260 if (panic_later) 261 261 return 0; 262 262 263 - repair_env_string(param, val, unused); 263 + repair_env_string(param, val, unused, NULL); 264 264 265 265 for (i = 0; argv_init[i]; i++) { 266 266 if (i == MAX_INIT_ARGS) { ··· 278 276 * Unknown boot options get handed to init, unless they look like 279 277 * unused parameters (modprobe will find them in /proc/cmdline). 280 278 */ 281 - static int __init unknown_bootoption(char *param, char *val, const char *unused) 279 + static int __init unknown_bootoption(char *param, char *val, 280 + const char *unused, void *arg) 282 281 { 283 - repair_env_string(param, val, unused); 282 + repair_env_string(param, val, unused, NULL); 284 283 285 284 /* Handle obsolete-style parameters */ 286 285 if (obsolete_checksetup(param)) ··· 413 410 } 414 411 415 412 /* Check for early params. 
*/ 416 - static int __init do_early_param(char *param, char *val, const char *unused) 413 + static int __init do_early_param(char *param, char *val, 414 + const char *unused, void *arg) 417 415 { 418 416 const struct obs_kernel_param *p; 419 417 ··· 433 429 434 430 void __init parse_early_options(char *cmdline) 435 431 { 436 - parse_args("early options", cmdline, NULL, 0, 0, 0, do_early_param); 432 + parse_args("early options", cmdline, NULL, 0, 0, 0, NULL, 433 + do_early_param); 437 434 } 438 435 439 436 /* Arch code calls this early on, or if not, just before other parsing. */ ··· 540 535 after_dashes = parse_args("Booting kernel", 541 536 static_command_line, __start___param, 542 537 __stop___param - __start___param, 543 - -1, -1, &unknown_bootoption); 538 + -1, -1, NULL, &unknown_bootoption); 544 539 if (!IS_ERR_OR_NULL(after_dashes)) 545 540 parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, 546 - set_init_arg); 541 + NULL, set_init_arg); 547 542 548 543 jump_label_init(); 549 544 ··· 853 848 initcall_command_line, __start___param, 854 849 __stop___param - __start___param, 855 850 level, level, 856 - &repair_env_string); 851 + NULL, &repair_env_string); 857 852 858 853 for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) 859 854 do_one_initcall(*fn);
+14 -4
kernel/module.c
··· 3107 3107 * 3108 3108 * http://thread.gmane.org/gmane.linux.kernel/1420814 3109 3109 */ 3110 - if (current->flags & PF_USED_ASYNC) 3110 + if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) 3111 3111 async_synchronize_full(); 3112 3112 3113 3113 mutex_lock(&module_mutex); ··· 3237 3237 return err; 3238 3238 } 3239 3239 3240 - static int unknown_module_param_cb(char *param, char *val, const char *modname) 3240 + static int unknown_module_param_cb(char *param, char *val, const char *modname, 3241 + void *arg) 3241 3242 { 3243 + struct module *mod = arg; 3244 + int ret; 3245 + 3246 + if (strcmp(param, "async_probe") == 0) { 3247 + mod->async_probe_requested = true; 3248 + return 0; 3249 + } 3250 + 3242 3251 /* Check for magic 'dyndbg' arg */ 3243 - int ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3252 + ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3244 3253 if (ret != 0) 3245 3254 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3246 3255 return 0; ··· 3351 3342 3352 3343 /* Module is ready to execute: parsing args may do that. */ 3353 3344 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, 3354 - -32768, 32767, unknown_module_param_cb); 3345 + -32768, 32767, NULL, 3346 + unknown_module_param_cb); 3355 3347 if (IS_ERR(after_dashes)) { 3356 3348 err = PTR_ERR(after_dashes); 3357 3349 goto bug_cleanup;
+7 -4
kernel/params.c
··· 100 100 unsigned num_params, 101 101 s16 min_level, 102 102 s16 max_level, 103 + void *arg, 103 104 int (*handle_unknown)(char *param, char *val, 104 - const char *doing)) 105 + const char *doing, void *arg)) 105 106 { 106 107 unsigned int i; 107 108 int err; ··· 129 128 130 129 if (handle_unknown) { 131 130 pr_debug("doing %s: %s='%s'\n", doing, param, val); 132 - return handle_unknown(param, val, doing); 131 + return handle_unknown(param, val, doing, arg); 133 132 } 134 133 135 134 pr_debug("Unknown argument '%s'\n", param); ··· 195 194 unsigned num, 196 195 s16 min_level, 197 196 s16 max_level, 198 - int (*unknown)(char *param, char *val, const char *doing)) 197 + void *arg, 198 + int (*unknown)(char *param, char *val, 199 + const char *doing, void *arg)) 199 200 { 200 201 char *param, *val; 201 202 ··· 217 214 return args; 218 215 irq_was_disabled = irqs_disabled(); 219 216 ret = parse_one(param, val, doing, params, num, 220 - min_level, max_level, unknown); 217 + min_level, max_level, arg, unknown); 221 218 if (irq_was_disabled && !irqs_disabled()) 222 219 pr_warn("%s: option '%s' enabled irq's!\n", 223 220 doing, param);
+2 -2
lib/dynamic_debug.c
··· 887 887 888 888 /* handle both dyndbg and $module.dyndbg params at boot */ 889 889 static int ddebug_dyndbg_boot_param_cb(char *param, char *val, 890 - const char *unused) 890 + const char *unused, void *arg) 891 891 { 892 892 vpr_info("%s=\"%s\"\n", param, val); 893 893 return ddebug_dyndbg_param_cb(param, val, NULL, 0); ··· 1028 1028 */ 1029 1029 cmdline = kstrdup(saved_command_line, GFP_KERNEL); 1030 1030 parse_args("dyndbg params", cmdline, NULL, 1031 - 0, 0, 0, &ddebug_dyndbg_boot_param_cb); 1031 + 0, 0, 0, NULL, &ddebug_dyndbg_boot_param_cb); 1032 1032 kfree(cmdline); 1033 1033 return 0; 1034 1034