// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
	const char			*name;
	size_t				size;
};

struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	/* Actually allocate the full kmalloc bucket size.
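	 * (Added aside: kmalloc_size_roundup() returns the size of the kmalloc
	 * bucket that would back the request anyway, so the rounded-up slack
	 * becomes usable devres payload instead of unused padding.)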
	 */
	*tot_size = kmalloc_size_roundup(*tot_size);

	return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 * void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
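 *
 * A minimal usage sketch (hypothetical caller; my_res_release, my_acquire_hw
 * and struct my_res are illustrative names, not part of this file):
 *
 *	struct my_res *p = devres_alloc(my_res_release, sizeof(*p), GFP_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	if (my_acquire_hw(p)) {
 *		devres_free(p);		// not yet registered, free it directly
 *		return -EIO;
 *	}
 *	devres_add(dev, p);		// released automatically on detach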
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
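 *
 * Hypothetical sketch of the lookup-or-insert pattern (my_shared_release and
 * struct my_shared are placeholders, not part of this file):
 *
 *	struct my_shared *new, *cur;
 *
 *	new = devres_alloc(my_shared_release, sizeof(*new), GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	cur = devres_get(dev, new, NULL, NULL);	// frees @new if one exists
 *	// @cur now points at the shared instance, found or newly added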
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);


/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
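 *
 * Hedged example of tearing a resource down early instead of waiting for
 * detach (my_res_release and my_res_match stand in for callbacks with the
 * dr_release_t and dr_match_t signatures):
 *
 *	if (devres_release(dev, my_res_release, my_res_match, target) == -ENOENT)
 *		dev_warn(dev, "resource was never registered\n");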
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end).  That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.
 * For @id, using a pointer to an object which won't be used for another
 * group is recommended.  If @id is NULL, an address-wise unique ID is
 * created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
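 *
 * A sketch of typical group usage around a multi-step setup (my_setup_irqs()
 * and my_setup_dma() are hypothetical helpers, not part of this file):
 *
 *	void *grp = devres_open_group(dev, NULL, GFP_KERNEL);
 *	int err;
 *
 *	if (!grp)
 *		return -ENOMEM;
 *	err = my_setup_irqs(dev);	// devm_* allocations land in the group
 *	if (!err)
 *		err = my_setup_dma(dev);
 *	if (err) {
 *		devres_release_group(dev, grp);	// undo everything since open
 *		return err;
 *	}
 *	devres_close_group(dev, grp);	// keep the resources, close the group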
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		release_nodes(dev, &todo);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
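 *
 * Hypothetical sketch (my_regulator_off() and my_reg are placeholders; the
 * devm_add_action_or_reset() helper is assumed from <linux/device.h>):
 *
 *	err = devm_add_action_or_reset(dev, my_regulator_off, my_reg);
 *	if (err)
 *		return err;
 *	...
 *	// run the action now and drop it from the unwind list
 *	devm_release_action(dev, my_regulator_off, my_reg);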
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));

}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons:
	 * the initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc().  Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc().  If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR.  This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero).  The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If the new size is smaller than or equal to the actual number of
	 * bytes allocated previously - just return the same pointer.
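	 * (ksize() reported the full size of the kmalloc bucket backing the
	 * old devres, so a request that still fits in that bucket needs
	 * neither a new allocation nor any list manipulation.)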
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk. We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  total_new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list.  This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in the .rodata section, otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
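 *
 * Sketch of a hypothetical varargs wrapper built on this helper (essentially
 * what devm_kasprintf() below does):
 *
 *	static char *my_label(struct device *dev, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		char *s;
 *
 *		va_start(ap, fmt);
 *		s = devm_kvasprintf(dev, GFP_KERNEL, fmt, ap);
 *		va_end(ap);
 *		return s;
 *	}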
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
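 *
 * Hedged usage sketch (the order and error handling below are illustrative):
 *
 *	unsigned long buf = devm_get_free_pages(dev, GFP_KERNEL, 2);	// 4 pages
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// freed automatically on detach, or explicitly via:
 *	devm_free_pages(dev, buf);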
 */

unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu.  Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
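
/*
 * A hedged end-to-end sketch of the managed-allocation pattern as seen from a
 * driver probe routine; struct my_priv and my_probe() are illustrative and not
 * part of this file:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		struct my_priv *priv;
 *
 *		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		priv->name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
 *		if (!priv->name)
 *			return -ENOMEM;
 *		// everything above is released automatically on driver detach
 *		return 0;
 *	}
 */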