// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
#ifdef CONFIG_DEBUG_DEVRES
	const char			*name;
	size_t				size;
#endif
};

struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}
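
/*
 * Illustrative note (not part of the original file): the release
 * callback doubles as a type tag, exactly as node_to_group() above
 * shows for the group markers.  And because @data is the flexible
 * trailing member of struct devres, any pointer handed out to users
 * maps back to its header with container_of(), e.g.:
 *
 *	void *res = dr->data;				// user's view
 *	struct devres *back = container_of(res, struct devres, data);
 *	// back == dr; devres_free() and devres_add() rely on this.
 */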

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			   const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
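
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs devres_alloc() with devres_add()/devres_free(), both defined
 * below and declared in <linux/device.h>.  The "foo" names and the
 * kmalloc'ed buffer are assumptions made up for illustration.
 */
struct foo_dma_buf {
	void *cpu_addr;
};

static void foo_dma_buf_release(struct device *dev, void *res)
{
	struct foo_dma_buf *buf = res;

	/* undo the acquisition done after devres_alloc() succeeded */
	kfree(buf->cpu_addr);
}

static int __maybe_unused foo_dma_buf_setup(struct device *dev)
{
	struct foo_dma_buf *buf;

	buf = devres_alloc(foo_dma_buf_release, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->cpu_addr = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf->cpu_addr) {
		devres_free(buf);	/* not yet added, free manually */
		return -ENOMEM;
	}

	devres_add(dev, buf);		/* released on driver detach */
	return 0;
}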

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);
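
/*
 * Illustrative sketch (not part of the original file): a match callback
 * narrows devres_find() down to one instance when several entries share
 * the same release function.  Reuses the hypothetical "foo" names from
 * the sketch above.
 */
static int foo_dma_buf_match(struct device *dev, void *res, void *match_data)
{
	struct foo_dma_buf *buf = res;

	return buf->cpu_addr == match_data;
}

static __maybe_unused struct foo_dma_buf *
foo_dma_buf_lookup(struct device *dev, void *cpu_addr)
{
	return devres_find(dev, foo_dma_buf_release, foo_dma_buf_match,
			   cpu_addr);
}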

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed. The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);
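
/*
 * Illustrative sketch (not part of the original file): devres_get()
 * implements "allocate once, share afterwards".  Two racing callers
 * both allocate, but only the first addition is kept; the loser's copy
 * is freed under the hood and both see the same data.  Names are
 * assumptions.
 */
static void foo_shared_release(struct device *dev, void *res)
{
	/* nothing to undo for plain shared state */
}

static __maybe_unused int *foo_get_shared_counter(struct device *dev)
{
	int *counter;

	counter = devres_alloc(foo_shared_release, sizeof(*counter),
			       GFP_KERNEL);
	if (!counter)
		return NULL;

	/* returns either @counter or a previously added instance */
	return devres_get(dev, counter, NULL, NULL);
}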

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}
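
/*
 * Worked example (illustrative, not part of the original file) of the
 * two-pass scan in remove_nodes() above.  Given this list fragment,
 * where group gA is fully contained and group gB is still open:
 *
 *	first -> res1 -> gA< -> res2 -> gA> -> gB< -> res3 -> end
 *
 * Pass 1 moves res1/res2/res3 to @todo and zeroes both group colors.
 * Pass 2 colors gA twice (both markers in range, color == 2), so it is
 * torn down; gB also reaches color 2, once for its opening marker and
 * once because its closing marker is unlinked (open group).  A closed
 * group whose closing marker sat beyond @end would stop at color 1 and
 * be left alone.
 */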

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id. If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
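
/*
 * Illustrative sketch (not part of the original file): a group lets a
 * driver roll back a partially completed multi-step setup in one call.
 * devres_release_group() is defined further below; the "foo" helpers
 * are assumptions made up for illustration.
 */
static int foo_init_engine(struct device *dev, int idx)
{
	/* stand-in for a step that acquires devm-managed resources */
	return devm_kzalloc(dev, 64, GFP_KERNEL) ? 0 : -ENOMEM;
}

static int __maybe_unused foo_setup_engines(struct device *dev)
{
	void *grp;
	int ret;

	grp = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!grp)
		return -ENOMEM;

	ret = foo_init_engine(dev, 0);
	if (!ret)
		ret = foo_init_engine(dev, 1);
	if (ret) {
		/* drops everything added since devres_open_group() */
		devres_release_group(dev, grp);
		return ret;
	}

	devres_close_group(dev, grp);
	return 0;
}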

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id. If @id is NULL, the latest
 * open group is selected. Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
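
/*
 * Illustrative sketch (not part of the original file): a one-off undo
 * hook registered through devm_add_action().  Names are assumptions;
 * <linux/device.h> also provides devm_add_action_or_reset(), which runs
 * the action immediately if registration fails.
 */
static void foo_clock_off(void *data)
{
	/* stand-in for e.g. clk_disable_unprepare(data) */
}

static int __maybe_unused foo_enable_clock(struct device *dev, void *clk)
{
	/* ... enable the clock here ... */

	/* undone automatically on detach, in reverse insertion order */
	return devm_add_action(dev, foo_clock_off, clk);
}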

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action(). Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
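
/*
 * Illustrative sketch (not part of the original file): managed
 * allocations normally outlive the function that made them and are
 * freed on detach; devm_kfree() below covers the rare early-free case.
 * Names are assumptions.
 */
static int __maybe_unused foo_parse_config(struct device *dev)
{
	u32 *scratch;

	scratch = devm_kmalloc(dev, 16 * sizeof(*scratch), GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;

	/* ... use scratch during probe ... */

	/* optional: release early instead of waiting for detach */
	devm_kfree(dev, scratch);
	return 0;
}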

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section, otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *                   into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *                  into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
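
/*
 * Illustrative sketch (not part of the original file): formatted,
 * managed strings suit names whose storage must follow the device
 * lifetime.  Names are assumptions.
 */
static __maybe_unused const char *foo_irq_name(struct device *dev, int index)
{
	/* freed automatically on detach, like devm_kmalloc() memory */
	return devm_kasprintf(dev, GFP_KERNEL, "%s-irq%d",
			      dev_name(dev), index);
}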

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages. Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
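
/*
 * Illustrative sketch (not part of the original file): callers use the
 * type-safe devm_alloc_percpu() wrapper from <linux/device.h> rather
 * than calling __devm_alloc_percpu() directly.  Names are assumptions.
 */
struct foo_pcpu_stats {
	u64 packets;
};

static __maybe_unused struct foo_pcpu_stats __percpu *
foo_alloc_stats(struct device *dev)
{
	/* freed via free_percpu() automatically on detach */
	return devm_alloc_percpu(dev, struct foo_pcpu_stats);
}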