// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int memhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long base_memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return base_memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
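
/*
 * Worked example for the helpers above (illustrative values only, e.g. an
 * x86-64-like configuration): with 128 MiB sections (MIN_MEMORY_BLOCK_SIZE)
 * and a 2 GiB memory block size, sections_per_block is 16, so section 35
 * belongs to block 35 / 16 = 2. pfn_to_block_id() and phys_to_block_id()
 * reduce to the same division after converting the pfn or physical address
 * to a section number first.
 */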

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local radix tree to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
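
/*
 * Illustrative sketch, not part of this file: a typical consumer of the
 * notifier chain above supplies a notifier_block whose callback receives
 * a struct memory_notify describing the affected range. All "example_*"
 * names below are hypothetical.
 */
#if 0
static int example_mem_callback(struct notifier_block *nb,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/* mn->start_pfn / mn->nr_pages describe the range */
		break;
	case MEM_ONLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_mem_nb = {
	.notifier_call = example_mem_callback,
};

/* registration: register_memory_notifier(&example_mem_nb); */
#endif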

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}

/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * when CONFIG_MEMORY_HOTREMOVE is enabled - a bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
			      mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long start_section_nr, unsigned long action,
		    int online_type, int nid)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(start_section_nr);

	switch (action) {
	case MEM_ONLINE:
		ret = online_pages(start_pfn, nr_pages, online_type, nid);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				  mem->online_type, mem->nid);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = memhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
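
/*
 * Usage example for the "state" attribute (illustrative; "memory32" is an
 * arbitrary example block):
 *   # echo online_movable > /sys/devices/system/memory/memory32/state
 *   # echo offline > /sys/devices/system/memory/memory32/state
 * Both land in state_store() above, mapping to MMOP_ONLINE_MOVABLE and
 * MMOP_OFFLINE respectively via memhp_online_type_from_str().
 */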

/*
 * phys_device is a bad name for this. What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or FRU (field replaceable unit).
 * i.e. do these ranges belong to the same physical device,
 * such that if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);

	return sprintf(buf, "%d\n", mem->phys_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
			       unsigned long nr_pages, int online_type,
			       struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *default_zone;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes, otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone cannot be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		default_zone = test_pages_in_a_zone(start_pfn,
						    start_pfn + nr_pages);
		if (!default_zone)
			return sprintf(buf, "none\n");
		strcat(buf, default_zone->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
					  nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			   default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			   default_zone);
out:
	strcat(buf, "\n");

	return strlen(buf);
}
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n",
		       online_type_to_str[memhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = memhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	memhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
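
/*
 * Usage example for the probe attribute (illustrative; the address is made
 * up):
 *   # echo 0x100000000 > /sys/devices/system/memory/probe
 * hot-adds one memory block starting at physical address 0x100000000. As
 * probe_store() enforces above, the address must be aligned to the memory
 * block size.
 */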

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
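
/*
 * Usage example (illustrative; the address is made up). Both attributes take
 * a physical address, which the stores above shift down to a pfn before use:
 *   # echo 0x200000000 > /sys/devices/system/memory/soft_offline_page
 *   # echo 0x200000000 > /sys/devices/system/memory/hard_offline_page
 * The latter injects a memory failure and may kill processes mapping the
 * page.
 */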

/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = base_memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Set up a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret) {
		put_device(&memory->dev);
		device_unregister(&memory->dev);
	}
	return ret;
}

static int init_memory_block(struct memory_block **memory,
			     unsigned long block_id, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);
	mem->nid = NUMA_NO_NODE;

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}

static int add_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	struct memory_block *mem;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return init_memory_block(&mem, base_memory_block_id(base_section_nr),
				 MEM_ONLINE);
}

static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			unregister_memory(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}

/* Return true if the memory block is offlined, otherwise return false. */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}
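
/*
 * Illustrative sketch, not part of this file: counting offline blocks in a
 * range with walk_memory_blocks(). The "example_*" names are hypothetical;
 * the callback signature is walk_memory_blocks_func_t.
 */
#if 0
static int example_count_offline(struct memory_block *mem, void *arg)
{
	unsigned int *count = arg;

	if (mem->state == MEM_OFFLINE)
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

/*
 * caller (with device_hotplug_lock held):
 *	unsigned int count = 0;
 *	walk_memory_blocks(start, size, &count, example_count_offline);
 */
#endif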