Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/memory_hotplug: Remove MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers

MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE memory notifiers were introduced
to prepare the transition of memory to and from a physically accessible
state. This enhancement was crucial for implementing the "memmap on memory"
feature for s390.

With the introduction of dynamic (de)configuration of hotpluggable memory,
memory can be brought to an accessible state before add_memory(). Memory
can be brought to an inaccessible state before remove_memory(). Hence,
there is no need for the MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE memory
notifiers anymore.

This basically reverts commit
c5f1e2d18909 ("mm/memory_hotplug: introduce MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers").
Additionally, apply minor adjustments to the function parameters of
move_pfn_range_to_zone() and mhp_supports_memmap_on_memory() to ensure
compatibility with the latest branch.

Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

authored by

Sumanth Korikkar and committed by
Heiko Carstens
300709fb ce2071e0

+6 -65
+1 -22
drivers/base/memory.c
··· 226 226 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); 227 227 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; 228 228 unsigned long nr_vmemmap_pages = 0; 229 - struct memory_notify arg; 230 229 struct zone *zone; 231 230 int ret; 232 231 ··· 245 246 if (mem->altmap) 246 247 nr_vmemmap_pages = mem->altmap->free; 247 248 248 - arg.altmap_start_pfn = start_pfn; 249 - arg.altmap_nr_pages = nr_vmemmap_pages; 250 - arg.start_pfn = start_pfn + nr_vmemmap_pages; 251 - arg.nr_pages = nr_pages - nr_vmemmap_pages; 252 249 mem_hotplug_begin(); 253 - ret = memory_notify(MEM_PREPARE_ONLINE, &arg); 254 - ret = notifier_to_errno(ret); 255 - if (ret) 256 - goto out_notifier; 257 - 258 250 if (nr_vmemmap_pages) { 259 - ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, 260 - zone, mem->altmap->inaccessible); 251 + ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone); 261 252 if (ret) 262 253 goto out; 263 254 } ··· 269 280 nr_vmemmap_pages); 270 281 271 282 mem->zone = zone; 272 - mem_hotplug_done(); 273 - return ret; 274 283 out: 275 - memory_notify(MEM_FINISH_OFFLINE, &arg); 276 - out_notifier: 277 284 mem_hotplug_done(); 278 285 return ret; 279 286 } ··· 282 297 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); 283 298 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; 284 299 unsigned long nr_vmemmap_pages = 0; 285 - struct memory_notify arg; 286 300 int ret; 287 301 288 302 if (!mem->zone) ··· 313 329 mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); 314 330 315 331 mem->zone = NULL; 316 - arg.altmap_start_pfn = start_pfn; 317 - arg.altmap_nr_pages = nr_vmemmap_pages; 318 - arg.start_pfn = start_pfn + nr_vmemmap_pages; 319 - arg.nr_pages = nr_pages - nr_vmemmap_pages; 320 - memory_notify(MEM_FINISH_OFFLINE, &arg); 321 332 out: 322 333 mem_hotplug_done(); 323 334 return ret;
-9
include/linux/memory.h
··· 96 96 #define MEM_GOING_ONLINE (1<<3) 97 97 #define MEM_CANCEL_ONLINE (1<<4) 98 98 #define MEM_CANCEL_OFFLINE (1<<5) 99 - #define MEM_PREPARE_ONLINE (1<<6) 100 - #define MEM_FINISH_OFFLINE (1<<7) 101 99 102 100 struct memory_notify { 103 - /* 104 - * The altmap_start_pfn and altmap_nr_pages fields are designated for 105 - * specifying the altmap range and are exclusively intended for use in 106 - * MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers. 107 - */ 108 - unsigned long altmap_start_pfn; 109 - unsigned long altmap_nr_pages; 110 101 unsigned long start_pfn; 111 102 unsigned long nr_pages; 112 103 };
+1 -17
include/linux/memory_hotplug.h
··· 58 58 * implies the node id (nid). 59 59 */ 60 60 #define MHP_NID_IS_MGID ((__force mhp_t)BIT(2)) 61 - /* 62 - * The hotplugged memory is completely inaccessible while the memory is 63 - * offline. The memory provider will handle MEM_PREPARE_ONLINE / 64 - * MEM_FINISH_OFFLINE notifications and make the memory accessible. 65 - * 66 - * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY, 67 - * because the altmap cannot be written (e.g., poisoned) when adding 68 - * memory -- before it is set online. 69 - * 70 - * This allows for adding memory with an altmap that is not currently 71 - * made available by a hypervisor. When onlining that memory, the 72 - * hypervisor can be instructed to make that memory available, and 73 - * the onlining phase will not require any memory allocations, which is 74 - * helpful in low-memory situations. 75 - */ 76 - #define MHP_OFFLINE_INACCESSIBLE ((__force mhp_t)BIT(3)) 77 61 78 62 /* 79 63 * Extended parameters for memory hotplug: ··· 107 123 long nr_pages); 108 124 /* VM interface that may be used by firmware interface */ 109 125 extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, 110 - struct zone *zone, bool mhp_off_inaccessible); 126 + struct zone *zone); 111 127 extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); 112 128 extern int online_pages(unsigned long pfn, unsigned long nr_pages, 113 129 struct zone *zone, struct memory_group *group);
-1
include/linux/memremap.h
··· 25 25 unsigned long free; 26 26 unsigned long align; 27 27 unsigned long alloc; 28 - bool inaccessible; 29 28 }; 30 29 31 30 /*
+3 -14
mm/memory_hotplug.c
··· 1088 1088 } 1089 1089 1090 1090 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, 1091 - struct zone *zone, bool mhp_off_inaccessible) 1091 + struct zone *zone) 1092 1092 { 1093 1093 unsigned long end_pfn = pfn + nr_pages; 1094 1094 int ret, i; ··· 1096 1096 ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); 1097 1097 if (ret) 1098 1098 return ret; 1099 - 1100 - /* 1101 - * Memory block is accessible at this stage and hence poison the struct 1102 - * pages now. If the memory block is accessible during memory hotplug 1103 - * addition phase, then page poisining is already performed in 1104 - * sparse_add_section(). 1105 - */ 1106 - if (mhp_off_inaccessible) 1107 - page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages); 1108 1099 1109 1100 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE, 1110 1101 false); ··· 1435 1444 } 1436 1445 1437 1446 static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group, 1438 - u64 start, u64 size, mhp_t mhp_flags) 1447 + u64 start, u64 size) 1439 1448 { 1440 1449 unsigned long memblock_size = memory_block_size_bytes(); 1441 1450 u64 cur_start; ··· 1451 1460 }; 1452 1461 1453 1462 mhp_altmap.free = memory_block_memmap_on_memory_pages(); 1454 - if (mhp_flags & MHP_OFFLINE_INACCESSIBLE) 1455 - mhp_altmap.inaccessible = true; 1456 1463 params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap), 1457 1464 GFP_KERNEL); 1458 1465 if (!params.altmap) { ··· 1544 1555 */ 1545 1556 if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) && 1546 1557 mhp_supports_memmap_on_memory()) { 1547 - ret = create_altmaps_and_memory_blocks(nid, group, start, size, mhp_flags); 1558 + ret = create_altmaps_and_memory_blocks(nid, group, start, size); 1548 1559 if (ret) 1549 1560 goto error; 1550 1561 } else {
+1 -2
mm/sparse.c
··· 951 951 * Poison uninitialized struct pages in order to catch invalid flags 952 952 * combinations. 953 953 */ 954 - if (!altmap || !altmap->inaccessible) 955 - page_init_poison(memmap, sizeof(struct page) * nr_pages); 954 + page_init_poison(memmap, sizeof(struct page) * nr_pages); 956 955 957 956 ms = __nr_to_section(section_nr); 958 957 set_section_nid(section_nr, nid);