Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/memory_hotplug: rename mhp_restrictions to mhp_params

The mhp_restrictions struct really doesn't specify anything resembling a
restriction anymore so rename it to be mhp_params as it is a list of
extended parameters.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Badger <ebadger@gigaio.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200306170846.9333-3-logang@deltatee.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Logan Gunthorpe; committed by Linus Torvalds.
f5637d3b 96c6b598

+33 -33
+2 -2
arch/arm64/mm/mmu.c
··· 1374 1374 } 1375 1375 1376 1376 int arch_add_memory(int nid, u64 start, u64 size, 1377 - struct mhp_restrictions *restrictions) 1377 + struct mhp_params *params) 1378 1378 { 1379 1379 int ret, flags = 0; 1380 1380 ··· 1387 1387 memblock_clear_nomap(start, size); 1388 1388 1389 1389 ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, 1390 - restrictions); 1390 + params); 1391 1391 if (ret) 1392 1392 __remove_pgd_mapping(swapper_pg_dir, 1393 1393 __phys_to_virt(start), size);
+2 -2
arch/ia64/mm/init.c
··· 670 670 671 671 #ifdef CONFIG_MEMORY_HOTPLUG 672 672 int arch_add_memory(int nid, u64 start, u64 size, 673 - struct mhp_restrictions *restrictions) 673 + struct mhp_params *params) 674 674 { 675 675 unsigned long start_pfn = start >> PAGE_SHIFT; 676 676 unsigned long nr_pages = size >> PAGE_SHIFT; 677 677 int ret; 678 678 679 - ret = __add_pages(nid, start_pfn, nr_pages, restrictions); 679 + ret = __add_pages(nid, start_pfn, nr_pages, params); 680 680 if (ret) 681 681 printk("%s: Problem encountered in __add_pages() as ret=%d\n", 682 682 __func__, ret);
+2 -2
arch/powerpc/mm/mem.c
··· 122 122 } 123 123 124 124 int __ref arch_add_memory(int nid, u64 start, u64 size, 125 - struct mhp_restrictions *restrictions) 125 + struct mhp_params *params) 126 126 { 127 127 unsigned long start_pfn = start >> PAGE_SHIFT; 128 128 unsigned long nr_pages = size >> PAGE_SHIFT; ··· 138 138 return -EFAULT; 139 139 } 140 140 141 - return __add_pages(nid, start_pfn, nr_pages, restrictions); 141 + return __add_pages(nid, start_pfn, nr_pages, params); 142 142 } 143 143 144 144 void __ref arch_remove_memory(int nid, u64 start, u64 size,
+3 -3
arch/s390/mm/init.c
··· 268 268 #endif /* CONFIG_CMA */ 269 269 270 270 int arch_add_memory(int nid, u64 start, u64 size, 271 - struct mhp_restrictions *restrictions) 271 + struct mhp_params *params) 272 272 { 273 273 unsigned long start_pfn = PFN_DOWN(start); 274 274 unsigned long size_pages = PFN_DOWN(size); 275 275 int rc; 276 276 277 - if (WARN_ON_ONCE(restrictions->altmap)) 277 + if (WARN_ON_ONCE(params->altmap)) 278 278 return -EINVAL; 279 279 280 280 rc = vmem_add_mapping(start, size); 281 281 if (rc) 282 282 return rc; 283 283 284 - rc = __add_pages(nid, start_pfn, size_pages, restrictions); 284 + rc = __add_pages(nid, start_pfn, size_pages, params); 285 285 if (rc) 286 286 vmem_remove_mapping(start, size); 287 287 return rc;
+2 -2
arch/sh/mm/init.c
··· 406 406 407 407 #ifdef CONFIG_MEMORY_HOTPLUG 408 408 int arch_add_memory(int nid, u64 start, u64 size, 409 - struct mhp_restrictions *restrictions) 409 + struct mhp_params *params) 410 410 { 411 411 unsigned long start_pfn = PFN_DOWN(start); 412 412 unsigned long nr_pages = size >> PAGE_SHIFT; 413 413 int ret; 414 414 415 415 /* We only have ZONE_NORMAL, so this is easy.. */ 416 - ret = __add_pages(nid, start_pfn, nr_pages, restrictions); 416 + ret = __add_pages(nid, start_pfn, nr_pages, params); 417 417 if (unlikely(ret)) 418 418 printk("%s: Failed, __add_pages() == %d\n", __func__, ret); 419 419
+2 -2
arch/x86/mm/init_32.c
··· 819 819 820 820 #ifdef CONFIG_MEMORY_HOTPLUG 821 821 int arch_add_memory(int nid, u64 start, u64 size, 822 - struct mhp_restrictions *restrictions) 822 + struct mhp_params *params) 823 823 { 824 824 unsigned long start_pfn = start >> PAGE_SHIFT; 825 825 unsigned long nr_pages = size >> PAGE_SHIFT; 826 826 827 - return __add_pages(nid, start_pfn, nr_pages, restrictions); 827 + return __add_pages(nid, start_pfn, nr_pages, params); 828 828 } 829 829 830 830 void arch_remove_memory(int nid, u64 start, u64 size,
+4 -4
arch/x86/mm/init_64.c
··· 843 843 } 844 844 845 845 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, 846 - struct mhp_restrictions *restrictions) 846 + struct mhp_params *params) 847 847 { 848 848 int ret; 849 849 850 - ret = __add_pages(nid, start_pfn, nr_pages, restrictions); 850 + ret = __add_pages(nid, start_pfn, nr_pages, params); 851 851 WARN_ON_ONCE(ret); 852 852 853 853 /* update max_pfn, max_low_pfn and high_memory */ ··· 858 858 } 859 859 860 860 int arch_add_memory(int nid, u64 start, u64 size, 861 - struct mhp_restrictions *restrictions) 861 + struct mhp_params *params) 862 862 { 863 863 unsigned long start_pfn = start >> PAGE_SHIFT; 864 864 unsigned long nr_pages = size >> PAGE_SHIFT; 865 865 866 866 init_memory_mapping(start, start + size); 867 867 868 - return add_pages(nid, start_pfn, nr_pages, restrictions); 868 + return add_pages(nid, start_pfn, nr_pages, params); 869 869 } 870 870 871 871 #define PAGE_INUSE 0xFD
+8 -8
include/linux/memory_hotplug.h
··· 58 58 }; 59 59 60 60 /* 61 - * Restrictions for the memory hotplug: 62 - * altmap: alternative allocator for memmap array 61 + * Extended parameters for memory hotplug: 62 + * altmap: alternative allocator for memmap array (optional) 63 63 */ 64 - struct mhp_restrictions { 64 + struct mhp_params { 65 65 struct vmem_altmap *altmap; 66 66 }; 67 67 ··· 112 112 extern int try_online_node(int nid); 113 113 114 114 extern int arch_add_memory(int nid, u64 start, u64 size, 115 - struct mhp_restrictions *restrictions); 115 + struct mhp_params *params); 116 116 extern u64 max_mem_size; 117 117 118 118 extern int memhp_online_type_from_str(const char *str); ··· 133 133 134 134 /* reasonably generic interface to expand the physical pages */ 135 135 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, 136 - struct mhp_restrictions *restrictions); 136 + struct mhp_params *params); 137 137 138 138 #ifndef CONFIG_ARCH_HAS_ADD_PAGES 139 139 static inline int add_pages(int nid, unsigned long start_pfn, 140 - unsigned long nr_pages, struct mhp_restrictions *restrictions) 140 + unsigned long nr_pages, struct mhp_params *params) 141 141 { 142 - return __add_pages(nid, start_pfn, nr_pages, restrictions); 142 + return __add_pages(nid, start_pfn, nr_pages, params); 143 143 } 144 144 #else /* ARCH_HAS_ADD_PAGES */ 145 145 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, 146 - struct mhp_restrictions *restrictions); 146 + struct mhp_params *params); 147 147 #endif /* ARCH_HAS_ADD_PAGES */ 148 148 149 149 #ifdef CONFIG_NUMA
+4 -4
mm/memory_hotplug.c
··· 304 304 * add the new pages. 305 305 */ 306 306 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, 307 - struct mhp_restrictions *restrictions) 307 + struct mhp_params *params) 308 308 { 309 309 const unsigned long end_pfn = pfn + nr_pages; 310 310 unsigned long cur_nr_pages; 311 311 int err; 312 - struct vmem_altmap *altmap = restrictions->altmap; 312 + struct vmem_altmap *altmap = params->altmap; 313 313 314 314 err = check_hotplug_memory_addressable(pfn, nr_pages); 315 315 if (err) ··· 1002 1002 */ 1003 1003 int __ref add_memory_resource(int nid, struct resource *res) 1004 1004 { 1005 - struct mhp_restrictions restrictions = {}; 1005 + struct mhp_params params = {}; 1006 1006 u64 start, size; 1007 1007 bool new_node = false; 1008 1008 int ret; ··· 1030 1030 new_node = ret; 1031 1031 1032 1032 /* call arch's memory hotadd */ 1033 - ret = arch_add_memory(nid, start, size, &restrictions); 1033 + ret = arch_add_memory(nid, start, size, &params); 1034 1034 if (ret < 0) 1035 1035 goto error; 1036 1036
+4 -4
mm/memremap.c
··· 184 184 { 185 185 struct resource *res = &pgmap->res; 186 186 struct dev_pagemap *conflict_pgmap; 187 - struct mhp_restrictions restrictions = { 187 + struct mhp_params params = { 188 188 /* 189 189 * We do not want any optional features only our own memmap 190 190 */ ··· 302 302 */ 303 303 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { 304 304 error = add_pages(nid, PHYS_PFN(res->start), 305 - PHYS_PFN(resource_size(res)), &restrictions); 305 + PHYS_PFN(resource_size(res)), &params); 306 306 } else { 307 307 error = kasan_add_zero_shadow(__va(res->start), resource_size(res)); 308 308 if (error) { ··· 311 311 } 312 312 313 313 error = arch_add_memory(nid, res->start, resource_size(res), 314 - &restrictions); 314 + &params); 315 315 } 316 316 317 317 if (!error) { ··· 319 319 320 320 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; 321 321 move_pfn_range_to_zone(zone, PHYS_PFN(res->start), 322 - PHYS_PFN(resource_size(res)), restrictions.altmap); 322 + PHYS_PFN(resource_size(res)), params.altmap); 323 323 } 324 324 325 325 mem_hotplug_done();