#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
	char *name;
};

struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
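/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the structures above nest as memblock -> memblock_type -> regions[], so
 * early-boot code can walk a region array directly. The loop below only
 * shows the relationship; real callers normally use the iterator macros
 * declared further down.
 *
 *	struct memblock_type *type = &memblock.memory;
 *	unsigned long i;
 *
 *	for (i = 0; i < type->cnt; i++)
 *		memblock_dbg("%s[%lu]: base=%pa size=%pa\n", type->name, i,
 *			     &type->regions[i].base, &type->regions[i].size);
 */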
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
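/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * walk everything present in memblock.memory but absent from
 * memblock.reserved, i.e. the same walk for_each_free_mem_range() wraps
 * further below. Addresses are hypothetical; SZ_* come from <linux/sizes.h>.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	memblock_add(0x80000000, SZ_256M);
 *	memblock_reserve(0x80000000, SZ_1M);
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free range: [%pa-%pa)\n", &start, &end);
 */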
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		__next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
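/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * count the pages registered for node 0 with the pfn iterator above. Only
 * meaningful when CONFIG_HAVE_MEMBLOCK_NODE_MAP is enabled; note the loop
 * variable is an int, matching the macro's i = -1 / i >= 0 protocol.
 *
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		nr_pages += end_pfn - start_pfn;
 */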
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

/**
 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
 * @i: u64 used as loop variable
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
 * Available as soon as memblock is initialized.
 * Note: because this memory does not belong to any physical node, the flags
 * and nid arguments do not make sense and are thus not exposed as arguments.
 */
#define for_each_resv_unavail_range(i, p_start, p_end)			\
	for_each_mem_range(i, &memblock.reserved, &memblock.memory,	\
			   NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
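/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * typical boot-time allocations. memblock_alloc_try_nid() tries the
 * requested node first and then falls back to any accessible memory.
 * SZ_64K, SMP_CACHE_BYTES and PAGE_SIZE come from the usual kernel headers;
 * nid is a hypothetical node id from the caller's context.
 *
 *	phys_addr_t table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	phys_addr_t node_data = memblock_alloc_try_nid(SZ_64K,
 *						       SMP_CACHE_BYTES, nid);
 *
 * These return physical addresses; convert with __va() or phys_to_virt()
 * before touching the memory.
 */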
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
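/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * print every registered memory region together with its page frame span,
 * combining for_each_memblock() with the pfn accessors above. Note that the
 * first macro argument is the bare member name (memory or reserved), not a
 * pointer.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("base=%pa size=%pa pfns %lu-%lu\n",
 *			&reg->base, &reg->size,
 *			memblock_region_memory_base_pfn(reg),
 *			memblock_region_memory_end_pfn(reg));
 */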
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */