Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Provide walk_memory_resource() for powerpc

Provide walk_memory_resource() for 64-bit powerpc. PowerPC maintains
logical memory region mapping in the lmb.memory structure. Walk
through these structures and do the callbacks for the contiguous
chunks.

Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Authored by Badari Pulavarty; committed by Paul Mackerras.
9d88a2eb 98d5c21c

+57 -7
+23 -7
arch/powerpc/mm/mem.c
··· 154 154 155 155 /* 156 156 * walk_memory_resource() needs to make sure there is no holes in a given 157 - * memory range. On PPC64, since this range comes from /sysfs, the range 158 - * is guaranteed to be valid, non-overlapping and can not contain any 159 - * holes. By the time we get here (memory add or remove), /proc/device-tree 160 - * is updated and correct. Only reason we need to check against device-tree 161 - * would be if we allow user-land to specify a memory range through a 162 - * system call/ioctl etc. instead of doing offline/online through /sysfs. 157 + * memory range. PPC64 does not maintain the memory layout in /proc/iomem. 158 + * Instead it maintains it in lmb.memory structures. Walk through the 159 + * memory regions, find holes and callback for contiguous regions. 163 160 */ 164 161 int 165 162 walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, 166 163 int (*func)(unsigned long, unsigned long, void *)) 167 164 { 168 - return (*func)(start_pfn, nr_pages, arg); 165 + struct lmb_property res; 166 + unsigned long pfn, len; 167 + u64 end; 168 + int ret = -1; 169 + 170 + res.base = (u64) start_pfn << PAGE_SHIFT; 171 + res.size = (u64) nr_pages << PAGE_SHIFT; 172 + 173 + end = res.base + res.size - 1; 174 + while ((res.base < end) && (lmb_find(&res) >= 0)) { 175 + pfn = (unsigned long)(res.base >> PAGE_SHIFT); 176 + len = (unsigned long)(res.size >> PAGE_SHIFT); 177 + ret = (*func)(pfn, len, arg); 178 + if (ret) 179 + break; 180 + res.base += (res.size + 1); 181 + res.size = (end - res.base + 1); 182 + } 183 + return ret; 169 184 } 185 + EXPORT_SYMBOL_GPL(walk_memory_resource); 170 186 171 187 #endif /* CONFIG_MEMORY_HOTPLUG */ 172 188
+1
include/linux/lmb.h
··· 54 54 extern u64 __init lmb_end_of_DRAM(void); 55 55 extern void __init lmb_enforce_memory_limit(u64 memory_limit); 56 56 extern int __init lmb_is_reserved(u64 addr); 57 + extern int lmb_find(struct lmb_property *res); 57 58 58 59 extern void lmb_dump_all(void); 59 60
+33
lib/lmb.c
··· 474 474 } 475 475 return 0; 476 476 } 477 + 478 + /* 479 + * Given a <base, len>, find which memory regions belong to this range. 480 + * Adjust the request and return a contiguous chunk. 481 + */ 482 + int lmb_find(struct lmb_property *res) 483 + { 484 + int i; 485 + u64 rstart, rend; 486 + 487 + rstart = res->base; 488 + rend = rstart + res->size - 1; 489 + 490 + for (i = 0; i < lmb.memory.cnt; i++) { 491 + u64 start = lmb.memory.region[i].base; 492 + u64 end = start + lmb.memory.region[i].size - 1; 493 + 494 + if (start > rend) 495 + return -1; 496 + 497 + if ((end >= rstart) && (start < rend)) { 498 + /* adjust the request */ 499 + if (rstart < start) 500 + rstart = start; 501 + if (rend > end) 502 + rend = end; 503 + res->base = rstart; 504 + res->size = rend - rstart + 1; 505 + return 0; 506 + } 507 + } 508 + return -1; 509 + }