arch/alpha/mm/numa.c at v2.6.18 (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git)
/*
 *  linux/arch/alpha/mm/numa.c
 *
 *  DISCONTIGMEM NUMA alpha support.
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/initrd.h>
#include <linux/pfn.h>

#include <asm/hwrpb.h>
#include <asm/pgalloc.h>

pg_data_t node_data[MAX_NUMNODES];
bootmem_data_t node_bdata[MAX_NUMNODES];

#undef DEBUG_DISCONTIG
#ifdef DEBUG_DISCONTIG
#define DBGDCONT(args...) printk(args)
#else
#define DBGDCONT(args...)
#endif

#define for_each_mem_cluster(memdesc, cluster, i)		\
	for ((cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)

static void __init show_mem_layout(void)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	int i;

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);

	printk("Raw memory layout:\n");
	for_each_mem_cluster(memdesc, cluster, i) {
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);
	}
}

static void __init
setup_memory_node(int nid, void *kernel_end)
{
	extern unsigned long mem_size_limit;
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long bootmap_size, bootmap_pages, bootmap_start;
	unsigned long start, end;
	unsigned long node_pfn_start, node_pfn_end;
	unsigned long node_min_pfn, node_max_pfn;
	int i;
	unsigned long node_datasz = PFN_UP(sizeof(pg_data_t));
	int show_init = 0;

	/* Find the bounds of current node */
	node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
	node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);

	/* find the bounds of this node (node_min_pfn/node_max_pfn) */
	node_min_pfn = ~0UL;
	node_max_pfn = 0UL;
	for_each_mem_cluster(memdesc, cluster, i) {
		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;

		if (start >= node_pfn_end || end <= node_pfn_start)
			continue;

		if (!show_init) {
			show_init = 1;
			printk("Initializing bootmem allocator on Node ID %d\n", nid);
		}
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		if (start < node_pfn_start)
			start = node_pfn_start;
		if (end > node_pfn_end)
			end = node_pfn_end;

		if (start < node_min_pfn)
			node_min_pfn = start;
		if (end > node_max_pfn)
			node_max_pfn = end;
	}

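	/* Clamp this node to the global memory-size cap, if one is set
	   (mem_size_limit is provided by arch setup code, e.g. from a
	   mem= boot option).  */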
	if (mem_size_limit && node_max_pfn > mem_size_limit) {
		static int msg_shown = 0;
		if (!msg_shown) {
			msg_shown = 1;
			printk("setup: forcing memory size to %ldK (from %ldK).\n",
			       mem_size_limit << (PAGE_SHIFT - 10),
			       node_max_pfn << (PAGE_SHIFT - 10));
		}
		node_max_pfn = mem_size_limit;
	}

	if (node_min_pfn >= node_max_pfn)
		return;

	/* Update global {min,max}_low_pfn from node information. */
	if (node_min_pfn < min_low_pfn)
		min_low_pfn = node_min_pfn;
	if (node_max_pfn > max_low_pfn)
		max_pfn = max_low_pfn = node_max_pfn;

	num_physpages += node_max_pfn - node_min_pfn;

#if 0 /* we'll try this one again in a little while */
	/* Cute trick to make sure our local node data is on local memory */
	node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
#endif
	/* Quasi-mark the pg_data_t as in-use */
	node_min_pfn += node_datasz;
	if (node_min_pfn >= node_max_pfn) {
		printk(" not enough mem to reserve NODE_DATA\n");
		return;
	}
	NODE_DATA(nid)->bdata = &node_bdata[nid];

	printk(" Detected node memory: start %8lu, end %8lu\n",
	       node_min_pfn, node_max_pfn);

	DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
	DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata);

	/* Find the bounds of kernel memory.  */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
	bootmap_start = -1;

	if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
		panic("kernel loaded out of ram");

	/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
	   Note that we round this down, not up - node memory
	   has much larger alignment than 8Mb, so it's safe.  */
	node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);

	/* We need to know how many physically contiguous pages
	   we'll need for the bootmap.  */
	bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn);

	/* Now find a good region where to allocate the bootmap.  */
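	/* The chosen range must not overlap the kernel image: a cluster
	   that contains the kernel may still have enough room either
	   below start_kernel_pfn or above end_kernel_pfn.  */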
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;

		if (start >= node_max_pfn || end <= node_min_pfn)
			continue;

		if (end > node_max_pfn)
			end = node_max_pfn;
		if (start < node_min_pfn)
			start = node_min_pfn;

		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn
			    && end - end_kernel_pfn >= bootmap_pages) {
				bootmap_start = end_kernel_pfn;
				break;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (end - start >= bootmap_pages) {
			bootmap_start = start;
			break;
		}
	}

	if (bootmap_start == -1)
		panic("couldn't find a contiguous place for the bootmap");

	/* Allocate the bootmap and mark the whole MM as reserved.  */
	bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start,
					 node_min_pfn, node_max_pfn);
	DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n",
		 bootmap_start, bootmap_size, bootmap_pages);

	/* Mark the free regions.  */
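	/* Second pass over the clusters: clip each free range to this
	   node, step around the kernel image, and hand the remainder
	   back to the bootmem allocator.  */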
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = cluster->start_pfn + cluster->numpages;

		if (start >= node_max_pfn || end <= node_min_pfn)
			continue;

		if (end > node_max_pfn)
			end = node_max_pfn;
		if (start < node_min_pfn)
			start = node_min_pfn;

		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn) {
				free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start),
						  (PFN_PHYS(start_kernel_pfn)
						   - PFN_PHYS(start)));
				printk(" freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
				start = end_kernel_pfn;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (start >= end)
			continue;

		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
		printk(" freeing pages %ld:%ld\n", start, end);
	}

	/* Reserve the bootmap memory.  */
	reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), bootmap_size);
	printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));

	node_set_online(nid);
}

void __init
setup_memory(void *kernel_end)
{
	int nid;

	show_mem_layout();

	nodes_clear(node_online_map);

	min_low_pfn = ~0UL;
	max_low_pfn = 0UL;
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		setup_memory_node(nid, kernel_end);

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		extern void *move_initrd(unsigned long);

		initrd_end = initrd_start+INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			nid = kvaddr_to_nid(initrd_start);
			reserve_bootmem_node(NODE_DATA(nid),
					     virt_to_phys((void *)initrd_start),
					     INITRD_SIZE);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}

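/* Size the DMA and NORMAL zones for every online node and hand them
   to free_area_init_node(); everything below dma_local_pfn within a
   node goes into ZONE_DMA, the rest into ZONE_NORMAL.  */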
void __init paging_init(void)
{
	unsigned int nid;
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned long dma_local_pfn;

	/*
	 * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
	 * in the NUMA model, for now we convert it to a pfn and
	 * we interpret this pfn as a local per-node information.
	 * This issue isn't very important since none of these machines
	 * have legacy ISA slots anyways.
	 */
	dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	for_each_online_node(nid) {
		unsigned long start_pfn = node_bdata[nid].node_boot_start >> PAGE_SHIFT;
		unsigned long end_pfn = node_bdata[nid].node_low_pfn;

		if (dma_local_pfn >= end_pfn - start_pfn)
			zones_size[ZONE_DMA] = end_pfn - start_pfn;
		else {
			zones_size[ZONE_DMA] = dma_local_pfn;
			zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
		}
		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, NULL);
	}

	/* Initialize the kernel's ZERO_PGE.  */
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize, pfn;
	extern int page_is_ram(unsigned long) __init;
	extern char _text, _etext, _data, _edata;
	extern char __init_begin, __init_end;
	unsigned long nid, i;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	reservedpages = 0;
	for_each_online_node(nid) {
		/*
		 * This will free up the bootmem, ie, slot 0 memory
		 */
		totalram_pages += free_all_bootmem_node(NODE_DATA(nid));

		pfn = NODE_DATA(nid)->node_start_pfn;
		for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
			if (page_is_ram(pfn) &&
			    PageReserved(nid_page_nr(nid, i)))
				reservedpages++;
	}

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_data;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, "
	       "%luk data, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
#if 0
	mem_stress();
#endif
}

void
show_mem(void)
{
	long i, free = 0, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	int nid;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_node(nid) {
		unsigned long flags;
		pgdat_resize_lock(NODE_DATA(nid), &flags);
		i = node_spanned_pages(nid);
		while (i-- > 0) {
			struct page *page = nid_page_nr(nid, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(NODE_DATA(nid), &flags);
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld free pages\n", free);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}