Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/arm/mm/mm-armv.c at tag v2.6.12 (772 lines, 19 kB)
/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED        0
#define CPOLICY_BUFFERED        1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK       3
#define CPOLICY_WRITEALLOC      4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = 0,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = PTE_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = PTE_CACHEABLE,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
        }
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        flush_cache_all();
        set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR     (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
        return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
        return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        init_pgd = pgd_offset_k(0);

        if (!vectors_high()) {
                /*
                 * This lock is here just to satisfy pmd_alloc and pte_lock
                 */
                spin_lock(&mm->page_table_lock);

                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset_map_nested(init_pmd, 0);
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);

                spin_unlock(&mm->page_table_lock);
        }

        /*
         * Copy over the kernel and IO PGD entries
         */
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
               (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        return new_pgd;

no_pte:
        spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pmd:
        spin_unlock(&mm->page_table_lock);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pgd:
        return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
        pmd_t *pmd;
        struct page *pte;

        if (!pgd)
                return;

        /* pgd is always present and good */
        pmd = pmd_off(pgd, 0);
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto free;
        }

        pte = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_page_state(nr_page_table_pages);
        pte_free(pte);
        pmd_free(pmd);
free:
        free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
        pmd_t *pmdp = pmd_off_k(virt);

        if (virt & (1 << 20))
                pmdp++;

        *pmdp = __pmd(phys | prot);
        flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
        int i;

        for (i = 0; i < 16; i += 1) {
                alloc_init_section(virt, phys & SUPERSECTION_MASK,
                                   prot | PMD_SECT_SUPER);

                virt += (PGDIR_SIZE / 2);
                phys += (PGDIR_SIZE / 2);
        }
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
        pmd_t *pmdp = pmd_off_k(virt);
        pte_t *ptep;

        if (pmd_none(*pmdp)) {
                unsigned long pmdval;
                ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
                                               sizeof(pte_t));

                pmdval = __pa(ptep) | prot_l1;
                pmdp[0] = __pmd(pmdval);
                pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
                flush_pmd_entry(pmdp);
        }
        ptep = pte_offset_kernel(pmdp, virt);

        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
        pmd_clear(pmd_off_k(virt));
}

struct mem_types {
        unsigned int    prot_pte;
        unsigned int    prot_l1;
        unsigned int    prot_sect;
        unsigned int    domain;
};

static struct mem_types mem_types[] __initdata = {
        [MT_DEVICE] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
        }
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        int cpu_arch = cpu_architecture();
        int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
        if (cachepolicy > CPOLICY_BUFFERED)
                cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
        if (cachepolicy > CPOLICY_WRITETHROUGH)
                cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }

        if (cpu_arch <= CPU_ARCH_ARMv5) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * bit 4 becomes XN which we must clear for the
                 * kernel memory mapping.
                 */
                mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
                mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
        }

        cp = &cache_policies[cachepolicy];

        if (cpu_arch >= CPU_ARCH_ARMv5) {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
        } else {
                mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
                mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
        }

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
                protection_map[i] = __pgprot(v);
        }

        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | cp->pte);

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);
}

#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
        unsigned long virt, length;
        int prot_sect, prot_l1, domain;
        pgprot_t prot_pte;
        long off;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08lx at 0x%08lx in user region\n",
                       md->physical, md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       md->physical, md->virtual);
        }

        domain    = mem_types[md->type].domain;
        prot_pte  = __pgprot(mem_types[md->type].prot_pte);
        prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
        prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

        virt   = md->virtual;
        off    = md->physical - virt;
        length = md->length;

        if (mem_types[md->type].prot_l1 == 0 &&
            (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       md->physical, md->virtual);
                return;
        }

        while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to insure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
                /* Align to supersection boundary */
                while ((virt & ~SUPERSECTION_MASK || (virt + off) &
                        ~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
                        alloc_init_section(virt, virt + off, prot_sect);

                        virt   += (PGDIR_SIZE / 2);
                        length -= (PGDIR_SIZE / 2);
                }

                while (length >= SUPERSECTION_SIZE) {
                        alloc_init_supersection(virt, virt + off, prot_sect);

                        virt   += SUPERSECTION_SIZE;
                        length -= SUPERSECTION_SIZE;
                }
        }

        /*
         * A section mapping covers half a "pgdir" entry.
         */
        while (length >= (PGDIR_SIZE / 2)) {
                alloc_init_section(virt, virt + off, prot_sect);

                virt   += (PGDIR_SIZE / 2);
                length -= (PGDIR_SIZE / 2);
        }

        while (length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, prot_l1, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
        unsigned long pmdval;
        pgd_t *pgd;
        pmd_t *pmd;
        int i;
        int cpu_arch = cpu_architecture();

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
                pmdval = (i << PGDIR_SHIFT) |
                         PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
                         PMD_TYPE_SECT;
                if (cpu_arch <= CPU_ARCH_ARMv5)
                        pmdval |= PMD_BIT4;
                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}

extern void _stext, _etext;

/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
        struct map_desc *init_maps, *p, *q;
        unsigned long address = 0;
        int i;

        build_mem_type_table();

        init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
        p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
        p->virtual    = (unsigned long)&_stext & PMD_MASK;
        p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
        p->type       = MT_ROM;
        p ++;
#endif

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                p->physical   = mi->bank[i].start;
                p->virtual    = __phys_to_virt(p->physical);
                p->length     = mi->bank[i].size;
                p->type       = MT_MEMORY;
                p ++;
        }

#ifdef FLUSH_BASE
        p->physical   = FLUSH_BASE_PHYS;
        p->virtual    = FLUSH_BASE;
        p->length     = PGDIR_SIZE;
        p->type       = MT_CACHECLEAN;
        p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
        p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
        p->virtual    = FLUSH_BASE_MINICACHE;
        p->length     = PGDIR_SIZE;
        p->type       = MT_MINICLEAN;
        p ++;
#endif

        /*
         * Go through the initial mappings, but clear out any
         * pgdir entries that are not in the description.
         */
        q = init_maps;
        do {
                if (address < q->virtual || q == p) {
                        clear_mapping(address);
                        address += PGDIR_SIZE;
                } else {
                        create_mapping(q);

                        address = q->virtual + q->length;
                        address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

                        q ++;
                }
        } while (address != 0);

        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
        init_maps->physical   = virt_to_phys(init_maps);
        init_maps->virtual    = 0xffff0000;
        init_maps->length     = PAGE_SIZE;
        init_maps->type       = MT_HIGH_VECTORS;
        create_mapping(init_maps);

        if (!vectors_high()) {
                init_maps->virtual = 0;
                init_maps->type = MT_LOW_VECTORS;
                create_mapping(init_maps);
        }

        flush_cache_all();
        flush_tlb_all();

        top_pmd = pmd_off_k(0xffff0000);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn);
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order.  This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0 || mi->bank[i].node != node)
                        continue;

                bank_start = mi->bank[i].start >> PAGE_SHIFT;
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks.  "
                                "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
                                           mi->bank[i].size) >> PAGE_SHIFT;
        }
}

/*
 * The mem_map array can get very big.  Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
        int node;

        for_each_online_node(node)
                free_unused_memmap_node(node, mi);
}
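
For context, here is a minimal sketch, not part of mm-armv.c, of how a machine port of this era might call the iotable_init() interface defined above: it fills a map_desc table (the same structure memtable_init() builds for RAM banks) and passes it in from its map_io hook. The example_ names and the physical/virtual addresses below are made up for illustration; a section-aligned, 1 MB length lets create_mapping() use section entries rather than page tables.

/*
 * Illustrative sketch only -- not part of this file.  A hypothetical
 * board support file would describe its fixed device windows like this
 * and hand the table to iotable_init().  All field values are invented.
 */
static struct map_desc example_io_desc[] __initdata = {
        {
                .physical = 0x40000000,         /* hypothetical on-chip peripheral block */
                .virtual  = 0xf8000000,         /* chosen above VMALLOC_END */
                .length   = 0x00100000,         /* 1 MB, so a section mapping is used */
                .type     = MT_DEVICE,
        },
};

static void __init example_map_io(void)
{
        iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}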