Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/arm/mm/mm-armv.c at 77b2555b52a894a2e39a42e43d993df875c46a6a (707 lines, 18 kB)
/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};

/*
 * These are useful for identifing cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);

		spin_unlock(&mm->page_table_lock);
	}

	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
	pmd_clear(pmd_off_k(virt));
}

struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	cp = &cache_policies[cachepolicy];
	user_pgprot = cp->pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;
	}

	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	long off;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08lx at 0x%08lx in user region\n",
		       md->physical, md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       md->physical, md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	virt   = md->virtual;
	off    = md->physical - virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       md->physical, md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
		/* Align to supersection boundary */
		while ((virt & ~SUPERSECTION_MASK || (virt + off) &
			~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
			alloc_init_section(virt, virt + off, prot_sect);

			virt   += (PGDIR_SIZE / 2);
			length -= (PGDIR_SIZE / 2);
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}

extern void _stext, _etext;

/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
	struct map_desc *init_maps, *p, *q;
	unsigned long address = 0;
	int i;

	build_mem_type_table();

	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
	p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
	p->virtual    = (unsigned long)&_stext & PMD_MASK;
	p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
	p->type       = MT_ROM;
	p ++;
#endif

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		p->physical   = mi->bank[i].start;
		p->virtual    = __phys_to_virt(p->physical);
		p->length     = mi->bank[i].size;
		p->type       = MT_MEMORY;
		p ++;
	}

#ifdef FLUSH_BASE
	p->physical   = FLUSH_BASE_PHYS;
	p->virtual    = FLUSH_BASE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_CACHECLEAN;
	p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
	p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
	p->virtual    = FLUSH_BASE_MINICACHE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_MINICLEAN;
	p ++;
#endif

	/*
	 * Go through the initial mappings, but clear out any
	 * pgdir entries that are not in the description.
	 */
	q = init_maps;
	do {
		if (address < q->virtual || q == p) {
			clear_mapping(address);
			address += PGDIR_SIZE;
		} else {
			create_mapping(q);

			address = q->virtual + q->length;
			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

			q ++;
		}
	} while (address != 0);

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	init_maps->physical   = virt_to_phys(init_maps);
	init_maps->virtual    = 0xffff0000;
	init_maps->length     = PAGE_SIZE;
	init_maps->type       = MT_HIGH_VECTORS;
	create_mapping(init_maps);

	if (!vectors_high()) {
		init_maps->virtual = 0;
		init_maps->type = MT_LOW_VECTORS;
		create_mapping(init_maps);
	}

	flush_cache_all();
	local_flush_tlb_all();

	top_pmd = pmd_off_k(0xffff0000);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
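For context on how the last routine is consumed: a machine/board support file of this era typically declares a static table of struct map_desc entries (the same physical/virtual/length/type fields filled in by memtable_init() above) and hands it to iotable_init() from its map_io hook. The sketch below is illustrative only and is not part of mm-armv.c; the board name, addresses, and sizes are invented for the example.

/*
 * Hypothetical usage sketch (not from this file): a board file statically
 * describes one uncached 1 MB device window and registers it at init time.
 * The exampleboard_* names and the addresses are assumptions.
 */
static struct map_desc exampleboard_io_desc[] __initdata = {
	{
		.physical = 0x40000000,		/* example peripheral bus base */
		.virtual  = 0xf4000000,		/* example fixed kernel virtual address */
		.length   = 0x00100000,		/* 1 MB window */
		.type     = MT_DEVICE,		/* uncached, DOMAIN_IO, per mem_types[] */
	},
};

static void __init exampleboard_map_io(void)
{
	iotable_init(exampleboard_io_desc, ARRAY_SIZE(exampleboard_io_desc));
}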