Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/arm/mm/mmu.c at v2.6.36-rc7 (1088 lines, 30 kB)
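Editorial note, not part of the file: mmu.c registers several boot-time hooks via early_param() and __setup(), namely cachepolicy=, nocache, nowb, ecc=, vmalloc= and noalign. As a hedged illustration (the values are placeholders), a command-line fragment such as the one below asks early_vmalloc() for a 256 MB vmalloc area, which the handler clamps to between 16 MB and VMALLOC_END - (PAGE_OFFSET + 32 MB), and sets ecc_mask to PMD_PROTECTION so that build_mem_type_table() folds it into the kernel memory mappings:

    vmalloc=256M ecc=on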
/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/fs.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG |
				  L_PTE_DIRTY | L_PTE_WRITE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_IO,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
#ifdef CONFIG_SMP
	cachepolicy = CPOLICY_WRITEALLOC;
#endif

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
	/*
	 * Only use write-through for non-SMP systems
	 */
	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
#endif
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

phys_addr_t lowmem_end_addr;

static void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) > vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (__va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
			       "to -%.8lx (vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1,
			       bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
#ifdef CONFIG_SMP
		} else if (tlb_ops_need_broadcast()) {
			/*
			 * kmap_high needs to occasionally flush TLB entries,
			 * however, if the TLB entries need to be broadcast
			 * we may deadlock:
			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
			 *  flush_tlb_kernel_range->smp_call_function_many
			 *  (must not be called with irqs off)
			 */
			reason = "without hardware TLB ops broadcasting";
#endif
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static inline void map_memory_bank(struct membank *bank)
{
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
}

static void __init map_lowmem(void)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* Map all the lowmem memory banks. */
	for (i = 0; i < mi->nr_banks; i++) {
		struct membank *bank = &mi->bank[i];

		if (!bank->highmem)
			map_memory_bank(bank);
	}
}

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need access to user-mode page tables here. For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	pgd = current->active_mm->pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}
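Editorial addendum, not part of mmu.c: the map_desc entries consumed by create_mapping() above normally come from a board file's map_io callback, which hands a static table to iotable_init(). The sketch below shows that pattern under stated assumptions; example_io_desc, example_map_io and the EXAMPLE_UART_* addresses are invented placeholders, while struct map_desc, __phys_to_pfn(), MT_DEVICE and iotable_init() are the real interfaces declared in <asm/mach/map.h> and used in this file.

/*
 * Illustrative sketch only: a hypothetical board file describing one
 * fixed device window.  All addresses and names are placeholders.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

#define EXAMPLE_UART_PHYS	0x10090000UL	/* hypothetical SoC UART base */
#define EXAMPLE_UART_VIRT	0xfeb00000UL	/* hypothetical fixed virtual address */

static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= EXAMPLE_UART_VIRT,
		.pfn		= __phys_to_pfn(EXAMPLE_UART_PHYS),
		.length		= SZ_4K,
		.type		= MT_DEVICE,	/* strongly ordered / shared device */
	},
};

/* Called from the machine_desc ->map_io hook during devicemaps_init(). */
static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}

In 2.6.36 this callback is usually wired up through a MACHINE_START()/MACHINE_END machine_desc block, whose .map_io member devicemaps_init() invokes via mdesc->map_io() as seen above.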