Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.4-rc7 1085 lines 32 kB view raw
1/* 2 * Copyright (C) 1995 Linus Torvalds 3 * Copyright 2010 Tilera Corporation. All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation, version 2. 8 * 9 * This program is distributed in the hope that it will be useful, but 10 * WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 12 * NON INFRINGEMENT. See the GNU General Public License for 13 * more details. 14 */ 15 16#include <linux/module.h> 17#include <linux/signal.h> 18#include <linux/sched.h> 19#include <linux/kernel.h> 20#include <linux/errno.h> 21#include <linux/string.h> 22#include <linux/types.h> 23#include <linux/ptrace.h> 24#include <linux/mman.h> 25#include <linux/mm.h> 26#include <linux/hugetlb.h> 27#include <linux/swap.h> 28#include <linux/smp.h> 29#include <linux/init.h> 30#include <linux/highmem.h> 31#include <linux/pagemap.h> 32#include <linux/poison.h> 33#include <linux/bootmem.h> 34#include <linux/slab.h> 35#include <linux/proc_fs.h> 36#include <linux/efi.h> 37#include <linux/memory_hotplug.h> 38#include <linux/uaccess.h> 39#include <asm/mmu_context.h> 40#include <asm/processor.h> 41#include <asm/pgtable.h> 42#include <asm/pgalloc.h> 43#include <asm/dma.h> 44#include <asm/fixmap.h> 45#include <asm/tlb.h> 46#include <asm/tlbflush.h> 47#include <asm/sections.h> 48#include <asm/setup.h> 49#include <asm/homecache.h> 50#include <hv/hypervisor.h> 51#include <arch/chip.h> 52 53#include "migrate.h" 54 55#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0)) 56 57#ifndef __tilegx__ 58unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE; 59EXPORT_SYMBOL(VMALLOC_RESERVE); 60#endif 61 62/* Create an L2 page table */ 63static pte_t * __init alloc_pte(void) 64{ 65 return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0); 66} 67 68/* 69 * L2 page tables per controller. We allocate these all at once from 70 * the bootmem allocator and store them here. This saves on kernel L2 71 * page table memory, compared to allocating a full 64K page per L2 72 * page table, and also means that in cases where we use huge pages, 73 * we are guaranteed to later be able to shatter those huge pages and 74 * switch to using these page tables instead, without requiring 75 * further allocation. Each l2_ptes[] entry points to the first page 76 * table for the first hugepage-size piece of memory on the 77 * controller; other page tables are just indexed directly, i.e. the 78 * L2 page tables are contiguous in memory for each controller. 79 */ 80static pte_t *l2_ptes[MAX_NUMNODES]; 81static int num_l2_ptes[MAX_NUMNODES]; 82 83static void init_prealloc_ptes(int node, int pages) 84{ 85 BUG_ON(pages & (HV_L2_ENTRIES-1)); 86 if (pages) { 87 num_l2_ptes[node] = pages; 88 l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t), 89 HV_PAGE_TABLE_ALIGN, 0); 90 } 91} 92 93pte_t *get_prealloc_pte(unsigned long pfn) 94{ 95 int node = pfn_to_nid(pfn); 96 pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)); 97 BUG_ON(node >= MAX_NUMNODES); 98 BUG_ON(pfn >= num_l2_ptes[node]); 99 return &l2_ptes[node][pfn]; 100} 101 102/* 103 * What caching do we expect pages from the heap to have when 104 * they are allocated during bootup? (Once we've installed the 105 * "real" swapper_pg_dir.) 106 */ 107static int initial_heap_home(void) 108{ 109#if CHIP_HAS_CBOX_HOME_MAP() 110 if (hash_default) 111 return PAGE_HOME_HASH; 112#endif 113 return smp_processor_id(); 114} 115 116/* 117 * Place a pointer to an L2 page table in a middle page 118 * directory entry. 119 */ 120static void __init assign_pte(pmd_t *pmd, pte_t *page_table) 121{ 122 phys_addr_t pa = __pa(page_table); 123 unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN; 124 pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn); 125 BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0); 126 pteval = pte_set_home(pteval, initial_heap_home()); 127 *(pte_t *)pmd = pteval; 128 if (page_table != (pte_t *)pmd_page_vaddr(*pmd)) 129 BUG(); 130} 131 132#ifdef __tilegx__ 133 134#if HV_L1_SIZE != HV_L2_SIZE 135# error Rework assumption that L1 and L2 page tables are same size. 136#endif 137 138/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */ 139static inline pmd_t *alloc_pmd(void) 140{ 141 return (pmd_t *)alloc_pte(); 142} 143 144static inline void assign_pmd(pud_t *pud, pmd_t *pmd) 145{ 146 assign_pte((pmd_t *)pud, (pte_t *)pmd); 147} 148 149#endif /* __tilegx__ */ 150 151/* Replace the given pmd with a full PTE table. */ 152void __init shatter_pmd(pmd_t *pmd) 153{ 154 pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd)); 155 assign_pte(pmd, pte); 156} 157 158#ifdef CONFIG_HIGHMEM 159/* 160 * This function initializes a certain range of kernel virtual memory 161 * with new bootmem page tables, everywhere page tables are missing in 162 * the given range. 163 */ 164 165/* 166 * NOTE: The pagetables are allocated contiguous on the physical space 167 * so we can cache the place of the first one and move around without 168 * checking the pgd every time. 169 */ 170static void __init page_table_range_init(unsigned long start, 171 unsigned long end, pgd_t *pgd_base) 172{ 173 pgd_t *pgd; 174 int pgd_idx; 175 unsigned long vaddr; 176 177 vaddr = start; 178 pgd_idx = pgd_index(vaddr); 179 pgd = pgd_base + pgd_idx; 180 181 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { 182 pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr); 183 if (pmd_none(*pmd)) 184 assign_pte(pmd, alloc_pte()); 185 vaddr += PMD_SIZE; 186 } 187} 188#endif /* CONFIG_HIGHMEM */ 189 190 191#if CHIP_HAS_CBOX_HOME_MAP() 192 193static int __initdata ktext_hash = 1; /* .text pages */ 194static int __initdata kdata_hash = 1; /* .data and .bss pages */ 195int __write_once hash_default = 1; /* kernel allocator pages */ 196EXPORT_SYMBOL(hash_default); 197int __write_once kstack_hash = 1; /* if no homecaching, use h4h */ 198#endif /* CHIP_HAS_CBOX_HOME_MAP */ 199 200/* 201 * CPUs to use to for striping the pages of kernel data. If hash-for-home 202 * is available, this is only relevant if kcache_hash sets up the 203 * .data and .bss to be page-homed, and we don't want the default mode 204 * of using the full set of kernel cpus for the striping. 205 */ 206static __initdata struct cpumask kdata_mask; 207static __initdata int kdata_arg_seen; 208 209int __write_once kdata_huge; /* if no homecaching, small pages */ 210 211 212/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */ 213static pgprot_t __init construct_pgprot(pgprot_t prot, int home) 214{ 215 prot = pte_set_home(prot, home); 216#if CHIP_HAS_CBOX_HOME_MAP() 217 if (home == PAGE_HOME_IMMUTABLE) { 218 if (ktext_hash) 219 prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3); 220 else 221 prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3); 222 } 223#endif 224 return prot; 225} 226 227/* 228 * For a given kernel data VA, how should it be cached? 229 * We return the complete pgprot_t with caching bits set. 230 */ 231static pgprot_t __init init_pgprot(ulong address) 232{ 233 int cpu; 234 unsigned long page; 235 enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; 236 237#if CHIP_HAS_CBOX_HOME_MAP() 238 /* For kdata=huge, everything is just hash-for-home. */ 239 if (kdata_huge) 240 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); 241#endif 242 243 /* We map the aliased pages of permanent text inaccessible. */ 244 if (address < (ulong) _sinittext - CODE_DELTA) 245 return PAGE_NONE; 246 247 /* 248 * We map read-only data non-coherent for performance. We could 249 * use neighborhood caching on TILE64, but it's not clear it's a win. 250 */ 251 if ((address >= (ulong) __start_rodata && 252 address < (ulong) __end_rodata) || 253 address == (ulong) empty_zero_page) { 254 return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE); 255 } 256 257#ifndef __tilegx__ 258#if !ATOMIC_LOCKS_FOUND_VIA_TABLE() 259 /* Force the atomic_locks[] array page to be hash-for-home. */ 260 if (address == (ulong) atomic_locks) 261 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); 262#endif 263#endif 264 265 /* 266 * Everything else that isn't data or bss is heap, so mark it 267 * with the initial heap home (hash-for-home, or this cpu). This 268 * includes any addresses after the loaded image and any address before 269 * _einitdata, since we already captured the case of text before 270 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata). 271 * 272 * All the LOWMEM pages that we mark this way will get their 273 * struct page homecache properly marked later, in set_page_homes(). 274 * The HIGHMEM pages we leave with a default zero for their 275 * homes, but with a zero free_time we don't have to actually 276 * do a flush action the first time we use them, either. 277 */ 278 if (address >= (ulong) _end || address < (ulong) _einitdata) 279 return construct_pgprot(PAGE_KERNEL, initial_heap_home()); 280 281#if CHIP_HAS_CBOX_HOME_MAP() 282 /* Use hash-for-home if requested for data/bss. */ 283 if (kdata_hash) 284 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); 285#endif 286 287 /* 288 * Make the w1data homed like heap to start with, to avoid 289 * making it part of the page-striped data area when we're just 290 * going to convert it to read-only soon anyway. 291 */ 292 if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end) 293 return construct_pgprot(PAGE_KERNEL, initial_heap_home()); 294 295 /* 296 * Otherwise we just hand out consecutive cpus. To avoid 297 * requiring this function to hold state, we just walk forward from 298 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach 299 * the requested address, while walking cpu home around kdata_mask. 300 * This is typically no more than a dozen or so iterations. 301 */ 302 page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK; 303 BUG_ON(address < page || address >= (ulong)_end); 304 cpu = cpumask_first(&kdata_mask); 305 for (; page < address; page += PAGE_SIZE) { 306 if (page >= (ulong)&init_thread_union && 307 page < (ulong)&init_thread_union + THREAD_SIZE) 308 continue; 309 if (page == (ulong)empty_zero_page) 310 continue; 311#ifndef __tilegx__ 312#if !ATOMIC_LOCKS_FOUND_VIA_TABLE() 313 if (page == (ulong)atomic_locks) 314 continue; 315#endif 316#endif 317 cpu = cpumask_next(cpu, &kdata_mask); 318 if (cpu == NR_CPUS) 319 cpu = cpumask_first(&kdata_mask); 320 } 321 return construct_pgprot(PAGE_KERNEL, cpu); 322} 323 324/* 325 * This function sets up how we cache the kernel text. If we have 326 * hash-for-home support, normally that is used instead (see the 327 * kcache_hash boot flag for more information). But if we end up 328 * using a page-based caching technique, this option sets up the 329 * details of that. In addition, the "ktext=nocache" option may 330 * always be used to disable local caching of text pages, if desired. 331 */ 332 333static int __initdata ktext_arg_seen; 334static int __initdata ktext_small; 335static int __initdata ktext_local; 336static int __initdata ktext_all; 337static int __initdata ktext_nondataplane; 338static int __initdata ktext_nocache; 339static struct cpumask __initdata ktext_mask; 340 341static int __init setup_ktext(char *str) 342{ 343 if (str == NULL) 344 return -EINVAL; 345 346 /* If you have a leading "nocache", turn off ktext caching */ 347 if (strncmp(str, "nocache", 7) == 0) { 348 ktext_nocache = 1; 349 pr_info("ktext: disabling local caching of kernel text\n"); 350 str += 7; 351 if (*str == ',') 352 ++str; 353 if (*str == '\0') 354 return 0; 355 } 356 357 ktext_arg_seen = 1; 358 359 /* Default setting on Tile64: use a huge page */ 360 if (strcmp(str, "huge") == 0) 361 pr_info("ktext: using one huge locally cached page\n"); 362 363 /* Pay TLB cost but get no cache benefit: cache small pages locally */ 364 else if (strcmp(str, "local") == 0) { 365 ktext_small = 1; 366 ktext_local = 1; 367 pr_info("ktext: using small pages with local caching\n"); 368 } 369 370 /* Neighborhood cache ktext pages on all cpus. */ 371 else if (strcmp(str, "all") == 0) { 372 ktext_small = 1; 373 ktext_all = 1; 374 pr_info("ktext: using maximal caching neighborhood\n"); 375 } 376 377 378 /* Neighborhood ktext pages on specified mask */ 379 else if (cpulist_parse(str, &ktext_mask) == 0) { 380 char buf[NR_CPUS * 5]; 381 cpulist_scnprintf(buf, sizeof(buf), &ktext_mask); 382 if (cpumask_weight(&ktext_mask) > 1) { 383 ktext_small = 1; 384 pr_info("ktext: using caching neighborhood %s " 385 "with small pages\n", buf); 386 } else { 387 pr_info("ktext: caching on cpu %s with one huge page\n", 388 buf); 389 } 390 } 391 392 else if (*str) 393 return -EINVAL; 394 395 return 0; 396} 397 398early_param("ktext", setup_ktext); 399 400 401static inline pgprot_t ktext_set_nocache(pgprot_t prot) 402{ 403 if (!ktext_nocache) 404 prot = hv_pte_set_nc(prot); 405#if CHIP_HAS_NC_AND_NOALLOC_BITS() 406 else 407 prot = hv_pte_set_no_alloc_l2(prot); 408#endif 409 return prot; 410} 411 412#ifndef __tilegx__ 413static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) 414{ 415 return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va); 416} 417#else 418static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) 419{ 420 pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va); 421 if (pud_none(*pud)) 422 assign_pmd(pud, alloc_pmd()); 423 return pmd_offset(pud, va); 424} 425#endif 426 427/* Temporary page table we use for staging. */ 428static pgd_t pgtables[PTRS_PER_PGD] 429 __attribute__((aligned(HV_PAGE_TABLE_ALIGN))); 430 431/* 432 * This maps the physical memory to kernel virtual address space, a total 433 * of max_low_pfn pages, by creating page tables starting from address 434 * PAGE_OFFSET. 435 * 436 * This routine transitions us from using a set of compiled-in large 437 * pages to using some more precise caching, including removing access 438 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START) 439 * marking read-only data as locally cacheable, striping the remaining 440 * .data and .bss across all the available tiles, and removing access 441 * to pages above the top of RAM (thus ensuring a page fault from a bad 442 * virtual address rather than a hypervisor shoot down for accessing 443 * memory outside the assigned limits). 444 */ 445static void __init kernel_physical_mapping_init(pgd_t *pgd_base) 446{ 447 unsigned long address, pfn; 448 pmd_t *pmd; 449 pte_t *pte; 450 int pte_ofs; 451 const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id()); 452 struct cpumask kstripe_mask; 453 int rc, i; 454 455#if CHIP_HAS_CBOX_HOME_MAP() 456 if (ktext_arg_seen && ktext_hash) { 457 pr_warning("warning: \"ktext\" boot argument ignored" 458 " if \"kcache_hash\" sets up text hash-for-home\n"); 459 ktext_small = 0; 460 } 461 462 if (kdata_arg_seen && kdata_hash) { 463 pr_warning("warning: \"kdata\" boot argument ignored" 464 " if \"kcache_hash\" sets up data hash-for-home\n"); 465 } 466 467 if (kdata_huge && !hash_default) { 468 pr_warning("warning: disabling \"kdata=huge\"; requires" 469 " kcache_hash=all or =allbutstack\n"); 470 kdata_huge = 0; 471 } 472#endif 473 474 /* 475 * Set up a mask for cpus to use for kernel striping. 476 * This is normally all cpus, but minus dataplane cpus if any. 477 * If the dataplane covers the whole chip, we stripe over 478 * the whole chip too. 479 */ 480 cpumask_copy(&kstripe_mask, cpu_possible_mask); 481 if (!kdata_arg_seen) 482 kdata_mask = kstripe_mask; 483 484 /* Allocate and fill in L2 page tables */ 485 for (i = 0; i < MAX_NUMNODES; ++i) { 486#ifdef CONFIG_HIGHMEM 487 unsigned long end_pfn = node_lowmem_end_pfn[i]; 488#else 489 unsigned long end_pfn = node_end_pfn[i]; 490#endif 491 unsigned long end_huge_pfn = 0; 492 493 /* Pre-shatter the last huge page to allow per-cpu pages. */ 494 if (kdata_huge) 495 end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT); 496 497 pfn = node_start_pfn[i]; 498 499 /* Allocate enough memory to hold L2 page tables for node. */ 500 init_prealloc_ptes(i, end_pfn - pfn); 501 502 address = (unsigned long) pfn_to_kaddr(pfn); 503 while (pfn < end_pfn) { 504 BUG_ON(address & (HPAGE_SIZE-1)); 505 pmd = get_pmd(pgtables, address); 506 pte = get_prealloc_pte(pfn); 507 if (pfn < end_huge_pfn) { 508 pgprot_t prot = init_pgprot(address); 509 *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot)); 510 for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; 511 pfn++, pte_ofs++, address += PAGE_SIZE) 512 pte[pte_ofs] = pfn_pte(pfn, prot); 513 } else { 514 if (kdata_huge) 515 printk(KERN_DEBUG "pre-shattered huge" 516 " page at %#lx\n", address); 517 for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; 518 pfn++, pte_ofs++, address += PAGE_SIZE) { 519 pgprot_t prot = init_pgprot(address); 520 pte[pte_ofs] = pfn_pte(pfn, prot); 521 } 522 assign_pte(pmd, pte); 523 } 524 } 525 } 526 527 /* 528 * Set or check ktext_map now that we have cpu_possible_mask 529 * and kstripe_mask to work with. 530 */ 531 if (ktext_all) 532 cpumask_copy(&ktext_mask, cpu_possible_mask); 533 else if (ktext_nondataplane) 534 ktext_mask = kstripe_mask; 535 else if (!cpumask_empty(&ktext_mask)) { 536 /* Sanity-check any mask that was requested */ 537 struct cpumask bad; 538 cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask); 539 cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask); 540 if (!cpumask_empty(&bad)) { 541 char buf[NR_CPUS * 5]; 542 cpulist_scnprintf(buf, sizeof(buf), &bad); 543 pr_info("ktext: not using unavailable cpus %s\n", buf); 544 } 545 if (cpumask_empty(&ktext_mask)) { 546 pr_warning("ktext: no valid cpus; caching on %d.\n", 547 smp_processor_id()); 548 cpumask_copy(&ktext_mask, 549 cpumask_of(smp_processor_id())); 550 } 551 } 552 553 address = MEM_SV_INTRPT; 554 pmd = get_pmd(pgtables, address); 555 pfn = 0; /* code starts at PA 0 */ 556 if (ktext_small) { 557 /* Allocate an L2 PTE for the kernel text */ 558 int cpu = 0; 559 pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC, 560 PAGE_HOME_IMMUTABLE); 561 562 if (ktext_local) { 563 if (ktext_nocache) 564 prot = hv_pte_set_mode(prot, 565 HV_PTE_MODE_UNCACHED); 566 else 567 prot = hv_pte_set_mode(prot, 568 HV_PTE_MODE_CACHE_NO_L3); 569 } else { 570 prot = hv_pte_set_mode(prot, 571 HV_PTE_MODE_CACHE_TILE_L3); 572 cpu = cpumask_first(&ktext_mask); 573 574 prot = ktext_set_nocache(prot); 575 } 576 577 BUG_ON(address != (unsigned long)_stext); 578 pte = NULL; 579 for (; address < (unsigned long)_einittext; 580 pfn++, address += PAGE_SIZE) { 581 pte_ofs = pte_index(address); 582 if (pte_ofs == 0) { 583 if (pte) 584 assign_pte(pmd++, pte); 585 pte = alloc_pte(); 586 } 587 if (!ktext_local) { 588 prot = set_remote_cache_cpu(prot, cpu); 589 cpu = cpumask_next(cpu, &ktext_mask); 590 if (cpu == NR_CPUS) 591 cpu = cpumask_first(&ktext_mask); 592 } 593 pte[pte_ofs] = pfn_pte(pfn, prot); 594 } 595 if (pte) 596 assign_pte(pmd, pte); 597 } else { 598 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC); 599 pteval = pte_mkhuge(pteval); 600#if CHIP_HAS_CBOX_HOME_MAP() 601 if (ktext_hash) { 602 pteval = hv_pte_set_mode(pteval, 603 HV_PTE_MODE_CACHE_HASH_L3); 604 pteval = ktext_set_nocache(pteval); 605 } else 606#endif /* CHIP_HAS_CBOX_HOME_MAP() */ 607 if (cpumask_weight(&ktext_mask) == 1) { 608 pteval = set_remote_cache_cpu(pteval, 609 cpumask_first(&ktext_mask)); 610 pteval = hv_pte_set_mode(pteval, 611 HV_PTE_MODE_CACHE_TILE_L3); 612 pteval = ktext_set_nocache(pteval); 613 } else if (ktext_nocache) 614 pteval = hv_pte_set_mode(pteval, 615 HV_PTE_MODE_UNCACHED); 616 else 617 pteval = hv_pte_set_mode(pteval, 618 HV_PTE_MODE_CACHE_NO_L3); 619 for (; address < (unsigned long)_einittext; 620 pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE) 621 *(pte_t *)(pmd++) = pfn_pte(pfn, pteval); 622 } 623 624 /* Set swapper_pgprot here so it is flushed to memory right away. */ 625 swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir); 626 627 /* 628 * Since we may be changing the caching of the stack and page 629 * table itself, we invoke an assembly helper to do the 630 * following steps: 631 * 632 * - flush the cache so we start with an empty slate 633 * - install pgtables[] as the real page table 634 * - flush the TLB so the new page table takes effect 635 */ 636 rc = flush_and_install_context(__pa(pgtables), 637 init_pgprot((unsigned long)pgtables), 638 __get_cpu_var(current_asid), 639 cpumask_bits(my_cpu_mask)); 640 BUG_ON(rc != 0); 641 642 /* Copy the page table back to the normal swapper_pg_dir. */ 643 memcpy(pgd_base, pgtables, sizeof(pgtables)); 644 __install_page_table(pgd_base, __get_cpu_var(current_asid), 645 swapper_pgprot); 646 647 /* 648 * We just read swapper_pgprot and thus brought it into the cache, 649 * with its new home & caching mode. When we start the other CPUs, 650 * they're going to reference swapper_pgprot via their initial fake 651 * VA-is-PA mappings, which cache everything locally. At that 652 * time, if it's in our cache with a conflicting home, the 653 * simulator's coherence checker will complain. So, flush it out 654 * of our cache; we're not going to ever use it again anyway. 655 */ 656 __insn_finv(&swapper_pgprot); 657} 658 659/* 660 * devmem_is_allowed() checks to see if /dev/mem access to a certain address 661 * is valid. The argument is a physical page number. 662 * 663 * On Tile, the only valid things for which we can just hand out unchecked 664 * PTEs are the kernel code and data. Anything else might change its 665 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs. 666 * Note that init_thread_union is released to heap soon after boot, 667 * so we include it in the init data. 668 * 669 * For TILE-Gx, we might want to consider allowing access to PA 670 * regions corresponding to PCI space, etc. 671 */ 672int devmem_is_allowed(unsigned long pagenr) 673{ 674 return pagenr < kaddr_to_pfn(_end) && 675 !(pagenr >= kaddr_to_pfn(&init_thread_union) || 676 pagenr < kaddr_to_pfn(_einitdata)) && 677 !(pagenr >= kaddr_to_pfn(_sinittext) || 678 pagenr <= kaddr_to_pfn(_einittext-1)); 679} 680 681#ifdef CONFIG_HIGHMEM 682static void __init permanent_kmaps_init(pgd_t *pgd_base) 683{ 684 pgd_t *pgd; 685 pud_t *pud; 686 pmd_t *pmd; 687 pte_t *pte; 688 unsigned long vaddr; 689 690 vaddr = PKMAP_BASE; 691 page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 692 693 pgd = swapper_pg_dir + pgd_index(vaddr); 694 pud = pud_offset(pgd, vaddr); 695 pmd = pmd_offset(pud, vaddr); 696 pte = pte_offset_kernel(pmd, vaddr); 697 pkmap_page_table = pte; 698} 699#endif /* CONFIG_HIGHMEM */ 700 701 702static void __init init_free_pfn_range(unsigned long start, unsigned long end) 703{ 704 unsigned long pfn; 705 struct page *page = pfn_to_page(start); 706 707 for (pfn = start; pfn < end; ) { 708 /* Optimize by freeing pages in large batches */ 709 int order = __ffs(pfn); 710 int count, i; 711 struct page *p; 712 713 if (order >= MAX_ORDER) 714 order = MAX_ORDER-1; 715 count = 1 << order; 716 while (pfn + count > end) { 717 count >>= 1; 718 --order; 719 } 720 for (p = page, i = 0; i < count; ++i, ++p) { 721 __ClearPageReserved(p); 722 /* 723 * Hacky direct set to avoid unnecessary 724 * lock take/release for EVERY page here. 725 */ 726 p->_count.counter = 0; 727 p->_mapcount.counter = -1; 728 } 729 init_page_count(page); 730 __free_pages(page, order); 731 totalram_pages += count; 732 733 page += count; 734 pfn += count; 735 } 736} 737 738static void __init set_non_bootmem_pages_init(void) 739{ 740 struct zone *z; 741 for_each_zone(z) { 742 unsigned long start, end; 743 int nid = z->zone_pgdat->node_id; 744 int idx = zone_idx(z); 745 746 start = z->zone_start_pfn; 747 if (start == 0) 748 continue; /* bootmem */ 749 end = start + z->spanned_pages; 750 if (idx == ZONE_NORMAL) { 751 BUG_ON(start != node_start_pfn[nid]); 752 start = node_free_pfn[nid]; 753 } 754#ifdef CONFIG_HIGHMEM 755 if (idx == ZONE_HIGHMEM) 756 totalhigh_pages += z->spanned_pages; 757#endif 758 if (kdata_huge) { 759 unsigned long percpu_pfn = node_percpu_pfn[nid]; 760 if (start < percpu_pfn && end > percpu_pfn) 761 end = percpu_pfn; 762 } 763#ifdef CONFIG_PCI 764 if (start <= pci_reserve_start_pfn && 765 end > pci_reserve_start_pfn) { 766 if (end > pci_reserve_end_pfn) 767 init_free_pfn_range(pci_reserve_end_pfn, end); 768 end = pci_reserve_start_pfn; 769 } 770#endif 771 init_free_pfn_range(start, end); 772 } 773} 774 775/* 776 * paging_init() sets up the page tables - note that all of lowmem is 777 * already mapped by head.S. 778 */ 779void __init paging_init(void) 780{ 781#ifdef CONFIG_HIGHMEM 782 unsigned long vaddr, end; 783#endif 784#ifdef __tilegx__ 785 pud_t *pud; 786#endif 787 pgd_t *pgd_base = swapper_pg_dir; 788 789 kernel_physical_mapping_init(pgd_base); 790 791#ifdef CONFIG_HIGHMEM 792 /* 793 * Fixed mappings, only the page table structure has to be 794 * created - mappings will be set by set_fixmap(): 795 */ 796 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; 797 end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; 798 page_table_range_init(vaddr, end, pgd_base); 799 permanent_kmaps_init(pgd_base); 800#endif 801 802#ifdef __tilegx__ 803 /* 804 * Since GX allocates just one pmd_t array worth of vmalloc space, 805 * we go ahead and allocate it statically here, then share it 806 * globally. As a result we don't have to worry about any task 807 * changing init_mm once we get up and running, and there's no 808 * need for e.g. vmalloc_sync_all(). 809 */ 810 BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END)); 811 pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START); 812 assign_pmd(pud, alloc_pmd()); 813#endif 814} 815 816 817/* 818 * Walk the kernel page tables and derive the page_home() from 819 * the PTEs, so that set_pte() can properly validate the caching 820 * of all PTEs it sees. 821 */ 822void __init set_page_homes(void) 823{ 824} 825 826static void __init set_max_mapnr_init(void) 827{ 828#ifdef CONFIG_FLATMEM 829 max_mapnr = max_low_pfn; 830#endif 831} 832 833void __init mem_init(void) 834{ 835 int codesize, datasize, initsize; 836 int i; 837#ifndef __tilegx__ 838 void *last; 839#endif 840 841#ifdef CONFIG_FLATMEM 842 BUG_ON(!mem_map); 843#endif 844 845#ifdef CONFIG_HIGHMEM 846 /* check that fixmap and pkmap do not overlap */ 847 if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) { 848 pr_err("fixmap and kmap areas overlap" 849 " - this will crash\n"); 850 pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n", 851 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), 852 FIXADDR_START); 853 BUG(); 854 } 855#endif 856 857 set_max_mapnr_init(); 858 859 /* this will put all bootmem onto the freelists */ 860 totalram_pages += free_all_bootmem(); 861 862 /* count all remaining LOWMEM and give all HIGHMEM to page allocator */ 863 set_non_bootmem_pages_init(); 864 865 codesize = (unsigned long)&_etext - (unsigned long)&_text; 866 datasize = (unsigned long)&_end - (unsigned long)&_sdata; 867 initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext; 868 initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata; 869 870 pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n", 871 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), 872 num_physpages << (PAGE_SHIFT-10), 873 codesize >> 10, 874 datasize >> 10, 875 initsize >> 10, 876 (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) 877 ); 878 879 /* 880 * In debug mode, dump some interesting memory mappings. 881 */ 882#ifdef CONFIG_HIGHMEM 883 printk(KERN_DEBUG " KMAP %#lx - %#lx\n", 884 FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1); 885 printk(KERN_DEBUG " PKMAP %#lx - %#lx\n", 886 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1); 887#endif 888#ifdef CONFIG_HUGEVMAP 889 printk(KERN_DEBUG " HUGEMAP %#lx - %#lx\n", 890 HUGE_VMAP_BASE, HUGE_VMAP_END - 1); 891#endif 892 printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n", 893 _VMALLOC_START, _VMALLOC_END - 1); 894#ifdef __tilegx__ 895 for (i = MAX_NUMNODES-1; i >= 0; --i) { 896 struct pglist_data *node = &node_data[i]; 897 if (node->node_present_pages) { 898 unsigned long start = (unsigned long) 899 pfn_to_kaddr(node->node_start_pfn); 900 unsigned long end = start + 901 (node->node_present_pages << PAGE_SHIFT); 902 printk(KERN_DEBUG " MEM%d %#lx - %#lx\n", 903 i, start, end - 1); 904 } 905 } 906#else 907 last = high_memory; 908 for (i = MAX_NUMNODES-1; i >= 0; --i) { 909 if ((unsigned long)vbase_map[i] != -1UL) { 910 printk(KERN_DEBUG " LOWMEM%d %#lx - %#lx\n", 911 i, (unsigned long) (vbase_map[i]), 912 (unsigned long) (last-1)); 913 last = vbase_map[i]; 914 } 915 } 916#endif 917 918#ifndef __tilegx__ 919 /* 920 * Convert from using one lock for all atomic operations to 921 * one per cpu. 922 */ 923 __init_atomic_per_cpu(); 924#endif 925} 926 927/* 928 * this is for the non-NUMA, single node SMP system case. 929 * Specifically, in the case of x86, we will always add 930 * memory to the highmem for now. 931 */ 932#ifndef CONFIG_NEED_MULTIPLE_NODES 933int arch_add_memory(u64 start, u64 size) 934{ 935 struct pglist_data *pgdata = &contig_page_data; 936 struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; 937 unsigned long start_pfn = start >> PAGE_SHIFT; 938 unsigned long nr_pages = size >> PAGE_SHIFT; 939 940 return __add_pages(zone, start_pfn, nr_pages); 941} 942 943int remove_memory(u64 start, u64 size) 944{ 945 return -EINVAL; 946} 947#endif 948 949struct kmem_cache *pgd_cache; 950 951void __init pgtable_cache_init(void) 952{ 953 pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL); 954 if (!pgd_cache) 955 panic("pgtable_cache_init(): Cannot create pgd cache"); 956} 957 958#if !CHIP_HAS_COHERENT_LOCAL_CACHE() 959/* 960 * The __w1data area holds data that is only written during initialization, 961 * and is read-only and thus freely cacheable thereafter. Fix the page 962 * table entries that cover that region accordingly. 963 */ 964static void mark_w1data_ro(void) 965{ 966 /* Loop over page table entries */ 967 unsigned long addr = (unsigned long)__w1data_begin; 968 BUG_ON((addr & (PAGE_SIZE-1)) != 0); 969 for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) { 970 unsigned long pfn = kaddr_to_pfn((void *)addr); 971 pte_t *ptep = virt_to_pte(NULL, addr); 972 BUG_ON(pte_huge(*ptep)); /* not relevant for kdata_huge */ 973 set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO)); 974 } 975} 976#endif 977 978#ifdef CONFIG_DEBUG_PAGEALLOC 979static long __write_once initfree; 980#else 981static long __write_once initfree = 1; 982#endif 983 984/* Select whether to free (1) or mark unusable (0) the __init pages. */ 985static int __init set_initfree(char *str) 986{ 987 long val; 988 if (strict_strtol(str, 0, &val) == 0) { 989 initfree = val; 990 pr_info("initfree: %s free init pages\n", 991 initfree ? "will" : "won't"); 992 } 993 return 1; 994} 995__setup("initfree=", set_initfree); 996 997static void free_init_pages(char *what, unsigned long begin, unsigned long end) 998{ 999 unsigned long addr = (unsigned long) begin; 1000 1001 if (kdata_huge && !initfree) { 1002 pr_warning("Warning: ignoring initfree=0:" 1003 " incompatible with kdata=huge\n"); 1004 initfree = 1; 1005 } 1006 end = (end + PAGE_SIZE - 1) & PAGE_MASK; 1007 local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin); 1008 for (addr = begin; addr < end; addr += PAGE_SIZE) { 1009 /* 1010 * Note we just reset the home here directly in the 1011 * page table. We know this is safe because our caller 1012 * just flushed the caches on all the other cpus, 1013 * and they won't be touching any of these pages. 1014 */ 1015 int pfn = kaddr_to_pfn((void *)addr); 1016 struct page *page = pfn_to_page(pfn); 1017 pte_t *ptep = virt_to_pte(NULL, addr); 1018 if (!initfree) { 1019 /* 1020 * If debugging page accesses then do not free 1021 * this memory but mark them not present - any 1022 * buggy init-section access will create a 1023 * kernel page fault: 1024 */ 1025 pte_clear(&init_mm, addr, ptep); 1026 continue; 1027 } 1028 __ClearPageReserved(page); 1029 init_page_count(page); 1030 if (pte_huge(*ptep)) 1031 BUG_ON(!kdata_huge); 1032 else 1033 set_pte_at(&init_mm, addr, ptep, 1034 pfn_pte(pfn, PAGE_KERNEL)); 1035 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 1036 free_page(addr); 1037 totalram_pages++; 1038 } 1039 pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10); 1040} 1041 1042void free_initmem(void) 1043{ 1044 const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET; 1045 1046 /* 1047 * Evict the dirty initdata on the boot cpu, evict the w1data 1048 * wherever it's homed, and evict all the init code everywhere. 1049 * We are guaranteed that no one will touch the init pages any 1050 * more, and although other cpus may be touching the w1data, 1051 * we only actually change the caching on tile64, which won't 1052 * be keeping local copies in the other tiles' caches anyway. 1053 */ 1054 homecache_evict(&cpu_cacheable_map); 1055 1056 /* Free the data pages that we won't use again after init. */ 1057 free_init_pages("unused kernel data", 1058 (unsigned long)_sinitdata, 1059 (unsigned long)_einitdata); 1060 1061 /* 1062 * Free the pages mapped from 0xc0000000 that correspond to code 1063 * pages from MEM_SV_INTRPT that we won't use again after init. 1064 */ 1065 free_init_pages("unused kernel text", 1066 (unsigned long)_sinittext - text_delta, 1067 (unsigned long)_einittext - text_delta); 1068 1069#if !CHIP_HAS_COHERENT_LOCAL_CACHE() 1070 /* 1071 * Upgrade the .w1data section to globally cached. 1072 * We don't do this on tilepro, since the cache architecture 1073 * pretty much makes it irrelevant, and in any case we end 1074 * up having racing issues with other tiles that may touch 1075 * the data after we flush the cache but before we update 1076 * the PTEs and flush the TLBs, causing sharer shootdowns 1077 * later. Even though this is to clean data, it seems like 1078 * an unnecessary complication. 1079 */ 1080 mark_w1data_ro(); 1081#endif 1082 1083 /* Do a global TLB flush so everyone sees the changes. */ 1084 flush_tlb_all(); 1085}