/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

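/*
 * With CONFIG_TRANSPARENT_HUGEPAGE the PMD table is allocated at twice
 * its normal size: the second half of the PMD page is used by the THP
 * code to stash deposited page tables, so the constructor below must
 * clear both halves.
 */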
static void pmd_ctor(void *addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	memset(addr, 0, PMD_TABLE_SIZE * 2);
#else
	memset(addr, 0, PMD_TABLE_SIZE);
#endif
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}


void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
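	/*
	 * Nothing to do here: unmapping the vmemmap is not implemented
	 * for Book3E, so pages backing hot-removed vmemmap simply stay
	 * mapped.
	 */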
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       pgprot_val(PAGE_KERNEL),
				       mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	int mapped = htab_remove_mapping(start, start + page_size,
					 mmu_vmemmap_psize,
					 mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

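/*
 * vmemmap_list records, for every vmemmap page we map, the physical memory
 * backing it.  Entries are carved out of whole pages by vmemmap_list_alloc():
 * "next" and "num_left" describe the chunk currently being handed out, and
 * freed entries are threaded back onto "next" with "num_freed" counting them
 * for reuse.
 */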
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

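/*
 * Record that the vmemmap page at "start" is backed by the physical memory
 * at "phys", so it can later be found by realmode_pfn_to_page() and released
 * by vmemmap_list_free().
 */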
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

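/*
 * Populate the vmemmap for [start, end): for each vmemmap page that is not
 * already covered by an initialised section, allocate backing memory on
 * "node", record it on vmemmap_list and map it at the vmemmap page size.
 */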
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
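/*
 * Look up the vmemmap_list entry for the vmemmap page at "start", unlink it
 * and put it on the internal free list.  Returns the physical address that
 * was backing the page, or 0 if no entry was found.
 */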
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make next point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

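/*
 * Tear down the vmemmap for [start, end) when memory is hot-removed: skip
 * vmemmap pages that are still needed by another section, give the backing
 * pages back to the allocator (or to the reserved-page pool when they came
 * from bootmem), and remove the kernel mapping.
 */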
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true some other section
		 * still uses this vmemmap page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
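	/*
	 * Empty stub: the generic memory-hotplug code expects this hook to
	 * exist, but powerpc currently has nothing to record here.
	 */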
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) Real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * so the requested page struct can be split between blocks and
 * get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Entries may have been freed from vmemmap_list, so check every one */
		if ((pg_va + sizeof(struct page)) <=
				(vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
					vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */