/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot or a memory hotplug operation, when a new memory section is
 * added, physical memory allocation (including hash table bolting) will
 * be performed for the set of struct pages which are part of the memory
 * section. This saves memory by not allocating struct pages for PFNs
 * which are not valid.
 *
 * ----------------------------------------------
 * | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 * ----------------------------------------------
 *
 *         f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+ |       |        +--------------+
 *  |      |  page struct | |       |  +---> |  page struct |
 *  |      +--------------+ |       |  |     +--------------+
 *  |      |  page struct | |       |  |
 *  |      +--------------+ |       |  |
 *  |      |  page struct | |       |  |
 *  |      +--------------+ |       |  |
 *  |      |  page struct | |       |  |
 *  |      +--------------+ |       |  |
 *  |      |  page struct | |       |  |
 *  |      +--------------+ |       |  |
 *  |      |  page struct | +-------+  |
 *  |      +--------------+            |
 *  |      |  page struct | +----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 * -----------------------------------------
 * | RELATION BETWEEN STRUCT PAGES AND PFNS|
 * -----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
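/*
 * Illustrative sketch (not part of the original file, compiled out):
 * because the vmemmap is virtually contiguous, converting between a PFN
 * and its struct page is plain pointer arithmetic with no table lookup.
 * The hypothetical helpers below only restate the generic
 * CONFIG_SPARSEMEM_VMEMMAP definitions of pfn_to_page()/page_to_pfn().
 */
#if 0
static inline struct page *example_pfn_to_page(unsigned long pfn)
{
	return vmemmap + pfn;		/* constant offset from the vmemmap base */
}

static inline unsigned long example_page_to_pfn(struct page *page)
{
	return page - vmemmap;		/* inverse of the above */
}
#endif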
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc = htab_bolt_mapping(start, start + page_size, phys,
				   pgprot_val(PAGE_KERNEL),
				   mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap.
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}
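
/*
 * Usage sketch (assumption, not from this file, compiled out): an
 * __ioremap()-style caller maps a physical range one PAGE_SIZE chunk at
 * a time with the function above.  The helper name is hypothetical.
 */
#if 0
static int example_map_io_range(unsigned long ea, unsigned long pa,
				unsigned long size, pgprot_t prot)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int rc = hash__map_kernel_page(ea + i, pa + i, prot);

		if (rc)
			return rc;	/* caller is expected to unmap on failure */
	}
	return 0;
}
#endif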

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

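	/*
	 * ldarx/stdcx. retry loop: spin while H_PAGE_BUSY is set, then
	 * atomically clear the 'clr' bits and set the 'set' bits in the
	 * PMD, returning the previous (big-endian) value in old_be.
	 */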
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to a hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with a local ptep pointer value. Such a hash_page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence waits for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * We store the pgtable in the second half of the PMD.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at the pmd level. The hash fault code
	 * looks at the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment;
	 * zero out its contents on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}
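
/*
 * Note: deposit and withdraw are paired by the generic THP code, a
 * pgtable is deposited when a huge PMD is established and withdrawn
 * when it is zapped or split, so while the PMD maps a hugepage the hash
 * fault code always has a fragment in which to record hash slot
 * information for the base pages.
 */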

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Let's zero out the old valid and hash index details so that
	 * the hash fault code doesn't look at them.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_current_mm_pte variants which do lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from a pmd pointing to a page table
	 * to a pmd pointing to a huge page (and back) while interrupts are
	 * disabled. We clear the pmd to possibly replace it with a page table
	 * pointer in different code paths. So make sure we wait for the
	 * parallel find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepages in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTEs, we will be using them by default.
	 * A penc value of -1 means 16M pages are not supported with
	 * that base page size.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTEs.
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step);	/* aligns up */

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);

	return true;
}

void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif