Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch/tile: Allow tilegx to build with either 16K or 64K page size

This change introduces new flags for the hv_install_context()
API, which passes a page table pointer to the hypervisor. Clients
can explicitly request 4K, 16K, or 64K small pages when they
install a new context. In practice, the page size is fixed at
kernel compile time and the same size is always requested every
time a new page table is installed.

The <hv/hypervisor.h> header changes so that it provides more abstract
macros for managing "page" things like PFNs and page tables. For
example there is now a HV_DEFAULT_PAGE_SIZE_SMALL instead of the old
HV_PAGE_SIZE_SMALL. The various PFN routines have been eliminated and
only PA- or PTFN-based ones remain (since PTFNs are always expressed
in units of a fixed 2KB "page" size). The page-table management macros are
renamed with a leading underscore and take page-size arguments with
the presumption that clients will use those macros in some single
place to provide the "real" macros they will use themselves.

I happened to notice the old hv_set_caching() API was totally broken
(it assumed 4KB pages) so I changed it so it would nominally work
correctly with other page sizes.

Tag modules with the page size so you can't load a module built with
a conflicting page size. (And add a test for SMP while we're at it.)

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+345 -197
+25
arch/tile/Kconfig
··· 139 139 smaller kernel memory footprint results from using a smaller 140 140 value on chips with fewer tiles. 141 141 142 + if TILEGX 143 + 144 + choice 145 + prompt "Kernel page size" 146 + default PAGE_SIZE_64KB 147 + help 148 + This lets you select the page size of the kernel. For best 149 + performance on memory-intensive applications, a page size of 64KB 150 + is recommended. For workloads involving many small files, many 151 + connections, etc., it may be better to select 16KB, which uses 152 + memory more efficiently at some cost in TLB performance. 153 + 154 + Note that this option is TILE-Gx specific; currently 155 + TILEPro page size is set by rebuilding the hypervisor. 156 + 157 + config PAGE_SIZE_16KB 158 + bool "16KB" 159 + 160 + config PAGE_SIZE_64KB 161 + bool "64KB" 162 + 163 + endchoice 164 + 165 + endif 166 + 142 167 source "kernel/time/Kconfig" 143 168 144 169 source "kernel/Kconfig.hz"
-1
arch/tile/include/asm/Kbuild
··· 21 21 generic-y += irq_regs.h 22 22 generic-y += kdebug.h 23 23 generic-y += local.h 24 - generic-y += module.h 25 24 generic-y += msgbuf.h 26 25 generic-y += mutex.h 27 26 generic-y += param.h
+1 -1
arch/tile/include/asm/mmu.h
··· 21 21 * Written under the mmap_sem semaphore; read without the 22 22 * semaphore but atomically, but it is conservatively set. 23 23 */ 24 - unsigned int priority_cached; 24 + unsigned long priority_cached; 25 25 }; 26 26 27 27 typedef struct mm_context mm_context_t;
+6 -2
arch/tile/include/asm/mmu_context.h
··· 30 30 return 0; 31 31 } 32 32 33 - /* Note that arch/tile/kernel/head.S also calls hv_install_context() */ 33 + /* 34 + * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S 35 + * also call hv_install_context(). 36 + */ 34 37 static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot) 35 38 { 36 39 /* FIXME: DIRECTIO should not always be set. FIXME. */ 37 - int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO); 40 + int rc = hv_install_context(__pa(pgdir), prot, asid, 41 + HV_CTX_DIRECTIO | CTX_PAGE_FLAG); 38 42 if (rc < 0) 39 43 panic("hv_install_context failed: %d", rc); 40 44 }
+40
arch/tile/include/asm/module.h
··· 1 + /* 2 + * Copyright 2011 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + */ 14 + 15 + #ifndef _ASM_TILE_MODULE_H 16 + #define _ASM_TILE_MODULE_H 17 + 18 + #include <arch/chip.h> 19 + 20 + #include <asm-generic/module.h> 21 + 22 + /* We can't use modules built with different page sizes. */ 23 + #if defined(CONFIG_PAGE_SIZE_16KB) 24 + # define MODULE_PGSZ " 16KB" 25 + #elif defined(CONFIG_PAGE_SIZE_64KB) 26 + # define MODULE_PGSZ " 64KB" 27 + #else 28 + # define MODULE_PGSZ "" 29 + #endif 30 + 31 + /* We don't really support no-SMP so tag if someone tries. */ 32 + #ifdef CONFIG_SMP 33 + #define MODULE_NOSMP "" 34 + #else 35 + #define MODULE_NOSMP " nosmp" 36 + #endif 37 + 38 + #define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP 39 + 40 + #endif /* _ASM_TILE_MODULE_H */
+11 -2
arch/tile/include/asm/page.h
··· 20 20 #include <arch/chip.h> 21 21 22 22 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ 23 - #define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL 24 - #define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE 23 + #if defined(CONFIG_PAGE_SIZE_16KB) 24 + #define PAGE_SHIFT 14 25 + #define CTX_PAGE_FLAG HV_CTX_PG_SM_16K 26 + #elif defined(CONFIG_PAGE_SIZE_64KB) 27 + #define PAGE_SHIFT 16 28 + #define CTX_PAGE_FLAG HV_CTX_PG_SM_64K 29 + #else 30 + #define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 31 + #define CTX_PAGE_FLAG 0 32 + #endif 33 + #define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 25 34 26 35 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) 27 36 #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+67 -25
arch/tile/include/asm/pgalloc.h
··· 19 19 #include <linux/mm.h> 20 20 #include <linux/mmzone.h> 21 21 #include <asm/fixmap.h> 22 + #include <asm/page.h> 22 23 #include <hv/hypervisor.h> 23 24 24 25 /* Bits for the size of the second-level page table. */ 25 - #define L2_KERNEL_PGTABLE_SHIFT \ 26 - (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE) 26 + #define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT) 27 + 28 + /* How big is a kernel L2 page table? */ 29 + #define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT) 27 30 28 31 /* We currently allocate user L2 page tables by page (unlike kernel L2s). */ 29 - #if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL 30 - #define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL 32 + #if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT 33 + #define L2_USER_PGTABLE_SHIFT PAGE_SHIFT 31 34 #else 32 35 #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT 33 36 #endif 34 37 35 38 /* How many pages do we need, as an "order", for a user L2 page table? */ 36 - #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL) 37 - 38 - /* How big is a kernel L2 page table? 
*/ 39 - #define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT) 39 + #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT) 40 40 41 41 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 42 42 { ··· 50 50 static inline void pmd_populate_kernel(struct mm_struct *mm, 51 51 pmd_t *pmd, pte_t *ptep) 52 52 { 53 - set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN, 53 + set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)), 54 54 __pgprot(_PAGE_PRESENT))); 55 55 } 56 56 57 57 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, 58 58 pgtable_t page) 59 59 { 60 - set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)), 60 + set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))), 61 61 __pgprot(_PAGE_PRESENT))); 62 62 } 63 63 ··· 68 68 extern pgd_t *pgd_alloc(struct mm_struct *mm); 69 69 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 70 70 71 - extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); 72 - extern void pte_free(struct mm_struct *mm, struct page *pte); 71 + extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address, 72 + int order); 73 + extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order); 74 + 75 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 76 + unsigned long address) 77 + { 78 + return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER); 79 + } 80 + 81 + static inline void pte_free(struct mm_struct *mm, struct page *pte) 82 + { 83 + pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER); 84 + } 73 85 74 86 #define pmd_pgtable(pmd) pmd_page(pmd) 75 87 ··· 97 85 pte_free(mm, virt_to_page(pte)); 98 86 } 99 87 100 - extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, 101 - unsigned long address); 88 + extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte, 89 + unsigned long address, int order); 90 + static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, 91 + unsigned long 
address) 92 + { 93 + __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER); 94 + } 102 95 103 96 #define check_pgt_cache() do { } while (0) 104 97 ··· 121 104 void shatter_huge_page(unsigned long addr); 122 105 123 106 #ifdef __tilegx__ 124 - /* We share a single page allocator for both L1 and L2 page tables. */ 125 - #if HV_L1_SIZE != HV_L2_SIZE 126 - # error Rework assumption that L1 and L2 page tables are same size. 127 - #endif 128 - #define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER 107 + 129 108 #define pud_populate(mm, pud, pmd) \ 130 109 pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd)) 131 - #define pmd_alloc_one(mm, addr) \ 132 - ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr)))) 133 - #define pmd_free(mm, pmdp) \ 134 - pte_free((mm), virt_to_page(pmdp)) 135 - #define __pmd_free_tlb(tlb, pmdp, address) \ 136 - __pte_free_tlb((tlb), virt_to_page(pmdp), (address)) 110 + 111 + /* Bits for the size of the L1 (intermediate) page table. */ 112 + #define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT) 113 + 114 + /* How big is a kernel L2 page table? */ 115 + #define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT) 116 + 117 + /* We currently allocate L1 page tables by page. */ 118 + #if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT 119 + #define L1_USER_PGTABLE_SHIFT PAGE_SHIFT 120 + #else 121 + #define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT 137 122 #endif 123 + 124 + /* How many pages do we need, as an "order", for an L1 page table? 
*/ 125 + #define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT) 126 + 127 + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) 128 + { 129 + struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER); 130 + return (pmd_t *)page_to_virt(p); 131 + } 132 + 133 + static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp) 134 + { 135 + pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER); 136 + } 137 + 138 + static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, 139 + unsigned long address) 140 + { 141 + __pgtable_free_tlb(tlb, virt_to_page(pmdp), address, 142 + L1_USER_PGTABLE_ORDER); 143 + } 144 + 145 + #endif /* __tilegx__ */ 138 146 139 147 #endif /* _ASM_TILE_PGALLOC_H */
+6 -4
arch/tile/include/asm/pgtable.h
··· 27 27 #include <linux/slab.h> 28 28 #include <linux/list.h> 29 29 #include <linux/spinlock.h> 30 + #include <linux/pfn.h> 30 31 #include <asm/processor.h> 31 32 #include <asm/fixmap.h> 33 + #include <asm/page.h> 32 34 33 35 struct mm_struct; 34 36 struct vm_area_struct; ··· 164 162 (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val } 165 163 166 164 /* Just setting the PFN to zero suffices. */ 167 - #define pte_pgprot(x) hv_pte_set_pfn((x), 0) 165 + #define pte_pgprot(x) hv_pte_set_pa((x), 0) 168 166 169 167 /* 170 168 * For PTEs and PDEs, we must clear the Present bit first when ··· 264 262 265 263 static inline unsigned long pte_pfn(pte_t pte) 266 264 { 267 - return hv_pte_get_pfn(pte); 265 + return PFN_DOWN(hv_pte_get_pa(pte)); 268 266 } 269 267 270 268 /* Set or get the remote cache cpu in a pgprot with remote caching. */ ··· 273 271 274 272 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) 275 273 { 276 - return hv_pte_set_pfn(prot, pfn); 274 + return hv_pte_set_pa(prot, PFN_PHYS(pfn)); 277 275 } 278 276 279 277 /* Support for priority mappings. */ ··· 473 471 * OK for pte_lockptr(), since we just end up with potentially one 474 472 * lock being used for several pte_t arrays. 475 473 */ 476 - #define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd))) 474 + #define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd)))) 477 475 478 476 static inline void pmd_clear(pmd_t *pmdp) 479 477 {
+8 -6
arch/tile/include/asm/pgtable_32.h
··· 20 20 * The level-1 index is defined by the huge page size. A PGD is composed 21 21 * of PTRS_PER_PGD pgd_t's and is the top level of the page table. 22 22 */ 23 - #define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE 24 - #define PGDIR_SIZE HV_PAGE_SIZE_LARGE 23 + #define PGDIR_SHIFT HPAGE_SHIFT 24 + #define PGDIR_SIZE HPAGE_SIZE 25 25 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 26 - #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) 27 - #define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) 26 + #define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT) 27 + #define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT) 28 + #define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT) 28 29 29 30 /* 30 31 * The level-2 index is defined by the difference between the huge ··· 34 33 * Note that the hypervisor docs use PTE for what we call pte_t, so 35 34 * this nomenclature is somewhat confusing. 36 35 */ 37 - #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) 38 - #define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) 36 + #define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT) 37 + #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT) 38 + #define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT) 39 39 40 40 #ifndef __ASSEMBLY__ 41 41
+16 -12
arch/tile/include/asm/pgtable_64.h
··· 21 21 #define PGDIR_SIZE HV_L1_SPAN 22 22 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 23 23 #define PTRS_PER_PGD HV_L0_ENTRIES 24 - #define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) 24 + #define PGD_INDEX(va) HV_L0_INDEX(va) 25 + #define SIZEOF_PGD HV_L0_SIZE 25 26 26 27 /* 27 28 * The level-1 index is defined by the huge page size. A PMD is composed 28 29 * of PTRS_PER_PMD pgd_t's and is the middle level of the page table. 29 30 */ 30 - #define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE 31 - #define PMD_SIZE HV_PAGE_SIZE_LARGE 31 + #define PMD_SHIFT HPAGE_SHIFT 32 + #define PMD_SIZE HPAGE_SIZE 32 33 #define PMD_MASK (~(PMD_SIZE-1)) 33 - #define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT)) 34 - #define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t)) 34 + #define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT) 35 + #define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT) 36 + #define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT) 35 37 36 38 /* 37 39 * The level-2 index is defined by the difference between the huge ··· 42 40 * Note that the hypervisor docs use PTE for what we call pte_t, so 43 41 * this nomenclature is somewhat confusing. 44 42 */ 45 - #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) 46 - #define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) 43 + #define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT) 44 + #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT) 45 + #define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT) 47 46 48 47 /* 49 - * Align the vmalloc area to an L2 page table, and leave a guard page 50 - * at the beginning and end. The vmalloc code also puts in an internal 48 + * Align the vmalloc area to an L2 page table. Omit guard pages at 49 + * the beginning and end for simplicity (particularly in the per-cpu 50 + * memory allocation code). The vmalloc code puts in an internal 51 51 * guard page between each allocation. 
52 52 */ 53 53 #define _VMALLOC_END HUGE_VMAP_BASE 54 - #define VMALLOC_END (_VMALLOC_END - PAGE_SIZE) 55 - #define VMALLOC_START (_VMALLOC_START + PAGE_SIZE) 54 + #define VMALLOC_END _VMALLOC_END 55 + #define VMALLOC_START _VMALLOC_START 56 56 57 57 #define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE) 58 58 ··· 102 98 * A pud_t points to a pmd_t array. Since we can have multiple per 103 99 * page, we don't have a one-to-one mapping of pud_t's to pages. 104 100 */ 105 - #define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud))) 101 + #define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud)))) 106 102 107 103 static inline unsigned long pud_index(unsigned long address) 108 104 {
+1 -1
arch/tile/include/hv/drv_xgbe_intf.h
··· 460 460 * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for 461 461 * our page size of exactly 65536. We add one for a "body" fragment. 462 462 */ 463 - #define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1) 463 + #define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1) 464 464 465 465 /** Total number of bytes needed for an lepp_tso_cmd_t. */ 466 466 #define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
+120 -94
arch/tile/include/hv/hypervisor.h
··· 17 17 * The hypervisor's public API. 18 18 */ 19 19 20 - #ifndef _TILE_HV_H 21 - #define _TILE_HV_H 20 + #ifndef _HV_HV_H 21 + #define _HV_HV_H 22 22 23 23 #include <arch/chip.h> 24 24 ··· 42 42 */ 43 43 #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) 44 44 45 - /** The log2 of the size of small pages, in bytes. This value should 46 - * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 45 + /** The log2 of the initial size of small pages, in bytes. 46 + * See HV_DEFAULT_PAGE_SIZE_SMALL. 47 47 */ 48 - #define HV_LOG2_PAGE_SIZE_SMALL 16 48 + #define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16 49 49 50 - /** The size of small pages, in bytes. This value should be verified 50 + /** The initial size of small pages, in bytes. This value should be verified 51 51 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 52 + * It may also be modified when installing a new context. 52 53 */ 53 - #define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) 54 + #define HV_DEFAULT_PAGE_SIZE_SMALL \ 55 + (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL) 54 56 55 - /** The log2 of the size of large pages, in bytes. This value should be 56 - * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 57 + /** The log2 of the initial size of large pages, in bytes. 58 + * See HV_DEFAULT_PAGE_SIZE_LARGE. 57 59 */ 58 - #define HV_LOG2_PAGE_SIZE_LARGE 24 60 + #define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24 59 61 60 - /** The size of large pages, in bytes. This value should be verified 62 + /** The initial size of large pages, in bytes. This value should be verified 61 63 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 64 + * It may also be modified when installing a new context. 
62 65 */ 63 - #define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE) 66 + #define HV_DEFAULT_PAGE_SIZE_LARGE \ 67 + (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE) 64 68 65 69 /** The log2 of the granularity at which page tables must be aligned; 66 70 * in other words, the CPA for a page table must have this many zero ··· 405 401 * that the temperature has hit an upper limit and is no longer being 406 402 * accurately tracked. 407 403 */ 408 - HV_SYSCONF_BOARD_TEMP = 6 404 + HV_SYSCONF_BOARD_TEMP = 6, 405 + 406 + /** Legal page size bitmask for hv_install_context(). 407 + * For example, if 16KB and 64KB small pages are supported, 408 + * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K". 409 + */ 410 + HV_SYSCONF_VALID_PAGE_SIZES = 7, 409 411 410 412 } HV_SysconfQuery; 411 413 ··· 664 654 * new page table does not need to contain any mapping for the 665 655 * hv_install_context address itself. 666 656 * 657 + * At most one HV_CTX_PG_SM_* flag may be specified in "flags"; 658 + * if multiple flags are specified, HV_EINVAL is returned. 659 + * Specifying none of the flags results in using the default page size. 660 + * All cores participating in a given client must request the same 661 + * page size, or the results are undefined. 662 + * 667 663 * @param page_table Root of the page table. 668 664 * @param access PTE providing info on how to read the page table. This 669 665 * value must be consistent between multiple tiles sharing a page table, ··· 687 671 688 672 #define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from 689 673 PL0. */ 674 + 675 + #define HV_CTX_PG_SM_4K 0x10 /**< Use 4K small pages, if available. */ 676 + #define HV_CTX_PG_SM_16K 0x20 /**< Use 16K small pages, if available. */ 677 + #define HV_CTX_PG_SM_64K 0x40 /**< Use 64K small pages, if available. */ 678 + #define HV_CTX_PG_SM_MASK 0xf0 /**< Mask of all possible small pages. 
*/ 690 679 691 680 #ifndef __ASSEMBLER__ 692 681 ··· 1269 1248 * with the existing priority pages) or "red/black" (if they don't). 1270 1249 * The bitmask provides information on which parts of the cache 1271 1250 * have been used for pinned pages so far on this tile; if (1 << N) 1272 - * appears in the bitmask, that indicates that a page has been marked 1273 - * "priority" whose PFN equals N, mod 8. 1251 + * appears in the bitmask, that indicates that a 4KB region of the 1252 + * cache starting at (N * 4KB) is in use by a "priority" page. 1253 + * The portion of cache used by a particular page can be computed 1254 + * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting 1255 + * all the "4KB" bits corresponding to the actual page size. 1274 1256 * @param bitmask A bitmap of priority page set values 1275 1257 */ 1276 - void hv_set_caching(unsigned int bitmask); 1258 + void hv_set_caching(unsigned long bitmask); 1277 1259 1278 1260 1279 1261 /** Zero out a specified number of pages. ··· 1908 1884 of word */ 1909 1885 #define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */ 1910 1886 1911 - /** Position of the PFN field within the PTE (subset of the PTFN). */ 1912 - #define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \ 1913 - HV_LOG2_PAGE_TABLE_ALIGN)) 1914 - 1915 - /** Length of the PFN field within the PTE (subset of the PTFN). */ 1916 - #define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \ 1917 - (HV_LOG2_PAGE_SIZE_SMALL - \ 1918 - HV_LOG2_PAGE_TABLE_ALIGN)) 1919 - 1920 1887 /* 1921 1888 * Legal values for the PTE's mode field 1922 1889 */ ··· 2260 2245 * 2261 2246 * This field contains the upper bits of the CPA (client physical 2262 2247 * address) of the target page; the complete CPA is this field with 2263 - * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it. 2248 + * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it. 
2264 2249 * 2265 - * For PTEs in a level-1 page table where the Page bit is set, the 2266 - * CPA must be aligned modulo the large page size. 2267 - */ 2268 - static __inline unsigned int 2269 - hv_pte_get_pfn(const HV_PTE pte) 2270 - { 2271 - return pte.val >> HV_PTE_INDEX_PFN; 2272 - } 2273 - 2274 - 2275 - /** Set the page frame number into a PTE. See hv_pte_get_pfn. */ 2276 - static __inline HV_PTE 2277 - hv_pte_set_pfn(HV_PTE pte, unsigned int val) 2278 - { 2279 - /* 2280 - * Note that the use of "PTFN" in the next line is intentional; we 2281 - * don't want any garbage lower bits left in that field. 2282 - */ 2283 - pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN); 2284 - pte.val |= (__hv64) val << HV_PTE_INDEX_PFN; 2285 - return pte; 2286 - } 2287 - 2288 - /** Get the page table frame number from the PTE. 2289 - * 2290 - * This field contains the upper bits of the CPA (client physical 2291 - * address) of the target page table; the complete CPA is this field with 2292 - * with HV_PAGE_TABLE_ALIGN zero bits appended to it. 2293 - * 2294 - * For PTEs in a level-1 page table when the Page bit is not set, the 2295 - * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and 2296 - * the level-2 page table size. 2250 + * For all PTEs in the lowest-level page table, and for all PTEs with 2251 + * the Page bit set in all page tables, the CPA must be aligned modulo 2252 + * the relevant page size. 2297 2253 */ 2298 2254 static __inline unsigned long 2299 2255 hv_pte_get_ptfn(const HV_PTE pte) 2300 2256 { 2301 2257 return pte.val >> HV_PTE_INDEX_PTFN; 2302 2258 } 2303 - 2304 2259 2305 2260 /** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */ 2306 2261 static __inline HV_PTE ··· 2279 2294 pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS)-1) << HV_PTE_INDEX_PTFN); 2280 2295 pte.val |= (__hv64) val << HV_PTE_INDEX_PTFN; 2281 2296 return pte; 2297 + } 2298 + 2299 + /** Get the client physical address from the PTE. See hv_pte_set_ptfn. 
*/ 2300 + static __inline HV_PhysAddr 2301 + hv_pte_get_pa(const HV_PTE pte) 2302 + { 2303 + return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN; 2304 + } 2305 + 2306 + /** Set the client physical address into a PTE. See hv_pte_get_ptfn. */ 2307 + static __inline HV_PTE 2308 + hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa) 2309 + { 2310 + return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN); 2282 2311 } 2283 2312 2284 2313 ··· 2330 2331 2331 2332 #endif /* !__ASSEMBLER__ */ 2332 2333 2333 - /** Converts a client physical address to a pfn. */ 2334 - #define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL) 2335 - 2336 - /** Converts a pfn to a client physical address. */ 2337 - #define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL) 2338 - 2339 2334 /** Converts a client physical address to a ptfn. */ 2340 2335 #define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN) 2341 2336 2342 2337 /** Converts a ptfn to a client physical address. */ 2343 2338 #define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN) 2344 2339 2345 - /** Converts a ptfn to a pfn. */ 2346 - #define HV_PTFN_TO_PFN(p) \ 2347 - ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN)) 2348 - 2349 - /** Converts a pfn to a ptfn. */ 2350 - #define HV_PFN_TO_PTFN(p) \ 2351 - ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN)) 2352 - 2353 2340 #if CHIP_VA_WIDTH() > 32 2341 + 2342 + /* 2343 + * Note that we currently do not allow customizing the page size 2344 + * of the L0 pages, but fix them at 4GB, so we do not use the 2345 + * "_HV_xxx" nomenclature for the L0 macros. 
2346 + */ 2354 2347 2355 2348 /** Log number of HV_PTE entries in L0 page table */ 2356 2349 #define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN) ··· 2373 2382 #endif /* CHIP_VA_WIDTH() > 32 */ 2374 2383 2375 2384 /** Log number of HV_PTE entries in L1 page table */ 2376 - #define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE) 2385 + #define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \ 2386 + (HV_LOG2_L1_SPAN - log2_page_size_large) 2377 2387 2378 2388 /** Number of HV_PTE entries in L1 page table */ 2379 - #define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES) 2389 + #define _HV_L1_ENTRIES(log2_page_size_large) \ 2390 + (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large)) 2380 2391 2381 2392 /** Log size of L1 page table in bytes */ 2382 - #define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES) 2393 + #define _HV_LOG2_L1_SIZE(log2_page_size_large) \ 2394 + (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large)) 2383 2395 2384 2396 /** Size of L1 page table in bytes */ 2385 - #define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE) 2397 + #define _HV_L1_SIZE(log2_page_size_large) \ 2398 + (1 << _HV_LOG2_L1_SIZE(log2_page_size_large)) 2386 2399 2387 2400 /** Log number of HV_PTE entries in level-2 page table */ 2388 - #define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL) 2401 + #define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \ 2402 + (log2_page_size_large - log2_page_size_small) 2389 2403 2390 2404 /** Number of HV_PTE entries in level-2 page table */ 2391 - #define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES) 2405 + #define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \ 2406 + (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small)) 2392 2407 2393 2408 /** Log size of level-2 page table in bytes */ 2394 - #define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES) 2409 + #define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \ 2410 + (HV_LOG2_PTE_SIZE + \ 2411 + 
_HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small)) 2395 2412 2396 2413 /** Size of level-2 page table in bytes */ 2397 - #define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE) 2414 + #define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \ 2415 + (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small)) 2398 2416 2399 2417 #ifdef __ASSEMBLER__ 2400 2418 2401 2419 #if CHIP_VA_WIDTH() > 32 2402 2420 2403 2421 /** Index in L1 for a specific VA */ 2404 - #define HV_L1_INDEX(va) \ 2405 - (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) 2422 + #define _HV_L1_INDEX(va, log2_page_size_large) \ 2423 + (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1)) 2406 2424 2407 2425 #else /* CHIP_VA_WIDTH() > 32 */ 2408 2426 2409 2427 /** Index in L1 for a specific VA */ 2410 - #define HV_L1_INDEX(va) \ 2411 - (((va) >> HV_LOG2_PAGE_SIZE_LARGE)) 2428 + #define _HV_L1_INDEX(va, log2_page_size_large) \ 2429 + (((va) >> log2_page_size_large)) 2412 2430 2413 2431 #endif /* CHIP_VA_WIDTH() > 32 */ 2414 2432 2415 2433 /** Index in level-2 page table for a specific VA */ 2416 - #define HV_L2_INDEX(va) \ 2417 - (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) 2434 + #define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \ 2435 + (((va) >> log2_page_size_small) & \ 2436 + (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1)) 2418 2437 2419 2438 #else /* __ASSEMBLER __ */ 2420 2439 2421 2440 #if CHIP_VA_WIDTH() > 32 2422 2441 2423 2442 /** Index in L1 for a specific VA */ 2424 - #define HV_L1_INDEX(va) \ 2425 - (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) 2443 + #define _HV_L1_INDEX(va, log2_page_size_large) \ 2444 + (((HV_VirtAddr)(va) >> log2_page_size_large) & \ 2445 + (_HV_L1_ENTRIES(log2_page_size_large) - 1)) 2426 2446 2427 2447 #else /* CHIP_VA_WIDTH() > 32 */ 2428 2448 2429 2449 /** Index in L1 for a specific VA */ 2430 - #define HV_L1_INDEX(va) \ 2431 - 
(((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE)) 2450 + #define _HV_L1_INDEX(va, log2_page_size_large) \ 2451 + (((HV_VirtAddr)(va) >> log2_page_size_large)) 2432 2452 2433 2453 #endif /* CHIP_VA_WIDTH() > 32 */ 2434 2454 2435 2455 /** Index in level-2 page table for a specific VA */ 2436 - #define HV_L2_INDEX(va) \ 2437 - (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) 2456 + #define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \ 2457 + (((HV_VirtAddr)(va) >> log2_page_size_small) & \ 2458 + (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1)) 2438 2459 2439 2460 #endif /* __ASSEMBLER __ */ 2440 2461 2441 - #endif /* _TILE_HV_H */ 2462 + /** Position of the PFN field within the PTE (subset of the PTFN). */ 2463 + #define _HV_PTE_INDEX_PFN(log2_page_size) \ 2464 + (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN)) 2465 + 2466 + /** Length of the PFN field within the PTE (subset of the PTFN). */ 2467 + #define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \ 2468 + (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN)) 2469 + 2470 + /** Converts a client physical address to a pfn. */ 2471 + #define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size) 2472 + 2473 + /** Converts a pfn to a client physical address. */ 2474 + #define _HV_PFN_TO_CPA(p, log2_page_size) \ 2475 + (((HV_PhysAddr)(p)) << log2_page_size) 2476 + 2477 + /** Converts a ptfn to a pfn. */ 2478 + #define _HV_PTFN_TO_PFN(p, log2_page_size) \ 2479 + ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN)) 2480 + 2481 + /** Converts a pfn to a ptfn. */ 2482 + #define _HV_PFN_TO_PTFN(p, log2_page_size) \ 2483 + ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN)) 2484 + 2485 + #endif /* _HV_HV_H */
+4 -4
arch/tile/kernel/head_32.S
··· 69 69 } 70 70 { 71 71 moveli lr, lo16(1f) 72 - move r5, zero 72 + moveli r5, CTX_PAGE_FLAG 73 73 } 74 74 { 75 75 auli lr, lr, ha16(1f) ··· 141 141 142 142 .macro PTE va, cpa, bits1, no_org=0 143 143 .ifeq \no_org 144 - .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE 144 + .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE 145 145 .endif 146 146 .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \ 147 147 (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) 148 - .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32)) 148 + .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32)) 149 149 .endm 150 150 151 151 __PAGE_ALIGNED_DATA ··· 166 166 /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */ 167 167 PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \ 168 168 (1 << (HV_PTE_INDEX_EXECUTABLE - 32)) 169 - .org swapper_pg_dir + HV_L1_SIZE 169 + .org swapper_pg_dir + PGDIR_SIZE 170 170 END(swapper_pg_dir) 171 171 172 172 /*
+11 -11
arch/tile/kernel/head_64.S
··· 114 114 shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET) 115 115 } 116 116 { 117 - move r3, zero 117 + moveli r3, CTX_PAGE_FLAG 118 118 j hv_install_context 119 119 } 120 120 1: ··· 210 210 .macro PTE cpa, bits1 211 211 .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\ 212 212 HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\ 213 - (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN) 213 + (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN) 214 214 .endm 215 215 216 216 __PAGE_ALIGNED_DATA 217 217 .align PAGE_SIZE 218 218 ENTRY(swapper_pg_dir) 219 - .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE 219 + .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE 220 220 .Lsv_data_pmd: 221 221 .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */ 222 - .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE 222 + .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE 223 223 .Lsv_code_pmd: 224 224 .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */ 225 - .org swapper_pg_dir + HV_L0_SIZE 225 + .org swapper_pg_dir + SIZEOF_PGD 226 226 END(swapper_pg_dir) 227 227 228 228 .align HV_PAGE_TABLE_ALIGN ··· 233 233 * permissions later. 234 234 */ 235 235 .set addr, 0 236 - .rept HV_L1_ENTRIES 236 + .rept PTRS_PER_PMD 237 237 PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE 238 - .set addr, addr + HV_PAGE_SIZE_LARGE 238 + .set addr, addr + HPAGE_SIZE 239 239 .endr 240 - .org temp_data_pmd + HV_L1_SIZE 240 + .org temp_data_pmd + SIZEOF_PMD 241 241 END(temp_data_pmd) 242 242 243 243 .align HV_PAGE_TABLE_ALIGN ··· 248 248 * permissions later. 249 249 */ 250 250 .set addr, 0 251 - .rept HV_L1_ENTRIES 251 + .rept PTRS_PER_PMD 252 252 PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE 253 - .set addr, addr + HV_PAGE_SIZE_LARGE 253 + .set addr, addr + HPAGE_SIZE 254 254 .endr 255 - .org temp_code_pmd + HV_L1_SIZE 255 + .org temp_code_pmd + SIZEOF_PMD 256 256 END(temp_code_pmd) 257 257 258 258 /*
+5 -2
arch/tile/kernel/machine_kexec.c
··· 251 251 void machine_kexec(struct kimage *image) 252 252 { 253 253 void *reboot_code_buffer; 254 + pte_t *ptep; 254 255 void (*rnk)(unsigned long, void *, unsigned long) 255 256 __noreturn; 256 257 ··· 267 266 */ 268 267 homecache_change_page_home(image->control_code_page, 0, 269 268 smp_processor_id()); 270 - reboot_code_buffer = vmap(&image->control_code_page, 1, 0, 271 - __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE)); 269 + reboot_code_buffer = page_address(image->control_code_page); 270 + BUG_ON(reboot_code_buffer == NULL); 271 + ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer); 272 + __set_pte(ptep, pte_mkexec(*ptep)); 272 273 memcpy(reboot_code_buffer, relocate_new_kernel, 273 274 relocate_new_kernel_size); 274 275 __flush_icache_range(
+4 -4
arch/tile/kernel/setup.c
··· 1396 1396 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) { 1397 1397 1398 1398 /* Update the vmalloc mapping and page home. */ 1399 - pte_t *ptep = 1400 - virt_to_pte(NULL, (unsigned long)ptr + i); 1399 + unsigned long addr = (unsigned long)ptr + i; 1400 + pte_t *ptep = virt_to_pte(NULL, addr); 1401 1401 pte_t pte = *ptep; 1402 1402 BUG_ON(pfn != pte_pfn(pte)); 1403 1403 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); 1404 1404 pte = set_remote_cache_cpu(pte, cpu); 1405 - set_pte(ptep, pte); 1405 + set_pte_at(&init_mm, addr, ptep, pte); 1406 1406 1407 1407 /* Update the lowmem mapping for consistency. */ 1408 1408 lowmem_va = (unsigned long)pfn_to_kaddr(pfn); ··· 1415 1415 BUG_ON(pte_huge(*ptep)); 1416 1416 } 1417 1417 BUG_ON(pfn != pte_pfn(*ptep)); 1418 - set_pte(ptep, pte); 1418 + set_pte_at(&init_mm, lowmem_va, ptep, pte); 1419 1419 } 1420 1420 } 1421 1421
+1 -1
arch/tile/kernel/smp.c
··· 203 203 if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0) 204 204 panic("Failed to initialize IPI for cpu %d\n", cpu); 205 205 206 - offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; 206 + offset = PFN_PHYS(pte_pfn(pte)); 207 207 ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte); 208 208 } 209 209 #endif
+4 -4
arch/tile/lib/memcpy_tile64.c
··· 160 160 break; 161 161 if (get_remote_cache_cpu(src_pte) == smp_processor_id()) 162 162 break; 163 - src_page = pfn_to_page(hv_pte_get_pfn(src_pte)); 163 + src_page = pfn_to_page(pte_pfn(src_pte)); 164 164 get_page(src_page); 165 165 if (pte_val(src_pte) != pte_val(*src_ptep)) { 166 166 put_page(src_page); ··· 168 168 } 169 169 if (pte_huge(src_pte)) { 170 170 /* Adjust the PTE to correspond to a small page */ 171 - int pfn = hv_pte_get_pfn(src_pte); 171 + int pfn = pte_pfn(src_pte); 172 172 pfn += (((unsigned long)source & (HPAGE_SIZE-1)) 173 173 >> PAGE_SHIFT); 174 174 src_pte = pfn_pte(pfn, src_pte); ··· 188 188 put_page(src_page); 189 189 break; 190 190 } 191 - dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte)); 191 + dst_page = pfn_to_page(pte_pfn(dst_pte)); 192 192 if (dst_page == src_page) { 193 193 /* 194 194 * Source and dest are on the same page; this ··· 206 206 } 207 207 if (pte_huge(dst_pte)) { 208 208 /* Adjust the PTE to correspond to a small page */ 209 - int pfn = hv_pte_get_pfn(dst_pte); 209 + int pfn = pte_pfn(dst_pte); 210 210 pfn += (((unsigned long)dest & (HPAGE_SIZE-1)) 211 211 >> PAGE_SHIFT); 212 212 dst_pte = pfn_pte(pfn, dst_pte);
+3 -8
arch/tile/mm/init.c
··· 82 82 83 83 static void init_prealloc_ptes(int node, int pages) 84 84 { 85 - BUG_ON(pages & (HV_L2_ENTRIES-1)); 85 + BUG_ON(pages & (PTRS_PER_PTE - 1)); 86 86 if (pages) { 87 87 num_l2_ptes[node] = pages; 88 88 l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t), ··· 131 131 132 132 #ifdef __tilegx__ 133 133 134 - #if HV_L1_SIZE != HV_L2_SIZE 135 - # error Rework assumption that L1 and L2 page tables are same size. 136 - #endif 137 - 138 - /* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */ 139 134 static inline pmd_t *alloc_pmd(void) 140 135 { 141 - return (pmd_t *)alloc_pte(); 136 + return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0); 142 137 } 143 138 144 139 static inline void assign_pmd(pud_t *pud, pmd_t *pmd) ··· 806 811 * changing init_mm once we get up and running, and there's no 807 812 * need for e.g. vmalloc_sync_all(). 808 813 */ 809 - BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END)); 814 + BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1)); 810 815 pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START); 811 816 assign_pmd(pud, alloc_pmd()); 812 817 #endif
+12 -15
arch/tile/mm/pgtable.c
··· 289 289 290 290 #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER) 291 291 292 - struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) 292 + struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address, 293 + int order) 293 294 { 294 295 gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO; 295 296 struct page *p; 296 - #if L2_USER_PGTABLE_ORDER > 0 297 297 int i; 298 - #endif 299 298 300 299 #ifdef CONFIG_HIGHPTE 301 300 flags |= __GFP_HIGHMEM; ··· 304 305 if (p == NULL) 305 306 return NULL; 306 307 307 - #if L2_USER_PGTABLE_ORDER > 0 308 308 /* 309 309 * Make every page have a page_count() of one, not just the first. 310 310 * We don't use __GFP_COMP since it doesn't look like it works 311 311 * correctly with tlb_remove_page(). 312 312 */ 313 - for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) { 313 + for (i = 1; i < order; ++i) { 314 314 init_page_count(p+i); 315 315 inc_zone_page_state(p+i, NR_PAGETABLE); 316 316 } 317 - #endif 318 317 319 318 pgtable_page_ctor(p); 320 319 return p; ··· 323 326 * process). We have to correct whatever pte_alloc_one() did before 324 327 * returning the pages to the allocator. 
325 328 */ 326 - void pte_free(struct mm_struct *mm, struct page *p) 329 + void pgtable_free(struct mm_struct *mm, struct page *p, int order) 327 330 { 328 331 int i; 329 332 330 333 pgtable_page_dtor(p); 331 334 __free_page(p); 332 335 333 - for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) { 336 + for (i = 1; i < order; ++i) { 334 337 __free_page(p+i); 335 338 dec_zone_page_state(p+i, NR_PAGETABLE); 336 339 } 337 340 } 338 341 339 - void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, 340 - unsigned long address) 342 + void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte, 343 + unsigned long address, int order) 341 344 { 342 345 int i; 343 346 344 347 pgtable_page_dtor(pte); 345 348 tlb_remove_page(tlb, pte); 346 349 347 - for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) { 350 + for (i = 1; i < order; ++i) { 348 351 tlb_remove_page(tlb, pte + i); 349 352 dec_zone_page_state(pte + i, NR_PAGETABLE); 350 353 } ··· 487 490 /* Can this mm load a PTE with cached_priority set? */ 488 491 static inline int mm_is_priority_cached(struct mm_struct *mm) 489 492 { 490 - return mm->context.priority_cached; 493 + return mm->context.priority_cached != 0; 491 494 } 492 495 493 496 /* ··· 497 500 void start_mm_caching(struct mm_struct *mm) 498 501 { 499 502 if (!mm_is_priority_cached(mm)) { 500 - mm->context.priority_cached = -1U; 501 - hv_set_caching(-1U); 503 + mm->context.priority_cached = -1UL; 504 + hv_set_caching(-1UL); 502 505 } 503 506 } 504 507 ··· 513 516 * Presumably we'll come back later and have more luck and clear 514 517 * the value then; for now we'll just keep the cache marked for priority. 515 518 */ 516 - static unsigned int update_priority_cached(struct mm_struct *mm) 519 + static unsigned long update_priority_cached(struct mm_struct *mm) 517 520 { 518 521 if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) { 519 522 struct vm_area_struct *vm;