Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.9-rc2 190 lines 4.5 kB view raw
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

/* Serializes all modifications of mmu_context_ida below. */
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

/*
 * Allocate a fresh MMU context id from the IDA.
 *
 * Ids are handed out starting above 0 (ida_get_new_above(..., 1, ...)),
 * so id 0 stays reserved -- init_new_context() below relies on
 * context.id == 0 meaning "newly allocated mm".
 *
 * Returns the new id (> 0) on success, or a negative errno:
 * -ENOMEM when IDA preallocation fails or the id would exceed
 * MAX_USER_CONTEXT, otherwise whatever ida_get_new_above() returned.
 */
int __init_new_context(void)
{
	int index;
	int err;

again:
	/*
	 * Preallocate IDA memory outside the spinlock: GFP_KERNEL may
	 * sleep, which is not allowed while holding mmu_context_lock.
	 */
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	/*
	 * -EAGAIN means the preallocated node was consumed (e.g. by a
	 * racing allocator); replenish and retry.
	 */
	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	/* Past the usable context range: give the id back and fail. */
	if (index > MAX_USER_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);

/*
 * Wire the new mm into the radix process table: prtb0 for this
 * context id gets the tree-size field, the physical address of the
 * mm's PGD and the PGD index size, in big-endian layout.
 * Always returns 0.
 */
static int radix__init_new_context(struct mm_struct *mm, int index)
{
	unsigned long rts_field;

	/*
	 * set the process table entry,
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
	return 0;
}

/*
 * Arch hook invoked when a new address space is set up: allocate a
 * context id and initialize the per-mm MMU state (radix process-table
 * entry, or hash slice/subpage-protection state).
 *
 * Returns 0 on success, negative errno on failure.  On the ICSWX
 * allocation failure path the context id is released again and
 * context.id is reset to MMU_NO_CONTEXT.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	if (radix_enabled()) {
		radix__init_new_context(mm, index);
	} else {

		/* The old code would re-promote on fork, we don't do that
		 * when using slices as it could cause problem promoting slices
		 * that have been forced down to 4K
		 *
		 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
		 * explicitly against context.id == 0. This ensures that we
		 * properly initialize context slice details for newly allocated
		 * mm's (which will have id == 0) and don't alter context slice
		 * inherited via fork (which will have id != 0).
		 *
		 * We should not be calling init_new_context() on init_mm. Hence a
		 * check against 0 is ok.
		 */
		if (mm->context.id == 0)
			slice_set_user_psize(mm, mmu_virtual_psize);
		subpage_prot_init_new_context(mm);
	}
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		/* Unwind: release the id and any subpage-protection state. */
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	/* No cached PTE-fragment page yet; see destroy_pagetable_page(). */
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(&mm->context);
#endif
	return 0;
}

/* Return a context id to the IDA; counterpart of __init_new_context(). */
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
/*
 * Drop the references this mm still holds on its cached PTE-fragment
 * page.  The low bits of context.pte_frag encode how many fragments
 * have already been handed out; the page itself is freed only when
 * the combined refcount drops to zero.
 */
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
/* Without 64K pages there is no PTE-fragment cache to tear down. */
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif


/*
 * Arch hook invoked when an address space is torn down: undo what
 * init_new_context() set up and release the context id.
 */
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_cleanup(&mm->context);
#endif

#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	/*
	 * NOTE(review): the radix path clears prtb1 here although
	 * radix__init_new_context() wrote prtb0 -- confirm this is the
	 * intended process-table invalidation.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb1 = 0;
	else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

#ifdef CONFIG_PPC_RADIX_MMU
/*
 * Point the radix MMU at the next mm by writing its context id to
 * SPRN_PID, bracketed by isync context-synchronizing barriers.
 * PPC_SLBIA(0x7) presumably invalidates stale SLB/ERAT state for the
 * old context -- see the ISA for the semantics of this IH value.
 */
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	asm volatile("isync": : :"memory");
	mtspr(SPRN_PID, next->context.id);
	asm volatile("isync \n"
		     PPC_SLBIA(0x7)
		     : : :"memory");
}
#endif