Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.3-rc2 152 lines 3.5 kB view raw
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

/* Serializes every access to mmu_context_ida below. */
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

/*
 * Allocate a new MMU context id from the global IDA.
 *
 * Ids are handed out starting at 1 and are bounded above by
 * MAX_USER_CONTEXT.  Returns the new id on success, -ENOMEM when IDA
 * memory cannot be pre-allocated or the id space is exhausted, or any
 * other negative errno from ida_get_new_above().
 */
int __init_new_context(void)
{
	int index;
	int err;

again:
	/* Pre-load IDA internal memory outside the lock (GFP_KERNEL may sleep). */
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	/* Grab the lowest free id >= 1; id 0 is never handed out here. */
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	/*
	 * -EAGAIN means the memory pre-loaded above was consumed before we
	 * could use it (e.g. by a concurrent allocator); replenish and retry.
	 */
	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	/* Ids past MAX_USER_CONTEXT are unusable: give the id back and fail. */
	if (index > MAX_USER_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);

/*
 * Set up the MMU context of a freshly created mm.
 *
 * Allocates a context id and initializes the per-mm context state:
 * slice page-size bookkeeping, subpage protection, the optional icswx
 * coprocessor lock, and (config-dependent) PTE-fragment and IOMMU
 * fields.  Returns 0 on success or a negative errno; on the ICSWX
 * failure path the context id and subpage-prot state allocated here
 * are released before returning.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/* The old code would re-promote on fork, we don't do that
	 * when using slices as it could cause problem promoting slices
	 * that have been forced down to 4K
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	subpage_prot_init_new_context(mm);
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	/* Per-mm spinlock for the icswx coprocessor state. */
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		/* Unwind everything allocated above before failing. */
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(&mm->context);
#endif
	return 0;
}

/* Release a context id previously returned by __init_new_context(). */
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
/*
 * Drop this mm's outstanding references on its current PTE-fragment
 * page and free the page once the reference count reaches zero.  The
 * number of fragments already consumed is encoded in the low bits of
 * the pte_frag pointer (see the PTE_FRAG_SIZE_SHIFT extraction below).
 */
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	count = atomic_sub_return(PTE_FRAG_NR - count, &page->_count);
	if (!count) {
		/* Last reference gone: undo ctor and return page to the allocator. */
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
/* Without CONFIG_PPC_64K_PAGES there are no PTE fragments to tear down. */
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif


/*
 * Tear down the MMU context of a dying mm: IOMMU tables, icswx
 * coprocessor state, the PTE-fragment page, subpage protection, and
 * finally the context id itself.
 */
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_cleanup(&mm->context);
#endif

#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	/* Mark the mm as having no hardware context. */
	mm->context.id = MMU_NO_CONTEXT;
}