Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v4.8-rc2 (169 lines, 5.1 kB)
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern bool mm_iommu_preregistered(void);
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(mm_context_t *ctx);
extern void mm_iommu_cleanup(mm_context_t *ctx);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
		unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
		unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
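
The comment above switch_mm() notes that it is the entry point called from the architecture-independent scheduler in kernel/sched/core.c. For orientation, here is a hypothetical, much-simplified sketch of that caller side; the function name context_switch_sketch and its structure are invented for illustration, and the real scheduler code additionally handles mm reference counting, active_mm bookkeeping, and lazy-TLB corner cases.

/*
 * Hypothetical, simplified sketch of the generic caller side of
 * switch_mm()/enter_lazy_tlb(). Not the real kernel/sched/core.c code,
 * which also handles mmgrab/mmdrop, active_mm tracking, etc.
 */
static void context_switch_sketch(struct task_struct *prev_task,
				  struct task_struct *next_task)
{
	struct mm_struct *prev_mm = prev_task->active_mm;
	struct mm_struct *next_mm = next_task->mm;

	if (!next_mm)
		/* Kernel thread: no user mm, just note the lazy-TLB state. */
		enter_lazy_tlb(prev_mm, next_task);
	else
		/* User task: install the new address space on this CPU. */
		switch_mm(prev_mm, next_mm, next_task);
}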
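
The mm_iommu_* declarations under CONFIG_SPAPR_TCE_IOMMU form a memory-preregistration API for SPAPR TCE IOMMU users. The sketch below shows one plausible call sequence using only the functions declared in this header; the surrounding driver logic (the name map_one_page_sketch and the error handling) is invented for illustration and is not the actual consumer code.

/*
 * Hypothetical usage sketch of the mm_iommu_* preregistration API.
 * Only functions declared in this header are called; everything else
 * here is made up for illustration.
 */
static long map_one_page_sketch(unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long hpa;
	long ret;

	/* Find a previously preregistered region covering this address. */
	mem = mm_iommu_lookup(ua, PAGE_SIZE);
	if (!mem)
		return -ENXIO;

	/* Translate the userspace address to a host physical address. */
	ret = mm_iommu_ua_to_hpa(mem, ua, &hpa);
	if (ret)
		return ret;

	/* Pin the region while a hardware mapping references it. */
	ret = mm_iommu_mapped_inc(mem);
	if (ret)
		return ret;

	/* ... program the TCE table with hpa here ... */

	return 0;
}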