linux/arch/arm/mm/context.c at v3.3
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif

#ifdef CONFIG_ARM_LPAE
#define cpu_set_asid(asid) {						\
	unsigned long ttbl, ttbh;					\
	asm volatile(							\
	"	mrrc	p15, 0, %0, %1, c2	@ read TTBR0\n"		\
	"	mov	%1, %2, lsl #(48 - 32)	@ set ASID\n"		\
	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"		\
	: "=&r" (ttbl), "=&r" (ttbh)					\
	: "r" (asid & ~ASID_MASK));					\
}
#else
#define cpu_set_asid(asid) \
	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
#endif

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.  We reserve version 0 for initial tasks so we will
 * always allocate an ASID. The ASID 0 is reserved for the TTBR
 * register changing sequence.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}

static void flush_context(void)
{
	/* set the reserved ASID before flushing the TLB */
	cpu_set_asid(0);
	isb();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (!mm)
		return;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_set_asid(mm->context.id);
	isb();
}

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}
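
/*
 * Illustrative sketch only (not part of this file, guarded out with
 * "#if 0"): roughly how a context-switch path would consume
 * __new_context(). The function name example_check_context() is
 * hypothetical; in the real kernel the equivalent check lives in
 * arch/arm/include/asm/mmu_context.h. The idea it demonstrates: an mm
 * needs a fresh ASID whenever the version bits of its context.id (the
 * bits above ASID_BITS) no longer match cpu_last_asid, in which case
 * __new_context() allocates a new one under cpu_asid_lock before the
 * ASID is programmed into the hardware.
 */
#if 0	/* example only, never compiled */
static inline void example_check_context(struct mm_struct *mm)
{
	/* Version bits differ: this mm's ASID is from an old rollover. */
	if ((mm->context.id ^ cpu_last_asid) >> ASID_BITS)
		__new_context(mm);

	/* Program the (now current) ASID into the CONTEXTIDR/TTBR. */
	cpu_set_asid(mm->context.id);
	isb();
}
#endif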