Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 5905/1: ARM: Global ASID allocation on SMP

The current ASID allocation algorithm doesn't ensure the notification
of the other CPUs when the ASID rolls over. This may lead to two
processes using the same ASID (but different generation) or multiple
threads of the same process using different ASIDs.

This patch adds the broadcasting of the ASID rollover event to the
other CPUs. To avoid a race on multiple CPUs modifying "cpu_last_asid"
during the handling of the broadcast, the ASID numbering now starts at
"smp_processor_id() + 1". At rollover, the cpu_last_asid will be set
to NR_CPUS.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Catalin Marinas; committed by Russell King.
11805bcf 48ab7e09

+126 -14
+1
arch/arm/include/asm/mmu.h
··· 6 6 typedef struct { 7 7 #ifdef CONFIG_CPU_HAS_ASID 8 8 unsigned int id; 9 + spinlock_t id_lock; 9 10 #endif 10 11 unsigned int kvm_seq; 11 12 } mm_context_t;
+15
arch/arm/include/asm/mmu_context.h
··· 43 43 #define ASID_FIRST_VERSION (1 << ASID_BITS) 44 44 45 45 extern unsigned int cpu_last_asid; 46 + #ifdef CONFIG_SMP 47 + DECLARE_PER_CPU(struct mm_struct *, current_mm); 48 + #endif 46 49 47 50 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); 48 51 void __new_context(struct mm_struct *mm); 49 52 50 53 static inline void check_context(struct mm_struct *mm) 51 54 { 55 + /* 56 + * This code is executed with interrupts enabled. Therefore, 57 + * mm->context.id cannot be updated to the latest ASID version 58 + * on a different CPU (and condition below not triggered) 59 + * without first getting an IPI to reset the context. The 60 + * alternative is to take a read_lock on mm->context.id_lock 61 + * (after changing its type to rwlock_t). 62 + */ 52 63 if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) 53 64 __new_context(mm); 54 65 ··· 119 108 __flush_icache_all(); 120 109 #endif 121 110 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { 111 + #ifdef CONFIG_SMP 112 + struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); 113 + *crt_mm = next; 114 + #endif 122 115 check_context(next); 123 116 cpu_switch_mm(next->pgd, next); 124 117 if (cache_is_vivt())
+110 -14
arch/arm/mm/context.c
··· 10 10 #include <linux/init.h> 11 11 #include <linux/sched.h> 12 12 #include <linux/mm.h> 13 + #include <linux/smp.h> 14 + #include <linux/percpu.h> 13 15 14 16 #include <asm/mmu_context.h> 15 17 #include <asm/tlbflush.h> 16 18 17 19 static DEFINE_SPINLOCK(cpu_asid_lock); 18 20 unsigned int cpu_last_asid = ASID_FIRST_VERSION; 21 + #ifdef CONFIG_SMP 22 + DEFINE_PER_CPU(struct mm_struct *, current_mm); 23 + #endif 19 24 20 25 /* 21 26 * We fork()ed a process, and we need a new context for the child ··· 31 26 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) 32 27 { 33 28 mm->context.id = 0; 29 + spin_lock_init(&mm->context.id_lock); 34 30 } 31 + 32 + static void flush_context(void) 33 + { 34 + /* set the reserved ASID before flushing the TLB */ 35 + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); 36 + isb(); 37 + local_flush_tlb_all(); 38 + if (icache_is_vivt_asid_tagged()) { 39 + __flush_icache_all(); 40 + dsb(); 41 + } 42 + } 43 + 44 + #ifdef CONFIG_SMP 45 + 46 + static void set_mm_context(struct mm_struct *mm, unsigned int asid) 47 + { 48 + unsigned long flags; 49 + 50 + /* 51 + * Locking needed for multi-threaded applications where the 52 + * same mm->context.id could be set from different CPUs during 53 + * the broadcast. This function is also called via IPI so the 54 + * mm->context.id_lock has to be IRQ-safe. 55 + */ 56 + spin_lock_irqsave(&mm->context.id_lock, flags); 57 + if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { 58 + /* 59 + * Old version of ASID found. Set the new one and 60 + * reset mm_cpumask(mm). 61 + */ 62 + mm->context.id = asid; 63 + cpumask_clear(mm_cpumask(mm)); 64 + } 65 + spin_unlock_irqrestore(&mm->context.id_lock, flags); 66 + 67 + /* 68 + * Set the mm_cpumask(mm) bit for the current CPU. 69 + */ 70 + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); 71 + } 72 + 73 + /* 74 + * Reset the ASID on the current CPU. 
This function call is broadcast 75 + * from the CPU handling the ASID rollover and holding cpu_asid_lock. 76 + */ 77 + static void reset_context(void *info) 78 + { 79 + unsigned int asid; 80 + unsigned int cpu = smp_processor_id(); 81 + struct mm_struct *mm = per_cpu(current_mm, cpu); 82 + 83 + /* 84 + * Check if a current_mm was set on this CPU as it might still 85 + * be in the early booting stages and using the reserved ASID. 86 + */ 87 + if (!mm) 88 + return; 89 + 90 + smp_rmb(); 91 + asid = cpu_last_asid + cpu + 1; 92 + 93 + flush_context(); 94 + set_mm_context(mm, asid); 95 + 96 + /* set the new ASID */ 97 + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); 98 + isb(); 99 + } 100 + 101 + #else 102 + 103 + static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) 104 + { 105 + mm->context.id = asid; 106 + cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); 107 + } 108 + 109 + #endif 35 110 36 111 void __new_context(struct mm_struct *mm) 37 112 { 38 113 unsigned int asid; 39 114 40 115 spin_lock(&cpu_asid_lock); 116 + #ifdef CONFIG_SMP 117 + /* 118 + * Check the ASID again, in case the change was broadcast from 119 + * another CPU before we acquired the lock. 120 + */ 121 + if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { 122 + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); 123 + spin_unlock(&cpu_asid_lock); 124 + return; 125 + } 126 + #endif 127 + /* 128 + * At this point, it is guaranteed that the current mm (with 129 + * an old ASID) isn't active on any other CPU since the ASIDs 130 + * are changed simultaneously via IPI. 131 + */ 41 132 asid = ++cpu_last_asid; 42 133 if (asid == 0) 43 134 asid = cpu_last_asid = ASID_FIRST_VERSION; ··· 143 42 * to start a new version and flush the TLB. 
144 43 */ 145 44 if (unlikely((asid & ~ASID_MASK) == 0)) { 146 - asid = ++cpu_last_asid; 147 - /* set the reserved ASID before flushing the TLB */ 148 - asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n" 149 - : 150 - : "r" (0)); 151 - isb(); 152 - flush_tlb_all(); 153 - if (icache_is_vivt_asid_tagged()) { 154 - __flush_icache_all(); 155 - dsb(); 156 - } 45 + asid = cpu_last_asid + smp_processor_id() + 1; 46 + flush_context(); 47 + #ifdef CONFIG_SMP 48 + smp_wmb(); 49 + smp_call_function(reset_context, NULL, 1); 50 + #endif 51 + cpu_last_asid += NR_CPUS; 157 52 } 158 - spin_unlock(&cpu_asid_lock); 159 53 160 - cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); 161 - mm->context.id = asid; 54 + set_mm_context(mm, asid); 55 + spin_unlock(&cpu_asid_lock); 162 56 }