Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/8xx: reduce pressure on TLB due to context switches

For nohash powerpc, when we run out of contexts, contexts are freed by stealing
used contexts in turn. When a victim has been selected, the associated TLB
entries are freed using _tlbil_pid(). Unfortunately, on the PPC 8xx, _tlbil_pid()
does a tlbia, hence flushes ALL TLB entries and not only the one linked to the
stolen context. Therefore, as implemented today, at each task switch requiring a
new context, all entries are flushed.

This patch modifies the implementation so that when running out of contexts, all
contexts get freed at once, hence dividing the number of calls to tlbia by 16.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <scottwood@freescale.com>

authored by

LEROY Christophe and committed by
Scott Wood
debddd95 7f93c9d9

+42 -1
+42 -1
arch/powerpc/mm/mmu_context_nohash.c
··· 52 52 #include <asm/mmu_context.h> 53 53 #include <asm/tlbflush.h> 54 54 55 + #include "mmu_decl.h" 56 + 55 57 static unsigned int first_context, last_context; 56 58 static unsigned int next_context, nr_free_contexts; 57 59 static unsigned long *context_map; 58 60 static unsigned long *stale_map[NR_CPUS]; 59 61 static struct mm_struct **context_mm; 60 62 static DEFINE_RAW_SPINLOCK(context_lock); 63 + static bool no_selective_tlbil; 61 64 62 65 #define CTX_MAP_SIZE \ 63 66 (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1)) ··· 135 132 return MMU_NO_CONTEXT; 136 133 } 137 134 #endif /* CONFIG_SMP */ 135 + 136 + static unsigned int steal_all_contexts(void) 137 + { 138 + struct mm_struct *mm; 139 + int cpu = smp_processor_id(); 140 + unsigned int id; 141 + 142 + for (id = first_context; id <= last_context; id++) { 143 + /* Pick up the victim mm */ 144 + mm = context_mm[id]; 145 + 146 + pr_hardcont(" | steal %d from 0x%p", id, mm); 147 + 148 + /* Mark this mm as having no context anymore */ 149 + mm->context.id = MMU_NO_CONTEXT; 150 + if (id != first_context) { 151 + context_mm[id] = NULL; 152 + __clear_bit(id, context_map); 153 + #ifdef DEBUG_MAP_CONSISTENCY 154 + mm->context.active = 0; 155 + #endif 156 + } 157 + __clear_bit(id, stale_map[cpu]); 158 + } 159 + 160 + /* Flush the TLB for all contexts (not to be used on SMP) */ 161 + _tlbil_all(); 162 + 163 + nr_free_contexts = last_context - first_context; 164 + 165 + return first_context; 166 + } 138 167 139 168 /* Note that this will also be called on SMP if all other CPUs are 140 169 * offlined, which means that it may be called for cpu != 0. 
For ··· 276 241 goto stolen; 277 242 } 278 243 #endif /* CONFIG_SMP */ 279 - id = steal_context_up(id); 244 + if (no_selective_tlbil) 245 + id = steal_all_contexts(); 246 + else 247 + id = steal_context_up(id); 280 248 goto stolen; 281 249 } 282 250 nr_free_contexts--; ··· 445 407 if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { 446 408 first_context = 0; 447 409 last_context = 15; 410 + no_selective_tlbil = true; 448 411 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { 449 412 first_context = 1; 450 413 last_context = 65535; 414 + no_selective_tlbil = false; 451 415 } else { 452 416 first_context = 1; 453 417 last_context = 255; 418 + no_selective_tlbil = false; 454 419 } 455 420 456 421 #ifdef DEBUG_CLAMP_LAST_CONTEXT