Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] mm: tlb_is_full_mm was obscure

tlb_is_full_mm? What does that mean? The TLB is full? No, it means that the
mm's last user has gone and the whole mm is being torn down. And it's an
inline function because sparc64 uses a different (slightly better)
"tlb_frozen" name for the flag others call "fullmm".

And now the ptep_get_and_clear_full macro used in zap_pte_range refers
directly to tlb->fullmm, which would be wrong for sparc64. Rather than
correct that, I'd prefer to scrap tlb_is_full_mm altogether, and change
sparc64 to just use the same poor name as everyone else - is that okay?

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Hugh Dickins and committed by Linus Torvalds.
Commit: 4d6ddfa9 (parent: 15a23ffa)

+8 -37
+2 -2
arch/sparc64/mm/tlb.c
··· 72 72 73 73 no_cache_flush: 74 74 75 - if (mp->tlb_frozen) 75 + if (mp->fullmm) 76 76 return; 77 77 78 78 nr = mp->tlb_nr; ··· 97 97 unsigned long nr = mp->tlb_nr; 98 98 long s = start, e = end, vpte_base; 99 99 100 - if (mp->tlb_frozen) 100 + if (mp->fullmm) 101 101 return; 102 102 103 103 /* If start is greater than end, that is a real problem. */
-5
include/asm-arm/tlb.h
··· 68 68 put_cpu_var(mmu_gathers); 69 69 } 70 70 71 - static inline unsigned int tlb_is_full_mm(struct mmu_gather *tlb) 72 - { 73 - return tlb->fullmm; 74 - } 75 - 76 71 #define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0) 77 72 78 73 /*
-7
include/asm-arm26/tlb.h
··· 55 55 put_cpu_var(mmu_gathers); 56 56 } 57 57 58 - 59 - static inline unsigned int 60 - tlb_is_full_mm(struct mmu_gather *tlb) 61 - { 62 - return tlb->fullmm; 63 - } 64 - 65 58 #define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0) 66 59 //#define tlb_start_vma(tlb,vma) do { } while (0) 67 60 //FIXME - ARM32 uses this now that things changed in the kernel. seems like it may be pointless on arm26, however to get things compiling...
-6
include/asm-generic/tlb.h
··· 103 103 put_cpu_var(mmu_gathers); 104 104 } 105 105 106 - static inline unsigned int 107 - tlb_is_full_mm(struct mmu_gather *tlb) 108 - { 109 - return tlb->fullmm; 110 - } 111 - 112 106 /* tlb_remove_page 113 107 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while 114 108 * handling the additional races in SMP caused by other CPUs caching valid
-6
include/asm-ia64/tlb.h
··· 178 178 put_cpu_var(mmu_gathers); 179 179 } 180 180 181 - static inline unsigned int 182 - tlb_is_full_mm(struct mmu_gather *tlb) 183 - { 184 - return tlb->fullmm; 185 - } 186 - 187 181 /* 188 182 * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page 189 183 * must be delayed until after the TLB has been flushed (see comments at the beginning of
+4 -9
include/asm-sparc64/tlb.h
··· 25 25 struct mm_struct *mm; 26 26 unsigned int pages_nr; 27 27 unsigned int need_flush; 28 - unsigned int tlb_frozen; 28 + unsigned int fullmm; 29 29 unsigned int tlb_nr; 30 30 unsigned long freed; 31 31 unsigned long vaddrs[TLB_BATCH_NR]; ··· 50 50 51 51 mp->mm = mm; 52 52 mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U; 53 - mp->tlb_frozen = full_mm_flush; 53 + mp->fullmm = full_mm_flush; 54 54 mp->freed = 0; 55 55 56 56 return mp; ··· 88 88 89 89 tlb_flush_mmu(mp); 90 90 91 - if (mp->tlb_frozen) { 91 + if (mp->fullmm) { 92 92 if (CTX_VALID(mm->context)) 93 93 do_flush_tlb_mm(mm); 94 - mp->tlb_frozen = 0; 94 + mp->fullmm = 0; 95 95 } else 96 96 flush_tlb_pending(); 97 97 ··· 99 99 check_pgt_cache(); 100 100 101 101 put_cpu_var(mmu_gathers); 102 - } 103 - 104 - static inline unsigned int tlb_is_full_mm(struct mmu_gather *mp) 105 - { 106 - return mp->tlb_frozen; 107 102 } 108 103 109 104 static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
+2 -2
mm/memory.c
··· 249 249 free_pud_range(*tlb, pgd, addr, next, floor, ceiling); 250 250 } while (pgd++, addr = next, addr != end); 251 251 252 - if (!tlb_is_full_mm(*tlb)) 252 + if (!(*tlb)->fullmm) 253 253 flush_tlb_pgtables((*tlb)->mm, start, end); 254 254 } 255 255 ··· 698 698 int tlb_start_valid = 0; 699 699 unsigned long start = start_addr; 700 700 spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL; 701 - int fullmm = tlb_is_full_mm(*tlbp); 701 + int fullmm = (*tlbp)->fullmm; 702 702 703 703 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) { 704 704 unsigned long end;