Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ia64: mmu_gather rework

Fix up the ia64 mmu_gather code to conform to the new API.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Tony Luck <tony.luck@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Peter Zijlstra and committed by Linus Torvalds.
Commit 7a95a2c8 (parent 1e56a564).

+46 -20 (46 lines added, 20 lines removed)
arch/ia64/include/asm/tlb.h
···
 47  47    #include <asm/machvec.h>
 48  48
 49  49    #ifdef CONFIG_SMP
 50   -  - # define FREE_PTE_NR		2048
 51  50    # define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
 52  51    #else
 53   -  - # define FREE_PTE_NR		0
 54  52    # define tlb_fast_mode(tlb)	(1)
 55  53    #endif
     54  +
     55  + /*
     56  +  * If we can't allocate a page to make a big batch of page pointers
     57  +  * to work on, then just handle a few from the on-stack structure.
     58  +  */
     59  + #define IA64_GATHER_BUNDLE	8
 56  60
 57  61    struct mmu_gather {
 58  62    	struct mm_struct	*mm;
 59  63    	unsigned int		nr;	/* == ~0U => fast mode */
     64  + 	unsigned int		max;
 60  65    	unsigned char		fullmm;	/* non-zero means full mm flush */
 61  66    	unsigned char		need_flush;	/* really unmapped some PTEs? */
 62  67    	unsigned long		start_addr;
 63  68    	unsigned long		end_addr;
 64   -  - 	struct page		*pages[FREE_PTE_NR];
     69  + 	struct page		**pages;
     70  + 	struct page		*local[IA64_GATHER_BUNDLE];
 65  71    };
 66  72
 67  73    struct ia64_tr_entry {
···
 95  89    #define RR_PS_SHIFT	2
 96  90    #define RR_RID_MASK	0x00000000ffffff00L
 97  91    #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 98   -  -
 99   -  - /* Users of the generic TLB shootdown code must declare this storage space. */
100   -  - DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
101  92
102  93    /*
103  94     * Flush the TLB for address range START to END and, if not in fast mode, release the
···
150 147    	}
151 148    }
152 149
153   -  - /*
154   -  -  * Return a pointer to an initialized struct mmu_gather.
155   -  -  */
156   -  - static inline struct mmu_gather *
157   -  - tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
    150  + static inline void __tlb_alloc_page(struct mmu_gather *tlb)
158 151    {
159   -  - 	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
    152  + 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
160 153
    154  + 	if (addr) {
    155  + 		tlb->pages = (void *)addr;
    156  + 		tlb->max = PAGE_SIZE / sizeof(void *);
    157  + 	}
    158  + }
    159  +
    160  +
    161  + static inline void
    162  + tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
    163  + {
161 164    	tlb->mm = mm;
    165  + 	tlb->max = ARRAY_SIZE(tlb->local);
    166  + 	tlb->pages = tlb->local;
162 167    	/*
163 168    	 * Use fast mode if only 1 CPU is online.
164 169    	 *
···
183 172    	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
184 173    	tlb->fullmm = full_mm_flush;
185 174    	tlb->start_addr = ~0UL;
186   -  - 	return tlb;
187 175    }
188 176
189 177    /*
···
190 180     * collected.
191 181     */
192 182    static inline void
193   -  - tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
    183  + tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
194 184    {
195 185    	/*
196 186    	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
···
201 191    	/* keep the page table cache within bounds */
202 192    	check_pgt_cache();
203 193
204   -  - 	put_cpu_var(mmu_gathers);
    194  + 	if (tlb->pages != tlb->local)
    195  + 		free_pages((unsigned long)tlb->pages, 0);
205 196    }
206 197
207 198    /*
···
210 199     * must be delayed until after the TLB has been flushed (see comments at the beginning of
211 200     * this file).
212 201     */
213   -  - static inline void
214   -  - tlb_remove_page (struct mmu_gather *tlb, struct page *page)
    202  + static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
215 203    {
216 204    	tlb->need_flush = 1;
217 205
218 206    	if (tlb_fast_mode(tlb)) {
219 207    		free_page_and_swap_cache(page);
220   -  - 		return;
    208  + 		return 1; /* avoid calling tlb_flush_mmu */
221 209    	}
    210  +
    211  + 	if (!tlb->nr && tlb->pages == tlb->local)
    212  + 		__tlb_alloc_page(tlb);
    213  +
222 214    	tlb->pages[tlb->nr++] = page;
223   -  - 	if (tlb->nr >= FREE_PTE_NR)
224   -  - 		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
    215  + 	VM_BUG_ON(tlb->nr > tlb->max);
    216  +
    217  + 	return tlb->max - tlb->nr;
    218  + }
    219  +
    220  + static inline void tlb_flush_mmu(struct mmu_gather *tlb)
    221  + {
    222  + 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
    223  + }
    224  +
    225  + static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
    226  + {
    227  + 	if (!__tlb_remove_page(tlb, page))
    228  + 		tlb_flush_mmu(tlb);
225 229    }
226 230
227 231    /*