Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

SLUB: i386 support

SLUB cannot run on i386 at this point because i386 uses the page->private and
page->index field of slab pages for the pgd cache.

Make SLUB run on i386 by replacing the pgd slab cache with a quicklist.
Limit the changes as much as possible. Leave the improvised linked list in place
etc etc. This has been working here for a couple of weeks now.

Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Lameter; committed by Linus Torvalds.
f1d1a842 8df767dd

+25 -26
+4 -4
arch/i386/Kconfig
@@ -55,6 +55,10 @@
 	bool
 	default y
 
+config QUICKLIST
+	bool
+	default y
+
 config SBUS
 	bool
@@ -80,10 +76,6 @@
 	default y
 
 config ARCH_MAY_HAVE_PC_FDC
-	bool
-	default y
-
-config ARCH_USES_SLAB_PAGE_STRUCT
 	bool
 	default y
+1
arch/i386/kernel/process.c
@@ -186,6 +186,7 @@
 		if (__get_cpu_var(cpu_idle_state))
 			__get_cpu_var(cpu_idle_state) = 0;
 
+		check_pgt_cache();
 		rmb();
 		idle = pm_idle;
 
+1 -1
arch/i386/kernel/smp.c
@@ -421,7 +421,7 @@
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
+	check_pgt_cache();
 	preempt_enable();
 }
-7
arch/i386/mm/init.c
@@ -740,7 +740,6 @@
 EXPORT_SYMBOL_GPL(remove_memory);
 #endif
 
-struct kmem_cache *pgd_cache;
 struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
@@ -763,12 +764,6 @@
 			pgd_size = PAGE_SIZE;
 		}
 	}
-	pgd_cache = kmem_cache_create("pgd",
-				pgd_size,
-				pgd_size,
-				SLAB_PANIC,
-				pgd_ctor,
-				(!SHARED_KERNEL_PMD) ? pgd_dtor : NULL);
 }
 
 /*
+17 -9
arch/i386/mm/pgtable.c
@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/quicklist.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -206,8 +205,6 @@
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
  * -- wli
  */
 DEFINE_SPINLOCK(pgd_lock);
@@ -231,9 +232,11 @@
 		set_page_private(next, (unsigned long)pprev);
 }
 
+
+
 #if (PTRS_PER_PMD == 1)
 /* Non-PAE pgd constructor */
-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	unsigned long flags;
 
@@ -257,7 +256,7 @@
 }
 #else	/* PTRS_PER_PMD > 1 */
 /* PAE pgd constructor */
-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	/* PAE, kernel PMD may be shared */
 
@@ -276,12 +275,13 @@
 }
 #endif	/* PTRS_PER_PMD */
 
-void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_dtor(void *pgd)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
-	BUG_ON(SHARED_KERNEL_PMD);
+	if (SHARED_KERNEL_PMD)
+		return;
 
 	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
 	spin_lock_irqsave(&pgd_lock, flags);
@@ -323,7 +321,7 @@
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
-	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
 
 	if (PTRS_PER_PMD == 1 || !pgd)
 		return pgd;
@@ -346,7 +344,7 @@
 		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
 		pmd_cache_free(pmd, i);
 	}
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 	return NULL;
 }
 
@@ -363,6 +361,12 @@
 		pmd_cache_free(pmd, i);
 	}
 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 }
+
+void check_pgt_cache(void)
+{
+	quicklist_trim(0, pgd_dtor, 25, 16);
+}
+
-2
include/asm-i386/pgalloc.h
@@ -65,6 +65,4 @@
 #define pud_populate(mm, pmd, pte)	BUG()
 #endif
 
-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _I386_PGALLOC_H */
+2 -3
include/asm-i386/pgtable.h
@@ -35,15 +35,14 @@
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
-extern struct kmem_cache *pgd_cache;
 extern struct kmem_cache *pmd_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
+void check_pgt_cache(void);
 
 void pmd_ctor(void *, struct kmem_cache *, unsigned long);
-void pgd_ctor(void *, struct kmem_cache *, unsigned long);
-void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 void pgtable_cache_init(void);
 void paging_init(void);
+
 
 /*