Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sparc32: centralize all mmu context handling in srmmu.c

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Sam Ravnborg and committed by
David S. Miller
b585e855 59b00c79

+63 -71
+3 -5
arch/sparc/include/asm/mmu_context_32.h
··· 9 9 { 10 10 } 11 11 12 - /* 13 - * Initialize a new mmu context. This is invoked when a new 12 + /* Initialize a new mmu context. This is invoked when a new 14 13 * address space instance (unique or shared) is instantiated. 15 14 */ 16 - #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) 15 + int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 17 16 18 - /* 19 - * Destroy a dead context. This occurs when mmput drops the 17 + /* Destroy a dead context. This occurs when mmput drops the 20 18 * mm_users count to zero, the mmaps have been released, and 21 19 * all the page tables have been flushed. Our job is to destroy 22 20 * any remaining processor-specific state.
-32
arch/sparc/include/asm/pgtable_32.h
··· 79 79 #define __S110 PAGE_SHARED 80 80 #define __S111 PAGE_SHARED 81 81 82 - extern int num_contexts; 83 - 84 82 /* First physical page can be anywhere, the following is needed so that 85 83 * va-->pa and vice versa conversions work properly without performance 86 84 * hit for all __pa()/__va() operations. ··· 396 398 * This is made a constant because mm/fremap.c required a constant. 397 399 */ 398 400 #define PTE_FILE_MAX_BITS 24 399 - 400 - /* 401 - */ 402 - struct ctx_list { 403 - struct ctx_list *next; 404 - struct ctx_list *prev; 405 - unsigned int ctx_number; 406 - struct mm_struct *ctx_mm; 407 - }; 408 - 409 - extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */ 410 - extern struct ctx_list ctx_free; /* Head of free list */ 411 - extern struct ctx_list ctx_used; /* Head of used contexts list */ 412 - 413 - #define NO_CONTEXT -1 414 - 415 - static inline void remove_from_ctx_list(struct ctx_list *entry) 416 - { 417 - entry->next->prev = entry->prev; 418 - entry->prev->next = entry->next; 419 - } 420 - 421 - static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) 422 - { 423 - entry->next = head; 424 - (entry->prev = head->prev)->next = entry; 425 - head->prev = entry; 426 - } 427 - #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) 428 - #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) 429 401 430 402 static inline unsigned long 431 403 __get_phys (unsigned long addr)
-1
arch/sparc/kernel/setup_32.c
··· 371 371 (*(linux_dbvec->teach_debugger))(); 372 372 } 373 373 374 - init_mm.context = (unsigned long) NO_CONTEXT; 375 374 init_task.thread.kregs = &fake_swapper_regs; 376 375 377 376 /* Run-time patch instructions to match the cpu model */
-6
arch/sparc/mm/fault_32.c
··· 32 32 33 33 int show_unhandled_signals = 1; 34 34 35 - /* At boot time we determine these two values necessary for setting 36 - * up the segment maps and page table entries (pte's). 37 - */ 38 - 39 - int num_contexts; 40 - 41 35 /* Return how much physical memory we have. */ 42 36 unsigned long probe_memory(void) 43 37 {
-18
arch/sparc/mm/init_32.c
··· 82 82 #endif 83 83 } 84 84 85 - void __init sparc_context_init(int numctx) 86 - { 87 - int ctx; 88 - 89 - ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL); 90 - 91 - for(ctx = 0; ctx < numctx; ctx++) { 92 - struct ctx_list *clist; 93 - 94 - clist = (ctx_list_pool + ctx); 95 - clist->ctx_number = ctx; 96 - clist->ctx_mm = NULL; 97 - } 98 - ctx_free.next = ctx_free.prev = &ctx_free; 99 - ctx_used.next = ctx_used.prev = &ctx_used; 100 - for(ctx = 0; ctx < numctx; ctx++) 101 - add_to_free_ctxlist(ctx_list_pool + ctx); 102 - } 103 85 104 86 extern unsigned long cmdline_memory_size; 105 87 unsigned long last_valid_pfn;
+60 -9
arch/sparc/mm/srmmu.c
··· 55 55 int vac_cache_size; 56 56 int vac_line_size; 57 57 58 - struct ctx_list *ctx_list_pool; 59 - struct ctx_list ctx_free; 60 - struct ctx_list ctx_used; 61 - 62 58 extern struct resource sparc_iomap; 63 59 64 60 extern unsigned long last_valid_pfn; ··· 351 355 srmmu_free_nocache(__nocache_va(p), PTE_SIZE); 352 356 } 353 357 354 - /* 355 - */ 358 + /* context handling - a dynamically sized pool is used */ 359 + #define NO_CONTEXT -1 360 + 361 + struct ctx_list { 362 + struct ctx_list *next; 363 + struct ctx_list *prev; 364 + unsigned int ctx_number; 365 + struct mm_struct *ctx_mm; 366 + }; 367 + 368 + static struct ctx_list *ctx_list_pool; 369 + static struct ctx_list ctx_free; 370 + static struct ctx_list ctx_used; 371 + 372 + /* At boot time we determine the number of contexts */ 373 + static int num_contexts; 374 + 375 + static inline void remove_from_ctx_list(struct ctx_list *entry) 376 + { 377 + entry->next->prev = entry->prev; 378 + entry->prev->next = entry->next; 379 + } 380 + 381 + static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) 382 + { 383 + entry->next = head; 384 + (entry->prev = head->prev)->next = entry; 385 + head->prev = entry; 386 + } 387 + #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) 388 + #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) 389 + 390 + 356 391 static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) 357 392 { 358 393 struct ctx_list *ctxp; ··· 419 392 add_to_free_ctxlist(ctx_old); 420 393 } 421 394 395 + static void __init sparc_context_init(int numctx) 396 + { 397 + int ctx; 398 + unsigned long size; 399 + 400 + size = numctx * sizeof(struct ctx_list); 401 + ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL); 402 + 403 + for (ctx = 0; ctx < numctx; ctx++) { 404 + struct ctx_list *clist; 405 + 406 + clist = (ctx_list_pool + ctx); 407 + clist->ctx_number = ctx; 408 + clist->ctx_mm = NULL; 409 + } 410 + ctx_free.next = ctx_free.prev = &ctx_free; 411 + ctx_used.next = ctx_used.prev = &ctx_used; 412 + for (ctx = 0; ctx < numctx; ctx++) 413 + add_to_free_ctxlist(ctx_list_pool + ctx); 414 + }