/*
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

/*
 * Dispatch to the configuration-specific implementation.  Note that both
 * included headers define the same _ASM_MICROBLAZE_MMU_CONTEXT_H guard,
 * so exactly one of them can ever be pulled into a translation unit for
 * a given build configuration.
 */
#ifdef CONFIG_MMU
# include "mmu_context_mm.h"
#else
# include "mmu_context_no.h"
#endif
+140
arch/microblaze/include/asm/mmu_context_mm.h
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>

# ifdef __KERNEL__
/*
 * This macro defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.
 */
# define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

/*
   MicroBlaze has 256 contexts, so we can just rotate through these
   as a way of "switching" contexts.  If the TID of the TLB is zero,
   the PID/TID comparison is disabled, so we can use a TID of zero
   to represent all kernel pages as shared among all contexts.
 */

/* Nothing to do on MicroBlaze when entering lazy-TLB mode. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * NO_CONTEXT is an out-of-range sentinel meaning "no context assigned";
 * valid contexts are FIRST_CONTEXT..LAST_CONTEXT (context 0 is reserved
 * for the kernel, see mmu_context_init()).
 */
# define NO_CONTEXT	256
# define LAST_CONTEXT	255
# define FIRST_CONTEXT	1

/*
 * Set the current MMU context.
 * This is done by loading up the segment registers for the user part of the
 * address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass this along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron).
 */
extern void set_context(mm_context_t context, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];

/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only, we can't rely on this context
 * number to be free, but it usually will be.
 */
extern mm_context_t next_mmu_context;

/*
 * Since we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);

/*
 * Get a new mmu context for the address space described by `mm'.
 * No-op if `mm' already has one; otherwise claims a free context
 * number, stealing (recycling) one from another mm if none is free.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
	/* Block until a context can be reserved; steal_context() frees
	 * one up each iteration if the pool is exhausted. */
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();
	ctx = next_mmu_context;
	/* Claim the first free bit at or after `ctx', wrapping at the end.
	 * Bit 0 (kernel context) is always set, so the wrap to 0 simply
	 * restarts the search from the beginning of the map. */
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
	context_mm[ctx] = mm;
}

/*
 * Set up the context for a new address space.
 * The real context is assigned lazily by get_mmu_context().
 */
# define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 * Returns the context number to the free pool.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		clear_bit(mm->context, context_map);
		mm->context = NO_CONTEXT;
		atomic_inc(&nr_free_contexts);
	}
}

/* Switch the MMU to `next's context (assigning one if needed). */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	tsk->thread.pgdir = next->pgd;
	get_mmu_context(next);
	set_context(next->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm,
			struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}

extern void mmu_context_init(void);

# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
+23
arch/microblaze/include/asm/mmu_context_no.h
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

/*
 * No-MMU variant: with a single flat address space there are no MMU
 * contexts to manage, so every hook the generic mm code expects is a
 * no-op.  The guard macro is shared with mmu_context_mm.h on purpose —
 * only one of the two headers is ever included (see mmu_context.h).
 */
#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H

/* Always succeeds; there is no per-mm context state to initialize. */
# define init_new_context(tsk, mm)		({ 0; })

# define enter_lazy_tlb(mm, tsk)		do {} while (0)
# define change_mm_context(old, ctx, _pml4)	do {} while (0)
# define destroy_context(mm)			do {} while (0)
# define deactivate_mm(tsk, mm)			do {} while (0)
# define switch_mm(prev, next, tsk)		do {} while (0)
# define activate_mm(prev, next)		do {} while (0)

#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
+70
arch/microblaze/mm/mmu_context.c
/*
 * This file contains the routines for handling the MMU.
 *
 * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 * Derived from arch/ppc/mm/4xx_mmu.c:
 * -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/mm.h>
#include <linux/init.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/* Next context number expected to be free (hint only). */
mm_context_t next_mmu_context;
/* One bit per context; a set bit means the context is in use. */
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
/* Number of contexts currently available for allocation. */
atomic_t nr_free_contexts;
/* Reverse map: which mm currently owns each context number. */
struct mm_struct *context_mm[LAST_CONTEXT+1];

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/*
	 * The use of context zero is reserved for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone were motivated to do it.
 */
void steal_context(void)
{
	struct mm_struct *mm;

	/* free up context `next_mmu_context' */
	/* if we shouldn't free context 0, don't... */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	/* Evict the victim's TLB entries before releasing its context;
	 * destroy_context() returns the number to the free pool, so the
	 * caller's get_mmu_context() retry can claim it. */
	flush_tlb_mm(mm);
	destroy_context(mm);
}