/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Thiemo Seufer
 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
 * Author: Maciej W. Rozycki <macro@mips.com>
 */

#include <linux/init.h>

#include <asm/addrspace.h>
#include <asm/bug.h>

/* 32-bit configurations lack these; provide harmless fallbacks. */
#ifndef CKSEG2
#define CKSEG2 CKSSEG
#endif
#ifndef TO_PHYS_MASK
#define TO_PHYS_MASK -1
#endif

/*
 * FUNC is executed in one of the uncached segments, depending on its
 * original address as follows:
 *
 * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
 *    segment used is CKSEG1.
 * 2. If the original address is in XKPHYS, then the uncached segment
 *    used is XKPHYS(2).
 * 3. Otherwise it's a bug.
 *
 * The same remapping is done with the stack pointer.  Stack handling
 * works because we don't handle stack arguments or more complex return
 * values, so we can avoid sharing the same stack area between a cached
 * and the uncached mode.
 *
 * run_uncached - call FUNC through an uncached mapping of kernel memory.
 * @func: kernel function to invoke; must take no stack arguments and
 *        return (at most) a single register-sized value.
 *
 * Returns FUNC's return value (read back from register $2/v0).
 * Useful for cache probing/setup routines that must execute while the
 * caches are in an inconsistent or disabled state.
 */
unsigned long __init run_uncached(void *func)
{
	/*
	 * Register-pinned variables: sp tracks the live stack pointer,
	 * ret captures $2 (v0), the MIPS function-return register, after
	 * the asm trampoline below.
	 */
	register long sp __asm__("$sp");
	register long ret __asm__("$2");
	long lfunc = (long)func, ufunc;
	long usp;

	/* Remap the current stack pointer into an uncached segment. */
	if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
		usp = CKSEG1ADDR(sp);
	else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
		 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0))
		/* K_CALG_UNCACHED selects the uncached XKPHYS window. */
		usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
				     XKPHYS_TO_PHYS((long long)sp));
	else {
		BUG();
		usp = sp;	/* unreachable fallback; keeps usp defined */
	}
	/* Remap the target function's address the same way. */
	if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
		ufunc = CKSEG1ADDR(lfunc);
	else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
		 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0))
		ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
				       XKPHYS_TO_PHYS((long long)lfunc));
	else {
		BUG();
		ufunc = lfunc;	/* unreachable fallback; keeps ufunc defined */
	}

	/*
	 * Trampoline: save the cached sp in $16 (s0, callee-saved), switch
	 * to the uncached stack, call the uncached alias of FUNC, then
	 * restore the original sp.  $31 (ra) is clobbered by jalr; $16 is
	 * clobbered explicitly for the saved stack pointer.
	 */
	__asm__ __volatile__ (
		"	move	$16, $sp\n"
		"	move	$sp, %1\n"
		"	jalr	%2\n"
		"	move	$sp, $16"
		: "=r" (ret)
		: "r" (usp), "r" (ufunc)
		: "$16", "$31");

	return ret;
}
+2-4
arch/mips/mm/c-r4k.c
···2626#include <asm/system.h>2727#include <asm/mmu_context.h>2828#include <asm/war.h>2929+#include <asm/cacheflush.h> /* for run_uncached() */29303031static unsigned long icache_size, dcache_size, scache_size;3132···11201119 return 1;11211120}1122112111231123-typedef int (*probe_func_t)(unsigned long);11241122extern int r5k_sc_init(void);11251123extern int rm7k_sc_init(void);11261124···11271127{11281128 struct cpuinfo_mips *c = ¤t_cpu_data;11291129 unsigned int config = read_c0_config();11301130- probe_func_t probe_scache_kseg1;11311130 int sc_present = 0;1132113111331132 /*···11391140 case CPU_R4000MC:11401141 case CPU_R4400SC:11411142 case CPU_R4400MC:11421142- probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));11431143- sc_present = probe_scache_kseg1(config);11431143+ sc_present = run_uncached(probe_scache);11441144 if (sc_present)11451145 c->options |= MIPS_CPU_CACHE_CDEX_S;11461146 break;
+8-21
arch/mips/mm/sc-rm7k.c
···1515#include <asm/cacheops.h>1616#include <asm/mipsregs.h>1717#include <asm/processor.h>1818+#include <asm/cacheflush.h> /* for run_uncached() */18191920/* Primary cache parameters. */2021#define sc_lsize 32···9796}98979998/*100100- * This function is executed in the uncached segment CKSEG1.101101- * It must not touch the stack, because the stack pointer still points102102- * into CKSEG0.103103- *104104- * Three options:105105- * - Write it in assembly and guarantee that we don't use the stack.106106- * - Disable caching for CKSEG0 before calling it.107107- * - Pray that GCC doesn't randomly start using the stack.108108- *109109- * This being Linux, we obviously take the least sane of those options -110110- * following DaveM's lead in c-r4k.c111111- *112112- * It seems we get our kicks from relying on unguaranteed behaviour in GCC9999+ * This function is executed in uncached address space.113100 */114101static __init void __rm7k_sc_enable(void)115102{116103 int i;117104118118- set_c0_config(1 << 3); /* CONF_SE */105105+ set_c0_config(R7K_CONF_SE);119106120107 write_c0_taglo(0);121108 write_c0_taghi(0);···116127 ".set mips0\n\t"117128 ".set reorder"118129 :119119- : "r" (KSEG0ADDR(i)), "i" (Index_Store_Tag_SD));130130+ : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD));120131 }121132}122133123134static __init void rm7k_sc_enable(void)124135{125125- void (*func)(void) = (void *) KSEG1ADDR(&__rm7k_sc_enable);126126-127127- if (read_c0_config() & 0x08) /* CONF_SE */136136+ if (read_c0_config() & R7K_CONF_SE)128137 return;129138130139 printk(KERN_INFO "Enabling secondary cache...");131131- func();140140+ run_uncached(__rm7k_sc_enable);132141}133142134143static void rm7k_sc_disable(void)135144{136136- clear_c0_config(1<<3); /* CONF_SE */145145+ clear_c0_config(R7K_CONF_SE);137146}138147139148struct bcache_ops rm7k_sc_ops = {···151164 printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",152165 (scache_size >> 10), sc_lsize);153166154154- if (!((config >> 3) 
& 1)) /* CONF_SE */167167+ if (!(config & R7K_CONF_SE))155168 rm7k_sc_enable();156169157170 /*
+3
include/asm-mips/cacheflush.h
···9090#define ClearPageDcacheDirty(page) \9191 clear_bit(PG_dcache_dirty, &(page)->flags)92929393+/* Run kernel code uncached, useful for cache probing functions. */9494+unsigned long __init run_uncached(void *func);9595+9396#endif /* _ASM_CACHEFLUSH_H */
+3
include/asm-mips/mipsregs.h
···433433#define R5K_CONF_SE (_ULCAST_(1) << 12)434434#define R5K_CONF_SS (_ULCAST_(3) << 20)435435436436+/* Bits specific to the RM7000. */437437+#define R7K_CONF_SE (_ULCAST_(1) << 3)438438+436439/* Bits specific to the R10000. */437440#define R10K_CONF_DN (_ULCAST_(3) << 3)438441#define R10K_CONF_CT (_ULCAST_(1) << 5)