Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: MT: Remove SMTC support

Nobody is maintaining SMTC anymore and there also seems to be no user base,
which is a pity — the SMTC technology, primarily developed by Kevin D.
Kissell <kevink@paralogos.com>, is an ingenious demonstration of the MT
ASE's power and elegance.

Based on the patch by Markos Chandras <Markos.Chandras@imgtec.com>,
https://patchwork.linux-mips.org/patch/6719/, which, while very similar,
no longer applied cleanly when I tried to merge it, plus some additional
post-SMTC cleanup — SMTC was a feature as tricky to remove as it was to
merge once upon a time.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+72 -4097
+4 -45
arch/mips/Kconfig
··· 1852 1852 1853 1853 config CEVT_GIC 1854 1854 bool "Use GIC global counter for clock events" 1855 - depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC) 1855 + depends on IRQ_GIC && !MIPS_SEAD3 1856 1856 help 1857 1857 Use the GIC global counter for the clock events. The R4K clock 1858 1858 event driver is always present, so if the platform ends up not ··· 1936 1936 Intel Hyperthreading feature. For further information go to 1937 1937 <http://www.imgtec.com/mips/mips-multithreading.asp>. 1938 1938 1939 - config MIPS_MT_SMTC 1940 - bool "Use all TCs on all VPEs for SMP (DEPRECATED)" 1941 - depends on CPU_MIPS32_R2 1942 - depends on SYS_SUPPORTS_MULTITHREADING 1943 - depends on !MIPS_CPS 1944 - select CPU_MIPSR2_IRQ_VI 1945 - select CPU_MIPSR2_IRQ_EI 1946 - select MIPS_MT 1947 - select SMP 1948 - select SMP_UP 1949 - select SYS_SUPPORTS_SMP 1950 - select NR_CPUS_DEFAULT_8 1951 - help 1952 - This is a kernel model which is known as SMTC. This is 1953 - supported on cores with the MT ASE and presents all TCs 1954 - available on all VPEs to support SMP. For further 1955 - information see <http://www.linux-mips.org/wiki/34K#SMTC>. 1956 - 1957 1939 endchoice 1958 1940 1959 1941 config MIPS_MT ··· 1959 1977 config MIPS_MT_FPAFF 1960 1978 bool "Dynamic FPU affinity for FP-intensive threads" 1961 1979 default y 1962 - depends on MIPS_MT_SMP || MIPS_MT_SMTC 1980 + depends on MIPS_MT_SMP 1963 1981 1964 1982 config MIPS_VPE_LOADER 1965 1983 bool "VPE loader support." ··· 1980 1998 bool 1981 1999 default "y" 1982 2000 depends on MIPS_VPE_LOADER && !MIPS_CMP 1983 - 1984 - config MIPS_MT_SMTC_IM_BACKSTOP 1985 - bool "Use per-TC register bits as backstop for inhibited IM bits" 1986 - depends on MIPS_MT_SMTC 1987 - default n 1988 - help 1989 - To support multiple TC microthreads acting as "CPUs" within 1990 - a VPE, VPE-wide interrupt mask bits must be specially manipulated 1991 - during interrupt handling. 
To support legacy drivers and interrupt 1992 - controller management code, SMTC has a "backstop" to track and 1993 - if necessary restore the interrupt mask. This has some performance 1994 - impact on interrupt service overhead. 1995 - 1996 - config MIPS_MT_SMTC_IRQAFF 1997 - bool "Support IRQ affinity API" 1998 - depends on MIPS_MT_SMTC 1999 - default n 2000 - help 2001 - Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) 2002 - for SMTC Linux kernel. Requires platform support, of which 2003 - an example can be found in the MIPS kernel i8259 and Malta 2004 - platform code. Adds some overhead to interrupt dispatch, and 2005 - should be used only if you know what you are doing. 2006 2001 2007 2002 config MIPS_VPE_LOADER_TOM 2008 2003 bool "Load VPE program into memory hidden from linux" ··· 2008 2049 2009 2050 config MIPS_CMP 2010 2051 bool "MIPS CMP framework support (DEPRECATED)" 2011 - depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC 2052 + depends on SYS_SUPPORTS_MIPS_CMP 2012 2053 select MIPS_GIC_IPI 2013 2054 select SYNC_R4K 2014 2055 select WEAK_ORDERING ··· 2215 2256 2216 2257 config HW_PERF_EVENTS 2217 2258 bool "Enable hardware performance counter support for perf events" 2218 - depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) 2259 + depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) 2219 2260 default y 2220 2261 help 2221 2262 Enable hardware performance counter support for perf events. If
-9
arch/mips/Kconfig.debug
··· 79 79 80 80 Normally, you will choose 'N' here. 81 81 82 - config SMTC_IDLE_HOOK_DEBUG 83 - bool "Enable additional debug checks before going into CPU idle loop" 84 - depends on DEBUG_KERNEL && MIPS_MT_SMTC 85 - help 86 - This option enables Enable additional debug checks before going into 87 - CPU idle loop. For details on these checks, see 88 - arch/mips/kernel/smtc.c. This debugging option result in significant 89 - overhead so should be disabled in production kernels. 90 - 91 82 config SB1XXX_CORELIS 92 83 bool "Corelis Debugger" 93 84 depends on SIBYTE_SB1xxx_SOC
-196
arch/mips/configs/maltasmtc_defconfig
··· 1 - CONFIG_MIPS_MALTA=y 2 - CONFIG_CPU_LITTLE_ENDIAN=y 3 - CONFIG_CPU_MIPS32_R2=y 4 - CONFIG_PAGE_SIZE_16KB=y 5 - CONFIG_MIPS_MT_SMTC=y 6 - # CONFIG_MIPS_MT_FPAFF is not set 7 - CONFIG_NR_CPUS=9 8 - CONFIG_HZ_48=y 9 - CONFIG_LOCALVERSION="smtc" 10 - CONFIG_SYSVIPC=y 11 - CONFIG_POSIX_MQUEUE=y 12 - CONFIG_AUDIT=y 13 - CONFIG_IKCONFIG=y 14 - CONFIG_IKCONFIG_PROC=y 15 - CONFIG_LOG_BUF_SHIFT=15 16 - CONFIG_SYSCTL_SYSCALL=y 17 - CONFIG_EMBEDDED=y 18 - CONFIG_SLAB=y 19 - CONFIG_MODULES=y 20 - CONFIG_MODULE_UNLOAD=y 21 - CONFIG_MODVERSIONS=y 22 - CONFIG_MODULE_SRCVERSION_ALL=y 23 - # CONFIG_BLK_DEV_BSG is not set 24 - CONFIG_PCI=y 25 - # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 26 - CONFIG_NET=y 27 - CONFIG_PACKET=y 28 - CONFIG_UNIX=y 29 - CONFIG_XFRM_USER=m 30 - CONFIG_NET_KEY=y 31 - CONFIG_INET=y 32 - CONFIG_IP_MULTICAST=y 33 - CONFIG_IP_ADVANCED_ROUTER=y 34 - CONFIG_IP_MULTIPLE_TABLES=y 35 - CONFIG_IP_ROUTE_MULTIPATH=y 36 - CONFIG_IP_ROUTE_VERBOSE=y 37 - CONFIG_IP_PNP=y 38 - CONFIG_IP_PNP_DHCP=y 39 - CONFIG_IP_PNP_BOOTP=y 40 - CONFIG_NET_IPIP=m 41 - CONFIG_IP_MROUTE=y 42 - CONFIG_IP_PIMSM_V1=y 43 - CONFIG_IP_PIMSM_V2=y 44 - CONFIG_SYN_COOKIES=y 45 - CONFIG_INET_AH=m 46 - CONFIG_INET_ESP=m 47 - CONFIG_INET_IPCOMP=m 48 - # CONFIG_INET_LRO is not set 49 - CONFIG_INET6_AH=m 50 - CONFIG_INET6_ESP=m 51 - CONFIG_INET6_IPCOMP=m 52 - CONFIG_IPV6_TUNNEL=m 53 - CONFIG_BRIDGE=m 54 - CONFIG_VLAN_8021Q=m 55 - CONFIG_ATALK=m 56 - CONFIG_DEV_APPLETALK=m 57 - CONFIG_IPDDP=m 58 - CONFIG_IPDDP_ENCAP=y 59 - CONFIG_NET_SCHED=y 60 - CONFIG_NET_SCH_CBQ=m 61 - CONFIG_NET_SCH_HTB=m 62 - CONFIG_NET_SCH_HFSC=m 63 - CONFIG_NET_SCH_PRIO=m 64 - CONFIG_NET_SCH_RED=m 65 - CONFIG_NET_SCH_SFQ=m 66 - CONFIG_NET_SCH_TEQL=m 67 - CONFIG_NET_SCH_TBF=m 68 - CONFIG_NET_SCH_GRED=m 69 - CONFIG_NET_SCH_DSMARK=m 70 - CONFIG_NET_SCH_NETEM=m 71 - CONFIG_NET_SCH_INGRESS=m 72 - CONFIG_NET_CLS_BASIC=m 73 - CONFIG_NET_CLS_TCINDEX=m 74 - CONFIG_NET_CLS_ROUTE4=m 75 - CONFIG_NET_CLS_FW=m 76 - 
CONFIG_NET_CLS_U32=m 77 - CONFIG_NET_CLS_RSVP=m 78 - CONFIG_NET_CLS_RSVP6=m 79 - CONFIG_NET_CLS_ACT=y 80 - CONFIG_NET_ACT_POLICE=y 81 - CONFIG_NET_CLS_IND=y 82 - # CONFIG_WIRELESS is not set 83 - CONFIG_DEVTMPFS=y 84 - CONFIG_BLK_DEV_LOOP=y 85 - CONFIG_BLK_DEV_CRYPTOLOOP=m 86 - CONFIG_IDE=y 87 - # CONFIG_IDE_PROC_FS is not set 88 - # CONFIG_IDEPCI_PCIBUS_ORDER is not set 89 - CONFIG_BLK_DEV_GENERIC=y 90 - CONFIG_BLK_DEV_PIIX=y 91 - CONFIG_SCSI=y 92 - CONFIG_BLK_DEV_SD=y 93 - CONFIG_CHR_DEV_SG=y 94 - # CONFIG_SCSI_LOWLEVEL is not set 95 - CONFIG_NETDEVICES=y 96 - # CONFIG_NET_VENDOR_3COM is not set 97 - # CONFIG_NET_VENDOR_ADAPTEC is not set 98 - # CONFIG_NET_VENDOR_ALTEON is not set 99 - CONFIG_PCNET32=y 100 - # CONFIG_NET_VENDOR_ATHEROS is not set 101 - # CONFIG_NET_VENDOR_BROADCOM is not set 102 - # CONFIG_NET_VENDOR_BROCADE is not set 103 - # CONFIG_NET_VENDOR_CHELSIO is not set 104 - # CONFIG_NET_VENDOR_CISCO is not set 105 - # CONFIG_NET_VENDOR_DEC is not set 106 - # CONFIG_NET_VENDOR_DLINK is not set 107 - # CONFIG_NET_VENDOR_EMULEX is not set 108 - # CONFIG_NET_VENDOR_EXAR is not set 109 - # CONFIG_NET_VENDOR_HP is not set 110 - # CONFIG_NET_VENDOR_INTEL is not set 111 - # CONFIG_NET_VENDOR_MARVELL is not set 112 - # CONFIG_NET_VENDOR_MELLANOX is not set 113 - # CONFIG_NET_VENDOR_MICREL is not set 114 - # CONFIG_NET_VENDOR_MYRI is not set 115 - # CONFIG_NET_VENDOR_NATSEMI is not set 116 - # CONFIG_NET_VENDOR_NVIDIA is not set 117 - # CONFIG_NET_VENDOR_OKI is not set 118 - # CONFIG_NET_PACKET_ENGINE is not set 119 - # CONFIG_NET_VENDOR_QLOGIC is not set 120 - # CONFIG_NET_VENDOR_REALTEK is not set 121 - # CONFIG_NET_VENDOR_RDC is not set 122 - # CONFIG_NET_VENDOR_SEEQ is not set 123 - # CONFIG_NET_VENDOR_SILAN is not set 124 - # CONFIG_NET_VENDOR_SIS is not set 125 - # CONFIG_NET_VENDOR_SMSC is not set 126 - # CONFIG_NET_VENDOR_STMICRO is not set 127 - # CONFIG_NET_VENDOR_SUN is not set 128 - # CONFIG_NET_VENDOR_TEHUTI is not set 129 - # CONFIG_NET_VENDOR_TI 
is not set 130 - # CONFIG_NET_VENDOR_TOSHIBA is not set 131 - # CONFIG_NET_VENDOR_VIA is not set 132 - # CONFIG_WLAN is not set 133 - # CONFIG_VT is not set 134 - CONFIG_LEGACY_PTY_COUNT=16 135 - CONFIG_SERIAL_8250=y 136 - CONFIG_SERIAL_8250_CONSOLE=y 137 - CONFIG_HW_RANDOM=y 138 - # CONFIG_HWMON is not set 139 - CONFIG_VIDEO_OUTPUT_CONTROL=m 140 - CONFIG_FB=y 141 - CONFIG_FIRMWARE_EDID=y 142 - CONFIG_FB_MATROX=y 143 - CONFIG_FB_MATROX_G=y 144 - CONFIG_USB=y 145 - CONFIG_USB_EHCI_HCD=y 146 - # CONFIG_USB_EHCI_TT_NEWSCHED is not set 147 - CONFIG_USB_UHCI_HCD=y 148 - CONFIG_USB_STORAGE=y 149 - CONFIG_NEW_LEDS=y 150 - CONFIG_LEDS_CLASS=y 151 - CONFIG_LEDS_TRIGGERS=y 152 - CONFIG_LEDS_TRIGGER_TIMER=y 153 - CONFIG_LEDS_TRIGGER_IDE_DISK=y 154 - CONFIG_LEDS_TRIGGER_HEARTBEAT=y 155 - CONFIG_LEDS_TRIGGER_BACKLIGHT=y 156 - CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 157 - CONFIG_RTC_CLASS=y 158 - CONFIG_RTC_DRV_CMOS=y 159 - CONFIG_EXT2_FS=y 160 - CONFIG_EXT3_FS=y 161 - # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 162 - CONFIG_XFS_FS=y 163 - CONFIG_XFS_QUOTA=y 164 - CONFIG_XFS_POSIX_ACL=y 165 - CONFIG_QUOTA=y 166 - CONFIG_QFMT_V2=y 167 - CONFIG_MSDOS_FS=m 168 - CONFIG_VFAT_FS=m 169 - CONFIG_PROC_KCORE=y 170 - CONFIG_TMPFS=y 171 - CONFIG_NFS_FS=y 172 - CONFIG_ROOT_NFS=y 173 - CONFIG_CIFS=m 174 - CONFIG_CIFS_WEAK_PW_HASH=y 175 - CONFIG_CIFS_XATTR=y 176 - CONFIG_CIFS_POSIX=y 177 - CONFIG_NLS_CODEPAGE_437=m 178 - CONFIG_NLS_ISO8859_1=m 179 - # CONFIG_FTRACE is not set 180 - CONFIG_CRYPTO_NULL=m 181 - CONFIG_CRYPTO_PCBC=m 182 - CONFIG_CRYPTO_HMAC=y 183 - CONFIG_CRYPTO_MICHAEL_MIC=m 184 - CONFIG_CRYPTO_SHA512=m 185 - CONFIG_CRYPTO_TGR192=m 186 - CONFIG_CRYPTO_WP512=m 187 - CONFIG_CRYPTO_ANUBIS=m 188 - CONFIG_CRYPTO_BLOWFISH=m 189 - CONFIG_CRYPTO_CAST5=m 190 - CONFIG_CRYPTO_CAST6=m 191 - CONFIG_CRYPTO_KHAZAD=m 192 - CONFIG_CRYPTO_SERPENT=m 193 - CONFIG_CRYPTO_TEA=m 194 - CONFIG_CRYPTO_TWOFISH=m 195 - # CONFIG_CRYPTO_ANSI_CPRNG is not set 196 - # CONFIG_CRYPTO_HW is not set
+2 -20
arch/mips/include/asm/asmmacro.h
··· 17 17 #ifdef CONFIG_64BIT 18 18 #include <asm/asmmacro-64.h> 19 19 #endif 20 - #ifdef CONFIG_MIPS_MT_SMTC 21 - #include <asm/mipsmtregs.h> 22 - #endif 23 20 24 - #ifdef CONFIG_MIPS_MT_SMTC 25 - .macro local_irq_enable reg=t0 26 - mfc0 \reg, CP0_TCSTATUS 27 - ori \reg, \reg, TCSTATUS_IXMT 28 - xori \reg, \reg, TCSTATUS_IXMT 29 - mtc0 \reg, CP0_TCSTATUS 30 - _ehb 31 - .endm 32 - 33 - .macro local_irq_disable reg=t0 34 - mfc0 \reg, CP0_TCSTATUS 35 - ori \reg, \reg, TCSTATUS_IXMT 36 - mtc0 \reg, CP0_TCSTATUS 37 - _ehb 38 - .endm 39 - #elif defined(CONFIG_CPU_MIPSR2) 21 + #ifdef CONFIG_CPU_MIPSR2 40 22 .macro local_irq_enable reg=t0 41 23 ei 42 24 irq_enable_hazard ··· 53 71 sw \reg, TI_PRE_COUNT($28) 54 72 #endif 55 73 .endm 56 - #endif /* CONFIG_MIPS_MT_SMTC */ 74 + #endif /* CONFIG_CPU_MIPSR2 */ 57 75 58 76 .macro fpu_save_16even thread tmp=t0 59 77 cfc1 \tmp, fcr31
+4 -9
arch/mips/include/asm/cpu-info.h
··· 65 65 #ifdef CONFIG_64BIT 66 66 int vmbits; /* Virtual memory size in bits */ 67 67 #endif 68 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 68 + #ifdef CONFIG_MIPS_MT_SMP 69 69 /* 70 - * In the MIPS MT "SMTC" model, each TC is considered 71 - * to be a "CPU" for the purposes of scheduling, but 72 - * exception resources, ASID spaces, etc, are common 73 - * to all TCs within the same VPE. 70 + * There is not necessarily a 1:1 mapping of VPE num to CPU number 71 + * in particular on multi-core systems. 74 72 */ 75 73 int vpe_id; /* Virtual Processor number */ 76 - #endif 77 - #ifdef CONFIG_MIPS_MT_SMTC 78 - int tc_id; /* Thread Context number */ 79 74 #endif 80 75 void *data; /* Additional data */ 81 76 unsigned int watch_reg_count; /* Number that exist */ ··· 112 117 unsigned long n; 113 118 }; 114 119 115 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 120 + #ifdef CONFIG_MIPS_MT_SMP 116 121 # define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id) 117 122 #else 118 123 # define cpu_vpe_id(cpuinfo) 0
-4
arch/mips/include/asm/fixmap.h
··· 48 48 enum fixed_addresses { 49 49 #define FIX_N_COLOURS 8 50 50 FIX_CMAP_BEGIN, 51 - #ifdef CONFIG_MIPS_MT_SMTC 52 - FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2), 53 - #else 54 51 FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2), 55 - #endif 56 52 #ifdef CONFIG_HIGHMEM 57 53 /* reserved pte's for temporary kernel mappings */ 58 54 FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
-96
arch/mips/include/asm/irq.h
··· 26 26 #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 27 27 #endif 28 28 29 - #ifdef CONFIG_MIPS_MT_SMTC 30 - 31 - struct irqaction; 32 - 33 - extern unsigned long irq_hwmask[]; 34 - extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, 35 - unsigned long hwmask); 36 - 37 - static inline void smtc_im_ack_irq(unsigned int irq) 38 - { 39 - if (irq_hwmask[irq] & ST0_IM) 40 - set_c0_status(irq_hwmask[irq] & ST0_IM); 41 - } 42 - 43 - #else 44 - 45 - static inline void smtc_im_ack_irq(unsigned int irq) 46 - { 47 - } 48 - 49 - #endif /* CONFIG_MIPS_MT_SMTC */ 50 - 51 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 52 - #include <linux/cpumask.h> 53 - 54 - extern int plat_set_irq_affinity(struct irq_data *d, 55 - const struct cpumask *affinity, bool force); 56 - extern void smtc_forward_irq(struct irq_data *d); 57 - 58 - /* 59 - * IRQ affinity hook invoked at the beginning of interrupt dispatch 60 - * if option is enabled. 61 - * 62 - * Up through Linux 2.6.22 (at least) cpumask operations are very 63 - * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity 64 - * used a "fast path" per-IRQ-descriptor cache of affinity information 65 - * to reduce latency. As there is a project afoot to optimize the 66 - * cpumask implementations, this version is optimistically assuming 67 - * that cpumask.h macro overhead is reasonable during interrupt dispatch. 
68 - */ 69 - static inline int handle_on_other_cpu(unsigned int irq) 70 - { 71 - struct irq_data *d = irq_get_irq_data(irq); 72 - 73 - if (cpumask_test_cpu(smp_processor_id(), d->affinity)) 74 - return 0; 75 - smtc_forward_irq(d); 76 - return 1; 77 - } 78 - 79 - #else /* Not doing SMTC affinity */ 80 - 81 - static inline int handle_on_other_cpu(unsigned int irq) { return 0; } 82 - 83 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 84 - 85 - #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 86 - 87 - static inline void smtc_im_backstop(unsigned int irq) 88 - { 89 - if (irq_hwmask[irq] & 0x0000ff00) 90 - write_c0_tccontext(read_c0_tccontext() & 91 - ~(irq_hwmask[irq] & 0x0000ff00)); 92 - } 93 - 94 - /* 95 - * Clear interrupt mask handling "backstop" if irq_hwmask 96 - * entry so indicates. This implies that the ack() or end() 97 - * functions will take over re-enabling the low-level mask. 98 - * Otherwise it will be done on return from exception. 99 - */ 100 - static inline int smtc_handle_on_other_cpu(unsigned int irq) 101 - { 102 - int ret = handle_on_other_cpu(irq); 103 - 104 - if (!ret) 105 - smtc_im_backstop(irq); 106 - return ret; 107 - } 108 - 109 - #else 110 - 111 - static inline void smtc_im_backstop(unsigned int irq) { } 112 - static inline int smtc_handle_on_other_cpu(unsigned int irq) 113 - { 114 - return handle_on_other_cpu(irq); 115 - } 116 - 117 - #endif 118 - 119 29 extern void do_IRQ(unsigned int irq); 120 - 121 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 122 - 123 - extern void do_IRQ_no_affinity(unsigned int irq); 124 - 125 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 126 30 127 31 extern void arch_init_irq(void); 128 32 extern void spurious_interrupt(void);
+3 -29
arch/mips/include/asm/irqflags.h
··· 17 17 #include <linux/stringify.h> 18 18 #include <asm/hazards.h> 19 19 20 - #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) 20 + #ifdef CONFIG_CPU_MIPSR2 21 21 22 22 static inline void arch_local_irq_disable(void) 23 23 { ··· 118 118 unsigned long arch_local_irq_save(void); 119 119 void arch_local_irq_restore(unsigned long flags); 120 120 void __arch_local_irq_restore(unsigned long flags); 121 - #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ 122 - 123 - 124 - extern void smtc_ipi_replay(void); 121 + #endif /* CONFIG_CPU_MIPSR2 */ 125 122 126 123 static inline void arch_local_irq_enable(void) 127 124 { 128 - #ifdef CONFIG_MIPS_MT_SMTC 129 - /* 130 - * SMTC kernel needs to do a software replay of queued 131 - * IPIs, at the cost of call overhead on each local_irq_enable() 132 - */ 133 - smtc_ipi_replay(); 134 - #endif 135 125 __asm__ __volatile__( 136 126 " .set push \n" 137 127 " .set reorder \n" 138 128 " .set noat \n" 139 - #ifdef CONFIG_MIPS_MT_SMTC 140 - " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" 141 - " ori $1, 0x400 \n" 142 - " xori $1, 0x400 \n" 143 - " mtc0 $1, $2, 1 \n" 144 - #elif defined(CONFIG_CPU_MIPSR2) 129 + #if defined(CONFIG_CPU_MIPSR2) 145 130 " ei \n" 146 131 #else 147 132 " mfc0 $1,$12 \n" ··· 148 163 asm __volatile__( 149 164 " .set push \n" 150 165 " .set reorder \n" 151 - #ifdef CONFIG_MIPS_MT_SMTC 152 - " mfc0 %[flags], $2, 1 \n" 153 - #else 154 166 " mfc0 %[flags], $12 \n" 155 - #endif 156 167 " .set pop \n" 157 168 : [flags] "=r" (flags)); 158 169 ··· 158 177 159 178 static inline int arch_irqs_disabled_flags(unsigned long flags) 160 179 { 161 - #ifdef CONFIG_MIPS_MT_SMTC 162 - /* 163 - * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU 164 - */ 165 - return flags & 0x400; 166 - #else 167 180 return !(flags & 1); 168 - #endif 169 181 } 170 182 171 183 #endif /* #ifndef __ASSEMBLY__ */
-30
arch/mips/include/asm/mach-malta/kernel-entry-init.h
··· 80 80 .endm 81 81 82 82 .macro kernel_entry_setup 83 - #ifdef CONFIG_MIPS_MT_SMTC 84 - mfc0 t0, CP0_CONFIG 85 - bgez t0, 9f 86 - mfc0 t0, CP0_CONFIG, 1 87 - bgez t0, 9f 88 - mfc0 t0, CP0_CONFIG, 2 89 - bgez t0, 9f 90 - mfc0 t0, CP0_CONFIG, 3 91 - and t0, 1<<2 92 - bnez t0, 0f 93 - 9: 94 - /* Assume we came from YAMON... */ 95 - PTR_LA v0, 0x9fc00534 /* YAMON print */ 96 - lw v0, (v0) 97 - move a0, zero 98 - PTR_LA a1, nonmt_processor 99 - jal v0 100 - 101 - PTR_LA v0, 0x9fc00520 /* YAMON exit */ 102 - lw v0, (v0) 103 - li a0, 1 104 - jal v0 105 - 106 - 1: b 1b 107 - 108 - __INITDATA 109 - nonmt_processor: 110 - .asciz "SMTC kernel requires the MT ASE to run\n" 111 - __FINIT 112 - #endif 113 83 114 84 #ifdef CONFIG_EVA 115 85 sync
-31
arch/mips/include/asm/mach-sead3/kernel-entry-init.h
··· 10 10 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H 11 11 12 12 .macro kernel_entry_setup 13 - #ifdef CONFIG_MIPS_MT_SMTC 14 - mfc0 t0, CP0_CONFIG 15 - bgez t0, 9f 16 - mfc0 t0, CP0_CONFIG, 1 17 - bgez t0, 9f 18 - mfc0 t0, CP0_CONFIG, 2 19 - bgez t0, 9f 20 - mfc0 t0, CP0_CONFIG, 3 21 - and t0, 1<<2 22 - bnez t0, 0f 23 - 9 : 24 - /* Assume we came from YAMON... */ 25 - PTR_LA v0, 0x9fc00534 /* YAMON print */ 26 - lw v0, (v0) 27 - move a0, zero 28 - PTR_LA a1, nonmt_processor 29 - jal v0 30 - 31 - PTR_LA v0, 0x9fc00520 /* YAMON exit */ 32 - lw v0, (v0) 33 - li a0, 1 34 - jal v0 35 - 36 - 1 : b 1b 37 - 38 - __INITDATA 39 - nonmt_processor : 40 - .asciz "SMTC kernel requires the MT ASE to run\n" 41 - __FINIT 42 - 0 : 43 - #endif 44 13 .endm 45 14 46 15 /*
+2 -3
arch/mips/include/asm/mips_mt.h
··· 1 1 /* 2 - * Definitions and decalrations for MIPS MT support 3 - * that are common between SMTC, VSMP, and/or AP/SP 4 - * kernel models. 2 + * Definitions and decalrations for MIPS MT support that are common between 3 + * the VSMP, and AP/SP kernel models. 5 4 */ 6 5 #ifndef __ASM_MIPS_MT_H 7 6 #define __ASM_MIPS_MT_H
+1 -132
arch/mips/include/asm/mipsregs.h
··· 1014 1014 #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) 1015 1015 1016 1016 #define read_c0_status() __read_32bit_c0_register($12, 0) 1017 - #ifdef CONFIG_MIPS_MT_SMTC 1018 - #define write_c0_status(val) \ 1019 - do { \ 1020 - __write_32bit_c0_register($12, 0, val); \ 1021 - __ehb(); \ 1022 - } while (0) 1023 - #else 1024 - /* 1025 - * Legacy non-SMTC code, which may be hazardous 1026 - * but which might not support EHB 1027 - */ 1017 + 1028 1018 #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) 1029 - #endif /* CONFIG_MIPS_MT_SMTC */ 1030 1019 1031 1020 #define read_c0_cause() __read_32bit_c0_register($13, 0) 1032 1021 #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) ··· 1739 1750 /* 1740 1751 * Manipulate bits in a c0 register. 1741 1752 */ 1742 - #ifndef CONFIG_MIPS_MT_SMTC 1743 - /* 1744 - * SMTC Linux requires shutting-down microthread scheduling 1745 - * during CP0 register read-modify-write sequences. 1746 - */ 1747 1753 #define __BUILD_SET_C0(name) \ 1748 1754 static inline unsigned int \ 1749 1755 set_c0_##name(unsigned int set) \ ··· 1776 1792 \ 1777 1793 return res; \ 1778 1794 } 1779 - 1780 - #else /* SMTC versions that manage MT scheduling */ 1781 - 1782 - #include <linux/irqflags.h> 1783 - 1784 - /* 1785 - * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with 1786 - * header file recursion. 
1787 - */ 1788 - static inline unsigned int __dmt(void) 1789 - { 1790 - int res; 1791 - 1792 - __asm__ __volatile__( 1793 - " .set push \n" 1794 - " .set mips32r2 \n" 1795 - " .set noat \n" 1796 - " .word 0x41610BC1 # dmt $1 \n" 1797 - " ehb \n" 1798 - " move %0, $1 \n" 1799 - " .set pop \n" 1800 - : "=r" (res)); 1801 - 1802 - instruction_hazard(); 1803 - 1804 - return res; 1805 - } 1806 - 1807 - #define __VPECONTROL_TE_SHIFT 15 1808 - #define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT) 1809 - 1810 - #define __EMT_ENABLE __VPECONTROL_TE 1811 - 1812 - static inline void __emt(unsigned int previous) 1813 - { 1814 - if ((previous & __EMT_ENABLE)) 1815 - __asm__ __volatile__( 1816 - " .set mips32r2 \n" 1817 - " .word 0x41600be1 # emt \n" 1818 - " ehb \n" 1819 - " .set mips0 \n"); 1820 - } 1821 - 1822 - static inline void __ehb(void) 1823 - { 1824 - __asm__ __volatile__( 1825 - " .set mips32r2 \n" 1826 - " ehb \n" " .set mips0 \n"); 1827 - } 1828 - 1829 - /* 1830 - * Note that local_irq_save/restore affect TC-specific IXMT state, 1831 - * not Status.IE as in non-SMTC kernel. 
1832 - */ 1833 - 1834 - #define __BUILD_SET_C0(name) \ 1835 - static inline unsigned int \ 1836 - set_c0_##name(unsigned int set) \ 1837 - { \ 1838 - unsigned int res; \ 1839 - unsigned int new; \ 1840 - unsigned int omt; \ 1841 - unsigned long flags; \ 1842 - \ 1843 - local_irq_save(flags); \ 1844 - omt = __dmt(); \ 1845 - res = read_c0_##name(); \ 1846 - new = res | set; \ 1847 - write_c0_##name(new); \ 1848 - __emt(omt); \ 1849 - local_irq_restore(flags); \ 1850 - \ 1851 - return res; \ 1852 - } \ 1853 - \ 1854 - static inline unsigned int \ 1855 - clear_c0_##name(unsigned int clear) \ 1856 - { \ 1857 - unsigned int res; \ 1858 - unsigned int new; \ 1859 - unsigned int omt; \ 1860 - unsigned long flags; \ 1861 - \ 1862 - local_irq_save(flags); \ 1863 - omt = __dmt(); \ 1864 - res = read_c0_##name(); \ 1865 - new = res & ~clear; \ 1866 - write_c0_##name(new); \ 1867 - __emt(omt); \ 1868 - local_irq_restore(flags); \ 1869 - \ 1870 - return res; \ 1871 - } \ 1872 - \ 1873 - static inline unsigned int \ 1874 - change_c0_##name(unsigned int change, unsigned int newbits) \ 1875 - { \ 1876 - unsigned int res; \ 1877 - unsigned int new; \ 1878 - unsigned int omt; \ 1879 - unsigned long flags; \ 1880 - \ 1881 - local_irq_save(flags); \ 1882 - \ 1883 - omt = __dmt(); \ 1884 - res = read_c0_##name(); \ 1885 - new = res & ~change; \ 1886 - new |= (newbits & change); \ 1887 - write_c0_##name(new); \ 1888 - __emt(omt); \ 1889 - local_irq_restore(flags); \ 1890 - \ 1891 - return res; \ 1892 - } 1893 - #endif 1894 1795 1895 1796 __BUILD_SET_C0(status) 1896 1797 __BUILD_SET_C0(cause)
-107
arch/mips/include/asm/mmu_context.h
··· 18 18 #include <asm/cacheflush.h> 19 19 #include <asm/hazards.h> 20 20 #include <asm/tlbflush.h> 21 - #ifdef CONFIG_MIPS_MT_SMTC 22 - #include <asm/mipsmtregs.h> 23 - #include <asm/smtc.h> 24 - #endif /* SMTC */ 25 21 #include <asm-generic/mm_hooks.h> 26 22 27 23 #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ ··· 59 63 #define ASID_INC 0x10 60 64 #define ASID_MASK 0xff0 61 65 62 - #elif defined(CONFIG_MIPS_MT_SMTC) 63 - 64 - #define ASID_INC 0x1 65 - extern unsigned long smtc_asid_mask; 66 - #define ASID_MASK (smtc_asid_mask) 67 - #define HW_ASID_MASK 0xff 68 - /* End SMTC/34K debug hack */ 69 66 #else /* FIXME: not correct for R6000 */ 70 67 71 68 #define ASID_INC 0x1 ··· 81 92 #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) 82 93 #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) 83 94 84 - #ifndef CONFIG_MIPS_MT_SMTC 85 95 /* Normal, classic MIPS get_new_mmu_context */ 86 96 static inline void 87 97 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) ··· 103 115 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 104 116 } 105 117 106 - #else /* CONFIG_MIPS_MT_SMTC */ 107 - 108 - #define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu)) 109 - 110 - #endif /* CONFIG_MIPS_MT_SMTC */ 111 - 112 118 /* 113 119 * Initialize the context related info for a new mm_struct 114 120 * instance. ··· 123 141 { 124 142 unsigned int cpu = smp_processor_id(); 125 143 unsigned long flags; 126 - #ifdef CONFIG_MIPS_MT_SMTC 127 - unsigned long oldasid; 128 - unsigned long mtflags; 129 - int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 
0 : cpu_data[cpu].vpe_id; 130 144 local_irq_save(flags); 131 - mtflags = dvpe(); 132 - #else /* Not SMTC */ 133 - local_irq_save(flags); 134 - #endif /* CONFIG_MIPS_MT_SMTC */ 135 145 136 146 /* Check if our ASID is of an older version and thus invalid */ 137 147 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 138 148 get_new_mmu_context(next, cpu); 139 - #ifdef CONFIG_MIPS_MT_SMTC 140 - /* 141 - * If the EntryHi ASID being replaced happens to be 142 - * the value flagged at ASID recycling time as having 143 - * an extended life, clear the bit showing it being 144 - * in use by this "CPU", and if that's the last bit, 145 - * free up the ASID value for use and flush any old 146 - * instances of it from the TLB. 147 - */ 148 - oldasid = (read_c0_entryhi() & ASID_MASK); 149 - if(smtc_live_asid[mytlb][oldasid]) { 150 - smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 151 - if(smtc_live_asid[mytlb][oldasid] == 0) 152 - smtc_flush_tlb_asid(oldasid); 153 - } 154 - /* 155 - * Tread softly on EntryHi, and so long as we support 156 - * having ASID_MASK smaller than the hardware maximum, 157 - * make sure no "soft" bits become "hard"... 158 - */ 159 - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 160 - cpu_asid(cpu, next)); 161 - ehb(); /* Make sure it propagates to TCStatus */ 162 - evpe(mtflags); 163 - #else 164 149 write_c0_entryhi(cpu_asid(cpu, next)); 165 - #endif /* CONFIG_MIPS_MT_SMTC */ 166 150 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 167 151 168 152 /* ··· 161 213 unsigned long flags; 162 214 unsigned int cpu = smp_processor_id(); 163 215 164 - #ifdef CONFIG_MIPS_MT_SMTC 165 - unsigned long oldasid; 166 - unsigned long mtflags; 167 - int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; 168 - #endif /* CONFIG_MIPS_MT_SMTC */ 169 - 170 216 local_irq_save(flags); 171 217 172 218 /* Unconditionally get a new ASID. 
*/ 173 219 get_new_mmu_context(next, cpu); 174 220 175 - #ifdef CONFIG_MIPS_MT_SMTC 176 - /* See comments for similar code above */ 177 - mtflags = dvpe(); 178 - oldasid = read_c0_entryhi() & ASID_MASK; 179 - if(smtc_live_asid[mytlb][oldasid]) { 180 - smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 181 - if(smtc_live_asid[mytlb][oldasid] == 0) 182 - smtc_flush_tlb_asid(oldasid); 183 - } 184 - /* See comments for similar code above */ 185 - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 186 - cpu_asid(cpu, next)); 187 - ehb(); /* Make sure it propagates to TCStatus */ 188 - evpe(mtflags); 189 - #else 190 221 write_c0_entryhi(cpu_asid(cpu, next)); 191 - #endif /* CONFIG_MIPS_MT_SMTC */ 192 222 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 193 223 194 224 /* mark mmu ownership change */ ··· 184 258 drop_mmu_context(struct mm_struct *mm, unsigned cpu) 185 259 { 186 260 unsigned long flags; 187 - #ifdef CONFIG_MIPS_MT_SMTC 188 - unsigned long oldasid; 189 - /* Can't use spinlock because called from TLB flush within DVPE */ 190 - unsigned int prevvpe; 191 - int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 
0 : cpu_data[cpu].vpe_id; 192 - #endif /* CONFIG_MIPS_MT_SMTC */ 193 261 194 262 local_irq_save(flags); 195 263 196 264 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 197 265 get_new_mmu_context(mm, cpu); 198 - #ifdef CONFIG_MIPS_MT_SMTC 199 - /* See comments for similar code above */ 200 - prevvpe = dvpe(); 201 - oldasid = (read_c0_entryhi() & ASID_MASK); 202 - if (smtc_live_asid[mytlb][oldasid]) { 203 - smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 204 - if(smtc_live_asid[mytlb][oldasid] == 0) 205 - smtc_flush_tlb_asid(oldasid); 206 - } 207 - /* See comments for similar code above */ 208 - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) 209 - | cpu_asid(cpu, mm)); 210 - ehb(); /* Make sure it propagates to TCStatus */ 211 - evpe(prevvpe); 212 - #else /* not CONFIG_MIPS_MT_SMTC */ 213 266 write_c0_entryhi(cpu_asid(cpu, mm)); 214 - #endif /* CONFIG_MIPS_MT_SMTC */ 215 267 } else { 216 268 /* will get a new context next time */ 217 - #ifndef CONFIG_MIPS_MT_SMTC 218 269 cpu_context(cpu, mm) = 0; 219 - #else /* SMTC */ 220 - int i; 221 - 222 - /* SMTC shares the TLB (and ASIDs) across VPEs */ 223 - for_each_online_cpu(i) { 224 - if((smtc_status & SMTC_TLB_SHARED) 225 - || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) 226 - cpu_context(i, mm) = 0; 227 - } 228 - #endif /* CONFIG_MIPS_MT_SMTC */ 229 270 } 230 271 local_irq_restore(flags); 231 272 }
+1 -7
arch/mips/include/asm/module.h
··· 144 144 #define MODULE_KERNEL_TYPE "64BIT " 145 145 #endif 146 146 147 - #ifdef CONFIG_MIPS_MT_SMTC 148 - #define MODULE_KERNEL_SMTC "MT_SMTC " 149 - #else 150 - #define MODULE_KERNEL_SMTC "" 151 - #endif 152 - 153 147 #define MODULE_ARCH_VERMAGIC \ 154 - MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC 148 + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE 155 149 156 150 #endif /* _ASM_MODULE_H */
-3
arch/mips/include/asm/ptrace.h
··· 39 39 unsigned long cp0_badvaddr; 40 40 unsigned long cp0_cause; 41 41 unsigned long cp0_epc; 42 - #ifdef CONFIG_MIPS_MT_SMTC 43 - unsigned long cp0_tcstatus; 44 - #endif /* CONFIG_MIPS_MT_SMTC */ 45 42 #ifdef CONFIG_CPU_CAVIUM_OCTEON 46 43 unsigned long long mpl[3]; /* MTM{0,1,2} */ 47 44 unsigned long long mtp[3]; /* MTP{0,1,2} */
+3 -4
arch/mips/include/asm/r4kcache.h
··· 43 43 : "i" (op), "R" (*(unsigned char *)(addr))) 44 44 45 45 #ifdef CONFIG_MIPS_MT 46 - /* 47 - * Temporary hacks for SMTC debug. Optionally force single-threaded 48 - * execution during I-cache flushes. 49 - */ 50 46 47 + /* 48 + * Optionally force single-threaded execution during I-cache flushes. 49 + */ 51 50 #define PROTECT_CACHE_FLUSHES 1 52 51 53 52 #ifdef PROTECT_CACHE_FLUSHES
-78
arch/mips/include/asm/smtc.h
··· 1 - #ifndef _ASM_SMTC_MT_H 2 - #define _ASM_SMTC_MT_H 3 - 4 - /* 5 - * Definitions for SMTC multitasking on MIPS MT cores 6 - */ 7 - 8 - #include <asm/mips_mt.h> 9 - #include <asm/smtc_ipi.h> 10 - 11 - /* 12 - * System-wide SMTC status information 13 - */ 14 - 15 - extern unsigned int smtc_status; 16 - 17 - #define SMTC_TLB_SHARED 0x00000001 18 - #define SMTC_MTC_ACTIVE 0x00000002 19 - 20 - /* 21 - * TLB/ASID Management information 22 - */ 23 - 24 - #define MAX_SMTC_TLBS 2 25 - #define MAX_SMTC_ASIDS 256 26 - #if NR_CPUS <= 8 27 - typedef char asiduse; 28 - #else 29 - #if NR_CPUS <= 16 30 - typedef short asiduse; 31 - #else 32 - typedef long asiduse; 33 - #endif 34 - #endif 35 - 36 - /* 37 - * VPE Management information 38 - */ 39 - 40 - #define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */ 41 - 42 - extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; 43 - 44 - struct mm_struct; 45 - struct task_struct; 46 - 47 - void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); 48 - void self_ipi(struct smtc_ipi *); 49 - void smtc_flush_tlb_asid(unsigned long asid); 50 - extern int smtc_build_cpu_map(int startslot); 51 - extern void smtc_prepare_cpus(int cpus); 52 - extern void smtc_smp_finish(void); 53 - extern void smtc_boot_secondary(int cpu, struct task_struct *t); 54 - extern void smtc_cpus_done(void); 55 - extern void smtc_init_secondary(void); 56 - 57 - 58 - /* 59 - * Sharing the TLB between multiple VPEs means that the 60 - * "random" index selection function is not allowed to 61 - * select the current value of the Index register. To 62 - * avoid additional TLB pressure, the Index registers 63 - * are "parked" with an non-Valid value. 64 - */ 65 - 66 - #define PARKED_INDEX ((unsigned int)0x80000000) 67 - 68 - /* 69 - * Define low-level interrupt mask for IPIs, if necessary. 
70 - * By default, use SW interrupt 1, which requires no external 71 - * hardware support, but which works only for single-core 72 - * MIPS MT systems. 73 - */ 74 - #ifndef MIPS_CPU_IPI_IRQ 75 - #define MIPS_CPU_IPI_IRQ 1 76 - #endif 77 - 78 - #endif /* _ASM_SMTC_MT_H */
-129
arch/mips/include/asm/smtc_ipi.h
··· 1 - /* 2 - * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code. 3 - */ 4 - #ifndef __ASM_SMTC_IPI_H 5 - #define __ASM_SMTC_IPI_H 6 - 7 - #include <linux/spinlock.h> 8 - 9 - //#define SMTC_IPI_DEBUG 10 - 11 - #ifdef SMTC_IPI_DEBUG 12 - #include <asm/mipsregs.h> 13 - #include <asm/mipsmtregs.h> 14 - #endif /* SMTC_IPI_DEBUG */ 15 - 16 - /* 17 - * An IPI "message" 18 - */ 19 - 20 - struct smtc_ipi { 21 - struct smtc_ipi *flink; 22 - int type; 23 - void *arg; 24 - int dest; 25 - #ifdef SMTC_IPI_DEBUG 26 - int sender; 27 - long stamp; 28 - #endif /* SMTC_IPI_DEBUG */ 29 - }; 30 - 31 - /* 32 - * Defined IPI Types 33 - */ 34 - 35 - #define LINUX_SMP_IPI 1 36 - #define SMTC_CLOCK_TICK 2 37 - #define IRQ_AFFINITY_IPI 3 38 - 39 - /* 40 - * A queue of IPI messages 41 - */ 42 - 43 - struct smtc_ipi_q { 44 - struct smtc_ipi *head; 45 - spinlock_t lock; 46 - struct smtc_ipi *tail; 47 - int depth; 48 - int resched_flag; /* reschedule already queued */ 49 - }; 50 - 51 - static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) 52 - { 53 - unsigned long flags; 54 - 55 - spin_lock_irqsave(&q->lock, flags); 56 - if (q->head == NULL) 57 - q->head = q->tail = p; 58 - else 59 - q->tail->flink = p; 60 - p->flink = NULL; 61 - q->tail = p; 62 - q->depth++; 63 - #ifdef SMTC_IPI_DEBUG 64 - p->sender = read_c0_tcbind(); 65 - p->stamp = read_c0_count(); 66 - #endif /* SMTC_IPI_DEBUG */ 67 - spin_unlock_irqrestore(&q->lock, flags); 68 - } 69 - 70 - static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q) 71 - { 72 - struct smtc_ipi *p; 73 - 74 - if (q->head == NULL) 75 - p = NULL; 76 - else { 77 - p = q->head; 78 - q->head = q->head->flink; 79 - q->depth--; 80 - /* Arguably unnecessary, but leaves queue cleaner */ 81 - if (q->head == NULL) 82 - q->tail = NULL; 83 - } 84 - 85 - return p; 86 - } 87 - 88 - static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q) 89 - { 90 - unsigned long flags; 91 - struct smtc_ipi *p; 92 - 93 - 
spin_lock_irqsave(&q->lock, flags); 94 - p = __smtc_ipi_dq(q); 95 - spin_unlock_irqrestore(&q->lock, flags); 96 - 97 - return p; 98 - } 99 - 100 - static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p) 101 - { 102 - unsigned long flags; 103 - 104 - spin_lock_irqsave(&q->lock, flags); 105 - if (q->head == NULL) { 106 - q->head = q->tail = p; 107 - p->flink = NULL; 108 - } else { 109 - p->flink = q->head; 110 - q->head = p; 111 - } 112 - q->depth++; 113 - spin_unlock_irqrestore(&q->lock, flags); 114 - } 115 - 116 - static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q) 117 - { 118 - unsigned long flags; 119 - int retval; 120 - 121 - spin_lock_irqsave(&q->lock, flags); 122 - retval = q->depth; 123 - spin_unlock_irqrestore(&q->lock, flags); 124 - return retval; 125 - } 126 - 127 - extern void smtc_send_ipi(int cpu, int type, unsigned int action); 128 - 129 - #endif /* __ASM_SMTC_IPI_H */
-23
arch/mips/include/asm/smtc_proc.h
··· 1 - /* 2 - * Definitions for SMTC /proc entries 3 - * Copyright(C) 2005 MIPS Technologies Inc. 4 - */ 5 - #ifndef __ASM_SMTC_PROC_H 6 - #define __ASM_SMTC_PROC_H 7 - 8 - /* 9 - * per-"CPU" statistics 10 - */ 11 - 12 - struct smtc_cpu_proc { 13 - unsigned long timerints; 14 - unsigned long selfipis; 15 - }; 16 - 17 - extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; 18 - 19 - /* Count of number of recoveries of "stolen" FPU access rights on 34K */ 20 - 21 - extern atomic_t smtc_fpu_recoveries; 22 - 23 - #endif /* __ASM_SMTC_PROC_H */
+1 -195
arch/mips/include/asm/stackframe.h
··· 19 19 #include <asm/asm-offsets.h> 20 20 #include <asm/thread_info.h> 21 21 22 - /* 23 - * For SMTC kernel, global IE should be left set, and interrupts 24 - * controlled exclusively via IXMT. 25 - */ 26 - #ifdef CONFIG_MIPS_MT_SMTC 27 - #define STATMASK 0x1e 28 - #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 22 + #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 29 23 #define STATMASK 0x3f 30 24 #else 31 25 #define STATMASK 0x1f 32 26 #endif 33 - 34 - #ifdef CONFIG_MIPS_MT_SMTC 35 - #include <asm/mipsmtregs.h> 36 - #endif /* CONFIG_MIPS_MT_SMTC */ 37 27 38 28 .macro SAVE_AT 39 29 .set push ··· 176 186 mfc0 v1, CP0_STATUS 177 187 LONG_S $2, PT_R2(sp) 178 188 LONG_S v1, PT_STATUS(sp) 179 - #ifdef CONFIG_MIPS_MT_SMTC 180 - /* 181 - * Ideally, these instructions would be shuffled in 182 - * to cover the pipeline delay. 183 - */ 184 - .set mips32 185 - mfc0 k0, CP0_TCSTATUS 186 - .set mips0 187 - LONG_S k0, PT_TCSTATUS(sp) 188 - #endif /* CONFIG_MIPS_MT_SMTC */ 189 189 LONG_S $4, PT_R4(sp) 190 190 mfc0 v1, CP0_CAUSE 191 191 LONG_S $5, PT_R5(sp) ··· 301 321 .set push 302 322 .set reorder 303 323 .set noat 304 - #ifdef CONFIG_MIPS_MT_SMTC 305 - .set mips32r2 306 - /* 307 - * We need to make sure the read-modify-write 308 - * of Status below isn't perturbed by an interrupt 309 - * or cross-TC access, so we need to do at least a DMT, 310 - * protected by an interrupt-inhibit. But setting IXMT 311 - * also creates a few-cycle window where an IPI could 312 - * be queued and not be detected before potentially 313 - * returning to a WAIT or user-mode loop. It must be 314 - * replayed. 315 - * 316 - * We're in the middle of a context switch, and 317 - * we can't dispatch it directly without trashing 318 - * some registers, so we'll try to detect this unlikely 319 - * case and program a software interrupt in the VPE, 320 - * as would be done for a cross-VPE IPI. 
To accommodate 321 - * the handling of that case, we're doing a DVPE instead 322 - * of just a DMT here to protect against other threads. 323 - * This is a lot of cruft to cover a tiny window. 324 - * If you can find a better design, implement it! 325 - * 326 - */ 327 - mfc0 v0, CP0_TCSTATUS 328 - ori v0, TCSTATUS_IXMT 329 - mtc0 v0, CP0_TCSTATUS 330 - _ehb 331 - DVPE 5 # dvpe a1 332 - jal mips_ihb 333 - #endif /* CONFIG_MIPS_MT_SMTC */ 334 324 mfc0 a0, CP0_STATUS 335 325 ori a0, STATMASK 336 326 xori a0, STATMASK ··· 312 362 and v0, v1 313 363 or v0, a0 314 364 mtc0 v0, CP0_STATUS 315 - #ifdef CONFIG_MIPS_MT_SMTC 316 - /* 317 - * Only after EXL/ERL have been restored to status can we 318 - * restore TCStatus.IXMT. 319 - */ 320 - LONG_L v1, PT_TCSTATUS(sp) 321 - _ehb 322 - mfc0 a0, CP0_TCSTATUS 323 - andi v1, TCSTATUS_IXMT 324 - bnez v1, 0f 325 - 326 - /* 327 - * We'd like to detect any IPIs queued in the tiny window 328 - * above and request an software interrupt to service them 329 - * when we ERET. 330 - * 331 - * Computing the offset into the IPIQ array of the executing 332 - * TC's IPI queue in-line would be tedious. We use part of 333 - * the TCContext register to hold 16 bits of offset that we 334 - * can add in-line to find the queue head. 335 - */ 336 - mfc0 v0, CP0_TCCONTEXT 337 - la a2, IPIQ 338 - srl v0, v0, 16 339 - addu a2, a2, v0 340 - LONG_L v0, 0(a2) 341 - beqz v0, 0f 342 - /* 343 - * If we have a queue, provoke dispatch within the VPE by setting C_SW1 344 - */ 345 - mfc0 v0, CP0_CAUSE 346 - ori v0, v0, C_SW1 347 - mtc0 v0, CP0_CAUSE 348 - 0: 349 - /* 350 - * This test should really never branch but 351 - * let's be prudent here. Having atomized 352 - * the shared register modifications, we can 353 - * now EVPE, and must do so before interrupts 354 - * are potentially re-enabled. 
355 - */ 356 - andi a1, a1, MVPCONTROL_EVP 357 - beqz a1, 1f 358 - evpe 359 - 1: 360 - /* We know that TCStatua.IXMT should be set from above */ 361 - xori a0, a0, TCSTATUS_IXMT 362 - or a0, a0, v1 363 - mtc0 a0, CP0_TCSTATUS 364 - _ehb 365 - 366 - .set mips0 367 - #endif /* CONFIG_MIPS_MT_SMTC */ 368 365 LONG_L v1, PT_EPC(sp) 369 366 MTC0 v1, CP0_EPC 370 367 LONG_L $31, PT_R31(sp) ··· 364 467 * Set cp0 enable bit as sign that we're running on the kernel stack 365 468 */ 366 469 .macro CLI 367 - #if !defined(CONFIG_MIPS_MT_SMTC) 368 470 mfc0 t0, CP0_STATUS 369 471 li t1, ST0_CU0 | STATMASK 370 472 or t0, t1 371 473 xori t0, STATMASK 372 474 mtc0 t0, CP0_STATUS 373 - #else /* CONFIG_MIPS_MT_SMTC */ 374 - /* 375 - * For SMTC, we need to set privilege 376 - * and disable interrupts only for the 377 - * current TC, using the TCStatus register. 378 - */ 379 - mfc0 t0, CP0_TCSTATUS 380 - /* Fortunately CU 0 is in the same place in both registers */ 381 - /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ 382 - li t1, ST0_CU0 | 0x08001c00 383 - or t0, t1 384 - /* Clear TKSU, leave IXMT */ 385 - xori t0, 0x00001800 386 - mtc0 t0, CP0_TCSTATUS 387 - _ehb 388 - /* We need to leave the global IE bit set, but clear EXL...*/ 389 - mfc0 t0, CP0_STATUS 390 - ori t0, ST0_EXL | ST0_ERL 391 - xori t0, ST0_EXL | ST0_ERL 392 - mtc0 t0, CP0_STATUS 393 - #endif /* CONFIG_MIPS_MT_SMTC */ 394 475 irq_disable_hazard 395 476 .endm 396 477 ··· 377 502 * Set cp0 enable bit as sign that we're running on the kernel stack 378 503 */ 379 504 .macro STI 380 - #if !defined(CONFIG_MIPS_MT_SMTC) 381 505 mfc0 t0, CP0_STATUS 382 506 li t1, ST0_CU0 | STATMASK 383 507 or t0, t1 384 508 xori t0, STATMASK & ~1 385 509 mtc0 t0, CP0_STATUS 386 - #else /* CONFIG_MIPS_MT_SMTC */ 387 - /* 388 - * For SMTC, we need to set privilege 389 - * and enable interrupts only for the 390 - * current TC, using the TCStatus register. 
391 - */ 392 - _ehb 393 - mfc0 t0, CP0_TCSTATUS 394 - /* Fortunately CU 0 is in the same place in both registers */ 395 - /* Set TCU0, TKSU (for later inversion) and IXMT */ 396 - li t1, ST0_CU0 | 0x08001c00 397 - or t0, t1 398 - /* Clear TKSU *and* IXMT */ 399 - xori t0, 0x00001c00 400 - mtc0 t0, CP0_TCSTATUS 401 - _ehb 402 - /* We need to leave the global IE bit set, but clear EXL...*/ 403 - mfc0 t0, CP0_STATUS 404 - ori t0, ST0_EXL 405 - xori t0, ST0_EXL 406 - mtc0 t0, CP0_STATUS 407 - /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ 408 - #endif /* CONFIG_MIPS_MT_SMTC */ 409 510 irq_enable_hazard 410 511 .endm 411 512 ··· 391 540 * Set cp0 enable bit as sign that we're running on the kernel stack 392 541 */ 393 542 .macro KMODE 394 - #ifdef CONFIG_MIPS_MT_SMTC 395 - /* 396 - * This gets baroque in SMTC. We want to 397 - * protect the non-atomic clearing of EXL 398 - * with DMT/EMT, but we don't want to take 399 - * an interrupt while DMT is still in effect. 400 - */ 401 - 402 - /* KMODE gets invoked from both reorder and noreorder code */ 403 - .set push 404 - .set mips32r2 405 - .set noreorder 406 - mfc0 v0, CP0_TCSTATUS 407 - andi v1, v0, TCSTATUS_IXMT 408 - ori v0, TCSTATUS_IXMT 409 - mtc0 v0, CP0_TCSTATUS 410 - _ehb 411 - DMT 2 # dmt v0 412 - /* 413 - * We don't know a priori if ra is "live" 414 - */ 415 - move t0, ra 416 - jal mips_ihb 417 - nop /* delay slot */ 418 - move ra, t0 419 - #endif /* CONFIG_MIPS_MT_SMTC */ 420 543 mfc0 t0, CP0_STATUS 421 544 li t1, ST0_CU0 | (STATMASK & ~1) 422 545 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) ··· 401 576 or t0, t1 402 577 xori t0, STATMASK & ~1 403 578 mtc0 t0, CP0_STATUS 404 - #ifdef CONFIG_MIPS_MT_SMTC 405 - _ehb 406 - andi v0, v0, VPECONTROL_TE 407 - beqz v0, 2f 408 - nop /* delay slot */ 409 - emt 410 - 2: 411 - mfc0 v0, CP0_TCSTATUS 412 - /* Clear IXMT, then OR in previous value */ 413 - ori v0, TCSTATUS_IXMT 414 - xori v0, TCSTATUS_IXMT 415 - or v0, v1, v0 416 - mtc0 v0, 
CP0_TCSTATUS 417 - /* 418 - * irq_disable_hazard below should expand to EHB 419 - * on 24K/34K CPUS 420 - */ 421 - .set pop 422 - #endif /* CONFIG_MIPS_MT_SMTC */ 423 579 irq_disable_hazard 424 580 .endm 425 581
+1 -10
arch/mips/include/asm/thread_info.h
··· 159 159 * We stash processor id into a COP0 register to retrieve it fast 160 160 * at kernel exception entry. 161 161 */ 162 - #if defined(CONFIG_MIPS_MT_SMTC) 163 - #define SMP_CPUID_REG 2, 2 /* TCBIND */ 164 - #define ASM_SMP_CPUID_REG $2, 2 165 - #define SMP_CPUID_PTRSHIFT 19 166 - #elif defined(CONFIG_MIPS_PGD_C0_CONTEXT) 162 + #if defined(CONFIG_MIPS_PGD_C0_CONTEXT) 167 163 #define SMP_CPUID_REG 20, 0 /* XCONTEXT */ 168 164 #define ASM_SMP_CPUID_REG $20 169 165 #define SMP_CPUID_PTRSHIFT 48 ··· 175 179 #define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2) 176 180 #endif 177 181 178 - #ifdef CONFIG_MIPS_MT_SMTC 179 - #define ASM_CPUID_MFC0 mfc0 180 - #define UASM_i_CPUID_MFC0 uasm_i_mfc0 181 - #else 182 182 #define ASM_CPUID_MFC0 MFC0 183 183 #define UASM_i_CPUID_MFC0 UASM_i_MFC0 184 - #endif 185 184 186 185 #endif /* __KERNEL__ */ 187 186 #endif /* _ASM_THREAD_INFO_H */
+1 -4
arch/mips/include/asm/time.h
··· 52 52 */ 53 53 extern unsigned int __weak get_c0_compare_int(void); 54 54 extern int r4k_clockevent_init(void); 55 - extern int smtc_clockevent_init(void); 56 55 extern int gic_clockevent_init(void); 57 56 58 57 static inline int mips_clockevent_init(void) 59 58 { 60 - #ifdef CONFIG_MIPS_MT_SMTC 61 - return smtc_clockevent_init(); 62 - #elif defined(CONFIG_CEVT_GIC) 59 + #if defined(CONFIG_CEVT_GIC) 63 60 return (gic_clockevent_init() | r4k_clockevent_init()); 64 61 #elif defined(CONFIG_CEVT_R4K) 65 62 return r4k_clockevent_init();
-2
arch/mips/kernel/Makefile
··· 17 17 18 18 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o 19 19 obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 20 - obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o 21 20 obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o 22 21 obj-$(CONFIG_CEVT_GIC) += cevt-gic.o 23 22 obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o ··· 49 50 50 51 obj-$(CONFIG_MIPS_MT) += mips-mt.o 51 52 obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o 52 - obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o 53 53 obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o 54 54 obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 55 55 obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
-3
arch/mips/kernel/asm-offsets.c
··· 64 64 OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); 65 65 OFFSET(PT_STATUS, pt_regs, cp0_status); 66 66 OFFSET(PT_CAUSE, pt_regs, cp0_cause); 67 - #ifdef CONFIG_MIPS_MT_SMTC 68 - OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); 69 - #endif /* CONFIG_MIPS_MT_SMTC */ 70 67 #ifdef CONFIG_CPU_CAVIUM_OCTEON 71 68 OFFSET(PT_MPL, pt_regs, mpl); 72 69 OFFSET(PT_MTP, pt_regs, mtp);
-14
arch/mips/kernel/cevt-r4k.c
··· 12 12 #include <linux/smp.h> 13 13 #include <linux/irq.h> 14 14 15 - #include <asm/smtc_ipi.h> 16 15 #include <asm/time.h> 17 16 #include <asm/cevt-r4k.h> 18 17 #include <asm/gic.h> 19 18 20 - /* 21 - * The SMTC Kernel for the 34K, 1004K, et. al. replaces several 22 - * of these routines with SMTC-specific variants. 23 - */ 24 - 25 - #ifndef CONFIG_MIPS_MT_SMTC 26 19 static int mips_next_event(unsigned long delta, 27 20 struct clock_event_device *evt) 28 21 { ··· 29 36 return res; 30 37 } 31 38 32 - #endif /* CONFIG_MIPS_MT_SMTC */ 33 - 34 39 void mips_set_clock_mode(enum clock_event_mode mode, 35 40 struct clock_event_device *evt) 36 41 { ··· 38 47 DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); 39 48 int cp0_timer_irq_installed; 40 49 41 - #ifndef CONFIG_MIPS_MT_SMTC 42 50 irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 43 51 { 44 52 const int r2 = cpu_has_mips_r2; ··· 71 81 out: 72 82 return IRQ_HANDLED; 73 83 } 74 - 75 - #endif /* Not CONFIG_MIPS_MT_SMTC */ 76 84 77 85 struct irqaction c0_compare_irqaction = { 78 86 .handler = c0_compare_interrupt, ··· 158 170 return 1; 159 171 } 160 172 161 - #ifndef CONFIG_MIPS_MT_SMTC 162 173 int r4k_clockevent_init(void) 163 174 { 164 175 unsigned int cpu = smp_processor_id(); ··· 212 225 return 0; 213 226 } 214 227 215 - #endif /* Not CONFIG_MIPS_MT_SMTC */
-324
arch/mips/kernel/cevt-smtc.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2007 MIPS Technologies, Inc. 7 - * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> 8 - * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl 9 - */ 10 - #include <linux/clockchips.h> 11 - #include <linux/interrupt.h> 12 - #include <linux/percpu.h> 13 - #include <linux/smp.h> 14 - #include <linux/irq.h> 15 - 16 - #include <asm/smtc_ipi.h> 17 - #include <asm/time.h> 18 - #include <asm/cevt-r4k.h> 19 - 20 - /* 21 - * Variant clock event timer support for SMTC on MIPS 34K, 1004K 22 - * or other MIPS MT cores. 23 - * 24 - * Notes on SMTC Support: 25 - * 26 - * SMTC has multiple microthread TCs pretending to be Linux CPUs. 27 - * But there's only one Count/Compare pair per VPE, and Compare 28 - * interrupts are taken opportunisitically by available TCs 29 - * bound to the VPE with the Count register. The new timer 30 - * framework provides for global broadcasts, but we really 31 - * want VPE-level multicasts for best behavior. So instead 32 - * of invoking the high-level clock-event broadcast code, 33 - * this version of SMTC support uses the historical SMTC 34 - * multicast mechanisms "under the hood", appearing to the 35 - * generic clock layer as if the interrupts are per-CPU. 36 - * 37 - * The approach taken here is to maintain a set of NR_CPUS 38 - * virtual timers, and track which "CPU" needs to be alerted 39 - * at each event. 40 - * 41 - * It's unlikely that we'll see a MIPS MT core with more than 42 - * 2 VPEs, but we *know* that we won't need to handle more 43 - * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements 44 - * is always going to be overkill, but always going to be enough. 
45 - */ 46 - 47 - unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; 48 - static int smtc_nextinvpe[NR_CPUS]; 49 - 50 - /* 51 - * Timestamps stored are absolute values to be programmed 52 - * into Count register. Valid timestamps will never be zero. 53 - * If a Zero Count value is actually calculated, it is converted 54 - * to be a 1, which will introduce 1 or two CPU cycles of error 55 - * roughly once every four billion events, which at 1000 HZ means 56 - * about once every 50 days. If that's actually a problem, one 57 - * could alternate squashing 0 to 1 and to -1. 58 - */ 59 - 60 - #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) 61 - #define ISVALID(x) ((x) != 0L) 62 - 63 - /* 64 - * Time comparison is subtle, as it's really truncated 65 - * modular arithmetic. 66 - */ 67 - 68 - #define IS_SOONER(a, b, reference) \ 69 - (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) 70 - 71 - /* 72 - * CATCHUP_INCREMENT, used when the function falls behind the counter. 73 - * Could be an increasing function instead of a constant; 74 - */ 75 - 76 - #define CATCHUP_INCREMENT 64 77 - 78 - static int mips_next_event(unsigned long delta, 79 - struct clock_event_device *evt) 80 - { 81 - unsigned long flags; 82 - unsigned int mtflags; 83 - unsigned long timestamp, reference, previous; 84 - unsigned long nextcomp = 0L; 85 - int vpe = current_cpu_data.vpe_id; 86 - int cpu = smp_processor_id(); 87 - local_irq_save(flags); 88 - mtflags = dmt(); 89 - 90 - /* 91 - * Maintain the per-TC virtual timer 92 - * and program the per-VPE shared Count register 93 - * as appropriate here... 94 - */ 95 - reference = (unsigned long)read_c0_count(); 96 - timestamp = MAKEVALID(reference + delta); 97 - /* 98 - * To really model the clock, we have to catch the case 99 - * where the current next-in-VPE timestamp is the old 100 - * timestamp for the calling CPE, but the new value is 101 - * in fact later. 
In that case, we have to do a full 102 - * scan and discover the new next-in-VPE CPU id and 103 - * timestamp. 104 - */ 105 - previous = smtc_nexttime[vpe][cpu]; 106 - if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) 107 - && IS_SOONER(previous, timestamp, reference)) { 108 - int i; 109 - int soonest = cpu; 110 - 111 - /* 112 - * Update timestamp array here, so that new 113 - * value gets considered along with those of 114 - * other virtual CPUs on the VPE. 115 - */ 116 - smtc_nexttime[vpe][cpu] = timestamp; 117 - for_each_online_cpu(i) { 118 - if (ISVALID(smtc_nexttime[vpe][i]) 119 - && IS_SOONER(smtc_nexttime[vpe][i], 120 - smtc_nexttime[vpe][soonest], reference)) { 121 - soonest = i; 122 - } 123 - } 124 - smtc_nextinvpe[vpe] = soonest; 125 - nextcomp = smtc_nexttime[vpe][soonest]; 126 - /* 127 - * Otherwise, we don't have to process the whole array rank, 128 - * we just have to see if the event horizon has gotten closer. 129 - */ 130 - } else { 131 - if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || 132 - IS_SOONER(timestamp, 133 - smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { 134 - smtc_nextinvpe[vpe] = cpu; 135 - nextcomp = timestamp; 136 - } 137 - /* 138 - * Since next-in-VPE may me the same as the executing 139 - * virtual CPU, we update the array *after* checking 140 - * its value. 141 - */ 142 - smtc_nexttime[vpe][cpu] = timestamp; 143 - } 144 - 145 - /* 146 - * It may be that, in fact, we don't need to update Compare, 147 - * but if we do, we want to make sure we didn't fall into 148 - * a crack just behind Count. 149 - */ 150 - if (ISVALID(nextcomp)) { 151 - write_c0_compare(nextcomp); 152 - ehb(); 153 - /* 154 - * We never return an error, we just make sure 155 - * that we trigger the handlers as quickly as 156 - * we can if we fell behind. 
157 - */ 158 - while ((nextcomp - (unsigned long)read_c0_count()) 159 - > (unsigned long)LONG_MAX) { 160 - nextcomp += CATCHUP_INCREMENT; 161 - write_c0_compare(nextcomp); 162 - ehb(); 163 - } 164 - } 165 - emt(mtflags); 166 - local_irq_restore(flags); 167 - return 0; 168 - } 169 - 170 - 171 - void smtc_distribute_timer(int vpe) 172 - { 173 - unsigned long flags; 174 - unsigned int mtflags; 175 - int cpu; 176 - struct clock_event_device *cd; 177 - unsigned long nextstamp; 178 - unsigned long reference; 179 - 180 - 181 - repeat: 182 - nextstamp = 0L; 183 - for_each_online_cpu(cpu) { 184 - /* 185 - * Find virtual CPUs within the current VPE who have 186 - * unserviced timer requests whose time is now past. 187 - */ 188 - local_irq_save(flags); 189 - mtflags = dmt(); 190 - if (cpu_data[cpu].vpe_id == vpe && 191 - ISVALID(smtc_nexttime[vpe][cpu])) { 192 - reference = (unsigned long)read_c0_count(); 193 - if ((smtc_nexttime[vpe][cpu] - reference) 194 - > (unsigned long)LONG_MAX) { 195 - smtc_nexttime[vpe][cpu] = 0L; 196 - emt(mtflags); 197 - local_irq_restore(flags); 198 - /* 199 - * We don't send IPIs to ourself. 200 - */ 201 - if (cpu != smp_processor_id()) { 202 - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); 203 - } else { 204 - cd = &per_cpu(mips_clockevent_device, cpu); 205 - cd->event_handler(cd); 206 - } 207 - } else { 208 - /* Local to VPE but Valid Time not yet reached. 
*/ 209 - if (!ISVALID(nextstamp) || 210 - IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, 211 - reference)) { 212 - smtc_nextinvpe[vpe] = cpu; 213 - nextstamp = smtc_nexttime[vpe][cpu]; 214 - } 215 - emt(mtflags); 216 - local_irq_restore(flags); 217 - } 218 - } else { 219 - emt(mtflags); 220 - local_irq_restore(flags); 221 - 222 - } 223 - } 224 - /* Reprogram for interrupt at next soonest timestamp for VPE */ 225 - if (ISVALID(nextstamp)) { 226 - write_c0_compare(nextstamp); 227 - ehb(); 228 - if ((nextstamp - (unsigned long)read_c0_count()) 229 - > (unsigned long)LONG_MAX) 230 - goto repeat; 231 - } 232 - } 233 - 234 - 235 - irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 236 - { 237 - int cpu = smp_processor_id(); 238 - 239 - /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ 240 - handle_perf_irq(1); 241 - 242 - if (read_c0_cause() & (1 << 30)) { 243 - /* Clear Count/Compare Interrupt */ 244 - write_c0_compare(read_c0_compare()); 245 - smtc_distribute_timer(cpu_data[cpu].vpe_id); 246 - } 247 - return IRQ_HANDLED; 248 - } 249 - 250 - 251 - int smtc_clockevent_init(void) 252 - { 253 - uint64_t mips_freq = mips_hpt_frequency; 254 - unsigned int cpu = smp_processor_id(); 255 - struct clock_event_device *cd; 256 - unsigned int irq; 257 - int i; 258 - int j; 259 - 260 - if (!cpu_has_counter || !mips_hpt_frequency) 261 - return -ENXIO; 262 - if (cpu == 0) { 263 - for (i = 0; i < num_possible_cpus(); i++) { 264 - smtc_nextinvpe[i] = 0; 265 - for (j = 0; j < num_possible_cpus(); j++) 266 - smtc_nexttime[i][j] = 0L; 267 - } 268 - /* 269 - * SMTC also can't have the usablility test 270 - * run by secondary TCs once Compare is in use. 271 - */ 272 - if (!c0_compare_int_usable()) 273 - return -ENXIO; 274 - } 275 - 276 - /* 277 - * With vectored interrupts things are getting platform specific. 278 - * get_c0_compare_int is a hook to allow a platform to return the 279 - * interrupt number of it's liking. 
280 - */ 281 - irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; 282 - if (get_c0_compare_int) 283 - irq = get_c0_compare_int(); 284 - 285 - cd = &per_cpu(mips_clockevent_device, cpu); 286 - 287 - cd->name = "MIPS"; 288 - cd->features = CLOCK_EVT_FEAT_ONESHOT; 289 - 290 - /* Calculate the min / max delta */ 291 - cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); 292 - cd->shift = 32; 293 - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 294 - cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 295 - 296 - cd->rating = 300; 297 - cd->irq = irq; 298 - cd->cpumask = cpumask_of(cpu); 299 - cd->set_next_event = mips_next_event; 300 - cd->set_mode = mips_set_clock_mode; 301 - cd->event_handler = mips_event_handler; 302 - 303 - clockevents_register_device(cd); 304 - 305 - /* 306 - * On SMTC we only want to do the data structure 307 - * initialization and IRQ setup once. 308 - */ 309 - if (cpu) 310 - return 0; 311 - /* 312 - * And we need the hwmask associated with the c0_compare 313 - * vector to be initialized. 314 - */ 315 - irq_hwmask[irq] = (0x100 << cp0_compare_irq); 316 - if (cp0_timer_irq_installed) 317 - return 0; 318 - 319 - cp0_timer_irq_installed = 1; 320 - 321 - setup_irq(irq, &c0_compare_irqaction); 322 - 323 - return 0; 324 - }
+1 -1
arch/mips/kernel/cpu-probe.c
··· 62 62 case CPU_34K: 63 63 /* 64 64 * Erratum "RPS May Cause Incorrect Instruction Execution" 65 - * This code only handles VPE0, any SMP/RTOS code 65 + * This code only handles VPE0, any SMP/RTOS code 66 66 * making use of VPE1 will be responsible for that VPE. 67 67 if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
-38
arch/mips/kernel/entry.S
··· 16 16 #include <asm/isadep.h> 17 17 #include <asm/thread_info.h> 18 18 #include <asm/war.h> 19 - #ifdef CONFIG_MIPS_MT_SMTC 20 - #include <asm/mipsmtregs.h> 21 - #endif 22 19 23 20 #ifndef CONFIG_PREEMPT 24 21 #define resume_kernel restore_all ··· 86 89 bnez t0, syscall_exit_work 87 90 88 91 restore_all: # restore full frame 89 - #ifdef CONFIG_MIPS_MT_SMTC 90 - #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 91 - /* Re-arm any temporarily masked interrupts not explicitly "acked" */ 92 - mfc0 v0, CP0_TCSTATUS 93 - ori v1, v0, TCSTATUS_IXMT 94 - mtc0 v1, CP0_TCSTATUS 95 - andi v0, TCSTATUS_IXMT 96 - _ehb 97 - mfc0 t0, CP0_TCCONTEXT 98 - DMT 9 # dmt t1 99 - jal mips_ihb 100 - mfc0 t2, CP0_STATUS 101 - andi t3, t0, 0xff00 102 - or t2, t2, t3 103 - mtc0 t2, CP0_STATUS 104 - _ehb 105 - andi t1, t1, VPECONTROL_TE 106 - beqz t1, 1f 107 - EMT 108 - 1: 109 - mfc0 v1, CP0_TCSTATUS 110 - /* We set IXMT above, XOR should clear it here */ 111 - xori v1, v1, TCSTATUS_IXMT 112 - or v1, v0, v1 113 - mtc0 v1, CP0_TCSTATUS 114 - _ehb 115 - xor t0, t0, t3 116 - mtc0 t0, CP0_TCCONTEXT 117 - #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ 118 - /* Detect and execute deferred IPI "interrupts" */ 119 - LONG_L s0, TI_REGS($28) 120 - LONG_S sp, TI_REGS($28) 121 - jal deferred_smtc_ipi 122 - LONG_S s0, TI_REGS($28) 123 - #endif /* CONFIG_MIPS_MT_SMTC */ 124 92 .set noat 125 93 RESTORE_TEMP 126 94 RESTORE_AT
-54
arch/mips/kernel/genex.S
··· 21 21 #include <asm/war.h> 22 22 #include <asm/thread_info.h> 23 23 24 - #ifdef CONFIG_MIPS_MT_SMTC 25 - #define PANIC_PIC(msg) \ 26 - .set push; \ 27 - .set nomicromips; \ 28 - .set reorder; \ 29 - PTR_LA a0,8f; \ 30 - .set noat; \ 31 - PTR_LA AT, panic; \ 32 - jr AT; \ 33 - 9: b 9b; \ 34 - .set pop; \ 35 - TEXT(msg) 36 - #endif 37 - 38 24 __INIT 39 25 40 26 /* ··· 237 251 SAVE_AT 238 252 .set push 239 253 .set noreorder 240 - #ifdef CONFIG_MIPS_MT_SMTC 241 - /* 242 - * To keep from blindly blocking *all* interrupts 243 - * during service by SMTC kernel, we also want to 244 - * pass the IM value to be cleared. 245 - */ 246 - FEXPORT(except_vec_vi_mori) 247 - ori a0, $0, 0 248 - #endif /* CONFIG_MIPS_MT_SMTC */ 249 254 PTR_LA v1, except_vec_vi_handler 250 255 FEXPORT(except_vec_vi_lui) 251 256 lui v0, 0 /* Patched */ ··· 254 277 NESTED(except_vec_vi_handler, 0, sp) 255 278 SAVE_TEMP 256 279 SAVE_STATIC 257 - #ifdef CONFIG_MIPS_MT_SMTC 258 - /* 259 - * SMTC has an interesting problem that interrupts are level-triggered, 260 - * and the CLI macro will clear EXL, potentially causing a duplicate 261 - * interrupt service invocation. So we need to clear the associated 262 - * IM bit of Status prior to doing CLI, and restore it after the 263 - * service routine has been invoked - we must assume that the 264 - * service routine will have cleared the state, and any active 265 - * level represents a new or otherwised unserviced event... 
266 - */ 267 - mfc0 t1, CP0_STATUS 268 - and t0, a0, t1 269 - #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 270 - mfc0 t2, CP0_TCCONTEXT 271 - or t2, t0, t2 272 - mtc0 t2, CP0_TCCONTEXT 273 - #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ 274 - xor t1, t1, t0 275 - mtc0 t1, CP0_STATUS 276 - _ehb 277 - #endif /* CONFIG_MIPS_MT_SMTC */ 278 280 CLI 279 281 #ifdef CONFIG_TRACE_IRQFLAGS 280 282 move s0, v0 281 - #ifdef CONFIG_MIPS_MT_SMTC 282 - move s1, a0 283 - #endif 284 283 TRACE_IRQS_OFF 285 - #ifdef CONFIG_MIPS_MT_SMTC 286 - move a0, s1 287 - #endif 288 284 move v0, s0 289 285 #endif 290 286 ··· 446 496 447 497 .align 5 448 498 LEAF(handle_ri_rdhwr_vivt) 449 - #ifdef CONFIG_MIPS_MT_SMTC 450 - PANIC_PIC("handle_ri_rdhwr_vivt called") 451 - #else 452 499 .set push 453 500 .set noat 454 501 .set noreorder ··· 464 517 .set pop 465 518 bltz k1, handle_ri /* slow path */ 466 519 /* fall thru */ 467 - #endif 468 520 END(handle_ri_rdhwr_vivt) 469 521 470 522 LEAF(handle_ri_rdhwr)
-56
arch/mips/kernel/head.S
··· 35 35 */ 36 36 .macro setup_c0_status set clr 37 37 .set push 38 - #ifdef CONFIG_MIPS_MT_SMTC 39 - /* 40 - * For SMTC, we need to set privilege and disable interrupts only for 41 - * the current TC, using the TCStatus register. 42 - */ 43 - mfc0 t0, CP0_TCSTATUS 44 - /* Fortunately CU 0 is in the same place in both registers */ 45 - /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ 46 - li t1, ST0_CU0 | 0x08001c00 47 - or t0, t1 48 - /* Clear TKSU, leave IXMT */ 49 - xori t0, 0x00001800 50 - mtc0 t0, CP0_TCSTATUS 51 - _ehb 52 - /* We need to leave the global IE bit set, but clear EXL...*/ 53 - mfc0 t0, CP0_STATUS 54 - or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr 55 - xor t0, ST0_EXL | ST0_ERL | \clr 56 - mtc0 t0, CP0_STATUS 57 - #else 58 38 mfc0 t0, CP0_STATUS 59 39 or t0, ST0_CU0|\set|0x1f|\clr 60 40 xor t0, 0x1f|\clr 61 41 mtc0 t0, CP0_STATUS 62 42 .set noreorder 63 43 sll zero,3 # ehb 64 - #endif 65 44 .set pop 66 45 .endm 67 46 ··· 94 115 jr t0 95 116 0: 96 117 97 - #ifdef CONFIG_MIPS_MT_SMTC 98 - /* 99 - * In SMTC kernel, "CLI" is thread-specific, in TCStatus. 100 - * We still need to enable interrupts globally in Status, 101 - * and clear EXL/ERL. 102 - * 103 - * TCContext is used to track interrupt levels under 104 - * service in SMTC kernel. Clear for boot TC before 105 - * allowing any interrupts. 106 - */ 107 - mtc0 zero, CP0_TCCONTEXT 108 - 109 - mfc0 t0, CP0_STATUS 110 - ori t0, t0, 0xff1f 111 - xori t0, t0, 0x001e 112 - mtc0 t0, CP0_STATUS 113 - #endif /* CONFIG_MIPS_MT_SMTC */ 114 - 115 118 PTR_LA t0, __bss_start # clear .bss 116 119 LONG_S zero, (t0) 117 120 PTR_LA t1, __bss_stop - LONGSIZE ··· 125 164 * function after setting up the stack and gp registers. 126 165 */ 127 166 NESTED(smp_bootstrap, 16, sp) 128 - #ifdef CONFIG_MIPS_MT_SMTC 129 - /* 130 - * Read-modify-writes of Status must be atomic, and this 131 - * is one case where CLI is invoked without EXL being 132 - * necessarily set. 
The CLI and setup_c0_status will 133 - * in fact be redundant for all but the first TC of 134 - * each VPE being booted. 135 - */ 136 - DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ 137 - jal mips_ihb 138 - #endif /* CONFIG_MIPS_MT_SMTC */ 139 167 smp_slave_setup 140 168 setup_c0_status_sec 141 - #ifdef CONFIG_MIPS_MT_SMTC 142 - andi t2, t2, VPECONTROL_TE 143 - beqz t2, 2f 144 - EMT # emt 145 - 2: 146 - #endif /* CONFIG_MIPS_MT_SMTC */ 147 169 j start_secondary 148 170 END(smp_bootstrap) 149 171 #endif /* CONFIG_SMP */
-4
arch/mips/kernel/i8259.c
··· 42 42 .irq_disable = disable_8259A_irq, 43 43 .irq_unmask = enable_8259A_irq, 44 44 .irq_mask_ack = mask_and_ack_8259A, 45 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 46 - .irq_set_affinity = plat_set_irq_affinity, 47 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 48 45 }; 49 46 50 47 /* ··· 177 180 outb(cached_master_mask, PIC_MASTER_IMR); 178 181 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ 179 182 } 180 - smtc_im_ack_irq(irq); 181 183 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 182 184 return; 183 185
-10
arch/mips/kernel/idle.c
··· 229 229 } 230 230 } 231 231 232 - static void smtc_idle_hook(void) 233 - { 234 - #ifdef CONFIG_MIPS_MT_SMTC 235 - void smtc_idle_loop_hook(void); 236 - 237 - smtc_idle_loop_hook(); 238 - #endif 239 - } 240 - 241 232 void arch_cpu_idle(void) 242 233 { 243 - smtc_idle_hook(); 244 234 if (cpu_wait) 245 235 cpu_wait(); 246 236 else
-5
arch/mips/kernel/irq-msc01.c
··· 53 53 */ 54 54 static void level_mask_and_ack_msc_irq(struct irq_data *d) 55 55 { 56 - unsigned int irq = d->irq; 57 - 58 56 mask_msc_irq(d); 59 57 if (!cpu_has_veic) 60 58 MSCIC_WRITE(MSC01_IC_EOI, 0); 61 - /* This actually needs to be a call into platform code */ 62 - smtc_im_ack_irq(irq); 63 59 } 64 60 65 61 /* ··· 74 78 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); 75 79 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); 76 80 } 77 - smtc_im_ack_irq(irq); 78 81 } 79 82 80 83 /*
-17
arch/mips/kernel/irq.c
··· 73 73 */ 74 74 void ack_bad_irq(unsigned int irq) 75 75 { 76 - smtc_im_ack_irq(irq); 77 76 printk("unexpected IRQ # %d\n", irq); 78 77 } 79 78 ··· 141 142 { 142 143 irq_enter(); 143 144 check_stack_overflow(); 144 - if (!smtc_handle_on_other_cpu(irq)) 145 - generic_handle_irq(irq); 146 - irq_exit(); 147 - } 148 - 149 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 150 - /* 151 - * To avoid inefficient and in some cases pathological re-checking of 152 - * IRQ affinity, we have this variant that skips the affinity check. 153 - */ 154 - 155 - void __irq_entry do_IRQ_no_affinity(unsigned int irq) 156 - { 157 - irq_enter(); 158 - smtc_im_backstop(irq); 159 145 generic_handle_irq(irq); 160 146 irq_exit(); 161 147 } 162 148 163 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+1 -1
arch/mips/kernel/mips-mt-fpaff.c
··· 1 1 /* 2 - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels 2 + * General MIPS MT support routines, usable in AP/SP and SMVP. 3 3 * Copyright (C) 2005 Mips Technologies, Inc 4 4 */ 5 5 #include <linux/cpu.h>
+1 -17
arch/mips/kernel/mips-mt.c
··· 1 1 /* 2 - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels 2 + * General MIPS MT support routines, usable in AP/SP and SMVP. 3 3 * Copyright (C) 2005 Mips Technologies, Inc 4 4 */ 5 5 ··· 57 57 int tc; 58 58 unsigned long haltval; 59 59 unsigned long tcstatval; 60 - #ifdef CONFIG_MIPS_MT_SMTC 61 - void smtc_soft_dump(void); 62 - #endif /* CONFIG_MIPT_MT_SMTC */ 63 60 64 61 local_irq_save(flags); 65 62 vpflags = dvpe(); ··· 113 116 if (!haltval) 114 117 write_tc_c0_tchalt(0); 115 118 } 116 - #ifdef CONFIG_MIPS_MT_SMTC 117 - smtc_soft_dump(); 118 - #endif /* CONFIG_MIPT_MT_SMTC */ 119 119 printk("===========================\n"); 120 120 evpe(vpflags); 121 121 local_irq_restore(flags); ··· 289 295 290 296 void mt_cflush_lockdown(void) 291 297 { 292 - #ifdef CONFIG_MIPS_MT_SMTC 293 - void smtc_cflush_lockdown(void); 294 - 295 - smtc_cflush_lockdown(); 296 - #endif /* CONFIG_MIPS_MT_SMTC */ 297 298 /* FILL IN VSMP and AP/SP VERSIONS HERE */ 298 299 } 299 300 300 301 void mt_cflush_release(void) 301 302 { 302 - #ifdef CONFIG_MIPS_MT_SMTC 303 - void smtc_cflush_release(void); 304 - 305 - smtc_cflush_release(); 306 - #endif /* CONFIG_MIPS_MT_SMTC */ 307 303 /* FILL IN VSMP and AP/SP VERSIONS HERE */ 308 304 } 309 305
-7
arch/mips/kernel/process.c
··· 140 140 */ 141 141 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); 142 142 143 - #ifdef CONFIG_MIPS_MT_SMTC 144 - /* 145 - * SMTC restores TCStatus after Status, and the CU bits 146 - * are aliased there. 147 - */ 148 - childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); 149 - #endif 150 143 clear_tsk_thread_flag(p, TIF_USEDFPU); 151 144 152 145 #ifdef CONFIG_MIPS_MT_FPAFF
-33
arch/mips/kernel/r4k_switch.S
··· 87 87 88 88 PTR_ADDU t0, $28, _THREAD_SIZE - 32 89 89 set_saved_sp t0, t1, t2 90 - #ifdef CONFIG_MIPS_MT_SMTC 91 - /* Read-modify-writes of Status must be atomic on a VPE */ 92 - mfc0 t2, CP0_TCSTATUS 93 - ori t1, t2, TCSTATUS_IXMT 94 - mtc0 t1, CP0_TCSTATUS 95 - andi t2, t2, TCSTATUS_IXMT 96 - _ehb 97 - DMT 8 # dmt t0 98 - move t1,ra 99 - jal mips_ihb 100 - move ra,t1 101 - #endif /* CONFIG_MIPS_MT_SMTC */ 102 90 mfc0 t1, CP0_STATUS /* Do we really need this? */ 103 91 li a3, 0xff01 104 92 and t1, a3 ··· 95 107 and a2, a3 96 108 or a2, t1 97 109 mtc0 a2, CP0_STATUS 98 - #ifdef CONFIG_MIPS_MT_SMTC 99 - _ehb 100 - andi t0, t0, VPECONTROL_TE 101 - beqz t0, 1f 102 - emt 103 - 1: 104 - mfc0 t1, CP0_TCSTATUS 105 - xori t1, t1, TCSTATUS_IXMT 106 - or t1, t1, t2 107 - mtc0 t1, CP0_TCSTATUS 108 - _ehb 109 - #endif /* CONFIG_MIPS_MT_SMTC */ 110 110 move v0, a0 111 111 jr ra 112 112 END(resume) ··· 152 176 #define FPU_DEFAULT 0x00000000 153 177 154 178 LEAF(_init_fpu) 155 - #ifdef CONFIG_MIPS_MT_SMTC 156 - /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ 157 - mfc0 t0, CP0_TCSTATUS 158 - /* Bit position is the same for Status, TCStatus */ 159 - li t1, ST0_CU1 160 - or t0, t1 161 - mtc0 t0, CP0_TCSTATUS 162 - #else /* Normal MIPS CU1 enable */ 163 179 mfc0 t0, CP0_STATUS 164 180 li t1, ST0_CU1 165 181 or t0, t1 166 182 mtc0 t0, CP0_STATUS 167 - #endif /* CONFIG_MIPS_MT_SMTC */ 168 183 enable_fpu_hazard 169 184 170 185 li t1, FPU_DEFAULT
-1
arch/mips/kernel/rtlx-mt.c
··· 36 36 unsigned long flags; 37 37 int i; 38 38 39 - /* Ought not to be strictly necessary for SMTC builds */ 40 39 local_irq_save(flags); 41 40 vpeflags = dvpe(); 42 41 set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
+1 -8
arch/mips/kernel/smp-cmp.c
··· 49 49 50 50 /* Enable per-cpu interrupts: platform specific */ 51 51 52 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 52 + #ifdef CONFIG_MIPS_MT_SMP 53 53 if (cpu_has_mipsmt) 54 54 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & 55 55 TCBIND_CURVPE; 56 - #endif 57 - #ifdef CONFIG_MIPS_MT_SMTC 58 - c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; 59 56 #endif 60 57 } 61 58 ··· 132 135 unsigned int mvpconf0 = read_c0_mvpconf0(); 133 136 134 137 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 135 - #elif defined(CONFIG_MIPS_MT_SMTC) 136 - unsigned int mvpconf0 = read_c0_mvpconf0(); 137 - 138 - nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 139 138 #endif 140 139 smp_num_siblings = nvpe; 141 140 }
-13
arch/mips/kernel/smp.c
··· 43 43 #include <asm/time.h> 44 44 #include <asm/setup.h> 45 45 46 - #ifdef CONFIG_MIPS_MT_SMTC 47 - #include <asm/mipsmtregs.h> 48 - #endif /* CONFIG_MIPS_MT_SMTC */ 49 - 50 46 volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 51 47 52 48 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ ··· 98 102 { 99 103 unsigned int cpu; 100 104 101 - #ifdef CONFIG_MIPS_MT_SMTC 102 - /* Only do cpu_probe for first TC of CPU */ 103 - if ((read_c0_tcbind() & TCBIND_CURTC) != 0) 104 - __cpu_name[smp_processor_id()] = __cpu_name[0]; 105 - else 106 - #endif /* CONFIG_MIPS_MT_SMTC */ 107 105 cpu_probe(); 108 106 cpu_report(); 109 107 per_cpu_trap_init(false); ··· 228 238 * o collapses to normal function call on UP kernels 229 239 * o collapses to normal function call on systems with a single shared 230 240 * primary cache. 231 - * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. 232 241 */ 233 242 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) 234 243 { 235 - #ifndef CONFIG_MIPS_MT_SMTC 236 244 smp_call_function(func, info, 1); 237 - #endif 238 245 } 239 246 240 247 static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
-133
arch/mips/kernel/smtc-asm.S
··· 1 - /* 2 - * Assembly Language Functions for MIPS MT SMTC support 3 - */ 4 - 5 - /* 6 - * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */ 7 - 8 - #include <asm/regdef.h> 9 - #include <asm/asmmacro.h> 10 - #include <asm/stackframe.h> 11 - #include <asm/irqflags.h> 12 - 13 - /* 14 - * "Software Interrupt" linkage. 15 - * 16 - * This is invoked when an "Interrupt" is sent from one TC to another, 17 - * where the TC to be interrupted is halted, has it's Restart address 18 - * and Status values saved by the "remote control" thread, then modified 19 - * to cause execution to begin here, in kenel mode. This code then 20 - * disguises the TC state as that of an exception and transfers 21 - * control to the general exception or vectored interrupt handler. 22 - */ 23 - .set noreorder 24 - 25 - /* 26 - The __smtc_ipi_vector would use k0 and k1 as temporaries and 27 - 1) Set EXL (this is per-VPE, so this can't be done by proxy!) 28 - 2) Restore the K/CU and IXMT bits to the pre "exception" state 29 - (EXL means no interrupts and access to the kernel map). 30 - 3) Set EPC to be the saved value of TCRestart. 31 - 4) Jump to the exception handler entry point passed by the sender. 32 - 33 - CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? 34 - */ 35 - 36 - /* 37 - * Reviled and slandered vision: Set EXL and restore K/CU/IXMT 38 - * state of pre-halt thread, then save everything and call 39 - * thought some function pointer to imaginary_exception, which 40 - * will parse a register value or memory message queue to 41 - * deliver things like interprocessor interrupts. On return 42 - * from that function, jump to the global ret_from_irq code 43 - * to invoke the scheduler and return as appropriate. 
44 - */ 45 - 46 - #define PT_PADSLOT4 (PT_R0-8) 47 - #define PT_PADSLOT5 (PT_R0-4) 48 - 49 - .text 50 - .align 5 51 - FEXPORT(__smtc_ipi_vector) 52 - #ifdef CONFIG_CPU_MICROMIPS 53 - nop 54 - #endif 55 - .set noat 56 - /* Disable thread scheduling to make Status update atomic */ 57 - DMT 27 # dmt k1 58 - _ehb 59 - /* Set EXL */ 60 - mfc0 k0,CP0_STATUS 61 - ori k0,k0,ST0_EXL 62 - mtc0 k0,CP0_STATUS 63 - _ehb 64 - /* Thread scheduling now inhibited by EXL. Restore TE state. */ 65 - andi k1,k1,VPECONTROL_TE 66 - beqz k1,1f 67 - emt 68 - 1: 69 - /* 70 - * The IPI sender has put some information on the anticipated 71 - * kernel stack frame. If we were in user mode, this will be 72 - * built above the saved kernel SP. If we were already in the 73 - * kernel, it will be built above the current CPU SP. 74 - * 75 - * Were we in kernel mode, as indicated by CU0? 76 - */ 77 - sll k1,k0,3 78 - .set noreorder 79 - bltz k1,2f 80 - move k1,sp 81 - .set reorder 82 - /* 83 - * If previously in user mode, set CU0 and use kernel stack. 84 - */ 85 - li k1,ST0_CU0 86 - or k1,k1,k0 87 - mtc0 k1,CP0_STATUS 88 - _ehb 89 - get_saved_sp 90 - /* Interrupting TC will have pre-set values in slots in the new frame */ 91 - 2: subu k1,k1,PT_SIZE 92 - /* Load TCStatus Value */ 93 - lw k0,PT_TCSTATUS(k1) 94 - /* Write it to TCStatus to restore CU/KSU/IXMT state */ 95 - mtc0 k0,$2,1 96 - _ehb 97 - lw k0,PT_EPC(k1) 98 - mtc0 k0,CP0_EPC 99 - /* Save all will redundantly recompute the SP, but use it for now */ 100 - SAVE_ALL 101 - CLI 102 - TRACE_IRQS_OFF 103 - /* Function to be invoked passed stack pad slot 5 */ 104 - lw t0,PT_PADSLOT5(sp) 105 - /* Argument from sender passed in stack pad slot 4 */ 106 - lw a0,PT_PADSLOT4(sp) 107 - LONG_L s0, TI_REGS($28) 108 - LONG_S sp, TI_REGS($28) 109 - PTR_LA ra, ret_from_irq 110 - jr t0 111 - 112 - /* 113 - * Called from idle loop to provoke processing of queued IPIs 114 - * First IPI message in queue passed as argument. 
115 - */ 116 - 117 - LEAF(self_ipi) 118 - /* Before anything else, block interrupts */ 119 - mfc0 t0,CP0_TCSTATUS 120 - ori t1,t0,TCSTATUS_IXMT 121 - mtc0 t1,CP0_TCSTATUS 122 - _ehb 123 - /* We know we're in kernel mode, so prepare stack frame */ 124 - subu t1,sp,PT_SIZE 125 - sw ra,PT_EPC(t1) 126 - sw a0,PT_PADSLOT4(t1) 127 - la t2,ipi_decode 128 - sw t2,PT_PADSLOT5(t1) 129 - /* Save pre-disable value of TCStatus */ 130 - sw t0,PT_TCSTATUS(t1) 131 - j __smtc_ipi_vector 132 - nop 133 - END(self_ipi)
-102
arch/mips/kernel/smtc-proc.c
··· 1 - /* 2 - * /proc hooks for SMTC kernel 3 - * Copyright (C) 2005 Mips Technologies, Inc 4 - */ 5 - 6 - #include <linux/kernel.h> 7 - #include <linux/sched.h> 8 - #include <linux/cpumask.h> 9 - #include <linux/interrupt.h> 10 - 11 - #include <asm/cpu.h> 12 - #include <asm/processor.h> 13 - #include <linux/atomic.h> 14 - #include <asm/hardirq.h> 15 - #include <asm/mmu_context.h> 16 - #include <asm/mipsregs.h> 17 - #include <asm/cacheflush.h> 18 - #include <linux/proc_fs.h> 19 - #include <linux/seq_file.h> 20 - 21 - #include <asm/smtc_proc.h> 22 - 23 - /* 24 - * /proc diagnostic and statistics hooks 25 - */ 26 - 27 - /* 28 - * Statistics gathered 29 - */ 30 - unsigned long selfipis[NR_CPUS]; 31 - 32 - struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; 33 - 34 - atomic_t smtc_fpu_recoveries; 35 - 36 - static int smtc_proc_show(struct seq_file *m, void *v) 37 - { 38 - int i; 39 - extern unsigned long ebase; 40 - 41 - seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status); 42 - seq_printf(m, "Config7: 0x%08x\n", read_c0_config7()); 43 - seq_printf(m, "EBASE: 0x%08lx\n", ebase); 44 - seq_printf(m, "Counter Interrupts taken per CPU (TC)\n"); 45 - for (i=0; i < NR_CPUS; i++) 46 - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); 47 - seq_printf(m, "Self-IPIs by CPU:\n"); 48 - for(i = 0; i < NR_CPUS; i++) 49 - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 50 - seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", 51 - atomic_read(&smtc_fpu_recoveries)); 52 - return 0; 53 - } 54 - 55 - static int smtc_proc_open(struct inode *inode, struct file *file) 56 - { 57 - return single_open(file, smtc_proc_show, NULL); 58 - } 59 - 60 - static const struct file_operations smtc_proc_fops = { 61 - .open = smtc_proc_open, 62 - .read = seq_read, 63 - .llseek = seq_lseek, 64 - .release = single_release, 65 - }; 66 - 67 - void init_smtc_stats(void) 68 - { 69 - int i; 70 - 71 - for (i=0; i<NR_CPUS; i++) { 72 - smtc_cpu_stats[i].timerints = 0; 73 - 
smtc_cpu_stats[i].selfipis = 0; 74 - } 75 - 76 - atomic_set(&smtc_fpu_recoveries, 0); 77 - 78 - proc_create("smtc", 0444, NULL, &smtc_proc_fops); 79 - } 80 - 81 - static int proc_cpuinfo_chain_call(struct notifier_block *nfb, 82 - unsigned long action_unused, void *data) 83 - { 84 - struct proc_cpuinfo_notifier_args *pcn = data; 85 - struct seq_file *m = pcn->m; 86 - unsigned long n = pcn->n; 87 - 88 - if (!cpu_has_mipsmt) 89 - return NOTIFY_OK; 90 - 91 - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); 92 - seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); 93 - 94 - return NOTIFY_OK; 95 - } 96 - 97 - static int __init proc_cpuinfo_notifier_init(void) 98 - { 99 - return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0); 100 - } 101 - 102 - subsys_initcall(proc_cpuinfo_notifier_init);
-1528
arch/mips/kernel/smtc.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or 3 - * modify it under the terms of the GNU General Public License 4 - * as published by the Free Software Foundation; either version 2 5 - * of the License, or (at your option) any later version. 6 - * 7 - * This program is distributed in the hope that it will be useful, 8 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 - * GNU General Public License for more details. 11 - * 12 - * You should have received a copy of the GNU General Public License 13 - * along with this program; if not, write to the Free Software 14 - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 15 - * 16 - * Copyright (C) 2004 Mips Technologies, Inc 17 - * Copyright (C) 2008 Kevin D. Kissell 18 - */ 19 - 20 - #include <linux/clockchips.h> 21 - #include <linux/kernel.h> 22 - #include <linux/sched.h> 23 - #include <linux/smp.h> 24 - #include <linux/cpumask.h> 25 - #include <linux/interrupt.h> 26 - #include <linux/kernel_stat.h> 27 - #include <linux/module.h> 28 - #include <linux/ftrace.h> 29 - #include <linux/slab.h> 30 - 31 - #include <asm/cpu.h> 32 - #include <asm/processor.h> 33 - #include <linux/atomic.h> 34 - #include <asm/hardirq.h> 35 - #include <asm/hazards.h> 36 - #include <asm/irq.h> 37 - #include <asm/idle.h> 38 - #include <asm/mmu_context.h> 39 - #include <asm/mipsregs.h> 40 - #include <asm/cacheflush.h> 41 - #include <asm/time.h> 42 - #include <asm/addrspace.h> 43 - #include <asm/smtc.h> 44 - #include <asm/smtc_proc.h> 45 - #include <asm/setup.h> 46 - 47 - /* 48 - * SMTC Kernel needs to manipulate low-level CPU interrupt mask 49 - * in do_IRQ. These are passed in setup_irq_smtc() and stored 50 - * in this table. 
51 - */ 52 - unsigned long irq_hwmask[NR_IRQS]; 53 - 54 - #define LOCK_MT_PRA() \ 55 - local_irq_save(flags); \ 56 - mtflags = dmt() 57 - 58 - #define UNLOCK_MT_PRA() \ 59 - emt(mtflags); \ 60 - local_irq_restore(flags) 61 - 62 - #define LOCK_CORE_PRA() \ 63 - local_irq_save(flags); \ 64 - mtflags = dvpe() 65 - 66 - #define UNLOCK_CORE_PRA() \ 67 - evpe(mtflags); \ 68 - local_irq_restore(flags) 69 - 70 - /* 71 - * Data structures purely associated with SMTC parallelism 72 - */ 73 - 74 - 75 - /* 76 - * Table for tracking ASIDs whose lifetime is prolonged. 77 - */ 78 - 79 - asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; 80 - 81 - /* 82 - * Number of InterProcessor Interrupt (IPI) message buffers to allocate 83 - */ 84 - 85 - #define IPIBUF_PER_CPU 4 86 - 87 - struct smtc_ipi_q IPIQ[NR_CPUS]; 88 - static struct smtc_ipi_q freeIPIq; 89 - 90 - 91 - /* 92 - * Number of FPU contexts for each VPE 93 - */ 94 - 95 - static int smtc_nconf1[MAX_SMTC_VPES]; 96 - 97 - 98 - /* Forward declarations */ 99 - 100 - void ipi_decode(struct smtc_ipi *); 101 - static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); 102 - static void setup_cross_vpe_interrupts(unsigned int nvpe); 103 - void init_smtc_stats(void); 104 - 105 - /* Global SMTC Status */ 106 - 107 - unsigned int smtc_status; 108 - 109 - /* Boot command line configuration overrides */ 110 - 111 - static int vpe0limit; 112 - static int ipibuffers; 113 - static int nostlb; 114 - static int asidmask; 115 - unsigned long smtc_asid_mask = 0xff; 116 - 117 - static int __init vpe0tcs(char *str) 118 - { 119 - get_option(&str, &vpe0limit); 120 - 121 - return 1; 122 - } 123 - 124 - static int __init ipibufs(char *str) 125 - { 126 - get_option(&str, &ipibuffers); 127 - return 1; 128 - } 129 - 130 - static int __init stlb_disable(char *s) 131 - { 132 - nostlb = 1; 133 - return 1; 134 - } 135 - 136 - static int __init asidmask_set(char *str) 137 - { 138 - get_option(&str, &asidmask); 139 - switch (asidmask) { 140 - case 0x1: 
141 - case 0x3: 142 - case 0x7: 143 - case 0xf: 144 - case 0x1f: 145 - case 0x3f: 146 - case 0x7f: 147 - case 0xff: 148 - smtc_asid_mask = (unsigned long)asidmask; 149 - break; 150 - default: 151 - printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); 152 - } 153 - return 1; 154 - } 155 - 156 - __setup("vpe0tcs=", vpe0tcs); 157 - __setup("ipibufs=", ipibufs); 158 - __setup("nostlb", stlb_disable); 159 - __setup("asidmask=", asidmask_set); 160 - 161 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 162 - 163 - static int hang_trig; 164 - 165 - static int __init hangtrig_enable(char *s) 166 - { 167 - hang_trig = 1; 168 - return 1; 169 - } 170 - 171 - 172 - __setup("hangtrig", hangtrig_enable); 173 - 174 - #define DEFAULT_BLOCKED_IPI_LIMIT 32 175 - 176 - static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; 177 - 178 - static int __init tintq(char *str) 179 - { 180 - get_option(&str, &timerq_limit); 181 - return 1; 182 - } 183 - 184 - __setup("tintq=", tintq); 185 - 186 - static int imstuckcount[MAX_SMTC_VPES][8]; 187 - /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ 188 - static int vpemask[MAX_SMTC_VPES][8] = { 189 - {0, 0, 1, 0, 0, 0, 0, 1}, 190 - {0, 0, 0, 0, 0, 0, 0, 1} 191 - }; 192 - int tcnoprog[NR_CPUS]; 193 - static atomic_t idle_hook_initialized = ATOMIC_INIT(0); 194 - static int clock_hang_reported[NR_CPUS]; 195 - 196 - #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 197 - 198 - /* 199 - * Configure shared TLB - VPC configuration bit must be set by caller 200 - */ 201 - 202 - static void smtc_configure_tlb(void) 203 - { 204 - int i, tlbsiz, vpes; 205 - unsigned long mvpconf0; 206 - unsigned long config1val; 207 - 208 - /* Set up ASID preservation table */ 209 - for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) { 210 - for(i = 0; i < MAX_SMTC_ASIDS; i++) { 211 - smtc_live_asid[vpes][i] = 0; 212 - } 213 - } 214 - mvpconf0 = read_c0_mvpconf0(); 215 - 216 - if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) 217 - >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { 218 - /* If we 
have multiple VPEs, try to share the TLB */ 219 - if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { 220 - /* 221 - * If TLB sizing is programmable, shared TLB 222 - * size is the total available complement. 223 - * Otherwise, we have to take the sum of all 224 - * static VPE TLB entries. 225 - */ 226 - if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) 227 - >> MVPCONF0_PTLBE_SHIFT)) == 0) { 228 - /* 229 - * If there's more than one VPE, there had better 230 - * be more than one TC, because we need one to bind 231 - * to each VPE in turn to be able to read 232 - * its configuration state! 233 - */ 234 - settc(1); 235 - /* Stop the TC from doing anything foolish */ 236 - write_tc_c0_tchalt(TCHALT_H); 237 - mips_ihb(); 238 - /* No need to un-Halt - that happens later anyway */ 239 - for (i=0; i < vpes; i++) { 240 - write_tc_c0_tcbind(i); 241 - /* 242 - * To be 100% sure we're really getting the right 243 - * information, we exit the configuration state 244 - * and do an IHB after each rebinding. 245 - */ 246 - write_c0_mvpcontrol( 247 - read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); 248 - mips_ihb(); 249 - /* 250 - * Only count if the MMU Type indicated is TLB 251 - */ 252 - if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { 253 - config1val = read_vpe_c0_config1(); 254 - tlbsiz += ((config1val >> 25) & 0x3f) + 1; 255 - } 256 - 257 - /* Put core back in configuration state */ 258 - write_c0_mvpcontrol( 259 - read_c0_mvpcontrol() | MVPCONTROL_VPC ); 260 - mips_ihb(); 261 - } 262 - } 263 - write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); 264 - ehb(); 265 - 266 - /* 267 - * Setup kernel data structures to use software total, 268 - * rather than read the per-VPE Config1 value. The values 269 - * for "CPU 0" gets copied to all the other CPUs as part 270 - * of their initialization in smtc_cpu_setup(). 
271 - */ 272 - 273 - /* MIPS32 limits TLB indices to 64 */ 274 - if (tlbsiz > 64) 275 - tlbsiz = 64; 276 - cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; 277 - smtc_status |= SMTC_TLB_SHARED; 278 - local_flush_tlb_all(); 279 - 280 - printk("TLB of %d entry pairs shared by %d VPEs\n", 281 - tlbsiz, vpes); 282 - } else { 283 - printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); 284 - } 285 - } 286 - } 287 - 288 - 289 - /* 290 - * Incrementally build the CPU map out of constituent MIPS MT cores, 291 - * using the specified available VPEs and TCs. Plaform code needs 292 - * to ensure that each MIPS MT core invokes this routine on reset, 293 - * one at a time(!). 294 - * 295 - * This version of the build_cpu_map and prepare_cpus routines assumes 296 - * that *all* TCs of a MIPS MT core will be used for Linux, and that 297 - * they will be spread across *all* available VPEs (to minimise the 298 - * loss of efficiency due to exception service serialization). 299 - * An improved version would pick up configuration information and 300 - * possibly leave some TCs/VPEs as "slave" processors. 301 - * 302 - * Use c0_MVPConf0 to find out how many TCs are available, setting up 303 - * cpu_possible_mask and the logical/physical mappings. 304 - */ 305 - 306 - int __init smtc_build_cpu_map(int start_cpu_slot) 307 - { 308 - int i, ntcs; 309 - 310 - /* 311 - * The CPU map isn't actually used for anything at this point, 312 - * so it's not clear what else we should do apart from set 313 - * everything up so that "logical" = "physical". 
314 - */ 315 - ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 316 - for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { 317 - set_cpu_possible(i, true); 318 - __cpu_number_map[i] = i; 319 - __cpu_logical_map[i] = i; 320 - } 321 - #ifdef CONFIG_MIPS_MT_FPAFF 322 - /* Initialize map of CPUs with FPUs */ 323 - cpus_clear(mt_fpu_cpumask); 324 - #endif 325 - 326 - /* One of those TC's is the one booting, and not a secondary... */ 327 - printk("%i available secondary CPU TC(s)\n", i - 1); 328 - 329 - return i; 330 - } 331 - 332 - /* 333 - * Common setup before any secondaries are started 334 - * Make sure all CPUs are in a sensible state before we boot any of the 335 - * secondaries. 336 - * 337 - * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly 338 - * as possible across the available VPEs. 339 - */ 340 - 341 - static void smtc_tc_setup(int vpe, int tc, int cpu) 342 - { 343 - static int cp1contexts[MAX_SMTC_VPES]; 344 - 345 - /* 346 - * Make a local copy of the available FPU contexts in order 347 - * to keep track of TCs that can have one. 348 - */ 349 - if (tc == 1) 350 - { 351 - /* 352 - * FIXME: Multi-core SMTC hasn't been tested and the 353 - * maximum number of VPEs may change. 354 - */ 355 - cp1contexts[0] = smtc_nconf1[0] - 1; 356 - cp1contexts[1] = smtc_nconf1[1]; 357 - } 358 - 359 - settc(tc); 360 - write_tc_c0_tchalt(TCHALT_H); 361 - mips_ihb(); 362 - write_tc_c0_tcstatus((read_tc_c0_tcstatus() 363 - & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) 364 - | TCSTATUS_A); 365 - /* 366 - * TCContext gets an offset from the base of the IPIQ array 367 - * to be used in low-level code to detect the presence of 368 - * an active IPI queue. 369 - */ 370 - write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); 371 - 372 - /* Bind TC to VPE. */ 373 - write_tc_c0_tcbind(vpe); 374 - 375 - /* In general, all TCs should have the same cpu_data indications. 
*/ 376 - memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); 377 - 378 - /* Check to see if there is a FPU context available for this TC. */ 379 - if (!cp1contexts[vpe]) 380 - cpu_data[cpu].options &= ~MIPS_CPU_FPU; 381 - else 382 - cp1contexts[vpe]--; 383 - 384 - /* Store the TC and VPE into the cpu_data structure. */ 385 - cpu_data[cpu].vpe_id = vpe; 386 - cpu_data[cpu].tc_id = tc; 387 - 388 - /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */ 389 - cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; 390 - } 391 - 392 - /* 393 - * Tweak to get Count registers synced as closely as possible. The 394 - * value seems good for 34K-class cores. 395 - */ 396 - 397 - #define CP0_SKEW 8 398 - 399 - void smtc_prepare_cpus(int cpus) 400 - { 401 - int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; 402 - unsigned long flags; 403 - unsigned long val; 404 - int nipi; 405 - struct smtc_ipi *pipi; 406 - 407 - /* disable interrupts so we can disable MT */ 408 - local_irq_save(flags); 409 - /* disable MT so we can configure */ 410 - dvpe(); 411 - dmt(); 412 - 413 - spin_lock_init(&freeIPIq.lock); 414 - 415 - /* 416 - * We probably don't have as many VPEs as we do SMP "CPUs", 417 - * but it's possible - and in any case we'll never use more! 
418 - */ 419 - for (i=0; i<NR_CPUS; i++) { 420 - IPIQ[i].head = IPIQ[i].tail = NULL; 421 - spin_lock_init(&IPIQ[i].lock); 422 - IPIQ[i].depth = 0; 423 - IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ 424 - } 425 - 426 - /* cpu_data index starts at zero */ 427 - cpu = 0; 428 - cpu_data[cpu].vpe_id = 0; 429 - cpu_data[cpu].tc_id = 0; 430 - cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; 431 - cpu++; 432 - 433 - /* Report on boot-time options */ 434 - mips_mt_set_cpuoptions(); 435 - if (vpelimit > 0) 436 - printk("Limit of %d VPEs set\n", vpelimit); 437 - if (tclimit > 0) 438 - printk("Limit of %d TCs set\n", tclimit); 439 - if (nostlb) { 440 - printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); 441 - } 442 - if (asidmask) 443 - printk("ASID mask value override to 0x%x\n", asidmask); 444 - 445 - /* Temporary */ 446 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 447 - if (hang_trig) 448 - printk("Logic Analyser Trigger on suspected TC hang\n"); 449 - #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 450 - 451 - /* Put MVPE's into 'configuration state' */ 452 - write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); 453 - 454 - val = read_c0_mvpconf0(); 455 - nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 456 - if (vpelimit > 0 && nvpe > vpelimit) 457 - nvpe = vpelimit; 458 - ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 459 - if (ntc > NR_CPUS) 460 - ntc = NR_CPUS; 461 - if (tclimit > 0 && ntc > tclimit) 462 - ntc = tclimit; 463 - slop = ntc % nvpe; 464 - for (i = 0; i < nvpe; i++) { 465 - tcpervpe[i] = ntc / nvpe; 466 - if (slop) { 467 - if((slop - i) > 0) tcpervpe[i]++; 468 - } 469 - } 470 - /* Handle command line override for VPE0 */ 471 - if (vpe0limit > ntc) vpe0limit = ntc; 472 - if (vpe0limit > 0) { 473 - int slopslop; 474 - if (vpe0limit < tcpervpe[0]) { 475 - /* Reducing TC count - distribute to others */ 476 - slop = tcpervpe[0] - vpe0limit; 477 - slopslop = slop % (nvpe - 1); 478 - tcpervpe[0] = vpe0limit; 479 
- for (i = 1; i < nvpe; i++) { 480 - tcpervpe[i] += slop / (nvpe - 1); 481 - if(slopslop && ((slopslop - (i - 1) > 0))) 482 - tcpervpe[i]++; 483 - } 484 - } else if (vpe0limit > tcpervpe[0]) { 485 - /* Increasing TC count - steal from others */ 486 - slop = vpe0limit - tcpervpe[0]; 487 - slopslop = slop % (nvpe - 1); 488 - tcpervpe[0] = vpe0limit; 489 - for (i = 1; i < nvpe; i++) { 490 - tcpervpe[i] -= slop / (nvpe - 1); 491 - if(slopslop && ((slopslop - (i - 1) > 0))) 492 - tcpervpe[i]--; 493 - } 494 - } 495 - } 496 - 497 - /* Set up shared TLB */ 498 - smtc_configure_tlb(); 499 - 500 - for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { 501 - /* Get number of CP1 contexts for each VPE. */ 502 - if (tc == 0) 503 - { 504 - /* 505 - * Do not call settc() for TC0 or the FPU context 506 - * value will be incorrect. Besides, we know that 507 - * we are TC0 anyway. 508 - */ 509 - smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() & 510 - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); 511 - if (nvpe == 2) 512 - { 513 - settc(1); 514 - smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() & 515 - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); 516 - settc(0); 517 - } 518 - } 519 - if (tcpervpe[vpe] == 0) 520 - continue; 521 - if (vpe != 0) 522 - printk(", "); 523 - printk("VPE %d: TC", vpe); 524 - for (i = 0; i < tcpervpe[vpe]; i++) { 525 - /* 526 - * TC 0 is bound to VPE 0 at reset, 527 - * and is presumably executing this 528 - * code. Leave it alone! 529 - */ 530 - if (tc != 0) { 531 - smtc_tc_setup(vpe, tc, cpu); 532 - if (vpe != 0) { 533 - /* 534 - * Set MVP bit (possibly again). Do it 535 - * here to catch CPUs that have no TCs 536 - * bound to the VPE at reset. In that 537 - * case, a TC must be bound to the VPE 538 - * before we can set VPEControl[MVP] 539 - */ 540 - write_vpe_c0_vpeconf0( 541 - read_vpe_c0_vpeconf0() | 542 - VPECONF0_MVP); 543 - } 544 - cpu++; 545 - } 546 - printk(" %d", tc); 547 - tc++; 548 - } 549 - if (vpe != 0) { 550 - /* 551 - * Allow this VPE to control others. 
552 - */ 553 - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | 554 - VPECONF0_MVP); 555 - 556 - /* 557 - * Clear any stale software interrupts from VPE's Cause 558 - */ 559 - write_vpe_c0_cause(0); 560 - 561 - /* 562 - * Clear ERL/EXL of VPEs other than 0 563 - * and set restricted interrupt enable/mask. 564 - */ 565 - write_vpe_c0_status((read_vpe_c0_status() 566 - & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) 567 - | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 568 - | ST0_IE)); 569 - /* 570 - * set config to be the same as vpe0, 571 - * particularly kseg0 coherency alg 572 - */ 573 - write_vpe_c0_config(read_c0_config()); 574 - /* Clear any pending timer interrupt */ 575 - write_vpe_c0_compare(0); 576 - /* Propagate Config7 */ 577 - write_vpe_c0_config7(read_c0_config7()); 578 - write_vpe_c0_count(read_c0_count() + CP0_SKEW); 579 - ehb(); 580 - } 581 - /* enable multi-threading within VPE */ 582 - write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); 583 - /* enable the VPE */ 584 - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 585 - } 586 - 587 - /* 588 - * Pull any physically present but unused TCs out of circulation. 589 - */ 590 - while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { 591 - set_cpu_possible(tc, false); 592 - set_cpu_present(tc, false); 593 - tc++; 594 - } 595 - 596 - /* release config state */ 597 - write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); 598 - 599 - printk("\n"); 600 - 601 - /* Set up coprocessor affinity CPU mask(s) */ 602 - 603 - #ifdef CONFIG_MIPS_MT_FPAFF 604 - for (tc = 0; tc < ntc; tc++) { 605 - if (cpu_data[tc].options & MIPS_CPU_FPU) 606 - cpu_set(tc, mt_fpu_cpumask); 607 - } 608 - #endif 609 - 610 - /* set up ipi interrupts... */ 611 - 612 - /* If we have multiple VPEs running, set up the cross-VPE interrupt */ 613 - 614 - setup_cross_vpe_interrupts(nvpe); 615 - 616 - /* Set up queue of free IPI "messages". 
*/ 617 - nipi = NR_CPUS * IPIBUF_PER_CPU; 618 - if (ipibuffers > 0) 619 - nipi = ipibuffers; 620 - 621 - pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); 622 - if (pipi == NULL) 623 - panic("kmalloc of IPI message buffers failed"); 624 - else 625 - printk("IPI buffer pool of %d buffers\n", nipi); 626 - for (i = 0; i < nipi; i++) { 627 - smtc_ipi_nq(&freeIPIq, pipi); 628 - pipi++; 629 - } 630 - 631 - /* Arm multithreading and enable other VPEs - but all TCs are Halted */ 632 - emt(EMT_ENABLE); 633 - evpe(EVPE_ENABLE); 634 - local_irq_restore(flags); 635 - /* Initialize SMTC /proc statistics/diagnostics */ 636 - init_smtc_stats(); 637 - } 638 - 639 - 640 - /* 641 - * Setup the PC, SP, and GP of a secondary processor and start it 642 - * running! 643 - * smp_bootstrap is the place to resume from 644 - * __KSTK_TOS(idle) is apparently the stack pointer 645 - * (unsigned long)idle->thread_info the gp 646 - * 647 - */ 648 - void smtc_boot_secondary(int cpu, struct task_struct *idle) 649 - { 650 - extern u32 kernelsp[NR_CPUS]; 651 - unsigned long flags; 652 - int mtflags; 653 - 654 - LOCK_MT_PRA(); 655 - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 656 - dvpe(); 657 - } 658 - settc(cpu_data[cpu].tc_id); 659 - 660 - /* pc */ 661 - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); 662 - 663 - /* stack pointer */ 664 - kernelsp[cpu] = __KSTK_TOS(idle); 665 - write_tc_gpr_sp(__KSTK_TOS(idle)); 666 - 667 - /* global pointer */ 668 - write_tc_gpr_gp((unsigned long)task_thread_info(idle)); 669 - 670 - smtc_status |= SMTC_MTC_ACTIVE; 671 - write_tc_c0_tchalt(0); 672 - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 673 - evpe(EVPE_ENABLE); 674 - } 675 - UNLOCK_MT_PRA(); 676 - } 677 - 678 - void smtc_init_secondary(void) 679 - { 680 - } 681 - 682 - void smtc_smp_finish(void) 683 - { 684 - int cpu = smp_processor_id(); 685 - 686 - /* 687 - * Lowest-numbered CPU per VPE starts a clock tick. 
688 - * Like per_cpu_trap_init() hack, this assumes that 689 - * SMTC init code assigns TCs consdecutively and 690 - * in ascending order across available VPEs. 691 - */ 692 - if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) 693 - write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); 694 - 695 - local_irq_enable(); 696 - 697 - printk("TC %d going on-line as CPU %d\n", 698 - cpu_data[smp_processor_id()].tc_id, smp_processor_id()); 699 - } 700 - 701 - void smtc_cpus_done(void) 702 - { 703 - } 704 - 705 - /* 706 - * Support for SMTC-optimized driver IRQ registration 707 - */ 708 - 709 - /* 710 - * SMTC Kernel needs to manipulate low-level CPU interrupt mask 711 - * in do_IRQ. These are passed in setup_irq_smtc() and stored 712 - * in this table. 713 - */ 714 - 715 - int setup_irq_smtc(unsigned int irq, struct irqaction * new, 716 - unsigned long hwmask) 717 - { 718 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 719 - unsigned int vpe = current_cpu_data.vpe_id; 720 - 721 - vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; 722 - #endif 723 - irq_hwmask[irq] = hwmask; 724 - 725 - return setup_irq(irq, new); 726 - } 727 - 728 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 729 - /* 730 - * Support for IRQ affinity to TCs 731 - */ 732 - 733 - void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) 734 - { 735 - /* 736 - * If a "fast path" cache of quickly decodable affinity state 737 - * is maintained, this is where it gets done, on a call up 738 - * from the platform affinity code. 739 - */ 740 - } 741 - 742 - void smtc_forward_irq(struct irq_data *d) 743 - { 744 - unsigned int irq = d->irq; 745 - int target; 746 - 747 - /* 748 - * OK wise guy, now figure out how to get the IRQ 749 - * to be serviced on an authorized "CPU". 750 - * 751 - * Ideally, to handle the situation where an IRQ has multiple 752 - * eligible CPUS, we would maintain state per IRQ that would 753 - * allow a fair distribution of service requests. 
Since the 754 - * expected use model is any-or-only-one, for simplicity 755 - * and efficiency, we just pick the easiest one to find. 756 - */ 757 - 758 - target = cpumask_first(d->affinity); 759 - 760 - /* 761 - * We depend on the platform code to have correctly processed 762 - * IRQ affinity change requests to ensure that the IRQ affinity 763 - * mask has been purged of bits corresponding to nonexistent and 764 - * offline "CPUs", and to TCs bound to VPEs other than the VPE 765 - * connected to the physical interrupt input for the interrupt 766 - * in question. Otherwise we have a nasty problem with interrupt 767 - * mask management. This is best handled in non-performance-critical 768 - * platform IRQ affinity setting code, to minimize interrupt-time 769 - * checks. 770 - */ 771 - 772 - /* If no one is eligible, service locally */ 773 - if (target >= NR_CPUS) 774 - do_IRQ_no_affinity(irq); 775 - else 776 - smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); 777 - } 778 - 779 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 780 - 781 - /* 782 - * IPI model for SMTC is tricky, because interrupts aren't TC-specific. 783 - * Within a VPE one TC can interrupt another by different approaches. 784 - * The easiest to get right would probably be to make all TCs except 785 - * the target IXMT and set a software interrupt, but an IXMT-based 786 - * scheme requires that a handler must run before a new IPI could 787 - * be sent, which would break the "broadcast" loops in MIPS MT. 788 - * A more gonzo approach within a VPE is to halt the TC, extract 789 - * its Restart, Status, and a couple of GPRs, and program the Restart 790 - * address to emulate an interrupt. 791 - * 792 - * Within a VPE, one can be confident that the target TC isn't in 793 - * a critical EXL state when halted, since the write to the Halt 794 - * register could not have issued on the writing thread if the 795 - * halting thread had EXL set. So k0 and k1 of the target TC 796 - * can be used by the injection code. 
Across VPEs, one can't 797 - * be certain that the target TC isn't in a critical exception 798 - * state. So we try a two-step process of sending a software 799 - * interrupt to the target VPE, which either handles the event 800 - * itself (if it was the target) or injects the event within 801 - * the VPE. 802 - */ 803 - 804 - static void smtc_ipi_qdump(void) 805 - { 806 - int i; 807 - struct smtc_ipi *temp; 808 - 809 - for (i = 0; i < NR_CPUS ;i++) { 810 - pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", 811 - i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, 812 - IPIQ[i].depth); 813 - temp = IPIQ[i].head; 814 - 815 - while (temp != IPIQ[i].tail) { 816 - pr_debug("%d %d %d: ", temp->type, temp->dest, 817 - (int)temp->arg); 818 - #ifdef SMTC_IPI_DEBUG 819 - pr_debug("%u %lu\n", temp->sender, temp->stamp); 820 - #else 821 - pr_debug("\n"); 822 - #endif 823 - temp = temp->flink; 824 - } 825 - } 826 - } 827 - 828 - /* 829 - * The standard atomic.h primitives don't quite do what we want 830 - * here: We need an atomic add-and-return-previous-value (which 831 - * could be done with atomic_add_return and a decrement) and an 832 - * atomic set/zero-and-return-previous-value (which can't really 833 - * be done with the atomic.h primitives). And since this is 834 - * MIPS MT, we can assume that we have LL/SC. 
835 - */ 836 - static inline int atomic_postincrement(atomic_t *v) 837 - { 838 - unsigned long result; 839 - 840 - unsigned long temp; 841 - 842 - __asm__ __volatile__( 843 - "1: ll %0, %2 \n" 844 - " addu %1, %0, 1 \n" 845 - " sc %1, %2 \n" 846 - " beqz %1, 1b \n" 847 - __WEAK_LLSC_MB 848 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) 849 - : "m" (v->counter) 850 - : "memory"); 851 - 852 - return result; 853 - } 854 - 855 - void smtc_send_ipi(int cpu, int type, unsigned int action) 856 - { 857 - int tcstatus; 858 - struct smtc_ipi *pipi; 859 - unsigned long flags; 860 - int mtflags; 861 - unsigned long tcrestart; 862 - int set_resched_flag = (type == LINUX_SMP_IPI && 863 - action == SMP_RESCHEDULE_YOURSELF); 864 - 865 - if (cpu == smp_processor_id()) { 866 - printk("Cannot Send IPI to self!\n"); 867 - return; 868 - } 869 - if (set_resched_flag && IPIQ[cpu].resched_flag != 0) 870 - return; /* There is a reschedule queued already */ 871 - 872 - /* Set up a descriptor, to be delivered either promptly or queued */ 873 - pipi = smtc_ipi_dq(&freeIPIq); 874 - if (pipi == NULL) { 875 - bust_spinlocks(1); 876 - mips_mt_regdump(dvpe()); 877 - panic("IPI Msg. Buffers Depleted"); 878 - } 879 - pipi->type = type; 880 - pipi->arg = (void *)action; 881 - pipi->dest = cpu; 882 - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 883 - /* If not on same VPE, enqueue and send cross-VPE interrupt */ 884 - IPIQ[cpu].resched_flag |= set_resched_flag; 885 - smtc_ipi_nq(&IPIQ[cpu], pipi); 886 - LOCK_CORE_PRA(); 887 - settc(cpu_data[cpu].tc_id); 888 - write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); 889 - UNLOCK_CORE_PRA(); 890 - } else { 891 - /* 892 - * Not sufficient to do a LOCK_MT_PRA (dmt) here, 893 - * since ASID shootdown on the other VPE may 894 - * collide with this operation. 
895 - */ 896 - LOCK_CORE_PRA(); 897 - settc(cpu_data[cpu].tc_id); 898 - /* Halt the targeted TC */ 899 - write_tc_c0_tchalt(TCHALT_H); 900 - mips_ihb(); 901 - 902 - /* 903 - * Inspect TCStatus - if IXMT is set, we have to queue 904 - * a message. Otherwise, we set up the "interrupt" 905 - * of the other TC 906 - */ 907 - tcstatus = read_tc_c0_tcstatus(); 908 - 909 - if ((tcstatus & TCSTATUS_IXMT) != 0) { 910 - /* 911 - * If we're in the the irq-off version of the wait 912 - * loop, we need to force exit from the wait and 913 - * do a direct post of the IPI. 914 - */ 915 - if (cpu_wait == r4k_wait_irqoff) { 916 - tcrestart = read_tc_c0_tcrestart(); 917 - if (address_is_in_r4k_wait_irqoff(tcrestart)) { 918 - write_tc_c0_tcrestart(__pastwait); 919 - tcstatus &= ~TCSTATUS_IXMT; 920 - write_tc_c0_tcstatus(tcstatus); 921 - goto postdirect; 922 - } 923 - } 924 - /* 925 - * Otherwise we queue the message for the target TC 926 - * to pick up when he does a local_irq_restore() 927 - */ 928 - write_tc_c0_tchalt(0); 929 - UNLOCK_CORE_PRA(); 930 - IPIQ[cpu].resched_flag |= set_resched_flag; 931 - smtc_ipi_nq(&IPIQ[cpu], pipi); 932 - } else { 933 - postdirect: 934 - post_direct_ipi(cpu, pipi); 935 - write_tc_c0_tchalt(0); 936 - UNLOCK_CORE_PRA(); 937 - } 938 - } 939 - } 940 - 941 - /* 942 - * Send IPI message to Halted TC, TargTC/TargVPE already having been set 943 - */ 944 - static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) 945 - { 946 - struct pt_regs *kstack; 947 - unsigned long tcstatus; 948 - unsigned long tcrestart; 949 - extern u32 kernelsp[NR_CPUS]; 950 - extern void __smtc_ipi_vector(void); 951 - //printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu); 952 - 953 - /* Extract Status, EPC from halted TC */ 954 - tcstatus = read_tc_c0_tcstatus(); 955 - tcrestart = read_tc_c0_tcrestart(); 956 - /* If TCRestart indicates a WAIT instruction, advance the PC */ 957 - if ((tcrestart & 0x80000000) 958 - && ((*(unsigned int *)tcrestart & 0xfe00003f) == 
0x42000020)) { 959 - tcrestart += 4; 960 - } 961 - /* 962 - * Save on TC's future kernel stack 963 - * 964 - * CU bit of Status is indicator that TC was 965 - * already running on a kernel stack... 966 - */ 967 - if (tcstatus & ST0_CU0) { 968 - /* Note that this "- 1" is pointer arithmetic */ 969 - kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; 970 - } else { 971 - kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; 972 - } 973 - 974 - kstack->cp0_epc = (long)tcrestart; 975 - /* Save TCStatus */ 976 - kstack->cp0_tcstatus = tcstatus; 977 - /* Pass token of operation to be performed kernel stack pad area */ 978 - kstack->pad0[4] = (unsigned long)pipi; 979 - /* Pass address of function to be called likewise */ 980 - kstack->pad0[5] = (unsigned long)&ipi_decode; 981 - /* Set interrupt exempt and kernel mode */ 982 - tcstatus |= TCSTATUS_IXMT; 983 - tcstatus &= ~TCSTATUS_TKSU; 984 - write_tc_c0_tcstatus(tcstatus); 985 - ehb(); 986 - /* Set TC Restart address to be SMTC IPI vector */ 987 - write_tc_c0_tcrestart(__smtc_ipi_vector); 988 - } 989 - 990 - static void ipi_resched_interrupt(void) 991 - { 992 - scheduler_ipi(); 993 - } 994 - 995 - static void ipi_call_interrupt(void) 996 - { 997 - /* Invoke generic function invocation code in smp.c */ 998 - smp_call_function_interrupt(); 999 - } 1000 - 1001 - DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); 1002 - 1003 - static void __irq_entry smtc_clock_tick_interrupt(void) 1004 - { 1005 - unsigned int cpu = smp_processor_id(); 1006 - struct clock_event_device *cd; 1007 - int irq = MIPS_CPU_IRQ_BASE + 1; 1008 - 1009 - irq_enter(); 1010 - kstat_incr_irq_this_cpu(irq); 1011 - cd = &per_cpu(mips_clockevent_device, cpu); 1012 - cd->event_handler(cd); 1013 - irq_exit(); 1014 - } 1015 - 1016 - void ipi_decode(struct smtc_ipi *pipi) 1017 - { 1018 - void *arg_copy = pipi->arg; 1019 - int type_copy = pipi->type; 1020 - 1021 - smtc_ipi_nq(&freeIPIq, pipi); 1022 - 1023 - switch (type_copy) { 1024 - case 
SMTC_CLOCK_TICK: 1025 - smtc_clock_tick_interrupt(); 1026 - break; 1027 - 1028 - case LINUX_SMP_IPI: 1029 - switch ((int)arg_copy) { 1030 - case SMP_RESCHEDULE_YOURSELF: 1031 - ipi_resched_interrupt(); 1032 - break; 1033 - case SMP_CALL_FUNCTION: 1034 - ipi_call_interrupt(); 1035 - break; 1036 - default: 1037 - printk("Impossible SMTC IPI Argument %p\n", arg_copy); 1038 - break; 1039 - } 1040 - break; 1041 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 1042 - case IRQ_AFFINITY_IPI: 1043 - /* 1044 - * Accept a "forwarded" interrupt that was initially 1045 - * taken by a TC who doesn't have affinity for the IRQ. 1046 - */ 1047 - do_IRQ_no_affinity((int)arg_copy); 1048 - break; 1049 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 1050 - default: 1051 - printk("Impossible SMTC IPI Type 0x%x\n", type_copy); 1052 - break; 1053 - } 1054 - } 1055 - 1056 - /* 1057 - * Similar to smtc_ipi_replay(), but invoked from context restore, 1058 - * so it reuses the current exception frame rather than set up a 1059 - * new one with self_ipi. 1060 - */ 1061 - 1062 - void deferred_smtc_ipi(void) 1063 - { 1064 - int cpu = smp_processor_id(); 1065 - 1066 - /* 1067 - * Test is not atomic, but much faster than a dequeue, 1068 - * and the vast majority of invocations will have a null queue. 1069 - * If irq_disabled when this was called, then any IPIs queued 1070 - * after we test last will be taken on the next irq_enable/restore. 1071 - * If interrupts were enabled, then any IPIs added after the 1072 - * last test will be taken directly. 1073 - */ 1074 - 1075 - while (IPIQ[cpu].head != NULL) { 1076 - struct smtc_ipi_q *q = &IPIQ[cpu]; 1077 - struct smtc_ipi *pipi; 1078 - unsigned long flags; 1079 - 1080 - /* 1081 - * It may be possible we'll come in with interrupts 1082 - * already enabled. 
1083 - */ 1084 - local_irq_save(flags); 1085 - spin_lock(&q->lock); 1086 - pipi = __smtc_ipi_dq(q); 1087 - spin_unlock(&q->lock); 1088 - if (pipi != NULL) { 1089 - if (pipi->type == LINUX_SMP_IPI && 1090 - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) 1091 - IPIQ[cpu].resched_flag = 0; 1092 - ipi_decode(pipi); 1093 - } 1094 - /* 1095 - * The use of the __raw_local restore isn't 1096 - * as obviously necessary here as in smtc_ipi_replay(), 1097 - * but it's more efficient, given that we're already 1098 - * running down the IPI queue. 1099 - */ 1100 - __arch_local_irq_restore(flags); 1101 - } 1102 - } 1103 - 1104 - /* 1105 - * Cross-VPE interrupts in the SMTC prototype use "software interrupts" 1106 - * set via cross-VPE MTTR manipulation of the Cause register. It would be 1107 - * in some regards preferable to have external logic for "doorbell" hardware 1108 - * interrupts. 1109 - */ 1110 - 1111 - static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; 1112 - 1113 - static irqreturn_t ipi_interrupt(int irq, void *dev_idm) 1114 - { 1115 - int my_vpe = cpu_data[smp_processor_id()].vpe_id; 1116 - int my_tc = cpu_data[smp_processor_id()].tc_id; 1117 - int cpu; 1118 - struct smtc_ipi *pipi; 1119 - unsigned long tcstatus; 1120 - int sent; 1121 - unsigned long flags; 1122 - unsigned int mtflags; 1123 - unsigned int vpflags; 1124 - 1125 - /* 1126 - * So long as cross-VPE interrupts are done via 1127 - * MFTR/MTTR read-modify-writes of Cause, we need 1128 - * to stop other VPEs whenever the local VPE does 1129 - * anything similar. 1130 - */ 1131 - local_irq_save(flags); 1132 - vpflags = dvpe(); 1133 - clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); 1134 - set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); 1135 - irq_enable_hazard(); 1136 - evpe(vpflags); 1137 - local_irq_restore(flags); 1138 - 1139 - /* 1140 - * Cross-VPE Interrupt handler: Try to directly deliver IPIs 1141 - * queued for TCs on this VPE other than the current one. 
1142 - * Return-from-interrupt should cause us to drain the queue 1143 - * for the current TC, so we ought not to have to do it explicitly here. 1144 - */ 1145 - 1146 - for_each_online_cpu(cpu) { 1147 - if (cpu_data[cpu].vpe_id != my_vpe) 1148 - continue; 1149 - 1150 - pipi = smtc_ipi_dq(&IPIQ[cpu]); 1151 - if (pipi != NULL) { 1152 - if (cpu_data[cpu].tc_id != my_tc) { 1153 - sent = 0; 1154 - LOCK_MT_PRA(); 1155 - settc(cpu_data[cpu].tc_id); 1156 - write_tc_c0_tchalt(TCHALT_H); 1157 - mips_ihb(); 1158 - tcstatus = read_tc_c0_tcstatus(); 1159 - if ((tcstatus & TCSTATUS_IXMT) == 0) { 1160 - post_direct_ipi(cpu, pipi); 1161 - sent = 1; 1162 - } 1163 - write_tc_c0_tchalt(0); 1164 - UNLOCK_MT_PRA(); 1165 - if (!sent) { 1166 - smtc_ipi_req(&IPIQ[cpu], pipi); 1167 - } 1168 - } else { 1169 - /* 1170 - * ipi_decode() should be called 1171 - * with interrupts off 1172 - */ 1173 - local_irq_save(flags); 1174 - if (pipi->type == LINUX_SMP_IPI && 1175 - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) 1176 - IPIQ[cpu].resched_flag = 0; 1177 - ipi_decode(pipi); 1178 - local_irq_restore(flags); 1179 - } 1180 - } 1181 - } 1182 - 1183 - return IRQ_HANDLED; 1184 - } 1185 - 1186 - static void ipi_irq_dispatch(void) 1187 - { 1188 - do_IRQ(cpu_ipi_irq); 1189 - } 1190 - 1191 - static struct irqaction irq_ipi = { 1192 - .handler = ipi_interrupt, 1193 - .flags = IRQF_PERCPU, 1194 - .name = "SMTC_IPI" 1195 - }; 1196 - 1197 - static void setup_cross_vpe_interrupts(unsigned int nvpe) 1198 - { 1199 - if (nvpe < 1) 1200 - return; 1201 - 1202 - if (!cpu_has_vint) 1203 - panic("SMTC Kernel requires Vectored Interrupt support"); 1204 - 1205 - set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); 1206 - 1207 - setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); 1208 - 1209 - irq_set_handler(cpu_ipi_irq, handle_percpu_irq); 1210 - } 1211 - 1212 - /* 1213 - * SMTC-specific hacks invoked from elsewhere in the kernel. 
1214 - */ 1215 - 1216 - /* 1217 - * smtc_ipi_replay is called from raw_local_irq_restore 1218 - */ 1219 - 1220 - void smtc_ipi_replay(void) 1221 - { 1222 - unsigned int cpu = smp_processor_id(); 1223 - 1224 - /* 1225 - * To the extent that we've ever turned interrupts off, 1226 - * we may have accumulated deferred IPIs. This is subtle. 1227 - * we should be OK: If we pick up something and dispatch 1228 - * it here, that's great. If we see nothing, but concurrent 1229 - * with this operation, another TC sends us an IPI, IXMT 1230 - * is clear, and we'll handle it as a real pseudo-interrupt 1231 - * and not a pseudo-pseudo interrupt. The important thing 1232 - * is to do the last check for queued message *after* the 1233 - * re-enabling of interrupts. 1234 - */ 1235 - while (IPIQ[cpu].head != NULL) { 1236 - struct smtc_ipi_q *q = &IPIQ[cpu]; 1237 - struct smtc_ipi *pipi; 1238 - unsigned long flags; 1239 - 1240 - /* 1241 - * It's just possible we'll come in with interrupts 1242 - * already enabled. 1243 - */ 1244 - local_irq_save(flags); 1245 - 1246 - spin_lock(&q->lock); 1247 - pipi = __smtc_ipi_dq(q); 1248 - spin_unlock(&q->lock); 1249 - /* 1250 - ** But use a raw restore here to avoid recursion. 1251 - */ 1252 - __arch_local_irq_restore(flags); 1253 - 1254 - if (pipi) { 1255 - self_ipi(pipi); 1256 - smtc_cpu_stats[cpu].selfipis++; 1257 - } 1258 - } 1259 - } 1260 - 1261 - EXPORT_SYMBOL(smtc_ipi_replay); 1262 - 1263 - void smtc_idle_loop_hook(void) 1264 - { 1265 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 1266 - int im; 1267 - int flags; 1268 - int mtflags; 1269 - int bit; 1270 - int vpe; 1271 - int tc; 1272 - int hook_ntcs; 1273 - /* 1274 - * printk within DMT-protected regions can deadlock, 1275 - * so buffer diagnostic messages for later output. 
1276 - */ 1277 - char *pdb_msg; 1278 - char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ 1279 - 1280 - if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ 1281 - if (atomic_add_return(1, &idle_hook_initialized) == 1) { 1282 - int mvpconf0; 1283 - /* Tedious stuff to just do once */ 1284 - mvpconf0 = read_c0_mvpconf0(); 1285 - hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 1286 - if (hook_ntcs > NR_CPUS) 1287 - hook_ntcs = NR_CPUS; 1288 - for (tc = 0; tc < hook_ntcs; tc++) { 1289 - tcnoprog[tc] = 0; 1290 - clock_hang_reported[tc] = 0; 1291 - } 1292 - for (vpe = 0; vpe < 2; vpe++) 1293 - for (im = 0; im < 8; im++) 1294 - imstuckcount[vpe][im] = 0; 1295 - printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); 1296 - atomic_set(&idle_hook_initialized, 1000); 1297 - } else { 1298 - /* Someone else is initializing in parallel - let 'em finish */ 1299 - while (atomic_read(&idle_hook_initialized) < 1000) 1300 - ; 1301 - } 1302 - } 1303 - 1304 - /* Have we stupidly left IXMT set somewhere? */ 1305 - if (read_c0_tcstatus() & 0x400) { 1306 - write_c0_tcstatus(read_c0_tcstatus() & ~0x400); 1307 - ehb(); 1308 - printk("Dangling IXMT in cpu_idle()\n"); 1309 - } 1310 - 1311 - /* Have we stupidly left an IM bit turned off? 
*/ 1312 - #define IM_LIMIT 2000 1313 - local_irq_save(flags); 1314 - mtflags = dmt(); 1315 - pdb_msg = &id_ho_db_msg[0]; 1316 - im = read_c0_status(); 1317 - vpe = current_cpu_data.vpe_id; 1318 - for (bit = 0; bit < 8; bit++) { 1319 - /* 1320 - * In current prototype, I/O interrupts 1321 - * are masked for VPE > 0 1322 - */ 1323 - if (vpemask[vpe][bit]) { 1324 - if (!(im & (0x100 << bit))) 1325 - imstuckcount[vpe][bit]++; 1326 - else 1327 - imstuckcount[vpe][bit] = 0; 1328 - if (imstuckcount[vpe][bit] > IM_LIMIT) { 1329 - set_c0_status(0x100 << bit); 1330 - ehb(); 1331 - imstuckcount[vpe][bit] = 0; 1332 - pdb_msg += sprintf(pdb_msg, 1333 - "Dangling IM %d fixed for VPE %d\n", bit, 1334 - vpe); 1335 - } 1336 - } 1337 - } 1338 - 1339 - emt(mtflags); 1340 - local_irq_restore(flags); 1341 - if (pdb_msg != &id_ho_db_msg[0]) 1342 - printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); 1343 - #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 1344 - 1345 - smtc_ipi_replay(); 1346 - } 1347 - 1348 - void smtc_soft_dump(void) 1349 - { 1350 - int i; 1351 - 1352 - printk("Counter Interrupts taken per CPU (TC)\n"); 1353 - for (i=0; i < NR_CPUS; i++) { 1354 - printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); 1355 - } 1356 - printk("Self-IPI invocations:\n"); 1357 - for (i=0; i < NR_CPUS; i++) { 1358 - printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 1359 - } 1360 - smtc_ipi_qdump(); 1361 - printk("%d Recoveries of \"stolen\" FPU\n", 1362 - atomic_read(&smtc_fpu_recoveries)); 1363 - } 1364 - 1365 - 1366 - /* 1367 - * TLB management routines special to SMTC 1368 - */ 1369 - 1370 - void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) 1371 - { 1372 - unsigned long flags, mtflags, tcstat, prevhalt, asid; 1373 - int tlb, i; 1374 - 1375 - /* 1376 - * It would be nice to be able to use a spinlock here, 1377 - * but this is invoked from within TLB flush routines 1378 - * that protect themselves with DVPE, so if a lock is 1379 - * held by another TC, it'll never be freed. 
1380 - * 1381 - * DVPE/DMT must not be done with interrupts enabled, 1382 - * so even so most callers will already have disabled 1383 - * them, let's be really careful... 1384 - */ 1385 - 1386 - local_irq_save(flags); 1387 - if (smtc_status & SMTC_TLB_SHARED) { 1388 - mtflags = dvpe(); 1389 - tlb = 0; 1390 - } else { 1391 - mtflags = dmt(); 1392 - tlb = cpu_data[cpu].vpe_id; 1393 - } 1394 - asid = asid_cache(cpu); 1395 - 1396 - do { 1397 - if (!((asid += ASID_INC) & ASID_MASK) ) { 1398 - if (cpu_has_vtag_icache) 1399 - flush_icache_all(); 1400 - /* Traverse all online CPUs (hack requires contiguous range) */ 1401 - for_each_online_cpu(i) { 1402 - /* 1403 - * We don't need to worry about our own CPU, nor those of 1404 - * CPUs who don't share our TLB. 1405 - */ 1406 - if ((i != smp_processor_id()) && 1407 - ((smtc_status & SMTC_TLB_SHARED) || 1408 - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { 1409 - settc(cpu_data[i].tc_id); 1410 - prevhalt = read_tc_c0_tchalt() & TCHALT_H; 1411 - if (!prevhalt) { 1412 - write_tc_c0_tchalt(TCHALT_H); 1413 - mips_ihb(); 1414 - } 1415 - tcstat = read_tc_c0_tcstatus(); 1416 - smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); 1417 - if (!prevhalt) 1418 - write_tc_c0_tchalt(0); 1419 - } 1420 - } 1421 - if (!asid) /* fix version if needed */ 1422 - asid = ASID_FIRST_VERSION; 1423 - local_flush_tlb_all(); /* start new asid cycle */ 1424 - } 1425 - } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); 1426 - 1427 - /* 1428 - * SMTC shares the TLB within VPEs and possibly across all VPEs. 
1429 - */ 1430 - for_each_online_cpu(i) { 1431 - if ((smtc_status & SMTC_TLB_SHARED) || 1432 - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) 1433 - cpu_context(i, mm) = asid_cache(i) = asid; 1434 - } 1435 - 1436 - if (smtc_status & SMTC_TLB_SHARED) 1437 - evpe(mtflags); 1438 - else 1439 - emt(mtflags); 1440 - local_irq_restore(flags); 1441 - } 1442 - 1443 - /* 1444 - * Invoked from macros defined in mmu_context.h 1445 - * which must already have disabled interrupts 1446 - * and done a DVPE or DMT as appropriate. 1447 - */ 1448 - 1449 - void smtc_flush_tlb_asid(unsigned long asid) 1450 - { 1451 - int entry; 1452 - unsigned long ehi; 1453 - 1454 - entry = read_c0_wired(); 1455 - 1456 - /* Traverse all non-wired entries */ 1457 - while (entry < current_cpu_data.tlbsize) { 1458 - write_c0_index(entry); 1459 - ehb(); 1460 - tlb_read(); 1461 - ehb(); 1462 - ehi = read_c0_entryhi(); 1463 - if ((ehi & ASID_MASK) == asid) { 1464 - /* 1465 - * Invalidate only entries with specified ASID, 1466 - * makiing sure all entries differ. 1467 - */ 1468 - write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); 1469 - write_c0_entrylo0(0); 1470 - write_c0_entrylo1(0); 1471 - mtc0_tlbw_hazard(); 1472 - tlb_write_indexed(); 1473 - } 1474 - entry++; 1475 - } 1476 - write_c0_index(PARKED_INDEX); 1477 - tlbw_use_hazard(); 1478 - } 1479 - 1480 - /* 1481 - * Support for single-threading cache flush operations. 1482 - */ 1483 - 1484 - static int halt_state_save[NR_CPUS]; 1485 - 1486 - /* 1487 - * To really, really be sure that nothing is being done 1488 - * by other TCs, halt them all. This code assumes that 1489 - * a DVPE has already been done, so while their Halted 1490 - * state is theoretically architecturally unstable, in 1491 - * practice, it's not going to change while we're looking 1492 - * at it. 
1493 - */ 1494 - 1495 - void smtc_cflush_lockdown(void) 1496 - { 1497 - int cpu; 1498 - 1499 - for_each_online_cpu(cpu) { 1500 - if (cpu != smp_processor_id()) { 1501 - settc(cpu_data[cpu].tc_id); 1502 - halt_state_save[cpu] = read_tc_c0_tchalt(); 1503 - write_tc_c0_tchalt(TCHALT_H); 1504 - } 1505 - } 1506 - mips_ihb(); 1507 - } 1508 - 1509 - /* It would be cheating to change the cpu_online states during a flush! */ 1510 - 1511 - void smtc_cflush_release(void) 1512 - { 1513 - int cpu; 1514 - 1515 - /* 1516 - * Start with a hazard barrier to ensure 1517 - * that all CACHE ops have played through. 1518 - */ 1519 - mips_ihb(); 1520 - 1521 - for_each_online_cpu(cpu) { 1522 - if (cpu != smp_processor_id()) { 1523 - settc(cpu_data[cpu].tc_id); 1524 - write_tc_c0_tchalt(halt_state_save[cpu]); 1525 - } 1526 - } 1527 - mips_ihb(); 1528 - }
-18
arch/mips/kernel/sync-r4k.c
··· 6 6 * not have done anything significant (but they may have had interrupts 7 7 * enabled briefly - prom_smp_finish() should not be responsible for enabling 8 8 * interrupts...) 9 - * 10 - * FIXME: broken for SMTC 11 9 */ 12 10 13 11 #include <linux/kernel.h> ··· 30 32 int i; 31 33 unsigned long flags; 32 34 unsigned int initcount; 33 - 34 - #ifdef CONFIG_MIPS_MT_SMTC 35 - /* 36 - * SMTC needs to synchronise per VPE, not per CPU 37 - * ignore for now 38 - */ 39 - return; 40 - #endif 41 35 42 36 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); 43 37 ··· 99 109 { 100 110 int i; 101 111 unsigned int initcount; 102 - 103 - #ifdef CONFIG_MIPS_MT_SMTC 104 - /* 105 - * SMTC needs to synchronise per VPE, not per CPU 106 - * ignore for now 107 - */ 108 - return; 109 - #endif 110 112 111 113 /* 112 114 * Not every cpu is online at the time this gets called,
-1
arch/mips/kernel/time.c
··· 26 26 #include <asm/cpu-features.h> 27 27 #include <asm/cpu-type.h> 28 28 #include <asm/div64.h> 29 - #include <asm/smtc_ipi.h> 30 29 #include <asm/time.h> 31 30 32 31 /*
-63
arch/mips/kernel/traps.c
··· 370 370 { 371 371 static int die_counter; 372 372 int sig = SIGSEGV; 373 - #ifdef CONFIG_MIPS_MT_SMTC 374 - unsigned long dvpret; 375 - #endif /* CONFIG_MIPS_MT_SMTC */ 376 373 377 374 oops_enter(); 378 375 ··· 379 382 380 383 console_verbose(); 381 384 raw_spin_lock_irq(&die_lock); 382 - #ifdef CONFIG_MIPS_MT_SMTC 383 - dvpret = dvpe(); 384 - #endif /* CONFIG_MIPS_MT_SMTC */ 385 385 bust_spinlocks(1); 386 - #ifdef CONFIG_MIPS_MT_SMTC 387 - mips_mt_regdump(dvpret); 388 - #endif /* CONFIG_MIPS_MT_SMTC */ 389 386 390 387 printk("%s[#%d]:\n", str, ++die_counter); 391 388 show_registers(regs); ··· 1750 1759 extern char rollback_except_vec_vi; 1751 1760 char *vec_start = using_rollback_handler() ? 1752 1761 &rollback_except_vec_vi : &except_vec_vi; 1753 - #ifdef CONFIG_MIPS_MT_SMTC 1754 - /* 1755 - * We need to provide the SMTC vectored interrupt handler 1756 - * not only with the address of the handler, but with the 1757 - * Status.IM bit to be masked before going there. 1758 - */ 1759 - extern char except_vec_vi_mori; 1760 - #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1761 - const int mori_offset = &except_vec_vi_mori - vec_start + 2; 1762 - #else 1763 - const int mori_offset = &except_vec_vi_mori - vec_start; 1764 - #endif 1765 - #endif /* CONFIG_MIPS_MT_SMTC */ 1766 1762 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1767 1763 const int lui_offset = &except_vec_vi_lui - vec_start + 2; 1768 1764 const int ori_offset = &except_vec_vi_ori - vec_start + 2; ··· 1773 1795 #else 1774 1796 handler_len); 1775 1797 #endif 1776 - #ifdef CONFIG_MIPS_MT_SMTC 1777 - BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. 
*/ 1778 - 1779 - h = (u16 *)(b + mori_offset); 1780 - *h = (0x100 << n); 1781 - #endif /* CONFIG_MIPS_MT_SMTC */ 1782 1798 h = (u16 *)(b + lui_offset); 1783 1799 *h = (handler >> 16) & 0xffff; 1784 1800 h = (u16 *)(b + ori_offset); ··· 1842 1870 unsigned int cpu = smp_processor_id(); 1843 1871 unsigned int status_set = ST0_CU0; 1844 1872 unsigned int hwrena = cpu_hwrena_impl_bits; 1845 - #ifdef CONFIG_MIPS_MT_SMTC 1846 - int secondaryTC = 0; 1847 - int bootTC = (cpu == 0); 1848 - 1849 - /* 1850 - * Only do per_cpu_trap_init() for first TC of Each VPE. 1851 - * Note that this hack assumes that the SMTC init code 1852 - * assigns TCs consecutively and in ascending order. 1853 - */ 1854 - 1855 - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && 1856 - ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) 1857 - secondaryTC = 1; 1858 - #endif /* CONFIG_MIPS_MT_SMTC */ 1859 1873 1860 1874 /* 1861 1875 * Disable coprocessors and select 32-bit or 64-bit addressing ··· 1868 1910 1869 1911 if (hwrena) 1870 1912 write_c0_hwrena(hwrena); 1871 - 1872 - #ifdef CONFIG_MIPS_MT_SMTC 1873 - if (!secondaryTC) { 1874 - #endif /* CONFIG_MIPS_MT_SMTC */ 1875 1913 1876 1914 if (cpu_has_veic || cpu_has_vint) { 1877 1915 unsigned long sr = set_c0_status(ST0_BEV); ··· 1903 1949 cp0_perfcount_irq = -1; 1904 1950 } 1905 1951 1906 - #ifdef CONFIG_MIPS_MT_SMTC 1907 - } 1908 - #endif /* CONFIG_MIPS_MT_SMTC */ 1909 - 1910 1952 if (!cpu_data[cpu].asid_cache) 1911 1953 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1912 1954 ··· 1911 1961 BUG_ON(current->mm); 1912 1962 enter_lazy_tlb(&init_mm, current); 1913 1963 1914 - #ifdef CONFIG_MIPS_MT_SMTC 1915 - if (bootTC) { 1916 - #endif /* CONFIG_MIPS_MT_SMTC */ 1917 1964 /* Boot CPU's cache setup in setup_arch(). 
*/ 1918 1965 if (!is_boot_cpu) 1919 1966 cpu_cache_init(); 1920 1967 tlb_init(); 1921 - #ifdef CONFIG_MIPS_MT_SMTC 1922 - } else if (!secondaryTC) { 1923 - /* 1924 - * First TC in non-boot VPE must do subset of tlb_init() 1925 - * for MMU countrol registers. 1926 - */ 1927 - write_c0_pagemask(PM_DEFAULT_MASK); 1928 - write_c0_wired(0); 1929 - } 1930 - #endif /* CONFIG_MIPS_MT_SMTC */ 1931 1968 TLBMISS_HANDLER_SETUP(); 1932 1969 } 1933 1970
+7 -9
arch/mips/kernel/vpe-mt.c
··· 127 127 clear_c0_mvpcontrol(MVPCONTROL_VPC); 128 128 129 129 /* 130 - * SMTC/SMVP kernels manage VPE enable independently, 131 - * but uniprocessor kernels need to turn it on, even 132 - * if that wasn't the pre-dvpe() state. 130 + * SMVP kernels manage VPE enable independently, but uniprocessor 131 + * kernels need to turn it on, even if that wasn't the pre-dvpe() state. 133 132 */ 134 133 #ifdef CONFIG_SMP 135 134 evpe(vpeflags); ··· 453 454 454 455 settc(tc); 455 456 456 - /* Any TC that is bound to VPE0 gets left as is - in 457 - * case we are running SMTC on VPE0. A TC that is bound 458 - * to any other VPE gets bound to VPE0, ideally I'd like 459 - * to make it homeless but it doesn't appear to let me 460 - * bind a TC to a non-existent VPE. Which is perfectly 461 - * reasonable. 457 + /* 458 + * A TC that is bound to any other VPE gets bound to 459 + * VPE0, ideally I'd like to make it homeless but it 460 + * doesn't appear to let me bind a TC to a non-existent 461 + * VPE. Which is perfectly reasonable. 462 462 * 463 463 * The (un)bound state is visible to an EJTAG probe so 464 464 * may notify GDB...
+2 -2
arch/mips/lantiq/irq.c
··· 61 61 /* we have a cascade of 8 irqs */ 62 62 #define MIPS_CPU_IRQ_CASCADE 8 63 63 64 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 64 + #ifdef CONFIG_MIPS_MT_SMP 65 65 int gic_present; 66 66 #endif 67 67 ··· 440 440 arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call); 441 441 #endif 442 442 443 - #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 443 + #ifndef CONFIG_MIPS_MT_SMP 444 444 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | 445 445 IE_IRQ3 | IE_IRQ4 | IE_IRQ5); 446 446 #else
+6 -40
arch/mips/lib/mips-atomic.c
··· 15 15 #include <linux/export.h> 16 16 #include <linux/stringify.h> 17 17 18 - #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) 18 + #ifndef CONFIG_CPU_MIPSR2 19 19 20 20 /* 21 21 * For cli() we have to insert nops to make sure that the new value ··· 42 42 __asm__ __volatile__( 43 43 " .set push \n" 44 44 " .set noat \n" 45 - #ifdef CONFIG_MIPS_MT_SMTC 46 - " mfc0 $1, $2, 1 \n" 47 - " ori $1, 0x400 \n" 48 - " .set noreorder \n" 49 - " mtc0 $1, $2, 1 \n" 50 - #elif defined(CONFIG_CPU_MIPSR2) 45 + #if defined(CONFIG_CPU_MIPSR2) 51 46 /* see irqflags.h for inline function */ 52 47 #else 53 48 " mfc0 $1,$12 \n" ··· 72 77 " .set push \n" 73 78 " .set reorder \n" 74 79 " .set noat \n" 75 - #ifdef CONFIG_MIPS_MT_SMTC 76 - " mfc0 %[flags], $2, 1 \n" 77 - " ori $1, %[flags], 0x400 \n" 78 - " .set noreorder \n" 79 - " mtc0 $1, $2, 1 \n" 80 - " andi %[flags], %[flags], 0x400 \n" 81 - #elif defined(CONFIG_CPU_MIPSR2) 80 + #if defined(CONFIG_CPU_MIPSR2) 82 81 /* see irqflags.h for inline function */ 83 82 #else 84 83 " mfc0 %[flags], $12 \n" ··· 97 108 { 98 109 unsigned long __tmp1; 99 110 100 - #ifdef CONFIG_MIPS_MT_SMTC 101 - /* 102 - * SMTC kernel needs to do a software replay of queued 103 - * IPIs, at the cost of branch and call overhead on each 104 - * local_irq_restore() 105 - */ 106 - if (unlikely(!(flags & 0x0400))) 107 - smtc_ipi_replay(); 108 - #endif 109 111 preempt_disable(); 110 112 111 113 __asm__ __volatile__( 112 114 " .set push \n" 113 115 " .set noreorder \n" 114 116 " .set noat \n" 115 - #ifdef CONFIG_MIPS_MT_SMTC 116 - " mfc0 $1, $2, 1 \n" 117 - " andi %[flags], 0x400 \n" 118 - " ori $1, 0x400 \n" 119 - " xori $1, 0x400 \n" 120 - " or %[flags], $1 \n" 121 - " mtc0 %[flags], $2, 1 \n" 122 - #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 117 + #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 123 118 /* see irqflags.h for inline function */ 124 119 #elif defined(CONFIG_CPU_MIPSR2) 125 120 /* see irqflags.h for inline 
function */ ··· 136 163 " .set push \n" 137 164 " .set noreorder \n" 138 165 " .set noat \n" 139 - #ifdef CONFIG_MIPS_MT_SMTC 140 - " mfc0 $1, $2, 1 \n" 141 - " andi %[flags], 0x400 \n" 142 - " ori $1, 0x400 \n" 143 - " xori $1, 0x400 \n" 144 - " or %[flags], $1 \n" 145 - " mtc0 %[flags], $2, 1 \n" 146 - #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 166 + #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 147 167 /* see irqflags.h for inline function */ 148 168 #elif defined(CONFIG_CPU_MIPSR2) 149 169 /* see irqflags.h for inline function */ ··· 158 192 } 159 193 EXPORT_SYMBOL(__arch_local_irq_restore); 160 194 161 - #endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ 195 + #endif /* !CONFIG_CPU_MIPSR2 */
+2 -2
arch/mips/mm/c-r4k.c
··· 50 50 { 51 51 preempt_disable(); 52 52 53 - #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 53 + #ifndef CONFIG_MIPS_MT_SMP 54 54 smp_call_function(func, info, 1); 55 55 #endif 56 56 func(info); ··· 427 427 428 428 static inline int has_valid_asid(const struct mm_struct *mm) 429 429 { 430 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 430 + #ifdef CONFIG_MIPS_MT_SMP 431 431 int i; 432 432 433 433 for_each_online_cpu(i)
+5 -63
arch/mips/mm/init.c
··· 44 44 #include <asm/tlb.h> 45 45 #include <asm/fixmap.h> 46 46 47 - /* Atomicity and interruptability */ 48 - #ifdef CONFIG_MIPS_MT_SMTC 49 - 50 - #include <asm/mipsmtregs.h> 51 - 52 - #define ENTER_CRITICAL(flags) \ 53 - { \ 54 - unsigned int mvpflags; \ 55 - local_irq_save(flags);\ 56 - mvpflags = dvpe() 57 - #define EXIT_CRITICAL(flags) \ 58 - evpe(mvpflags); \ 59 - local_irq_restore(flags); \ 60 - } 61 - #else 62 - 63 - #define ENTER_CRITICAL(flags) local_irq_save(flags) 64 - #define EXIT_CRITICAL(flags) local_irq_restore(flags) 65 - 66 - #endif /* CONFIG_MIPS_MT_SMTC */ 67 - 68 47 /* 69 48 * We have up to 8 empty zeroed pages so we can map one of the right colour 70 49 * when needed. This is necessary only on R4000 / R4400 SC and MC versions ··· 79 100 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; 80 101 } 81 102 82 - #ifdef CONFIG_MIPS_MT_SMTC 83 - static pte_t *kmap_coherent_pte; 84 - static void __init kmap_coherent_init(void) 85 - { 86 - unsigned long vaddr; 87 - 88 - /* cache the first coherent kmap pte */ 89 - vaddr = __fix_to_virt(FIX_CMAP_BEGIN); 90 - kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); 91 - } 92 - #else 93 - static inline void kmap_coherent_init(void) {} 94 - #endif 95 - 96 103 void *kmap_coherent(struct page *page, unsigned long addr) 97 104 { 98 105 enum fixed_addresses idx; ··· 91 126 92 127 pagefault_disable(); 93 128 idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); 94 - #ifdef CONFIG_MIPS_MT_SMTC 95 - idx += FIX_N_COLOURS * smp_processor_id() + 96 - (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0); 97 - #else 98 129 idx += in_interrupt() ? 
FIX_N_COLOURS : 0; 99 - #endif 100 130 vaddr = __fix_to_virt(FIX_CMAP_END - idx); 101 131 pte = mk_pte(page, PAGE_KERNEL); 102 132 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) ··· 100 140 entrylo = pte_to_entrylo(pte_val(pte)); 101 141 #endif 102 142 103 - ENTER_CRITICAL(flags); 143 + local_irq_save(flags); 104 144 old_ctx = read_c0_entryhi(); 105 145 write_c0_entryhi(vaddr & (PAGE_MASK << 1)); 106 146 write_c0_entrylo0(entrylo); 107 147 write_c0_entrylo1(entrylo); 108 - #ifdef CONFIG_MIPS_MT_SMTC 109 - set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); 110 - /* preload TLB instead of local_flush_tlb_one() */ 111 - mtc0_tlbw_hazard(); 112 - tlb_probe(); 113 - tlb_probe_hazard(); 114 - tlbidx = read_c0_index(); 115 - mtc0_tlbw_hazard(); 116 - if (tlbidx < 0) 117 - tlb_write_random(); 118 - else 119 - tlb_write_indexed(); 120 - #else 121 148 tlbidx = read_c0_wired(); 122 149 write_c0_wired(tlbidx + 1); 123 150 write_c0_index(tlbidx); 124 151 mtc0_tlbw_hazard(); 125 152 tlb_write_indexed(); 126 - #endif 127 153 tlbw_use_hazard(); 128 154 write_c0_entryhi(old_ctx); 129 - EXIT_CRITICAL(flags); 155 + local_irq_restore(flags); 130 156 131 157 return (void*) vaddr; 132 158 } 133 159 134 160 void kunmap_coherent(void) 135 161 { 136 - #ifndef CONFIG_MIPS_MT_SMTC 137 162 unsigned int wired; 138 163 unsigned long flags, old_ctx; 139 164 140 - ENTER_CRITICAL(flags); 165 + local_irq_save(flags); 141 166 old_ctx = read_c0_entryhi(); 142 167 wired = read_c0_wired() - 1; 143 168 write_c0_wired(wired); ··· 134 189 tlb_write_indexed(); 135 190 tlbw_use_hazard(); 136 191 write_c0_entryhi(old_ctx); 137 - EXIT_CRITICAL(flags); 138 - #endif 192 + local_irq_restore(flags); 139 193 pagefault_enable(); 140 194 } 141 195 ··· 200 256 void __init fixrange_init(unsigned long start, unsigned long end, 201 257 pgd_t *pgd_base) 202 258 { 203 - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) 259 + #ifdef CONFIG_HIGHMEM 204 260 pgd_t *pgd; 205 261 pud_t *pud; 
206 262 pmd_t *pmd; ··· 271 327 #ifdef CONFIG_HIGHMEM 272 328 kmap_init(); 273 329 #endif 274 - kmap_coherent_init(); 275 - 276 330 #ifdef CONFIG_ZONE_DMA 277 331 max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; 278 332 #endif
+16 -38
arch/mips/mm/tlb-r4k.c
··· 25 25 26 26 extern void build_tlb_refill_handler(void); 27 27 28 - /* Atomicity and interruptability */ 29 - #ifdef CONFIG_MIPS_MT_SMTC 30 - 31 - #include <asm/smtc.h> 32 - #include <asm/mipsmtregs.h> 33 - 34 - #define ENTER_CRITICAL(flags) \ 35 - { \ 36 - unsigned int mvpflags; \ 37 - local_irq_save(flags);\ 38 - mvpflags = dvpe() 39 - #define EXIT_CRITICAL(flags) \ 40 - evpe(mvpflags); \ 41 - local_irq_restore(flags); \ 42 - } 43 - #else 44 - 45 - #define ENTER_CRITICAL(flags) local_irq_save(flags) 46 - #define EXIT_CRITICAL(flags) local_irq_restore(flags) 47 - 48 - #endif /* CONFIG_MIPS_MT_SMTC */ 49 - 50 28 /* 51 29 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, 52 30 * unfortunately, itlb is not totally transparent to software. ··· 53 75 unsigned long old_ctx; 54 76 int entry, ftlbhighset; 55 77 56 - ENTER_CRITICAL(flags); 78 + local_irq_save(flags); 57 79 /* Save old context and create impossible VPN2 value */ 58 80 old_ctx = read_c0_entryhi(); 59 81 write_c0_entrylo0(0); ··· 90 112 tlbw_use_hazard(); 91 113 write_c0_entryhi(old_ctx); 92 114 flush_itlb(); 93 - EXIT_CRITICAL(flags); 115 + local_irq_restore(flags); 94 116 } 95 117 EXPORT_SYMBOL(local_flush_tlb_all); 96 118 ··· 120 142 if (cpu_context(cpu, mm) != 0) { 121 143 unsigned long size, flags; 122 144 123 - ENTER_CRITICAL(flags); 145 + local_irq_save(flags); 124 146 start = round_down(start, PAGE_SIZE << 1); 125 147 end = round_up(end, PAGE_SIZE << 1); 126 148 size = (end - start) >> (PAGE_SHIFT + 1); ··· 154 176 drop_mmu_context(mm, cpu); 155 177 } 156 178 flush_itlb(); 157 - EXIT_CRITICAL(flags); 179 + local_irq_restore(flags); 158 180 } 159 181 } 160 182 ··· 162 184 { 163 185 unsigned long size, flags; 164 186 165 - ENTER_CRITICAL(flags); 187 + local_irq_save(flags); 166 188 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 167 189 size = (size + 1) >> 1; 168 190 if (size <= (current_cpu_data.tlbsizeftlbsets ? 
··· 198 220 local_flush_tlb_all(); 199 221 } 200 222 flush_itlb(); 201 - EXIT_CRITICAL(flags); 223 + local_irq_restore(flags); 202 224 } 203 225 204 226 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) ··· 211 233 212 234 newpid = cpu_asid(cpu, vma->vm_mm); 213 235 page &= (PAGE_MASK << 1); 214 - ENTER_CRITICAL(flags); 236 + local_irq_save(flags); 215 237 oldpid = read_c0_entryhi(); 216 238 write_c0_entryhi(page | newpid); 217 239 mtc0_tlbw_hazard(); ··· 231 253 finish: 232 254 write_c0_entryhi(oldpid); 233 255 flush_itlb_vm(vma); 234 - EXIT_CRITICAL(flags); 256 + local_irq_restore(flags); 235 257 } 236 258 } 237 259 ··· 244 266 unsigned long flags; 245 267 int oldpid, idx; 246 268 247 - ENTER_CRITICAL(flags); 269 + local_irq_save(flags); 248 270 oldpid = read_c0_entryhi(); 249 271 page &= (PAGE_MASK << 1); 250 272 write_c0_entryhi(page); ··· 263 285 } 264 286 write_c0_entryhi(oldpid); 265 287 flush_itlb(); 266 - EXIT_CRITICAL(flags); 288 + local_irq_restore(flags); 267 289 } 268 290 269 291 /* ··· 286 308 if (current->active_mm != vma->vm_mm) 287 309 return; 288 310 289 - ENTER_CRITICAL(flags); 311 + local_irq_save(flags); 290 312 291 313 pid = read_c0_entryhi() & ASID_MASK; 292 314 address &= (PAGE_MASK << 1); ··· 336 358 } 337 359 tlbw_use_hazard(); 338 360 flush_itlb_vm(vma); 339 - EXIT_CRITICAL(flags); 361 + local_irq_restore(flags); 340 362 } 341 363 342 364 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, ··· 347 369 unsigned long old_pagemask; 348 370 unsigned long old_ctx; 349 371 350 - ENTER_CRITICAL(flags); 372 + local_irq_save(flags); 351 373 /* Save old context and create impossible VPN2 value */ 352 374 old_ctx = read_c0_entryhi(); 353 375 old_pagemask = read_c0_pagemask(); ··· 367 389 tlbw_use_hazard(); /* What is the hazard here? 
*/ 368 390 write_c0_pagemask(old_pagemask); 369 391 local_flush_tlb_all(); 370 - EXIT_CRITICAL(flags); 392 + local_irq_restore(flags); 371 393 } 372 394 373 395 #ifdef CONFIG_TRANSPARENT_HUGEPAGE ··· 377 399 unsigned int mask; 378 400 unsigned long flags; 379 401 380 - ENTER_CRITICAL(flags); 402 + local_irq_save(flags); 381 403 write_c0_pagemask(PM_HUGE_MASK); 382 404 back_to_back_c0_hazard(); 383 405 mask = read_c0_pagemask(); 384 406 write_c0_pagemask(PM_DEFAULT_MASK); 385 407 386 - EXIT_CRITICAL(flags); 408 + local_irq_restore(flags); 387 409 388 410 return mask == PM_HUGE_MASK; 389 411 }
-3
arch/mips/mti-malta/Makefile
··· 8 8 obj-y := malta-amon.o malta-display.o malta-init.o \ 9 9 malta-int.o malta-memory.o malta-platform.o \ 10 10 malta-reset.o malta-setup.o malta-time.o 11 - 12 - # FIXME FIXME FIXME 13 - obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
-6
arch/mips/mti-malta/malta-init.c
··· 116 116 return CPC_BASE_ADDR; 117 117 } 118 118 119 - extern struct plat_smp_ops msmtc_smp_ops; 120 - 121 119 void __init prom_init(void) 122 120 { 123 121 mips_display_message("LINUX"); ··· 302 304 return; 303 305 if (!register_vsmp_smp_ops()) 304 306 return; 305 - 306 - #ifdef CONFIG_MIPS_MT_SMTC 307 - register_smp_ops(&msmtc_smp_ops); 308 - #endif 309 307 }
-19
arch/mips/mti-malta/malta-int.c
··· 504 504 } else if (cpu_has_vint) { 505 505 set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); 506 506 set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); 507 - #ifdef CONFIG_MIPS_MT_SMTC 508 - setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq, 509 - (0x100 << MIPSCPU_INT_I8259A)); 510 - setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, 511 - &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI)); 512 - /* 513 - * Temporary hack to ensure that the subsidiary device 514 - * interrupts coing in via the i8259A, but associated 515 - * with low IRQ numbers, will restore the Status.IM 516 - * value associated with the i8259A. 517 - */ 518 - { 519 - int i; 520 - 521 - for (i = 0; i < 16; i++) 522 - irq_hwmask[i] = (0x100 << MIPSCPU_INT_I8259A); 523 - } 524 - #else /* Not SMTC */ 525 507 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); 526 508 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, 527 509 &corehi_irqaction); 528 - #endif /* CONFIG_MIPS_MT_SMTC */ 529 510 } else { 530 511 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); 531 512 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
-4
arch/mips/mti-malta/malta-setup.c
··· 77 77 return "MIPS Malta"; 78 78 } 79 79 80 - #if defined(CONFIG_MIPS_MT_SMTC) 81 - const char display_string[] = " SMTC LINUX ON MALTA "; 82 - #else 83 80 const char display_string[] = " LINUX ON MALTA "; 84 - #endif /* CONFIG_MIPS_MT_SMTC */ 85 81 86 82 #ifdef CONFIG_BLK_DEV_FD 87 83 static void __init fd_activate(void)
-162
arch/mips/mti-malta/malta-smtc.c
··· 1 - /* 2 - * Malta Platform-specific hooks for SMP operation 3 - */ 4 - #include <linux/irq.h> 5 - #include <linux/init.h> 6 - 7 - #include <asm/mipsregs.h> 8 - #include <asm/mipsmtregs.h> 9 - #include <asm/smtc.h> 10 - #include <asm/smtc_ipi.h> 11 - 12 - /* VPE/SMP Prototype implements platform interfaces directly */ 13 - 14 - /* 15 - * Cause the specified action to be performed on a targeted "CPU" 16 - */ 17 - 18 - static void msmtc_send_ipi_single(int cpu, unsigned int action) 19 - { 20 - /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 21 - smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 22 - } 23 - 24 - static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) 25 - { 26 - unsigned int i; 27 - 28 - for_each_cpu(i, mask) 29 - msmtc_send_ipi_single(i, action); 30 - } 31 - 32 - /* 33 - * Post-config but pre-boot cleanup entry point 34 - */ 35 - static void msmtc_init_secondary(void) 36 - { 37 - int myvpe; 38 - 39 - /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ 40 - myvpe = read_c0_tcbind() & TCBIND_CURVPE; 41 - if (myvpe != 0) { 42 - /* Ideally, this should be done only once per VPE, but... 
*/ 43 - clear_c0_status(ST0_IM); 44 - set_c0_status((0x100 << cp0_compare_irq) 45 - | (0x100 << MIPS_CPU_IPI_IRQ)); 46 - if (cp0_perfcount_irq >= 0) 47 - set_c0_status(0x100 << cp0_perfcount_irq); 48 - } 49 - 50 - smtc_init_secondary(); 51 - } 52 - 53 - /* 54 - * Platform "CPU" startup hook 55 - */ 56 - static void msmtc_boot_secondary(int cpu, struct task_struct *idle) 57 - { 58 - smtc_boot_secondary(cpu, idle); 59 - } 60 - 61 - /* 62 - * SMP initialization finalization entry point 63 - */ 64 - static void msmtc_smp_finish(void) 65 - { 66 - smtc_smp_finish(); 67 - } 68 - 69 - /* 70 - * Hook for after all CPUs are online 71 - */ 72 - 73 - static void msmtc_cpus_done(void) 74 - { 75 - } 76 - 77 - /* 78 - * Platform SMP pre-initialization 79 - * 80 - * As noted above, we can assume a single CPU for now 81 - * but it may be multithreaded. 82 - */ 83 - 84 - static void __init msmtc_smp_setup(void) 85 - { 86 - /* 87 - * we won't get the definitive value until 88 - * we've run smtc_prepare_cpus later, but 89 - * we would appear to need an upper bound now. 
90 - */ 91 - smp_num_siblings = smtc_build_cpu_map(0); 92 - } 93 - 94 - static void __init msmtc_prepare_cpus(unsigned int max_cpus) 95 - { 96 - smtc_prepare_cpus(max_cpus); 97 - } 98 - 99 - struct plat_smp_ops msmtc_smp_ops = { 100 - .send_ipi_single = msmtc_send_ipi_single, 101 - .send_ipi_mask = msmtc_send_ipi_mask, 102 - .init_secondary = msmtc_init_secondary, 103 - .smp_finish = msmtc_smp_finish, 104 - .cpus_done = msmtc_cpus_done, 105 - .boot_secondary = msmtc_boot_secondary, 106 - .smp_setup = msmtc_smp_setup, 107 - .prepare_cpus = msmtc_prepare_cpus, 108 - }; 109 - 110 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 111 - /* 112 - * IRQ affinity hook 113 - */ 114 - 115 - 116 - int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, 117 - bool force) 118 - { 119 - cpumask_t tmask; 120 - int cpu = 0; 121 - void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); 122 - 123 - /* 124 - * On the legacy Malta development board, all I/O interrupts 125 - * are routed through the 8259 and combined in a single signal 126 - * to the CPU daughterboard, and on the CoreFPGA2/3 34K models, 127 - * that signal is brought to IP2 of both VPEs. To avoid racing 128 - * concurrent interrupt service events, IP2 is enabled only on 129 - * one VPE, by convention VPE0. So long as no bits are ever 130 - * cleared in the affinity mask, there will never be any 131 - * interrupt forwarding. But as soon as a program or operator 132 - * sets affinity for one of the related IRQs, we need to make 133 - * sure that we don't ever try to forward across the VPE boundary, 134 - * at least not until we engineer a system where the interrupt 135 - * _ack() or _end() function can somehow know that it corresponds 136 - * to an interrupt taken on another VPE, and perform the appropriate 137 - * restoration of Status.IM state using MFTR/MTTR instead of the 138 - * normal local behavior. We also ensure that no attempt will 139 - * be made to forward to an offline "CPU". 
140 - */ 141 - 142 - cpumask_copy(&tmask, affinity); 143 - for_each_cpu(cpu, affinity) { 144 - if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) 145 - cpu_clear(cpu, tmask); 146 - } 147 - cpumask_copy(d->affinity, &tmask); 148 - 149 - if (cpus_empty(tmask)) 150 - /* 151 - * We could restore a default mask here, but the 152 - * runtime code can anyway deal with the null set 153 - */ 154 - printk(KERN_WARNING 155 - "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq); 156 - 157 - /* Do any generic SMTC IRQ affinity setup */ 158 - smtc_set_irq_affinity(d->irq, tmask); 159 - 160 - return IRQ_SET_MASK_OK_NOCOPY; 161 - } 162 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-1
arch/mips/pmcs-msp71xx/Makefile
··· 10 10 obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o 11 11 obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o 12 12 obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o 13 - obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o
+5 -11
arch/mips/pmcs-msp71xx/msp_irq.c
··· 32 32 
 33 33 /* vectored interrupt implementation */ 
 34 34 
 35 - /* SW0/1 interrupts are used for SMP/SMTC */ 
 35 + /* SW0/1 interrupts are used for SMP */ 
 36 36 static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } 
 37 37 static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } 
 38 38 static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } 
··· 138 138 
 138 138 set_vi_handler(MSP_INT_SEC, sec_int_dispatch); 
 139 139 #ifdef CONFIG_MIPS_MT_SMP 
 140 140 msp_vsmp_int_init(); 
 141 - #elif defined CONFIG_MIPS_MT_SMTC 
 142 - /*Set hwmask for all platform devices */ 
 143 - irq_hwmask[MSP_INT_MAC0] = C_IRQ0; 
 144 - irq_hwmask[MSP_INT_MAC1] = C_IRQ1; 
 145 - irq_hwmask[MSP_INT_USB] = C_IRQ2; 
 146 - irq_hwmask[MSP_INT_SAR] = C_IRQ3; 
 147 - irq_hwmask[MSP_INT_SEC] = C_IRQ5; 
 148 - 
 149 141 #endif /* CONFIG_MIPS_MT_SMP */ 
 150 142 #endif /* CONFIG_MIPS_MT */ 
 151 143 /* setup the cascaded interrupts */ 
··· 145 153 
 145 153 setup_irq(MSP_INT_PER, &per_cascade_msp); 
 146 154 
 147 155 #else 
 148 - /* setup the 2nd-level SLP register based interrupt controller */ 
 149 - /* VSMP /SMTC support support is not enabled for SLP */ 
 156 + /* 
 157 + * Setup the 2nd-level SLP register based interrupt controller. 
 158 + * VSMP support is not enabled for SLP. 
 159 + */ 
 150 160 msp_slp_irq_init(); 
 151 161 
 152 162 /* setup the cascaded SLP/PER interrupts */
+1 -6
arch/mips/pmcs-msp71xx/msp_irq_cic.c
··· 120 120 * hurt for the others 121 121 */ 122 122 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); 123 - smtc_im_ack_irq(d->irq); 124 123 } 125 124 126 - /*Note: Limiting to VSMP . Not tested in SMTC */ 125 + /* Note: Limiting to VSMP. */ 127 126 128 127 #ifdef CONFIG_MIPS_MT_SMP 129 128 static int msp_cic_irq_set_affinity(struct irq_data *d, ··· 182 183 for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { 183 184 irq_set_chip_and_handler(i, &msp_cic_irq_controller, 184 185 handle_level_irq); 185 - #ifdef CONFIG_MIPS_MT_SMTC 186 - /* Mask of CIC interrupt */ 187 - irq_hwmask[i] = C_IRQ4; 188 - #endif 189 186 } 190 187 191 188 /* Initialize the PER interrupt sub-system */
-3
arch/mips/pmcs-msp71xx/msp_irq_per.c
··· 113 113 /* initialize all the IRQ descriptors */ 114 114 for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { 115 115 irq_set_chip(i, &msp_per_irq_controller); 116 - #ifdef CONFIG_MIPS_MT_SMTC 117 - irq_hwmask[i] = C_IRQ4; 118 - #endif 119 116 } 120 117 } 121 118
+1 -7
arch/mips/pmcs-msp71xx/msp_setup.c
··· 147 147 pm_power_off = msp_power_off; 148 148 } 149 149 150 - extern struct plat_smp_ops msp_smtc_smp_ops; 151 - 152 150 void __init prom_init(void) 153 151 { 154 152 unsigned long family; ··· 227 229 */ 228 230 msp_serial_setup(); 229 231 230 - if (register_vsmp_smp_ops()) { 231 - #ifdef CONFIG_MIPS_MT_SMTC 232 - register_smp_ops(&msp_smtc_smp_ops); 233 - #endif 234 - } 232 + register_vsmp_smp_ops(); 235 233 }
-104
arch/mips/pmcs-msp71xx/msp_smtc.c
··· 1 - /* 2 - * MSP71xx Platform-specific hooks for SMP operation 3 - */ 4 - #include <linux/irq.h> 5 - #include <linux/init.h> 6 - 7 - #include <asm/mipsmtregs.h> 8 - #include <asm/mipsregs.h> 9 - #include <asm/smtc.h> 10 - #include <asm/smtc_ipi.h> 11 - 12 - /* VPE/SMP Prototype implements platform interfaces directly */ 13 - 14 - /* 15 - * Cause the specified action to be performed on a targeted "CPU" 16 - */ 17 - 18 - static void msp_smtc_send_ipi_single(int cpu, unsigned int action) 19 - { 20 - /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 21 - smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 22 - } 23 - 24 - static void msp_smtc_send_ipi_mask(const struct cpumask *mask, 25 - unsigned int action) 26 - { 27 - unsigned int i; 28 - 29 - for_each_cpu(i, mask) 30 - msp_smtc_send_ipi_single(i, action); 31 - } 32 - 33 - /* 34 - * Post-config but pre-boot cleanup entry point 35 - */ 36 - static void msp_smtc_init_secondary(void) 37 - { 38 - int myvpe; 39 - 40 - /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ 41 - myvpe = read_c0_tcbind() & TCBIND_CURVPE; 42 - if (myvpe > 0) 43 - change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | 44 - STATUSF_IP6 | STATUSF_IP7); 45 - smtc_init_secondary(); 46 - } 47 - 48 - /* 49 - * Platform "CPU" startup hook 50 - */ 51 - static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle) 52 - { 53 - smtc_boot_secondary(cpu, idle); 54 - } 55 - 56 - /* 57 - * SMP initialization finalization entry point 58 - */ 59 - static void msp_smtc_smp_finish(void) 60 - { 61 - smtc_smp_finish(); 62 - } 63 - 64 - /* 65 - * Hook for after all CPUs are online 66 - */ 67 - 68 - static void msp_smtc_cpus_done(void) 69 - { 70 - } 71 - 72 - /* 73 - * Platform SMP pre-initialization 74 - * 75 - * As noted above, we can assume a single CPU for now 76 - * but it may be multithreaded. 
77 - */ 78 - 79 - static void __init msp_smtc_smp_setup(void) 80 - { 81 - /* 82 - * we won't get the definitive value until 83 - * we've run smtc_prepare_cpus later, but 84 - */ 85 - 86 - if (read_c0_config3() & (1 << 2)) 87 - smp_num_siblings = smtc_build_cpu_map(0); 88 - } 89 - 90 - static void __init msp_smtc_prepare_cpus(unsigned int max_cpus) 91 - { 92 - smtc_prepare_cpus(max_cpus); 93 - } 94 - 95 - struct plat_smp_ops msp_smtc_smp_ops = { 96 - .send_ipi_single = msp_smtc_send_ipi_single, 97 - .send_ipi_mask = msp_smtc_send_ipi_mask, 98 - .init_secondary = msp_smtc_init_secondary, 99 - .smp_finish = msp_smtc_smp_finish, 100 - .cpus_done = msp_smtc_cpus_done, 101 - .boot_secondary = msp_smtc_boot_secondary, 102 - .smp_setup = msp_smtc_smp_setup, 103 - .prepare_cpus = msp_smtc_prepare_cpus, 104 - };