Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
- three fixes for 3.15 that didn't make it in time
- limited Octeon 3 support.
- paravirtualization support
- improvements to platform support for Netlogic SoCs.
- add support for powering down the Malta eval board in software
- add many instructions to the in-kernel microassembler.
- add support for the BPF JIT.
- minor cleanups of the BCM47xx code.
- large cleanup of math emu code resulting in significant code size
reduction, better readability of the code and more accurate
emulation.
- improvements to the MIPS CPS code.
- support C3 power status for the R4k count/compare clock device.
- improvements to the GIO support for older SGI workstations.
- increase number of supported CPUs to 256; this can be reached on
certain embedded multithreaded ccNUMA configurations.
- various small cleanups, updates and fixes

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (173 commits)
MIPS: IP22/IP28: Improve GIO support
MIPS: Octeon: Add twsi interrupt initialization for OCTEON 3XXX, 5XXX, 63XX
DEC: Document the R4k MB ASIC mini interrupt controller
DEC: Add self as the maintainer
MIPS: Add microMIPS MSA support.
MIPS: Replace calls to obsolete strict_strto call with kstrto* equivalents.
MIPS: Replace obsolete strict_strto call with kstrto
MIPS: BFP: Simplify code slightly.
MIPS: Call find_vma with the mmap_sem held
MIPS: Fix 'write_msa_##' inline macro.
MIPS: Fix MSA toolchain support detection.
mips: Update the email address of Geert Uytterhoeven
MIPS: Add minimal defconfig for mips_paravirt
MIPS: Enable build for new system 'paravirt'
MIPS: paravirt: Add pci controller for virtio
MIPS: Add code for new system 'paravirt'
MIPS: Add functions for hypervisor call
MIPS: OCTEON: Add OCTEON3 to __get_cpu_type
MIPS: Add function get_ebase_cpunum
MIPS: Add minimal support for OCTEON3 to c-r4k.c
...

+8553 -7896
+9
MAINTAINERS
··· 2700 2700 F: Documentation/networking/decnet.txt 2701 2701 F: net/decnet/ 2702 2702 2703 + DECSTATION PLATFORM SUPPORT 2704 + M: "Maciej W. Rozycki" <macro@linux-mips.org> 2705 + L: linux-mips@linux-mips.org 2706 + W: http://www.linux-mips.org/wiki/DECstation 2707 + S: Maintained 2708 + F: arch/mips/dec/ 2709 + F: arch/mips/include/asm/dec/ 2710 + F: arch/mips/include/asm/mach-dec/ 2711 + 2703 2712 DEFXX FDDI NETWORK DRIVER 2704 2713 M: "Maciej W. Rozycki" <macro@linux-mips.org> 2705 2714 S: Maintained
+1 -1
arch/mips/Kbuild
··· 16 16 17 17 obj-y += kernel/ 18 18 obj-y += mm/ 19 - obj-y += math-emu/ 19 + obj-y += net/ 20 20 21 21 ifdef CONFIG_KVM 22 22 obj-y += kvm/
+1
arch/mips/Kbuild.platforms
··· 18 18 platforms += mti-malta 19 19 platforms += mti-sead3 20 20 platforms += netlogic 21 + platforms += paravirt 21 22 platforms += pmcs-msp71xx 22 23 platforms += pnx833x 23 24 platforms += ralink
+71 -71
arch/mips/Kconfig
··· 12 12 select HAVE_ARCH_KGDB 13 13 select HAVE_ARCH_SECCOMP_FILTER 14 14 select HAVE_ARCH_TRACEHOOK 15 + select HAVE_BPF_JIT if !CPU_MICROMIPS 15 16 select ARCH_HAVE_CUSTOM_GPIO_H 16 17 select HAVE_FUNCTION_TRACER 17 18 select HAVE_FUNCTION_TRACE_MCOUNT_TEST ··· 51 50 select CLONE_BACKWARDS 52 51 select HAVE_DEBUG_STACKOVERFLOW 53 52 select HAVE_CC_STACKPROTECTOR 53 + select CPU_PM if CPU_IDLE 54 + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 54 55 55 56 menu "Machine selection" 56 57 ··· 86 83 select SYS_HAS_EARLY_PRINTK 87 84 select SYS_SUPPORTS_32BIT_KERNEL 88 85 select SYS_SUPPORTS_LITTLE_ENDIAN 86 + select SYS_SUPPORTS_MIPS16 89 87 select SYS_SUPPORTS_ZBOOT_UART16550 90 88 select ARCH_REQUIRE_GPIOLIB 91 89 select VLYNQ ··· 110 106 select SYS_HAS_EARLY_PRINTK 111 107 select SYS_SUPPORTS_32BIT_KERNEL 112 108 select SYS_SUPPORTS_BIG_ENDIAN 109 + select SYS_SUPPORTS_MIPS16 113 110 help 114 111 Support for the Atheros AR71XX/AR724X/AR913X SoCs. 115 112 ··· 127 122 select NO_EXCEPT_FILL 128 123 select SYS_SUPPORTS_32BIT_KERNEL 129 124 select SYS_SUPPORTS_LITTLE_ENDIAN 125 + select SYS_SUPPORTS_MIPS16 130 126 select SYS_HAS_EARLY_PRINTK 131 127 select USE_GENERIC_EARLY_PRINTK_8250 132 128 help ··· 174 168 bool "DECstations" 175 169 select BOOT_ELF32 176 170 select CEVT_DS1287 177 - select CEVT_R4K 171 + select CEVT_R4K if CPU_R4X00 178 172 select CSRC_IOASIC 179 - select CSRC_R4K 173 + select CSRC_R4K if CPU_R4X00 180 174 select CPU_DADDI_WORKAROUNDS if 64BIT 181 175 select CPU_R4000_WORKAROUNDS if 64BIT 182 176 select CPU_R4400_WORKAROUNDS if 64BIT ··· 254 248 select SYS_HAS_CPU_MIPS32_R2 255 249 select SYS_SUPPORTS_BIG_ENDIAN 256 250 select SYS_SUPPORTS_32BIT_KERNEL 251 + select SYS_SUPPORTS_MIPS16 257 252 select SYS_SUPPORTS_MULTITHREADING 258 253 select SYS_HAS_EARLY_PRINTK 259 254 select ARCH_REQUIRE_GPIOLIB ··· 337 330 select SYS_SUPPORTS_LITTLE_ENDIAN 338 331 select SYS_SUPPORTS_MIPS_CMP 339 332 select SYS_SUPPORTS_MIPS_CPS 333 + select 
SYS_SUPPORTS_MIPS16 340 334 select SYS_SUPPORTS_MULTITHREADING 341 335 select SYS_SUPPORTS_SMARTMIPS 342 336 select SYS_SUPPORTS_ZBOOT ··· 369 361 select SYS_SUPPORTS_LITTLE_ENDIAN 370 362 select SYS_SUPPORTS_SMARTMIPS 371 363 select SYS_SUPPORTS_MICROMIPS 364 + select SYS_SUPPORTS_MIPS16 372 365 select USB_EHCI_BIG_ENDIAN_DESC 373 366 select USB_EHCI_BIG_ENDIAN_MMIO 374 367 select USE_OF ··· 389 380 select CEVT_R4K 390 381 select CSRC_R4K 391 382 select SYS_HAS_CPU_VR41XX 383 + select SYS_SUPPORTS_MIPS16 392 384 select ARCH_REQUIRE_GPIOLIB 393 385 394 386 config NXP_STB220 ··· 417 407 select SYS_HAS_CPU_MIPS32_R2 418 408 select SYS_SUPPORTS_32BIT_KERNEL 419 409 select SYS_SUPPORTS_BIG_ENDIAN 410 + select SYS_SUPPORTS_MIPS16 420 411 select IRQ_CPU 421 412 select SERIAL_8250 422 413 select SERIAL_8250_CONSOLE ··· 441 430 select SYS_HAS_CPU_MIPS32_R2 442 431 select SYS_SUPPORTS_32BIT_KERNEL 443 432 select SYS_SUPPORTS_LITTLE_ENDIAN 433 + select SYS_SUPPORTS_MIPS16 444 434 select SYS_HAS_EARLY_PRINTK 445 435 select HAVE_MACH_CLKDEV 446 436 select CLKDEV_LOOKUP ··· 686 674 select SYS_SUPPORTS_BIG_ENDIAN 687 675 select SYS_SUPPORTS_HIGHMEM 688 676 select SYS_SUPPORTS_LITTLE_ENDIAN 689 - select USE_GENERIC_EARLY_PRINTK_8250 690 677 help 691 678 The SNI RM200/300/400 are MIPS-based machines manufactured by 692 679 Siemens Nixdorf Informationssysteme (SNI), parent company of Pyramid ··· 732 721 select ZONE_DMA32 733 722 select HOLES_IN_ZONE 734 723 select ARCH_REQUIRE_GPIOLIB 724 + select LIBFDT 725 + select USE_OF 726 + select ARCH_SPARSEMEM_ENABLE 727 + select SYS_SUPPORTS_SMP 728 + select NR_CPUS_DEFAULT_16 735 729 help 736 730 This option supports all of the Octeon reference boards from Cavium 737 731 Networks. It builds a kernel that dynamically determines the Octeon ··· 805 789 This board is based on Netlogic XLP Processor. 806 790 Say Y here if you have a XLP based board. 
807 791 792 + config MIPS_PARAVIRT 793 + bool "Para-Virtualized guest system" 794 + select CEVT_R4K 795 + select CSRC_R4K 796 + select DMA_COHERENT 797 + select SYS_SUPPORTS_64BIT_KERNEL 798 + select SYS_SUPPORTS_32BIT_KERNEL 799 + select SYS_SUPPORTS_BIG_ENDIAN 800 + select SYS_SUPPORTS_SMP 801 + select NR_CPUS_DEFAULT_4 802 + select SYS_HAS_EARLY_PRINTK 803 + select SYS_HAS_CPU_MIPS32_R2 804 + select SYS_HAS_CPU_MIPS64_R2 805 + select SYS_HAS_CPU_CAVIUM_OCTEON 806 + select HW_HAS_PCI 807 + select SWAP_IO_SPACE 808 + help 809 + This option supports guest running under ???? 810 + 808 811 endchoice 809 812 810 813 source "arch/mips/alchemy/Kconfig" ··· 844 809 source "arch/mips/loongson/Kconfig" 845 810 source "arch/mips/loongson1/Kconfig" 846 811 source "arch/mips/netlogic/Kconfig" 812 + source "arch/mips/paravirt/Kconfig" 847 813 848 814 endmenu 849 815 ··· 1095 1059 select SYS_SUPPORTS_32BIT_KERNEL 1096 1060 select SYS_SUPPORTS_LITTLE_ENDIAN 1097 1061 select SYS_SUPPORTS_BIG_ENDIAN 1062 + select SYS_SUPPORTS_MIPS16 1098 1063 select CPU_MIPSR2_IRQ_VI 1099 1064 1100 1065 config SOC_PNX8335 ··· 1435 1398 config CPU_CAVIUM_OCTEON 1436 1399 bool "Cavium Octeon processor" 1437 1400 depends on SYS_HAS_CPU_CAVIUM_OCTEON 1438 - select ARCH_SPARSEMEM_ENABLE 1439 1401 select CPU_HAS_PREFETCH 1440 1402 select CPU_SUPPORTS_64BIT_KERNEL 1441 - select SYS_SUPPORTS_SMP 1442 - select NR_CPUS_DEFAULT_16 1443 1403 select WEAK_ORDERING 1444 1404 select CPU_SUPPORTS_HIGHMEM 1445 1405 select CPU_SUPPORTS_HUGEPAGES 1446 - select LIBFDT 1447 - select USE_OF 1448 1406 select USB_EHCI_BIG_ENDIAN_MMIO 1449 1407 select MIPS_L1_CACHE_SHIFT_7 1450 1408 help ··· 1691 1659 config SYS_HAS_CPU_XLP 1692 1660 bool 1693 1661 1662 + config MIPS_MALTA_PM 1663 + depends on MIPS_MALTA 1664 + depends on PCI 1665 + bool 1666 + default y 1667 + 1694 1668 # 1695 1669 # CPU may reorder R->R, R->W, W->R, W->W 1696 1670 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC ··· 1880 1842 1881 
1843 config CEVT_GIC 1882 1844 bool "Use GIC global counter for clock events" 1883 - depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC) 1845 + depends on IRQ_GIC && !MIPS_SEAD3 1884 1846 help 1885 1847 Use the GIC global counter for the clock events. The R4K clock 1886 1848 event driver is always present, so if the platform ends up not ··· 1933 1895 bool 1934 1896 default y if !(CPU_R3000 || CPU_R8000 || CPU_SB1 || CPU_TX39XX || CPU_CAVIUM_OCTEON) 1935 1897 1936 - choice 1937 - prompt "MIPS MT options" 1938 - 1939 - config MIPS_MT_DISABLED 1940 - bool "Disable multithreading support" 1941 - help 1942 - Use this option if your platform does not support the MT ASE 1943 - which is hardware multithreading support. On systems without 1944 - an MT-enabled processor, this will be the only option that is 1945 - available in this menu. 1946 - 1947 1898 config MIPS_MT_SMP 1948 - bool "Use 1 TC on each available VPE for SMP" 1899 + bool "MIPS MT SMP support (1 TC on each available VPE)" 1949 1900 depends on SYS_SUPPORTS_MULTITHREADING 1950 1901 select CPU_MIPSR2_IRQ_VI 1951 1902 select CPU_MIPSR2_IRQ_EI ··· 1952 1925 virtual processors which supports SMP. This is equivalent to the 1953 1926 Intel Hyperthreading feature. For further information go to 1954 1927 <http://www.imgtec.com/mips/mips-multithreading.asp>. 1955 - 1956 - config MIPS_MT_SMTC 1957 - bool "Use all TCs on all VPEs for SMP (DEPRECATED)" 1958 - depends on CPU_MIPS32_R2 1959 - depends on SYS_SUPPORTS_MULTITHREADING 1960 - depends on !MIPS_CPS 1961 - select CPU_MIPSR2_IRQ_VI 1962 - select CPU_MIPSR2_IRQ_EI 1963 - select MIPS_MT 1964 - select SMP 1965 - select SMP_UP 1966 - select SYS_SUPPORTS_SMP 1967 - select NR_CPUS_DEFAULT_8 1968 - help 1969 - This is a kernel model which is known as SMTC. This is 1970 - supported on cores with the MT ASE and presents all TCs 1971 - available on all VPEs to support SMP. For further 1972 - information see <http://www.linux-mips.org/wiki/34K#SMTC>. 
1973 - 1974 - endchoice 1975 1928 1976 1929 config MIPS_MT 1977 1930 bool ··· 1974 1967 config MIPS_MT_FPAFF 1975 1968 bool "Dynamic FPU affinity for FP-intensive threads" 1976 1969 default y 1977 - depends on MIPS_MT_SMP || MIPS_MT_SMTC 1970 + depends on MIPS_MT_SMP 1978 1971 1979 1972 config MIPS_VPE_LOADER 1980 1973 bool "VPE loader support." ··· 1995 1988 bool 1996 1989 default "y" 1997 1990 depends on MIPS_VPE_LOADER && !MIPS_CMP 1998 - 1999 - config MIPS_MT_SMTC_IM_BACKSTOP 2000 - bool "Use per-TC register bits as backstop for inhibited IM bits" 2001 - depends on MIPS_MT_SMTC 2002 - default n 2003 - help 2004 - To support multiple TC microthreads acting as "CPUs" within 2005 - a VPE, VPE-wide interrupt mask bits must be specially manipulated 2006 - during interrupt handling. To support legacy drivers and interrupt 2007 - controller management code, SMTC has a "backstop" to track and 2008 - if necessary restore the interrupt mask. This has some performance 2009 - impact on interrupt service overhead. 2010 - 2011 - config MIPS_MT_SMTC_IRQAFF 2012 - bool "Support IRQ affinity API" 2013 - depends on MIPS_MT_SMTC 2014 - default n 2015 - help 2016 - Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) 2017 - for SMTC Linux kernel. Requires platform support, of which 2018 - an example can be found in the MIPS kernel i8259 and Malta 2019 - platform code. Adds some overhead to interrupt dispatch, and 2020 - should be used only if you know what you are doing. 
2021 1991 2022 1992 config MIPS_VPE_LOADER_TOM 2023 1993 bool "Load VPE program into memory hidden from linux" ··· 2023 2039 2024 2040 config MIPS_CMP 2025 2041 bool "MIPS CMP framework support (DEPRECATED)" 2026 - depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC 2042 + depends on SYS_SUPPORTS_MIPS_CMP 2027 2043 select MIPS_GIC_IPI 2028 2044 select SYNC_R4K 2029 2045 select WEAK_ORDERING ··· 2041 2057 depends on SYS_SUPPORTS_MIPS_CPS 2042 2058 select MIPS_CM 2043 2059 select MIPS_CPC 2060 + select MIPS_CPS_PM if HOTPLUG_CPU 2044 2061 select MIPS_GIC_IPI 2045 2062 select SMP 2046 2063 select SYNC_R4K if (CEVT_R4K || CSRC_R4K) 2064 + select SYS_SUPPORTS_HOTPLUG_CPU 2047 2065 select SYS_SUPPORTS_SMP 2048 2066 select WEAK_ORDERING 2049 2067 help ··· 2054 2068 enabled the kernel will probe for other cores and boot them with 2055 2069 no external assistance. It is safe to enable this when hardware 2056 2070 support is unavailable. 2071 + 2072 + config MIPS_CPS_PM 2073 + bool 2057 2074 2058 2075 config MIPS_GIC_IPI 2059 2076 bool ··· 2188 2199 config SYS_SUPPORTS_MICROMIPS 2189 2200 bool 2190 2201 2202 + config SYS_SUPPORTS_MIPS16 2203 + bool 2204 + help 2205 + This option must be set if a kernel might be executed on a MIPS16- 2206 + enabled CPU even if MIPS16 is not actually being used. In other 2207 + words, it makes the kernel MIPS16-tolerant. 2208 + 2191 2209 config CPU_SUPPORTS_MSA 2192 2210 bool 2193 2211 ··· 2235 2239 2236 2240 config HW_PERF_EVENTS 2237 2241 bool "Enable hardware performance counter support for perf events" 2238 - depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) 2242 + depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) 2239 2243 default y 2240 2244 help 2241 2245 Enable hardware performance counter support for perf events. 
If ··· 2293 2297 bool 2294 2298 2295 2299 config NR_CPUS 2296 - int "Maximum number of CPUs (2-64)" 2297 - range 2 64 2300 + int "Maximum number of CPUs (2-256)" 2301 + range 2 256 2298 2302 depends on SMP 2299 2303 default "4" if NR_CPUS_DEFAULT_4 2300 2304 default "8" if NR_CPUS_DEFAULT_8 ··· 2667 2671 config MIPS_EXTERNAL_TIMER 2668 2672 bool 2669 2673 2670 - if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER 2671 2674 menu "CPU Power Management" 2675 + 2676 + if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER 2672 2677 source "drivers/cpufreq/Kconfig" 2673 - endmenu 2674 2678 endif 2679 + 2680 + source "drivers/cpuidle/Kconfig" 2681 + 2682 + endmenu 2675 2683 2676 2684 source "net/Kconfig" 2677 2685
-9
arch/mips/Kconfig.debug
··· 79 79 80 80 Normally, you will choose 'N' here. 81 81 82 - config SMTC_IDLE_HOOK_DEBUG 83 - bool "Enable additional debug checks before going into CPU idle loop" 84 - depends on DEBUG_KERNEL && MIPS_MT_SMTC 85 - help 86 - This option enables Enable additional debug checks before going into 87 - CPU idle loop. For details on these checks, see 88 - arch/mips/kernel/smtc.c. This debugging option result in significant 89 - overhead so should be disabled in production kernels. 90 - 91 82 config SB1XXX_CORELIS 92 83 bool "Corelis Debugger" 93 84 depends on SIBYTE_SB1xxx_SOC
+2 -1
arch/mips/Makefile
··· 120 120 -fno-omit-frame-pointer 121 121 122 122 ifeq ($(CONFIG_CPU_HAS_MSA),y) 123 - toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -mmsa) 123 + toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa) 124 124 cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA 125 125 endif 126 126 ··· 251 251 head-y := arch/mips/kernel/head.o 252 252 253 253 libs-y += arch/mips/lib/ 254 + libs-y += arch/mips/math-emu/ 254 255 255 256 # See arch/mips/Kbuild for content of core part of the kernel 256 257 core-y += arch/mips/
+1 -1
arch/mips/alchemy/board-xxs1500.c
··· 49 49 prom_init_cmdline(); 50 50 51 51 memsize_str = prom_getenv("memsize"); 52 - if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) 52 + if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) 53 53 memsize = 0x04000000; 54 54 55 55 add_memory_region(0, memsize, BOOT_MEM_RAM);
+6
arch/mips/alchemy/common/setup.c
··· 67 67 case ALCHEMY_CPU_AU1500: 68 68 case ALCHEMY_CPU_AU1100: 69 69 coherentio = 0; 70 + break; 71 + case ALCHEMY_CPU_AU1200: 72 + /* Au1200 AB USB does not support coherent memory */ 73 + if (0 == (read_c0_prid() & PRID_REV_MASK)) 74 + coherentio = 0; 75 + break; 70 76 } 71 77 72 78 board_setup(); /* board specific setup */
+2 -24
arch/mips/alchemy/common/usb.c
··· 355 355 } 356 356 } 357 357 358 - static inline int au1200_coherency_bug(void) 359 - { 360 - #if defined(CONFIG_DMA_COHERENT) 361 - /* Au1200 AB USB does not support coherent memory */ 362 - if (!(read_c0_prid() & PRID_REV_MASK)) { 363 - printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n"); 364 - printk(KERN_INFO "Au1200 USB: update your board or re-configure" 365 - " the kernel\n"); 366 - return -ENODEV; 367 - } 368 - #endif 369 - return 0; 370 - } 371 - 372 358 static inline int au1200_usb_control(int block, int enable) 373 359 { 374 360 void __iomem *base = 375 361 (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); 376 - int ret = 0; 377 362 378 363 switch (block) { 379 364 case ALCHEMY_USB_OHCI0: 380 - ret = au1200_coherency_bug(); 381 - if (ret && enable) 382 - goto out; 383 365 __au1200_ohci_control(base, enable); 384 366 break; 385 367 case ALCHEMY_USB_UDC0: 386 368 __au1200_udc_control(base, enable); 387 369 break; 388 370 case ALCHEMY_USB_EHCI0: 389 - ret = au1200_coherency_bug(); 390 - if (ret && enable) 391 - goto out; 392 371 __au1200_ehci_control(base, enable); 393 372 break; 394 373 default: 395 - ret = -ENODEV; 374 + return -ENODEV; 396 375 } 397 - out: 398 - return ret; 376 + return 0; 399 377 } 400 378 401 379
+2 -2
arch/mips/alchemy/devboards/pm.c
··· 158 158 int tmp; 159 159 160 160 if (ATTRCMP(timer_timeout)) { 161 - tmp = strict_strtoul(instr, 0, &l); 161 + tmp = kstrtoul(instr, 0, &l); 162 162 if (tmp) 163 163 return tmp; 164 164 ··· 181 181 } 182 182 183 183 } else if (ATTRCMP(wakemsk)) { 184 - tmp = strict_strtoul(instr, 0, &l); 184 + tmp = kstrtoul(instr, 0, &l); 185 185 if (tmp) 186 186 return tmp; 187 187
+11 -8
arch/mips/bcm47xx/prom.c
··· 69 69 * BCM47XX uses 128MB for addressing the ram, if the system contains 70 70 * less that that amount of ram it remaps the ram more often into the 71 71 * available space. 72 - * Accessing memory after 128MB will cause an exception. 73 - * max contains the biggest possible address supported by the platform. 74 - * If the method wants to try something above we assume 128MB ram. 75 72 */ 76 - off = (unsigned long)prom_init; 77 - max = off | ((128 << 20) - 1); 78 - for (mem = (1 << 20); mem < (128 << 20); mem += (1 << 20)) { 79 - if ((off + mem) > max) { 80 - mem = (128 << 20); 73 + 74 + /* Physical address, without mapping to any kernel segment */ 75 + off = CPHYSADDR((unsigned long)prom_init); 76 + 77 + /* Accessing memory after 128 MiB will cause an exception */ 78 + max = 128 << 20; 79 + 80 + for (mem = 1 << 20; mem < max; mem += 1 << 20) { 81 + /* Loop condition may be not enough, off may be over 1 MiB */ 82 + if (off + mem >= max) { 83 + mem = max; 81 84 printk(KERN_DEBUG "assume 128MB RAM\n"); 82 85 break; 83 86 }
+11 -12
arch/mips/cavium-octeon/Kconfig
··· 10 10 non-CN63XXP1 hardware, so it is recommended to select "n" 11 11 unless it is known the workarounds are needed. 12 12 13 + config CAVIUM_OCTEON_CVMSEG_SIZE 14 + int "Number of L1 cache lines reserved for CVMSEG memory" 15 + range 0 54 16 + default 1 17 + help 18 + CVMSEG LM is a segment that accesses portions of the dcache as a 19 + local memory; the larger CVMSEG is, the smaller the cache is. 20 + This selects the size of CVMSEG LM, which is in cache blocks. The 21 + legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is 22 + between zero and 6192 bytes). 23 + 13 24 endif # CPU_CAVIUM_OCTEON 14 25 15 26 if CAVIUM_OCTEON_SOC ··· 33 22 address and use the 2nd uart for output. This allows a kernel built 34 23 with this option to be run at the same time as one built without this 35 24 option. 36 - 37 - config CAVIUM_OCTEON_CVMSEG_SIZE 38 - int "Number of L1 cache lines reserved for CVMSEG memory" 39 - range 0 54 40 - default 1 41 - help 42 - CVMSEG LM is a segment that accesses portions of the dcache as a 43 - local memory; the larger CVMSEG is, the smaller the cache is. 44 - This selects the size of CVMSEG LM, which is in cache blocks. The 45 - legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is 46 - between zero and 6192 bytes). 47 25 48 26 config CAVIUM_OCTEON_LOCK_L2 49 27 bool "Lock often used kernel code in the L2" ··· 85 85 def_bool y 86 86 select IOMMU_HELPER 87 87 select NEED_SG_DMA_LENGTH 88 - 89 88 90 89 config OCTEON_ILM 91 90 tristate "Module to measure interrupt latency using Octeon CIU Timer"
+166
arch/mips/cavium-octeon/executive/cvmx-helper.c
··· 106 106 EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface); 107 107 108 108 /** 109 + * @INTERNAL 110 + * Return interface mode for CN68xx. 111 + */ 112 + static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface) 113 + { 114 + union cvmx_mio_qlmx_cfg qlm_cfg; 115 + switch (interface) { 116 + case 0: 117 + qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0)); 118 + /* QLM is disabled when QLM SPD is 15. */ 119 + if (qlm_cfg.s.qlm_spd == 15) 120 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 121 + 122 + if (qlm_cfg.s.qlm_cfg == 2) 123 + return CVMX_HELPER_INTERFACE_MODE_SGMII; 124 + else if (qlm_cfg.s.qlm_cfg == 3) 125 + return CVMX_HELPER_INTERFACE_MODE_XAUI; 126 + else 127 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 128 + case 2: 129 + case 3: 130 + case 4: 131 + qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface)); 132 + /* QLM is disabled when QLM SPD is 15. */ 133 + if (qlm_cfg.s.qlm_spd == 15) 134 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 135 + 136 + if (qlm_cfg.s.qlm_cfg == 2) 137 + return CVMX_HELPER_INTERFACE_MODE_SGMII; 138 + else if (qlm_cfg.s.qlm_cfg == 3) 139 + return CVMX_HELPER_INTERFACE_MODE_XAUI; 140 + else 141 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 142 + case 7: 143 + qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3)); 144 + /* QLM is disabled when QLM SPD is 15. 
*/ 145 + if (qlm_cfg.s.qlm_spd == 15) { 146 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 147 + } else if (qlm_cfg.s.qlm_cfg != 0) { 148 + qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1)); 149 + if (qlm_cfg.s.qlm_cfg != 0) 150 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 151 + } 152 + return CVMX_HELPER_INTERFACE_MODE_NPI; 153 + case 8: 154 + return CVMX_HELPER_INTERFACE_MODE_LOOP; 155 + default: 156 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 157 + } 158 + } 159 + 160 + /** 161 + * @INTERNAL 162 + * Return interface mode for an Octeon II 163 + */ 164 + static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface) 165 + { 166 + union cvmx_gmxx_inf_mode mode; 167 + 168 + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) 169 + return __cvmx_get_mode_cn68xx(interface); 170 + 171 + if (interface == 2) 172 + return CVMX_HELPER_INTERFACE_MODE_NPI; 173 + 174 + if (interface == 3) 175 + return CVMX_HELPER_INTERFACE_MODE_LOOP; 176 + 177 + /* Only present in CN63XX & CN66XX Octeon model */ 178 + if ((OCTEON_IS_MODEL(OCTEON_CN63XX) && 179 + (interface == 4 || interface == 5)) || 180 + (OCTEON_IS_MODEL(OCTEON_CN66XX) && 181 + interface >= 4 && interface <= 7)) { 182 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 183 + } 184 + 185 + if (OCTEON_IS_MODEL(OCTEON_CN66XX)) { 186 + union cvmx_mio_qlmx_cfg mio_qlm_cfg; 187 + 188 + /* QLM2 is SGMII0 and QLM1 is SGMII1 */ 189 + if (interface == 0) 190 + mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2)); 191 + else if (interface == 1) 192 + mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1)); 193 + else 194 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 195 + 196 + if (mio_qlm_cfg.s.qlm_spd == 15) 197 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 198 + 199 + if (mio_qlm_cfg.s.qlm_cfg == 9) 200 + return CVMX_HELPER_INTERFACE_MODE_SGMII; 201 + else if (mio_qlm_cfg.s.qlm_cfg == 11) 202 + return CVMX_HELPER_INTERFACE_MODE_XAUI; 203 + else 204 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 205 + } else if 
(OCTEON_IS_MODEL(OCTEON_CN61XX)) { 206 + union cvmx_mio_qlmx_cfg qlm_cfg; 207 + 208 + if (interface == 0) { 209 + qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2)); 210 + if (qlm_cfg.s.qlm_cfg == 2) 211 + return CVMX_HELPER_INTERFACE_MODE_SGMII; 212 + else if (qlm_cfg.s.qlm_cfg == 3) 213 + return CVMX_HELPER_INTERFACE_MODE_XAUI; 214 + else 215 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 216 + } else if (interface == 1) { 217 + qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0)); 218 + if (qlm_cfg.s.qlm_cfg == 2) 219 + return CVMX_HELPER_INTERFACE_MODE_SGMII; 220 + else if (qlm_cfg.s.qlm_cfg == 3) 221 + return CVMX_HELPER_INTERFACE_MODE_XAUI; 222 + else 223 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 224 + } 225 + } else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) { 226 + if (interface == 0) { 227 + union cvmx_mio_qlmx_cfg qlm_cfg; 228 + qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0)); 229 + if (qlm_cfg.s.qlm_cfg == 2) 230 + return CVMX_HELPER_INTERFACE_MODE_SGMII; 231 + } 232 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 233 + } 234 + 235 + if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX)) 236 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 237 + 238 + mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); 239 + 240 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 241 + switch (mode.cn63xx.mode) { 242 + case 0: 243 + return CVMX_HELPER_INTERFACE_MODE_SGMII; 244 + case 1: 245 + return CVMX_HELPER_INTERFACE_MODE_XAUI; 246 + default: 247 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 248 + } 249 + } else { 250 + if (!mode.s.en) 251 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 252 + 253 + if (mode.s.type) 254 + return CVMX_HELPER_INTERFACE_MODE_GMII; 255 + else 256 + return CVMX_HELPER_INTERFACE_MODE_RGMII; 257 + } 258 + } 259 + 260 + /** 109 261 * Get the operating mode of an interface. Depending on the Octeon 110 262 * chip and configuration, this function returns an enumeration 111 263 * of the type of packet I/O supported by an interface. 
··· 270 118 cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface) 271 119 { 272 120 union cvmx_gmxx_inf_mode mode; 121 + 122 + if (interface < 0 || 123 + interface >= cvmx_helper_get_number_of_interfaces()) 124 + return CVMX_HELPER_INTERFACE_MODE_DISABLED; 125 + 126 + /* 127 + * Octeon II models 128 + */ 129 + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) 130 + return __cvmx_get_mode_octeon2(interface); 131 + 132 + /* 133 + * Octeon and Octeon Plus models 134 + */ 273 135 if (interface == 2) 274 136 return CVMX_HELPER_INTERFACE_MODE_NPI; 275 137
+2
arch/mips/cavium-octeon/octeon-irq.c
··· 1260 1260 for (i = 0; i < 4; i++) 1261 1261 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); 1262 1262 1263 + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); 1263 1264 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); 1264 1265 for (i = 0; i < 4; i++) 1265 1266 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 1266 1267 1267 1268 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1269 + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); 1268 1270 1269 1271 /* CIU_1 */ 1270 1272 for (i = 0; i < 16; i++)
-17
arch/mips/cavium-octeon/setup.c
··· 729 729 octeon_write_lcd("Linux"); 730 730 #endif 731 731 732 - #ifdef CONFIG_CAVIUM_GDB 733 - /* 734 - * When debugging the linux kernel, force the cores to enter 735 - * the debug exception handler to break in. 736 - */ 737 - if (octeon_get_boot_debug_flag()) { 738 - cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num()); 739 - cvmx_read_csr(CVMX_CIU_DINT); 740 - } 741 - #endif 742 - 743 732 octeon_setup_delays(); 744 733 745 734 /* ··· 768 779 MAX_MEMORY = 32ull << 30; 769 780 if (*p == '@') 770 781 RESERVE_LOW_MEM = memparse(p + 1, &p); 771 - } else if (strcmp(arg, "ecc_verbose") == 0) { 772 - #ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC 773 - __cvmx_interrupt_ecc_report_single_bit_errors = 1; 774 - pr_notice("Reporting of single bit ECC errors is " 775 - "turned on\n"); 776 - #endif 777 782 #ifdef CONFIG_KEXEC 778 783 } else if (strncmp(arg, "crashkernel=", 12) == 0) { 779 784 crashk_size = memparse(arg+12, &p);
-25
arch/mips/cavium-octeon/smp.c
··· 218 218 */ 219 219 static void octeon_smp_finish(void) 220 220 { 221 - #ifdef CONFIG_CAVIUM_GDB 222 - unsigned long tmp; 223 - /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0 224 - to be not masked by this core so we know the signal is received by 225 - someone */ 226 - asm volatile ("dmfc0 %0, $22\n" 227 - "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp)); 228 - #endif 229 - 230 221 octeon_user_io_init(); 231 222 232 223 /* to generate the first CPU timer interrupt */ 233 224 write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); 234 225 local_irq_enable(); 235 - } 236 - 237 - /** 238 - * Hook for after all CPUs are online 239 - */ 240 - static void octeon_cpus_done(void) 241 - { 242 - #ifdef CONFIG_CAVIUM_GDB 243 - unsigned long tmp; 244 - /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0 245 - to be not masked by this core so we know the signal is received by 246 - someone */ 247 - asm volatile ("dmfc0 %0, $22\n" 248 - "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp)); 249 - #endif 250 226 } 251 227 252 228 #ifdef CONFIG_HOTPLUG_CPU ··· 381 405 .send_ipi_mask = octeon_send_ipi_mask, 382 406 .init_secondary = octeon_init_secondary, 383 407 .smp_finish = octeon_smp_finish, 384 - .cpus_done = octeon_cpus_done, 385 408 .boot_secondary = octeon_boot_secondary, 386 409 .smp_setup = octeon_smp_setup, 387 410 .prepare_cpus = octeon_prepare_cpus,
+1 -2
arch/mips/configs/ath79_defconfig
··· 46 46 CONFIG_MTD_REDBOOT_PARTS=y 47 47 CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2 48 48 CONFIG_MTD_CMDLINE_PARTS=y 49 - CONFIG_MTD_CHAR=y 50 49 CONFIG_MTD_BLOCK=y 51 50 CONFIG_MTD_CFI=y 52 51 CONFIG_MTD_JEDECPROBE=y ··· 53 54 CONFIG_MTD_COMPLEX_MAPPINGS=y 54 55 CONFIG_MTD_PHYSMAP=y 55 56 CONFIG_MTD_M25P80=y 56 - # CONFIG_M25PXX_USE_FAST_READ is not set 57 + CONFIG_MTD_SPI_NOR=y 57 58 CONFIG_NETDEVICES=y 58 59 # CONFIG_NET_PACKET_ENGINE is not set 59 60 CONFIG_ATH_COMMON=m
+1
arch/mips/configs/db1xxx_defconfig
··· 113 113 CONFIG_MTD_NAND_ECC_BCH=y 114 114 CONFIG_MTD_NAND_AU1550=y 115 115 CONFIG_MTD_NAND_PLATFORM=y 116 + CONFIG_MTD_SPI_NOR=y 116 117 CONFIG_EEPROM_AT24=y 117 118 CONFIG_EEPROM_AT25=y 118 119 CONFIG_SCSI_TGT=y
-196
arch/mips/configs/maltasmtc_defconfig
··· 1 - CONFIG_MIPS_MALTA=y 2 - CONFIG_CPU_LITTLE_ENDIAN=y 3 - CONFIG_CPU_MIPS32_R2=y 4 - CONFIG_PAGE_SIZE_16KB=y 5 - CONFIG_MIPS_MT_SMTC=y 6 - # CONFIG_MIPS_MT_FPAFF is not set 7 - CONFIG_NR_CPUS=9 8 - CONFIG_HZ_48=y 9 - CONFIG_LOCALVERSION="smtc" 10 - CONFIG_SYSVIPC=y 11 - CONFIG_POSIX_MQUEUE=y 12 - CONFIG_AUDIT=y 13 - CONFIG_IKCONFIG=y 14 - CONFIG_IKCONFIG_PROC=y 15 - CONFIG_LOG_BUF_SHIFT=15 16 - CONFIG_SYSCTL_SYSCALL=y 17 - CONFIG_EMBEDDED=y 18 - CONFIG_SLAB=y 19 - CONFIG_MODULES=y 20 - CONFIG_MODULE_UNLOAD=y 21 - CONFIG_MODVERSIONS=y 22 - CONFIG_MODULE_SRCVERSION_ALL=y 23 - # CONFIG_BLK_DEV_BSG is not set 24 - CONFIG_PCI=y 25 - # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 26 - CONFIG_NET=y 27 - CONFIG_PACKET=y 28 - CONFIG_UNIX=y 29 - CONFIG_XFRM_USER=m 30 - CONFIG_NET_KEY=y 31 - CONFIG_INET=y 32 - CONFIG_IP_MULTICAST=y 33 - CONFIG_IP_ADVANCED_ROUTER=y 34 - CONFIG_IP_MULTIPLE_TABLES=y 35 - CONFIG_IP_ROUTE_MULTIPATH=y 36 - CONFIG_IP_ROUTE_VERBOSE=y 37 - CONFIG_IP_PNP=y 38 - CONFIG_IP_PNP_DHCP=y 39 - CONFIG_IP_PNP_BOOTP=y 40 - CONFIG_NET_IPIP=m 41 - CONFIG_IP_MROUTE=y 42 - CONFIG_IP_PIMSM_V1=y 43 - CONFIG_IP_PIMSM_V2=y 44 - CONFIG_SYN_COOKIES=y 45 - CONFIG_INET_AH=m 46 - CONFIG_INET_ESP=m 47 - CONFIG_INET_IPCOMP=m 48 - # CONFIG_INET_LRO is not set 49 - CONFIG_INET6_AH=m 50 - CONFIG_INET6_ESP=m 51 - CONFIG_INET6_IPCOMP=m 52 - CONFIG_IPV6_TUNNEL=m 53 - CONFIG_BRIDGE=m 54 - CONFIG_VLAN_8021Q=m 55 - CONFIG_ATALK=m 56 - CONFIG_DEV_APPLETALK=m 57 - CONFIG_IPDDP=m 58 - CONFIG_IPDDP_ENCAP=y 59 - CONFIG_NET_SCHED=y 60 - CONFIG_NET_SCH_CBQ=m 61 - CONFIG_NET_SCH_HTB=m 62 - CONFIG_NET_SCH_HFSC=m 63 - CONFIG_NET_SCH_PRIO=m 64 - CONFIG_NET_SCH_RED=m 65 - CONFIG_NET_SCH_SFQ=m 66 - CONFIG_NET_SCH_TEQL=m 67 - CONFIG_NET_SCH_TBF=m 68 - CONFIG_NET_SCH_GRED=m 69 - CONFIG_NET_SCH_DSMARK=m 70 - CONFIG_NET_SCH_NETEM=m 71 - CONFIG_NET_SCH_INGRESS=m 72 - CONFIG_NET_CLS_BASIC=m 73 - CONFIG_NET_CLS_TCINDEX=m 74 - CONFIG_NET_CLS_ROUTE4=m 75 - CONFIG_NET_CLS_FW=m 76 - 
CONFIG_NET_CLS_U32=m 77 - CONFIG_NET_CLS_RSVP=m 78 - CONFIG_NET_CLS_RSVP6=m 79 - CONFIG_NET_CLS_ACT=y 80 - CONFIG_NET_ACT_POLICE=y 81 - CONFIG_NET_CLS_IND=y 82 - # CONFIG_WIRELESS is not set 83 - CONFIG_DEVTMPFS=y 84 - CONFIG_BLK_DEV_LOOP=y 85 - CONFIG_BLK_DEV_CRYPTOLOOP=m 86 - CONFIG_IDE=y 87 - # CONFIG_IDE_PROC_FS is not set 88 - # CONFIG_IDEPCI_PCIBUS_ORDER is not set 89 - CONFIG_BLK_DEV_GENERIC=y 90 - CONFIG_BLK_DEV_PIIX=y 91 - CONFIG_SCSI=y 92 - CONFIG_BLK_DEV_SD=y 93 - CONFIG_CHR_DEV_SG=y 94 - # CONFIG_SCSI_LOWLEVEL is not set 95 - CONFIG_NETDEVICES=y 96 - # CONFIG_NET_VENDOR_3COM is not set 97 - # CONFIG_NET_VENDOR_ADAPTEC is not set 98 - # CONFIG_NET_VENDOR_ALTEON is not set 99 - CONFIG_PCNET32=y 100 - # CONFIG_NET_VENDOR_ATHEROS is not set 101 - # CONFIG_NET_VENDOR_BROADCOM is not set 102 - # CONFIG_NET_VENDOR_BROCADE is not set 103 - # CONFIG_NET_VENDOR_CHELSIO is not set 104 - # CONFIG_NET_VENDOR_CISCO is not set 105 - # CONFIG_NET_VENDOR_DEC is not set 106 - # CONFIG_NET_VENDOR_DLINK is not set 107 - # CONFIG_NET_VENDOR_EMULEX is not set 108 - # CONFIG_NET_VENDOR_EXAR is not set 109 - # CONFIG_NET_VENDOR_HP is not set 110 - # CONFIG_NET_VENDOR_INTEL is not set 111 - # CONFIG_NET_VENDOR_MARVELL is not set 112 - # CONFIG_NET_VENDOR_MELLANOX is not set 113 - # CONFIG_NET_VENDOR_MICREL is not set 114 - # CONFIG_NET_VENDOR_MYRI is not set 115 - # CONFIG_NET_VENDOR_NATSEMI is not set 116 - # CONFIG_NET_VENDOR_NVIDIA is not set 117 - # CONFIG_NET_VENDOR_OKI is not set 118 - # CONFIG_NET_PACKET_ENGINE is not set 119 - # CONFIG_NET_VENDOR_QLOGIC is not set 120 - # CONFIG_NET_VENDOR_REALTEK is not set 121 - # CONFIG_NET_VENDOR_RDC is not set 122 - # CONFIG_NET_VENDOR_SEEQ is not set 123 - # CONFIG_NET_VENDOR_SILAN is not set 124 - # CONFIG_NET_VENDOR_SIS is not set 125 - # CONFIG_NET_VENDOR_SMSC is not set 126 - # CONFIG_NET_VENDOR_STMICRO is not set 127 - # CONFIG_NET_VENDOR_SUN is not set 128 - # CONFIG_NET_VENDOR_TEHUTI is not set 129 - # CONFIG_NET_VENDOR_TI 
is not set 130 - # CONFIG_NET_VENDOR_TOSHIBA is not set 131 - # CONFIG_NET_VENDOR_VIA is not set 132 - # CONFIG_WLAN is not set 133 - # CONFIG_VT is not set 134 - CONFIG_LEGACY_PTY_COUNT=16 135 - CONFIG_SERIAL_8250=y 136 - CONFIG_SERIAL_8250_CONSOLE=y 137 - CONFIG_HW_RANDOM=y 138 - # CONFIG_HWMON is not set 139 - CONFIG_VIDEO_OUTPUT_CONTROL=m 140 - CONFIG_FB=y 141 - CONFIG_FIRMWARE_EDID=y 142 - CONFIG_FB_MATROX=y 143 - CONFIG_FB_MATROX_G=y 144 - CONFIG_USB=y 145 - CONFIG_USB_EHCI_HCD=y 146 - # CONFIG_USB_EHCI_TT_NEWSCHED is not set 147 - CONFIG_USB_UHCI_HCD=y 148 - CONFIG_USB_STORAGE=y 149 - CONFIG_NEW_LEDS=y 150 - CONFIG_LEDS_CLASS=y 151 - CONFIG_LEDS_TRIGGERS=y 152 - CONFIG_LEDS_TRIGGER_TIMER=y 153 - CONFIG_LEDS_TRIGGER_IDE_DISK=y 154 - CONFIG_LEDS_TRIGGER_HEARTBEAT=y 155 - CONFIG_LEDS_TRIGGER_BACKLIGHT=y 156 - CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 157 - CONFIG_RTC_CLASS=y 158 - CONFIG_RTC_DRV_CMOS=y 159 - CONFIG_EXT2_FS=y 160 - CONFIG_EXT3_FS=y 161 - # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 162 - CONFIG_XFS_FS=y 163 - CONFIG_XFS_QUOTA=y 164 - CONFIG_XFS_POSIX_ACL=y 165 - CONFIG_QUOTA=y 166 - CONFIG_QFMT_V2=y 167 - CONFIG_MSDOS_FS=m 168 - CONFIG_VFAT_FS=m 169 - CONFIG_PROC_KCORE=y 170 - CONFIG_TMPFS=y 171 - CONFIG_NFS_FS=y 172 - CONFIG_ROOT_NFS=y 173 - CONFIG_CIFS=m 174 - CONFIG_CIFS_WEAK_PW_HASH=y 175 - CONFIG_CIFS_XATTR=y 176 - CONFIG_CIFS_POSIX=y 177 - CONFIG_NLS_CODEPAGE_437=m 178 - CONFIG_NLS_ISO8859_1=m 179 - # CONFIG_FTRACE is not set 180 - CONFIG_CRYPTO_NULL=m 181 - CONFIG_CRYPTO_PCBC=m 182 - CONFIG_CRYPTO_HMAC=y 183 - CONFIG_CRYPTO_MICHAEL_MIC=m 184 - CONFIG_CRYPTO_SHA512=m 185 - CONFIG_CRYPTO_TGR192=m 186 - CONFIG_CRYPTO_WP512=m 187 - CONFIG_CRYPTO_ANUBIS=m 188 - CONFIG_CRYPTO_BLOWFISH=m 189 - CONFIG_CRYPTO_CAST5=m 190 - CONFIG_CRYPTO_CAST6=m 191 - CONFIG_CRYPTO_KHAZAD=m 192 - CONFIG_CRYPTO_SERPENT=m 193 - CONFIG_CRYPTO_TEA=m 194 - CONFIG_CRYPTO_TWOFISH=m 195 - # CONFIG_CRYPTO_ANSI_CPRNG is not set 196 - # CONFIG_CRYPTO_HW is not set
+1 -2
arch/mips/configs/maltasmvp_defconfig
··· 4 4 CONFIG_PAGE_SIZE_16KB=y 5 5 CONFIG_MIPS_MT_SMP=y 6 6 CONFIG_SCHED_SMT=y 7 - CONFIG_MIPS_CMP=y 7 + CONFIG_MIPS_CPS=y 8 8 CONFIG_NR_CPUS=8 9 9 CONFIG_HZ_100=y 10 - CONFIG_LOCALVERSION="cmp" 11 10 CONFIG_SYSVIPC=y 12 11 CONFIG_POSIX_MQUEUE=y 13 12 CONFIG_AUDIT=y
+1 -2
arch/mips/configs/maltasmvp_eva_defconfig
··· 5 5 CONFIG_PAGE_SIZE_16KB=y 6 6 CONFIG_MIPS_MT_SMP=y 7 7 CONFIG_SCHED_SMT=y 8 - CONFIG_MIPS_CMP=y 8 + CONFIG_MIPS_CPS=y 9 9 CONFIG_NR_CPUS=8 10 10 CONFIG_HZ_100=y 11 - CONFIG_LOCALVERSION="cmp" 12 11 CONFIG_SYSVIPC=y 13 12 CONFIG_POSIX_MQUEUE=y 14 13 CONFIG_AUDIT=y
+103
arch/mips/configs/mips_paravirt_defconfig
··· 1 + CONFIG_MIPS_PARAVIRT=y 2 + CONFIG_CPU_MIPS64_R2=y 3 + CONFIG_64BIT=y 4 + CONFIG_TRANSPARENT_HUGEPAGE=y 5 + CONFIG_SMP=y 6 + CONFIG_HZ_1000=y 7 + CONFIG_PREEMPT=y 8 + CONFIG_SYSVIPC=y 9 + CONFIG_BSD_PROCESS_ACCT=y 10 + CONFIG_BSD_PROCESS_ACCT_V3=y 11 + CONFIG_IKCONFIG=y 12 + CONFIG_IKCONFIG_PROC=y 13 + CONFIG_LOG_BUF_SHIFT=14 14 + CONFIG_RELAY=y 15 + CONFIG_BLK_DEV_INITRD=y 16 + CONFIG_EXPERT=y 17 + CONFIG_SLAB=y 18 + CONFIG_MODULES=y 19 + CONFIG_MODULE_UNLOAD=y 20 + # CONFIG_BLK_DEV_BSG is not set 21 + CONFIG_PCI=y 22 + CONFIG_MIPS32_COMPAT=y 23 + CONFIG_MIPS32_O32=y 24 + CONFIG_MIPS32_N32=y 25 + CONFIG_NET=y 26 + CONFIG_PACKET=y 27 + CONFIG_UNIX=y 28 + CONFIG_INET=y 29 + CONFIG_IP_MULTICAST=y 30 + CONFIG_IP_ADVANCED_ROUTER=y 31 + CONFIG_IP_MULTIPLE_TABLES=y 32 + CONFIG_IP_ROUTE_MULTIPATH=y 33 + CONFIG_IP_ROUTE_VERBOSE=y 34 + CONFIG_IP_PNP=y 35 + CONFIG_IP_PNP_DHCP=y 36 + CONFIG_IP_PNP_BOOTP=y 37 + CONFIG_IP_PNP_RARP=y 38 + CONFIG_IP_MROUTE=y 39 + CONFIG_IP_PIMSM_V1=y 40 + CONFIG_IP_PIMSM_V2=y 41 + CONFIG_SYN_COOKIES=y 42 + # CONFIG_INET_LRO is not set 43 + CONFIG_IPV6=y 44 + # CONFIG_WIRELESS is not set 45 + CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 46 + # CONFIG_FW_LOADER is not set 47 + CONFIG_BLK_DEV_LOOP=y 48 + CONFIG_VIRTIO_BLK=y 49 + CONFIG_SCSI=y 50 + CONFIG_BLK_DEV_SD=y 51 + CONFIG_NETDEVICES=y 52 + CONFIG_VIRTIO_NET=y 53 + # CONFIG_NET_VENDOR_BROADCOM is not set 54 + # CONFIG_NET_VENDOR_INTEL is not set 55 + # CONFIG_NET_VENDOR_MARVELL is not set 56 + # CONFIG_NET_VENDOR_MICREL is not set 57 + # CONFIG_NET_VENDOR_NATSEMI is not set 58 + # CONFIG_NET_VENDOR_SMSC is not set 59 + # CONFIG_NET_VENDOR_STMICRO is not set 60 + # CONFIG_NET_VENDOR_WIZNET is not set 61 + CONFIG_PHYLIB=y 62 + CONFIG_MARVELL_PHY=y 63 + CONFIG_BROADCOM_PHY=y 64 + CONFIG_BCM87XX_PHY=y 65 + # CONFIG_WLAN is not set 66 + # CONFIG_INPUT is not set 67 + # CONFIG_SERIO is not set 68 + # CONFIG_VT is not set 69 + CONFIG_VIRTIO_CONSOLE=y 70 + # CONFIG_HW_RANDOM is not set 71 + # 
CONFIG_HWMON is not set 72 + # CONFIG_USB_SUPPORT is not set 73 + CONFIG_VIRTIO_PCI=y 74 + CONFIG_VIRTIO_BALLOON=y 75 + CONFIG_VIRTIO_MMIO=y 76 + # CONFIG_IOMMU_SUPPORT is not set 77 + CONFIG_EXT4_FS=y 78 + CONFIG_EXT4_FS_POSIX_ACL=y 79 + CONFIG_EXT4_FS_SECURITY=y 80 + CONFIG_MSDOS_FS=y 81 + CONFIG_VFAT_FS=y 82 + CONFIG_PROC_KCORE=y 83 + CONFIG_TMPFS=y 84 + CONFIG_HUGETLBFS=y 85 + # CONFIG_MISC_FILESYSTEMS is not set 86 + CONFIG_NFS_FS=y 87 + CONFIG_NFS_V4=y 88 + CONFIG_NFS_V4_1=y 89 + CONFIG_ROOT_NFS=y 90 + CONFIG_NLS_CODEPAGE_437=y 91 + CONFIG_NLS_ASCII=y 92 + CONFIG_NLS_ISO8859_1=y 93 + CONFIG_NLS_UTF8=y 94 + CONFIG_DEBUG_INFO=y 95 + CONFIG_DEBUG_FS=y 96 + CONFIG_MAGIC_SYSRQ=y 97 + # CONFIG_SCHED_DEBUG is not set 98 + # CONFIG_FTRACE is not set 99 + CONFIG_CRYPTO_CBC=y 100 + CONFIG_CRYPTO_HMAC=y 101 + CONFIG_CRYPTO_MD5=y 102 + CONFIG_CRYPTO_DES=y 103 + # CONFIG_CRYPTO_ANSI_CPRNG is not set
+1 -1
arch/mips/configs/rt305x_defconfig
··· 81 81 # CONFIG_FIRMWARE_IN_KERNEL is not set 82 82 CONFIG_MTD=y 83 83 CONFIG_MTD_CMDLINE_PARTS=y 84 - CONFIG_MTD_CHAR=y 85 84 CONFIG_MTD_BLOCK=y 86 85 CONFIG_MTD_CFI=y 87 86 CONFIG_MTD_CFI_AMDSTD=y ··· 88 89 CONFIG_MTD_PHYSMAP=y 89 90 CONFIG_MTD_PHYSMAP_OF=y 90 91 CONFIG_MTD_M25P80=y 92 + CONFIG_MTD_SPI_NOR=y 91 93 CONFIG_EEPROM_93CX6=m 92 94 CONFIG_SCSI=y 93 95 CONFIG_BLK_DEV_SD=y
+5
arch/mips/dec/setup.c
··· 23 23 #include <asm/bootinfo.h> 24 24 #include <asm/cpu.h> 25 25 #include <asm/cpu-features.h> 26 + #include <asm/cpu-type.h> 26 27 #include <asm/irq.h> 27 28 #include <asm/irq_cpu.h> 28 29 #include <asm/mipsregs.h> ··· 749 748 cpu_fpu_mask = 0; 750 749 dec_interrupt[DEC_IRQ_FPU] = -1; 751 750 } 751 + /* Free the halt interrupt unused on R4k systems. */ 752 + if (current_cpu_type() == CPU_R4000SC || 753 + current_cpu_type() == CPU_R4400SC) 754 + dec_interrupt[DEC_IRQ_HALT] = -1; 752 755 753 756 /* Register board interrupts: FPU and cascade. */ 754 757 if (dec_interrupt[DEC_IRQ_FPU] >= 0)
+34 -28
arch/mips/include/asm/asmmacro.h
··· 17 17 #ifdef CONFIG_64BIT 18 18 #include <asm/asmmacro-64.h> 19 19 #endif 20 - #ifdef CONFIG_MIPS_MT_SMTC 21 - #include <asm/mipsmtregs.h> 22 - #endif 23 20 24 - #ifdef CONFIG_MIPS_MT_SMTC 25 - .macro local_irq_enable reg=t0 26 - mfc0 \reg, CP0_TCSTATUS 27 - ori \reg, \reg, TCSTATUS_IXMT 28 - xori \reg, \reg, TCSTATUS_IXMT 29 - mtc0 \reg, CP0_TCSTATUS 30 - _ehb 31 - .endm 32 - 33 - .macro local_irq_disable reg=t0 34 - mfc0 \reg, CP0_TCSTATUS 35 - ori \reg, \reg, TCSTATUS_IXMT 36 - mtc0 \reg, CP0_TCSTATUS 37 - _ehb 38 - .endm 39 - #elif defined(CONFIG_CPU_MIPSR2) 21 + #ifdef CONFIG_CPU_MIPSR2 40 22 .macro local_irq_enable reg=t0 41 23 ei 42 24 irq_enable_hazard ··· 53 71 sw \reg, TI_PRE_COUNT($28) 54 72 #endif 55 73 .endm 56 - #endif /* CONFIG_MIPS_MT_SMTC */ 74 + #endif /* CONFIG_CPU_MIPSR2 */ 57 75 58 76 .macro fpu_save_16even thread tmp=t0 59 77 cfc1 \tmp, fcr31 ··· 249 267 .set pop 250 268 .endm 251 269 #else 270 + 271 + #ifdef CONFIG_CPU_MICROMIPS 272 + #define CFC_MSA_INSN 0x587e0056 273 + #define CTC_MSA_INSN 0x583e0816 274 + #define LDD_MSA_INSN 0x58000837 275 + #define STD_MSA_INSN 0x5800083f 276 + #define COPY_UW_MSA_INSN 0x58f00056 277 + #define COPY_UD_MSA_INSN 0x58f80056 278 + #define INSERT_W_MSA_INSN 0x59300816 279 + #define INSERT_D_MSA_INSN 0x59380816 280 + #else 281 + #define CFC_MSA_INSN 0x787e0059 282 + #define CTC_MSA_INSN 0x783e0819 283 + #define LDD_MSA_INSN 0x78000823 284 + #define STD_MSA_INSN 0x78000827 285 + #define COPY_UW_MSA_INSN 0x78f00059 286 + #define COPY_UD_MSA_INSN 0x78f80059 287 + #define INSERT_W_MSA_INSN 0x79300819 288 + #define INSERT_D_MSA_INSN 0x79380819 289 + #endif 290 + 252 291 /* 253 292 * Temporary until all toolchains in use include MSA support. 
254 293 */ 255 294 .macro cfcmsa rd, cs 256 295 .set push 257 296 .set noat 258 - .word 0x787e0059 | (\cs << 11) 297 + .insn 298 + .word CFC_MSA_INSN | (\cs << 11) 259 299 move \rd, $1 260 300 .set pop 261 301 .endm ··· 286 282 .set push 287 283 .set noat 288 284 move $1, \rs 289 - .word 0x783e0819 | (\cd << 6) 285 + .word CTC_MSA_INSN | (\cd << 6) 290 286 .set pop 291 287 .endm 292 288 ··· 294 290 .set push 295 291 .set noat 296 292 add $1, \base, \off 297 - .word 0x78000823 | (\wd << 6) 293 + .word LDD_MSA_INSN | (\wd << 6) 298 294 .set pop 299 295 .endm 300 296 ··· 302 298 .set push 303 299 .set noat 304 300 add $1, \base, \off 305 - .word 0x78000827 | (\wd << 6) 301 + .word STD_MSA_INSN | (\wd << 6) 306 302 .set pop 307 303 .endm 308 304 309 305 .macro copy_u_w rd, ws, n 310 306 .set push 311 307 .set noat 312 - .word 0x78f00059 | (\n << 16) | (\ws << 11) 308 + .insn 309 + .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) 313 310 /* move triggers an assembler bug... */ 314 311 or \rd, $1, zero 315 312 .set pop ··· 319 314 .macro copy_u_d rd, ws, n 320 315 .set push 321 316 .set noat 322 - .word 0x78f80059 | (\n << 16) | (\ws << 11) 317 + .insn 318 + .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) 323 319 /* move triggers an assembler bug... */ 324 320 or \rd, $1, zero 325 321 .set pop ··· 331 325 .set noat 332 326 /* move triggers an assembler bug... */ 333 327 or $1, \rs, zero 334 - .word 0x79300819 | (\n << 16) | (\wd << 6) 328 + .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) 335 329 .set pop 336 330 .endm 337 331 ··· 340 334 .set noat 341 335 /* move triggers an assembler bug... */ 342 336 or $1, \rs, zero 343 - .word 0x79380819 | (\n << 16) | (\wd << 6) 337 + .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) 344 338 .set pop 345 339 .endm 346 340 #endif
+30
arch/mips/include/asm/branch.h
··· 8 8 #ifndef _ASM_BRANCH_H 9 9 #define _ASM_BRANCH_H 10 10 11 + #include <asm/cpu-features.h> 12 + #include <asm/mipsregs.h> 11 13 #include <asm/ptrace.h> 12 14 #include <asm/inst.h> 13 15 ··· 20 18 extern int __microMIPS_compute_return_epc(struct pt_regs *regs); 21 19 extern int __MIPS16e_compute_return_epc(struct pt_regs *regs); 22 20 21 + /* 22 + * microMIPS bitfields 23 + */ 24 + #define MM_POOL32A_MINOR_MASK 0x3f 25 + #define MM_POOL32A_MINOR_SHIFT 0x6 26 + #define MM_MIPS32_COND_FC 0x30 27 + 28 + extern int __mm_isBranchInstr(struct pt_regs *regs, 29 + struct mm_decoded_insn dec_insn, unsigned long *contpc); 30 + 31 + static inline int mm_isBranchInstr(struct pt_regs *regs, 32 + struct mm_decoded_insn dec_insn, unsigned long *contpc) 33 + { 34 + if (!cpu_has_mmips) 35 + return 0; 36 + 37 + return __mm_isBranchInstr(regs, dec_insn, contpc); 38 + } 23 39 24 40 static inline int delay_slot(struct pt_regs *regs) 25 41 { 26 42 return regs->cp0_cause & CAUSEF_BD; 43 + } 44 + 45 + static inline void clear_delay_slot(struct pt_regs *regs) 46 + { 47 + regs->cp0_cause &= ~CAUSEF_BD; 48 + } 49 + 50 + static inline void set_delay_slot(struct pt_regs *regs) 51 + { 52 + regs->cp0_cause |= CAUSEF_BD; 27 53 } 28 54 29 55 static inline unsigned long exception_epc(struct pt_regs *regs)
+6
arch/mips/include/asm/cacheflush.h
··· 113 113 114 114 extern void *kmap_coherent(struct page *page, unsigned long addr); 115 115 extern void kunmap_coherent(void); 116 + extern void *kmap_noncoherent(struct page *page, unsigned long addr); 117 + 118 + static inline void kunmap_noncoherent(void) 119 + { 120 + kunmap_coherent(); 121 + } 116 122 117 123 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 118 124 static inline void flush_kernel_dcache_page(struct page *page)
-1
arch/mips/include/asm/cmp.h
··· 10 10 extern void cmp_smp_finish(void); 11 11 extern void cmp_boot_secondary(int cpu, struct task_struct *t); 12 12 extern void cmp_init_secondary(void); 13 - extern void cmp_cpus_done(void); 14 13 extern void cmp_prepare_cpus(unsigned int max_cpus); 15 14 16 15 /* This is platform specific */
+19 -1
arch/mips/include/asm/cpu-features.h
··· 110 110 #ifndef cpu_has_smartmips 111 111 #define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS) 112 112 #endif 113 + 113 114 #ifndef cpu_has_rixi 114 - #define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI) 115 + # ifdef CONFIG_64BIT 116 + # define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI) 117 + # else /* CONFIG_32BIT */ 118 + # define cpu_has_rixi ((cpu_data[0].options & MIPS_CPU_RIXI) && !cpu_has_64bits) 119 + # endif 115 120 #endif 121 + 116 122 #ifndef cpu_has_mmips 117 123 # ifdef CONFIG_SYS_SUPPORTS_MICROMIPS 118 124 # define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS) ··· 126 120 # define cpu_has_mmips 0 127 121 # endif 128 122 #endif 123 + 129 124 #ifndef cpu_has_vtag_icache 130 125 #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) 131 126 #endif ··· 190 183 /* 191 184 * Shortcuts ... 192 185 */ 186 + #define cpu_has_mips_2_3_4_5 (cpu_has_mips_2 | cpu_has_mips_3_4_5) 187 + #define cpu_has_mips_3_4_5 (cpu_has_mips_3 | cpu_has_mips_4_5) 188 + #define cpu_has_mips_4_5 (cpu_has_mips_4 | cpu_has_mips_5) 189 + 190 + #define cpu_has_mips_2_3_4_5_r (cpu_has_mips_2 | cpu_has_mips_3_4_5_r) 191 + #define cpu_has_mips_3_4_5_r (cpu_has_mips_3 | cpu_has_mips_4_5_r) 192 + #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) 193 + #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) 194 + 195 + #define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) 196 + 193 197 #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) 194 198 #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) 195 199 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
+4 -9
arch/mips/include/asm/cpu-info.h
··· 65 65 #ifdef CONFIG_64BIT 66 66 int vmbits; /* Virtual memory size in bits */ 67 67 #endif 68 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 68 + #ifdef CONFIG_MIPS_MT_SMP 69 69 /* 70 - * In the MIPS MT "SMTC" model, each TC is considered 71 - * to be a "CPU" for the purposes of scheduling, but 72 - * exception resources, ASID spaces, etc, are common 73 - * to all TCs within the same VPE. 70 + * There is not necessarily a 1:1 mapping of VPE num to CPU number 71 + * in particular on multi-core systems. 74 72 */ 75 73 int vpe_id; /* Virtual Processor number */ 76 - #endif 77 - #ifdef CONFIG_MIPS_MT_SMTC 78 - int tc_id; /* Thread Context number */ 79 74 #endif 80 75 void *data; /* Additional data */ 81 76 unsigned int watch_reg_count; /* Number that exist */ ··· 112 117 unsigned long n; 113 118 }; 114 119 115 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 120 + #ifdef CONFIG_MIPS_MT_SMP 116 121 # define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id) 117 122 #else 118 123 # define cpu_vpe_id(cpuinfo) 0
+1 -3
arch/mips/include/asm/cpu-type.h
··· 155 155 case CPU_RM7000: 156 156 case CPU_SR71000: 157 157 #endif 158 - #ifdef CONFIG_SYS_HAS_CPU_RM9000 159 - case CPU_RM9000: 160 - #endif 161 158 #ifdef CONFIG_SYS_HAS_CPU_SB1 162 159 case CPU_SB1: 163 160 case CPU_SB1A: ··· 163 166 case CPU_CAVIUM_OCTEON: 164 167 case CPU_CAVIUM_OCTEON_PLUS: 165 168 case CPU_CAVIUM_OCTEON2: 169 + case CPU_CAVIUM_OCTEON3: 166 170 #endif 167 171 168 172 #if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \
+2 -1
arch/mips/include/asm/cpu.h
··· 201 201 #define PRID_IMP_NETLOGIC_XLP3XX 0x1100 202 202 #define PRID_IMP_NETLOGIC_XLP2XX 0x1200 203 203 #define PRID_IMP_NETLOGIC_XLP9XX 0x1500 204 + #define PRID_IMP_NETLOGIC_XLP5XX 0x1300 204 205 205 206 /* 206 207 * Particular Revision values for bits 7:0 of the PRId register. ··· 282 281 CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000, 283 282 CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122, 284 283 CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000, 285 - CPU_SR71000, CPU_RM9000, CPU_TX49XX, 284 + CPU_SR71000, CPU_TX49XX, 286 285 287 286 /* 288 287 * R8000 class processors
+12 -3
arch/mips/include/asm/dec/kn05.h
··· 49 49 #define KN4K_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */ 50 50 51 51 /* 52 + * MB ASIC interrupt bits. 53 + */ 54 + #define KN4K_MB_INR_MB 4 /* ??? */ 55 + #define KN4K_MB_INR_MT 3 /* memory, I/O bus read/write errors */ 56 + #define KN4K_MB_INR_RES_2 2 /* unused */ 57 + #define KN4K_MB_INR_RTC 1 /* RTC */ 58 + #define KN4K_MB_INR_TC 0 /* I/O ASIC cascade */ 59 + 60 + /* 52 61 * Bits for the MB interrupt register. 53 62 * The register appears read-only. 54 63 */ 55 - #define KN4K_MB_INT_TC (1<<0) /* TURBOchannel? */ 56 - #define KN4K_MB_INT_RTC (1<<1) /* RTC? */ 57 - #define KN4K_MB_INT_MT (1<<3) /* I/O ASIC cascade */ 64 + #define KN4K_MB_INT_IRQ (0x1f<<0) /* CPU Int[4:0] status. */ 65 + #define KN4K_MB_INT_IRQ_N(n) (1<<(n)) /* Individual status bits. */ 58 66 59 67 /* 60 68 * Bits for the MB control & status register. ··· 78 70 #define KN4K_MB_CSR_NC (1<<14) /* ??? */ 79 71 #define KN4K_MB_CSR_EE (1<<15) /* (bus) Exception Enable? */ 80 72 #define KN4K_MB_CSR_MSK (0x1f<<16) /* CPU Int[4:0] mask */ 73 + #define KN4K_MB_CSR_MSK_N(n) (1<<((n)+16)) /* Individual mask bits. */ 81 74 #define KN4K_MB_CSR_FW (1<<21) /* ??? */ 82 75 #define KN4K_MB_CSR_W (1<<31) /* ??? */ 83 76
-4
arch/mips/include/asm/fixmap.h
··· 48 48 enum fixed_addresses { 49 49 #define FIX_N_COLOURS 8 50 50 FIX_CMAP_BEGIN, 51 - #ifdef CONFIG_MIPS_MT_SMTC 52 - FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2), 53 - #else 54 51 FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2), 55 - #endif 56 52 #ifdef CONFIG_HIGHMEM 57 53 /* reserved pte's for temporary kernel mappings */ 58 54 FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
+4 -3
arch/mips/include/asm/fpu.h
··· 17 17 #include <asm/mipsregs.h> 18 18 #include <asm/cpu.h> 19 19 #include <asm/cpu-features.h> 20 + #include <asm/fpu_emulator.h> 20 21 #include <asm/hazards.h> 21 22 #include <asm/processor.h> 22 23 #include <asm/current.h> ··· 29 28 struct sigcontext; 30 29 struct sigcontext32; 31 30 32 - extern void fpu_emulator_init_fpu(void); 33 31 extern void _init_fpu(void); 34 32 extern void _save_fp(struct task_struct *); 35 33 extern void _restore_fp(struct task_struct *); ··· 156 156 int ret = 0; 157 157 158 158 preempt_disable(); 159 + 159 160 if (cpu_has_fpu) { 160 161 ret = __own_fpu(); 161 162 if (!ret) 162 163 _init_fpu(); 163 - } else { 164 + } else 164 165 fpu_emulator_init_fpu(); 165 - } 166 166 167 167 preempt_enable(); 168 + 168 169 return ret; 169 170 } 170 171
+21
arch/mips/include/asm/fpu_emulator.h
··· 23 23 #ifndef _ASM_FPU_EMULATOR_H 24 24 #define _ASM_FPU_EMULATOR_H 25 25 26 + #include <linux/sched.h> 26 27 #include <asm/break.h> 28 + #include <asm/thread_info.h> 27 29 #include <asm/inst.h> 28 30 #include <asm/local.h> 31 + #include <asm/processor.h> 29 32 30 33 #ifdef CONFIG_DEBUG_FS 31 34 ··· 39 36 local_t cp1ops; 40 37 local_t cp1xops; 41 38 local_t errors; 39 + local_t ieee754_inexact; 40 + local_t ieee754_underflow; 41 + local_t ieee754_overflow; 42 + local_t ieee754_zerodiv; 43 + local_t ieee754_invalidop; 42 44 }; 43 45 44 46 DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); ··· 78 70 * Break instruction with special math emu break code set 79 71 */ 80 72 #define BREAK_MATH (0x0000000d | (BRK_MEMU << 16)) 73 + 74 + #define SIGNALLING_NAN 0x7ff800007ff80000LL 75 + 76 + static inline void fpu_emulator_init_fpu(void) 77 + { 78 + struct task_struct *t = current; 79 + int i; 80 + 81 + t->thread.fpu.fcr31 = 0; 82 + 83 + for (i = 0; i < 32; i++) 84 + set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN); 85 + } 81 86 82 87 #endif /* _ASM_FPU_EMULATOR_H */
+1
arch/mips/include/asm/gic.h
··· 380 380 extern cycle_t gic_read_count(void); 381 381 extern cycle_t gic_read_compare(void); 382 382 extern void gic_write_compare(cycle_t cnt); 383 + extern void gic_write_cpu_compare(cycle_t cnt, int cpu); 383 384 extern void gic_send_ipi(unsigned int intr); 384 385 extern unsigned int plat_ipi_call_int_xlate(unsigned int); 385 386 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+2 -2
arch/mips/include/asm/gio_device.h
··· 50 50 extern int gio_register_driver(struct gio_driver *); 51 51 extern void gio_unregister_driver(struct gio_driver *); 52 52 53 - #define gio_get_drvdata(_dev) drv_get_drvdata(&(_dev)->dev) 54 - #define gio_set_drvdata(_dev, data) drv_set_drvdata(&(_dev)->dev, (data)) 53 + #define gio_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev) 54 + #define gio_set_drvdata(_dev, data) dev_set_drvdata(&(_dev)->dev, (data)) 55 55 56 56 extern void gio_set_master(struct gio_device *);
+14
arch/mips/include/asm/idle.h
··· 1 1 #ifndef __ASM_IDLE_H 2 2 #define __ASM_IDLE_H 3 3 4 + #include <linux/cpuidle.h> 4 5 #include <linux/linkage.h> 5 6 6 7 extern void (*cpu_wait)(void); ··· 19 18 { 20 19 return addr >= (unsigned long)r4k_wait_irqoff && 21 20 addr < (unsigned long)__pastwait; 21 + } 22 + 23 + extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev, 24 + struct cpuidle_driver *drv, int index); 25 + 26 + #define MIPS_CPUIDLE_WAIT_STATE {\ 27 + .enter = mips_cpuidle_wait_enter,\ 28 + .exit_latency = 1,\ 29 + .target_residency = 1,\ 30 + .power_usage = UINT_MAX,\ 31 + .flags = CPUIDLE_FLAG_TIME_VALID,\ 32 + .name = "wait",\ 33 + .desc = "MIPS wait",\ 22 34 } 23 35 24 36 #endif /* __ASM_IDLE_H */
-96
arch/mips/include/asm/irq.h
··· 26 26 #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 27 27 #endif 28 28 29 - #ifdef CONFIG_MIPS_MT_SMTC 30 - 31 - struct irqaction; 32 - 33 - extern unsigned long irq_hwmask[]; 34 - extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, 35 - unsigned long hwmask); 36 - 37 - static inline void smtc_im_ack_irq(unsigned int irq) 38 - { 39 - if (irq_hwmask[irq] & ST0_IM) 40 - set_c0_status(irq_hwmask[irq] & ST0_IM); 41 - } 42 - 43 - #else 44 - 45 - static inline void smtc_im_ack_irq(unsigned int irq) 46 - { 47 - } 48 - 49 - #endif /* CONFIG_MIPS_MT_SMTC */ 50 - 51 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 52 - #include <linux/cpumask.h> 53 - 54 - extern int plat_set_irq_affinity(struct irq_data *d, 55 - const struct cpumask *affinity, bool force); 56 - extern void smtc_forward_irq(struct irq_data *d); 57 - 58 - /* 59 - * IRQ affinity hook invoked at the beginning of interrupt dispatch 60 - * if option is enabled. 61 - * 62 - * Up through Linux 2.6.22 (at least) cpumask operations are very 63 - * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity 64 - * used a "fast path" per-IRQ-descriptor cache of affinity information 65 - * to reduce latency. As there is a project afoot to optimize the 66 - * cpumask implementations, this version is optimistically assuming 67 - * that cpumask.h macro overhead is reasonable during interrupt dispatch. 
68 - */ 69 - static inline int handle_on_other_cpu(unsigned int irq) 70 - { 71 - struct irq_data *d = irq_get_irq_data(irq); 72 - 73 - if (cpumask_test_cpu(smp_processor_id(), d->affinity)) 74 - return 0; 75 - smtc_forward_irq(d); 76 - return 1; 77 - } 78 - 79 - #else /* Not doing SMTC affinity */ 80 - 81 - static inline int handle_on_other_cpu(unsigned int irq) { return 0; } 82 - 83 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 84 - 85 - #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 86 - 87 - static inline void smtc_im_backstop(unsigned int irq) 88 - { 89 - if (irq_hwmask[irq] & 0x0000ff00) 90 - write_c0_tccontext(read_c0_tccontext() & 91 - ~(irq_hwmask[irq] & 0x0000ff00)); 92 - } 93 - 94 - /* 95 - * Clear interrupt mask handling "backstop" if irq_hwmask 96 - * entry so indicates. This implies that the ack() or end() 97 - * functions will take over re-enabling the low-level mask. 98 - * Otherwise it will be done on return from exception. 99 - */ 100 - static inline int smtc_handle_on_other_cpu(unsigned int irq) 101 - { 102 - int ret = handle_on_other_cpu(irq); 103 - 104 - if (!ret) 105 - smtc_im_backstop(irq); 106 - return ret; 107 - } 108 - 109 - #else 110 - 111 - static inline void smtc_im_backstop(unsigned int irq) { } 112 - static inline int smtc_handle_on_other_cpu(unsigned int irq) 113 - { 114 - return handle_on_other_cpu(irq); 115 - } 116 - 117 - #endif 118 - 119 29 extern void do_IRQ(unsigned int irq); 120 - 121 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 122 - 123 - extern void do_IRQ_no_affinity(unsigned int irq); 124 - 125 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 126 30 127 31 extern void arch_init_irq(void); 128 32 extern void spurious_interrupt(void);
+3 -29
arch/mips/include/asm/irqflags.h
··· 17 17 #include <linux/stringify.h> 18 18 #include <asm/hazards.h> 19 19 20 - #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) 20 + #ifdef CONFIG_CPU_MIPSR2 21 21 22 22 static inline void arch_local_irq_disable(void) 23 23 { ··· 118 118 unsigned long arch_local_irq_save(void); 119 119 void arch_local_irq_restore(unsigned long flags); 120 120 void __arch_local_irq_restore(unsigned long flags); 121 - #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ 122 - 123 - 124 - extern void smtc_ipi_replay(void); 121 + #endif /* CONFIG_CPU_MIPSR2 */ 125 122 126 123 static inline void arch_local_irq_enable(void) 127 124 { 128 - #ifdef CONFIG_MIPS_MT_SMTC 129 - /* 130 - * SMTC kernel needs to do a software replay of queued 131 - * IPIs, at the cost of call overhead on each local_irq_enable() 132 - */ 133 - smtc_ipi_replay(); 134 - #endif 135 125 __asm__ __volatile__( 136 126 " .set push \n" 137 127 " .set reorder \n" 138 128 " .set noat \n" 139 - #ifdef CONFIG_MIPS_MT_SMTC 140 - " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" 141 - " ori $1, 0x400 \n" 142 - " xori $1, 0x400 \n" 143 - " mtc0 $1, $2, 1 \n" 144 - #elif defined(CONFIG_CPU_MIPSR2) 129 + #if defined(CONFIG_CPU_MIPSR2) 145 130 " ei \n" 146 131 #else 147 132 " mfc0 $1,$12 \n" ··· 148 163 asm __volatile__( 149 164 " .set push \n" 150 165 " .set reorder \n" 151 - #ifdef CONFIG_MIPS_MT_SMTC 152 - " mfc0 %[flags], $2, 1 \n" 153 - #else 154 166 " mfc0 %[flags], $12 \n" 155 - #endif 156 167 " .set pop \n" 157 168 : [flags] "=r" (flags)); 158 169 ··· 158 177 159 178 static inline int arch_irqs_disabled_flags(unsigned long flags) 160 179 { 161 - #ifdef CONFIG_MIPS_MT_SMTC 162 - /* 163 - * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU 164 - */ 165 - return flags & 0x400; 166 - #else 167 180 return !(flags & 1); 168 - #endif 169 181 } 170 182 171 183 #endif /* #ifndef __ASSEMBLY__ */
+109
arch/mips/include/asm/kvm_para.h
··· 1 + #ifndef _ASM_MIPS_KVM_PARA_H 2 + #define _ASM_MIPS_KVM_PARA_H 3 + 4 + #include <uapi/asm/kvm_para.h> 5 + 6 + #define KVM_HYPERCALL ".word 0x42000028" 7 + 8 + /* 9 + * Hypercalls for KVM. 10 + * 11 + * Hypercall number is passed in v0. 12 + * Return value will be placed in v0. 13 + * Up to 3 arguments are passed in a0, a1, and a2. 14 + */ 15 + static inline unsigned long kvm_hypercall0(unsigned long num) 16 + { 17 + register unsigned long n asm("v0"); 18 + register unsigned long r asm("v0"); 19 + 20 + n = num; 21 + __asm__ __volatile__( 22 + KVM_HYPERCALL 23 + : "=r" (r) : "r" (n) : "memory" 24 + ); 25 + 26 + return r; 27 + } 28 + 29 + static inline unsigned long kvm_hypercall1(unsigned long num, 30 + unsigned long arg0) 31 + { 32 + register unsigned long n asm("v0"); 33 + register unsigned long r asm("v0"); 34 + register unsigned long a0 asm("a0"); 35 + 36 + n = num; 37 + a0 = arg0; 38 + __asm__ __volatile__( 39 + KVM_HYPERCALL 40 + : "=r" (r) : "r" (n), "r" (a0) : "memory" 41 + ); 42 + 43 + return r; 44 + } 45 + 46 + static inline unsigned long kvm_hypercall2(unsigned long num, 47 + unsigned long arg0, unsigned long arg1) 48 + { 49 + register unsigned long n asm("v0"); 50 + register unsigned long r asm("v0"); 51 + register unsigned long a0 asm("a0"); 52 + register unsigned long a1 asm("a1"); 53 + 54 + n = num; 55 + a0 = arg0; 56 + a1 = arg1; 57 + __asm__ __volatile__( 58 + KVM_HYPERCALL 59 + : "=r" (r) : "r" (n), "r" (a0), "r" (a1) : "memory" 60 + ); 61 + 62 + return r; 63 + } 64 + 65 + static inline unsigned long kvm_hypercall3(unsigned long num, 66 + unsigned long arg0, unsigned long arg1, unsigned long arg2) 67 + { 68 + register unsigned long n asm("v0"); 69 + register unsigned long r asm("v0"); 70 + register unsigned long a0 asm("a0"); 71 + register unsigned long a1 asm("a1"); 72 + register unsigned long a2 asm("a2"); 73 + 74 + n = num; 75 + a0 = arg0; 76 + a1 = arg1; 77 + a2 = arg2; 78 + __asm__ __volatile__( 79 + KVM_HYPERCALL 80 + : "=r" (r) : "r" 
(n), "r" (a0), "r" (a1), "r" (a2) : "memory" 81 + ); 82 + 83 + return r; 84 + } 85 + 86 + static inline bool kvm_check_and_clear_guest_paused(void) 87 + { 88 + return false; 89 + } 90 + 91 + static inline unsigned int kvm_arch_para_features(void) 92 + { 93 + return 0; 94 + } 95 + 96 + #ifdef CONFIG_MIPS_PARAVIRT 97 + static inline bool kvm_para_available(void) 98 + { 99 + return true; 100 + } 101 + #else 102 + static inline bool kvm_para_available(void) 103 + { 104 + return false; 105 + } 106 + #endif 107 + 108 + 109 + #endif /* _ASM_MIPS_KVM_PARA_H */
-1
arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
··· 22 22 #define cpu_has_3k_cache 0 23 23 #define cpu_has_4k_cache 0 24 24 #define cpu_has_tx39_cache 0 25 - #define cpu_has_fpu 0 26 25 #define cpu_has_counter 1 27 26 #define cpu_has_watch 1 28 27 #define cpu_has_divec 1
+2
arch/mips/include/asm/mach-cavium-octeon/irq.h
··· 35 35 OCTEON_IRQ_PCI_MSI2, 36 36 OCTEON_IRQ_PCI_MSI3, 37 37 38 + OCTEON_IRQ_TWSI, 39 + OCTEON_IRQ_TWSI2, 38 40 OCTEON_IRQ_RML, 39 41 OCTEON_IRQ_TIMER0, 40 42 OCTEON_IRQ_TIMER1,
+4
arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
··· 39 39 #define cpu_has_nofpuex 0 40 40 #define cpu_has_64bits 1 41 41 42 + #define cpu_has_mips_2 1 43 + #define cpu_has_mips_3 1 44 + #define cpu_has_mips_5 0 45 + 42 46 #define cpu_has_mips32r1 0 43 47 #define cpu_has_mips32r2 0 44 48 #define cpu_has_mips64r1 0
-30
arch/mips/include/asm/mach-malta/kernel-entry-init.h
··· 80 80 .endm 81 81 82 82 .macro kernel_entry_setup 83 - #ifdef CONFIG_MIPS_MT_SMTC 84 - mfc0 t0, CP0_CONFIG 85 - bgez t0, 9f 86 - mfc0 t0, CP0_CONFIG, 1 87 - bgez t0, 9f 88 - mfc0 t0, CP0_CONFIG, 2 89 - bgez t0, 9f 90 - mfc0 t0, CP0_CONFIG, 3 91 - and t0, 1<<2 92 - bnez t0, 0f 93 - 9: 94 - /* Assume we came from YAMON... */ 95 - PTR_LA v0, 0x9fc00534 /* YAMON print */ 96 - lw v0, (v0) 97 - move a0, zero 98 - PTR_LA a1, nonmt_processor 99 - jal v0 100 - 101 - PTR_LA v0, 0x9fc00520 /* YAMON exit */ 102 - lw v0, (v0) 103 - li a0, 1 104 - jal v0 105 - 106 - 1: b 1b 107 - 108 - __INITDATA 109 - nonmt_processor: 110 - .asciz "SMTC kernel requires the MT ASE to run\n" 111 - __FINIT 112 - #endif 113 83 114 84 #ifdef CONFIG_EVA 115 85 sync
+37
arch/mips/include/asm/mach-malta/malta-pm.h
··· 1 + /* 2 + * Copyright (C) 2014 Imagination Technologies 3 + * Author: Paul Burton <paul.burton@imgtec.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #ifndef __ASM_MIPS_MACH_MALTA_PM_H__ 12 + #define __ASM_MIPS_MACH_MALTA_PM_H__ 13 + 14 + #include <asm/mips-boards/piix4.h> 15 + 16 + #ifdef CONFIG_MIPS_MALTA_PM 17 + 18 + /** 19 + * mips_pm_suspend - enter a suspend state 20 + * @state: the state to enter, one of PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_* 21 + * 22 + * Enters a suspend state via the Malta's PIIX4. If the state to be entered 23 + * is one which loses context (eg. SOFF) then this function will never 24 + * return. 25 + */ 26 + extern int mips_pm_suspend(unsigned state); 27 + 28 + #else /* !CONFIG_MIPS_MALTA_PM */ 29 + 30 + static inline int mips_pm_suspend(unsigned state) 31 + { 32 + return -EINVAL; 33 + } 34 + 35 + #endif /* !CONFIG_MIPS_MALTA_PM */ 36 + 37 + #endif /* __ASM_MIPS_MACH_MALTA_PM_H__ */
+2
arch/mips/include/asm/mach-netlogic/topology.h
··· 10 10 11 11 #include <asm/mach-netlogic/multi-node.h> 12 12 13 + #ifdef CONFIG_SMP 13 14 #define topology_physical_package_id(cpu) cpu_to_node(cpu) 14 15 #define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE) 15 16 #define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) 16 17 #define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu)) 18 + #endif 17 19 18 20 #include <asm-generic/topology.h> 19 21
+36
arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2013 Cavium, Inc.
 */
#ifndef __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H

/* Compile-time CPU feature knowledge for the paravirt machine type. */
#define cpu_has_4kex		1
#define cpu_has_3k_cache	0
#define cpu_has_tx39_cache	0
#define cpu_has_counter		1
#define cpu_has_llsc		1
/*
 * We Disable LL/SC on non SMP systems as it is faster to disable
 * interrupts for atomic access than a LL/SC.
 */
#ifdef CONFIG_SMP
# define kernel_uses_llsc	1
#else
# define kernel_uses_llsc	0
#endif

/* Cache geometry and type differ when hosted on an Octeon CPU. */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define cpu_dcache_line_size()	128
#define cpu_icache_line_size()	128
#define cpu_has_octeon_cache	1
#define cpu_has_4k_cache	0
#else
#define cpu_has_octeon_cache	0
#define cpu_has_4k_cache	1
#endif

#endif /* __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H */
+19
arch/mips/include/asm/mach-paravirt/irq.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2013 Cavium, Inc.
 */
#ifndef __ASM_MACH_PARAVIRT_IRQ_H__
#define __ASM_MACH_PARAVIRT_IRQ_H__

/* Total number of IRQs and the base of the CPU interrupt range. */
#define NR_IRQS			64
#define MIPS_CPU_IRQ_BASE	1

/* presumably PCI INTA, offset 8 from the CPU IRQ base — confirm with users */
#define MIPS_IRQ_PCIA		(MIPS_CPU_IRQ_BASE + 8)

/* Mailbox interrupts (NOTE(review): look like IPI mailboxes — confirm). */
#define MIPS_IRQ_MBOX0		(MIPS_CPU_IRQ_BASE + 32)
#define MIPS_IRQ_MBOX1		(MIPS_CPU_IRQ_BASE + 33)

#endif /* __ASM_MACH_PARAVIRT_IRQ_H__ */
+50
arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2013 Cavium, Inc
 */
#ifndef __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
#define __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H

/* CP0 EBase register ($15, select 1); its low 10 bits hold the CPU number. */
#define CP0_EBASE $15, 1

	/*
	 * Early kernel entry: CPU 0 falls through to the normal boot path,
	 * every other CPU jumps to smp_bootstrap.
	 */
	.macro	kernel_entry_setup
	mfc0	t0, CP0_EBASE
	andi	t0, t0, 0x3ff		# CPUNum
	beqz	t0, 1f
	# CPUs other than zero goto smp_bootstrap
	j	smp_bootstrap

	1:
	.endm

/*
 * Do SMP slave processor setup necessary before we can safely execute
 * C code.
 *
 * Slaves with a CPU number >= NR_CPUS are parked forever; valid slaves
 * spin until the boot CPU publishes their stack pointer in
 * paravirt_smp_sp[CPUNum], then also pick up gp from paravirt_smp_gp.
 */
	.macro	smp_slave_setup
	mfc0	t0, CP0_EBASE
	andi	t0, t0, 0x3ff		# CPUNum
	slti	t1, t0, NR_CPUS
	bnez	t1, 1f
	2:
	di				# Disable interrupts and idle;
	wait
	b	2b			# Unknown CPU, loop forever.
	1:
	PTR_LA	t1, paravirt_smp_sp	# &paravirt_smp_sp[CPUNum]
	PTR_SLL	t0, PTR_SCALESHIFT
	PTR_ADDU t1, t1, t0
	3:
	PTR_L	sp, 0(t1)
	beqz	sp, 3b			# Spin until told to proceed.

	PTR_LA	t1, paravirt_smp_gp	# &paravirt_smp_gp[CPUNum]
	PTR_ADDU t1, t1, t0
	sync				# Order the gp load after seeing sp.
	PTR_L	gp, 0(t1)
	.endm

#endif /* __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H */
+25
arch/mips/include/asm/mach-paravirt/war.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2013 Cavium Networks <support@caviumnetworks.com>
 */
#ifndef __ASM_MIPS_MACH_PARAVIRT_WAR_H
#define __ASM_MIPS_MACH_PARAVIRT_WAR_H

/*
 * Hardware erratum workarounds. None of the known core/SoC errata apply
 * to the paravirt machine type, so every workaround is disabled.
 */
#define R4600_V1_INDEX_ICACHEOP_WAR	0
#define R4600_V1_HIT_CACHEOP_WAR	0
#define R4600_V2_HIT_CACHEOP_WAR	0
#define R5432_CP0_INTERRUPT_WAR		0
#define BCM1250_M3_WAR			0
#define SIBYTE_1956_WAR			0
#define MIPS4K_ICACHE_REFILL_WAR	0
#define MIPS_CACHE_SYNC_WAR		0
#define TX49XX_ICACHE_INDEX_INV_WAR	0
#define ICACHE_REFILLS_WORKAROUND_WAR	0
#define R10000_LLSC_WAR			0
#define MIPS34K_MISSED_ITLB_WAR		0

#endif /* __ASM_MIPS_MACH_PARAVIRT_WAR_H */
-4
arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
··· 25 25 #ifndef MSP_USB_H_ 26 26 #define MSP_USB_H_ 27 27 28 - #ifdef CONFIG_MSP_HAS_DUAL_USB 29 - #define NUM_USB_DEVS 2 30 - #else 31 28 #define NUM_USB_DEVS 1 32 - #endif 33 29 34 30 /* Register spaces for USB host 0 */ 35 31 #define MSP_USB0_MAB_START (MSP_USB0_BASE + 0x0)
-1
arch/mips/include/asm/mach-ralink/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-31
arch/mips/include/asm/mach-sead3/kernel-entry-init.h
··· 10 10 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H 11 11 12 12 .macro kernel_entry_setup 13 - #ifdef CONFIG_MIPS_MT_SMTC 14 - mfc0 t0, CP0_CONFIG 15 - bgez t0, 9f 16 - mfc0 t0, CP0_CONFIG, 1 17 - bgez t0, 9f 18 - mfc0 t0, CP0_CONFIG, 2 19 - bgez t0, 9f 20 - mfc0 t0, CP0_CONFIG, 3 21 - and t0, 1<<2 22 - bnez t0, 0f 23 - 9 : 24 - /* Assume we came from YAMON... */ 25 - PTR_LA v0, 0x9fc00534 /* YAMON print */ 26 - lw v0, (v0) 27 - move a0, zero 28 - PTR_LA a1, nonmt_processor 29 - jal v0 30 - 31 - PTR_LA v0, 0x9fc00520 /* YAMON exit */ 32 - lw v0, (v0) 33 - li a0, 1 34 - jal v0 35 - 36 - 1 : b 1b 37 - 38 - __INITDATA 39 - nonmt_processor : 40 - .asciz "SMTC kernel requires the MT ASE to run\n" 41 - __FINIT 42 - 0 : 43 - #endif 44 13 .endm 45 14 46 15 /*
+12
arch/mips/include/asm/mips-boards/piix4.h
··· 55 55 #define PIIX4_FUNC3_PMREGMISC 0x80 56 56 #define PIIX4_FUNC3_PMREGMISC_EN (1 << 0) 57 57 58 + /* Power Management IO Space */ 59 + #define PIIX4_FUNC3IO_PMSTS 0x00 60 + #define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS (1 << 8) 61 + #define PIIX4_FUNC3IO_PMCNTRL 0x04 62 + #define PIIX4_FUNC3IO_PMCNTRL_SUS_EN (1 << 13) 63 + #define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP (0x7 << 10) 64 + #define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10) 65 + #define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_STR (0x1 << 10) 66 + 67 + /* Data for magic special PCI cycle */ 68 + #define PIIX4_SUSPEND_MAGIC 0x00120002 69 + 58 70 #endif /* __ASM_MIPS_BOARDS_PIIX4_H */
+33 -1
arch/mips/include/asm/mips-cpc.h
··· 72 72 #define MIPS_CPC_COCB_OFS 0x4000 73 73 74 74 /* Macros to ease the creation of register access functions */ 75 - #define BUILD_CPC_R_(name, off) \ 75 + #define BUILD_CPC_R_(name, off) \ 76 + static inline u32 *addr_cpc_##name(void) \ 77 + { \ 78 + return (u32 *)(mips_cpc_base + (off)); \ 79 + } \ 80 + \ 76 81 static inline u32 read_cpc_##name(void) \ 77 82 { \ 78 83 return __raw_readl(mips_cpc_base + (off)); \ ··· 151 146 /* CPC_Cx_OTHER register fields */ 152 147 #define CPC_Cx_OTHER_CORENUM_SHF 16 153 148 #define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16) 149 + 150 + #ifdef CONFIG_MIPS_CPC 151 + 152 + /** 153 + * mips_cpc_lock_other - lock access to another core 154 + * core: the other core to be accessed 155 + * 156 + * Call before operating upon a core via the 'other' register region in 157 + * order to prevent the region being moved during access. Must be followed 158 + * by a call to mips_cpc_unlock_other. 159 + */ 160 + extern void mips_cpc_lock_other(unsigned int core); 161 + 162 + /** 163 + * mips_cpc_unlock_other - unlock access to another core 164 + * 165 + * Call after operating upon another core via the 'other' register region. 166 + * Must be called after mips_cpc_lock_other. 167 + */ 168 + extern void mips_cpc_unlock_other(void); 169 + 170 + #else /* !CONFIG_MIPS_CPC */ 171 + 172 + static inline void mips_cpc_lock_other(unsigned int core) { } 173 + static inline void mips_cpc_unlock_other(void) { } 174 + 175 + #endif /* !CONFIG_MIPS_CPC */ 154 176 155 177 #endif /* __MIPS_ASM_MIPS_CPC_H__ */
+2 -3
arch/mips/include/asm/mips_mt.h
··· 1 1 /* 2 - * Definitions and declarations for MIPS MT support 3 - * that are common between SMTC, VSMP, and/or AP/SP 4 - * kernel models. 2 + * Definitions and declarations for MIPS MT support that are common between 3 + * the VSMP, and AP/SP kernel models. 5 4 */ 6 5 #ifndef __ASM_MIPS_MT_H 7 6 #define __ASM_MIPS_MT_H
+2
arch/mips/include/asm/mipsmtregs.h
··· 36 36 37 37 #define read_c0_tcbind() __read_32bit_c0_register($2, 2) 38 38 39 + #define write_c0_tchalt(val) __write_32bit_c0_register($2, 4, val) 40 + 39 41 #define read_c0_tccontext() __read_32bit_c0_register($2, 5) 40 42 #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val) 41 43
+18 -133
arch/mips/include/asm/mipsregs.h
··· 709 709 #ifndef __ASSEMBLY__ 710 710 711 711 /* 712 - * Macros for handling the ISA mode bit for microMIPS. 712 + * Macros for handling the ISA mode bit for MIPS16 and microMIPS. 713 713 */ 714 + #if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \ 715 + defined(CONFIG_SYS_SUPPORTS_MICROMIPS) 714 716 #define get_isa16_mode(x) ((x) & 0x1) 715 717 #define msk_isa16_mode(x) ((x) & ~0x1) 716 718 #define set_isa16_mode(x) do { (x) |= 0x1; } while(0) 719 + #else 720 + #define get_isa16_mode(x) 0 721 + #define msk_isa16_mode(x) (x) 722 + #define set_isa16_mode(x) do { } while(0) 723 + #endif 717 724 718 725 /* 719 726 * microMIPS instructions can be 16-bit or 32-bit in length. This ··· 1014 1007 #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) 1015 1008 1016 1009 #define read_c0_status() __read_32bit_c0_register($12, 0) 1017 - #ifdef CONFIG_MIPS_MT_SMTC 1018 - #define write_c0_status(val) \ 1019 - do { \ 1020 - __write_32bit_c0_register($12, 0, val); \ 1021 - __ehb(); \ 1022 - } while (0) 1023 - #else 1024 - /* 1025 - * Legacy non-SMTC code, which may be hazardous 1026 - * but which might not support EHB 1027 - */ 1010 + 1028 1011 #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) 1029 - #endif /* CONFIG_MIPS_MT_SMTC */ 1030 1012 1031 1013 #define read_c0_cause() __read_32bit_c0_register($13, 0) 1032 1014 #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) ··· 1739 1743 /* 1740 1744 * Manipulate bits in a c0 register. 1741 1745 */ 1742 - #ifndef CONFIG_MIPS_MT_SMTC 1743 - /* 1744 - * SMTC Linux requires shutting-down microthread scheduling 1745 - * during CP0 register read-modify-write sequences. 
1746 - */ 1747 1746 #define __BUILD_SET_C0(name) \ 1748 1747 static inline unsigned int \ 1749 1748 set_c0_##name(unsigned int set) \ ··· 1777 1786 return res; \ 1778 1787 } 1779 1788 1780 - #else /* SMTC versions that manage MT scheduling */ 1781 - 1782 - #include <linux/irqflags.h> 1783 - 1784 - /* 1785 - * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with 1786 - * header file recursion. 1787 - */ 1788 - static inline unsigned int __dmt(void) 1789 - { 1790 - int res; 1791 - 1792 - __asm__ __volatile__( 1793 - " .set push \n" 1794 - " .set mips32r2 \n" 1795 - " .set noat \n" 1796 - " .word 0x41610BC1 # dmt $1 \n" 1797 - " ehb \n" 1798 - " move %0, $1 \n" 1799 - " .set pop \n" 1800 - : "=r" (res)); 1801 - 1802 - instruction_hazard(); 1803 - 1804 - return res; 1805 - } 1806 - 1807 - #define __VPECONTROL_TE_SHIFT 15 1808 - #define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT) 1809 - 1810 - #define __EMT_ENABLE __VPECONTROL_TE 1811 - 1812 - static inline void __emt(unsigned int previous) 1813 - { 1814 - if ((previous & __EMT_ENABLE)) 1815 - __asm__ __volatile__( 1816 - " .set mips32r2 \n" 1817 - " .word 0x41600be1 # emt \n" 1818 - " ehb \n" 1819 - " .set mips0 \n"); 1820 - } 1821 - 1822 - static inline void __ehb(void) 1823 - { 1824 - __asm__ __volatile__( 1825 - " .set mips32r2 \n" 1826 - " ehb \n" " .set mips0 \n"); 1827 - } 1828 - 1829 - /* 1830 - * Note that local_irq_save/restore affect TC-specific IXMT state, 1831 - * not Status.IE as in non-SMTC kernel. 
1832 - */ 1833 - 1834 - #define __BUILD_SET_C0(name) \ 1835 - static inline unsigned int \ 1836 - set_c0_##name(unsigned int set) \ 1837 - { \ 1838 - unsigned int res; \ 1839 - unsigned int new; \ 1840 - unsigned int omt; \ 1841 - unsigned long flags; \ 1842 - \ 1843 - local_irq_save(flags); \ 1844 - omt = __dmt(); \ 1845 - res = read_c0_##name(); \ 1846 - new = res | set; \ 1847 - write_c0_##name(new); \ 1848 - __emt(omt); \ 1849 - local_irq_restore(flags); \ 1850 - \ 1851 - return res; \ 1852 - } \ 1853 - \ 1854 - static inline unsigned int \ 1855 - clear_c0_##name(unsigned int clear) \ 1856 - { \ 1857 - unsigned int res; \ 1858 - unsigned int new; \ 1859 - unsigned int omt; \ 1860 - unsigned long flags; \ 1861 - \ 1862 - local_irq_save(flags); \ 1863 - omt = __dmt(); \ 1864 - res = read_c0_##name(); \ 1865 - new = res & ~clear; \ 1866 - write_c0_##name(new); \ 1867 - __emt(omt); \ 1868 - local_irq_restore(flags); \ 1869 - \ 1870 - return res; \ 1871 - } \ 1872 - \ 1873 - static inline unsigned int \ 1874 - change_c0_##name(unsigned int change, unsigned int newbits) \ 1875 - { \ 1876 - unsigned int res; \ 1877 - unsigned int new; \ 1878 - unsigned int omt; \ 1879 - unsigned long flags; \ 1880 - \ 1881 - local_irq_save(flags); \ 1882 - \ 1883 - omt = __dmt(); \ 1884 - res = read_c0_##name(); \ 1885 - new = res & ~change; \ 1886 - new |= (newbits & change); \ 1887 - write_c0_##name(new); \ 1888 - __emt(omt); \ 1889 - local_irq_restore(flags); \ 1890 - \ 1891 - return res; \ 1892 - } 1893 - #endif 1894 - 1895 1789 __BUILD_SET_C0(status) 1896 1790 __BUILD_SET_C0(cause) 1897 1791 __BUILD_SET_C0(config) ··· 1791 1915 __BUILD_SET_C0(brcm_cmt_ctrl) 1792 1916 __BUILD_SET_C0(brcm_config) 1793 1917 __BUILD_SET_C0(brcm_mode) 1918 + 1919 + /* 1920 + * Return low 10 bits of ebase. 1921 + * Note that under KVM (MIPSVZ) this returns vcpu id. 
1922 + */ 1923 + static inline unsigned int get_ebase_cpunum(void) 1924 + { 1925 + return read_c0_ebase() & 0x3ff; 1926 + } 1794 1927 1795 1928 #endif /* !__ASSEMBLY__ */ 1796 1929
+11 -111
arch/mips/include/asm/mmu_context.h
··· 18 18 #include <asm/cacheflush.h> 19 19 #include <asm/hazards.h> 20 20 #include <asm/tlbflush.h> 21 - #ifdef CONFIG_MIPS_MT_SMTC 22 - #include <asm/mipsmtregs.h> 23 - #include <asm/smtc.h> 24 - #endif /* SMTC */ 25 21 #include <asm-generic/mm_hooks.h> 26 22 27 23 #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ ··· 27 31 } while (0) 28 32 29 33 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT 34 + 35 + #define TLBMISS_HANDLER_RESTORE() \ 36 + write_c0_xcontext((unsigned long) smp_processor_id() << \ 37 + SMP_CPUID_REGSHIFT) 38 + 30 39 #define TLBMISS_HANDLER_SETUP() \ 31 40 do { \ 32 41 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \ 33 - write_c0_xcontext((unsigned long) smp_processor_id() << \ 34 - SMP_CPUID_REGSHIFT); \ 42 + TLBMISS_HANDLER_RESTORE(); \ 35 43 } while (0) 36 44 37 45 #else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ ··· 47 47 */ 48 48 extern unsigned long pgd_current[]; 49 49 50 - #define TLBMISS_HANDLER_SETUP() \ 50 + #define TLBMISS_HANDLER_RESTORE() \ 51 51 write_c0_context((unsigned long) smp_processor_id() << \ 52 - SMP_CPUID_REGSHIFT); \ 52 + SMP_CPUID_REGSHIFT) 53 + 54 + #define TLBMISS_HANDLER_SETUP() \ 55 + TLBMISS_HANDLER_RESTORE(); \ 53 56 back_to_back_c0_hazard(); \ 54 57 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 55 58 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ ··· 66 63 #define ASID_INC 0x10 67 64 #define ASID_MASK 0xff0 68 65 69 - #elif defined(CONFIG_MIPS_MT_SMTC) 70 - 71 - #define ASID_INC 0x1 72 - extern unsigned long smtc_asid_mask; 73 - #define ASID_MASK (smtc_asid_mask) 74 - #define HW_ASID_MASK 0xff 75 - /* End SMTC/34K debug hack */ 76 66 #else /* FIXME: not correct for R6000 */ 77 67 78 68 #define ASID_INC 0x1 ··· 88 92 #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) 89 93 #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) 90 94 91 - #ifndef CONFIG_MIPS_MT_SMTC 92 95 /* Normal, classic MIPS get_new_mmu_context */ 93 96 static inline void 94 97 get_new_mmu_context(struct mm_struct *mm, unsigned long 
cpu) ··· 110 115 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 111 116 } 112 117 113 - #else /* CONFIG_MIPS_MT_SMTC */ 114 - 115 - #define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu)) 116 - 117 - #endif /* CONFIG_MIPS_MT_SMTC */ 118 - 119 118 /* 120 119 * Initialize the context related info for a new mm_struct 121 120 * instance. ··· 130 141 { 131 142 unsigned int cpu = smp_processor_id(); 132 143 unsigned long flags; 133 - #ifdef CONFIG_MIPS_MT_SMTC 134 - unsigned long oldasid; 135 - unsigned long mtflags; 136 - int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; 137 144 local_irq_save(flags); 138 - mtflags = dvpe(); 139 - #else /* Not SMTC */ 140 - local_irq_save(flags); 141 - #endif /* CONFIG_MIPS_MT_SMTC */ 142 145 143 146 /* Check if our ASID is of an older version and thus invalid */ 144 147 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 145 148 get_new_mmu_context(next, cpu); 146 - #ifdef CONFIG_MIPS_MT_SMTC 147 - /* 148 - * If the EntryHi ASID being replaced happens to be 149 - * the value flagged at ASID recycling time as having 150 - * an extended life, clear the bit showing it being 151 - * in use by this "CPU", and if that's the last bit, 152 - * free up the ASID value for use and flush any old 153 - * instances of it from the TLB. 154 - */ 155 - oldasid = (read_c0_entryhi() & ASID_MASK); 156 - if(smtc_live_asid[mytlb][oldasid]) { 157 - smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 158 - if(smtc_live_asid[mytlb][oldasid] == 0) 159 - smtc_flush_tlb_asid(oldasid); 160 - } 161 - /* 162 - * Tread softly on EntryHi, and so long as we support 163 - * having ASID_MASK smaller than the hardware maximum, 164 - * make sure no "soft" bits become "hard"... 
165 - */ 166 - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 167 - cpu_asid(cpu, next)); 168 - ehb(); /* Make sure it propagates to TCStatus */ 169 - evpe(mtflags); 170 - #else 171 149 write_c0_entryhi(cpu_asid(cpu, next)); 172 - #endif /* CONFIG_MIPS_MT_SMTC */ 173 150 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 174 151 175 152 /* ··· 168 213 unsigned long flags; 169 214 unsigned int cpu = smp_processor_id(); 170 215 171 - #ifdef CONFIG_MIPS_MT_SMTC 172 - unsigned long oldasid; 173 - unsigned long mtflags; 174 - int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; 175 - #endif /* CONFIG_MIPS_MT_SMTC */ 176 - 177 216 local_irq_save(flags); 178 217 179 218 /* Unconditionally get a new ASID. */ 180 219 get_new_mmu_context(next, cpu); 181 220 182 - #ifdef CONFIG_MIPS_MT_SMTC 183 - /* See comments for similar code above */ 184 - mtflags = dvpe(); 185 - oldasid = read_c0_entryhi() & ASID_MASK; 186 - if(smtc_live_asid[mytlb][oldasid]) { 187 - smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 188 - if(smtc_live_asid[mytlb][oldasid] == 0) 189 - smtc_flush_tlb_asid(oldasid); 190 - } 191 - /* See comments for similar code above */ 192 - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 193 - cpu_asid(cpu, next)); 194 - ehb(); /* Make sure it propagates to TCStatus */ 195 - evpe(mtflags); 196 - #else 197 221 write_c0_entryhi(cpu_asid(cpu, next)); 198 - #endif /* CONFIG_MIPS_MT_SMTC */ 199 222 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 200 223 201 224 /* mark mmu ownership change */ ··· 191 258 drop_mmu_context(struct mm_struct *mm, unsigned cpu) 192 259 { 193 260 unsigned long flags; 194 - #ifdef CONFIG_MIPS_MT_SMTC 195 - unsigned long oldasid; 196 - /* Can't use spinlock because called from TLB flush within DVPE */ 197 - unsigned int prevvpe; 198 - int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 
0 : cpu_data[cpu].vpe_id; 199 - #endif /* CONFIG_MIPS_MT_SMTC */ 200 261 201 262 local_irq_save(flags); 202 263 203 264 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 204 265 get_new_mmu_context(mm, cpu); 205 - #ifdef CONFIG_MIPS_MT_SMTC 206 - /* See comments for similar code above */ 207 - prevvpe = dvpe(); 208 - oldasid = (read_c0_entryhi() & ASID_MASK); 209 - if (smtc_live_asid[mytlb][oldasid]) { 210 - smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 211 - if(smtc_live_asid[mytlb][oldasid] == 0) 212 - smtc_flush_tlb_asid(oldasid); 213 - } 214 - /* See comments for similar code above */ 215 - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) 216 - | cpu_asid(cpu, mm)); 217 - ehb(); /* Make sure it propagates to TCStatus */ 218 - evpe(prevvpe); 219 - #else /* not CONFIG_MIPS_MT_SMTC */ 220 266 write_c0_entryhi(cpu_asid(cpu, mm)); 221 - #endif /* CONFIG_MIPS_MT_SMTC */ 222 267 } else { 223 268 /* will get a new context next time */ 224 - #ifndef CONFIG_MIPS_MT_SMTC 225 269 cpu_context(cpu, mm) = 0; 226 - #else /* SMTC */ 227 - int i; 228 - 229 - /* SMTC shares the TLB (and ASIDs) across VPEs */ 230 - for_each_online_cpu(i) { 231 - if((smtc_status & SMTC_TLB_SHARED) 232 - || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) 233 - cpu_context(i, mm) = 0; 234 - } 235 - #endif /* CONFIG_MIPS_MT_SMTC */ 236 270 } 237 271 local_irq_restore(flags); 238 272 }
+1 -7
arch/mips/include/asm/module.h
··· 144 144 #define MODULE_KERNEL_TYPE "64BIT " 145 145 #endif 146 146 147 - #ifdef CONFIG_MIPS_MT_SMTC 148 - #define MODULE_KERNEL_SMTC "MT_SMTC " 149 - #else 150 - #define MODULE_KERNEL_SMTC "" 151 - #endif 152 - 153 147 #define MODULE_ARCH_VERMAGIC \ 154 - MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC 148 + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE 155 149 156 150 #endif /* _ASM_MODULE_H */
+12 -3
arch/mips/include/asm/msa.h
··· 84 84 __asm__ __volatile__( \ 85 85 " .set push\n" \ 86 86 " .set msa\n" \ 87 - " cfcmsa $" #cs ", %0\n" \ 87 + " ctcmsa $" #cs ", %0\n" \ 88 88 " .set pop\n" \ 89 89 : : "r"(val)); \ 90 90 } ··· 96 96 * allow compilation with toolchains that do not support MSA. Once all 97 97 * toolchains in use support MSA these can be removed. 98 98 */ 99 + #ifdef CONFIG_CPU_MICROMIPS 100 + #define CFC_MSA_INSN 0x587e0056 101 + #define CTC_MSA_INSN 0x583e0816 102 + #else 103 + #define CFC_MSA_INSN 0x787e0059 104 + #define CTC_MSA_INSN 0x783e0819 105 + #endif 99 106 100 107 #define __BUILD_MSA_CTL_REG(name, cs) \ 101 108 static inline unsigned int read_msa_##name(void) \ ··· 111 104 __asm__ __volatile__( \ 112 105 " .set push\n" \ 113 106 " .set noat\n" \ 114 - " .word 0x787e0059 | (" #cs " << 11)\n" \ 107 + " .insn\n" \ 108 + " .word #CFC_MSA_INSN | (" #cs " << 11)\n" \ 115 109 " move %0, $1\n" \ 116 110 " .set pop\n" \ 117 111 : "=r"(reg)); \ ··· 125 117 " .set push\n" \ 126 118 " .set noat\n" \ 127 119 " move $1, %0\n" \ 128 - " .word 0x783e0819 | (" #cs " << 6)\n" \ 120 + " .insn\n" \ 121 + " .word #CTC_MSA_INSN | (" #cs " << 6)\n" \ 129 122 " .set pop\n" \ 130 123 : : "r"(val)); \ 131 124 }
+3 -2
arch/mips/include/asm/netlogic/mips-extns.h
··· 146 146 147 147 static inline int nlm_nodeid(void) 148 148 { 149 - uint32_t prid = read_c0_prid(); 149 + uint32_t prid = read_c0_prid() & PRID_IMP_MASK; 150 150 151 - if ((prid & 0xff00) == PRID_IMP_NETLOGIC_XLP9XX) 151 + if ((prid == PRID_IMP_NETLOGIC_XLP9XX) || 152 + (prid == PRID_IMP_NETLOGIC_XLP5XX)) 152 153 return (__read_32bit_c0_register($15, 1) >> 7) & 0x7; 153 154 else 154 155 return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
+11 -7
arch/mips/include/asm/netlogic/xlp-hal/iomap.h
··· 74 74 #define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4) 75 75 #define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5) 76 76 77 + #define XLP_IO_SATA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 2) 78 + 77 79 /* XLP2xx has an updated USB block */ 78 80 #define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i) 79 81 #define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1) ··· 105 103 #define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5) 106 104 #define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6) 107 105 106 + /* Flash */ 108 107 #define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0) 109 108 #define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1) 110 109 #define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2) 111 - /* SD flash */ 112 - #define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3) 113 - #define XLP_IO_MMC_OFFSET(node, slot) \ 114 - ((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ) 110 + #define XLP_IO_MMC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3) 115 111 116 112 /* Things have changed drastically in XLP 9XX */ 117 113 #define XLP9XX_HDR_OFFSET(n, d, f) \ ··· 120 120 #define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2) 121 121 #define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0) 122 122 #define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1) 123 + #define XLP9XX_IO_CLOCK_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 2) 124 + #define XLP9XX_IO_POWER_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 3) 123 125 #define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4) 124 126 125 127 #define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i) ··· 137 135 /* XLP9XX on-chip SATA controller */ 138 136 #define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2) 139 137 138 + /* Flash */ 140 139 #define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0) 141 140 #define XLP9XX_IO_NAND_OFFSET(node) 
XLP9XX_HDR_OFFSET(node, 7, 1) 142 141 #define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2) 143 - /* SD flash */ 144 - #define XLP9XX_IO_MMCSD_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3) 142 + #define XLP9XX_IO_MMC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3) 145 143 146 144 /* PCI config header register id's */ 147 145 #define XLP_PCI_CFGREG0 0x00 ··· 188 186 #define PCI_DEVICE_ID_NLM_NOR 0x1015 189 187 #define PCI_DEVICE_ID_NLM_NAND 0x1016 190 188 #define PCI_DEVICE_ID_NLM_MMC 0x1018 191 - #define PCI_DEVICE_ID_NLM_XHCI 0x101d 189 + #define PCI_DEVICE_ID_NLM_SATA 0x101A 190 + #define PCI_DEVICE_ID_NLM_XHCI 0x101D 192 191 192 + #define PCI_DEVICE_ID_XLP9XX_MMC 0x9018 193 193 #define PCI_DEVICE_ID_XLP9XX_SATA 0x901A 194 194 #define PCI_DEVICE_ID_XLP9XX_XHCI 0x901D 195 195
+14
arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
··· 69 69 #define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e 70 70 #define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f 71 71 72 + #define PCIE_9XX_BRIDGE_MSIX_ADDR_BASE 0x264 73 + #define PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT 0x265 74 + #define PCIE_9XX_MSI_STATUS 0x283 75 + #define PCIE_9XX_MSI_EN 0x284 76 + /* 128 MSIX vectors available in 9xx */ 77 + #define PCIE_9XX_MSIX_STATUS0 0x286 78 + #define PCIE_9XX_MSIX_STATUSX(n) (n + 0x286) 79 + #define PCIE_9XX_MSIX_VEC 0x296 80 + #define PCIE_9XX_MSIX_VECX(n) (n + 0x296) 81 + #define PCIE_9XX_INT_STATUS0 0x397 82 + #define PCIE_9XX_INT_STATUS1 0x398 83 + #define PCIE_9XX_INT_EN0 0x399 84 + #define PCIE_9XX_INT_EN1 0x39a 85 + 72 86 /* other */ 73 87 #define PCIE_NLINKS 4 74 88
+4
arch/mips/include/asm/netlogic/xlp-hal/pic.h
··· 199 199 #define PIC_IRT_PCIE_LINK_3_INDEX 81 200 200 #define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) 201 201 202 + #define PIC_9XX_IRT_PCIE_LINK_0_INDEX 191 203 + #define PIC_9XX_IRT_PCIE_LINK_INDEX(num) \ 204 + ((num) + PIC_9XX_IRT_PCIE_LINK_0_INDEX) 205 + 202 206 #define PIC_CLOCK_TIMER 7 203 207 204 208 #if !defined(LOCORE) && !defined(__ASSEMBLY__)
+35
arch/mips/include/asm/netlogic/xlp-hal/sys.h
··· 118 118 #define SYS_SCRTCH3 0x4c 119 119 120 120 /* PLL registers XLP2XX */ 121 + #define SYS_CPU_PLL_CTRL0(core) (0x1c0 + (core * 4)) 122 + #define SYS_CPU_PLL_CTRL1(core) (0x1c1 + (core * 4)) 123 + #define SYS_CPU_PLL_CTRL2(core) (0x1c2 + (core * 4)) 124 + #define SYS_CPU_PLL_CTRL3(core) (0x1c3 + (core * 4)) 121 125 #define SYS_PLL_CTRL0 0x240 122 126 #define SYS_PLL_CTRL1 0x241 123 127 #define SYS_PLL_CTRL2 0x242 ··· 151 147 #define SYS_SYS_PLL_MEM_REQ 0x2a3 152 148 #define SYS_PLL_MEM_STAT 0x2a4 153 149 150 + /* PLL registers XLP9XX */ 151 + #define SYS_9XX_CPU_PLL_CTRL0(core) (0xc0 + (core * 4)) 152 + #define SYS_9XX_CPU_PLL_CTRL1(core) (0xc1 + (core * 4)) 153 + #define SYS_9XX_CPU_PLL_CTRL2(core) (0xc2 + (core * 4)) 154 + #define SYS_9XX_CPU_PLL_CTRL3(core) (0xc3 + (core * 4)) 155 + #define SYS_9XX_DMC_PLL_CTRL0 0x140 156 + #define SYS_9XX_DMC_PLL_CTRL1 0x141 157 + #define SYS_9XX_DMC_PLL_CTRL2 0x142 158 + #define SYS_9XX_DMC_PLL_CTRL3 0x143 159 + #define SYS_9XX_PLL_CTRL0 0x144 160 + #define SYS_9XX_PLL_CTRL1 0x145 161 + #define SYS_9XX_PLL_CTRL2 0x146 162 + #define SYS_9XX_PLL_CTRL3 0x147 163 + 164 + #define SYS_9XX_PLL_CTRL0_DEVX(x) (0x148 + (x) * 4) 165 + #define SYS_9XX_PLL_CTRL1_DEVX(x) (0x149 + (x) * 4) 166 + #define SYS_9XX_PLL_CTRL2_DEVX(x) (0x14a + (x) * 4) 167 + #define SYS_9XX_PLL_CTRL3_DEVX(x) (0x14b + (x) * 4) 168 + 169 + #define SYS_9XX_CPU_PLL_CHG_CTRL 0x188 170 + #define SYS_9XX_PLL_CHG_CTRL 0x189 171 + #define SYS_9XX_CLK_DEV_DIS 0x18a 172 + #define SYS_9XX_CLK_DEV_SEL 0x18b 173 + #define SYS_9XX_CLK_DEV_DIV 0x18d 174 + #define SYS_9XX_CLK_DEV_CHG 0x18f 175 + 154 176 /* Registers changed on 9XX */ 155 177 #define SYS_9XX_POWER_ON_RESET_CFG 0x00 156 178 #define SYS_9XX_CHIP_RESET 0x01 ··· 199 169 nlm_pcicfg_base(XLP9XX_IO_FUSE_OFFSET(node)) 200 170 #define nlm_get_fuse_regbase(node) \ 201 171 (nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ) 172 + 173 + #define nlm_get_clock_pcibase(node) \ 174 + 
nlm_pcicfg_base(XLP9XX_IO_CLOCK_OFFSET(node)) 175 + #define nlm_get_clock_regbase(node) \ 176 + (nlm_get_clock_pcibase(node) + XLP_IO_PCI_HDRSZ) 202 177 203 178 unsigned int nlm_get_pic_frequency(int node); 204 179 #endif
+13 -6
arch/mips/include/asm/netlogic/xlp-hal/xlp.h
··· 58 58 #define PIC_I2C_1_IRQ 31 59 59 #define PIC_I2C_2_IRQ 32 60 60 #define PIC_I2C_3_IRQ 33 61 + #define PIC_SPI_IRQ 34 62 + #define PIC_NAND_IRQ 37 63 + #define PIC_SATA_IRQ 38 64 + #define PIC_GPIO_IRQ 39 61 65 62 66 #define PIC_PCIE_LINK_MSI_IRQ_BASE 44 /* 44 - 47 MSI IRQ */ 63 67 #define PIC_PCIE_LINK_MSI_IRQ(i) (44 + (i)) ··· 70 66 #define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */ 71 67 #define PIC_PCIE_MSIX_IRQ(i) (48 + (i)) 72 68 73 - #define NLM_MSIX_VEC_BASE 96 /* 96 - 127 - MSIX mapped */ 74 - #define NLM_MSI_VEC_BASE 128 /* 128 -255 - MSI mapped */ 69 + /* XLP9xx and XLP8xx has 128 and 32 MSIX vectors respectively */ 70 + #define NLM_MSIX_VEC_BASE 96 /* 96 - 223 - MSIX mapped */ 71 + #define NLM_MSI_VEC_BASE 224 /* 224 -351 - MSI mapped */ 75 72 76 73 #define NLM_PIC_INDIRECT_VEC_BASE 512 77 74 #define NLM_GPIO_VEC_BASE 768 ··· 100 95 101 96 static inline int cpu_is_xlpii(void) 102 97 { 103 - int chip = read_c0_prid() & 0xff00; 98 + int chip = read_c0_prid() & PRID_IMP_MASK; 104 99 105 100 return chip == PRID_IMP_NETLOGIC_XLP2XX || 106 - chip == PRID_IMP_NETLOGIC_XLP9XX; 101 + chip == PRID_IMP_NETLOGIC_XLP9XX || 102 + chip == PRID_IMP_NETLOGIC_XLP5XX; 107 103 } 108 104 109 105 static inline int cpu_is_xlp9xx(void) 110 106 { 111 - int chip = read_c0_prid() & 0xff00; 107 + int chip = read_c0_prid() & PRID_IMP_MASK; 112 108 113 - return chip == PRID_IMP_NETLOGIC_XLP9XX; 109 + return chip == PRID_IMP_NETLOGIC_XLP9XX || 110 + chip == PRID_IMP_NETLOGIC_XLP5XX; 114 111 } 115 112 #endif /* !__ASSEMBLY__ */ 116 113 #endif /* _ASM_NLM_XLP_H */
+1 -1
arch/mips/include/asm/nile4.h
··· 1 1 /* 2 2 * asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions 3 3 * 4 - * Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com> 4 + * Copyright (C) 2000 Geert Uytterhoeven <geert@linux-m68k.org> 5 5 * Sony Software Development Center Europe (SDCE), Brussels 6 6 * 7 7 * This file is based on the following documentation:
-1
arch/mips/include/asm/octeon/octeon.h
··· 211 211 212 212 extern void octeon_write_lcd(const char *s); 213 213 extern void octeon_check_cpu_bist(void); 214 - extern int octeon_get_boot_debug_flag(void); 215 214 extern int octeon_get_boot_uart(void); 216 215 217 216 struct uart_port;
+2
arch/mips/include/asm/pgtable.h
··· 32 32 _page_cachable_default) 33 33 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ 34 34 _PAGE_GLOBAL | _page_cachable_default) 35 + #define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ 36 + _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT) 35 37 #define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \ 36 38 _page_cachable_default) 37 39 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
+51
arch/mips/include/asm/pm-cps.h
··· 1 + /* 2 + * Copyright (C) 2014 Imagination Technologies 3 + * Author: Paul Burton <paul.burton@imgtec.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #ifndef __MIPS_ASM_PM_CPS_H__ 12 + #define __MIPS_ASM_PM_CPS_H__ 13 + 14 + /* 15 + * The CM & CPC can only handle coherence & power control on a per-core basis, 16 + * thus in an MT system the VPEs within each core are coupled and can only 17 + * enter or exit states requiring CM or CPC assistance in unison. 18 + */ 19 + #ifdef CONFIG_MIPS_MT 20 + # define coupled_coherence cpu_has_mipsmt 21 + #else 22 + # define coupled_coherence 0 23 + #endif 24 + 25 + /* Enumeration of possible PM states */ 26 + enum cps_pm_state { 27 + CPS_PM_NC_WAIT, /* MIPS wait instruction, non-coherent */ 28 + CPS_PM_CLOCK_GATED, /* Core clock gated */ 29 + CPS_PM_POWER_GATED, /* Core power gated */ 30 + CPS_PM_STATE_COUNT, 31 + }; 32 + 33 + /** 34 + * cps_pm_support_state - determine whether the system supports a PM state 35 + * @state: the state to test for support 36 + * 37 + * Returns true if the system supports the given state, otherwise false. 38 + */ 39 + extern bool cps_pm_support_state(enum cps_pm_state state); 40 + 41 + /** 42 + * cps_pm_enter_state - enter a PM state 43 + * @state: the state to enter 44 + * 45 + * Enter the given PM state. If coupled_coherence is non-zero then it is 46 + * expected that this function be called at approximately the same time on 47 + * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno. 48 + */ 49 + extern int cps_pm_enter_state(enum cps_pm_state state); 50 + 51 + #endif /* __MIPS_ASM_PM_CPS_H__ */
+159
arch/mips/include/asm/pm.h
··· 1 + /* 2 + * Copyright (C) 2014 Imagination Technologies Ltd 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms of the GNU General Public License as published by the 6 + * Free Software Foundation; either version 2 of the License, or (at your 7 + * option) any later version. 8 + * 9 + * PM helper macros for CPU power off (e.g. Suspend-to-RAM). 10 + */ 11 + 12 + #ifndef __ASM_PM_H 13 + #define __ASM_PM_H 14 + 15 + #ifdef __ASSEMBLY__ 16 + 17 + #include <asm/asm-offsets.h> 18 + #include <asm/asm.h> 19 + #include <asm/mipsregs.h> 20 + #include <asm/regdef.h> 21 + 22 + /* Save CPU state to stack for suspend to RAM */ 23 + .macro SUSPEND_SAVE_REGS 24 + subu sp, PT_SIZE 25 + /* Call preserved GPRs */ 26 + LONG_S $16, PT_R16(sp) 27 + LONG_S $17, PT_R17(sp) 28 + LONG_S $18, PT_R18(sp) 29 + LONG_S $19, PT_R19(sp) 30 + LONG_S $20, PT_R20(sp) 31 + LONG_S $21, PT_R21(sp) 32 + LONG_S $22, PT_R22(sp) 33 + LONG_S $23, PT_R23(sp) 34 + LONG_S $28, PT_R28(sp) 35 + LONG_S $30, PT_R30(sp) 36 + LONG_S $31, PT_R31(sp) 37 + /* A couple of CP0 registers with space in pt_regs */ 38 + mfc0 k0, CP0_STATUS 39 + LONG_S k0, PT_STATUS(sp) 40 + .endm 41 + 42 + /* Restore CPU state from stack after resume from RAM */ 43 + .macro RESUME_RESTORE_REGS_RETURN 44 + .set push 45 + .set noreorder 46 + /* A couple of CP0 registers with space in pt_regs */ 47 + LONG_L k0, PT_STATUS(sp) 48 + mtc0 k0, CP0_STATUS 49 + /* Call preserved GPRs */ 50 + LONG_L $16, PT_R16(sp) 51 + LONG_L $17, PT_R17(sp) 52 + LONG_L $18, PT_R18(sp) 53 + LONG_L $19, PT_R19(sp) 54 + LONG_L $20, PT_R20(sp) 55 + LONG_L $21, PT_R21(sp) 56 + LONG_L $22, PT_R22(sp) 57 + LONG_L $23, PT_R23(sp) 58 + LONG_L $28, PT_R28(sp) 59 + LONG_L $30, PT_R30(sp) 60 + LONG_L $31, PT_R31(sp) 61 + /* Pop and return */ 62 + jr ra 63 + addiu sp, PT_SIZE 64 + .set pop 65 + .endm 66 + 67 + /* Get address of static suspend state into t1 */ 68 + .macro LA_STATIC_SUSPEND 69 + la t1, 
mips_static_suspend_state 70 + .endm 71 + 72 + /* Save important CPU state for early restoration to global data */ 73 + .macro SUSPEND_SAVE_STATIC 74 + #ifdef CONFIG_EVA 75 + /* 76 + * Segment configuration is saved in global data where it can be easily 77 + * reloaded without depending on the segment configuration. 78 + */ 79 + mfc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */ 80 + LONG_S k0, SSS_SEGCTL0(t1) 81 + mfc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */ 82 + LONG_S k0, SSS_SEGCTL1(t1) 83 + mfc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */ 84 + LONG_S k0, SSS_SEGCTL2(t1) 85 + #endif 86 + /* save stack pointer (pointing to GPRs) */ 87 + LONG_S sp, SSS_SP(t1) 88 + .endm 89 + 90 + /* Restore important CPU state early from global data */ 91 + .macro RESUME_RESTORE_STATIC 92 + #ifdef CONFIG_EVA 93 + /* 94 + * Segment configuration must be restored prior to any access to 95 + * allocated memory, as it may reside outside of the legacy kernel 96 + * segments. 97 + */ 98 + LONG_L k0, SSS_SEGCTL0(t1) 99 + mtc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */ 100 + LONG_L k0, SSS_SEGCTL1(t1) 101 + mtc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */ 102 + LONG_L k0, SSS_SEGCTL2(t1) 103 + mtc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */ 104 + tlbw_use_hazard 105 + #endif 106 + /* restore stack pointer (pointing to GPRs) */ 107 + LONG_L sp, SSS_SP(t1) 108 + .endm 109 + 110 + /* flush caches to make sure context has reached memory */ 111 + .macro SUSPEND_CACHE_FLUSH 112 + .extern __wback_cache_all 113 + .set push 114 + .set noreorder 115 + la t1, __wback_cache_all 116 + LONG_L t0, 0(t1) 117 + jalr t0 118 + nop 119 + .set pop 120 + .endm 121 + 122 + /* Save suspend state and flush data caches to RAM */ 123 + .macro SUSPEND_SAVE 124 + SUSPEND_SAVE_REGS 125 + LA_STATIC_SUSPEND 126 + SUSPEND_SAVE_STATIC 127 + SUSPEND_CACHE_FLUSH 128 + .endm 129 + 130 + /* Restore saved state after resume from RAM and return */ 131 + .macro RESUME_RESTORE_RETURN 132 + LA_STATIC_SUSPEND 133 + RESUME_RESTORE_STATIC 134 + RESUME_RESTORE_REGS_RETURN 135 + .endm 136 
+ 137 + #else /* __ASSEMBLY__ */ 138 + 139 + /** 140 + * struct mips_static_suspend_state - Core saved CPU state across S2R. 141 + * @segctl: CP0 Segment control registers. 142 + * @sp: Stack frame where GP register context is saved. 143 + * 144 + * This structure contains minimal CPU state that must be saved in static kernel 145 + * data in order to be able to restore the rest of the state. This includes 146 + * segmentation configuration in the case of EVA being enabled, as they must be 147 + * restored prior to any kmalloc'd memory being referenced (even the stack 148 + * pointer). 149 + */ 150 + struct mips_static_suspend_state { 151 + #ifdef CONFIG_EVA 152 + unsigned long segctl[3]; 153 + #endif 154 + unsigned long sp; 155 + }; 156 + 157 + #endif /* !__ASSEMBLY__ */ 158 + 159 + #endif /* __ASM_PM_HELPERS_H */
-3
arch/mips/include/asm/ptrace.h
··· 39 39 unsigned long cp0_badvaddr; 40 40 unsigned long cp0_cause; 41 41 unsigned long cp0_epc; 42 - #ifdef CONFIG_MIPS_MT_SMTC 43 - unsigned long cp0_tcstatus; 44 - #endif /* CONFIG_MIPS_MT_SMTC */ 45 42 #ifdef CONFIG_CPU_CAVIUM_OCTEON 46 43 unsigned long long mpl[3]; /* MTM{0,1,2} */ 47 44 unsigned long long mtp[3]; /* MTP{0,1,2} */
+5 -4
arch/mips/include/asm/r4kcache.h
··· 43 43 : "i" (op), "R" (*(unsigned char *)(addr))) 44 44 45 45 #ifdef CONFIG_MIPS_MT 46 - /* 47 - * Temporary hacks for SMTC debug. Optionally force single-threaded 48 - * execution during I-cache flushes. 49 - */ 50 46 47 + /* 48 + * Optionally force single-threaded execution during I-cache flushes. 49 + */ 51 50 #define PROTECT_CACHE_FLUSHES 1 52 51 53 52 #ifdef PROTECT_CACHE_FLUSHES ··· 523 524 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) 524 525 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) 525 526 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) 527 + __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, ) 528 + __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, ) 526 529 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, ) 527 530 528 531 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+2
arch/mips/include/asm/sgi/ip22.h
··· 69 69 #define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */ 70 70 #define SGI_KEYBD_IRQ SGINT_LOCAL2 + 4 /* keyboard */ 71 71 #define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */ 72 + #define SGI_GIOEXP0_IRQ (SGINT_LOCAL2 + 6) /* Indy GIO EXP0 */ 73 + #define SGI_GIOEXP1_IRQ (SGINT_LOCAL2 + 7) /* Indy GIO EXP1 */ 72 74 73 75 #define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE) 74 76
+15 -4
arch/mips/include/asm/smp-cps.h
··· 13 13 14 14 #ifndef __ASSEMBLY__ 15 15 16 - struct boot_config { 17 - unsigned int core; 18 - unsigned int vpe; 16 + struct vpe_boot_config { 19 17 unsigned long pc; 20 18 unsigned long sp; 21 19 unsigned long gp; 22 20 }; 23 21 24 - extern struct boot_config mips_cps_bootcfg; 22 + struct core_boot_config { 23 + atomic_t vpe_mask; 24 + struct vpe_boot_config *vpe_config; 25 + }; 26 + 27 + extern struct core_boot_config *mips_cps_core_bootcfg; 25 28 26 29 extern void mips_cps_core_entry(void); 30 + extern void mips_cps_core_init(void); 31 + 32 + extern struct vpe_boot_config *mips_cps_boot_vpes(void); 33 + 34 + extern bool mips_cps_smp_in_use(void); 35 + 36 + extern void mips_cps_pm_save(void); 37 + extern void mips_cps_pm_restore(void); 27 38 28 39 #else /* __ASSEMBLY__ */ 29 40
-1
arch/mips/include/asm/smp-ops.h
··· 26 26 void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); 27 27 void (*init_secondary)(void); 28 28 void (*smp_finish)(void); 29 - void (*cpus_done)(void); 30 29 void (*boot_secondary)(int cpu, struct task_struct *idle); 31 30 void (*smp_setup)(void); 32 31 void (*prepare_cpus)(unsigned int max_cpus);
+3
arch/mips/include/asm/smp.h
··· 46 46 47 47 extern volatile cpumask_t cpu_callin_map; 48 48 49 + /* Mask of CPUs which are currently definitely operating coherently */ 50 + extern cpumask_t cpu_coherent_mask; 51 + 49 52 extern void asmlinkage smp_bootstrap(void); 50 53 51 54 /*
-78
arch/mips/include/asm/smtc.h
··· 1 - #ifndef _ASM_SMTC_MT_H 2 - #define _ASM_SMTC_MT_H 3 - 4 - /* 5 - * Definitions for SMTC multitasking on MIPS MT cores 6 - */ 7 - 8 - #include <asm/mips_mt.h> 9 - #include <asm/smtc_ipi.h> 10 - 11 - /* 12 - * System-wide SMTC status information 13 - */ 14 - 15 - extern unsigned int smtc_status; 16 - 17 - #define SMTC_TLB_SHARED 0x00000001 18 - #define SMTC_MTC_ACTIVE 0x00000002 19 - 20 - /* 21 - * TLB/ASID Management information 22 - */ 23 - 24 - #define MAX_SMTC_TLBS 2 25 - #define MAX_SMTC_ASIDS 256 26 - #if NR_CPUS <= 8 27 - typedef char asiduse; 28 - #else 29 - #if NR_CPUS <= 16 30 - typedef short asiduse; 31 - #else 32 - typedef long asiduse; 33 - #endif 34 - #endif 35 - 36 - /* 37 - * VPE Management information 38 - */ 39 - 40 - #define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */ 41 - 42 - extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; 43 - 44 - struct mm_struct; 45 - struct task_struct; 46 - 47 - void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); 48 - void self_ipi(struct smtc_ipi *); 49 - void smtc_flush_tlb_asid(unsigned long asid); 50 - extern int smtc_build_cpu_map(int startslot); 51 - extern void smtc_prepare_cpus(int cpus); 52 - extern void smtc_smp_finish(void); 53 - extern void smtc_boot_secondary(int cpu, struct task_struct *t); 54 - extern void smtc_cpus_done(void); 55 - extern void smtc_init_secondary(void); 56 - 57 - 58 - /* 59 - * Sharing the TLB between multiple VPEs means that the 60 - * "random" index selection function is not allowed to 61 - * select the current value of the Index register. To 62 - * avoid additional TLB pressure, the Index registers 63 - * are "parked" with an non-Valid value. 64 - */ 65 - 66 - #define PARKED_INDEX ((unsigned int)0x80000000) 67 - 68 - /* 69 - * Define low-level interrupt mask for IPIs, if necessary. 
70 - * By default, use SW interrupt 1, which requires no external 71 - * hardware support, but which works only for single-core 72 - * MIPS MT systems. 73 - */ 74 - #ifndef MIPS_CPU_IPI_IRQ 75 - #define MIPS_CPU_IPI_IRQ 1 76 - #endif 77 - 78 - #endif /* _ASM_SMTC_MT_H */
-129
arch/mips/include/asm/smtc_ipi.h
··· 1 - /* 2 - * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code. 3 - */ 4 - #ifndef __ASM_SMTC_IPI_H 5 - #define __ASM_SMTC_IPI_H 6 - 7 - #include <linux/spinlock.h> 8 - 9 - //#define SMTC_IPI_DEBUG 10 - 11 - #ifdef SMTC_IPI_DEBUG 12 - #include <asm/mipsregs.h> 13 - #include <asm/mipsmtregs.h> 14 - #endif /* SMTC_IPI_DEBUG */ 15 - 16 - /* 17 - * An IPI "message" 18 - */ 19 - 20 - struct smtc_ipi { 21 - struct smtc_ipi *flink; 22 - int type; 23 - void *arg; 24 - int dest; 25 - #ifdef SMTC_IPI_DEBUG 26 - int sender; 27 - long stamp; 28 - #endif /* SMTC_IPI_DEBUG */ 29 - }; 30 - 31 - /* 32 - * Defined IPI Types 33 - */ 34 - 35 - #define LINUX_SMP_IPI 1 36 - #define SMTC_CLOCK_TICK 2 37 - #define IRQ_AFFINITY_IPI 3 38 - 39 - /* 40 - * A queue of IPI messages 41 - */ 42 - 43 - struct smtc_ipi_q { 44 - struct smtc_ipi *head; 45 - spinlock_t lock; 46 - struct smtc_ipi *tail; 47 - int depth; 48 - int resched_flag; /* reschedule already queued */ 49 - }; 50 - 51 - static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) 52 - { 53 - unsigned long flags; 54 - 55 - spin_lock_irqsave(&q->lock, flags); 56 - if (q->head == NULL) 57 - q->head = q->tail = p; 58 - else 59 - q->tail->flink = p; 60 - p->flink = NULL; 61 - q->tail = p; 62 - q->depth++; 63 - #ifdef SMTC_IPI_DEBUG 64 - p->sender = read_c0_tcbind(); 65 - p->stamp = read_c0_count(); 66 - #endif /* SMTC_IPI_DEBUG */ 67 - spin_unlock_irqrestore(&q->lock, flags); 68 - } 69 - 70 - static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q) 71 - { 72 - struct smtc_ipi *p; 73 - 74 - if (q->head == NULL) 75 - p = NULL; 76 - else { 77 - p = q->head; 78 - q->head = q->head->flink; 79 - q->depth--; 80 - /* Arguably unnecessary, but leaves queue cleaner */ 81 - if (q->head == NULL) 82 - q->tail = NULL; 83 - } 84 - 85 - return p; 86 - } 87 - 88 - static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q) 89 - { 90 - unsigned long flags; 91 - struct smtc_ipi *p; 92 - 93 - 
spin_lock_irqsave(&q->lock, flags); 94 - p = __smtc_ipi_dq(q); 95 - spin_unlock_irqrestore(&q->lock, flags); 96 - 97 - return p; 98 - } 99 - 100 - static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p) 101 - { 102 - unsigned long flags; 103 - 104 - spin_lock_irqsave(&q->lock, flags); 105 - if (q->head == NULL) { 106 - q->head = q->tail = p; 107 - p->flink = NULL; 108 - } else { 109 - p->flink = q->head; 110 - q->head = p; 111 - } 112 - q->depth++; 113 - spin_unlock_irqrestore(&q->lock, flags); 114 - } 115 - 116 - static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q) 117 - { 118 - unsigned long flags; 119 - int retval; 120 - 121 - spin_lock_irqsave(&q->lock, flags); 122 - retval = q->depth; 123 - spin_unlock_irqrestore(&q->lock, flags); 124 - return retval; 125 - } 126 - 127 - extern void smtc_send_ipi(int cpu, int type, unsigned int action); 128 - 129 - #endif /* __ASM_SMTC_IPI_H */
-23
arch/mips/include/asm/smtc_proc.h
··· 1 - /* 2 - * Definitions for SMTC /proc entries 3 - * Copyright(C) 2005 MIPS Technologies Inc. 4 - */ 5 - #ifndef __ASM_SMTC_PROC_H 6 - #define __ASM_SMTC_PROC_H 7 - 8 - /* 9 - * per-"CPU" statistics 10 - */ 11 - 12 - struct smtc_cpu_proc { 13 - unsigned long timerints; 14 - unsigned long selfipis; 15 - }; 16 - 17 - extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; 18 - 19 - /* Count of number of recoveries of "stolen" FPU access rights on 34K */ 20 - 21 - extern atomic_t smtc_fpu_recoveries; 22 - 23 - #endif /* __ASM_SMTC_PROC_H */
+1 -195
arch/mips/include/asm/stackframe.h
··· 19 19 #include <asm/asm-offsets.h> 20 20 #include <asm/thread_info.h> 21 21 22 - /* 23 - * For SMTC kernel, global IE should be left set, and interrupts 24 - * controlled exclusively via IXMT. 25 - */ 26 - #ifdef CONFIG_MIPS_MT_SMTC 27 - #define STATMASK 0x1e 28 - #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 22 + #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 29 23 #define STATMASK 0x3f 30 24 #else 31 25 #define STATMASK 0x1f 32 26 #endif 33 - 34 - #ifdef CONFIG_MIPS_MT_SMTC 35 - #include <asm/mipsmtregs.h> 36 - #endif /* CONFIG_MIPS_MT_SMTC */ 37 27 38 28 .macro SAVE_AT 39 29 .set push ··· 176 186 mfc0 v1, CP0_STATUS 177 187 LONG_S $2, PT_R2(sp) 178 188 LONG_S v1, PT_STATUS(sp) 179 - #ifdef CONFIG_MIPS_MT_SMTC 180 - /* 181 - * Ideally, these instructions would be shuffled in 182 - * to cover the pipeline delay. 183 - */ 184 - .set mips32 185 - mfc0 k0, CP0_TCSTATUS 186 - .set mips0 187 - LONG_S k0, PT_TCSTATUS(sp) 188 - #endif /* CONFIG_MIPS_MT_SMTC */ 189 189 LONG_S $4, PT_R4(sp) 190 190 mfc0 v1, CP0_CAUSE 191 191 LONG_S $5, PT_R5(sp) ··· 301 321 .set push 302 322 .set reorder 303 323 .set noat 304 - #ifdef CONFIG_MIPS_MT_SMTC 305 - .set mips32r2 306 - /* 307 - * We need to make sure the read-modify-write 308 - * of Status below isn't perturbed by an interrupt 309 - * or cross-TC access, so we need to do at least a DMT, 310 - * protected by an interrupt-inhibit. But setting IXMT 311 - * also creates a few-cycle window where an IPI could 312 - * be queued and not be detected before potentially 313 - * returning to a WAIT or user-mode loop. It must be 314 - * replayed. 315 - * 316 - * We're in the middle of a context switch, and 317 - * we can't dispatch it directly without trashing 318 - * some registers, so we'll try to detect this unlikely 319 - * case and program a software interrupt in the VPE, 320 - * as would be done for a cross-VPE IPI. 
To accommodate 321 - * the handling of that case, we're doing a DVPE instead 322 - * of just a DMT here to protect against other threads. 323 - * This is a lot of cruft to cover a tiny window. 324 - * If you can find a better design, implement it! 325 - * 326 - */ 327 - mfc0 v0, CP0_TCSTATUS 328 - ori v0, TCSTATUS_IXMT 329 - mtc0 v0, CP0_TCSTATUS 330 - _ehb 331 - DVPE 5 # dvpe a1 332 - jal mips_ihb 333 - #endif /* CONFIG_MIPS_MT_SMTC */ 334 324 mfc0 a0, CP0_STATUS 335 325 ori a0, STATMASK 336 326 xori a0, STATMASK ··· 312 362 and v0, v1 313 363 or v0, a0 314 364 mtc0 v0, CP0_STATUS 315 - #ifdef CONFIG_MIPS_MT_SMTC 316 - /* 317 - * Only after EXL/ERL have been restored to status can we 318 - * restore TCStatus.IXMT. 319 - */ 320 - LONG_L v1, PT_TCSTATUS(sp) 321 - _ehb 322 - mfc0 a0, CP0_TCSTATUS 323 - andi v1, TCSTATUS_IXMT 324 - bnez v1, 0f 325 - 326 - /* 327 - * We'd like to detect any IPIs queued in the tiny window 328 - * above and request an software interrupt to service them 329 - * when we ERET. 330 - * 331 - * Computing the offset into the IPIQ array of the executing 332 - * TC's IPI queue in-line would be tedious. We use part of 333 - * the TCContext register to hold 16 bits of offset that we 334 - * can add in-line to find the queue head. 335 - */ 336 - mfc0 v0, CP0_TCCONTEXT 337 - la a2, IPIQ 338 - srl v0, v0, 16 339 - addu a2, a2, v0 340 - LONG_L v0, 0(a2) 341 - beqz v0, 0f 342 - /* 343 - * If we have a queue, provoke dispatch within the VPE by setting C_SW1 344 - */ 345 - mfc0 v0, CP0_CAUSE 346 - ori v0, v0, C_SW1 347 - mtc0 v0, CP0_CAUSE 348 - 0: 349 - /* 350 - * This test should really never branch but 351 - * let's be prudent here. Having atomized 352 - * the shared register modifications, we can 353 - * now EVPE, and must do so before interrupts 354 - * are potentially re-enabled. 
355 - */ 356 - andi a1, a1, MVPCONTROL_EVP 357 - beqz a1, 1f 358 - evpe 359 - 1: 360 - /* We know that TCStatua.IXMT should be set from above */ 361 - xori a0, a0, TCSTATUS_IXMT 362 - or a0, a0, v1 363 - mtc0 a0, CP0_TCSTATUS 364 - _ehb 365 - 366 - .set mips0 367 - #endif /* CONFIG_MIPS_MT_SMTC */ 368 365 LONG_L v1, PT_EPC(sp) 369 366 MTC0 v1, CP0_EPC 370 367 LONG_L $31, PT_R31(sp) ··· 364 467 * Set cp0 enable bit as sign that we're running on the kernel stack 365 468 */ 366 469 .macro CLI 367 - #if !defined(CONFIG_MIPS_MT_SMTC) 368 470 mfc0 t0, CP0_STATUS 369 471 li t1, ST0_CU0 | STATMASK 370 472 or t0, t1 371 473 xori t0, STATMASK 372 474 mtc0 t0, CP0_STATUS 373 - #else /* CONFIG_MIPS_MT_SMTC */ 374 - /* 375 - * For SMTC, we need to set privilege 376 - * and disable interrupts only for the 377 - * current TC, using the TCStatus register. 378 - */ 379 - mfc0 t0, CP0_TCSTATUS 380 - /* Fortunately CU 0 is in the same place in both registers */ 381 - /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ 382 - li t1, ST0_CU0 | 0x08001c00 383 - or t0, t1 384 - /* Clear TKSU, leave IXMT */ 385 - xori t0, 0x00001800 386 - mtc0 t0, CP0_TCSTATUS 387 - _ehb 388 - /* We need to leave the global IE bit set, but clear EXL...*/ 389 - mfc0 t0, CP0_STATUS 390 - ori t0, ST0_EXL | ST0_ERL 391 - xori t0, ST0_EXL | ST0_ERL 392 - mtc0 t0, CP0_STATUS 393 - #endif /* CONFIG_MIPS_MT_SMTC */ 394 475 irq_disable_hazard 395 476 .endm 396 477 ··· 377 502 * Set cp0 enable bit as sign that we're running on the kernel stack 378 503 */ 379 504 .macro STI 380 - #if !defined(CONFIG_MIPS_MT_SMTC) 381 505 mfc0 t0, CP0_STATUS 382 506 li t1, ST0_CU0 | STATMASK 383 507 or t0, t1 384 508 xori t0, STATMASK & ~1 385 509 mtc0 t0, CP0_STATUS 386 - #else /* CONFIG_MIPS_MT_SMTC */ 387 - /* 388 - * For SMTC, we need to set privilege 389 - * and enable interrupts only for the 390 - * current TC, using the TCStatus register. 
391 - */ 392 - _ehb 393 - mfc0 t0, CP0_TCSTATUS 394 - /* Fortunately CU 0 is in the same place in both registers */ 395 - /* Set TCU0, TKSU (for later inversion) and IXMT */ 396 - li t1, ST0_CU0 | 0x08001c00 397 - or t0, t1 398 - /* Clear TKSU *and* IXMT */ 399 - xori t0, 0x00001c00 400 - mtc0 t0, CP0_TCSTATUS 401 - _ehb 402 - /* We need to leave the global IE bit set, but clear EXL...*/ 403 - mfc0 t0, CP0_STATUS 404 - ori t0, ST0_EXL 405 - xori t0, ST0_EXL 406 - mtc0 t0, CP0_STATUS 407 - /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ 408 - #endif /* CONFIG_MIPS_MT_SMTC */ 409 510 irq_enable_hazard 410 511 .endm 411 512 ··· 391 540 * Set cp0 enable bit as sign that we're running on the kernel stack 392 541 */ 393 542 .macro KMODE 394 - #ifdef CONFIG_MIPS_MT_SMTC 395 - /* 396 - * This gets baroque in SMTC. We want to 397 - * protect the non-atomic clearing of EXL 398 - * with DMT/EMT, but we don't want to take 399 - * an interrupt while DMT is still in effect. 400 - */ 401 - 402 - /* KMODE gets invoked from both reorder and noreorder code */ 403 - .set push 404 - .set mips32r2 405 - .set noreorder 406 - mfc0 v0, CP0_TCSTATUS 407 - andi v1, v0, TCSTATUS_IXMT 408 - ori v0, TCSTATUS_IXMT 409 - mtc0 v0, CP0_TCSTATUS 410 - _ehb 411 - DMT 2 # dmt v0 412 - /* 413 - * We don't know a priori if ra is "live" 414 - */ 415 - move t0, ra 416 - jal mips_ihb 417 - nop /* delay slot */ 418 - move ra, t0 419 - #endif /* CONFIG_MIPS_MT_SMTC */ 420 543 mfc0 t0, CP0_STATUS 421 544 li t1, ST0_CU0 | (STATMASK & ~1) 422 545 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) ··· 401 576 or t0, t1 402 577 xori t0, STATMASK & ~1 403 578 mtc0 t0, CP0_STATUS 404 - #ifdef CONFIG_MIPS_MT_SMTC 405 - _ehb 406 - andi v0, v0, VPECONTROL_TE 407 - beqz v0, 2f 408 - nop /* delay slot */ 409 - emt 410 - 2: 411 - mfc0 v0, CP0_TCSTATUS 412 - /* Clear IXMT, then OR in previous value */ 413 - ori v0, TCSTATUS_IXMT 414 - xori v0, TCSTATUS_IXMT 415 - or v0, v1, v0 416 - mtc0 v0, 
CP0_TCSTATUS 417 - /* 418 - * irq_disable_hazard below should expand to EHB 419 - * on 24K/34K CPUS 420 - */ 421 - .set pop 422 - #endif /* CONFIG_MIPS_MT_SMTC */ 423 579 irq_disable_hazard 424 580 .endm 425 581
+1 -10
arch/mips/include/asm/thread_info.h
··· 159 159 * We stash processor id into a COP0 register to retrieve it fast 160 160 * at kernel exception entry. 161 161 */ 162 - #if defined(CONFIG_MIPS_MT_SMTC) 163 - #define SMP_CPUID_REG 2, 2 /* TCBIND */ 164 - #define ASM_SMP_CPUID_REG $2, 2 165 - #define SMP_CPUID_PTRSHIFT 19 166 - #elif defined(CONFIG_MIPS_PGD_C0_CONTEXT) 162 + #if defined(CONFIG_MIPS_PGD_C0_CONTEXT) 167 163 #define SMP_CPUID_REG 20, 0 /* XCONTEXT */ 168 164 #define ASM_SMP_CPUID_REG $20 169 165 #define SMP_CPUID_PTRSHIFT 48 ··· 175 179 #define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2) 176 180 #endif 177 181 178 - #ifdef CONFIG_MIPS_MT_SMTC 179 - #define ASM_CPUID_MFC0 mfc0 180 - #define UASM_i_CPUID_MFC0 uasm_i_mfc0 181 - #else 182 182 #define ASM_CPUID_MFC0 MFC0 183 183 #define UASM_i_CPUID_MFC0 UASM_i_MFC0 184 - #endif 185 184 186 185 #endif /* __KERNEL__ */ 187 186 #endif /* _ASM_THREAD_INFO_H */
+1 -4
arch/mips/include/asm/time.h
··· 52 52 */ 53 53 extern unsigned int __weak get_c0_compare_int(void); 54 54 extern int r4k_clockevent_init(void); 55 - extern int smtc_clockevent_init(void); 56 55 extern int gic_clockevent_init(void); 57 56 58 57 static inline int mips_clockevent_init(void) 59 58 { 60 - #ifdef CONFIG_MIPS_MT_SMTC 61 - return smtc_clockevent_init(); 62 - #elif defined(CONFIG_CEVT_GIC) 59 + #if defined(CONFIG_CEVT_GIC) 63 60 return (gic_clockevent_init() | r4k_clockevent_init()); 64 61 #elif defined(CONFIG_CEVT_R4K) 65 62 return r4k_clockevent_init();
+49 -20
arch/mips/include/asm/timex.h
··· 4 4 * for more details. 5 5 * 6 6 * Copyright (C) 1998, 1999, 2003 by Ralf Baechle 7 + * Copyright (C) 2014 by Maciej W. Rozycki 7 8 */ 8 9 #ifndef _ASM_TIMEX_H 9 10 #define _ASM_TIMEX_H 10 11 11 12 #ifdef __KERNEL__ 12 13 14 + #include <linux/compiler.h> 15 + 16 + #include <asm/cpu.h> 13 17 #include <asm/cpu-features.h> 14 18 #include <asm/mipsregs.h> 15 19 #include <asm/cpu-type.h> ··· 49 45 * However for now the implementaton of this function doesn't get these 50 46 * fine details right. 51 47 */ 48 + static inline int can_use_mips_counter(unsigned int prid) 49 + { 50 + int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY; 51 + 52 + if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter) 53 + return 0; 54 + else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) 55 + return 1; 56 + else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp)) 57 + return 1; 58 + /* Make sure we don't peek at cpu_data[0].options in the fast path! */ 59 + if (!__builtin_constant_p(cpu_has_counter)) 60 + asm volatile("" : "=m" (cpu_data[0].options)); 61 + if (likely(cpu_has_counter && 62 + prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0)))) 63 + return 1; 64 + else 65 + return 0; 66 + } 67 + 52 68 static inline cycles_t get_cycles(void) 53 69 { 54 - switch (boot_cpu_type()) { 55 - case CPU_R4400PC: 56 - case CPU_R4400SC: 57 - case CPU_R4400MC: 58 - if ((read_c0_prid() & 0xff) >= 0x0050) 59 - return read_c0_count(); 60 - break; 61 - 62 - case CPU_R4000PC: 63 - case CPU_R4000SC: 64 - case CPU_R4000MC: 65 - break; 66 - 67 - default: 68 - if (cpu_has_counter) 69 - return read_c0_count(); 70 - break; 71 - } 72 - 73 - return 0; /* no usable counter */ 70 + if (can_use_mips_counter(read_c0_prid())) 71 + return read_c0_count(); 72 + else 73 + return 0; /* no usable counter */ 74 74 } 75 + 76 + /* 77 + * Like get_cycles - but where c0_count is not available we desperately 78 + * use c0_random in an attempt to get at least a little bit of entropy. 
79 + * 80 + * R6000 and R6000A neither have a count register nor a random register. 81 + * That leaves no entropy source in the CPU itself. 82 + */ 83 + static inline unsigned long random_get_entropy(void) 84 + { 85 + unsigned int prid = read_c0_prid(); 86 + unsigned int imp = prid & PRID_IMP_MASK; 87 + 88 + if (can_use_mips_counter(prid)) 89 + return read_c0_count(); 90 + else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A)) 91 + return read_c0_random(); 92 + else 93 + return 0; /* no usable register */ 94 + } 95 + #define random_get_entropy random_get_entropy 75 96 76 97 #endif /* __KERNEL__ */ 77 98
+23
arch/mips/include/asm/uasm.h
··· 55 55 #define Ip_u2u1u3(op) \ 56 56 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 57 57 58 + #define Ip_u3u2u1(op) \ 59 + void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 60 + 58 61 #define Ip_u3u1u2(op) \ 59 62 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 60 63 ··· 75 72 unsigned int d) 76 73 77 74 #define Ip_u1u2(op) \ 75 + void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) 76 + 77 + #define Ip_u2u1(op) \ 78 78 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) 79 79 80 80 #define Ip_u1s2(op) \ ··· 105 99 Ip_u3u1u2(_daddu); 106 100 Ip_u2u1msbu3(_dins); 107 101 Ip_u2u1msbu3(_dinsm); 102 + Ip_u1u2(_divu); 108 103 Ip_u1u2u3(_dmfc0); 109 104 Ip_u1u2u3(_dmtc0); 110 105 Ip_u2u1u3(_drotr); ··· 121 114 Ip_u2u1msbu3(_ins); 122 115 Ip_u1(_j); 123 116 Ip_u1(_jal); 117 + Ip_u2u1(_jalr); 124 118 Ip_u1(_jr); 119 + Ip_u2s3u1(_lb); 125 120 Ip_u2s3u1(_ld); 126 121 Ip_u3u1u2(_ldx); 122 + Ip_u2s3u1(_lh); 127 123 Ip_u2s3u1(_ll); 128 124 Ip_u2s3u1(_lld); 129 125 Ip_u1s2(_lui); 130 126 Ip_u2s3u1(_lw); 131 127 Ip_u3u1u2(_lwx); 132 128 Ip_u1u2u3(_mfc0); 129 + Ip_u1(_mfhi); 130 + Ip_u1(_mflo); 133 131 Ip_u1u2u3(_mtc0); 132 + Ip_u3u1u2(_mul); 134 133 Ip_u3u1u2(_or); 135 134 Ip_u2u1u3(_ori); 136 135 Ip_u2s3u1(_pref); ··· 146 133 Ip_u2s3u1(_scd); 147 134 Ip_u2s3u1(_sd); 148 135 Ip_u2u1u3(_sll); 136 + Ip_u3u2u1(_sllv); 137 + Ip_u2u1s3(_sltiu); 138 + Ip_u3u1u2(_sltu); 149 139 Ip_u2u1u3(_sra); 150 140 Ip_u2u1u3(_srl); 141 + Ip_u3u2u1(_srlv); 151 142 Ip_u3u1u2(_subu); 152 143 Ip_u2s3u1(_sw); 144 + Ip_u1(_sync); 153 145 Ip_u1(_syscall); 154 146 Ip_0(_tlbp); 155 147 Ip_0(_tlbr); 156 148 Ip_0(_tlbwi); 157 149 Ip_0(_tlbwr); 150 + Ip_u1(_wait); 151 + Ip_u2u1(_wsbh); 158 152 Ip_u3u1u2(_xor); 159 153 Ip_u2u1u3(_xori); 154 + Ip_u2u1(_yield); 160 155 161 156 162 157 /* Handle labels. 
*/ ··· 285 264 unsigned int bit, int lid); 286 265 void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, 287 266 unsigned int bit, int lid); 267 + void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1, 268 + unsigned int r2, int lid); 288 269 void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 289 270 void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 290 271 void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+1
arch/mips/include/uapi/asm/Kbuild
··· 4 4 generic-y += auxvec.h 5 5 generic-y += ipcbuf.h 6 6 7 + header-y += bitfield.h 7 8 header-y += bitsperlong.h 8 9 header-y += break.h 9 10 header-y += byteorder.h
+29
arch/mips/include/uapi/asm/bitfield.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2014 by Ralf Baechle <ralf@linux-mips.org> 7 + */ 8 + #ifndef __UAPI_ASM_BITFIELD_H 9 + #define __UAPI_ASM_BITFIELD_H 10 + 11 + /* 12 + * * Damn ... bitfields depend from byteorder :-( 13 + * */ 14 + #ifdef __MIPSEB__ 15 + #define __BITFIELD_FIELD(field, more) \ 16 + field; \ 17 + more 18 + 19 + #elif defined(__MIPSEL__) 20 + 21 + #define __BITFIELD_FIELD(field, more) \ 22 + more \ 23 + field; 24 + 25 + #else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */ 26 + #error "MIPS but neither __MIPSEL__ nor __MIPSEB__?" 27 + #endif 28 + 29 + #endif /* __UAPI_ASM_BITFIELD_H */
+35 -29
arch/mips/include/uapi/asm/inst.h
··· 13 13 #ifndef _UAPI_ASM_INST_H 14 14 #define _UAPI_ASM_INST_H 15 15 16 + #include <asm/bitfield.h> 17 + 16 18 /* 17 19 * Major opcodes; before MIPS IV cop1x was called cop3. 18 20 */ ··· 76 74 enum spec3_op { 77 75 ext_op, dextm_op, dextu_op, dext_op, 78 76 ins_op, dinsm_op, dinsu_op, dins_op, 79 - lx_op = 0x0a, lwle_op = 0x19, 80 - lwre_op = 0x1a, cachee_op = 0x1b, 81 - sbe_op = 0x1c, she_op = 0x1d, 82 - sce_op = 0x1e, swe_op = 0x1f, 83 - bshfl_op = 0x20, swle_op = 0x21, 84 - swre_op = 0x22, prefe_op = 0x23, 85 - dbshfl_op = 0x24, lbue_op = 0x28, 86 - lhue_op = 0x29, lbe_op = 0x2c, 87 - lhe_op = 0x2d, lle_op = 0x2e, 88 - lwe_op = 0x2f, rdhwr_op = 0x3b 77 + yield_op = 0x09, lx_op = 0x0a, 78 + lwle_op = 0x19, lwre_op = 0x1a, 79 + cachee_op = 0x1b, sbe_op = 0x1c, 80 + she_op = 0x1d, sce_op = 0x1e, 81 + swe_op = 0x1f, bshfl_op = 0x20, 82 + swle_op = 0x21, swre_op = 0x22, 83 + prefe_op = 0x23, dbshfl_op = 0x24, 84 + lbue_op = 0x28, lhue_op = 0x29, 85 + lbe_op = 0x2c, lhe_op = 0x2d, 86 + lle_op = 0x2e, lwe_op = 0x2f, 87 + rdhwr_op = 0x3b 89 88 }; 90 89 91 90 /* ··· 128 125 enum cop0_coi_func { 129 126 tlbr_op = 0x01, tlbwi_op = 0x02, 130 127 tlbwr_op = 0x06, tlbp_op = 0x08, 131 - rfe_op = 0x10, eret_op = 0x18 128 + rfe_op = 0x10, eret_op = 0x18, 129 + wait_op = 0x20, 132 130 }; 133 131 134 132 /* ··· 206 202 }; 207 203 208 204 /* 205 + * BSHFL opcodes 206 + */ 207 + enum bshfl_func { 208 + wsbh_op = 0x2, 209 + dshd_op = 0x5, 210 + seb_op = 0x10, 211 + seh_op = 0x18, 212 + }; 213 + 214 + /* 209 215 * (microMIPS) Major opcodes. 
210 216 */ 211 217 enum mm_major_op { ··· 258 244 enum mm_32a_minor_op { 259 245 mm_sll32_op = 0x000, 260 246 mm_ins_op = 0x00c, 247 + mm_sllv32_op = 0x010, 261 248 mm_ext_op = 0x02c, 262 249 mm_pool32axf_op = 0x03c, 263 250 mm_srl32_op = 0x040, 264 251 mm_sra_op = 0x080, 252 + mm_srlv32_op = 0x090, 265 253 mm_rotr_op = 0x0c0, 266 254 mm_lwxs_op = 0x118, 267 255 mm_addu32_op = 0x150, 268 256 mm_subu32_op = 0x1d0, 257 + mm_wsbh_op = 0x1ec, 258 + mm_mul_op = 0x210, 269 259 mm_and_op = 0x250, 270 260 mm_or32_op = 0x290, 271 261 mm_xor32_op = 0x310, 262 + mm_sltu_op = 0x390, 272 263 }; 273 264 274 265 /* ··· 313 294 mm_mfc0_op = 0x003, 314 295 mm_mtc0_op = 0x00b, 315 296 mm_tlbp_op = 0x00d, 297 + mm_mfhi32_op = 0x035, 316 298 mm_jalr_op = 0x03c, 317 299 mm_tlbr_op = 0x04d, 300 + mm_mflo32_op = 0x075, 318 301 mm_jalrhb_op = 0x07c, 319 302 mm_tlbwi_op = 0x08d, 320 303 mm_tlbwr_op = 0x0cd, 321 304 mm_jalrs_op = 0x13c, 322 305 mm_jalrshb_op = 0x17c, 306 + mm_sync_op = 0x1ad, 323 307 mm_syscall_op = 0x22d, 308 + mm_wait_op = 0x24d, 324 309 mm_eret_op = 0x3cd, 310 + mm_divu_op = 0x5dc, 325 311 }; 326 312 327 313 /* ··· 503 479 * (microMIPS & MIPS16e) NOP instruction. 504 480 */ 505 481 #define MM_NOP16 0x0c00 506 - 507 - /* 508 - * Damn ... bitfields depend from byteorder :-( 509 - */ 510 - #ifdef __MIPSEB__ 511 - #define __BITFIELD_FIELD(field, more) \ 512 - field; \ 513 - more 514 - 515 - #elif defined(__MIPSEL__) 516 - 517 - #define __BITFIELD_FIELD(field, more) \ 518 - more \ 519 - field; 520 - 521 - #else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */ 522 - #error "MIPS but neither __MIPSEL__ nor __MIPSEB__?" 523 - #endif 524 482 525 483 struct j_format { 526 484 __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+5 -1
arch/mips/include/uapi/asm/kvm_para.h
··· 1 - #include <asm-generic/kvm_para.h> 1 + #ifndef _UAPI_ASM_MIPS_KVM_PARA_H 2 + #define _UAPI_ASM_MIPS_KVM_PARA_H 3 + 4 + 5 + #endif /* _UAPI_ASM_MIPS_KVM_PARA_H */
+4 -1
arch/mips/include/uapi/asm/types.h
··· 14 14 /* 15 15 * We don't use int-l64.h for the kernel anymore but still use it for 16 16 * userspace to avoid code changes. 17 + * 18 + * However, some user programs (e.g. perf) may not want this. They can 19 + * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. 17 20 */ 18 21 #ifndef __KERNEL__ 19 - # if _MIPS_SZLONG == 64 22 + # if _MIPS_SZLONG == 64 && !defined(__SANE_USERSPACE_TYPES__) 20 23 # include <asm-generic/int-l64.h> 21 24 # else 22 25 # include <asm-generic/int-ll64.h>
+4 -3
arch/mips/kernel/Makefile
··· 17 17 18 18 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o 19 19 obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 20 - obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o 21 20 obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o 22 21 obj-$(CONFIG_CEVT_GIC) += cevt-gic.o 23 22 obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o ··· 41 42 obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o 42 43 obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o 43 44 obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o 44 - obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o 45 + obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o 45 46 46 47 obj-$(CONFIG_SMP) += smp.o 47 48 obj-$(CONFIG_SMP_UP) += smp-up.o ··· 49 50 50 51 obj-$(CONFIG_MIPS_MT) += mips-mt.o 51 52 obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o 52 - obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o 53 53 obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o 54 54 obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 55 55 obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o ··· 104 106 105 107 obj-$(CONFIG_MIPS_CM) += mips-cm.o 106 108 obj-$(CONFIG_MIPS_CPC) += mips-cpc.o 109 + 110 + obj-$(CONFIG_CPU_PM) += pm.o 111 + obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o 107 112 108 113 # 109 114 # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
+24 -8
arch/mips/kernel/asm-offsets.c
··· 14 14 #include <linux/mm.h> 15 15 #include <linux/kbuild.h> 16 16 #include <linux/suspend.h> 17 + #include <asm/pm.h> 17 18 #include <asm/ptrace.h> 18 19 #include <asm/processor.h> 19 20 #include <asm/smp-cps.h> ··· 65 64 OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); 66 65 OFFSET(PT_STATUS, pt_regs, cp0_status); 67 66 OFFSET(PT_CAUSE, pt_regs, cp0_cause); 68 - #ifdef CONFIG_MIPS_MT_SMTC 69 - OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); 70 - #endif /* CONFIG_MIPS_MT_SMTC */ 71 67 #ifdef CONFIG_CPU_CAVIUM_OCTEON 72 68 OFFSET(PT_MPL, pt_regs, mpl); 73 69 OFFSET(PT_MTP, pt_regs, mtp); ··· 402 404 } 403 405 #endif 404 406 407 + #ifdef CONFIG_CPU_PM 408 + void output_pm_defines(void) 409 + { 410 + COMMENT(" PM offsets. "); 411 + #ifdef CONFIG_EVA 412 + OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]); 413 + OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]); 414 + OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]); 415 + #endif 416 + OFFSET(SSS_SP, mips_static_suspend_state, sp); 417 + BLANK(); 418 + } 419 + #endif 420 + 405 421 void output_kvm_defines(void) 406 422 { 407 423 COMMENT(" KVM/MIPS Specfic offsets. "); ··· 484 472 void output_cps_defines(void) 485 473 { 486 474 COMMENT(" MIPS CPS offsets. "); 487 - OFFSET(BOOTCFG_CORE, boot_config, core); 488 - OFFSET(BOOTCFG_VPE, boot_config, vpe); 489 - OFFSET(BOOTCFG_PC, boot_config, pc); 490 - OFFSET(BOOTCFG_SP, boot_config, sp); 491 - OFFSET(BOOTCFG_GP, boot_config, gp); 475 + 476 + OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask); 477 + OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config); 478 + DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config)); 479 + 480 + OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc); 481 + OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp); 482 + OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp); 483 + DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config)); 492 484 } 493 485 #endif
+201 -1
arch/mips/kernel/branch.c
··· 48 48 return epc; 49 49 } 50 50 51 + /* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */ 52 + static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7}; 53 + 54 + int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 55 + unsigned long *contpc) 56 + { 57 + union mips_instruction insn = (union mips_instruction)dec_insn.insn; 58 + int bc_false = 0; 59 + unsigned int fcr31; 60 + unsigned int bit; 61 + 62 + if (!cpu_has_mmips) 63 + return 0; 64 + 65 + switch (insn.mm_i_format.opcode) { 66 + case mm_pool32a_op: 67 + if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == 68 + mm_pool32axf_op) { 69 + switch (insn.mm_i_format.simmediate >> 70 + MM_POOL32A_MINOR_SHIFT) { 71 + case mm_jalr_op: 72 + case mm_jalrhb_op: 73 + case mm_jalrs_op: 74 + case mm_jalrshb_op: 75 + if (insn.mm_i_format.rt != 0) /* Not mm_jr */ 76 + regs->regs[insn.mm_i_format.rt] = 77 + regs->cp0_epc + 78 + dec_insn.pc_inc + 79 + dec_insn.next_pc_inc; 80 + *contpc = regs->regs[insn.mm_i_format.rs]; 81 + return 1; 82 + } 83 + } 84 + break; 85 + case mm_pool32i_op: 86 + switch (insn.mm_i_format.rt) { 87 + case mm_bltzals_op: 88 + case mm_bltzal_op: 89 + regs->regs[31] = regs->cp0_epc + 90 + dec_insn.pc_inc + 91 + dec_insn.next_pc_inc; 92 + /* Fall through */ 93 + case mm_bltz_op: 94 + if ((long)regs->regs[insn.mm_i_format.rs] < 0) 95 + *contpc = regs->cp0_epc + 96 + dec_insn.pc_inc + 97 + (insn.mm_i_format.simmediate << 1); 98 + else 99 + *contpc = regs->cp0_epc + 100 + dec_insn.pc_inc + 101 + dec_insn.next_pc_inc; 102 + return 1; 103 + case mm_bgezals_op: 104 + case mm_bgezal_op: 105 + regs->regs[31] = regs->cp0_epc + 106 + dec_insn.pc_inc + 107 + dec_insn.next_pc_inc; 108 + /* Fall through */ 109 + case mm_bgez_op: 110 + if ((long)regs->regs[insn.mm_i_format.rs] >= 0) 111 + *contpc = regs->cp0_epc + 112 + dec_insn.pc_inc + 113 + (insn.mm_i_format.simmediate << 1); 114 + else 115 + *contpc = regs->cp0_epc + 116 + dec_insn.pc_inc + 
117 + dec_insn.next_pc_inc; 118 + return 1; 119 + case mm_blez_op: 120 + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) 121 + *contpc = regs->cp0_epc + 122 + dec_insn.pc_inc + 123 + (insn.mm_i_format.simmediate << 1); 124 + else 125 + *contpc = regs->cp0_epc + 126 + dec_insn.pc_inc + 127 + dec_insn.next_pc_inc; 128 + return 1; 129 + case mm_bgtz_op: 130 + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) 131 + *contpc = regs->cp0_epc + 132 + dec_insn.pc_inc + 133 + (insn.mm_i_format.simmediate << 1); 134 + else 135 + *contpc = regs->cp0_epc + 136 + dec_insn.pc_inc + 137 + dec_insn.next_pc_inc; 138 + return 1; 139 + case mm_bc2f_op: 140 + case mm_bc1f_op: 141 + bc_false = 1; 142 + /* Fall through */ 143 + case mm_bc2t_op: 144 + case mm_bc1t_op: 145 + preempt_disable(); 146 + if (is_fpu_owner()) 147 + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); 148 + else 149 + fcr31 = current->thread.fpu.fcr31; 150 + preempt_enable(); 151 + 152 + if (bc_false) 153 + fcr31 = ~fcr31; 154 + 155 + bit = (insn.mm_i_format.rs >> 2); 156 + bit += (bit != 0); 157 + bit += 23; 158 + if (fcr31 & (1 << bit)) 159 + *contpc = regs->cp0_epc + 160 + dec_insn.pc_inc + 161 + (insn.mm_i_format.simmediate << 1); 162 + else 163 + *contpc = regs->cp0_epc + 164 + dec_insn.pc_inc + dec_insn.next_pc_inc; 165 + return 1; 166 + } 167 + break; 168 + case mm_pool16c_op: 169 + switch (insn.mm_i_format.rt) { 170 + case mm_jalr16_op: 171 + case mm_jalrs16_op: 172 + regs->regs[31] = regs->cp0_epc + 173 + dec_insn.pc_inc + dec_insn.next_pc_inc; 174 + /* Fall through */ 175 + case mm_jr16_op: 176 + *contpc = regs->regs[insn.mm_i_format.rs]; 177 + return 1; 178 + } 179 + break; 180 + case mm_beqz16_op: 181 + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0) 182 + *contpc = regs->cp0_epc + 183 + dec_insn.pc_inc + 184 + (insn.mm_b1_format.simmediate << 1); 185 + else 186 + *contpc = regs->cp0_epc + 187 + dec_insn.pc_inc + dec_insn.next_pc_inc; 188 + return 1; 189 + case mm_bnez16_op: 190 + if 
((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0) 191 + *contpc = regs->cp0_epc + 192 + dec_insn.pc_inc + 193 + (insn.mm_b1_format.simmediate << 1); 194 + else 195 + *contpc = regs->cp0_epc + 196 + dec_insn.pc_inc + dec_insn.next_pc_inc; 197 + return 1; 198 + case mm_b16_op: 199 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 200 + (insn.mm_b0_format.simmediate << 1); 201 + return 1; 202 + case mm_beq32_op: 203 + if (regs->regs[insn.mm_i_format.rs] == 204 + regs->regs[insn.mm_i_format.rt]) 205 + *contpc = regs->cp0_epc + 206 + dec_insn.pc_inc + 207 + (insn.mm_i_format.simmediate << 1); 208 + else 209 + *contpc = regs->cp0_epc + 210 + dec_insn.pc_inc + 211 + dec_insn.next_pc_inc; 212 + return 1; 213 + case mm_bne32_op: 214 + if (regs->regs[insn.mm_i_format.rs] != 215 + regs->regs[insn.mm_i_format.rt]) 216 + *contpc = regs->cp0_epc + 217 + dec_insn.pc_inc + 218 + (insn.mm_i_format.simmediate << 1); 219 + else 220 + *contpc = regs->cp0_epc + 221 + dec_insn.pc_inc + dec_insn.next_pc_inc; 222 + return 1; 223 + case mm_jalx32_op: 224 + regs->regs[31] = regs->cp0_epc + 225 + dec_insn.pc_inc + dec_insn.next_pc_inc; 226 + *contpc = regs->cp0_epc + dec_insn.pc_inc; 227 + *contpc >>= 28; 228 + *contpc <<= 28; 229 + *contpc |= (insn.j_format.target << 2); 230 + return 1; 231 + case mm_jals32_op: 232 + case mm_jal32_op: 233 + regs->regs[31] = regs->cp0_epc + 234 + dec_insn.pc_inc + dec_insn.next_pc_inc; 235 + /* Fall through */ 236 + case mm_j32_op: 237 + *contpc = regs->cp0_epc + dec_insn.pc_inc; 238 + *contpc >>= 27; 239 + *contpc <<= 27; 240 + *contpc |= (insn.j_format.target << 1); 241 + set_isa16_mode(*contpc); 242 + return 1; 243 + } 244 + return 0; 245 + } 246 + 51 247 /* 52 248 * Compute return address and emulate branch in microMIPS mode after an 53 249 * exception only. 
It does not handle compact branches/jumps and cannot ··· 562 366 case cop1_op: 563 367 preempt_disable(); 564 368 if (is_fpu_owner()) 565 - asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); 369 + asm volatile( 370 + ".set push\n" 371 + "\t.set mips1\n" 372 + "\tcfc1\t%0,$31\n" 373 + "\t.set pop" : "=r" (fcr31)); 566 374 else 567 375 fcr31 = current->thread.fpu.fcr31; 568 376 preempt_enable();
+3 -2
arch/mips/kernel/cevt-gic.c
··· 26 26 27 27 cnt = gic_read_count(); 28 28 cnt += (u64)delta; 29 - gic_write_compare(cnt); 29 + gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); 30 30 res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; 31 31 return res; 32 32 } ··· 73 73 cd = &per_cpu(gic_clockevent_device, cpu); 74 74 75 75 cd->name = "MIPS GIC"; 76 - cd->features = CLOCK_EVT_FEAT_ONESHOT; 76 + cd->features = CLOCK_EVT_FEAT_ONESHOT | 77 + CLOCK_EVT_FEAT_C3STOP; 77 78 78 79 clockevent_set_clock(cd, gic_frequency); 79 80
+3 -21
arch/mips/kernel/cevt-r4k.c
··· 12 12 #include <linux/smp.h> 13 13 #include <linux/irq.h> 14 14 15 - #include <asm/smtc_ipi.h> 16 15 #include <asm/time.h> 17 16 #include <asm/cevt-r4k.h> 18 17 #include <asm/gic.h> 19 18 20 - /* 21 - * The SMTC Kernel for the 34K, 1004K, et. al. replaces several 22 - * of these routines with SMTC-specific variants. 23 - */ 24 - 25 - #ifndef CONFIG_MIPS_MT_SMTC 26 19 static int mips_next_event(unsigned long delta, 27 20 struct clock_event_device *evt) 28 21 { ··· 29 36 return res; 30 37 } 31 38 32 - #endif /* CONFIG_MIPS_MT_SMTC */ 33 - 34 39 void mips_set_clock_mode(enum clock_event_mode mode, 35 40 struct clock_event_device *evt) 36 41 { ··· 38 47 DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); 39 48 int cp0_timer_irq_installed; 40 49 41 - #ifndef CONFIG_MIPS_MT_SMTC 42 50 irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 43 51 { 44 52 const int r2 = cpu_has_mips_r2; ··· 62 72 /* Clear Count/Compare Interrupt */ 63 73 write_c0_compare(read_c0_compare()); 64 74 cd = &per_cpu(mips_clockevent_device, cpu); 65 - #ifdef CONFIG_CEVT_GIC 66 - if (!gic_present) 67 - #endif 68 75 cd->event_handler(cd); 69 76 } 70 77 71 78 out: 72 79 return IRQ_HANDLED; 73 80 } 74 - 75 - #endif /* Not CONFIG_MIPS_MT_SMTC */ 76 81 77 82 struct irqaction c0_compare_irqaction = { 78 83 .handler = c0_compare_interrupt, ··· 155 170 return 1; 156 171 } 157 172 158 - #ifndef CONFIG_MIPS_MT_SMTC 159 173 int r4k_clockevent_init(void) 160 174 { 161 175 unsigned int cpu = smp_processor_id(); ··· 179 195 cd = &per_cpu(mips_clockevent_device, cpu); 180 196 181 197 cd->name = "MIPS"; 182 - cd->features = CLOCK_EVT_FEAT_ONESHOT; 198 + cd->features = CLOCK_EVT_FEAT_ONESHOT | 199 + CLOCK_EVT_FEAT_C3STOP | 200 + CLOCK_EVT_FEAT_PERCPU; 183 201 184 202 clockevent_set_clock(cd, mips_hpt_frequency); 185 203 ··· 196 210 cd->set_mode = mips_set_clock_mode; 197 211 cd->event_handler = mips_event_handler; 198 212 199 - #ifdef CONFIG_CEVT_GIC 200 - if (!gic_present) 201 - #endif 202 213 
clockevents_register_device(cd); 203 214 204 215 if (cp0_timer_irq_installed) ··· 208 225 return 0; 209 226 } 210 227 211 - #endif /* Not CONFIG_MIPS_MT_SMTC */
-324
arch/mips/kernel/cevt-smtc.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2007 MIPS Technologies, Inc. 7 - * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> 8 - * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl 9 - */ 10 - #include <linux/clockchips.h> 11 - #include <linux/interrupt.h> 12 - #include <linux/percpu.h> 13 - #include <linux/smp.h> 14 - #include <linux/irq.h> 15 - 16 - #include <asm/smtc_ipi.h> 17 - #include <asm/time.h> 18 - #include <asm/cevt-r4k.h> 19 - 20 - /* 21 - * Variant clock event timer support for SMTC on MIPS 34K, 1004K 22 - * or other MIPS MT cores. 23 - * 24 - * Notes on SMTC Support: 25 - * 26 - * SMTC has multiple microthread TCs pretending to be Linux CPUs. 27 - * But there's only one Count/Compare pair per VPE, and Compare 28 - * interrupts are taken opportunisitically by available TCs 29 - * bound to the VPE with the Count register. The new timer 30 - * framework provides for global broadcasts, but we really 31 - * want VPE-level multicasts for best behavior. So instead 32 - * of invoking the high-level clock-event broadcast code, 33 - * this version of SMTC support uses the historical SMTC 34 - * multicast mechanisms "under the hood", appearing to the 35 - * generic clock layer as if the interrupts are per-CPU. 36 - * 37 - * The approach taken here is to maintain a set of NR_CPUS 38 - * virtual timers, and track which "CPU" needs to be alerted 39 - * at each event. 40 - * 41 - * It's unlikely that we'll see a MIPS MT core with more than 42 - * 2 VPEs, but we *know* that we won't need to handle more 43 - * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements 44 - * is always going to be overkill, but always going to be enough. 
45 - */ 46 - 47 - unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; 48 - static int smtc_nextinvpe[NR_CPUS]; 49 - 50 - /* 51 - * Timestamps stored are absolute values to be programmed 52 - * into Count register. Valid timestamps will never be zero. 53 - * If a Zero Count value is actually calculated, it is converted 54 - * to be a 1, which will introduce 1 or two CPU cycles of error 55 - * roughly once every four billion events, which at 1000 HZ means 56 - * about once every 50 days. If that's actually a problem, one 57 - * could alternate squashing 0 to 1 and to -1. 58 - */ 59 - 60 - #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) 61 - #define ISVALID(x) ((x) != 0L) 62 - 63 - /* 64 - * Time comparison is subtle, as it's really truncated 65 - * modular arithmetic. 66 - */ 67 - 68 - #define IS_SOONER(a, b, reference) \ 69 - (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) 70 - 71 - /* 72 - * CATCHUP_INCREMENT, used when the function falls behind the counter. 73 - * Could be an increasing function instead of a constant; 74 - */ 75 - 76 - #define CATCHUP_INCREMENT 64 77 - 78 - static int mips_next_event(unsigned long delta, 79 - struct clock_event_device *evt) 80 - { 81 - unsigned long flags; 82 - unsigned int mtflags; 83 - unsigned long timestamp, reference, previous; 84 - unsigned long nextcomp = 0L; 85 - int vpe = current_cpu_data.vpe_id; 86 - int cpu = smp_processor_id(); 87 - local_irq_save(flags); 88 - mtflags = dmt(); 89 - 90 - /* 91 - * Maintain the per-TC virtual timer 92 - * and program the per-VPE shared Count register 93 - * as appropriate here... 94 - */ 95 - reference = (unsigned long)read_c0_count(); 96 - timestamp = MAKEVALID(reference + delta); 97 - /* 98 - * To really model the clock, we have to catch the case 99 - * where the current next-in-VPE timestamp is the old 100 - * timestamp for the calling CPE, but the new value is 101 - * in fact later. 
In that case, we have to do a full 102 - * scan and discover the new next-in-VPE CPU id and 103 - * timestamp. 104 - */ 105 - previous = smtc_nexttime[vpe][cpu]; 106 - if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) 107 - && IS_SOONER(previous, timestamp, reference)) { 108 - int i; 109 - int soonest = cpu; 110 - 111 - /* 112 - * Update timestamp array here, so that new 113 - * value gets considered along with those of 114 - * other virtual CPUs on the VPE. 115 - */ 116 - smtc_nexttime[vpe][cpu] = timestamp; 117 - for_each_online_cpu(i) { 118 - if (ISVALID(smtc_nexttime[vpe][i]) 119 - && IS_SOONER(smtc_nexttime[vpe][i], 120 - smtc_nexttime[vpe][soonest], reference)) { 121 - soonest = i; 122 - } 123 - } 124 - smtc_nextinvpe[vpe] = soonest; 125 - nextcomp = smtc_nexttime[vpe][soonest]; 126 - /* 127 - * Otherwise, we don't have to process the whole array rank, 128 - * we just have to see if the event horizon has gotten closer. 129 - */ 130 - } else { 131 - if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || 132 - IS_SOONER(timestamp, 133 - smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { 134 - smtc_nextinvpe[vpe] = cpu; 135 - nextcomp = timestamp; 136 - } 137 - /* 138 - * Since next-in-VPE may me the same as the executing 139 - * virtual CPU, we update the array *after* checking 140 - * its value. 141 - */ 142 - smtc_nexttime[vpe][cpu] = timestamp; 143 - } 144 - 145 - /* 146 - * It may be that, in fact, we don't need to update Compare, 147 - * but if we do, we want to make sure we didn't fall into 148 - * a crack just behind Count. 149 - */ 150 - if (ISVALID(nextcomp)) { 151 - write_c0_compare(nextcomp); 152 - ehb(); 153 - /* 154 - * We never return an error, we just make sure 155 - * that we trigger the handlers as quickly as 156 - * we can if we fell behind. 
157 - */ 158 - while ((nextcomp - (unsigned long)read_c0_count()) 159 - > (unsigned long)LONG_MAX) { 160 - nextcomp += CATCHUP_INCREMENT; 161 - write_c0_compare(nextcomp); 162 - ehb(); 163 - } 164 - } 165 - emt(mtflags); 166 - local_irq_restore(flags); 167 - return 0; 168 - } 169 - 170 - 171 - void smtc_distribute_timer(int vpe) 172 - { 173 - unsigned long flags; 174 - unsigned int mtflags; 175 - int cpu; 176 - struct clock_event_device *cd; 177 - unsigned long nextstamp; 178 - unsigned long reference; 179 - 180 - 181 - repeat: 182 - nextstamp = 0L; 183 - for_each_online_cpu(cpu) { 184 - /* 185 - * Find virtual CPUs within the current VPE who have 186 - * unserviced timer requests whose time is now past. 187 - */ 188 - local_irq_save(flags); 189 - mtflags = dmt(); 190 - if (cpu_data[cpu].vpe_id == vpe && 191 - ISVALID(smtc_nexttime[vpe][cpu])) { 192 - reference = (unsigned long)read_c0_count(); 193 - if ((smtc_nexttime[vpe][cpu] - reference) 194 - > (unsigned long)LONG_MAX) { 195 - smtc_nexttime[vpe][cpu] = 0L; 196 - emt(mtflags); 197 - local_irq_restore(flags); 198 - /* 199 - * We don't send IPIs to ourself. 200 - */ 201 - if (cpu != smp_processor_id()) { 202 - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); 203 - } else { 204 - cd = &per_cpu(mips_clockevent_device, cpu); 205 - cd->event_handler(cd); 206 - } 207 - } else { 208 - /* Local to VPE but Valid Time not yet reached. 
*/ 209 - if (!ISVALID(nextstamp) || 210 - IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, 211 - reference)) { 212 - smtc_nextinvpe[vpe] = cpu; 213 - nextstamp = smtc_nexttime[vpe][cpu]; 214 - } 215 - emt(mtflags); 216 - local_irq_restore(flags); 217 - } 218 - } else { 219 - emt(mtflags); 220 - local_irq_restore(flags); 221 - 222 - } 223 - } 224 - /* Reprogram for interrupt at next soonest timestamp for VPE */ 225 - if (ISVALID(nextstamp)) { 226 - write_c0_compare(nextstamp); 227 - ehb(); 228 - if ((nextstamp - (unsigned long)read_c0_count()) 229 - > (unsigned long)LONG_MAX) 230 - goto repeat; 231 - } 232 - } 233 - 234 - 235 - irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 236 - { 237 - int cpu = smp_processor_id(); 238 - 239 - /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ 240 - handle_perf_irq(1); 241 - 242 - if (read_c0_cause() & (1 << 30)) { 243 - /* Clear Count/Compare Interrupt */ 244 - write_c0_compare(read_c0_compare()); 245 - smtc_distribute_timer(cpu_data[cpu].vpe_id); 246 - } 247 - return IRQ_HANDLED; 248 - } 249 - 250 - 251 - int smtc_clockevent_init(void) 252 - { 253 - uint64_t mips_freq = mips_hpt_frequency; 254 - unsigned int cpu = smp_processor_id(); 255 - struct clock_event_device *cd; 256 - unsigned int irq; 257 - int i; 258 - int j; 259 - 260 - if (!cpu_has_counter || !mips_hpt_frequency) 261 - return -ENXIO; 262 - if (cpu == 0) { 263 - for (i = 0; i < num_possible_cpus(); i++) { 264 - smtc_nextinvpe[i] = 0; 265 - for (j = 0; j < num_possible_cpus(); j++) 266 - smtc_nexttime[i][j] = 0L; 267 - } 268 - /* 269 - * SMTC also can't have the usablility test 270 - * run by secondary TCs once Compare is in use. 271 - */ 272 - if (!c0_compare_int_usable()) 273 - return -ENXIO; 274 - } 275 - 276 - /* 277 - * With vectored interrupts things are getting platform specific. 278 - * get_c0_compare_int is a hook to allow a platform to return the 279 - * interrupt number of it's liking. 
280 - */ 281 - irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; 282 - if (get_c0_compare_int) 283 - irq = get_c0_compare_int(); 284 - 285 - cd = &per_cpu(mips_clockevent_device, cpu); 286 - 287 - cd->name = "MIPS"; 288 - cd->features = CLOCK_EVT_FEAT_ONESHOT; 289 - 290 - /* Calculate the min / max delta */ 291 - cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); 292 - cd->shift = 32; 293 - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 294 - cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 295 - 296 - cd->rating = 300; 297 - cd->irq = irq; 298 - cd->cpumask = cpumask_of(cpu); 299 - cd->set_next_event = mips_next_event; 300 - cd->set_mode = mips_set_clock_mode; 301 - cd->event_handler = mips_event_handler; 302 - 303 - clockevents_register_device(cd); 304 - 305 - /* 306 - * On SMTC we only want to do the data structure 307 - * initialization and IRQ setup once. 308 - */ 309 - if (cpu) 310 - return 0; 311 - /* 312 - * And we need the hwmask associated with the c0_compare 313 - * vector to be initialized. 314 - */ 315 - irq_hwmask[irq] = (0x100 << cp0_compare_irq); 316 - if (cp0_timer_irq_installed) 317 - return 0; 318 - 319 - cp0_timer_irq_installed = 1; 320 - 321 - setup_irq(irq, &c0_compare_irqaction); 322 - 323 - return 0; 324 - }
+312 -16
arch/mips/kernel/cps-vec.S
··· 14 14 #include <asm/asmmacro.h> 15 15 #include <asm/cacheops.h> 16 16 #include <asm/mipsregs.h> 17 + #include <asm/mipsmtregs.h> 18 + #include <asm/pm.h> 17 19 18 - #define GCR_CL_COHERENCE_OFS 0x2008 20 + #define GCR_CL_COHERENCE_OFS 0x2008 21 + #define GCR_CL_ID_OFS 0x2028 22 + 23 + .extern mips_cm_base 24 + 25 + .set noreorder 26 + 27 + /* 28 + * Set dest to non-zero if the core supports the MT ASE, else zero. If 29 + * MT is not supported then branch to nomt. 30 + */ 31 + .macro has_mt dest, nomt 32 + mfc0 \dest, CP0_CONFIG 33 + bgez \dest, \nomt 34 + mfc0 \dest, CP0_CONFIG, 1 35 + bgez \dest, \nomt 36 + mfc0 \dest, CP0_CONFIG, 2 37 + bgez \dest, \nomt 38 + mfc0 \dest, CP0_CONFIG, 3 39 + andi \dest, \dest, MIPS_CONF3_MT 40 + beqz \dest, \nomt 41 + .endm 19 42 20 43 .section .text.cps-vec 21 44 .balign 0x1000 22 - .set noreorder 23 45 24 46 LEAF(mips_cps_core_entry) 25 47 /* 26 - * These first 8 bytes will be patched by cps_smp_setup to load the 27 - * base address of the CM GCRs into register v1. 48 + * These first 12 bytes will be patched by cps_smp_setup to load the 49 + * base address of the CM GCRs into register v1 and the CCA to use into 50 + * register s0. 28 51 */ 29 52 .quad 0 53 + .word 0 30 54 31 55 /* Check whether we're here due to an NMI */ 32 56 mfc0 k0, CP0_STATUS ··· 141 117 add a0, a0, t0 142 118 dcache_done: 143 119 144 - /* Set Kseg0 cacheable, coherent, write-back, write-allocate */ 120 + /* Set Kseg0 CCA to that in s0 */ 145 121 mfc0 t0, CP0_CONFIG 146 122 ori t0, 0x7 147 - xori t0, 0x2 123 + xori t0, 0x7 124 + or t0, t0, s0 148 125 mtc0 t0, CP0_CONFIG 149 126 ehb 150 127 ··· 159 134 jr t0 160 135 nop 161 136 162 - 1: /* We're up, cached & coherent */ 137 + /* 138 + * We're up, cached & coherent. Perform any further required core-level 139 + * initialisation. 
140 + */ 141 + 1: jal mips_cps_core_init 142 + nop 163 143 164 144 /* 165 - * TODO: We should check the VPE number we intended to boot here, and 166 - * if non-zero we should start that VPE and stop this one. For 167 - * the moment this doesn't matter since CPUs are brought up 168 - * sequentially and in order, but once hotplug is implemented 169 - * this will need revisiting. 145 + * Boot any other VPEs within this core that should be online, and 146 + * deactivate this VPE if it should be offline. 170 147 */ 148 + jal mips_cps_boot_vpes 149 + nop 171 150 172 151 /* Off we go! */ 173 - la t0, mips_cps_bootcfg 174 - lw t1, BOOTCFG_PC(t0) 175 - lw gp, BOOTCFG_GP(t0) 176 - lw sp, BOOTCFG_SP(t0) 152 + lw t1, VPEBOOTCFG_PC(v0) 153 + lw gp, VPEBOOTCFG_GP(v0) 154 + lw sp, VPEBOOTCFG_SP(v0) 177 155 jr t1 178 156 nop 179 157 END(mips_cps_core_entry) ··· 217 189 jr k0 218 190 nop 219 191 END(excep_ejtag) 192 + 193 + LEAF(mips_cps_core_init) 194 + #ifdef CONFIG_MIPS_MT 195 + /* Check that the core implements the MT ASE */ 196 + has_mt t0, 3f 197 + nop 198 + 199 + .set push 200 + .set mt 201 + 202 + /* Only allow 1 TC per VPE to execute... 
*/ 203 + dmt 204 + 205 + /* ...and for the moment only 1 VPE */ 206 + dvpe 207 + la t1, 1f 208 + jr.hb t1 209 + nop 210 + 211 + /* Enter VPE configuration state */ 212 + 1: mfc0 t0, CP0_MVPCONTROL 213 + ori t0, t0, MVPCONTROL_VPC 214 + mtc0 t0, CP0_MVPCONTROL 215 + 216 + /* Retrieve the number of VPEs within the core */ 217 + mfc0 t0, CP0_MVPCONF0 218 + srl t0, t0, MVPCONF0_PVPE_SHIFT 219 + andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 220 + addi t7, t0, 1 221 + 222 + /* If there's only 1, we're done */ 223 + beqz t0, 2f 224 + nop 225 + 226 + /* Loop through each VPE within this core */ 227 + li t5, 1 228 + 229 + 1: /* Operate on the appropriate TC */ 230 + mtc0 t5, CP0_VPECONTROL 231 + ehb 232 + 233 + /* Bind TC to VPE (1:1 TC:VPE mapping) */ 234 + mttc0 t5, CP0_TCBIND 235 + 236 + /* Set exclusive TC, non-active, master */ 237 + li t0, VPECONF0_MVP 238 + sll t1, t5, VPECONF0_XTC_SHIFT 239 + or t0, t0, t1 240 + mttc0 t0, CP0_VPECONF0 241 + 242 + /* Set TC non-active, non-allocatable */ 243 + mttc0 zero, CP0_TCSTATUS 244 + 245 + /* Set TC halted */ 246 + li t0, TCHALT_H 247 + mttc0 t0, CP0_TCHALT 248 + 249 + /* Next VPE */ 250 + addi t5, t5, 1 251 + slt t0, t5, t7 252 + bnez t0, 1b 253 + nop 254 + 255 + /* Leave VPE configuration state */ 256 + 2: mfc0 t0, CP0_MVPCONTROL 257 + xori t0, t0, MVPCONTROL_VPC 258 + mtc0 t0, CP0_MVPCONTROL 259 + 260 + 3: .set pop 261 + #endif 262 + jr ra 263 + nop 264 + END(mips_cps_core_init) 265 + 266 + LEAF(mips_cps_boot_vpes) 267 + /* Retrieve CM base address */ 268 + la t0, mips_cm_base 269 + lw t0, 0(t0) 270 + 271 + /* Calculate a pointer to this cores struct core_boot_config */ 272 + lw t0, GCR_CL_ID_OFS(t0) 273 + li t1, COREBOOTCFG_SIZE 274 + mul t0, t0, t1 275 + la t1, mips_cps_core_bootcfg 276 + lw t1, 0(t1) 277 + addu t0, t0, t1 278 + 279 + /* Calculate this VPEs ID. 
If the core doesn't support MT use 0 */ 280 + has_mt t6, 1f 281 + li t9, 0 282 + 283 + /* Find the number of VPEs present in the core */ 284 + mfc0 t1, CP0_MVPCONF0 285 + srl t1, t1, MVPCONF0_PVPE_SHIFT 286 + andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT 287 + addi t1, t1, 1 288 + 289 + /* Calculate a mask for the VPE ID from EBase.CPUNum */ 290 + clz t1, t1 291 + li t2, 31 292 + subu t1, t2, t1 293 + li t2, 1 294 + sll t1, t2, t1 295 + addiu t1, t1, -1 296 + 297 + /* Retrieve the VPE ID from EBase.CPUNum */ 298 + mfc0 t9, $15, 1 299 + and t9, t9, t1 300 + 301 + 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ 302 + li t1, VPEBOOTCFG_SIZE 303 + mul v0, t9, t1 304 + lw t7, COREBOOTCFG_VPECONFIG(t0) 305 + addu v0, v0, t7 306 + 307 + #ifdef CONFIG_MIPS_MT 308 + 309 + /* If the core doesn't support MT then return */ 310 + bnez t6, 1f 311 + nop 312 + jr ra 313 + nop 314 + 315 + .set push 316 + .set mt 317 + 318 + 1: /* Enter VPE configuration state */ 319 + dvpe 320 + la t1, 1f 321 + jr.hb t1 322 + nop 323 + 1: mfc0 t1, CP0_MVPCONTROL 324 + ori t1, t1, MVPCONTROL_VPC 325 + mtc0 t1, CP0_MVPCONTROL 326 + ehb 327 + 328 + /* Loop through each VPE */ 329 + lw t6, COREBOOTCFG_VPEMASK(t0) 330 + move t8, t6 331 + li t5, 0 332 + 333 + /* Check whether the VPE should be running. 
If not, skip it */ 334 + 1: andi t0, t6, 1 335 + beqz t0, 2f 336 + nop 337 + 338 + /* Operate on the appropriate TC */ 339 + mfc0 t0, CP0_VPECONTROL 340 + ori t0, t0, VPECONTROL_TARGTC 341 + xori t0, t0, VPECONTROL_TARGTC 342 + or t0, t0, t5 343 + mtc0 t0, CP0_VPECONTROL 344 + ehb 345 + 346 + /* Skip the VPE if its TC is not halted */ 347 + mftc0 t0, CP0_TCHALT 348 + beqz t0, 2f 349 + nop 350 + 351 + /* Calculate a pointer to the VPEs struct vpe_boot_config */ 352 + li t0, VPEBOOTCFG_SIZE 353 + mul t0, t0, t5 354 + addu t0, t0, t7 355 + 356 + /* Set the TC restart PC */ 357 + lw t1, VPEBOOTCFG_PC(t0) 358 + mttc0 t1, CP0_TCRESTART 359 + 360 + /* Set the TC stack pointer */ 361 + lw t1, VPEBOOTCFG_SP(t0) 362 + mttgpr t1, sp 363 + 364 + /* Set the TC global pointer */ 365 + lw t1, VPEBOOTCFG_GP(t0) 366 + mttgpr t1, gp 367 + 368 + /* Copy config from this VPE */ 369 + mfc0 t0, CP0_CONFIG 370 + mttc0 t0, CP0_CONFIG 371 + 372 + /* Ensure no software interrupts are pending */ 373 + mttc0 zero, CP0_CAUSE 374 + mttc0 zero, CP0_STATUS 375 + 376 + /* Set TC active, not interrupt exempt */ 377 + mftc0 t0, CP0_TCSTATUS 378 + li t1, ~TCSTATUS_IXMT 379 + and t0, t0, t1 380 + ori t0, t0, TCSTATUS_A 381 + mttc0 t0, CP0_TCSTATUS 382 + 383 + /* Clear the TC halt bit */ 384 + mttc0 zero, CP0_TCHALT 385 + 386 + /* Set VPE active */ 387 + mftc0 t0, CP0_VPECONF0 388 + ori t0, t0, VPECONF0_VPA 389 + mttc0 t0, CP0_VPECONF0 390 + 391 + /* Next VPE */ 392 + 2: srl t6, t6, 1 393 + addi t5, t5, 1 394 + bnez t6, 1b 395 + nop 396 + 397 + /* Leave VPE configuration state */ 398 + mfc0 t1, CP0_MVPCONTROL 399 + xori t1, t1, MVPCONTROL_VPC 400 + mtc0 t1, CP0_MVPCONTROL 401 + ehb 402 + evpe 403 + 404 + /* Check whether this VPE is meant to be running */ 405 + li t0, 1 406 + sll t0, t0, t9 407 + and t0, t0, t8 408 + bnez t0, 2f 409 + nop 410 + 411 + /* This VPE should be offline, halt the TC */ 412 + li t0, TCHALT_H 413 + mtc0 t0, CP0_TCHALT 414 + la t0, 1f 415 + 1: jr.hb t0 416 + nop 417 + 418 + 2: 
.set pop 419 + 420 + #endif /* CONFIG_MIPS_MT */ 421 + 422 + /* Return */ 423 + jr ra 424 + nop 425 + END(mips_cps_boot_vpes) 426 + 427 + #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) 428 + 429 + /* Calculate a pointer to this CPUs struct mips_static_suspend_state */ 430 + .macro psstate dest 431 + .set push 432 + .set noat 433 + lw $1, TI_CPU(gp) 434 + sll $1, $1, LONGLOG 435 + la \dest, __per_cpu_offset 436 + addu $1, $1, \dest 437 + lw $1, 0($1) 438 + la \dest, cps_cpu_state 439 + addu \dest, \dest, $1 440 + .set pop 441 + .endm 442 + 443 + LEAF(mips_cps_pm_save) 444 + /* Save CPU state */ 445 + SUSPEND_SAVE_REGS 446 + psstate t1 447 + SUSPEND_SAVE_STATIC 448 + jr v0 449 + nop 450 + END(mips_cps_pm_save) 451 + 452 + LEAF(mips_cps_pm_restore) 453 + /* Restore CPU state */ 454 + psstate t1 455 + RESUME_RESTORE_STATIC 456 + RESUME_RESTORE_REGS_RETURN 457 + END(mips_cps_pm_restore) 458 + 459 + #endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
+4 -17
arch/mips/kernel/cpu-probe.c
··· 62 62 case CPU_34K: 63 63 /* 64 64 * Erratum "RPS May Cause Incorrect Instruction Execution" 65 - * This code only handles VPE0, any SMP/SMTC/RTOS code 65 + * This code only handles VPE0, any SMP/RTOS code 66 66 * making use of VPE1 will be responsable for that VPE. 67 67 */ 68 68 if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) ··· 423 423 424 424 #ifndef CONFIG_MIPS_CPS 425 425 if (cpu_has_mips_r2) { 426 - c->core = read_c0_ebase() & 0x3ff; 426 + c->core = get_ebase_cpunum(); 427 427 if (cpu_has_mipsmt) 428 428 c->core >>= fls(core_nvpes()) - 1; 429 429 } ··· 678 678 * Undocumented RM7000: Bit 29 in the info register of 679 679 * the RM7000 v2.0 indicates if the TLB has 48 or 64 680 680 * entries. 681 - * 682 - * 29 1 => 64 entry JTLB 683 - * 0 => 48 entry JTLB 684 - */ 685 - c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; 686 - break; 687 - case PRID_IMP_RM9000: 688 - c->cputype = CPU_RM9000; 689 - __cpu_name[cpu] = "RM9000"; 690 - set_isa(c, MIPS_CPU_ISA_IV); 691 - c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 692 - MIPS_CPU_LLSC; 693 - /* 694 - * Bit 29 in the info register of the RM9000 695 - * indicates if the TLB has 48 or 64 entries. 696 681 * 697 682 * 29 1 => 64 entry JTLB 698 683 * 0 => 48 entry JTLB ··· 1026 1041 decode_configs(c); 1027 1042 /* JZRISC does not implement the CP0 counter. */ 1028 1043 c->options &= ~MIPS_CPU_COUNTER; 1044 + BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter); 1029 1045 switch (c->processor_id & PRID_IMP_MASK) { 1030 1046 case PRID_IMP_JZRISC: 1031 1047 c->cputype = CPU_JZRISC; ··· 1060 1074 switch (c->processor_id & PRID_IMP_MASK) { 1061 1075 case PRID_IMP_NETLOGIC_XLP2XX: 1062 1076 case PRID_IMP_NETLOGIC_XLP9XX: 1077 + case PRID_IMP_NETLOGIC_XLP5XX: 1063 1078 c->cputype = CPU_XLP; 1064 1079 __cpu_name[cpu] = "Broadcom XLPII"; 1065 1080 break;
-38
arch/mips/kernel/entry.S
··· 16 16 #include <asm/isadep.h> 17 17 #include <asm/thread_info.h> 18 18 #include <asm/war.h> 19 - #ifdef CONFIG_MIPS_MT_SMTC 20 - #include <asm/mipsmtregs.h> 21 - #endif 22 19 23 20 #ifndef CONFIG_PREEMPT 24 21 #define resume_kernel restore_all ··· 86 89 bnez t0, syscall_exit_work 87 90 88 91 restore_all: # restore full frame 89 - #ifdef CONFIG_MIPS_MT_SMTC 90 - #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 91 - /* Re-arm any temporarily masked interrupts not explicitly "acked" */ 92 - mfc0 v0, CP0_TCSTATUS 93 - ori v1, v0, TCSTATUS_IXMT 94 - mtc0 v1, CP0_TCSTATUS 95 - andi v0, TCSTATUS_IXMT 96 - _ehb 97 - mfc0 t0, CP0_TCCONTEXT 98 - DMT 9 # dmt t1 99 - jal mips_ihb 100 - mfc0 t2, CP0_STATUS 101 - andi t3, t0, 0xff00 102 - or t2, t2, t3 103 - mtc0 t2, CP0_STATUS 104 - _ehb 105 - andi t1, t1, VPECONTROL_TE 106 - beqz t1, 1f 107 - EMT 108 - 1: 109 - mfc0 v1, CP0_TCSTATUS 110 - /* We set IXMT above, XOR should clear it here */ 111 - xori v1, v1, TCSTATUS_IXMT 112 - or v1, v0, v1 113 - mtc0 v1, CP0_TCSTATUS 114 - _ehb 115 - xor t0, t0, t3 116 - mtc0 t0, CP0_TCCONTEXT 117 - #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ 118 - /* Detect and execute deferred IPI "interrupts" */ 119 - LONG_L s0, TI_REGS($28) 120 - LONG_S sp, TI_REGS($28) 121 - jal deferred_smtc_ipi 122 - LONG_S s0, TI_REGS($28) 123 - #endif /* CONFIG_MIPS_MT_SMTC */ 124 92 .set noat 125 93 RESTORE_TEMP 126 94 RESTORE_AT
-54
arch/mips/kernel/genex.S
··· 21 21 #include <asm/war.h> 22 22 #include <asm/thread_info.h> 23 23 24 - #ifdef CONFIG_MIPS_MT_SMTC 25 - #define PANIC_PIC(msg) \ 26 - .set push; \ 27 - .set nomicromips; \ 28 - .set reorder; \ 29 - PTR_LA a0,8f; \ 30 - .set noat; \ 31 - PTR_LA AT, panic; \ 32 - jr AT; \ 33 - 9: b 9b; \ 34 - .set pop; \ 35 - TEXT(msg) 36 - #endif 37 - 38 24 __INIT 39 25 40 26 /* ··· 237 251 SAVE_AT 238 252 .set push 239 253 .set noreorder 240 - #ifdef CONFIG_MIPS_MT_SMTC 241 - /* 242 - * To keep from blindly blocking *all* interrupts 243 - * during service by SMTC kernel, we also want to 244 - * pass the IM value to be cleared. 245 - */ 246 - FEXPORT(except_vec_vi_mori) 247 - ori a0, $0, 0 248 - #endif /* CONFIG_MIPS_MT_SMTC */ 249 254 PTR_LA v1, except_vec_vi_handler 250 255 FEXPORT(except_vec_vi_lui) 251 256 lui v0, 0 /* Patched */ ··· 254 277 NESTED(except_vec_vi_handler, 0, sp) 255 278 SAVE_TEMP 256 279 SAVE_STATIC 257 - #ifdef CONFIG_MIPS_MT_SMTC 258 - /* 259 - * SMTC has an interesting problem that interrupts are level-triggered, 260 - * and the CLI macro will clear EXL, potentially causing a duplicate 261 - * interrupt service invocation. So we need to clear the associated 262 - * IM bit of Status prior to doing CLI, and restore it after the 263 - * service routine has been invoked - we must assume that the 264 - * service routine will have cleared the state, and any active 265 - * level represents a new or otherwised unserviced event... 
266 - */ 267 - mfc0 t1, CP0_STATUS 268 - and t0, a0, t1 269 - #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 270 - mfc0 t2, CP0_TCCONTEXT 271 - or t2, t0, t2 272 - mtc0 t2, CP0_TCCONTEXT 273 - #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ 274 - xor t1, t1, t0 275 - mtc0 t1, CP0_STATUS 276 - _ehb 277 - #endif /* CONFIG_MIPS_MT_SMTC */ 278 280 CLI 279 281 #ifdef CONFIG_TRACE_IRQFLAGS 280 282 move s0, v0 281 - #ifdef CONFIG_MIPS_MT_SMTC 282 - move s1, a0 283 - #endif 284 283 TRACE_IRQS_OFF 285 - #ifdef CONFIG_MIPS_MT_SMTC 286 - move a0, s1 287 - #endif 288 284 move v0, s0 289 285 #endif 290 286 ··· 446 496 447 497 .align 5 448 498 LEAF(handle_ri_rdhwr_vivt) 449 - #ifdef CONFIG_MIPS_MT_SMTC 450 - PANIC_PIC("handle_ri_rdhwr_vivt called") 451 - #else 452 499 .set push 453 500 .set noat 454 501 .set noreorder ··· 464 517 .set pop 465 518 bltz k1, handle_ri /* slow path */ 466 519 /* fall thru */ 467 - #endif 468 520 END(handle_ri_rdhwr_vivt) 469 521 470 522 LEAF(handle_ri_rdhwr)
-56
arch/mips/kernel/head.S
··· 35 35 */ 36 36 .macro setup_c0_status set clr 37 37 .set push 38 - #ifdef CONFIG_MIPS_MT_SMTC 39 - /* 40 - * For SMTC, we need to set privilege and disable interrupts only for 41 - * the current TC, using the TCStatus register. 42 - */ 43 - mfc0 t0, CP0_TCSTATUS 44 - /* Fortunately CU 0 is in the same place in both registers */ 45 - /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ 46 - li t1, ST0_CU0 | 0x08001c00 47 - or t0, t1 48 - /* Clear TKSU, leave IXMT */ 49 - xori t0, 0x00001800 50 - mtc0 t0, CP0_TCSTATUS 51 - _ehb 52 - /* We need to leave the global IE bit set, but clear EXL...*/ 53 - mfc0 t0, CP0_STATUS 54 - or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr 55 - xor t0, ST0_EXL | ST0_ERL | \clr 56 - mtc0 t0, CP0_STATUS 57 - #else 58 38 mfc0 t0, CP0_STATUS 59 39 or t0, ST0_CU0|\set|0x1f|\clr 60 40 xor t0, 0x1f|\clr 61 41 mtc0 t0, CP0_STATUS 62 42 .set noreorder 63 43 sll zero,3 # ehb 64 - #endif 65 44 .set pop 66 45 .endm 67 46 ··· 94 115 jr t0 95 116 0: 96 117 97 - #ifdef CONFIG_MIPS_MT_SMTC 98 - /* 99 - * In SMTC kernel, "CLI" is thread-specific, in TCStatus. 100 - * We still need to enable interrupts globally in Status, 101 - * and clear EXL/ERL. 102 - * 103 - * TCContext is used to track interrupt levels under 104 - * service in SMTC kernel. Clear for boot TC before 105 - * allowing any interrupts. 106 - */ 107 - mtc0 zero, CP0_TCCONTEXT 108 - 109 - mfc0 t0, CP0_STATUS 110 - ori t0, t0, 0xff1f 111 - xori t0, t0, 0x001e 112 - mtc0 t0, CP0_STATUS 113 - #endif /* CONFIG_MIPS_MT_SMTC */ 114 - 115 118 PTR_LA t0, __bss_start # clear .bss 116 119 LONG_S zero, (t0) 117 120 PTR_LA t1, __bss_stop - LONGSIZE ··· 125 164 * function after setting up the stack and gp registers. 126 165 */ 127 166 NESTED(smp_bootstrap, 16, sp) 128 - #ifdef CONFIG_MIPS_MT_SMTC 129 - /* 130 - * Read-modify-writes of Status must be atomic, and this 131 - * is one case where CLI is invoked without EXL being 132 - * necessarily set. 
The CLI and setup_c0_status will 133 - * in fact be redundant for all but the first TC of 134 - * each VPE being booted. 135 - */ 136 - DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ 137 - jal mips_ihb 138 - #endif /* CONFIG_MIPS_MT_SMTC */ 139 167 smp_slave_setup 140 168 setup_c0_status_sec 141 - #ifdef CONFIG_MIPS_MT_SMTC 142 - andi t2, t2, VPECONTROL_TE 143 - beqz t2, 2f 144 - EMT # emt 145 - 2: 146 - #endif /* CONFIG_MIPS_MT_SMTC */ 147 169 j start_secondary 148 170 END(smp_bootstrap) 149 171 #endif /* CONFIG_SMP */
-4
arch/mips/kernel/i8259.c
··· 42 42 .irq_disable = disable_8259A_irq, 43 43 .irq_unmask = enable_8259A_irq, 44 44 .irq_mask_ack = mask_and_ack_8259A, 45 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 46 - .irq_set_affinity = plat_set_irq_affinity, 47 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 48 45 }; 49 46 50 47 /* ··· 177 180 outb(cached_master_mask, PIC_MASTER_IMR); 178 181 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ 179 182 } 180 - smtc_im_ack_irq(irq); 181 183 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 182 184 return; 183 185
+11 -14
arch/mips/kernel/idle.c
··· 224 224 cpu_wait = r4k_wait; 225 225 */ 226 226 break; 227 - case CPU_RM9000: 228 - if ((c->processor_id & 0x00ff) >= 0x40) 229 - cpu_wait = r4k_wait; 230 - break; 231 227 default: 232 228 break; 233 229 } 234 230 } 235 231 236 - static void smtc_idle_hook(void) 237 - { 238 - #ifdef CONFIG_MIPS_MT_SMTC 239 - void smtc_idle_loop_hook(void); 240 - 241 - smtc_idle_loop_hook(); 242 - #endif 243 - } 244 - 245 232 void arch_cpu_idle(void) 246 233 { 247 - smtc_idle_hook(); 248 234 if (cpu_wait) 249 235 cpu_wait(); 250 236 else 251 237 local_irq_enable(); 252 238 } 239 + 240 + #ifdef CONFIG_CPU_IDLE 241 + 242 + int mips_cpuidle_wait_enter(struct cpuidle_device *dev, 243 + struct cpuidle_driver *drv, int index) 244 + { 245 + arch_cpu_idle(); 246 + return index; 247 + } 248 + 249 + #endif
+15
arch/mips/kernel/irq-gic.c
··· 54 54 (int)(cnt & 0xffffffff)); 55 55 } 56 56 57 + void gic_write_cpu_compare(cycle_t cnt, int cpu) 58 + { 59 + unsigned long flags; 60 + 61 + local_irq_save(flags); 62 + 63 + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu); 64 + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), 65 + (int)(cnt >> 32)); 66 + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), 67 + (int)(cnt & 0xffffffff)); 68 + 69 + local_irq_restore(flags); 70 + } 71 + 57 72 cycle_t gic_read_compare(void) 58 73 { 59 74 unsigned int hi, lo;
-5
arch/mips/kernel/irq-msc01.c
··· 53 53 */ 54 54 static void level_mask_and_ack_msc_irq(struct irq_data *d) 55 55 { 56 - unsigned int irq = d->irq; 57 - 58 56 mask_msc_irq(d); 59 57 if (!cpu_has_veic) 60 58 MSCIC_WRITE(MSC01_IC_EOI, 0); 61 - /* This actually needs to be a call into platform code */ 62 - smtc_im_ack_irq(irq); 63 59 } 64 60 65 61 /* ··· 74 78 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); 75 79 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); 76 80 } 77 - smtc_im_ack_irq(irq); 78 81 } 79 82 80 83 /*
-17
arch/mips/kernel/irq.c
··· 73 73 */ 74 74 void ack_bad_irq(unsigned int irq) 75 75 { 76 - smtc_im_ack_irq(irq); 77 76 printk("unexpected IRQ # %d\n", irq); 78 77 } 79 78 ··· 141 142 { 142 143 irq_enter(); 143 144 check_stack_overflow(); 144 - if (!smtc_handle_on_other_cpu(irq)) 145 - generic_handle_irq(irq); 146 - irq_exit(); 147 - } 148 - 149 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 150 - /* 151 - * To avoid inefficient and in some cases pathological re-checking of 152 - * IRQ affinity, we have this variant that skips the affinity check. 153 - */ 154 - 155 - void __irq_entry do_IRQ_no_affinity(unsigned int irq) 156 - { 157 - irq_enter(); 158 - smtc_im_backstop(irq); 159 145 generic_handle_irq(irq); 160 146 irq_exit(); 161 147 } 162 148 163 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+28
arch/mips/kernel/mips-cpc.c
··· 9 9 */ 10 10 11 11 #include <linux/errno.h> 12 + #include <linux/percpu.h> 13 + #include <linux/spinlock.h> 12 14 13 15 #include <asm/mips-cm.h> 14 16 #include <asm/mips-cpc.h> 15 17 16 18 void __iomem *mips_cpc_base; 19 + 20 + static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock); 21 + 22 + static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); 17 23 18 24 phys_t __weak mips_cpc_phys_base(void) 19 25 { ··· 45 39 int mips_cpc_probe(void) 46 40 { 47 41 phys_t addr; 42 + unsigned cpu; 43 + 44 + for_each_possible_cpu(cpu) 45 + spin_lock_init(&per_cpu(cpc_core_lock, cpu)); 48 46 49 47 addr = mips_cpc_phys_base(); 50 48 if (!addr) ··· 59 49 return -ENXIO; 60 50 61 51 return 0; 52 + } 53 + 54 + void mips_cpc_lock_other(unsigned int core) 55 + { 56 + unsigned curr_core; 57 + preempt_disable(); 58 + curr_core = current_cpu_data.core; 59 + spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), 60 + per_cpu(cpc_core_lock_flags, curr_core)); 61 + write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); 62 + } 63 + 64 + void mips_cpc_unlock_other(void) 65 + { 66 + unsigned curr_core = current_cpu_data.core; 67 + spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), 68 + per_cpu(cpc_core_lock_flags, curr_core)); 69 + preempt_enable(); 62 70 }
+1 -1
arch/mips/kernel/mips-mt-fpaff.c
··· 1 1 /* 2 - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels 2 + * General MIPS MT support routines, usable in AP/SP and SMVP. 3 3 * Copyright (C) 2005 Mips Technologies, Inc 4 4 */ 5 5 #include <linux/cpu.h>
+1 -17
arch/mips/kernel/mips-mt.c
··· 1 1 /* 2 - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels 2 + * General MIPS MT support routines, usable in AP/SP and SMVP. 3 3 * Copyright (C) 2005 Mips Technologies, Inc 4 4 */ 5 5 ··· 57 57 int tc; 58 58 unsigned long haltval; 59 59 unsigned long tcstatval; 60 - #ifdef CONFIG_MIPS_MT_SMTC 61 - void smtc_soft_dump(void); 62 - #endif /* CONFIG_MIPT_MT_SMTC */ 63 60 64 61 local_irq_save(flags); 65 62 vpflags = dvpe(); ··· 113 116 if (!haltval) 114 117 write_tc_c0_tchalt(0); 115 118 } 116 - #ifdef CONFIG_MIPS_MT_SMTC 117 - smtc_soft_dump(); 118 - #endif /* CONFIG_MIPT_MT_SMTC */ 119 119 printk("===========================\n"); 120 120 evpe(vpflags); 121 121 local_irq_restore(flags); ··· 289 295 290 296 void mt_cflush_lockdown(void) 291 297 { 292 - #ifdef CONFIG_MIPS_MT_SMTC 293 - void smtc_cflush_lockdown(void); 294 - 295 - smtc_cflush_lockdown(); 296 - #endif /* CONFIG_MIPS_MT_SMTC */ 297 298 /* FILL IN VSMP and AP/SP VERSIONS HERE */ 298 299 } 299 300 300 301 void mt_cflush_release(void) 301 302 { 302 - #ifdef CONFIG_MIPS_MT_SMTC 303 - void smtc_cflush_release(void); 304 - 305 - smtc_cflush_release(); 306 - #endif /* CONFIG_MIPS_MT_SMTC */ 307 303 /* FILL IN VSMP and AP/SP VERSIONS HERE */ 308 304 } 309 305
+61 -23
arch/mips/kernel/octeon_switch.S
··· 10 10 * Copyright (C) 2000 MIPS Technologies, Inc. 11 11 * written by Carsten Langgaard, carstenl@mips.com 12 12 */ 13 - #include <asm/asm.h> 14 - #include <asm/cachectl.h> 15 - #include <asm/fpregdef.h> 16 - #include <asm/mipsregs.h> 17 - #include <asm/asm-offsets.h> 18 - #include <asm/pgtable-bits.h> 19 - #include <asm/regdef.h> 20 - #include <asm/stackframe.h> 21 - #include <asm/thread_info.h> 22 13 23 - #include <asm/asmmacro.h> 24 - 25 - /* 26 - * Offset to the current process status flags, the first 32 bytes of the 27 - * stack are not used. 28 - */ 29 - #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) 30 - 14 + #define USE_ALTERNATE_RESUME_IMPL 1 15 + .set push 16 + .set arch=mips64r2 17 + #include "r4k_switch.S" 18 + .set pop 31 19 /* 32 20 * task_struct *resume(task_struct *prev, task_struct *next, 33 21 * struct thread_info *next_ti, int usedfpu) ··· 28 40 cpu_save_nonscratch a0 29 41 LONG_S ra, THREAD_REG31(a0) 30 42 43 + /* 44 + * check if we need to save FPU registers 45 + */ 46 + PTR_L t3, TASK_THREAD_INFO(a0) 47 + LONG_L t0, TI_FLAGS(t3) 48 + li t1, _TIF_USEDFPU 49 + and t2, t0, t1 50 + beqz t2, 1f 51 + nor t1, zero, t1 52 + 53 + and t0, t0, t1 54 + LONG_S t0, TI_FLAGS(t3) 55 + 56 + /* 57 + * clear saved user stack CU1 bit 58 + */ 59 + LONG_L t0, ST_OFF(t3) 60 + li t1, ~ST0_CU1 61 + and t0, t0, t1 62 + LONG_S t0, ST_OFF(t3) 63 + 64 + .set push 65 + .set arch=mips64r2 66 + fpu_save_double a0 t0 t1 # c0_status passed in t0 67 + # clobbers t1 68 + .set pop 69 + 1: 70 + 71 + /* check if we need to save COP2 registers */ 72 + PTR_L t2, TASK_THREAD_INFO(a0) 73 + LONG_L t0, ST_OFF(t2) 74 + bbit0 t0, 30, 1f 75 + 76 + /* Disable COP2 in the stored process state */ 77 + li t1, ST0_CU2 78 + xor t0, t1 79 + LONG_S t0, ST_OFF(t2) 80 + 81 + /* Enable COP2 so we can save it */ 82 + mfc0 t0, CP0_STATUS 83 + or t0, t1 84 + mtc0 t0, CP0_STATUS 85 + 86 + /* Save COP2 */ 87 + daddu a0, THREAD_CP2 88 + jal octeon_cop2_save 89 + dsubu a0, THREAD_CP2 90 + 91 + 
/* Disable COP2 now that we are done */ 92 + mfc0 t0, CP0_STATUS 93 + li t1, ST0_CU2 94 + xor t0, t1 95 + mtc0 t0, CP0_STATUS 96 + 97 + 1: 31 98 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 32 99 /* Check if we need to store CVMSEG state */ 33 100 mfc0 t0, $11,7 /* CvmMemCtl */ ··· 128 85 move $28, a2 129 86 cpu_restore_nonscratch a1 130 87 131 - #if (_THREAD_SIZE - 32) < 0x8000 132 - PTR_ADDIU t0, $28, _THREAD_SIZE - 32 133 - #else 134 - PTR_LI t0, _THREAD_SIZE - 32 135 - PTR_ADDU t0, $28 136 - #endif 88 + PTR_ADDU t0, $28, _THREAD_SIZE - 32 137 89 set_saved_sp t0, t1, t2 138 90 139 91 mfc0 t1, CP0_STATUS /* Do we really need this? */
+716
arch/mips/kernel/pm-cps.c
··· 1 + /* 2 + * Copyright (C) 2014 Imagination Technologies 3 + * Author: Paul Burton <paul.burton@imgtec.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #include <linux/init.h> 12 + #include <linux/percpu.h> 13 + #include <linux/slab.h> 14 + 15 + #include <asm/asm-offsets.h> 16 + #include <asm/cacheflush.h> 17 + #include <asm/cacheops.h> 18 + #include <asm/idle.h> 19 + #include <asm/mips-cm.h> 20 + #include <asm/mips-cpc.h> 21 + #include <asm/mipsmtregs.h> 22 + #include <asm/pm.h> 23 + #include <asm/pm-cps.h> 24 + #include <asm/smp-cps.h> 25 + #include <asm/uasm.h> 26 + 27 + /* 28 + * cps_nc_entry_fn - type of a generated non-coherent state entry function 29 + * @online: the count of online coupled VPEs 30 + * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count 31 + * 32 + * The code entering & exiting non-coherent states is generated at runtime 33 + * using uasm, in order to ensure that the compiler cannot insert a stray 34 + * memory access at an unfortunate time and to allow the generation of optimal 35 + * core-specific code particularly for cache routines. If coupled_coherence 36 + * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, 37 + * returns the number of VPEs that were in the wait state at the point this 38 + * VPE left it. Returns garbage if coupled_coherence is zero or this is not 39 + * the entry function for CPS_PM_NC_WAIT. 40 + */ 41 + typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); 42 + 43 + /* 44 + * The entry point of the generated non-coherent idle state entry/exit 45 + * functions. Actually per-core rather than per-CPU. 
46 + */ 47 + static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], 48 + nc_asm_enter); 49 + 50 + /* Bitmap indicating which states are supported by the system */ 51 + DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); 52 + 53 + /* 54 + * Indicates the number of coupled VPEs ready to operate in a non-coherent 55 + * state. Actually per-core rather than per-CPU. 56 + */ 57 + static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); 58 + static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); 59 + 60 + /* Indicates online CPUs coupled with the current CPU */ 61 + static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); 62 + 63 + /* 64 + * Used to synchronize entry to deep idle states. Actually per-core rather 65 + * than per-CPU. 66 + */ 67 + static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); 68 + 69 + /* Saved CPU state across the CPS_PM_POWER_GATED state */ 70 + DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); 71 + 72 + /* A somewhat arbitrary number of labels & relocs for uasm */ 73 + static struct uasm_label labels[32] __initdata; 74 + static struct uasm_reloc relocs[32] __initdata; 75 + 76 + /* CPU dependant sync types */ 77 + static unsigned stype_intervention; 78 + static unsigned stype_memory; 79 + static unsigned stype_ordering; 80 + 81 + enum mips_reg { 82 + zero, at, v0, v1, a0, a1, a2, a3, 83 + t0, t1, t2, t3, t4, t5, t6, t7, 84 + s0, s1, s2, s3, s4, s5, s6, s7, 85 + t8, t9, k0, k1, gp, sp, fp, ra, 86 + }; 87 + 88 + bool cps_pm_support_state(enum cps_pm_state state) 89 + { 90 + return test_bit(state, state_support); 91 + } 92 + 93 + static void coupled_barrier(atomic_t *a, unsigned online) 94 + { 95 + /* 96 + * This function is effectively the same as 97 + * cpuidle_coupled_parallel_barrier, which can't be used here since 98 + * there's no cpuidle device. 
99 + */ 100 + 101 + if (!coupled_coherence) 102 + return; 103 + 104 + smp_mb__before_atomic_inc(); 105 + atomic_inc(a); 106 + 107 + while (atomic_read(a) < online) 108 + cpu_relax(); 109 + 110 + if (atomic_inc_return(a) == online * 2) { 111 + atomic_set(a, 0); 112 + return; 113 + } 114 + 115 + while (atomic_read(a) > online) 116 + cpu_relax(); 117 + } 118 + 119 + int cps_pm_enter_state(enum cps_pm_state state) 120 + { 121 + unsigned cpu = smp_processor_id(); 122 + unsigned core = current_cpu_data.core; 123 + unsigned online, left; 124 + cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); 125 + u32 *core_ready_count, *nc_core_ready_count; 126 + void *nc_addr; 127 + cps_nc_entry_fn entry; 128 + struct core_boot_config *core_cfg; 129 + struct vpe_boot_config *vpe_cfg; 130 + 131 + /* Check that there is an entry function for this state */ 132 + entry = per_cpu(nc_asm_enter, core)[state]; 133 + if (!entry) 134 + return -EINVAL; 135 + 136 + /* Calculate which coupled CPUs (VPEs) are online */ 137 + #ifdef CONFIG_MIPS_MT 138 + if (cpu_online(cpu)) { 139 + cpumask_and(coupled_mask, cpu_online_mask, 140 + &cpu_sibling_map[cpu]); 141 + online = cpumask_weight(coupled_mask); 142 + cpumask_clear_cpu(cpu, coupled_mask); 143 + } else 144 + #endif 145 + { 146 + cpumask_clear(coupled_mask); 147 + online = 1; 148 + } 149 + 150 + /* Setup the VPE to run mips_cps_pm_restore when started again */ 151 + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { 152 + core_cfg = &mips_cps_core_bootcfg[core]; 153 + vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id]; 154 + vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; 155 + vpe_cfg->gp = (unsigned long)current_thread_info(); 156 + vpe_cfg->sp = 0; 157 + } 158 + 159 + /* Indicate that this CPU might not be coherent */ 160 + cpumask_clear_cpu(cpu, &cpu_coherent_mask); 161 + smp_mb__after_clear_bit(); 162 + 163 + /* Create a non-coherent mapping of the core ready_count */ 164 + core_ready_count = per_cpu(ready_count, 
core); 165 + nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), 166 + (unsigned long)core_ready_count); 167 + nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); 168 + nc_core_ready_count = nc_addr; 169 + 170 + /* Ensure ready_count is zero-initialised before the assembly runs */ 171 + ACCESS_ONCE(*nc_core_ready_count) = 0; 172 + coupled_barrier(&per_cpu(pm_barrier, core), online); 173 + 174 + /* Run the generated entry code */ 175 + left = entry(online, nc_core_ready_count); 176 + 177 + /* Remove the non-coherent mapping of ready_count */ 178 + kunmap_noncoherent(); 179 + 180 + /* Indicate that this CPU is definitely coherent */ 181 + cpumask_set_cpu(cpu, &cpu_coherent_mask); 182 + 183 + /* 184 + * If this VPE is the first to leave the non-coherent wait state then 185 + * it needs to wake up any coupled VPEs still running their wait 186 + * instruction so that they return to cpuidle, which can then complete 187 + * coordination between the coupled VPEs & provide the governor with 188 + * a chance to reflect on the length of time the VPEs were in the 189 + * idle state. 
190 + */ 191 + if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) 192 + arch_send_call_function_ipi_mask(coupled_mask); 193 + 194 + return 0; 195 + } 196 + 197 + static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, 198 + struct uasm_reloc **pr, 199 + const struct cache_desc *cache, 200 + unsigned op, int lbl) 201 + { 202 + unsigned cache_size = cache->ways << cache->waybit; 203 + unsigned i; 204 + const unsigned unroll_lines = 32; 205 + 206 + /* If the cache isn't present this function has it easy */ 207 + if (cache->flags & MIPS_CACHE_NOT_PRESENT) 208 + return; 209 + 210 + /* Load base address */ 211 + UASM_i_LA(pp, t0, (long)CKSEG0); 212 + 213 + /* Calculate end address */ 214 + if (cache_size < 0x8000) 215 + uasm_i_addiu(pp, t1, t0, cache_size); 216 + else 217 + UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); 218 + 219 + /* Start of cache op loop */ 220 + uasm_build_label(pl, *pp, lbl); 221 + 222 + /* Generate the cache ops */ 223 + for (i = 0; i < unroll_lines; i++) 224 + uasm_i_cache(pp, op, i * cache->linesz, t0); 225 + 226 + /* Update the base address */ 227 + uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); 228 + 229 + /* Loop if we haven't reached the end address yet */ 230 + uasm_il_bne(pp, pr, t0, t1, lbl); 231 + uasm_i_nop(pp); 232 + } 233 + 234 + static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, 235 + struct uasm_reloc **pr, 236 + const struct cpuinfo_mips *cpu_info, 237 + int lbl) 238 + { 239 + unsigned i, fsb_size = 8; 240 + unsigned num_loads = (fsb_size * 3) / 2; 241 + unsigned line_stride = 2; 242 + unsigned line_size = cpu_info->dcache.linesz; 243 + unsigned perf_counter, perf_event; 244 + unsigned revision = cpu_info->processor_id & PRID_REV_MASK; 245 + 246 + /* 247 + * Determine whether this CPU requires an FSB flush, and if so which 248 + * performance counter/event reflect stalls due to a full FSB. 
249 + */ 250 + switch (__get_cpu_type(cpu_info->cputype)) { 251 + case CPU_INTERAPTIV: 252 + perf_counter = 1; 253 + perf_event = 51; 254 + break; 255 + 256 + case CPU_PROAPTIV: 257 + /* Newer proAptiv cores don't require this workaround */ 258 + if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) 259 + return 0; 260 + 261 + /* On older ones it's unavailable */ 262 + return -1; 263 + 264 + /* CPUs which do not require the workaround */ 265 + case CPU_P5600: 266 + return 0; 267 + 268 + default: 269 + WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n"); 270 + return -1; 271 + } 272 + 273 + /* 274 + * Ensure that the fill/store buffer (FSB) is not holding the results 275 + * of a prefetch, since if it is then the CPC sequencer may become 276 + * stuck in the D3 (ClrBus) state whilst entering a low power state. 277 + */ 278 + 279 + /* Preserve perf counter setup */ 280 + uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 281 + uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ 282 + 283 + /* Setup perf counter to count FSB full pipeline stalls */ 284 + uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); 285 + uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 286 + uasm_i_ehb(pp); 287 + uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ 288 + uasm_i_ehb(pp); 289 + 290 + /* Base address for loads */ 291 + UASM_i_LA(pp, t0, (long)CKSEG0); 292 + 293 + /* Start of clear loop */ 294 + uasm_build_label(pl, *pp, lbl); 295 + 296 + /* Perform some loads to fill the FSB */ 297 + for (i = 0; i < num_loads; i++) 298 + uasm_i_lw(pp, zero, i * line_size * line_stride, t0); 299 + 300 + /* 301 + * Invalidate the new D-cache entries so that the cache will need 302 + * refilling (via the FSB) if the loop is executed again. 
303 + */ 304 + for (i = 0; i < num_loads; i++) { 305 + uasm_i_cache(pp, Hit_Invalidate_D, 306 + i * line_size * line_stride, t0); 307 + uasm_i_cache(pp, Hit_Writeback_Inv_SD, 308 + i * line_size * line_stride, t0); 309 + } 310 + 311 + /* Completion barrier */ 312 + uasm_i_sync(pp, stype_memory); 313 + uasm_i_ehb(pp); 314 + 315 + /* Check whether the pipeline stalled due to the FSB being full */ 316 + uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ 317 + 318 + /* Loop if it didn't */ 319 + uasm_il_beqz(pp, pr, t1, lbl); 320 + uasm_i_nop(pp); 321 + 322 + /* Restore perf counter 1. The count may well now be wrong... */ 323 + uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 324 + uasm_i_ehb(pp); 325 + uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ 326 + uasm_i_ehb(pp); 327 + 328 + return 0; 329 + } 330 + 331 + static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, 332 + struct uasm_reloc **pr, 333 + unsigned r_addr, int lbl) 334 + { 335 + uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); 336 + uasm_build_label(pl, *pp, lbl); 337 + uasm_i_ll(pp, t1, 0, r_addr); 338 + uasm_i_or(pp, t1, t1, t0); 339 + uasm_i_sc(pp, t1, 0, r_addr); 340 + uasm_il_beqz(pp, pr, t1, lbl); 341 + uasm_i_nop(pp); 342 + } 343 + 344 + static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) 345 + { 346 + struct uasm_label *l = labels; 347 + struct uasm_reloc *r = relocs; 348 + u32 *buf, *p; 349 + const unsigned r_online = a0; 350 + const unsigned r_nc_count = a1; 351 + const unsigned r_pcohctl = t7; 352 + const unsigned max_instrs = 256; 353 + unsigned cpc_cmd; 354 + int err; 355 + enum { 356 + lbl_incready = 1, 357 + lbl_poll_cont, 358 + lbl_secondary_hang, 359 + lbl_disable_coherence, 360 + lbl_flush_fsb, 361 + lbl_invicache, 362 + lbl_flushdcache, 363 + lbl_hang, 364 + lbl_set_cont, 365 + lbl_secondary_cont, 366 + lbl_decready, 367 + }; 368 + 369 + /* Allocate a buffer to hold the generated code */ 370 + p = buf = 
kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); 371 + if (!buf) 372 + return NULL; 373 + 374 + /* Clear labels & relocs ready for (re)use */ 375 + memset(labels, 0, sizeof(labels)); 376 + memset(relocs, 0, sizeof(relocs)); 377 + 378 + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { 379 + /* 380 + * Save CPU state. Note the non-standard calling convention 381 + * with the return address placed in v0 to avoid clobbering 382 + * the ra register before it is saved. 383 + */ 384 + UASM_i_LA(&p, t0, (long)mips_cps_pm_save); 385 + uasm_i_jalr(&p, v0, t0); 386 + uasm_i_nop(&p); 387 + } 388 + 389 + /* 390 + * Load addresses of required CM & CPC registers. This is done early 391 + * because they're needed in both the enable & disable coherence steps 392 + * but in the coupled case the enable step will only run on one VPE. 393 + */ 394 + UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); 395 + 396 + if (coupled_coherence) { 397 + /* Increment ready_count */ 398 + uasm_i_sync(&p, stype_ordering); 399 + uasm_build_label(&l, p, lbl_incready); 400 + uasm_i_ll(&p, t1, 0, r_nc_count); 401 + uasm_i_addiu(&p, t2, t1, 1); 402 + uasm_i_sc(&p, t2, 0, r_nc_count); 403 + uasm_il_beqz(&p, &r, t2, lbl_incready); 404 + uasm_i_addiu(&p, t1, t1, 1); 405 + 406 + /* Ordering barrier */ 407 + uasm_i_sync(&p, stype_ordering); 408 + 409 + /* 410 + * If this is the last VPE to become ready for non-coherence 411 + * then it should branch below. 412 + */ 413 + uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); 414 + uasm_i_nop(&p); 415 + 416 + if (state < CPS_PM_POWER_GATED) { 417 + /* 418 + * Otherwise this is not the last VPE to become ready 419 + * for non-coherence. It needs to wait until coherence 420 + * has been disabled before proceeding, which it will do 421 + * by polling for the top bit of ready_count being set. 
422 + */ 423 + uasm_i_addiu(&p, t1, zero, -1); 424 + uasm_build_label(&l, p, lbl_poll_cont); 425 + uasm_i_lw(&p, t0, 0, r_nc_count); 426 + uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); 427 + uasm_i_ehb(&p); 428 + uasm_i_yield(&p, zero, t1); 429 + uasm_il_b(&p, &r, lbl_poll_cont); 430 + uasm_i_nop(&p); 431 + } else { 432 + /* 433 + * The core will lose power & this VPE will not continue 434 + * so it can simply halt here. 435 + */ 436 + uasm_i_addiu(&p, t0, zero, TCHALT_H); 437 + uasm_i_mtc0(&p, t0, 2, 4); 438 + uasm_build_label(&l, p, lbl_secondary_hang); 439 + uasm_il_b(&p, &r, lbl_secondary_hang); 440 + uasm_i_nop(&p); 441 + } 442 + } 443 + 444 + /* 445 + * This is the point of no return - this VPE will now proceed to 446 + * disable coherence. At this point we *must* be sure that no other 447 + * VPE within the core will interfere with the L1 dcache. 448 + */ 449 + uasm_build_label(&l, p, lbl_disable_coherence); 450 + 451 + /* Invalidate the L1 icache */ 452 + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, 453 + Index_Invalidate_I, lbl_invicache); 454 + 455 + /* Writeback & invalidate the L1 dcache */ 456 + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, 457 + Index_Writeback_Inv_D, lbl_flushdcache); 458 + 459 + /* Completion barrier */ 460 + uasm_i_sync(&p, stype_memory); 461 + uasm_i_ehb(&p); 462 + 463 + /* 464 + * Disable all but self interventions. The load from COHCTL is defined 465 + * by the interAptiv & proAptiv SUMs as ensuring that the operation 466 + * resulting from the preceding store is complete. 
467 + */ 468 + uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); 469 + uasm_i_sw(&p, t0, 0, r_pcohctl); 470 + uasm_i_lw(&p, t0, 0, r_pcohctl); 471 + 472 + /* Sync to ensure previous interventions are complete */ 473 + uasm_i_sync(&p, stype_intervention); 474 + uasm_i_ehb(&p); 475 + 476 + /* Disable coherence */ 477 + uasm_i_sw(&p, zero, 0, r_pcohctl); 478 + uasm_i_lw(&p, t0, 0, r_pcohctl); 479 + 480 + if (state >= CPS_PM_CLOCK_GATED) { 481 + err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], 482 + lbl_flush_fsb); 483 + if (err) 484 + goto out_err; 485 + 486 + /* Determine the CPC command to issue */ 487 + switch (state) { 488 + case CPS_PM_CLOCK_GATED: 489 + cpc_cmd = CPC_Cx_CMD_CLOCKOFF; 490 + break; 491 + case CPS_PM_POWER_GATED: 492 + cpc_cmd = CPC_Cx_CMD_PWRDOWN; 493 + break; 494 + default: 495 + BUG(); 496 + goto out_err; 497 + } 498 + 499 + /* Issue the CPC command */ 500 + UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); 501 + uasm_i_addiu(&p, t1, zero, cpc_cmd); 502 + uasm_i_sw(&p, t1, 0, t0); 503 + 504 + if (state == CPS_PM_POWER_GATED) { 505 + /* If anything goes wrong just hang */ 506 + uasm_build_label(&l, p, lbl_hang); 507 + uasm_il_b(&p, &r, lbl_hang); 508 + uasm_i_nop(&p); 509 + 510 + /* 511 + * There's no point generating more code, the core is 512 + * powered down & if powered back up will run from the 513 + * reset vector not from here. 514 + */ 515 + goto gen_done; 516 + } 517 + 518 + /* Completion barrier */ 519 + uasm_i_sync(&p, stype_memory); 520 + uasm_i_ehb(&p); 521 + } 522 + 523 + if (state == CPS_PM_NC_WAIT) { 524 + /* 525 + * At this point it is safe for all VPEs to proceed with 526 + * execution. This VPE will set the top bit of ready_count 527 + * to indicate to the other VPEs that they may continue. 
528 + */ 529 + if (coupled_coherence) 530 + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, 531 + lbl_set_cont); 532 + 533 + /* 534 + * VPEs which did not disable coherence will continue 535 + * executing, after coherence has been disabled, from this 536 + * point. 537 + */ 538 + uasm_build_label(&l, p, lbl_secondary_cont); 539 + 540 + /* Now perform our wait */ 541 + uasm_i_wait(&p, 0); 542 + } 543 + 544 + /* 545 + * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs 546 + * will run this. The first will actually re-enable coherence & the 547 + * rest will just be performing a rather unusual nop. 548 + */ 549 + uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK); 550 + uasm_i_sw(&p, t0, 0, r_pcohctl); 551 + uasm_i_lw(&p, t0, 0, r_pcohctl); 552 + 553 + /* Completion barrier */ 554 + uasm_i_sync(&p, stype_memory); 555 + uasm_i_ehb(&p); 556 + 557 + if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { 558 + /* Decrement ready_count */ 559 + uasm_build_label(&l, p, lbl_decready); 560 + uasm_i_sync(&p, stype_ordering); 561 + uasm_i_ll(&p, t1, 0, r_nc_count); 562 + uasm_i_addiu(&p, t2, t1, -1); 563 + uasm_i_sc(&p, t2, 0, r_nc_count); 564 + uasm_il_beqz(&p, &r, t2, lbl_decready); 565 + uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); 566 + 567 + /* Ordering barrier */ 568 + uasm_i_sync(&p, stype_ordering); 569 + } 570 + 571 + if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { 572 + /* 573 + * At this point it is safe for all VPEs to proceed with 574 + * execution. This VPE will set the top bit of ready_count 575 + * to indicate to the other VPEs that they may continue. 576 + */ 577 + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); 578 + 579 + /* 580 + * This core will be reliant upon another core sending a 581 + * power-up command to the CPC in order to resume operation. 
582 + * Thus an arbitrary VPE can't trigger the core leaving the 583 + * idle state and the one that disables coherence might as well 584 + * be the one to re-enable it. The rest will continue from here 585 + * after that has been done. 586 + */ 587 + uasm_build_label(&l, p, lbl_secondary_cont); 588 + 589 + /* Ordering barrier */ 590 + uasm_i_sync(&p, stype_ordering); 591 + } 592 + 593 + /* The core is coherent, time to return to C code */ 594 + uasm_i_jr(&p, ra); 595 + uasm_i_nop(&p); 596 + 597 + gen_done: 598 + /* Ensure the code didn't exceed the resources allocated for it */ 599 + BUG_ON((p - buf) > max_instrs); 600 + BUG_ON((l - labels) > ARRAY_SIZE(labels)); 601 + BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); 602 + 603 + /* Patch branch offsets */ 604 + uasm_resolve_relocs(relocs, labels); 605 + 606 + /* Flush the icache */ 607 + local_flush_icache_range((unsigned long)buf, (unsigned long)p); 608 + 609 + return buf; 610 + out_err: 611 + kfree(buf); 612 + return NULL; 613 + } 614 + 615 + static int __init cps_gen_core_entries(unsigned cpu) 616 + { 617 + enum cps_pm_state state; 618 + unsigned core = cpu_data[cpu].core; 619 + unsigned dlinesz = cpu_data[cpu].dcache.linesz; 620 + void *entry_fn, *core_rc; 621 + 622 + for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { 623 + if (per_cpu(nc_asm_enter, core)[state]) 624 + continue; 625 + if (!test_bit(state, state_support)) 626 + continue; 627 + 628 + entry_fn = cps_gen_entry_code(cpu, state); 629 + if (!entry_fn) { 630 + pr_err("Failed to generate core %u state %u entry\n", 631 + core, state); 632 + clear_bit(state, state_support); 633 + } 634 + 635 + per_cpu(nc_asm_enter, core)[state] = entry_fn; 636 + } 637 + 638 + if (!per_cpu(ready_count, core)) { 639 + core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); 640 + if (!core_rc) { 641 + pr_err("Failed allocate core %u ready_count\n", core); 642 + return -ENOMEM; 643 + } 644 + per_cpu(ready_count_alloc, core) = core_rc; 645 + 646 + /* Ensure ready_count is 
aligned to a cacheline boundary */ 647 + core_rc += dlinesz - 1; 648 + core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); 649 + per_cpu(ready_count, core) = core_rc; 650 + } 651 + 652 + return 0; 653 + } 654 + 655 + static int __init cps_pm_init(void) 656 + { 657 + unsigned cpu; 658 + int err; 659 + 660 + /* Detect appropriate sync types for the system */ 661 + switch (current_cpu_data.cputype) { 662 + case CPU_INTERAPTIV: 663 + case CPU_PROAPTIV: 664 + case CPU_M5150: 665 + case CPU_P5600: 666 + stype_intervention = 0x2; 667 + stype_memory = 0x3; 668 + stype_ordering = 0x10; 669 + break; 670 + 671 + default: 672 + pr_warn("Power management is using heavyweight sync 0\n"); 673 + } 674 + 675 + /* A CM is required for all non-coherent states */ 676 + if (!mips_cm_present()) { 677 + pr_warn("pm-cps: no CM, non-coherent states unavailable\n"); 678 + goto out; 679 + } 680 + 681 + /* 682 + * If interrupts were enabled whilst running a wait instruction on a 683 + * non-coherent core then the VPE may end up processing interrupts 684 + * whilst non-coherent. That would be bad. 
685 + */ 686 + if (cpu_wait == r4k_wait_irqoff) 687 + set_bit(CPS_PM_NC_WAIT, state_support); 688 + else 689 + pr_warn("pm-cps: non-coherent wait unavailable\n"); 690 + 691 + /* Detect whether a CPC is present */ 692 + if (mips_cpc_present()) { 693 + /* Detect whether clock gating is implemented */ 694 + if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK) 695 + set_bit(CPS_PM_CLOCK_GATED, state_support); 696 + else 697 + pr_warn("pm-cps: CPC does not support clock gating\n"); 698 + 699 + /* Power gating is available with CPS SMP & any CPC */ 700 + if (mips_cps_smp_in_use()) 701 + set_bit(CPS_PM_POWER_GATED, state_support); 702 + else 703 + pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n"); 704 + } else { 705 + pr_warn("pm-cps: no CPC, clock & power gating unavailable\n"); 706 + } 707 + 708 + for_each_present_cpu(cpu) { 709 + err = cps_gen_core_entries(cpu); 710 + if (err) 711 + return err; 712 + } 713 + out: 714 + return 0; 715 + } 716 + arch_initcall(cps_pm_init);
+99
arch/mips/kernel/pm.c
··· 1 + /* 2 + * Copyright (C) 2014 Imagination Technologies Ltd. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms of the GNU General Public License as published by the 6 + * Free Software Foundation; either version 2 of the License, or (at your 7 + * option) any later version. 8 + * 9 + * CPU PM notifiers for saving/restoring general CPU state. 10 + */ 11 + 12 + #include <linux/cpu_pm.h> 13 + #include <linux/init.h> 14 + 15 + #include <asm/dsp.h> 16 + #include <asm/fpu.h> 17 + #include <asm/mmu_context.h> 18 + #include <asm/pm.h> 19 + #include <asm/watch.h> 20 + 21 + /* Used by PM helper macros in asm/pm.h */ 22 + struct mips_static_suspend_state mips_static_suspend_state; 23 + 24 + /** 25 + * mips_cpu_save() - Save general CPU state. 26 + * Ensures that general CPU context is saved, notably FPU and DSP. 27 + */ 28 + static int mips_cpu_save(void) 29 + { 30 + /* Save FPU state */ 31 + lose_fpu(1); 32 + 33 + /* Save DSP state */ 34 + save_dsp(current); 35 + 36 + return 0; 37 + } 38 + 39 + /** 40 + * mips_cpu_restore() - Restore general CPU state. 41 + * Restores important CPU context. 42 + */ 43 + static void mips_cpu_restore(void) 44 + { 45 + unsigned int cpu = smp_processor_id(); 46 + 47 + /* Restore ASID */ 48 + if (current->mm) 49 + write_c0_entryhi(cpu_asid(cpu, current->mm)); 50 + 51 + /* Restore DSP state */ 52 + restore_dsp(current); 53 + 54 + /* Restore UserLocal */ 55 + if (cpu_has_userlocal) 56 + write_c0_userlocal(current_thread_info()->tp_value); 57 + 58 + /* Restore watch registers */ 59 + __restore_watch(); 60 + } 61 + 62 + /** 63 + * mips_pm_notifier() - Notifier for preserving general CPU context. 64 + * @self: Notifier block. 65 + * @cmd: CPU PM event. 66 + * @v: Private data (unused). 67 + * 68 + * This is called when a CPU power management event occurs, and is used to 69 + * ensure that important CPU context is preserved across a CPU power down. 
70 + */ 71 + static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd, 72 + void *v) 73 + { 74 + int ret; 75 + 76 + switch (cmd) { 77 + case CPU_PM_ENTER: 78 + ret = mips_cpu_save(); 79 + if (ret) 80 + return NOTIFY_STOP; 81 + break; 82 + case CPU_PM_ENTER_FAILED: 83 + case CPU_PM_EXIT: 84 + mips_cpu_restore(); 85 + break; 86 + } 87 + 88 + return NOTIFY_OK; 89 + } 90 + 91 + static struct notifier_block mips_pm_notifier_block = { 92 + .notifier_call = mips_pm_notifier, 93 + }; 94 + 95 + static int __init mips_pm_init(void) 96 + { 97 + return cpu_pm_register_notifier(&mips_pm_notifier_block); 98 + } 99 + arch_initcall(mips_pm_init);
-7
arch/mips/kernel/process.c
··· 140 140 */ 141 141 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); 142 142 143 - #ifdef CONFIG_MIPS_MT_SMTC 144 - /* 145 - * SMTC restores TCStatus after Status, and the CU bits 146 - * are aliased there. 147 - */ 148 - childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); 149 - #endif 150 143 clear_tsk_thread_flag(p, TIF_USEDFPU); 151 144 152 145 #ifdef CONFIG_MIPS_MT_FPAFF
+3 -33
arch/mips/kernel/r4k_switch.S
··· 28 28 */ 29 29 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) 30 30 31 + #ifndef USE_ALTERNATE_RESUME_IMPL 31 32 /* 32 33 * task_struct *resume(task_struct *prev, task_struct *next, 33 34 * struct thread_info *next_ti, s32 fp_save) ··· 88 87 89 88 PTR_ADDU t0, $28, _THREAD_SIZE - 32 90 89 set_saved_sp t0, t1, t2 91 - #ifdef CONFIG_MIPS_MT_SMTC 92 - /* Read-modify-writes of Status must be atomic on a VPE */ 93 - mfc0 t2, CP0_TCSTATUS 94 - ori t1, t2, TCSTATUS_IXMT 95 - mtc0 t1, CP0_TCSTATUS 96 - andi t2, t2, TCSTATUS_IXMT 97 - _ehb 98 - DMT 8 # dmt t0 99 - move t1,ra 100 - jal mips_ihb 101 - move ra,t1 102 - #endif /* CONFIG_MIPS_MT_SMTC */ 103 90 mfc0 t1, CP0_STATUS /* Do we really need this? */ 104 91 li a3, 0xff01 105 92 and t1, a3 ··· 96 107 and a2, a3 97 108 or a2, t1 98 109 mtc0 a2, CP0_STATUS 99 - #ifdef CONFIG_MIPS_MT_SMTC 100 - _ehb 101 - andi t0, t0, VPECONTROL_TE 102 - beqz t0, 1f 103 - emt 104 - 1: 105 - mfc0 t1, CP0_TCSTATUS 106 - xori t1, t1, TCSTATUS_IXMT 107 - or t1, t1, t2 108 - mtc0 t1, CP0_TCSTATUS 109 - _ehb 110 - #endif /* CONFIG_MIPS_MT_SMTC */ 111 110 move v0, a0 112 111 jr ra 113 112 END(resume) 113 + 114 + #endif /* USE_ALTERNATE_RESUME_IMPL */ 114 115 115 116 /* 116 117 * Save a thread's fp context. ··· 155 176 #define FPU_DEFAULT 0x00000000 156 177 157 178 LEAF(_init_fpu) 158 - #ifdef CONFIG_MIPS_MT_SMTC 159 - /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ 160 - mfc0 t0, CP0_TCSTATUS 161 - /* Bit position is the same for Status, TCStatus */ 162 - li t1, ST0_CU1 163 - or t0, t1 164 - mtc0 t0, CP0_TCSTATUS 165 - #else /* Normal MIPS CU1 enable */ 166 179 mfc0 t0, CP0_STATUS 167 180 li t1, ST0_CU1 168 181 or t0, t1 169 182 mtc0 t0, CP0_STATUS 170 - #endif /* CONFIG_MIPS_MT_SMTC */ 171 183 enable_fpu_hazard 172 184 173 185 li t1, FPU_DEFAULT
-1
arch/mips/kernel/rtlx-mt.c
··· 36 36 unsigned long flags; 37 37 int i; 38 38 39 - /* Ought not to be strictly necessary for SMTC builds */ 40 39 local_irq_save(flags); 41 40 vpeflags = dvpe(); 42 41 set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
-9
arch/mips/kernel/smp-bmips.c
··· 281 281 } 282 282 283 283 /* 284 - * Runs on CPU0 after all CPUs have been booted 285 - */ 286 - static void bmips_cpus_done(void) 287 - { 288 - } 289 - 290 - /* 291 284 * BMIPS5000 raceless IPIs 292 285 * 293 286 * Each CPU has two inbound SW IRQs which are independent of all other CPUs. ··· 427 434 .boot_secondary = bmips_boot_secondary, 428 435 .smp_finish = bmips_smp_finish, 429 436 .init_secondary = bmips_init_secondary, 430 - .cpus_done = bmips_cpus_done, 431 437 .send_ipi_single = bmips43xx_send_ipi_single, 432 438 .send_ipi_mask = bmips43xx_send_ipi_mask, 433 439 #ifdef CONFIG_HOTPLUG_CPU ··· 441 449 .boot_secondary = bmips_boot_secondary, 442 450 .smp_finish = bmips_smp_finish, 443 451 .init_secondary = bmips_init_secondary, 444 - .cpus_done = bmips_cpus_done, 445 452 .send_ipi_single = bmips5000_send_ipi_single, 446 453 .send_ipi_mask = bmips5000_send_ipi_mask, 447 454 #ifdef CONFIG_HOTPLUG_CPU
+1 -14
arch/mips/kernel/smp-cmp.c
··· 49 49 50 50 /* Enable per-cpu interrupts: platform specific */ 51 51 52 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 52 + #ifdef CONFIG_MIPS_MT_SMP 53 53 if (cpu_has_mipsmt) 54 54 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & 55 55 TCBIND_CURVPE; 56 - #endif 57 - #ifdef CONFIG_MIPS_MT_SMTC 58 - c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; 59 56 #endif 60 57 } 61 58 ··· 70 73 #endif /* CONFIG_MIPS_MT_FPAFF */ 71 74 72 75 local_irq_enable(); 73 - } 74 - 75 - static void cmp_cpus_done(void) 76 - { 77 - pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); 78 76 } 79 77 80 78 /* ··· 127 135 unsigned int mvpconf0 = read_c0_mvpconf0(); 128 136 129 137 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 130 - #elif defined(CONFIG_MIPS_MT_SMTC) 131 - unsigned int mvpconf0 = read_c0_mvpconf0(); 132 - 133 - nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 134 138 #endif 135 139 smp_num_siblings = nvpe; 136 140 } ··· 153 165 .send_ipi_mask = gic_send_ipi_mask, 154 166 .init_secondary = cmp_init_secondary, 155 167 .smp_finish = cmp_smp_finish, 156 - .cpus_done = cmp_cpus_done, 157 168 .boot_secondary = cmp_boot_secondary, 158 169 .smp_setup = cmp_smp_setup, 159 170 .prepare_cpus = cmp_prepare_cpus,
+281 -150
arch/mips/kernel/smp-cps.c
··· 20 20 #include <asm/mips-cpc.h> 21 21 #include <asm/mips_mt.h> 22 22 #include <asm/mipsregs.h> 23 + #include <asm/pm-cps.h> 23 24 #include <asm/smp-cps.h> 24 25 #include <asm/time.h> 25 26 #include <asm/uasm.h> 26 27 27 28 static DECLARE_BITMAP(core_power, NR_CPUS); 28 29 29 - struct boot_config mips_cps_bootcfg; 30 + struct core_boot_config *mips_cps_core_bootcfg; 30 31 31 - static void init_core(void) 32 + static unsigned core_vpe_count(unsigned core) 32 33 { 33 - unsigned int nvpes, t; 34 - u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status; 34 + unsigned cfg; 35 35 36 - if (!cpu_has_mipsmt) 37 - return; 36 + if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) 37 + return 1; 38 38 39 - /* Enter VPE configuration state */ 40 - dvpe(); 41 - set_c0_mvpcontrol(MVPCONTROL_VPC); 42 - 43 - /* Retrieve the count of VPEs in this core */ 44 - mvpconf0 = read_c0_mvpconf0(); 45 - nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 46 - smp_num_siblings = nvpes; 47 - 48 - for (t = 1; t < nvpes; t++) { 49 - /* Use a 1:1 mapping of TC index to VPE index */ 50 - settc(t); 51 - 52 - /* Bind 1 TC to this VPE */ 53 - tcbind = read_tc_c0_tcbind(); 54 - tcbind &= ~TCBIND_CURVPE; 55 - tcbind |= t << TCBIND_CURVPE_SHIFT; 56 - write_tc_c0_tcbind(tcbind); 57 - 58 - /* Set exclusive TC, non-active, master */ 59 - vpeconf0 = read_vpe_c0_vpeconf0(); 60 - vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA); 61 - vpeconf0 |= t << VPECONF0_XTC_SHIFT; 62 - vpeconf0 |= VPECONF0_MVP; 63 - write_vpe_c0_vpeconf0(vpeconf0); 64 - 65 - /* Declare TC non-active, non-allocatable & interrupt exempt */ 66 - tcstatus = read_tc_c0_tcstatus(); 67 - tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA); 68 - tcstatus |= TCSTATUS_IXMT; 69 - write_tc_c0_tcstatus(tcstatus); 70 - 71 - /* Halt the TC */ 72 - write_tc_c0_tchalt(TCHALT_H); 73 - 74 - /* Allow only 1 TC to execute */ 75 - vpecontrol = read_vpe_c0_vpecontrol(); 76 - vpecontrol &= ~VPECONTROL_TE; 77 - write_vpe_c0_vpecontrol(vpecontrol); 78 
- 79 - /* Copy (most of) Status from VPE 0 */ 80 - status = read_c0_status(); 81 - status &= ~(ST0_IM | ST0_IE | ST0_KSU); 82 - status |= ST0_CU0; 83 - write_vpe_c0_status(status); 84 - 85 - /* Copy Config from VPE 0 */ 86 - write_vpe_c0_config(read_c0_config()); 87 - write_vpe_c0_config7(read_c0_config7()); 88 - 89 - /* Ensure no software interrupts are pending */ 90 - write_vpe_c0_cause(0); 91 - 92 - /* Sync Count */ 93 - write_vpe_c0_count(read_c0_count()); 94 - } 95 - 96 - /* Leave VPE configuration state */ 97 - clear_c0_mvpcontrol(MVPCONTROL_VPC); 39 + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); 40 + cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; 41 + return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; 98 42 } 99 43 100 44 static void __init cps_smp_setup(void) 101 45 { 102 46 unsigned int ncores, nvpes, core_vpes; 103 47 int c, v; 104 - u32 core_cfg, *entry_code; 105 48 106 49 /* Detect & record VPE topology */ 107 50 ncores = mips_cm_numcores(); 108 51 pr_info("VPE topology "); 109 52 for (c = nvpes = 0; c < ncores; c++) { 110 - if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) { 111 - write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF); 112 - core_cfg = read_gcr_co_config(); 113 - core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >> 114 - CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; 115 - } else { 116 - core_vpes = 1; 117 - } 118 - 53 + core_vpes = core_vpe_count(c); 119 54 pr_cont("%c%u", c ? 
',' : '{', core_vpes); 55 + 56 + /* Use the number of VPEs in core 0 for smp_num_siblings */ 57 + if (!c) 58 + smp_num_siblings = core_vpes; 120 59 121 60 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { 122 61 cpu_data[nvpes + v].core = c; ··· 76 137 __cpu_logical_map[v] = v; 77 138 } 78 139 140 + /* Set a coherent default CCA (CWB) */ 141 + change_c0_config(CONF_CM_CMASK, 0x5); 142 + 79 143 /* Core 0 is powered up (we're running on it) */ 80 144 bitmap_set(core_power, 0, 1); 81 145 82 - /* Disable MT - we only want to run 1 TC per VPE */ 83 - if (cpu_has_mipsmt) 84 - dmt(); 85 - 86 146 /* Initialise core 0 */ 87 - init_core(); 88 - 89 - /* Patch the start of mips_cps_core_entry to provide the CM base */ 90 - entry_code = (u32 *)&mips_cps_core_entry; 91 - UASM_i_LA(&entry_code, 3, (long)mips_cm_base); 147 + mips_cps_core_init(); 92 148 93 149 /* Make core 0 coherent with everything */ 94 150 write_gcr_cl_coherence(0xff); ··· 91 157 92 158 static void __init cps_prepare_cpus(unsigned int max_cpus) 93 159 { 160 + unsigned ncores, core_vpes, c, cca; 161 + bool cca_unsuitable; 162 + u32 *entry_code; 163 + 94 164 mips_mt_set_cpuoptions(); 165 + 166 + /* Detect whether the CCA is unsuited to multi-core SMP */ 167 + cca = read_c0_config() & CONF_CM_CMASK; 168 + switch (cca) { 169 + case 0x4: /* CWBE */ 170 + case 0x5: /* CWB */ 171 + /* The CCA is coherent, multi-core is fine */ 172 + cca_unsuitable = false; 173 + break; 174 + 175 + default: 176 + /* CCA is not coherent, multi-core is not usable */ 177 + cca_unsuitable = true; 178 + } 179 + 180 + /* Warn the user if the CCA prevents multi-core */ 181 + ncores = mips_cm_numcores(); 182 + if (cca_unsuitable && ncores > 1) { 183 + pr_warn("Using only one core due to unsuitable CCA 0x%x\n", 184 + cca); 185 + 186 + for_each_present_cpu(c) { 187 + if (cpu_data[c].core) 188 + set_cpu_present(c, false); 189 + } 190 + } 191 + 192 + /* 193 + * Patch the start of mips_cps_core_entry to provide: 194 + * 195 + * v0 = CM 
base address 196 + * s0 = kseg0 CCA 197 + */ 198 + entry_code = (u32 *)&mips_cps_core_entry; 199 + UASM_i_LA(&entry_code, 3, (long)mips_cm_base); 200 + uasm_i_addiu(&entry_code, 16, 0, cca); 201 + dma_cache_wback_inv((unsigned long)&mips_cps_core_entry, 202 + (void *)entry_code - (void *)&mips_cps_core_entry); 203 + 204 + /* Allocate core boot configuration structs */ 205 + mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), 206 + GFP_KERNEL); 207 + if (!mips_cps_core_bootcfg) { 208 + pr_err("Failed to allocate boot config for %u cores\n", ncores); 209 + goto err_out; 210 + } 211 + 212 + /* Allocate VPE boot configuration structs */ 213 + for (c = 0; c < ncores; c++) { 214 + core_vpes = core_vpe_count(c); 215 + mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, 216 + sizeof(*mips_cps_core_bootcfg[c].vpe_config), 217 + GFP_KERNEL); 218 + if (!mips_cps_core_bootcfg[c].vpe_config) { 219 + pr_err("Failed to allocate %u VPE boot configs\n", 220 + core_vpes); 221 + goto err_out; 222 + } 223 + } 224 + 225 + /* Mark this CPU as booted */ 226 + atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask, 227 + 1 << cpu_vpe_id(&current_cpu_data)); 228 + 229 + return; 230 + err_out: 231 + /* Clean up allocations */ 232 + if (mips_cps_core_bootcfg) { 233 + for (c = 0; c < ncores; c++) 234 + kfree(mips_cps_core_bootcfg[c].vpe_config); 235 + kfree(mips_cps_core_bootcfg); 236 + mips_cps_core_bootcfg = NULL; 237 + } 238 + 239 + /* Effectively disable SMP by declaring CPUs not present */ 240 + for_each_possible_cpu(c) { 241 + if (c == 0) 242 + continue; 243 + set_cpu_present(c, false); 244 + } 95 245 } 96 246 97 - static void boot_core(struct boot_config *cfg) 247 + static void boot_core(unsigned core) 98 248 { 99 249 u32 access; 100 250 101 251 /* Select the appropriate core */ 102 - write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF); 252 + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); 103 253 104 254 /* Set its reset vector */ 
105 255 write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); ··· 193 175 194 176 /* Ensure the core can access the GCRs */ 195 177 access = read_gcr_access(); 196 - access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core); 178 + access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core); 197 179 write_gcr_access(access); 198 180 199 - /* Copy cfg */ 200 - mips_cps_bootcfg = *cfg; 201 - 202 181 if (mips_cpc_present()) { 203 - /* Select the appropriate core */ 204 - write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF); 205 - 206 182 /* Reset the core */ 183 + mips_cpc_lock_other(core); 207 184 write_cpc_co_cmd(CPC_Cx_CMD_RESET); 185 + mips_cpc_unlock_other(); 208 186 } else { 209 187 /* Take the core out of reset */ 210 188 write_gcr_co_reset_release(0); 211 189 } 212 190 213 191 /* The core is now powered up */ 214 - bitmap_set(core_power, cfg->core, 1); 192 + bitmap_set(core_power, core, 1); 215 193 } 216 194 217 - static void boot_vpe(void *info) 195 + static void remote_vpe_boot(void *dummy) 218 196 { 219 - struct boot_config *cfg = info; 220 - u32 tcstatus, vpeconf0; 221 - 222 - /* Enter VPE configuration state */ 223 - dvpe(); 224 - set_c0_mvpcontrol(MVPCONTROL_VPC); 225 - 226 - settc(cfg->vpe); 227 - 228 - /* Set the TC restart PC */ 229 - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); 230 - 231 - /* Activate the TC, allow interrupts */ 232 - tcstatus = read_tc_c0_tcstatus(); 233 - tcstatus &= ~TCSTATUS_IXMT; 234 - tcstatus |= TCSTATUS_A; 235 - write_tc_c0_tcstatus(tcstatus); 236 - 237 - /* Clear the TC halt bit */ 238 - write_tc_c0_tchalt(0); 239 - 240 - /* Activate the VPE */ 241 - vpeconf0 = read_vpe_c0_vpeconf0(); 242 - vpeconf0 |= VPECONF0_VPA; 243 - write_vpe_c0_vpeconf0(vpeconf0); 244 - 245 - /* Set the stack & global pointer registers */ 246 - write_tc_gpr_sp(cfg->sp); 247 - write_tc_gpr_gp(cfg->gp); 248 - 249 - /* Leave VPE configuration state */ 250 - clear_c0_mvpcontrol(MVPCONTROL_VPC); 251 - 252 - /* Enable other VPEs to 
execute */ 253 - evpe(EVPE_ENABLE); 197 + mips_cps_boot_vpes(); 254 198 } 255 199 256 200 static void cps_boot_secondary(int cpu, struct task_struct *idle) 257 201 { 258 - struct boot_config cfg; 202 + unsigned core = cpu_data[cpu].core; 203 + unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); 204 + struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; 205 + struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; 259 206 unsigned int remote; 260 207 int err; 261 208 262 - cfg.core = cpu_data[cpu].core; 263 - cfg.vpe = cpu_vpe_id(&cpu_data[cpu]); 264 - cfg.pc = (unsigned long)&smp_bootstrap; 265 - cfg.sp = __KSTK_TOS(idle); 266 - cfg.gp = (unsigned long)task_thread_info(idle); 209 + vpe_cfg->pc = (unsigned long)&smp_bootstrap; 210 + vpe_cfg->sp = __KSTK_TOS(idle); 211 + vpe_cfg->gp = (unsigned long)task_thread_info(idle); 267 212 268 - if (!test_bit(cfg.core, core_power)) { 213 + atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); 214 + 215 + preempt_disable(); 216 + 217 + if (!test_bit(core, core_power)) { 269 218 /* Boot a VPE on a powered down core */ 270 - boot_core(&cfg); 271 - return; 219 + boot_core(core); 220 + goto out; 272 221 } 273 222 274 - if (cfg.core != current_cpu_data.core) { 223 + if (core != current_cpu_data.core) { 275 224 /* Boot a VPE on another powered up core */ 276 225 for (remote = 0; remote < NR_CPUS; remote++) { 277 - if (cpu_data[remote].core != cfg.core) 226 + if (cpu_data[remote].core != core) 278 227 continue; 279 228 if (cpu_online(remote)) 280 229 break; 281 230 } 282 231 BUG_ON(remote >= NR_CPUS); 283 232 284 - err = smp_call_function_single(remote, boot_vpe, &cfg, 1); 233 + err = smp_call_function_single(remote, remote_vpe_boot, 234 + NULL, 1); 285 235 if (err) 286 236 panic("Failed to call remote CPU\n"); 287 - return; 237 + goto out; 288 238 } 289 239 290 240 BUG_ON(!cpu_has_mipsmt); 291 241 292 242 /* Boot a VPE on this core */ 293 - boot_vpe(&cfg); 243 + mips_cps_boot_vpes(); 244 + out: 245 + 
preempt_enable(); 294 246 } 295 247 296 248 static void cps_init_secondary(void) ··· 268 280 /* Disable MT - we only want to run 1 TC per VPE */ 269 281 if (cpu_has_mipsmt) 270 282 dmt(); 271 - 272 - /* TODO: revisit this assumption once hotplug is implemented */ 273 - if (cpu_vpe_id(&current_cpu_data) == 0) 274 - init_core(); 275 283 276 284 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 277 285 STATUSF_IP6 | STATUSF_IP7); ··· 286 302 local_irq_enable(); 287 303 } 288 304 289 - static void cps_cpus_done(void) 305 + #ifdef CONFIG_HOTPLUG_CPU 306 + 307 + static int cps_cpu_disable(void) 290 308 { 309 + unsigned cpu = smp_processor_id(); 310 + struct core_boot_config *core_cfg; 311 + 312 + if (!cpu) 313 + return -EBUSY; 314 + 315 + if (!cps_pm_support_state(CPS_PM_POWER_GATED)) 316 + return -EINVAL; 317 + 318 + core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; 319 + atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); 320 + smp_mb__after_atomic_dec(); 321 + set_cpu_online(cpu, false); 322 + cpu_clear(cpu, cpu_callin_map); 323 + 324 + return 0; 291 325 } 326 + 327 + static DECLARE_COMPLETION(cpu_death_chosen); 328 + static unsigned cpu_death_sibling; 329 + static enum { 330 + CPU_DEATH_HALT, 331 + CPU_DEATH_POWER, 332 + } cpu_death; 333 + 334 + void play_dead(void) 335 + { 336 + unsigned cpu, core; 337 + 338 + local_irq_disable(); 339 + idle_task_exit(); 340 + cpu = smp_processor_id(); 341 + cpu_death = CPU_DEATH_POWER; 342 + 343 + if (cpu_has_mipsmt) { 344 + core = cpu_data[cpu].core; 345 + 346 + /* Look for another online VPE within the core */ 347 + for_each_online_cpu(cpu_death_sibling) { 348 + if (cpu_data[cpu_death_sibling].core != core) 349 + continue; 350 + 351 + /* 352 + * There is an online VPE within the core. Just halt 353 + * this TC and leave the core alone. 
354 + */ 355 + cpu_death = CPU_DEATH_HALT; 356 + break; 357 + } 358 + } 359 + 360 + /* This CPU has chosen its way out */ 361 + complete(&cpu_death_chosen); 362 + 363 + if (cpu_death == CPU_DEATH_HALT) { 364 + /* Halt this TC */ 365 + write_c0_tchalt(TCHALT_H); 366 + instruction_hazard(); 367 + } else { 368 + /* Power down the core */ 369 + cps_pm_enter_state(CPS_PM_POWER_GATED); 370 + } 371 + 372 + /* This should never be reached */ 373 + panic("Failed to offline CPU %u", cpu); 374 + } 375 + 376 + static void wait_for_sibling_halt(void *ptr_cpu) 377 + { 378 + unsigned cpu = (unsigned)ptr_cpu; 379 + unsigned vpe_id = cpu_data[cpu].vpe_id; 380 + unsigned halted; 381 + unsigned long flags; 382 + 383 + do { 384 + local_irq_save(flags); 385 + settc(vpe_id); 386 + halted = read_tc_c0_tchalt(); 387 + local_irq_restore(flags); 388 + } while (!(halted & TCHALT_H)); 389 + } 390 + 391 + static void cps_cpu_die(unsigned int cpu) 392 + { 393 + unsigned core = cpu_data[cpu].core; 394 + unsigned stat; 395 + int err; 396 + 397 + /* Wait for the cpu to choose its way out */ 398 + if (!wait_for_completion_timeout(&cpu_death_chosen, 399 + msecs_to_jiffies(5000))) { 400 + pr_err("CPU%u: didn't offline\n", cpu); 401 + return; 402 + } 403 + 404 + /* 405 + * Now wait for the CPU to actually offline. Without doing this that 406 + * offlining may race with one or more of: 407 + * 408 + * - Onlining the CPU again. 409 + * - Powering down the core if another VPE within it is offlined. 410 + * - A sibling VPE entering a non-coherent state. 411 + * 412 + * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing 413 + * with which we could race, so do nothing. 414 + */ 415 + if (cpu_death == CPU_DEATH_POWER) { 416 + /* 417 + * Wait for the core to enter a powered down or clock gated 418 + * state, the latter happening when a JTAG probe is connected 419 + * in which case the CPC will refuse to power down the core. 
420 + */ 421 + do { 422 + mips_cpc_lock_other(core); 423 + stat = read_cpc_co_stat_conf(); 424 + stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; 425 + mips_cpc_unlock_other(); 426 + } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && 427 + stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && 428 + stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); 429 + 430 + /* Indicate the core is powered off */ 431 + bitmap_clear(core_power, core, 1); 432 + } else if (cpu_has_mipsmt) { 433 + /* 434 + * Have a CPU with access to the offlined CPUs registers wait 435 + * for its TC to halt. 436 + */ 437 + err = smp_call_function_single(cpu_death_sibling, 438 + wait_for_sibling_halt, 439 + (void *)cpu, 1); 440 + if (err) 441 + panic("Failed to call remote sibling CPU\n"); 442 + } 443 + } 444 + 445 + #endif /* CONFIG_HOTPLUG_CPU */ 292 446 293 447 static struct plat_smp_ops cps_smp_ops = { 294 448 .smp_setup = cps_smp_setup, ··· 436 314 .smp_finish = cps_smp_finish, 437 315 .send_ipi_single = gic_send_ipi_single, 438 316 .send_ipi_mask = gic_send_ipi_mask, 439 - .cpus_done = cps_cpus_done, 317 + #ifdef CONFIG_HOTPLUG_CPU 318 + .cpu_disable = cps_cpu_disable, 319 + .cpu_die = cps_cpu_die, 320 + #endif 440 321 }; 322 + 323 + bool mips_cps_smp_in_use(void) 324 + { 325 + extern struct plat_smp_ops *mp_ops; 326 + return mp_ops == &cps_smp_ops; 327 + } 441 328 442 329 int register_cps_smp_ops(void) 443 330 {
+11
arch/mips/kernel/smp-gic.c
··· 15 15 #include <linux/printk.h> 16 16 17 17 #include <asm/gic.h> 18 + #include <asm/mips-cpc.h> 18 19 #include <asm/smp-ops.h> 19 20 20 21 void gic_send_ipi_single(int cpu, unsigned int action) 21 22 { 22 23 unsigned long flags; 23 24 unsigned int intr; 25 + unsigned int core = cpu_data[cpu].core; 24 26 25 27 pr_debug("CPU%d: %s cpu %d action %u status %08x\n", 26 28 smp_processor_id(), __func__, cpu, action, read_c0_status()); ··· 43 41 } 44 42 45 43 gic_send_ipi(intr); 44 + 45 + if (mips_cpc_present() && (core != current_cpu_data.core)) { 46 + while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { 47 + mips_cpc_lock_other(core); 48 + write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); 49 + mips_cpc_unlock_other(); 50 + } 51 + } 52 + 46 53 local_irq_restore(flags); 47 54 } 48 55
-5
arch/mips/kernel/smp-mt.c
··· 183 183 local_irq_enable(); 184 184 } 185 185 186 - static void vsmp_cpus_done(void) 187 - { 188 - } 189 - 190 186 /* 191 187 * Setup the PC, SP, and GP of a secondary processor and start it 192 188 * running! ··· 283 287 .send_ipi_mask = vsmp_send_ipi_mask, 284 288 .init_secondary = vsmp_init_secondary, 285 289 .smp_finish = vsmp_smp_finish, 286 - .cpus_done = vsmp_cpus_done, 287 290 .boot_secondary = vsmp_boot_secondary, 288 291 .smp_setup = vsmp_smp_setup, 289 292 .prepare_cpus = vsmp_prepare_cpus,
-6
arch/mips/kernel/smp-up.c
··· 36 36 { 37 37 } 38 38 39 - /* Hook for after all CPUs are online */ 40 - static void up_cpus_done(void) 41 - { 42 - } 43 - 44 39 /* 45 40 * Firmware CPU startup hook 46 41 */ ··· 68 73 .send_ipi_mask = up_send_ipi_mask, 69 74 .init_secondary = up_init_secondary, 70 75 .smp_finish = up_smp_finish, 71 - .cpus_done = up_cpus_done, 72 76 .boot_secondary = up_boot_secondary, 73 77 .smp_setup = up_smp_setup, 74 78 .prepare_cpus = up_prepare_cpus,
+47 -14
arch/mips/kernel/smp.c
··· 43 43 #include <asm/time.h> 44 44 #include <asm/setup.h> 45 45 46 - #ifdef CONFIG_MIPS_MT_SMTC 47 - #include <asm/mipsmtregs.h> 48 - #endif /* CONFIG_MIPS_MT_SMTC */ 49 - 50 46 volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 51 47 52 48 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ ··· 61 65 62 66 /* representing cpus for which sibling maps can be computed */ 63 67 static cpumask_t cpu_sibling_setup_map; 68 + 69 + cpumask_t cpu_coherent_mask; 64 70 65 71 static inline void set_cpu_sibling_map(int cpu) 66 72 { ··· 100 102 { 101 103 unsigned int cpu; 102 104 103 - #ifdef CONFIG_MIPS_MT_SMTC 104 - /* Only do cpu_probe for first TC of CPU */ 105 - if ((read_c0_tcbind() & TCBIND_CURTC) != 0) 106 - __cpu_name[smp_processor_id()] = __cpu_name[0]; 107 - else 108 - #endif /* CONFIG_MIPS_MT_SMTC */ 109 105 cpu_probe(); 110 106 cpu_report(); 111 107 per_cpu_trap_init(false); ··· 116 124 cpu = smp_processor_id(); 117 125 cpu_data[cpu].udelay_val = loops_per_jiffy; 118 126 127 + cpu_set(cpu, cpu_coherent_mask); 119 128 notify_cpu_starting(cpu); 120 129 121 130 set_cpu_online(cpu, true); ··· 166 173 167 174 void __init smp_cpus_done(unsigned int max_cpus) 168 175 { 169 - mp_ops->cpus_done(); 170 176 } 171 177 172 178 /* called from main before smp_init() */ ··· 178 186 #ifndef CONFIG_HOTPLUG_CPU 179 187 init_cpu_present(cpu_possible_mask); 180 188 #endif 189 + cpumask_copy(&cpu_coherent_mask, cpu_possible_mask); 181 190 } 182 191 183 192 /* preload SMP state for boot cpu */ ··· 231 238 * o collapses to normal function call on UP kernels 232 239 * o collapses to normal function call on systems with a single shared 233 240 * primary cache. 234 - * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. 
235 241 */ 236 242 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) 237 243 { 238 - #ifndef CONFIG_MIPS_MT_SMTC 239 244 smp_call_function(func, info, 1); 240 - #endif 241 245 } 242 246 243 247 static inline void smp_on_each_tlb(void (*func) (void *info), void *info) ··· 394 404 } 395 405 EXPORT_SYMBOL(dump_send_ipi); 396 406 #endif 407 + 408 + #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 409 + 410 + static DEFINE_PER_CPU(atomic_t, tick_broadcast_count); 411 + static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd); 412 + 413 + void tick_broadcast(const struct cpumask *mask) 414 + { 415 + atomic_t *count; 416 + struct call_single_data *csd; 417 + int cpu; 418 + 419 + for_each_cpu(cpu, mask) { 420 + count = &per_cpu(tick_broadcast_count, cpu); 421 + csd = &per_cpu(tick_broadcast_csd, cpu); 422 + 423 + if (atomic_inc_return(count) == 1) 424 + smp_call_function_single_async(cpu, csd); 425 + } 426 + } 427 + 428 + static void tick_broadcast_callee(void *info) 429 + { 430 + int cpu = smp_processor_id(); 431 + tick_receive_broadcast(); 432 + atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); 433 + } 434 + 435 + static int __init tick_broadcast_init(void) 436 + { 437 + struct call_single_data *csd; 438 + int cpu; 439 + 440 + for (cpu = 0; cpu < NR_CPUS; cpu++) { 441 + csd = &per_cpu(tick_broadcast_csd, cpu); 442 + csd->func = tick_broadcast_callee; 443 + } 444 + 445 + return 0; 446 + } 447 + early_initcall(tick_broadcast_init); 448 + 449 + #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
-133
arch/mips/kernel/smtc-asm.S
··· 1 - /* 2 - * Assembly Language Functions for MIPS MT SMTC support 3 - */ 4 - 5 - /* 6 - * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */ 7 - 8 - #include <asm/regdef.h> 9 - #include <asm/asmmacro.h> 10 - #include <asm/stackframe.h> 11 - #include <asm/irqflags.h> 12 - 13 - /* 14 - * "Software Interrupt" linkage. 15 - * 16 - * This is invoked when an "Interrupt" is sent from one TC to another, 17 - * where the TC to be interrupted is halted, has it's Restart address 18 - * and Status values saved by the "remote control" thread, then modified 19 - * to cause execution to begin here, in kenel mode. This code then 20 - * disguises the TC state as that of an exception and transfers 21 - * control to the general exception or vectored interrupt handler. 22 - */ 23 - .set noreorder 24 - 25 - /* 26 - The __smtc_ipi_vector would use k0 and k1 as temporaries and 27 - 1) Set EXL (this is per-VPE, so this can't be done by proxy!) 28 - 2) Restore the K/CU and IXMT bits to the pre "exception" state 29 - (EXL means no interrupts and access to the kernel map). 30 - 3) Set EPC to be the saved value of TCRestart. 31 - 4) Jump to the exception handler entry point passed by the sender. 32 - 33 - CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? 34 - */ 35 - 36 - /* 37 - * Reviled and slandered vision: Set EXL and restore K/CU/IXMT 38 - * state of pre-halt thread, then save everything and call 39 - * thought some function pointer to imaginary_exception, which 40 - * will parse a register value or memory message queue to 41 - * deliver things like interprocessor interrupts. On return 42 - * from that function, jump to the global ret_from_irq code 43 - * to invoke the scheduler and return as appropriate. 
44 - */ 45 - 46 - #define PT_PADSLOT4 (PT_R0-8) 47 - #define PT_PADSLOT5 (PT_R0-4) 48 - 49 - .text 50 - .align 5 51 - FEXPORT(__smtc_ipi_vector) 52 - #ifdef CONFIG_CPU_MICROMIPS 53 - nop 54 - #endif 55 - .set noat 56 - /* Disable thread scheduling to make Status update atomic */ 57 - DMT 27 # dmt k1 58 - _ehb 59 - /* Set EXL */ 60 - mfc0 k0,CP0_STATUS 61 - ori k0,k0,ST0_EXL 62 - mtc0 k0,CP0_STATUS 63 - _ehb 64 - /* Thread scheduling now inhibited by EXL. Restore TE state. */ 65 - andi k1,k1,VPECONTROL_TE 66 - beqz k1,1f 67 - emt 68 - 1: 69 - /* 70 - * The IPI sender has put some information on the anticipated 71 - * kernel stack frame. If we were in user mode, this will be 72 - * built above the saved kernel SP. If we were already in the 73 - * kernel, it will be built above the current CPU SP. 74 - * 75 - * Were we in kernel mode, as indicated by CU0? 76 - */ 77 - sll k1,k0,3 78 - .set noreorder 79 - bltz k1,2f 80 - move k1,sp 81 - .set reorder 82 - /* 83 - * If previously in user mode, set CU0 and use kernel stack. 84 - */ 85 - li k1,ST0_CU0 86 - or k1,k1,k0 87 - mtc0 k1,CP0_STATUS 88 - _ehb 89 - get_saved_sp 90 - /* Interrupting TC will have pre-set values in slots in the new frame */ 91 - 2: subu k1,k1,PT_SIZE 92 - /* Load TCStatus Value */ 93 - lw k0,PT_TCSTATUS(k1) 94 - /* Write it to TCStatus to restore CU/KSU/IXMT state */ 95 - mtc0 k0,$2,1 96 - _ehb 97 - lw k0,PT_EPC(k1) 98 - mtc0 k0,CP0_EPC 99 - /* Save all will redundantly recompute the SP, but use it for now */ 100 - SAVE_ALL 101 - CLI 102 - TRACE_IRQS_OFF 103 - /* Function to be invoked passed stack pad slot 5 */ 104 - lw t0,PT_PADSLOT5(sp) 105 - /* Argument from sender passed in stack pad slot 4 */ 106 - lw a0,PT_PADSLOT4(sp) 107 - LONG_L s0, TI_REGS($28) 108 - LONG_S sp, TI_REGS($28) 109 - PTR_LA ra, ret_from_irq 110 - jr t0 111 - 112 - /* 113 - * Called from idle loop to provoke processing of queued IPIs 114 - * First IPI message in queue passed as argument. 
115 - */ 116 - 117 - LEAF(self_ipi) 118 - /* Before anything else, block interrupts */ 119 - mfc0 t0,CP0_TCSTATUS 120 - ori t1,t0,TCSTATUS_IXMT 121 - mtc0 t1,CP0_TCSTATUS 122 - _ehb 123 - /* We know we're in kernel mode, so prepare stack frame */ 124 - subu t1,sp,PT_SIZE 125 - sw ra,PT_EPC(t1) 126 - sw a0,PT_PADSLOT4(t1) 127 - la t2,ipi_decode 128 - sw t2,PT_PADSLOT5(t1) 129 - /* Save pre-disable value of TCStatus */ 130 - sw t0,PT_TCSTATUS(t1) 131 - j __smtc_ipi_vector 132 - nop 133 - END(self_ipi)
-102
arch/mips/kernel/smtc-proc.c
··· 1 - /* 2 - * /proc hooks for SMTC kernel 3 - * Copyright (C) 2005 Mips Technologies, Inc 4 - */ 5 - 6 - #include <linux/kernel.h> 7 - #include <linux/sched.h> 8 - #include <linux/cpumask.h> 9 - #include <linux/interrupt.h> 10 - 11 - #include <asm/cpu.h> 12 - #include <asm/processor.h> 13 - #include <linux/atomic.h> 14 - #include <asm/hardirq.h> 15 - #include <asm/mmu_context.h> 16 - #include <asm/mipsregs.h> 17 - #include <asm/cacheflush.h> 18 - #include <linux/proc_fs.h> 19 - #include <linux/seq_file.h> 20 - 21 - #include <asm/smtc_proc.h> 22 - 23 - /* 24 - * /proc diagnostic and statistics hooks 25 - */ 26 - 27 - /* 28 - * Statistics gathered 29 - */ 30 - unsigned long selfipis[NR_CPUS]; 31 - 32 - struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; 33 - 34 - atomic_t smtc_fpu_recoveries; 35 - 36 - static int smtc_proc_show(struct seq_file *m, void *v) 37 - { 38 - int i; 39 - extern unsigned long ebase; 40 - 41 - seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status); 42 - seq_printf(m, "Config7: 0x%08x\n", read_c0_config7()); 43 - seq_printf(m, "EBASE: 0x%08lx\n", ebase); 44 - seq_printf(m, "Counter Interrupts taken per CPU (TC)\n"); 45 - for (i=0; i < NR_CPUS; i++) 46 - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); 47 - seq_printf(m, "Self-IPIs by CPU:\n"); 48 - for(i = 0; i < NR_CPUS; i++) 49 - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 50 - seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", 51 - atomic_read(&smtc_fpu_recoveries)); 52 - return 0; 53 - } 54 - 55 - static int smtc_proc_open(struct inode *inode, struct file *file) 56 - { 57 - return single_open(file, smtc_proc_show, NULL); 58 - } 59 - 60 - static const struct file_operations smtc_proc_fops = { 61 - .open = smtc_proc_open, 62 - .read = seq_read, 63 - .llseek = seq_lseek, 64 - .release = single_release, 65 - }; 66 - 67 - void init_smtc_stats(void) 68 - { 69 - int i; 70 - 71 - for (i=0; i<NR_CPUS; i++) { 72 - smtc_cpu_stats[i].timerints = 0; 73 - 
smtc_cpu_stats[i].selfipis = 0; 74 - } 75 - 76 - atomic_set(&smtc_fpu_recoveries, 0); 77 - 78 - proc_create("smtc", 0444, NULL, &smtc_proc_fops); 79 - } 80 - 81 - static int proc_cpuinfo_chain_call(struct notifier_block *nfb, 82 - unsigned long action_unused, void *data) 83 - { 84 - struct proc_cpuinfo_notifier_args *pcn = data; 85 - struct seq_file *m = pcn->m; 86 - unsigned long n = pcn->n; 87 - 88 - if (!cpu_has_mipsmt) 89 - return NOTIFY_OK; 90 - 91 - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); 92 - seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); 93 - 94 - return NOTIFY_OK; 95 - } 96 - 97 - static int __init proc_cpuinfo_notifier_init(void) 98 - { 99 - return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0); 100 - } 101 - 102 - subsys_initcall(proc_cpuinfo_notifier_init);
-1528
arch/mips/kernel/smtc.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or 3 - * modify it under the terms of the GNU General Public License 4 - * as published by the Free Software Foundation; either version 2 5 - * of the License, or (at your option) any later version. 6 - * 7 - * This program is distributed in the hope that it will be useful, 8 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 - * GNU General Public License for more details. 11 - * 12 - * You should have received a copy of the GNU General Public License 13 - * along with this program; if not, write to the Free Software 14 - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 15 - * 16 - * Copyright (C) 2004 Mips Technologies, Inc 17 - * Copyright (C) 2008 Kevin D. Kissell 18 - */ 19 - 20 - #include <linux/clockchips.h> 21 - #include <linux/kernel.h> 22 - #include <linux/sched.h> 23 - #include <linux/smp.h> 24 - #include <linux/cpumask.h> 25 - #include <linux/interrupt.h> 26 - #include <linux/kernel_stat.h> 27 - #include <linux/module.h> 28 - #include <linux/ftrace.h> 29 - #include <linux/slab.h> 30 - 31 - #include <asm/cpu.h> 32 - #include <asm/processor.h> 33 - #include <linux/atomic.h> 34 - #include <asm/hardirq.h> 35 - #include <asm/hazards.h> 36 - #include <asm/irq.h> 37 - #include <asm/idle.h> 38 - #include <asm/mmu_context.h> 39 - #include <asm/mipsregs.h> 40 - #include <asm/cacheflush.h> 41 - #include <asm/time.h> 42 - #include <asm/addrspace.h> 43 - #include <asm/smtc.h> 44 - #include <asm/smtc_proc.h> 45 - #include <asm/setup.h> 46 - 47 - /* 48 - * SMTC Kernel needs to manipulate low-level CPU interrupt mask 49 - * in do_IRQ. These are passed in setup_irq_smtc() and stored 50 - * in this table. 
51 - */ 52 - unsigned long irq_hwmask[NR_IRQS]; 53 - 54 - #define LOCK_MT_PRA() \ 55 - local_irq_save(flags); \ 56 - mtflags = dmt() 57 - 58 - #define UNLOCK_MT_PRA() \ 59 - emt(mtflags); \ 60 - local_irq_restore(flags) 61 - 62 - #define LOCK_CORE_PRA() \ 63 - local_irq_save(flags); \ 64 - mtflags = dvpe() 65 - 66 - #define UNLOCK_CORE_PRA() \ 67 - evpe(mtflags); \ 68 - local_irq_restore(flags) 69 - 70 - /* 71 - * Data structures purely associated with SMTC parallelism 72 - */ 73 - 74 - 75 - /* 76 - * Table for tracking ASIDs whose lifetime is prolonged. 77 - */ 78 - 79 - asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; 80 - 81 - /* 82 - * Number of InterProcessor Interrupt (IPI) message buffers to allocate 83 - */ 84 - 85 - #define IPIBUF_PER_CPU 4 86 - 87 - struct smtc_ipi_q IPIQ[NR_CPUS]; 88 - static struct smtc_ipi_q freeIPIq; 89 - 90 - 91 - /* 92 - * Number of FPU contexts for each VPE 93 - */ 94 - 95 - static int smtc_nconf1[MAX_SMTC_VPES]; 96 - 97 - 98 - /* Forward declarations */ 99 - 100 - void ipi_decode(struct smtc_ipi *); 101 - static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); 102 - static void setup_cross_vpe_interrupts(unsigned int nvpe); 103 - void init_smtc_stats(void); 104 - 105 - /* Global SMTC Status */ 106 - 107 - unsigned int smtc_status; 108 - 109 - /* Boot command line configuration overrides */ 110 - 111 - static int vpe0limit; 112 - static int ipibuffers; 113 - static int nostlb; 114 - static int asidmask; 115 - unsigned long smtc_asid_mask = 0xff; 116 - 117 - static int __init vpe0tcs(char *str) 118 - { 119 - get_option(&str, &vpe0limit); 120 - 121 - return 1; 122 - } 123 - 124 - static int __init ipibufs(char *str) 125 - { 126 - get_option(&str, &ipibuffers); 127 - return 1; 128 - } 129 - 130 - static int __init stlb_disable(char *s) 131 - { 132 - nostlb = 1; 133 - return 1; 134 - } 135 - 136 - static int __init asidmask_set(char *str) 137 - { 138 - get_option(&str, &asidmask); 139 - switch (asidmask) { 140 - case 0x1: 
141 - case 0x3: 142 - case 0x7: 143 - case 0xf: 144 - case 0x1f: 145 - case 0x3f: 146 - case 0x7f: 147 - case 0xff: 148 - smtc_asid_mask = (unsigned long)asidmask; 149 - break; 150 - default: 151 - printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); 152 - } 153 - return 1; 154 - } 155 - 156 - __setup("vpe0tcs=", vpe0tcs); 157 - __setup("ipibufs=", ipibufs); 158 - __setup("nostlb", stlb_disable); 159 - __setup("asidmask=", asidmask_set); 160 - 161 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 162 - 163 - static int hang_trig; 164 - 165 - static int __init hangtrig_enable(char *s) 166 - { 167 - hang_trig = 1; 168 - return 1; 169 - } 170 - 171 - 172 - __setup("hangtrig", hangtrig_enable); 173 - 174 - #define DEFAULT_BLOCKED_IPI_LIMIT 32 175 - 176 - static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; 177 - 178 - static int __init tintq(char *str) 179 - { 180 - get_option(&str, &timerq_limit); 181 - return 1; 182 - } 183 - 184 - __setup("tintq=", tintq); 185 - 186 - static int imstuckcount[MAX_SMTC_VPES][8]; 187 - /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ 188 - static int vpemask[MAX_SMTC_VPES][8] = { 189 - {0, 0, 1, 0, 0, 0, 0, 1}, 190 - {0, 0, 0, 0, 0, 0, 0, 1} 191 - }; 192 - int tcnoprog[NR_CPUS]; 193 - static atomic_t idle_hook_initialized = ATOMIC_INIT(0); 194 - static int clock_hang_reported[NR_CPUS]; 195 - 196 - #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 197 - 198 - /* 199 - * Configure shared TLB - VPC configuration bit must be set by caller 200 - */ 201 - 202 - static void smtc_configure_tlb(void) 203 - { 204 - int i, tlbsiz, vpes; 205 - unsigned long mvpconf0; 206 - unsigned long config1val; 207 - 208 - /* Set up ASID preservation table */ 209 - for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) { 210 - for(i = 0; i < MAX_SMTC_ASIDS; i++) { 211 - smtc_live_asid[vpes][i] = 0; 212 - } 213 - } 214 - mvpconf0 = read_c0_mvpconf0(); 215 - 216 - if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) 217 - >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { 218 - /* If we 
have multiple VPEs, try to share the TLB */ 219 - if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { 220 - /* 221 - * If TLB sizing is programmable, shared TLB 222 - * size is the total available complement. 223 - * Otherwise, we have to take the sum of all 224 - * static VPE TLB entries. 225 - */ 226 - if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) 227 - >> MVPCONF0_PTLBE_SHIFT)) == 0) { 228 - /* 229 - * If there's more than one VPE, there had better 230 - * be more than one TC, because we need one to bind 231 - * to each VPE in turn to be able to read 232 - * its configuration state! 233 - */ 234 - settc(1); 235 - /* Stop the TC from doing anything foolish */ 236 - write_tc_c0_tchalt(TCHALT_H); 237 - mips_ihb(); 238 - /* No need to un-Halt - that happens later anyway */ 239 - for (i=0; i < vpes; i++) { 240 - write_tc_c0_tcbind(i); 241 - /* 242 - * To be 100% sure we're really getting the right 243 - * information, we exit the configuration state 244 - * and do an IHB after each rebinding. 245 - */ 246 - write_c0_mvpcontrol( 247 - read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); 248 - mips_ihb(); 249 - /* 250 - * Only count if the MMU Type indicated is TLB 251 - */ 252 - if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { 253 - config1val = read_vpe_c0_config1(); 254 - tlbsiz += ((config1val >> 25) & 0x3f) + 1; 255 - } 256 - 257 - /* Put core back in configuration state */ 258 - write_c0_mvpcontrol( 259 - read_c0_mvpcontrol() | MVPCONTROL_VPC ); 260 - mips_ihb(); 261 - } 262 - } 263 - write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); 264 - ehb(); 265 - 266 - /* 267 - * Setup kernel data structures to use software total, 268 - * rather than read the per-VPE Config1 value. The values 269 - * for "CPU 0" gets copied to all the other CPUs as part 270 - * of their initialization in smtc_cpu_setup(). 
271 - */ 272 - 273 - /* MIPS32 limits TLB indices to 64 */ 274 - if (tlbsiz > 64) 275 - tlbsiz = 64; 276 - cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; 277 - smtc_status |= SMTC_TLB_SHARED; 278 - local_flush_tlb_all(); 279 - 280 - printk("TLB of %d entry pairs shared by %d VPEs\n", 281 - tlbsiz, vpes); 282 - } else { 283 - printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); 284 - } 285 - } 286 - } 287 - 288 - 289 - /* 290 - * Incrementally build the CPU map out of constituent MIPS MT cores, 291 - * using the specified available VPEs and TCs. Plaform code needs 292 - * to ensure that each MIPS MT core invokes this routine on reset, 293 - * one at a time(!). 294 - * 295 - * This version of the build_cpu_map and prepare_cpus routines assumes 296 - * that *all* TCs of a MIPS MT core will be used for Linux, and that 297 - * they will be spread across *all* available VPEs (to minimise the 298 - * loss of efficiency due to exception service serialization). 299 - * An improved version would pick up configuration information and 300 - * possibly leave some TCs/VPEs as "slave" processors. 301 - * 302 - * Use c0_MVPConf0 to find out how many TCs are available, setting up 303 - * cpu_possible_mask and the logical/physical mappings. 304 - */ 305 - 306 - int __init smtc_build_cpu_map(int start_cpu_slot) 307 - { 308 - int i, ntcs; 309 - 310 - /* 311 - * The CPU map isn't actually used for anything at this point, 312 - * so it's not clear what else we should do apart from set 313 - * everything up so that "logical" = "physical". 
314 - */ 315 - ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 316 - for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { 317 - set_cpu_possible(i, true); 318 - __cpu_number_map[i] = i; 319 - __cpu_logical_map[i] = i; 320 - } 321 - #ifdef CONFIG_MIPS_MT_FPAFF 322 - /* Initialize map of CPUs with FPUs */ 323 - cpus_clear(mt_fpu_cpumask); 324 - #endif 325 - 326 - /* One of those TC's is the one booting, and not a secondary... */ 327 - printk("%i available secondary CPU TC(s)\n", i - 1); 328 - 329 - return i; 330 - } 331 - 332 - /* 333 - * Common setup before any secondaries are started 334 - * Make sure all CPUs are in a sensible state before we boot any of the 335 - * secondaries. 336 - * 337 - * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly 338 - * as possible across the available VPEs. 339 - */ 340 - 341 - static void smtc_tc_setup(int vpe, int tc, int cpu) 342 - { 343 - static int cp1contexts[MAX_SMTC_VPES]; 344 - 345 - /* 346 - * Make a local copy of the available FPU contexts in order 347 - * to keep track of TCs that can have one. 348 - */ 349 - if (tc == 1) 350 - { 351 - /* 352 - * FIXME: Multi-core SMTC hasn't been tested and the 353 - * maximum number of VPEs may change. 354 - */ 355 - cp1contexts[0] = smtc_nconf1[0] - 1; 356 - cp1contexts[1] = smtc_nconf1[1]; 357 - } 358 - 359 - settc(tc); 360 - write_tc_c0_tchalt(TCHALT_H); 361 - mips_ihb(); 362 - write_tc_c0_tcstatus((read_tc_c0_tcstatus() 363 - & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) 364 - | TCSTATUS_A); 365 - /* 366 - * TCContext gets an offset from the base of the IPIQ array 367 - * to be used in low-level code to detect the presence of 368 - * an active IPI queue. 369 - */ 370 - write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); 371 - 372 - /* Bind TC to VPE. */ 373 - write_tc_c0_tcbind(vpe); 374 - 375 - /* In general, all TCs should have the same cpu_data indications. 
*/ 376 - memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); 377 - 378 - /* Check to see if there is a FPU context available for this TC. */ 379 - if (!cp1contexts[vpe]) 380 - cpu_data[cpu].options &= ~MIPS_CPU_FPU; 381 - else 382 - cp1contexts[vpe]--; 383 - 384 - /* Store the TC and VPE into the cpu_data structure. */ 385 - cpu_data[cpu].vpe_id = vpe; 386 - cpu_data[cpu].tc_id = tc; 387 - 388 - /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */ 389 - cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; 390 - } 391 - 392 - /* 393 - * Tweak to get Count registers synced as closely as possible. The 394 - * value seems good for 34K-class cores. 395 - */ 396 - 397 - #define CP0_SKEW 8 398 - 399 - void smtc_prepare_cpus(int cpus) 400 - { 401 - int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; 402 - unsigned long flags; 403 - unsigned long val; 404 - int nipi; 405 - struct smtc_ipi *pipi; 406 - 407 - /* disable interrupts so we can disable MT */ 408 - local_irq_save(flags); 409 - /* disable MT so we can configure */ 410 - dvpe(); 411 - dmt(); 412 - 413 - spin_lock_init(&freeIPIq.lock); 414 - 415 - /* 416 - * We probably don't have as many VPEs as we do SMP "CPUs", 417 - * but it's possible - and in any case we'll never use more! 
418 - */ 419 - for (i=0; i<NR_CPUS; i++) { 420 - IPIQ[i].head = IPIQ[i].tail = NULL; 421 - spin_lock_init(&IPIQ[i].lock); 422 - IPIQ[i].depth = 0; 423 - IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ 424 - } 425 - 426 - /* cpu_data index starts at zero */ 427 - cpu = 0; 428 - cpu_data[cpu].vpe_id = 0; 429 - cpu_data[cpu].tc_id = 0; 430 - cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; 431 - cpu++; 432 - 433 - /* Report on boot-time options */ 434 - mips_mt_set_cpuoptions(); 435 - if (vpelimit > 0) 436 - printk("Limit of %d VPEs set\n", vpelimit); 437 - if (tclimit > 0) 438 - printk("Limit of %d TCs set\n", tclimit); 439 - if (nostlb) { 440 - printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); 441 - } 442 - if (asidmask) 443 - printk("ASID mask value override to 0x%x\n", asidmask); 444 - 445 - /* Temporary */ 446 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 447 - if (hang_trig) 448 - printk("Logic Analyser Trigger on suspected TC hang\n"); 449 - #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 450 - 451 - /* Put MVPE's into 'configuration state' */ 452 - write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); 453 - 454 - val = read_c0_mvpconf0(); 455 - nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 456 - if (vpelimit > 0 && nvpe > vpelimit) 457 - nvpe = vpelimit; 458 - ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 459 - if (ntc > NR_CPUS) 460 - ntc = NR_CPUS; 461 - if (tclimit > 0 && ntc > tclimit) 462 - ntc = tclimit; 463 - slop = ntc % nvpe; 464 - for (i = 0; i < nvpe; i++) { 465 - tcpervpe[i] = ntc / nvpe; 466 - if (slop) { 467 - if((slop - i) > 0) tcpervpe[i]++; 468 - } 469 - } 470 - /* Handle command line override for VPE0 */ 471 - if (vpe0limit > ntc) vpe0limit = ntc; 472 - if (vpe0limit > 0) { 473 - int slopslop; 474 - if (vpe0limit < tcpervpe[0]) { 475 - /* Reducing TC count - distribute to others */ 476 - slop = tcpervpe[0] - vpe0limit; 477 - slopslop = slop % (nvpe - 1); 478 - tcpervpe[0] = vpe0limit; 479 
- for (i = 1; i < nvpe; i++) { 480 - tcpervpe[i] += slop / (nvpe - 1); 481 - if(slopslop && ((slopslop - (i - 1) > 0))) 482 - tcpervpe[i]++; 483 - } 484 - } else if (vpe0limit > tcpervpe[0]) { 485 - /* Increasing TC count - steal from others */ 486 - slop = vpe0limit - tcpervpe[0]; 487 - slopslop = slop % (nvpe - 1); 488 - tcpervpe[0] = vpe0limit; 489 - for (i = 1; i < nvpe; i++) { 490 - tcpervpe[i] -= slop / (nvpe - 1); 491 - if(slopslop && ((slopslop - (i - 1) > 0))) 492 - tcpervpe[i]--; 493 - } 494 - } 495 - } 496 - 497 - /* Set up shared TLB */ 498 - smtc_configure_tlb(); 499 - 500 - for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { 501 - /* Get number of CP1 contexts for each VPE. */ 502 - if (tc == 0) 503 - { 504 - /* 505 - * Do not call settc() for TC0 or the FPU context 506 - * value will be incorrect. Besides, we know that 507 - * we are TC0 anyway. 508 - */ 509 - smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() & 510 - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); 511 - if (nvpe == 2) 512 - { 513 - settc(1); 514 - smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() & 515 - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); 516 - settc(0); 517 - } 518 - } 519 - if (tcpervpe[vpe] == 0) 520 - continue; 521 - if (vpe != 0) 522 - printk(", "); 523 - printk("VPE %d: TC", vpe); 524 - for (i = 0; i < tcpervpe[vpe]; i++) { 525 - /* 526 - * TC 0 is bound to VPE 0 at reset, 527 - * and is presumably executing this 528 - * code. Leave it alone! 529 - */ 530 - if (tc != 0) { 531 - smtc_tc_setup(vpe, tc, cpu); 532 - if (vpe != 0) { 533 - /* 534 - * Set MVP bit (possibly again). Do it 535 - * here to catch CPUs that have no TCs 536 - * bound to the VPE at reset. In that 537 - * case, a TC must be bound to the VPE 538 - * before we can set VPEControl[MVP] 539 - */ 540 - write_vpe_c0_vpeconf0( 541 - read_vpe_c0_vpeconf0() | 542 - VPECONF0_MVP); 543 - } 544 - cpu++; 545 - } 546 - printk(" %d", tc); 547 - tc++; 548 - } 549 - if (vpe != 0) { 550 - /* 551 - * Allow this VPE to control others. 
552 - */ 553 - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | 554 - VPECONF0_MVP); 555 - 556 - /* 557 - * Clear any stale software interrupts from VPE's Cause 558 - */ 559 - write_vpe_c0_cause(0); 560 - 561 - /* 562 - * Clear ERL/EXL of VPEs other than 0 563 - * and set restricted interrupt enable/mask. 564 - */ 565 - write_vpe_c0_status((read_vpe_c0_status() 566 - & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) 567 - | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 568 - | ST0_IE)); 569 - /* 570 - * set config to be the same as vpe0, 571 - * particularly kseg0 coherency alg 572 - */ 573 - write_vpe_c0_config(read_c0_config()); 574 - /* Clear any pending timer interrupt */ 575 - write_vpe_c0_compare(0); 576 - /* Propagate Config7 */ 577 - write_vpe_c0_config7(read_c0_config7()); 578 - write_vpe_c0_count(read_c0_count() + CP0_SKEW); 579 - ehb(); 580 - } 581 - /* enable multi-threading within VPE */ 582 - write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); 583 - /* enable the VPE */ 584 - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 585 - } 586 - 587 - /* 588 - * Pull any physically present but unused TCs out of circulation. 589 - */ 590 - while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { 591 - set_cpu_possible(tc, false); 592 - set_cpu_present(tc, false); 593 - tc++; 594 - } 595 - 596 - /* release config state */ 597 - write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); 598 - 599 - printk("\n"); 600 - 601 - /* Set up coprocessor affinity CPU mask(s) */ 602 - 603 - #ifdef CONFIG_MIPS_MT_FPAFF 604 - for (tc = 0; tc < ntc; tc++) { 605 - if (cpu_data[tc].options & MIPS_CPU_FPU) 606 - cpu_set(tc, mt_fpu_cpumask); 607 - } 608 - #endif 609 - 610 - /* set up ipi interrupts... */ 611 - 612 - /* If we have multiple VPEs running, set up the cross-VPE interrupt */ 613 - 614 - setup_cross_vpe_interrupts(nvpe); 615 - 616 - /* Set up queue of free IPI "messages". 
*/ 617 - nipi = NR_CPUS * IPIBUF_PER_CPU; 618 - if (ipibuffers > 0) 619 - nipi = ipibuffers; 620 - 621 - pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); 622 - if (pipi == NULL) 623 - panic("kmalloc of IPI message buffers failed"); 624 - else 625 - printk("IPI buffer pool of %d buffers\n", nipi); 626 - for (i = 0; i < nipi; i++) { 627 - smtc_ipi_nq(&freeIPIq, pipi); 628 - pipi++; 629 - } 630 - 631 - /* Arm multithreading and enable other VPEs - but all TCs are Halted */ 632 - emt(EMT_ENABLE); 633 - evpe(EVPE_ENABLE); 634 - local_irq_restore(flags); 635 - /* Initialize SMTC /proc statistics/diagnostics */ 636 - init_smtc_stats(); 637 - } 638 - 639 - 640 - /* 641 - * Setup the PC, SP, and GP of a secondary processor and start it 642 - * running! 643 - * smp_bootstrap is the place to resume from 644 - * __KSTK_TOS(idle) is apparently the stack pointer 645 - * (unsigned long)idle->thread_info the gp 646 - * 647 - */ 648 - void smtc_boot_secondary(int cpu, struct task_struct *idle) 649 - { 650 - extern u32 kernelsp[NR_CPUS]; 651 - unsigned long flags; 652 - int mtflags; 653 - 654 - LOCK_MT_PRA(); 655 - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 656 - dvpe(); 657 - } 658 - settc(cpu_data[cpu].tc_id); 659 - 660 - /* pc */ 661 - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); 662 - 663 - /* stack pointer */ 664 - kernelsp[cpu] = __KSTK_TOS(idle); 665 - write_tc_gpr_sp(__KSTK_TOS(idle)); 666 - 667 - /* global pointer */ 668 - write_tc_gpr_gp((unsigned long)task_thread_info(idle)); 669 - 670 - smtc_status |= SMTC_MTC_ACTIVE; 671 - write_tc_c0_tchalt(0); 672 - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 673 - evpe(EVPE_ENABLE); 674 - } 675 - UNLOCK_MT_PRA(); 676 - } 677 - 678 - void smtc_init_secondary(void) 679 - { 680 - } 681 - 682 - void smtc_smp_finish(void) 683 - { 684 - int cpu = smp_processor_id(); 685 - 686 - /* 687 - * Lowest-numbered CPU per VPE starts a clock tick. 
688 - * Like per_cpu_trap_init() hack, this assumes that 689 - * SMTC init code assigns TCs consdecutively and 690 - * in ascending order across available VPEs. 691 - */ 692 - if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) 693 - write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); 694 - 695 - local_irq_enable(); 696 - 697 - printk("TC %d going on-line as CPU %d\n", 698 - cpu_data[smp_processor_id()].tc_id, smp_processor_id()); 699 - } 700 - 701 - void smtc_cpus_done(void) 702 - { 703 - } 704 - 705 - /* 706 - * Support for SMTC-optimized driver IRQ registration 707 - */ 708 - 709 - /* 710 - * SMTC Kernel needs to manipulate low-level CPU interrupt mask 711 - * in do_IRQ. These are passed in setup_irq_smtc() and stored 712 - * in this table. 713 - */ 714 - 715 - int setup_irq_smtc(unsigned int irq, struct irqaction * new, 716 - unsigned long hwmask) 717 - { 718 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 719 - unsigned int vpe = current_cpu_data.vpe_id; 720 - 721 - vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; 722 - #endif 723 - irq_hwmask[irq] = hwmask; 724 - 725 - return setup_irq(irq, new); 726 - } 727 - 728 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 729 - /* 730 - * Support for IRQ affinity to TCs 731 - */ 732 - 733 - void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) 734 - { 735 - /* 736 - * If a "fast path" cache of quickly decodable affinity state 737 - * is maintained, this is where it gets done, on a call up 738 - * from the platform affinity code. 739 - */ 740 - } 741 - 742 - void smtc_forward_irq(struct irq_data *d) 743 - { 744 - unsigned int irq = d->irq; 745 - int target; 746 - 747 - /* 748 - * OK wise guy, now figure out how to get the IRQ 749 - * to be serviced on an authorized "CPU". 750 - * 751 - * Ideally, to handle the situation where an IRQ has multiple 752 - * eligible CPUS, we would maintain state per IRQ that would 753 - * allow a fair distribution of service requests. 
Since the 754 - * expected use model is any-or-only-one, for simplicity 755 - * and efficiency, we just pick the easiest one to find. 756 - */ 757 - 758 - target = cpumask_first(d->affinity); 759 - 760 - /* 761 - * We depend on the platform code to have correctly processed 762 - * IRQ affinity change requests to ensure that the IRQ affinity 763 - * mask has been purged of bits corresponding to nonexistent and 764 - * offline "CPUs", and to TCs bound to VPEs other than the VPE 765 - * connected to the physical interrupt input for the interrupt 766 - * in question. Otherwise we have a nasty problem with interrupt 767 - * mask management. This is best handled in non-performance-critical 768 - * platform IRQ affinity setting code, to minimize interrupt-time 769 - * checks. 770 - */ 771 - 772 - /* If no one is eligible, service locally */ 773 - if (target >= NR_CPUS) 774 - do_IRQ_no_affinity(irq); 775 - else 776 - smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); 777 - } 778 - 779 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 780 - 781 - /* 782 - * IPI model for SMTC is tricky, because interrupts aren't TC-specific. 783 - * Within a VPE one TC can interrupt another by different approaches. 784 - * The easiest to get right would probably be to make all TCs except 785 - * the target IXMT and set a software interrupt, but an IXMT-based 786 - * scheme requires that a handler must run before a new IPI could 787 - * be sent, which would break the "broadcast" loops in MIPS MT. 788 - * A more gonzo approach within a VPE is to halt the TC, extract 789 - * its Restart, Status, and a couple of GPRs, and program the Restart 790 - * address to emulate an interrupt. 791 - * 792 - * Within a VPE, one can be confident that the target TC isn't in 793 - * a critical EXL state when halted, since the write to the Halt 794 - * register could not have issued on the writing thread if the 795 - * halting thread had EXL set. So k0 and k1 of the target TC 796 - * can be used by the injection code. 
Across VPEs, one can't 797 - * be certain that the target TC isn't in a critical exception 798 - * state. So we try a two-step process of sending a software 799 - * interrupt to the target VPE, which either handles the event 800 - * itself (if it was the target) or injects the event within 801 - * the VPE. 802 - */ 803 - 804 - static void smtc_ipi_qdump(void) 805 - { 806 - int i; 807 - struct smtc_ipi *temp; 808 - 809 - for (i = 0; i < NR_CPUS ;i++) { 810 - pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", 811 - i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, 812 - IPIQ[i].depth); 813 - temp = IPIQ[i].head; 814 - 815 - while (temp != IPIQ[i].tail) { 816 - pr_debug("%d %d %d: ", temp->type, temp->dest, 817 - (int)temp->arg); 818 - #ifdef SMTC_IPI_DEBUG 819 - pr_debug("%u %lu\n", temp->sender, temp->stamp); 820 - #else 821 - pr_debug("\n"); 822 - #endif 823 - temp = temp->flink; 824 - } 825 - } 826 - } 827 - 828 - /* 829 - * The standard atomic.h primitives don't quite do what we want 830 - * here: We need an atomic add-and-return-previous-value (which 831 - * could be done with atomic_add_return and a decrement) and an 832 - * atomic set/zero-and-return-previous-value (which can't really 833 - * be done with the atomic.h primitives). And since this is 834 - * MIPS MT, we can assume that we have LL/SC. 
835 - */ 836 - static inline int atomic_postincrement(atomic_t *v) 837 - { 838 - unsigned long result; 839 - 840 - unsigned long temp; 841 - 842 - __asm__ __volatile__( 843 - "1: ll %0, %2 \n" 844 - " addu %1, %0, 1 \n" 845 - " sc %1, %2 \n" 846 - " beqz %1, 1b \n" 847 - __WEAK_LLSC_MB 848 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) 849 - : "m" (v->counter) 850 - : "memory"); 851 - 852 - return result; 853 - } 854 - 855 - void smtc_send_ipi(int cpu, int type, unsigned int action) 856 - { 857 - int tcstatus; 858 - struct smtc_ipi *pipi; 859 - unsigned long flags; 860 - int mtflags; 861 - unsigned long tcrestart; 862 - int set_resched_flag = (type == LINUX_SMP_IPI && 863 - action == SMP_RESCHEDULE_YOURSELF); 864 - 865 - if (cpu == smp_processor_id()) { 866 - printk("Cannot Send IPI to self!\n"); 867 - return; 868 - } 869 - if (set_resched_flag && IPIQ[cpu].resched_flag != 0) 870 - return; /* There is a reschedule queued already */ 871 - 872 - /* Set up a descriptor, to be delivered either promptly or queued */ 873 - pipi = smtc_ipi_dq(&freeIPIq); 874 - if (pipi == NULL) { 875 - bust_spinlocks(1); 876 - mips_mt_regdump(dvpe()); 877 - panic("IPI Msg. Buffers Depleted"); 878 - } 879 - pipi->type = type; 880 - pipi->arg = (void *)action; 881 - pipi->dest = cpu; 882 - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 883 - /* If not on same VPE, enqueue and send cross-VPE interrupt */ 884 - IPIQ[cpu].resched_flag |= set_resched_flag; 885 - smtc_ipi_nq(&IPIQ[cpu], pipi); 886 - LOCK_CORE_PRA(); 887 - settc(cpu_data[cpu].tc_id); 888 - write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); 889 - UNLOCK_CORE_PRA(); 890 - } else { 891 - /* 892 - * Not sufficient to do a LOCK_MT_PRA (dmt) here, 893 - * since ASID shootdown on the other VPE may 894 - * collide with this operation. 
895 - */ 896 - LOCK_CORE_PRA(); 897 - settc(cpu_data[cpu].tc_id); 898 - /* Halt the targeted TC */ 899 - write_tc_c0_tchalt(TCHALT_H); 900 - mips_ihb(); 901 - 902 - /* 903 - * Inspect TCStatus - if IXMT is set, we have to queue 904 - * a message. Otherwise, we set up the "interrupt" 905 - * of the other TC 906 - */ 907 - tcstatus = read_tc_c0_tcstatus(); 908 - 909 - if ((tcstatus & TCSTATUS_IXMT) != 0) { 910 - /* 911 - * If we're in the the irq-off version of the wait 912 - * loop, we need to force exit from the wait and 913 - * do a direct post of the IPI. 914 - */ 915 - if (cpu_wait == r4k_wait_irqoff) { 916 - tcrestart = read_tc_c0_tcrestart(); 917 - if (address_is_in_r4k_wait_irqoff(tcrestart)) { 918 - write_tc_c0_tcrestart(__pastwait); 919 - tcstatus &= ~TCSTATUS_IXMT; 920 - write_tc_c0_tcstatus(tcstatus); 921 - goto postdirect; 922 - } 923 - } 924 - /* 925 - * Otherwise we queue the message for the target TC 926 - * to pick up when he does a local_irq_restore() 927 - */ 928 - write_tc_c0_tchalt(0); 929 - UNLOCK_CORE_PRA(); 930 - IPIQ[cpu].resched_flag |= set_resched_flag; 931 - smtc_ipi_nq(&IPIQ[cpu], pipi); 932 - } else { 933 - postdirect: 934 - post_direct_ipi(cpu, pipi); 935 - write_tc_c0_tchalt(0); 936 - UNLOCK_CORE_PRA(); 937 - } 938 - } 939 - } 940 - 941 - /* 942 - * Send IPI message to Halted TC, TargTC/TargVPE already having been set 943 - */ 944 - static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) 945 - { 946 - struct pt_regs *kstack; 947 - unsigned long tcstatus; 948 - unsigned long tcrestart; 949 - extern u32 kernelsp[NR_CPUS]; 950 - extern void __smtc_ipi_vector(void); 951 - //printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu); 952 - 953 - /* Extract Status, EPC from halted TC */ 954 - tcstatus = read_tc_c0_tcstatus(); 955 - tcrestart = read_tc_c0_tcrestart(); 956 - /* If TCRestart indicates a WAIT instruction, advance the PC */ 957 - if ((tcrestart & 0x80000000) 958 - && ((*(unsigned int *)tcrestart & 0xfe00003f) == 
0x42000020)) { 959 - tcrestart += 4; 960 - } 961 - /* 962 - * Save on TC's future kernel stack 963 - * 964 - * CU bit of Status is indicator that TC was 965 - * already running on a kernel stack... 966 - */ 967 - if (tcstatus & ST0_CU0) { 968 - /* Note that this "- 1" is pointer arithmetic */ 969 - kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; 970 - } else { 971 - kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; 972 - } 973 - 974 - kstack->cp0_epc = (long)tcrestart; 975 - /* Save TCStatus */ 976 - kstack->cp0_tcstatus = tcstatus; 977 - /* Pass token of operation to be performed kernel stack pad area */ 978 - kstack->pad0[4] = (unsigned long)pipi; 979 - /* Pass address of function to be called likewise */ 980 - kstack->pad0[5] = (unsigned long)&ipi_decode; 981 - /* Set interrupt exempt and kernel mode */ 982 - tcstatus |= TCSTATUS_IXMT; 983 - tcstatus &= ~TCSTATUS_TKSU; 984 - write_tc_c0_tcstatus(tcstatus); 985 - ehb(); 986 - /* Set TC Restart address to be SMTC IPI vector */ 987 - write_tc_c0_tcrestart(__smtc_ipi_vector); 988 - } 989 - 990 - static void ipi_resched_interrupt(void) 991 - { 992 - scheduler_ipi(); 993 - } 994 - 995 - static void ipi_call_interrupt(void) 996 - { 997 - /* Invoke generic function invocation code in smp.c */ 998 - smp_call_function_interrupt(); 999 - } 1000 - 1001 - DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); 1002 - 1003 - static void __irq_entry smtc_clock_tick_interrupt(void) 1004 - { 1005 - unsigned int cpu = smp_processor_id(); 1006 - struct clock_event_device *cd; 1007 - int irq = MIPS_CPU_IRQ_BASE + 1; 1008 - 1009 - irq_enter(); 1010 - kstat_incr_irq_this_cpu(irq); 1011 - cd = &per_cpu(mips_clockevent_device, cpu); 1012 - cd->event_handler(cd); 1013 - irq_exit(); 1014 - } 1015 - 1016 - void ipi_decode(struct smtc_ipi *pipi) 1017 - { 1018 - void *arg_copy = pipi->arg; 1019 - int type_copy = pipi->type; 1020 - 1021 - smtc_ipi_nq(&freeIPIq, pipi); 1022 - 1023 - switch (type_copy) { 1024 - case 
SMTC_CLOCK_TICK: 1025 - smtc_clock_tick_interrupt(); 1026 - break; 1027 - 1028 - case LINUX_SMP_IPI: 1029 - switch ((int)arg_copy) { 1030 - case SMP_RESCHEDULE_YOURSELF: 1031 - ipi_resched_interrupt(); 1032 - break; 1033 - case SMP_CALL_FUNCTION: 1034 - ipi_call_interrupt(); 1035 - break; 1036 - default: 1037 - printk("Impossible SMTC IPI Argument %p\n", arg_copy); 1038 - break; 1039 - } 1040 - break; 1041 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 1042 - case IRQ_AFFINITY_IPI: 1043 - /* 1044 - * Accept a "forwarded" interrupt that was initially 1045 - * taken by a TC who doesn't have affinity for the IRQ. 1046 - */ 1047 - do_IRQ_no_affinity((int)arg_copy); 1048 - break; 1049 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 1050 - default: 1051 - printk("Impossible SMTC IPI Type 0x%x\n", type_copy); 1052 - break; 1053 - } 1054 - } 1055 - 1056 - /* 1057 - * Similar to smtc_ipi_replay(), but invoked from context restore, 1058 - * so it reuses the current exception frame rather than set up a 1059 - * new one with self_ipi. 1060 - */ 1061 - 1062 - void deferred_smtc_ipi(void) 1063 - { 1064 - int cpu = smp_processor_id(); 1065 - 1066 - /* 1067 - * Test is not atomic, but much faster than a dequeue, 1068 - * and the vast majority of invocations will have a null queue. 1069 - * If irq_disabled when this was called, then any IPIs queued 1070 - * after we test last will be taken on the next irq_enable/restore. 1071 - * If interrupts were enabled, then any IPIs added after the 1072 - * last test will be taken directly. 1073 - */ 1074 - 1075 - while (IPIQ[cpu].head != NULL) { 1076 - struct smtc_ipi_q *q = &IPIQ[cpu]; 1077 - struct smtc_ipi *pipi; 1078 - unsigned long flags; 1079 - 1080 - /* 1081 - * It may be possible we'll come in with interrupts 1082 - * already enabled. 
1083 - */ 1084 - local_irq_save(flags); 1085 - spin_lock(&q->lock); 1086 - pipi = __smtc_ipi_dq(q); 1087 - spin_unlock(&q->lock); 1088 - if (pipi != NULL) { 1089 - if (pipi->type == LINUX_SMP_IPI && 1090 - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) 1091 - IPIQ[cpu].resched_flag = 0; 1092 - ipi_decode(pipi); 1093 - } 1094 - /* 1095 - * The use of the __raw_local restore isn't 1096 - * as obviously necessary here as in smtc_ipi_replay(), 1097 - * but it's more efficient, given that we're already 1098 - * running down the IPI queue. 1099 - */ 1100 - __arch_local_irq_restore(flags); 1101 - } 1102 - } 1103 - 1104 - /* 1105 - * Cross-VPE interrupts in the SMTC prototype use "software interrupts" 1106 - * set via cross-VPE MTTR manipulation of the Cause register. It would be 1107 - * in some regards preferable to have external logic for "doorbell" hardware 1108 - * interrupts. 1109 - */ 1110 - 1111 - static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; 1112 - 1113 - static irqreturn_t ipi_interrupt(int irq, void *dev_idm) 1114 - { 1115 - int my_vpe = cpu_data[smp_processor_id()].vpe_id; 1116 - int my_tc = cpu_data[smp_processor_id()].tc_id; 1117 - int cpu; 1118 - struct smtc_ipi *pipi; 1119 - unsigned long tcstatus; 1120 - int sent; 1121 - unsigned long flags; 1122 - unsigned int mtflags; 1123 - unsigned int vpflags; 1124 - 1125 - /* 1126 - * So long as cross-VPE interrupts are done via 1127 - * MFTR/MTTR read-modify-writes of Cause, we need 1128 - * to stop other VPEs whenever the local VPE does 1129 - * anything similar. 1130 - */ 1131 - local_irq_save(flags); 1132 - vpflags = dvpe(); 1133 - clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); 1134 - set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); 1135 - irq_enable_hazard(); 1136 - evpe(vpflags); 1137 - local_irq_restore(flags); 1138 - 1139 - /* 1140 - * Cross-VPE Interrupt handler: Try to directly deliver IPIs 1141 - * queued for TCs on this VPE other than the current one. 
1142 - * Return-from-interrupt should cause us to drain the queue 1143 - * for the current TC, so we ought not to have to do it explicitly here. 1144 - */ 1145 - 1146 - for_each_online_cpu(cpu) { 1147 - if (cpu_data[cpu].vpe_id != my_vpe) 1148 - continue; 1149 - 1150 - pipi = smtc_ipi_dq(&IPIQ[cpu]); 1151 - if (pipi != NULL) { 1152 - if (cpu_data[cpu].tc_id != my_tc) { 1153 - sent = 0; 1154 - LOCK_MT_PRA(); 1155 - settc(cpu_data[cpu].tc_id); 1156 - write_tc_c0_tchalt(TCHALT_H); 1157 - mips_ihb(); 1158 - tcstatus = read_tc_c0_tcstatus(); 1159 - if ((tcstatus & TCSTATUS_IXMT) == 0) { 1160 - post_direct_ipi(cpu, pipi); 1161 - sent = 1; 1162 - } 1163 - write_tc_c0_tchalt(0); 1164 - UNLOCK_MT_PRA(); 1165 - if (!sent) { 1166 - smtc_ipi_req(&IPIQ[cpu], pipi); 1167 - } 1168 - } else { 1169 - /* 1170 - * ipi_decode() should be called 1171 - * with interrupts off 1172 - */ 1173 - local_irq_save(flags); 1174 - if (pipi->type == LINUX_SMP_IPI && 1175 - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) 1176 - IPIQ[cpu].resched_flag = 0; 1177 - ipi_decode(pipi); 1178 - local_irq_restore(flags); 1179 - } 1180 - } 1181 - } 1182 - 1183 - return IRQ_HANDLED; 1184 - } 1185 - 1186 - static void ipi_irq_dispatch(void) 1187 - { 1188 - do_IRQ(cpu_ipi_irq); 1189 - } 1190 - 1191 - static struct irqaction irq_ipi = { 1192 - .handler = ipi_interrupt, 1193 - .flags = IRQF_PERCPU, 1194 - .name = "SMTC_IPI" 1195 - }; 1196 - 1197 - static void setup_cross_vpe_interrupts(unsigned int nvpe) 1198 - { 1199 - if (nvpe < 1) 1200 - return; 1201 - 1202 - if (!cpu_has_vint) 1203 - panic("SMTC Kernel requires Vectored Interrupt support"); 1204 - 1205 - set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); 1206 - 1207 - setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); 1208 - 1209 - irq_set_handler(cpu_ipi_irq, handle_percpu_irq); 1210 - } 1211 - 1212 - /* 1213 - * SMTC-specific hacks invoked from elsewhere in the kernel. 
1214 - */ 1215 - 1216 - /* 1217 - * smtc_ipi_replay is called from raw_local_irq_restore 1218 - */ 1219 - 1220 - void smtc_ipi_replay(void) 1221 - { 1222 - unsigned int cpu = smp_processor_id(); 1223 - 1224 - /* 1225 - * To the extent that we've ever turned interrupts off, 1226 - * we may have accumulated deferred IPIs. This is subtle. 1227 - * we should be OK: If we pick up something and dispatch 1228 - * it here, that's great. If we see nothing, but concurrent 1229 - * with this operation, another TC sends us an IPI, IXMT 1230 - * is clear, and we'll handle it as a real pseudo-interrupt 1231 - * and not a pseudo-pseudo interrupt. The important thing 1232 - * is to do the last check for queued message *after* the 1233 - * re-enabling of interrupts. 1234 - */ 1235 - while (IPIQ[cpu].head != NULL) { 1236 - struct smtc_ipi_q *q = &IPIQ[cpu]; 1237 - struct smtc_ipi *pipi; 1238 - unsigned long flags; 1239 - 1240 - /* 1241 - * It's just possible we'll come in with interrupts 1242 - * already enabled. 1243 - */ 1244 - local_irq_save(flags); 1245 - 1246 - spin_lock(&q->lock); 1247 - pipi = __smtc_ipi_dq(q); 1248 - spin_unlock(&q->lock); 1249 - /* 1250 - ** But use a raw restore here to avoid recursion. 1251 - */ 1252 - __arch_local_irq_restore(flags); 1253 - 1254 - if (pipi) { 1255 - self_ipi(pipi); 1256 - smtc_cpu_stats[cpu].selfipis++; 1257 - } 1258 - } 1259 - } 1260 - 1261 - EXPORT_SYMBOL(smtc_ipi_replay); 1262 - 1263 - void smtc_idle_loop_hook(void) 1264 - { 1265 - #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 1266 - int im; 1267 - int flags; 1268 - int mtflags; 1269 - int bit; 1270 - int vpe; 1271 - int tc; 1272 - int hook_ntcs; 1273 - /* 1274 - * printk within DMT-protected regions can deadlock, 1275 - * so buffer diagnostic messages for later output. 
1276 - */ 1277 - char *pdb_msg; 1278 - char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ 1279 - 1280 - if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ 1281 - if (atomic_add_return(1, &idle_hook_initialized) == 1) { 1282 - int mvpconf0; 1283 - /* Tedious stuff to just do once */ 1284 - mvpconf0 = read_c0_mvpconf0(); 1285 - hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 1286 - if (hook_ntcs > NR_CPUS) 1287 - hook_ntcs = NR_CPUS; 1288 - for (tc = 0; tc < hook_ntcs; tc++) { 1289 - tcnoprog[tc] = 0; 1290 - clock_hang_reported[tc] = 0; 1291 - } 1292 - for (vpe = 0; vpe < 2; vpe++) 1293 - for (im = 0; im < 8; im++) 1294 - imstuckcount[vpe][im] = 0; 1295 - printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); 1296 - atomic_set(&idle_hook_initialized, 1000); 1297 - } else { 1298 - /* Someone else is initializing in parallel - let 'em finish */ 1299 - while (atomic_read(&idle_hook_initialized) < 1000) 1300 - ; 1301 - } 1302 - } 1303 - 1304 - /* Have we stupidly left IXMT set somewhere? */ 1305 - if (read_c0_tcstatus() & 0x400) { 1306 - write_c0_tcstatus(read_c0_tcstatus() & ~0x400); 1307 - ehb(); 1308 - printk("Dangling IXMT in cpu_idle()\n"); 1309 - } 1310 - 1311 - /* Have we stupidly left an IM bit turned off? 
*/ 1312 - #define IM_LIMIT 2000 1313 - local_irq_save(flags); 1314 - mtflags = dmt(); 1315 - pdb_msg = &id_ho_db_msg[0]; 1316 - im = read_c0_status(); 1317 - vpe = current_cpu_data.vpe_id; 1318 - for (bit = 0; bit < 8; bit++) { 1319 - /* 1320 - * In current prototype, I/O interrupts 1321 - * are masked for VPE > 0 1322 - */ 1323 - if (vpemask[vpe][bit]) { 1324 - if (!(im & (0x100 << bit))) 1325 - imstuckcount[vpe][bit]++; 1326 - else 1327 - imstuckcount[vpe][bit] = 0; 1328 - if (imstuckcount[vpe][bit] > IM_LIMIT) { 1329 - set_c0_status(0x100 << bit); 1330 - ehb(); 1331 - imstuckcount[vpe][bit] = 0; 1332 - pdb_msg += sprintf(pdb_msg, 1333 - "Dangling IM %d fixed for VPE %d\n", bit, 1334 - vpe); 1335 - } 1336 - } 1337 - } 1338 - 1339 - emt(mtflags); 1340 - local_irq_restore(flags); 1341 - if (pdb_msg != &id_ho_db_msg[0]) 1342 - printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); 1343 - #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 1344 - 1345 - smtc_ipi_replay(); 1346 - } 1347 - 1348 - void smtc_soft_dump(void) 1349 - { 1350 - int i; 1351 - 1352 - printk("Counter Interrupts taken per CPU (TC)\n"); 1353 - for (i=0; i < NR_CPUS; i++) { 1354 - printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); 1355 - } 1356 - printk("Self-IPI invocations:\n"); 1357 - for (i=0; i < NR_CPUS; i++) { 1358 - printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 1359 - } 1360 - smtc_ipi_qdump(); 1361 - printk("%d Recoveries of \"stolen\" FPU\n", 1362 - atomic_read(&smtc_fpu_recoveries)); 1363 - } 1364 - 1365 - 1366 - /* 1367 - * TLB management routines special to SMTC 1368 - */ 1369 - 1370 - void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) 1371 - { 1372 - unsigned long flags, mtflags, tcstat, prevhalt, asid; 1373 - int tlb, i; 1374 - 1375 - /* 1376 - * It would be nice to be able to use a spinlock here, 1377 - * but this is invoked from within TLB flush routines 1378 - * that protect themselves with DVPE, so if a lock is 1379 - * held by another TC, it'll never be freed. 
1380 - * 1381 - * DVPE/DMT must not be done with interrupts enabled, 1382 - * so even so most callers will already have disabled 1383 - * them, let's be really careful... 1384 - */ 1385 - 1386 - local_irq_save(flags); 1387 - if (smtc_status & SMTC_TLB_SHARED) { 1388 - mtflags = dvpe(); 1389 - tlb = 0; 1390 - } else { 1391 - mtflags = dmt(); 1392 - tlb = cpu_data[cpu].vpe_id; 1393 - } 1394 - asid = asid_cache(cpu); 1395 - 1396 - do { 1397 - if (!((asid += ASID_INC) & ASID_MASK) ) { 1398 - if (cpu_has_vtag_icache) 1399 - flush_icache_all(); 1400 - /* Traverse all online CPUs (hack requires contiguous range) */ 1401 - for_each_online_cpu(i) { 1402 - /* 1403 - * We don't need to worry about our own CPU, nor those of 1404 - * CPUs who don't share our TLB. 1405 - */ 1406 - if ((i != smp_processor_id()) && 1407 - ((smtc_status & SMTC_TLB_SHARED) || 1408 - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { 1409 - settc(cpu_data[i].tc_id); 1410 - prevhalt = read_tc_c0_tchalt() & TCHALT_H; 1411 - if (!prevhalt) { 1412 - write_tc_c0_tchalt(TCHALT_H); 1413 - mips_ihb(); 1414 - } 1415 - tcstat = read_tc_c0_tcstatus(); 1416 - smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); 1417 - if (!prevhalt) 1418 - write_tc_c0_tchalt(0); 1419 - } 1420 - } 1421 - if (!asid) /* fix version if needed */ 1422 - asid = ASID_FIRST_VERSION; 1423 - local_flush_tlb_all(); /* start new asid cycle */ 1424 - } 1425 - } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); 1426 - 1427 - /* 1428 - * SMTC shares the TLB within VPEs and possibly across all VPEs. 
1429 - */ 1430 - for_each_online_cpu(i) { 1431 - if ((smtc_status & SMTC_TLB_SHARED) || 1432 - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) 1433 - cpu_context(i, mm) = asid_cache(i) = asid; 1434 - } 1435 - 1436 - if (smtc_status & SMTC_TLB_SHARED) 1437 - evpe(mtflags); 1438 - else 1439 - emt(mtflags); 1440 - local_irq_restore(flags); 1441 - } 1442 - 1443 - /* 1444 - * Invoked from macros defined in mmu_context.h 1445 - * which must already have disabled interrupts 1446 - * and done a DVPE or DMT as appropriate. 1447 - */ 1448 - 1449 - void smtc_flush_tlb_asid(unsigned long asid) 1450 - { 1451 - int entry; 1452 - unsigned long ehi; 1453 - 1454 - entry = read_c0_wired(); 1455 - 1456 - /* Traverse all non-wired entries */ 1457 - while (entry < current_cpu_data.tlbsize) { 1458 - write_c0_index(entry); 1459 - ehb(); 1460 - tlb_read(); 1461 - ehb(); 1462 - ehi = read_c0_entryhi(); 1463 - if ((ehi & ASID_MASK) == asid) { 1464 - /* 1465 - * Invalidate only entries with specified ASID, 1466 - * makiing sure all entries differ. 1467 - */ 1468 - write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); 1469 - write_c0_entrylo0(0); 1470 - write_c0_entrylo1(0); 1471 - mtc0_tlbw_hazard(); 1472 - tlb_write_indexed(); 1473 - } 1474 - entry++; 1475 - } 1476 - write_c0_index(PARKED_INDEX); 1477 - tlbw_use_hazard(); 1478 - } 1479 - 1480 - /* 1481 - * Support for single-threading cache flush operations. 1482 - */ 1483 - 1484 - static int halt_state_save[NR_CPUS]; 1485 - 1486 - /* 1487 - * To really, really be sure that nothing is being done 1488 - * by other TCs, halt them all. This code assumes that 1489 - * a DVPE has already been done, so while their Halted 1490 - * state is theoretically architecturally unstable, in 1491 - * practice, it's not going to change while we're looking 1492 - * at it. 
1493 - */ 1494 - 1495 - void smtc_cflush_lockdown(void) 1496 - { 1497 - int cpu; 1498 - 1499 - for_each_online_cpu(cpu) { 1500 - if (cpu != smp_processor_id()) { 1501 - settc(cpu_data[cpu].tc_id); 1502 - halt_state_save[cpu] = read_tc_c0_tchalt(); 1503 - write_tc_c0_tchalt(TCHALT_H); 1504 - } 1505 - } 1506 - mips_ihb(); 1507 - } 1508 - 1509 - /* It would be cheating to change the cpu_online states during a flush! */ 1510 - 1511 - void smtc_cflush_release(void) 1512 - { 1513 - int cpu; 1514 - 1515 - /* 1516 - * Start with a hazard barrier to ensure 1517 - * that all CACHE ops have played through. 1518 - */ 1519 - mips_ihb(); 1520 - 1521 - for_each_online_cpu(cpu) { 1522 - if (cpu != smp_processor_id()) { 1523 - settc(cpu_data[cpu].tc_id); 1524 - write_tc_c0_tchalt(halt_state_save[cpu]); 1525 - } 1526 - } 1527 - mips_ihb(); 1528 - }
-18
arch/mips/kernel/sync-r4k.c
··· 6 6 * not have done anything significant (but they may have had interrupts 7 7 * enabled briefly - prom_smp_finish() should not be responsible for enabling 8 8 * interrupts...) 9 - * 10 - * FIXME: broken for SMTC 11 9 */ 12 10 13 11 #include <linux/kernel.h> ··· 30 32 int i; 31 33 unsigned long flags; 32 34 unsigned int initcount; 33 - 34 - #ifdef CONFIG_MIPS_MT_SMTC 35 - /* 36 - * SMTC needs to synchronise per VPE, not per CPU 37 - * ignore for now 38 - */ 39 - return; 40 - #endif 41 35 42 36 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); 43 37 ··· 99 109 { 100 110 int i; 101 111 unsigned int initcount; 102 - 103 - #ifdef CONFIG_MIPS_MT_SMTC 104 - /* 105 - * SMTC needs to synchronise per VPE, not per CPU 106 - * ignore for now 107 - */ 108 - return; 109 - #endif 110 112 111 113 /* 112 114 * Not every cpu is online at the time this gets called,
-1
arch/mips/kernel/time.c
··· 26 26 #include <asm/cpu-features.h> 27 27 #include <asm/cpu-type.h> 28 28 #include <asm/div64.h> 29 - #include <asm/smtc_ipi.h> 30 29 #include <asm/time.h> 31 30 32 31 /*
+54 -68
arch/mips/kernel/traps.c
··· 15 15 #include <linux/bug.h> 16 16 #include <linux/compiler.h> 17 17 #include <linux/context_tracking.h> 18 + #include <linux/cpu_pm.h> 18 19 #include <linux/kexec.h> 19 20 #include <linux/init.h> 20 21 #include <linux/kernel.h> ··· 371 370 { 372 371 static int die_counter; 373 372 int sig = SIGSEGV; 374 - #ifdef CONFIG_MIPS_MT_SMTC 375 - unsigned long dvpret; 376 - #endif /* CONFIG_MIPS_MT_SMTC */ 377 373 378 374 oops_enter(); 379 375 ··· 380 382 381 383 console_verbose(); 382 384 raw_spin_lock_irq(&die_lock); 383 - #ifdef CONFIG_MIPS_MT_SMTC 384 - dvpret = dvpe(); 385 - #endif /* CONFIG_MIPS_MT_SMTC */ 386 385 bust_spinlocks(1); 387 - #ifdef CONFIG_MIPS_MT_SMTC 388 - mips_mt_regdump(dvpret); 389 - #endif /* CONFIG_MIPS_MT_SMTC */ 390 386 391 387 printk("%s[#%d]:\n", str, ++die_counter); 392 388 show_registers(regs); ··· 704 712 si.si_addr = fault_addr; 705 713 si.si_signo = sig; 706 714 if (sig == SIGSEGV) { 715 + down_read(&current->mm->mmap_sem); 707 716 if (find_vma(current->mm, (unsigned long)fault_addr)) 708 717 si.si_code = SEGV_ACCERR; 709 718 else 710 719 si.si_code = SEGV_MAPERR; 720 + up_read(&current->mm->mmap_sem); 711 721 } else { 712 722 si.si_code = BUS_ADRERR; 713 723 } ··· 1753 1759 extern char rollback_except_vec_vi; 1754 1760 char *vec_start = using_rollback_handler() ? 1755 1761 &rollback_except_vec_vi : &except_vec_vi; 1756 - #ifdef CONFIG_MIPS_MT_SMTC 1757 - /* 1758 - * We need to provide the SMTC vectored interrupt handler 1759 - * not only with the address of the handler, but with the 1760 - * Status.IM bit to be masked before going there. 
1761 - */ 1762 - extern char except_vec_vi_mori; 1763 - #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1764 - const int mori_offset = &except_vec_vi_mori - vec_start + 2; 1765 - #else 1766 - const int mori_offset = &except_vec_vi_mori - vec_start; 1767 - #endif 1768 - #endif /* CONFIG_MIPS_MT_SMTC */ 1769 1762 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1770 1763 const int lui_offset = &except_vec_vi_lui - vec_start + 2; 1771 1764 const int ori_offset = &except_vec_vi_ori - vec_start + 2; ··· 1776 1795 #else 1777 1796 handler_len); 1778 1797 #endif 1779 - #ifdef CONFIG_MIPS_MT_SMTC 1780 - BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1781 - 1782 - h = (u16 *)(b + mori_offset); 1783 - *h = (0x100 << n); 1784 - #endif /* CONFIG_MIPS_MT_SMTC */ 1785 1798 h = (u16 *)(b + lui_offset); 1786 1799 *h = (handler >> 16) & 0xffff; 1787 1800 h = (u16 *)(b + ori_offset); ··· 1840 1865 } 1841 1866 __setup("noulri", ulri_disable); 1842 1867 1843 - void per_cpu_trap_init(bool is_boot_cpu) 1868 + /* configure STATUS register */ 1869 + static void configure_status(void) 1844 1870 { 1845 - unsigned int cpu = smp_processor_id(); 1846 - unsigned int status_set = ST0_CU0; 1847 - unsigned int hwrena = cpu_hwrena_impl_bits; 1848 - #ifdef CONFIG_MIPS_MT_SMTC 1849 - int secondaryTC = 0; 1850 - int bootTC = (cpu == 0); 1851 - 1852 - /* 1853 - * Only do per_cpu_trap_init() for first TC of Each VPE. 1854 - * Note that this hack assumes that the SMTC init code 1855 - * assigns TCs consecutively and in ascending order. 1856 - */ 1857 - 1858 - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && 1859 - ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) 1860 - secondaryTC = 1; 1861 - #endif /* CONFIG_MIPS_MT_SMTC */ 1862 - 1863 1871 /* 1864 1872 * Disable coprocessors and select 32-bit or 64-bit addressing 1865 1873 * and the 16/32 or 32/32 FPR register model. 
Reset the BEV 1866 1874 * flag that some firmware may have left set and the TS bit (for 1867 1875 * IP27). Set XX for ISA IV code to work. 1868 1876 */ 1877 + unsigned int status_set = ST0_CU0; 1869 1878 #ifdef CONFIG_64BIT 1870 1879 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 1871 1880 #endif ··· 1860 1901 1861 1902 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, 1862 1903 status_set); 1904 + } 1905 + 1906 + /* configure HWRENA register */ 1907 + static void configure_hwrena(void) 1908 + { 1909 + unsigned int hwrena = cpu_hwrena_impl_bits; 1863 1910 1864 1911 if (cpu_has_mips_r2) 1865 1912 hwrena |= 0x0000000f; ··· 1875 1910 1876 1911 if (hwrena) 1877 1912 write_c0_hwrena(hwrena); 1913 + } 1878 1914 1879 - #ifdef CONFIG_MIPS_MT_SMTC 1880 - if (!secondaryTC) { 1881 - #endif /* CONFIG_MIPS_MT_SMTC */ 1882 - 1915 + static void configure_exception_vector(void) 1916 + { 1883 1917 if (cpu_has_veic || cpu_has_vint) { 1884 1918 unsigned long sr = set_c0_status(ST0_BEV); 1885 1919 write_c0_ebase(ebase); ··· 1894 1930 } else 1895 1931 set_c0_cause(CAUSEF_IV); 1896 1932 } 1933 + } 1934 + 1935 + void per_cpu_trap_init(bool is_boot_cpu) 1936 + { 1937 + unsigned int cpu = smp_processor_id(); 1938 + 1939 + configure_status(); 1940 + configure_hwrena(); 1941 + 1942 + configure_exception_vector(); 1897 1943 1898 1944 /* 1899 1945 * Before R2 both interrupt numbers were fixed to 7, so on R2 only: ··· 1923 1949 cp0_perfcount_irq = -1; 1924 1950 } 1925 1951 1926 - #ifdef CONFIG_MIPS_MT_SMTC 1927 - } 1928 - #endif /* CONFIG_MIPS_MT_SMTC */ 1929 - 1930 1952 if (!cpu_data[cpu].asid_cache) 1931 1953 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1932 1954 ··· 1931 1961 BUG_ON(current->mm); 1932 1962 enter_lazy_tlb(&init_mm, current); 1933 1963 1934 - #ifdef CONFIG_MIPS_MT_SMTC 1935 - if (bootTC) { 1936 - #endif /* CONFIG_MIPS_MT_SMTC */ 1937 1964 /* Boot CPU's cache setup in setup_arch(). 
*/ 1938 1965 if (!is_boot_cpu) 1939 1966 cpu_cache_init(); 1940 1967 tlb_init(); 1941 - #ifdef CONFIG_MIPS_MT_SMTC 1942 - } else if (!secondaryTC) { 1943 - /* 1944 - * First TC in non-boot VPE must do subset of tlb_init() 1945 - * for MMU countrol registers. 1946 - */ 1947 - write_c0_pagemask(PM_DEFAULT_MASK); 1948 - write_c0_wired(0); 1949 - } 1950 - #endif /* CONFIG_MIPS_MT_SMTC */ 1951 1968 TLBMISS_HANDLER_SETUP(); 1952 1969 } 1953 1970 ··· 2142 2185 2143 2186 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ 2144 2187 } 2188 + 2189 + static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, 2190 + void *v) 2191 + { 2192 + switch (cmd) { 2193 + case CPU_PM_ENTER_FAILED: 2194 + case CPU_PM_EXIT: 2195 + configure_status(); 2196 + configure_hwrena(); 2197 + configure_exception_vector(); 2198 + 2199 + /* Restore register with CPU number for TLB handlers */ 2200 + TLBMISS_HANDLER_RESTORE(); 2201 + 2202 + break; 2203 + } 2204 + 2205 + return NOTIFY_OK; 2206 + } 2207 + 2208 + static struct notifier_block trap_pm_notifier_block = { 2209 + .notifier_call = trap_pm_notifier, 2210 + }; 2211 + 2212 + static int __init trap_pm_init(void) 2213 + { 2214 + return cpu_pm_register_notifier(&trap_pm_notifier_block); 2215 + } 2216 + arch_initcall(trap_pm_init);
+7 -9
arch/mips/kernel/vpe-mt.c
··· 127 127 clear_c0_mvpcontrol(MVPCONTROL_VPC); 128 128 129 129 /* 130 - * SMTC/SMVP kernels manage VPE enable independently, 131 - * but uniprocessor kernels need to turn it on, even 132 - * if that wasn't the pre-dvpe() state. 130 + * SMVP kernels manage VPE enable independently, but uniprocessor 131 + * kernels need to turn it on, even if that wasn't the pre-dvpe() state. 133 132 */ 134 133 #ifdef CONFIG_SMP 135 134 evpe(vpeflags); ··· 453 454 454 455 settc(tc); 455 456 456 - /* Any TC that is bound to VPE0 gets left as is - in 457 - * case we are running SMTC on VPE0. A TC that is bound 458 - * to any other VPE gets bound to VPE0, ideally I'd like 459 - * to make it homeless but it doesn't appear to let me 460 - * bind a TC to a non-existent VPE. Which is perfectly 461 - * reasonable. 457 + /* 458 + * A TC that is bound to any other VPE gets bound to 459 + * VPE0, ideally I'd like to make it homeless but it 460 + * doesn't appear to let me bind a TC to a non-existent 461 + * VPE. Which is perfectly reasonable. 462 462 * 463 463 * The (un)bound state is visible to an EJTAG probe so 464 464 * may notify GDB...
+2 -2
arch/mips/lantiq/irq.c
··· 61 61 /* we have a cascade of 8 irqs */ 62 62 #define MIPS_CPU_IRQ_CASCADE 8 63 63 64 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 64 + #ifdef CONFIG_MIPS_MT_SMP 65 65 int gic_present; 66 66 #endif 67 67 ··· 440 440 arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call); 441 441 #endif 442 442 443 - #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 443 + #ifndef CONFIG_MIPS_MT_SMP 444 444 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | 445 445 IE_IRQ3 | IE_IRQ4 | IE_IRQ5); 446 446 #else
+3 -5
arch/mips/lib/delay.c
··· 11 11 #include <linux/module.h> 12 12 #include <linux/param.h> 13 13 #include <linux/smp.h> 14 + #include <linux/stringify.h> 14 15 16 + #include <asm/asm.h> 15 17 #include <asm/compiler.h> 16 18 #include <asm/war.h> 17 19 ··· 29 27 " .set noreorder \n" 30 28 " .align 3 \n" 31 29 "1: bnez %0, 1b \n" 32 - #if BITS_PER_LONG == 32 33 - " subu %0, %1 \n" 34 - #else 35 - " dsubu %0, %1 \n" 36 - #endif 30 + " " __stringify(LONG_SUBU) " %0, %1 \n" 37 31 " .set reorder \n" 38 32 : "=r" (loops) 39 33 : GCC_DADDI_IMM_ASM() (1), "0" (loops));
+6 -40
arch/mips/lib/mips-atomic.c
··· 15 15 #include <linux/export.h> 16 16 #include <linux/stringify.h> 17 17 18 - #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) 18 + #ifndef CONFIG_CPU_MIPSR2 19 19 20 20 /* 21 21 * For cli() we have to insert nops to make sure that the new value ··· 42 42 __asm__ __volatile__( 43 43 " .set push \n" 44 44 " .set noat \n" 45 - #ifdef CONFIG_MIPS_MT_SMTC 46 - " mfc0 $1, $2, 1 \n" 47 - " ori $1, 0x400 \n" 48 - " .set noreorder \n" 49 - " mtc0 $1, $2, 1 \n" 50 - #elif defined(CONFIG_CPU_MIPSR2) 45 + #if defined(CONFIG_CPU_MIPSR2) 51 46 /* see irqflags.h for inline function */ 52 47 #else 53 48 " mfc0 $1,$12 \n" ··· 72 77 " .set push \n" 73 78 " .set reorder \n" 74 79 " .set noat \n" 75 - #ifdef CONFIG_MIPS_MT_SMTC 76 - " mfc0 %[flags], $2, 1 \n" 77 - " ori $1, %[flags], 0x400 \n" 78 - " .set noreorder \n" 79 - " mtc0 $1, $2, 1 \n" 80 - " andi %[flags], %[flags], 0x400 \n" 81 - #elif defined(CONFIG_CPU_MIPSR2) 80 + #if defined(CONFIG_CPU_MIPSR2) 82 81 /* see irqflags.h for inline function */ 83 82 #else 84 83 " mfc0 %[flags], $12 \n" ··· 97 108 { 98 109 unsigned long __tmp1; 99 110 100 - #ifdef CONFIG_MIPS_MT_SMTC 101 - /* 102 - * SMTC kernel needs to do a software replay of queued 103 - * IPIs, at the cost of branch and call overhead on each 104 - * local_irq_restore() 105 - */ 106 - if (unlikely(!(flags & 0x0400))) 107 - smtc_ipi_replay(); 108 - #endif 109 111 preempt_disable(); 110 112 111 113 __asm__ __volatile__( 112 114 " .set push \n" 113 115 " .set noreorder \n" 114 116 " .set noat \n" 115 - #ifdef CONFIG_MIPS_MT_SMTC 116 - " mfc0 $1, $2, 1 \n" 117 - " andi %[flags], 0x400 \n" 118 - " ori $1, 0x400 \n" 119 - " xori $1, 0x400 \n" 120 - " or %[flags], $1 \n" 121 - " mtc0 %[flags], $2, 1 \n" 122 - #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 117 + #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 123 118 /* see irqflags.h for inline function */ 124 119 #elif defined(CONFIG_CPU_MIPSR2) 125 120 /* see irqflags.h for inline 
function */ ··· 136 163 " .set push \n" 137 164 " .set noreorder \n" 138 165 " .set noat \n" 139 - #ifdef CONFIG_MIPS_MT_SMTC 140 - " mfc0 $1, $2, 1 \n" 141 - " andi %[flags], 0x400 \n" 142 - " ori $1, 0x400 \n" 143 - " xori $1, 0x400 \n" 144 - " or %[flags], $1 \n" 145 - " mtc0 %[flags], $2, 1 \n" 146 - #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 166 + #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 147 167 /* see irqflags.h for inline function */ 148 168 #elif defined(CONFIG_CPU_MIPSR2) 149 169 /* see irqflags.h for inline function */ ··· 158 192 } 159 193 EXPORT_SYMBOL(__arch_local_irq_restore); 160 194 161 - #endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ 195 + #endif /* !CONFIG_CPU_MIPSR2 */
+3 -2
arch/mips/loongson/Kconfig
··· 95 95 96 96 config CS5536_MFGPT 97 97 bool "CS5536 MFGPT Timer" 98 - depends on CS5536 98 + depends on CS5536 && !HIGH_RES_TIMERS 99 99 select MIPS_EXTERNAL_TIMER 100 100 help 101 - This option enables the mfgpt0 timer of AMD CS5536. 101 + This option enables the mfgpt0 timer of AMD CS5536. With this timer 102 + switched on you can not use high resolution timers. 102 103 103 104 If you want to enable the Loongson2 CPUFreq Driver, Please enable 104 105 this option at first, otherwise, You will get wrong system time.
-8
arch/mips/loongson/loongson-3/smp.c
··· 279 279 loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0)); 280 280 } 281 281 282 - /* 283 - * Final cleanup after all secondaries booted 284 - */ 285 - static void __init loongson3_cpus_done(void) 286 - { 287 - } 288 - 289 282 #ifdef CONFIG_HOTPLUG_CPU 290 283 291 284 static int loongson3_cpu_disable(void) ··· 425 432 .send_ipi_mask = loongson3_send_ipi_mask, 426 433 .init_secondary = loongson3_init_secondary, 427 434 .smp_finish = loongson3_smp_finish, 428 - .cpus_done = loongson3_cpus_done, 429 435 .boot_secondary = loongson3_boot_secondary, 430 436 .smp_setup = loongson3_smp_setup, 431 437 .prepare_cpus = loongson3_prepare_cpus,
+1
arch/mips/loongson1/Kconfig
··· 14 14 select SYS_SUPPORTS_32BIT_KERNEL 15 15 select SYS_SUPPORTS_LITTLE_ENDIAN 16 16 select SYS_SUPPORTS_HIGHMEM 17 + select SYS_SUPPORTS_MIPS16 17 18 select SYS_HAS_EARLY_PRINTK 18 19 select COMMON_CLK 19 20
+9 -7
arch/mips/math-emu/Makefile
··· 2 2 # Makefile for the Linux/MIPS kernel FPU emulation. 3 3 # 4 4 5 - obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \ 6 - ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \ 7 - dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \ 8 - dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \ 9 - sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \ 10 - sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \ 11 - dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o 5 + obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o dp_div.o dp_mul.o \ 6 + dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o dp_tint.o \ 7 + dp_fint.o dp_tlong.o dp_flong.o sp_div.o sp_mul.o sp_sub.o \ 8 + sp_add.o sp_fdp.o sp_cmp.o sp_simple.o sp_tint.o sp_fint.o \ 9 + sp_tlong.o sp_flong.o dsemul.o 10 + 11 + lib-y += ieee754d.o dp_sqrt.o sp_sqrt.o 12 + 13 + obj-$(CONFIG_DEBUG_FS) += me-debugfs.o
+342 -596
arch/mips/math-emu/cp1emu.c
··· 1 1 /* 2 - * cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator 2 + * cp1emu.c: a MIPS coprocessor 1 (FPU) instruction emulator 3 3 * 4 4 * MIPS floating point support 5 5 * Copyright (C) 1994-2000 Algorithmics Ltd. ··· 18 18 * 19 19 * You should have received a copy of the GNU General Public License along 20 20 * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 * 23 23 * A complete emulator for MIPS coprocessor 1 instructions. This is 24 24 * required for #float(switch) or #float(trap), where it catches all 25 25 * COP1 instructions via the "CoProcessor Unusable" exception. 26 26 * 27 27 * More surprisingly it is also required for #float(ieee), to help out 28 - * the hardware fpu at the boundaries of the IEEE-754 representation 28 + * the hardware FPU at the boundaries of the IEEE-754 representation 29 29 * (denormalised values, infinities, underflow, etc). It is made 30 30 * quite nasty because emulation of some non-COP1 instructions is 31 31 * required, e.g. in branch delay slots. 32 32 * 33 - * Note if you know that you won't have an fpu, then you'll get much 33 + * Note if you know that you won't have an FPU, then you'll get much 34 34 * better performance by compiling with -msoft-float! 
35 35 */ 36 36 #include <linux/sched.h> 37 - #include <linux/module.h> 38 37 #include <linux/debugfs.h> 38 + #include <linux/kconfig.h> 39 + #include <linux/percpu-defs.h> 39 40 #include <linux/perf_event.h> 40 41 42 + #include <asm/branch.h> 41 43 #include <asm/inst.h> 42 - #include <asm/bootinfo.h> 43 - #include <asm/processor.h> 44 44 #include <asm/ptrace.h> 45 45 #include <asm/signal.h> 46 - #include <asm/mipsregs.h> 46 + #include <asm/uaccess.h> 47 + 48 + #include <asm/processor.h> 47 49 #include <asm/fpu_emulator.h> 48 50 #include <asm/fpu.h> 49 - #include <asm/uaccess.h> 50 - #include <asm/branch.h> 51 51 52 52 #include "ieee754.h" 53 - 54 - /* Strap kernel emulator for full MIPS IV emulation */ 55 - 56 - #ifdef __mips 57 - #undef __mips 58 - #endif 59 - #define __mips 4 60 53 61 54 /* Function which emulates a floating point instruction. */ 62 55 63 56 static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *, 64 57 mips_instruction); 65 58 66 - #if __mips >= 4 && __mips != 32 67 59 static int fpux_emu(struct pt_regs *, 68 60 struct mips_fpu_struct *, mips_instruction, void *__user *); 69 - #endif 70 - 71 - /* Further private data for which no space exists in mips_fpu_struct */ 72 - 73 - #ifdef CONFIG_DEBUG_FS 74 - DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); 75 - #endif 76 61 77 62 /* Control registers */ 78 63 ··· 67 82 /* Determine rounding mode from the RM bits of the FCSR */ 68 83 #define modeindex(v) ((v) & FPU_CSR_RM) 69 84 70 - /* microMIPS bitfields */ 71 - #define MM_POOL32A_MINOR_MASK 0x3f 72 - #define MM_POOL32A_MINOR_SHIFT 0x6 73 - #define MM_MIPS32_COND_FC 0x30 74 - 75 - /* Convert Mips rounding mode (0..3) to IEEE library modes. */ 76 - static const unsigned char ieee_rm[4] = { 77 - [FPU_CSR_RN] = IEEE754_RN, 78 - [FPU_CSR_RZ] = IEEE754_RZ, 79 - [FPU_CSR_RU] = IEEE754_RU, 80 - [FPU_CSR_RD] = IEEE754_RD, 81 - }; 82 - /* Convert IEEE library modes to Mips rounding mode (0..3). 
*/ 83 - static const unsigned char mips_rm[4] = { 84 - [IEEE754_RN] = FPU_CSR_RN, 85 - [IEEE754_RZ] = FPU_CSR_RZ, 86 - [IEEE754_RD] = FPU_CSR_RD, 87 - [IEEE754_RU] = FPU_CSR_RU, 88 - }; 89 - 90 - #if __mips >= 4 91 85 /* convert condition code register number to csr bit */ 92 86 static const unsigned int fpucondbit[8] = { 93 87 FPU_CSR_COND0, ··· 78 114 FPU_CSR_COND6, 79 115 FPU_CSR_COND7 80 116 }; 81 - #endif 82 - 83 - /* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */ 84 - static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7}; 85 117 86 118 /* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */ 87 119 static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0}; ··· 426 466 return 0; 427 467 } 428 468 429 - int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 430 - unsigned long *contpc) 431 - { 432 - union mips_instruction insn = (union mips_instruction)dec_insn.insn; 433 - int bc_false = 0; 434 - unsigned int fcr31; 435 - unsigned int bit; 436 - 437 - if (!cpu_has_mmips) 438 - return 0; 439 - 440 - switch (insn.mm_i_format.opcode) { 441 - case mm_pool32a_op: 442 - if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == 443 - mm_pool32axf_op) { 444 - switch (insn.mm_i_format.simmediate >> 445 - MM_POOL32A_MINOR_SHIFT) { 446 - case mm_jalr_op: 447 - case mm_jalrhb_op: 448 - case mm_jalrs_op: 449 - case mm_jalrshb_op: 450 - if (insn.mm_i_format.rt != 0) /* Not mm_jr */ 451 - regs->regs[insn.mm_i_format.rt] = 452 - regs->cp0_epc + 453 - dec_insn.pc_inc + 454 - dec_insn.next_pc_inc; 455 - *contpc = regs->regs[insn.mm_i_format.rs]; 456 - return 1; 457 - } 458 - } 459 - break; 460 - case mm_pool32i_op: 461 - switch (insn.mm_i_format.rt) { 462 - case mm_bltzals_op: 463 - case mm_bltzal_op: 464 - regs->regs[31] = regs->cp0_epc + 465 - dec_insn.pc_inc + 466 - dec_insn.next_pc_inc; 467 - /* Fall through */ 468 - case mm_bltz_op: 469 - if ((long)regs->regs[insn.mm_i_format.rs] < 0) 
470 - *contpc = regs->cp0_epc + 471 - dec_insn.pc_inc + 472 - (insn.mm_i_format.simmediate << 1); 473 - else 474 - *contpc = regs->cp0_epc + 475 - dec_insn.pc_inc + 476 - dec_insn.next_pc_inc; 477 - return 1; 478 - case mm_bgezals_op: 479 - case mm_bgezal_op: 480 - regs->regs[31] = regs->cp0_epc + 481 - dec_insn.pc_inc + 482 - dec_insn.next_pc_inc; 483 - /* Fall through */ 484 - case mm_bgez_op: 485 - if ((long)regs->regs[insn.mm_i_format.rs] >= 0) 486 - *contpc = regs->cp0_epc + 487 - dec_insn.pc_inc + 488 - (insn.mm_i_format.simmediate << 1); 489 - else 490 - *contpc = regs->cp0_epc + 491 - dec_insn.pc_inc + 492 - dec_insn.next_pc_inc; 493 - return 1; 494 - case mm_blez_op: 495 - if ((long)regs->regs[insn.mm_i_format.rs] <= 0) 496 - *contpc = regs->cp0_epc + 497 - dec_insn.pc_inc + 498 - (insn.mm_i_format.simmediate << 1); 499 - else 500 - *contpc = regs->cp0_epc + 501 - dec_insn.pc_inc + 502 - dec_insn.next_pc_inc; 503 - return 1; 504 - case mm_bgtz_op: 505 - if ((long)regs->regs[insn.mm_i_format.rs] <= 0) 506 - *contpc = regs->cp0_epc + 507 - dec_insn.pc_inc + 508 - (insn.mm_i_format.simmediate << 1); 509 - else 510 - *contpc = regs->cp0_epc + 511 - dec_insn.pc_inc + 512 - dec_insn.next_pc_inc; 513 - return 1; 514 - case mm_bc2f_op: 515 - case mm_bc1f_op: 516 - bc_false = 1; 517 - /* Fall through */ 518 - case mm_bc2t_op: 519 - case mm_bc1t_op: 520 - preempt_disable(); 521 - if (is_fpu_owner()) 522 - asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); 523 - else 524 - fcr31 = current->thread.fpu.fcr31; 525 - preempt_enable(); 526 - 527 - if (bc_false) 528 - fcr31 = ~fcr31; 529 - 530 - bit = (insn.mm_i_format.rs >> 2); 531 - bit += (bit != 0); 532 - bit += 23; 533 - if (fcr31 & (1 << bit)) 534 - *contpc = regs->cp0_epc + 535 - dec_insn.pc_inc + 536 - (insn.mm_i_format.simmediate << 1); 537 - else 538 - *contpc = regs->cp0_epc + 539 - dec_insn.pc_inc + dec_insn.next_pc_inc; 540 - return 1; 541 - } 542 - break; 543 - case mm_pool16c_op: 544 - switch (insn.mm_i_format.rt) 
{ 545 - case mm_jalr16_op: 546 - case mm_jalrs16_op: 547 - regs->regs[31] = regs->cp0_epc + 548 - dec_insn.pc_inc + dec_insn.next_pc_inc; 549 - /* Fall through */ 550 - case mm_jr16_op: 551 - *contpc = regs->regs[insn.mm_i_format.rs]; 552 - return 1; 553 - } 554 - break; 555 - case mm_beqz16_op: 556 - if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0) 557 - *contpc = regs->cp0_epc + 558 - dec_insn.pc_inc + 559 - (insn.mm_b1_format.simmediate << 1); 560 - else 561 - *contpc = regs->cp0_epc + 562 - dec_insn.pc_inc + dec_insn.next_pc_inc; 563 - return 1; 564 - case mm_bnez16_op: 565 - if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0) 566 - *contpc = regs->cp0_epc + 567 - dec_insn.pc_inc + 568 - (insn.mm_b1_format.simmediate << 1); 569 - else 570 - *contpc = regs->cp0_epc + 571 - dec_insn.pc_inc + dec_insn.next_pc_inc; 572 - return 1; 573 - case mm_b16_op: 574 - *contpc = regs->cp0_epc + dec_insn.pc_inc + 575 - (insn.mm_b0_format.simmediate << 1); 576 - return 1; 577 - case mm_beq32_op: 578 - if (regs->regs[insn.mm_i_format.rs] == 579 - regs->regs[insn.mm_i_format.rt]) 580 - *contpc = regs->cp0_epc + 581 - dec_insn.pc_inc + 582 - (insn.mm_i_format.simmediate << 1); 583 - else 584 - *contpc = regs->cp0_epc + 585 - dec_insn.pc_inc + 586 - dec_insn.next_pc_inc; 587 - return 1; 588 - case mm_bne32_op: 589 - if (regs->regs[insn.mm_i_format.rs] != 590 - regs->regs[insn.mm_i_format.rt]) 591 - *contpc = regs->cp0_epc + 592 - dec_insn.pc_inc + 593 - (insn.mm_i_format.simmediate << 1); 594 - else 595 - *contpc = regs->cp0_epc + 596 - dec_insn.pc_inc + dec_insn.next_pc_inc; 597 - return 1; 598 - case mm_jalx32_op: 599 - regs->regs[31] = regs->cp0_epc + 600 - dec_insn.pc_inc + dec_insn.next_pc_inc; 601 - *contpc = regs->cp0_epc + dec_insn.pc_inc; 602 - *contpc >>= 28; 603 - *contpc <<= 28; 604 - *contpc |= (insn.j_format.target << 2); 605 - return 1; 606 - case mm_jals32_op: 607 - case mm_jal32_op: 608 - regs->regs[31] = regs->cp0_epc + 609 - 
dec_insn.pc_inc + dec_insn.next_pc_inc; 610 - /* Fall through */ 611 - case mm_j32_op: 612 - *contpc = regs->cp0_epc + dec_insn.pc_inc; 613 - *contpc >>= 27; 614 - *contpc <<= 27; 615 - *contpc |= (insn.j_format.target << 1); 616 - set_isa16_mode(*contpc); 617 - return 1; 618 - } 619 - return 0; 620 - } 621 - 622 469 /* 623 470 * Redundant with logic already in kernel/branch.c, 624 471 * embedded in compute_return_epc. At some point, ··· 584 817 if (insn.i_format.rs == bc_op) { 585 818 preempt_disable(); 586 819 if (is_fpu_owner()) 587 - asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); 820 + asm volatile( 821 + ".set push\n" 822 + "\t.set mips1\n" 823 + "\tcfc1\t%0,$31\n" 824 + "\t.set pop" : "=r" (fcr31)); 588 825 else 589 826 fcr31 = current->thread.fpu.fcr31; 590 827 preempt_enable(); ··· 638 867 */ 639 868 static inline int cop1_64bit(struct pt_regs *xcp) 640 869 { 641 - #if defined(CONFIG_64BIT) && !defined(CONFIG_MIPS32_O32) 642 - return 1; 643 - #elif defined(CONFIG_32BIT) && !defined(CONFIG_MIPS_O32_FP64_SUPPORT) 644 - return 0; 645 - #else 870 + if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32)) 871 + return 1; 872 + else if (config_enabled(CONFIG_32BIT) && 873 + !config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 874 + return 0; 875 + 646 876 return !test_thread_flag(TIF_32BIT_FPREGS); 647 - #endif 648 877 } 649 878 650 - #define SIFROMREG(si, x) do { \ 879 + #define SIFROMREG(si, x) \ 880 + do { \ 651 881 if (cop1_64bit(xcp)) \ 652 882 (si) = get_fpr32(&ctx->fpr[x], 0); \ 653 883 else \ 654 884 (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ 655 885 } while (0) 656 886 657 - #define SITOREG(si, x) do { \ 887 + #define SITOREG(si, x) \ 888 + do { \ 658 889 if (cop1_64bit(xcp)) { \ 659 890 unsigned i; \ 660 891 set_fpr32(&ctx->fpr[x], 0, si); \ ··· 669 896 670 897 #define SIFROMHREG(si, x) ((si) = get_fpr32(&ctx->fpr[x], 1)) 671 898 672 - #define SITOHREG(si, x) do { \ 899 + #define SITOHREG(si, x) \ 900 + do { \ 673 901 unsigned i; \ 674 902 
set_fpr32(&ctx->fpr[x], 1, si); \ 675 903 for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \ 676 904 set_fpr32(&ctx->fpr[x], i, 0); \ 677 905 } while (0) 678 906 679 - #define DIFROMREG(di, x) \ 907 + #define DIFROMREG(di, x) \ 680 908 ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0)) 681 909 682 - #define DITOREG(di, x) do { \ 910 + #define DITOREG(di, x) \ 911 + do { \ 683 912 unsigned fpr, i; \ 684 913 fpr = (x) & ~(cop1_64bit(xcp) == 0); \ 685 914 set_fpr64(&ctx->fpr[fpr], 0, di); \ ··· 702 927 static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 703 928 struct mm_decoded_insn dec_insn, void *__user *fault_addr) 704 929 { 705 - mips_instruction ir; 706 930 unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc; 707 - unsigned int cond; 708 - int pc_inc; 931 + unsigned int cond, cbit; 932 + mips_instruction ir; 933 + int likely, pc_inc; 934 + u32 __user *wva; 935 + u64 __user *dva; 936 + u32 value; 937 + u32 wval; 938 + u64 dval; 939 + int sig; 940 + 941 + /* 942 + * These are giving gcc a gentle hint about what to expect in 943 + * dec_inst in order to do better optimization. 
944 + */ 945 + if (!cpu_has_mmips && dec_insn.micro_mips_mode) 946 + unreachable(); 709 947 710 948 /* XXX NEC Vr54xx bug workaround */ 711 - if (xcp->cp0_cause & CAUSEF_BD) { 949 + if (delay_slot(xcp)) { 712 950 if (dec_insn.micro_mips_mode) { 713 951 if (!mm_isBranchInstr(xcp, dec_insn, &contpc)) 714 - xcp->cp0_cause &= ~CAUSEF_BD; 952 + clear_delay_slot(xcp); 715 953 } else { 716 954 if (!isBranchInstr(xcp, dec_insn, &contpc)) 717 - xcp->cp0_cause &= ~CAUSEF_BD; 955 + clear_delay_slot(xcp); 718 956 } 719 957 } 720 958 721 - if (xcp->cp0_cause & CAUSEF_BD) { 959 + if (delay_slot(xcp)) { 722 960 /* 723 961 * The instruction to be emulated is in a branch delay slot 724 962 * which means that we have to emulate the branch instruction ··· 773 985 return SIGILL; 774 986 } 775 987 776 - emul: 988 + emul: 777 989 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0); 778 990 MIPS_FPU_EMU_INC_STATS(emulated); 779 991 switch (MIPSInst_OPCODE(ir)) { 780 - case ldc1_op:{ 781 - u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] + 782 - MIPSInst_SIMM(ir)); 783 - u64 val; 784 - 992 + case ldc1_op: 993 + dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] + 994 + MIPSInst_SIMM(ir)); 785 995 MIPS_FPU_EMU_INC_STATS(loads); 786 996 787 - if (!access_ok(VERIFY_READ, va, sizeof(u64))) { 997 + if (!access_ok(VERIFY_READ, dva, sizeof(u64))) { 788 998 MIPS_FPU_EMU_INC_STATS(errors); 789 - *fault_addr = va; 999 + *fault_addr = dva; 790 1000 return SIGBUS; 791 1001 } 792 - if (__get_user(val, va)) { 1002 + if (__get_user(dval, dva)) { 793 1003 MIPS_FPU_EMU_INC_STATS(errors); 794 - *fault_addr = va; 1004 + *fault_addr = dva; 795 1005 return SIGSEGV; 796 1006 } 797 - DITOREG(val, MIPSInst_RT(ir)); 1007 + DITOREG(dval, MIPSInst_RT(ir)); 798 1008 break; 799 - } 800 1009 801 - case sdc1_op:{ 802 - u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] + 803 - MIPSInst_SIMM(ir)); 804 - u64 val; 805 - 1010 + case sdc1_op: 1011 + dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] + 
1012 + MIPSInst_SIMM(ir)); 806 1013 MIPS_FPU_EMU_INC_STATS(stores); 807 - DIFROMREG(val, MIPSInst_RT(ir)); 808 - if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { 1014 + DIFROMREG(dval, MIPSInst_RT(ir)); 1015 + if (!access_ok(VERIFY_WRITE, dva, sizeof(u64))) { 809 1016 MIPS_FPU_EMU_INC_STATS(errors); 810 - *fault_addr = va; 1017 + *fault_addr = dva; 811 1018 return SIGBUS; 812 1019 } 813 - if (__put_user(val, va)) { 1020 + if (__put_user(dval, dva)) { 814 1021 MIPS_FPU_EMU_INC_STATS(errors); 815 - *fault_addr = va; 1022 + *fault_addr = dva; 816 1023 return SIGSEGV; 817 1024 } 818 1025 break; 819 - } 820 1026 821 - case lwc1_op:{ 822 - u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] + 823 - MIPSInst_SIMM(ir)); 824 - u32 val; 825 - 1027 + case lwc1_op: 1028 + wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] + 1029 + MIPSInst_SIMM(ir)); 826 1030 MIPS_FPU_EMU_INC_STATS(loads); 827 - if (!access_ok(VERIFY_READ, va, sizeof(u32))) { 1031 + if (!access_ok(VERIFY_READ, wva, sizeof(u32))) { 828 1032 MIPS_FPU_EMU_INC_STATS(errors); 829 - *fault_addr = va; 1033 + *fault_addr = wva; 830 1034 return SIGBUS; 831 1035 } 832 - if (__get_user(val, va)) { 1036 + if (__get_user(wval, wva)) { 833 1037 MIPS_FPU_EMU_INC_STATS(errors); 834 - *fault_addr = va; 1038 + *fault_addr = wva; 835 1039 return SIGSEGV; 836 1040 } 837 - SITOREG(val, MIPSInst_RT(ir)); 1041 + SITOREG(wval, MIPSInst_RT(ir)); 838 1042 break; 839 - } 840 1043 841 - case swc1_op:{ 842 - u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] + 843 - MIPSInst_SIMM(ir)); 844 - u32 val; 845 - 1044 + case swc1_op: 1045 + wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] + 1046 + MIPSInst_SIMM(ir)); 846 1047 MIPS_FPU_EMU_INC_STATS(stores); 847 - SIFROMREG(val, MIPSInst_RT(ir)); 848 - if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { 1048 + SIFROMREG(wval, MIPSInst_RT(ir)); 1049 + if (!access_ok(VERIFY_WRITE, wva, sizeof(u32))) { 849 1050 MIPS_FPU_EMU_INC_STATS(errors); 850 - *fault_addr = va; 1051 + *fault_addr = 
wva; 851 1052 return SIGBUS; 852 1053 } 853 - if (__put_user(val, va)) { 1054 + if (__put_user(wval, wva)) { 854 1055 MIPS_FPU_EMU_INC_STATS(errors); 855 - *fault_addr = va; 1056 + *fault_addr = wva; 856 1057 return SIGSEGV; 857 1058 } 858 1059 break; 859 - } 860 1060 861 1061 case cop1_op: 862 1062 switch (MIPSInst_RS(ir)) { 863 - 864 - #if defined(__mips64) 865 1063 case dmfc_op: 1064 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 1065 + return SIGILL; 1066 + 866 1067 /* copregister fs -> gpr[rt] */ 867 1068 if (MIPSInst_RT(ir) != 0) { 868 1069 DIFROMREG(xcp->regs[MIPSInst_RT(ir)], ··· 860 1083 break; 861 1084 862 1085 case dmtc_op: 1086 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 1087 + return SIGILL; 1088 + 863 1089 /* copregister fs <- rt */ 864 1090 DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir)); 865 1091 break; 866 - #endif 867 1092 868 1093 case mfhc_op: 869 1094 if (!cpu_has_mips_r2) ··· 899 1120 SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir)); 900 1121 break; 901 1122 902 - case cfc_op:{ 1123 + case cfc_op: 903 1124 /* cop control register rd -> gpr[rt] */ 904 - u32 value; 905 - 906 1125 if (MIPSInst_RD(ir) == FPCREG_CSR) { 907 1126 value = ctx->fcr31; 908 - value = (value & ~FPU_CSR_RM) | 909 - mips_rm[modeindex(value)]; 910 - #ifdef CSRTRACE 911 - printk("%p gpr[%d]<-csr=%08x\n", 912 - (void *) (xcp->cp0_epc), 913 - MIPSInst_RT(ir), value); 914 - #endif 1127 + value = (value & ~FPU_CSR_RM) | modeindex(value); 1128 + pr_debug("%p gpr[%d]<-csr=%08x\n", 1129 + (void *) (xcp->cp0_epc), 1130 + MIPSInst_RT(ir), value); 915 1131 } 916 1132 else if (MIPSInst_RD(ir) == FPCREG_RID) 917 1133 value = 0; ··· 915 1141 if (MIPSInst_RT(ir)) 916 1142 xcp->regs[MIPSInst_RT(ir)] = value; 917 1143 break; 918 - } 919 1144 920 - case ctc_op:{ 1145 + case ctc_op: 921 1146 /* copregister rd <- rt */ 922 - u32 value; 923 - 924 1147 if (MIPSInst_RT(ir) == 0) 925 1148 value = 0; 926 1149 else ··· 926 1155 /* we only have one writable control reg 927 1156 */ 928 1157 
if (MIPSInst_RD(ir) == FPCREG_CSR) { 929 - #ifdef CSRTRACE 930 - printk("%p gpr[%d]->csr=%08x\n", 931 - (void *) (xcp->cp0_epc), 932 - MIPSInst_RT(ir), value); 933 - #endif 1158 + pr_debug("%p gpr[%d]->csr=%08x\n", 1159 + (void *) (xcp->cp0_epc), 1160 + MIPSInst_RT(ir), value); 934 1161 935 1162 /* 936 1163 * Don't write reserved bits, 937 1164 * and convert to ieee library modes 938 1165 */ 939 - ctx->fcr31 = (value & 940 - ~(FPU_CSR_RSVD | FPU_CSR_RM)) | 941 - ieee_rm[modeindex(value)]; 1166 + ctx->fcr31 = (value & ~(FPU_CSR_RSVD | FPU_CSR_RM)) | 1167 + modeindex(value); 942 1168 } 943 1169 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { 944 1170 return SIGFPE; 945 1171 } 946 1172 break; 947 - } 948 1173 949 - case bc_op:{ 950 - int likely = 0; 951 - 952 - if (xcp->cp0_cause & CAUSEF_BD) 1174 + case bc_op: 1175 + if (delay_slot(xcp)) 953 1176 return SIGILL; 954 1177 955 - #if __mips >= 4 956 - cond = ctx->fcr31 & fpucondbit[MIPSInst_RT(ir) >> 2]; 957 - #else 958 - cond = ctx->fcr31 & FPU_CSR_COND; 959 - #endif 1178 + if (cpu_has_mips_4_5_r) 1179 + cbit = fpucondbit[MIPSInst_RT(ir) >> 2]; 1180 + else 1181 + cbit = FPU_CSR_COND; 1182 + cond = ctx->fcr31 & cbit; 1183 + 1184 + likely = 0; 960 1185 switch (MIPSInst_RT(ir) & 3) { 961 1186 case bcfl_op: 962 1187 likely = 1; ··· 968 1201 return SIGILL; 969 1202 } 970 1203 971 - xcp->cp0_cause |= CAUSEF_BD; 1204 + set_delay_slot(xcp); 972 1205 if (cond) { 973 - /* branch taken: emulate dslot 974 - * instruction 1206 + /* 1207 + * Branch taken: emulate dslot instruction 975 1208 */ 976 1209 xcp->cp0_epc += dec_insn.pc_inc; 977 1210 ··· 1005 1238 1006 1239 switch (MIPSInst_OPCODE(ir)) { 1007 1240 case lwc1_op: 1241 + goto emul; 1242 + 1008 1243 case swc1_op: 1009 - #if (__mips >= 2 || defined(__mips64)) 1244 + goto emul; 1245 + 1010 1246 case ldc1_op: 1011 1247 case sdc1_op: 1012 - #endif 1013 - case cop1_op: 1014 - #if __mips >= 4 && __mips != 32 1015 - case cop1x_op: 1016 - #endif 1017 - /* its one of ours */ 1248 + 
if (cpu_has_mips_2_3_4_5 || 1249 + cpu_has_mips64) 1250 + goto emul; 1251 + 1252 + return SIGILL; 1018 1253 goto emul; 1019 - #if __mips >= 4 1254 + 1255 + case cop1_op: 1256 + goto emul; 1257 + 1258 + case cop1x_op: 1259 + if (cpu_has_mips_4_5 || cpu_has_mips64) 1260 + /* its one of ours */ 1261 + goto emul; 1262 + 1263 + return SIGILL; 1264 + 1020 1265 case spec_op: 1266 + if (!cpu_has_mips_4_5_r) 1267 + return SIGILL; 1268 + 1021 1269 if (MIPSInst_FUNC(ir) == movc_op) 1022 1270 goto emul; 1023 1271 break; 1024 - #endif 1025 1272 } 1026 1273 1027 1274 /* ··· 1043 1262 * instruction in the dslot 1044 1263 */ 1045 1264 return mips_dsemul(xcp, ir, contpc); 1046 - } 1047 - else { 1048 - /* branch not taken */ 1049 - if (likely) { 1265 + } else if (likely) { /* branch not taken */ 1050 1266 /* 1051 1267 * branch likely nullifies 1052 1268 * dslot if not taken ··· 1055 1277 * dslot as normal insn 1056 1278 */ 1057 1279 } 1058 - } 1059 1280 break; 1060 - } 1061 1281 1062 1282 default: 1063 1283 if (!(MIPSInst_RS(ir) & 0x10)) 1064 1284 return SIGILL; 1065 - { 1066 - int sig; 1067 1285 1068 - /* a real fpu computation instruction */ 1069 - if ((sig = fpu_emu(xcp, ctx, ir))) 1070 - return sig; 1071 - } 1286 + /* a real fpu computation instruction */ 1287 + if ((sig = fpu_emu(xcp, ctx, ir))) 1288 + return sig; 1072 1289 } 1073 1290 break; 1074 1291 1075 - #if __mips >= 4 && __mips != 32 1076 - case cop1x_op:{ 1077 - int sig = fpux_emu(xcp, ctx, ir, fault_addr); 1292 + case cop1x_op: 1293 + if (!cpu_has_mips_4_5 && !cpu_has_mips64) 1294 + return SIGILL; 1295 + 1296 + sig = fpux_emu(xcp, ctx, ir, fault_addr); 1078 1297 if (sig) 1079 1298 return sig; 1080 1299 break; 1081 - } 1082 - #endif 1083 1300 1084 - #if __mips >= 4 1085 1301 case spec_op: 1302 + if (!cpu_has_mips_4_5_r) 1303 + return SIGILL; 1304 + 1086 1305 if (MIPSInst_FUNC(ir) != movc_op) 1087 1306 return SIGILL; 1088 1307 cond = fpucondbit[MIPSInst_RT(ir) >> 2]; ··· 1087 1312 xcp->regs[MIPSInst_RD(ir)] = 1088 1313 
xcp->regs[MIPSInst_RS(ir)]; 1089 1314 break; 1090 - #endif 1091 - 1092 1315 default: 1093 1316 sigill: 1094 1317 return SIGILL; ··· 1094 1321 1095 1322 /* we did it !! */ 1096 1323 xcp->cp0_epc = contpc; 1097 - xcp->cp0_cause &= ~CAUSEF_BD; 1324 + clear_delay_slot(xcp); 1098 1325 1099 1326 return 0; 1100 1327 } ··· 1115 1342 }; 1116 1343 1117 1344 1118 - #if __mips >= 4 && __mips != 32 1119 - 1120 1345 /* 1121 1346 * Additional MIPS4 instructions 1122 1347 */ 1123 1348 1124 - #define DEF3OP(name, p, f1, f2, f3) \ 1125 - static ieee754##p fpemu_##p##_##name(ieee754##p r, ieee754##p s, \ 1126 - ieee754##p t) \ 1127 - { \ 1128 - struct _ieee754_csr ieee754_csr_save; \ 1129 - s = f1(s, t); \ 1130 - ieee754_csr_save = ieee754_csr; \ 1131 - s = f2(s, r); \ 1132 - ieee754_csr_save.cx |= ieee754_csr.cx; \ 1133 - ieee754_csr_save.sx |= ieee754_csr.sx; \ 1134 - s = f3(s); \ 1135 - ieee754_csr.cx |= ieee754_csr_save.cx; \ 1136 - ieee754_csr.sx |= ieee754_csr_save.sx; \ 1137 - return s; \ 1349 + #define DEF3OP(name, p, f1, f2, f3) \ 1350 + static union ieee754##p fpemu_##p##_##name(union ieee754##p r, \ 1351 + union ieee754##p s, union ieee754##p t) \ 1352 + { \ 1353 + struct _ieee754_csr ieee754_csr_save; \ 1354 + s = f1(s, t); \ 1355 + ieee754_csr_save = ieee754_csr; \ 1356 + s = f2(s, r); \ 1357 + ieee754_csr_save.cx |= ieee754_csr.cx; \ 1358 + ieee754_csr_save.sx |= ieee754_csr.sx; \ 1359 + s = f3(s); \ 1360 + ieee754_csr.cx |= ieee754_csr_save.cx; \ 1361 + ieee754_csr.sx |= ieee754_csr_save.sx; \ 1362 + return s; \ 1138 1363 } 1139 1364 1140 - static ieee754dp fpemu_dp_recip(ieee754dp d) 1365 + static union ieee754dp fpemu_dp_recip(union ieee754dp d) 1141 1366 { 1142 1367 return ieee754dp_div(ieee754dp_one(0), d); 1143 1368 } 1144 1369 1145 - static ieee754dp fpemu_dp_rsqrt(ieee754dp d) 1370 + static union ieee754dp fpemu_dp_rsqrt(union ieee754dp d) 1146 1371 { 1147 1372 return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d)); 1148 1373 } 1149 1374 1150 - static 
ieee754sp fpemu_sp_recip(ieee754sp s) 1375 + static union ieee754sp fpemu_sp_recip(union ieee754sp s) 1151 1376 { 1152 1377 return ieee754sp_div(ieee754sp_one(0), s); 1153 1378 } 1154 1379 1155 - static ieee754sp fpemu_sp_rsqrt(ieee754sp s) 1380 + static union ieee754sp fpemu_sp_rsqrt(union ieee754sp s) 1156 1381 { 1157 1382 return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s)); 1158 1383 } ··· 1174 1403 switch (MIPSInst_FMA_FFMT(ir)) { 1175 1404 case s_fmt:{ /* 0 */ 1176 1405 1177 - ieee754sp(*handler) (ieee754sp, ieee754sp, ieee754sp); 1178 - ieee754sp fd, fr, fs, ft; 1406 + union ieee754sp(*handler) (union ieee754sp, union ieee754sp, union ieee754sp); 1407 + union ieee754sp fd, fr, fs, ft; 1179 1408 u32 __user *va; 1180 1409 u32 val; 1181 1410 ··· 1238 1467 SPTOREG(fd, MIPSInst_FD(ir)); 1239 1468 1240 1469 copcsr: 1241 - if (ieee754_cxtest(IEEE754_INEXACT)) 1470 + if (ieee754_cxtest(IEEE754_INEXACT)) { 1471 + MIPS_FPU_EMU_INC_STATS(ieee754_inexact); 1242 1472 rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S; 1243 - if (ieee754_cxtest(IEEE754_UNDERFLOW)) 1473 + } 1474 + if (ieee754_cxtest(IEEE754_UNDERFLOW)) { 1475 + MIPS_FPU_EMU_INC_STATS(ieee754_underflow); 1244 1476 rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S; 1245 - if (ieee754_cxtest(IEEE754_OVERFLOW)) 1477 + } 1478 + if (ieee754_cxtest(IEEE754_OVERFLOW)) { 1479 + MIPS_FPU_EMU_INC_STATS(ieee754_overflow); 1246 1480 rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S; 1247 - if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) 1481 + } 1482 + if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) { 1483 + MIPS_FPU_EMU_INC_STATS(ieee754_invalidop); 1248 1484 rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S; 1485 + } 1249 1486 1250 1487 ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr; 1251 1488 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { 1252 - /*printk ("SIGFPE: fpu csr = %08x\n", 1489 + /*printk ("SIGFPE: FPU csr = %08x\n", 1253 1490 ctx->fcr31); */ 1254 1491 return SIGFPE; 1255 1492 } ··· 1271 1492 } 1272 1493 1273 1494 case d_fmt:{ /* 1 */ 1274 - 
ieee754dp(*handler) (ieee754dp, ieee754dp, ieee754dp); 1275 - ieee754dp fd, fr, fs, ft; 1495 + union ieee754dp(*handler) (union ieee754dp, union ieee754dp, union ieee754dp); 1496 + union ieee754dp fd, fr, fs, ft; 1276 1497 u64 __user *va; 1277 1498 u64 val; 1278 1499 ··· 1353 1574 1354 1575 return 0; 1355 1576 } 1356 - #endif 1357 1577 1358 1578 1359 1579 ··· 1364 1586 { 1365 1587 int rfmt; /* resulting format */ 1366 1588 unsigned rcsr = 0; /* resulting csr */ 1589 + unsigned int oldrm; 1590 + unsigned int cbit; 1367 1591 unsigned cond; 1368 1592 union { 1369 - ieee754dp d; 1370 - ieee754sp s; 1593 + union ieee754dp d; 1594 + union ieee754sp s; 1371 1595 int w; 1372 - #ifdef __mips64 1373 1596 s64 l; 1374 - #endif 1375 1597 } rv; /* resulting value */ 1598 + u64 bits; 1376 1599 1377 1600 MIPS_FPU_EMU_INC_STATS(cp1ops); 1378 1601 switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) { 1379 - case s_fmt:{ /* 0 */ 1602 + case s_fmt: { /* 0 */ 1380 1603 union { 1381 - ieee754sp(*b) (ieee754sp, ieee754sp); 1382 - ieee754sp(*u) (ieee754sp); 1604 + union ieee754sp(*b) (union ieee754sp, union ieee754sp); 1605 + union ieee754sp(*u) (union ieee754sp); 1383 1606 } handler; 1607 + union ieee754sp fs, ft; 1384 1608 1385 1609 switch (MIPSInst_FUNC(ir)) { 1386 1610 /* binary ops */ ··· 1400 1620 goto scopbop; 1401 1621 1402 1622 /* unary ops */ 1403 - #if __mips >= 2 || defined(__mips64) 1404 1623 case fsqrt_op: 1624 + if (!cpu_has_mips_4_5_r) 1625 + return SIGILL; 1626 + 1405 1627 handler.u = ieee754sp_sqrt; 1406 1628 goto scopuop; 1407 - #endif 1408 - #if __mips >= 4 && __mips != 32 1629 + 1630 + /* 1631 + * Note that on some MIPS IV implementations such as the 1632 + * R5000 and R8000 the FSQRT and FRECIP instructions do not 1633 + * achieve full IEEE-754 accuracy - however this emulator does. 
1634 + */ 1409 1635 case frsqrt_op: 1636 + if (!cpu_has_mips_4_5_r2) 1637 + return SIGILL; 1638 + 1410 1639 handler.u = fpemu_sp_rsqrt; 1411 1640 goto scopuop; 1641 + 1412 1642 case frecip_op: 1643 + if (!cpu_has_mips_4_5_r2) 1644 + return SIGILL; 1645 + 1413 1646 handler.u = fpemu_sp_recip; 1414 1647 goto scopuop; 1415 - #endif 1416 - #if __mips >= 4 1648 + 1417 1649 case fmovc_op: 1650 + if (!cpu_has_mips_4_5_r) 1651 + return SIGILL; 1652 + 1418 1653 cond = fpucondbit[MIPSInst_FT(ir) >> 2]; 1419 1654 if (((ctx->fcr31 & cond) != 0) != 1420 1655 ((MIPSInst_FT(ir) & 1) != 0)) 1421 1656 return 0; 1422 1657 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1423 1658 break; 1659 + 1424 1660 case fmovz_op: 1661 + if (!cpu_has_mips_4_5_r) 1662 + return SIGILL; 1663 + 1425 1664 if (xcp->regs[MIPSInst_FT(ir)] != 0) 1426 1665 return 0; 1427 1666 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1428 1667 break; 1668 + 1429 1669 case fmovn_op: 1670 + if (!cpu_has_mips_4_5_r) 1671 + return SIGILL; 1672 + 1430 1673 if (xcp->regs[MIPSInst_FT(ir)] == 0) 1431 1674 return 0; 1432 1675 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1433 1676 break; 1434 - #endif 1677 + 1435 1678 case fabs_op: 1436 1679 handler.u = ieee754sp_abs; 1437 1680 goto scopuop; 1681 + 1438 1682 case fneg_op: 1439 1683 handler.u = ieee754sp_neg; 1440 1684 goto scopuop; 1685 + 1441 1686 case fmov_op: 1442 1687 /* an easy one */ 1443 1688 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1444 1689 goto copcsr; 1445 1690 1446 1691 /* binary op on handler */ 1447 - scopbop: 1448 - { 1449 - ieee754sp fs, ft; 1692 + scopbop: 1693 + SPFROMREG(fs, MIPSInst_FS(ir)); 1694 + SPFROMREG(ft, MIPSInst_FT(ir)); 1450 1695 1451 - SPFROMREG(fs, MIPSInst_FS(ir)); 1452 - SPFROMREG(ft, MIPSInst_FT(ir)); 1453 - 1454 - rv.s = (*handler.b) (fs, ft); 1455 - goto copcsr; 1456 - } 1457 - scopuop: 1458 - { 1459 - ieee754sp fs; 1460 - 1461 - SPFROMREG(fs, MIPSInst_FS(ir)); 1462 - rv.s = (*handler.u) (fs); 1463 - goto copcsr; 1464 - } 1465 - copcsr: 1466 - if (ieee754_cxtest(IEEE754_INEXACT)) 1696 
+ rv.s = (*handler.b) (fs, ft); 1697 + goto copcsr; 1698 + scopuop: 1699 + SPFROMREG(fs, MIPSInst_FS(ir)); 1700 + rv.s = (*handler.u) (fs); 1701 + goto copcsr; 1702 + copcsr: 1703 + if (ieee754_cxtest(IEEE754_INEXACT)) { 1704 + MIPS_FPU_EMU_INC_STATS(ieee754_inexact); 1467 1705 rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S; 1468 - if (ieee754_cxtest(IEEE754_UNDERFLOW)) 1706 + } 1707 + if (ieee754_cxtest(IEEE754_UNDERFLOW)) { 1708 + MIPS_FPU_EMU_INC_STATS(ieee754_underflow); 1469 1709 rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S; 1470 - if (ieee754_cxtest(IEEE754_OVERFLOW)) 1710 + } 1711 + if (ieee754_cxtest(IEEE754_OVERFLOW)) { 1712 + MIPS_FPU_EMU_INC_STATS(ieee754_overflow); 1471 1713 rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S; 1472 - if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) 1714 + } 1715 + if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) { 1716 + MIPS_FPU_EMU_INC_STATS(ieee754_zerodiv); 1473 1717 rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S; 1474 - if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) 1718 + } 1719 + if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) { 1720 + MIPS_FPU_EMU_INC_STATS(ieee754_invalidop); 1475 1721 rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S; 1722 + } 1476 1723 break; 1477 1724 1478 1725 /* unary conv ops */ 1479 1726 case fcvts_op: 1480 1727 return SIGILL; /* not defined */ 1481 - case fcvtd_op:{ 1482 - ieee754sp fs; 1483 1728 1729 + case fcvtd_op: 1484 1730 SPFROMREG(fs, MIPSInst_FS(ir)); 1485 1731 rv.d = ieee754dp_fsp(fs); 1486 1732 rfmt = d_fmt; 1487 1733 goto copcsr; 1488 - } 1489 - case fcvtw_op:{ 1490 - ieee754sp fs; 1491 1734 1735 + case fcvtw_op: 1492 1736 SPFROMREG(fs, MIPSInst_FS(ir)); 1493 1737 rv.w = ieee754sp_tint(fs); 1494 1738 rfmt = w_fmt; 1495 1739 goto copcsr; 1496 - } 1497 1740 1498 - #if __mips >= 2 || defined(__mips64) 1499 1741 case fround_op: 1500 1742 case ftrunc_op: 1501 1743 case fceil_op: 1502 - case ffloor_op:{ 1503 - unsigned int oldrm = ieee754_csr.rm; 1504 - ieee754sp fs; 1744 + case ffloor_op: 1745 + if (!cpu_has_mips_2_3_4_5 && !cpu_has_mips64) 1746 + 
return SIGILL; 1505 1747 1748 + oldrm = ieee754_csr.rm; 1506 1749 SPFROMREG(fs, MIPSInst_FS(ir)); 1507 - ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1750 + ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir)); 1508 1751 rv.w = ieee754sp_tint(fs); 1509 1752 ieee754_csr.rm = oldrm; 1510 1753 rfmt = w_fmt; 1511 1754 goto copcsr; 1512 - } 1513 - #endif /* __mips >= 2 */ 1514 1755 1515 - #if defined(__mips64) 1516 - case fcvtl_op:{ 1517 - ieee754sp fs; 1756 + case fcvtl_op: 1757 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 1758 + return SIGILL; 1518 1759 1519 1760 SPFROMREG(fs, MIPSInst_FS(ir)); 1520 1761 rv.l = ieee754sp_tlong(fs); 1521 1762 rfmt = l_fmt; 1522 1763 goto copcsr; 1523 - } 1524 1764 1525 1765 case froundl_op: 1526 1766 case ftruncl_op: 1527 1767 case fceill_op: 1528 - case ffloorl_op:{ 1529 - unsigned int oldrm = ieee754_csr.rm; 1530 - ieee754sp fs; 1768 + case ffloorl_op: 1769 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 1770 + return SIGILL; 1531 1771 1772 + oldrm = ieee754_csr.rm; 1532 1773 SPFROMREG(fs, MIPSInst_FS(ir)); 1533 - ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1774 + ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir)); 1534 1775 rv.l = ieee754sp_tlong(fs); 1535 1776 ieee754_csr.rm = oldrm; 1536 1777 rfmt = l_fmt; 1537 1778 goto copcsr; 1538 - } 1539 - #endif /* defined(__mips64) */ 1540 1779 1541 1780 default: 1542 1781 if (MIPSInst_FUNC(ir) >= fcmp_op) { 1543 1782 unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op; 1544 - ieee754sp fs, ft; 1783 + union ieee754sp fs, ft; 1545 1784 1546 1785 SPFROMREG(fs, MIPSInst_FS(ir)); 1547 1786 SPFROMREG(ft, MIPSInst_FT(ir)); ··· 1573 1774 else 1574 1775 goto copcsr; 1575 1776 1576 - } 1577 - else { 1777 + } else 1578 1778 return SIGILL; 1579 - } 1580 1779 break; 1581 1780 } 1582 1781 break; 1583 1782 } 1584 1783 1585 - case d_fmt:{ 1784 + case d_fmt: { 1785 + union ieee754dp fs, ft; 1586 1786 union { 1587 - ieee754dp(*b) (ieee754dp, ieee754dp); 1588 - ieee754dp(*u) (ieee754dp); 1787 + union 
ieee754dp(*b) (union ieee754dp, union ieee754dp); 1788 + union ieee754dp(*u) (union ieee754dp); 1589 1789 } handler; 1590 1790 1591 1791 switch (MIPSInst_FUNC(ir)) { ··· 1603 1805 goto dcopbop; 1604 1806 1605 1807 /* unary ops */ 1606 - #if __mips >= 2 || defined(__mips64) 1607 1808 case fsqrt_op: 1809 + if (!cpu_has_mips_2_3_4_5_r) 1810 + return SIGILL; 1811 + 1608 1812 handler.u = ieee754dp_sqrt; 1609 1813 goto dcopuop; 1610 - #endif 1611 - #if __mips >= 4 && __mips != 32 1814 + /* 1815 + * Note that on some MIPS IV implementations such as the 1816 + * R5000 and R8000 the FSQRT and FRECIP instructions do not 1817 + * achieve full IEEE-754 accuracy - however this emulator does. 1818 + */ 1612 1819 case frsqrt_op: 1820 + if (!cpu_has_mips_4_5_r2) 1821 + return SIGILL; 1822 + 1613 1823 handler.u = fpemu_dp_rsqrt; 1614 1824 goto dcopuop; 1615 1825 case frecip_op: 1826 + if (!cpu_has_mips_4_5_r2) 1827 + return SIGILL; 1828 + 1616 1829 handler.u = fpemu_dp_recip; 1617 1830 goto dcopuop; 1618 - #endif 1619 - #if __mips >= 4 1620 1831 case fmovc_op: 1832 + if (!cpu_has_mips_4_5_r) 1833 + return SIGILL; 1834 + 1621 1835 cond = fpucondbit[MIPSInst_FT(ir) >> 2]; 1622 1836 if (((ctx->fcr31 & cond) != 0) != 1623 1837 ((MIPSInst_FT(ir) & 1) != 0)) ··· 1637 1827 DPFROMREG(rv.d, MIPSInst_FS(ir)); 1638 1828 break; 1639 1829 case fmovz_op: 1830 + if (!cpu_has_mips_4_5_r) 1831 + return SIGILL; 1832 + 1640 1833 if (xcp->regs[MIPSInst_FT(ir)] != 0) 1641 1834 return 0; 1642 1835 DPFROMREG(rv.d, MIPSInst_FS(ir)); 1643 1836 break; 1644 1837 case fmovn_op: 1838 + if (!cpu_has_mips_4_5_r) 1839 + return SIGILL; 1840 + 1645 1841 if (xcp->regs[MIPSInst_FT(ir)] == 0) 1646 1842 return 0; 1647 1843 DPFROMREG(rv.d, MIPSInst_FS(ir)); 1648 1844 break; 1649 - #endif 1650 1845 case fabs_op: 1651 1846 handler.u = ieee754dp_abs; 1652 1847 goto dcopuop; ··· 1666 1851 goto copcsr; 1667 1852 1668 1853 /* binary op on handler */ 1669 - dcopbop:{ 1670 - ieee754dp fs, ft; 1854 + dcopbop: 1855 + 
DPFROMREG(fs, MIPSInst_FS(ir)); 1856 + DPFROMREG(ft, MIPSInst_FT(ir)); 1671 1857 1672 - DPFROMREG(fs, MIPSInst_FS(ir)); 1673 - DPFROMREG(ft, MIPSInst_FT(ir)); 1858 + rv.d = (*handler.b) (fs, ft); 1859 + goto copcsr; 1860 + dcopuop: 1861 + DPFROMREG(fs, MIPSInst_FS(ir)); 1862 + rv.d = (*handler.u) (fs); 1863 + goto copcsr; 1674 1864 1675 - rv.d = (*handler.b) (fs, ft); 1676 - goto copcsr; 1677 - } 1678 - dcopuop:{ 1679 - ieee754dp fs; 1680 - 1681 - DPFROMREG(fs, MIPSInst_FS(ir)); 1682 - rv.d = (*handler.u) (fs); 1683 - goto copcsr; 1684 - } 1685 - 1686 - /* unary conv ops */ 1687 - case fcvts_op:{ 1688 - ieee754dp fs; 1689 - 1865 + /* 1866 + * unary conv ops 1867 + */ 1868 + case fcvts_op: 1690 1869 DPFROMREG(fs, MIPSInst_FS(ir)); 1691 1870 rv.s = ieee754sp_fdp(fs); 1692 1871 rfmt = s_fmt; 1693 1872 goto copcsr; 1694 - } 1873 + 1695 1874 case fcvtd_op: 1696 1875 return SIGILL; /* not defined */ 1697 1876 1698 - case fcvtw_op:{ 1699 - ieee754dp fs; 1700 - 1877 + case fcvtw_op: 1701 1878 DPFROMREG(fs, MIPSInst_FS(ir)); 1702 1879 rv.w = ieee754dp_tint(fs); /* wrong */ 1703 1880 rfmt = w_fmt; 1704 1881 goto copcsr; 1705 - } 1706 1882 1707 - #if __mips >= 2 || defined(__mips64) 1708 1883 case fround_op: 1709 1884 case ftrunc_op: 1710 1885 case fceil_op: 1711 - case ffloor_op:{ 1712 - unsigned int oldrm = ieee754_csr.rm; 1713 - ieee754dp fs; 1886 + case ffloor_op: 1887 + if (!cpu_has_mips_2_3_4_5_r) 1888 + return SIGILL; 1714 1889 1890 + oldrm = ieee754_csr.rm; 1715 1891 DPFROMREG(fs, MIPSInst_FS(ir)); 1716 - ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1892 + ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir)); 1717 1893 rv.w = ieee754dp_tint(fs); 1718 1894 ieee754_csr.rm = oldrm; 1719 1895 rfmt = w_fmt; 1720 1896 goto copcsr; 1721 - } 1722 - #endif 1723 1897 1724 - #if defined(__mips64) 1725 - case fcvtl_op:{ 1726 - ieee754dp fs; 1898 + case fcvtl_op: 1899 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 1900 + return SIGILL; 1727 1901 1728 1902 DPFROMREG(fs, 
MIPSInst_FS(ir)); 1729 1903 rv.l = ieee754dp_tlong(fs); 1730 1904 rfmt = l_fmt; 1731 1905 goto copcsr; 1732 - } 1733 1906 1734 1907 case froundl_op: 1735 1908 case ftruncl_op: 1736 1909 case fceill_op: 1737 - case ffloorl_op:{ 1738 - unsigned int oldrm = ieee754_csr.rm; 1739 - ieee754dp fs; 1910 + case ffloorl_op: 1911 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 1912 + return SIGILL; 1740 1913 1914 + oldrm = ieee754_csr.rm; 1741 1915 DPFROMREG(fs, MIPSInst_FS(ir)); 1742 - ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1916 + ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir)); 1743 1917 rv.l = ieee754dp_tlong(fs); 1744 1918 ieee754_csr.rm = oldrm; 1745 1919 rfmt = l_fmt; 1746 1920 goto copcsr; 1747 - } 1748 - #endif /* __mips >= 3 */ 1749 1921 1750 1922 default: 1751 1923 if (MIPSInst_FUNC(ir) >= fcmp_op) { 1752 1924 unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op; 1753 - ieee754dp fs, ft; 1925 + union ieee754dp fs, ft; 1754 1926 1755 1927 DPFROMREG(fs, MIPSInst_FS(ir)); 1756 1928 DPFROMREG(ft, MIPSInst_FT(ir)); ··· 1759 1957 break; 1760 1958 } 1761 1959 break; 1762 - } 1763 1960 1764 - case w_fmt:{ 1765 - ieee754sp fs; 1766 - 1961 + case w_fmt: 1767 1962 switch (MIPSInst_FUNC(ir)) { 1768 1963 case fcvts_op: 1769 1964 /* convert word to single precision real */ ··· 1780 1981 break; 1781 1982 } 1782 1983 1783 - #if defined(__mips64) 1784 - case l_fmt:{ 1785 - u64 bits; 1984 + case l_fmt: 1985 + 1986 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 1987 + return SIGILL; 1988 + 1786 1989 DIFROMREG(bits, MIPSInst_FS(ir)); 1787 1990 1788 1991 switch (MIPSInst_FUNC(ir)) { ··· 1802 2001 return SIGILL; 1803 2002 } 1804 2003 break; 1805 - } 1806 - #endif 1807 2004 1808 2005 default: 1809 2006 return SIGILL; ··· 1816 2017 */ 1817 2018 ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr; 1818 2019 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { 1819 - /*printk ("SIGFPE: fpu csr = %08x\n",ctx->fcr31); */ 2020 + /*printk ("SIGFPE: FPU csr = %08x\n",ctx->fcr31); */ 1820 2021 
return SIGFPE; 1821 2022 } 1822 2023 ··· 1824 2025 * Now we can safely write the result back to the register file. 1825 2026 */ 1826 2027 switch (rfmt) { 1827 - case -1:{ 1828 - #if __mips >= 4 1829 - cond = fpucondbit[MIPSInst_FD(ir) >> 2]; 1830 - #else 1831 - cond = FPU_CSR_COND; 1832 - #endif 1833 - if (rv.w) 1834 - ctx->fcr31 |= cond; 2028 + case -1: 2029 + 2030 + if (cpu_has_mips_4_5_r) 2031 + cbit = fpucondbit[MIPSInst_RT(ir) >> 2]; 1835 2032 else 1836 - ctx->fcr31 &= ~cond; 2033 + cbit = FPU_CSR_COND; 2034 + if (rv.w) 2035 + ctx->fcr31 |= cbit; 2036 + else 2037 + ctx->fcr31 &= ~cbit; 1837 2038 break; 1838 - } 2039 + 1839 2040 case d_fmt: 1840 2041 DPTOREG(rv.d, MIPSInst_FD(ir)); 1841 2042 break; ··· 1845 2046 case w_fmt: 1846 2047 SITOREG(rv.w, MIPSInst_FD(ir)); 1847 2048 break; 1848 - #if defined(__mips64) 1849 2049 case l_fmt: 2050 + if (!cpu_has_mips_3_4_5 && !cpu_has_mips64) 2051 + return SIGILL; 2052 + 1850 2053 DITOREG(rv.l, MIPSInst_FD(ir)); 1851 2054 break; 1852 - #endif 1853 2055 default: 1854 2056 return SIGILL; 1855 2057 } ··· 1938 2138 * ieee754_csr. But ieee754_csr.rm is ieee 1939 2139 * library modes. 
(not mips rounding mode) 1940 2140 */ 1941 - /* convert to ieee library modes */ 1942 - ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; 1943 2141 sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr); 1944 - /* revert to mips rounding mode */ 1945 - ieee754_csr.rm = mips_rm[ieee754_csr.rm]; 1946 2142 } 1947 2143 1948 2144 if (has_fpu) ··· 1951 2155 1952 2156 /* SIGILL indicates a non-fpu instruction */ 1953 2157 if (sig == SIGILL && xcp->cp0_epc != oldepc) 1954 - /* but if epc has advanced, then ignore it */ 2158 + /* but if EPC has advanced, then ignore it */ 1955 2159 sig = 0; 1956 2160 1957 2161 return sig; 1958 2162 } 1959 - 1960 - #ifdef CONFIG_DEBUG_FS 1961 - 1962 - static int fpuemu_stat_get(void *data, u64 *val) 1963 - { 1964 - int cpu; 1965 - unsigned long sum = 0; 1966 - for_each_online_cpu(cpu) { 1967 - struct mips_fpu_emulator_stats *ps; 1968 - local_t *pv; 1969 - ps = &per_cpu(fpuemustats, cpu); 1970 - pv = (void *)ps + (unsigned long)data; 1971 - sum += local_read(pv); 1972 - } 1973 - *val = sum; 1974 - return 0; 1975 - } 1976 - DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n"); 1977 - 1978 - extern struct dentry *mips_debugfs_dir; 1979 - static int __init debugfs_fpuemu(void) 1980 - { 1981 - struct dentry *d, *dir; 1982 - 1983 - if (!mips_debugfs_dir) 1984 - return -ENODEV; 1985 - dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir); 1986 - if (!dir) 1987 - return -ENOMEM; 1988 - 1989 - #define FPU_STAT_CREATE(M) \ 1990 - do { \ 1991 - d = debugfs_create_file(#M , S_IRUGO, dir, \ 1992 - (void *)offsetof(struct mips_fpu_emulator_stats, M), \ 1993 - &fops_fpuemu_stat); \ 1994 - if (!d) \ 1995 - return -ENOMEM; \ 1996 - } while (0) 1997 - 1998 - FPU_STAT_CREATE(emulated); 1999 - FPU_STAT_CREATE(loads); 2000 - FPU_STAT_CREATE(stores); 2001 - FPU_STAT_CREATE(cp1ops); 2002 - FPU_STAT_CREATE(cp1xops); 2003 - FPU_STAT_CREATE(errors); 2004 - 2005 - return 0; 2006 - } 2007 - __initcall(debugfs_fpuemu); 2008 - #endif
+36 -35
arch/mips/math-emu/dp_add.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 22 - * 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 23 22 */ 24 - 25 23 26 24 #include "ieee754dp.h" 27 25 28 - ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y) 26 + union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y) 29 27 { 28 + int s; 29 + 30 30 COMPXDP; 31 31 COMPYDP; 32 32 33 33 EXPLODEXDP; 34 34 EXPLODEYDP; 35 35 36 - CLEARCX; 36 + ieee754_clearcx(); 37 37 38 38 FLUSHXDP; 39 39 FLUSHYDP; ··· 48 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 49 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 50 54 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 51 - SETCX(IEEE754_INVALID_OPERATION); 52 - return ieee754dp_nanxcpt(ieee754dp_indef(), "add", x, y); 55 + ieee754_setcx(IEEE754_INVALID_OPERATION); 56 + return ieee754dp_nanxcpt(ieee754dp_indef()); 53 57 54 58 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 55 59 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 65 69 return x; 66 70 67 71 68 - /* Infinity handling 69 - */ 70 - 72 + /* 73 + * Infinity handling 74 + */ 71 75 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 72 76 if (xs == ys) 73 77 return x; 74 - SETCX(IEEE754_INVALID_OPERATION); 75 - return ieee754dp_xcpt(ieee754dp_indef(), "add", x, y); 78 + ieee754_setcx(IEEE754_INVALID_OPERATION); 79 + return 
ieee754dp_indef(); 76 80 77 81 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 78 82 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ··· 84 88 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 85 89 return x; 86 90 87 - /* Zero handling 88 - */ 89 - 91 + /* 92 + * Zero handling 93 + */ 90 94 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 91 95 if (xs == ys) 92 96 return x; 93 97 else 94 - return ieee754dp_zero(ieee754_csr.rm == 95 - IEEE754_RD); 98 + return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); 96 99 97 100 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 98 101 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ··· 120 125 assert(xm & DP_HIDDEN_BIT); 121 126 assert(ym & DP_HIDDEN_BIT); 122 127 123 - /* provide guard,round and stick bit space */ 128 + /* 129 + * Provide guard,round and stick bit space. 130 + */ 124 131 xm <<= 3; 125 132 ym <<= 3; 126 133 127 134 if (xe > ye) { 128 - /* have to shift y fraction right to align 135 + /* 136 + * Have to shift y fraction right to align. 129 137 */ 130 - int s = xe - ye; 138 + s = xe - ye; 131 139 ym = XDPSRS(ym, s); 132 140 ye += s; 133 141 } else if (ye > xe) { 134 - /* have to shift x fraction right to align 142 + /* 143 + * Have to shift x fraction right to align. 135 144 */ 136 - int s = ye - xe; 145 + s = ye - xe; 137 146 xm = XDPSRS(xm, s); 138 147 xe += s; 139 148 } ··· 145 146 assert(xe <= DP_EMAX); 146 147 147 148 if (xs == ys) { 148 - /* generate 28 bit result of adding two 27 bit numbers 149 - * leaving result in xm,xs,xe 149 + /* 150 + * Generate 28 bit result of adding two 27 bit numbers 151 + * leaving result in xm, xs and xe. 
150 152 */ 151 153 xm = xm + ym; 152 154 xe = xe; 153 155 xs = xs; 154 156 155 - if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ 157 + if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */ 156 158 xm = XDPSRS1(xm); 157 159 xe++; 158 160 } ··· 168 168 xs = ys; 169 169 } 170 170 if (xm == 0) 171 - return ieee754dp_zero(ieee754_csr.rm == 172 - IEEE754_RD); 171 + return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); 173 172 174 - /* normalize to rounding precision */ 175 - while ((xm >> (DP_MBITS + 3)) == 0) { 173 + /* 174 + * Normalize to rounding precision. 175 + */ 176 + while ((xm >> (DP_FBITS + 3)) == 0) { 176 177 xm <<= 1; 177 178 xe--; 178 179 } 179 - 180 180 } 181 - DPNORMRET2(xs, xe, xm, "add", x, y); 181 + 182 + return ieee754dp_format(xs, xe, xm); 182 183 }
+11 -13
arch/mips/math-emu/dp_cmp.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - int ieee754dp_cmp(ieee754dp x, ieee754dp y, int cmp, int sig) 26 + int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cmp, int sig) 28 27 { 28 + s64 vx; 29 + s64 vy; 30 + 29 31 COMPXDP; 30 32 COMPYDP; 31 33 ··· 33 35 EXPLODEYDP; 34 36 FLUSHXDP; 35 37 FLUSHYDP; 36 - CLEARCX; /* Even clear inexact flag here */ 38 + ieee754_clearcx(); /* Even clear inexact flag here */ 37 39 38 40 if (ieee754dp_isnan(x) || ieee754dp_isnan(y)) { 39 41 if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN) 40 - SETCX(IEEE754_INVALID_OPERATION); 42 + ieee754_setcx(IEEE754_INVALID_OPERATION); 41 43 if (cmp & IEEE754_CUN) 42 44 return 1; 43 45 if (cmp & (IEEE754_CLT | IEEE754_CGT)) { 44 - if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION)) 45 - return ieee754si_xcpt(0, "fcmpf", x); 46 + if (sig && ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) 47 + return 0; 46 48 } 47 49 return 0; 48 50 } else { 49 - s64 vx = x.bits; 50 - s64 vy = y.bits; 51 + vx = x.bits; 52 + vy = y.bits; 51 53 52 54 if (vx < 0) 53 55 vx = -vx ^ DP_SIGN_BIT;
+47 -49
arch/mips/math-emu/dp_div.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y) 26 + union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y) 28 27 { 28 + u64 rm; 29 + int re; 30 + u64 bm; 31 + 29 32 COMPXDP; 30 33 COMPYDP; 31 34 32 35 EXPLODEXDP; 33 36 EXPLODEYDP; 34 37 35 - CLEARCX; 38 + ieee754_clearcx(); 36 39 37 40 FLUSHXDP; 38 41 FLUSHYDP; ··· 50 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 51 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 52 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 53 - SETCX(IEEE754_INVALID_OPERATION); 54 - return ieee754dp_nanxcpt(ieee754dp_indef(), "div", x, y); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754dp_nanxcpt(ieee754dp_indef()); 55 56 56 57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 57 58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 67 68 return x; 68 69 69 70 70 - /* Infinity handling 71 - */ 72 - 71 + /* 72 + * Infinity handling 73 + */ 73 74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 74 - SETCX(IEEE754_INVALID_OPERATION); 75 - return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); 75 + ieee754_setcx(IEEE754_INVALID_OPERATION); 76 + return 
ieee754dp_indef(); 76 77 77 78 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 78 79 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ··· 84 85 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 85 86 return ieee754dp_inf(xs ^ ys); 86 87 87 - /* Zero handling 88 - */ 89 - 88 + /* 89 + * Zero handling 90 + */ 90 91 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 91 - SETCX(IEEE754_INVALID_OPERATION); 92 - return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); 92 + ieee754_setcx(IEEE754_INVALID_OPERATION); 93 + return ieee754dp_indef(); 93 94 94 95 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 95 96 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 96 - SETCX(IEEE754_ZERO_DIVIDE); 97 - return ieee754dp_xcpt(ieee754dp_inf(xs ^ ys), "div", x, y); 97 + ieee754_setcx(IEEE754_ZERO_DIVIDE); 98 + return ieee754dp_inf(xs ^ ys); 98 99 99 100 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): 100 101 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ··· 121 122 xm <<= 3; 122 123 ym <<= 3; 123 124 124 - { 125 - /* now the dirty work */ 125 + /* now the dirty work */ 126 126 127 - u64 rm = 0; 128 - int re = xe - ye; 129 - u64 bm; 127 + rm = 0; 128 + re = xe - ye; 130 129 131 - for (bm = DP_MBIT(DP_MBITS + 2); bm; bm >>= 1) { 132 - if (xm >= ym) { 133 - xm -= ym; 134 - rm |= bm; 135 - if (xm == 0) 136 - break; 137 - } 138 - xm <<= 1; 130 + for (bm = DP_MBIT(DP_FBITS + 2); bm; bm >>= 1) { 131 + if (xm >= ym) { 132 + xm -= ym; 133 + rm |= bm; 134 + if (xm == 0) 135 + break; 139 136 } 140 - rm <<= 1; 141 - if (xm) 142 - rm |= 1; /* have remainder, set sticky */ 143 - 144 - assert(rm); 145 - 146 - /* normalise rm to rounding precision ? 147 - */ 148 - while ((rm >> (DP_MBITS + 3)) == 0) { 149 - rm <<= 1; 150 - re--; 151 - } 152 - 153 - DPNORMRET2(xs == ys ? 
0 : 1, re, rm, "div", x, y); 137 + xm <<= 1; 154 138 } 139 + 140 + rm <<= 1; 141 + if (xm) 142 + rm |= 1; /* have remainder, set sticky */ 143 + 144 + assert(rm); 145 + 146 + /* 147 + * Normalise rm to rounding precision ? 148 + */ 149 + while ((rm >> (DP_FBITS + 3)) == 0) { 150 + rm <<= 1; 151 + re--; 152 + } 153 + 154 + return ieee754dp_format(xs == ys ? 0 : 1, re, rm); 155 155 }
+5 -28
arch/mips/math-emu/dp_fint.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - ieee754dp ieee754dp_fint(int x) 26 + union ieee754dp ieee754dp_fint(int x) 28 27 { 29 28 u64 xm; 30 29 int xe; 31 30 int xs; 32 31 33 - CLEARCX; 32 + ieee754_clearcx(); 34 33 35 34 if (x == 0) 36 35 return ieee754dp_zero(0); ··· 46 51 xm = x; 47 52 } 48 53 49 - #if 1 50 54 /* normalize - result can never be inexact or overflow */ 51 - xe = DP_MBITS; 52 - while ((xm >> DP_MBITS) == 0) { 55 + xe = DP_FBITS; 56 + while ((xm >> DP_FBITS) == 0) { 53 57 xm <<= 1; 54 58 xe--; 55 59 } 56 60 return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 57 - #else 58 - /* normalize */ 59 - xe = DP_MBITS + 3; 60 - while ((xm >> (DP_MBITS + 3)) == 0) { 61 - xm <<= 1; 62 - xe--; 63 - } 64 - DPNORMRET1(xs, xe, xm, "fint", x); 65 - #endif 66 - } 67 - 68 - ieee754dp ieee754dp_funs(unsigned int u) 69 - { 70 - if ((int) u < 0) 71 - return ieee754dp_add(ieee754dp_1e31(), 72 - ieee754dp_fint(u & ~(1 << 31))); 73 - return ieee754dp_fint(u); 74 61 }
+8 -20
arch/mips/math-emu/dp_flong.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - ieee754dp ieee754dp_flong(s64 x) 26 + union ieee754dp ieee754dp_flong(s64 x) 28 27 { 29 28 u64 xm; 30 29 int xe; 31 30 int xs; 32 31 33 - CLEARCX; 32 + ieee754_clearcx(); 34 33 35 34 if (x == 0) 36 35 return ieee754dp_zero(0); ··· 47 52 } 48 53 49 54 /* normalize */ 50 - xe = DP_MBITS + 3; 51 - if (xm >> (DP_MBITS + 1 + 3)) { 55 + xe = DP_FBITS + 3; 56 + if (xm >> (DP_FBITS + 1 + 3)) { 52 57 /* shunt out overflow bits */ 53 - while (xm >> (DP_MBITS + 1 + 3)) { 58 + while (xm >> (DP_FBITS + 1 + 3)) { 54 59 XDPSRSX1(); 55 60 } 56 61 } else { 57 62 /* normalize in grs extended double precision */ 58 - while ((xm >> (DP_MBITS + 3)) == 0) { 63 + while ((xm >> (DP_FBITS + 3)) == 0) { 59 64 xm <<= 1; 60 65 xe--; 61 66 } 62 67 } 63 - DPNORMRET1(xs, xe, xm, "dp_flong", x); 64 - } 65 68 66 - ieee754dp ieee754dp_fulong(u64 u) 67 - { 68 - if ((s64) u < 0) 69 - return ieee754dp_add(ieee754dp_1e63(), 70 - ieee754dp_flong(u & ~(1ULL << 63))); 71 - return ieee754dp_flong(u); 69 + return ieee754dp_format(xs, xe, xm); 72 70 }
-52
arch/mips/math-emu/dp_frexp.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * double precision: common utilities 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754dp.h" 28 - 29 - /* close to ieeep754dp_logb 30 - */ 31 - ieee754dp ieee754dp_frexp(ieee754dp x, int *eptr) 32 - { 33 - COMPXDP; 34 - CLEARCX; 35 - EXPLODEXDP; 36 - 37 - switch (xc) { 38 - case IEEE754_CLASS_SNAN: 39 - case IEEE754_CLASS_QNAN: 40 - case IEEE754_CLASS_INF: 41 - case IEEE754_CLASS_ZERO: 42 - *eptr = 0; 43 - return x; 44 - case IEEE754_CLASS_DNORM: 45 - DPDNORMX; 46 - break; 47 - case IEEE754_CLASS_NORM: 48 - break; 49 - } 50 - *eptr = xe + 1; 51 - return builddp(xs, -1 + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 52 - }
+16 -16
arch/mips/math-emu/dp_fsp.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 23 24 - 24 + #include "ieee754sp.h" 25 25 #include "ieee754dp.h" 26 26 27 - ieee754dp ieee754dp_fsp(ieee754sp x) 27 + union ieee754dp ieee754dp_fsp(union ieee754sp x) 28 28 { 29 29 COMPXSP; 30 30 31 31 EXPLODEXSP; 32 32 33 - CLEARCX; 33 + ieee754_clearcx(); 34 34 35 35 FLUSHXSP; 36 36 37 37 switch (xc) { 38 38 case IEEE754_CLASS_SNAN: 39 - SETCX(IEEE754_INVALID_OPERATION); 40 - return ieee754dp_nanxcpt(ieee754dp_indef(), "fsp"); 39 + ieee754_setcx(IEEE754_INVALID_OPERATION); 40 + return ieee754dp_nanxcpt(ieee754dp_indef()); 41 + 41 42 case IEEE754_CLASS_QNAN: 42 43 return ieee754dp_nanxcpt(builddp(xs, 43 44 DP_EMAX + 1 + DP_EBIAS, 44 45 ((u64) xm 45 - << (DP_MBITS - 46 - SP_MBITS))), "fsp", 47 - x); 46 + << (DP_FBITS - 47 + SP_FBITS)))); 48 48 case IEEE754_CLASS_INF: 49 49 return ieee754dp_inf(xs); 50 + 50 51 case IEEE754_CLASS_ZERO: 51 52 return ieee754dp_zero(xs); 53 + 52 54 case IEEE754_CLASS_DNORM: 53 55 /* normalize */ 54 - while ((xm >> SP_MBITS) == 0) { 56 + while ((xm >> SP_FBITS) == 0) { 55 57 xm <<= 1; 56 58 xe--; 57 59 } 58 60 break; 61 + 59 62 case IEEE754_CLASS_NORM: 60 63 break; 61 64 } 62 65 63 - /* CAN'T possibly overflow,underflow, or need rounding 66 + /* 67 + * Can't possibly 
overflow,underflow, or need rounding 64 68 */ 65 69 66 70 /* drop the hidden bit */ 67 71 xm &= ~SP_HIDDEN_BIT; 68 72 69 73 return builddp(xs, xe + DP_EBIAS, 70 - (u64) xm << (DP_MBITS - SP_MBITS)); 74 + (u64) xm << (DP_FBITS - SP_FBITS)); 71 75 }
-53
arch/mips/math-emu/dp_logb.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * double precision: common utilities 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754dp.h" 28 - 29 - ieee754dp ieee754dp_logb(ieee754dp x) 30 - { 31 - COMPXDP; 32 - 33 - CLEARCX; 34 - 35 - EXPLODEXDP; 36 - 37 - switch (xc) { 38 - case IEEE754_CLASS_SNAN: 39 - return ieee754dp_nanxcpt(x, "logb", x); 40 - case IEEE754_CLASS_QNAN: 41 - return x; 42 - case IEEE754_CLASS_INF: 43 - return ieee754dp_inf(0); 44 - case IEEE754_CLASS_ZERO: 45 - return ieee754dp_inf(1); 46 - case IEEE754_CLASS_DNORM: 47 - DPDNORMX; 48 - break; 49 - case IEEE754_CLASS_NORM: 50 - break; 51 - } 52 - return ieee754dp_fint(xe); 53 - }
-79
arch/mips/math-emu/dp_modf.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * double precision: common utilities 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754dp.h" 28 - 29 - /* modf function is always exact for a finite number 30 - */ 31 - ieee754dp ieee754dp_modf(ieee754dp x, ieee754dp *ip) 32 - { 33 - COMPXDP; 34 - 35 - CLEARCX; 36 - 37 - EXPLODEXDP; 38 - 39 - switch (xc) { 40 - case IEEE754_CLASS_SNAN: 41 - case IEEE754_CLASS_QNAN: 42 - case IEEE754_CLASS_INF: 43 - case IEEE754_CLASS_ZERO: 44 - *ip = x; 45 - return x; 46 - case IEEE754_CLASS_DNORM: 47 - /* far to small */ 48 - *ip = ieee754dp_zero(xs); 49 - return x; 50 - case IEEE754_CLASS_NORM: 51 - break; 52 - } 53 - if (xe < 0) { 54 - *ip = ieee754dp_zero(xs); 55 - return x; 56 - } 57 - if (xe >= DP_MBITS) { 58 - *ip = x; 59 - return ieee754dp_zero(xs); 60 - } 61 - /* generate ipart mantissa by clearing bottom bits 62 - */ 63 - *ip = builddp(xs, xe + DP_EBIAS, 64 - ((xm >> (DP_MBITS - xe)) << (DP_MBITS - xe)) & 65 - ~DP_HIDDEN_BIT); 66 - 67 - /* generate fpart mantissa by clearing top bits 68 - 
* and normalizing (must be able to normalize) 69 - */ 70 - xm = (xm << (64 - (DP_MBITS - xe))) >> (64 - (DP_MBITS - xe)); 71 - if (xm == 0) 72 - return ieee754dp_zero(xs); 73 - 74 - while ((xm >> DP_MBITS) == 0) { 75 - xm <<= 1; 76 - xe--; 77 - } 78 - return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 79 - }
+66 -69
arch/mips/math-emu/dp_mul.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y) 26 + union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y) 28 27 { 28 + int re; 29 + int rs; 30 + u64 rm; 31 + unsigned lxm; 32 + unsigned hxm; 33 + unsigned lym; 34 + unsigned hym; 35 + u64 lrm; 36 + u64 hrm; 37 + u64 t; 38 + u64 at; 39 + 29 40 COMPXDP; 30 41 COMPYDP; 31 42 32 43 EXPLODEXDP; 33 44 EXPLODEYDP; 34 45 35 - CLEARCX; 46 + ieee754_clearcx(); 36 47 37 48 FLUSHXDP; 38 49 FLUSHYDP; ··· 58 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 59 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 60 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 61 - SETCX(IEEE754_INVALID_OPERATION); 62 - return ieee754dp_nanxcpt(ieee754dp_indef(), "mul", x, y); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754dp_nanxcpt(ieee754dp_indef()); 63 56 64 57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 65 58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 75 68 return x; 76 69 77 70 78 - /* Infinity handling */ 79 - 71 + /* 72 + * Infinity handling 73 + */ 80 74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): 81 75 case CLPAIR(IEEE754_CLASS_ZERO, 
IEEE754_CLASS_INF): 82 - SETCX(IEEE754_INVALID_OPERATION); 83 - return ieee754dp_xcpt(ieee754dp_indef(), "mul", x, y); 76 + ieee754_setcx(IEEE754_INVALID_OPERATION); 77 + return ieee754dp_indef(); 84 78 85 79 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 86 80 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ··· 115 107 /* rm = xm * ym, re = xe+ye basically */ 116 108 assert(xm & DP_HIDDEN_BIT); 117 109 assert(ym & DP_HIDDEN_BIT); 118 - { 119 - int re = xe + ye; 120 - int rs = xs ^ ys; 121 - u64 rm; 122 110 123 - /* shunt to top of word */ 124 - xm <<= 64 - (DP_MBITS + 1); 125 - ym <<= 64 - (DP_MBITS + 1); 111 + re = xe + ye; 112 + rs = xs ^ ys; 126 113 127 - /* multiply 32bits xm,ym to give high 32bits rm with stickness 128 - */ 114 + /* shunt to top of word */ 115 + xm <<= 64 - (DP_FBITS + 1); 116 + ym <<= 64 - (DP_FBITS + 1); 129 117 130 - /* 32 * 32 => 64 */ 118 + /* 119 + * Multiply 32 bits xm, ym to give high 32 bits rm with stickness. 120 + */ 121 + 122 + /* 32 * 32 => 64 */ 131 123 #define DPXMULT(x, y) ((u64)(x) * (u64)y) 132 124 133 - { 134 - unsigned lxm = xm; 135 - unsigned hxm = xm >> 32; 136 - unsigned lym = ym; 137 - unsigned hym = ym >> 32; 138 - u64 lrm; 139 - u64 hrm; 125 + lxm = xm; 126 + hxm = xm >> 32; 127 + lym = ym; 128 + hym = ym >> 32; 140 129 141 - lrm = DPXMULT(lxm, lym); 142 - hrm = DPXMULT(hxm, hym); 130 + lrm = DPXMULT(lxm, lym); 131 + hrm = DPXMULT(hxm, hym); 143 132 144 - { 145 - u64 t = DPXMULT(lxm, hym); 146 - { 147 - u64 at = 148 - lrm + (t << 32); 149 - hrm += at < lrm; 150 - lrm = at; 151 - } 152 - hrm = hrm + (t >> 32); 153 - } 133 + t = DPXMULT(lxm, hym); 154 134 155 - { 156 - u64 t = DPXMULT(hxm, lym); 157 - { 158 - u64 at = 159 - lrm + (t << 32); 160 - hrm += at < lrm; 161 - lrm = at; 162 - } 163 - hrm = hrm + (t >> 32); 164 - } 165 - rm = hrm | (lrm != 0); 166 - } 135 + at = lrm + (t << 32); 136 + hrm += at < lrm; 137 + lrm = at; 167 138 168 - /* 169 - * sticky shift down to normal rounding precision 170 - */ 171 
- if ((s64) rm < 0) { 172 - rm = 173 - (rm >> (64 - (DP_MBITS + 1 + 3))) | 174 - ((rm << (DP_MBITS + 1 + 3)) != 0); 139 + hrm = hrm + (t >> 32); 140 + 141 + t = DPXMULT(hxm, lym); 142 + 143 + at = lrm + (t << 32); 144 + hrm += at < lrm; 145 + lrm = at; 146 + 147 + hrm = hrm + (t >> 32); 148 + 149 + rm = hrm | (lrm != 0); 150 + 151 + /* 152 + * Sticky shift down to normal rounding precision. 153 + */ 154 + if ((s64) rm < 0) { 155 + rm = (rm >> (64 - (DP_FBITS + 1 + 3))) | 156 + ((rm << (DP_FBITS + 1 + 3)) != 0); 175 157 re++; 176 - } else { 177 - rm = 178 - (rm >> (64 - (DP_MBITS + 1 + 3 + 1))) | 179 - ((rm << (DP_MBITS + 1 + 3 + 1)) != 0); 180 - } 181 - assert(rm & (DP_HIDDEN_BIT << 3)); 182 - DPNORMRET2(rs, re, rm, "mul", x, y); 158 + } else { 159 + rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) | 160 + ((rm << (DP_FBITS + 1 + 3 + 1)) != 0); 183 161 } 162 + assert(rm & (DP_HIDDEN_BIT << 3)); 163 + 164 + return ieee754dp_format(rs, re, rm); 184 165 }
-57
arch/mips/math-emu/dp_scalb.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * double precision: common utilities 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754dp.h" 28 - 29 - ieee754dp ieee754dp_scalb(ieee754dp x, int n) 30 - { 31 - COMPXDP; 32 - 33 - CLEARCX; 34 - 35 - EXPLODEXDP; 36 - 37 - switch (xc) { 38 - case IEEE754_CLASS_SNAN: 39 - return ieee754dp_nanxcpt(x, "scalb", x, n); 40 - case IEEE754_CLASS_QNAN: 41 - case IEEE754_CLASS_INF: 42 - case IEEE754_CLASS_ZERO: 43 - return x; 44 - case IEEE754_CLASS_DNORM: 45 - DPDNORMX; 46 - break; 47 - case IEEE754_CLASS_NORM: 48 - break; 49 - } 50 - DPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n); 51 - } 52 - 53 - 54 - ieee754dp ieee754dp_ldexp(ieee754dp x, int n) 55 - { 56 - return ieee754dp_scalb(x, n); 57 - }
+10 -29
arch/mips/math-emu/dp_simple.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - int ieee754dp_finite(ieee754dp x) 28 - { 29 - return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS; 30 - } 31 - 32 - ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y) 33 - { 34 - CLEARCX; 35 - DPSIGN(x) = DPSIGN(y); 36 - return x; 37 - } 38 - 39 - 40 - ieee754dp ieee754dp_neg(ieee754dp x) 26 + union ieee754dp ieee754dp_neg(union ieee754dp x) 41 27 { 42 28 COMPXDP; 43 29 44 30 EXPLODEXDP; 45 - CLEARCX; 31 + ieee754_clearcx(); 46 32 FLUSHXDP; 47 33 48 34 /* ··· 37 55 DPSIGN(x) ^= 1; 38 56 39 57 if (xc == IEEE754_CLASS_SNAN) { 40 - ieee754dp y = ieee754dp_indef(); 41 - SETCX(IEEE754_INVALID_OPERATION); 58 + union ieee754dp y = ieee754dp_indef(); 59 + ieee754_setcx(IEEE754_INVALID_OPERATION); 42 60 DPSIGN(y) = DPSIGN(x); 43 - return ieee754dp_nanxcpt(y, "neg"); 61 + return ieee754dp_nanxcpt(y); 44 62 } 45 63 46 64 return x; 47 65 } 48 66 49 - 50 - ieee754dp ieee754dp_abs(ieee754dp x) 67 + union ieee754dp ieee754dp_abs(union ieee754dp x) 51 68 { 52 69 COMPXDP; 53 70 54 71 EXPLODEXDP; 55 - CLEARCX; 72 + ieee754_clearcx(); 56 73 FLUSHXDP; 57 74 58 75 /* Clear sign ALWAYS, irrespective of NaN */ 59 76 DPSIGN(x) = 0; 60 77 61 78 if (xc == IEEE754_CLASS_SNAN) { 62 - 
SETCX(IEEE754_INVALID_OPERATION); 63 - return ieee754dp_nanxcpt(ieee754dp_indef(), "abs"); 79 + ieee754_setcx(IEEE754_INVALID_OPERATION); 80 + return ieee754dp_nanxcpt(ieee754dp_indef()); 64 81 } 65 82 66 83 return x;
+23 -23
arch/mips/math-emu/dp_sqrt.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 ··· 29 34 1742, 661, 130 30 35 }; 31 36 32 - ieee754dp ieee754dp_sqrt(ieee754dp x) 37 + union ieee754dp ieee754dp_sqrt(union ieee754dp x) 33 38 { 34 39 struct _ieee754_csr oldcsr; 35 - ieee754dp y, z, t; 40 + union ieee754dp y, z, t; 36 41 unsigned scalx, yh; 37 42 COMPXDP; 38 43 39 44 EXPLODEXDP; 40 - CLEARCX; 45 + ieee754_clearcx(); 41 46 FLUSHXDP; 42 47 43 48 /* x == INF or NAN? 
*/ 44 49 switch (xc) { 45 50 case IEEE754_CLASS_QNAN: 46 51 /* sqrt(Nan) = Nan */ 47 - return ieee754dp_nanxcpt(x, "sqrt"); 52 + return ieee754dp_nanxcpt(x); 53 + 48 54 case IEEE754_CLASS_SNAN: 49 - SETCX(IEEE754_INVALID_OPERATION); 50 - return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); 55 + ieee754_setcx(IEEE754_INVALID_OPERATION); 56 + return ieee754dp_nanxcpt(ieee754dp_indef()); 57 + 51 58 case IEEE754_CLASS_ZERO: 52 59 /* sqrt(0) = 0 */ 53 60 return x; 61 + 54 62 case IEEE754_CLASS_INF: 55 63 if (xs) { 56 64 /* sqrt(-Inf) = Nan */ 57 - SETCX(IEEE754_INVALID_OPERATION); 58 - return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); 65 + ieee754_setcx(IEEE754_INVALID_OPERATION); 66 + return ieee754dp_nanxcpt(ieee754dp_indef()); 59 67 } 60 68 /* sqrt(+Inf) = Inf */ 61 69 return x; 70 + 62 71 case IEEE754_CLASS_DNORM: 63 72 DPDNORMX; 64 73 /* fall through */ 74 + 65 75 case IEEE754_CLASS_NORM: 66 76 if (xs) { 67 77 /* sqrt(-x) = Nan */ 68 - SETCX(IEEE754_INVALID_OPERATION); 69 - return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); 78 + ieee754_setcx(IEEE754_INVALID_OPERATION); 79 + return ieee754dp_nanxcpt(ieee754dp_indef()); 70 80 } 71 81 break; 72 82 } ··· 80 80 oldcsr = ieee754_csr; 81 81 ieee754_csr.mx &= ~IEEE754_INEXACT; 82 82 ieee754_csr.sx &= ~IEEE754_INEXACT; 83 - ieee754_csr.rm = IEEE754_RN; 83 + ieee754_csr.rm = FPU_CSR_RN; 84 84 85 85 /* adjust exponent to prevent overflow */ 86 86 scalx = 0; ··· 110 110 /* triple to almost 56 sig. 
bits: y ~= sqrt(x) to within 1 ulp */ 111 111 /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */ 112 112 z = t = ieee754dp_mul(y, y); 113 - t.parts.bexp += 0x001; 113 + t.bexp += 0x001; 114 114 t = ieee754dp_add(t, z); 115 115 z = ieee754dp_mul(ieee754dp_sub(x, z), y); 116 116 117 117 /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */ 118 118 t = ieee754dp_div(z, ieee754dp_add(t, x)); 119 - t.parts.bexp += 0x001; 119 + t.bexp += 0x001; 120 120 y = ieee754dp_add(y, t); 121 121 122 122 /* twiddle last bit to force y correctly rounded */ 123 123 124 124 /* set RZ, clear INEX flag */ 125 - ieee754_csr.rm = IEEE754_RZ; 125 + ieee754_csr.rm = FPU_CSR_RZ; 126 126 ieee754_csr.sx &= ~IEEE754_INEXACT; 127 127 128 128 /* t=x/y; ...chopped quotient, possibly inexact */ ··· 139 139 oldcsr.sx |= IEEE754_INEXACT; 140 140 141 141 switch (oldcsr.rm) { 142 - case IEEE754_RP: 142 + case FPU_CSR_RU: 143 143 y.bits += 1; 144 144 /* drop through */ 145 - case IEEE754_RN: 145 + case FPU_CSR_RN: 146 146 t.bits += 1; 147 147 break; 148 148 } ··· 155 155 } 156 156 157 157 /* py[n0]=py[n0]+scalx; ...scale back y */ 158 - y.parts.bexp += scalx; 158 + y.bexp += scalx; 159 159 160 160 /* restore rounding mode, possibly set inexact */ 161 161 ieee754_csr = oldcsr;
+27 -28
arch/mips/math-emu/dp_sub.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y) 26 + union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y) 28 27 { 28 + int s; 29 + 29 30 COMPXDP; 30 31 COMPYDP; 31 32 32 33 EXPLODEXDP; 33 34 EXPLODEYDP; 34 35 35 - CLEARCX; 36 + ieee754_clearcx(); 36 37 37 38 FLUSHXDP; 38 39 FLUSHYDP; ··· 48 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 49 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 50 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 51 - SETCX(IEEE754_INVALID_OPERATION); 52 - return ieee754dp_nanxcpt(ieee754dp_indef(), "sub", x, y); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754dp_nanxcpt(ieee754dp_indef()); 53 56 54 57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 55 58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 65 68 return x; 66 69 67 70 68 - /* Infinity handling 69 - */ 70 - 71 + /* 72 + * Infinity handling 73 + */ 71 74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 72 75 if (xs != ys) 73 76 return x; 74 - SETCX(IEEE754_INVALID_OPERATION); 75 - return ieee754dp_xcpt(ieee754dp_indef(), "sub", x, y); 77 + ieee754_setcx(IEEE754_INVALID_OPERATION); 78 + return 
ieee754dp_indef(); 76 79 77 80 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 78 81 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ··· 84 87 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 85 88 return x; 86 89 87 - /* Zero handling 88 - */ 89 - 90 + /* 91 + * Zero handling 92 + */ 90 93 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 91 94 if (xs != ys) 92 95 return x; 93 96 else 94 - return ieee754dp_zero(ieee754_csr.rm == 95 - IEEE754_RD); 97 + return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); 96 98 97 99 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 98 100 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ··· 132 136 ym <<= 3; 133 137 134 138 if (xe > ye) { 135 - /* have to shift y fraction right to align 139 + /* 140 + * Have to shift y fraction right to align 136 141 */ 137 - int s = xe - ye; 142 + s = xe - ye; 138 143 ym = XDPSRS(ym, s); 139 144 ye += s; 140 145 } else if (ye > xe) { 141 - /* have to shift x fraction right to align 146 + /* 147 + * Have to shift x fraction right to align 142 148 */ 143 - int s = ye - xe; 149 + s = ye - xe; 144 150 xm = XDPSRS(xm, s); 145 151 xe += s; 146 152 } ··· 156 158 xe = xe; 157 159 xs = xs; 158 160 159 - if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ 161 + if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */ 160 162 xm = XDPSRS1(xm); /* shift preserving sticky */ 161 163 xe++; 162 164 } ··· 171 173 xs = ys; 172 174 } 173 175 if (xm == 0) { 174 - if (ieee754_csr.rm == IEEE754_RD) 176 + if (ieee754_csr.rm == FPU_CSR_RD) 175 177 return ieee754dp_zero(1); /* round negative inf. => sign = -1 */ 176 178 else 177 179 return ieee754dp_zero(0); /* other round modes => sign = 1 */ ··· 179 181 180 182 /* normalize to rounding precision 181 183 */ 182 - while ((xm >> (DP_MBITS + 3)) == 0) { 184 + while ((xm >> (DP_FBITS + 3)) == 0) { 183 185 xm <<= 1; 184 186 xe--; 185 187 } 186 188 } 187 - DPNORMRET2(xs, xe, xm, "sub", x, y); 189 + 190 + return ieee754dp_format(xs, xe, xm); 188 191 }
+26 -43
arch/mips/math-emu/dp_tint.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 23 24 - 25 - #include <linux/kernel.h> 26 24 #include "ieee754dp.h" 27 25 28 - int ieee754dp_tint(ieee754dp x) 26 + int ieee754dp_tint(union ieee754dp x) 29 27 { 28 + u64 residue; 29 + int round; 30 + int sticky; 31 + int odd; 32 + 30 33 COMPXDP; 31 34 32 - CLEARCX; 35 + ieee754_clearcx(); 33 36 34 37 EXPLODEXDP; 35 38 FLUSHXDP; ··· 39 40 case IEEE754_CLASS_SNAN: 40 41 case IEEE754_CLASS_QNAN: 41 42 case IEEE754_CLASS_INF: 42 - SETCX(IEEE754_INVALID_OPERATION); 43 - return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); 43 + ieee754_setcx(IEEE754_INVALID_OPERATION); 44 + return ieee754si_indef(); 45 + 44 46 case IEEE754_CLASS_ZERO: 45 47 return 0; 48 + 46 49 case IEEE754_CLASS_DNORM: 47 50 case IEEE754_CLASS_NORM: 48 51 break; ··· 52 51 if (xe > 31) { 53 52 /* Set invalid. 
We will only use overflow for floating 54 53 point overflow */ 55 - SETCX(IEEE754_INVALID_OPERATION); 56 - return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754si_indef(); 57 56 } 58 57 /* oh gawd */ 59 - if (xe > DP_MBITS) { 60 - xm <<= xe - DP_MBITS; 61 - } else if (xe < DP_MBITS) { 62 - u64 residue; 63 - int round; 64 - int sticky; 65 - int odd; 66 - 58 + if (xe > DP_FBITS) { 59 + xm <<= xe - DP_FBITS; 60 + } else if (xe < DP_FBITS) { 67 61 if (xe < -1) { 68 62 residue = xm; 69 63 round = 0; 70 64 sticky = residue != 0; 71 65 xm = 0; 72 66 } else { 73 - residue = xm << (64 - DP_MBITS + xe); 67 + residue = xm << (64 - DP_FBITS + xe); 74 68 round = (residue >> 63) != 0; 75 69 sticky = (residue << 1) != 0; 76 - xm >>= DP_MBITS - xe; 70 + xm >>= DP_FBITS - xe; 77 71 } 78 72 /* Note: At this point upper 32 bits of xm are guaranteed 79 73 to be zero */ 80 74 odd = (xm & 0x1) != 0x0; 81 75 switch (ieee754_csr.rm) { 82 - case IEEE754_RN: 76 + case FPU_CSR_RN: 83 77 if (round && (sticky || odd)) 84 78 xm++; 85 79 break; 86 - case IEEE754_RZ: 80 + case FPU_CSR_RZ: 87 81 break; 88 - case IEEE754_RU: /* toward +Infinity */ 82 + case FPU_CSR_RU: /* toward +Infinity */ 89 83 if ((round || sticky) && !xs) 90 84 xm++; 91 85 break; 92 - case IEEE754_RD: /* toward -Infinity */ 86 + case FPU_CSR_RD: /* toward -Infinity */ 93 87 if ((round || sticky) && xs) 94 88 xm++; 95 89 break; ··· 92 96 /* look for valid corner case 0x80000000 */ 93 97 if ((xm >> 31) != 0 && (xs == 0 || xm != 0x80000000)) { 94 98 /* This can happen after rounding */ 95 - SETCX(IEEE754_INVALID_OPERATION); 96 - return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); 99 + ieee754_setcx(IEEE754_INVALID_OPERATION); 100 + return ieee754si_indef(); 97 101 } 98 102 if (round || sticky) 99 - SETCX(IEEE754_INEXACT); 103 + ieee754_setcx(IEEE754_INEXACT); 100 104 } 101 105 if (xs) 102 106 return -xm; 103 107 else 104 108 return xm; 105 - } 106 - 107 - 
108 - unsigned int ieee754dp_tuns(ieee754dp x) 109 - { 110 - ieee754dp hb = ieee754dp_1e31(); 111 - 112 - /* what if x < 0 ?? */ 113 - if (ieee754dp_lt(x, hb)) 114 - return (unsigned) ieee754dp_tint(x); 115 - 116 - return (unsigned) ieee754dp_tint(ieee754dp_sub(x, hb)) | 117 - ((unsigned) 1 << 31); 118 109 }
+26 -42
arch/mips/math-emu/dp_tlong.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754dp.h" 26 25 27 - s64 ieee754dp_tlong(ieee754dp x) 26 + s64 ieee754dp_tlong(union ieee754dp x) 28 27 { 28 + u64 residue; 29 + int round; 30 + int sticky; 31 + int odd; 32 + 29 33 COMPXDP; 30 34 31 - CLEARCX; 35 + ieee754_clearcx(); 32 36 33 37 EXPLODEXDP; 34 38 FLUSHXDP; ··· 39 39 case IEEE754_CLASS_SNAN: 40 40 case IEEE754_CLASS_QNAN: 41 41 case IEEE754_CLASS_INF: 42 - SETCX(IEEE754_INVALID_OPERATION); 43 - return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); 42 + ieee754_setcx(IEEE754_INVALID_OPERATION); 43 + return ieee754di_indef(); 44 + 44 45 case IEEE754_CLASS_ZERO: 45 46 return 0; 47 + 46 48 case IEEE754_CLASS_DNORM: 47 49 case IEEE754_CLASS_NORM: 48 50 break; ··· 55 53 return -0x8000000000000000LL; 56 54 /* Set invalid. 
We will only use overflow for floating 57 55 point overflow */ 58 - SETCX(IEEE754_INVALID_OPERATION); 59 - return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); 56 + ieee754_setcx(IEEE754_INVALID_OPERATION); 57 + return ieee754di_indef(); 60 58 } 61 59 /* oh gawd */ 62 - if (xe > DP_MBITS) { 63 - xm <<= xe - DP_MBITS; 64 - } else if (xe < DP_MBITS) { 65 - u64 residue; 66 - int round; 67 - int sticky; 68 - int odd; 69 - 60 + if (xe > DP_FBITS) { 61 + xm <<= xe - DP_FBITS; 62 + } else if (xe < DP_FBITS) { 70 63 if (xe < -1) { 71 64 residue = xm; 72 65 round = 0; ··· 72 75 * so we do it in two steps. Be aware that xe 73 76 * may be -1 */ 74 77 residue = xm << (xe + 1); 75 - residue <<= 63 - DP_MBITS; 78 + residue <<= 63 - DP_FBITS; 76 79 round = (residue >> 63) != 0; 77 80 sticky = (residue << 1) != 0; 78 - xm >>= DP_MBITS - xe; 81 + xm >>= DP_FBITS - xe; 79 82 } 80 83 odd = (xm & 0x1) != 0x0; 81 84 switch (ieee754_csr.rm) { 82 - case IEEE754_RN: 85 + case FPU_CSR_RN: 83 86 if (round && (sticky || odd)) 84 87 xm++; 85 88 break; 86 - case IEEE754_RZ: 89 + case FPU_CSR_RZ: 87 90 break; 88 - case IEEE754_RU: /* toward +Infinity */ 91 + case FPU_CSR_RU: /* toward +Infinity */ 89 92 if ((round || sticky) && !xs) 90 93 xm++; 91 94 break; 92 - case IEEE754_RD: /* toward -Infinity */ 95 + case FPU_CSR_RD: /* toward -Infinity */ 93 96 if ((round || sticky) && xs) 94 97 xm++; 95 98 break; 96 99 } 97 100 if ((xm >> 63) != 0) { 98 101 /* This can happen after rounding */ 99 - SETCX(IEEE754_INVALID_OPERATION); 100 - return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); 102 + ieee754_setcx(IEEE754_INVALID_OPERATION); 103 + return ieee754di_indef(); 101 104 } 102 105 if (round || sticky) 103 - SETCX(IEEE754_INEXACT); 106 + ieee754_setcx(IEEE754_INEXACT); 104 107 } 105 108 if (xs) 106 109 return -xm; 107 110 else 108 111 return xm; 109 - } 110 - 111 - 112 - u64 ieee754dp_tulong(ieee754dp x) 113 - { 114 - ieee754dp hb = ieee754dp_1e63(); 115 - 116 - /* what if x < 0 ?? 
*/ 117 - if (ieee754dp_lt(x, hb)) 118 - return (u64) ieee754dp_tlong(x); 119 - 120 - return (u64) ieee754dp_tlong(ieee754dp_sub(x, hb)) | 121 - (1ULL << 63); 122 112 }
+7 -28
arch/mips/math-emu/dsemul.c
··· 1 - #include <linux/compiler.h> 2 - #include <linux/mm.h> 3 - #include <linux/signal.h> 4 - #include <linux/smp.h> 5 - 6 - #include <asm/asm.h> 7 - #include <asm/bootinfo.h> 8 - #include <asm/byteorder.h> 9 - #include <asm/cpu.h> 10 - #include <asm/inst.h> 11 - #include <asm/processor.h> 12 - #include <asm/uaccess.h> 13 1 #include <asm/branch.h> 14 - #include <asm/mipsregs.h> 15 2 #include <asm/cacheflush.h> 16 - 17 3 #include <asm/fpu_emulator.h> 4 + #include <asm/inst.h> 5 + #include <asm/mipsregs.h> 6 + #include <asm/uaccess.h> 18 7 19 8 #include "ieee754.h" 20 - 21 - /* Strap kernel emulator for full MIPS IV emulation */ 22 - 23 - #ifdef __mips 24 - #undef __mips 25 - #endif 26 - #define __mips 4 27 9 28 10 /* 29 11 * Emulate the arbritrary instruction ir at xcp->cp0_epc. Required when ··· 41 59 (ir == 0)) { 42 60 /* NOP is easy */ 43 61 regs->cp0_epc = cpc; 44 - regs->cp0_cause &= ~CAUSEF_BD; 62 + clear_delay_slot(regs); 45 63 return 0; 46 64 } 47 - #ifdef DSEMUL_TRACE 48 - printk("dsemul %lx %lx\n", regs->cp0_epc, cpc); 49 65 50 - #endif 66 + pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc); 51 67 52 68 /* 53 69 * The strategy is to push the instruction onto the user stack ··· 147 167 * emulating the branch delay instruction. 148 168 */ 149 169 150 - #ifdef DSEMUL_TRACE 151 - printk("dsemulret\n"); 152 - #endif 170 + pr_debug("dsemulret\n"); 171 + 153 172 if (__get_user(epc, &fr->epc)) { /* Saved EPC */ 154 173 /* This is not a good situation to be in */ 155 174 force_sig(SIGBUS, current);
+55 -93
arch/mips/math-emu/ieee754.c
··· 10 10 * MIPS floating point support 11 11 * Copyright (C) 1994-2000 Algorithmics Ltd. 12 12 * 13 - * ######################################################################## 14 - * 15 13 * This program is free software; you can distribute it and/or modify it 16 14 * under the terms of the GNU General Public License (Version 2) as 17 15 * published by the Free Software Foundation. ··· 21 23 * 22 24 * You should have received a copy of the GNU General Public License along 23 25 * with this program; if not, write to the Free Software Foundation, Inc., 24 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 25 - * 26 - * ######################################################################## 26 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 27 27 */ 28 28 29 + #include <linux/compiler.h> 29 30 30 - #include "ieee754int.h" 31 + #include "ieee754.h" 31 32 #include "ieee754sp.h" 32 33 #include "ieee754dp.h" 33 34 34 - #define DP_EBIAS 1023 35 - #define DP_EMIN (-1022) 36 - #define DP_EMAX 1023 35 + /* 36 + * Special constants 37 + */ 37 38 38 - #define SP_EBIAS 127 39 - #define SP_EMIN (-126) 40 - #define SP_EMAX 127 41 - 42 - /* special constants 43 - */ 44 - 45 - 46 - #if (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN) || defined(__MIPSEL__) 47 - #define SPSTR(s, b, m) {m, b, s} 48 - #define DPSTR(s, b, mh, ml) {ml, mh, b, s} 49 - #endif 50 - 51 - #ifdef __MIPSEB__ 52 - #define SPSTR(s, b, m) {s, b, m} 53 - #define DPSTR(s, b, mh, ml) {s, b, mh, ml} 54 - #endif 55 - 56 - const struct ieee754dp_konst __ieee754dp_spcvals[] = { 57 - DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */ 58 - DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */ 59 - DPSTR(0, DP_EBIAS, 0, 0), /* + 1.0 */ 60 - DPSTR(1, DP_EBIAS, 0, 0), /* - 1.0 */ 61 - DPSTR(0, 3 + DP_EBIAS, 0x40000, 0), /* + 10.0 */ 62 - DPSTR(1, 3 + DP_EBIAS, 0x40000, 0), /* - 10.0 */ 63 - DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */ 64 - DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - 
infinity */ 65 - DPSTR(0, DP_EMAX+1+DP_EBIAS, 0x7FFFF, 0xFFFFFFFF), /* + indef quiet Nan */ 66 - DPSTR(0, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* + max */ 67 - DPSTR(1, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* - max */ 68 - DPSTR(0, DP_EMIN + DP_EBIAS, 0, 0), /* + min normal */ 69 - DPSTR(1, DP_EMIN + DP_EBIAS, 0, 0), /* - min normal */ 70 - DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */ 71 - DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */ 72 - DPSTR(0, 31 + DP_EBIAS, 0, 0), /* + 1.0e31 */ 73 - DPSTR(0, 63 + DP_EBIAS, 0, 0), /* + 1.0e63 */ 74 - }; 75 - 76 - const struct ieee754sp_konst __ieee754sp_spcvals[] = { 77 - SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 0), /* + zero */ 78 - SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 0), /* - zero */ 79 - SPSTR(0, SP_EBIAS, 0), /* + 1.0 */ 80 - SPSTR(1, SP_EBIAS, 0), /* - 1.0 */ 81 - SPSTR(0, 3 + SP_EBIAS, 0x200000), /* + 10.0 */ 82 - SPSTR(1, 3 + SP_EBIAS, 0x200000), /* - 10.0 */ 83 - SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0), /* + infinity */ 84 - SPSTR(1, SP_EMAX + 1 + SP_EBIAS, 0), /* - infinity */ 85 - SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */ 86 - SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */ 87 - SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */ 88 - SPSTR(0, SP_EMIN + SP_EBIAS, 0), /* + min normal */ 89 - SPSTR(1, SP_EMIN + SP_EBIAS, 0), /* - min normal */ 90 - SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 1), /* + min denormal */ 91 - SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 1), /* - min denormal */ 92 - SPSTR(0, 31 + SP_EBIAS, 0), /* + 1.0e31 */ 93 - SPSTR(0, 63 + SP_EBIAS, 0), /* + 1.0e63 */ 94 - }; 95 - 96 - 97 - int ieee754si_xcpt(int r, const char *op, ...) 
98 - { 99 - struct ieee754xctx ax; 100 - 101 - if (!TSTX()) 102 - return r; 103 - ax.op = op; 104 - ax.rt = IEEE754_RT_SI; 105 - ax.rv.si = r; 106 - va_start(ax.ap, op); 107 - ieee754_xcpt(&ax); 108 - va_end(ax.ap); 109 - return ax.rv.si; 39 + #define DPCNST(s, b, m) \ 40 + { \ 41 + .sign = (s), \ 42 + .bexp = (b) + DP_EBIAS, \ 43 + .mant = (m) \ 110 44 } 111 45 112 - s64 ieee754di_xcpt(s64 r, const char *op, ...) 113 - { 114 - struct ieee754xctx ax; 46 + const union ieee754dp __ieee754dp_spcvals[] = { 47 + DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */ 48 + DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */ 49 + DPCNST(0, 0, 0x0000000000000ULL), /* + 1.0 */ 50 + DPCNST(1, 0, 0x0000000000000ULL), /* - 1.0 */ 51 + DPCNST(0, 3, 0x4000000000000ULL), /* + 10.0 */ 52 + DPCNST(1, 3, 0x4000000000000ULL), /* - 10.0 */ 53 + DPCNST(0, DP_EMAX + 1, 0x0000000000000ULL), /* + infinity */ 54 + DPCNST(1, DP_EMAX + 1, 0x0000000000000ULL), /* - infinity */ 55 + DPCNST(0, DP_EMAX + 1, 0x7FFFFFFFFFFFFULL), /* + indef quiet Nan */ 56 + DPCNST(0, DP_EMAX, 0xFFFFFFFFFFFFFULL), /* + max */ 57 + DPCNST(1, DP_EMAX, 0xFFFFFFFFFFFFFULL), /* - max */ 58 + DPCNST(0, DP_EMIN, 0x0000000000000ULL), /* + min normal */ 59 + DPCNST(1, DP_EMIN, 0x0000000000000ULL), /* - min normal */ 60 + DPCNST(0, DP_EMIN - 1, 0x0000000000001ULL), /* + min denormal */ 61 + DPCNST(1, DP_EMIN - 1, 0x0000000000001ULL), /* - min denormal */ 62 + DPCNST(0, 31, 0x0000000000000ULL), /* + 1.0e31 */ 63 + DPCNST(0, 63, 0x0000000000000ULL), /* + 1.0e63 */ 64 + }; 115 65 116 - if (!TSTX()) 117 - return r; 118 - ax.op = op; 119 - ax.rt = IEEE754_RT_DI; 120 - ax.rv.di = r; 121 - va_start(ax.ap, op); 122 - ieee754_xcpt(&ax); 123 - va_end(ax.ap); 124 - return ax.rv.di; 66 + #define SPCNST(s, b, m) \ 67 + { \ 68 + .sign = (s), \ 69 + .bexp = (b) + SP_EBIAS, \ 70 + .mant = (m) \ 125 71 } 72 + 73 + const union ieee754sp __ieee754sp_spcvals[] = { 74 + SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */ 75 + SPCNST(1, SP_EMIN 
- 1, 0x000000), /* - zero */ 76 + SPCNST(0, 0, 0x000000), /* + 1.0 */ 77 + SPCNST(1, 0, 0x000000), /* - 1.0 */ 78 + SPCNST(0, 3, 0x200000), /* + 10.0 */ 79 + SPCNST(1, 3, 0x200000), /* - 10.0 */ 80 + SPCNST(0, SP_EMAX + 1, 0x000000), /* + infinity */ 81 + SPCNST(1, SP_EMAX + 1, 0x000000), /* - infinity */ 82 + SPCNST(0, SP_EMAX + 1, 0x3FFFFF), /* + indef quiet Nan */ 83 + SPCNST(0, SP_EMAX, 0x7FFFFF), /* + max normal */ 84 + SPCNST(1, SP_EMAX, 0x7FFFFF), /* - max normal */ 85 + SPCNST(0, SP_EMIN, 0x000000), /* + min normal */ 86 + SPCNST(1, SP_EMIN, 0x000000), /* - min normal */ 87 + SPCNST(0, SP_EMIN - 1, 0x000001), /* + min denormal */ 88 + SPCNST(1, SP_EMIN - 1, 0x000001), /* - min denormal */ 89 + SPCNST(0, 31, 0x000000), /* + 1.0e31 */ 90 + SPCNST(0, 63, 0x000000), /* + 1.0e63 */ 91 + };
+88 -226
arch/mips/math-emu/ieee754.h
··· 13 13 * 14 14 * You should have received a copy of the GNU General Public License along 15 15 * with this program; if not, write to the Free Software Foundation, Inc., 16 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 16 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 17 * 18 18 * Nov 7, 2000 19 19 * Modification to allow integration with Linux kernel ··· 24 24 #ifndef __ARCH_MIPS_MATH_EMU_IEEE754_H 25 25 #define __ARCH_MIPS_MATH_EMU_IEEE754_H 26 26 27 + #include <linux/compiler.h> 27 28 #include <asm/byteorder.h> 29 + #include <linux/kernel.h> 28 30 #include <linux/types.h> 29 31 #include <linux/sched.h> 32 + #include <asm/bitfield.h> 30 33 31 - /* 32 - * Not very pretty, but the Linux kernel's normal va_list definition 33 - * does not allow it to be used as a structure element, as it is here. 34 - */ 35 - #ifndef _STDARG_H 36 - #include <stdarg.h> 37 - #endif 38 - 39 - #ifdef __LITTLE_ENDIAN 40 - struct ieee754dp_konst { 41 - unsigned mantlo:32; 42 - unsigned manthi:20; 43 - unsigned bexp:11; 44 - unsigned sign:1; 45 - }; 46 - struct ieee754sp_konst { 47 - unsigned mant:23; 48 - unsigned bexp:8; 49 - unsigned sign:1; 50 - }; 51 - 52 - typedef union _ieee754dp { 53 - struct ieee754dp_konst oparts; 34 + union ieee754dp { 54 35 struct { 55 - u64 mant:52; 56 - unsigned int bexp:11; 57 - unsigned int sign:1; 58 - } parts; 36 + __BITFIELD_FIELD(unsigned int sign:1, 37 + __BITFIELD_FIELD(unsigned int bexp:11, 38 + __BITFIELD_FIELD(u64 mant:52, 39 + ;))) 40 + }; 59 41 u64 bits; 60 - double d; 61 - } ieee754dp; 62 - 63 - typedef union _ieee754sp { 64 - struct ieee754sp_konst parts; 65 - float f; 66 - u32 bits; 67 - } ieee754sp; 68 - #endif 69 - 70 - #ifdef __BIG_ENDIAN 71 - struct ieee754dp_konst { 72 - unsigned sign:1; 73 - unsigned bexp:11; 74 - unsigned manthi:20; 75 - unsigned mantlo:32; 76 42 }; 77 43 78 - typedef union _ieee754dp { 79 - struct ieee754dp_konst oparts; 44 + union ieee754sp { 80 45 struct { 81 - unsigned int sign:1; 82 - 
unsigned int bexp:11; 83 - u64 mant:52; 84 - } parts; 85 - double d; 86 - u64 bits; 87 - } ieee754dp; 88 - 89 - struct ieee754sp_konst { 90 - unsigned sign:1; 91 - unsigned bexp:8; 92 - unsigned mant:23; 93 - }; 94 - 95 - typedef union _ieee754sp { 96 - struct ieee754sp_konst parts; 97 - float f; 46 + __BITFIELD_FIELD(unsigned sign:1, 47 + __BITFIELD_FIELD(unsigned bexp:8, 48 + __BITFIELD_FIELD(unsigned mant:23, 49 + ;))) 50 + }; 98 51 u32 bits; 99 - } ieee754sp; 100 - #endif 52 + }; 101 53 102 54 /* 103 55 * single precision (often aka float) 104 56 */ 105 - int ieee754sp_finite(ieee754sp x); 106 - int ieee754sp_class(ieee754sp x); 57 + int ieee754sp_class(union ieee754sp x); 107 58 108 - ieee754sp ieee754sp_abs(ieee754sp x); 109 - ieee754sp ieee754sp_neg(ieee754sp x); 110 - ieee754sp ieee754sp_scalb(ieee754sp x, int); 111 - ieee754sp ieee754sp_logb(ieee754sp x); 59 + union ieee754sp ieee754sp_abs(union ieee754sp x); 60 + union ieee754sp ieee754sp_neg(union ieee754sp x); 112 61 113 - /* x with sign of y */ 114 - ieee754sp ieee754sp_copysign(ieee754sp x, ieee754sp y); 62 + union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y); 63 + union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y); 64 + union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y); 65 + union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y); 115 66 116 - ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y); 117 - ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y); 118 - ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y); 119 - ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y); 67 + union ieee754sp ieee754sp_fint(int x); 68 + union ieee754sp ieee754sp_flong(s64 x); 69 + union ieee754sp ieee754sp_fdp(union ieee754dp x); 120 70 121 - ieee754sp ieee754sp_fint(int x); 122 - ieee754sp ieee754sp_funs(unsigned x); 123 - ieee754sp ieee754sp_flong(s64 x); 124 - ieee754sp ieee754sp_fulong(u64 x); 125 - ieee754sp ieee754sp_fdp(ieee754dp x); 71 + int 
ieee754sp_tint(union ieee754sp x); 72 + s64 ieee754sp_tlong(union ieee754sp x); 126 73 127 - int ieee754sp_tint(ieee754sp x); 128 - unsigned int ieee754sp_tuns(ieee754sp x); 129 - s64 ieee754sp_tlong(ieee754sp x); 130 - u64 ieee754sp_tulong(ieee754sp x); 74 + int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cop, int sig); 131 75 132 - int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cop, int sig); 133 - /* 134 - * basic sp math 135 - */ 136 - ieee754sp ieee754sp_modf(ieee754sp x, ieee754sp * ip); 137 - ieee754sp ieee754sp_frexp(ieee754sp x, int *exp); 138 - ieee754sp ieee754sp_ldexp(ieee754sp x, int exp); 139 - 140 - ieee754sp ieee754sp_ceil(ieee754sp x); 141 - ieee754sp ieee754sp_floor(ieee754sp x); 142 - ieee754sp ieee754sp_trunc(ieee754sp x); 143 - 144 - ieee754sp ieee754sp_sqrt(ieee754sp x); 76 + union ieee754sp ieee754sp_sqrt(union ieee754sp x); 145 77 146 78 /* 147 79 * double precision (often aka double) 148 80 */ 149 - int ieee754dp_finite(ieee754dp x); 150 - int ieee754dp_class(ieee754dp x); 81 + int ieee754dp_class(union ieee754dp x); 151 82 152 - /* x with sign of y */ 153 - ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y); 83 + union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y); 84 + union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y); 85 + union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y); 86 + union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y); 154 87 155 - ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y); 156 - ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y); 157 - ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y); 158 - ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y); 88 + union ieee754dp ieee754dp_abs(union ieee754dp x); 89 + union ieee754dp ieee754dp_neg(union ieee754dp x); 159 90 160 - ieee754dp ieee754dp_abs(ieee754dp x); 161 - ieee754dp ieee754dp_neg(ieee754dp x); 162 - ieee754dp ieee754dp_scalb(ieee754dp x, int); 91 + union ieee754dp ieee754dp_fint(int x); 
92 + union ieee754dp ieee754dp_flong(s64 x); 93 + union ieee754dp ieee754dp_fsp(union ieee754sp x); 163 94 164 - /* return exponent as integer in floating point format 165 - */ 166 - ieee754dp ieee754dp_logb(ieee754dp x); 95 + int ieee754dp_tint(union ieee754dp x); 96 + s64 ieee754dp_tlong(union ieee754dp x); 167 97 168 - ieee754dp ieee754dp_fint(int x); 169 - ieee754dp ieee754dp_funs(unsigned x); 170 - ieee754dp ieee754dp_flong(s64 x); 171 - ieee754dp ieee754dp_fulong(u64 x); 172 - ieee754dp ieee754dp_fsp(ieee754sp x); 98 + int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cop, int sig); 173 99 174 - ieee754dp ieee754dp_ceil(ieee754dp x); 175 - ieee754dp ieee754dp_floor(ieee754dp x); 176 - ieee754dp ieee754dp_trunc(ieee754dp x); 177 - 178 - int ieee754dp_tint(ieee754dp x); 179 - unsigned int ieee754dp_tuns(ieee754dp x); 180 - s64 ieee754dp_tlong(ieee754dp x); 181 - u64 ieee754dp_tulong(ieee754dp x); 182 - 183 - int ieee754dp_cmp(ieee754dp x, ieee754dp y, int cop, int sig); 184 - /* 185 - * basic sp math 186 - */ 187 - ieee754dp ieee754dp_modf(ieee754dp x, ieee754dp * ip); 188 - ieee754dp ieee754dp_frexp(ieee754dp x, int *exp); 189 - ieee754dp ieee754dp_ldexp(ieee754dp x, int exp); 190 - 191 - ieee754dp ieee754dp_ceil(ieee754dp x); 192 - ieee754dp ieee754dp_floor(ieee754dp x); 193 - ieee754dp ieee754dp_trunc(ieee754dp x); 194 - 195 - ieee754dp ieee754dp_sqrt(ieee754dp x); 100 + union ieee754dp ieee754dp_sqrt(union ieee754dp x); 196 101 197 102 198 103 199 104 /* 5 types of floating point number 200 105 */ 201 - #define IEEE754_CLASS_NORM 0x00 202 - #define IEEE754_CLASS_ZERO 0x01 203 - #define IEEE754_CLASS_DNORM 0x02 204 - #define IEEE754_CLASS_INF 0x03 205 - #define IEEE754_CLASS_SNAN 0x04 206 - #define IEEE754_CLASS_QNAN 0x05 106 + enum { 107 + IEEE754_CLASS_NORM = 0x00, 108 + IEEE754_CLASS_ZERO = 0x01, 109 + IEEE754_CLASS_DNORM = 0x02, 110 + IEEE754_CLASS_INF = 0x03, 111 + IEEE754_CLASS_SNAN = 0x04, 112 + IEEE754_CLASS_QNAN = 0x05, 113 + }; 207 114 
208 115 /* exception numbers */ 209 116 #define IEEE754_INEXACT 0x01 ··· 126 219 #define IEEE754_CGT 0x04 127 220 #define IEEE754_CUN 0x08 128 221 129 - /* rounding mode 130 - */ 131 - #define IEEE754_RN 0 /* round to nearest */ 132 - #define IEEE754_RZ 1 /* round toward zero */ 133 - #define IEEE754_RD 2 /* round toward -Infinity */ 134 - #define IEEE754_RU 3 /* round toward +Infinity */ 135 - 136 - /* other naming */ 137 - #define IEEE754_RM IEEE754_RD 138 - #define IEEE754_RP IEEE754_RU 139 - 140 222 /* "normal" comparisons 141 223 */ 142 - static inline int ieee754sp_eq(ieee754sp x, ieee754sp y) 224 + static inline int ieee754sp_eq(union ieee754sp x, union ieee754sp y) 143 225 { 144 226 return ieee754sp_cmp(x, y, IEEE754_CEQ, 0); 145 227 } 146 228 147 - static inline int ieee754sp_ne(ieee754sp x, ieee754sp y) 229 + static inline int ieee754sp_ne(union ieee754sp x, union ieee754sp y) 148 230 { 149 231 return ieee754sp_cmp(x, y, 150 232 IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0); 151 233 } 152 234 153 - static inline int ieee754sp_lt(ieee754sp x, ieee754sp y) 235 + static inline int ieee754sp_lt(union ieee754sp x, union ieee754sp y) 154 236 { 155 237 return ieee754sp_cmp(x, y, IEEE754_CLT, 0); 156 238 } 157 239 158 - static inline int ieee754sp_le(ieee754sp x, ieee754sp y) 240 + static inline int ieee754sp_le(union ieee754sp x, union ieee754sp y) 159 241 { 160 242 return ieee754sp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0); 161 243 } 162 244 163 - static inline int ieee754sp_gt(ieee754sp x, ieee754sp y) 245 + static inline int ieee754sp_gt(union ieee754sp x, union ieee754sp y) 164 246 { 165 247 return ieee754sp_cmp(x, y, IEEE754_CGT, 0); 166 248 } 167 249 168 250 169 - static inline int ieee754sp_ge(ieee754sp x, ieee754sp y) 251 + static inline int ieee754sp_ge(union ieee754sp x, union ieee754sp y) 170 252 { 171 253 return ieee754sp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0); 172 254 } 173 255 174 - static inline int ieee754dp_eq(ieee754dp x, ieee754dp y) 256 + static 
inline int ieee754dp_eq(union ieee754dp x, union ieee754dp y) 175 257 { 176 258 return ieee754dp_cmp(x, y, IEEE754_CEQ, 0); 177 259 } 178 260 179 - static inline int ieee754dp_ne(ieee754dp x, ieee754dp y) 261 + static inline int ieee754dp_ne(union ieee754dp x, union ieee754dp y) 180 262 { 181 263 return ieee754dp_cmp(x, y, 182 264 IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0); 183 265 } 184 266 185 - static inline int ieee754dp_lt(ieee754dp x, ieee754dp y) 267 + static inline int ieee754dp_lt(union ieee754dp x, union ieee754dp y) 186 268 { 187 269 return ieee754dp_cmp(x, y, IEEE754_CLT, 0); 188 270 } 189 271 190 - static inline int ieee754dp_le(ieee754dp x, ieee754dp y) 272 + static inline int ieee754dp_le(union ieee754dp x, union ieee754dp y) 191 273 { 192 274 return ieee754dp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0); 193 275 } 194 276 195 - static inline int ieee754dp_gt(ieee754dp x, ieee754dp y) 277 + static inline int ieee754dp_gt(union ieee754dp x, union ieee754dp y) 196 278 { 197 279 return ieee754dp_cmp(x, y, IEEE754_CGT, 0); 198 280 } 199 281 200 - static inline int ieee754dp_ge(ieee754dp x, ieee754dp y) 282 + static inline int ieee754dp_ge(union ieee754dp x, union ieee754dp y) 201 283 { 202 284 return ieee754dp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0); 203 285 } 204 - 205 - 206 - /* 207 - * Like strtod 208 - */ 209 - ieee754dp ieee754dp_fstr(const char *s, char **endp); 210 - char *ieee754dp_tstr(ieee754dp x, int prec, int fmt, int af); 211 - 212 286 213 287 /* 214 288 * The control status register 215 289 */ 216 290 struct _ieee754_csr { 217 - #ifdef __BIG_ENDIAN 218 - unsigned pad0:7; 219 - unsigned nod:1; /* set 1 for no denormalised numbers */ 220 - unsigned c:1; /* condition */ 221 - unsigned pad1:5; 222 - unsigned cx:6; /* exceptions this operation */ 223 - unsigned mx:5; /* exception enable mask */ 224 - unsigned sx:5; /* exceptions total */ 225 - unsigned rm:2; /* current rounding mode */ 226 - #endif 227 - #ifdef __LITTLE_ENDIAN 228 - unsigned rm:2; 
/* current rounding mode */ 229 - unsigned sx:5; /* exceptions total */ 230 - unsigned mx:5; /* exception enable mask */ 231 - unsigned cx:6; /* exceptions this operation */ 232 - unsigned pad1:5; 233 - unsigned c:1; /* condition */ 234 - unsigned nod:1; /* set 1 for no denormalised numbers */ 235 - unsigned pad0:7; 236 - #endif 291 + __BITFIELD_FIELD(unsigned pad0:7, 292 + __BITFIELD_FIELD(unsigned nod:1, /* set 1 for no denormalised numbers */ 293 + __BITFIELD_FIELD(unsigned c:1, /* condition */ 294 + __BITFIELD_FIELD(unsigned pad1:5, 295 + __BITFIELD_FIELD(unsigned cx:6, /* exceptions this operation */ 296 + __BITFIELD_FIELD(unsigned mx:5, /* exception enable mask */ 297 + __BITFIELD_FIELD(unsigned sx:5, /* exceptions total */ 298 + __BITFIELD_FIELD(unsigned rm:2, /* current rounding mode */ 299 + ;)))))))) 237 300 }; 238 301 #define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31)) 239 302 ··· 254 377 } 255 378 256 379 /* debugging */ 257 - ieee754sp ieee754sp_dump(char *s, ieee754sp x); 258 - ieee754dp ieee754dp_dump(char *s, ieee754dp x); 380 + union ieee754sp ieee754sp_dump(char *s, union ieee754sp x); 381 + union ieee754dp ieee754dp_dump(char *s, union ieee754dp x); 259 382 260 383 #define IEEE754_SPCVAL_PZERO 0 261 384 #define IEEE754_SPCVAL_NZERO 1 ··· 275 398 #define IEEE754_SPCVAL_P1E31 15 /* + 1.0e31 */ 276 399 #define IEEE754_SPCVAL_P1E63 16 /* + 1.0e63 */ 277 400 278 - extern const struct ieee754dp_konst __ieee754dp_spcvals[]; 279 - extern const struct ieee754sp_konst __ieee754sp_spcvals[]; 280 - #define ieee754dp_spcvals ((const ieee754dp *)__ieee754dp_spcvals) 281 - #define ieee754sp_spcvals ((const ieee754sp *)__ieee754sp_spcvals) 401 + extern const union ieee754dp __ieee754dp_spcvals[]; 402 + extern const union ieee754sp __ieee754sp_spcvals[]; 403 + #define ieee754dp_spcvals ((const union ieee754dp *)__ieee754dp_spcvals) 404 + #define ieee754sp_spcvals ((const union ieee754sp *)__ieee754sp_spcvals) 282 405 283 406 /* 284 407 * 
Return infinity with given sign ··· 308 431 /* 309 432 * Indefinite integer value 310 433 */ 311 - #define ieee754si_indef() INT_MAX 312 - #ifdef LONG_LONG_MAX 313 - #define ieee754di_indef() LONG_LONG_MAX 314 - #else 315 - #define ieee754di_indef() ((s64)(~0ULL>>1)) 316 - #endif 434 + static inline int ieee754si_indef(void) 435 + { 436 + return INT_MAX; 437 + } 317 438 318 - /* IEEE exception context, passed to handler */ 319 - struct ieee754xctx { 320 - const char *op; /* operation name */ 321 - int rt; /* result type */ 322 - union { 323 - ieee754sp sp; /* single precision */ 324 - ieee754dp dp; /* double precision */ 325 - #ifdef IEEE854_XP 326 - ieee754xp xp; /* extended precision */ 327 - #endif 328 - int si; /* standard signed integer (32bits) */ 329 - s64 di; /* extended signed integer (64bits) */ 330 - } rv; /* default result format implied by op */ 331 - va_list ap; 332 - }; 439 + static inline s64 ieee754di_indef(void) 440 + { 441 + return S64_MAX; 442 + } 333 443 334 444 /* result types for xctx.rt */ 335 445 #define IEEE754_RT_SP 0 ··· 324 460 #define IEEE754_RT_XP 2 325 461 #define IEEE754_RT_SI 3 326 462 #define IEEE754_RT_DI 4 327 - 328 - extern void ieee754_xcpt(struct ieee754xctx *xcp); 329 463 330 464 /* compat */ 331 465 #define ieee754dp_fix(x) ieee754dp_tint(x)
+7 -32
arch/mips/math-emu/ieee754d.c
··· 16 16 * 17 17 * You should have received a copy of the GNU General Public License along 18 18 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 20 * 21 21 * Nov 7, 2000 22 22 * Modified to build and operate in Linux kernel environment. ··· 25 25 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 26 26 */ 27 27 28 - #include <linux/kernel.h> 28 + #include <linux/types.h> 29 + #include <linux/printk.h> 29 30 #include "ieee754.h" 31 + #include "ieee754sp.h" 32 + #include "ieee754dp.h" 30 33 31 - #define DP_EBIAS 1023 32 - #define DP_EMIN (-1022) 33 - #define DP_EMAX 1023 34 - #define DP_FBITS 52 35 - 36 - #define SP_EBIAS 127 37 - #define SP_EMIN (-126) 38 - #define SP_EMAX 127 39 - #define SP_FBITS 23 40 - 41 - #define DP_MBIT(x) ((u64)1 << (x)) 42 - #define DP_HIDDEN_BIT DP_MBIT(DP_FBITS) 43 - #define DP_SIGN_BIT DP_MBIT(63) 44 - 45 - 46 - #define SP_MBIT(x) ((u32)1 << (x)) 47 - #define SP_HIDDEN_BIT SP_MBIT(SP_FBITS) 48 - #define SP_SIGN_BIT SP_MBIT(31) 49 - 50 - 51 - #define SPSIGN(sp) (sp.parts.sign) 52 - #define SPBEXP(sp) (sp.parts.bexp) 53 - #define SPMANT(sp) (sp.parts.mant) 54 - 55 - #define DPSIGN(dp) (dp.parts.sign) 56 - #define DPBEXP(dp) (dp.parts.bexp) 57 - #define DPMANT(dp) (dp.parts.mant) 58 - 59 - ieee754dp ieee754dp_dump(char *m, ieee754dp x) 34 + union ieee754dp ieee754dp_dump(char *m, union ieee754dp x) 60 35 { 61 36 int i; 62 37 ··· 71 96 return x; 72 97 } 73 98 74 - ieee754sp ieee754sp_dump(char *m, ieee754sp x) 99 + union ieee754sp ieee754sp_dump(char *m, union ieee754sp x) 75 100 { 76 101 int i; 77 102
+42 -80
arch/mips/math-emu/ieee754dp.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 23 24 + #include <linux/compiler.h> 24 25 25 26 #include "ieee754dp.h" 26 27 27 - int ieee754dp_class(ieee754dp x) 28 + int ieee754dp_class(union ieee754dp x) 28 29 { 29 30 COMPXDP; 30 31 EXPLODEXDP; 31 32 return xc; 32 33 } 33 34 34 - int ieee754dp_isnan(ieee754dp x) 35 + int ieee754dp_isnan(union ieee754dp x) 35 36 { 36 37 return ieee754dp_class(x) >= IEEE754_CLASS_SNAN; 37 38 } 38 39 39 - int ieee754dp_issnan(ieee754dp x) 40 + static inline int ieee754dp_issnan(union ieee754dp x) 40 41 { 41 42 assert(ieee754dp_isnan(x)); 42 - return ((DPMANT(x) & DP_MBIT(DP_MBITS-1)) == DP_MBIT(DP_MBITS-1)); 43 + return ((DPMANT(x) & DP_MBIT(DP_FBITS-1)) == DP_MBIT(DP_FBITS-1)); 43 44 } 44 45 45 46 46 - ieee754dp ieee754dp_xcpt(ieee754dp r, const char *op, ...) 47 + union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp r) 47 48 { 48 - struct ieee754xctx ax; 49 - if (!TSTX()) 50 - return r; 51 - 52 - ax.op = op; 53 - ax.rt = IEEE754_RT_DP; 54 - ax.rv.dp = r; 55 - va_start(ax.ap, op); 56 - ieee754_xcpt(&ax); 57 - va_end(ax.ap); 58 - return ax.rv.dp; 59 - } 60 - 61 - ieee754dp ieee754dp_nanxcpt(ieee754dp r, const char *op, ...) 
62 - { 63 - struct ieee754xctx ax; 64 - 65 49 assert(ieee754dp_isnan(r)); 66 50 67 51 if (!ieee754dp_issnan(r)) /* QNAN does not cause invalid op !! */ 68 52 return r; 69 53 70 - if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) { 54 + if (!ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) { 71 55 /* not enabled convert to a quiet NaN */ 72 - DPMANT(r) &= (~DP_MBIT(DP_MBITS-1)); 56 + DPMANT(r) &= (~DP_MBIT(DP_FBITS-1)); 73 57 if (ieee754dp_isnan(r)) 74 58 return r; 75 59 else 76 60 return ieee754dp_indef(); 77 61 } 78 62 79 - ax.op = op; 80 - ax.rt = 0; 81 - ax.rv.dp = r; 82 - va_start(ax.ap, op); 83 - ieee754_xcpt(&ax); 84 - va_end(ax.ap); 85 - return ax.rv.dp; 63 + return r; 86 64 } 87 65 88 - ieee754dp ieee754dp_bestnan(ieee754dp x, ieee754dp y) 89 - { 90 - assert(ieee754dp_isnan(x)); 91 - assert(ieee754dp_isnan(y)); 92 - 93 - if (DPMANT(x) > DPMANT(y)) 94 - return x; 95 - else 96 - return y; 97 - } 98 - 99 - 100 - static u64 get_rounding(int sn, u64 xm) 66 + static u64 ieee754dp_get_rounding(int sn, u64 xm) 101 67 { 102 68 /* inexact must round of 3 bits 103 69 */ 104 70 if (xm & (DP_MBIT(3) - 1)) { 105 71 switch (ieee754_csr.rm) { 106 - case IEEE754_RZ: 72 + case FPU_CSR_RZ: 107 73 break; 108 - case IEEE754_RN: 74 + case FPU_CSR_RN: 109 75 xm += 0x3 + ((xm >> 3) & 1); 110 76 /* xm += (xm&0x8)?0x4:0x3 */ 111 77 break; 112 - case IEEE754_RU: /* toward +Infinity */ 78 + case FPU_CSR_RU: /* toward +Infinity */ 113 79 if (!sn) /* ?? */ 114 80 xm += 0x8; 115 81 break; 116 - case IEEE754_RD: /* toward -Infinity */ 82 + case FPU_CSR_RD: /* toward -Infinity */ 117 83 if (sn) /* ?? */ 118 84 xm += 0x8; 119 85 break; ··· 92 130 * xe is an unbiased exponent 93 131 * xm is 3bit extended precision value. 
94 132 */ 95 - ieee754dp ieee754dp_format(int sn, int xe, u64 xm) 133 + union ieee754dp ieee754dp_format(int sn, int xe, u64 xm) 96 134 { 97 135 assert(xm); /* we don't gen exact zeros (probably should) */ 98 136 99 - assert((xm >> (DP_MBITS + 1 + 3)) == 0); /* no execess */ 137 + assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */ 100 138 assert(xm & (DP_HIDDEN_BIT << 3)); 101 139 102 140 if (xe < DP_EMIN) { ··· 104 142 int es = DP_EMIN - xe; 105 143 106 144 if (ieee754_csr.nod) { 107 - SETCX(IEEE754_UNDERFLOW); 108 - SETCX(IEEE754_INEXACT); 145 + ieee754_setcx(IEEE754_UNDERFLOW); 146 + ieee754_setcx(IEEE754_INEXACT); 109 147 110 148 switch(ieee754_csr.rm) { 111 - case IEEE754_RN: 112 - case IEEE754_RZ: 149 + case FPU_CSR_RN: 150 + case FPU_CSR_RZ: 113 151 return ieee754dp_zero(sn); 114 - case IEEE754_RU: /* toward +Infinity */ 115 - if(sn == 0) 152 + case FPU_CSR_RU: /* toward +Infinity */ 153 + if (sn == 0) 116 154 return ieee754dp_min(0); 117 155 else 118 156 return ieee754dp_zero(1); 119 - case IEEE754_RD: /* toward -Infinity */ 120 - if(sn == 0) 157 + case FPU_CSR_RD: /* toward -Infinity */ 158 + if (sn == 0) 121 159 return ieee754dp_zero(0); 122 160 else 123 161 return ieee754dp_min(1); 124 162 } 125 163 } 126 164 127 - if (xe == DP_EMIN - 1 128 - && get_rounding(sn, xm) >> (DP_MBITS + 1 + 3)) 165 + if (xe == DP_EMIN - 1 && 166 + ieee754dp_get_rounding(sn, xm) >> (DP_FBITS + 1 + 3)) 129 167 { 130 168 /* Not tiny after rounding */ 131 - SETCX(IEEE754_INEXACT); 132 - xm = get_rounding(sn, xm); 169 + ieee754_setcx(IEEE754_INEXACT); 170 + xm = ieee754dp_get_rounding(sn, xm); 133 171 xm >>= 1; 134 172 /* Clear grs bits */ 135 173 xm &= ~(DP_MBIT(3) - 1); ··· 145 183 } 146 184 } 147 185 if (xm & (DP_MBIT(3) - 1)) { 148 - SETCX(IEEE754_INEXACT); 186 + ieee754_setcx(IEEE754_INEXACT); 149 187 if ((xm & (DP_HIDDEN_BIT << 3)) == 0) { 150 - SETCX(IEEE754_UNDERFLOW); 188 + ieee754_setcx(IEEE754_UNDERFLOW); 151 189 } 152 190 153 191 /* inexact must round of 3 bits 
154 192 */ 155 - xm = get_rounding(sn, xm); 193 + xm = ieee754dp_get_rounding(sn, xm); 156 194 /* adjust exponent for rounding add overflowing 157 195 */ 158 - if (xm >> (DP_MBITS + 3 + 1)) { 196 + if (xm >> (DP_FBITS + 3 + 1)) { 159 197 /* add causes mantissa overflow */ 160 198 xm >>= 1; 161 199 xe++; ··· 164 202 /* strip grs bits */ 165 203 xm >>= 3; 166 204 167 - assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ 205 + assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 168 206 assert(xe >= DP_EMIN); 169 207 170 208 if (xe > DP_EMAX) { 171 - SETCX(IEEE754_OVERFLOW); 172 - SETCX(IEEE754_INEXACT); 209 + ieee754_setcx(IEEE754_OVERFLOW); 210 + ieee754_setcx(IEEE754_INEXACT); 173 211 /* -O can be table indexed by (rm,sn) */ 174 212 switch (ieee754_csr.rm) { 175 - case IEEE754_RN: 213 + case FPU_CSR_RN: 176 214 return ieee754dp_inf(sn); 177 - case IEEE754_RZ: 215 + case FPU_CSR_RZ: 178 216 return ieee754dp_max(sn); 179 - case IEEE754_RU: /* toward +Infinity */ 217 + case FPU_CSR_RU: /* toward +Infinity */ 180 218 if (sn == 0) 181 219 return ieee754dp_inf(0); 182 220 else 183 221 return ieee754dp_max(1); 184 - case IEEE754_RD: /* toward -Infinity */ 222 + case FPU_CSR_RD: /* toward -Infinity */ 185 223 if (sn == 0) 186 224 return ieee754dp_max(0); 187 225 else ··· 194 232 /* we underflow (tiny/zero) */ 195 233 assert(xe == DP_EMIN); 196 234 if (ieee754_csr.mx & IEEE754_UNDERFLOW) 197 - SETCX(IEEE754_UNDERFLOW); 235 + ieee754_setcx(IEEE754_UNDERFLOW); 198 236 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); 199 237 } else { 200 - assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ 238 + assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ 201 239 assert(xm & DP_HIDDEN_BIT); 202 240 203 241 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
+35 -35
arch/mips/math-emu/ieee754dp.h
··· 6 6 * MIPS floating point support 7 7 * Copyright (C) 1994-2000 Algorithmics Ltd. 8 8 * 9 - * ######################################################################## 10 - * 11 9 * This program is free software; you can distribute it and/or modify it 12 10 * under the terms of the GNU General Public License (Version 2) as 13 11 * published by the Free Software Foundation. ··· 17 19 * 18 20 * You should have received a copy of the GNU General Public License along 19 21 * with this program; if not, write to the Free Software Foundation, Inc., 20 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 21 - * 22 - * ######################################################################## 22 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 23 23 */ 24 24 25 + #include <linux/compiler.h> 25 26 26 27 #include "ieee754int.h" 27 28 28 29 #define assert(expr) ((void)0) 29 30 31 + #define DP_EBIAS 1023 32 + #define DP_EMIN (-1022) 33 + #define DP_EMAX 1023 34 + #define DP_FBITS 52 35 + #define DP_MBITS 52 36 + 37 + #define DP_MBIT(x) ((u64)1 << (x)) 38 + #define DP_HIDDEN_BIT DP_MBIT(DP_FBITS) 39 + #define DP_SIGN_BIT DP_MBIT(63) 40 + 41 + #define DPSIGN(dp) (dp.sign) 42 + #define DPBEXP(dp) (dp.bexp) 43 + #define DPMANT(dp) (dp.mant) 44 + 45 + static inline int ieee754dp_finite(union ieee754dp x) 46 + { 47 + return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS; 48 + } 49 + 30 50 /* 3bit extended double precision sticky right shift */ 31 51 #define XDPSRS(v,rs) \ 32 - ((rs > (DP_MBITS+3))?1:((v) >> (rs)) | ((v) << (64-(rs)) != 0)) 52 + ((rs > (DP_FBITS+3))?1:((v) >> (rs)) | ((v) << (64-(rs)) != 0)) 33 53 34 54 #define XDPSRSX1() \ 35 - (xe++, (xm = (xm >> 1) | (xm & 1))) 55 + (xe++, (xm = (xm >> 1) | (xm & 1))) 36 56 37 57 #define XDPSRS1(v) \ 38 - (((v) >> 1) | ((v) & 1)) 58 + (((v) >> 1) | ((v) & 1)) 39 59 40 60 /* convert denormal to normalized with extended exponent */ 41 61 #define DPDNORMx(m,e) \ 42 - while( (m >> DP_MBITS) == 0) { m <<= 1; e--; } 62 + while 
((m >> DP_FBITS) == 0) { m <<= 1; e--; } 43 63 #define DPDNORMX DPDNORMx(xm, xe) 44 64 #define DPDNORMY DPDNORMx(ym, ye) 45 65 46 - static inline ieee754dp builddp(int s, int bx, u64 m) 66 + static inline union ieee754dp builddp(int s, int bx, u64 m) 47 67 { 48 - ieee754dp r; 68 + union ieee754dp r; 49 69 50 70 assert((s) == 0 || (s) == 1); 51 71 assert((bx) >= DP_EMIN - 1 + DP_EBIAS 52 72 && (bx) <= DP_EMAX + 1 + DP_EBIAS); 53 - assert(((m) >> DP_MBITS) == 0); 73 + assert(((m) >> DP_FBITS) == 0); 54 74 55 - r.parts.sign = s; 56 - r.parts.bexp = bx; 57 - r.parts.mant = m; 75 + r.sign = s; 76 + r.bexp = bx; 77 + r.mant = m; 78 + 58 79 return r; 59 80 } 60 81 61 - extern int ieee754dp_isnan(ieee754dp); 62 - extern int ieee754dp_issnan(ieee754dp); 63 - extern int ieee754si_xcpt(int, const char *, ...); 64 - extern s64 ieee754di_xcpt(s64, const char *, ...); 65 - extern ieee754dp ieee754dp_xcpt(ieee754dp, const char *, ...); 66 - extern ieee754dp ieee754dp_nanxcpt(ieee754dp, const char *, ...); 67 - extern ieee754dp ieee754dp_bestnan(ieee754dp, ieee754dp); 68 - extern ieee754dp ieee754dp_format(int, int, u64); 69 - 70 - 71 - #define DPNORMRET2(s, e, m, name, a0, a1) \ 72 - { \ 73 - ieee754dp V = ieee754dp_format(s, e, m); \ 74 - if(TSTX()) \ 75 - return ieee754dp_xcpt(V, name, a0, a1); \ 76 - else \ 77 - return V; \ 78 - } 79 - 80 - #define DPNORMRET1(s, e, m, name, a0) DPNORMRET2(s, e, m, name, a0, a0) 82 + extern int ieee754dp_isnan(union ieee754dp); 83 + extern union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp); 84 + extern union ieee754dp ieee754dp_format(int, int, u64);
+87 -110
arch/mips/math-emu/ieee754int.h
··· 6 6 * MIPS floating point support 7 7 * Copyright (C) 1994-2000 Algorithmics Ltd. 8 8 * 9 - * ######################################################################## 10 - * 11 9 * This program is free software; you can distribute it and/or modify it 12 10 * under the terms of the GNU General Public License (Version 2) as 13 11 * published by the Free Software Foundation. ··· 17 19 * 18 20 * You should have received a copy of the GNU General Public License along 19 21 * with this program; if not, write to the Free Software Foundation, Inc., 20 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 21 - * 22 - * ######################################################################## 22 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 23 23 */ 24 - 24 + #ifndef __IEEE754INT_H 25 + #define __IEEE754INT_H 25 26 26 27 #include "ieee754.h" 27 28 28 - #define DP_EBIAS 1023 29 - #define DP_EMIN (-1022) 30 - #define DP_EMAX 1023 31 - #define DP_MBITS 52 32 - 33 - #define SP_EBIAS 127 34 - #define SP_EMIN (-126) 35 - #define SP_EMAX 127 36 - #define SP_MBITS 23 37 - 38 - #define DP_MBIT(x) ((u64)1 << (x)) 39 - #define DP_HIDDEN_BIT DP_MBIT(DP_MBITS) 40 - #define DP_SIGN_BIT DP_MBIT(63) 41 - 42 - #define SP_MBIT(x) ((u32)1 << (x)) 43 - #define SP_HIDDEN_BIT SP_MBIT(SP_MBITS) 44 - #define SP_SIGN_BIT SP_MBIT(31) 45 - 46 - 47 - #define SPSIGN(sp) (sp.parts.sign) 48 - #define SPBEXP(sp) (sp.parts.bexp) 49 - #define SPMANT(sp) (sp.parts.mant) 50 - 51 - #define DPSIGN(dp) (dp.parts.sign) 52 - #define DPBEXP(dp) (dp.parts.bexp) 53 - #define DPMANT(dp) (dp.parts.mant) 54 - 55 29 #define CLPAIR(x, y) ((x)*6+(y)) 56 30 57 - #define CLEARCX \ 58 - (ieee754_csr.cx = 0) 31 + static inline void ieee754_clearcx(void) 32 + { 33 + ieee754_csr.cx = 0; 34 + } 59 35 60 - #define SETCX(x) \ 61 - (ieee754_csr.cx |= (x), ieee754_csr.sx |= (x)) 36 + static inline void ieee754_setcx(const unsigned int flags) 37 + { 38 + ieee754_csr.cx |= flags; 39 + ieee754_csr.sx |= flags; 40 + 
} 62 41 63 - #define SETANDTESTCX(x) \ 64 - (SETCX(x), ieee754_csr.mx & (x)) 42 + static inline int ieee754_setandtestcx(const unsigned int x) 43 + { 44 + ieee754_setcx(x); 65 45 66 - #define TSTX() \ 67 - (ieee754_csr.cx & ieee754_csr.mx) 68 - 46 + return ieee754_csr.mx & x; 47 + } 69 48 70 49 #define COMPXSP \ 71 - unsigned xm; int xe; int xs __maybe_unused; int xc 50 + unsigned xm; int xe; int xs __maybe_unused; int xc 72 51 73 52 #define COMPYSP \ 74 - unsigned ym; int ye; int ys; int yc 53 + unsigned ym; int ye; int ys; int yc 75 54 76 - #define EXPLODESP(v, vc, vs, ve, vm) \ 77 - {\ 78 - vs = SPSIGN(v);\ 79 - ve = SPBEXP(v);\ 80 - vm = SPMANT(v);\ 81 - if(ve == SP_EMAX+1+SP_EBIAS){\ 82 - if(vm == 0)\ 83 - vc = IEEE754_CLASS_INF;\ 84 - else if(vm & SP_MBIT(SP_MBITS-1)) \ 85 - vc = IEEE754_CLASS_SNAN;\ 86 - else \ 87 - vc = IEEE754_CLASS_QNAN;\ 88 - } else if(ve == SP_EMIN-1+SP_EBIAS) {\ 89 - if(vm) {\ 90 - ve = SP_EMIN;\ 91 - vc = IEEE754_CLASS_DNORM;\ 92 - } else\ 93 - vc = IEEE754_CLASS_ZERO;\ 94 - } else {\ 95 - ve -= SP_EBIAS;\ 96 - vm |= SP_HIDDEN_BIT;\ 97 - vc = IEEE754_CLASS_NORM;\ 98 - }\ 55 + #define EXPLODESP(v, vc, vs, ve, vm) \ 56 + { \ 57 + vs = SPSIGN(v); \ 58 + ve = SPBEXP(v); \ 59 + vm = SPMANT(v); \ 60 + if (ve == SP_EMAX+1+SP_EBIAS) { \ 61 + if (vm == 0) \ 62 + vc = IEEE754_CLASS_INF; \ 63 + else if (vm & SP_MBIT(SP_FBITS-1)) \ 64 + vc = IEEE754_CLASS_SNAN; \ 65 + else \ 66 + vc = IEEE754_CLASS_QNAN; \ 67 + } else if (ve == SP_EMIN-1+SP_EBIAS) { \ 68 + if (vm) { \ 69 + ve = SP_EMIN; \ 70 + vc = IEEE754_CLASS_DNORM; \ 71 + } else \ 72 + vc = IEEE754_CLASS_ZERO; \ 73 + } else { \ 74 + ve -= SP_EBIAS; \ 75 + vm |= SP_HIDDEN_BIT; \ 76 + vc = IEEE754_CLASS_NORM; \ 77 + } \ 99 78 } 100 79 #define EXPLODEXSP EXPLODESP(x, xc, xs, xe, xm) 101 80 #define EXPLODEYSP EXPLODESP(y, yc, ys, ye, ym) 102 81 103 82 104 83 #define COMPXDP \ 105 - u64 xm; int xe; int xs __maybe_unused; int xc 84 + u64 xm; int xe; int xs __maybe_unused; int xc 106 85 107 86 
#define COMPYDP \ 108 - u64 ym; int ye; int ys; int yc 87 + u64 ym; int ye; int ys; int yc 109 88 110 - #define EXPLODEDP(v, vc, vs, ve, vm) \ 111 - {\ 112 - vm = DPMANT(v);\ 113 - vs = DPSIGN(v);\ 114 - ve = DPBEXP(v);\ 115 - if(ve == DP_EMAX+1+DP_EBIAS){\ 116 - if(vm == 0)\ 117 - vc = IEEE754_CLASS_INF;\ 118 - else if(vm & DP_MBIT(DP_MBITS-1)) \ 119 - vc = IEEE754_CLASS_SNAN;\ 120 - else \ 121 - vc = IEEE754_CLASS_QNAN;\ 122 - } else if(ve == DP_EMIN-1+DP_EBIAS) {\ 123 - if(vm) {\ 124 - ve = DP_EMIN;\ 125 - vc = IEEE754_CLASS_DNORM;\ 126 - } else\ 127 - vc = IEEE754_CLASS_ZERO;\ 128 - } else {\ 129 - ve -= DP_EBIAS;\ 130 - vm |= DP_HIDDEN_BIT;\ 131 - vc = IEEE754_CLASS_NORM;\ 132 - }\ 89 + #define EXPLODEDP(v, vc, vs, ve, vm) \ 90 + { \ 91 + vm = DPMANT(v); \ 92 + vs = DPSIGN(v); \ 93 + ve = DPBEXP(v); \ 94 + if (ve == DP_EMAX+1+DP_EBIAS) { \ 95 + if (vm == 0) \ 96 + vc = IEEE754_CLASS_INF; \ 97 + else if (vm & DP_MBIT(DP_FBITS-1)) \ 98 + vc = IEEE754_CLASS_SNAN; \ 99 + else \ 100 + vc = IEEE754_CLASS_QNAN; \ 101 + } else if (ve == DP_EMIN-1+DP_EBIAS) { \ 102 + if (vm) { \ 103 + ve = DP_EMIN; \ 104 + vc = IEEE754_CLASS_DNORM; \ 105 + } else \ 106 + vc = IEEE754_CLASS_ZERO; \ 107 + } else { \ 108 + ve -= DP_EBIAS; \ 109 + vm |= DP_HIDDEN_BIT; \ 110 + vc = IEEE754_CLASS_NORM; \ 111 + } \ 133 112 } 134 113 #define EXPLODEXDP EXPLODEDP(x, xc, xs, xe, xm) 135 114 #define EXPLODEYDP EXPLODEDP(y, yc, ys, ye, ym) 136 115 137 - #define FLUSHDP(v, vc, vs, ve, vm) \ 138 - if(vc==IEEE754_CLASS_DNORM) {\ 139 - if(ieee754_csr.nod) {\ 140 - SETCX(IEEE754_INEXACT);\ 141 - vc = IEEE754_CLASS_ZERO;\ 142 - ve = DP_EMIN-1+DP_EBIAS;\ 143 - vm = 0;\ 144 - v = ieee754dp_zero(vs);\ 145 - }\ 116 + #define FLUSHDP(v, vc, vs, ve, vm) \ 117 + if (vc==IEEE754_CLASS_DNORM) { \ 118 + if (ieee754_csr.nod) { \ 119 + ieee754_setcx(IEEE754_INEXACT); \ 120 + vc = IEEE754_CLASS_ZERO; \ 121 + ve = DP_EMIN-1+DP_EBIAS; \ 122 + vm = 0; \ 123 + v = ieee754dp_zero(vs); \ 124 + } \ 146 125 } 147 126 148 - 
#define FLUSHSP(v, vc, vs, ve, vm) \ 149 - if(vc==IEEE754_CLASS_DNORM) {\ 150 - if(ieee754_csr.nod) {\ 151 - SETCX(IEEE754_INEXACT);\ 152 - vc = IEEE754_CLASS_ZERO;\ 153 - ve = SP_EMIN-1+SP_EBIAS;\ 154 - vm = 0;\ 155 - v = ieee754sp_zero(vs);\ 156 - }\ 127 + #define FLUSHSP(v, vc, vs, ve, vm) \ 128 + if (vc==IEEE754_CLASS_DNORM) { \ 129 + if (ieee754_csr.nod) { \ 130 + ieee754_setcx(IEEE754_INEXACT); \ 131 + vc = IEEE754_CLASS_ZERO; \ 132 + ve = SP_EMIN-1+SP_EBIAS; \ 133 + vm = 0; \ 134 + v = ieee754sp_zero(vs); \ 135 + } \ 157 136 } 158 137 159 138 #define FLUSHXDP FLUSHDP(x, xc, xs, xe, xm) 160 139 #define FLUSHYDP FLUSHDP(y, yc, ys, ye, ym) 161 140 #define FLUSHXSP FLUSHSP(x, xc, xs, xe, xm) 162 141 #define FLUSHYSP FLUSHSP(y, yc, ys, ye, ym) 142 + 143 + #endif /* __IEEE754INT_H */
-55
arch/mips/math-emu/ieee754m.c
··· 1 - /* 2 - * floor, trunc, ceil 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754.h" 28 - 29 - ieee754dp ieee754dp_floor(ieee754dp x) 30 - { 31 - ieee754dp i; 32 - 33 - if (ieee754dp_lt(ieee754dp_modf(x, &i), ieee754dp_zero(0))) 34 - return ieee754dp_sub(i, ieee754dp_one(0)); 35 - else 36 - return i; 37 - } 38 - 39 - ieee754dp ieee754dp_ceil(ieee754dp x) 40 - { 41 - ieee754dp i; 42 - 43 - if (ieee754dp_gt(ieee754dp_modf(x, &i), ieee754dp_zero(0))) 44 - return ieee754dp_add(i, ieee754dp_one(0)); 45 - else 46 - return i; 47 - } 48 - 49 - ieee754dp ieee754dp_trunc(ieee754dp x) 50 - { 51 - ieee754dp i; 52 - 53 - (void) ieee754dp_modf(x, &i); 54 - return i; 55 - }
+43 -83
arch/mips/math-emu/ieee754sp.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 23 24 + #include <linux/compiler.h> 24 25 25 26 #include "ieee754sp.h" 26 27 27 - int ieee754sp_class(ieee754sp x) 28 + int ieee754sp_class(union ieee754sp x) 28 29 { 29 30 COMPXSP; 30 31 EXPLODEXSP; 31 32 return xc; 32 33 } 33 34 34 - int ieee754sp_isnan(ieee754sp x) 35 + int ieee754sp_isnan(union ieee754sp x) 35 36 { 36 37 return ieee754sp_class(x) >= IEEE754_CLASS_SNAN; 37 38 } 38 39 39 - int ieee754sp_issnan(ieee754sp x) 40 + static inline int ieee754sp_issnan(union ieee754sp x) 40 41 { 41 42 assert(ieee754sp_isnan(x)); 42 - return (SPMANT(x) & SP_MBIT(SP_MBITS-1)); 43 + return (SPMANT(x) & SP_MBIT(SP_FBITS-1)); 43 44 } 44 45 45 46 46 - ieee754sp ieee754sp_xcpt(ieee754sp r, const char *op, ...) 47 + union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp r) 47 48 { 48 - struct ieee754xctx ax; 49 - 50 - if (!TSTX()) 51 - return r; 52 - 53 - ax.op = op; 54 - ax.rt = IEEE754_RT_SP; 55 - ax.rv.sp = r; 56 - va_start(ax.ap, op); 57 - ieee754_xcpt(&ax); 58 - va_end(ax.ap); 59 - return ax.rv.sp; 60 - } 61 - 62 - ieee754sp ieee754sp_nanxcpt(ieee754sp r, const char *op, ...) 
63 - { 64 - struct ieee754xctx ax; 65 - 66 49 assert(ieee754sp_isnan(r)); 67 50 68 51 if (!ieee754sp_issnan(r)) /* QNAN does not cause invalid op !! */ 69 52 return r; 70 53 71 - if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) { 54 + if (!ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) { 72 55 /* not enabled convert to a quiet NaN */ 73 - SPMANT(r) &= (~SP_MBIT(SP_MBITS-1)); 56 + SPMANT(r) &= (~SP_MBIT(SP_FBITS-1)); 74 57 if (ieee754sp_isnan(r)) 75 58 return r; 76 59 else 77 60 return ieee754sp_indef(); 78 61 } 79 62 80 - ax.op = op; 81 - ax.rt = 0; 82 - ax.rv.sp = r; 83 - va_start(ax.ap, op); 84 - ieee754_xcpt(&ax); 85 - va_end(ax.ap); 86 - return ax.rv.sp; 63 + return r; 87 64 } 88 65 89 - ieee754sp ieee754sp_bestnan(ieee754sp x, ieee754sp y) 90 - { 91 - assert(ieee754sp_isnan(x)); 92 - assert(ieee754sp_isnan(y)); 93 - 94 - if (SPMANT(x) > SPMANT(y)) 95 - return x; 96 - else 97 - return y; 98 - } 99 - 100 - 101 - static unsigned get_rounding(int sn, unsigned xm) 66 + static unsigned ieee754sp_get_rounding(int sn, unsigned xm) 102 67 { 103 68 /* inexact must round of 3 bits 104 69 */ 105 70 if (xm & (SP_MBIT(3) - 1)) { 106 71 switch (ieee754_csr.rm) { 107 - case IEEE754_RZ: 72 + case FPU_CSR_RZ: 108 73 break; 109 - case IEEE754_RN: 74 + case FPU_CSR_RN: 110 75 xm += 0x3 + ((xm >> 3) & 1); 111 76 /* xm += (xm&0x8)?0x4:0x3 */ 112 77 break; 113 - case IEEE754_RU: /* toward +Infinity */ 78 + case FPU_CSR_RU: /* toward +Infinity */ 114 79 if (!sn) /* ?? */ 115 80 xm += 0x8; 116 81 break; 117 - case IEEE754_RD: /* toward -Infinity */ 82 + case FPU_CSR_RD: /* toward -Infinity */ 118 83 if (sn) /* ?? */ 119 84 xm += 0x8; 120 85 break; ··· 92 131 * xe is an unbiased exponent 93 132 * xm is 3bit extended precision value. 
94 133 */ 95 - ieee754sp ieee754sp_format(int sn, int xe, unsigned xm) 134 + union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm) 96 135 { 97 136 assert(xm); /* we don't gen exact zeros (probably should) */ 98 137 99 - assert((xm >> (SP_MBITS + 1 + 3)) == 0); /* no execess */ 138 + assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */ 100 139 assert(xm & (SP_HIDDEN_BIT << 3)); 101 140 102 141 if (xe < SP_EMIN) { ··· 104 143 int es = SP_EMIN - xe; 105 144 106 145 if (ieee754_csr.nod) { 107 - SETCX(IEEE754_UNDERFLOW); 108 - SETCX(IEEE754_INEXACT); 146 + ieee754_setcx(IEEE754_UNDERFLOW); 147 + ieee754_setcx(IEEE754_INEXACT); 109 148 110 149 switch(ieee754_csr.rm) { 111 - case IEEE754_RN: 112 - case IEEE754_RZ: 150 + case FPU_CSR_RN: 151 + case FPU_CSR_RZ: 113 152 return ieee754sp_zero(sn); 114 - case IEEE754_RU: /* toward +Infinity */ 115 - if(sn == 0) 153 + case FPU_CSR_RU: /* toward +Infinity */ 154 + if (sn == 0) 116 155 return ieee754sp_min(0); 117 156 else 118 157 return ieee754sp_zero(1); 119 - case IEEE754_RD: /* toward -Infinity */ 120 - if(sn == 0) 158 + case FPU_CSR_RD: /* toward -Infinity */ 159 + if (sn == 0) 121 160 return ieee754sp_zero(0); 122 161 else 123 162 return ieee754sp_min(1); 124 163 } 125 164 } 126 165 127 - if (xe == SP_EMIN - 1 128 - && get_rounding(sn, xm) >> (SP_MBITS + 1 + 3)) 166 + if (xe == SP_EMIN - 1 && 167 + ieee754sp_get_rounding(sn, xm) >> (SP_FBITS + 1 + 3)) 129 168 { 130 169 /* Not tiny after rounding */ 131 - SETCX(IEEE754_INEXACT); 132 - xm = get_rounding(sn, xm); 170 + ieee754_setcx(IEEE754_INEXACT); 171 + xm = ieee754sp_get_rounding(sn, xm); 133 172 xm >>= 1; 134 173 /* Clear grs bits */ 135 174 xm &= ~(SP_MBIT(3) - 1); 136 175 xe++; 137 - } 138 - else { 176 + } else { 139 177 /* sticky right shift es bits 140 178 */ 141 179 SPXSRSXn(es); ··· 143 183 } 144 184 } 145 185 if (xm & (SP_MBIT(3) - 1)) { 146 - SETCX(IEEE754_INEXACT); 186 + ieee754_setcx(IEEE754_INEXACT); 147 187 if ((xm & (SP_HIDDEN_BIT << 3)) == 0) { 
148 - SETCX(IEEE754_UNDERFLOW); 188 + ieee754_setcx(IEEE754_UNDERFLOW); 149 189 } 150 190 151 191 /* inexact must round of 3 bits 152 192 */ 153 - xm = get_rounding(sn, xm); 193 + xm = ieee754sp_get_rounding(sn, xm); 154 194 /* adjust exponent for rounding add overflowing 155 195 */ 156 - if (xm >> (SP_MBITS + 1 + 3)) { 196 + if (xm >> (SP_FBITS + 1 + 3)) { 157 197 /* add causes mantissa overflow */ 158 198 xm >>= 1; 159 199 xe++; ··· 162 202 /* strip grs bits */ 163 203 xm >>= 3; 164 204 165 - assert((xm >> (SP_MBITS + 1)) == 0); /* no execess */ 205 + assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 166 206 assert(xe >= SP_EMIN); 167 207 168 208 if (xe > SP_EMAX) { 169 - SETCX(IEEE754_OVERFLOW); 170 - SETCX(IEEE754_INEXACT); 209 + ieee754_setcx(IEEE754_OVERFLOW); 210 + ieee754_setcx(IEEE754_INEXACT); 171 211 /* -O can be table indexed by (rm,sn) */ 172 212 switch (ieee754_csr.rm) { 173 - case IEEE754_RN: 213 + case FPU_CSR_RN: 174 214 return ieee754sp_inf(sn); 175 - case IEEE754_RZ: 215 + case FPU_CSR_RZ: 176 216 return ieee754sp_max(sn); 177 - case IEEE754_RU: /* toward +Infinity */ 217 + case FPU_CSR_RU: /* toward +Infinity */ 178 218 if (sn == 0) 179 219 return ieee754sp_inf(0); 180 220 else 181 221 return ieee754sp_max(1); 182 - case IEEE754_RD: /* toward -Infinity */ 222 + case FPU_CSR_RD: /* toward -Infinity */ 183 223 if (sn == 0) 184 224 return ieee754sp_max(0); 185 225 else ··· 192 232 /* we underflow (tiny/zero) */ 193 233 assert(xe == SP_EMIN); 194 234 if (ieee754_csr.mx & IEEE754_UNDERFLOW) 195 - SETCX(IEEE754_UNDERFLOW); 235 + ieee754_setcx(IEEE754_UNDERFLOW); 196 236 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm); 197 237 } else { 198 - assert((xm >> (SP_MBITS + 1)) == 0); /* no execess */ 238 + assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ 199 239 assert(xm & SP_HIDDEN_BIT); 200 240 201 241 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
+39 -40
arch/mips/math-emu/ieee754sp.h
··· 6 6 * MIPS floating point support 7 7 * Copyright (C) 1994-2000 Algorithmics Ltd. 8 8 * 9 - * ######################################################################## 10 - * 11 9 * This program is free software; you can distribute it and/or modify it 12 10 * under the terms of the GNU General Public License (Version 2) as 13 11 * published by the Free Software Foundation. ··· 17 19 * 18 20 * You should have received a copy of the GNU General Public License along 19 21 * with this program; if not, write to the Free Software Foundation, Inc., 20 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 21 - * 22 - * ######################################################################## 22 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 23 23 */ 24 24 25 + #include <linux/compiler.h> 25 26 26 27 #include "ieee754int.h" 27 28 28 29 #define assert(expr) ((void)0) 29 30 31 + #define SP_EBIAS 127 32 + #define SP_EMIN (-126) 33 + #define SP_EMAX 127 34 + #define SP_FBITS 23 35 + #define SP_MBITS 23 36 + 37 + #define SP_MBIT(x) ((u32)1 << (x)) 38 + #define SP_HIDDEN_BIT SP_MBIT(SP_FBITS) 39 + #define SP_SIGN_BIT SP_MBIT(31) 40 + 41 + #define SPSIGN(sp) (sp.sign) 42 + #define SPBEXP(sp) (sp.bexp) 43 + #define SPMANT(sp) (sp.mant) 44 + 45 + static inline int ieee754sp_finite(union ieee754sp x) 46 + { 47 + return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS; 48 + } 49 + 30 50 /* 3bit extended single precision sticky right shift */ 31 - #define SPXSRSXn(rs) \ 32 - (xe += rs, \ 33 - xm = (rs > (SP_MBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0)) 51 + #define SPXSRSXn(rs) \ 52 + (xe += rs, \ 53 + xm = (rs > (SP_FBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0)) 34 54 35 55 #define SPXSRSX1() \ 36 - (xe++, (xm = (xm >> 1) | (xm & 1))) 56 + (xe++, (xm = (xm >> 1) | (xm & 1))) 37 57 38 - #define SPXSRSYn(rs) \ 39 - (ye+=rs, \ 40 - ym = (rs > (SP_MBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0)) 58 + #define SPXSRSYn(rs) \ 59 + (ye+=rs, \ 60 + ym = (rs > 
(SP_FBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0)) 41 61 42 62 #define SPXSRSY1() \ 43 - (ye++, (ym = (ym >> 1) | (ym & 1))) 63 + (ye++, (ym = (ym >> 1) | (ym & 1))) 44 64 45 65 /* convert denormal to normalized with extended exponent */ 46 66 #define SPDNORMx(m,e) \ 47 - while( (m >> SP_MBITS) == 0) { m <<= 1; e--; } 67 + while ((m >> SP_FBITS) == 0) { m <<= 1; e--; } 48 68 #define SPDNORMX SPDNORMx(xm, xe) 49 69 #define SPDNORMY SPDNORMx(ym, ye) 50 70 51 - static inline ieee754sp buildsp(int s, int bx, unsigned m) 71 + static inline union ieee754sp buildsp(int s, int bx, unsigned m) 52 72 { 53 - ieee754sp r; 73 + union ieee754sp r; 54 74 55 75 assert((s) == 0 || (s) == 1); 56 76 assert((bx) >= SP_EMIN - 1 + SP_EBIAS 57 77 && (bx) <= SP_EMAX + 1 + SP_EBIAS); 58 - assert(((m) >> SP_MBITS) == 0); 78 + assert(((m) >> SP_FBITS) == 0); 59 79 60 - r.parts.sign = s; 61 - r.parts.bexp = bx; 62 - r.parts.mant = m; 80 + r.sign = s; 81 + r.bexp = bx; 82 + r.mant = m; 63 83 64 84 return r; 65 85 } 66 86 67 - extern int ieee754sp_isnan(ieee754sp); 68 - extern int ieee754sp_issnan(ieee754sp); 69 - extern int ieee754si_xcpt(int, const char *, ...); 70 - extern s64 ieee754di_xcpt(s64, const char *, ...); 71 - extern ieee754sp ieee754sp_xcpt(ieee754sp, const char *, ...); 72 - extern ieee754sp ieee754sp_nanxcpt(ieee754sp, const char *, ...); 73 - extern ieee754sp ieee754sp_bestnan(ieee754sp, ieee754sp); 74 - extern ieee754sp ieee754sp_format(int, int, unsigned); 75 - 76 - 77 - #define SPNORMRET2(s, e, m, name, a0, a1) \ 78 - { \ 79 - ieee754sp V = ieee754sp_format(s, e, m); \ 80 - if(TSTX()) \ 81 - return ieee754sp_xcpt(V, name, a0, a1); \ 82 - else \ 83 - return V; \ 84 - } 85 - 86 - #define SPNORMRET1(s, e, m, name, a0) SPNORMRET2(s, e, m, name, a0, a0) 87 + extern int ieee754sp_isnan(union ieee754sp); 88 + extern union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp); 89 + extern union ieee754sp ieee754sp_format(int, int, unsigned);
-47
arch/mips/math-emu/ieee754xcpt.c
··· 1 - /* 2 - * MIPS floating point support 3 - * Copyright (C) 1994-2000 Algorithmics Ltd. 4 - * 5 - * ######################################################################## 6 - * 7 - * This program is free software; you can distribute it and/or modify it 8 - * under the terms of the GNU General Public License (Version 2) as 9 - * published by the Free Software Foundation. 10 - * 11 - * This program is distributed in the hope it will be useful, but WITHOUT 12 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 - * for more details. 15 - * 16 - * You should have received a copy of the GNU General Public License along 17 - * with this program; if not, write to the Free Software Foundation, Inc., 18 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 - * 20 - * ######################################################################## 21 - */ 22 - 23 - /************************************************************************** 24 - * Nov 7, 2000 25 - * Added preprocessor hacks to map to Linux kernel diagnostics. 26 - * 27 - * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 28 - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 29 - *************************************************************************/ 30 - 31 - #include <linux/kernel.h> 32 - #include "ieee754.h" 33 - 34 - /* 35 - * Very naff exception handler (you can plug in your own and 36 - * override this). 37 - */ 38 - 39 - static const char *const rtnames[] = { 40 - "sp", "dp", "xp", "si", "di" 41 - }; 42 - 43 - void ieee754_xcpt(struct ieee754xctx *xcp) 44 - { 45 - printk(KERN_DEBUG "floating point exception in \"%s\", type=%s\n", 46 - xcp->op, rtnames[xcp->rt]); 47 - }
-45
arch/mips/math-emu/kernel_linkage.c
··· 1 - /* 2 - * Kevin D. Kissell, kevink@mips and Carsten Langgaard, carstenl@mips.com 3 - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 4 - * 5 - * This program is free software; you can distribute it and/or modify it 6 - * under the terms of the GNU General Public License (Version 2) as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 - * for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along 15 - * with this program; if not, write to the Free Software Foundation, Inc., 16 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 17 - * 18 - * Routines corresponding to Linux kernel FP context 19 - * manipulation primitives for the Algorithmics MIPS 20 - * FPU Emulator 21 - */ 22 - #include <linux/sched.h> 23 - #include <asm/processor.h> 24 - #include <asm/signal.h> 25 - #include <asm/uaccess.h> 26 - 27 - #include <asm/fpu.h> 28 - #include <asm/fpu_emulator.h> 29 - 30 - #define SIGNALLING_NAN 0x7ff800007ff80000LL 31 - 32 - void fpu_emulator_init_fpu(void) 33 - { 34 - static int first = 1; 35 - int i; 36 - 37 - if (first) { 38 - first = 0; 39 - printk("Algorithmics/MIPS FPU Emulator v1.5\n"); 40 - } 41 - 42 - current->thread.fpu.fcr31 = 0; 43 - for (i = 0; i < 32; i++) 44 - set_fpr64(&current->thread.fpu.fpr[i], 0, SIGNALLING_NAN); 45 - }
+67
arch/mips/math-emu/me-debugfs.c
··· 1 + #include <linux/cpumask.h> 2 + #include <linux/debugfs.h> 3 + #include <linux/fs.h> 4 + #include <linux/init.h> 5 + #include <linux/percpu.h> 6 + #include <linux/types.h> 7 + #include <asm/fpu_emulator.h> 8 + #include <asm/local.h> 9 + 10 + DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); 11 + 12 + static int fpuemu_stat_get(void *data, u64 *val) 13 + { 14 + int cpu; 15 + unsigned long sum = 0; 16 + 17 + for_each_online_cpu(cpu) { 18 + struct mips_fpu_emulator_stats *ps; 19 + local_t *pv; 20 + 21 + ps = &per_cpu(fpuemustats, cpu); 22 + pv = (void *)ps + (unsigned long)data; 23 + sum += local_read(pv); 24 + } 25 + *val = sum; 26 + return 0; 27 + } 28 + DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n"); 29 + 30 + extern struct dentry *mips_debugfs_dir; 31 + static int __init debugfs_fpuemu(void) 32 + { 33 + struct dentry *d, *dir; 34 + 35 + if (!mips_debugfs_dir) 36 + return -ENODEV; 37 + dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir); 38 + if (!dir) 39 + return -ENOMEM; 40 + 41 + #define FPU_EMU_STAT_OFFSET(m) \ 42 + offsetof(struct mips_fpu_emulator_stats, m) 43 + 44 + #define FPU_STAT_CREATE(m) \ 45 + do { \ 46 + d = debugfs_create_file(#m , S_IRUGO, dir, \ 47 + (void *)FPU_EMU_STAT_OFFSET(m), \ 48 + &fops_fpuemu_stat); \ 49 + if (!d) \ 50 + return -ENOMEM; \ 51 + } while (0) 52 + 53 + FPU_STAT_CREATE(emulated); 54 + FPU_STAT_CREATE(loads); 55 + FPU_STAT_CREATE(stores); 56 + FPU_STAT_CREATE(cp1ops); 57 + FPU_STAT_CREATE(cp1xops); 58 + FPU_STAT_CREATE(errors); 59 + FPU_STAT_CREATE(ieee754_inexact); 60 + FPU_STAT_CREATE(ieee754_underflow); 61 + FPU_STAT_CREATE(ieee754_overflow); 62 + FPU_STAT_CREATE(ieee754_zerodiv); 63 + FPU_STAT_CREATE(ieee754_invalidop); 64 + 65 + return 0; 66 + } 67 + __initcall(debugfs_fpuemu);
+38 -34
arch/mips/math-emu/sp_add.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y) 26 + union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y) 28 27 { 28 + int s; 29 + 29 30 COMPXSP; 30 31 COMPYSP; 31 32 32 33 EXPLODEXSP; 33 34 EXPLODEYSP; 34 35 35 - CLEARCX; 36 + ieee754_clearcx(); 36 37 37 38 FLUSHXSP; 38 39 FLUSHYSP; ··· 48 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 49 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 50 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 51 - SETCX(IEEE754_INVALID_OPERATION); 52 - return ieee754sp_nanxcpt(ieee754sp_indef(), "add", x, y); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754sp_nanxcpt(ieee754sp_indef()); 53 56 54 57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 55 58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 65 68 return x; 66 69 67 70 68 - /* Infinity handling 69 - */ 70 - 71 + /* 72 + * Infinity handling 73 + */ 71 74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 72 75 if (xs == ys) 73 76 return x; 74 - SETCX(IEEE754_INVALID_OPERATION); 75 - return ieee754sp_xcpt(ieee754sp_indef(), "add", x, y); 77 + ieee754_setcx(IEEE754_INVALID_OPERATION); 78 + return 
ieee754sp_indef(); 76 79 77 80 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 78 81 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ··· 84 87 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 85 88 return x; 86 89 87 - /* Zero handling 88 - */ 89 - 90 + /* 91 + * Zero handling 92 + */ 90 93 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 91 94 if (xs == ys) 92 95 return x; 93 96 else 94 - return ieee754sp_zero(ieee754_csr.rm == 95 - IEEE754_RD); 97 + return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); 96 98 97 99 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 98 100 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ··· 103 107 104 108 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): 105 109 SPDNORMX; 110 + 111 + /* FALL THROUGH */ 106 112 107 113 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): 108 114 SPDNORMY; ··· 120 122 assert(xm & SP_HIDDEN_BIT); 121 123 assert(ym & SP_HIDDEN_BIT); 122 124 123 - /* provide guard,round and stick bit space */ 125 + /* 126 + * Provide guard, round and stick bit space. 127 + */ 124 128 xm <<= 3; 125 129 ym <<= 3; 126 130 127 131 if (xe > ye) { 128 - /* have to shift y fraction right to align 132 + /* 133 + * Have to shift y fraction right to align. 129 134 */ 130 - int s = xe - ye; 135 + s = xe - ye; 131 136 SPXSRSYn(s); 132 137 } else if (ye > xe) { 133 - /* have to shift x fraction right to align 138 + /* 139 + * Have to shift x fraction right to align. 134 140 */ 135 - int s = ye - xe; 141 + s = ye - xe; 136 142 SPXSRSXn(s); 137 143 } 138 144 assert(xe == ye); 139 145 assert(xe <= SP_EMAX); 140 146 141 147 if (xs == ys) { 142 - /* generate 28 bit result of adding two 27 bit numbers 143 - * leaving result in xm,xs,xe 148 + /* 149 + * Generate 28 bit result of adding two 27 bit numbers 150 + * leaving result in xm, xs and xe. 
144 151 */ 145 152 xm = xm + ym; 146 153 xe = xe; 147 154 xs = xs; 148 155 149 - if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ 156 + if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */ 150 157 SPXSRSX1(); 151 158 } 152 159 } else { ··· 165 162 xs = ys; 166 163 } 167 164 if (xm == 0) 168 - return ieee754sp_zero(ieee754_csr.rm == 169 - IEEE754_RD); 165 + return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); 170 166 171 - /* normalize in extended single precision */ 172 - while ((xm >> (SP_MBITS + 3)) == 0) { 167 + /* 168 + * Normalize in extended single precision 169 + */ 170 + while ((xm >> (SP_FBITS + 3)) == 0) { 173 171 xm <<= 1; 174 172 xe--; 175 173 } 176 - 177 174 } 178 - SPNORMRET2(xs, xe, xm, "add", x, y); 175 + 176 + return ieee754sp_format(xs, xe, xm); 179 177 }
+11 -13
arch/mips/math-emu/sp_cmp.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cmp, int sig) 26 + int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cmp, int sig) 28 27 { 28 + int vx; 29 + int vy; 30 + 29 31 COMPXSP; 30 32 COMPYSP; 31 33 ··· 33 35 EXPLODEYSP; 34 36 FLUSHXSP; 35 37 FLUSHYSP; 36 - CLEARCX; /* Even clear inexact flag here */ 38 + ieee754_clearcx(); /* Even clear inexact flag here */ 37 39 38 40 if (ieee754sp_isnan(x) || ieee754sp_isnan(y)) { 39 41 if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN) 40 - SETCX(IEEE754_INVALID_OPERATION); 42 + ieee754_setcx(IEEE754_INVALID_OPERATION); 41 43 if (cmp & IEEE754_CUN) 42 44 return 1; 43 45 if (cmp & (IEEE754_CLT | IEEE754_CGT)) { 44 - if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION)) 45 - return ieee754si_xcpt(0, "fcmpf", x); 46 + if (sig && ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) 47 + return 0; 46 48 } 47 49 return 0; 48 50 } else { 49 - int vx = x.bits; 50 - int vy = y.bits; 51 + vx = x.bits; 52 + vy = y.bits; 51 53 52 54 if (vx < 0) 53 55 vx = -vx ^ SP_SIGN_BIT;
+46 -49
arch/mips/math-emu/sp_div.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y) 26 + union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y) 28 27 { 28 + unsigned rm; 29 + int re; 30 + unsigned bm; 31 + 29 32 COMPXSP; 30 33 COMPYSP; 31 34 32 35 EXPLODEXSP; 33 36 EXPLODEYSP; 34 37 35 - CLEARCX; 38 + ieee754_clearcx(); 36 39 37 40 FLUSHXSP; 38 41 FLUSHYSP; ··· 50 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 51 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 52 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 53 - SETCX(IEEE754_INVALID_OPERATION); 54 - return ieee754sp_nanxcpt(ieee754sp_indef(), "div", x, y); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754sp_nanxcpt(ieee754sp_indef()); 55 56 56 57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 57 58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 67 68 return x; 68 69 69 70 70 - /* Infinity handling 71 - */ 72 - 71 + /* 72 + * Infinity handling 73 + */ 73 74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 74 - SETCX(IEEE754_INVALID_OPERATION); 75 - return ieee754sp_xcpt(ieee754sp_indef(), "div", x, y); 75 + ieee754_setcx(IEEE754_INVALID_OPERATION); 76 + return 
ieee754sp_indef(); 76 77 77 78 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 78 79 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): ··· 84 85 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 85 86 return ieee754sp_inf(xs ^ ys); 86 87 87 - /* Zero handling 88 - */ 89 - 88 + /* 89 + * Zero handling 90 + */ 90 91 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 91 - SETCX(IEEE754_INVALID_OPERATION); 92 - return ieee754sp_xcpt(ieee754sp_indef(), "div", x, y); 92 + ieee754_setcx(IEEE754_INVALID_OPERATION); 93 + return ieee754sp_indef(); 93 94 94 95 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 95 96 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 96 - SETCX(IEEE754_ZERO_DIVIDE); 97 - return ieee754sp_xcpt(ieee754sp_inf(xs ^ ys), "div", x, y); 97 + ieee754_setcx(IEEE754_ZERO_DIVIDE); 98 + return ieee754sp_inf(xs ^ ys); 98 99 99 100 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): 100 101 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): ··· 121 122 xm <<= 3; 122 123 ym <<= 3; 123 124 124 - { 125 - /* now the dirty work */ 125 + /* now the dirty work */ 126 126 127 - unsigned rm = 0; 128 - int re = xe - ye; 129 - unsigned bm; 127 + rm = 0; 128 + re = xe - ye; 130 129 131 - for (bm = SP_MBIT(SP_MBITS + 2); bm; bm >>= 1) { 132 - if (xm >= ym) { 133 - xm -= ym; 134 - rm |= bm; 135 - if (xm == 0) 136 - break; 137 - } 138 - xm <<= 1; 130 + for (bm = SP_MBIT(SP_FBITS + 2); bm; bm >>= 1) { 131 + if (xm >= ym) { 132 + xm -= ym; 133 + rm |= bm; 134 + if (xm == 0) 135 + break; 139 136 } 140 - rm <<= 1; 141 - if (xm) 142 - rm |= 1; /* have remainder, set sticky */ 143 - 144 - assert(rm); 145 - 146 - /* normalise rm to rounding precision ? 147 - */ 148 - while ((rm >> (SP_MBITS + 3)) == 0) { 149 - rm <<= 1; 150 - re--; 151 - } 152 - 153 - SPNORMRET2(xs == ys ? 
0 : 1, re, rm, "div", x, y); 137 + xm <<= 1; 154 138 } 139 + 140 + rm <<= 1; 141 + if (xm) 142 + rm |= 1; /* have remainder, set sticky */ 143 + 144 + assert(rm); 145 + 146 + /* normalise rm to rounding precision ? 147 + */ 148 + while ((rm >> (SP_FBITS + 3)) == 0) { 149 + rm <<= 1; 150 + re--; 151 + } 152 + 153 + return ieee754sp_format(xs == ys ? 0 : 1, re, rm); 155 154 }
+28 -28
arch/mips/math-emu/sp_fdp.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 23 24 - 25 24 #include "ieee754sp.h" 25 + #include "ieee754dp.h" 26 26 27 - ieee754sp ieee754sp_fdp(ieee754dp x) 27 + union ieee754sp ieee754sp_fdp(union ieee754dp x) 28 28 { 29 + u32 rm; 30 + 29 31 COMPXDP; 30 - ieee754sp nan; 32 + union ieee754sp nan; 31 33 32 34 EXPLODEXDP; 33 35 34 - CLEARCX; 36 + ieee754_clearcx(); 35 37 36 38 FLUSHXDP; 37 39 38 40 switch (xc) { 39 41 case IEEE754_CLASS_SNAN: 40 - SETCX(IEEE754_INVALID_OPERATION); 41 - return ieee754sp_nanxcpt(ieee754sp_indef(), "fdp"); 42 + ieee754_setcx(IEEE754_INVALID_OPERATION); 43 + return ieee754sp_nanxcpt(ieee754sp_indef()); 44 + 42 45 case IEEE754_CLASS_QNAN: 43 46 nan = buildsp(xs, SP_EMAX + 1 + SP_EBIAS, (u32) 44 - (xm >> (DP_MBITS - SP_MBITS))); 47 + (xm >> (DP_FBITS - SP_FBITS))); 45 48 if (!ieee754sp_isnan(nan)) 46 49 nan = ieee754sp_indef(); 47 - return ieee754sp_nanxcpt(nan, "fdp", x); 50 + return ieee754sp_nanxcpt(nan); 51 + 48 52 case IEEE754_CLASS_INF: 49 53 return ieee754sp_inf(xs); 54 + 50 55 case IEEE754_CLASS_ZERO: 51 56 return ieee754sp_zero(xs); 57 + 52 58 case IEEE754_CLASS_DNORM: 53 59 /* can't possibly be sp representable */ 54 - SETCX(IEEE754_UNDERFLOW); 55 - SETCX(IEEE754_INEXACT); 56 - if ((ieee754_csr.rm == IEEE754_RU && 
!xs) || 57 - (ieee754_csr.rm == IEEE754_RD && xs)) 58 - return ieee754sp_xcpt(ieee754sp_mind(xs), "fdp", x); 59 - return ieee754sp_xcpt(ieee754sp_zero(xs), "fdp", x); 60 + ieee754_setcx(IEEE754_UNDERFLOW); 61 + ieee754_setcx(IEEE754_INEXACT); 62 + if ((ieee754_csr.rm == FPU_CSR_RU && !xs) || 63 + (ieee754_csr.rm == FPU_CSR_RD && xs)) 64 + return ieee754sp_mind(xs); 65 + return ieee754sp_zero(xs); 66 + 60 67 case IEEE754_CLASS_NORM: 61 68 break; 62 69 } 63 70 64 - { 65 - u32 rm; 71 + /* 72 + * Convert from DP_FBITS to SP_FBITS+3 with sticky right shift. 73 + */ 74 + rm = (xm >> (DP_FBITS - (SP_FBITS + 3))) | 75 + ((xm << (64 - (DP_FBITS - (SP_FBITS + 3)))) != 0); 66 76 67 - /* convert from DP_MBITS to SP_MBITS+3 with sticky right shift 68 - */ 69 - rm = (xm >> (DP_MBITS - (SP_MBITS + 3))) | 70 - ((xm << (64 - (DP_MBITS - (SP_MBITS + 3)))) != 0); 71 - 72 - SPNORMRET1(xs, xe, rm, "fdp", x); 73 - } 77 + return ieee754sp_format(xs, xe, rm); 74 78 }
+8 -22
arch/mips/math-emu/sp_fint.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - ieee754sp ieee754sp_fint(int x) 26 + union ieee754sp ieee754sp_fint(int x) 28 27 { 29 28 unsigned xm; 30 29 int xe; 31 30 int xs; 32 31 33 - CLEARCX; 32 + ieee754_clearcx(); 34 33 35 34 if (x == 0) 36 35 return ieee754sp_zero(0); ··· 45 50 } else { 46 51 xm = x; 47 52 } 48 - xe = SP_MBITS + 3; 53 + xe = SP_FBITS + 3; 49 54 50 - if (xm >> (SP_MBITS + 1 + 3)) { 55 + if (xm >> (SP_FBITS + 1 + 3)) { 51 56 /* shunt out overflow bits 52 57 */ 53 - while (xm >> (SP_MBITS + 1 + 3)) { 58 + while (xm >> (SP_FBITS + 1 + 3)) { 54 59 SPXSRSX1(); 55 60 } 56 61 } else { 57 62 /* normalize in grs extended single precision 58 63 */ 59 - while ((xm >> (SP_MBITS + 3)) == 0) { 64 + while ((xm >> (SP_FBITS + 3)) == 0) { 60 65 xm <<= 1; 61 66 xe--; 62 67 } 63 68 } 64 - SPNORMRET1(xs, xe, xm, "fint", x); 65 - } 66 - 67 - 68 - ieee754sp ieee754sp_funs(unsigned int u) 69 - { 70 - if ((int) u < 0) 71 - return ieee754sp_add(ieee754sp_1e31(), 72 - ieee754sp_fint(u & ~(1 << 31))); 73 - return ieee754sp_fint(u); 69 + return ieee754sp_format(xs, xe, xm); 74 70 }
+8 -22
arch/mips/math-emu/sp_flong.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - ieee754sp ieee754sp_flong(s64 x) 26 + union ieee754sp ieee754sp_flong(s64 x) 28 27 { 29 28 u64 xm; /* <--- need 64-bit mantissa temp */ 30 29 int xe; 31 30 int xs; 32 31 33 - CLEARCX; 32 + ieee754_clearcx(); 34 33 35 34 if (x == 0) 36 35 return ieee754sp_zero(0); ··· 45 50 } else { 46 51 xm = x; 47 52 } 48 - xe = SP_MBITS + 3; 53 + xe = SP_FBITS + 3; 49 54 50 - if (xm >> (SP_MBITS + 1 + 3)) { 55 + if (xm >> (SP_FBITS + 1 + 3)) { 51 56 /* shunt out overflow bits 52 57 */ 53 - while (xm >> (SP_MBITS + 1 + 3)) { 58 + while (xm >> (SP_FBITS + 1 + 3)) { 54 59 SPXSRSX1(); 55 60 } 56 61 } else { 57 62 /* normalize in grs extended single precision */ 58 - while ((xm >> (SP_MBITS + 3)) == 0) { 63 + while ((xm >> (SP_FBITS + 3)) == 0) { 59 64 xm <<= 1; 60 65 xe--; 61 66 } 62 67 } 63 - SPNORMRET1(xs, xe, xm, "sp_flong", x); 64 - } 65 - 66 - 67 - ieee754sp ieee754sp_fulong(u64 u) 68 - { 69 - if ((s64) u < 0) 70 - return ieee754sp_add(ieee754sp_1e63(), 71 - ieee754sp_flong(u & ~(1ULL << 63))); 72 - return ieee754sp_flong(u); 68 + return ieee754sp_format(xs, xe, xm); 73 69 }
-52
arch/mips/math-emu/sp_frexp.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * single precision 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754sp.h" 28 - 29 - /* close to ieeep754sp_logb 30 - */ 31 - ieee754sp ieee754sp_frexp(ieee754sp x, int *eptr) 32 - { 33 - COMPXSP; 34 - CLEARCX; 35 - EXPLODEXSP; 36 - 37 - switch (xc) { 38 - case IEEE754_CLASS_SNAN: 39 - case IEEE754_CLASS_QNAN: 40 - case IEEE754_CLASS_INF: 41 - case IEEE754_CLASS_ZERO: 42 - *eptr = 0; 43 - return x; 44 - case IEEE754_CLASS_DNORM: 45 - SPDNORMX; 46 - break; 47 - case IEEE754_CLASS_NORM: 48 - break; 49 - } 50 - *eptr = xe + 1; 51 - return buildsp(xs, -1 + SP_EBIAS, xm & ~SP_HIDDEN_BIT); 52 - }
-53
arch/mips/math-emu/sp_logb.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * single precision 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754sp.h" 28 - 29 - ieee754sp ieee754sp_logb(ieee754sp x) 30 - { 31 - COMPXSP; 32 - 33 - CLEARCX; 34 - 35 - EXPLODEXSP; 36 - 37 - switch (xc) { 38 - case IEEE754_CLASS_SNAN: 39 - return ieee754sp_nanxcpt(x, "logb", x); 40 - case IEEE754_CLASS_QNAN: 41 - return x; 42 - case IEEE754_CLASS_INF: 43 - return ieee754sp_inf(0); 44 - case IEEE754_CLASS_ZERO: 45 - return ieee754sp_inf(1); 46 - case IEEE754_CLASS_DNORM: 47 - SPDNORMX; 48 - break; 49 - case IEEE754_CLASS_NORM: 50 - break; 51 - } 52 - return ieee754sp_fint(xe); 53 - }
-79
arch/mips/math-emu/sp_modf.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * single precision 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754sp.h" 28 - 29 - /* modf function is always exact for a finite number 30 - */ 31 - ieee754sp ieee754sp_modf(ieee754sp x, ieee754sp *ip) 32 - { 33 - COMPXSP; 34 - 35 - CLEARCX; 36 - 37 - EXPLODEXSP; 38 - 39 - switch (xc) { 40 - case IEEE754_CLASS_SNAN: 41 - case IEEE754_CLASS_QNAN: 42 - case IEEE754_CLASS_INF: 43 - case IEEE754_CLASS_ZERO: 44 - *ip = x; 45 - return x; 46 - case IEEE754_CLASS_DNORM: 47 - /* far to small */ 48 - *ip = ieee754sp_zero(xs); 49 - return x; 50 - case IEEE754_CLASS_NORM: 51 - break; 52 - } 53 - if (xe < 0) { 54 - *ip = ieee754sp_zero(xs); 55 - return x; 56 - } 57 - if (xe >= SP_MBITS) { 58 - *ip = x; 59 - return ieee754sp_zero(xs); 60 - } 61 - /* generate ipart mantissa by clearing bottom bits 62 - */ 63 - *ip = buildsp(xs, xe + SP_EBIAS, 64 - ((xm >> (SP_MBITS - xe)) << (SP_MBITS - xe)) & 65 - ~SP_HIDDEN_BIT); 66 - 67 - /* generate fpart mantissa by clearing top bits 68 - * and normalizing 
(must be able to normalize) 69 - */ 70 - xm = (xm << (32 - (SP_MBITS - xe))) >> (32 - (SP_MBITS - xe)); 71 - if (xm == 0) 72 - return ieee754sp_zero(xs); 73 - 74 - while ((xm >> SP_MBITS) == 0) { 75 - xm <<= 1; 76 - xe--; 77 - } 78 - return buildsp(xs, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); 79 - }
+60 -65
arch/mips/math-emu/sp_mul.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y) 26 + union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y) 28 27 { 28 + int re; 29 + int rs; 30 + unsigned rm; 31 + unsigned short lxm; 32 + unsigned short hxm; 33 + unsigned short lym; 34 + unsigned short hym; 35 + unsigned lrm; 36 + unsigned hrm; 37 + unsigned t; 38 + unsigned at; 39 + 29 40 COMPXSP; 30 41 COMPYSP; 31 42 32 43 EXPLODEXSP; 33 44 EXPLODEYSP; 34 45 35 - CLEARCX; 46 + ieee754_clearcx(); 36 47 37 48 FLUSHXSP; 38 49 FLUSHYSP; ··· 58 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 59 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 60 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 61 - SETCX(IEEE754_INVALID_OPERATION); 62 - return ieee754sp_nanxcpt(ieee754sp_indef(), "mul", x, y); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754sp_nanxcpt(ieee754sp_indef()); 63 56 64 57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 65 58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 75 68 return x; 76 69 77 70 78 - /* Infinity handling */ 79 - 71 + /* 72 + * Infinity handling 73 + */ 80 74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): 
81 75 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 82 - SETCX(IEEE754_INVALID_OPERATION); 83 - return ieee754sp_xcpt(ieee754sp_indef(), "mul", x, y); 76 + ieee754_setcx(IEEE754_INVALID_OPERATION); 77 + return ieee754sp_indef(); 84 78 85 79 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 86 80 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ··· 116 108 assert(xm & SP_HIDDEN_BIT); 117 109 assert(ym & SP_HIDDEN_BIT); 118 110 119 - { 120 - int re = xe + ye; 121 - int rs = xs ^ ys; 122 - unsigned rm; 111 + re = xe + ye; 112 + rs = xs ^ ys; 123 113 124 - /* shunt to top of word */ 125 - xm <<= 32 - (SP_MBITS + 1); 126 - ym <<= 32 - (SP_MBITS + 1); 114 + /* shunt to top of word */ 115 + xm <<= 32 - (SP_FBITS + 1); 116 + ym <<= 32 - (SP_FBITS + 1); 127 117 128 - /* multiply 32bits xm,ym to give high 32bits rm with stickness 129 - */ 130 - { 131 - unsigned short lxm = xm & 0xffff; 132 - unsigned short hxm = xm >> 16; 133 - unsigned short lym = ym & 0xffff; 134 - unsigned short hym = ym >> 16; 135 - unsigned lrm; 136 - unsigned hrm; 118 + /* 119 + * Multiply 32 bits xm, ym to give high 32 bits rm with stickness. 
120 + */ 121 + lxm = xm & 0xffff; 122 + hxm = xm >> 16; 123 + lym = ym & 0xffff; 124 + hym = ym >> 16; 137 125 138 - lrm = lxm * lym; /* 16 * 16 => 32 */ 139 - hrm = hxm * hym; /* 16 * 16 => 32 */ 126 + lrm = lxm * lym; /* 16 * 16 => 32 */ 127 + hrm = hxm * hym; /* 16 * 16 => 32 */ 140 128 141 - { 142 - unsigned t = lxm * hym; /* 16 * 16 => 32 */ 143 - { 144 - unsigned at = lrm + (t << 16); 145 - hrm += at < lrm; 146 - lrm = at; 147 - } 148 - hrm = hrm + (t >> 16); 149 - } 129 + t = lxm * hym; /* 16 * 16 => 32 */ 130 + at = lrm + (t << 16); 131 + hrm += at < lrm; 132 + lrm = at; 133 + hrm = hrm + (t >> 16); 150 134 151 - { 152 - unsigned t = hxm * lym; /* 16 * 16 => 32 */ 153 - { 154 - unsigned at = lrm + (t << 16); 155 - hrm += at < lrm; 156 - lrm = at; 157 - } 158 - hrm = hrm + (t >> 16); 159 - } 160 - rm = hrm | (lrm != 0); 161 - } 135 + t = hxm * lym; /* 16 * 16 => 32 */ 136 + at = lrm + (t << 16); 137 + hrm += at < lrm; 138 + lrm = at; 139 + hrm = hrm + (t >> 16); 162 140 163 - /* 164 - * sticky shift down to normal rounding precision 165 - */ 166 - if ((int) rm < 0) { 167 - rm = (rm >> (32 - (SP_MBITS + 1 + 3))) | 168 - ((rm << (SP_MBITS + 1 + 3)) != 0); 169 - re++; 170 - } else { 171 - rm = (rm >> (32 - (SP_MBITS + 1 + 3 + 1))) | 172 - ((rm << (SP_MBITS + 1 + 3 + 1)) != 0); 173 - } 174 - assert(rm & (SP_HIDDEN_BIT << 3)); 141 + rm = hrm | (lrm != 0); 175 142 176 - SPNORMRET2(rs, re, rm, "mul", x, y); 143 + /* 144 + * Sticky shift down to normal rounding precision. 145 + */ 146 + if ((int) rm < 0) { 147 + rm = (rm >> (32 - (SP_FBITS + 1 + 3))) | 148 + ((rm << (SP_FBITS + 1 + 3)) != 0); 149 + re++; 150 + } else { 151 + rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) | 152 + ((rm << (SP_FBITS + 1 + 3 + 1)) != 0); 177 153 } 154 + assert(rm & (SP_HIDDEN_BIT << 3)); 155 + 156 + return ieee754sp_format(rs, re, rm); 178 157 }
-57
arch/mips/math-emu/sp_scalb.c
··· 1 - /* IEEE754 floating point arithmetic 2 - * single precision 3 - */ 4 - /* 5 - * MIPS floating point support 6 - * Copyright (C) 1994-2000 Algorithmics Ltd. 7 - * 8 - * ######################################################################## 9 - * 10 - * This program is free software; you can distribute it and/or modify it 11 - * under the terms of the GNU General Public License (Version 2) as 12 - * published by the Free Software Foundation. 13 - * 14 - * This program is distributed in the hope it will be useful, but WITHOUT 15 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17 - * for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License along 20 - * with this program; if not, write to the Free Software Foundation, Inc., 21 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 22 - * 23 - * ######################################################################## 24 - */ 25 - 26 - 27 - #include "ieee754sp.h" 28 - 29 - ieee754sp ieee754sp_scalb(ieee754sp x, int n) 30 - { 31 - COMPXSP; 32 - 33 - CLEARCX; 34 - 35 - EXPLODEXSP; 36 - 37 - switch (xc) { 38 - case IEEE754_CLASS_SNAN: 39 - return ieee754sp_nanxcpt(x, "scalb", x, n); 40 - case IEEE754_CLASS_QNAN: 41 - case IEEE754_CLASS_INF: 42 - case IEEE754_CLASS_ZERO: 43 - return x; 44 - case IEEE754_CLASS_DNORM: 45 - SPDNORMX; 46 - break; 47 - case IEEE754_CLASS_NORM: 48 - break; 49 - } 50 - SPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n); 51 - } 52 - 53 - 54 - ieee754sp ieee754sp_ldexp(ieee754sp x, int n) 55 - { 56 - return ieee754sp_scalb(x, n); 57 - }
+10 -29
arch/mips/math-emu/sp_simple.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - int ieee754sp_finite(ieee754sp x) 28 - { 29 - return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS; 30 - } 31 - 32 - ieee754sp ieee754sp_copysign(ieee754sp x, ieee754sp y) 33 - { 34 - CLEARCX; 35 - SPSIGN(x) = SPSIGN(y); 36 - return x; 37 - } 38 - 39 - 40 - ieee754sp ieee754sp_neg(ieee754sp x) 26 + union ieee754sp ieee754sp_neg(union ieee754sp x) 41 27 { 42 28 COMPXSP; 43 29 44 30 EXPLODEXSP; 45 - CLEARCX; 31 + ieee754_clearcx(); 46 32 FLUSHXSP; 47 33 48 34 /* ··· 37 55 SPSIGN(x) ^= 1; 38 56 39 57 if (xc == IEEE754_CLASS_SNAN) { 40 - ieee754sp y = ieee754sp_indef(); 41 - SETCX(IEEE754_INVALID_OPERATION); 58 + union ieee754sp y = ieee754sp_indef(); 59 + ieee754_setcx(IEEE754_INVALID_OPERATION); 42 60 SPSIGN(y) = SPSIGN(x); 43 - return ieee754sp_nanxcpt(y, "neg"); 61 + return ieee754sp_nanxcpt(y); 44 62 } 45 63 46 64 return x; 47 65 } 48 66 49 - 50 - ieee754sp ieee754sp_abs(ieee754sp x) 67 + union ieee754sp ieee754sp_abs(union ieee754sp x) 51 68 { 52 69 COMPXSP; 53 70 54 71 EXPLODEXSP; 55 - CLEARCX; 72 + ieee754_clearcx(); 56 73 FLUSHXSP; 57 74 58 75 /* Clear sign ALWAYS, irrespective of NaN */ 59 76 SPSIGN(x) = 0; 60 77 61 78 if (xc == IEEE754_CLASS_SNAN) { 62 - 
SETCX(IEEE754_INVALID_OPERATION); 63 - return ieee754sp_nanxcpt(ieee754sp_indef(), "abs"); 79 + ieee754_setcx(IEEE754_INVALID_OPERATION); 80 + return ieee754sp_nanxcpt(ieee754sp_indef()); 64 81 } 65 82 66 83 return x;
+17 -18
arch/mips/math-emu/sp_sqrt.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - ieee754sp ieee754sp_sqrt(ieee754sp x) 26 + union ieee754sp ieee754sp_sqrt(union ieee754sp x) 28 27 { 29 28 int ix, s, q, m, t, i; 30 29 unsigned int r; ··· 30 35 /* take care of Inf and NaN */ 31 36 32 37 EXPLODEXSP; 33 - CLEARCX; 38 + ieee754_clearcx(); 34 39 FLUSHXSP; 35 40 36 41 /* x == INF or NAN? 
*/ 37 42 switch (xc) { 38 43 case IEEE754_CLASS_QNAN: 39 44 /* sqrt(Nan) = Nan */ 40 - return ieee754sp_nanxcpt(x, "sqrt"); 45 + return ieee754sp_nanxcpt(x); 46 + 41 47 case IEEE754_CLASS_SNAN: 42 - SETCX(IEEE754_INVALID_OPERATION); 43 - return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); 48 + ieee754_setcx(IEEE754_INVALID_OPERATION); 49 + return ieee754sp_nanxcpt(ieee754sp_indef()); 50 + 44 51 case IEEE754_CLASS_ZERO: 45 52 /* sqrt(0) = 0 */ 46 53 return x; 54 + 47 55 case IEEE754_CLASS_INF: 48 56 if (xs) { 49 57 /* sqrt(-Inf) = Nan */ 50 - SETCX(IEEE754_INVALID_OPERATION); 51 - return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); 58 + ieee754_setcx(IEEE754_INVALID_OPERATION); 59 + return ieee754sp_nanxcpt(ieee754sp_indef()); 52 60 } 53 61 /* sqrt(+Inf) = Inf */ 54 62 return x; 63 + 55 64 case IEEE754_CLASS_DNORM: 56 65 case IEEE754_CLASS_NORM: 57 66 if (xs) { 58 67 /* sqrt(-x) = Nan */ 59 - SETCX(IEEE754_INVALID_OPERATION); 60 - return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); 68 + ieee754_setcx(IEEE754_INVALID_OPERATION); 69 + return ieee754sp_nanxcpt(ieee754sp_indef()); 61 70 } 62 71 break; 63 72 } ··· 98 99 } 99 100 100 101 if (ix != 0) { 101 - SETCX(IEEE754_INEXACT); 102 + ieee754_setcx(IEEE754_INEXACT); 102 103 switch (ieee754_csr.rm) { 103 - case IEEE754_RP: 104 + case FPU_CSR_RU: 104 105 q += 2; 105 106 break; 106 - case IEEE754_RN: 107 + case FPU_CSR_RN: 107 108 q += (q & 1); 108 109 break; 109 110 }
+28 -29
arch/mips/math-emu/sp_sub.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 - 24 23 25 24 #include "ieee754sp.h" 26 25 27 - ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y) 26 + union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y) 28 27 { 28 + int s; 29 + 29 30 COMPXSP; 30 31 COMPYSP; 31 32 32 33 EXPLODEXSP; 33 34 EXPLODEYSP; 34 35 35 - CLEARCX; 36 + ieee754_clearcx(); 36 37 37 38 FLUSHXSP; 38 39 FLUSHYSP; ··· 48 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 49 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 50 53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 51 - SETCX(IEEE754_INVALID_OPERATION); 52 - return ieee754sp_nanxcpt(ieee754sp_indef(), "sub", x, y); 54 + ieee754_setcx(IEEE754_INVALID_OPERATION); 55 + return ieee754sp_nanxcpt(ieee754sp_indef()); 53 56 54 57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 55 58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): ··· 65 68 return x; 66 69 67 70 68 - /* Infinity handling 69 - */ 70 - 71 + /* 72 + * Infinity handling 73 + */ 71 74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 72 75 if (xs != ys) 73 76 return x; 74 - SETCX(IEEE754_INVALID_OPERATION); 75 - return ieee754sp_xcpt(ieee754sp_indef(), "sub", x, y); 77 + ieee754_setcx(IEEE754_INVALID_OPERATION); 78 + return 
ieee754sp_indef(); 76 79 77 80 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 78 81 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): ··· 84 87 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 85 88 return x; 86 89 87 - /* Zero handling 88 - */ 89 - 90 + /* 91 + * Zero handling 92 + */ 90 93 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 91 94 if (xs != ys) 92 95 return x; 93 96 else 94 - return ieee754sp_zero(ieee754_csr.rm == 95 - IEEE754_RD); 97 + return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); 96 98 97 99 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 98 100 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): ··· 100 104 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): 101 105 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): 102 106 /* quick fix up */ 103 - DPSIGN(y) ^= 1; 107 + SPSIGN(y) ^= 1; 104 108 return y; 105 109 106 110 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): ··· 129 133 ym <<= 3; 130 134 131 135 if (xe > ye) { 132 - /* have to shift y fraction right to align 136 + /* 137 + * have to shift y fraction right to align 133 138 */ 134 - int s = xe - ye; 139 + s = xe - ye; 135 140 SPXSRSYn(s); 136 141 } else if (ye > xe) { 137 - /* have to shift x fraction right to align 142 + /* 143 + * have to shift x fraction right to align 138 144 */ 139 - int s = ye - xe; 145 + s = ye - xe; 140 146 SPXSRSXn(s); 141 147 } 142 148 assert(xe == ye); ··· 151 153 xe = xe; 152 154 xs = xs; 153 155 154 - if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ 156 + if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */ 155 157 SPXSRSX1(); /* shift preserving sticky */ 156 158 } 157 159 } else { ··· 165 167 xs = ys; 166 168 } 167 169 if (xm == 0) { 168 - if (ieee754_csr.rm == IEEE754_RD) 170 + if (ieee754_csr.rm == FPU_CSR_RD) 169 171 return ieee754sp_zero(1); /* round negative inf. 
=> sign = -1 */ 170 172 else 171 173 return ieee754sp_zero(0); /* other round modes => sign = 1 */ 172 174 } 173 175 /* normalize to rounding precision 174 176 */ 175 - while ((xm >> (SP_MBITS + 3)) == 0) { 177 + while ((xm >> (SP_FBITS + 3)) == 0) { 176 178 xm <<= 1; 177 179 xe--; 178 180 } 179 181 } 180 - SPNORMRET2(xs, xe, xm, "sub", x, y); 182 + 183 + return ieee754sp_format(xs, xe, xm); 181 184 }
+25 -42
arch/mips/math-emu/sp_tint.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 23 24 - 25 - #include <linux/kernel.h> 26 24 #include "ieee754sp.h" 27 25 28 - int ieee754sp_tint(ieee754sp x) 26 + int ieee754sp_tint(union ieee754sp x) 29 27 { 28 + u32 residue; 29 + int round; 30 + int sticky; 31 + int odd; 32 + 30 33 COMPXSP; 31 34 32 - CLEARCX; 35 + ieee754_clearcx(); 33 36 34 37 EXPLODEXSP; 35 38 FLUSHXSP; ··· 39 40 case IEEE754_CLASS_SNAN: 40 41 case IEEE754_CLASS_QNAN: 41 42 case IEEE754_CLASS_INF: 42 - SETCX(IEEE754_INVALID_OPERATION); 43 - return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); 43 + ieee754_setcx(IEEE754_INVALID_OPERATION); 44 + return ieee754si_indef(); 45 + 44 46 case IEEE754_CLASS_ZERO: 45 47 return 0; 48 + 46 49 case IEEE754_CLASS_DNORM: 47 50 case IEEE754_CLASS_NORM: 48 51 break; ··· 55 54 return -0x80000000; 56 55 /* Set invalid. 
We will only use overflow for floating 57 56 point overflow */ 58 - SETCX(IEEE754_INVALID_OPERATION); 59 - return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); 57 + ieee754_setcx(IEEE754_INVALID_OPERATION); 58 + return ieee754si_indef(); 60 59 } 61 60 /* oh gawd */ 62 - if (xe > SP_MBITS) { 63 - xm <<= xe - SP_MBITS; 61 + if (xe > SP_FBITS) { 62 + xm <<= xe - SP_FBITS; 64 63 } else { 65 - u32 residue; 66 - int round; 67 - int sticky; 68 - int odd; 69 - 70 64 if (xe < -1) { 71 65 residue = xm; 72 66 round = 0; ··· 72 76 * so we do it in two steps. Be aware that xe 73 77 * may be -1 */ 74 78 residue = xm << (xe + 1); 75 - residue <<= 31 - SP_MBITS; 79 + residue <<= 31 - SP_FBITS; 76 80 round = (residue >> 31) != 0; 77 81 sticky = (residue << 1) != 0; 78 - xm >>= SP_MBITS - xe; 82 + xm >>= SP_FBITS - xe; 79 83 } 80 84 odd = (xm & 0x1) != 0x0; 81 85 switch (ieee754_csr.rm) { 82 - case IEEE754_RN: 86 + case FPU_CSR_RN: 83 87 if (round && (sticky || odd)) 84 88 xm++; 85 89 break; 86 - case IEEE754_RZ: 90 + case FPU_CSR_RZ: 87 91 break; 88 - case IEEE754_RU: /* toward +Infinity */ 92 + case FPU_CSR_RU: /* toward +Infinity */ 89 93 if ((round || sticky) && !xs) 90 94 xm++; 91 95 break; 92 - case IEEE754_RD: /* toward -Infinity */ 96 + case FPU_CSR_RD: /* toward -Infinity */ 93 97 if ((round || sticky) && xs) 94 98 xm++; 95 99 break; 96 100 } 97 101 if ((xm >> 31) != 0) { 98 102 /* This can happen after rounding */ 99 - SETCX(IEEE754_INVALID_OPERATION); 100 - return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); 103 + ieee754_setcx(IEEE754_INVALID_OPERATION); 104 + return ieee754si_indef(); 101 105 } 102 106 if (round || sticky) 103 - SETCX(IEEE754_INEXACT); 107 + ieee754_setcx(IEEE754_INEXACT); 104 108 } 105 109 if (xs) 106 110 return -xm; 107 111 else 108 112 return xm; 109 - } 110 - 111 - 112 - unsigned int ieee754sp_tuns(ieee754sp x) 113 - { 114 - ieee754sp hb = ieee754sp_1e31(); 115 - 116 - /* what if x < 0 ?? 
*/ 117 - if (ieee754sp_lt(x, hb)) 118 - return (unsigned) ieee754sp_tint(x); 119 - 120 - return (unsigned) ieee754sp_tint(ieee754sp_sub(x, hb)) | 121 - ((unsigned) 1 << 31); 122 113 }
+27 -42
arch/mips/math-emu/sp_tlong.c
··· 5 5 * MIPS floating point support 6 6 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 7 * 8 - * ######################################################################## 9 - * 10 8 * This program is free software; you can distribute it and/or modify it 11 9 * under the terms of the GNU General Public License (Version 2) as 12 10 * published by the Free Software Foundation. ··· 16 18 * 17 19 * You should have received a copy of the GNU General Public License along 18 20 * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 - * 21 - * ######################################################################## 21 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22 22 */ 23 23 24 - 25 24 #include "ieee754sp.h" 25 + #include "ieee754dp.h" 26 26 27 - s64 ieee754sp_tlong(ieee754sp x) 27 + s64 ieee754sp_tlong(union ieee754sp x) 28 28 { 29 + u32 residue; 30 + int round; 31 + int sticky; 32 + int odd; 33 + 29 34 COMPXDP; /* <-- need 64-bit mantissa tmp */ 30 35 31 - CLEARCX; 36 + ieee754_clearcx(); 32 37 33 38 EXPLODEXSP; 34 39 FLUSHXSP; ··· 40 39 case IEEE754_CLASS_SNAN: 41 40 case IEEE754_CLASS_QNAN: 42 41 case IEEE754_CLASS_INF: 43 - SETCX(IEEE754_INVALID_OPERATION); 44 - return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); 42 + ieee754_setcx(IEEE754_INVALID_OPERATION); 43 + return ieee754di_indef(); 44 + 45 45 case IEEE754_CLASS_ZERO: 46 46 return 0; 47 + 47 48 case IEEE754_CLASS_DNORM: 48 49 case IEEE754_CLASS_NORM: 49 50 break; ··· 56 53 return -0x8000000000000000LL; 57 54 /* Set invalid. 
We will only use overflow for floating 58 55 point overflow */ 59 - SETCX(IEEE754_INVALID_OPERATION); 60 - return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); 56 + ieee754_setcx(IEEE754_INVALID_OPERATION); 57 + return ieee754di_indef(); 61 58 } 62 59 /* oh gawd */ 63 - if (xe > SP_MBITS) { 64 - xm <<= xe - SP_MBITS; 65 - } else if (xe < SP_MBITS) { 66 - u32 residue; 67 - int round; 68 - int sticky; 69 - int odd; 70 - 60 + if (xe > SP_FBITS) { 61 + xm <<= xe - SP_FBITS; 62 + } else if (xe < SP_FBITS) { 71 63 if (xe < -1) { 72 64 residue = xm; 73 65 round = 0; 74 66 sticky = residue != 0; 75 67 xm = 0; 76 68 } else { 77 - residue = xm << (32 - SP_MBITS + xe); 69 + residue = xm << (32 - SP_FBITS + xe); 78 70 round = (residue >> 31) != 0; 79 71 sticky = (residue << 1) != 0; 80 - xm >>= SP_MBITS - xe; 72 + xm >>= SP_FBITS - xe; 81 73 } 82 74 odd = (xm & 0x1) != 0x0; 83 75 switch (ieee754_csr.rm) { 84 - case IEEE754_RN: 76 + case FPU_CSR_RN: 85 77 if (round && (sticky || odd)) 86 78 xm++; 87 79 break; 88 - case IEEE754_RZ: 80 + case FPU_CSR_RZ: 89 81 break; 90 - case IEEE754_RU: /* toward +Infinity */ 82 + case FPU_CSR_RU: /* toward +Infinity */ 91 83 if ((round || sticky) && !xs) 92 84 xm++; 93 85 break; 94 - case IEEE754_RD: /* toward -Infinity */ 86 + case FPU_CSR_RD: /* toward -Infinity */ 95 87 if ((round || sticky) && xs) 96 88 xm++; 97 89 break; 98 90 } 99 91 if ((xm >> 63) != 0) { 100 92 /* This can happen after rounding */ 101 - SETCX(IEEE754_INVALID_OPERATION); 102 - return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); 93 + ieee754_setcx(IEEE754_INVALID_OPERATION); 94 + return ieee754di_indef(); 103 95 } 104 96 if (round || sticky) 105 - SETCX(IEEE754_INEXACT); 97 + ieee754_setcx(IEEE754_INEXACT); 106 98 } 107 99 if (xs) 108 100 return -xm; 109 101 else 110 102 return xm; 111 - } 112 - 113 - 114 - u64 ieee754sp_tulong(ieee754sp x) 115 - { 116 - ieee754sp hb = ieee754sp_1e63(); 117 - 118 - /* what if x < 0 ?? 
*/ 119 - if (ieee754sp_lt(x, hb)) 120 - return (u64) ieee754sp_tlong(x); 121 - 122 - return (u64) ieee754sp_tlong(ieee754sp_sub(x, hb)) | 123 - (1ULL << 63); 124 103 }
+2
arch/mips/mm/c-octeon.c
··· 137 137 { 138 138 struct vm_area_struct *vma; 139 139 140 + down_read(&current->mm->mmap_sem); 140 141 vma = find_vma(current->mm, addr); 141 142 octeon_flush_icache_all_cores(vma); 143 + up_read(&current->mm->mmap_sem); 142 144 } 143 145 144 146
+70 -7
arch/mips/mm/c-r4k.c
··· 7 7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) 8 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 9 9 */ 10 + #include <linux/cpu_pm.h> 10 11 #include <linux/hardirq.h> 11 12 #include <linux/init.h> 12 13 #include <linux/highmem.h> ··· 51 50 { 52 51 preempt_disable(); 53 52 54 - #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 53 + #ifndef CONFIG_MIPS_MT_SMP 55 54 smp_call_function(func, info, 1); 56 55 #endif 57 56 func(info); ··· 106 105 107 106 static inline void r4k_blast_dcache_page_dc64(unsigned long addr) 108 107 { 109 - R4600_HIT_CACHEOP_WAR_IMPL; 110 108 blast_dcache64_page(addr); 109 + } 110 + 111 + static inline void r4k_blast_dcache_page_dc128(unsigned long addr) 112 + { 113 + blast_dcache128_page(addr); 111 114 } 112 115 113 116 static void r4k_blast_dcache_page_setup(void) 114 117 { 115 118 unsigned long dc_lsize = cpu_dcache_line_size(); 116 119 117 - if (dc_lsize == 0) 120 + switch (dc_lsize) { 121 + case 0: 118 122 r4k_blast_dcache_page = (void *)cache_noop; 119 - else if (dc_lsize == 16) 123 + break; 124 + case 16: 120 125 r4k_blast_dcache_page = blast_dcache16_page; 121 - else if (dc_lsize == 32) 126 + break; 127 + case 32: 122 128 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; 123 - else if (dc_lsize == 64) 129 + break; 130 + case 64: 124 131 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; 132 + break; 133 + case 128: 134 + r4k_blast_dcache_page = r4k_blast_dcache_page_dc128; 135 + break; 136 + default: 137 + break; 138 + } 125 139 } 126 140 127 141 #ifndef CONFIG_EVA ··· 175 159 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; 176 160 else if (dc_lsize == 64) 177 161 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; 162 + else if (dc_lsize == 128) 163 + r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed; 178 164 } 179 165 180 166 void (* r4k_blast_dcache)(void); ··· 194 176 r4k_blast_dcache = blast_dcache32; 195 177 else if (dc_lsize == 64) 196 
178 r4k_blast_dcache = blast_dcache64; 179 + else if (dc_lsize == 128) 180 + r4k_blast_dcache = blast_dcache128; 197 181 } 198 182 199 183 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ ··· 285 265 r4k_blast_icache_page = blast_icache32_page; 286 266 else if (ic_lsize == 64) 287 267 r4k_blast_icache_page = blast_icache64_page; 268 + else if (ic_lsize == 128) 269 + r4k_blast_icache_page = blast_icache128_page; 288 270 } 289 271 290 272 #ifndef CONFIG_EVA ··· 360 338 r4k_blast_icache = blast_icache32; 361 339 } else if (ic_lsize == 64) 362 340 r4k_blast_icache = blast_icache64; 341 + else if (ic_lsize == 128) 342 + r4k_blast_icache = blast_icache128; 363 343 } 364 344 365 345 static void (* r4k_blast_scache_page)(unsigned long addr); ··· 452 428 453 429 static inline int has_valid_asid(const struct mm_struct *mm) 454 430 { 455 - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 431 + #ifdef CONFIG_MIPS_MT_SMP 456 432 int i; 457 433 458 434 for_each_online_cpu(i) ··· 1118 1094 c->dcache.waybit = 0; 1119 1095 break; 1120 1096 1097 + case CPU_CAVIUM_OCTEON3: 1098 + /* For now lie about the number of ways. 
*/ 1099 + c->icache.linesz = 128; 1100 + c->icache.sets = 16; 1101 + c->icache.ways = 8; 1102 + c->icache.flags |= MIPS_CACHE_VTAG; 1103 + icache_size = c->icache.sets * c->icache.ways * c->icache.linesz; 1104 + 1105 + c->dcache.linesz = 128; 1106 + c->dcache.ways = 8; 1107 + c->dcache.sets = 8; 1108 + dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz; 1109 + c->options |= MIPS_CPU_PREFETCH; 1110 + break; 1111 + 1121 1112 default: 1122 1113 if (!(config & MIPS_CONF_M)) 1123 1114 panic("Don't know how to probe P-caches on this cpu."); ··· 1453 1414 loongson3_sc_init(); 1454 1415 return; 1455 1416 1417 + case CPU_CAVIUM_OCTEON3: 1456 1418 case CPU_XLP: 1457 1419 /* don't need to worry about L2, fully coherent */ 1458 1420 return; ··· 1684 1644 coherency_setup(); 1685 1645 board_cache_error_setup = r4k_cache_error_setup; 1686 1646 } 1647 + 1648 + static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd, 1649 + void *v) 1650 + { 1651 + switch (cmd) { 1652 + case CPU_PM_ENTER_FAILED: 1653 + case CPU_PM_EXIT: 1654 + coherency_setup(); 1655 + break; 1656 + } 1657 + 1658 + return NOTIFY_OK; 1659 + } 1660 + 1661 + static struct notifier_block r4k_cache_pm_notifier_block = { 1662 + .notifier_call = r4k_cache_pm_notifier, 1663 + }; 1664 + 1665 + int __init r4k_cache_init_pm(void) 1666 + { 1667 + return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block); 1668 + } 1669 + arch_initcall(r4k_cache_init_pm);
+17 -65
arch/mips/mm/init.c
··· 44 44 #include <asm/tlb.h> 45 45 #include <asm/fixmap.h> 46 46 47 - /* Atomicity and interruptability */ 48 - #ifdef CONFIG_MIPS_MT_SMTC 49 - 50 - #include <asm/mipsmtregs.h> 51 - 52 - #define ENTER_CRITICAL(flags) \ 53 - { \ 54 - unsigned int mvpflags; \ 55 - local_irq_save(flags);\ 56 - mvpflags = dvpe() 57 - #define EXIT_CRITICAL(flags) \ 58 - evpe(mvpflags); \ 59 - local_irq_restore(flags); \ 60 - } 61 - #else 62 - 63 - #define ENTER_CRITICAL(flags) local_irq_save(flags) 64 - #define EXIT_CRITICAL(flags) local_irq_restore(flags) 65 - 66 - #endif /* CONFIG_MIPS_MT_SMTC */ 67 - 68 47 /* 69 48 * We have up to 8 empty zeroed pages so we can map one of the right colour 70 49 * when needed. This is necessary only on R4000 / R4400 SC and MC versions ··· 79 100 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; 80 101 } 81 102 82 - #ifdef CONFIG_MIPS_MT_SMTC 83 - static pte_t *kmap_coherent_pte; 84 - static void __init kmap_coherent_init(void) 85 - { 86 - unsigned long vaddr; 87 - 88 - /* cache the first coherent kmap pte */ 89 - vaddr = __fix_to_virt(FIX_CMAP_BEGIN); 90 - kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); 91 - } 92 - #else 93 - static inline void kmap_coherent_init(void) {} 94 - #endif 95 - 96 - void *kmap_coherent(struct page *page, unsigned long addr) 103 + static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) 97 104 { 98 105 enum fixed_addresses idx; 99 106 unsigned long vaddr, flags, entrylo; ··· 91 126 92 127 pagefault_disable(); 93 128 idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); 94 - #ifdef CONFIG_MIPS_MT_SMTC 95 - idx += FIX_N_COLOURS * smp_processor_id() + 96 - (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0); 97 - #else 98 129 idx += in_interrupt() ? 
FIX_N_COLOURS : 0; 99 - #endif 100 130 vaddr = __fix_to_virt(FIX_CMAP_END - idx); 101 - pte = mk_pte(page, PAGE_KERNEL); 131 + pte = mk_pte(page, prot); 102 132 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 103 133 entrylo = pte.pte_high; 104 134 #else 105 135 entrylo = pte_to_entrylo(pte_val(pte)); 106 136 #endif 107 137 108 - ENTER_CRITICAL(flags); 138 + local_irq_save(flags); 109 139 old_ctx = read_c0_entryhi(); 110 140 write_c0_entryhi(vaddr & (PAGE_MASK << 1)); 111 141 write_c0_entrylo0(entrylo); 112 142 write_c0_entrylo1(entrylo); 113 - #ifdef CONFIG_MIPS_MT_SMTC 114 - set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); 115 - /* preload TLB instead of local_flush_tlb_one() */ 116 - mtc0_tlbw_hazard(); 117 - tlb_probe(); 118 - tlb_probe_hazard(); 119 - tlbidx = read_c0_index(); 120 - mtc0_tlbw_hazard(); 121 - if (tlbidx < 0) 122 - tlb_write_random(); 123 - else 124 - tlb_write_indexed(); 125 - #else 126 143 tlbidx = read_c0_wired(); 127 144 write_c0_wired(tlbidx + 1); 128 145 write_c0_index(tlbidx); 129 146 mtc0_tlbw_hazard(); 130 147 tlb_write_indexed(); 131 - #endif 132 148 tlbw_use_hazard(); 133 149 write_c0_entryhi(old_ctx); 134 - EXIT_CRITICAL(flags); 150 + local_irq_restore(flags); 135 151 136 152 return (void*) vaddr; 137 153 } 138 154 155 + void *kmap_coherent(struct page *page, unsigned long addr) 156 + { 157 + return __kmap_pgprot(page, addr, PAGE_KERNEL); 158 + } 159 + 160 + void *kmap_noncoherent(struct page *page, unsigned long addr) 161 + { 162 + return __kmap_pgprot(page, addr, PAGE_KERNEL_NC); 163 + } 164 + 139 165 void kunmap_coherent(void) 140 166 { 141 - #ifndef CONFIG_MIPS_MT_SMTC 142 167 unsigned int wired; 143 168 unsigned long flags, old_ctx; 144 169 145 - ENTER_CRITICAL(flags); 170 + local_irq_save(flags); 146 171 old_ctx = read_c0_entryhi(); 147 172 wired = read_c0_wired() - 1; 148 173 write_c0_wired(wired); ··· 144 189 tlb_write_indexed(); 145 190 tlbw_use_hazard(); 146 191 write_c0_entryhi(old_ctx); 147 - 
EXIT_CRITICAL(flags); 148 - #endif 192 + local_irq_restore(flags); 149 193 pagefault_enable(); 150 194 } 151 195 ··· 210 256 void __init fixrange_init(unsigned long start, unsigned long end, 211 257 pgd_t *pgd_base) 212 258 { 213 - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) 259 + #ifdef CONFIG_HIGHMEM 214 260 pgd_t *pgd; 215 261 pud_t *pud; 216 262 pmd_t *pmd; ··· 281 327 #ifdef CONFIG_HIGHMEM 282 328 kmap_init(); 283 329 #endif 284 - kmap_coherent_init(); 285 - 286 330 #ifdef CONFIG_ZONE_DMA 287 331 max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; 288 332 #endif
+49 -39
arch/mips/mm/tlb-r4k.c
··· 8 8 * Carsten Langgaard, carstenl@mips.com 9 9 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. 10 10 */ 11 + #include <linux/cpu_pm.h> 11 12 #include <linux/init.h> 12 13 #include <linux/sched.h> 13 14 #include <linux/smp.h> ··· 25 24 #include <asm/tlbmisc.h> 26 25 27 26 extern void build_tlb_refill_handler(void); 28 - 29 - /* Atomicity and interruptability */ 30 - #ifdef CONFIG_MIPS_MT_SMTC 31 - 32 - #include <asm/smtc.h> 33 - #include <asm/mipsmtregs.h> 34 - 35 - #define ENTER_CRITICAL(flags) \ 36 - { \ 37 - unsigned int mvpflags; \ 38 - local_irq_save(flags);\ 39 - mvpflags = dvpe() 40 - #define EXIT_CRITICAL(flags) \ 41 - evpe(mvpflags); \ 42 - local_irq_restore(flags); \ 43 - } 44 - #else 45 - 46 - #define ENTER_CRITICAL(flags) local_irq_save(flags) 47 - #define EXIT_CRITICAL(flags) local_irq_restore(flags) 48 - 49 - #endif /* CONFIG_MIPS_MT_SMTC */ 50 27 51 28 /* 52 29 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, ··· 54 75 unsigned long old_ctx; 55 76 int entry, ftlbhighset; 56 77 57 - ENTER_CRITICAL(flags); 78 + local_irq_save(flags); 58 79 /* Save old context and create impossible VPN2 value */ 59 80 old_ctx = read_c0_entryhi(); 60 81 write_c0_entrylo0(0); ··· 91 112 tlbw_use_hazard(); 92 113 write_c0_entryhi(old_ctx); 93 114 flush_itlb(); 94 - EXIT_CRITICAL(flags); 115 + local_irq_restore(flags); 95 116 } 96 117 EXPORT_SYMBOL(local_flush_tlb_all); 97 118 ··· 121 142 if (cpu_context(cpu, mm) != 0) { 122 143 unsigned long size, flags; 123 144 124 - ENTER_CRITICAL(flags); 145 + local_irq_save(flags); 125 146 start = round_down(start, PAGE_SIZE << 1); 126 147 end = round_up(end, PAGE_SIZE << 1); 127 148 size = (end - start) >> (PAGE_SHIFT + 1); ··· 155 176 drop_mmu_context(mm, cpu); 156 177 } 157 178 flush_itlb(); 158 - EXIT_CRITICAL(flags); 179 + local_irq_restore(flags); 159 180 } 160 181 } 161 182 ··· 163 184 { 164 185 unsigned long size, flags; 165 186 166 - ENTER_CRITICAL(flags); 187 + local_irq_save(flags); 167 188 
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 168 189 size = (size + 1) >> 1; 169 190 if (size <= (current_cpu_data.tlbsizeftlbsets ? ··· 199 220 local_flush_tlb_all(); 200 221 } 201 222 flush_itlb(); 202 - EXIT_CRITICAL(flags); 223 + local_irq_restore(flags); 203 224 } 204 225 205 226 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) ··· 212 233 213 234 newpid = cpu_asid(cpu, vma->vm_mm); 214 235 page &= (PAGE_MASK << 1); 215 - ENTER_CRITICAL(flags); 236 + local_irq_save(flags); 216 237 oldpid = read_c0_entryhi(); 217 238 write_c0_entryhi(page | newpid); 218 239 mtc0_tlbw_hazard(); ··· 232 253 finish: 233 254 write_c0_entryhi(oldpid); 234 255 flush_itlb_vm(vma); 235 - EXIT_CRITICAL(flags); 256 + local_irq_restore(flags); 236 257 } 237 258 } 238 259 ··· 245 266 unsigned long flags; 246 267 int oldpid, idx; 247 268 248 - ENTER_CRITICAL(flags); 269 + local_irq_save(flags); 249 270 oldpid = read_c0_entryhi(); 250 271 page &= (PAGE_MASK << 1); 251 272 write_c0_entryhi(page); ··· 264 285 } 265 286 write_c0_entryhi(oldpid); 266 287 flush_itlb(); 267 - EXIT_CRITICAL(flags); 288 + local_irq_restore(flags); 268 289 } 269 290 270 291 /* ··· 287 308 if (current->active_mm != vma->vm_mm) 288 309 return; 289 310 290 - ENTER_CRITICAL(flags); 311 + local_irq_save(flags); 291 312 292 313 pid = read_c0_entryhi() & ASID_MASK; 293 314 address &= (PAGE_MASK << 1); ··· 337 358 } 338 359 tlbw_use_hazard(); 339 360 flush_itlb_vm(vma); 340 - EXIT_CRITICAL(flags); 361 + local_irq_restore(flags); 341 362 } 342 363 343 364 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, ··· 348 369 unsigned long old_pagemask; 349 370 unsigned long old_ctx; 350 371 351 - ENTER_CRITICAL(flags); 372 + local_irq_save(flags); 352 373 /* Save old context and create impossible VPN2 value */ 353 374 old_ctx = read_c0_entryhi(); 354 375 old_pagemask = read_c0_pagemask(); ··· 368 389 tlbw_use_hazard(); /* What is the hazard here? 
*/ 369 390 write_c0_pagemask(old_pagemask); 370 391 local_flush_tlb_all(); 371 - EXIT_CRITICAL(flags); 392 + local_irq_restore(flags); 372 393 } 373 394 374 395 #ifdef CONFIG_TRANSPARENT_HUGEPAGE ··· 378 399 unsigned int mask; 379 400 unsigned long flags; 380 401 381 - ENTER_CRITICAL(flags); 402 + local_irq_save(flags); 382 403 write_c0_pagemask(PM_HUGE_MASK); 383 404 back_to_back_c0_hazard(); 384 405 mask = read_c0_pagemask(); 385 406 write_c0_pagemask(PM_DEFAULT_MASK); 386 407 387 - EXIT_CRITICAL(flags); 408 + local_irq_restore(flags); 388 409 389 410 return mask == PM_HUGE_MASK; 390 411 } ··· 400 421 401 422 __setup("ntlb=", set_ntlb); 402 423 403 - void tlb_init(void) 424 + /* 425 + * Configure TLB (for init or after a CPU has been powered off). 426 + */ 427 + static void r4k_tlb_configure(void) 404 428 { 405 429 /* 406 430 * You should never change this register: ··· 435 453 local_flush_tlb_all(); 436 454 437 455 /* Did I tell you that ARC SUCKS? */ 456 + } 457 + 458 + void tlb_init(void) 459 + { 460 + r4k_tlb_configure(); 438 461 439 462 if (ntlb) { 440 463 if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { ··· 453 466 454 467 build_tlb_refill_handler(); 455 468 } 469 + 470 + static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd, 471 + void *v) 472 + { 473 + switch (cmd) { 474 + case CPU_PM_ENTER_FAILED: 475 + case CPU_PM_EXIT: 476 + r4k_tlb_configure(); 477 + break; 478 + } 479 + 480 + return NOTIFY_OK; 481 + } 482 + 483 + static struct notifier_block r4k_tlb_pm_notifier_block = { 484 + .notifier_call = r4k_tlb_pm_notifier, 485 + }; 486 + 487 + static int __init r4k_tlb_init_pm(void) 488 + { 489 + return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block); 490 + } 491 + arch_initcall(r4k_tlb_init_pm);
+1 -1
arch/mips/mm/tlbex.c
··· 1256 1256 memset(relocs, 0, sizeof(relocs)); 1257 1257 memset(final_handler, 0, sizeof(final_handler)); 1258 1258 1259 - if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1259 + if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1260 1260 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1261 1261 scratch_reg); 1262 1262 vmalloc_mode = refill_scratch;
+14
arch/mips/mm/uasm-micromips.c
··· 63 63 { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM }, 64 64 { insn_daddu, 0, 0 }, 65 65 { insn_daddiu, 0, 0 }, 66 + { insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS }, 66 67 { insn_dmfc0, 0, 0 }, 67 68 { insn_dmtc0, 0, 0 }, 68 69 { insn_dsll, 0, 0 }, ··· 79 78 { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE }, 80 79 { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM }, 81 80 { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM }, 81 + { insn_jalr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS }, 82 82 { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, 83 + { insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 83 84 { insn_ld, 0, 0 }, 85 + { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM }, 84 86 { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM }, 85 87 { insn_lld, 0, 0 }, 86 88 { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM }, 87 89 { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 88 90 { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD }, 91 + { insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS }, 92 + { insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS }, 89 93 { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD }, 94 + { insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD }, 90 95 { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD }, 91 96 { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, 92 97 { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM }, ··· 101 94 { insn_scd, 0, 0 }, 102 95 { insn_sd, 0, 0 }, 103 96 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, 97 + { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD }, 98 + { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM 
}, 99 + { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD }, 104 100 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, 105 101 { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD }, 102 + { insn_srlv, M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD }, 106 103 { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, 107 104 { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, 108 105 { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 106 + { insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS }, 109 107 { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, 110 108 { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, 111 109 { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, 112 110 { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, 111 + { insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM }, 112 + { insn_wsbh, M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS }, 113 113 { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, 114 114 { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, 115 115 { insn_dins, 0, 0 },
+15
arch/mips/mm/uasm-mips.c
··· 67 67 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 68 68 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, 69 69 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, 70 + { insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT }, 70 71 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, 71 72 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, 72 73 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, ··· 83 82 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, 84 83 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 85 84 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 85 + { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, 86 86 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 87 87 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 88 + { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 88 89 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 89 90 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 91 + { insn_lh, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 90 92 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 91 93 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 92 94 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 93 95 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 94 96 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, 95 97 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 98 + { insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD }, 99 + { insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD }, 96 100 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 101 + { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 97 102 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 98 103 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 99 104 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, ··· 109 102 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 110 103 { insn_sd, M(sd_op, 
0, 0, 0, 0, 0), RS | RT | SIMM }, 111 104 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 105 + { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, 106 + { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 107 + { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD }, 112 108 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, 113 109 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, 110 + { insn_srlv, M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD }, 114 111 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, 115 112 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 113 + { insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE }, 116 114 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, 117 115 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, 118 116 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, 119 117 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, 120 118 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, 119 + { insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM }, 120 + { insn_wsbh, M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD }, 121 121 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 122 122 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, 123 + { insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD }, 123 124 { insn_invalid, 0, 0 } 124 125 }; 125 126
+45 -6
arch/mips/mm/uasm.c
··· 47 47 insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1, 48 48 insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, 49 49 insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm, 50 - insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, 50 + insn_divu, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, 51 51 insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, 52 - insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, 53 - insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, 52 + insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, 53 + insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw, 54 + insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul, 54 55 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, 55 - insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, 56 - insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, 57 - insn_xori, 56 + insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra, 57 + insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, 58 + insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, 59 + insn_xor, insn_xori, insn_yield, 58 60 }; 59 61 60 62 struct insn { ··· 146 144 } \ 147 145 UASM_EXPORT_SYMBOL(uasm_i##op); 148 146 147 + #define I_u3u2u1(op) \ 148 + Ip_u3u2u1(op) \ 149 + { \ 150 + build_insn(buf, insn##op, c, b, a); \ 151 + } \ 152 + UASM_EXPORT_SYMBOL(uasm_i##op); 153 + 149 154 #define I_u3u1u2(op) \ 150 155 Ip_u3u1u2(op) \ 151 156 { \ ··· 209 200 } \ 210 201 UASM_EXPORT_SYMBOL(uasm_i##op); 211 202 203 + #define I_u2u1(op) \ 204 + Ip_u1u2(op) \ 205 + { \ 206 + build_insn(buf, insn##op, b, a); \ 207 + } \ 208 + UASM_EXPORT_SYMBOL(uasm_i##op); 209 + 212 210 #define I_u1s2(op) \ 213 211 Ip_u1s2(op) \ 214 212 { \ ··· 253 237 I_u1u2u3(_dmtc0) 254 238 I_u2u1s3(_daddiu) 255 239 I_u3u1u2(_daddu) 240 + I_u1u2(_divu) 256 241 
I_u2u1u3(_dsll) 257 242 I_u2u1u3(_dsll32) 258 243 I_u2u1u3(_dsra) ··· 267 250 I_u2u1msbu3(_ins) 268 251 I_u1(_j) 269 252 I_u1(_jal) 253 + I_u2u1(_jalr) 270 254 I_u1(_jr) 255 + I_u2s3u1(_lb) 271 256 I_u2s3u1(_ld) 257 + I_u2s3u1(_lh) 272 258 I_u2s3u1(_ll) 273 259 I_u2s3u1(_lld) 274 260 I_u1s2(_lui) 275 261 I_u2s3u1(_lw) 276 262 I_u1u2u3(_mfc0) 263 + I_u1(_mfhi) 264 + I_u1(_mflo) 277 265 I_u1u2u3(_mtc0) 266 + I_u3u1u2(_mul) 278 267 I_u2u1u3(_ori) 279 268 I_u3u1u2(_or) 280 269 I_0(_rfe) ··· 288 265 I_u2s3u1(_scd) 289 266 I_u2s3u1(_sd) 290 267 I_u2u1u3(_sll) 268 + I_u3u2u1(_sllv) 269 + I_u2u1s3(_sltiu) 270 + I_u3u1u2(_sltu) 291 271 I_u2u1u3(_sra) 292 272 I_u2u1u3(_srl) 273 + I_u3u2u1(_srlv) 293 274 I_u2u1u3(_rotr) 294 275 I_u3u1u2(_subu) 295 276 I_u2s3u1(_sw) 277 + I_u1(_sync) 296 278 I_0(_tlbp) 297 279 I_0(_tlbr) 298 280 I_0(_tlbwi) 299 281 I_0(_tlbwr) 282 + I_u1(_wait); 283 + I_u2u1(_wsbh) 300 284 I_u3u1u2(_xor) 301 285 I_u2u1u3(_xori) 286 + I_u2u1(_yield) 302 287 I_u2u1msbu3(_dins); 303 288 I_u2u1msb32u3(_dinsm); 304 289 I_u1(_syscall); ··· 499 468 ISAFUNC(uasm_i_b)(p, 0); 500 469 } 501 470 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); 471 + 472 + void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1, 473 + unsigned int r2, int lid) 474 + { 475 + uasm_r_mips_pc16(r, *p, lid); 476 + ISAFUNC(uasm_i_beq)(p, r1, r2, 0); 477 + } 478 + UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq)); 502 479 503 480 void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, 504 481 int lid)
+1 -2
arch/mips/mti-malta/Makefile
··· 9 9 malta-int.o malta-memory.o malta-platform.o \ 10 10 malta-reset.o malta-setup.o malta-time.o 11 11 12 - # FIXME FIXME FIXME 13 - obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o 12 + obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o
-6
arch/mips/mti-malta/malta-init.c
··· 116 116 return CPC_BASE_ADDR; 117 117 } 118 118 119 - extern struct plat_smp_ops msmtc_smp_ops; 120 - 121 119 void __init prom_init(void) 122 120 { 123 121 mips_display_message("LINUX"); ··· 302 304 return; 303 305 if (!register_vsmp_smp_ops()) 304 306 return; 305 - 306 - #ifdef CONFIG_MIPS_MT_SMTC 307 - register_smp_ops(&msmtc_smp_ops); 308 - #endif 309 307 }
-19
arch/mips/mti-malta/malta-int.c
··· 504 504 } else if (cpu_has_vint) { 505 505 set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); 506 506 set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); 507 - #ifdef CONFIG_MIPS_MT_SMTC 508 - setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq, 509 - (0x100 << MIPSCPU_INT_I8259A)); 510 - setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, 511 - &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI)); 512 - /* 513 - * Temporary hack to ensure that the subsidiary device 514 - * interrupts coing in via the i8259A, but associated 515 - * with low IRQ numbers, will restore the Status.IM 516 - * value associated with the i8259A. 517 - */ 518 - { 519 - int i; 520 - 521 - for (i = 0; i < 16; i++) 522 - irq_hwmask[i] = (0x100 << MIPSCPU_INT_I8259A); 523 - } 524 - #else /* Not SMTC */ 525 507 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); 526 508 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, 527 509 &corehi_irqaction); 528 - #endif /* CONFIG_MIPS_MT_SMTC */ 529 510 } else { 530 511 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); 531 512 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
+2 -2
arch/mips/mti-malta/malta-memory.c
··· 26 26 27 27 fw_memblock_t * __init fw_getmdesc(int eva) 28 28 { 29 - char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr; 30 - unsigned long memsize = 0, ememsize __maybe_unused = 0; 29 + char *memsize_str, *ememsize_str = NULL, *ptr; 30 + unsigned long memsize = 0, ememsize = 0; 31 31 static char cmdline[COMMAND_LINE_SIZE] __initdata; 32 32 int tmp; 33 33
+96
arch/mips/mti-malta/malta-pm.c
··· 1 + /* 2 + * Copyright (C) 2014 Imagination Technologies 3 + * Author: Paul Burton <paul.burton@imgtec.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #include <linux/delay.h> 12 + #include <linux/init.h> 13 + #include <linux/io.h> 14 + #include <linux/pci.h> 15 + 16 + #include <asm/mach-malta/malta-pm.h> 17 + 18 + static struct pci_bus *pm_pci_bus; 19 + static resource_size_t pm_io_offset; 20 + 21 + int mips_pm_suspend(unsigned state) 22 + { 23 + int spec_devid; 24 + u16 sts; 25 + 26 + if (!pm_pci_bus || !pm_io_offset) 27 + return -ENODEV; 28 + 29 + /* Ensure the power button status is clear */ 30 + while (1) { 31 + sts = inw(pm_io_offset + PIIX4_FUNC3IO_PMSTS); 32 + if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS)) 33 + break; 34 + outw(sts, pm_io_offset + PIIX4_FUNC3IO_PMSTS); 35 + } 36 + 37 + /* Enable entry to suspend */ 38 + outw(state | PIIX4_FUNC3IO_PMCNTRL_SUS_EN, 39 + pm_io_offset + PIIX4_FUNC3IO_PMCNTRL); 40 + 41 + /* If the special cycle occurs too soon this doesn't work... */ 42 + mdelay(10); 43 + 44 + /* 45 + * The PIIX4 will enter the suspend state only after seeing a special 46 + * cycle with the correct magic data on the PCI bus. Generate that 47 + * cycle now. 
48 + */ 49 + spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7)); 50 + pci_bus_write_config_dword(pm_pci_bus, spec_devid, 0, 51 + PIIX4_SUSPEND_MAGIC); 52 + 53 + /* Give the system some time to power down */ 54 + mdelay(1000); 55 + 56 + return 0; 57 + } 58 + 59 + static int __init malta_pm_setup(void) 60 + { 61 + struct pci_dev *dev; 62 + int res, io_region = PCI_BRIDGE_RESOURCES; 63 + 64 + /* Find a reference to the PCI bus */ 65 + pm_pci_bus = pci_find_next_bus(NULL); 66 + if (!pm_pci_bus) { 67 + pr_warn("malta-pm: failed to find reference to PCI bus\n"); 68 + return -ENODEV; 69 + } 70 + 71 + /* Find the PIIX4 PM device */ 72 + dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 73 + PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, 74 + PCI_ANY_ID, NULL); 75 + if (!dev) { 76 + pr_warn("malta-pm: failed to find PIIX4 PM\n"); 77 + return -ENODEV; 78 + } 79 + 80 + /* Request access to the PIIX4 PM IO registers */ 81 + res = pci_request_region(dev, io_region, "PIIX4 PM IO registers"); 82 + if (res) { 83 + pr_warn("malta-pm: failed to request PM IO registers (%d)\n", 84 + res); 85 + pci_dev_put(dev); 86 + return -ENODEV; 87 + } 88 + 89 + /* Find the offset to the PIIX4 PM IO registers */ 90 + pm_io_offset = pci_resource_start(dev, io_region); 91 + 92 + pci_dev_put(dev); 93 + return 0; 94 + } 95 + 96 + late_initcall(malta_pm_setup);
+10 -4
arch/mips/mti-malta/malta-reset.c
··· 10 10 #include <linux/pm.h> 11 11 12 12 #include <asm/reboot.h> 13 + #include <asm/mach-malta/malta-pm.h> 13 14 14 15 #define SOFTRES_REG 0x1f000500 15 16 #define GORESET 0x42 ··· 25 24 26 25 static void mips_machine_halt(void) 27 26 { 28 - unsigned int __iomem *softres_reg = 29 - ioremap(SOFTRES_REG, sizeof(unsigned int)); 27 + while (true); 28 + } 30 29 31 - __raw_writel(GORESET, softres_reg); 30 + static void mips_machine_power_off(void) 31 + { 32 + mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF); 33 + 34 + pr_info("Failed to power down, resetting\n"); 35 + mips_machine_restart(NULL); 32 36 } 33 37 34 38 static int __init mips_reboot_setup(void) 35 39 { 36 40 _machine_restart = mips_machine_restart; 37 41 _machine_halt = mips_machine_halt; 38 - pm_power_off = mips_machine_halt; 42 + pm_power_off = mips_machine_power_off; 39 43 40 44 return 0; 41 45 }
-4
arch/mips/mti-malta/malta-setup.c
··· 77 77 return "MIPS Malta"; 78 78 } 79 79 80 - #if defined(CONFIG_MIPS_MT_SMTC) 81 - const char display_string[] = " SMTC LINUX ON MALTA "; 82 - #else 83 80 const char display_string[] = " LINUX ON MALTA "; 84 - #endif /* CONFIG_MIPS_MT_SMTC */ 85 81 86 82 #ifdef CONFIG_BLK_DEV_FD 87 83 static void __init fd_activate(void)
-162
arch/mips/mti-malta/malta-smtc.c
··· 1 - /* 2 - * Malta Platform-specific hooks for SMP operation 3 - */ 4 - #include <linux/irq.h> 5 - #include <linux/init.h> 6 - 7 - #include <asm/mipsregs.h> 8 - #include <asm/mipsmtregs.h> 9 - #include <asm/smtc.h> 10 - #include <asm/smtc_ipi.h> 11 - 12 - /* VPE/SMP Prototype implements platform interfaces directly */ 13 - 14 - /* 15 - * Cause the specified action to be performed on a targeted "CPU" 16 - */ 17 - 18 - static void msmtc_send_ipi_single(int cpu, unsigned int action) 19 - { 20 - /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 21 - smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 22 - } 23 - 24 - static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) 25 - { 26 - unsigned int i; 27 - 28 - for_each_cpu(i, mask) 29 - msmtc_send_ipi_single(i, action); 30 - } 31 - 32 - /* 33 - * Post-config but pre-boot cleanup entry point 34 - */ 35 - static void msmtc_init_secondary(void) 36 - { 37 - int myvpe; 38 - 39 - /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ 40 - myvpe = read_c0_tcbind() & TCBIND_CURVPE; 41 - if (myvpe != 0) { 42 - /* Ideally, this should be done only once per VPE, but... 
*/ 43 - clear_c0_status(ST0_IM); 44 - set_c0_status((0x100 << cp0_compare_irq) 45 - | (0x100 << MIPS_CPU_IPI_IRQ)); 46 - if (cp0_perfcount_irq >= 0) 47 - set_c0_status(0x100 << cp0_perfcount_irq); 48 - } 49 - 50 - smtc_init_secondary(); 51 - } 52 - 53 - /* 54 - * Platform "CPU" startup hook 55 - */ 56 - static void msmtc_boot_secondary(int cpu, struct task_struct *idle) 57 - { 58 - smtc_boot_secondary(cpu, idle); 59 - } 60 - 61 - /* 62 - * SMP initialization finalization entry point 63 - */ 64 - static void msmtc_smp_finish(void) 65 - { 66 - smtc_smp_finish(); 67 - } 68 - 69 - /* 70 - * Hook for after all CPUs are online 71 - */ 72 - 73 - static void msmtc_cpus_done(void) 74 - { 75 - } 76 - 77 - /* 78 - * Platform SMP pre-initialization 79 - * 80 - * As noted above, we can assume a single CPU for now 81 - * but it may be multithreaded. 82 - */ 83 - 84 - static void __init msmtc_smp_setup(void) 85 - { 86 - /* 87 - * we won't get the definitive value until 88 - * we've run smtc_prepare_cpus later, but 89 - * we would appear to need an upper bound now. 
90 - */ 91 - smp_num_siblings = smtc_build_cpu_map(0); 92 - } 93 - 94 - static void __init msmtc_prepare_cpus(unsigned int max_cpus) 95 - { 96 - smtc_prepare_cpus(max_cpus); 97 - } 98 - 99 - struct plat_smp_ops msmtc_smp_ops = { 100 - .send_ipi_single = msmtc_send_ipi_single, 101 - .send_ipi_mask = msmtc_send_ipi_mask, 102 - .init_secondary = msmtc_init_secondary, 103 - .smp_finish = msmtc_smp_finish, 104 - .cpus_done = msmtc_cpus_done, 105 - .boot_secondary = msmtc_boot_secondary, 106 - .smp_setup = msmtc_smp_setup, 107 - .prepare_cpus = msmtc_prepare_cpus, 108 - }; 109 - 110 - #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 111 - /* 112 - * IRQ affinity hook 113 - */ 114 - 115 - 116 - int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, 117 - bool force) 118 - { 119 - cpumask_t tmask; 120 - int cpu = 0; 121 - void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); 122 - 123 - /* 124 - * On the legacy Malta development board, all I/O interrupts 125 - * are routed through the 8259 and combined in a single signal 126 - * to the CPU daughterboard, and on the CoreFPGA2/3 34K models, 127 - * that signal is brought to IP2 of both VPEs. To avoid racing 128 - * concurrent interrupt service events, IP2 is enabled only on 129 - * one VPE, by convention VPE0. So long as no bits are ever 130 - * cleared in the affinity mask, there will never be any 131 - * interrupt forwarding. But as soon as a program or operator 132 - * sets affinity for one of the related IRQs, we need to make 133 - * sure that we don't ever try to forward across the VPE boundary, 134 - * at least not until we engineer a system where the interrupt 135 - * _ack() or _end() function can somehow know that it corresponds 136 - * to an interrupt taken on another VPE, and perform the appropriate 137 - * restoration of Status.IM state using MFTR/MTTR instead of the 138 - * normal local behavior. We also ensure that no attempt will 139 - * be made to forward to an offline "CPU". 
140 - */ 141 - 142 - cpumask_copy(&tmask, affinity); 143 - for_each_cpu(cpu, affinity) { 144 - if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) 145 - cpu_clear(cpu, tmask); 146 - } 147 - cpumask_copy(d->affinity, &tmask); 148 - 149 - if (cpus_empty(tmask)) 150 - /* 151 - * We could restore a default mask here, but the 152 - * runtime code can anyway deal with the null set 153 - */ 154 - printk(KERN_WARNING 155 - "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq); 156 - 157 - /* Do any generic SMTC IRQ affinity setup */ 158 - smtc_set_irq_affinity(d->irq, tmask); 159 - 160 - return IRQ_SET_MASK_OK_NOCOPY; 161 - } 162 - #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+13 -23
arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
··· 312 312 313 313 pr_debug("i2c_platform_probe\n"); 314 314 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 315 - if (!r) { 316 - ret = -ENODEV; 317 - goto out; 318 - } 315 + if (!r) 316 + return -ENODEV; 319 317 320 - priv = kzalloc(sizeof(struct i2c_platform_data), GFP_KERNEL); 321 - if (!priv) { 322 - ret = -ENOMEM; 323 - goto out; 324 - } 318 + priv = devm_kzalloc(&pdev->dev, sizeof(struct i2c_platform_data), 319 + GFP_KERNEL); 320 + if (!priv) 321 + return -ENOMEM; 325 322 326 323 /* FIXME: need to allocate resource in PIC32 space */ 327 324 #if 0 ··· 327 330 #else 328 331 priv->base = r->start; 329 332 #endif 330 - if (!priv->base) { 331 - ret = -EBUSY; 332 - goto out_mem; 333 - } 333 + if (!priv->base) 334 + return -EBUSY; 334 335 335 336 priv->xfer_timeout = 200; 336 337 priv->ack_timeout = 200; ··· 343 348 i2c_platform_setup(priv); 344 349 345 350 ret = i2c_add_numbered_adapter(&priv->adap); 346 - if (ret == 0) { 347 - platform_set_drvdata(pdev, priv); 348 - return 0; 351 + if (ret) { 352 + i2c_platform_disable(priv); 353 + return ret; 349 354 } 350 355 351 - i2c_platform_disable(priv); 352 - 353 - out_mem: 354 - kfree(priv); 355 - out: 356 - return ret; 356 + platform_set_drvdata(pdev, priv); 357 + return 0; 357 358 } 358 359 359 360 static int i2c_platform_remove(struct platform_device *pdev) ··· 360 369 platform_set_drvdata(pdev, NULL); 361 370 i2c_del_adapter(&priv->adap); 362 371 i2c_platform_disable(priv); 363 - kfree(priv); 364 372 return 0; 365 373 } 366 374
+3
arch/mips/net/Makefile
··· 1 + # MIPS networking code 2 + 3 + obj-$(CONFIG_BPF_JIT) += bpf_jit.o
+1399
arch/mips/net/bpf_jit.c
··· 1 + /* 2 + * Just-In-Time compiler for BPF filters on MIPS 3 + * 4 + * Copyright (c) 2014 Imagination Technologies Ltd. 5 + * Author: Markos Chandras <markos.chandras@imgtec.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License as published by the 9 + * Free Software Foundation; version 2 of the License. 10 + */ 11 + 12 + #include <linux/bitops.h> 13 + #include <linux/compiler.h> 14 + #include <linux/errno.h> 15 + #include <linux/filter.h> 16 + #include <linux/if_vlan.h> 17 + #include <linux/kconfig.h> 18 + #include <linux/moduleloader.h> 19 + #include <linux/netdevice.h> 20 + #include <linux/string.h> 21 + #include <linux/slab.h> 22 + #include <linux/types.h> 23 + #include <asm/bitops.h> 24 + #include <asm/cacheflush.h> 25 + #include <asm/cpu-features.h> 26 + #include <asm/uasm.h> 27 + 28 + #include "bpf_jit.h" 29 + 30 + /* ABI 31 + * 32 + * s0 1st scratch register 33 + * s1 2nd scratch register 34 + * s2 offset register 35 + * s3 BPF register A 36 + * s4 BPF register X 37 + * s5 *skb 38 + * s6 *scratch memory 39 + * 40 + * On entry (*bpf_func)(*skb, *filter) 41 + * a0 = MIPS_R_A0 = skb; 42 + * a1 = MIPS_R_A1 = filter; 43 + * 44 + * Stack 45 + * ... 46 + * M[15] 47 + * M[14] 48 + * M[13] 49 + * ... 50 + * M[0] <-- r_M 51 + * saved reg k-1 52 + * saved reg k-2 53 + * ... 
54 + * saved reg 0 <-- r_sp 55 + * <no argument area> 56 + * 57 + * Packet layout 58 + * 59 + * <--------------------- len ------------------------> 60 + * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------> 61 + * ---------------------------------------------------- 62 + * | skb->data | 63 + * ---------------------------------------------------- 64 + */ 65 + 66 + #define RSIZE (sizeof(unsigned long)) 67 + #define ptr typeof(unsigned long) 68 + 69 + /* ABI specific return values */ 70 + #ifdef CONFIG_32BIT /* O32 */ 71 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 72 + #define r_err MIPS_R_V1 73 + #define r_val MIPS_R_V0 74 + #else /* CONFIG_CPU_LITTLE_ENDIAN */ 75 + #define r_err MIPS_R_V0 76 + #define r_val MIPS_R_V1 77 + #endif 78 + #else /* N64 */ 79 + #define r_err MIPS_R_V0 80 + #define r_val MIPS_R_V0 81 + #endif 82 + 83 + #define r_ret MIPS_R_V0 84 + 85 + /* 86 + * Use 2 scratch registers to avoid pipeline interlocks. 87 + * There is no overhead during epilogue and prologue since 88 + * any of the $s0-$s6 registers will only be preserved if 89 + * they are going to actually be used. 
90 + */ 91 + #define r_s0 MIPS_R_S0 /* scratch reg 1 */ 92 + #define r_s1 MIPS_R_S1 /* scratch reg 2 */ 93 + #define r_off MIPS_R_S2 94 + #define r_A MIPS_R_S3 95 + #define r_X MIPS_R_S4 96 + #define r_skb MIPS_R_S5 97 + #define r_M MIPS_R_S6 98 + #define r_tmp_imm MIPS_R_T6 /* No need to preserve this */ 99 + #define r_tmp MIPS_R_T7 /* No need to preserve this */ 100 + #define r_zero MIPS_R_ZERO 101 + #define r_sp MIPS_R_SP 102 + #define r_ra MIPS_R_RA 103 + 104 + #define SCRATCH_OFF(k) (4 * (k)) 105 + 106 + /* JIT flags */ 107 + #define SEEN_CALL (1 << BPF_MEMWORDS) 108 + #define SEEN_SREG_SFT (BPF_MEMWORDS + 1) 109 + #define SEEN_SREG_BASE (1 << SEEN_SREG_SFT) 110 + #define SEEN_SREG(x) (SEEN_SREG_BASE << (x)) 111 + #define SEEN_S0 SEEN_SREG(0) 112 + #define SEEN_S1 SEEN_SREG(1) 113 + #define SEEN_OFF SEEN_SREG(2) 114 + #define SEEN_A SEEN_SREG(3) 115 + #define SEEN_X SEEN_SREG(4) 116 + #define SEEN_SKB SEEN_SREG(5) 117 + #define SEEN_MEM SEEN_SREG(6) 118 + 119 + /* Arguments used by JIT */ 120 + #define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ 121 + 122 + #define FLAG_NEED_X_RESET (1 << 0) 123 + 124 + #define SBIT(x) (1 << (x)) /* Signed version of BIT() */ 125 + 126 + /** 127 + * struct jit_ctx - JIT context 128 + * @skf: The sk_filter 129 + * @prologue_bytes: Number of bytes for prologue 130 + * @idx: Instruction index 131 + * @flags: JIT flags 132 + * @offsets: Instruction offsets 133 + * @target: Memory location for the compiled filter 134 + */ 135 + struct jit_ctx { 136 + const struct sk_filter *skf; 137 + unsigned int prologue_bytes; 138 + u32 idx; 139 + u32 flags; 140 + u32 *offsets; 141 + u32 *target; 142 + }; 143 + 144 + 145 + static inline int optimize_div(u32 *k) 146 + { 147 + /* power of 2 divides can be implemented with right shift */ 148 + if (!(*k & (*k-1))) { 149 + *k = ilog2(*k); 150 + return 1; 151 + } 152 + 153 + return 0; 154 + } 155 + 156 + /* Simply emit the instruction if the JIT memory space has been allocated */ 157 + #define 
emit_instr(ctx, func, ...) \ 158 + do { \ 159 + if ((ctx)->target != NULL) { \ 160 + u32 *p = &(ctx)->target[ctx->idx]; \ 161 + uasm_i_##func(&p, ##__VA_ARGS__); \ 162 + } \ 163 + (ctx)->idx++; \ 164 + } while (0) 165 + 166 + /* Determine if immediate is within the 16-bit signed range */ 167 + static inline bool is_range16(s32 imm) 168 + { 169 + if (imm >= SBIT(15) || imm < -SBIT(15)) 170 + return true; 171 + return false; 172 + } 173 + 174 + static inline void emit_addu(unsigned int dst, unsigned int src1, 175 + unsigned int src2, struct jit_ctx *ctx) 176 + { 177 + emit_instr(ctx, addu, dst, src1, src2); 178 + } 179 + 180 + static inline void emit_nop(struct jit_ctx *ctx) 181 + { 182 + emit_instr(ctx, nop); 183 + } 184 + 185 + /* Load a u32 immediate to a register */ 186 + static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) 187 + { 188 + if (ctx->target != NULL) { 189 + /* addiu can only handle s16 */ 190 + if (is_range16(imm)) { 191 + u32 *p = &ctx->target[ctx->idx]; 192 + uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); 193 + p = &ctx->target[ctx->idx + 1]; 194 + uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff); 195 + } else { 196 + u32 *p = &ctx->target[ctx->idx]; 197 + uasm_i_addiu(&p, dst, r_zero, imm); 198 + } 199 + } 200 + ctx->idx++; 201 + 202 + if (is_range16(imm)) 203 + ctx->idx++; 204 + } 205 + 206 + static inline void emit_or(unsigned int dst, unsigned int src1, 207 + unsigned int src2, struct jit_ctx *ctx) 208 + { 209 + emit_instr(ctx, or, dst, src1, src2); 210 + } 211 + 212 + static inline void emit_ori(unsigned int dst, unsigned src, u32 imm, 213 + struct jit_ctx *ctx) 214 + { 215 + if (imm >= BIT(16)) { 216 + emit_load_imm(r_tmp, imm, ctx); 217 + emit_or(dst, src, r_tmp, ctx); 218 + } else { 219 + emit_instr(ctx, ori, dst, src, imm); 220 + } 221 + } 222 + 223 + 224 + static inline void emit_daddu(unsigned int dst, unsigned int src1, 225 + unsigned int src2, struct jit_ctx *ctx) 226 + { 227 + emit_instr(ctx, daddu, dst, src1, 
src2); 228 + } 229 + 230 + static inline void emit_daddiu(unsigned int dst, unsigned int src, 231 + int imm, struct jit_ctx *ctx) 232 + { 233 + /* 234 + * Only used for stack, so the imm is relatively small 235 + * and it fits in 15-bits 236 + */ 237 + emit_instr(ctx, daddiu, dst, src, imm); 238 + } 239 + 240 + static inline void emit_addiu(unsigned int dst, unsigned int src, 241 + u32 imm, struct jit_ctx *ctx) 242 + { 243 + if (is_range16(imm)) { 244 + emit_load_imm(r_tmp, imm, ctx); 245 + emit_addu(dst, r_tmp, src, ctx); 246 + } else { 247 + emit_instr(ctx, addiu, dst, src, imm); 248 + } 249 + } 250 + 251 + static inline void emit_and(unsigned int dst, unsigned int src1, 252 + unsigned int src2, struct jit_ctx *ctx) 253 + { 254 + emit_instr(ctx, and, dst, src1, src2); 255 + } 256 + 257 + static inline void emit_andi(unsigned int dst, unsigned int src, 258 + u32 imm, struct jit_ctx *ctx) 259 + { 260 + /* If imm does not fit in u16 then load it to register */ 261 + if (imm >= BIT(16)) { 262 + emit_load_imm(r_tmp, imm, ctx); 263 + emit_and(dst, src, r_tmp, ctx); 264 + } else { 265 + emit_instr(ctx, andi, dst, src, imm); 266 + } 267 + } 268 + 269 + static inline void emit_xor(unsigned int dst, unsigned int src1, 270 + unsigned int src2, struct jit_ctx *ctx) 271 + { 272 + emit_instr(ctx, xor, dst, src1, src2); 273 + } 274 + 275 + static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx) 276 + { 277 + /* If imm does not fit in u16 then load it to register */ 278 + if (imm >= BIT(16)) { 279 + emit_load_imm(r_tmp, imm, ctx); 280 + emit_xor(dst, src, r_tmp, ctx); 281 + } else { 282 + emit_instr(ctx, xori, dst, src, imm); 283 + } 284 + } 285 + 286 + static inline void emit_stack_offset(int offset, struct jit_ctx *ctx) 287 + { 288 + if (config_enabled(CONFIG_64BIT)) 289 + emit_instr(ctx, daddiu, r_sp, r_sp, offset); 290 + else 291 + emit_instr(ctx, addiu, r_sp, r_sp, offset); 292 + 293 + } 294 + 295 + static inline void emit_subu(unsigned int dst, 
unsigned int src1, 296 + unsigned int src2, struct jit_ctx *ctx) 297 + { 298 + emit_instr(ctx, subu, dst, src1, src2); 299 + } 300 + 301 + static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx) 302 + { 303 + emit_subu(reg, r_zero, reg, ctx); 304 + } 305 + 306 + static inline void emit_sllv(unsigned int dst, unsigned int src, 307 + unsigned int sa, struct jit_ctx *ctx) 308 + { 309 + emit_instr(ctx, sllv, dst, src, sa); 310 + } 311 + 312 + static inline void emit_sll(unsigned int dst, unsigned int src, 313 + unsigned int sa, struct jit_ctx *ctx) 314 + { 315 + /* sa is 5-bits long */ 316 + BUG_ON(sa >= BIT(5)); 317 + emit_instr(ctx, sll, dst, src, sa); 318 + } 319 + 320 + static inline void emit_srlv(unsigned int dst, unsigned int src, 321 + unsigned int sa, struct jit_ctx *ctx) 322 + { 323 + emit_instr(ctx, srlv, dst, src, sa); 324 + } 325 + 326 + static inline void emit_srl(unsigned int dst, unsigned int src, 327 + unsigned int sa, struct jit_ctx *ctx) 328 + { 329 + /* sa is 5-bits long */ 330 + BUG_ON(sa >= BIT(5)); 331 + emit_instr(ctx, srl, dst, src, sa); 332 + } 333 + 334 + static inline void emit_sltu(unsigned int dst, unsigned int src1, 335 + unsigned int src2, struct jit_ctx *ctx) 336 + { 337 + emit_instr(ctx, sltu, dst, src1, src2); 338 + } 339 + 340 + static inline void emit_sltiu(unsigned dst, unsigned int src, 341 + unsigned int imm, struct jit_ctx *ctx) 342 + { 343 + /* 16 bit immediate */ 344 + if (is_range16((s32)imm)) { 345 + emit_load_imm(r_tmp, imm, ctx); 346 + emit_sltu(dst, src, r_tmp, ctx); 347 + } else { 348 + emit_instr(ctx, sltiu, dst, src, imm); 349 + } 350 + 351 + } 352 + 353 + /* Store register on the stack */ 354 + static inline void emit_store_stack_reg(ptr reg, ptr base, 355 + unsigned int offset, 356 + struct jit_ctx *ctx) 357 + { 358 + if (config_enabled(CONFIG_64BIT)) 359 + emit_instr(ctx, sd, reg, offset, base); 360 + else 361 + emit_instr(ctx, sw, reg, offset, base); 362 + } 363 + 364 + static inline void emit_store(ptr 
reg, ptr base, unsigned int offset, 365 + struct jit_ctx *ctx) 366 + { 367 + emit_instr(ctx, sw, reg, offset, base); 368 + } 369 + 370 + static inline void emit_load_stack_reg(ptr reg, ptr base, 371 + unsigned int offset, 372 + struct jit_ctx *ctx) 373 + { 374 + if (config_enabled(CONFIG_64BIT)) 375 + emit_instr(ctx, ld, reg, offset, base); 376 + else 377 + emit_instr(ctx, lw, reg, offset, base); 378 + } 379 + 380 + static inline void emit_load(unsigned int reg, unsigned int base, 381 + unsigned int offset, struct jit_ctx *ctx) 382 + { 383 + emit_instr(ctx, lw, reg, offset, base); 384 + } 385 + 386 + static inline void emit_load_byte(unsigned int reg, unsigned int base, 387 + unsigned int offset, struct jit_ctx *ctx) 388 + { 389 + emit_instr(ctx, lb, reg, offset, base); 390 + } 391 + 392 + static inline void emit_half_load(unsigned int reg, unsigned int base, 393 + unsigned int offset, struct jit_ctx *ctx) 394 + { 395 + emit_instr(ctx, lh, reg, offset, base); 396 + } 397 + 398 + static inline void emit_mul(unsigned int dst, unsigned int src1, 399 + unsigned int src2, struct jit_ctx *ctx) 400 + { 401 + emit_instr(ctx, mul, dst, src1, src2); 402 + } 403 + 404 + static inline void emit_div(unsigned int dst, unsigned int src, 405 + struct jit_ctx *ctx) 406 + { 407 + if (ctx->target != NULL) { 408 + u32 *p = &ctx->target[ctx->idx]; 409 + uasm_i_divu(&p, dst, src); 410 + p = &ctx->target[ctx->idx + 1]; 411 + uasm_i_mfhi(&p, dst); 412 + } 413 + ctx->idx += 2; /* 2 insts */ 414 + } 415 + 416 + static inline void emit_mod(unsigned int dst, unsigned int src, 417 + struct jit_ctx *ctx) 418 + { 419 + if (ctx->target != NULL) { 420 + u32 *p = &ctx->target[ctx->idx]; 421 + uasm_i_divu(&p, dst, src); 422 + p = &ctx->target[ctx->idx + 1]; 423 + uasm_i_mflo(&p, dst); 424 + } 425 + ctx->idx += 2; /* 2 insts */ 426 + } 427 + 428 + static inline void emit_dsll(unsigned int dst, unsigned int src, 429 + unsigned int sa, struct jit_ctx *ctx) 430 + { 431 + emit_instr(ctx, dsll, dst, src, 
sa); 432 + } 433 + 434 + static inline void emit_dsrl32(unsigned int dst, unsigned int src, 435 + unsigned int sa, struct jit_ctx *ctx) 436 + { 437 + emit_instr(ctx, dsrl32, dst, src, sa); 438 + } 439 + 440 + static inline void emit_wsbh(unsigned int dst, unsigned int src, 441 + struct jit_ctx *ctx) 442 + { 443 + emit_instr(ctx, wsbh, dst, src); 444 + } 445 + 446 + /* load a function pointer to register */ 447 + static inline void emit_load_func(unsigned int reg, ptr imm, 448 + struct jit_ctx *ctx) 449 + { 450 + if (config_enabled(CONFIG_64BIT)) { 451 + /* At this point imm is always 64-bit */ 452 + emit_load_imm(r_tmp, (u64)imm >> 32, ctx); 453 + emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ 454 + emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx); 455 + emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ 456 + emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx); 457 + } else { 458 + emit_load_imm(reg, imm, ctx); 459 + } 460 + } 461 + 462 + /* Move to real MIPS register */ 463 + static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) 464 + { 465 + if (config_enabled(CONFIG_64BIT)) 466 + emit_daddu(dst, src, r_zero, ctx); 467 + else 468 + emit_addu(dst, src, r_zero, ctx); 469 + } 470 + 471 + /* Move to JIT (32-bit) register */ 472 + static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) 473 + { 474 + emit_addu(dst, src, r_zero, ctx); 475 + } 476 + 477 + /* Compute the immediate value for PC-relative branches. */ 478 + static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) 479 + { 480 + if (ctx->target == NULL) 481 + return 0; 482 + 483 + /* 484 + * We want a pc-relative branch. We only do forward branches 485 + * so tgt is always after pc. tgt is the instruction offset 486 + * we want to jump to. 
487 + 488 + * Branch on MIPS: 489 + * I: target_offset <- sign_extend(offset) 490 + * I+1: PC += target_offset (delay slot) 491 + * 492 + * ctx->idx currently points to the branch instruction 493 + * but the offset is added to the delay slot so we need 494 + * to subtract 4. 495 + */ 496 + return ctx->offsets[tgt] - 497 + (ctx->idx * 4 - ctx->prologue_bytes) - 4; 498 + } 499 + 500 + static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2, 501 + unsigned int imm, struct jit_ctx *ctx) 502 + { 503 + if (ctx->target != NULL) { 504 + u32 *p = &ctx->target[ctx->idx]; 505 + 506 + switch (cond) { 507 + case MIPS_COND_EQ: 508 + uasm_i_beq(&p, reg1, reg2, imm); 509 + break; 510 + case MIPS_COND_NE: 511 + uasm_i_bne(&p, reg1, reg2, imm); 512 + break; 513 + case MIPS_COND_ALL: 514 + uasm_i_b(&p, imm); 515 + break; 516 + default: 517 + pr_warn("%s: Unhandled branch conditional: %d\n", 518 + __func__, cond); 519 + } 520 + } 521 + ctx->idx++; 522 + } 523 + 524 + static inline void emit_b(unsigned int imm, struct jit_ctx *ctx) 525 + { 526 + emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx); 527 + } 528 + 529 + static inline void emit_jalr(unsigned int link, unsigned int reg, 530 + struct jit_ctx *ctx) 531 + { 532 + emit_instr(ctx, jalr, link, reg); 533 + } 534 + 535 + static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx) 536 + { 537 + emit_instr(ctx, jr, reg); 538 + } 539 + 540 + static inline u16 align_sp(unsigned int num) 541 + { 542 + /* Double word alignment for 32-bit, quadword for 64-bit */ 543 + unsigned int align = config_enabled(CONFIG_64BIT) ? 
16 : 8; 544 + num = (num + (align - 1)) & -align; 545 + return num; 546 + } 547 + 548 + static inline void update_on_xread(struct jit_ctx *ctx) 549 + { 550 + if (!(ctx->flags & SEEN_X)) 551 + ctx->flags |= FLAG_NEED_X_RESET; 552 + 553 + ctx->flags |= SEEN_X; 554 + } 555 + 556 + static bool is_load_to_a(u16 inst) 557 + { 558 + switch (inst) { 559 + case BPF_S_LD_W_LEN: 560 + case BPF_S_LD_W_ABS: 561 + case BPF_S_LD_H_ABS: 562 + case BPF_S_LD_B_ABS: 563 + case BPF_S_ANC_CPU: 564 + case BPF_S_ANC_IFINDEX: 565 + case BPF_S_ANC_MARK: 566 + case BPF_S_ANC_PROTOCOL: 567 + case BPF_S_ANC_RXHASH: 568 + case BPF_S_ANC_VLAN_TAG: 569 + case BPF_S_ANC_VLAN_TAG_PRESENT: 570 + case BPF_S_ANC_QUEUE: 571 + return true; 572 + default: 573 + return false; 574 + } 575 + } 576 + 577 + static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) 578 + { 579 + int i = 0, real_off = 0; 580 + u32 sflags, tmp_flags; 581 + 582 + /* Adjust the stack pointer */ 583 + emit_stack_offset(-align_sp(offset), ctx); 584 + 585 + if (ctx->flags & SEEN_CALL) { 586 + /* Argument save area */ 587 + if (config_enabled(CONFIG_64BIT)) 588 + /* Bottom of current frame */ 589 + real_off = align_sp(offset) - RSIZE; 590 + else 591 + /* Top of previous frame */ 592 + real_off = align_sp(offset) + RSIZE; 593 + emit_store_stack_reg(MIPS_R_A0, r_sp, real_off, ctx); 594 + emit_store_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx); 595 + 596 + real_off = 0; 597 + } 598 + 599 + tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; 600 + /* sflags is essentially a bitmap */ 601 + while (tmp_flags) { 602 + if ((sflags >> i) & 0x1) { 603 + emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off, 604 + ctx); 605 + real_off += RSIZE; 606 + } 607 + i++; 608 + tmp_flags >>= 1; 609 + } 610 + 611 + /* save return address */ 612 + if (ctx->flags & SEEN_CALL) { 613 + emit_store_stack_reg(r_ra, r_sp, real_off, ctx); 614 + real_off += RSIZE; 615 + } 616 + 617 + /* Setup r_M leaving the alignment gap if necessary */ 618 + if 
(ctx->flags & SEEN_MEM) { 619 + if (real_off % (RSIZE * 2)) 620 + real_off += RSIZE; 621 + emit_addiu(r_M, r_sp, real_off, ctx); 622 + } 623 + } 624 + 625 + static void restore_bpf_jit_regs(struct jit_ctx *ctx, 626 + unsigned int offset) 627 + { 628 + int i, real_off = 0; 629 + u32 sflags, tmp_flags; 630 + 631 + if (ctx->flags & SEEN_CALL) { 632 + if (config_enabled(CONFIG_64BIT)) 633 + /* Bottom of current frame */ 634 + real_off = align_sp(offset) - RSIZE; 635 + else 636 + /* Top of previous frame */ 637 + real_off = align_sp(offset) + RSIZE; 638 + emit_load_stack_reg(MIPS_R_A0, r_sp, real_off, ctx); 639 + emit_load_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx); 640 + 641 + real_off = 0; 642 + } 643 + 644 + tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; 645 + /* sflags is a bitmap */ 646 + i = 0; 647 + while (tmp_flags) { 648 + if ((sflags >> i) & 0x1) { 649 + emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off, 650 + ctx); 651 + real_off += RSIZE; 652 + } 653 + i++; 654 + tmp_flags >>= 1; 655 + } 656 + 657 + /* restore return address */ 658 + if (ctx->flags & SEEN_CALL) 659 + emit_load_stack_reg(r_ra, r_sp, real_off, ctx); 660 + 661 + /* Restore the sp and discard the scrach memory */ 662 + emit_stack_offset(align_sp(offset), ctx); 663 + } 664 + 665 + static unsigned int get_stack_depth(struct jit_ctx *ctx) 666 + { 667 + int sp_off = 0; 668 + 669 + 670 + /* How may s* regs do we need to preserved? */ 671 + sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * RSIZE; 672 + 673 + if (ctx->flags & SEEN_MEM) 674 + sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */ 675 + 676 + if (ctx->flags & SEEN_CALL) 677 + /* 678 + * The JIT code make calls to external functions using 2 679 + * arguments. Therefore, for o32 we don't need to allocate 680 + * space because we don't care if the argumetns are lost 681 + * across calls. We do need however to preserve incoming 682 + * arguments but the space is already allocated for us by 683 + * the caller. 
On the other hand, for n64, we need to allocate 684 + * this space ourselves. We need to preserve $ra as well. 685 + */ 686 + sp_off += config_enabled(CONFIG_64BIT) ? 687 + (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE; 688 + 689 + /* 690 + * Subtract the bytes for the last registers since we only care about 691 + * the location on the stack pointer. 692 + */ 693 + return sp_off - RSIZE; 694 + } 695 + 696 + static void build_prologue(struct jit_ctx *ctx) 697 + { 698 + u16 first_inst = ctx->skf->insns[0].code; 699 + int sp_off; 700 + 701 + /* Calculate the total offset for the stack pointer */ 702 + sp_off = get_stack_depth(ctx); 703 + save_bpf_jit_regs(ctx, sp_off); 704 + 705 + if (ctx->flags & SEEN_SKB) 706 + emit_reg_move(r_skb, MIPS_R_A0, ctx); 707 + 708 + if (ctx->flags & FLAG_NEED_X_RESET) 709 + emit_jit_reg_move(r_X, r_zero, ctx); 710 + 711 + /* Do not leak kernel data to userspace */ 712 + if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) 713 + emit_jit_reg_move(r_A, r_zero, ctx); 714 + } 715 + 716 + static void build_epilogue(struct jit_ctx *ctx) 717 + { 718 + unsigned int sp_off; 719 + 720 + /* Calculate the total offset for the stack pointer */ 721 + 722 + sp_off = get_stack_depth(ctx); 723 + restore_bpf_jit_regs(ctx, sp_off); 724 + 725 + /* Return */ 726 + emit_jr(r_ra, ctx); 727 + emit_nop(ctx); 728 + } 729 + 730 + static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) 731 + { 732 + u8 ret; 733 + int err; 734 + 735 + err = skb_copy_bits(skb, offset, &ret, 1); 736 + 737 + return (u64)err << 32 | ret; 738 + } 739 + 740 + static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 741 + { 742 + u16 ret; 743 + int err; 744 + 745 + err = skb_copy_bits(skb, offset, &ret, 2); 746 + 747 + return (u64)err << 32 | ntohs(ret); 748 + } 749 + 750 + static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 751 + { 752 + u32 ret; 753 + int err; 754 + 755 + err = skb_copy_bits(skb, offset, &ret, 4); 756 + 757 + return (u64)err << 32 | ntohl(ret); 
758 + } 759 + 760 + #define PKT_TYPE_MAX 7 761 + static int pkt_type_offset(void) 762 + { 763 + struct sk_buff skb_probe = { 764 + .pkt_type = ~0, 765 + }; 766 + char *ct = (char *)&skb_probe; 767 + unsigned int off; 768 + 769 + for (off = 0; off < sizeof(struct sk_buff); off++) { 770 + if (ct[off] == PKT_TYPE_MAX) 771 + return off; 772 + } 773 + pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n"); 774 + return -1; 775 + } 776 + 777 + static int build_body(struct jit_ctx *ctx) 778 + { 779 + void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; 780 + const struct sk_filter *prog = ctx->skf; 781 + const struct sock_filter *inst; 782 + unsigned int i, off, load_order, condt; 783 + u32 k, b_off __maybe_unused; 784 + 785 + for (i = 0; i < prog->len; i++) { 786 + inst = &(prog->insns[i]); 787 + pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", 788 + __func__, inst->code, inst->jt, inst->jf, inst->k); 789 + k = inst->k; 790 + 791 + if (ctx->target == NULL) 792 + ctx->offsets[i] = ctx->idx * 4; 793 + 794 + switch (inst->code) { 795 + case BPF_S_LD_IMM: 796 + /* A <- k ==> li r_A, k */ 797 + ctx->flags |= SEEN_A; 798 + emit_load_imm(r_A, k, ctx); 799 + break; 800 + case BPF_S_LD_W_LEN: 801 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 802 + /* A <- len ==> lw r_A, offset(skb) */ 803 + ctx->flags |= SEEN_SKB | SEEN_A; 804 + off = offsetof(struct sk_buff, len); 805 + emit_load(r_A, r_skb, off, ctx); 806 + break; 807 + case BPF_S_LD_MEM: 808 + /* A <- M[k] ==> lw r_A, offset(M) */ 809 + ctx->flags |= SEEN_MEM | SEEN_A; 810 + emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); 811 + break; 812 + case BPF_S_LD_W_ABS: 813 + /* A <- P[k:4] */ 814 + load_order = 2; 815 + goto load; 816 + case BPF_S_LD_H_ABS: 817 + /* A <- P[k:2] */ 818 + load_order = 1; 819 + goto load; 820 + case BPF_S_LD_B_ABS: 821 + /* A <- P[k:1] */ 822 + load_order = 0; 823 + load: 824 + emit_load_imm(r_off, k, ctx); 825 + load_common: 826 + ctx->flags |= 
SEEN_CALL | SEEN_OFF | SEEN_S0 | 827 + SEEN_SKB | SEEN_A; 828 + 829 + emit_load_func(r_s0, (ptr)load_func[load_order], 830 + ctx); 831 + emit_reg_move(MIPS_R_A0, r_skb, ctx); 832 + emit_jalr(MIPS_R_RA, r_s0, ctx); 833 + /* Load second argument to delay slot */ 834 + emit_reg_move(MIPS_R_A1, r_off, ctx); 835 + /* Check the error value */ 836 + if (config_enabled(CONFIG_64BIT)) { 837 + /* Get error code from the top 32-bits */ 838 + emit_dsrl32(r_s0, r_val, 0, ctx); 839 + /* Branch to 3 instructions ahead */ 840 + emit_bcond(MIPS_COND_NE, r_s0, r_zero, 3 << 2, 841 + ctx); 842 + } else { 843 + /* Branch to 3 instructions ahead */ 844 + emit_bcond(MIPS_COND_NE, r_err, r_zero, 3 << 2, 845 + ctx); 846 + } 847 + emit_nop(ctx); 848 + /* We are good */ 849 + emit_b(b_imm(i + 1, ctx), ctx); 850 + emit_jit_reg_move(r_A, r_val, ctx); 851 + /* Return with error */ 852 + emit_b(b_imm(prog->len, ctx), ctx); 853 + emit_reg_move(r_ret, r_zero, ctx); 854 + break; 855 + case BPF_S_LD_W_IND: 856 + /* A <- P[X + k:4] */ 857 + load_order = 2; 858 + goto load_ind; 859 + case BPF_S_LD_H_IND: 860 + /* A <- P[X + k:2] */ 861 + load_order = 1; 862 + goto load_ind; 863 + case BPF_S_LD_B_IND: 864 + /* A <- P[X + k:1] */ 865 + load_order = 0; 866 + load_ind: 867 + update_on_xread(ctx); 868 + ctx->flags |= SEEN_OFF | SEEN_X; 869 + emit_addiu(r_off, r_X, k, ctx); 870 + goto load_common; 871 + case BPF_S_LDX_IMM: 872 + /* X <- k */ 873 + ctx->flags |= SEEN_X; 874 + emit_load_imm(r_X, k, ctx); 875 + break; 876 + case BPF_S_LDX_MEM: 877 + /* X <- M[k] */ 878 + ctx->flags |= SEEN_X | SEEN_MEM; 879 + emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); 880 + break; 881 + case BPF_S_LDX_W_LEN: 882 + /* X <- len */ 883 + ctx->flags |= SEEN_X | SEEN_SKB; 884 + off = offsetof(struct sk_buff, len); 885 + emit_load(r_X, r_skb, off, ctx); 886 + break; 887 + case BPF_S_LDX_B_MSH: 888 + /* X <- 4 * (P[k:1] & 0xf) */ 889 + ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB; 890 + /* Load offset to a1 */ 891 + 
emit_load_func(r_s0, (ptr)jit_get_skb_b, ctx); 892 + /* 893 + * This may emit two instructions so it may not fit 894 + * in the delay slot. So use a0 in the delay slot. 895 + */ 896 + emit_load_imm(MIPS_R_A1, k, ctx); 897 + emit_jalr(MIPS_R_RA, r_s0, ctx); 898 + emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ 899 + /* Check the error value */ 900 + if (config_enabled(CONFIG_64BIT)) { 901 + /* Top 32-bits of $v0 on 64-bit */ 902 + emit_dsrl32(r_s0, r_val, 0, ctx); 903 + emit_bcond(MIPS_COND_NE, r_s0, r_zero, 904 + 3 << 2, ctx); 905 + } else { 906 + emit_bcond(MIPS_COND_NE, r_err, r_zero, 907 + 3 << 2, ctx); 908 + } 909 + /* No need for delay slot */ 910 + /* We are good */ 911 + /* X <- P[1:K] & 0xf */ 912 + emit_andi(r_X, r_val, 0xf, ctx); 913 + /* X << 2 */ 914 + emit_b(b_imm(i + 1, ctx), ctx); 915 + emit_sll(r_X, r_X, 2, ctx); /* delay slot */ 916 + /* Return with error */ 917 + emit_b(b_imm(prog->len, ctx), ctx); 918 + emit_load_imm(r_ret, 0, ctx); /* delay slot */ 919 + break; 920 + case BPF_S_ST: 921 + /* M[k] <- A */ 922 + ctx->flags |= SEEN_MEM | SEEN_A; 923 + emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); 924 + break; 925 + case BPF_S_STX: 926 + /* M[k] <- X */ 927 + ctx->flags |= SEEN_MEM | SEEN_X; 928 + emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); 929 + break; 930 + case BPF_S_ALU_ADD_K: 931 + /* A += K */ 932 + ctx->flags |= SEEN_A; 933 + emit_addiu(r_A, r_A, k, ctx); 934 + break; 935 + case BPF_S_ALU_ADD_X: 936 + /* A += X */ 937 + ctx->flags |= SEEN_A | SEEN_X; 938 + emit_addu(r_A, r_A, r_X, ctx); 939 + break; 940 + case BPF_S_ALU_SUB_K: 941 + /* A -= K */ 942 + ctx->flags |= SEEN_A; 943 + emit_addiu(r_A, r_A, -k, ctx); 944 + break; 945 + case BPF_S_ALU_SUB_X: 946 + /* A -= X */ 947 + ctx->flags |= SEEN_A | SEEN_X; 948 + emit_subu(r_A, r_A, r_X, ctx); 949 + break; 950 + case BPF_S_ALU_MUL_K: 951 + /* A *= K */ 952 + /* Load K to scratch register before MUL */ 953 + ctx->flags |= SEEN_A | SEEN_S0; 954 + emit_load_imm(r_s0, k, ctx); 955 + emit_mul(r_A, 
r_A, r_s0, ctx); 956 + break; 957 + case BPF_S_ALU_MUL_X: 958 + /* A *= X */ 959 + update_on_xread(ctx); 960 + ctx->flags |= SEEN_A | SEEN_X; 961 + emit_mul(r_A, r_A, r_X, ctx); 962 + break; 963 + case BPF_S_ALU_DIV_K: 964 + /* A /= k */ 965 + if (k == 1) 966 + break; 967 + if (optimize_div(&k)) { 968 + ctx->flags |= SEEN_A; 969 + emit_srl(r_A, r_A, k, ctx); 970 + break; 971 + } 972 + ctx->flags |= SEEN_A | SEEN_S0; 973 + emit_load_imm(r_s0, k, ctx); 974 + emit_div(r_A, r_s0, ctx); 975 + break; 976 + case BPF_S_ALU_MOD_K: 977 + /* A %= k */ 978 + if (k == 1 || optimize_div(&k)) { 979 + ctx->flags |= SEEN_A; 980 + emit_jit_reg_move(r_A, r_zero, ctx); 981 + } else { 982 + ctx->flags |= SEEN_A | SEEN_S0; 983 + emit_load_imm(r_s0, k, ctx); 984 + emit_mod(r_A, r_s0, ctx); 985 + } 986 + break; 987 + case BPF_S_ALU_DIV_X: 988 + /* A /= X */ 989 + update_on_xread(ctx); 990 + ctx->flags |= SEEN_X | SEEN_A; 991 + /* Check if r_X is zero */ 992 + emit_bcond(MIPS_COND_EQ, r_X, r_zero, 993 + b_imm(prog->len, ctx), ctx); 994 + emit_load_imm(r_val, 0, ctx); /* delay slot */ 995 + emit_div(r_A, r_X, ctx); 996 + break; 997 + case BPF_S_ALU_MOD_X: 998 + /* A %= X */ 999 + update_on_xread(ctx); 1000 + ctx->flags |= SEEN_X | SEEN_A; 1001 + /* Check if r_X is zero */ 1002 + emit_bcond(MIPS_COND_EQ, r_X, r_zero, 1003 + b_imm(prog->len, ctx), ctx); 1004 + emit_load_imm(r_val, 0, ctx); /* delay slot */ 1005 + emit_mod(r_A, r_X, ctx); 1006 + break; 1007 + case BPF_S_ALU_OR_K: 1008 + /* A |= K */ 1009 + ctx->flags |= SEEN_A; 1010 + emit_ori(r_A, r_A, k, ctx); 1011 + break; 1012 + case BPF_S_ALU_OR_X: 1013 + /* A |= X */ 1014 + update_on_xread(ctx); 1015 + ctx->flags |= SEEN_A; 1016 + emit_ori(r_A, r_A, r_X, ctx); 1017 + break; 1018 + case BPF_S_ALU_XOR_K: 1019 + /* A ^= k */ 1020 + ctx->flags |= SEEN_A; 1021 + emit_xori(r_A, r_A, k, ctx); 1022 + break; 1023 + case BPF_S_ANC_ALU_XOR_X: 1024 + case BPF_S_ALU_XOR_X: 1025 + /* A ^= X */ 1026 + update_on_xread(ctx); 1027 + ctx->flags |= SEEN_A; 
1028 + emit_xor(r_A, r_A, r_X, ctx); 1029 + break; 1030 + case BPF_S_ALU_AND_K: 1031 + /* A &= K */ 1032 + ctx->flags |= SEEN_A; 1033 + emit_andi(r_A, r_A, k, ctx); 1034 + break; 1035 + case BPF_S_ALU_AND_X: 1036 + /* A &= X */ 1037 + update_on_xread(ctx); 1038 + ctx->flags |= SEEN_A | SEEN_X; 1039 + emit_and(r_A, r_A, r_X, ctx); 1040 + break; 1041 + case BPF_S_ALU_LSH_K: 1042 + /* A <<= K */ 1043 + ctx->flags |= SEEN_A; 1044 + emit_sll(r_A, r_A, k, ctx); 1045 + break; 1046 + case BPF_S_ALU_LSH_X: 1047 + /* A <<= X */ 1048 + ctx->flags |= SEEN_A | SEEN_X; 1049 + update_on_xread(ctx); 1050 + emit_sllv(r_A, r_A, r_X, ctx); 1051 + break; 1052 + case BPF_S_ALU_RSH_K: 1053 + /* A >>= K */ 1054 + ctx->flags |= SEEN_A; 1055 + emit_srl(r_A, r_A, k, ctx); 1056 + break; 1057 + case BPF_S_ALU_RSH_X: 1058 + ctx->flags |= SEEN_A | SEEN_X; 1059 + update_on_xread(ctx); 1060 + emit_srlv(r_A, r_A, r_X, ctx); 1061 + break; 1062 + case BPF_S_ALU_NEG: 1063 + /* A = -A */ 1064 + ctx->flags |= SEEN_A; 1065 + emit_neg(r_A, ctx); 1066 + break; 1067 + case BPF_S_JMP_JA: 1068 + /* pc += K */ 1069 + emit_b(b_imm(i + k + 1, ctx), ctx); 1070 + emit_nop(ctx); 1071 + break; 1072 + case BPF_S_JMP_JEQ_K: 1073 + /* pc += ( A == K ) ? pc->jt : pc->jf */ 1074 + condt = MIPS_COND_EQ | MIPS_COND_K; 1075 + goto jmp_cmp; 1076 + case BPF_S_JMP_JEQ_X: 1077 + ctx->flags |= SEEN_X; 1078 + /* pc += ( A == X ) ? pc->jt : pc->jf */ 1079 + condt = MIPS_COND_EQ | MIPS_COND_X; 1080 + goto jmp_cmp; 1081 + case BPF_S_JMP_JGE_K: 1082 + /* pc += ( A >= K ) ? pc->jt : pc->jf */ 1083 + condt = MIPS_COND_GE | MIPS_COND_K; 1084 + goto jmp_cmp; 1085 + case BPF_S_JMP_JGE_X: 1086 + ctx->flags |= SEEN_X; 1087 + /* pc += ( A >= X ) ? pc->jt : pc->jf */ 1088 + condt = MIPS_COND_GE | MIPS_COND_X; 1089 + goto jmp_cmp; 1090 + case BPF_S_JMP_JGT_K: 1091 + /* pc += ( A > K ) ? 
pc->jt : pc->jf */ 1092 + condt = MIPS_COND_GT | MIPS_COND_K; 1093 + goto jmp_cmp; 1094 + case BPF_S_JMP_JGT_X: 1095 + ctx->flags |= SEEN_X; 1096 + /* pc += ( A > X ) ? pc->jt : pc->jf */ 1097 + condt = MIPS_COND_GT | MIPS_COND_X; 1098 + jmp_cmp: 1099 + /* Greater or Equal */ 1100 + if ((condt & MIPS_COND_GE) || 1101 + (condt & MIPS_COND_GT)) { 1102 + if (condt & MIPS_COND_K) { /* K */ 1103 + ctx->flags |= SEEN_S0 | SEEN_A; 1104 + emit_sltiu(r_s0, r_A, k, ctx); 1105 + } else { /* X */ 1106 + ctx->flags |= SEEN_S0 | SEEN_A | 1107 + SEEN_X; 1108 + emit_sltu(r_s0, r_A, r_X, ctx); 1109 + } 1110 + /* A < (K|X) ? r_scrach = 1 */ 1111 + b_off = b_imm(i + inst->jf + 1, ctx); 1112 + emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off, 1113 + ctx); 1114 + emit_nop(ctx); 1115 + /* A > (K|X) ? scratch = 0 */ 1116 + if (condt & MIPS_COND_GT) { 1117 + /* Checking for equality */ 1118 + ctx->flags |= SEEN_S0 | SEEN_A | SEEN_X; 1119 + if (condt & MIPS_COND_K) 1120 + emit_load_imm(r_s0, k, ctx); 1121 + else 1122 + emit_jit_reg_move(r_s0, r_X, 1123 + ctx); 1124 + b_off = b_imm(i + inst->jf + 1, ctx); 1125 + emit_bcond(MIPS_COND_EQ, r_A, r_s0, 1126 + b_off, ctx); 1127 + emit_nop(ctx); 1128 + /* Finally, A > K|X */ 1129 + b_off = b_imm(i + inst->jt + 1, ctx); 1130 + emit_b(b_off, ctx); 1131 + emit_nop(ctx); 1132 + } else { 1133 + /* A >= (K|X) so jump */ 1134 + b_off = b_imm(i + inst->jt + 1, ctx); 1135 + emit_b(b_off, ctx); 1136 + emit_nop(ctx); 1137 + } 1138 + } else { 1139 + /* A == K|X */ 1140 + if (condt & MIPS_COND_K) { /* K */ 1141 + ctx->flags |= SEEN_S0 | SEEN_A; 1142 + emit_load_imm(r_s0, k, ctx); 1143 + /* jump true */ 1144 + b_off = b_imm(i + inst->jt + 1, ctx); 1145 + emit_bcond(MIPS_COND_EQ, r_A, r_s0, 1146 + b_off, ctx); 1147 + emit_nop(ctx); 1148 + /* jump false */ 1149 + b_off = b_imm(i + inst->jf + 1, 1150 + ctx); 1151 + emit_bcond(MIPS_COND_NE, r_A, r_s0, 1152 + b_off, ctx); 1153 + emit_nop(ctx); 1154 + } else { /* X */ 1155 + /* jump true */ 1156 + ctx->flags |= SEEN_A | 
SEEN_X; 1157 + b_off = b_imm(i + inst->jt + 1, 1158 + ctx); 1159 + emit_bcond(MIPS_COND_EQ, r_A, r_X, 1160 + b_off, ctx); 1161 + emit_nop(ctx); 1162 + /* jump false */ 1163 + b_off = b_imm(i + inst->jf + 1, ctx); 1164 + emit_bcond(MIPS_COND_NE, r_A, r_X, 1165 + b_off, ctx); 1166 + emit_nop(ctx); 1167 + } 1168 + } 1169 + break; 1170 + case BPF_S_JMP_JSET_K: 1171 + ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A; 1172 + /* pc += (A & K) ? pc -> jt : pc -> jf */ 1173 + emit_load_imm(r_s1, k, ctx); 1174 + emit_and(r_s0, r_A, r_s1, ctx); 1175 + /* jump true */ 1176 + b_off = b_imm(i + inst->jt + 1, ctx); 1177 + emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); 1178 + emit_nop(ctx); 1179 + /* jump false */ 1180 + b_off = b_imm(i + inst->jf + 1, ctx); 1181 + emit_b(b_off, ctx); 1182 + emit_nop(ctx); 1183 + break; 1184 + case BPF_S_JMP_JSET_X: 1185 + ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A; 1186 + /* pc += (A & X) ? pc -> jt : pc -> jf */ 1187 + emit_and(r_s0, r_A, r_X, ctx); 1188 + /* jump true */ 1189 + b_off = b_imm(i + inst->jt + 1, ctx); 1190 + emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); 1191 + emit_nop(ctx); 1192 + /* jump false */ 1193 + b_off = b_imm(i + inst->jf + 1, ctx); 1194 + emit_b(b_off, ctx); 1195 + emit_nop(ctx); 1196 + break; 1197 + case BPF_S_RET_A: 1198 + ctx->flags |= SEEN_A; 1199 + if (i != prog->len - 1) 1200 + /* 1201 + * If this is not the last instruction 1202 + * then jump to the epilogue 1203 + */ 1204 + emit_b(b_imm(prog->len, ctx), ctx); 1205 + emit_reg_move(r_ret, r_A, ctx); /* delay slot */ 1206 + break; 1207 + case BPF_S_RET_K: 1208 + /* 1209 + * It can emit two instructions so it does not fit on 1210 + * the delay slot. 
1211 + */ 1212 + emit_load_imm(r_ret, k, ctx); 1213 + if (i != prog->len - 1) { 1214 + /* 1215 + * If this is not the last instruction 1216 + * then jump to the epilogue 1217 + */ 1218 + emit_b(b_imm(prog->len, ctx), ctx); 1219 + emit_nop(ctx); 1220 + } 1221 + break; 1222 + case BPF_S_MISC_TAX: 1223 + /* X = A */ 1224 + ctx->flags |= SEEN_X | SEEN_A; 1225 + emit_jit_reg_move(r_X, r_A, ctx); 1226 + break; 1227 + case BPF_S_MISC_TXA: 1228 + /* A = X */ 1229 + ctx->flags |= SEEN_A | SEEN_X; 1230 + update_on_xread(ctx); 1231 + emit_jit_reg_move(r_A, r_X, ctx); 1232 + break; 1233 + /* AUX */ 1234 + case BPF_S_ANC_PROTOCOL: 1235 + /* A = ntohs(skb->protocol */ 1236 + ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; 1237 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1238 + protocol) != 2); 1239 + off = offsetof(struct sk_buff, protocol); 1240 + emit_half_load(r_A, r_skb, off, ctx); 1241 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1242 + /* This needs little endian fixup */ 1243 + if (cpu_has_mips_r2) { 1244 + /* R2 and later have the wsbh instruction */ 1245 + emit_wsbh(r_A, r_A, ctx); 1246 + } else { 1247 + /* Get first byte */ 1248 + emit_andi(r_tmp_imm, r_A, 0xff, ctx); 1249 + /* Shift it */ 1250 + emit_sll(r_tmp, r_tmp_imm, 8, ctx); 1251 + /* Get second byte */ 1252 + emit_srl(r_tmp_imm, r_A, 8, ctx); 1253 + emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx); 1254 + /* Put everyting together in r_A */ 1255 + emit_or(r_A, r_tmp, r_tmp_imm, ctx); 1256 + } 1257 + #endif 1258 + break; 1259 + case BPF_S_ANC_CPU: 1260 + ctx->flags |= SEEN_A | SEEN_OFF; 1261 + /* A = current_thread_info()->cpu */ 1262 + BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, 1263 + cpu) != 4); 1264 + off = offsetof(struct thread_info, cpu); 1265 + /* $28/gp points to the thread_info struct */ 1266 + emit_load(r_A, 28, off, ctx); 1267 + break; 1268 + case BPF_S_ANC_IFINDEX: 1269 + /* A = skb->dev->ifindex */ 1270 + ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0; 1271 + off = offsetof(struct sk_buff, dev); 1272 + emit_load(r_s0, 
r_skb, off, ctx); 1273 + /* error (0) in the delay slot */ 1274 + emit_bcond(MIPS_COND_EQ, r_s0, r_zero, 1275 + b_imm(prog->len, ctx), ctx); 1276 + emit_reg_move(r_ret, r_zero, ctx); 1277 + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, 1278 + ifindex) != 4); 1279 + off = offsetof(struct net_device, ifindex); 1280 + emit_load(r_A, r_s0, off, ctx); 1281 + break; 1282 + case BPF_S_ANC_MARK: 1283 + ctx->flags |= SEEN_SKB | SEEN_A; 1284 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 1285 + off = offsetof(struct sk_buff, mark); 1286 + emit_load(r_A, r_skb, off, ctx); 1287 + break; 1288 + case BPF_S_ANC_RXHASH: 1289 + ctx->flags |= SEEN_SKB | SEEN_A; 1290 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 1291 + off = offsetof(struct sk_buff, hash); 1292 + emit_load(r_A, r_skb, off, ctx); 1293 + break; 1294 + case BPF_S_ANC_VLAN_TAG: 1295 + case BPF_S_ANC_VLAN_TAG_PRESENT: 1296 + ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A; 1297 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1298 + vlan_tci) != 2); 1299 + off = offsetof(struct sk_buff, vlan_tci); 1300 + emit_half_load(r_s0, r_skb, off, ctx); 1301 + if (inst->code == BPF_S_ANC_VLAN_TAG) 1302 + emit_and(r_A, r_s0, VLAN_VID_MASK, ctx); 1303 + else 1304 + emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx); 1305 + break; 1306 + case BPF_S_ANC_PKTTYPE: 1307 + off = pkt_type_offset(); 1308 + 1309 + if (off < 0) 1310 + return -1; 1311 + emit_load_byte(r_tmp, r_skb, off, ctx); 1312 + /* Keep only the last 3 bits */ 1313 + emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); 1314 + break; 1315 + case BPF_S_ANC_QUEUE: 1316 + ctx->flags |= SEEN_SKB | SEEN_A; 1317 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1318 + queue_mapping) != 2); 1319 + BUILD_BUG_ON(offsetof(struct sk_buff, 1320 + queue_mapping) > 0xff); 1321 + off = offsetof(struct sk_buff, queue_mapping); 1322 + emit_half_load(r_A, r_skb, off, ctx); 1323 + break; 1324 + default: 1325 + pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__, 1326 + inst->code); 1327 + return -1; 1328 + } 
1329 + } 1330 + 1331 + /* compute offsets only during the first pass */ 1332 + if (ctx->target == NULL) 1333 + ctx->offsets[i] = ctx->idx * 4; 1334 + 1335 + return 0; 1336 + } 1337 + 1338 + int bpf_jit_enable __read_mostly; 1339 + 1340 + void bpf_jit_compile(struct sk_filter *fp) 1341 + { 1342 + struct jit_ctx ctx; 1343 + unsigned int alloc_size, tmp_idx; 1344 + 1345 + if (!bpf_jit_enable) 1346 + return; 1347 + 1348 + memset(&ctx, 0, sizeof(ctx)); 1349 + 1350 + ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL); 1351 + if (ctx.offsets == NULL) 1352 + return; 1353 + 1354 + ctx.skf = fp; 1355 + 1356 + if (build_body(&ctx)) 1357 + goto out; 1358 + 1359 + tmp_idx = ctx.idx; 1360 + build_prologue(&ctx); 1361 + ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; 1362 + /* just to complete the ctx.idx count */ 1363 + build_epilogue(&ctx); 1364 + 1365 + alloc_size = 4 * ctx.idx; 1366 + ctx.target = module_alloc(alloc_size); 1367 + if (ctx.target == NULL) 1368 + goto out; 1369 + 1370 + /* Clean it */ 1371 + memset(ctx.target, 0, alloc_size); 1372 + 1373 + ctx.idx = 0; 1374 + 1375 + /* Generate the actual JIT code */ 1376 + build_prologue(&ctx); 1377 + build_body(&ctx); 1378 + build_epilogue(&ctx); 1379 + 1380 + /* Update the icache */ 1381 + flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx)); 1382 + 1383 + if (bpf_jit_enable > 1) 1384 + /* Dump JIT code */ 1385 + bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); 1386 + 1387 + fp->bpf_func = (void *)ctx.target; 1388 + fp->jited = 1; 1389 + 1390 + out: 1391 + kfree(ctx.offsets); 1392 + } 1393 + 1394 + void bpf_jit_free(struct sk_filter *fp) 1395 + { 1396 + if (fp->jited) 1397 + module_free(NULL, fp->bpf_func); 1398 + kfree(fp); 1399 + }
+44
arch/mips/net/bpf_jit.h
··· 1 + /* 2 + * Just-In-Time compiler for BPF filters on MIPS 3 + * 4 + * Copyright (c) 2014 Imagination Technologies Ltd. 5 + * Author: Markos Chandras <markos.chandras@imgtec.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License as published by the 9 + * Free Software Foundation; version 2 of the License. 10 + */ 11 + 12 + #ifndef BPF_JIT_MIPS_OP_H 13 + #define BPF_JIT_MIPS_OP_H 14 + 15 + /* Registers used by JIT */ 16 + #define MIPS_R_ZERO 0 17 + #define MIPS_R_V0 2 18 + #define MIPS_R_V1 3 19 + #define MIPS_R_A0 4 20 + #define MIPS_R_A1 5 21 + #define MIPS_R_T6 14 22 + #define MIPS_R_T7 15 23 + #define MIPS_R_S0 16 24 + #define MIPS_R_S1 17 25 + #define MIPS_R_S2 18 26 + #define MIPS_R_S3 19 27 + #define MIPS_R_S4 20 28 + #define MIPS_R_S5 21 29 + #define MIPS_R_S6 22 30 + #define MIPS_R_S7 23 31 + #define MIPS_R_SP 29 32 + #define MIPS_R_RA 31 33 + 34 + /* Conditional codes */ 35 + #define MIPS_COND_EQ 0x1 36 + #define MIPS_COND_GE (0x1 << 1) 37 + #define MIPS_COND_GT (0x1 << 2) 38 + #define MIPS_COND_NE (0x1 << 3) 39 + #define MIPS_COND_ALL (0x1 << 4) 40 + /* Conditionals on X register or K immediate */ 41 + #define MIPS_COND_X (0x1 << 5) 42 + #define MIPS_COND_K (0x1 << 6) 43 + 44 + #endif /* BPF_JIT_MIPS_OP_H */
+2
arch/mips/netlogic/common/irq.c
··· 203 203 204 204 xirq = nlm_irq_to_xirq(node, irq); 205 205 pic_data = irq_get_handler_data(xirq); 206 + if (WARN_ON(!pic_data)) 207 + return; 206 208 pic_data->extra_ack = xack; 207 209 } 208 210
+30 -9
arch/mips/netlogic/common/reset.S
··· 35 35 36 36 #include <asm/asm.h> 37 37 #include <asm/asm-offsets.h> 38 + #include <asm/cpu.h> 38 39 #include <asm/cacheops.h> 39 40 #include <asm/regdef.h> 40 41 #include <asm/mipsregs.h> ··· 75 74 .endm 76 75 77 76 /* 77 + * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN 78 + * register. This is needed before going to C code since the SP can 79 + * in this region. Called from all HW threads. 80 + */ 81 + .macro xlp_early_mmu_init 82 + mfc0 t0, CP0_PAGEMASK, 1 83 + li t1, (1 << 29) /* ELPA bit */ 84 + or t0, t1 85 + mtc0 t0, CP0_PAGEMASK, 1 86 + .endm 87 + 88 + /* 78 89 * L1D cache has to be flushed before enabling threads in XLP. 79 90 * On XLP8xx/XLP3xx, we do a low level flush using processor control 80 91 * registers. On XLPII CPUs, usual cache instructions work. 81 92 */ 82 93 .macro xlp_flush_l1_dcache 83 94 mfc0 t0, CP0_EBASE, 0 84 - andi t0, t0, 0xff00 95 + andi t0, t0, PRID_IMP_MASK 85 96 slt t1, t0, 0x1200 86 97 beqz t1, 15f 87 98 nop ··· 172 159 173 160 1: /* Entry point on core wakeup */ 174 161 mfc0 t0, CP0_EBASE, 0 /* processor ID */ 175 - andi t0, 0xff00 162 + andi t0, PRID_IMP_MASK 176 163 li t1, 0x1500 /* XLP 9xx */ 164 + beq t0, t1, 2f /* does not need to set coherent */ 165 + nop 166 + 167 + li t1, 0x1300 /* XLP 5xx */ 177 168 beq t0, t1, 2f /* does not need to set coherent */ 178 169 nop 179 170 ··· 214 197 EXPORT(nlm_boot_siblings) 215 198 /* core L1D flush before enable threads */ 216 199 xlp_flush_l1_dcache 200 + /* save ra and sp, will be used later (only for boot cpu) */ 201 + dmtc0 ra, $22, 6 202 + dmtc0 sp, $22, 7 217 203 /* Enable hw threads by writing to MAP_THREADMODE of the core */ 218 204 li t0, CKSEG1ADDR(RESET_DATA_PHYS) 219 205 lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ ··· 245 225 #endif 246 226 mtc0 t1, CP0_STATUS 247 227 228 + xlp_early_mmu_init 229 + 248 230 /* mark CPU ready */ 249 231 li t3, CKSEG1ADDR(RESET_DATA_PHYS) 250 232 ADDIU t1, t3, BOOT_CPU_READY ··· 260 238 nop 261 239 262 240 /* 263 - 
* For the boot CPU, we have to restore registers and 264 - * return 241 + * For the boot CPU, we have to restore ra and sp and return, rest 242 + * of the registers will be restored by the caller 265 243 */ 266 - 4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */ 267 - li t1, 0xfadebeef 268 - dmtc0 t1, $4, 2 /* restore SP from UserLocal */ 269 - PTR_SUBU sp, t0, PT_SIZE 270 - RESTORE_ALL 244 + 4: 245 + dmfc0 ra, $22, 6 246 + dmfc0 sp, $22, 7 271 247 jr ra 272 248 nop 273 249 EXPORT(nlm_reset_entry_end) ··· 273 253 LEAF(nlm_init_boot_cpu) 274 254 #ifdef CONFIG_CPU_XLP 275 255 xlp_config_lsu 256 + xlp_early_mmu_init 276 257 #endif 277 258 jr ra 278 259 nop
+4 -8
arch/mips/netlogic/common/smp.c
··· 135 135 local_irq_enable(); 136 136 } 137 137 138 - void nlm_cpus_done(void) 139 - { 140 - } 141 - 142 138 /* 143 139 * Boot all other cpus in the system, initialize them, and bring them into 144 140 * the boot function ··· 194 198 cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask); 195 199 pr_info("Possible CPU mask: %s\n", buf); 196 200 197 - /* check with the cores we have worken up */ 201 + /* check with the cores we have woken up */ 198 202 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) 199 203 ncore += hweight32(nlm_get_node(i)->coremask); 200 204 ··· 209 213 { 210 214 uint32_t core0_thr_mask, core_thr_mask; 211 215 int threadmode, i, j; 216 + char buf[64]; 212 217 213 218 core0_thr_mask = 0; 214 219 for (i = 0; i < NLM_THREADS_PER_CORE; i++) ··· 244 247 return threadmode; 245 248 246 249 unsupp: 247 - panic("Unsupported CPU mask %lx", 248 - (unsigned long)cpumask_bits(wakeup_mask)[0]); 250 + cpumask_scnprintf(buf, ARRAY_SIZE(buf), wakeup_mask); 251 + panic("Unsupported CPU mask %s", buf); 249 252 return 0; 250 253 } 251 254 ··· 274 277 .send_ipi_mask = nlm_send_ipi_mask, 275 278 .init_secondary = nlm_init_secondary, 276 279 .smp_finish = nlm_smp_finish, 277 - .cpus_done = nlm_cpus_done, 278 280 .boot_secondary = nlm_boot_secondary, 279 281 .smp_setup = nlm_smp_setup, 280 282 .prepare_cpus = nlm_prepare_cpus,
+8 -4
arch/mips/netlogic/common/smpboot.S
··· 54 54 .set noat 55 55 .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ 56 56 57 - FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */ 58 - dmtc0 sp, $4, 2 /* SP saved in UserLocal */ 57 + /* Called by the boot cpu to wake up its sibling threads */ 58 + NESTED(xlp_boot_core0_siblings, PT_SIZE, sp) 59 + /* CPU register contents lost when enabling threads, save them first */ 59 60 SAVE_ALL 60 61 sync 61 62 /* find the location to which nlm_boot_siblings was relocated */ ··· 66 65 dsubu t2, t1 67 66 daddu t2, t0 68 67 /* call it */ 69 - jr t2 68 + jalr t2 70 69 nop 71 - /* not reached */ 70 + RESTORE_ALL 71 + jr ra 72 + nop 73 + END(xlp_boot_core0_siblings) 72 74 73 75 NESTED(nlm_boot_secondary_cpus, 16, sp) 74 76 /* Initialize CP0 Status */
+4 -1
arch/mips/netlogic/common/time.c
··· 82 82 static void nlm_init_pic_timer(void) 83 83 { 84 84 uint64_t picbase = nlm_get_node(0)->picbase; 85 + u32 picfreq; 85 86 86 87 nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0); 87 88 if (current_cpu_data.cputype == CPU_XLR) { ··· 93 92 csrc_pic.read = nlm_get_pic_timer; 94 93 } 95 94 csrc_pic.rating = 1000; 96 - clocksource_register_hz(&csrc_pic, pic_timer_freq()); 95 + picfreq = pic_timer_freq(); 96 + clocksource_register_hz(&csrc_pic, picfreq); 97 + pr_info("PIC clock source added, frequency %d\n", picfreq); 97 98 } 98 99 99 100 void __init plat_time_init(void)
+3 -2
arch/mips/netlogic/dts/xlp_gvp.dts
··· 26 26 interrupt-parent = <&pic>; 27 27 interrupts = <17>; 28 28 }; 29 - pic: pic@4000 { 30 - interrupt-controller; 29 + pic: pic@110000 { 30 + compatible = "netlogic,xlp-pic"; 31 31 #address-cells = <0>; 32 32 #interrupt-cells = <1>; 33 33 reg = <0 0x110000 0x200>; 34 + interrupt-controller; 34 35 }; 35 36 36 37 nor_flash@1,0 {
+2
arch/mips/netlogic/xlp/Makefile
··· 2 2 obj-$(CONFIG_SMP) += wakeup.o 3 3 obj-$(CONFIG_USB) += usb-init.o 4 4 obj-$(CONFIG_USB) += usb-init-xlp2.o 5 + obj-$(CONFIG_SATA_AHCI) += ahci-init.o 6 + obj-$(CONFIG_SATA_AHCI) += ahci-init-xlp2.o
+377
arch/mips/netlogic/xlp/ahci-init-xlp2.c
··· 1 + /* 2 + * Copyright (c) 2003-2014 Broadcom Corporation 3 + * All Rights Reserved 4 + * 5 + * This software is available to you under a choice of one of two 6 + * licenses. You may choose to be licensed under the terms of the GNU 7 + * General Public License (GPL) Version 2, available from the file 8 + * COPYING in the main directory of this source tree, or the Broadcom 9 + * license below: 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 15 + * 1. Redistributions of source code must retain the above copyright 16 + * notice, this list of conditions and the following disclaimer. 17 + * 2. Redistributions in binary form must reproduce the above copyright 18 + * notice, this list of conditions and the following disclaimer in 19 + * the documentation and/or other materials provided with the 20 + * distribution. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 23 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 26 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 31 + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 32 + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/dma-mapping.h> 36 + #include <linux/kernel.h> 37 + #include <linux/delay.h> 38 + #include <linux/init.h> 39 + #include <linux/pci.h> 40 + #include <linux/irq.h> 41 + #include <linux/bitops.h> 42 + #include <linux/pci_ids.h> 43 + #include <linux/nodemask.h> 44 + 45 + #include <asm/cpu.h> 46 + #include <asm/mipsregs.h> 47 + 48 + #include <asm/netlogic/common.h> 49 + #include <asm/netlogic/haldefs.h> 50 + #include <asm/netlogic/mips-extns.h> 51 + #include <asm/netlogic/xlp-hal/xlp.h> 52 + #include <asm/netlogic/xlp-hal/iomap.h> 53 + 54 + #define SATA_CTL 0x0 55 + #define SATA_STATUS 0x1 /* Status Reg */ 56 + #define SATA_INT 0x2 /* Interrupt Reg */ 57 + #define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */ 58 + #define SATA_BIU_TIMEOUT 0x4 59 + #define AXIWRSPERRLOG 0x5 60 + #define AXIRDSPERRLOG 0x6 61 + #define BiuTimeoutLow 0x7 62 + #define BiuTimeoutHi 0x8 63 + #define BiuSlvErLow 0x9 64 + #define BiuSlvErHi 0xa 65 + #define IO_CONFIG_SWAP_DIS 0xb 66 + #define CR_REG_TIMER 0xc 67 + #define CORE_ID 0xd 68 + #define AXI_SLAVE_OPT1 0xe 69 + #define PHY_MEM_ACCESS 0xf 70 + #define PHY0_CNTRL 0x10 71 + #define PHY0_STAT 0x11 72 + #define PHY0_RX_ALIGN 0x12 73 + #define PHY0_RX_EQ_LO 0x13 74 + #define PHY0_RX_EQ_HI 0x14 75 + #define PHY0_BIST_LOOP 0x15 76 + #define PHY1_CNTRL 0x16 77 + #define PHY1_STAT 0x17 78 + #define PHY1_RX_ALIGN 0x18 79 + #define PHY1_RX_EQ_LO 0x19 80 + #define PHY1_RX_EQ_HI 0x1a 81 + #define PHY1_BIST_LOOP 0x1b 82 + #define RdExBase 0x1c 83 + #define RdExLimit 0x1d 84 + #define CacheAllocBase 0x1e 85 + #define CacheAllocLimit 0x1f 86 + #define BiuSlaveCmdGstNum 0x20 87 + 88 + /*SATA_CTL Bits */ 89 + #define SATA_RST_N BIT(0) /* Active low reset sata_core phy */ 90 + #define SataCtlReserve0 BIT(1) 91 + #define M_CSYSREQ BIT(2) /* AXI master low power, not used */ 92 + #define S_CSYSREQ BIT(3) /* AXI slave low power, not used */ 93 + #define P0_CP_DET BIT(8) /* Reserved, bring in from pad */ 94 + #define P0_MP_SW 
BIT(9) /* Mech Switch */ 95 + #define P0_DISABLE BIT(10) /* disable p0 */ 96 + #define P0_ACT_LED_EN BIT(11) /* Active LED enable */ 97 + #define P0_IRST_HARD_SYNTH BIT(12) /* PHY hard synth reset */ 98 + #define P0_IRST_HARD_TXRX BIT(13) /* PHY lane hard reset */ 99 + #define P0_IRST_POR BIT(14) /* PHY power on reset*/ 100 + #define P0_IPDTXL BIT(15) /* PHY Tx lane dis/power down */ 101 + #define P0_IPDRXL BIT(16) /* PHY Rx lane dis/power down */ 102 + #define P0_IPDIPDMSYNTH BIT(17) /* PHY synthesizer dis/porwer down */ 103 + #define P0_CP_POD_EN BIT(18) /* CP_POD enable */ 104 + #define P0_AT_BYPASS BIT(19) /* P0 address translation by pass */ 105 + #define P1_CP_DET BIT(20) /* Reserved,Cold Detect */ 106 + #define P1_MP_SW BIT(21) /* Mech Switch */ 107 + #define P1_DISABLE BIT(22) /* disable p1 */ 108 + #define P1_ACT_LED_EN BIT(23) /* Active LED enable */ 109 + #define P1_IRST_HARD_SYNTH BIT(24) /* PHY hard synth reset */ 110 + #define P1_IRST_HARD_TXRX BIT(25) /* PHY lane hard reset */ 111 + #define P1_IRST_POR BIT(26) /* PHY power on reset*/ 112 + #define P1_IPDTXL BIT(27) /* PHY Tx lane dis/porwer down */ 113 + #define P1_IPDRXL BIT(28) /* PHY Rx lane dis/porwer down */ 114 + #define P1_IPDIPDMSYNTH BIT(29) /* PHY synthesizer dis/porwer down */ 115 + #define P1_CP_POD_EN BIT(30) 116 + #define P1_AT_BYPASS BIT(31) /* P1 address translation by pass */ 117 + 118 + /* Status register */ 119 + #define M_CACTIVE BIT(0) /* m_cactive, not used */ 120 + #define S_CACTIVE BIT(1) /* s_cactive, not used */ 121 + #define P0_PHY_READY BIT(8) /* phy is ready */ 122 + #define P0_CP_POD BIT(9) /* Cold PowerOn */ 123 + #define P0_SLUMBER BIT(10) /* power mode slumber */ 124 + #define P0_PATIAL BIT(11) /* power mode patial */ 125 + #define P0_PHY_SIG_DET BIT(12) /* phy dignal detect */ 126 + #define P0_PHY_CALI BIT(13) /* phy calibration done */ 127 + #define P1_PHY_READY BIT(16) /* phy is ready */ 128 + #define P1_CP_POD BIT(17) /* Cold PowerOn */ 129 + #define P1_SLUMBER 
BIT(18) /* power mode slumber */ 130 + #define P1_PATIAL BIT(19) /* power mode patial */ 131 + #define P1_PHY_SIG_DET BIT(20) /* phy dignal detect */ 132 + #define P1_PHY_CALI BIT(21) /* phy calibration done */ 133 + 134 + /* SATA CR_REG_TIMER bits */ 135 + #define CR_TIME_SCALE (0x1000 << 0) 136 + 137 + /* SATA PHY specific registers start and end address */ 138 + #define RXCDRCALFOSC0 0x0065 139 + #define CALDUTY 0x006e 140 + #define RXDPIF 0x8065 141 + #define PPMDRIFTMAX_HI 0x80A4 142 + 143 + #define nlm_read_sata_reg(b, r) nlm_read_reg(b, r) 144 + #define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v) 145 + #define nlm_get_sata_pcibase(node) \ 146 + nlm_pcicfg_base(XLP9XX_IO_SATA_OFFSET(node)) 147 + #define nlm_get_sata_regbase(node) \ 148 + (nlm_get_sata_pcibase(node) + 0x100) 149 + 150 + /* SATA PHY config for register block 1 0x0065 .. 0x006e */ 151 + static const u8 sata_phy_config1[] = { 152 + 0xC9, 0xC9, 0x07, 0x07, 0x18, 0x18, 0x01, 0x01, 0x22, 0x00 153 + }; 154 + 155 + /* SATA PHY config for register block 2 0x0x8065 .. 
0x0x80A4 */ 156 + static const u8 sata_phy_config2[] = { 157 + 0xAA, 0x00, 0x4C, 0xC9, 0xC9, 0x07, 0x07, 0x18, 158 + 0x18, 0x05, 0x0C, 0x10, 0x00, 0x10, 0x00, 0xFF, 159 + 0xCF, 0xF7, 0xE1, 0xF5, 0xFD, 0xFD, 0xFF, 0xFF, 160 + 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5, 0xFD, 0xFD, 161 + 0xF5, 0xF5, 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5, 162 + 0xFD, 0xFD, 0xF5, 0xF5, 0xFF, 0xFF, 0xFF, 0xF5, 163 + 0x3F, 0x00, 0x32, 0x00, 0x03, 0x01, 0x05, 0x05, 164 + 0x04, 0x00, 0x00, 0x08, 0x04, 0x00, 0x00, 0x04, 165 + }; 166 + 167 + const int sata_phy_debug = 0; /* set to verify PHY writes */ 168 + 169 + static void sata_clear_glue_reg(u64 regbase, u32 off, u32 bit) 170 + { 171 + u32 reg_val; 172 + 173 + reg_val = nlm_read_sata_reg(regbase, off); 174 + nlm_write_sata_reg(regbase, off, (reg_val & ~bit)); 175 + } 176 + 177 + static void sata_set_glue_reg(u64 regbase, u32 off, u32 bit) 178 + { 179 + u32 reg_val; 180 + 181 + reg_val = nlm_read_sata_reg(regbase, off); 182 + nlm_write_sata_reg(regbase, off, (reg_val | bit)); 183 + } 184 + 185 + static void write_phy_reg(u64 regbase, u32 addr, u32 physel, u8 data) 186 + { 187 + nlm_write_sata_reg(regbase, PHY_MEM_ACCESS, 188 + (1u << 31) | (physel << 24) | (data << 16) | addr); 189 + udelay(850); 190 + } 191 + 192 + static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel) 193 + { 194 + u32 val; 195 + 196 + nlm_write_sata_reg(regbase, PHY_MEM_ACCESS, 197 + (0 << 31) | (physel << 24) | (0 << 16) | addr); 198 + udelay(850); 199 + val = nlm_read_sata_reg(regbase, PHY_MEM_ACCESS); 200 + return (val >> 16) & 0xff; 201 + } 202 + 203 + static void config_sata_phy(u64 regbase) 204 + { 205 + u32 port, i, reg; 206 + 207 + for (port = 0; port < 2; port++) { 208 + for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++) 209 + write_phy_reg(regbase, reg, port, sata_phy_config1[i]); 210 + 211 + for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++) 212 + write_phy_reg(regbase, reg, port, sata_phy_config2[i]); 213 + } 214 + } 215 + 216 + static void 
check_phy_register(u64 regbase, u32 addr, u32 physel, u8 xdata) 217 + { 218 + u8 data; 219 + 220 + data = read_phy_reg(regbase, addr, physel); 221 + pr_info("PHY read addr = 0x%x physel = %d data = 0x%x %s\n", 222 + addr, physel, data, data == xdata ? "TRUE" : "FALSE"); 223 + } 224 + 225 + static void verify_sata_phy_config(u64 regbase) 226 + { 227 + u32 port, i, reg; 228 + 229 + for (port = 0; port < 2; port++) { 230 + for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++) 231 + check_phy_register(regbase, reg, port, 232 + sata_phy_config1[i]); 233 + 234 + for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++) 235 + check_phy_register(regbase, reg, port, 236 + sata_phy_config2[i]); 237 + } 238 + } 239 + 240 + static void nlm_sata_firmware_init(int node) 241 + { 242 + u32 reg_val; 243 + u64 regbase; 244 + int n; 245 + 246 + pr_info("Initializing XLP9XX On-chip AHCI...\n"); 247 + regbase = nlm_get_sata_regbase(node); 248 + 249 + /* Reset port0 */ 250 + sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_POR); 251 + sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX); 252 + sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH); 253 + sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDTXL); 254 + sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDRXL); 255 + sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH); 256 + 257 + /* port1 */ 258 + sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_POR); 259 + sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX); 260 + sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH); 261 + sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDTXL); 262 + sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDRXL); 263 + sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH); 264 + udelay(300); 265 + 266 + /* Set PHY */ 267 + sata_set_glue_reg(regbase, SATA_CTL, P0_IPDTXL); 268 + sata_set_glue_reg(regbase, SATA_CTL, P0_IPDRXL); 269 + sata_set_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH); 270 + sata_set_glue_reg(regbase, SATA_CTL, 
P1_IPDTXL); 271 + sata_set_glue_reg(regbase, SATA_CTL, P1_IPDRXL); 272 + sata_set_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH); 273 + 274 + udelay(1000); 275 + sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_POR); 276 + udelay(1000); 277 + sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_POR); 278 + udelay(1000); 279 + 280 + /* setup PHY */ 281 + config_sata_phy(regbase); 282 + if (sata_phy_debug) 283 + verify_sata_phy_config(regbase); 284 + 285 + udelay(1000); 286 + sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX); 287 + sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH); 288 + sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX); 289 + sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH); 290 + udelay(300); 291 + 292 + /* Override reset in serial PHY mode */ 293 + sata_set_glue_reg(regbase, CR_REG_TIMER, CR_TIME_SCALE); 294 + /* Set reset SATA */ 295 + sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N); 296 + sata_set_glue_reg(regbase, SATA_CTL, M_CSYSREQ); 297 + sata_set_glue_reg(regbase, SATA_CTL, S_CSYSREQ); 298 + 299 + pr_debug("Waiting for PHYs to come up.\n"); 300 + n = 10000; 301 + do { 302 + reg_val = nlm_read_sata_reg(regbase, SATA_STATUS); 303 + if ((reg_val & P1_PHY_READY) && (reg_val & P0_PHY_READY)) 304 + break; 305 + udelay(10); 306 + } while (--n > 0); 307 + 308 + if (reg_val & P0_PHY_READY) 309 + pr_info("PHY0 is up.\n"); 310 + else 311 + pr_info("PHY0 is down.\n"); 312 + if (reg_val & P1_PHY_READY) 313 + pr_info("PHY1 is up.\n"); 314 + else 315 + pr_info("PHY1 is down.\n"); 316 + 317 + pr_info("XLP AHCI Init Done.\n"); 318 + } 319 + 320 + static int __init nlm_ahci_init(void) 321 + { 322 + int node; 323 + 324 + if (!cpu_is_xlp9xx()) 325 + return 0; 326 + for (node = 0; node < NLM_NR_NODES; node++) 327 + if (nlm_node_present(node)) 328 + nlm_sata_firmware_init(node); 329 + return 0; 330 + } 331 + 332 + static void nlm_sata_intr_ack(struct irq_data *data) 333 + { 334 + u64 regbase; 335 + u32 val; 336 + int node; 337 + 338 + node = 
data->irq / NLM_IRQS_PER_NODE; 339 + regbase = nlm_get_sata_regbase(node); 340 + val = nlm_read_sata_reg(regbase, SATA_INT); 341 + sata_set_glue_reg(regbase, SATA_INT, val); 342 + } 343 + 344 + static void nlm_sata_fixup_bar(struct pci_dev *dev) 345 + { 346 + dev->resource[5] = dev->resource[0]; 347 + memset(&dev->resource[0], 0, sizeof(dev->resource[0])); 348 + } 349 + 350 + static void nlm_sata_fixup_final(struct pci_dev *dev) 351 + { 352 + u32 val; 353 + u64 regbase; 354 + int node; 355 + 356 + /* Find end bridge function to find node */ 357 + node = xlp_socdev_to_node(dev); 358 + regbase = nlm_get_sata_regbase(node); 359 + 360 + /* clear pending interrupts and then enable them */ 361 + val = nlm_read_sata_reg(regbase, SATA_INT); 362 + sata_set_glue_reg(regbase, SATA_INT, val); 363 + 364 + /* Enable only the core interrupt */ 365 + sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1); 366 + 367 + dev->irq = nlm_irq_to_xirq(node, PIC_SATA_IRQ); 368 + nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack); 369 + } 370 + 371 + arch_initcall(nlm_ahci_init); 372 + 373 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA, 374 + nlm_sata_fixup_bar); 375 + 376 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA, 377 + nlm_sata_fixup_final);
+209
arch/mips/netlogic/xlp/ahci-init.c
··· 1 + /* 2 + * Copyright (c) 2003-2014 Broadcom Corporation 3 + * All Rights Reserved 4 + * 5 + * This software is available to you under a choice of one of two 6 + * licenses. You may choose to be licensed under the terms of the GNU 7 + * General Public License (GPL) Version 2, available from the file 8 + * COPYING in the main directory of this source tree, or the Broadcom 9 + * license below: 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 15 + * 1. Redistributions of source code must retain the above copyright 16 + * notice, this list of conditions and the following disclaimer. 17 + * 2. Redistributions in binary form must reproduce the above copyright 18 + * notice, this list of conditions and the following disclaimer in 19 + * the documentation and/or other materials provided with the 20 + * distribution. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 23 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 26 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 31 + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 32 + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/dma-mapping.h> 36 + #include <linux/kernel.h> 37 + #include <linux/delay.h> 38 + #include <linux/init.h> 39 + #include <linux/pci.h> 40 + #include <linux/irq.h> 41 + #include <linux/bitops.h> 42 + 43 + #include <asm/cpu.h> 44 + #include <asm/mipsregs.h> 45 + 46 + #include <asm/netlogic/haldefs.h> 47 + #include <asm/netlogic/xlp-hal/xlp.h> 48 + #include <asm/netlogic/common.h> 49 + #include <asm/netlogic/xlp-hal/iomap.h> 50 + #include <asm/netlogic/mips-extns.h> 51 + 52 + #define SATA_CTL 0x0 53 + #define SATA_STATUS 0x1 /* Status Reg */ 54 + #define SATA_INT 0x2 /* Interrupt Reg */ 55 + #define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */ 56 + #define SATA_CR_REG_TIMER 0x4 /* PHY Conrol Timer Reg */ 57 + #define SATA_CORE_ID 0x5 /* Core ID Reg */ 58 + #define SATA_AXI_SLAVE_OPT1 0x6 /* AXI Slave Options Reg */ 59 + #define SATA_PHY_LOS_LEV 0x7 /* PHY LOS Level Reg */ 60 + #define SATA_PHY_MULTI 0x8 /* PHY Multiplier Reg */ 61 + #define SATA_PHY_CLK_SEL 0x9 /* Clock Select Reg */ 62 + #define SATA_PHY_AMP1_GEN1 0xa /* PHY Transmit Amplitude Reg 1 */ 63 + #define SATA_PHY_AMP1_GEN2 0xb /* PHY Transmit Amplitude Reg 2 */ 64 + #define SATA_PHY_AMP1_GEN3 0xc /* PHY Transmit Amplitude Reg 3 */ 65 + #define SATA_PHY_PRE1 0xd /* PHY Transmit Preemphasis Reg 1 */ 66 + #define SATA_PHY_PRE2 0xe /* PHY Transmit Preemphasis Reg 2 */ 67 + #define SATA_PHY_PRE3 0xf /* PHY Transmit Preemphasis Reg 3 */ 68 + #define SATA_SPDMODE 0x10 /* Speed Mode Reg */ 69 + #define SATA_REFCLK 0x11 /* Reference Clock Control Reg */ 70 + #define SATA_BYTE_SWAP_DIS 0x12 /* byte swap disable */ 71 + 72 + /*SATA_CTL Bits */ 73 + #define SATA_RST_N BIT(0) 74 + #define PHY0_RESET_N BIT(16) 75 + #define PHY1_RESET_N BIT(17) 76 + #define PHY2_RESET_N BIT(18) 77 + #define PHY3_RESET_N BIT(19) 78 + #define M_CSYSREQ BIT(2) 79 + #define S_CSYSREQ BIT(3) 80 + 81 + /*SATA_STATUS Bits */ 82 + #define P0_PHY_READY BIT(4) 83 + #define P1_PHY_READY BIT(5) 84 + #define P2_PHY_READY 
BIT(6) 85 + #define P3_PHY_READY BIT(7) 86 + 87 + #define nlm_read_sata_reg(b, r) nlm_read_reg(b, r) 88 + #define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v) 89 + #define nlm_get_sata_pcibase(node) \ 90 + nlm_pcicfg_base(XLP_IO_SATA_OFFSET(node)) 91 + /* SATA device specific configuration registers are starts at 0x900 offset */ 92 + #define nlm_get_sata_regbase(node) \ 93 + (nlm_get_sata_pcibase(node) + 0x900) 94 + 95 + static void sata_clear_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit) 96 + { 97 + uint32_t reg_val; 98 + 99 + reg_val = nlm_read_sata_reg(regbase, off); 100 + nlm_write_sata_reg(regbase, off, (reg_val & ~bit)); 101 + } 102 + 103 + static void sata_set_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit) 104 + { 105 + uint32_t reg_val; 106 + 107 + reg_val = nlm_read_sata_reg(regbase, off); 108 + nlm_write_sata_reg(regbase, off, (reg_val | bit)); 109 + } 110 + 111 + static void nlm_sata_firmware_init(int node) 112 + { 113 + uint32_t reg_val; 114 + uint64_t regbase; 115 + int i; 116 + 117 + pr_info("XLP AHCI Initialization started.\n"); 118 + regbase = nlm_get_sata_regbase(node); 119 + 120 + /* Reset SATA */ 121 + sata_clear_glue_reg(regbase, SATA_CTL, SATA_RST_N); 122 + /* Reset PHY */ 123 + sata_clear_glue_reg(regbase, SATA_CTL, 124 + (PHY3_RESET_N | PHY2_RESET_N 125 + | PHY1_RESET_N | PHY0_RESET_N)); 126 + 127 + /* Set SATA */ 128 + sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N); 129 + /* Set PHY */ 130 + sata_set_glue_reg(regbase, SATA_CTL, 131 + (PHY3_RESET_N | PHY2_RESET_N 132 + | PHY1_RESET_N | PHY0_RESET_N)); 133 + 134 + pr_debug("Waiting for PHYs to come up.\n"); 135 + i = 0; 136 + do { 137 + reg_val = nlm_read_sata_reg(regbase, SATA_STATUS); 138 + i++; 139 + } while (((reg_val & 0xF0) != 0xF0) && (i < 10000)); 140 + 141 + for (i = 0; i < 4; i++) { 142 + if (reg_val & (P0_PHY_READY << i)) 143 + pr_info("PHY%d is up.\n", i); 144 + else 145 + pr_info("PHY%d is down.\n", i); 146 + } 147 + 148 + pr_info("XLP AHCI init done.\n"); 
149 + } 150 + 151 + static int __init nlm_ahci_init(void) 152 + { 153 + int node = 0; 154 + int chip = read_c0_prid() & PRID_REV_MASK; 155 + 156 + if (chip == PRID_IMP_NETLOGIC_XLP3XX) 157 + nlm_sata_firmware_init(node); 158 + return 0; 159 + } 160 + 161 + static void nlm_sata_intr_ack(struct irq_data *data) 162 + { 163 + uint32_t val = 0; 164 + uint64_t regbase; 165 + 166 + regbase = nlm_get_sata_regbase(nlm_nodeid()); 167 + val = nlm_read_sata_reg(regbase, SATA_INT); 168 + sata_set_glue_reg(regbase, SATA_INT, val); 169 + } 170 + 171 + static void nlm_sata_fixup_bar(struct pci_dev *dev) 172 + { 173 + /* 174 + * The AHCI resource is in BAR 0, move it to 175 + * BAR 5, where it is expected 176 + */ 177 + dev->resource[5] = dev->resource[0]; 178 + memset(&dev->resource[0], 0, sizeof(dev->resource[0])); 179 + } 180 + 181 + static void nlm_sata_fixup_final(struct pci_dev *dev) 182 + { 183 + uint32_t val; 184 + uint64_t regbase; 185 + int node = 0; /* XLP3XX does not support multi-node */ 186 + 187 + regbase = nlm_get_sata_regbase(node); 188 + 189 + /* clear pending interrupts and then enable them */ 190 + val = nlm_read_sata_reg(regbase, SATA_INT); 191 + sata_set_glue_reg(regbase, SATA_INT, val); 192 + 193 + /* Mask the core interrupt. If all the interrupts 194 + * are enabled there are spurious interrupt flow 195 + * happening, to avoid only enable core interrupt 196 + * mask. 197 + */ 198 + sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1); 199 + 200 + dev->irq = PIC_SATA_IRQ; 201 + nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack); 202 + } 203 + 204 + arch_initcall(nlm_ahci_init); 205 + 206 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA, 207 + nlm_sata_fixup_bar); 208 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA, 209 + nlm_sata_fixup_final);
+2 -1
arch/mips/netlogic/xlp/dt.c
··· 48 48 void __init *xlp_dt_init(void *fdtp) 49 49 { 50 50 if (!fdtp) { 51 - switch (current_cpu_data.processor_id & 0xff00) { 51 + switch (current_cpu_data.processor_id & PRID_IMP_MASK) { 52 52 #ifdef CONFIG_DT_XLP_GVP 53 53 case PRID_IMP_NETLOGIC_XLP9XX: 54 + case PRID_IMP_NETLOGIC_XLP5XX: 54 55 fdtp = __dtb_xlp_gvp_begin; 55 56 break; 56 57 #endif
+204 -79
arch/mips/netlogic/xlp/nlm_hal.c
··· 54 54 struct nlm_soc_info *nodep; 55 55 56 56 nodep = nlm_get_node(node); 57 + if (node == 0) 58 + nodep->coremask = 1; /* node 0, boot cpu */ 57 59 nodep->sysbase = nlm_get_sys_regbase(node); 58 60 nodep->picbase = nlm_get_pic_regbase(node); 59 61 nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1)); ··· 66 64 spin_lock_init(&nodep->piclock); 67 65 } 68 66 69 - int nlm_irq_to_irt(int irq) 67 + static int xlp9xx_irq_to_irt(int irq) 68 + { 69 + switch (irq) { 70 + case PIC_GPIO_IRQ: 71 + return 12; 72 + case PIC_9XX_XHCI_0_IRQ: 73 + return 114; 74 + case PIC_9XX_XHCI_1_IRQ: 75 + return 115; 76 + case PIC_UART_0_IRQ: 77 + return 133; 78 + case PIC_UART_1_IRQ: 79 + return 134; 80 + case PIC_SATA_IRQ: 81 + return 143; 82 + case PIC_SPI_IRQ: 83 + return 152; 84 + case PIC_MMC_IRQ: 85 + return 153; 86 + case PIC_PCIE_LINK_LEGACY_IRQ(0): 87 + case PIC_PCIE_LINK_LEGACY_IRQ(1): 88 + case PIC_PCIE_LINK_LEGACY_IRQ(2): 89 + case PIC_PCIE_LINK_LEGACY_IRQ(3): 90 + return 191 + irq - PIC_PCIE_LINK_LEGACY_IRQ_BASE; 91 + } 92 + return -1; 93 + } 94 + 95 + static int xlp_irq_to_irt(int irq) 70 96 { 71 97 uint64_t pcibase; 72 98 int devoff, irt; 73 - 74 - /* bypass for 9xx */ 75 - if (cpu_is_xlp9xx()) { 76 - switch (irq) { 77 - case PIC_9XX_XHCI_0_IRQ: 78 - return 114; 79 - case PIC_9XX_XHCI_1_IRQ: 80 - return 115; 81 - case PIC_UART_0_IRQ: 82 - return 133; 83 - case PIC_UART_1_IRQ: 84 - return 134; 85 - case PIC_PCIE_LINK_LEGACY_IRQ(0): 86 - case PIC_PCIE_LINK_LEGACY_IRQ(1): 87 - case PIC_PCIE_LINK_LEGACY_IRQ(2): 88 - case PIC_PCIE_LINK_LEGACY_IRQ(3): 89 - return 191 + irq - PIC_PCIE_LINK_LEGACY_IRQ_BASE; 90 - } 91 - return -1; 92 - } 93 99 94 100 devoff = 0; 95 101 switch (irq) { ··· 108 98 devoff = XLP_IO_UART1_OFFSET(0); 109 99 break; 110 100 case PIC_MMC_IRQ: 111 - devoff = XLP_IO_SD_OFFSET(0); 101 + devoff = XLP_IO_MMC_OFFSET(0); 112 102 break; 113 103 case PIC_I2C_0_IRQ: /* I2C will be fixed up */ 114 104 case PIC_I2C_1_IRQ: ··· 118 108 devoff = XLP2XX_IO_I2C_OFFSET(0); 119 
109 else 120 110 devoff = XLP_IO_I2C0_OFFSET(0); 111 + break; 112 + case PIC_SATA_IRQ: 113 + devoff = XLP_IO_SATA_OFFSET(0); 114 + break; 115 + case PIC_GPIO_IRQ: 116 + devoff = XLP_IO_GPIO_OFFSET(0); 117 + break; 118 + case PIC_NAND_IRQ: 119 + devoff = XLP_IO_NAND_OFFSET(0); 120 + break; 121 + case PIC_SPI_IRQ: 122 + devoff = XLP_IO_SPI_OFFSET(0); 121 123 break; 122 124 default: 123 125 if (cpu_is_xlpii()) { ··· 186 164 /* HW bug, PCI IRT entries are bad on early silicon, fix */ 187 165 irt = PIC_IRT_PCIE_LINK_INDEX(irq - 188 166 PIC_PCIE_LINK_LEGACY_IRQ_BASE); 189 - } else if (irq >= PIC_PCIE_LINK_MSI_IRQ(0) && 190 - irq <= PIC_PCIE_LINK_MSI_IRQ(3)) { 191 - irt = -2; 192 - } else if (irq >= PIC_PCIE_MSIX_IRQ(0) && 193 - irq <= PIC_PCIE_MSIX_IRQ(3)) { 194 - irt = -2; 195 167 } else { 196 168 irt = -1; 197 169 } 198 170 return irt; 199 171 } 200 172 201 - unsigned int nlm_get_core_frequency(int node, int core) 173 + int nlm_irq_to_irt(int irq) 174 + { 175 + /* return -2 for irqs without 1-1 mapping */ 176 + if (irq >= PIC_PCIE_LINK_MSI_IRQ(0) && irq <= PIC_PCIE_LINK_MSI_IRQ(3)) 177 + return -2; 178 + if (irq >= PIC_PCIE_MSIX_IRQ(0) && irq <= PIC_PCIE_MSIX_IRQ(3)) 179 + return -2; 180 + 181 + if (cpu_is_xlp9xx()) 182 + return xlp9xx_irq_to_irt(irq); 183 + else 184 + return xlp_irq_to_irt(irq); 185 + } 186 + 187 + static unsigned int nlm_xlp2_get_core_frequency(int node, int core) 188 + { 189 + unsigned int pll_post_div, ctrl_val0, ctrl_val1, denom; 190 + uint64_t num, sysbase, clockbase; 191 + 192 + if (cpu_is_xlp9xx()) { 193 + clockbase = nlm_get_clock_regbase(node); 194 + ctrl_val0 = nlm_read_sys_reg(clockbase, 195 + SYS_9XX_CPU_PLL_CTRL0(core)); 196 + ctrl_val1 = nlm_read_sys_reg(clockbase, 197 + SYS_9XX_CPU_PLL_CTRL1(core)); 198 + } else { 199 + sysbase = nlm_get_node(node)->sysbase; 200 + ctrl_val0 = nlm_read_sys_reg(sysbase, 201 + SYS_CPU_PLL_CTRL0(core)); 202 + ctrl_val1 = nlm_read_sys_reg(sysbase, 203 + SYS_CPU_PLL_CTRL1(core)); 204 + } 205 + 206 + /* Find 
PLL post divider value */ 207 + switch ((ctrl_val0 >> 24) & 0x7) { 208 + case 1: 209 + pll_post_div = 2; 210 + break; 211 + case 3: 212 + pll_post_div = 4; 213 + break; 214 + case 7: 215 + pll_post_div = 8; 216 + break; 217 + case 6: 218 + pll_post_div = 16; 219 + break; 220 + case 0: 221 + default: 222 + pll_post_div = 1; 223 + break; 224 + } 225 + 226 + num = 1000000ULL * (400 * 3 + 100 * (ctrl_val1 & 0x3f)); 227 + denom = 3 * pll_post_div; 228 + do_div(num, denom); 229 + 230 + return (unsigned int)num; 231 + } 232 + 233 + static unsigned int nlm_xlp_get_core_frequency(int node, int core) 202 234 { 203 235 unsigned int pll_divf, pll_divr, dfs_div, ext_div; 204 236 unsigned int rstval, dfsval, denom; 205 237 uint64_t num, sysbase; 206 238 207 239 sysbase = nlm_get_node(node)->sysbase; 208 - if (cpu_is_xlp9xx()) 209 - rstval = nlm_read_sys_reg(sysbase, SYS_9XX_POWER_ON_RESET_CFG); 210 - else 211 - rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG); 212 - if (cpu_is_xlpii()) { 213 - num = 1000000ULL * (400 * 3 + 100 * (rstval >> 26)); 214 - denom = 3; 215 - } else { 216 - dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE); 217 - pll_divf = ((rstval >> 10) & 0x7f) + 1; 218 - pll_divr = ((rstval >> 8) & 0x3) + 1; 219 - ext_div = ((rstval >> 30) & 0x3) + 1; 220 - dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1; 240 + rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG); 241 + dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE); 242 + pll_divf = ((rstval >> 10) & 0x7f) + 1; 243 + pll_divr = ((rstval >> 8) & 0x3) + 1; 244 + ext_div = ((rstval >> 30) & 0x3) + 1; 245 + dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1; 221 246 222 - num = 800000000ULL * pll_divf; 223 - denom = 3 * pll_divr * ext_div * dfs_div; 224 - } 247 + num = 800000000ULL * pll_divf; 248 + denom = 3 * pll_divr * ext_div * dfs_div; 225 249 do_div(num, denom); 250 + 226 251 return (unsigned int)num; 227 252 } 228 253 229 - /* Calculate Frequency to the PIC from PLL. 
230 - * freq_out = ( ref_freq/2 * (6 + ctrl2[7:0]) + ctrl2[20:8]/2^13 ) / 231 - * ((2^ctrl0[7:5]) * Table(ctrl0[26:24])) 232 - */ 233 - static unsigned int nlm_2xx_get_pic_frequency(int node) 254 + unsigned int nlm_get_core_frequency(int node, int core) 234 255 { 235 - u32 ctrl_val0, ctrl_val2, vco_post_div, pll_post_div; 256 + if (cpu_is_xlpii()) 257 + return nlm_xlp2_get_core_frequency(node, core); 258 + else 259 + return nlm_xlp_get_core_frequency(node, core); 260 + } 261 + 262 + /* 263 + * Calculate PIC frequency from PLL registers. 264 + * freq_out = (ref_freq/2 * (6 + ctrl2[7:0]) + ctrl2[20:8]/2^13) / 265 + * ((2^ctrl0[7:5]) * Table(ctrl0[26:24])) 266 + */ 267 + static unsigned int nlm_xlp2_get_pic_frequency(int node) 268 + { 269 + u32 ctrl_val0, ctrl_val2, vco_post_div, pll_post_div, cpu_xlp9xx; 236 270 u32 mdiv, fdiv, pll_out_freq_den, reg_select, ref_div, pic_div; 237 - u64 ref_clk, sysbase, pll_out_freq_num, ref_clk_select; 271 + u64 sysbase, pll_out_freq_num, ref_clk_select, clockbase, ref_clk; 238 272 239 273 sysbase = nlm_get_node(node)->sysbase; 274 + clockbase = nlm_get_clock_regbase(node); 275 + cpu_xlp9xx = cpu_is_xlp9xx(); 240 276 241 277 /* Find ref_clk_base */ 242 - ref_clk_select = 243 - (nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG) >> 18) & 0x3; 278 + if (cpu_xlp9xx) 279 + ref_clk_select = (nlm_read_sys_reg(sysbase, 280 + SYS_9XX_POWER_ON_RESET_CFG) >> 18) & 0x3; 281 + else 282 + ref_clk_select = (nlm_read_sys_reg(sysbase, 283 + SYS_POWER_ON_RESET_CFG) >> 18) & 0x3; 244 284 switch (ref_clk_select) { 245 285 case 0: 246 286 ref_clk = 200000000ULL; ··· 323 239 } 324 240 325 241 /* Find the clock source PLL device for PIC */ 326 - reg_select = (nlm_read_sys_reg(sysbase, SYS_CLK_DEV_SEL) >> 22) & 0x3; 327 - switch (reg_select) { 328 - case 0: 329 - ctrl_val0 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL0); 330 - ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2); 331 - break; 332 - case 1: 333 - ctrl_val0 = nlm_read_sys_reg(sysbase, 
SYS_PLL_CTRL0_DEVX(0)); 334 - ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2_DEVX(0)); 335 - break; 336 - case 2: 337 - ctrl_val0 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL0_DEVX(1)); 338 - ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2_DEVX(1)); 339 - break; 340 - case 3: 341 - ctrl_val0 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL0_DEVX(2)); 342 - ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2_DEVX(2)); 343 - break; 242 + if (cpu_xlp9xx) { 243 + reg_select = nlm_read_sys_reg(clockbase, 244 + SYS_9XX_CLK_DEV_SEL) & 0x3; 245 + switch (reg_select) { 246 + case 0: 247 + ctrl_val0 = nlm_read_sys_reg(clockbase, 248 + SYS_9XX_PLL_CTRL0); 249 + ctrl_val2 = nlm_read_sys_reg(clockbase, 250 + SYS_9XX_PLL_CTRL2); 251 + break; 252 + case 1: 253 + ctrl_val0 = nlm_read_sys_reg(clockbase, 254 + SYS_9XX_PLL_CTRL0_DEVX(0)); 255 + ctrl_val2 = nlm_read_sys_reg(clockbase, 256 + SYS_9XX_PLL_CTRL2_DEVX(0)); 257 + break; 258 + case 2: 259 + ctrl_val0 = nlm_read_sys_reg(clockbase, 260 + SYS_9XX_PLL_CTRL0_DEVX(1)); 261 + ctrl_val2 = nlm_read_sys_reg(clockbase, 262 + SYS_9XX_PLL_CTRL2_DEVX(1)); 263 + break; 264 + case 3: 265 + ctrl_val0 = nlm_read_sys_reg(clockbase, 266 + SYS_9XX_PLL_CTRL0_DEVX(2)); 267 + ctrl_val2 = nlm_read_sys_reg(clockbase, 268 + SYS_9XX_PLL_CTRL2_DEVX(2)); 269 + break; 270 + } 271 + } else { 272 + reg_select = (nlm_read_sys_reg(sysbase, 273 + SYS_CLK_DEV_SEL) >> 22) & 0x3; 274 + switch (reg_select) { 275 + case 0: 276 + ctrl_val0 = nlm_read_sys_reg(sysbase, 277 + SYS_PLL_CTRL0); 278 + ctrl_val2 = nlm_read_sys_reg(sysbase, 279 + SYS_PLL_CTRL2); 280 + break; 281 + case 1: 282 + ctrl_val0 = nlm_read_sys_reg(sysbase, 283 + SYS_PLL_CTRL0_DEVX(0)); 284 + ctrl_val2 = nlm_read_sys_reg(sysbase, 285 + SYS_PLL_CTRL2_DEVX(0)); 286 + break; 287 + case 2: 288 + ctrl_val0 = nlm_read_sys_reg(sysbase, 289 + SYS_PLL_CTRL0_DEVX(1)); 290 + ctrl_val2 = nlm_read_sys_reg(sysbase, 291 + SYS_PLL_CTRL2_DEVX(1)); 292 + break; 293 + case 3: 294 + ctrl_val0 = nlm_read_sys_reg(sysbase, 
295 + SYS_PLL_CTRL0_DEVX(2)); 296 + ctrl_val2 = nlm_read_sys_reg(sysbase, 297 + SYS_PLL_CTRL2_DEVX(2)); 298 + break; 299 + } 344 300 } 345 301 346 302 vco_post_div = (ctrl_val0 >> 5) & 0x7; 347 303 pll_post_div = (ctrl_val0 >> 24) & 0x7; 348 304 mdiv = ctrl_val2 & 0xff; 349 - fdiv = (ctrl_val2 >> 8) & 0xfff; 305 + fdiv = (ctrl_val2 >> 8) & 0x1fff; 350 306 351 307 /* Find PLL post divider value */ 352 308 switch (pll_post_div) { ··· 416 292 do_div(pll_out_freq_num, pll_out_freq_den); 417 293 418 294 /* PIC post divider, which happens after PLL */ 419 - pic_div = (nlm_read_sys_reg(sysbase, SYS_CLK_DEV_DIV) >> 22) & 0x3; 295 + if (cpu_xlp9xx) 296 + pic_div = nlm_read_sys_reg(clockbase, 297 + SYS_9XX_CLK_DEV_DIV) & 0x3; 298 + else 299 + pic_div = (nlm_read_sys_reg(sysbase, 300 + SYS_CLK_DEV_DIV) >> 22) & 0x3; 420 301 do_div(pll_out_freq_num, 1 << pic_div); 421 302 422 303 return pll_out_freq_num; ··· 429 300 430 301 unsigned int nlm_get_pic_frequency(int node) 431 302 { 432 - /* TODO Has to calculate freq as like 2xx */ 433 - if (cpu_is_xlp9xx()) 434 - return 250000000; 435 - 436 303 if (cpu_is_xlpii()) 437 - return nlm_2xx_get_pic_frequency(node); 304 + return nlm_xlp2_get_pic_frequency(node); 438 305 else 439 306 return 133333333; 440 307 }
+2 -1
arch/mips/netlogic/xlp/setup.c
··· 121 121 122 122 const char *get_system_type(void) 123 123 { 124 - switch (read_c0_prid() & 0xff00) { 124 + switch (read_c0_prid() & PRID_IMP_MASK) { 125 125 case PRID_IMP_NETLOGIC_XLP9XX: 126 + case PRID_IMP_NETLOGIC_XLP5XX: 126 127 case PRID_IMP_NETLOGIC_XLP2XX: 127 128 return "Broadcom XLPII Series"; 128 129 default:
+10 -6
arch/mips/netlogic/xlp/wakeup.c
··· 135 135 if (cpu_is_xlp9xx()) { 136 136 fusebase = nlm_get_fuse_regbase(n); 137 137 fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6); 138 - mask = 0xfffff; 138 + switch (read_c0_prid() & PRID_IMP_MASK) { 139 + case PRID_IMP_NETLOGIC_XLP5XX: 140 + mask = 0xff; 141 + break; 142 + case PRID_IMP_NETLOGIC_XLP9XX: 143 + default: 144 + mask = 0xfffff; 145 + break; 146 + } 139 147 } else { 140 148 fusemask = nlm_read_sys_reg(nodep->sysbase, 141 149 SYS_EFUSE_DEVICE_CFG_STATUS0); 142 - switch (read_c0_prid() & 0xff00) { 150 + switch (read_c0_prid() & PRID_IMP_MASK) { 143 151 case PRID_IMP_NETLOGIC_XLP3XX: 144 152 mask = 0xf; 145 153 break; ··· 166 158 * cores are renumbered to range 0 .. nactive-1 167 159 */ 168 160 syscoremask = (1 << hweight32(~fusemask & mask)) - 1; 169 - 170 - /* The boot cpu */ 171 - if (n == 0) 172 - nodep->coremask = 1; 173 161 174 162 pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask); 175 163 for (core = 0; core < nlm_cores_per_node(); core++) {
+6
arch/mips/paravirt/Kconfig
··· 1 + if MIPS_PARAVIRT 2 + 3 + config MIPS_PCI_VIRTIO 4 + def_bool y 5 + 6 + endif # MIPS_PARAVIRT
+14
arch/mips/paravirt/Makefile
··· 1 + # 2 + # Makefile for MIPS para-virtualized specific kernel interface routines 3 + # under Linux. 4 + # 5 + # This file is subject to the terms and conditions of the GNU General Public 6 + # License. See the file "COPYING" in the main directory of this archive 7 + # for more details. 8 + # 9 + # Copyright (C) 2013 Cavium, Inc. 10 + # 11 + 12 + obj-y := setup.o serial.o paravirt-irq.o 13 + 14 + obj-$(CONFIG_SMP) += paravirt-smp.o
+8
arch/mips/paravirt/Platform
··· 1 + # 2 + # Generic para-virtualized guest. 3 + # 4 + platform-$(CONFIG_MIPS_PARAVIRT) += paravirt/ 5 + cflags-$(CONFIG_MIPS_PARAVIRT) += \ 6 + -I$(srctree)/arch/mips/include/asm/mach-paravirt 7 + 8 + load-$(CONFIG_MIPS_PARAVIRT) = 0xffffffff80010000
+368
arch/mips/paravirt/paravirt-irq.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2013 Cavium, Inc. 7 + */ 8 + 9 + #include <linux/interrupt.h> 10 + #include <linux/cpumask.h> 11 + #include <linux/kernel.h> 12 + #include <linux/mutex.h> 13 + 14 + #include <asm/io.h> 15 + 16 + #define MBOX_BITS_PER_CPU 2 17 + 18 + static int cpunum_for_cpu(int cpu) 19 + { 20 + #ifdef CONFIG_SMP 21 + return cpu_logical_map(cpu); 22 + #else 23 + return get_ebase_cpunum(); 24 + #endif 25 + } 26 + 27 + struct core_chip_data { 28 + struct mutex core_irq_mutex; 29 + bool current_en; 30 + bool desired_en; 31 + u8 bit; 32 + }; 33 + 34 + static struct core_chip_data irq_core_chip_data[8]; 35 + 36 + static void irq_core_ack(struct irq_data *data) 37 + { 38 + struct core_chip_data *cd = irq_data_get_irq_chip_data(data); 39 + unsigned int bit = cd->bit; 40 + 41 + /* 42 + * We don't need to disable IRQs to make these atomic since 43 + * they are already disabled earlier in the low level 44 + * interrupt code. 45 + */ 46 + clear_c0_status(0x100 << bit); 47 + /* The two user interrupts must be cleared manually. */ 48 + if (bit < 2) 49 + clear_c0_cause(0x100 << bit); 50 + } 51 + 52 + static void irq_core_eoi(struct irq_data *data) 53 + { 54 + struct core_chip_data *cd = irq_data_get_irq_chip_data(data); 55 + 56 + /* 57 + * We don't need to disable IRQs to make these atomic since 58 + * they are already disabled earlier in the low level 59 + * interrupt code. 60 + */ 61 + set_c0_status(0x100 << cd->bit); 62 + } 63 + 64 + static void irq_core_set_enable_local(void *arg) 65 + { 66 + struct irq_data *data = arg; 67 + struct core_chip_data *cd = irq_data_get_irq_chip_data(data); 68 + unsigned int mask = 0x100 << cd->bit; 69 + 70 + /* 71 + * Interrupts are already disabled, so these are atomic. 
72 + */ 73 + if (cd->desired_en) 74 + set_c0_status(mask); 75 + else 76 + clear_c0_status(mask); 77 + 78 + } 79 + 80 + static void irq_core_disable(struct irq_data *data) 81 + { 82 + struct core_chip_data *cd = irq_data_get_irq_chip_data(data); 83 + cd->desired_en = false; 84 + } 85 + 86 + static void irq_core_enable(struct irq_data *data) 87 + { 88 + struct core_chip_data *cd = irq_data_get_irq_chip_data(data); 89 + cd->desired_en = true; 90 + } 91 + 92 + static void irq_core_bus_lock(struct irq_data *data) 93 + { 94 + struct core_chip_data *cd = irq_data_get_irq_chip_data(data); 95 + 96 + mutex_lock(&cd->core_irq_mutex); 97 + } 98 + 99 + static void irq_core_bus_sync_unlock(struct irq_data *data) 100 + { 101 + struct core_chip_data *cd = irq_data_get_irq_chip_data(data); 102 + 103 + if (cd->desired_en != cd->current_en) { 104 + on_each_cpu(irq_core_set_enable_local, data, 1); 105 + cd->current_en = cd->desired_en; 106 + } 107 + 108 + mutex_unlock(&cd->core_irq_mutex); 109 + } 110 + 111 + static struct irq_chip irq_chip_core = { 112 + .name = "Core", 113 + .irq_enable = irq_core_enable, 114 + .irq_disable = irq_core_disable, 115 + .irq_ack = irq_core_ack, 116 + .irq_eoi = irq_core_eoi, 117 + .irq_bus_lock = irq_core_bus_lock, 118 + .irq_bus_sync_unlock = irq_core_bus_sync_unlock, 119 + 120 + .irq_cpu_online = irq_core_eoi, 121 + .irq_cpu_offline = irq_core_ack, 122 + .flags = IRQCHIP_ONOFFLINE_ENABLED, 123 + }; 124 + 125 + static void __init irq_init_core(void) 126 + { 127 + int i; 128 + int irq; 129 + struct core_chip_data *cd; 130 + 131 + /* Start with a clean slate */ 132 + clear_c0_status(ST0_IM); 133 + clear_c0_cause(CAUSEF_IP0 | CAUSEF_IP1); 134 + 135 + for (i = 0; i < ARRAY_SIZE(irq_core_chip_data); i++) { 136 + cd = irq_core_chip_data + i; 137 + cd->current_en = false; 138 + cd->desired_en = false; 139 + cd->bit = i; 140 + mutex_init(&cd->core_irq_mutex); 141 + 142 + irq = MIPS_CPU_IRQ_BASE + i; 143 + 144 + switch (i) { 145 + case 0: /* SW0 */ 146 + case 
1: /* SW1 */ 147 + case 5: /* IP5 */ 148 + case 6: /* IP6 */ 149 + case 7: /* IP7 */ 150 + irq_set_chip_data(irq, cd); 151 + irq_set_chip_and_handler(irq, &irq_chip_core, 152 + handle_percpu_irq); 153 + break; 154 + default: 155 + break; 156 + } 157 + } 158 + } 159 + 160 + static void __iomem *mips_irq_chip; 161 + #define MIPS_IRQ_CHIP_NUM_BITS 0 162 + #define MIPS_IRQ_CHIP_REGS 8 163 + 164 + static int mips_irq_cpu_stride; 165 + static int mips_irq_chip_reg_raw; 166 + static int mips_irq_chip_reg_src; 167 + static int mips_irq_chip_reg_en; 168 + static int mips_irq_chip_reg_raw_w1s; 169 + static int mips_irq_chip_reg_raw_w1c; 170 + static int mips_irq_chip_reg_en_w1s; 171 + static int mips_irq_chip_reg_en_w1c; 172 + 173 + static void irq_pci_enable(struct irq_data *data) 174 + { 175 + u32 mask = 1u << data->irq; 176 + 177 + __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1s); 178 + } 179 + 180 + static void irq_pci_disable(struct irq_data *data) 181 + { 182 + u32 mask = 1u << data->irq; 183 + 184 + __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1c); 185 + } 186 + 187 + static void irq_pci_ack(struct irq_data *data) 188 + { 189 + } 190 + 191 + static void irq_pci_mask(struct irq_data *data) 192 + { 193 + u32 mask = 1u << data->irq; 194 + 195 + __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1c); 196 + } 197 + 198 + static void irq_pci_unmask(struct irq_data *data) 199 + { 200 + u32 mask = 1u << data->irq; 201 + 202 + __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1s); 203 + } 204 + 205 + static struct irq_chip irq_chip_pci = { 206 + .name = "PCI", 207 + .irq_enable = irq_pci_enable, 208 + .irq_disable = irq_pci_disable, 209 + .irq_ack = irq_pci_ack, 210 + .irq_mask = irq_pci_mask, 211 + .irq_unmask = irq_pci_unmask, 212 + }; 213 + 214 + static void irq_mbox_all(struct irq_data *data, void __iomem *base) 215 + { 216 + int cpu; 217 + unsigned int mbox = data->irq - MIPS_IRQ_MBOX0; 218 + u32 mask; 219 + 220 + WARN_ON(mbox >= 
MBOX_BITS_PER_CPU); 221 + 222 + for_each_online_cpu(cpu) { 223 + unsigned int cpuid = cpunum_for_cpu(cpu); 224 + mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox); 225 + __raw_writel(mask, base + (cpuid * mips_irq_cpu_stride)); 226 + } 227 + } 228 + 229 + static void irq_mbox_enable(struct irq_data *data) 230 + { 231 + irq_mbox_all(data, mips_irq_chip + mips_irq_chip_reg_en_w1s + sizeof(u32)); 232 + } 233 + 234 + static void irq_mbox_disable(struct irq_data *data) 235 + { 236 + irq_mbox_all(data, mips_irq_chip + mips_irq_chip_reg_en_w1c + sizeof(u32)); 237 + } 238 + 239 + static void irq_mbox_ack(struct irq_data *data) 240 + { 241 + u32 mask; 242 + unsigned int mbox = data->irq - MIPS_IRQ_MBOX0; 243 + 244 + WARN_ON(mbox >= MBOX_BITS_PER_CPU); 245 + 246 + mask = 1 << (get_ebase_cpunum() * MBOX_BITS_PER_CPU + mbox); 247 + __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_raw_w1c + sizeof(u32)); 248 + } 249 + 250 + void irq_mbox_ipi(int cpu, unsigned int actions) 251 + { 252 + unsigned int cpuid = cpunum_for_cpu(cpu); 253 + u32 mask; 254 + 255 + WARN_ON(actions >= (1 << MBOX_BITS_PER_CPU)); 256 + 257 + mask = actions << (cpuid * MBOX_BITS_PER_CPU); 258 + __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_raw_w1s + sizeof(u32)); 259 + } 260 + 261 + static void irq_mbox_cpu_onoffline(struct irq_data *data, void __iomem *base) 262 + { 263 + unsigned int mbox = data->irq - MIPS_IRQ_MBOX0; 264 + unsigned int cpuid = get_ebase_cpunum(); 265 + u32 mask; 266 + 267 + WARN_ON(mbox >= MBOX_BITS_PER_CPU); 268 + 269 + mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox); 270 + __raw_writel(mask, base + (cpuid * mips_irq_cpu_stride)); 271 + 272 + } 273 + 274 + static void irq_mbox_cpu_online(struct irq_data *data) 275 + { 276 + irq_mbox_cpu_onoffline(data, mips_irq_chip + mips_irq_chip_reg_en_w1s + sizeof(u32)); 277 + } 278 + 279 + static void irq_mbox_cpu_offline(struct irq_data *data) 280 + { 281 + irq_mbox_cpu_onoffline(data, mips_irq_chip + mips_irq_chip_reg_en_w1c + sizeof(u32)); 
282 + } 283 + 284 + static struct irq_chip irq_chip_mbox = { 285 + .name = "MBOX", 286 + .irq_enable = irq_mbox_enable, 287 + .irq_disable = irq_mbox_disable, 288 + .irq_ack = irq_mbox_ack, 289 + .irq_cpu_online = irq_mbox_cpu_online, 290 + .irq_cpu_offline = irq_mbox_cpu_offline, 291 + .flags = IRQCHIP_ONOFFLINE_ENABLED, 292 + }; 293 + 294 + static void __init irq_pci_init(void) 295 + { 296 + int i, stride; 297 + u32 num_bits; 298 + 299 + mips_irq_chip = ioremap(0x1e010000, 4096); 300 + 301 + num_bits = __raw_readl(mips_irq_chip + MIPS_IRQ_CHIP_NUM_BITS); 302 + stride = 8 * (1 + ((num_bits - 1) / 64)); 303 + 304 + 305 + pr_notice("mips_irq_chip: %u bits, reg stride: %d\n", num_bits, stride); 306 + mips_irq_chip_reg_raw = MIPS_IRQ_CHIP_REGS + 0 * stride; 307 + mips_irq_chip_reg_raw_w1s = MIPS_IRQ_CHIP_REGS + 1 * stride; 308 + mips_irq_chip_reg_raw_w1c = MIPS_IRQ_CHIP_REGS + 2 * stride; 309 + mips_irq_chip_reg_src = MIPS_IRQ_CHIP_REGS + 3 * stride; 310 + mips_irq_chip_reg_en = MIPS_IRQ_CHIP_REGS + 4 * stride; 311 + mips_irq_chip_reg_en_w1s = MIPS_IRQ_CHIP_REGS + 5 * stride; 312 + mips_irq_chip_reg_en_w1c = MIPS_IRQ_CHIP_REGS + 6 * stride; 313 + mips_irq_cpu_stride = stride * 4; 314 + 315 + for (i = 0; i < 4; i++) 316 + irq_set_chip_and_handler(i + MIPS_IRQ_PCIA, &irq_chip_pci, handle_level_irq); 317 + 318 + for (i = 0; i < 2; i++) 319 + irq_set_chip_and_handler(i + MIPS_IRQ_MBOX0, &irq_chip_mbox, handle_percpu_irq); 320 + 321 + 322 + set_c0_status(STATUSF_IP2); 323 + } 324 + 325 + static void irq_pci_dispatch(void) 326 + { 327 + unsigned int cpuid = get_ebase_cpunum(); 328 + u32 en; 329 + 330 + en = __raw_readl(mips_irq_chip + mips_irq_chip_reg_src + 331 + (cpuid * mips_irq_cpu_stride)); 332 + 333 + if (!en) { 334 + en = __raw_readl(mips_irq_chip + mips_irq_chip_reg_src + (cpuid * mips_irq_cpu_stride) + sizeof(u32)); 335 + en = (en >> (2 * cpuid)) & 3; 336 + 337 + if (!en) 338 + spurious_interrupt(); 339 + else 340 + do_IRQ(__ffs(en) + MIPS_IRQ_MBOX0); /* MBOX type 
*/ 341 + } else { 342 + do_IRQ(__ffs(en)); 343 + } 344 + } 345 + 346 + 347 + void __init arch_init_irq(void) 348 + { 349 + irq_init_core(); 350 + irq_pci_init(); 351 + } 352 + 353 + asmlinkage void plat_irq_dispatch(void) 354 + { 355 + unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; 356 + int ip; 357 + 358 + if (unlikely(!pending)) { 359 + spurious_interrupt(); 360 + return; 361 + } 362 + 363 + ip = ffs(pending) - 1 - STATUSB_IP0; 364 + if (ip == 2) 365 + irq_pci_dispatch(); 366 + else 367 + do_IRQ(MIPS_CPU_IRQ_BASE + ip); 368 + }
+143
arch/mips/paravirt/paravirt-smp.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2013 Cavium, Inc. 7 + */ 8 + 9 + #include <linux/interrupt.h> 10 + #include <linux/cpumask.h> 11 + #include <linux/kernel.h> 12 + #include <linux/sched.h> 13 + 14 + #include <asm/mipsregs.h> 15 + #include <asm/setup.h> 16 + #include <asm/time.h> 17 + #include <asm/smp.h> 18 + 19 + /* 20 + * Writing the sp releases the CPU, so writes must be ordered, gp 21 + * first, then sp. 22 + */ 23 + unsigned long paravirt_smp_sp[NR_CPUS]; 24 + unsigned long paravirt_smp_gp[NR_CPUS]; 25 + 26 + static int numcpus = 1; 27 + 28 + static int __init set_numcpus(char *str) 29 + { 30 + int newval; 31 + 32 + if (get_option(&str, &newval)) { 33 + if (newval < 1 || newval >= NR_CPUS) 34 + goto bad; 35 + numcpus = newval; 36 + return 0; 37 + } 38 + bad: 39 + return -EINVAL; 40 + } 41 + early_param("numcpus", set_numcpus); 42 + 43 + 44 + static void paravirt_smp_setup(void) 45 + { 46 + int id; 47 + unsigned int cpunum = get_ebase_cpunum(); 48 + 49 + if (WARN_ON(cpunum >= NR_CPUS)) 50 + return; 51 + 52 + /* The present CPUs are initially just the boot cpu (CPU 0). 
*/ 53 + for (id = 0; id < NR_CPUS; id++) { 54 + set_cpu_possible(id, id == 0); 55 + set_cpu_present(id, id == 0); 56 + } 57 + __cpu_number_map[cpunum] = 0; 58 + __cpu_logical_map[0] = cpunum; 59 + 60 + for (id = 0; id < numcpus; id++) { 61 + set_cpu_possible(id, true); 62 + set_cpu_present(id, true); 63 + __cpu_number_map[id] = id; 64 + __cpu_logical_map[id] = id; 65 + } 66 + } 67 + 68 + void irq_mbox_ipi(int cpu, unsigned int actions); 69 + static void paravirt_send_ipi_single(int cpu, unsigned int action) 70 + { 71 + irq_mbox_ipi(cpu, action); 72 + } 73 + 74 + static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int action) 75 + { 76 + unsigned int cpu; 77 + 78 + for_each_cpu_mask(cpu, *mask) 79 + paravirt_send_ipi_single(cpu, action); 80 + } 81 + 82 + static void paravirt_init_secondary(void) 83 + { 84 + unsigned int sr; 85 + 86 + sr = set_c0_status(ST0_BEV); 87 + write_c0_ebase((u32)ebase); 88 + 89 + sr |= STATUSF_IP2; /* Interrupt controller on IP2 */ 90 + write_c0_status(sr); 91 + 92 + irq_cpu_online(); 93 + } 94 + 95 + static void paravirt_smp_finish(void) 96 + { 97 + /* to generate the first CPU timer interrupt */ 98 + write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); 99 + local_irq_enable(); 100 + } 101 + 102 + static void paravirt_boot_secondary(int cpu, struct task_struct *idle) 103 + { 104 + paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle); 105 + smp_wmb(); 106 + paravirt_smp_sp[cpu] = __KSTK_TOS(idle); 107 + } 108 + 109 + static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) 110 + { 111 + scheduler_ipi(); 112 + return IRQ_HANDLED; 113 + } 114 + 115 + static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) 116 + { 117 + smp_call_function_interrupt(); 118 + return IRQ_HANDLED; 119 + } 120 + 121 + static void paravirt_prepare_cpus(unsigned int max_cpus) 122 + { 123 + if (request_irq(MIPS_IRQ_MBOX0, paravirt_reched_interrupt, 124 + IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler", 125 + 
paravirt_reched_interrupt)) { 126 + panic("Cannot request_irq for SchedulerIPI"); 127 + } 128 + if (request_irq(MIPS_IRQ_MBOX1, paravirt_function_interrupt, 129 + IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call", 130 + paravirt_function_interrupt)) { 131 + panic("Cannot request_irq for SMP-Call"); 132 + } 133 + } 134 + 135 + struct plat_smp_ops paravirt_smp_ops = { 136 + .send_ipi_single = paravirt_send_ipi_single, 137 + .send_ipi_mask = paravirt_send_ipi_mask, 138 + .init_secondary = paravirt_init_secondary, 139 + .smp_finish = paravirt_smp_finish, 140 + .boot_secondary = paravirt_boot_secondary, 141 + .smp_setup = paravirt_smp_setup, 142 + .prepare_cpus = paravirt_prepare_cpus, 143 + };
+40
arch/mips/paravirt/serial.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2013 Cavium, Inc. 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/virtio_console.h> 11 + #include <linux/kvm_para.h> 12 + 13 + /* 14 + * Emit one character to the boot console. 15 + */ 16 + int prom_putchar(char c) 17 + { 18 + kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, 0 /* port 0 */, 19 + (unsigned long)&c, 1 /* len == 1 */); 20 + 21 + return 1; 22 + } 23 + 24 + #ifdef CONFIG_VIRTIO_CONSOLE 25 + static int paravirt_put_chars(u32 vtermno, const char *buf, int count) 26 + { 27 + kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, vtermno, 28 + (unsigned long)buf, count); 29 + 30 + return count; 31 + } 32 + 33 + static int __init paravirt_cons_init(void) 34 + { 35 + virtio_cons_early_init(paravirt_put_chars); 36 + return 0; 37 + } 38 + core_initcall(paravirt_cons_init); 39 + 40 + #endif
+67
arch/mips/paravirt/setup.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2013 Cavium, Inc. 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/kvm_para.h> 11 + 12 + #include <asm/reboot.h> 13 + #include <asm/bootinfo.h> 14 + #include <asm/smp-ops.h> 15 + #include <asm/time.h> 16 + 17 + extern struct plat_smp_ops paravirt_smp_ops; 18 + 19 + const char *get_system_type(void) 20 + { 21 + return "MIPS Para-Virtualized Guest"; 22 + } 23 + 24 + void __init plat_time_init(void) 25 + { 26 + mips_hpt_frequency = kvm_hypercall0(KVM_HC_MIPS_GET_CLOCK_FREQ); 27 + 28 + preset_lpj = mips_hpt_frequency / (2 * HZ); 29 + } 30 + 31 + static void pv_machine_halt(void) 32 + { 33 + kvm_hypercall0(KVM_HC_MIPS_EXIT_VM); 34 + } 35 + 36 + /* 37 + * Early entry point for arch setup 38 + */ 39 + void __init prom_init(void) 40 + { 41 + int i; 42 + int argc = fw_arg0; 43 + char **argv = (char **)fw_arg1; 44 + 45 + #ifdef CONFIG_32BIT 46 + set_io_port_base(KSEG1ADDR(0x1e000000)); 47 + #else /* CONFIG_64BIT */ 48 + set_io_port_base(PHYS_TO_XKSEG_UNCACHED(0x1e000000)); 49 + #endif 50 + 51 + for (i = 0; i < argc; i++) { 52 + strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE); 53 + if (i < argc - 1) 54 + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); 55 + } 56 + _machine_halt = pv_machine_halt; 57 + register_smp_ops(&paravirt_smp_ops); 58 + } 59 + 60 + void __init plat_mem_setup(void) 61 + { 62 + /* Do nothing, the "mem=???" parser handles our memory. */ 63 + } 64 + 65 + void __init prom_free_prom_memory(void) 66 + { 67 + }
+1 -1
arch/mips/pci/Makefile
··· 21 21 obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o 22 22 obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o 23 23 obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o 24 - 24 + obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o 25 25 # 26 26 # These are still pretty much in the old state, watch, go blind. 27 27 #
+6
arch/mips/pci/fixup-malta.c
··· 68 68 { 69 69 unsigned char reg_val; 70 70 u32 reg_val32; 71 + u16 reg_val16; 71 72 /* PIIX PIRQC[A:D] irq mappings */ 72 73 static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = { 73 74 0, 0, 0, 3, ··· 108 107 pci_read_config_byte(pdev, PIIX4_FUNC0_SERIRQC, &reg_val); 109 108 reg_val |= PIIX4_FUNC0_SERIRQC_EN | PIIX4_FUNC0_SERIRQC_CONT; 110 109 pci_write_config_byte(pdev, PIIX4_FUNC0_SERIRQC, reg_val); 110 + 111 + /* Enable response to special cycles */ 112 + pci_read_config_word(pdev, PCI_COMMAND, &reg_val16); 113 + pci_write_config_word(pdev, PCI_COMMAND, 114 + reg_val16 | PCI_COMMAND_SPECIAL); 111 115 } 112 116 113 117 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
+6
arch/mips/pci/msi-octeon.c
··· 15 15 #include <asm/octeon/cvmx-npi-defs.h> 16 16 #include <asm/octeon/cvmx-pci-defs.h> 17 17 #include <asm/octeon/cvmx-npei-defs.h> 18 + #include <asm/octeon/cvmx-sli-defs.h> 18 19 #include <asm/octeon/cvmx-pexp-defs.h> 19 20 #include <asm/octeon/pci-octeon.h> 20 21 ··· 162 161 /* FIXME CVMX_NPEI_MSI_RCV* other than 0? */ 163 162 msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff; 164 163 msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32; 164 + break; 165 + case OCTEON_DMA_BAR_TYPE_PCIE2: 166 + /* When using PCIe2, Bar 0 is based at 0 */ 167 + msg.address_lo = (0 + CVMX_SLI_PCIE_MSI_RCV) & 0xffffffff; 168 + msg.address_hi = (0 + CVMX_SLI_PCIE_MSI_RCV) >> 32; 165 169 break; 166 170 default: 167 171 panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type");
+135 -49
arch/mips/pci/msi-xlp.c
··· 56 56 #include <asm/netlogic/xlp-hal/bridge.h> 57 57 58 58 #define XLP_MSIVEC_PER_LINK 32 59 - #define XLP_MSIXVEC_TOTAL 32 60 - #define XLP_MSIXVEC_PER_LINK 8 59 + #define XLP_MSIXVEC_TOTAL (cpu_is_xlp9xx() ? 128 : 32) 60 + #define XLP_MSIXVEC_PER_LINK (cpu_is_xlp9xx() ? 32 : 8) 61 61 62 62 /* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */ 63 63 static inline int nlm_link_msiirq(int link, int msivec) ··· 65 65 return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec; 66 66 } 67 67 68 + /* get the link MSI vector from irq number */ 68 69 static inline int nlm_irq_msivec(int irq) 69 70 { 70 - return irq % XLP_MSIVEC_PER_LINK; 71 + return (irq - NLM_MSI_VEC_BASE) % XLP_MSIVEC_PER_LINK; 71 72 } 72 73 74 + /* get the link from the irq number */ 73 75 static inline int nlm_irq_msilink(int irq) 74 76 { 75 - return (irq % (XLP_MSIVEC_PER_LINK * PCIE_NLINKS)) / 76 - XLP_MSIVEC_PER_LINK; 77 + int total_msivec = XLP_MSIVEC_PER_LINK * PCIE_NLINKS; 78 + 79 + return ((irq - NLM_MSI_VEC_BASE) % total_msivec) / 80 + XLP_MSIVEC_PER_LINK; 77 81 } 78 82 79 83 /* 80 - * Only 32 MSI-X vectors are possible because there are only 32 PIC 81 - * interrupts for MSI. We split them statically and use 8 MSI-X vectors 82 - * per link - this keeps the allocation and lookup simple. 84 + * For XLP 8xx/4xx/3xx/2xx, only 32 MSI-X vectors are possible because 85 + * there are only 32 PIC interrupts for MSI. We split them statically 86 + * and use 8 MSI-X vectors per link - this keeps the allocation and 87 + * lookup simple. 88 + * On XLP 9xx, there are 32 vectors per link, and the interrupts are 89 + * not routed thru PIC, so we can use all 128 MSI-X vectors. 
83 90 */ 84 91 static inline int nlm_link_msixirq(int link, int bit) 85 92 { 86 93 return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit; 87 94 } 88 95 96 + /* get the link MSI vector from irq number */ 89 97 static inline int nlm_irq_msixvec(int irq) 90 98 { 91 - return irq % XLP_MSIXVEC_TOTAL; /* works when given xirq */ 99 + return (irq - NLM_MSIX_VEC_BASE) % XLP_MSIXVEC_TOTAL; 92 100 } 93 101 94 - static inline int nlm_irq_msixlink(int irq) 102 + /* get the link from MSIX vec */ 103 + static inline int nlm_irq_msixlink(int msixvec) 95 104 { 96 - return nlm_irq_msixvec(irq) / XLP_MSIXVEC_PER_LINK; 105 + return msixvec / XLP_MSIXVEC_PER_LINK; 97 106 } 98 107 99 108 /* ··· 138 129 vec = nlm_irq_msivec(d->irq); 139 130 spin_lock_irqsave(&md->msi_lock, flags); 140 131 md->msi_enabled_mask |= 1u << vec; 141 - nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 132 + if (cpu_is_xlp9xx()) 133 + nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, 134 + md->msi_enabled_mask); 135 + else 136 + nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 142 137 spin_unlock_irqrestore(&md->msi_lock, flags); 143 138 } 144 139 ··· 155 142 vec = nlm_irq_msivec(d->irq); 156 143 spin_lock_irqsave(&md->msi_lock, flags); 157 144 md->msi_enabled_mask &= ~(1u << vec); 158 - nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 145 + if (cpu_is_xlp9xx()) 146 + nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, 147 + md->msi_enabled_mask); 148 + else 149 + nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 159 150 spin_unlock_irqrestore(&md->msi_lock, flags); 160 151 } 161 152 ··· 173 156 xlp_msi_disable(d); 174 157 175 158 /* Ack MSI on bridge */ 176 - nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec); 159 + if (cpu_is_xlp9xx()) 160 + nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec); 161 + else 162 + nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec); 177 163 178 164 /* Ack at eirr and PIC */ 179 165 
ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link)); 180 - nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link)); 166 + if (cpu_is_xlp9xx()) 167 + nlm_pic_ack(md->node->picbase, 168 + PIC_9XX_IRT_PCIE_LINK_INDEX(link)); 169 + else 170 + nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link)); 181 171 } 182 172 183 173 static struct irq_chip xlp_msi_chip = { ··· 196 172 }; 197 173 198 174 /* 199 - * The MSI-X interrupt handling is different from MSI, there are 32 200 - * MSI-X interrupts generated by the PIC and each of these correspond 201 - * to a MSI-X vector (0-31) that can be assigned. 175 + * XLP8XX/4XX/3XX/2XX: 176 + * The MSI-X interrupt handling is different from MSI, there are 32 MSI-X 177 + * interrupts generated by the PIC and each of these correspond to a MSI-X 178 + * vector (0-31) that can be assigned. 202 179 * 203 - * We divide the MSI-X vectors to 8 per link and do a per-link 204 - * allocation 180 + * We divide the MSI-X vectors to 8 per link and do a per-link allocation 181 + * 182 + * XLP9XX: 183 + * 32 MSI-X vectors are available per link, and the interrupts are not routed 184 + * thru the PIC. PIC ack not needed. 205 185 * 206 186 * Enable and disable done using standard MSI functions. 
207 187 */ 208 188 static void xlp_msix_mask_ack(struct irq_data *d) 209 189 { 210 - struct xlp_msi_data *md = irq_data_get_irq_handler_data(d); 190 + struct xlp_msi_data *md; 211 191 int link, msixvec; 192 + uint32_t status_reg, bit; 212 193 213 194 msixvec = nlm_irq_msixvec(d->irq); 214 - link = nlm_irq_msixlink(d->irq); 195 + link = nlm_irq_msixlink(msixvec); 215 196 mask_msi_irq(d); 197 + md = irq_data_get_irq_handler_data(d); 216 198 217 199 /* Ack MSI on bridge */ 218 - nlm_write_reg(md->lnkbase, PCIE_MSIX_STATUS, 1u << msixvec); 200 + if (cpu_is_xlp9xx()) { 201 + status_reg = PCIE_9XX_MSIX_STATUSX(link); 202 + bit = msixvec % XLP_MSIXVEC_PER_LINK; 203 + } else { 204 + status_reg = PCIE_MSIX_STATUS; 205 + bit = msixvec; 206 + } 207 + nlm_write_reg(md->lnkbase, status_reg, 1u << bit); 219 208 220 209 /* Ack at eirr and PIC */ 221 210 ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link)); 222 - nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_MSIX_INDEX(msixvec)); 211 + if (!cpu_is_xlp9xx()) 212 + nlm_pic_ack(md->node->picbase, 213 + PIC_IRT_PCIE_MSIX_INDEX(msixvec)); 223 214 } 224 215 225 216 static struct irq_chip xlp_msix_chip = { ··· 258 219 { 259 220 u32 val; 260 221 261 - val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 262 - if ((val & 0x200) == 0) { 263 - val |= 0x200; /* MSI Interrupt enable */ 264 - nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 222 + if (cpu_is_xlp9xx()) { 223 + val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0); 224 + if ((val & 0x200) == 0) { 225 + val |= 0x200; /* MSI Interrupt enable */ 226 + nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val); 227 + } 228 + } else { 229 + val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 230 + if ((val & 0x200) == 0) { 231 + val |= 0x200; 232 + nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 233 + } 265 234 } 266 235 267 236 val = nlm_read_reg(lnkbase, 0x1); /* CMD */ ··· 316 269 317 270 spin_lock_irqsave(&md->msi_lock, flags); 318 271 if (md->msi_alloc_mask == 0) { 319 - /* switch the link IRQ to MSI range */ 320 272 xlp_config_link_msi(lnkbase, 
lirq, msiaddr); 321 - irt = PIC_IRT_PCIE_LINK_INDEX(link); 273 + /* switch the link IRQ to MSI range */ 274 + if (cpu_is_xlp9xx()) 275 + irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link); 276 + else 277 + irt = PIC_IRT_PCIE_LINK_INDEX(link); 322 278 nlm_setup_pic_irq(node, lirq, lirq, irt); 323 279 nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq, 324 280 node * nlm_threads_per_node(), 1 /*en */); ··· 361 311 val |= 0x80000000U; 362 312 nlm_write_reg(lnkbase, 0x2C, val); 363 313 } 364 - val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 365 - if ((val & 0x200) == 0) { 366 - val |= 0x200; /* MSI Interrupt enable */ 367 - nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 314 + 315 + if (cpu_is_xlp9xx()) { 316 + val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0); 317 + if ((val & 0x200) == 0) { 318 + val |= 0x200; /* MSI Interrupt enable */ 319 + nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val); 320 + } 321 + } else { 322 + val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 323 + if ((val & 0x200) == 0) { 324 + val |= 0x200; /* MSI Interrupt enable */ 325 + nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 326 + } 368 327 } 369 328 370 329 val = nlm_read_reg(lnkbase, 0x1); /* CMD */ ··· 388 329 val |= (1 << 8) | lirq; 389 330 nlm_write_pci_reg(lnkbase, 0xf, val); 390 331 391 - /* MSI-X addresses */ 392 - nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE, msixaddr >> 8); 393 - nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT, 394 - (msixaddr + MSI_ADDR_SZ) >> 8); 332 + if (cpu_is_xlp9xx()) { 333 + /* MSI-X addresses */ 334 + nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE, 335 + msixaddr >> 8); 336 + nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT, 337 + (msixaddr + MSI_ADDR_SZ) >> 8); 338 + } else { 339 + /* MSI-X addresses */ 340 + nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE, 341 + msixaddr >> 8); 342 + nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT, 343 + (msixaddr + MSI_ADDR_SZ) >> 8); 344 + } 395 345 } 396 346 397 347 /* ··· 437 369 438 370 xirq += t; 439 371 msixvec = 
nlm_irq_msixvec(xirq); 372 + 440 373 msg.address_hi = msixaddr >> 32; 441 374 msg.address_lo = msixaddr & 0xffffffff; 442 375 msg.data = 0xc00 | msixvec; ··· 478 409 { 479 410 struct nlm_soc_info *nodep; 480 411 struct xlp_msi_data *md; 481 - int irq, i, irt, msixvec; 412 + int irq, i, irt, msixvec, val; 482 413 483 414 pr_info("[%d %d] Init node PCI IRT\n", node, link); 484 415 nodep = nlm_get_node(node); ··· 499 430 irq_set_handler_data(i, md); 500 431 } 501 432 502 - for (i = 0; i < XLP_MSIXVEC_PER_LINK; i++) { 503 - /* Initialize MSI-X irts to generate one interrupt per link */ 504 - msixvec = link * XLP_MSIXVEC_PER_LINK + i; 505 - irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec); 506 - nlm_pic_init_irt(nodep->picbase, irt, PIC_PCIE_MSIX_IRQ(link), 507 - node * nlm_threads_per_node(), 1 /* enable */); 433 + for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) { 434 + if (cpu_is_xlp9xx()) { 435 + val = ((node * nlm_threads_per_node()) << 7 | 436 + PIC_PCIE_MSIX_IRQ(link) << 1 | 0 << 0); 437 + nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i + 438 + (link * XLP_MSIXVEC_PER_LINK)), val); 439 + } else { 440 + /* Initialize MSI-X irts to generate one interrupt 441 + * per link 442 + */ 443 + msixvec = link * XLP_MSIXVEC_PER_LINK + i; 444 + irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec); 445 + nlm_pic_init_irt(nodep->picbase, irt, 446 + PIC_PCIE_MSIX_IRQ(link), 447 + node * nlm_threads_per_node(), 1); 448 + } 508 449 509 450 /* Initialize MSI-X extended irq space for the link */ 510 451 irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i)); 511 452 irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq); 512 453 irq_set_handler_data(irq, md); 513 454 } 514 - 515 455 } 516 456 517 457 void nlm_dispatch_msi(int node, int lirq) ··· 532 454 link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE; 533 455 irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0)); 534 456 md = irq_get_handler_data(irqbase); 535 - status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) & 457 + if (cpu_is_xlp9xx()) 458 + 
status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) & 459 + md->msi_enabled_mask; 460 + else 461 + status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) & 536 462 md->msi_enabled_mask; 537 463 while (status) { 538 464 i = __ffs(status); ··· 554 472 link = lirq - PIC_PCIE_MSIX_IRQ_BASE; 555 473 irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0)); 556 474 md = irq_get_handler_data(irqbase); 557 - status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS); 475 + if (cpu_is_xlp9xx()) 476 + status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link)); 477 + else 478 + status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS); 558 479 559 480 /* narrow it down to the MSI-x vectors for our link */ 560 - status = (status >> (link * XLP_MSIXVEC_PER_LINK)) & 481 + if (!cpu_is_xlp9xx()) 482 + status = (status >> (link * XLP_MSIXVEC_PER_LINK)) & 561 483 ((1 << XLP_MSIXVEC_PER_LINK) - 1); 562 484 563 485 while (status) {
+1 -1
arch/mips/pci/ops-pmcmsp.c
··· 7 7 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net 8 8 * 9 9 * Much of the code is derived from the original DDB5074 port by 10 - * Geert Uytterhoeven <geert@sonycom.com> 10 + * Geert Uytterhoeven <geert@linux-m68k.org> 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify it 13 13 * under the terms of the GNU General Public License as published by the
+1 -1
arch/mips/pci/ops-tx3927.c
··· 11 11 * Define the pci_ops for TX3927. 12 12 * 13 13 * Much of the code is derived from the original DDB5074 port by 14 - * Geert Uytterhoeven <geert@sonycom.com> 14 + * Geert Uytterhoeven <geert@linux-m68k.org> 15 15 * 16 16 * This program is free software; you can redistribute it and/or modify it 17 17 * under the terms of the GNU General Public License as published by the
+6 -3
arch/mips/pci/ops-tx4927.c
··· 202 202 unsigned long val; 203 203 204 204 if (!strncmp(str, "trdyto=", 7)) { 205 - if (strict_strtoul(str + 7, 0, &val) == 0) 205 + u8 val = 0; 206 + if (kstrtou8(str + 7, 0, &val) == 0) 206 207 tx4927_pci_opts.trdyto = val; 207 208 return NULL; 208 209 } 209 210 if (!strncmp(str, "retryto=", 8)) { 210 - if (strict_strtoul(str + 8, 0, &val) == 0) 211 + u8 val = 0; 212 + if (kstrtou8(str + 8, 0, &val) == 0) 211 213 tx4927_pci_opts.retryto = val; 212 214 return NULL; 213 215 } 214 216 if (!strncmp(str, "gbwc=", 5)) { 215 - if (strict_strtoul(str + 5, 0, &val) == 0) 217 + u16 val; 218 + if (kstrtou16(str + 5, 0, &val) == 0) 216 219 tx4927_pci_opts.gbwc = val; 217 220 return NULL; 218 221 }
+131
arch/mips/pci/pci-virtio-guest.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2013 Cavium, Inc. 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/init.h> 11 + #include <linux/interrupt.h> 12 + #include <linux/pci.h> 13 + 14 + #include <uapi/asm/bitfield.h> 15 + #include <asm/byteorder.h> 16 + #include <asm/io.h> 17 + 18 + #define PCI_CONFIG_ADDRESS 0xcf8 19 + #define PCI_CONFIG_DATA 0xcfc 20 + 21 + union pci_config_address { 22 + struct { 23 + __BITFIELD_FIELD(unsigned enable_bit : 1, /* 31 */ 24 + __BITFIELD_FIELD(unsigned reserved : 7, /* 30 .. 24 */ 25 + __BITFIELD_FIELD(unsigned bus_number : 8, /* 23 .. 16 */ 26 + __BITFIELD_FIELD(unsigned devfn_number : 8, /* 15 .. 8 */ 27 + __BITFIELD_FIELD(unsigned register_number : 8, /* 7 .. 0 */ 28 + ))))); 29 + }; 30 + u32 w; 31 + }; 32 + 33 + int pcibios_plat_dev_init(struct pci_dev *dev) 34 + { 35 + return 0; 36 + } 37 + 38 + int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 39 + { 40 + return ((pin + slot) % 4)+ MIPS_IRQ_PCIA; 41 + } 42 + 43 + static void pci_virtio_guest_write_config_addr(struct pci_bus *bus, 44 + unsigned int devfn, int reg) 45 + { 46 + union pci_config_address pca = { .w = 0 }; 47 + 48 + pca.register_number = reg; 49 + pca.devfn_number = devfn; 50 + pca.bus_number = bus->number; 51 + pca.enable_bit = 1; 52 + 53 + outl(pca.w, PCI_CONFIG_ADDRESS); 54 + } 55 + 56 + static int pci_virtio_guest_write_config(struct pci_bus *bus, 57 + unsigned int devfn, int reg, int size, u32 val) 58 + { 59 + pci_virtio_guest_write_config_addr(bus, devfn, reg); 60 + 61 + switch (size) { 62 + case 1: 63 + outb(val, PCI_CONFIG_DATA + (reg & 3)); 64 + break; 65 + case 2: 66 + outw(val, PCI_CONFIG_DATA + (reg & 2)); 67 + break; 68 + case 4: 69 + outl(val, PCI_CONFIG_DATA); 70 + break; 71 + } 72 + 73 + return PCIBIOS_SUCCESSFUL; 74 + } 75 + 76 + static int 
pci_virtio_guest_read_config(struct pci_bus *bus, unsigned int devfn, 77 + int reg, int size, u32 *val) 78 + { 79 + pci_virtio_guest_write_config_addr(bus, devfn, reg); 80 + 81 + switch (size) { 82 + case 1: 83 + *val = inb(PCI_CONFIG_DATA + (reg & 3)); 84 + break; 85 + case 2: 86 + *val = inw(PCI_CONFIG_DATA + (reg & 2)); 87 + break; 88 + case 4: 89 + *val = inl(PCI_CONFIG_DATA); 90 + break; 91 + } 92 + return PCIBIOS_SUCCESSFUL; 93 + } 94 + 95 + static struct pci_ops pci_virtio_guest_ops = { 96 + .read = pci_virtio_guest_read_config, 97 + .write = pci_virtio_guest_write_config, 98 + }; 99 + 100 + static struct resource pci_virtio_guest_mem_resource = { 101 + .name = "Virtio MEM", 102 + .flags = IORESOURCE_MEM, 103 + .start = 0x10000000, 104 + .end = 0x1dffffff 105 + }; 106 + 107 + static struct resource pci_virtio_guest_io_resource = { 108 + .name = "Virtio IO", 109 + .flags = IORESOURCE_IO, 110 + .start = 0, 111 + .end = 0xffff 112 + }; 113 + 114 + static struct pci_controller pci_virtio_guest_controller = { 115 + .pci_ops = &pci_virtio_guest_ops, 116 + .mem_resource = &pci_virtio_guest_mem_resource, 117 + .io_resource = &pci_virtio_guest_io_resource, 118 + }; 119 + 120 + static int __init pci_virtio_guest_setup(void) 121 + { 122 + pr_err("pci_virtio_guest_setup\n"); 123 + 124 + /* Virtio comes pre-assigned */ 125 + pci_set_flags(PCI_PROBE_ONLY); 126 + 127 + pci_virtio_guest_controller.io_map_base = mips_io_port_base; 128 + register_pci_controller(&pci_virtio_guest_controller); 129 + return 0; 130 + } 131 + arch_initcall(pci_virtio_guest_setup);
-1
arch/mips/pmcs-msp71xx/Makefile
··· 10 10 obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o 11 11 obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o 12 12 obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o 13 - obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o
-76
arch/mips/pmcs-msp71xx/msp_eth.c
··· 38 38 #define MSP_ETHERNET_GPIO1 15 39 39 #define MSP_ETHERNET_GPIO2 16 40 40 41 - #ifdef CONFIG_MSP_HAS_TSMAC 42 - #define MSP_TSMAC_SIZE 0x10020 43 - #define MSP_TSMAC_ID "pmc_tsmac" 44 - 45 - static struct resource msp_tsmac0_resources[] = { 46 - [0] = { 47 - .start = MSP_MAC0_BASE, 48 - .end = MSP_MAC0_BASE + MSP_TSMAC_SIZE - 1, 49 - .flags = IORESOURCE_MEM, 50 - }, 51 - [1] = { 52 - .start = MSP_INT_MAC0, 53 - .end = MSP_INT_MAC0, 54 - .flags = IORESOURCE_IRQ, 55 - }, 56 - }; 57 - 58 - static struct resource msp_tsmac1_resources[] = { 59 - [0] = { 60 - .start = MSP_MAC1_BASE, 61 - .end = MSP_MAC1_BASE + MSP_TSMAC_SIZE - 1, 62 - .flags = IORESOURCE_MEM, 63 - }, 64 - [1] = { 65 - .start = MSP_INT_MAC1, 66 - .end = MSP_INT_MAC1, 67 - .flags = IORESOURCE_IRQ, 68 - }, 69 - }; 70 - static struct resource msp_tsmac2_resources[] = { 71 - [0] = { 72 - .start = MSP_MAC2_BASE, 73 - .end = MSP_MAC2_BASE + MSP_TSMAC_SIZE - 1, 74 - .flags = IORESOURCE_MEM, 75 - }, 76 - [1] = { 77 - .start = MSP_INT_SAR, 78 - .end = MSP_INT_SAR, 79 - .flags = IORESOURCE_IRQ, 80 - }, 81 - }; 82 - 83 - 84 - static struct platform_device tsmac_device[] = { 85 - [0] = { 86 - .name = MSP_TSMAC_ID, 87 - .id = 0, 88 - .num_resources = ARRAY_SIZE(msp_tsmac0_resources), 89 - .resource = msp_tsmac0_resources, 90 - }, 91 - [1] = { 92 - .name = MSP_TSMAC_ID, 93 - .id = 1, 94 - .num_resources = ARRAY_SIZE(msp_tsmac1_resources), 95 - .resource = msp_tsmac1_resources, 96 - }, 97 - [2] = { 98 - .name = MSP_TSMAC_ID, 99 - .id = 2, 100 - .num_resources = ARRAY_SIZE(msp_tsmac2_resources), 101 - .resource = msp_tsmac2_resources, 102 - }, 103 - }; 104 - #define msp_eth_devs tsmac_device 105 - 106 - #else 107 - /* If it is not TSMAC assume MSP_ETH (100Mbps) */ 108 41 #define MSP_ETH_ID "pmc_mspeth" 109 42 #define MSP_ETH_SIZE 0xE0 110 43 static struct resource msp_eth0_resources[] = { ··· 85 152 }; 86 153 #define msp_eth_devs mspeth_device 87 154 88 - #endif 89 155 int __init msp_eth_setup(void) 90 156 { 91 
157 int i, ret = 0; ··· 93 161 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO0); 94 162 msp_gpio_pin_hi(MSP_ETHERNET_GPIO0); 95 163 96 - #ifdef CONFIG_MSP_HAS_TSMAC 97 - /* 3 phys on boards with TSMAC */ 98 - msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO1); 99 - msp_gpio_pin_hi(MSP_ETHERNET_GPIO1); 100 - 101 - msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO2); 102 - msp_gpio_pin_hi(MSP_ETHERNET_GPIO2); 103 - #endif 104 164 for (i = 0; i < ARRAY_SIZE(msp_eth_devs); i++) { 105 165 ret = platform_device_register(&msp_eth_devs[i]); 106 166 printk(KERN_INFO "device: %d, return value = %d\n", i, ret);
+5 -11
arch/mips/pmcs-msp71xx/msp_irq.c
··· 32 32 33 33 /* vectored interrupt implementation */ 34 34 35 - /* SW0/1 interrupts are used for SMP/SMTC */ 35 + /* SW0/1 interrupts are used for SMP */ 36 36 static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } 37 37 static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } 38 38 static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } ··· 138 138 set_vi_handler(MSP_INT_SEC, sec_int_dispatch); 139 139 #ifdef CONFIG_MIPS_MT_SMP 140 140 msp_vsmp_int_init(); 141 - #elif defined CONFIG_MIPS_MT_SMTC 142 - /*Set hwmask for all platform devices */ 143 - irq_hwmask[MSP_INT_MAC0] = C_IRQ0; 144 - irq_hwmask[MSP_INT_MAC1] = C_IRQ1; 145 - irq_hwmask[MSP_INT_USB] = C_IRQ2; 146 - irq_hwmask[MSP_INT_SAR] = C_IRQ3; 147 - irq_hwmask[MSP_INT_SEC] = C_IRQ5; 148 - 149 141 #endif /* CONFIG_MIPS_MT_SMP */ 150 142 #endif /* CONFIG_MIPS_MT */ 151 143 /* setup the cascaded interrupts */ ··· 145 153 setup_irq(MSP_INT_PER, &per_cascade_msp); 146 154 147 155 #else 148 - /* setup the 2nd-level SLP register based interrupt controller */ 149 - /* VSMP /SMTC support support is not enabled for SLP */ 156 + /* 157 + * Setup the 2nd-level SLP register based interrupt controller. 158 + * VSMP support support is not enabled for SLP. 159 + */ 150 160 msp_slp_irq_init(); 151 161 152 162 /* setup the cascaded SLP/PER interrupts */
+1 -6
arch/mips/pmcs-msp71xx/msp_irq_cic.c
··· 120 120 * hurt for the others 121 121 */ 122 122 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); 123 - smtc_im_ack_irq(d->irq); 124 123 } 125 124 126 - /*Note: Limiting to VSMP . Not tested in SMTC */ 125 + /* Note: Limiting to VSMP. */ 127 126 128 127 #ifdef CONFIG_MIPS_MT_SMP 129 128 static int msp_cic_irq_set_affinity(struct irq_data *d, ··· 182 183 for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { 183 184 irq_set_chip_and_handler(i, &msp_cic_irq_controller, 184 185 handle_level_irq); 185 - #ifdef CONFIG_MIPS_MT_SMTC 186 - /* Mask of CIC interrupt */ 187 - irq_hwmask[i] = C_IRQ4; 188 - #endif 189 186 } 190 187 191 188 /* Initialize the PER interrupt sub-system */
-3
arch/mips/pmcs-msp71xx/msp_irq_per.c
··· 113 113 /* initialize all the IRQ descriptors */ 114 114 for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { 115 115 irq_set_chip(i, &msp_per_irq_controller); 116 - #ifdef CONFIG_MIPS_MT_SMTC 117 - irq_hwmask[i] = C_IRQ4; 118 - #endif 119 116 } 120 117 } 121 118
+1 -16
arch/mips/pmcs-msp71xx/msp_setup.c
··· 27 27 #endif 28 28 29 29 extern void msp_serial_setup(void); 30 - extern void pmctwiled_setup(void); 31 30 32 31 #if defined(CONFIG_PMC_MSP7120_EVAL) || \ 33 32 defined(CONFIG_PMC_MSP7120_GW) || \ ··· 147 148 pm_power_off = msp_power_off; 148 149 } 149 150 150 - extern struct plat_smp_ops msp_smtc_smp_ops; 151 - 152 151 void __init prom_init(void) 153 152 { 154 153 unsigned long family; ··· 227 230 */ 228 231 msp_serial_setup(); 229 232 230 - if (register_vsmp_smp_ops()) { 231 - #ifdef CONFIG_MIPS_MT_SMTC 232 - register_smp_ops(&msp_smtc_smp_ops); 233 - #endif 234 - } 235 - 236 - #ifdef CONFIG_PMCTWILED 237 - /* 238 - * Setup LED states before the subsys_initcall loads other 239 - * dependent drivers/modules. 240 - */ 241 - pmctwiled_setup(); 242 - #endif 233 + register_vsmp_smp_ops(); 243 234 }
-104
arch/mips/pmcs-msp71xx/msp_smtc.c
··· 1 - /* 2 - * MSP71xx Platform-specific hooks for SMP operation 3 - */ 4 - #include <linux/irq.h> 5 - #include <linux/init.h> 6 - 7 - #include <asm/mipsmtregs.h> 8 - #include <asm/mipsregs.h> 9 - #include <asm/smtc.h> 10 - #include <asm/smtc_ipi.h> 11 - 12 - /* VPE/SMP Prototype implements platform interfaces directly */ 13 - 14 - /* 15 - * Cause the specified action to be performed on a targeted "CPU" 16 - */ 17 - 18 - static void msp_smtc_send_ipi_single(int cpu, unsigned int action) 19 - { 20 - /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 21 - smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 22 - } 23 - 24 - static void msp_smtc_send_ipi_mask(const struct cpumask *mask, 25 - unsigned int action) 26 - { 27 - unsigned int i; 28 - 29 - for_each_cpu(i, mask) 30 - msp_smtc_send_ipi_single(i, action); 31 - } 32 - 33 - /* 34 - * Post-config but pre-boot cleanup entry point 35 - */ 36 - static void msp_smtc_init_secondary(void) 37 - { 38 - int myvpe; 39 - 40 - /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ 41 - myvpe = read_c0_tcbind() & TCBIND_CURVPE; 42 - if (myvpe > 0) 43 - change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | 44 - STATUSF_IP6 | STATUSF_IP7); 45 - smtc_init_secondary(); 46 - } 47 - 48 - /* 49 - * Platform "CPU" startup hook 50 - */ 51 - static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle) 52 - { 53 - smtc_boot_secondary(cpu, idle); 54 - } 55 - 56 - /* 57 - * SMP initialization finalization entry point 58 - */ 59 - static void msp_smtc_smp_finish(void) 60 - { 61 - smtc_smp_finish(); 62 - } 63 - 64 - /* 65 - * Hook for after all CPUs are online 66 - */ 67 - 68 - static void msp_smtc_cpus_done(void) 69 - { 70 - } 71 - 72 - /* 73 - * Platform SMP pre-initialization 74 - * 75 - * As noted above, we can assume a single CPU for now 76 - * but it may be multithreaded. 
77 - */ 78 - 79 - static void __init msp_smtc_smp_setup(void) 80 - { 81 - /* 82 - * we won't get the definitive value until 83 - * we've run smtc_prepare_cpus later, but 84 - */ 85 - 86 - if (read_c0_config3() & (1 << 2)) 87 - smp_num_siblings = smtc_build_cpu_map(0); 88 - } 89 - 90 - static void __init msp_smtc_prepare_cpus(unsigned int max_cpus) 91 - { 92 - smtc_prepare_cpus(max_cpus); 93 - } 94 - 95 - struct plat_smp_ops msp_smtc_smp_ops = { 96 - .send_ipi_single = msp_smtc_send_ipi_single, 97 - .send_ipi_mask = msp_smtc_send_ipi_mask, 98 - .init_secondary = msp_smtc_init_secondary, 99 - .smp_finish = msp_smtc_smp_finish, 100 - .cpus_done = msp_smtc_cpus_done, 101 - .boot_secondary = msp_smtc_boot_secondary, 102 - .smp_setup = msp_smtc_smp_setup, 103 - .prepare_cpus = msp_smtc_prepare_cpus, 104 - };
-90
arch/mips/pmcs-msp71xx/msp_usb.c
··· 75 75 .resource = msp_usbhost0_resources, 76 76 }, 77 77 }; 78 - 79 - /* MSP7140/MSP82XX has two USB2 hosts. */ 80 - #ifdef CONFIG_MSP_HAS_DUAL_USB 81 - static u64 msp_usbhost1_dma_mask = 0xffffffffUL; 82 - 83 - static struct resource msp_usbhost1_resources[] = { 84 - [0] = { /* EHCI-HS operational and capabilities registers */ 85 - .start = MSP_USB1_HS_START, 86 - .end = MSP_USB1_HS_END, 87 - .flags = IORESOURCE_MEM, 88 - }, 89 - [1] = { 90 - .start = MSP_INT_USB, 91 - .end = MSP_INT_USB, 92 - .flags = IORESOURCE_IRQ, 93 - }, 94 - [2] = { /* MSBus-to-AMBA bridge register space */ 95 - .start = MSP_USB1_MAB_START, 96 - .end = MSP_USB1_MAB_END, 97 - .flags = IORESOURCE_MEM, 98 - }, 99 - [3] = { /* Identification and general hardware parameters */ 100 - .start = MSP_USB1_ID_START, 101 - .end = MSP_USB1_ID_END, 102 - .flags = IORESOURCE_MEM, 103 - }, 104 - }; 105 - 106 - static struct mspusb_device msp_usbhost1_device = { 107 - .dev = { 108 - .name = "pmcmsp-ehci", 109 - .id = 1, 110 - .dev = { 111 - .dma_mask = &msp_usbhost1_dma_mask, 112 - .coherent_dma_mask = 0xffffffffUL, 113 - }, 114 - .num_resources = ARRAY_SIZE(msp_usbhost1_resources), 115 - .resource = msp_usbhost1_resources, 116 - }, 117 - }; 118 - #endif /* CONFIG_MSP_HAS_DUAL_USB */ 119 78 #endif /* CONFIG_USB_EHCI_HCD */ 120 79 121 80 #if defined(CONFIG_USB_GADGET) ··· 116 157 .resource = msp_usbdev0_resources, 117 158 }, 118 159 }; 119 - 120 - #ifdef CONFIG_MSP_HAS_DUAL_USB 121 - static struct resource msp_usbdev1_resources[] = { 122 - [0] = { /* EHCI-HS operational and capabilities registers */ 123 - .start = MSP_USB1_HS_START, 124 - .end = MSP_USB1_HS_END, 125 - .flags = IORESOURCE_MEM, 126 - }, 127 - [1] = { 128 - .start = MSP_INT_USB, 129 - .end = MSP_INT_USB, 130 - .flags = IORESOURCE_IRQ, 131 - }, 132 - [2] = { /* MSBus-to-AMBA bridge register space */ 133 - .start = MSP_USB1_MAB_START, 134 - .end = MSP_USB1_MAB_END, 135 - .flags = IORESOURCE_MEM, 136 - }, 137 - [3] = { /* Identification and 
general hardware parameters */ 138 - .start = MSP_USB1_ID_START, 139 - .end = MSP_USB1_ID_END, 140 - .flags = IORESOURCE_MEM, 141 - }, 142 - }; 143 - 144 - /* This may need to be converted to a mspusb_device, too. */ 145 - static struct mspusb_device msp_usbdev1_device = { 146 - .dev = { 147 - .name = "msp71xx_udc", 148 - .id = 0, 149 - .dev = { 150 - .dma_mask = &msp_usbdev_dma_mask, 151 - .coherent_dma_mask = 0xffffffffUL, 152 - }, 153 - .num_resources = ARRAY_SIZE(msp_usbdev1_resources), 154 - .resource = msp_usbdev1_resources, 155 - }, 156 - }; 157 - 158 - #endif /* CONFIG_MSP_HAS_DUAL_USB */ 159 160 #endif /* CONFIG_USB_GADGET */ 160 161 161 162 static int __init msp_usb_setup(void) ··· 150 231 #if defined(CONFIG_USB_EHCI_HCD) 151 232 msp_devs[0] = &msp_usbhost0_device.dev; 152 233 ppfinit("platform add USB HOST done %s.\n", msp_devs[0]->name); 153 - #ifdef CONFIG_MSP_HAS_DUAL_USB 154 - msp_devs[1] = &msp_usbhost1_device.dev; 155 - ppfinit("platform add USB HOST done %s.\n", msp_devs[1]->name); 156 - #endif 157 234 #else 158 235 ppfinit("%s: echi_hcd not supported\n", __FILE__); 159 236 #endif /* CONFIG_USB_EHCI_HCD */ ··· 159 244 msp_devs[0] = &msp_usbdev0_device.dev; 160 245 ppfinit("platform add USB DEVICE done %s.\n" 161 246 , msp_devs[0]->name); 162 - #ifdef CONFIG_MSP_HAS_DUAL_USB 163 - msp_devs[1] = &msp_usbdev1_device.dev; 164 - ppfinit("platform add USB DEVICE done %s.\n" 165 - , msp_devs[1]->name); 166 - #endif 167 247 #else 168 248 ppfinit("%s: usb_gadget not supported\n", __FILE__); 169 249 #endif /* CONFIG_USB_GADGET */
-73
arch/mips/pnx833x/common/platform.c
··· 33 33 #include <linux/mtd/nand.h> 34 34 #include <linux/mtd/partitions.h> 35 35 36 - #ifdef CONFIG_I2C_PNX0105 37 - /* Until i2c driver available in kernel.*/ 38 - #include <linux/i2c-pnx0105.h> 39 - #endif 40 - 41 36 #include <irq.h> 42 37 #include <irq-mapping.h> 43 38 #include <pnx833x.h> ··· 128 133 .num_resources = ARRAY_SIZE(pnx833x_usb_ehci_resources), 129 134 .resource = pnx833x_usb_ehci_resources, 130 135 }; 131 - 132 - #ifdef CONFIG_I2C_PNX0105 133 - static struct resource pnx833x_i2c0_resources[] = { 134 - { 135 - .start = PNX833X_I2C0_PORTS_START, 136 - .end = PNX833X_I2C0_PORTS_END, 137 - .flags = IORESOURCE_MEM, 138 - }, 139 - { 140 - .start = PNX833X_PIC_I2C0_INT, 141 - .end = PNX833X_PIC_I2C0_INT, 142 - .flags = IORESOURCE_IRQ, 143 - }, 144 - }; 145 - 146 - static struct resource pnx833x_i2c1_resources[] = { 147 - { 148 - .start = PNX833X_I2C1_PORTS_START, 149 - .end = PNX833X_I2C1_PORTS_END, 150 - .flags = IORESOURCE_MEM, 151 - }, 152 - { 153 - .start = PNX833X_PIC_I2C1_INT, 154 - .end = PNX833X_PIC_I2C1_INT, 155 - .flags = IORESOURCE_IRQ, 156 - }, 157 - }; 158 - 159 - static struct i2c_pnx0105_dev pnx833x_i2c_dev[] = { 160 - { 161 - .base = PNX833X_I2C0_PORTS_START, 162 - .irq = -1, /* should be PNX833X_PIC_I2C0_INT but polling is faster */ 163 - .clock = 6, /* 0 == 400 kHz, 4 == 100 kHz(Maximum HDMI), 6 = 50kHz(Preferred HDCP) */ 164 - .bus_addr = 0, /* no slave support */ 165 - }, 166 - { 167 - .base = PNX833X_I2C1_PORTS_START, 168 - .irq = -1, /* on high freq, polling is faster */ 169 - /*.irq = PNX833X_PIC_I2C1_INT,*/ 170 - .clock = 4, /* 0 == 400 kHz, 4 == 100 kHz. 
100 kHz seems a safe default for now */ 171 - .bus_addr = 0, /* no slave support */ 172 - }, 173 - }; 174 - 175 - static struct platform_device pnx833x_i2c0_device = { 176 - .name = "i2c-pnx0105", 177 - .id = 0, 178 - .dev = { 179 - .platform_data = &pnx833x_i2c_dev[0], 180 - }, 181 - .num_resources = ARRAY_SIZE(pnx833x_i2c0_resources), 182 - .resource = pnx833x_i2c0_resources, 183 - }; 184 - 185 - static struct platform_device pnx833x_i2c1_device = { 186 - .name = "i2c-pnx0105", 187 - .id = 1, 188 - .dev = { 189 - .platform_data = &pnx833x_i2c_dev[1], 190 - }, 191 - .num_resources = ARRAY_SIZE(pnx833x_i2c1_resources), 192 - .resource = pnx833x_i2c1_resources, 193 - }; 194 - #endif 195 136 196 137 static u64 ethernet_dmamask = DMA_BIT_MASK(32); 197 138 ··· 225 294 static struct platform_device *pnx833x_platform_devices[] __initdata = { 226 295 &pnx833x_uart_device, 227 296 &pnx833x_usb_ehci_device, 228 - #ifdef CONFIG_I2C_PNX0105 229 - &pnx833x_i2c0_device, 230 - &pnx833x_i2c1_device, 231 - #endif 232 297 &pnx833x_ethernet_device, 233 298 &pnx833x_sata_device, 234 299 &pnx833x_flash_nand,
+28 -14
arch/mips/sgi-ip22/ip22-gio.c
··· 19 19 } gio_name_table[] = { 20 20 { .name = "SGI Impact", .id = 0x10 }, 21 21 { .name = "Phobos G160", .id = 0x35 }, 22 + { .name = "Phobos G130", .id = 0x36 }, 23 + { .name = "Phobos G100", .id = 0x37 }, 24 + { .name = "Set Engineering GFE", .id = 0x38 }, 22 25 /* fake IDs */ 23 26 { .name = "SGI Newport", .id = 0x7e }, 24 27 { .name = "SGI GR2/GR3", .id = 0x7f }, ··· 296 293 * data matches 297 294 */ 298 295 ptr8 = (void *)CKSEG1ADDR(addr + 3); 299 - get_dbe(tmp8, ptr8); 296 + if (get_dbe(tmp8, ptr8)) { 297 + /* 298 + * 32bit access worked, but 8bit doesn't 299 + * so we don't see phantom reads on 300 + * a pipelined bus, but a real card which 301 + * doesn't support 8 bit reads 302 + */ 303 + *res = tmp32; 304 + return 1; 305 + } 300 306 ptr16 = (void *)CKSEG1ADDR(addr + 2); 301 307 get_dbe(tmp16, ptr16); 302 308 if (tmp8 == (tmp16 & 0xff) && ··· 336 324 } 337 325 338 326 339 - static void ip22_check_gio(int slotno, unsigned long addr) 327 + static void ip22_check_gio(int slotno, unsigned long addr, int irq) 340 328 { 341 329 const char *name = "Unknown"; 342 330 struct gio_device *gio_dev; ··· 350 338 else { 351 339 if (!ip22_gio_id(addr, &tmp)) { 352 340 /* 353 - * no GIO signature at start address of slot, but 354 - * Newport doesn't have one, so let's check usea 355 - * status register 341 + * no GIO signature at start address of slot 342 + * since Newport doesn't have one, we check if 343 + * user status register is readable 356 344 */ 357 345 if (ip22_gio_id(addr + NEWPORT_USTATUS_OFFS, &tmp)) 358 346 tmp = 0x7e; ··· 381 369 gio_dev->resource.start = addr; 382 370 gio_dev->resource.end = addr + 0x3fffff; 383 371 gio_dev->resource.flags = IORESOURCE_MEM; 372 + gio_dev->irq = irq; 384 373 dev_set_name(&gio_dev->dev, "%d", slotno); 385 374 gio_device_register(gio_dev); 386 375 } else ··· 421 408 request_resource(&iomem_resource, &gio_bus_resource); 422 409 printk(KERN_INFO "GIO: Probing bus...\n"); 423 410 424 - if (ip22_is_fullhouse() || 425 - 
!get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) { 426 - /* Indigo2 and ChallengeS */ 427 - ip22_check_gio(0, GIO_SLOT_GFX_BASE); 428 - ip22_check_gio(1, GIO_SLOT_EXP0_BASE); 411 + if (ip22_is_fullhouse()) { 412 + /* Indigo2 */ 413 + ip22_check_gio(0, GIO_SLOT_GFX_BASE, SGI_GIO_1_IRQ); 414 + ip22_check_gio(1, GIO_SLOT_EXP0_BASE, SGI_GIO_1_IRQ); 429 415 } else { 430 - /* Indy */ 431 - ip22_check_gio(0, GIO_SLOT_GFX_BASE); 432 - ip22_check_gio(1, GIO_SLOT_EXP0_BASE); 433 - ip22_check_gio(2, GIO_SLOT_EXP1_BASE); 416 + /* Indy/Challenge S */ 417 + if (get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) 418 + ip22_check_gio(0, GIO_SLOT_GFX_BASE, 419 + SGI_GIO_0_IRQ); 420 + ip22_check_gio(1, GIO_SLOT_EXP0_BASE, SGI_GIOEXP0_IRQ); 421 + ip22_check_gio(2, GIO_SLOT_EXP1_BASE, SGI_GIOEXP1_IRQ); 434 422 } 435 423 } else 436 424 device_unregister(&gio_bus);
+6 -1
arch/mips/sgi-ip22/ip22-int.c
··· 119 119 } else 120 120 irq = lc0msk_to_irqnr[mask]; 121 121 122 - /* if irq == 0, then the interrupt has already been cleared */ 122 + /* 123 + * workaround for INT2 bug; if irq == 0, INT2 has seen a fifo full 124 + * irq, but failed to latch it into status register 125 + */ 123 126 if (irq) 124 127 do_IRQ(irq); 128 + else 129 + do_IRQ(SGINT_LOCAL0 + 0); 125 130 } 126 131 127 132 static void indy_local1_irqdispatch(void)
-5
arch/mips/sgi-ip27/ip27-smp.c
··· 186 186 local_irq_enable(); 187 187 } 188 188 189 - static void __init ip27_cpus_done(void) 190 - { 191 - } 192 - 193 189 /* 194 190 * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we 195 191 * set sp to the kernel stack of the newly created idle process, gp to the proc ··· 232 236 .send_ipi_mask = ip27_send_ipi_mask, 233 237 .init_secondary = ip27_init_secondary, 234 238 .smp_finish = ip27_smp_finish, 235 - .cpus_done = ip27_cpus_done, 236 239 .boot_secondary = ip27_boot_secondary, 237 240 .smp_setup = ip27_smp_setup, 238 241 .prepare_cpus = ip27_prepare_cpus,
-11
arch/mips/sibyte/bcm1480/irq.c
··· 347 347 unsigned int cpu = smp_processor_id(); 348 348 unsigned int pending; 349 349 350 - #ifdef CONFIG_SIBYTE_BCM1480_PROF 351 - /* Set compare to count to silence count/compare timer interrupts */ 352 - write_c0_compare(read_c0_count()); 353 - #endif 354 - 355 350 pending = read_c0_cause() & read_c0_status(); 356 - 357 - #ifdef CONFIG_SIBYTE_BCM1480_PROF 358 - if (pending & CAUSEF_IP7) /* Cpu performance counter interrupt */ 359 - sbprof_cpu_intr(); 360 - else 361 - #endif 362 351 363 352 if (pending & CAUSEF_IP4) 364 353 do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
-8
arch/mips/sibyte/bcm1480/smp.c
··· 115 115 } 116 116 117 117 /* 118 - * Final cleanup after all secondaries booted 119 - */ 120 - static void bcm1480_cpus_done(void) 121 - { 122 - } 123 - 124 - /* 125 118 * Setup the PC, SP, and GP of a secondary processor and start it 126 119 * running! 127 120 */ ··· 163 170 .send_ipi_mask = bcm1480_send_ipi_mask, 164 171 .init_secondary = bcm1480_init_secondary, 165 172 .smp_finish = bcm1480_smp_finish, 166 - .cpus_done = bcm1480_cpus_done, 167 173 .boot_secondary = bcm1480_boot_secondary, 168 174 .smp_setup = bcm1480_smp_setup, 169 175 .prepare_cpus = bcm1480_prepare_cpus,
-8
arch/mips/sibyte/sb1250/smp.c
··· 103 103 } 104 104 105 105 /* 106 - * Final cleanup after all secondaries booted 107 - */ 108 - static void sb1250_cpus_done(void) 109 - { 110 - } 111 - 112 - /* 113 106 * Setup the PC, SP, and GP of a secondary processor and start it 114 107 * running! 115 108 */ ··· 151 158 .send_ipi_mask = sb1250_send_ipi_mask, 152 159 .init_secondary = sb1250_init_secondary, 153 160 .smp_finish = sb1250_smp_finish, 154 - .cpus_done = sb1250_cpus_done, 155 161 .boot_secondary = sb1250_boot_secondary, 156 162 .smp_setup = sb1250_smp_setup, 157 163 .prepare_cpus = sb1250_prepare_cpus,
+2 -2
arch/mips/txx9/generic/setup.c
··· 309 309 txx9_board_vec = find_board_byname(str + 6); 310 310 continue; 311 311 } else if (strncmp(str, "masterclk=", 10) == 0) { 312 - unsigned long val; 313 - if (strict_strtoul(str + 10, 10, &val) == 0) 312 + unsigned int val; 313 + if (kstrtouint(str + 10, 10, &val) == 0) 314 314 txx9_master_clock = val; 315 315 continue; 316 316 } else if (strcmp(str, "icdisable") == 0) {
+5
drivers/cpuidle/Kconfig
··· 35 35 source "drivers/cpuidle/Kconfig.arm" 36 36 endmenu 37 37 38 + menu "MIPS CPU Idle Drivers" 39 + depends on MIPS 40 + source "drivers/cpuidle/Kconfig.mips" 41 + endmenu 42 + 38 43 menu "POWERPC CPU Idle Drivers" 39 44 depends on PPC 40 45 source "drivers/cpuidle/Kconfig.powerpc"
+17
drivers/cpuidle/Kconfig.mips
··· 1 + # 2 + # MIPS CPU Idle Drivers 3 + # 4 + config MIPS_CPS_CPUIDLE 5 + bool "CPU Idle driver for MIPS CPS platforms" 6 + depends on CPU_IDLE 7 + depends on SYS_SUPPORTS_MIPS_CPS 8 + select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT 9 + select GENERIC_CLOCKEVENTS_BROADCAST if SMP 10 + select MIPS_CPS_PM 11 + default y 12 + help 13 + Select this option to enable processor idle state management 14 + through cpuidle for systems built around the MIPS Coherent 15 + Processing System (CPS) architecture. In order to make use of 16 + the deepest idle states you will need to ensure that you are 17 + also using the CONFIG_MIPS_CPS SMP implementation.
+4
drivers/cpuidle/Makefile
··· 18 18 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o 19 19 20 20 ############################################################################### 21 + # MIPS drivers 22 + obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o 23 + 24 + ############################################################################### 21 25 # POWERPC drivers 22 26 obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o 23 27 obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
+186
drivers/cpuidle/cpuidle-cps.c
··· 1 + /* 2 + * Copyright (C) 2014 Imagination Technologies 3 + * Author: Paul Burton <paul.burton@imgtec.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #include <linux/cpu_pm.h> 12 + #include <linux/cpuidle.h> 13 + #include <linux/init.h> 14 + 15 + #include <asm/idle.h> 16 + #include <asm/pm-cps.h> 17 + 18 + /* Enumeration of the various idle states this driver may enter */ 19 + enum cps_idle_state { 20 + STATE_WAIT = 0, /* MIPS wait instruction, coherent */ 21 + STATE_NC_WAIT, /* MIPS wait instruction, non-coherent */ 22 + STATE_CLOCK_GATED, /* Core clock gated */ 23 + STATE_POWER_GATED, /* Core power gated */ 24 + STATE_COUNT 25 + }; 26 + 27 + static int cps_nc_enter(struct cpuidle_device *dev, 28 + struct cpuidle_driver *drv, int index) 29 + { 30 + enum cps_pm_state pm_state; 31 + int err; 32 + 33 + /* 34 + * At least one core must remain powered up & clocked in order for the 35 + * system to have any hope of functioning. 
36 + * 37 + * TODO: don't treat core 0 specially, just prevent the final core 38 + * TODO: remap interrupt affinity temporarily 39 + */ 40 + if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT)) 41 + index = STATE_NC_WAIT; 42 + 43 + /* Select the appropriate cps_pm_state */ 44 + switch (index) { 45 + case STATE_NC_WAIT: 46 + pm_state = CPS_PM_NC_WAIT; 47 + break; 48 + case STATE_CLOCK_GATED: 49 + pm_state = CPS_PM_CLOCK_GATED; 50 + break; 51 + case STATE_POWER_GATED: 52 + pm_state = CPS_PM_POWER_GATED; 53 + break; 54 + default: 55 + BUG(); 56 + return -EINVAL; 57 + } 58 + 59 + /* Notify listeners the CPU is about to power down */ 60 + if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter()) 61 + return -EINTR; 62 + 63 + /* Enter that state */ 64 + err = cps_pm_enter_state(pm_state); 65 + 66 + /* Notify listeners the CPU is back up */ 67 + if (pm_state == CPS_PM_POWER_GATED) 68 + cpu_pm_exit(); 69 + 70 + return err ?: index; 71 + } 72 + 73 + static struct cpuidle_driver cps_driver = { 74 + .name = "cpc_cpuidle", 75 + .owner = THIS_MODULE, 76 + .states = { 77 + [STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE, 78 + [STATE_NC_WAIT] = { 79 + .enter = cps_nc_enter, 80 + .exit_latency = 200, 81 + .target_residency = 450, 82 + .flags = CPUIDLE_FLAG_TIME_VALID, 83 + .name = "nc-wait", 84 + .desc = "non-coherent MIPS wait", 85 + }, 86 + [STATE_CLOCK_GATED] = { 87 + .enter = cps_nc_enter, 88 + .exit_latency = 300, 89 + .target_residency = 700, 90 + .flags = CPUIDLE_FLAG_TIME_VALID | 91 + CPUIDLE_FLAG_TIMER_STOP, 92 + .name = "clock-gated", 93 + .desc = "core clock gated", 94 + }, 95 + [STATE_POWER_GATED] = { 96 + .enter = cps_nc_enter, 97 + .exit_latency = 600, 98 + .target_residency = 1000, 99 + .flags = CPUIDLE_FLAG_TIME_VALID | 100 + CPUIDLE_FLAG_TIMER_STOP, 101 + .name = "power-gated", 102 + .desc = "core power gated", 103 + }, 104 + }, 105 + .state_count = STATE_COUNT, 106 + .safe_state_index = 0, 107 + }; 108 + 109 + static void __init cps_cpuidle_unregister(void) 110 + { 111 
+ int cpu; 112 + struct cpuidle_device *device; 113 + 114 + for_each_possible_cpu(cpu) { 115 + device = &per_cpu(cpuidle_dev, cpu); 116 + cpuidle_unregister_device(device); 117 + } 118 + 119 + cpuidle_unregister_driver(&cps_driver); 120 + } 121 + 122 + static int __init cps_cpuidle_init(void) 123 + { 124 + int err, cpu, core, i; 125 + struct cpuidle_device *device; 126 + 127 + /* Detect supported states */ 128 + if (!cps_pm_support_state(CPS_PM_POWER_GATED)) 129 + cps_driver.state_count = STATE_CLOCK_GATED + 1; 130 + if (!cps_pm_support_state(CPS_PM_CLOCK_GATED)) 131 + cps_driver.state_count = STATE_NC_WAIT + 1; 132 + if (!cps_pm_support_state(CPS_PM_NC_WAIT)) 133 + cps_driver.state_count = STATE_WAIT + 1; 134 + 135 + /* Inform the user if some states are unavailable */ 136 + if (cps_driver.state_count < STATE_COUNT) { 137 + pr_info("cpuidle-cps: limited to "); 138 + switch (cps_driver.state_count - 1) { 139 + case STATE_WAIT: 140 + pr_cont("coherent wait\n"); 141 + break; 142 + case STATE_NC_WAIT: 143 + pr_cont("non-coherent wait\n"); 144 + break; 145 + case STATE_CLOCK_GATED: 146 + pr_cont("clock gating\n"); 147 + break; 148 + } 149 + } 150 + 151 + /* 152 + * Set the coupled flag on the appropriate states if this system 153 + * requires it. 
154 + */ 155 + if (coupled_coherence) 156 + for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++) 157 + cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED; 158 + 159 + err = cpuidle_register_driver(&cps_driver); 160 + if (err) { 161 + pr_err("Failed to register CPS cpuidle driver\n"); 162 + return err; 163 + } 164 + 165 + for_each_possible_cpu(cpu) { 166 + core = cpu_data[cpu].core; 167 + device = &per_cpu(cpuidle_dev, cpu); 168 + device->cpu = cpu; 169 + #ifdef CONFIG_MIPS_MT 170 + cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]); 171 + #endif 172 + 173 + err = cpuidle_register_device(device); 174 + if (err) { 175 + pr_err("Failed to register CPU%d cpuidle device\n", 176 + cpu); 177 + goto err_out; 178 + } 179 + } 180 + 181 + return 0; 182 + err_out: 183 + cps_cpuidle_unregister(); 184 + return err; 185 + } 186 + device_initcall(cps_cpuidle_init);
-40
drivers/usb/host/ehci-pmcmsp.c
··· 68 68 69 69 /* set TWI GPIO USB_HOST_DEV pin high */ 70 70 gpio_direction_output(MSP_PIN_USB0_HOST_DEV, 1); 71 - #ifdef CONFIG_MSP_HAS_DUAL_USB 72 - gpio_direction_output(MSP_PIN_USB1_HOST_DEV, 1); 73 - #endif 74 71 } 75 72 76 73 /* called during probe() after chip reset completes */ ··· 245 248 usb_put_hcd(hcd); 246 249 } 247 250 248 - #ifdef CONFIG_MSP_HAS_DUAL_USB 249 - /* 250 - * Wrapper around the main ehci_irq. Since both USB host controllers are 251 - * sharing the same IRQ, need to first determine whether we're the intended 252 - * recipient of this interrupt. 253 - */ 254 - static irqreturn_t ehci_msp_irq(struct usb_hcd *hcd) 255 - { 256 - u32 int_src; 257 - struct device *dev = hcd->self.controller; 258 - struct platform_device *pdev; 259 - struct mspusb_device *mdev; 260 - struct ehci_hcd *ehci = hcd_to_ehci(hcd); 261 - /* need to reverse-map a couple of containers to get our device */ 262 - pdev = to_platform_device(dev); 263 - mdev = to_mspusb_device(pdev); 264 - 265 - /* Check to see if this interrupt is for this host controller */ 266 - int_src = ehci_readl(ehci, &mdev->mab_regs->int_stat); 267 - if (int_src & (1 << pdev->id)) 268 - return ehci_irq(hcd); 269 - 270 - /* Not for this device */ 271 - return IRQ_NONE; 272 - } 273 - #endif /* DUAL_USB */ 274 - 275 251 static const struct hc_driver ehci_msp_hc_driver = { 276 252 .description = hcd_name, 277 253 .product_desc = "PMC MSP EHCI", ··· 253 283 /* 254 284 * generic hardware linkage 255 285 */ 256 - #ifdef CONFIG_MSP_HAS_DUAL_USB 257 - .irq = ehci_msp_irq, 258 - #else 259 286 .irq = ehci_irq, 260 - #endif 261 287 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 262 288 263 289 /* ··· 300 334 return -ENODEV; 301 335 302 336 gpio_request(MSP_PIN_USB0_HOST_DEV, "USB0_HOST_DEV_GPIO"); 303 - #ifdef CONFIG_MSP_HAS_DUAL_USB 304 - gpio_request(MSP_PIN_USB1_HOST_DEV, "USB1_HOST_DEV_GPIO"); 305 - #endif 306 337 307 338 ret = usb_hcd_msp_probe(&ehci_msp_hc_driver, pdev); 308 339 ··· 314 351 315 352 /* free TWI 
GPIO USB_HOST_DEV pin */ 316 353 gpio_free(MSP_PIN_USB0_HOST_DEV); 317 - #ifdef CONFIG_MSP_HAS_DUAL_USB 318 - gpio_free(MSP_PIN_USB1_HOST_DEV); 319 - #endif 320 354 321 355 return 0; 322 356 }
+1
include/linux/cpuidle.h
··· 84 84 }; 85 85 86 86 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); 87 + DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); 87 88 88 89 /** 89 90 * cpuidle_get_last_residency - retrieves the last state's residency time
+3
include/uapi/linux/kvm_para.h
··· 20 20 #define KVM_HC_FEATURES 3 21 21 #define KVM_HC_PPC_MAP_MAGIC_PAGE 4 22 22 #define KVM_HC_KICK_CPU 5 23 + #define KVM_HC_MIPS_GET_CLOCK_FREQ 6 24 + #define KVM_HC_MIPS_EXIT_VM 7 25 + #define KVM_HC_MIPS_CONSOLE_OUTPUT 8 23 26 24 27 /* 25 28 * hypercalls use architecture specific