Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
"The MIPS bits for 3.8. This also includes a bunch of fixes that were
sitting in the linux-mips.org git tree for a long time. This pull
request contains updates to several OCTEON drivers and the board
support code for BCM47XX, BCM63XX, XLP, XLR, XLS, lantiq, Loongson1B,
updates to the SSB bus support, MIPS kexec code and adds support for
kdump.

When pulling this, there are two expected merge conflicts in
include/linux/bcma/bcma_driver_chipcommon.h which are trivial to
resolve, just remove the conflict markers and keep both alternatives."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (90 commits)
MIPS: PMC-Sierra Yosemite: Remove support.
VIDEO: Newport: Fix console crashes
MIPS: wrppmc: Fix build of PCI code.
MIPS: IP22/IP28: Fix build of EISA code.
MIPS: RB532: Fix build of prom code.
MIPS: PowerTV: Fix build.
MIPS: IP27: Correct broken grammar in ops-bridge.c
MIPS: Highmem: Fix build error if CONFIG_DEBUG_HIGHMEM is disabled
MIPS: Fix potential corruption
MIPS: Fix for warning from FPU emulation code
MIPS: Handle COP3 Unusable exception as COP1X for FP emulation
MIPS: Fix poweroff failure when HOTPLUG_CPU configured.
MIPS: MT: Fix build with CONFIG_UIDGID_STRICT_TYPE_CHECKS=y
MIPS: Remove unused smvp.h
MIPS/EDAC: Improve OCTEON EDAC support.
MIPS: OCTEON: Add definitions for OCTEON memory controller registers.
MIPS: OCTEON: Add OCTEON family definitions to octeon-model.h
ata: pata_octeon_cf: Use correct byte order for DMA in when built little-endian.
MIPS/OCTEON/ata: Convert pata_octeon_cf.c to use device tree.
MIPS: Remove usage of CEVT_R4K_LIB config option.
...

+9297 -4539
+9
MAINTAINERS
··· 2751 2751 S: Maintained 2752 2752 F: drivers/edac/amd64_edac* 2753 2753 2754 + EDAC-CAVIUM 2755 + M: Ralf Baechle <ralf@linux-mips.org> 2756 + M: David Daney <david.daney@cavium.com> 2757 + L: linux-edac@vger.kernel.org 2758 + L: linux-mips@linux-mips.org 2759 + W: bluesmoke.sourceforge.net 2760 + S: Supported 2761 + F: drivers/edac/octeon_edac* 2762 + 2754 2763 EDAC-E752X 2755 2764 M: Mark Gross <mark.gross@intel.com> 2756 2765 M: Doug Thompson <dougthompson@xmission.com>
+65 -76
arch/mips/Kconfig
··· 19 19 select HAVE_KRETPROBES 20 20 select HAVE_DEBUG_KMEMLEAK 21 21 select ARCH_BINFMT_ELF_RANDOMIZE_PIE 22 + select HAVE_ARCH_TRANSPARENT_HUGEPAGE 22 23 select RTC_LIB if !MACH_LOONGSON 23 24 select GENERIC_ATOMIC64 if !64BIT 24 25 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE ··· 56 55 config MIPS_ALCHEMY 57 56 bool "Alchemy processor based machines" 58 57 select 64BIT_PHYS_ADDR 59 - select CEVT_R4K_LIB 60 - select CSRC_R4K_LIB 58 + select CEVT_R4K 59 + select CSRC_R4K 61 60 select IRQ_CPU 62 61 select SYS_HAS_CPU_MIPS32_R1 63 62 select SYS_SUPPORTS_32BIT_KERNEL ··· 108 107 109 108 config BCM47XX 110 109 bool "Broadcom BCM47XX based boards" 110 + select ARCH_WANT_OPTIONAL_GPIOLIB 111 111 select CEVT_R4K 112 112 select CSRC_R4K 113 113 select DMA_NONCOHERENT 114 + select FW_CFE 114 115 select HW_HAS_PCI 115 116 select IRQ_CPU 116 117 select SYS_SUPPORTS_32BIT_KERNEL 117 118 select SYS_SUPPORTS_LITTLE_ENDIAN 118 - select GENERIC_GPIO 119 119 select SYS_HAS_EARLY_PRINTK 120 - select CFE 121 120 help 122 121 Support for BCM47XX based boards 123 122 ··· 194 193 195 194 config MACH_JAZZ 196 195 bool "Jazz family of machines" 197 - select ARC 198 - select ARC32 196 + select FW_ARC 197 + select FW_ARC32 199 198 select ARCH_MAY_HAVE_PC_FDC 200 199 select CEVT_R4K 201 200 select CSRC_R4K ··· 418 417 of integrated peripherals, interfaces and DSPs in addition to 419 418 a variety of MIPS cores. 
420 419 421 - config PMC_YOSEMITE 422 - bool "PMC-Sierra Yosemite eval board" 423 - select CEVT_R4K 424 - select CSRC_R4K 425 - select DMA_COHERENT 426 - select HW_HAS_PCI 427 - select IRQ_CPU 428 - select IRQ_CPU_RM7K 429 - select IRQ_CPU_RM9K 430 - select SWAP_IO_SPACE 431 - select SYS_HAS_CPU_RM9000 432 - select SYS_HAS_EARLY_PRINTK 433 - select SYS_SUPPORTS_32BIT_KERNEL 434 - select SYS_SUPPORTS_64BIT_KERNEL 435 - select SYS_SUPPORTS_BIG_ENDIAN 436 - select SYS_SUPPORTS_HIGHMEM 437 - select SYS_SUPPORTS_SMP 438 - help 439 - Yosemite is an evaluation board for the RM9000x2 processor 440 - manufactured by PMC-Sierra. 441 - 442 420 config POWERTV 443 421 bool "Cisco PowerTV" 444 422 select BOOT_ELF32 ··· 438 458 439 459 config SGI_IP22 440 460 bool "SGI IP22 (Indy/Indigo2)" 441 - select ARC 442 - select ARC32 461 + select FW_ARC 462 + select FW_ARC32 443 463 select BOOT_ELF32 444 464 select CEVT_R4K 445 465 select CSRC_R4K ··· 478 498 479 499 config SGI_IP27 480 500 bool "SGI IP27 (Origin200/2000)" 481 - select ARC 482 - select ARC64 501 + select FW_ARC 502 + select FW_ARC64 483 503 select BOOT_ELF64 484 504 select DEFAULT_SGI_PARTITION 485 505 select DMA_COHERENT ··· 499 519 config SGI_IP28 500 520 bool "SGI IP28 (Indigo2 R10k) (EXPERIMENTAL)" 501 521 depends on EXPERIMENTAL 502 - select ARC 503 - select ARC64 522 + select FW_ARC 523 + select FW_ARC64 504 524 select BOOT_ELF64 505 525 select CEVT_R4K 506 526 select CSRC_R4K ··· 535 555 536 556 config SGI_IP32 537 557 bool "SGI IP32 (O2)" 538 - select ARC 539 - select ARC32 558 + select FW_ARC 559 + select FW_ARC32 540 560 select BOOT_ELF32 541 561 select CEVT_R4K 542 562 select CSRC_R4K ··· 654 674 655 675 config SNI_RM 656 676 bool "SNI RM200/300/400" 657 - select ARC if CPU_LITTLE_ENDIAN 658 - select ARC32 if CPU_LITTLE_ENDIAN 677 + select FW_ARC if CPU_LITTLE_ENDIAN 678 + select FW_ARC32 if CPU_LITTLE_ENDIAN 659 679 select SNIPROM if CPU_BIG_ENDIAN 660 680 select ARCH_MAY_HAVE_PC_FDC 661 681 select BOOT_ELF32 
··· 756 776 select DMA_COHERENT 757 777 select SYS_SUPPORTS_64BIT_KERNEL 758 778 select SYS_SUPPORTS_BIG_ENDIAN 779 + select EDAC_SUPPORT 759 780 select SYS_SUPPORTS_HOTPLUG_CPU 760 781 select SYS_HAS_EARLY_PRINTK 761 782 select SYS_HAS_CPU_CAVIUM_OCTEON ··· 800 819 select CSRC_R4K 801 820 select IRQ_CPU 802 821 select ARCH_SUPPORTS_MSI 803 - select ZONE_DMA if 64BIT 822 + select ZONE_DMA32 if 64BIT 804 823 select SYNC_R4K 805 824 select SYS_HAS_EARLY_PRINTK 806 825 select USB_ARCH_HAS_OHCI if USB_SUPPORT ··· 828 847 select CEVT_R4K 829 848 select CSRC_R4K 830 849 select IRQ_CPU 831 - select ZONE_DMA if 64BIT 850 + select ZONE_DMA32 if 64BIT 832 851 select SYNC_R4K 833 852 select SYS_HAS_EARLY_PRINTK 834 853 select USE_OF ··· 889 908 # 890 909 # Select some configuration options automatically based on user selections. 891 910 # 892 - config ARC 911 + config FW_ARC 893 912 bool 894 913 895 914 config ARCH_MAY_HAVE_PC_FDC ··· 907 926 config CEVT_GT641XX 908 927 bool 909 928 910 - config CEVT_R4K_LIB 911 - bool 912 - 913 929 config CEVT_R4K 914 - select CEVT_R4K_LIB 915 930 bool 916 931 917 932 config CEVT_SB1250 ··· 925 948 config CSRC_POWERTV 926 949 bool 927 950 928 - config CSRC_R4K_LIB 929 - bool 930 - 931 951 config CSRC_R4K 932 - select CSRC_R4K_LIB 933 952 bool 934 953 935 954 config CSRC_SB1250 ··· 936 963 select ARCH_REQUIRE_GPIOLIB 937 964 bool 938 965 939 - config CFE 966 + config FW_CFE 940 967 bool 941 968 942 969 config ARCH_DMA_ADDR_T_64BIT ··· 1052 1079 depends on CPU_SUPPORTS_HUGEPAGES && 64BIT 1053 1080 default y 1054 1081 1082 + config MIPS_HUGE_TLB_SUPPORT 1083 + def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE 1084 + 1055 1085 config IRQ_CPU 1056 1086 bool 1057 1087 1058 1088 config IRQ_CPU_RM7K 1059 - bool 1060 - 1061 - config IRQ_CPU_RM9K 1062 1089 bool 1063 1090 1064 1091 config IRQ_MSP_SLP ··· 1084 1111 1085 1112 config NO_EXCEPT_FILL 1086 1113 bool 1087 - 1088 - config MIPS_RM9122 1089 - bool 1090 - select SERIAL_RM9000 1091 1114 1092 1115 
config SOC_EMMA2RH 1093 1116 bool ··· 1130 1161 config SWAP_IO_SPACE 1131 1162 bool 1132 1163 1133 - config SERIAL_RM9000 1134 - bool 1135 - 1136 1164 config SGI_HAS_INDYDOG 1137 1165 bool 1138 1166 ··· 1151 1185 config DEFAULT_SGI_PARTITION 1152 1186 bool 1153 1187 1154 - config ARC32 1188 + config FW_ARC32 1155 1189 bool 1156 1190 1157 1191 config SNIPROM ··· 1184 1218 depends on MACH_JAZZ || SNI_RM || SGI_IP22 || SGI_IP28 || SGI_IP32 1185 1219 default y 1186 1220 1187 - config ARC64 1221 + config FW_ARC64 1188 1222 bool 1189 1223 1190 1224 config BOOT_ELF64 ··· 1336 1370 depends on SYS_HAS_CPU_R4X00 1337 1371 select CPU_SUPPORTS_32BIT_KERNEL 1338 1372 select CPU_SUPPORTS_64BIT_KERNEL 1373 + select CPU_SUPPORTS_HUGEPAGES 1339 1374 help 1340 1375 MIPS Technologies R4000-series processors other than 4300, including 1341 1376 the R4000, R4400, R4600, and 4700. ··· 1347 1380 select CPU_HAS_PREFETCH 1348 1381 select CPU_SUPPORTS_32BIT_KERNEL 1349 1382 select CPU_SUPPORTS_64BIT_KERNEL 1383 + select CPU_SUPPORTS_HUGEPAGES 1350 1384 1351 1385 config CPU_R5000 1352 1386 bool "R5000" 1353 1387 depends on SYS_HAS_CPU_R5000 1354 1388 select CPU_SUPPORTS_32BIT_KERNEL 1355 1389 select CPU_SUPPORTS_64BIT_KERNEL 1390 + select CPU_SUPPORTS_HUGEPAGES 1356 1391 help 1357 1392 MIPS Technologies R5000-series processors other than the Nevada. 1358 1393 ··· 1363 1394 depends on SYS_HAS_CPU_R5432 1364 1395 select CPU_SUPPORTS_32BIT_KERNEL 1365 1396 select CPU_SUPPORTS_64BIT_KERNEL 1397 + select CPU_SUPPORTS_HUGEPAGES 1366 1398 1367 1399 config CPU_R5500 1368 1400 bool "R5500" ··· 1389 1419 depends on SYS_HAS_CPU_NEVADA 1390 1420 select CPU_SUPPORTS_32BIT_KERNEL 1391 1421 select CPU_SUPPORTS_64BIT_KERNEL 1422 + select CPU_SUPPORTS_HUGEPAGES 1392 1423 help 1393 1424 QED / PMC-Sierra RM52xx-series ("Nevada") processors. 
1394 1425 ··· 1410 1439 select CPU_SUPPORTS_32BIT_KERNEL 1411 1440 select CPU_SUPPORTS_64BIT_KERNEL 1412 1441 select CPU_SUPPORTS_HIGHMEM 1442 + select CPU_SUPPORTS_HUGEPAGES 1413 1443 help 1414 1444 MIPS Technologies R10000-series processors. 1415 1445 ··· 1421 1449 select CPU_SUPPORTS_32BIT_KERNEL 1422 1450 select CPU_SUPPORTS_64BIT_KERNEL 1423 1451 select CPU_SUPPORTS_HIGHMEM 1424 - 1425 - config CPU_RM9000 1426 - bool "RM9000" 1427 - depends on SYS_HAS_CPU_RM9000 1428 - select CPU_HAS_PREFETCH 1429 - select CPU_SUPPORTS_32BIT_KERNEL 1430 - select CPU_SUPPORTS_64BIT_KERNEL 1431 - select CPU_SUPPORTS_HIGHMEM 1432 - select WEAK_ORDERING 1452 + select CPU_SUPPORTS_HUGEPAGES 1433 1453 1434 1454 config CPU_SB1 1435 1455 bool "SB1" ··· 1429 1465 select CPU_SUPPORTS_32BIT_KERNEL 1430 1466 select CPU_SUPPORTS_64BIT_KERNEL 1431 1467 select CPU_SUPPORTS_HIGHMEM 1468 + select CPU_SUPPORTS_HUGEPAGES 1432 1469 select WEAK_ORDERING 1433 1470 1434 1471 config CPU_CAVIUM_OCTEON ··· 1493 1528 select CPU_SUPPORTS_32BIT_KERNEL 1494 1529 select CPU_SUPPORTS_64BIT_KERNEL 1495 1530 select CPU_SUPPORTS_HIGHMEM 1531 + select CPU_SUPPORTS_HUGEPAGES 1496 1532 select WEAK_ORDERING 1497 1533 select WEAK_REORDERING_BEYOND_LLSC 1498 - select CPU_SUPPORTS_HUGEPAGES 1499 1534 help 1500 1535 Netlogic Microsystems XLR/XLS processors. 1501 1536 ··· 1509 1544 select WEAK_ORDERING 1510 1545 select WEAK_REORDERING_BEYOND_LLSC 1511 1546 select CPU_HAS_PREFETCH 1547 + select CPU_MIPSR2 1512 1548 help 1513 1549 Netlogic Microsystems XLP processors. 
1514 1550 endchoice ··· 1557 1591 select CPU_SUPPORTS_32BIT_KERNEL 1558 1592 select CPU_SUPPORTS_64BIT_KERNEL 1559 1593 select CPU_SUPPORTS_HIGHMEM 1594 + select CPU_SUPPORTS_HUGEPAGES 1560 1595 1561 1596 config CPU_LOONGSON1 1562 1597 bool ··· 1642 1675 config SYS_HAS_CPU_RM7000 1643 1676 bool 1644 1677 1645 - config SYS_HAS_CPU_RM9000 1646 - bool 1647 - 1648 1678 config SYS_HAS_CPU_SB1 1649 1679 bool 1650 1680 ··· 1721 1757 bool 1722 1758 config MIPS_PGD_C0_CONTEXT 1723 1759 bool 1724 - default y if 64BIT && CPU_MIPSR2 1760 + default y if 64BIT && CPU_MIPSR2 && !CPU_XLP 1725 1761 1726 1762 # 1727 1763 # Set to y for ptrace access to watch registers. ··· 2152 2188 2153 2189 config HW_PERF_EVENTS 2154 2190 bool "Enable hardware performance counter support for perf events" 2155 - depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON) 2191 + depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) 2156 2192 default y 2157 2193 help 2158 2194 Enable hardware performance counter support for perf events. If ··· 2329 2365 initially work for you. It may help to enable device hotplugging 2330 2366 support. As of this writing the exact hardware interface is 2331 2367 strongly in flux, so no good recommendation can be made. 2368 + 2369 + config CRASH_DUMP 2370 + bool "Kernel crash dumps" 2371 + help 2372 + Generate crash dump after being started by kexec. 2373 + This should be normally only set in special crash dump kernels 2374 + which are loaded in the main kernel with kexec-tools into 2375 + a specially reserved region and then later executed after 2376 + a crash by kdump/kexec. The crash dump kernel must be compiled 2377 + to a memory address not used by the main kernel or firmware using 2378 + PHYSICAL_START. 
2379 + 2380 + config PHYSICAL_START 2381 + hex "Physical address where the kernel is loaded" 2382 + default "0xffffffff84000000" if 64BIT 2383 + default "0x84000000" if 32BIT 2384 + depends on CRASH_DUMP 2385 + help 2386 + This gives the CKSEG0 or KSEG0 address where the kernel is loaded. 2387 + If you plan to use kernel for capturing the crash dump change 2388 + this value to start of the reserved region (the "X" value as 2389 + specified in the "crashkernel=YM@XM" command line boot parameter 2390 + passed to the panic-ed kernel). 2332 2391 2333 2392 config SECCOMP 2334 2393 bool "Enable seccomp to safely compute untrusted bytecode" ··· 2558 2571 source "net/Kconfig" 2559 2572 2560 2573 source "drivers/Kconfig" 2574 + 2575 + source "drivers/firmware/Kconfig" 2561 2576 2562 2577 source "fs/Kconfig" 2563 2578
+7 -5
arch/mips/Makefile
··· 145 145 -Wa,--trap 146 146 cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \ 147 147 -Wa,--trap 148 - cflags-$(CONFIG_CPU_RM9000) += $(call cc-option,-march=rm9000,-march=r5000) \ 149 - -Wa,--trap 150 148 cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \ 151 149 -Wa,--trap 152 150 cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap ··· 171 173 # 172 174 # Firmware support 173 175 # 174 - libs-$(CONFIG_ARC) += arch/mips/fw/arc/ 175 - libs-$(CONFIG_CFE) += arch/mips/fw/cfe/ 176 - libs-$(CONFIG_SNIPROM) += arch/mips/fw/sni/ 176 + libs-$(CONFIG_FW_ARC) += arch/mips/fw/arc/ 177 + libs-$(CONFIG_FW_CFE) += arch/mips/fw/cfe/ 178 + libs-$(CONFIG_FW_SNIPROM) += arch/mips/fw/sni/ 177 179 libs-y += arch/mips/fw/lib/ 178 180 179 181 # ··· 189 191 # Board-dependent options and extra files 190 192 # 191 193 include $(srctree)/arch/mips/Kbuild.platforms 194 + 195 + ifdef CONFIG_PHYSICAL_START 196 + load-y = $(CONFIG_PHYSICAL_START) 197 + endif 192 198 193 199 cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic 194 200 drivers-$(CONFIG_PCI) += arch/mips/pci/
+3
arch/mips/ar7/platform.c
··· 202 202 .end = 0x107fffff, 203 203 }; 204 204 205 + static const char *ar7_probe_types[] = { "ar7part", NULL }; 206 + 205 207 static struct physmap_flash_data physmap_flash_data = { 206 208 .width = 2, 209 + .part_probe_types = ar7_probe_types, 207 210 }; 208 211 209 212 static struct platform_device physmap_flash = {
+2
arch/mips/bcm47xx/Kconfig
··· 9 9 select SSB_EMBEDDED 10 10 select SSB_B43_PCI_BRIDGE if PCI 11 11 select SSB_PCICORE_HOSTMODE if PCI 12 + select SSB_DRIVER_GPIO 12 13 default y 13 14 help 14 15 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. ··· 24 23 select BCMA_DRIVER_MIPS 25 24 select BCMA_HOST_PCI if PCI 26 25 select BCMA_DRIVER_PCI_HOSTMODE if PCI 26 + select BCMA_DRIVER_GPIO 27 27 default y 28 28 help 29 29 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
+1 -1
arch/mips/bcm47xx/Makefile
··· 3 3 # under Linux. 4 4 # 5 5 6 - obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o sprom.o 6 + obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o 7 7 obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
-102
arch/mips/bcm47xx/gpio.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net> 7 - */ 8 - 9 - #include <linux/export.h> 10 - #include <linux/ssb/ssb.h> 11 - #include <linux/ssb/ssb_driver_chipcommon.h> 12 - #include <linux/ssb/ssb_driver_extif.h> 13 - #include <asm/mach-bcm47xx/bcm47xx.h> 14 - #include <asm/mach-bcm47xx/gpio.h> 15 - 16 - #if (BCM47XX_CHIPCO_GPIO_LINES > BCM47XX_EXTIF_GPIO_LINES) 17 - static DECLARE_BITMAP(gpio_in_use, BCM47XX_CHIPCO_GPIO_LINES); 18 - #else 19 - static DECLARE_BITMAP(gpio_in_use, BCM47XX_EXTIF_GPIO_LINES); 20 - #endif 21 - 22 - int gpio_request(unsigned gpio, const char *tag) 23 - { 24 - switch (bcm47xx_bus_type) { 25 - #ifdef CONFIG_BCM47XX_SSB 26 - case BCM47XX_BUS_TYPE_SSB: 27 - if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) && 28 - ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES)) 29 - return -EINVAL; 30 - 31 - if (ssb_extif_available(&bcm47xx_bus.ssb.extif) && 32 - ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES)) 33 - return -EINVAL; 34 - 35 - if (test_and_set_bit(gpio, gpio_in_use)) 36 - return -EBUSY; 37 - 38 - return 0; 39 - #endif 40 - #ifdef CONFIG_BCM47XX_BCMA 41 - case BCM47XX_BUS_TYPE_BCMA: 42 - if (gpio >= BCM47XX_CHIPCO_GPIO_LINES) 43 - return -EINVAL; 44 - 45 - if (test_and_set_bit(gpio, gpio_in_use)) 46 - return -EBUSY; 47 - 48 - return 0; 49 - #endif 50 - } 51 - return -EINVAL; 52 - } 53 - EXPORT_SYMBOL(gpio_request); 54 - 55 - void gpio_free(unsigned gpio) 56 - { 57 - switch (bcm47xx_bus_type) { 58 - #ifdef CONFIG_BCM47XX_SSB 59 - case BCM47XX_BUS_TYPE_SSB: 60 - if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) && 61 - ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES)) 62 - return; 63 - 64 - if (ssb_extif_available(&bcm47xx_bus.ssb.extif) && 65 - ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES)) 66 - return; 67 - 68 - clear_bit(gpio, 
gpio_in_use); 69 - return; 70 - #endif 71 - #ifdef CONFIG_BCM47XX_BCMA 72 - case BCM47XX_BUS_TYPE_BCMA: 73 - if (gpio >= BCM47XX_CHIPCO_GPIO_LINES) 74 - return; 75 - 76 - clear_bit(gpio, gpio_in_use); 77 - return; 78 - #endif 79 - } 80 - } 81 - EXPORT_SYMBOL(gpio_free); 82 - 83 - int gpio_to_irq(unsigned gpio) 84 - { 85 - switch (bcm47xx_bus_type) { 86 - #ifdef CONFIG_BCM47XX_SSB 87 - case BCM47XX_BUS_TYPE_SSB: 88 - if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco)) 89 - return ssb_mips_irq(bcm47xx_bus.ssb.chipco.dev) + 2; 90 - else if (ssb_extif_available(&bcm47xx_bus.ssb.extif)) 91 - return ssb_mips_irq(bcm47xx_bus.ssb.extif.dev) + 2; 92 - else 93 - return -EINVAL; 94 - #endif 95 - #ifdef CONFIG_BCM47XX_BCMA 96 - case BCM47XX_BUS_TYPE_BCMA: 97 - return bcma_core_mips_irq(bcm47xx_bus.bcma.bus.drv_cc.core) + 2; 98 - #endif 99 - } 100 - return -EINVAL; 101 - } 102 - EXPORT_SYMBOL_GPL(gpio_to_irq);
+16 -4
arch/mips/bcm47xx/prom.c
··· 1 1 /* 2 2 * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org> 3 3 * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net> 4 + * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de> 4 5 * 5 6 * This program is free software; you can redistribute it and/or modify it 6 7 * under the terms of the GNU General Public License as published by the ··· 28 27 #include <linux/types.h> 29 28 #include <linux/kernel.h> 30 29 #include <linux/spinlock.h> 30 + #include <linux/smp.h> 31 31 #include <asm/bootinfo.h> 32 32 #include <asm/fw/cfe/cfe_api.h> 33 33 #include <asm/fw/cfe/cfe_error.h> ··· 129 127 { 130 128 unsigned long mem; 131 129 unsigned long max; 130 + unsigned long off; 131 + struct cpuinfo_mips *c = &current_cpu_data; 132 132 133 133 /* Figure out memory size by finding aliases. 134 134 * ··· 147 143 * max contains the biggest possible address supported by the platform. 148 144 * If the method wants to try something above we assume 128MB ram. 149 145 */ 150 - max = ((unsigned long)(prom_init) | ((128 << 20) - 1)); 146 + off = (unsigned long)prom_init; 147 + max = off | ((128 << 20) - 1); 151 148 for (mem = (1 << 20); mem < (128 << 20); mem += (1 << 20)) { 152 - if (((unsigned long)(prom_init) + mem) > max) { 149 + if ((off + mem) > max) { 153 150 mem = (128 << 20); 154 151 printk(KERN_DEBUG "assume 128MB RAM\n"); 155 152 break; 156 153 } 157 - if (*(unsigned long *)((unsigned long)(prom_init) + mem) == 158 - *(unsigned long *)(prom_init)) 154 + if (!memcmp(prom_init, prom_init + mem, 32)) 159 155 break; 160 156 } 157 + 158 + /* Ignoring the last page when ddr size is 128M. Cached 159 + * accesses to last page is causing the processor to prefetch 160 + * using address above 128M stepping out of the ddr address 161 + * space. 162 + */ 163 + if (c->cputype == CPU_74K && (mem == (128 << 20))) 164 + mem -= 0x1000; 161 165 162 166 add_memory_region(0, mem, BOOT_MEM_RAM); 163 167 }
+6 -5
arch/mips/bcm47xx/setup.c
··· 94 94 snprintf(prefix, sizeof(prefix), "pci/%u/%u/", 95 95 bus->host_pci->bus->number + 1, 96 96 PCI_SLOT(bus->host_pci->devfn)); 97 - bcm47xx_fill_sprom(out, prefix); 97 + bcm47xx_fill_sprom(out, prefix, false); 98 98 return 0; 99 99 } else { 100 100 printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n"); ··· 113 113 bcm47xx_fill_ssb_boardinfo(&iv->boardinfo, NULL); 114 114 115 115 memset(&iv->sprom, 0, sizeof(struct ssb_sprom)); 116 - bcm47xx_fill_sprom(&iv->sprom, NULL); 116 + bcm47xx_fill_sprom(&iv->sprom, NULL, false); 117 117 118 118 if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) 119 119 iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); ··· 165 165 snprintf(prefix, sizeof(prefix), "pci/%u/%u/", 166 166 bus->host_pci->bus->number + 1, 167 167 PCI_SLOT(bus->host_pci->devfn)); 168 - bcm47xx_fill_sprom(out, prefix); 168 + bcm47xx_fill_sprom(out, prefix, false); 169 169 return 0; 170 170 case BCMA_HOSTTYPE_SOC: 171 171 memset(out, 0, sizeof(struct ssb_sprom)); 172 - bcm47xx_fill_sprom_ethernet(out, NULL); 173 172 core = bcma_find_core(bus, BCMA_CORE_80211); 174 173 if (core) { 175 174 snprintf(prefix, sizeof(prefix), "sb/%u/", 176 175 core->core_index); 177 - bcm47xx_fill_sprom(out, prefix); 176 + bcm47xx_fill_sprom(out, prefix, true); 177 + } else { 178 + bcm47xx_fill_sprom(out, NULL, false); 178 179 } 179 180 return 0; 180 181 default:
+445 -333
arch/mips/bcm47xx/sprom.c
··· 42 42 snprintf(buf, len, "%s", name); 43 43 } 44 44 45 + static int get_nvram_var(const char *prefix, const char *postfix, 46 + const char *name, char *buf, int len, bool fallback) 47 + { 48 + char key[40]; 49 + int err; 50 + 51 + create_key(prefix, postfix, name, key, sizeof(key)); 52 + 53 + err = nvram_getenv(key, buf, len); 54 + if (fallback && err == NVRAM_ERR_ENVNOTFOUND && prefix) { 55 + create_key(NULL, postfix, name, key, sizeof(key)); 56 + err = nvram_getenv(key, buf, len); 57 + } 58 + return err; 59 + } 60 + 45 61 #define NVRAM_READ_VAL(type) \ 46 62 static void nvram_read_ ## type (const char *prefix, \ 47 63 const char *postfix, const char *name, \ 48 - type *val, type allset) \ 64 + type *val, type allset, bool fallback) \ 49 65 { \ 50 66 char buf[100]; \ 51 - char key[40]; \ 52 67 int err; \ 53 68 type var; \ 54 69 \ 55 - create_key(prefix, postfix, name, key, sizeof(key)); \ 56 - \ 57 - err = nvram_getenv(key, buf, sizeof(buf)); \ 70 + err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \ 71 + fallback); \ 58 72 if (err < 0) \ 59 73 return; \ 60 74 err = kstrto ## type (buf, 0, &var); \ 61 75 if (err) { \ 62 - pr_warn("can not parse nvram name %s with value %s" \ 63 - " got %i", key, buf, err); \ 76 + pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \ 77 + prefix, name, postfix, buf, err); \ 64 78 return; \ 65 79 } \ 66 80 if (allset && var == allset) \ ··· 90 76 #undef NVRAM_READ_VAL 91 77 92 78 static void nvram_read_u32_2(const char *prefix, const char *name, 93 - u16 *val_lo, u16 *val_hi) 79 + u16 *val_lo, u16 *val_hi, bool fallback) 94 80 { 95 81 char buf[100]; 96 - char key[40]; 97 82 int err; 98 83 u32 val; 99 84 100 - create_key(prefix, NULL, name, key, sizeof(key)); 101 - 102 - err = nvram_getenv(key, buf, sizeof(buf)); 85 + err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); 103 86 if (err < 0) 104 87 return; 105 88 err = kstrtou32(buf, 0, &val); 106 89 if (err) { 107 - pr_warn("can not parse 
nvram name %s with value %s got %i", 108 - key, buf, err); 90 + pr_warn("can not parse nvram name %s%s with value %s got %i\n", 91 + prefix, name, buf, err); 109 92 return; 110 93 } 111 94 *val_lo = (val & 0x0000FFFFU); ··· 110 99 } 111 100 112 101 static void nvram_read_leddc(const char *prefix, const char *name, 113 - u8 *leddc_on_time, u8 *leddc_off_time) 102 + u8 *leddc_on_time, u8 *leddc_off_time, 103 + bool fallback) 114 104 { 115 105 char buf[100]; 116 - char key[40]; 117 106 int err; 118 107 u32 val; 119 108 120 - create_key(prefix, NULL, name, key, sizeof(key)); 121 - 122 - err = nvram_getenv(key, buf, sizeof(buf)); 109 + err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); 123 110 if (err < 0) 124 111 return; 125 112 err = kstrtou32(buf, 0, &val); 126 113 if (err) { 127 - pr_warn("can not parse nvram name %s with value %s got %i", 128 - key, buf, err); 114 + pr_warn("can not parse nvram name %s%s with value %s got %i\n", 115 + prefix, name, buf, err); 129 116 return; 130 117 } 131 118 ··· 135 126 } 136 127 137 128 static void nvram_read_macaddr(const char *prefix, const char *name, 138 - u8 (*val)[6]) 129 + u8 (*val)[6], bool fallback) 139 130 { 140 131 char buf[100]; 141 - char key[40]; 142 132 int err; 143 133 144 - create_key(prefix, NULL, name, key, sizeof(key)); 145 - 146 - err = nvram_getenv(key, buf, sizeof(buf)); 134 + err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); 147 135 if (err < 0) 148 136 return; 137 + 149 138 nvram_parse_macaddr(buf, *val); 150 139 } 151 140 152 141 static void nvram_read_alpha2(const char *prefix, const char *name, 153 - char (*val)[2]) 142 + char (*val)[2], bool fallback) 154 143 { 155 144 char buf[10]; 156 - char key[40]; 157 145 int err; 158 146 159 - create_key(prefix, NULL, name, key, sizeof(key)); 160 - 161 - err = nvram_getenv(key, buf, sizeof(buf)); 147 + err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); 162 148 if (err < 0) 163 149 return; 164 150 if (buf[0] 
== '0') 165 151 return; 166 152 if (strlen(buf) > 2) { 167 - pr_warn("alpha2 is too long %s", buf); 153 + pr_warn("alpha2 is too long %s\n", buf); 168 154 return; 169 155 } 170 156 memcpy(val, buf, sizeof(val)); 171 157 } 172 158 173 159 static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom, 174 - const char *prefix) 160 + const char *prefix, bool fallback) 175 161 { 176 - nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0); 177 - if (!sprom->board_rev) 178 - nvram_read_u16(NULL, NULL, "boardrev", &sprom->board_rev, 0); 179 - nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0); 180 - nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff); 181 - nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff); 182 - nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff); 183 - nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff); 184 - nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0); 185 - nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0); 186 - nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0); 187 - nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0); 188 - nvram_read_alpha2(prefix, "ccode", &sprom->alpha2); 162 + nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback); 163 + nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback); 164 + nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback); 165 + nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff, fallback); 166 + nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0, 167 + fallback); 168 + nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0, 169 + fallback); 170 + nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0, 171 + fallback); 172 + nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0, 173 + fallback); 174 + nvram_read_alpha2(prefix, "ccode", &sprom->alpha2, fallback); 189 175 } 190 176 191 177 static void 
bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom, 192 - const char *prefix) 178 + const char *prefix, bool fallback) 193 179 { 194 - nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0); 195 - nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0); 196 - nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0); 197 - nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0); 198 - nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0); 199 - nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0); 200 - nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0); 201 - nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0); 202 - nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0); 203 - nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0); 180 + nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0, fallback); 181 + nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0, fallback); 182 + nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0, fallback); 183 + nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0, fallback); 184 + nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0, 185 + fallback); 186 + nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0, fallback); 187 + nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0, fallback); 188 + nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0, fallback); 189 + nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0, fallback); 190 + nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0, fallback); 204 191 } 205 192 206 - static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix) 193 + static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix, 194 + bool fallback) 207 195 { 208 - nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0); 209 - nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0); 196 + nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0, 197 + fallback); 198 
+ nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0, fallback); 210 199 } 211 200 212 201 static void bcm47xx_fill_sprom_r2389(struct ssb_sprom *sprom, 213 - const char *prefix) 202 + const char *prefix, bool fallback) 214 203 { 215 - nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0); 216 - nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0); 217 - nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0); 218 - nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0); 219 - nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0); 220 - nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0); 221 - nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0); 222 - nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0); 223 - nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0); 204 + nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0, fallback); 205 + nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0, fallback); 206 + nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0, fallback); 207 + nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0, fallback); 208 + nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0, fallback); 209 + nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0, fallback); 210 + nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0, fallback); 211 + nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0, 212 + fallback); 213 + nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0, 214 + fallback); 224 215 } 225 216 226 - static void bcm47xx_fill_sprom_r2(struct ssb_sprom *sprom, const char *prefix) 217 + static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix, 218 + bool fallback) 227 219 { 228 - nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, 229 - &sprom->boardflags_hi); 230 - nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0); 220 + nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0, 
fallback); 221 + nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0, 222 + fallback); 223 + nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0, 224 + fallback); 225 + nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0, 226 + fallback); 227 + nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0, fallback); 228 + nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0, 229 + fallback); 230 + nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0, 231 + fallback); 232 + nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0, 233 + fallback); 234 + nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0, fallback); 235 + nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0, fallback); 236 + nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0, fallback); 237 + nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0, fallback); 238 + nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0, fallback); 239 + nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0, fallback); 231 240 } 232 241 233 - static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix) 242 + static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix, 243 + bool fallback) 234 244 { 235 - nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0); 236 - nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0); 237 - nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0); 238 - nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0); 239 - nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0); 240 - nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0); 241 - nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0); 242 - nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0); 243 - nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0); 244 - nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0); 245 - nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0); 246 - 
nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0); 247 - nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0); 248 - nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0); 249 - } 250 - 251 - static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix) 252 - { 253 - nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, 254 - &sprom->boardflags_hi); 255 - nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0); 256 - nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0); 245 + nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback); 257 246 nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time, 258 - &sprom->leddc_off_time); 247 + &sprom->leddc_off_time, fallback); 259 248 } 260 249 261 250 static void bcm47xx_fill_sprom_r4589(struct ssb_sprom *sprom, 262 - const char *prefix) 251 + const char *prefix, bool fallback) 263 252 { 264 - nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, 265 - &sprom->boardflags_hi); 266 - nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo, 267 - &sprom->boardflags2_hi); 268 - nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0); 269 - nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0); 270 - nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0); 271 - nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0); 272 - nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf); 273 - nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf); 274 - nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff); 253 + nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback); 254 + nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0, 255 + fallback); 256 + nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0, 257 + fallback); 258 + nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf, fallback); 259 + nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf, fallback); 260 + 
nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff, 261 + fallback); 275 262 nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time, 276 - &sprom->leddc_off_time); 263 + &sprom->leddc_off_time, fallback); 277 264 } 278 265 279 - static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix) 266 + static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix, 267 + bool fallback) 280 268 { 281 - nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0); 282 - nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0); 283 - nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0); 284 - nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0); 285 - nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0); 286 - nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0); 287 - nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0); 288 - nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0); 289 - nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0); 290 - nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0); 291 - nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0); 292 - nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0); 293 - nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0); 294 - nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0); 295 - nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0); 296 - nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0); 297 - nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0); 298 - nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0); 299 - nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0); 300 - nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0); 301 - nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0); 302 - nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0); 303 - 
nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0); 304 - nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0); 305 - nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0); 306 - nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0); 307 - nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0); 308 - nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0); 309 - nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0); 310 - nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0); 311 - nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0); 312 - nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0); 313 - nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0); 314 - nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0); 315 - nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0); 316 - nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0); 317 - nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0); 318 - nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0); 319 - nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0); 320 - nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0); 321 - nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0); 269 + nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0, fallback); 270 + nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0, fallback); 271 + nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0, fallback); 272 + nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0, 273 + fallback); 274 + nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0, 275 + fallback); 276 + nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0, fallback); 277 + nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0, fallback); 278 + nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0, 
fallback); 279 + nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0, fallback); 280 + nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0, 281 + fallback); 282 + nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0, 283 + fallback); 284 + nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0, 285 + fallback); 286 + nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0, 287 + fallback); 288 + nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0, 289 + fallback); 290 + nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0, 291 + fallback); 292 + nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0, 293 + fallback); 294 + nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0, 295 + fallback); 296 + nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0, 297 + fallback); 298 + nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0, 299 + fallback); 300 + nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0, 301 + fallback); 302 + nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0, 303 + fallback); 304 + nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0, 305 + fallback); 306 + nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0, 307 + fallback); 308 + nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0, 309 + fallback); 310 + nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0, 311 + fallback); 312 + nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0, 313 + fallback); 314 + nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0, 315 + fallback); 316 + nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0, 317 + fallback); 318 + nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0, 319 + fallback); 320 + nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0, 321 + fallback); 322 + nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0, 
323 + fallback); 324 + nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0, 325 + fallback); 326 + nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0, 327 + fallback); 328 + nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0, 329 + fallback); 330 + nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0, 331 + fallback); 332 + nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0, 333 + fallback); 334 + nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0, 335 + fallback); 336 + nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0, 337 + fallback); 338 + nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0, 339 + fallback); 340 + nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0, 341 + fallback); 342 + nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0, 343 + fallback); 322 344 } 323 345 324 - static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix) 346 + static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix, 347 + bool fallback) 325 348 { 326 - nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0); 327 - nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0); 328 - nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0); 329 - nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0); 330 - nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0); 331 - nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0); 332 - nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0); 333 - nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0); 334 - nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0); 335 - nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0); 336 - nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0); 337 - nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0); 338 - 
nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0); 339 - nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0); 340 - nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0); 341 - nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0); 349 + nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0, 350 + fallback); 351 + nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0, 352 + fallback); 353 + nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0, 354 + fallback); 355 + nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0, 356 + fallback); 357 + nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0, 358 + fallback); 359 + nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0, 360 + fallback); 361 + nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0, 362 + fallback); 363 + nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0, 364 + fallback); 365 + nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0, 366 + fallback); 367 + nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0, 368 + fallback); 369 + nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0, 370 + fallback); 371 + nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0, 372 + fallback); 373 + nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0, 374 + fallback); 375 + nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0, 376 + fallback); 377 + nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0, 378 + fallback); 379 + nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0, 380 + fallback); 342 381 } 343 382 344 - static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix) 383 + static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix, 384 + bool fallback) 345 385 { 346 - nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0); 386 + 
nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0, 387 + fallback); 347 388 nvram_read_u8(prefix, NULL, "extpagain2g", 348 - &sprom->fem.ghz2.extpa_gain, 0); 389 + &sprom->fem.ghz2.extpa_gain, 0, fallback); 349 390 nvram_read_u8(prefix, NULL, "pdetrange2g", 350 - &sprom->fem.ghz2.pdet_range, 0); 351 - nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0); 352 - nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0); 353 - nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0); 391 + &sprom->fem.ghz2.pdet_range, 0, fallback); 392 + nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0, 393 + fallback); 394 + nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0, 395 + fallback); 396 + nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0, 397 + fallback); 354 398 nvram_read_u8(prefix, NULL, "extpagain5g", 355 - &sprom->fem.ghz5.extpa_gain, 0); 399 + &sprom->fem.ghz5.extpa_gain, 0, fallback); 356 400 nvram_read_u8(prefix, NULL, "pdetrange5g", 357 - &sprom->fem.ghz5.pdet_range, 0); 358 - nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0); 359 - nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0); 360 - nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0); 361 - nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0); 362 - nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0); 363 - nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0); 401 + &sprom->fem.ghz5.pdet_range, 0, fallback); 402 + nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0, 403 + fallback); 404 + nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0, 405 + fallback); 406 + nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0, 407 + fallback); 408 + nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0, 409 + fallback); 410 + nvram_read_u16(prefix, NULL, 
"rawtempsense", &sprom->rawtempsense, 0, 411 + fallback); 412 + nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0, 413 + fallback); 364 414 nvram_read_u8(prefix, NULL, "tempsense_slope", 365 - &sprom->tempsense_slope, 0); 366 - nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0); 415 + &sprom->tempsense_slope, 0, fallback); 416 + nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0, 417 + fallback); 367 418 nvram_read_u8(prefix, NULL, "tempsense_option", 368 - &sprom->tempsense_option, 0); 419 + &sprom->tempsense_option, 0, fallback); 369 420 nvram_read_u8(prefix, NULL, "freqoffset_corr", 370 - &sprom->freqoffset_corr, 0); 371 - nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0); 372 - nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0); 373 - nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0); 374 - nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0); 421 + &sprom->freqoffset_corr, 0, fallback); 422 + nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0, 423 + fallback); 424 + nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0, 425 + fallback); 426 + nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0, fallback); 427 + nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0, fallback); 375 428 nvram_read_u8(prefix, NULL, "phycal_tempdelta", 376 - &sprom->phycal_tempdelta, 0); 377 - nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0); 429 + &sprom->phycal_tempdelta, 0, fallback); 430 + nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0, 431 + fallback); 378 432 nvram_read_u8(prefix, NULL, "temps_hysteresis", 379 - &sprom->temps_hysteresis, 0); 380 - nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0); 381 - nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0); 433 + &sprom->temps_hysteresis, 0, fallback); 434 + nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0, 435 + fallback); 436 
+ nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0, 437 + fallback); 382 438 nvram_read_u8(prefix, NULL, "rxgainerr2ga0", 383 - &sprom->rxgainerr2ga[0], 0); 439 + &sprom->rxgainerr2ga[0], 0, fallback); 384 440 nvram_read_u8(prefix, NULL, "rxgainerr2ga1", 385 - &sprom->rxgainerr2ga[1], 0); 441 + &sprom->rxgainerr2ga[1], 0, fallback); 386 442 nvram_read_u8(prefix, NULL, "rxgainerr2ga2", 387 - &sprom->rxgainerr2ga[2], 0); 443 + &sprom->rxgainerr2ga[2], 0, fallback); 388 444 nvram_read_u8(prefix, NULL, "rxgainerr5gla0", 389 - &sprom->rxgainerr5gla[0], 0); 445 + &sprom->rxgainerr5gla[0], 0, fallback); 390 446 nvram_read_u8(prefix, NULL, "rxgainerr5gla1", 391 - &sprom->rxgainerr5gla[1], 0); 447 + &sprom->rxgainerr5gla[1], 0, fallback); 392 448 nvram_read_u8(prefix, NULL, "rxgainerr5gla2", 393 - &sprom->rxgainerr5gla[2], 0); 449 + &sprom->rxgainerr5gla[2], 0, fallback); 394 450 nvram_read_u8(prefix, NULL, "rxgainerr5gma0", 395 - &sprom->rxgainerr5gma[0], 0); 451 + &sprom->rxgainerr5gma[0], 0, fallback); 396 452 nvram_read_u8(prefix, NULL, "rxgainerr5gma1", 397 - &sprom->rxgainerr5gma[1], 0); 453 + &sprom->rxgainerr5gma[1], 0, fallback); 398 454 nvram_read_u8(prefix, NULL, "rxgainerr5gma2", 399 - &sprom->rxgainerr5gma[2], 0); 455 + &sprom->rxgainerr5gma[2], 0, fallback); 400 456 nvram_read_u8(prefix, NULL, "rxgainerr5gha0", 401 - &sprom->rxgainerr5gha[0], 0); 457 + &sprom->rxgainerr5gha[0], 0, fallback); 402 458 nvram_read_u8(prefix, NULL, "rxgainerr5gha1", 403 - &sprom->rxgainerr5gha[1], 0); 459 + &sprom->rxgainerr5gha[1], 0, fallback); 404 460 nvram_read_u8(prefix, NULL, "rxgainerr5gha2", 405 - &sprom->rxgainerr5gha[2], 0); 461 + &sprom->rxgainerr5gha[2], 0, fallback); 406 462 nvram_read_u8(prefix, NULL, "rxgainerr5gua0", 407 - &sprom->rxgainerr5gua[0], 0); 463 + &sprom->rxgainerr5gua[0], 0, fallback); 408 464 nvram_read_u8(prefix, NULL, "rxgainerr5gua1", 409 - &sprom->rxgainerr5gua[1], 0); 465 + &sprom->rxgainerr5gua[1], 0, fallback); 410 466 
nvram_read_u8(prefix, NULL, "rxgainerr5gua2", 411 - &sprom->rxgainerr5gua[2], 0); 412 - nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0); 413 - nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0); 414 - nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0); 467 + &sprom->rxgainerr5gua[2], 0, fallback); 468 + nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0, 469 + fallback); 470 + nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0, 471 + fallback); 472 + nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0, 473 + fallback); 415 474 nvram_read_u8(prefix, NULL, "noiselvl5gla0", 416 - &sprom->noiselvl5gla[0], 0); 475 + &sprom->noiselvl5gla[0], 0, fallback); 417 476 nvram_read_u8(prefix, NULL, "noiselvl5gla1", 418 - &sprom->noiselvl5gla[1], 0); 477 + &sprom->noiselvl5gla[1], 0, fallback); 419 478 nvram_read_u8(prefix, NULL, "noiselvl5gla2", 420 - &sprom->noiselvl5gla[2], 0); 479 + &sprom->noiselvl5gla[2], 0, fallback); 421 480 nvram_read_u8(prefix, NULL, "noiselvl5gma0", 422 - &sprom->noiselvl5gma[0], 0); 481 + &sprom->noiselvl5gma[0], 0, fallback); 423 482 nvram_read_u8(prefix, NULL, "noiselvl5gma1", 424 - &sprom->noiselvl5gma[1], 0); 483 + &sprom->noiselvl5gma[1], 0, fallback); 425 484 nvram_read_u8(prefix, NULL, "noiselvl5gma2", 426 - &sprom->noiselvl5gma[2], 0); 485 + &sprom->noiselvl5gma[2], 0, fallback); 427 486 nvram_read_u8(prefix, NULL, "noiselvl5gha0", 428 - &sprom->noiselvl5gha[0], 0); 487 + &sprom->noiselvl5gha[0], 0, fallback); 429 488 nvram_read_u8(prefix, NULL, "noiselvl5gha1", 430 - &sprom->noiselvl5gha[1], 0); 489 + &sprom->noiselvl5gha[1], 0, fallback); 431 490 nvram_read_u8(prefix, NULL, "noiselvl5gha2", 432 - &sprom->noiselvl5gha[2], 0); 491 + &sprom->noiselvl5gha[2], 0, fallback); 433 492 nvram_read_u8(prefix, NULL, "noiselvl5gua0", 434 - &sprom->noiselvl5gua[0], 0); 493 + &sprom->noiselvl5gua[0], 0, fallback); 435 494 
nvram_read_u8(prefix, NULL, "noiselvl5gua1", 436 - &sprom->noiselvl5gua[1], 0); 495 + &sprom->noiselvl5gua[1], 0, fallback); 437 496 nvram_read_u8(prefix, NULL, "noiselvl5gua2", 438 - &sprom->noiselvl5gua[2], 0); 497 + &sprom->noiselvl5gua[2], 0, fallback); 439 498 nvram_read_u8(prefix, NULL, "pcieingress_war", 440 - &sprom->pcieingress_war, 0); 499 + &sprom->pcieingress_war, 0, fallback); 441 500 } 442 501 443 - static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix) 502 + static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix, 503 + bool fallback) 444 504 { 445 - nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0); 446 - nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0); 505 + nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0, 506 + fallback); 507 + nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0, 508 + fallback); 447 509 nvram_read_u32(prefix, NULL, "legofdmbw202gpo", 448 - &sprom->legofdmbw202gpo, 0); 510 + &sprom->legofdmbw202gpo, 0, fallback); 449 511 nvram_read_u32(prefix, NULL, "legofdmbw20ul2gpo", 450 - &sprom->legofdmbw20ul2gpo, 0); 512 + &sprom->legofdmbw20ul2gpo, 0, fallback); 451 513 nvram_read_u32(prefix, NULL, "legofdmbw205glpo", 452 - &sprom->legofdmbw205glpo, 0); 514 + &sprom->legofdmbw205glpo, 0, fallback); 453 515 nvram_read_u32(prefix, NULL, "legofdmbw20ul5glpo", 454 - &sprom->legofdmbw20ul5glpo, 0); 516 + &sprom->legofdmbw20ul5glpo, 0, fallback); 455 517 nvram_read_u32(prefix, NULL, "legofdmbw205gmpo", 456 - &sprom->legofdmbw205gmpo, 0); 518 + &sprom->legofdmbw205gmpo, 0, fallback); 457 519 nvram_read_u32(prefix, NULL, "legofdmbw20ul5gmpo", 458 - &sprom->legofdmbw20ul5gmpo, 0); 520 + &sprom->legofdmbw20ul5gmpo, 0, fallback); 459 521 nvram_read_u32(prefix, NULL, "legofdmbw205ghpo", 460 - &sprom->legofdmbw205ghpo, 0); 522 + &sprom->legofdmbw205ghpo, 0, fallback); 461 523 nvram_read_u32(prefix, NULL, "legofdmbw20ul5ghpo", 462 
- &sprom->legofdmbw20ul5ghpo, 0); 463 - nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0); 464 - nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0); 465 - nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0); 466 - nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0); 524 + &sprom->legofdmbw20ul5ghpo, 0, fallback); 525 + nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0, 526 + fallback); 527 + nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0, 528 + fallback); 529 + nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0, 530 + fallback); 531 + nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0, 532 + fallback); 467 533 nvram_read_u32(prefix, NULL, "mcsbw20ul5glpo", 468 - &sprom->mcsbw20ul5glpo, 0); 469 - nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0); 470 - nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0); 534 + &sprom->mcsbw20ul5glpo, 0, fallback); 535 + nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0, 536 + fallback); 537 + nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0, 538 + fallback); 471 539 nvram_read_u32(prefix, NULL, "mcsbw20ul5gmpo", 472 - &sprom->mcsbw20ul5gmpo, 0); 473 - nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0); 474 - nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0); 540 + &sprom->mcsbw20ul5gmpo, 0, fallback); 541 + nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0, 542 + fallback); 543 + nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0, 544 + fallback); 475 545 nvram_read_u32(prefix, NULL, "mcsbw20ul5ghpo", 476 - &sprom->mcsbw20ul5ghpo, 0); 477 - nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0); 478 - nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0); 546 + &sprom->mcsbw20ul5ghpo, 0, fallback); 547 + 
nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0, 548 + fallback); 549 + nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0, fallback); 479 550 nvram_read_u16(prefix, NULL, "legofdm40duppo", 480 - &sprom->legofdm40duppo, 0); 481 - nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0); 482 - nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0); 551 + &sprom->legofdm40duppo, 0, fallback); 552 + nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0, fallback); 553 + nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0, fallback); 483 554 } 484 555 485 556 static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom, 486 - const char *prefix) 557 + const char *prefix, bool fallback) 487 558 { 488 559 char postfix[2]; 489 560 int i; ··· 572 483 struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i]; 573 484 snprintf(postfix, sizeof(postfix), "%i", i); 574 485 nvram_read_u8(prefix, postfix, "maxp2ga", 575 - &pwr_info->maxpwr_2g, 0); 486 + &pwr_info->maxpwr_2g, 0, fallback); 576 487 nvram_read_u8(prefix, postfix, "itt2ga", 577 - &pwr_info->itssi_2g, 0); 488 + &pwr_info->itssi_2g, 0, fallback); 578 489 nvram_read_u8(prefix, postfix, "itt5ga", 579 - &pwr_info->itssi_5g, 0); 490 + &pwr_info->itssi_5g, 0, fallback); 580 491 nvram_read_u16(prefix, postfix, "pa2gw0a", 581 - &pwr_info->pa_2g[0], 0); 492 + &pwr_info->pa_2g[0], 0, fallback); 582 493 nvram_read_u16(prefix, postfix, "pa2gw1a", 583 - &pwr_info->pa_2g[1], 0); 494 + &pwr_info->pa_2g[1], 0, fallback); 584 495 nvram_read_u16(prefix, postfix, "pa2gw2a", 585 - &pwr_info->pa_2g[2], 0); 496 + &pwr_info->pa_2g[2], 0, fallback); 586 497 nvram_read_u8(prefix, postfix, "maxp5ga", 587 - &pwr_info->maxpwr_5g, 0); 498 + &pwr_info->maxpwr_5g, 0, fallback); 588 499 nvram_read_u8(prefix, postfix, "maxp5gha", 589 - &pwr_info->maxpwr_5gh, 0); 500 + &pwr_info->maxpwr_5gh, 0, fallback); 590 501 nvram_read_u8(prefix, postfix, "maxp5gla", 591 - &pwr_info->maxpwr_5gl, 0); 502 + 
&pwr_info->maxpwr_5gl, 0, fallback); 592 503 nvram_read_u16(prefix, postfix, "pa5gw0a", 593 - &pwr_info->pa_5g[0], 0); 504 + &pwr_info->pa_5g[0], 0, fallback); 594 505 nvram_read_u16(prefix, postfix, "pa5gw1a", 595 - &pwr_info->pa_5g[1], 0); 506 + &pwr_info->pa_5g[1], 0, fallback); 596 507 nvram_read_u16(prefix, postfix, "pa5gw2a", 597 - &pwr_info->pa_5g[2], 0); 508 + &pwr_info->pa_5g[2], 0, fallback); 598 509 nvram_read_u16(prefix, postfix, "pa5glw0a", 599 - &pwr_info->pa_5gl[0], 0); 510 + &pwr_info->pa_5gl[0], 0, fallback); 600 511 nvram_read_u16(prefix, postfix, "pa5glw1a", 601 - &pwr_info->pa_5gl[1], 0); 512 + &pwr_info->pa_5gl[1], 0, fallback); 602 513 nvram_read_u16(prefix, postfix, "pa5glw2a", 603 - &pwr_info->pa_5gl[2], 0); 514 + &pwr_info->pa_5gl[2], 0, fallback); 604 515 nvram_read_u16(prefix, postfix, "pa5ghw0a", 605 - &pwr_info->pa_5gh[0], 0); 516 + &pwr_info->pa_5gh[0], 0, fallback); 606 517 nvram_read_u16(prefix, postfix, "pa5ghw1a", 607 - &pwr_info->pa_5gh[1], 0); 518 + &pwr_info->pa_5gh[1], 0, fallback); 608 519 nvram_read_u16(prefix, postfix, "pa5ghw2a", 609 - &pwr_info->pa_5gh[2], 0); 520 + &pwr_info->pa_5gh[2], 0, fallback); 610 521 } 611 522 } 612 523 613 524 static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom, 614 - const char *prefix) 525 + const char *prefix, bool fallback) 615 526 { 616 527 char postfix[2]; 617 528 int i; ··· 620 531 struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i]; 621 532 snprintf(postfix, sizeof(postfix), "%i", i); 622 533 nvram_read_u16(prefix, postfix, "pa2gw3a", 623 - &pwr_info->pa_2g[3], 0); 534 + &pwr_info->pa_2g[3], 0, fallback); 624 535 nvram_read_u16(prefix, postfix, "pa5gw3a", 625 - &pwr_info->pa_5g[3], 0); 536 + &pwr_info->pa_5g[3], 0, fallback); 626 537 nvram_read_u16(prefix, postfix, "pa5glw3a", 627 - &pwr_info->pa_5gl[3], 0); 538 + &pwr_info->pa_5gl[3], 0, fallback); 628 539 nvram_read_u16(prefix, postfix, "pa5ghw3a", 629 - &pwr_info->pa_5gh[3], 0); 540 + &pwr_info->pa_5gh[3], 
0, fallback); 630 541 } 631 542 } 632 543 633 - void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix) 544 + static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, 545 + const char *prefix, bool fallback) 634 546 { 635 - nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac); 636 - nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0); 637 - nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0); 547 + nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac, fallback); 548 + nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0, 549 + fallback); 550 + nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0, 551 + fallback); 638 552 639 - nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac); 640 - nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0); 641 - nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0); 553 + nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac, fallback); 554 + nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0, 555 + fallback); 556 + nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0, 557 + fallback); 642 558 643 - nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac); 644 - nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac); 559 + nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac, fallback); 560 + nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac, fallback); 645 561 } 646 562 647 - void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) 563 + static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix, 564 + bool fallback) 648 565 { 649 - bcm47xx_fill_sprom_ethernet(sprom, prefix); 566 + nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0, 567 + fallback); 568 + nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0, 569 + fallback); 570 + nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, 571 + fallback); 572 + 
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, 573 + &sprom->boardflags_hi, fallback); 574 + nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo, 575 + &sprom->boardflags2_hi, fallback); 576 + } 650 577 651 - nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0); 578 + void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix, 579 + bool fallback) 580 + { 581 + bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback); 582 + bcm47xx_fill_board_data(sprom, prefix, fallback); 583 + 584 + nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback); 652 585 653 586 switch (sprom->revision) { 654 587 case 1: 655 - bcm47xx_fill_sprom_r1234589(sprom, prefix); 656 - bcm47xx_fill_sprom_r12389(sprom, prefix); 657 - bcm47xx_fill_sprom_r1(sprom, prefix); 588 + bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); 589 + bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); 590 + bcm47xx_fill_sprom_r1(sprom, prefix, fallback); 658 591 break; 659 592 case 2: 660 - bcm47xx_fill_sprom_r1234589(sprom, prefix); 661 - bcm47xx_fill_sprom_r12389(sprom, prefix); 662 - bcm47xx_fill_sprom_r2389(sprom, prefix); 663 - bcm47xx_fill_sprom_r2(sprom, prefix); 593 + bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); 594 + bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); 595 + bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); 664 596 break; 665 597 case 3: 666 - bcm47xx_fill_sprom_r1234589(sprom, prefix); 667 - bcm47xx_fill_sprom_r12389(sprom, prefix); 668 - bcm47xx_fill_sprom_r2389(sprom, prefix); 669 - bcm47xx_fill_sprom_r389(sprom, prefix); 670 - bcm47xx_fill_sprom_r3(sprom, prefix); 598 + bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); 599 + bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); 600 + bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); 601 + bcm47xx_fill_sprom_r389(sprom, prefix, fallback); 602 + bcm47xx_fill_sprom_r3(sprom, prefix, fallback); 671 603 break; 672 604 case 4: 673 605 case 5: 674 - 
bcm47xx_fill_sprom_r1234589(sprom, prefix); 675 - bcm47xx_fill_sprom_r4589(sprom, prefix); 676 - bcm47xx_fill_sprom_r458(sprom, prefix); 677 - bcm47xx_fill_sprom_r45(sprom, prefix); 678 - bcm47xx_fill_sprom_path_r4589(sprom, prefix); 679 - bcm47xx_fill_sprom_path_r45(sprom, prefix); 606 + bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); 607 + bcm47xx_fill_sprom_r4589(sprom, prefix, fallback); 608 + bcm47xx_fill_sprom_r458(sprom, prefix, fallback); 609 + bcm47xx_fill_sprom_r45(sprom, prefix, fallback); 610 + bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback); 611 + bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback); 680 612 break; 681 613 case 8: 682 - bcm47xx_fill_sprom_r1234589(sprom, prefix); 683 - bcm47xx_fill_sprom_r12389(sprom, prefix); 684 - bcm47xx_fill_sprom_r2389(sprom, prefix); 685 - bcm47xx_fill_sprom_r389(sprom, prefix); 686 - bcm47xx_fill_sprom_r4589(sprom, prefix); 687 - bcm47xx_fill_sprom_r458(sprom, prefix); 688 - bcm47xx_fill_sprom_r89(sprom, prefix); 689 - bcm47xx_fill_sprom_path_r4589(sprom, prefix); 614 + bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); 615 + bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); 616 + bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); 617 + bcm47xx_fill_sprom_r389(sprom, prefix, fallback); 618 + bcm47xx_fill_sprom_r4589(sprom, prefix, fallback); 619 + bcm47xx_fill_sprom_r458(sprom, prefix, fallback); 620 + bcm47xx_fill_sprom_r89(sprom, prefix, fallback); 621 + bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback); 690 622 break; 691 623 case 9: 692 - bcm47xx_fill_sprom_r1234589(sprom, prefix); 693 - bcm47xx_fill_sprom_r12389(sprom, prefix); 694 - bcm47xx_fill_sprom_r2389(sprom, prefix); 695 - bcm47xx_fill_sprom_r389(sprom, prefix); 696 - bcm47xx_fill_sprom_r4589(sprom, prefix); 697 - bcm47xx_fill_sprom_r89(sprom, prefix); 698 - bcm47xx_fill_sprom_r9(sprom, prefix); 699 - bcm47xx_fill_sprom_path_r4589(sprom, prefix); 624 + bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); 625 + 
bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); 626 + bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); 627 + bcm47xx_fill_sprom_r389(sprom, prefix, fallback); 628 + bcm47xx_fill_sprom_r4589(sprom, prefix, fallback); 629 + bcm47xx_fill_sprom_r89(sprom, prefix, fallback); 630 + bcm47xx_fill_sprom_r9(sprom, prefix, fallback); 631 + bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback); 700 632 break; 701 633 default: 702 634 pr_warn("Unsupported SPROM revision %d detected. Will extract" 703 635 " v1\n", sprom->revision); 704 636 sprom->revision = 1; 705 - bcm47xx_fill_sprom_r1234589(sprom, prefix); 706 - bcm47xx_fill_sprom_r12389(sprom, prefix); 707 - bcm47xx_fill_sprom_r1(sprom, prefix); 637 + bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); 638 + bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); 639 + bcm47xx_fill_sprom_r1(sprom, prefix, fallback); 708 640 } 709 641 } 710 642 ··· 733 623 void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo, 734 624 const char *prefix) 735 625 { 736 - nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0); 626 + nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0, 627 + true); 737 628 if (!boardinfo->vendor) 738 629 boardinfo->vendor = SSB_BOARDVENDOR_BCM; 739 630 740 - nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0); 631 + nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true); 741 632 } 742 633 #endif 743 634 ··· 746 635 void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo, 747 636 const char *prefix) 748 637 { 749 - nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0); 638 + nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0, 639 + true); 750 640 if (!boardinfo->vendor) 751 641 boardinfo->vendor = SSB_BOARDVENDOR_BCM; 752 642 753 - nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0); 643 + nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true); 754 644 } 755 645 #endif
+6 -2
arch/mips/bcm47xx/wgt634u.c
··· 11 11 #include <linux/leds.h> 12 12 #include <linux/mtd/physmap.h> 13 13 #include <linux/ssb/ssb.h> 14 + #include <linux/ssb/ssb_embedded.h> 14 15 #include <linux/interrupt.h> 15 16 #include <linux/reboot.h> 16 17 #include <linux/gpio.h> ··· 117 116 118 117 /* Interrupt are level triggered, revert the interrupt polarity 119 118 to clear the interrupt. */ 120 - gpio_polarity(WGT634U_GPIO_RESET, state); 119 + ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << WGT634U_GPIO_RESET, 120 + state ? 1 << WGT634U_GPIO_RESET : 0); 121 121 122 122 if (!state) { 123 123 printk(KERN_INFO "Reset button pressed"); ··· 152 150 gpio_interrupt, IRQF_SHARED, 153 151 "WGT634U GPIO", &bcm47xx_bus.ssb.chipco)) { 154 152 gpio_direction_input(WGT634U_GPIO_RESET); 155 - gpio_intmask(WGT634U_GPIO_RESET, 1); 153 + ssb_gpio_intmask(&bcm47xx_bus.ssb, 154 + 1 << WGT634U_GPIO_RESET, 155 + 1 << WGT634U_GPIO_RESET); 156 156 ssb_chipco_irq_mask(&bcm47xx_bus.ssb.chipco, 157 157 SSB_CHIPCO_IRQ_GPIO, 158 158 SSB_CHIPCO_IRQ_GPIO);
+4 -3
arch/mips/bcm63xx/Makefile
··· 1 - obj-y += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \ 2 - dev-dsp.o dev-enet.o dev-flash.o dev-pcmcia.o dev-rng.o \ 3 - dev-spi.o dev-uart.o dev-wdt.o dev-usb-usbd.o 1 + obj-y += clk.o cpu.o cs.o gpio.o irq.o nvram.o prom.o reset.o \ 2 + setup.o timer.o dev-dsp.o dev-enet.o dev-flash.o \ 3 + dev-pcmcia.o dev-rng.o dev-spi.o dev-uart.o dev-wdt.o \ 4 + dev-usb-usbd.o 4 5 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 5 6 6 7 obj-y += boards/
+11 -60
arch/mips/bcm63xx/boards/board_bcm963xx.c
··· 18 18 #include <bcm63xx_dev_uart.h> 19 19 #include <bcm63xx_regs.h> 20 20 #include <bcm63xx_io.h> 21 + #include <bcm63xx_nvram.h> 21 22 #include <bcm63xx_dev_pci.h> 22 23 #include <bcm63xx_dev_enet.h> 23 24 #include <bcm63xx_dev_dsp.h> ··· 30 29 31 30 #define PFX "board_bcm963xx: " 32 31 33 - static struct bcm963xx_nvram nvram; 34 - static unsigned int mac_addr_used; 35 32 static struct board_info board; 36 33 37 34 /* ··· 715 716 } 716 717 717 718 /* 718 - * register & return a new board mac address 719 - */ 720 - static int board_get_mac_address(u8 *mac) 721 - { 722 - u8 *oui; 723 - int count; 724 - 725 - if (mac_addr_used >= nvram.mac_addr_count) { 726 - printk(KERN_ERR PFX "not enough mac address\n"); 727 - return -ENODEV; 728 - } 729 - 730 - memcpy(mac, nvram.mac_addr_base, ETH_ALEN); 731 - oui = mac + ETH_ALEN/2 - 1; 732 - count = mac_addr_used; 733 - 734 - while (count--) { 735 - u8 *p = mac + ETH_ALEN - 1; 736 - 737 - do { 738 - (*p)++; 739 - if (*p != 0) 740 - break; 741 - p--; 742 - } while (p != oui); 743 - 744 - if (p == oui) { 745 - printk(KERN_ERR PFX "unable to fetch mac address\n"); 746 - return -ENODEV; 747 - } 748 - } 749 - 750 - mac_addr_used++; 751 - return 0; 752 - } 753 - 754 - /* 755 719 * early init callback, read nvram data from flash and checksum it 756 720 */ 757 721 void __init board_prom_init(void) 758 722 { 759 - unsigned int check_len, i; 760 - u8 *boot_addr, *cfe, *p; 723 + unsigned int i; 724 + u8 *boot_addr, *cfe; 761 725 char cfe_version[32]; 726 + char *board_name; 762 727 u32 val; 763 728 764 729 /* read base address of boot chip select (0) ··· 745 782 strcpy(cfe_version, "unknown"); 746 783 printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); 747 784 748 - /* extract nvram data */ 749 - memcpy(&nvram, boot_addr + BCM963XX_NVRAM_OFFSET, sizeof(nvram)); 750 - 751 - /* check checksum before using data */ 752 - if (nvram.version <= 4) 753 - check_len = offsetof(struct bcm963xx_nvram, checksum_old); 754 - else 755 - check_len 
= sizeof(nvram); 756 - val = 0; 757 - p = (u8 *)&nvram; 758 - while (check_len--) 759 - val += *p; 760 - if (val) { 785 + if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { 761 786 printk(KERN_ERR PFX "invalid nvram checksum\n"); 762 787 return; 763 788 } 764 789 790 + board_name = bcm63xx_nvram_get_name(); 765 791 /* find board by name */ 766 792 for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) { 767 - if (strncmp(nvram.name, bcm963xx_boards[i]->name, 768 - sizeof(nvram.name))) 793 + if (strncmp(board_name, bcm963xx_boards[i]->name, 16)) 769 794 continue; 770 795 /* copy, board desc array is marked initdata */ 771 796 memcpy(&board, bcm963xx_boards[i], sizeof(board)); ··· 763 812 /* bail out if board is not found, will complain later */ 764 813 if (!board.name[0]) { 765 814 char name[17]; 766 - memcpy(name, nvram.name, 16); 815 + memcpy(name, board_name, 16); 767 816 name[16] = 0; 768 817 printk(KERN_ERR PFX "unknown bcm963xx board: %s\n", 769 818 name); ··· 841 890 bcm63xx_pcmcia_register(); 842 891 843 892 if (board.has_enet0 && 844 - !board_get_mac_address(board.enet0.mac_addr)) 893 + !bcm63xx_nvram_get_mac_address(board.enet0.mac_addr)) 845 894 bcm63xx_enet_register(0, &board.enet0); 846 895 847 896 if (board.has_enet1 && 848 - !board_get_mac_address(board.enet1.mac_addr)) 897 + !bcm63xx_nvram_get_mac_address(board.enet1.mac_addr)) 849 898 bcm63xx_enet_register(1, &board.enet1); 850 899 851 900 if (board.has_usbd) ··· 858 907 * do this after registering enet devices 859 908 */ 860 909 #ifdef CONFIG_SSB_PCIHOST 861 - if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { 910 + if (!bcm63xx_nvram_get_mac_address(bcm63xx_sprom.il0mac)) { 862 911 memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); 863 912 memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); 864 913 if (ssb_arch_register_fallback_sprom(
+20 -14
arch/mips/bcm63xx/clk.c
··· 14 14 #include <bcm63xx_cpu.h> 15 15 #include <bcm63xx_io.h> 16 16 #include <bcm63xx_regs.h> 17 + #include <bcm63xx_reset.h> 17 18 #include <bcm63xx_clk.h> 18 19 19 20 static DEFINE_MUTEX(clocks_mutex); ··· 125 124 CKCTL_6368_SWPKT_USB_EN | 126 125 CKCTL_6368_SWPKT_SAR_EN, enable); 127 126 if (enable) { 128 - u32 val; 129 - 130 127 /* reset switch core afer clock change */ 131 - val = bcm_perf_readl(PERF_SOFTRESET_6368_REG); 132 - val &= ~SOFTRESET_6368_ENETSW_MASK; 133 - bcm_perf_writel(val, PERF_SOFTRESET_6368_REG); 128 + bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1); 134 129 msleep(10); 135 - val |= SOFTRESET_6368_ENETSW_MASK; 136 - bcm_perf_writel(val, PERF_SOFTRESET_6368_REG); 130 + bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0); 137 131 msleep(10); 138 132 } 139 133 } ··· 218 222 CKCTL_6368_SWPKT_SAR_EN, enable); 219 223 220 224 if (enable) { 221 - u32 val; 222 - 223 225 /* reset sar core afer clock change */ 224 - val = bcm_perf_readl(PERF_SOFTRESET_6368_REG); 225 - val &= ~SOFTRESET_6368_SAR_MASK; 226 - bcm_perf_writel(val, PERF_SOFTRESET_6368_REG); 226 + bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1); 227 227 mdelay(1); 228 - val |= SOFTRESET_6368_SAR_MASK; 229 - bcm_perf_writel(val, PERF_SOFTRESET_6368_REG); 228 + bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0); 230 229 mdelay(1); 231 230 } 232 231 } ··· 241 250 242 251 static struct clk clk_ipsec = { 243 252 .set = ipsec_set, 253 + }; 254 + 255 + /* 256 + * PCIe clock 257 + */ 258 + 259 + static void pcie_set(struct clk *clk, int enable) 260 + { 261 + bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable); 262 + } 263 + 264 + static struct clk clk_pcie = { 265 + .set = pcie_set, 244 266 }; 245 267 246 268 /* ··· 317 313 return &clk_pcm; 318 314 if (BCMCPU_IS_6368() && !strcmp(id, "ipsec")) 319 315 return &clk_ipsec; 316 + if (BCMCPU_IS_6328() && !strcmp(id, "pcie")) 317 + return &clk_pcie; 320 318 return ERR_PTR(-ENOENT); 321 319 } 322 320
+107
arch/mips/bcm63xx/nvram.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> 7 + * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org> 8 + * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com> 9 + */ 10 + 11 + #define pr_fmt(fmt) "bcm63xx_nvram: " fmt 12 + 13 + #include <linux/init.h> 14 + #include <linux/crc32.h> 15 + #include <linux/export.h> 16 + #include <linux/kernel.h> 17 + #include <linux/if_ether.h> 18 + 19 + #include <bcm63xx_nvram.h> 20 + 21 + /* 22 + * nvram structure 23 + */ 24 + struct bcm963xx_nvram { 25 + u32 version; 26 + u8 reserved1[256]; 27 + u8 name[16]; 28 + u32 main_tp_number; 29 + u32 psi_size; 30 + u32 mac_addr_count; 31 + u8 mac_addr_base[ETH_ALEN]; 32 + u8 reserved2[2]; 33 + u32 checksum_old; 34 + u8 reserved3[720]; 35 + u32 checksum_high; 36 + }; 37 + 38 + static struct bcm963xx_nvram nvram; 39 + static int mac_addr_used; 40 + 41 + int __init bcm63xx_nvram_init(void *addr) 42 + { 43 + unsigned int check_len; 44 + u32 crc, expected_crc; 45 + 46 + /* extract nvram data */ 47 + memcpy(&nvram, addr, sizeof(nvram)); 48 + 49 + /* check checksum before using data */ 50 + if (nvram.version <= 4) { 51 + check_len = offsetof(struct bcm963xx_nvram, reserved3); 52 + expected_crc = nvram.checksum_old; 53 + nvram.checksum_old = 0; 54 + } else { 55 + check_len = sizeof(nvram); 56 + expected_crc = nvram.checksum_high; 57 + nvram.checksum_high = 0; 58 + } 59 + 60 + crc = crc32_le(~0, (u8 *)&nvram, check_len); 61 + 62 + if (crc != expected_crc) 63 + return -EINVAL; 64 + 65 + return 0; 66 + } 67 + 68 + u8 *bcm63xx_nvram_get_name(void) 69 + { 70 + return nvram.name; 71 + } 72 + EXPORT_SYMBOL(bcm63xx_nvram_get_name); 73 + 74 + int bcm63xx_nvram_get_mac_address(u8 *mac) 75 + { 76 + u8 *oui; 77 + int count; 78 + 79 + if (mac_addr_used >= nvram.mac_addr_count) { 80 + 
pr_err("not enough mac addresses\n"); 81 + return -ENODEV; 82 + } 83 + 84 + memcpy(mac, nvram.mac_addr_base, ETH_ALEN); 85 + oui = mac + ETH_ALEN/2 - 1; 86 + count = mac_addr_used; 87 + 88 + while (count--) { 89 + u8 *p = mac + ETH_ALEN - 1; 90 + 91 + do { 92 + (*p)++; 93 + if (*p != 0) 94 + break; 95 + p--; 96 + } while (p != oui); 97 + 98 + if (p == oui) { 99 + pr_err("unable to fetch mac address\n"); 100 + return -ENODEV; 101 + } 102 + } 103 + 104 + mac_addr_used++; 105 + return 0; 106 + } 107 + EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
+223
arch/mips/bcm63xx/reset.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com> 7 + */ 8 + 9 + #include <linux/module.h> 10 + #include <linux/mutex.h> 11 + #include <linux/err.h> 12 + #include <linux/clk.h> 13 + #include <linux/delay.h> 14 + #include <bcm63xx_cpu.h> 15 + #include <bcm63xx_io.h> 16 + #include <bcm63xx_regs.h> 17 + #include <bcm63xx_reset.h> 18 + 19 + #define __GEN_RESET_BITS_TABLE(__cpu) \ 20 + [BCM63XX_RESET_SPI] = BCM## __cpu ##_RESET_SPI, \ 21 + [BCM63XX_RESET_ENET] = BCM## __cpu ##_RESET_ENET, \ 22 + [BCM63XX_RESET_USBH] = BCM## __cpu ##_RESET_USBH, \ 23 + [BCM63XX_RESET_USBD] = BCM## __cpu ##_RESET_USBD, \ 24 + [BCM63XX_RESET_DSL] = BCM## __cpu ##_RESET_DSL, \ 25 + [BCM63XX_RESET_SAR] = BCM## __cpu ##_RESET_SAR, \ 26 + [BCM63XX_RESET_EPHY] = BCM## __cpu ##_RESET_EPHY, \ 27 + [BCM63XX_RESET_ENETSW] = BCM## __cpu ##_RESET_ENETSW, \ 28 + [BCM63XX_RESET_PCM] = BCM## __cpu ##_RESET_PCM, \ 29 + [BCM63XX_RESET_MPI] = BCM## __cpu ##_RESET_MPI, \ 30 + [BCM63XX_RESET_PCIE] = BCM## __cpu ##_RESET_PCIE, \ 31 + [BCM63XX_RESET_PCIE_EXT] = BCM## __cpu ##_RESET_PCIE_EXT, 32 + 33 + #define BCM6328_RESET_SPI SOFTRESET_6328_SPI_MASK 34 + #define BCM6328_RESET_ENET 0 35 + #define BCM6328_RESET_USBH SOFTRESET_6328_USBH_MASK 36 + #define BCM6328_RESET_USBD SOFTRESET_6328_USBS_MASK 37 + #define BCM6328_RESET_DSL 0 38 + #define BCM6328_RESET_SAR SOFTRESET_6328_SAR_MASK 39 + #define BCM6328_RESET_EPHY SOFTRESET_6328_EPHY_MASK 40 + #define BCM6328_RESET_ENETSW SOFTRESET_6328_ENETSW_MASK 41 + #define BCM6328_RESET_PCM SOFTRESET_6328_PCM_MASK 42 + #define BCM6328_RESET_MPI 0 43 + #define BCM6328_RESET_PCIE \ 44 + (SOFTRESET_6328_PCIE_MASK | \ 45 + SOFTRESET_6328_PCIE_CORE_MASK | \ 46 + SOFTRESET_6328_PCIE_HARD_MASK) 47 + #define BCM6328_RESET_PCIE_EXT SOFTRESET_6328_PCIE_EXT_MASK 48 + 49 + 
#define BCM6338_RESET_SPI SOFTRESET_6338_SPI_MASK 50 + #define BCM6338_RESET_ENET SOFTRESET_6338_ENET_MASK 51 + #define BCM6338_RESET_USBH SOFTRESET_6338_USBH_MASK 52 + #define BCM6338_RESET_USBD SOFTRESET_6338_USBS_MASK 53 + #define BCM6338_RESET_DSL SOFTRESET_6338_ADSL_MASK 54 + #define BCM6338_RESET_SAR SOFTRESET_6338_SAR_MASK 55 + #define BCM6338_RESET_EPHY 0 56 + #define BCM6338_RESET_ENETSW 0 57 + #define BCM6338_RESET_PCM 0 58 + #define BCM6338_RESET_MPI 0 59 + #define BCM6338_RESET_PCIE 0 60 + #define BCM6338_RESET_PCIE_EXT 0 61 + 62 + #define BCM6348_RESET_SPI SOFTRESET_6348_SPI_MASK 63 + #define BCM6348_RESET_ENET SOFTRESET_6348_ENET_MASK 64 + #define BCM6348_RESET_USBH SOFTRESET_6348_USBH_MASK 65 + #define BCM6348_RESET_USBD SOFTRESET_6348_USBS_MASK 66 + #define BCM6348_RESET_DSL SOFTRESET_6348_ADSL_MASK 67 + #define BCM6348_RESET_SAR SOFTRESET_6348_SAR_MASK 68 + #define BCM6348_RESET_EPHY 0 69 + #define BCM6348_RESET_ENETSW 0 70 + #define BCM6348_RESET_PCM 0 71 + #define BCM6348_RESET_MPI 0 72 + #define BCM6348_RESET_PCIE 0 73 + #define BCM6348_RESET_PCIE_EXT 0 74 + 75 + #define BCM6358_RESET_SPI SOFTRESET_6358_SPI_MASK 76 + #define BCM6358_RESET_ENET SOFTRESET_6358_ENET_MASK 77 + #define BCM6358_RESET_USBH SOFTRESET_6358_USBH_MASK 78 + #define BCM6358_RESET_USBD 0 79 + #define BCM6358_RESET_DSL SOFTRESET_6358_ADSL_MASK 80 + #define BCM6358_RESET_SAR SOFTRESET_6358_SAR_MASK 81 + #define BCM6358_RESET_EPHY SOFTRESET_6358_EPHY_MASK 82 + #define BCM6358_RESET_ENETSW 0 83 + #define BCM6358_RESET_PCM SOFTRESET_6358_PCM_MASK 84 + #define BCM6358_RESET_MPI SOFTRESET_6358_MPI_MASK 85 + #define BCM6358_RESET_PCIE 0 86 + #define BCM6358_RESET_PCIE_EXT 0 87 + 88 + #define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK 89 + #define BCM6368_RESET_ENET 0 90 + #define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK 91 + #define BCM6368_RESET_USBD SOFTRESET_6368_USBS_MASK 92 + #define BCM6368_RESET_DSL 0 93 + #define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK 94 + #define 
BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK 95 + #define BCM6368_RESET_ENETSW 0 96 + #define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK 97 + #define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK 98 + #define BCM6368_RESET_PCIE 0 99 + #define BCM6368_RESET_PCIE_EXT 0 100 + 101 + #ifdef BCMCPU_RUNTIME_DETECT 102 + 103 + /* 104 + * core reset bits 105 + */ 106 + static const u32 bcm6328_reset_bits[] = { 107 + __GEN_RESET_BITS_TABLE(6328) 108 + }; 109 + 110 + static const u32 bcm6338_reset_bits[] = { 111 + __GEN_RESET_BITS_TABLE(6338) 112 + }; 113 + 114 + static const u32 bcm6348_reset_bits[] = { 115 + __GEN_RESET_BITS_TABLE(6348) 116 + }; 117 + 118 + static const u32 bcm6358_reset_bits[] = { 119 + __GEN_RESET_BITS_TABLE(6358) 120 + }; 121 + 122 + static const u32 bcm6368_reset_bits[] = { 123 + __GEN_RESET_BITS_TABLE(6368) 124 + }; 125 + 126 + const u32 *bcm63xx_reset_bits; 127 + static int reset_reg; 128 + 129 + static int __init bcm63xx_reset_bits_init(void) 130 + { 131 + if (BCMCPU_IS_6328()) { 132 + reset_reg = PERF_SOFTRESET_6328_REG; 133 + bcm63xx_reset_bits = bcm6328_reset_bits; 134 + } else if (BCMCPU_IS_6338()) { 135 + reset_reg = PERF_SOFTRESET_REG; 136 + bcm63xx_reset_bits = bcm6338_reset_bits; 137 + } else if (BCMCPU_IS_6348()) { 138 + reset_reg = PERF_SOFTRESET_REG; 139 + bcm63xx_reset_bits = bcm6348_reset_bits; 140 + } else if (BCMCPU_IS_6358()) { 141 + reset_reg = PERF_SOFTRESET_6358_REG; 142 + bcm63xx_reset_bits = bcm6358_reset_bits; 143 + } else if (BCMCPU_IS_6368()) { 144 + reset_reg = PERF_SOFTRESET_6368_REG; 145 + bcm63xx_reset_bits = bcm6368_reset_bits; 146 + } 147 + 148 + return 0; 149 + } 150 + #else 151 + 152 + #ifdef CONFIG_BCM63XX_CPU_6328 153 + static const u32 bcm63xx_reset_bits[] = { 154 + __GEN_RESET_BITS_TABLE(6328) 155 + }; 156 + #define reset_reg PERF_SOFTRESET_6328_REG 157 + #endif 158 + 159 + #ifdef CONFIG_BCM63XX_CPU_6338 160 + static const u32 bcm63xx_reset_bits[] = { 161 + __GEN_RESET_BITS_TABLE(6338) 162 + }; 163 + #define reset_reg 
PERF_SOFTRESET_REG 164 + #endif 165 + 166 + #ifdef CONFIG_BCM63XX_CPU_6345 167 + static const u32 bcm63xx_reset_bits[] = { }; 168 + #define reset_reg 0 169 + #endif 170 + 171 + #ifdef CONFIG_BCM63XX_CPU_6348 172 + static const u32 bcm63xx_reset_bits[] = { 173 + __GEN_RESET_BITS_TABLE(6348) 174 + }; 175 + #define reset_reg PERF_SOFTRESET_REG 176 + #endif 177 + 178 + #ifdef CONFIG_BCM63XX_CPU_6358 179 + static const u32 bcm63xx_reset_bits[] = { 180 + __GEN_RESET_BITS_TABLE(6358) 181 + }; 182 + #define reset_reg PERF_SOFTRESET_6358_REG 183 + #endif 184 + 185 + #ifdef CONFIG_BCM63XX_CPU_6368 186 + static const u32 bcm63xx_reset_bits[] = { 187 + __GEN_RESET_BITS_TABLE(6368) 188 + }; 189 + #define reset_reg PERF_SOFTRESET_6368_REG 190 + #endif 191 + 192 + static int __init bcm63xx_reset_bits_init(void) { return 0; } 193 + #endif 194 + 195 + static DEFINE_SPINLOCK(reset_mutex); 196 + 197 + static void __bcm63xx_core_set_reset(u32 mask, int enable) 198 + { 199 + unsigned long flags; 200 + u32 val; 201 + 202 + if (!mask) 203 + return; 204 + 205 + spin_lock_irqsave(&reset_mutex, flags); 206 + val = bcm_perf_readl(reset_reg); 207 + 208 + if (enable) 209 + val &= ~mask; 210 + else 211 + val |= mask; 212 + 213 + bcm_perf_writel(val, reset_reg); 214 + spin_unlock_irqrestore(&reset_mutex, flags); 215 + } 216 + 217 + void bcm63xx_core_set_reset(enum bcm63xx_core_reset core, int reset) 218 + { 219 + __bcm63xx_core_set_reset(bcm63xx_reset_bits[core], reset); 220 + } 221 + EXPORT_SYMBOL(bcm63xx_core_set_reset); 222 + 223 + postcore_initcall(bcm63xx_reset_bits_init);
+5
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
··· 688 688 cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); 689 689 return addr_allocated; 690 690 } 691 + 692 + struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void) 693 + { 694 + return cvmx_bootmem_desc; 695 + }
+2 -1
arch/mips/cavium-octeon/flash_setup.c
··· 51 51 flash_map.name = "phys_mapped_flash"; 52 52 flash_map.phys = region_cfg.s.base << 16; 53 53 flash_map.size = 0x1fc00000 - flash_map.phys; 54 - flash_map.bankwidth = 1; 54 + /* 8-bit bus (0 + 1) or 16-bit bus (1 + 1) */ 55 + flash_map.bankwidth = region_cfg.s.width + 1; 55 56 flash_map.virt = ioremap(flash_map.phys, flash_map.size); 56 57 pr_notice("Bootbus flash: Setting flash for %luMB flash at " 57 58 "0x%08llx\n", flash_map.size >> 20, flash_map.phys);
-1
arch/mips/cavium-octeon/octeon-irq.c
··· 1266 1266 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 1267 1267 1268 1268 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1269 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63); 1270 1269 1271 1270 /* CIU_1 */ 1272 1271 for (i = 0; i < 16; i++)
-27
arch/mips/cavium-octeon/octeon-memcpy.S
··· 79 79 /* 80 80 * Only on the 64-bit kernel we can made use of 64-bit registers. 81 81 */ 82 - #ifdef CONFIG_64BIT 83 - #define USE_DOUBLE 84 - #endif 85 - 86 - #ifdef USE_DOUBLE 87 82 88 83 #define LOAD ld 89 84 #define LOADL ldl ··· 113 118 #define t5 $13 114 119 #define t6 $14 115 120 #define t7 $15 116 - 117 - #else 118 - 119 - #define LOAD lw 120 - #define LOADL lwl 121 - #define LOADR lwr 122 - #define STOREL swl 123 - #define STORER swr 124 - #define STORE sw 125 - #define ADD addu 126 - #define SUB subu 127 - #define SRL srl 128 - #define SLL sll 129 - #define SRA sra 130 - #define SLLV sllv 131 - #define SRLV srlv 132 - #define NBYTES 4 133 - #define LOG_NBYTES 2 134 - 135 - #endif /* USE_DOUBLE */ 136 121 137 122 #ifdef CONFIG_CPU_LITTLE_ENDIAN 138 123 #define LDFIRST LOADR ··· 370 395 371 396 COPY_BYTE(0) 372 397 COPY_BYTE(1) 373 - #ifdef USE_DOUBLE 374 398 COPY_BYTE(2) 375 399 COPY_BYTE(3) 376 400 COPY_BYTE(4) 377 401 COPY_BYTE(5) 378 - #endif 379 402 EXC( lb t0, NBYTES-2(src), l_exc) 380 403 SUB len, len, 1 381 404 jr ra
-102
arch/mips/cavium-octeon/octeon-platform.c
··· 24 24 #include <asm/octeon/cvmx-helper.h> 25 25 #include <asm/octeon/cvmx-helper-board.h> 26 26 27 - static struct octeon_cf_data octeon_cf_data; 28 - 29 - static int __init octeon_cf_device_init(void) 30 - { 31 - union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg; 32 - unsigned long base_ptr, region_base, region_size; 33 - struct platform_device *pd; 34 - struct resource cf_resources[3]; 35 - unsigned int num_resources; 36 - int i; 37 - int ret = 0; 38 - 39 - /* Setup octeon-cf platform device if present. */ 40 - base_ptr = 0; 41 - if (octeon_bootinfo->major_version == 1 42 - && octeon_bootinfo->minor_version >= 1) { 43 - if (octeon_bootinfo->compact_flash_common_base_addr) 44 - base_ptr = 45 - octeon_bootinfo->compact_flash_common_base_addr; 46 - } else { 47 - base_ptr = 0x1d000800; 48 - } 49 - 50 - if (!base_ptr) 51 - return ret; 52 - 53 - /* Find CS0 region. */ 54 - for (i = 0; i < 8; i++) { 55 - mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i)); 56 - region_base = mio_boot_reg_cfg.s.base << 16; 57 - region_size = (mio_boot_reg_cfg.s.size + 1) << 16; 58 - if (mio_boot_reg_cfg.s.en && base_ptr >= region_base 59 - && base_ptr < region_base + region_size) 60 - break; 61 - } 62 - if (i >= 7) { 63 - /* i and i + 1 are CS0 and CS1, both must be less than 8. */ 64 - goto out; 65 - } 66 - octeon_cf_data.base_region = i; 67 - octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width; 68 - octeon_cf_data.base_region_bias = base_ptr - region_base; 69 - memset(cf_resources, 0, sizeof(cf_resources)); 70 - num_resources = 0; 71 - cf_resources[num_resources].flags = IORESOURCE_MEM; 72 - cf_resources[num_resources].start = region_base; 73 - cf_resources[num_resources].end = region_base + region_size - 1; 74 - num_resources++; 75 - 76 - 77 - if (!(base_ptr & 0xfffful)) { 78 - /* 79 - * Boot loader signals availability of DMA (true_ide 80 - * mode) by setting low order bits of base_ptr to 81 - * zero. 82 - */ 83 - 84 - /* Assume that CS1 immediately follows. 
*/ 85 - mio_boot_reg_cfg.u64 = 86 - cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1)); 87 - region_base = mio_boot_reg_cfg.s.base << 16; 88 - region_size = (mio_boot_reg_cfg.s.size + 1) << 16; 89 - if (!mio_boot_reg_cfg.s.en) 90 - goto out; 91 - 92 - cf_resources[num_resources].flags = IORESOURCE_MEM; 93 - cf_resources[num_resources].start = region_base; 94 - cf_resources[num_resources].end = region_base + region_size - 1; 95 - num_resources++; 96 - 97 - octeon_cf_data.dma_engine = 0; 98 - cf_resources[num_resources].flags = IORESOURCE_IRQ; 99 - cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA; 100 - cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA; 101 - num_resources++; 102 - } else { 103 - octeon_cf_data.dma_engine = -1; 104 - } 105 - 106 - pd = platform_device_alloc("pata_octeon_cf", -1); 107 - if (!pd) { 108 - ret = -ENOMEM; 109 - goto out; 110 - } 111 - pd->dev.platform_data = &octeon_cf_data; 112 - 113 - ret = platform_device_add_resources(pd, cf_resources, num_resources); 114 - if (ret) 115 - goto fail; 116 - 117 - ret = platform_device_add(pd); 118 - if (ret) 119 - goto fail; 120 - 121 - return ret; 122 - fail: 123 - platform_device_put(pd); 124 - out: 125 - return ret; 126 - } 127 - device_initcall(octeon_cf_device_init); 128 - 129 27 /* Octeon Random Number Generator. */ 130 28 static int __init octeon_rng_device_init(void) 131 29 {
+356 -18
arch/mips/cavium-octeon/setup.c
··· 4 4 * for more details. 5 5 * 6 6 * Copyright (C) 2004-2007 Cavium Networks 7 - * Copyright (C) 2008 Wind River Systems 7 + * Copyright (C) 2008, 2009 Wind River Systems 8 + * written by Ralf Baechle <ralf@linux-mips.org> 8 9 */ 9 10 #include <linux/init.h> 11 + #include <linux/kernel.h> 10 12 #include <linux/console.h> 11 13 #include <linux/delay.h> 12 14 #include <linux/export.h> ··· 25 23 #include <linux/serial_8250.h> 26 24 #include <linux/of_fdt.h> 27 25 #include <linux/libfdt.h> 26 + #include <linux/kexec.h> 28 27 29 28 #include <asm/processor.h> 30 29 #include <asm/reboot.h> ··· 59 56 struct cvmx_bootinfo *octeon_bootinfo; 60 57 EXPORT_SYMBOL(octeon_bootinfo); 61 58 59 + static unsigned long long RESERVE_LOW_MEM = 0ull; 60 + #ifdef CONFIG_KEXEC 61 + #ifdef CONFIG_SMP 62 + /* 63 + * Wait for relocation code is prepared and send 64 + * secondary CPUs to spin until kernel is relocated. 65 + */ 66 + static void octeon_kexec_smp_down(void *ignored) 67 + { 68 + int cpu = smp_processor_id(); 69 + 70 + local_irq_disable(); 71 + set_cpu_online(cpu, false); 72 + while (!atomic_read(&kexec_ready_to_reboot)) 73 + cpu_relax(); 74 + 75 + asm volatile ( 76 + " sync \n" 77 + " synci ($0) \n"); 78 + 79 + relocated_kexec_smp_wait(NULL); 80 + } 81 + #endif 82 + 83 + #define OCTEON_DDR0_BASE (0x0ULL) 84 + #define OCTEON_DDR0_SIZE (0x010000000ULL) 85 + #define OCTEON_DDR1_BASE (0x410000000ULL) 86 + #define OCTEON_DDR1_SIZE (0x010000000ULL) 87 + #define OCTEON_DDR2_BASE (0x020000000ULL) 88 + #define OCTEON_DDR2_SIZE (0x3e0000000ULL) 89 + #define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL) 90 + 91 + static struct kimage *kimage_ptr; 92 + 93 + static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes) 94 + { 95 + int64_t addr; 96 + struct cvmx_bootmem_desc *bootmem_desc; 97 + 98 + bootmem_desc = cvmx_bootmem_get_desc(); 99 + 100 + if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) { 101 + mem_size = OCTEON_MAX_PHY_MEM_SIZE; 102 + pr_err("Error: requested memory too 
large," 103 + "truncating to maximum size\n"); 104 + } 105 + 106 + bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER; 107 + bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER; 108 + 109 + addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes); 110 + bootmem_desc->head_addr = 0; 111 + 112 + if (mem_size <= OCTEON_DDR0_SIZE) { 113 + __cvmx_bootmem_phy_free(addr, 114 + mem_size - RESERVE_LOW_MEM - 115 + low_reserved_bytes, 0); 116 + return; 117 + } 118 + 119 + __cvmx_bootmem_phy_free(addr, 120 + OCTEON_DDR0_SIZE - RESERVE_LOW_MEM - 121 + low_reserved_bytes, 0); 122 + 123 + mem_size -= OCTEON_DDR0_SIZE; 124 + 125 + if (mem_size > OCTEON_DDR1_SIZE) { 126 + __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0); 127 + __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE, 128 + mem_size - OCTEON_DDR1_SIZE, 0); 129 + } else 130 + __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0); 131 + } 132 + 133 + static int octeon_kexec_prepare(struct kimage *image) 134 + { 135 + int i; 136 + char *bootloader = "kexec"; 137 + 138 + octeon_boot_desc_ptr->argc = 0; 139 + for (i = 0; i < image->nr_segments; i++) { 140 + if (!strncmp(bootloader, (char *)image->segment[i].buf, 141 + strlen(bootloader))) { 142 + /* 143 + * convert command line string to array 144 + * of parameters (as bootloader does). 145 + */ 146 + int argc = 0, offt; 147 + char *str = (char *)image->segment[i].buf; 148 + char *ptr = strchr(str, ' '); 149 + while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) { 150 + *ptr = '\0'; 151 + if (ptr[1] != ' ') { 152 + offt = (int)(ptr - str + 1); 153 + octeon_boot_desc_ptr->argv[argc] = 154 + image->segment[i].mem + offt; 155 + argc++; 156 + } 157 + ptr = strchr(ptr + 1, ' '); 158 + } 159 + octeon_boot_desc_ptr->argc = argc; 160 + break; 161 + } 162 + } 163 + 164 + /* 165 + * Information about segments will be needed during pre-boot memory 166 + * initialization. 
167 + */ 168 + kimage_ptr = image; 169 + return 0; 170 + } 171 + 172 + static void octeon_generic_shutdown(void) 173 + { 174 + int cpu, i; 175 + struct cvmx_bootmem_desc *bootmem_desc; 176 + void *named_block_array_ptr; 177 + 178 + bootmem_desc = cvmx_bootmem_get_desc(); 179 + named_block_array_ptr = 180 + cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr); 181 + 182 + #ifdef CONFIG_SMP 183 + /* disable watchdogs */ 184 + for_each_online_cpu(cpu) 185 + cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0); 186 + #else 187 + cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); 188 + #endif 189 + if (kimage_ptr != kexec_crash_image) { 190 + memset(named_block_array_ptr, 191 + 0x0, 192 + CVMX_BOOTMEM_NUM_NAMED_BLOCKS * 193 + sizeof(struct cvmx_bootmem_named_block_desc)); 194 + /* 195 + * Mark all memory (except low 0x100000 bytes) as free. 196 + * It is the same thing that bootloader does. 197 + */ 198 + kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL, 199 + 0x100000); 200 + /* 201 + * Allocate all segments to avoid their corruption during boot. 202 + */ 203 + for (i = 0; i < kimage_ptr->nr_segments; i++) 204 + cvmx_bootmem_alloc_address( 205 + kimage_ptr->segment[i].memsz + 2*PAGE_SIZE, 206 + kimage_ptr->segment[i].mem - PAGE_SIZE, 207 + PAGE_SIZE); 208 + } else { 209 + /* 210 + * Do not mark all memory as free. Free only named sections 211 + * leaving the rest of memory unchanged. 
212 + */ 213 + struct cvmx_bootmem_named_block_desc *ptr = 214 + (struct cvmx_bootmem_named_block_desc *) 215 + named_block_array_ptr; 216 + 217 + for (i = 0; i < bootmem_desc->named_block_num_blocks; i++) 218 + if (ptr[i].size) 219 + cvmx_bootmem_free_named(ptr[i].name); 220 + } 221 + kexec_args[2] = 1UL; /* running on octeon_main_processor */ 222 + kexec_args[3] = (unsigned long)octeon_boot_desc_ptr; 223 + #ifdef CONFIG_SMP 224 + secondary_kexec_args[2] = 0UL; /* running on secondary cpu */ 225 + secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr; 226 + #endif 227 + } 228 + 229 + static void octeon_shutdown(void) 230 + { 231 + octeon_generic_shutdown(); 232 + #ifdef CONFIG_SMP 233 + smp_call_function(octeon_kexec_smp_down, NULL, 0); 234 + smp_wmb(); 235 + while (num_online_cpus() > 1) { 236 + cpu_relax(); 237 + mdelay(1); 238 + } 239 + #endif 240 + } 241 + 242 + static void octeon_crash_shutdown(struct pt_regs *regs) 243 + { 244 + octeon_generic_shutdown(); 245 + default_machine_crash_shutdown(regs); 246 + } 247 + 248 + #endif /* CONFIG_KEXEC */ 249 + 62 250 #ifdef CONFIG_CAVIUM_RESERVE32 63 251 uint64_t octeon_reserve32_memory; 64 252 EXPORT_SYMBOL(octeon_reserve32_memory); 253 + #endif 254 + 255 + #ifdef CONFIG_KEXEC 256 + /* crashkernel cmdline parameter is parsed _after_ memory setup 257 + * we also parse it here (workaround for EHB5200) */ 258 + static uint64_t crashk_size, crashk_base; 65 259 #endif 66 260 67 261 static int octeon_uart; ··· 615 415 void __init prom_init(void) 616 416 { 617 417 struct cvmx_sysinfo *sysinfo; 418 + const char *arg; 419 + char *p; 618 420 int i; 619 421 int argc; 620 422 #ifdef CONFIG_CAVIUM_RESERVE32 ··· 768 566 if (octeon_is_simulation()) 769 567 MAX_MEMORY = 64ull << 20; 770 568 569 + arg = strstr(arcs_cmdline, "mem="); 570 + if (arg) { 571 + MAX_MEMORY = memparse(arg + 4, &p); 572 + if (MAX_MEMORY == 0) 573 + MAX_MEMORY = 32ull << 30; 574 + if (*p == '@') 575 + RESERVE_LOW_MEM = memparse(p + 1, &p); 576 + } 577 + 
771 578 arcs_cmdline[0] = 0; 772 579 argc = octeon_boot_desc_ptr->argc; 773 580 for (i = 0; i < argc; i++) { ··· 784 573 cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]); 785 574 if ((strncmp(arg, "MEM=", 4) == 0) || 786 575 (strncmp(arg, "mem=", 4) == 0)) { 787 - sscanf(arg + 4, "%llu", &MAX_MEMORY); 788 - MAX_MEMORY <<= 20; 576 + MAX_MEMORY = memparse(arg + 4, &p); 789 577 if (MAX_MEMORY == 0) 790 578 MAX_MEMORY = 32ull << 30; 579 + if (*p == '@') 580 + RESERVE_LOW_MEM = memparse(p + 1, &p); 791 581 } else if (strcmp(arg, "ecc_verbose") == 0) { 792 582 #ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC 793 583 __cvmx_interrupt_ecc_report_single_bit_errors = 1; 794 584 pr_notice("Reporting of single bit ECC errors is " 795 585 "turned on\n"); 586 + #endif 587 + #ifdef CONFIG_KEXEC 588 + } else if (strncmp(arg, "crashkernel=", 12) == 0) { 589 + crashk_size = memparse(arg+12, &p); 590 + if (*p == '@') 591 + crashk_base = memparse(p+1, &p); 592 + strcat(arcs_cmdline, " "); 593 + strcat(arcs_cmdline, arg); 594 + /* 595 + * To do: switch parsing to new style, something like: 596 + * parse_crashkernel(arg, sysinfo->system_dram_size, 597 + * &crashk_size, &crashk_base); 598 + */ 796 599 #endif 797 600 } else if (strlen(arcs_cmdline) + strlen(arg) + 1 < 798 601 sizeof(arcs_cmdline) - 1) { ··· 842 617 _machine_restart = octeon_restart; 843 618 _machine_halt = octeon_halt; 844 619 620 + #ifdef CONFIG_KEXEC 621 + _machine_kexec_shutdown = octeon_shutdown; 622 + _machine_crash_shutdown = octeon_crash_shutdown; 623 + _machine_kexec_prepare = octeon_kexec_prepare; 624 + #endif 625 + 845 626 octeon_user_io_init(); 846 627 register_smp_ops(&octeon_smp_ops); 847 628 } 848 629 849 630 /* Exclude a single page from the regions obtained in plat_mem_setup. 
*/ 631 + #ifndef CONFIG_CRASH_DUMP 850 632 static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size) 851 633 { 852 634 if (addr > *mem && addr < *mem + *size) { ··· 868 636 *size -= PAGE_SIZE; 869 637 } 870 638 } 639 + #endif /* CONFIG_CRASH_DUMP */ 871 640 872 641 void __init plat_mem_setup(void) 873 642 { 874 643 uint64_t mem_alloc_size; 875 644 uint64_t total; 645 + uint64_t crashk_end; 646 + #ifndef CONFIG_CRASH_DUMP 876 647 int64_t memory; 648 + uint64_t kernel_start; 649 + uint64_t kernel_size; 650 + #endif 877 651 878 652 total = 0; 653 + crashk_end = 0; 879 654 880 655 /* 881 656 * The Mips memory init uses the first memory location for ··· 895 656 if (mem_alloc_size > MAX_MEMORY) 896 657 mem_alloc_size = MAX_MEMORY; 897 658 659 + /* Crashkernel ignores bootmem list. It relies on mem=X@Y option */ 660 + #ifdef CONFIG_CRASH_DUMP 661 + add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM); 662 + total += MAX_MEMORY; 663 + #else 664 + #ifdef CONFIG_KEXEC 665 + if (crashk_size > 0) { 666 + add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM); 667 + crashk_end = crashk_base + crashk_size; 668 + } 669 + #endif 898 670 /* 899 671 * When allocating memory, we want incrementing addresses from 900 672 * bootmem_alloc so the code in add_memory_region can merge ··· 914 664 cvmx_bootmem_lock(); 915 665 while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) 916 666 && (total < MAX_MEMORY)) { 917 - #if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR) 918 667 memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 919 668 __pa_symbol(&__init_end), -1, 920 669 0x100000, 921 670 CVMX_BOOTMEM_FLAG_NO_LOCKING); 922 - #elif defined(CONFIG_HIGHMEM) 923 - memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31, 924 - 0x100000, 925 - CVMX_BOOTMEM_FLAG_NO_LOCKING); 926 - #else 927 - memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20, 928 - 0x100000, 929 - CVMX_BOOTMEM_FLAG_NO_LOCKING); 930 - #endif 931 671 if (memory >= 0) { 932 672 u64 size = 
mem_alloc_size; 673 + #ifdef CONFIG_KEXEC 674 + uint64_t end; 675 + #endif 933 676 934 677 /* 935 678 * exclude a page at the beginning and end of ··· 935 692 memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE + 936 693 CVMX_PCIE_BAR1_PHYS_SIZE, 937 694 &memory, &size); 695 + #ifdef CONFIG_KEXEC 696 + end = memory + mem_alloc_size; 938 697 939 698 /* 940 - * This function automatically merges address 941 - * regions next to each other if they are 942 - * received in incrementing order. 699 + * This function automatically merges address regions 700 + * next to each other if they are received in 701 + * incrementing order 943 702 */ 944 - if (size) 945 - add_memory_region(memory, size, BOOT_MEM_RAM); 703 + if (memory < crashk_base && end > crashk_end) { 704 + /* region is fully in */ 705 + add_memory_region(memory, 706 + crashk_base - memory, 707 + BOOT_MEM_RAM); 708 + total += crashk_base - memory; 709 + add_memory_region(crashk_end, 710 + end - crashk_end, 711 + BOOT_MEM_RAM); 712 + total += end - crashk_end; 713 + continue; 714 + } 715 + 716 + if (memory >= crashk_base && end <= crashk_end) 717 + /* 718 + * Entire memory region is within the new 719 + * kernel's memory, ignore it. 720 + */ 721 + continue; 722 + 723 + if (memory > crashk_base && memory < crashk_end && 724 + end > crashk_end) { 725 + /* 726 + * Overlap with the beginning of the region, 727 + * reserve the beginning. 728 + */ 729 + mem_alloc_size -= crashk_end - memory; 730 + memory = crashk_end; 731 + } else if (memory < crashk_base && end > crashk_base && 732 + end < crashk_end) 733 + /* 734 + * Overlap with the beginning of the region, 735 + * chop of end. 
736 + */ 737 + mem_alloc_size -= end - crashk_base; 738 + #endif 739 + add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM); 946 740 total += mem_alloc_size; 741 + /* Recovering mem_alloc_size */ 742 + mem_alloc_size = 4 << 20; 947 743 } else { 948 744 break; 949 745 } 950 746 } 951 747 cvmx_bootmem_unlock(); 748 + /* Add the memory region for the kernel. */ 749 + kernel_start = (unsigned long) _text; 750 + kernel_size = ALIGN(_end - _text, 0x100000); 751 + 752 + /* Adjust for physical offset. */ 753 + kernel_start &= ~0xffffffff80000000ULL; 754 + add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM); 755 + #endif /* CONFIG_CRASH_DUMP */ 952 756 953 757 #ifdef CONFIG_CAVIUM_RESERVE32 954 758 /* ··· 1111 821 } 1112 822 unflatten_device_tree(); 1113 823 } 824 + 825 + static int __initdata disable_octeon_edac_p; 826 + 827 + static int __init disable_octeon_edac(char *str) 828 + { 829 + disable_octeon_edac_p = 1; 830 + return 0; 831 + } 832 + early_param("disable_octeon_edac", disable_octeon_edac); 833 + 834 + static char *edac_device_names[] = { 835 + "octeon_l2c_edac", 836 + "octeon_pc_edac", 837 + }; 838 + 839 + static int __init edac_devinit(void) 840 + { 841 + struct platform_device *dev; 842 + int i, err = 0; 843 + int num_lmc; 844 + char *name; 845 + 846 + if (disable_octeon_edac_p) 847 + return 0; 848 + 849 + for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) { 850 + name = edac_device_names[i]; 851 + dev = platform_device_register_simple(name, -1, NULL, 0); 852 + if (IS_ERR(dev)) { 853 + pr_err("Registation of %s failed!\n", name); 854 + err = PTR_ERR(dev); 855 + } 856 + } 857 + 858 + num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 859 + (OCTEON_IS_MODEL(OCTEON_CN56XX) ? 
2 : 1); 860 + for (i = 0; i < num_lmc; i++) { 861 + dev = platform_device_register_simple("octeon_lmc_edac", 862 + i, NULL, 0); 863 + if (IS_ERR(dev)) { 864 + pr_err("Registation of octeon_lmc_edac %d failed!\n", i); 865 + err = PTR_ERR(dev); 866 + } 867 + } 868 + 869 + return err; 870 + } 871 + device_initcall(edac_devinit);
+111
arch/mips/configs/ath79_defconfig
··· 1 + CONFIG_ATH79=y 2 + CONFIG_ATH79_MACH_AP121=y 3 + CONFIG_ATH79_MACH_AP81=y 4 + CONFIG_ATH79_MACH_DB120=y 5 + CONFIG_ATH79_MACH_PB44=y 6 + CONFIG_ATH79_MACH_UBNT_XM=y 7 + CONFIG_HZ_100=y 8 + # CONFIG_SECCOMP is not set 9 + CONFIG_EXPERIMENTAL=y 10 + # CONFIG_LOCALVERSION_AUTO is not set 11 + CONFIG_SYSVIPC=y 12 + CONFIG_HIGH_RES_TIMERS=y 13 + CONFIG_BLK_DEV_INITRD=y 14 + # CONFIG_RD_GZIP is not set 15 + CONFIG_RD_LZMA=y 16 + # CONFIG_KALLSYMS is not set 17 + # CONFIG_AIO is not set 18 + CONFIG_EMBEDDED=y 19 + # CONFIG_VM_EVENT_COUNTERS is not set 20 + # CONFIG_SLUB_DEBUG is not set 21 + # CONFIG_COMPAT_BRK is not set 22 + CONFIG_MODULES=y 23 + CONFIG_MODULE_UNLOAD=y 24 + # CONFIG_BLK_DEV_BSG is not set 25 + # CONFIG_IOSCHED_CFQ is not set 26 + CONFIG_PCI=y 27 + # CONFIG_SUSPEND is not set 28 + CONFIG_NET=y 29 + CONFIG_PACKET=y 30 + CONFIG_UNIX=y 31 + CONFIG_INET=y 32 + CONFIG_IP_MULTICAST=y 33 + CONFIG_IP_ADVANCED_ROUTER=y 34 + # CONFIG_INET_XFRM_MODE_TRANSPORT is not set 35 + # CONFIG_INET_XFRM_MODE_TUNNEL is not set 36 + # CONFIG_INET_XFRM_MODE_BEET is not set 37 + # CONFIG_INET_LRO is not set 38 + # CONFIG_IPV6 is not set 39 + CONFIG_CFG80211=m 40 + CONFIG_MAC80211=m 41 + CONFIG_MAC80211_DEBUGFS=y 42 + CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43 + # CONFIG_FIRMWARE_IN_KERNEL is not set 44 + CONFIG_MTD=y 45 + CONFIG_MTD_REDBOOT_PARTS=y 46 + CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2 47 + CONFIG_MTD_CMDLINE_PARTS=y 48 + CONFIG_MTD_CHAR=y 49 + CONFIG_MTD_BLOCK=y 50 + CONFIG_MTD_CFI=y 51 + CONFIG_MTD_JEDECPROBE=y 52 + CONFIG_MTD_CFI_AMDSTD=y 53 + CONFIG_MTD_COMPLEX_MAPPINGS=y 54 + CONFIG_MTD_PHYSMAP=y 55 + CONFIG_MTD_M25P80=y 56 + # CONFIG_M25PXX_USE_FAST_READ is not set 57 + CONFIG_NETDEVICES=y 58 + # CONFIG_NET_PACKET_ENGINE is not set 59 + CONFIG_ATH_COMMON=m 60 + CONFIG_ATH9K=m 61 + CONFIG_ATH9K_AHB=y 62 + CONFIG_INPUT=m 63 + # CONFIG_INPUT_MOUSEDEV is not set 64 + # CONFIG_KEYBOARD_ATKBD is not set 65 + CONFIG_KEYBOARD_GPIO_POLLED=m 66 + # CONFIG_INPUT_MOUSE 
is not set 67 + CONFIG_INPUT_MISC=y 68 + # CONFIG_SERIO is not set 69 + # CONFIG_VT is not set 70 + # CONFIG_LEGACY_PTYS is not set 71 + # CONFIG_DEVKMEM is not set 72 + CONFIG_SERIAL_8250=y 73 + CONFIG_SERIAL_8250_CONSOLE=y 74 + # CONFIG_SERIAL_8250_PCI is not set 75 + CONFIG_SERIAL_8250_NR_UARTS=1 76 + CONFIG_SERIAL_8250_RUNTIME_UARTS=1 77 + CONFIG_SERIAL_AR933X=y 78 + CONFIG_SERIAL_AR933X_CONSOLE=y 79 + # CONFIG_HW_RANDOM is not set 80 + CONFIG_I2C=y 81 + # CONFIG_I2C_COMPAT is not set 82 + # CONFIG_I2C_HELPER_AUTO is not set 83 + CONFIG_I2C_GPIO=y 84 + CONFIG_SPI=y 85 + CONFIG_SPI_ATH79=y 86 + CONFIG_SPI_GPIO=y 87 + CONFIG_GPIO_SYSFS=y 88 + CONFIG_GPIO_PCF857X=y 89 + # CONFIG_HWMON is not set 90 + CONFIG_WATCHDOG=y 91 + CONFIG_ATH79_WDT=y 92 + # CONFIG_VGA_ARB is not set 93 + # CONFIG_HID is not set 94 + # CONFIG_USB_HID is not set 95 + CONFIG_USB=y 96 + CONFIG_USB_EHCI_HCD=y 97 + # CONFIG_USB_EHCI_TT_NEWSCHED is not set 98 + CONFIG_USB_OHCI_HCD=y 99 + CONFIG_LEDS_CLASS=y 100 + CONFIG_LEDS_GPIO=y 101 + # CONFIG_IOMMU_SUPPORT is not set 102 + # CONFIG_DNOTIFY is not set 103 + # CONFIG_PROC_PAGE_MONITOR is not set 104 + # CONFIG_ENABLE_MUST_CHECK is not set 105 + CONFIG_STRIP_ASM_SYMS=y 106 + CONFIG_DEBUG_FS=y 107 + # CONFIG_SCHED_DEBUG is not set 108 + # CONFIG_FTRACE is not set 109 + CONFIG_CRYPTO=y 110 + # CONFIG_CRYPTO_ANSI_CPRNG is not set 111 + CONFIG_CRC_ITU_T=m
+81 -17
arch/mips/configs/cavium_octeon_defconfig
··· 1 1 CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD=y 2 + CONFIG_CAVIUM_CN63XXP1=y 2 3 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2 3 4 CONFIG_SPARSEMEM_MANUAL=y 5 + CONFIG_TRANSPARENT_HUGEPAGE=y 4 6 CONFIG_SMP=y 7 + CONFIG_NR_CPUS=32 8 + CONFIG_HZ_100=y 5 9 CONFIG_PREEMPT=y 6 10 CONFIG_EXPERIMENTAL=y 7 11 CONFIG_SYSVIPC=y ··· 15 11 CONFIG_IKCONFIG=y 16 12 CONFIG_IKCONFIG_PROC=y 17 13 CONFIG_LOG_BUF_SHIFT=14 18 - CONFIG_SYSFS_DEPRECATED_V2=y 19 14 CONFIG_RELAY=y 20 15 CONFIG_BLK_DEV_INITRD=y 21 - # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 22 16 CONFIG_EXPERT=y 23 - # CONFIG_PCSPKR_PLATFORM is not set 24 17 CONFIG_SLAB=y 25 18 CONFIG_MODULES=y 26 19 CONFIG_MODULE_UNLOAD=y 27 20 # CONFIG_BLK_DEV_BSG is not set 21 + CONFIG_PCI=y 22 + CONFIG_PCI_MSI=y 28 23 CONFIG_MIPS32_COMPAT=y 29 24 CONFIG_MIPS32_O32=y 30 25 CONFIG_MIPS32_N32=y ··· 45 42 CONFIG_IP_PIMSM_V2=y 46 43 CONFIG_SYN_COOKIES=y 47 44 # CONFIG_INET_LRO is not set 48 - # CONFIG_IPV6 is not set 45 + CONFIG_IPV6=y 49 46 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 50 47 # CONFIG_FW_LOADER is not set 51 48 CONFIG_MTD=y 52 - CONFIG_MTD_PARTITIONS=y 49 + # CONFIG_MTD_OF_PARTS is not set 53 50 CONFIG_MTD_CHAR=y 54 51 CONFIG_MTD_BLOCK=y 55 52 CONFIG_MTD_CFI=y 56 53 CONFIG_MTD_CFI_AMDSTD=y 57 - CONFIG_MTD_PHYSMAP=y 54 + CONFIG_MTD_SLRAM=y 55 + CONFIG_PROC_DEVICETREE=y 58 56 CONFIG_BLK_DEV_LOOP=y 59 - # CONFIG_MISC_DEVICES is not set 57 + CONFIG_EEPROM_AT24=y 58 + CONFIG_EEPROM_AT25=y 59 + CONFIG_BLK_DEV_SD=y 60 + CONFIG_ATA=y 61 + CONFIG_SATA_AHCI=y 62 + CONFIG_PATA_OCTEON_CF=y 63 + CONFIG_SATA_SIL=y 60 64 CONFIG_NETDEVICES=y 61 - CONFIG_NET_ETHERNET=y 62 65 CONFIG_MII=y 63 - # CONFIG_NETDEV_10000 is not set 66 + # CONFIG_NET_VENDOR_3COM is not set 67 + # CONFIG_NET_VENDOR_ADAPTEC is not set 68 + # CONFIG_NET_VENDOR_ALTEON is not set 69 + # CONFIG_NET_VENDOR_AMD is not set 70 + # CONFIG_NET_VENDOR_ATHEROS is not set 71 + # CONFIG_NET_VENDOR_BROADCOM is not set 72 + # CONFIG_NET_VENDOR_BROCADE is not set 73 + # CONFIG_NET_VENDOR_CHELSIO is 
not set 74 + # CONFIG_NET_VENDOR_CISCO is not set 75 + # CONFIG_NET_VENDOR_DEC is not set 76 + # CONFIG_NET_VENDOR_DLINK is not set 77 + # CONFIG_NET_VENDOR_EMULEX is not set 78 + # CONFIG_NET_VENDOR_EXAR is not set 79 + # CONFIG_NET_VENDOR_HP is not set 80 + # CONFIG_NET_VENDOR_INTEL is not set 81 + # CONFIG_NET_VENDOR_MARVELL is not set 82 + # CONFIG_NET_VENDOR_MELLANOX is not set 83 + # CONFIG_NET_VENDOR_MICREL is not set 84 + # CONFIG_NET_VENDOR_MYRI is not set 85 + # CONFIG_NET_VENDOR_NATSEMI is not set 86 + # CONFIG_NET_VENDOR_NVIDIA is not set 87 + # CONFIG_NET_VENDOR_OKI is not set 88 + # CONFIG_NET_PACKET_ENGINE is not set 89 + # CONFIG_NET_VENDOR_QLOGIC is not set 90 + # CONFIG_NET_VENDOR_REALTEK is not set 91 + # CONFIG_NET_VENDOR_RDC is not set 92 + # CONFIG_NET_VENDOR_SEEQ is not set 93 + # CONFIG_NET_VENDOR_SILAN is not set 94 + # CONFIG_NET_VENDOR_SIS is not set 95 + # CONFIG_NET_VENDOR_SMSC is not set 96 + # CONFIG_NET_VENDOR_STMICRO is not set 97 + # CONFIG_NET_VENDOR_SUN is not set 98 + # CONFIG_NET_VENDOR_TEHUTI is not set 99 + # CONFIG_NET_VENDOR_TI is not set 100 + # CONFIG_NET_VENDOR_TOSHIBA is not set 101 + # CONFIG_NET_VENDOR_VIA is not set 102 + # CONFIG_NET_VENDOR_WIZNET is not set 103 + CONFIG_MARVELL_PHY=y 104 + CONFIG_BROADCOM_PHY=y 105 + CONFIG_BCM87XX_PHY=y 106 + # CONFIG_WLAN is not set 64 107 # CONFIG_INPUT is not set 65 108 # CONFIG_SERIO is not set 66 109 # CONFIG_VT is not set ··· 115 66 CONFIG_SERIAL_8250_NR_UARTS=2 116 67 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 117 68 # CONFIG_HW_RANDOM is not set 69 + CONFIG_I2C=y 70 + CONFIG_I2C_OCTEON=y 71 + CONFIG_SPI=y 72 + CONFIG_SPI_OCTEON=y 118 73 # CONFIG_HWMON is not set 119 74 CONFIG_WATCHDOG=y 120 75 # CONFIG_USB_SUPPORT is not set 76 + CONFIG_RTC_CLASS=y 77 + CONFIG_RTC_DRV_DS1307=y 78 + CONFIG_STAGING=y 79 + CONFIG_OCTEON_ETHERNET=y 80 + # CONFIG_NET_VENDOR_SILICOM is not set 81 + # CONFIG_IOMMU_SUPPORT is not set 82 + CONFIG_EXT4_FS=y 83 + CONFIG_EXT4_FS_POSIX_ACL=y 84 + 
CONFIG_EXT4_FS_SECURITY=y 85 + CONFIG_MSDOS_FS=y 86 + CONFIG_VFAT_FS=y 121 87 CONFIG_PROC_KCORE=y 122 88 CONFIG_TMPFS=y 123 - # CONFIG_NETWORK_FILESYSTEMS is not set 124 - CONFIG_NLS=y 89 + CONFIG_HUGETLBFS=y 90 + CONFIG_NFS_FS=y 91 + CONFIG_NFS_V4=y 92 + CONFIG_NFS_V4_1=y 93 + CONFIG_ROOT_NFS=y 125 94 CONFIG_NLS_CODEPAGE_437=y 95 + CONFIG_NLS_ASCII=y 126 96 CONFIG_NLS_ISO8859_1=y 97 + CONFIG_NLS_UTF8=y 127 98 CONFIG_MAGIC_SYSRQ=y 128 99 CONFIG_DEBUG_FS=y 129 - CONFIG_DEBUG_KERNEL=y 130 - CONFIG_DEBUG_SPINLOCK=y 131 - CONFIG_DEBUG_SPINLOCK_SLEEP=y 100 + # CONFIG_SCHED_DEBUG is not set 132 101 CONFIG_DEBUG_INFO=y 133 - # CONFIG_RCU_CPU_STALL_DETECTOR is not set 134 - CONFIG_SYSCTL_SYSCALL_CHECK=y 135 - # CONFIG_EARLY_PRINTK is not set 136 102 CONFIG_SECURITY=y 137 103 CONFIG_SECURITY_NETWORK=y 138 104 CONFIG_CRYPTO_CBC=y
-94
arch/mips/configs/yosemite_defconfig
··· 1 - CONFIG_PMC_YOSEMITE=y 2 - CONFIG_HIGHMEM=y 3 - CONFIG_SMP=y 4 - CONFIG_NR_CPUS=2 5 - CONFIG_HZ_1000=y 6 - CONFIG_SYSVIPC=y 7 - CONFIG_IKCONFIG=y 8 - CONFIG_IKCONFIG_PROC=y 9 - CONFIG_LOG_BUF_SHIFT=14 10 - CONFIG_RELAY=y 11 - CONFIG_EXPERT=y 12 - CONFIG_SLAB=y 13 - CONFIG_MODULES=y 14 - CONFIG_MODULE_UNLOAD=y 15 - CONFIG_PCI=y 16 - CONFIG_PM=y 17 - CONFIG_NET=y 18 - CONFIG_PACKET=m 19 - CONFIG_UNIX=y 20 - CONFIG_XFRM_USER=m 21 - CONFIG_INET=y 22 - CONFIG_IP_PNP=y 23 - CONFIG_IP_PNP_BOOTP=y 24 - CONFIG_INET_XFRM_MODE_TRANSPORT=m 25 - CONFIG_INET_XFRM_MODE_TUNNEL=m 26 - CONFIG_INET_XFRM_MODE_BEET=m 27 - CONFIG_IPV6_PRIVACY=y 28 - CONFIG_IPV6_ROUTER_PREF=y 29 - CONFIG_INET6_AH=m 30 - CONFIG_INET6_ESP=m 31 - CONFIG_INET6_IPCOMP=m 32 - CONFIG_IPV6_TUNNEL=m 33 - CONFIG_NETWORK_SECMARK=y 34 - CONFIG_FW_LOADER=m 35 - CONFIG_CONNECTOR=m 36 - CONFIG_CDROM_PKTCDVD=m 37 - CONFIG_ATA_OVER_ETH=m 38 - CONFIG_SGI_IOC4=m 39 - CONFIG_RAID_ATTRS=m 40 - CONFIG_NETDEVICES=y 41 - CONFIG_PHYLIB=m 42 - CONFIG_MARVELL_PHY=m 43 - CONFIG_DAVICOM_PHY=m 44 - CONFIG_QSEMI_PHY=m 45 - CONFIG_LXT_PHY=m 46 - CONFIG_CICADA_PHY=m 47 - CONFIG_VITESSE_PHY=m 48 - CONFIG_SMSC_PHY=m 49 - CONFIG_NET_ETHERNET=y 50 - CONFIG_MII=y 51 - CONFIG_QLA3XXX=m 52 - CONFIG_CHELSIO_T3=m 53 - CONFIG_NETXEN_NIC=m 54 - # CONFIG_INPUT is not set 55 - # CONFIG_SERIO is not set 56 - # CONFIG_VT is not set 57 - CONFIG_SERIAL_8250=y 58 - CONFIG_SERIAL_8250_CONSOLE=y 59 - # CONFIG_HW_RANDOM is not set 60 - # CONFIG_HWMON is not set 61 - CONFIG_FUSE_FS=m 62 - CONFIG_PROC_KCORE=y 63 - CONFIG_TMPFS=y 64 - CONFIG_TMPFS_POSIX_ACL=y 65 - CONFIG_NFS_FS=y 66 - CONFIG_ROOT_NFS=y 67 - CONFIG_DEBUG_KERNEL=y 68 - CONFIG_DEBUG_MUTEXES=y 69 - CONFIG_KEYS=y 70 - CONFIG_KEYS_DEBUG_PROC_KEYS=y 71 - CONFIG_CRYPTO_NULL=m 72 - CONFIG_CRYPTO_ECB=m 73 - CONFIG_CRYPTO_PCBC=m 74 - CONFIG_CRYPTO_HMAC=y 75 - CONFIG_CRYPTO_MD4=m 76 - CONFIG_CRYPTO_MICHAEL_MIC=m 77 - CONFIG_CRYPTO_SHA256=m 78 - CONFIG_CRYPTO_SHA512=m 79 - CONFIG_CRYPTO_TGR192=m 80 
- CONFIG_CRYPTO_WP512=m 81 - CONFIG_CRYPTO_ANUBIS=m 82 - CONFIG_CRYPTO_ARC4=m 83 - CONFIG_CRYPTO_BLOWFISH=m 84 - CONFIG_CRYPTO_CAMELLIA=m 85 - CONFIG_CRYPTO_CAST5=m 86 - CONFIG_CRYPTO_CAST6=m 87 - CONFIG_CRYPTO_FCRYPT=m 88 - CONFIG_CRYPTO_KHAZAD=m 89 - CONFIG_CRYPTO_SERPENT=m 90 - CONFIG_CRYPTO_TEA=m 91 - CONFIG_CRYPTO_TWOFISH=m 92 - CONFIG_CRC16=m 93 - CONFIG_CRC32=m 94 - CONFIG_LIBCRC32C=m
+1 -1
arch/mips/fw/sni/Makefile
··· 2 2 # Makefile for the SNI prom monitor routines under Linux. 3 3 # 4 4 5 - lib-$(CONFIG_SNIPROM) += sniprom.o 5 + lib-$(CONFIG_FW_SNIPROM) += sniprom.o
+3 -3
arch/mips/include/asm/cpu.h
··· 243 243 */ 244 244 CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310, 245 245 CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650, 246 - CPU_R4700, CPU_R5000, CPU_R5000A, CPU_R5500, CPU_NEVADA, CPU_R5432, 247 - CPU_R10000, CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, 248 - CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000, 246 + CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000, 247 + CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122, 248 + CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000, 249 249 CPU_SR71000, CPU_RM9000, CPU_TX49XX, 250 250 251 251 /*
+4 -4
arch/mips/include/asm/fw/arc/types.h
··· 10 10 #define _ASM_ARC_TYPES_H 11 11 12 12 13 - #ifdef CONFIG_ARC32 13 + #ifdef CONFIG_FW_ARC32 14 14 15 15 typedef char CHAR; 16 16 typedef short SHORT; ··· 33 33 typedef LONG _PULONG; 34 34 typedef LONG _PVOID; 35 35 36 - #endif /* CONFIG_ARC32 */ 36 + #endif /* CONFIG_FW_ARC32 */ 37 37 38 - #ifdef CONFIG_ARC64 38 + #ifdef CONFIG_FW_ARC64 39 39 40 40 typedef char CHAR; 41 41 typedef short SHORT; ··· 57 57 typedef ULONG *_PULONG; 58 58 typedef VOID *_PVOID; 59 59 60 - #endif /* CONFIG_ARC64 */ 60 + #endif /* CONFIG_FW_ARC64 */ 61 61 62 62 typedef CHAR *PCHAR; 63 63 typedef SHORT *PSHORT;
-25
arch/mips/include/asm/hazards.h
··· 161 161 ) 162 162 #define instruction_hazard() do { } while (0) 163 163 164 - #elif defined(CONFIG_CPU_RM9000) 165 - 166 - /* 167 - * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent 168 - * use of the JTLB for instructions should not occur for 4 cpu cycles and use 169 - * for data translations should not occur for 3 cpu cycles. 170 - */ 171 - 172 - ASMMACRO(mtc0_tlbw_hazard, 173 - _ssnop; _ssnop; _ssnop; _ssnop 174 - ) 175 - ASMMACRO(tlbw_use_hazard, 176 - _ssnop; _ssnop; _ssnop; _ssnop 177 - ) 178 - ASMMACRO(tlb_probe_hazard, 179 - _ssnop; _ssnop; _ssnop; _ssnop 180 - ) 181 - ASMMACRO(irq_enable_hazard, 182 - ) 183 - ASMMACRO(irq_disable_hazard, 184 - ) 185 - ASMMACRO(back_to_back_c0_hazard, 186 - ) 187 - #define instruction_hazard() do { } while (0) 188 - 189 164 #elif defined(CONFIG_CPU_SB1) 190 165 191 166 /*
+24 -3
arch/mips/include/asm/kexec.h
··· 9 9 #ifndef _MIPS_KEXEC 10 10 # define _MIPS_KEXEC 11 11 12 + #include <asm/stacktrace.h> 13 + 12 14 /* Maximum physical address we can use pages from */ 13 15 #define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000) 14 16 /* Maximum address we can reach in physical address mode */ 15 17 #define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000) 16 18 /* Maximum address we can use for the control code buffer */ 17 19 #define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) 18 - 19 - #define KEXEC_CONTROL_PAGE_SIZE 4096 20 + /* Reserve 3*4096 bytes for board-specific info */ 21 + #define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096) 20 22 21 23 /* The native architecture */ 22 24 #define KEXEC_ARCH KEXEC_ARCH_MIPS 25 + #define MAX_NOTE_BYTES 1024 23 26 24 27 static inline void crash_setup_regs(struct pt_regs *newregs, 25 28 struct pt_regs *oldregs) 26 29 { 27 - /* Dummy implementation for now */ 30 + if (oldregs) 31 + memcpy(newregs, oldregs, sizeof(*newregs)); 32 + else 33 + prepare_frametrace(newregs); 28 34 } 35 + 36 + #ifdef CONFIG_KEXEC 37 + struct kimage; 38 + extern unsigned long kexec_args[4]; 39 + extern int (*_machine_kexec_prepare)(struct kimage *); 40 + extern void (*_machine_kexec_shutdown)(void); 41 + extern void (*_machine_crash_shutdown)(struct pt_regs *regs); 42 + extern void default_machine_crash_shutdown(struct pt_regs *regs); 43 + #ifdef CONFIG_SMP 44 + extern const unsigned char kexec_smp_wait[]; 45 + extern unsigned long secondary_kexec_args[4]; 46 + extern void (*relocated_kexec_smp_wait) (void *); 47 + extern atomic_t kexec_ready_to_reboot; 48 + #endif 49 + #endif 29 50 30 51 #endif /* !_MIPS_KEXEC */
-1
arch/mips/include/asm/mach-ar7/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-ath79/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-au1x00/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
+2 -2
arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
··· 44 44 extern union bcm47xx_bus bcm47xx_bus; 45 45 extern enum bcm47xx_bus_type bcm47xx_bus_type; 46 46 47 - void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix); 48 - void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix); 47 + void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix, 48 + bool fallback); 49 49 50 50 #ifdef CONFIG_BCM47XX_SSB 51 51 void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
+8 -146
arch/mips/include/asm/mach-bcm47xx/gpio.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net> 7 - */ 1 + #ifndef __ASM_MIPS_MACH_BCM47XX_GPIO_H 2 + #define __ASM_MIPS_MACH_BCM47XX_GPIO_H 8 3 9 - #ifndef __BCM47XX_GPIO_H 10 - #define __BCM47XX_GPIO_H 4 + #include <asm-generic/gpio.h> 11 5 12 - #include <linux/ssb/ssb_embedded.h> 13 - #include <linux/bcma/bcma.h> 14 - #include <asm/mach-bcm47xx/bcm47xx.h> 6 + #define gpio_get_value __gpio_get_value 7 + #define gpio_set_value __gpio_set_value 15 8 16 - #define BCM47XX_EXTIF_GPIO_LINES 5 17 - #define BCM47XX_CHIPCO_GPIO_LINES 16 9 + #define gpio_cansleep __gpio_cansleep 10 + #define gpio_to_irq __gpio_to_irq 18 11 19 - extern int gpio_request(unsigned gpio, const char *label); 20 - extern void gpio_free(unsigned gpio); 21 - extern int gpio_to_irq(unsigned gpio); 22 - 23 - static inline int gpio_get_value(unsigned gpio) 12 + static inline int irq_to_gpio(unsigned int irq) 24 13 { 25 - switch (bcm47xx_bus_type) { 26 - #ifdef CONFIG_BCM47XX_SSB 27 - case BCM47XX_BUS_TYPE_SSB: 28 - return ssb_gpio_in(&bcm47xx_bus.ssb, 1 << gpio); 29 - #endif 30 - #ifdef CONFIG_BCM47XX_BCMA 31 - case BCM47XX_BUS_TYPE_BCMA: 32 - return bcma_chipco_gpio_in(&bcm47xx_bus.bcma.bus.drv_cc, 33 - 1 << gpio); 34 - #endif 35 - } 36 14 return -EINVAL; 37 15 } 38 16 39 - #define gpio_get_value_cansleep gpio_get_value 40 - 41 - static inline void gpio_set_value(unsigned gpio, int value) 42 - { 43 - switch (bcm47xx_bus_type) { 44 - #ifdef CONFIG_BCM47XX_SSB 45 - case BCM47XX_BUS_TYPE_SSB: 46 - ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio, 47 - value ? 1 << gpio : 0); 48 - return; 49 17 #endif 50 - #ifdef CONFIG_BCM47XX_BCMA 51 - case BCM47XX_BUS_TYPE_BCMA: 52 - bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio, 53 - value ? 
1 << gpio : 0); 54 - return; 55 - #endif 56 - } 57 - } 58 - 59 - #define gpio_set_value_cansleep gpio_set_value 60 - 61 - static inline int gpio_cansleep(unsigned gpio) 62 - { 63 - return 0; 64 - } 65 - 66 - static inline int gpio_is_valid(unsigned gpio) 67 - { 68 - return gpio < (BCM47XX_EXTIF_GPIO_LINES + BCM47XX_CHIPCO_GPIO_LINES); 69 - } 70 - 71 - 72 - static inline int gpio_direction_input(unsigned gpio) 73 - { 74 - switch (bcm47xx_bus_type) { 75 - #ifdef CONFIG_BCM47XX_SSB 76 - case BCM47XX_BUS_TYPE_SSB: 77 - ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 0); 78 - return 0; 79 - #endif 80 - #ifdef CONFIG_BCM47XX_BCMA 81 - case BCM47XX_BUS_TYPE_BCMA: 82 - bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio, 83 - 0); 84 - return 0; 85 - #endif 86 - } 87 - return -EINVAL; 88 - } 89 - 90 - static inline int gpio_direction_output(unsigned gpio, int value) 91 - { 92 - switch (bcm47xx_bus_type) { 93 - #ifdef CONFIG_BCM47XX_SSB 94 - case BCM47XX_BUS_TYPE_SSB: 95 - /* first set the gpio out value */ 96 - ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio, 97 - value ? 1 << gpio : 0); 98 - /* then set the gpio mode */ 99 - ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 1 << gpio); 100 - return 0; 101 - #endif 102 - #ifdef CONFIG_BCM47XX_BCMA 103 - case BCM47XX_BUS_TYPE_BCMA: 104 - /* first set the gpio out value */ 105 - bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio, 106 - value ? 1 << gpio : 0); 107 - /* then set the gpio mode */ 108 - bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio, 109 - 1 << gpio); 110 - return 0; 111 - #endif 112 - } 113 - return -EINVAL; 114 - } 115 - 116 - static inline int gpio_intmask(unsigned gpio, int value) 117 - { 118 - switch (bcm47xx_bus_type) { 119 - #ifdef CONFIG_BCM47XX_SSB 120 - case BCM47XX_BUS_TYPE_SSB: 121 - ssb_gpio_intmask(&bcm47xx_bus.ssb, 1 << gpio, 122 - value ? 
1 << gpio : 0); 123 - return 0; 124 - #endif 125 - #ifdef CONFIG_BCM47XX_BCMA 126 - case BCM47XX_BUS_TYPE_BCMA: 127 - bcma_chipco_gpio_intmask(&bcm47xx_bus.bcma.bus.drv_cc, 128 - 1 << gpio, value ? 1 << gpio : 0); 129 - return 0; 130 - #endif 131 - } 132 - return -EINVAL; 133 - } 134 - 135 - static inline int gpio_polarity(unsigned gpio, int value) 136 - { 137 - switch (bcm47xx_bus_type) { 138 - #ifdef CONFIG_BCM47XX_SSB 139 - case BCM47XX_BUS_TYPE_SSB: 140 - ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << gpio, 141 - value ? 1 << gpio : 0); 142 - return 0; 143 - #endif 144 - #ifdef CONFIG_BCM47XX_BCMA 145 - case BCM47XX_BUS_TYPE_BCMA: 146 - bcma_chipco_gpio_polarity(&bcm47xx_bus.bcma.bus.drv_cc, 147 - 1 << gpio, value ? 1 << gpio : 0); 148 - return 0; 149 - #endif 150 - } 151 - return -EINVAL; 152 - } 153 - 154 - 155 - #endif /* __BCM47XX_GPIO_H */
-1
arch/mips/include/asm/mach-bcm47xx/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
+35
arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
··· 1 + #ifndef BCM63XX_NVRAM_H 2 + #define BCM63XX_NVRAM_H 3 + 4 + #include <linux/types.h> 5 + 6 + /** 7 + * bcm63xx_nvram_init() - initializes nvram 8 + * @nvram: address of the nvram data 9 + * 10 + * Initialized the local nvram copy from the target address and checks 11 + * its checksum. 12 + * 13 + * Returns 0 on success. 14 + */ 15 + int __init bcm63xx_nvram_init(void *nvram); 16 + 17 + /** 18 + * bcm63xx_nvram_get_name() - returns the board name according to nvram 19 + * 20 + * Returns the board name field from nvram. Note that it might not be 21 + * null terminated if it is exactly 16 bytes long. 22 + */ 23 + u8 *bcm63xx_nvram_get_name(void); 24 + 25 + /** 26 + * bcm63xx_nvram_get_mac_address() - register & return a new mac address 27 + * @mac: pointer to array for allocated mac 28 + * 29 + * Registers and returns a mac address from the allocated macs from nvram. 30 + * 31 + * Returns 0 on success. 32 + */ 33 + int bcm63xx_nvram_get_mac_address(u8 *mac); 34 + 35 + #endif /* BCM63XX_NVRAM_H */
+22 -7
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
··· 53 53 CKCTL_6338_SAR_EN | \ 54 54 CKCTL_6338_SPI_EN) 55 55 56 - #define CKCTL_6345_CPU_EN (1 << 0) 57 - #define CKCTL_6345_BUS_EN (1 << 1) 58 - #define CKCTL_6345_EBI_EN (1 << 2) 59 - #define CKCTL_6345_UART_EN (1 << 3) 60 - #define CKCTL_6345_ADSLPHY_EN (1 << 4) 61 - #define CKCTL_6345_ENET_EN (1 << 7) 62 - #define CKCTL_6345_USBH_EN (1 << 8) 56 + /* BCM6345 clock bits are shifted by 16 on the left, because of the test 57 + * control register which is 16-bits wide. That way we do not have any 58 + * specific BCM6345 code for handling clocks, and writing 0 to the test 59 + * control register is fine. 60 + */ 61 + #define CKCTL_6345_CPU_EN (1 << 16) 62 + #define CKCTL_6345_BUS_EN (1 << 17) 63 + #define CKCTL_6345_EBI_EN (1 << 18) 64 + #define CKCTL_6345_UART_EN (1 << 19) 65 + #define CKCTL_6345_ADSLPHY_EN (1 << 20) 66 + #define CKCTL_6345_ENET_EN (1 << 23) 67 + #define CKCTL_6345_USBH_EN (1 << 24) 63 68 64 69 #define CKCTL_6345_ALL_SAFE_EN (CKCTL_6345_ENET_EN | \ 65 70 CKCTL_6345_USBH_EN | \ ··· 196 191 /* Soft Reset register */ 197 192 #define PERF_SOFTRESET_REG 0x28 198 193 #define PERF_SOFTRESET_6328_REG 0x10 194 + #define PERF_SOFTRESET_6358_REG 0x34 199 195 #define PERF_SOFTRESET_6368_REG 0x10 200 196 201 197 #define SOFTRESET_6328_SPI_MASK (1 << 0) ··· 249 243 SOFTRESET_6348_SAR_MASK | \ 250 244 SOFTRESET_6348_ACLC_MASK | \ 251 245 SOFTRESET_6348_ADSLMIPSPLL_MASK) 246 + 247 + #define SOFTRESET_6358_SPI_MASK (1 << 0) 248 + #define SOFTRESET_6358_ENET_MASK (1 << 2) 249 + #define SOFTRESET_6358_MPI_MASK (1 << 3) 250 + #define SOFTRESET_6358_EPHY_MASK (1 << 6) 251 + #define SOFTRESET_6358_SAR_MASK (1 << 7) 252 + #define SOFTRESET_6358_USBH_MASK (1 << 12) 253 + #define SOFTRESET_6358_PCM_MASK (1 << 13) 254 + #define SOFTRESET_6358_ADSL_MASK (1 << 14) 252 255 253 256 #define SOFTRESET_6368_SPI_MASK (1 << 0) 254 257 #define SOFTRESET_6368_MPI_MASK (1 << 3)
+21
arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h
··· 1 + #ifndef __BCM63XX_RESET_H 2 + #define __BCM63XX_RESET_H 3 + 4 + enum bcm63xx_core_reset { 5 + BCM63XX_RESET_SPI, 6 + BCM63XX_RESET_ENET, 7 + BCM63XX_RESET_USBH, 8 + BCM63XX_RESET_USBD, 9 + BCM63XX_RESET_SAR, 10 + BCM63XX_RESET_DSL, 11 + BCM63XX_RESET_EPHY, 12 + BCM63XX_RESET_ENETSW, 13 + BCM63XX_RESET_PCM, 14 + BCM63XX_RESET_MPI, 15 + BCM63XX_RESET_PCIE, 16 + BCM63XX_RESET_PCIE_EXT, 17 + }; 18 + 19 + void bcm63xx_core_set_reset(enum bcm63xx_core_reset, int reset); 20 + 21 + #endif
-17
arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
··· 15 15 #define BCM963XX_NVRAM_OFFSET 0x580 16 16 17 17 /* 18 - * nvram structure 19 - */ 20 - struct bcm963xx_nvram { 21 - u32 version; 22 - u8 reserved1[256]; 23 - u8 name[16]; 24 - u32 main_tp_number; 25 - u32 psi_size; 26 - u32 mac_addr_count; 27 - u8 mac_addr_base[6]; 28 - u8 reserved2[2]; 29 - u32 checksum_old; 30 - u8 reserved3[720]; 31 - u32 checksum_high; 32 - }; 33 - 34 - /* 35 18 * board definition 36 19 */ 37 20 struct board_info {
-1
arch/mips/include/asm/mach-bcm63xx/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-cavium-octeon/irq.h
··· 42 42 OCTEON_IRQ_TIMER3, 43 43 OCTEON_IRQ_USB0, 44 44 OCTEON_IRQ_USB1, 45 - OCTEON_IRQ_BOOTDMA, 46 45 #ifndef CONFIG_PCI_MSI 47 46 OCTEON_IRQ_LAST = 127 48 47 #endif
-1
arch/mips/include/asm/mach-cavium-octeon/war.h
··· 18 18 #define MIPS4K_ICACHE_REFILL_WAR 0 19 19 #define MIPS_CACHE_SYNC_WAR 0 20 20 #define TX49XX_ICACHE_INDEX_INV_WAR 0 21 - #define RM9000_CDEX_SMP_WAR 0 22 21 #define ICACHE_REFILLS_WORKAROUND_WAR 0 23 22 #define R10000_LLSC_WAR 0 24 23 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-cobalt/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-dec/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-emma2rh/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-6
arch/mips/include/asm/mach-generic/irq.h
··· 34 34 #endif 35 35 #endif 36 36 37 - #ifdef CONFIG_IRQ_CPU_RM9K 38 - #ifndef RM9K_CPU_IRQ_BASE 39 - #define RM9K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+12) 40 - #endif 41 - #endif 42 - 43 37 #endif /* CONFIG_IRQ_CPU */ 44 38 45 39 #endif /* __ASM_MACH_GENERIC_IRQ_H */
-1
arch/mips/include/asm/mach-ip22/war.h
··· 21 21 #define MIPS4K_ICACHE_REFILL_WAR 0 22 22 #define MIPS_CACHE_SYNC_WAR 0 23 23 #define TX49XX_ICACHE_INDEX_INV_WAR 0 24 - #define RM9000_CDEX_SMP_WAR 0 25 24 #define ICACHE_REFILLS_WORKAROUND_WAR 0 26 25 #define R10000_LLSC_WAR 0 27 26 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-ip27/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 1 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-ip28/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 1 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-ip32/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 1 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-jazz/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-jz4740/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-lantiq/war.h
··· 16 16 #define MIPS4K_ICACHE_REFILL_WAR 0 17 17 #define MIPS_CACHE_SYNC_WAR 0 18 18 #define TX49XX_ICACHE_INDEX_INV_WAR 0 19 - #define RM9000_CDEX_SMP_WAR 0 20 19 #define ICACHE_REFILLS_WORKAROUND_WAR 0 21 20 #define R10000_LLSC_WAR 0 22 21 #define MIPS34K_MISSED_ITLB_WAR 0
+3
arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
··· 82 82 #define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000) 83 83 #define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344)) 84 84 85 + /* allow booting xrx200 phys */ 86 + int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr); 87 + 85 88 /* request a non-gpio and set the PIO config */ 86 89 #define PMU_PPE BIT(13) 87 90 extern void ltq_pmu_enable(unsigned int module);
-1
arch/mips/include/asm/mach-lasat/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-loongson/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
+2 -1
arch/mips/include/asm/mach-loongson1/platform.h
··· 18 18 extern struct platform_device ls1x_ehci_device; 19 19 extern struct platform_device ls1x_rtc_device; 20 20 21 - void ls1x_serial_setup(void); 21 + extern void __init ls1x_clk_init(void); 22 + extern void __init ls1x_serial_setup(struct platform_device *pdev); 22 23 23 24 #endif /* __ASM_MACH_LOONGSON1_PLATFORM_H */
+4 -3
arch/mips/include/asm/mach-loongson1/regs-clk.h
··· 20 20 21 21 /* Clock PLL Divisor Register Bits */ 22 22 #define DIV_DC_EN (0x1 << 31) 23 - #define DIV_DC (0x1f << 26) 24 23 #define DIV_CPU_EN (0x1 << 25) 25 - #define DIV_CPU (0x1f << 20) 26 24 #define DIV_DDR_EN (0x1 << 19) 27 - #define DIV_DDR (0x1f << 14) 28 25 29 26 #define DIV_DC_SHIFT 26 30 27 #define DIV_CPU_SHIFT 20 31 28 #define DIV_DDR_SHIFT 14 29 + 30 + #define DIV_DC_WIDTH 5 31 + #define DIV_CPU_WIDTH 5 32 + #define DIV_DDR_WIDTH 5 32 33 33 34 #endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */
-1
arch/mips/include/asm/mach-loongson1/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-malta/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 1 18 18 #define MIPS_CACHE_SYNC_WAR 1 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 1 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
+3 -1
arch/mips/include/asm/mach-netlogic/irq.h
··· 8 8 #ifndef __ASM_NETLOGIC_IRQ_H 9 9 #define __ASM_NETLOGIC_IRQ_H 10 10 11 - #define NR_IRQS 64 11 + #include <asm/mach-netlogic/multi-node.h> 12 + #define NR_IRQS (64 * NLM_NR_NODES) 13 + 12 14 #define MIPS_CPU_IRQ_BASE 0 13 15 14 16 #endif /* __ASM_NETLOGIC_IRQ_H */
+54
arch/mips/include/asm/mach-netlogic/multi-node.h
··· 1 + /* 2 + * Copyright (c) 2003-2012 Broadcom Corporation 3 + * All Rights Reserved 4 + * 5 + * This software is available to you under a choice of one of two 6 + * licenses. You may choose to be licensed under the terms of the GNU 7 + * General Public License (GPL) Version 2, available from the file 8 + * COPYING in the main directory of this source tree, or the Broadcom 9 + * license below: 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 15 + * 1. Redistributions of source code must retain the above copyright 16 + * notice, this list of conditions and the following disclaimer. 17 + * 2. Redistributions in binary form must reproduce the above copyright 18 + * notice, this list of conditions and the following disclaimer in 19 + * the documentation and/or other materials provided with the 20 + * distribution. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 23 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 26 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 31 + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 32 + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #ifndef _NETLOGIC_MULTI_NODE_H_ 36 + #define _NETLOGIC_MULTI_NODE_H_ 37 + 38 + #ifndef CONFIG_NLM_MULTINODE 39 + #define NLM_NR_NODES 1 40 + #else 41 + #if defined(CONFIG_NLM_MULTINODE_2) 42 + #define NLM_NR_NODES 2 43 + #elif defined(CONFIG_NLM_MULTINODE_4) 44 + #define NLM_NR_NODES 4 45 + #else 46 + #define NLM_NR_NODES 1 47 + #endif 48 + #endif 49 + 50 + #define NLM_CORES_PER_NODE 8 51 + #define NLM_THREADS_PER_CORE 4 52 + #define NLM_CPUS_PER_NODE (NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE) 53 + 54 + #endif
-1
arch/mips/include/asm/mach-netlogic/war.h
··· 18 18 #define MIPS4K_ICACHE_REFILL_WAR 0 19 19 #define MIPS_CACHE_SYNC_WAR 0 20 20 #define TX49XX_ICACHE_INDEX_INV_WAR 0 21 - #define RM9000_CDEX_SMP_WAR 0 22 21 #define ICACHE_REFILLS_WORKAROUND_WAR 0 23 22 #define R10000_LLSC_WAR 0 24 23 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-pnx833x/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-pnx8550/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-powertv/war.h
··· 20 20 #define MIPS4K_ICACHE_REFILL_WAR 1 21 21 #define MIPS_CACHE_SYNC_WAR 1 22 22 #define TX49XX_ICACHE_INDEX_INV_WAR 0 23 - #define RM9000_CDEX_SMP_WAR 0 24 23 #define ICACHE_REFILLS_WORKAROUND_WAR 1 25 24 #define R10000_LLSC_WAR 0 26 25 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-rc32434/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 1 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-rm/war.h
··· 21 21 #define MIPS4K_ICACHE_REFILL_WAR 0 22 22 #define MIPS_CACHE_SYNC_WAR 0 23 23 #define TX49XX_ICACHE_INDEX_INV_WAR 0 24 - #define RM9000_CDEX_SMP_WAR 0 25 24 #define ICACHE_REFILLS_WORKAROUND_WAR 0 26 25 #define R10000_LLSC_WAR 0 27 26 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-sead3/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 1 18 18 #define MIPS_CACHE_SYNC_WAR 1 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 1 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-sibyte/war.h
··· 33 33 #define MIPS4K_ICACHE_REFILL_WAR 0 34 34 #define MIPS_CACHE_SYNC_WAR 0 35 35 #define TX49XX_ICACHE_INDEX_INV_WAR 0 36 - #define RM9000_CDEX_SMP_WAR 0 37 36 #define ICACHE_REFILLS_WORKAROUND_WAR 0 38 37 #define R10000_LLSC_WAR 0 39 38 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-tx39xx/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-tx49xx/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 1 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-vr41xx/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-1
arch/mips/include/asm/mach-wrppmc/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 1 22 21 #define R10000_LLSC_WAR 0 23 22 #define MIPS34K_MISSED_ITLB_WAR 0
-48
arch/mips/include/asm/mach-yosemite/cpu-feature-overrides.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org) 7 - */ 8 - #ifndef __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H 9 - #define __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H 10 - 11 - /* 12 - * Momentum Jaguar ATX always has the RM9000 processor. 13 - */ 14 - #define cpu_has_watch 1 15 - #define cpu_has_mips16 0 16 - #define cpu_has_divec 0 17 - #define cpu_has_vce 0 18 - #define cpu_has_cache_cdex_p 0 19 - #define cpu_has_cache_cdex_s 0 20 - #define cpu_has_prefetch 1 21 - #define cpu_has_mcheck 0 22 - #define cpu_has_ejtag 0 23 - 24 - #define cpu_has_llsc 1 25 - #define cpu_has_vtag_icache 0 26 - #define cpu_has_dc_aliases 0 27 - #define cpu_has_ic_fills_f_dc 0 28 - #define cpu_has_dsp 0 29 - #define cpu_has_dsp2 0 30 - #define cpu_has_mipsmt 0 31 - #define cpu_has_userlocal 0 32 - #define cpu_icache_snoops_remote_store 0 33 - 34 - #define cpu_has_nofpuex 0 35 - #define cpu_has_64bits 1 36 - 37 - #define cpu_has_inclusive_pcaches 0 38 - 39 - #define cpu_dcache_line_size() 32 40 - #define cpu_icache_line_size() 32 41 - #define cpu_scache_line_size() 32 42 - 43 - #define cpu_has_mips32r1 0 44 - #define cpu_has_mips32r2 0 45 - #define cpu_has_mips64r1 0 46 - #define cpu_has_mips64r2 0 47 - 48 - #endif /* __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H */
-25
arch/mips/include/asm/mach-yosemite/war.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> 7 - */ 8 - #ifndef __ASM_MIPS_MACH_YOSEMITE_WAR_H 9 - #define __ASM_MIPS_MACH_YOSEMITE_WAR_H 10 - 11 - #define R4600_V1_INDEX_ICACHEOP_WAR 0 12 - #define R4600_V1_HIT_CACHEOP_WAR 0 13 - #define R4600_V2_HIT_CACHEOP_WAR 0 14 - #define R5432_CP0_INTERRUPT_WAR 0 15 - #define BCM1250_M3_WAR 0 16 - #define SIBYTE_1956_WAR 0 17 - #define MIPS4K_ICACHE_REFILL_WAR 0 18 - #define MIPS_CACHE_SYNC_WAR 0 19 - #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 1 21 - #define ICACHE_REFILLS_WORKAROUND_WAR 1 22 - #define R10000_LLSC_WAR 0 23 - #define MIPS34K_MISSED_ITLB_WAR 0 24 - 25 - #endif /* __ASM_MIPS_MACH_YOSEMITE_WAR_H */
+1 -9
arch/mips/include/asm/mipsregs.h
··· 240 240 #define PM_HUGE_MASK PM_64M 241 241 #elif defined(CONFIG_PAGE_SIZE_64KB) 242 242 #define PM_HUGE_MASK PM_256M 243 - #elif defined(CONFIG_HUGETLB_PAGE) 243 + #elif defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) 244 244 #error Bad page size configuration for hugetlbfs! 245 245 #endif 246 246 ··· 977 977 #define read_c0_framemask() __read_32bit_c0_register($21, 0) 978 978 #define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val) 979 979 980 - /* RM9000 PerfControl performance counter control register */ 981 - #define read_c0_perfcontrol() __read_32bit_c0_register($22, 0) 982 - #define write_c0_perfcontrol(val) __write_32bit_c0_register($22, 0, val) 983 - 984 980 #define read_c0_diag() __read_32bit_c0_register($22, 0) 985 981 #define write_c0_diag(val) __write_32bit_c0_register($22, 0, val) 986 982 ··· 1028 1032 #define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val) 1029 1033 #define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7) 1030 1034 #define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val) 1031 - 1032 - /* RM9000 PerfCount performance counter register */ 1033 - #define read_c0_perfcount() __read_64bit_c0_register($25, 0) 1034 - #define write_c0_perfcount(val) __write_64bit_c0_register($25, 0, val) 1035 1035 1036 1036 #define read_c0_ecc() __read_32bit_c0_register($26, 0) 1037 1037 #define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val)
-6
arch/mips/include/asm/mmu_context.h
··· 72 72 #define ASID_INC 0x10 73 73 #define ASID_MASK 0xff0 74 74 75 - #elif defined(CONFIG_CPU_RM9000) 76 - 77 - #define ASID_INC 0x1 78 - #define ASID_MASK 0xfff 79 - 80 - /* SMTC/34K debug hack - but maybe we'll keep it */ 81 75 #elif defined(CONFIG_MIPS_MT_SMTC) 82 76 83 77 #define ASID_INC 0x1
-2
arch/mips/include/asm/module.h
··· 120 120 #define MODULE_PROC_FAMILY "R10000 " 121 121 #elif defined CONFIG_CPU_RM7000 122 122 #define MODULE_PROC_FAMILY "RM7000 " 123 - #elif defined CONFIG_CPU_RM9000 124 - #define MODULE_PROC_FAMILY "RM9000 " 125 123 #elif defined CONFIG_CPU_SB1 126 124 #define MODULE_PROC_FAMILY "SB1 " 127 125 #elif defined CONFIG_CPU_LOONGSON1
+44 -7
arch/mips/include/asm/netlogic/common.h
··· 45 45 #define BOOT_NMI_HANDLER 8 46 46 47 47 #ifndef __ASSEMBLY__ 48 + #include <linux/cpumask.h> 49 + #include <linux/spinlock.h> 50 + #include <asm/irq.h> 51 + #include <asm/mach-netlogic/multi-node.h> 52 + 48 53 struct irq_desc; 49 - extern struct plat_smp_ops nlm_smp_ops; 50 - extern char nlm_reset_entry[], nlm_reset_entry_end[]; 51 54 void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); 52 55 void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); 53 - void nlm_smp_irq_init(void); 56 + void nlm_smp_irq_init(int hwcpuid); 54 57 void nlm_boot_secondary_cpus(void); 55 - int nlm_wakeup_secondary_cpus(u32 wakeup_mask); 58 + int nlm_wakeup_secondary_cpus(void); 56 59 void nlm_rmiboot_preboot(void); 60 + void nlm_percpu_init(int hwcpuid); 57 61 58 62 static inline void 59 63 nlm_set_nmi_handler(void *handler) ··· 72 68 * Misc. 73 69 */ 74 70 unsigned int nlm_get_cpu_frequency(void); 71 + void nlm_node_init(int node); 72 + extern struct plat_smp_ops nlm_smp_ops; 73 + extern char nlm_reset_entry[], nlm_reset_entry_end[]; 75 74 76 - extern unsigned long nlm_common_ebase; 77 - extern int nlm_threads_per_core; 78 - extern uint32_t nlm_cpumask, nlm_coremask; 75 + extern unsigned int nlm_threads_per_core; 76 + extern cpumask_t nlm_cpumask; 77 + 78 + struct nlm_soc_info { 79 + unsigned long coremask; /* cores enabled on the soc */ 80 + unsigned long ebase; 81 + uint64_t irqmask; 82 + uint64_t sysbase; /* only for XLP */ 83 + uint64_t picbase; 84 + spinlock_t piclock; 85 + }; 86 + 87 + #define nlm_get_node(i) (&nlm_nodes[i]) 88 + #ifdef CONFIG_CPU_XLR 89 + #define nlm_current_node() (&nlm_nodes[0]) 90 + #else 91 + #define nlm_current_node() (&nlm_nodes[nlm_nodeid()]) 92 + #endif 93 + 94 + struct irq_data; 95 + uint64_t nlm_pci_irqmask(int node); 96 + void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *)); 97 + 98 + /* 99 + * The NR_IRQs is divided between nodes, each of them has a separate irq space 100 + 
*/ 101 + static inline int nlm_irq_to_xirq(int node, int irq) 102 + { 103 + return node * NR_IRQS / NLM_NR_NODES + irq; 104 + } 105 + 106 + extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 107 + extern int nlm_cpu_ready[]; 79 108 #endif 80 109 #endif /* _NETLOGIC_COMMON_H_ */
+1 -1
arch/mips/include/asm/netlogic/interrupt.h
··· 39 39 40 40 #define IRQ_IPI_SMP_FUNCTION 3 41 41 #define IRQ_IPI_SMP_RESCHEDULE 4 42 - #define IRQ_MSGRING 6 42 + #define IRQ_FMN 5 43 43 #define IRQ_TIMER 7 44 44 45 45 #endif
+142
arch/mips/include/asm/netlogic/mips-extns.h
··· 73 73 return __read_32bit_c0_register($15, 1) & 0x3ff; 74 74 } 75 75 76 + static inline int nlm_nodeid(void) 77 + { 78 + return (__read_32bit_c0_register($15, 1) >> 5) & 0x3; 79 + } 80 + 81 + static inline unsigned int nlm_core_id(void) 82 + { 83 + return (read_c0_ebase() & 0x1c) >> 2; 84 + } 85 + 86 + static inline unsigned int nlm_thread_id(void) 87 + { 88 + return read_c0_ebase() & 0x3; 89 + } 90 + 91 + #define __read_64bit_c2_split(source, sel) \ 92 + ({ \ 93 + unsigned long long __val; \ 94 + unsigned long __flags; \ 95 + \ 96 + local_irq_save(__flags); \ 97 + if (sel == 0) \ 98 + __asm__ __volatile__( \ 99 + ".set\tmips64\n\t" \ 100 + "dmfc2\t%M0, " #source "\n\t" \ 101 + "dsll\t%L0, %M0, 32\n\t" \ 102 + "dsra\t%M0, %M0, 32\n\t" \ 103 + "dsra\t%L0, %L0, 32\n\t" \ 104 + ".set\tmips0\n\t" \ 105 + : "=r" (__val)); \ 106 + else \ 107 + __asm__ __volatile__( \ 108 + ".set\tmips64\n\t" \ 109 + "dmfc2\t%M0, " #source ", " #sel "\n\t" \ 110 + "dsll\t%L0, %M0, 32\n\t" \ 111 + "dsra\t%M0, %M0, 32\n\t" \ 112 + "dsra\t%L0, %L0, 32\n\t" \ 113 + ".set\tmips0\n\t" \ 114 + : "=r" (__val)); \ 115 + local_irq_restore(__flags); \ 116 + \ 117 + __val; \ 118 + }) 119 + 120 + #define __write_64bit_c2_split(source, sel, val) \ 121 + do { \ 122 + unsigned long __flags; \ 123 + \ 124 + local_irq_save(__flags); \ 125 + if (sel == 0) \ 126 + __asm__ __volatile__( \ 127 + ".set\tmips64\n\t" \ 128 + "dsll\t%L0, %L0, 32\n\t" \ 129 + "dsrl\t%L0, %L0, 32\n\t" \ 130 + "dsll\t%M0, %M0, 32\n\t" \ 131 + "or\t%L0, %L0, %M0\n\t" \ 132 + "dmtc2\t%L0, " #source "\n\t" \ 133 + ".set\tmips0\n\t" \ 134 + : : "r" (val)); \ 135 + else \ 136 + __asm__ __volatile__( \ 137 + ".set\tmips64\n\t" \ 138 + "dsll\t%L0, %L0, 32\n\t" \ 139 + "dsrl\t%L0, %L0, 32\n\t" \ 140 + "dsll\t%M0, %M0, 32\n\t" \ 141 + "or\t%L0, %L0, %M0\n\t" \ 142 + "dmtc2\t%L0, " #source ", " #sel "\n\t" \ 143 + ".set\tmips0\n\t" \ 144 + : : "r" (val)); \ 145 + local_irq_restore(__flags); \ 146 + } while (0) 147 + 148 + #define 
__read_32bit_c2_register(source, sel) \ 149 + ({ uint32_t __res; \ 150 + if (sel == 0) \ 151 + __asm__ __volatile__( \ 152 + ".set\tmips32\n\t" \ 153 + "mfc2\t%0, " #source "\n\t" \ 154 + ".set\tmips0\n\t" \ 155 + : "=r" (__res)); \ 156 + else \ 157 + __asm__ __volatile__( \ 158 + ".set\tmips32\n\t" \ 159 + "mfc2\t%0, " #source ", " #sel "\n\t" \ 160 + ".set\tmips0\n\t" \ 161 + : "=r" (__res)); \ 162 + __res; \ 163 + }) 164 + 165 + #define __read_64bit_c2_register(source, sel) \ 166 + ({ unsigned long long __res; \ 167 + if (sizeof(unsigned long) == 4) \ 168 + __res = __read_64bit_c2_split(source, sel); \ 169 + else if (sel == 0) \ 170 + __asm__ __volatile__( \ 171 + ".set\tmips64\n\t" \ 172 + "dmfc2\t%0, " #source "\n\t" \ 173 + ".set\tmips0\n\t" \ 174 + : "=r" (__res)); \ 175 + else \ 176 + __asm__ __volatile__( \ 177 + ".set\tmips64\n\t" \ 178 + "dmfc2\t%0, " #source ", " #sel "\n\t" \ 179 + ".set\tmips0\n\t" \ 180 + : "=r" (__res)); \ 181 + __res; \ 182 + }) 183 + 184 + #define __write_64bit_c2_register(register, sel, value) \ 185 + do { \ 186 + if (sizeof(unsigned long) == 4) \ 187 + __write_64bit_c2_split(register, sel, value); \ 188 + else if (sel == 0) \ 189 + __asm__ __volatile__( \ 190 + ".set\tmips64\n\t" \ 191 + "dmtc2\t%z0, " #register "\n\t" \ 192 + ".set\tmips0\n\t" \ 193 + : : "Jr" (value)); \ 194 + else \ 195 + __asm__ __volatile__( \ 196 + ".set\tmips64\n\t" \ 197 + "dmtc2\t%z0, " #register ", " #sel "\n\t" \ 198 + ".set\tmips0\n\t" \ 199 + : : "Jr" (value)); \ 200 + } while (0) 201 + 202 + #define __write_32bit_c2_register(reg, sel, value) \ 203 + ({ \ 204 + if (sel == 0) \ 205 + __asm__ __volatile__( \ 206 + ".set\tmips32\n\t" \ 207 + "mtc2\t%z0, " #reg "\n\t" \ 208 + ".set\tmips0\n\t" \ 209 + : : "Jr" (value)); \ 210 + else \ 211 + __asm__ __volatile__( \ 212 + ".set\tmips32\n\t" \ 213 + "mtc2\t%z0, " #reg ", " #sel "\n\t" \ 214 + ".set\tmips0\n\t" \ 215 + : : "Jr" (value)); \ 216 + }) 217 + 76 218 #endif /*_ASM_NLM_MIPS_EXTS_H */
+8 -36
arch/mips/include/asm/netlogic/xlp-hal/pic.h
··· 273 273 return nlm_read_pic_reg(base, PIC_IRT(irt_index)); 274 274 } 275 275 276 - static inline uint64_t 277 - nlm_pic_read_control(uint64_t base) 278 - { 279 - return nlm_read_pic_reg(base, PIC_CTRL); 280 - } 281 - 282 - static inline void 283 - nlm_pic_write_control(uint64_t base, uint64_t control) 284 - { 285 - nlm_write_pic_reg(base, PIC_CTRL, control); 286 - } 287 - 288 - static inline void 289 - nlm_pic_update_control(uint64_t base, uint64_t control) 290 - { 291 - uint64_t val; 292 - 293 - val = nlm_read_pic_reg(base, PIC_CTRL); 294 - nlm_write_pic_reg(base, PIC_CTRL, control | val); 295 - } 296 - 297 276 static inline void 298 277 nlm_set_irt_to_cpu(uint64_t base, int irt, int cpu) 299 278 { 300 279 uint64_t val; 301 280 302 281 val = nlm_read_pic_reg(base, PIC_IRT(irt)); 303 - val |= cpu & 0xf; 304 - if (cpu > 15) 305 - val |= 1 << 16; 282 + /* clear cpuset and mask */ 283 + val &= ~((0x7ull << 16) | 0xffff); 284 + /* set DB, cpuset and cpumask */ 285 + val |= (1 << 19) | ((cpu >> 4) << 16) | (1 << (cpu & 0xf)); 306 286 nlm_write_pic_reg(base, PIC_IRT(irt), val); 307 287 } 308 288 ··· 349 369 static inline void 350 370 nlm_pic_disable_irt(uint64_t base, int irt) 351 371 { 352 - uint32_t reg; 372 + uint64_t reg; 353 373 354 374 reg = nlm_read_pic_reg(base, PIC_IRT(irt)); 355 375 nlm_write_pic_reg(base, PIC_IRT(irt), reg & ~((uint64_t)1 << 31)); ··· 359 379 nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi) 360 380 { 361 381 uint64_t ipi; 362 - int node, ncpu; 363 382 364 - node = hwt / 32; 365 - ncpu = hwt & 0x1f; 366 - ipi = ((uint64_t)nmi << 31) | (irq << 20) | (node << 17) | 367 - (1 << (ncpu & 0xf)); 368 - if (ncpu > 15) 369 - ipi |= 0x10000; /* Setting bit 16 to select cpus 16-31 */ 370 - 383 + ipi = (nmi << 31) | (irq << 20); 384 + ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */ 371 385 nlm_write_pic_reg(base, PIC_IPI_CTL, ipi); 372 386 } 373 387 ··· 378 404 static inline void 379 405 nlm_pic_init_irt(uint64_t base, 
int irt, int irq, int hwt) 380 406 { 381 - nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, 0); 407 + nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt); 382 408 } 383 409 384 - extern uint64_t nlm_pic_base; 385 410 int nlm_irq_to_irt(int irq); 386 - int nlm_irt_to_irq(int irt); 387 411 388 412 #endif /* __ASSEMBLY__ */ 389 413 #endif /* _NLM_HAL_PIC_H */
-1
arch/mips/include/asm/netlogic/xlp-hal/sys.h
··· 124 124 #define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node)) 125 125 #define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ) 126 126 127 - extern uint64_t nlm_sys_base; 128 127 #endif 129 128 #endif
+363
arch/mips/include/asm/netlogic/xlr/fmn.h
··· 1 + /* 2 + * Copyright (c) 2003-2012 Broadcom Corporation 3 + * All Rights Reserved 4 + * 5 + * This software is available to you under a choice of one of two 6 + * licenses. You may choose to be licensed under the terms of the GNU 7 + * General Public License (GPL) Version 2, available from the file 8 + * COPYING in the main directory of this source tree, or the Broadcom 9 + * license below: 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 15 + * 1. Redistributions of source code must retain the above copyright 16 + * notice, this list of conditions and the following disclaimer. 17 + * 2. Redistributions in binary form must reproduce the above copyright 18 + * notice, this list of conditions and the following disclaimer in 19 + * the documentation and/or other materials provided with the 20 + * distribution. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 23 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 26 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 31 + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 32 + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #ifndef _NLM_FMN_H_ 36 + #define _NLM_FMN_H_ 37 + 38 + #include <asm/netlogic/mips-extns.h> /* for COP2 access */ 39 + 40 + /* Station IDs */ 41 + #define FMN_STNID_CPU0 0x00 42 + #define FMN_STNID_CPU1 0x08 43 + #define FMN_STNID_CPU2 0x10 44 + #define FMN_STNID_CPU3 0x18 45 + #define FMN_STNID_CPU4 0x20 46 + #define FMN_STNID_CPU5 0x28 47 + #define FMN_STNID_CPU6 0x30 48 + #define FMN_STNID_CPU7 0x38 49 + 50 + #define FMN_STNID_XGS0_TX 64 51 + #define FMN_STNID_XMAC0_00_TX 64 52 + #define FMN_STNID_XMAC0_01_TX 65 53 + #define FMN_STNID_XMAC0_02_TX 66 54 + #define FMN_STNID_XMAC0_03_TX 67 55 + #define FMN_STNID_XMAC0_04_TX 68 56 + #define FMN_STNID_XMAC0_05_TX 69 57 + #define FMN_STNID_XMAC0_06_TX 70 58 + #define FMN_STNID_XMAC0_07_TX 71 59 + #define FMN_STNID_XMAC0_08_TX 72 60 + #define FMN_STNID_XMAC0_09_TX 73 61 + #define FMN_STNID_XMAC0_10_TX 74 62 + #define FMN_STNID_XMAC0_11_TX 75 63 + #define FMN_STNID_XMAC0_12_TX 76 64 + #define FMN_STNID_XMAC0_13_TX 77 65 + #define FMN_STNID_XMAC0_14_TX 78 66 + #define FMN_STNID_XMAC0_15_TX 79 67 + 68 + #define FMN_STNID_XGS1_TX 80 69 + #define FMN_STNID_XMAC1_00_TX 80 70 + #define FMN_STNID_XMAC1_01_TX 81 71 + #define FMN_STNID_XMAC1_02_TX 82 72 + #define FMN_STNID_XMAC1_03_TX 83 73 + #define FMN_STNID_XMAC1_04_TX 84 74 + #define FMN_STNID_XMAC1_05_TX 85 75 + #define FMN_STNID_XMAC1_06_TX 86 76 + #define FMN_STNID_XMAC1_07_TX 87 77 + #define FMN_STNID_XMAC1_08_TX 88 78 + #define FMN_STNID_XMAC1_09_TX 89 79 + #define FMN_STNID_XMAC1_10_TX 90 80 + #define FMN_STNID_XMAC1_11_TX 91 81 + #define FMN_STNID_XMAC1_12_TX 92 82 + #define FMN_STNID_XMAC1_13_TX 93 83 + #define FMN_STNID_XMAC1_14_TX 94 84 + #define FMN_STNID_XMAC1_15_TX 95 85 + 86 + #define FMN_STNID_GMAC 96 87 + #define FMN_STNID_GMACJFR_0 96 88 + #define FMN_STNID_GMACRFR_0 97 89 + #define FMN_STNID_GMACTX0 98 90 + #define FMN_STNID_GMACTX1 99 91 + #define FMN_STNID_GMACTX2 100 92 + #define FMN_STNID_GMACTX3 101 93 + #define FMN_STNID_GMACJFR_1 
102 94 + #define FMN_STNID_GMACRFR_1 103 95 + 96 + #define FMN_STNID_DMA 104 97 + #define FMN_STNID_DMA_0 104 98 + #define FMN_STNID_DMA_1 105 99 + #define FMN_STNID_DMA_2 106 100 + #define FMN_STNID_DMA_3 107 101 + 102 + #define FMN_STNID_XGS0FR 112 103 + #define FMN_STNID_XMAC0JFR 112 104 + #define FMN_STNID_XMAC0RFR 113 105 + 106 + #define FMN_STNID_XGS1FR 114 107 + #define FMN_STNID_XMAC1JFR 114 108 + #define FMN_STNID_XMAC1RFR 115 109 + #define FMN_STNID_SEC 120 110 + #define FMN_STNID_SEC0 120 111 + #define FMN_STNID_SEC1 121 112 + #define FMN_STNID_SEC2 122 113 + #define FMN_STNID_SEC3 123 114 + #define FMN_STNID_PK0 124 115 + #define FMN_STNID_SEC_RSA 124 116 + #define FMN_STNID_SEC_RSVD0 125 117 + #define FMN_STNID_SEC_RSVD1 126 118 + #define FMN_STNID_SEC_RSVD2 127 119 + 120 + #define FMN_STNID_GMAC1 80 121 + #define FMN_STNID_GMAC1_FR_0 81 122 + #define FMN_STNID_GMAC1_TX0 82 123 + #define FMN_STNID_GMAC1_TX1 83 124 + #define FMN_STNID_GMAC1_TX2 84 125 + #define FMN_STNID_GMAC1_TX3 85 126 + #define FMN_STNID_GMAC1_FR_1 87 127 + #define FMN_STNID_GMAC0 96 128 + #define FMN_STNID_GMAC0_FR_0 97 129 + #define FMN_STNID_GMAC0_TX0 98 130 + #define FMN_STNID_GMAC0_TX1 99 131 + #define FMN_STNID_GMAC0_TX2 100 132 + #define FMN_STNID_GMAC0_TX3 101 133 + #define FMN_STNID_GMAC0_FR_1 103 134 + #define FMN_STNID_CMP_0 108 135 + #define FMN_STNID_CMP_1 109 136 + #define FMN_STNID_CMP_2 110 137 + #define FMN_STNID_CMP_3 111 138 + #define FMN_STNID_PCIE_0 116 139 + #define FMN_STNID_PCIE_1 117 140 + #define FMN_STNID_PCIE_2 118 141 + #define FMN_STNID_PCIE_3 119 142 + #define FMN_STNID_XLS_PK0 121 143 + 144 + #define nlm_read_c2_cc0(s) __read_32bit_c2_register($16, s) 145 + #define nlm_read_c2_cc1(s) __read_32bit_c2_register($17, s) 146 + #define nlm_read_c2_cc2(s) __read_32bit_c2_register($18, s) 147 + #define nlm_read_c2_cc3(s) __read_32bit_c2_register($19, s) 148 + #define nlm_read_c2_cc4(s) __read_32bit_c2_register($20, s) 149 + #define nlm_read_c2_cc5(s) 
__read_32bit_c2_register($21, s) 150 + #define nlm_read_c2_cc6(s) __read_32bit_c2_register($22, s) 151 + #define nlm_read_c2_cc7(s) __read_32bit_c2_register($23, s) 152 + #define nlm_read_c2_cc8(s) __read_32bit_c2_register($24, s) 153 + #define nlm_read_c2_cc9(s) __read_32bit_c2_register($25, s) 154 + #define nlm_read_c2_cc10(s) __read_32bit_c2_register($26, s) 155 + #define nlm_read_c2_cc11(s) __read_32bit_c2_register($27, s) 156 + #define nlm_read_c2_cc12(s) __read_32bit_c2_register($28, s) 157 + #define nlm_read_c2_cc13(s) __read_32bit_c2_register($29, s) 158 + #define nlm_read_c2_cc14(s) __read_32bit_c2_register($30, s) 159 + #define nlm_read_c2_cc15(s) __read_32bit_c2_register($31, s) 160 + 161 + #define nlm_write_c2_cc0(s, v) __write_32bit_c2_register($16, s, v) 162 + #define nlm_write_c2_cc1(s, v) __write_32bit_c2_register($17, s, v) 163 + #define nlm_write_c2_cc2(s, v) __write_32bit_c2_register($18, s, v) 164 + #define nlm_write_c2_cc3(s, v) __write_32bit_c2_register($19, s, v) 165 + #define nlm_write_c2_cc4(s, v) __write_32bit_c2_register($20, s, v) 166 + #define nlm_write_c2_cc5(s, v) __write_32bit_c2_register($21, s, v) 167 + #define nlm_write_c2_cc6(s, v) __write_32bit_c2_register($22, s, v) 168 + #define nlm_write_c2_cc7(s, v) __write_32bit_c2_register($23, s, v) 169 + #define nlm_write_c2_cc8(s, v) __write_32bit_c2_register($24, s, v) 170 + #define nlm_write_c2_cc9(s, v) __write_32bit_c2_register($25, s, v) 171 + #define nlm_write_c2_cc10(s, v) __write_32bit_c2_register($26, s, v) 172 + #define nlm_write_c2_cc11(s, v) __write_32bit_c2_register($27, s, v) 173 + #define nlm_write_c2_cc12(s, v) __write_32bit_c2_register($28, s, v) 174 + #define nlm_write_c2_cc13(s, v) __write_32bit_c2_register($29, s, v) 175 + #define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v) 176 + #define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v) 177 + 178 + #define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0) 179 + #define 
nlm_read_c2_config() __read_32bit_c2_register($3, 0) 180 + #define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v) 181 + #define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b) 182 + #define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v) 183 + 184 + #define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0) 185 + #define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1) 186 + #define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2) 187 + #define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3) 188 + 189 + #define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v) 190 + #define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v) 191 + #define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v) 192 + #define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v) 193 + 194 + #define FMN_STN_RX_QSIZE 256 195 + #define FMN_NSTATIONS 128 196 + #define FMN_CORE_NBUCKETS 8 197 + 198 + static inline void nlm_msgsnd(unsigned int stid) 199 + { 200 + __asm__ volatile ( 201 + ".set push\n" 202 + ".set noreorder\n" 203 + ".set noat\n" 204 + "move $1, %0\n" 205 + "c2 0x10001\n" /* msgsnd $1 */ 206 + ".set pop\n" 207 + : : "r" (stid) : "$1" 208 + ); 209 + } 210 + 211 + static inline void nlm_msgld(unsigned int pri) 212 + { 213 + __asm__ volatile ( 214 + ".set push\n" 215 + ".set noreorder\n" 216 + ".set noat\n" 217 + "move $1, %0\n" 218 + "c2 0x10002\n" /* msgld $1 */ 219 + ".set pop\n" 220 + : : "r" (pri) : "$1" 221 + ); 222 + } 223 + 224 + static inline void nlm_msgwait(unsigned int mask) 225 + { 226 + __asm__ volatile ( 227 + ".set push\n" 228 + ".set noreorder\n" 229 + ".set noat\n" 230 + "move $8, %0\n" 231 + "c2 0x10003\n" /* msgwait $1 */ 232 + ".set pop\n" 233 + : : "r" (mask) : "$1" 234 + ); 235 + } 236 + 237 + /* 238 + * Disable interrupts and enable COP2 access 239 + */ 240 + static inline uint32_t nlm_cop2_enable(void) 241 + { 242 + uint32_t sr = read_c0_status(); 243 + 244 + 
write_c0_status((sr & ~ST0_IE) | ST0_CU2); 245 + return sr; 246 + } 247 + 248 + static inline void nlm_cop2_restore(uint32_t sr) 249 + { 250 + write_c0_status(sr); 251 + } 252 + 253 + static inline void nlm_fmn_setup_intr(int irq, unsigned int tmask) 254 + { 255 + uint32_t config; 256 + 257 + config = (1 << 24) /* interrupt water mark - 1 msg */ 258 + | (irq << 16) /* irq */ 259 + | (tmask << 8) /* thread mask */ 260 + | 0x2; /* enable watermark intr, disable empty intr */ 261 + nlm_write_c2_config(config); 262 + } 263 + 264 + struct nlm_fmn_msg { 265 + uint64_t msg0; 266 + uint64_t msg1; 267 + uint64_t msg2; 268 + uint64_t msg3; 269 + }; 270 + 271 + static inline int nlm_fmn_send(unsigned int size, unsigned int code, 272 + unsigned int stid, struct nlm_fmn_msg *msg) 273 + { 274 + unsigned int dest; 275 + uint32_t status; 276 + int i; 277 + 278 + /* 279 + * Make sure that all the writes pending at the cpu are flushed. 280 + * Any writes pending on CPU will not be see by devices. L1/L2 281 + * caches are coherent with IO, so no cache flush needed. 282 + */ 283 + __asm __volatile("sync"); 284 + 285 + /* Load TX message buffers */ 286 + nlm_write_c2_tx_msg0(msg->msg0); 287 + nlm_write_c2_tx_msg1(msg->msg1); 288 + nlm_write_c2_tx_msg2(msg->msg2); 289 + nlm_write_c2_tx_msg3(msg->msg3); 290 + dest = ((size - 1) << 16) | (code << 8) | stid; 291 + 292 + /* 293 + * Retry a few times on credit fail, this should be a 294 + * transient condition, unless there is a configuration 295 + * failure, or the receiver is stuck. 
296 + */ 297 + for (i = 0; i < 8; i++) { 298 + nlm_msgsnd(dest); 299 + status = nlm_read_c2_status(0); 300 + if ((status & 0x2) == 1) 301 + pr_info("Send pending fail!\n"); 302 + if ((status & 0x4) == 0) 303 + return 0; 304 + } 305 + 306 + /* If there is a credit failure, return error */ 307 + return status & 0x06; 308 + } 309 + 310 + static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid, 311 + struct nlm_fmn_msg *msg) 312 + { 313 + uint32_t status, tmp; 314 + 315 + nlm_msgld(bucket); 316 + 317 + /* wait for load pending to clear */ 318 + do { 319 + status = nlm_read_c2_status(1); 320 + } while ((status & 0x08) != 0); 321 + 322 + /* receive error bits */ 323 + tmp = status & 0x30; 324 + if (tmp != 0) 325 + return tmp; 326 + 327 + *size = ((status & 0xc0) >> 6) + 1; 328 + *code = (status & 0xff00) >> 8; 329 + *stid = (status & 0x7f0000) >> 16; 330 + msg->msg0 = nlm_read_c2_rx_msg0(); 331 + msg->msg1 = nlm_read_c2_rx_msg1(); 332 + msg->msg2 = nlm_read_c2_rx_msg2(); 333 + msg->msg3 = nlm_read_c2_rx_msg3(); 334 + 335 + return 0; 336 + } 337 + 338 + struct xlr_fmn_info { 339 + int num_buckets; 340 + int start_stn_id; 341 + int end_stn_id; 342 + int credit_config[128]; 343 + }; 344 + 345 + struct xlr_board_fmn_config { 346 + int bucket_size[128]; /* size of buckets for all stations */ 347 + struct xlr_fmn_info cpu[8]; 348 + struct xlr_fmn_info gmac[2]; 349 + struct xlr_fmn_info dma; 350 + struct xlr_fmn_info cmp; 351 + struct xlr_fmn_info sae; 352 + struct xlr_fmn_info xgmac[2]; 353 + }; 354 + 355 + extern int nlm_register_fmn_handler(int start, int end, 356 + void (*fn)(int, int, int, int, struct nlm_fmn_msg *, void *), 357 + void *arg); 358 + extern void xlr_percpu_fmn_init(void); 359 + extern void nlm_setup_fmn_irq(void); 360 + extern void xlr_board_info_setup(void); 361 + 362 + extern struct xlr_board_fmn_config xlr_board_fmn_config; 363 + #endif
-2
arch/mips/include/asm/netlogic/xlr/pic.h
··· 258 258 nlm_write_reg(base, PIC_IRT_1(irt), 259 259 (1 << 30) | (1 << 6) | irq); 260 260 } 261 - 262 - extern uint64_t nlm_pic_base; 263 261 #endif 264 262 #endif /* _ASM_NLM_XLR_PIC_H */
+2 -4
arch/mips/include/asm/netlogic/xlr/xlr.h
··· 51 51 return ((prid & 0xf000) == 0x4000); 52 52 } 53 53 54 - /* 55 - * XLR chip types 56 - */ 57 - /* The XLS product line has chip versions 0x[48c]? */ 54 + /* XLR chip types */ 55 + /* The XLS product line has chip versions 0x[48c]? */ 58 56 static inline unsigned int nlm_chip_is_xls(void) 59 57 { 60 58 uint32_t prid = read_c0_prid();
+2
arch/mips/include/asm/octeon/cvmx-bootmem.h
··· 370 370 */ 371 371 void cvmx_bootmem_unlock(void); 372 372 373 + extern struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void); 374 + 373 375 #endif /* __CVMX_BOOTMEM_H__ */
+3457
arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
··· 1 + /***********************license start*************** 2 + * Author: Cavium Inc. 3 + * 4 + * Contact: support@cavium.com 5 + * This file is part of the OCTEON SDK 6 + * 7 + * Copyright (c) 2003-2012 Cavium Inc. 8 + * 9 + * This file is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License, Version 2, as 11 + * published by the Free Software Foundation. 12 + * 13 + * This file is distributed in the hope that it will be useful, but 14 + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 + * NONINFRINGEMENT. See the GNU General Public License for more 17 + * details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this file; if not, write to the Free Software 21 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 + * or visit http://www.gnu.org/licenses/. 23 + * 24 + * This file may also be available under a different license from Cavium. 25 + * Contact Cavium Inc. 
for more information 26 + ***********************license end**************************************/ 27 + 28 + #ifndef __CVMX_LMCX_DEFS_H__ 29 + #define __CVMX_LMCX_DEFS_H__ 30 + 31 + #define CVMX_LMCX_BIST_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull) 32 + #define CVMX_LMCX_BIST_RESULT(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull) 33 + #define CVMX_LMCX_CHAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull) 34 + #define CVMX_LMCX_CHAR_MASK0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull) 35 + #define CVMX_LMCX_CHAR_MASK1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull) 36 + #define CVMX_LMCX_CHAR_MASK2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull) 37 + #define CVMX_LMCX_CHAR_MASK3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull) 38 + #define CVMX_LMCX_CHAR_MASK4(block_id) (CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull) 39 + #define CVMX_LMCX_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull) 40 + #define CVMX_LMCX_COMP_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull) 41 + #define CVMX_LMCX_CONFIG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull) 42 + #define CVMX_LMCX_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull) 43 + #define CVMX_LMCX_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull) 44 + #define CVMX_LMCX_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull) 45 + #define CVMX_LMCX_DCLK_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull) 46 + #define CVMX_LMCX_DCLK_CNT_HI(block_id) 
(CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull) 47 + #define CVMX_LMCX_DCLK_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull) 48 + #define CVMX_LMCX_DCLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull) 49 + #define CVMX_LMCX_DDR2_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull) 50 + #define CVMX_LMCX_DDR_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull) 51 + #define CVMX_LMCX_DELAY_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull) 52 + #define CVMX_LMCX_DIMMX_PARAMS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8) 53 + #define CVMX_LMCX_DIMM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull) 54 + #define CVMX_LMCX_DLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull) 55 + #define CVMX_LMCX_DLL_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull) 56 + #define CVMX_LMCX_DLL_CTL3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull) 57 + static inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id) 58 + { 59 + switch (cvmx_get_octeon_family()) { 60 + case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 61 + case OCTEON_CN50XX & OCTEON_FAMILY_MASK: 62 + case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 63 + case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 64 + case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 65 + case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 66 + case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 67 + return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull; 68 + case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 69 + return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull; 70 + case OCTEON_CN68XX & OCTEON_FAMILY_MASK: 71 + 
return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x1000000ull; 72 + } 73 + return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull; 74 + } 75 + 76 + static inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id) 77 + { 78 + switch (cvmx_get_octeon_family()) { 79 + case OCTEON_CN30XX & OCTEON_FAMILY_MASK: 80 + case OCTEON_CN50XX & OCTEON_FAMILY_MASK: 81 + case OCTEON_CN38XX & OCTEON_FAMILY_MASK: 82 + case OCTEON_CN31XX & OCTEON_FAMILY_MASK: 83 + case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 84 + case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 85 + case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 86 + case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 87 + case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 88 + case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 89 + return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull; 90 + case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 91 + return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull; 92 + case OCTEON_CN68XX & OCTEON_FAMILY_MASK: 93 + return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x1000000ull; 94 + } 95 + return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull; 96 + } 97 + 98 + static inline uint64_t CVMX_LMCX_FADR(unsigned long block_id) 99 + { 100 + switch (cvmx_get_octeon_family()) { 101 + case OCTEON_CN30XX & OCTEON_FAMILY_MASK: 102 + case OCTEON_CN50XX & OCTEON_FAMILY_MASK: 103 + case OCTEON_CN38XX & OCTEON_FAMILY_MASK: 104 + case OCTEON_CN31XX & OCTEON_FAMILY_MASK: 105 + case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 106 + case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 107 + case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 108 + case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 109 + case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 110 + case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 111 + return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull; 112 + case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 113 + return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull; 114 + case 
OCTEON_CN68XX & OCTEON_FAMILY_MASK: 115 + return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x1000000ull; 116 + } 117 + return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull; 118 + } 119 + 120 + #define CVMX_LMCX_IFB_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull) 121 + #define CVMX_LMCX_IFB_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull) 122 + #define CVMX_LMCX_IFB_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull) 123 + #define CVMX_LMCX_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull) 124 + #define CVMX_LMCX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull) 125 + #define CVMX_LMCX_MEM_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull) 126 + #define CVMX_LMCX_MEM_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull) 127 + #define CVMX_LMCX_MODEREG_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull) 128 + #define CVMX_LMCX_MODEREG_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull) 129 + static inline uint64_t CVMX_LMCX_NXM(unsigned long block_id) 130 + { 131 + switch (cvmx_get_octeon_family()) { 132 + case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 133 + case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 134 + case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 135 + case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 136 + case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 137 + case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 138 + return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull; 139 + case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 140 + return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull; 141 + case OCTEON_CN68XX & OCTEON_FAMILY_MASK: 142 + return 
CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x1000000ull; 143 + } 144 + return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull; 145 + } 146 + 147 + #define CVMX_LMCX_OPS_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull) 148 + #define CVMX_LMCX_OPS_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull) 149 + #define CVMX_LMCX_OPS_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull) 150 + #define CVMX_LMCX_PHY_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull) 151 + #define CVMX_LMCX_PLL_BWCTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000040ull)) 152 + #define CVMX_LMCX_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull) 153 + #define CVMX_LMCX_PLL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull) 154 + #define CVMX_LMCX_READ_LEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull) 155 + #define CVMX_LMCX_READ_LEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull) 156 + #define CVMX_LMCX_READ_LEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8) 157 + #define CVMX_LMCX_RESET_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull) 158 + #define CVMX_LMCX_RLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull) 159 + #define CVMX_LMCX_RLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull) 160 + #define CVMX_LMCX_RLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8) 161 + #define CVMX_LMCX_RODT_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 
0x60000000ull) 162 + #define CVMX_LMCX_RODT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull) 163 + #define CVMX_LMCX_RODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull) 164 + #define CVMX_LMCX_SCRAMBLED_FADR(block_id) (CVMX_ADD_IO_SEG(0x0001180088000330ull)) 165 + #define CVMX_LMCX_SCRAMBLE_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000320ull)) 166 + #define CVMX_LMCX_SCRAMBLE_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000328ull)) 167 + #define CVMX_LMCX_SLOT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull) 168 + #define CVMX_LMCX_SLOT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull) 169 + #define CVMX_LMCX_SLOT_CTL2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull) 170 + #define CVMX_LMCX_TIMING_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull) 171 + #define CVMX_LMCX_TIMING_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull) 172 + #define CVMX_LMCX_TRO_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull) 173 + #define CVMX_LMCX_TRO_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) * 0x1000000ull) 174 + #define CVMX_LMCX_WLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull) 175 + #define CVMX_LMCX_WLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull) 176 + #define CVMX_LMCX_WLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8) 177 + #define CVMX_LMCX_WODT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull) 178 + #define CVMX_LMCX_WODT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull) 179 + 
#define CVMX_LMCX_WODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull) 180 + 181 + union cvmx_lmcx_bist_ctl { 182 + uint64_t u64; 183 + struct cvmx_lmcx_bist_ctl_s { 184 + #ifdef __BIG_ENDIAN_BITFIELD 185 + uint64_t reserved_1_63:63; 186 + uint64_t start:1; 187 + #else 188 + uint64_t start:1; 189 + uint64_t reserved_1_63:63; 190 + #endif 191 + } s; 192 + struct cvmx_lmcx_bist_ctl_s cn50xx; 193 + struct cvmx_lmcx_bist_ctl_s cn52xx; 194 + struct cvmx_lmcx_bist_ctl_s cn52xxp1; 195 + struct cvmx_lmcx_bist_ctl_s cn56xx; 196 + struct cvmx_lmcx_bist_ctl_s cn56xxp1; 197 + }; 198 + 199 + union cvmx_lmcx_bist_result { 200 + uint64_t u64; 201 + struct cvmx_lmcx_bist_result_s { 202 + #ifdef __BIG_ENDIAN_BITFIELD 203 + uint64_t reserved_11_63:53; 204 + uint64_t csrd2e:1; 205 + uint64_t csre2d:1; 206 + uint64_t mwf:1; 207 + uint64_t mwd:3; 208 + uint64_t mwc:1; 209 + uint64_t mrf:1; 210 + uint64_t mrd:3; 211 + #else 212 + uint64_t mrd:3; 213 + uint64_t mrf:1; 214 + uint64_t mwc:1; 215 + uint64_t mwd:3; 216 + uint64_t mwf:1; 217 + uint64_t csre2d:1; 218 + uint64_t csrd2e:1; 219 + uint64_t reserved_11_63:53; 220 + #endif 221 + } s; 222 + struct cvmx_lmcx_bist_result_cn50xx { 223 + #ifdef __BIG_ENDIAN_BITFIELD 224 + uint64_t reserved_9_63:55; 225 + uint64_t mwf:1; 226 + uint64_t mwd:3; 227 + uint64_t mwc:1; 228 + uint64_t mrf:1; 229 + uint64_t mrd:3; 230 + #else 231 + uint64_t mrd:3; 232 + uint64_t mrf:1; 233 + uint64_t mwc:1; 234 + uint64_t mwd:3; 235 + uint64_t mwf:1; 236 + uint64_t reserved_9_63:55; 237 + #endif 238 + } cn50xx; 239 + struct cvmx_lmcx_bist_result_s cn52xx; 240 + struct cvmx_lmcx_bist_result_s cn52xxp1; 241 + struct cvmx_lmcx_bist_result_s cn56xx; 242 + struct cvmx_lmcx_bist_result_s cn56xxp1; 243 + }; 244 + 245 + union cvmx_lmcx_char_ctl { 246 + uint64_t u64; 247 + struct cvmx_lmcx_char_ctl_s { 248 + #ifdef __BIG_ENDIAN_BITFIELD 249 + uint64_t reserved_44_63:20; 250 + uint64_t dr:1; 251 + uint64_t skew_on:1; 252 + 
uint64_t en:1; 253 + uint64_t sel:1; 254 + uint64_t prog:8; 255 + uint64_t prbs:32; 256 + #else 257 + uint64_t prbs:32; 258 + uint64_t prog:8; 259 + uint64_t sel:1; 260 + uint64_t en:1; 261 + uint64_t skew_on:1; 262 + uint64_t dr:1; 263 + uint64_t reserved_44_63:20; 264 + #endif 265 + } s; 266 + struct cvmx_lmcx_char_ctl_s cn61xx; 267 + struct cvmx_lmcx_char_ctl_cn63xx { 268 + #ifdef __BIG_ENDIAN_BITFIELD 269 + uint64_t reserved_42_63:22; 270 + uint64_t en:1; 271 + uint64_t sel:1; 272 + uint64_t prog:8; 273 + uint64_t prbs:32; 274 + #else 275 + uint64_t prbs:32; 276 + uint64_t prog:8; 277 + uint64_t sel:1; 278 + uint64_t en:1; 279 + uint64_t reserved_42_63:22; 280 + #endif 281 + } cn63xx; 282 + struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1; 283 + struct cvmx_lmcx_char_ctl_s cn66xx; 284 + struct cvmx_lmcx_char_ctl_s cn68xx; 285 + struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1; 286 + struct cvmx_lmcx_char_ctl_s cnf71xx; 287 + }; 288 + 289 + union cvmx_lmcx_char_mask0 { 290 + uint64_t u64; 291 + struct cvmx_lmcx_char_mask0_s { 292 + #ifdef __BIG_ENDIAN_BITFIELD 293 + uint64_t mask:64; 294 + #else 295 + uint64_t mask:64; 296 + #endif 297 + } s; 298 + struct cvmx_lmcx_char_mask0_s cn61xx; 299 + struct cvmx_lmcx_char_mask0_s cn63xx; 300 + struct cvmx_lmcx_char_mask0_s cn63xxp1; 301 + struct cvmx_lmcx_char_mask0_s cn66xx; 302 + struct cvmx_lmcx_char_mask0_s cn68xx; 303 + struct cvmx_lmcx_char_mask0_s cn68xxp1; 304 + struct cvmx_lmcx_char_mask0_s cnf71xx; 305 + }; 306 + 307 + union cvmx_lmcx_char_mask1 { 308 + uint64_t u64; 309 + struct cvmx_lmcx_char_mask1_s { 310 + #ifdef __BIG_ENDIAN_BITFIELD 311 + uint64_t reserved_8_63:56; 312 + uint64_t mask:8; 313 + #else 314 + uint64_t mask:8; 315 + uint64_t reserved_8_63:56; 316 + #endif 317 + } s; 318 + struct cvmx_lmcx_char_mask1_s cn61xx; 319 + struct cvmx_lmcx_char_mask1_s cn63xx; 320 + struct cvmx_lmcx_char_mask1_s cn63xxp1; 321 + struct cvmx_lmcx_char_mask1_s cn66xx; 322 + struct cvmx_lmcx_char_mask1_s cn68xx; 323 + struct 
cvmx_lmcx_char_mask1_s cn68xxp1;
	struct cvmx_lmcx_char_mask1_s cnf71xx;
};

/*
 * LMC CHAR_MASK2 CSR: characterization mask spanning a full 64-bit word.
 * All per-model members alias the common layout.
 */
union cvmx_lmcx_char_mask2 {
	uint64_t u64;
	struct cvmx_lmcx_char_mask2_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t mask:64;
#else
		uint64_t mask:64;
#endif
	} s;
	struct cvmx_lmcx_char_mask2_s cn61xx;
	struct cvmx_lmcx_char_mask2_s cn63xx;
	struct cvmx_lmcx_char_mask2_s cn63xxp1;
	struct cvmx_lmcx_char_mask2_s cn66xx;
	struct cvmx_lmcx_char_mask2_s cn68xx;
	struct cvmx_lmcx_char_mask2_s cn68xxp1;
	struct cvmx_lmcx_char_mask2_s cnf71xx;
};

/* LMC CHAR_MASK3 CSR: 8-bit characterization mask in bits <7:0>. */
union cvmx_lmcx_char_mask3 {
	uint64_t u64;
	struct cvmx_lmcx_char_mask3_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_8_63:56;
		uint64_t mask:8;
#else
		uint64_t mask:8;
		uint64_t reserved_8_63:56;
#endif
	} s;
	struct cvmx_lmcx_char_mask3_s cn61xx;
	struct cvmx_lmcx_char_mask3_s cn63xx;
	struct cvmx_lmcx_char_mask3_s cn63xxp1;
	struct cvmx_lmcx_char_mask3_s cn66xx;
	struct cvmx_lmcx_char_mask3_s cn68xx;
	struct cvmx_lmcx_char_mask3_s cn68xxp1;
	struct cvmx_lmcx_char_mask3_s cnf71xx;
};

/*
 * LMC CHAR_MASK4 CSR: per-signal characterization masks for the DDR
 * command/control pins (address, bank, RAS/CAS/WE, ODT, CS, CKE, RESET_n).
 */
union cvmx_lmcx_char_mask4 {
	uint64_t u64;
	struct cvmx_lmcx_char_mask4_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_33_63:31;
		uint64_t reset_n_mask:1;
		uint64_t a_mask:16;
		uint64_t ba_mask:3;
		uint64_t we_n_mask:1;
		uint64_t cas_n_mask:1;
		uint64_t ras_n_mask:1;
		uint64_t odt1_mask:2;
		uint64_t odt0_mask:2;
		uint64_t cs1_n_mask:2;
		uint64_t cs0_n_mask:2;
		uint64_t cke_mask:2;
#else
		uint64_t cke_mask:2;
		uint64_t cs0_n_mask:2;
		uint64_t cs1_n_mask:2;
		uint64_t odt0_mask:2;
		uint64_t odt1_mask:2;
		uint64_t ras_n_mask:1;
		uint64_t cas_n_mask:1;
		uint64_t we_n_mask:1;
		uint64_t ba_mask:3;
		uint64_t a_mask:16;
		uint64_t reset_n_mask:1;
		uint64_t reserved_33_63:31;
#endif
	} s;
	struct cvmx_lmcx_char_mask4_s cn61xx;
	struct cvmx_lmcx_char_mask4_s cn63xx;
	struct cvmx_lmcx_char_mask4_s cn63xxp1;
	struct cvmx_lmcx_char_mask4_s cn66xx;
	struct cvmx_lmcx_char_mask4_s cn68xx;
	struct cvmx_lmcx_char_mask4_s cn68xxp1;
	struct cvmx_lmcx_char_mask4_s cnf71xx;
};

/*
 * LMC COMP_CTL CSR (DDR2-era parts): N/P compensation control values.
 * Field availability differs per model, hence the per-model layouts.
 */
union cvmx_lmcx_comp_ctl {
	uint64_t u64;
	struct cvmx_lmcx_comp_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t nctl_csr:4;
		uint64_t nctl_clk:4;
		uint64_t nctl_cmd:4;
		uint64_t nctl_dat:4;
		uint64_t pctl_csr:4;
		uint64_t pctl_clk:4;
		uint64_t reserved_0_7:8;
#else
		uint64_t reserved_0_7:8;
		uint64_t pctl_clk:4;
		uint64_t pctl_csr:4;
		uint64_t nctl_dat:4;
		uint64_t nctl_cmd:4;
		uint64_t nctl_clk:4;
		uint64_t nctl_csr:4;
		uint64_t reserved_32_63:32;
#endif
	} s;
	struct cvmx_lmcx_comp_ctl_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t nctl_csr:4;
		uint64_t nctl_clk:4;
		uint64_t nctl_cmd:4;
		uint64_t nctl_dat:4;
		uint64_t pctl_csr:4;
		uint64_t pctl_clk:4;
		uint64_t pctl_cmd:4;
		uint64_t pctl_dat:4;
#else
		uint64_t pctl_dat:4;
		uint64_t pctl_cmd:4;
		uint64_t pctl_clk:4;
		uint64_t pctl_csr:4;
		uint64_t nctl_dat:4;
		uint64_t nctl_cmd:4;
		uint64_t nctl_clk:4;
		uint64_t nctl_csr:4;
		uint64_t reserved_32_63:32;
#endif
	} cn30xx;
	struct cvmx_lmcx_comp_ctl_cn30xx cn31xx;
	struct cvmx_lmcx_comp_ctl_cn30xx cn38xx;
	struct cvmx_lmcx_comp_ctl_cn30xx cn38xxp2;
	struct cvmx_lmcx_comp_ctl_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t nctl_csr:4;
		uint64_t reserved_20_27:8;
		uint64_t nctl_dat:4;
		uint64_t pctl_csr:4;
		uint64_t reserved_5_11:7;
		uint64_t pctl_dat:5;
#else
		uint64_t pctl_dat:5;
		uint64_t reserved_5_11:7;
		uint64_t pctl_csr:4;
		uint64_t nctl_dat:4;
		uint64_t reserved_20_27:8;
		uint64_t nctl_csr:4;
		uint64_t reserved_32_63:32;
#endif
	} cn50xx;
	struct cvmx_lmcx_comp_ctl_cn50xx cn52xx;
	struct cvmx_lmcx_comp_ctl_cn50xx cn52xxp1;
	struct cvmx_lmcx_comp_ctl_cn50xx cn56xx;
	struct cvmx_lmcx_comp_ctl_cn50xx cn56xxp1;
	struct cvmx_lmcx_comp_ctl_cn50xx cn58xx;
	struct cvmx_lmcx_comp_ctl_cn58xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t nctl_csr:4;
		uint64_t reserved_20_27:8;
		uint64_t nctl_dat:4;
		uint64_t pctl_csr:4;
		uint64_t reserved_4_11:8;
		uint64_t pctl_dat:4;
#else
		uint64_t pctl_dat:4;
		uint64_t reserved_4_11:8;
		uint64_t pctl_csr:4;
		uint64_t nctl_dat:4;
		uint64_t reserved_20_27:8;
		uint64_t nctl_csr:4;
		uint64_t reserved_32_63:32;
#endif
	} cn58xxp1;
};

/*
 * LMC COMP_CTL2 CSR (DDR3-era parts): drive-strength / ODT / termination
 * tuning controls.  All listed models share one layout.
 */
union cvmx_lmcx_comp_ctl2 {
	uint64_t u64;
	struct cvmx_lmcx_comp_ctl2_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_34_63:30;
		uint64_t ddr__ptune:4;
		uint64_t ddr__ntune:4;
		uint64_t m180:1;
		uint64_t byp:1;
		uint64_t ptune:4;
		uint64_t ntune:4;
		uint64_t rodt_ctl:4;
		uint64_t cmd_ctl:4;
		uint64_t ck_ctl:4;
		uint64_t dqx_ctl:4;
#else
		uint64_t dqx_ctl:4;
		uint64_t ck_ctl:4;
		uint64_t cmd_ctl:4;
		uint64_t rodt_ctl:4;
		uint64_t ntune:4;
		uint64_t ptune:4;
		uint64_t byp:1;
		uint64_t m180:1;
		uint64_t ddr__ntune:4;
		uint64_t ddr__ptune:4;
		uint64_t reserved_34_63:30;
#endif
	} s;
	struct cvmx_lmcx_comp_ctl2_s cn61xx;
	struct cvmx_lmcx_comp_ctl2_s cn63xx;
	struct cvmx_lmcx_comp_ctl2_s cn63xxp1;
	struct cvmx_lmcx_comp_ctl2_s cn66xx;
	struct cvmx_lmcx_comp_ctl2_s cn68xx;
	struct cvmx_lmcx_comp_ctl2_s cn68xxp1;
	struct cvmx_lmcx_comp_ctl2_s cnf71xx;
};

/*
 * LMC CONFIG CSR: main memory-controller configuration (ECC enable,
 * rank/row/bank address decoding, init sequencing, refresh interval).
 * Newer models add scramble (scrz), mode32b and early-unload bits, so
 * several per-model layouts exist; the reserved span shrinks accordingly.
 */
union cvmx_lmcx_config {
	uint64_t u64;
	struct cvmx_lmcx_config_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_61_63:3;
		uint64_t mode32b:1;
		uint64_t scrz:1;
		uint64_t early_unload_d1_r1:1;
		uint64_t early_unload_d1_r0:1;
		uint64_t early_unload_d0_r1:1;
		uint64_t early_unload_d0_r0:1;
		uint64_t init_status:4;
		uint64_t mirrmask:4;
		uint64_t rankmask:4;
		uint64_t rank_ena:1;
		uint64_t sref_with_dll:1;
		uint64_t early_dqx:1;
		uint64_t sequence:3;
		uint64_t ref_zqcs_int:19;
		uint64_t reset:1;
		uint64_t ecc_adr:1;
		uint64_t forcewrite:4;
		uint64_t idlepower:3;
		uint64_t pbank_lsb:4;
		uint64_t row_lsb:3;
		uint64_t ecc_ena:1;
		uint64_t init_start:1;
#else
		uint64_t init_start:1;
		uint64_t ecc_ena:1;
		uint64_t row_lsb:3;
		uint64_t pbank_lsb:4;
		uint64_t idlepower:3;
		uint64_t forcewrite:4;
		uint64_t ecc_adr:1;
		uint64_t reset:1;
		uint64_t ref_zqcs_int:19;
		uint64_t sequence:3;
		uint64_t early_dqx:1;
		uint64_t sref_with_dll:1;
		uint64_t rank_ena:1;
		uint64_t rankmask:4;
		uint64_t mirrmask:4;
		uint64_t init_status:4;
		uint64_t early_unload_d0_r0:1;
		uint64_t early_unload_d0_r1:1;
		uint64_t early_unload_d1_r0:1;
		uint64_t early_unload_d1_r1:1;
		uint64_t scrz:1;
		uint64_t mode32b:1;
		uint64_t reserved_61_63:3;
#endif
	} s;
	struct cvmx_lmcx_config_s cn61xx;
	struct cvmx_lmcx_config_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_59_63:5;
		uint64_t early_unload_d1_r1:1;
		uint64_t early_unload_d1_r0:1;
		uint64_t early_unload_d0_r1:1;
		uint64_t early_unload_d0_r0:1;
		uint64_t init_status:4;
		uint64_t mirrmask:4;
		uint64_t rankmask:4;
		uint64_t rank_ena:1;
		uint64_t sref_with_dll:1;
		uint64_t early_dqx:1;
		uint64_t sequence:3;
		uint64_t ref_zqcs_int:19;
		uint64_t reset:1;
		uint64_t ecc_adr:1;
		uint64_t forcewrite:4;
		uint64_t idlepower:3;
		uint64_t pbank_lsb:4;
		uint64_t row_lsb:3;
		uint64_t ecc_ena:1;
		uint64_t init_start:1;
#else
		uint64_t init_start:1;
		uint64_t ecc_ena:1;
		uint64_t row_lsb:3;
		uint64_t pbank_lsb:4;
		uint64_t idlepower:3;
		uint64_t forcewrite:4;
		uint64_t ecc_adr:1;
		uint64_t reset:1;
		uint64_t ref_zqcs_int:19;
		uint64_t sequence:3;
		uint64_t early_dqx:1;
		uint64_t sref_with_dll:1;
		uint64_t rank_ena:1;
		uint64_t rankmask:4;
		uint64_t mirrmask:4;
		uint64_t init_status:4;
		uint64_t early_unload_d0_r0:1;
		uint64_t early_unload_d0_r1:1;
		uint64_t early_unload_d1_r0:1;
		uint64_t early_unload_d1_r1:1;
		uint64_t reserved_59_63:5;
#endif
	} cn63xx;
	struct cvmx_lmcx_config_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_55_63:9;
		uint64_t init_status:4;
		uint64_t mirrmask:4;
		uint64_t rankmask:4;
		uint64_t rank_ena:1;
		uint64_t sref_with_dll:1;
		uint64_t early_dqx:1;
		uint64_t sequence:3;
		uint64_t ref_zqcs_int:19;
		uint64_t reset:1;
		uint64_t ecc_adr:1;
		uint64_t forcewrite:4;
		uint64_t idlepower:3;
		uint64_t pbank_lsb:4;
		uint64_t row_lsb:3;
		uint64_t ecc_ena:1;
		uint64_t init_start:1;
#else
		uint64_t init_start:1;
		uint64_t ecc_ena:1;
		uint64_t row_lsb:3;
		uint64_t pbank_lsb:4;
		uint64_t idlepower:3;
		uint64_t forcewrite:4;
		uint64_t ecc_adr:1;
		uint64_t reset:1;
		uint64_t ref_zqcs_int:19;
		uint64_t sequence:3;
		uint64_t early_dqx:1;
		uint64_t sref_with_dll:1;
		uint64_t rank_ena:1;
		uint64_t rankmask:4;
		uint64_t mirrmask:4;
		uint64_t init_status:4;
		uint64_t reserved_55_63:9;
#endif
	} cn63xxp1;
	struct cvmx_lmcx_config_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_60_63:4;
		uint64_t scrz:1;
		uint64_t early_unload_d1_r1:1;
		uint64_t early_unload_d1_r0:1;
		uint64_t early_unload_d0_r1:1;
		uint64_t early_unload_d0_r0:1;
		uint64_t init_status:4;
		uint64_t mirrmask:4;
		uint64_t rankmask:4;
		uint64_t rank_ena:1;
		uint64_t sref_with_dll:1;
		uint64_t early_dqx:1;
		uint64_t sequence:3;
		uint64_t ref_zqcs_int:19;
		uint64_t reset:1;
		uint64_t ecc_adr:1;
		uint64_t forcewrite:4;
		uint64_t idlepower:3;
		uint64_t pbank_lsb:4;
		uint64_t row_lsb:3;
		uint64_t ecc_ena:1;
		uint64_t init_start:1;
#else
		uint64_t init_start:1;
		uint64_t ecc_ena:1;
		uint64_t row_lsb:3;
		uint64_t pbank_lsb:4;
		uint64_t idlepower:3;
		uint64_t forcewrite:4;
		uint64_t ecc_adr:1;
		uint64_t reset:1;
		uint64_t ref_zqcs_int:19;
		uint64_t sequence:3;
		uint64_t early_dqx:1;
		uint64_t sref_with_dll:1;
		uint64_t rank_ena:1;
		uint64_t rankmask:4;
		uint64_t mirrmask:4;
		uint64_t init_status:4;
		uint64_t early_unload_d0_r0:1;
		uint64_t early_unload_d0_r1:1;
		uint64_t early_unload_d1_r0:1;
		uint64_t early_unload_d1_r1:1;
		uint64_t scrz:1;
		uint64_t reserved_60_63:4;
#endif
	} cn66xx;
	struct cvmx_lmcx_config_cn63xx cn68xx;
	struct cvmx_lmcx_config_cn63xx cn68xxp1;
	struct cvmx_lmcx_config_s cnf71xx;
};

/*
 * LMC CONTROL CSR: arbitration / throttling / precharge controls.
 * cn61xx has the full layout; cn63xx lacks the throttle-count fields,
 * cn66xx adds scramble_ena on top of the cn63xx set, and cn68xx has the
 * throttle counters but no scramble_ena.
 */
union cvmx_lmcx_control {
	uint64_t u64;
	struct cvmx_lmcx_control_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t scramble_ena:1;
		uint64_t thrcnt:12;
		uint64_t persub:8;
		uint64_t thrmax:4;
		uint64_t crm_cnt:5;
		uint64_t crm_thr:5;
		uint64_t crm_max:5;
		uint64_t rodt_bprch:1;
		uint64_t wodt_bprch:1;
		uint64_t bprch:2;
		uint64_t ext_zqcs_dis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t auto_dclkdis:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t nxm_write_en:1;
		uint64_t elev_prio_dis:1;
		uint64_t inorder_wr:1;
		uint64_t inorder_rd:1;
		uint64_t throttle_wr:1;
		uint64_t throttle_rd:1;
		uint64_t fprch2:2;
		uint64_t pocas:1;
		uint64_t ddr2t:1;
		uint64_t bwcnt:1;
		uint64_t rdimm_ena:1;
#else
		uint64_t rdimm_ena:1;
		uint64_t bwcnt:1;
		uint64_t ddr2t:1;
		uint64_t pocas:1;
		uint64_t fprch2:2;
		uint64_t throttle_rd:1;
		uint64_t throttle_wr:1;
		uint64_t inorder_rd:1;
		uint64_t inorder_wr:1;
		uint64_t elev_prio_dis:1;
		uint64_t nxm_write_en:1;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t auto_dclkdis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t ext_zqcs_dis:1;
		uint64_t bprch:2;
		uint64_t wodt_bprch:1;
		uint64_t rodt_bprch:1;
		uint64_t crm_max:5;
		uint64_t crm_thr:5;
		uint64_t crm_cnt:5;
		uint64_t thrmax:4;
		uint64_t persub:8;
		uint64_t thrcnt:12;
		uint64_t scramble_ena:1;
#endif
	} s;
	struct cvmx_lmcx_control_s cn61xx;
	struct cvmx_lmcx_control_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_24_63:40;
		uint64_t rodt_bprch:1;
		uint64_t wodt_bprch:1;
		uint64_t bprch:2;
		uint64_t ext_zqcs_dis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t auto_dclkdis:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t nxm_write_en:1;
		uint64_t elev_prio_dis:1;
		uint64_t inorder_wr:1;
		uint64_t inorder_rd:1;
		uint64_t throttle_wr:1;
		uint64_t throttle_rd:1;
		uint64_t fprch2:2;
		uint64_t pocas:1;
		uint64_t ddr2t:1;
		uint64_t bwcnt:1;
		uint64_t rdimm_ena:1;
#else
		uint64_t rdimm_ena:1;
		uint64_t bwcnt:1;
		uint64_t ddr2t:1;
		uint64_t pocas:1;
		uint64_t fprch2:2;
		uint64_t throttle_rd:1;
		uint64_t throttle_wr:1;
		uint64_t inorder_rd:1;
		uint64_t inorder_wr:1;
		uint64_t elev_prio_dis:1;
		uint64_t nxm_write_en:1;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t auto_dclkdis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t ext_zqcs_dis:1;
		uint64_t bprch:2;
		uint64_t wodt_bprch:1;
		uint64_t rodt_bprch:1;
		uint64_t reserved_24_63:40;
#endif
	} cn63xx;
	struct cvmx_lmcx_control_cn63xx cn63xxp1;
	struct cvmx_lmcx_control_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t scramble_ena:1;
		uint64_t reserved_24_62:39;
		uint64_t rodt_bprch:1;
		uint64_t wodt_bprch:1;
		uint64_t bprch:2;
		uint64_t ext_zqcs_dis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t auto_dclkdis:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t nxm_write_en:1;
		uint64_t elev_prio_dis:1;
		uint64_t inorder_wr:1;
		uint64_t inorder_rd:1;
		uint64_t throttle_wr:1;
		uint64_t throttle_rd:1;
		uint64_t fprch2:2;
		uint64_t pocas:1;
		uint64_t ddr2t:1;
		uint64_t bwcnt:1;
		uint64_t rdimm_ena:1;
#else
		uint64_t rdimm_ena:1;
		uint64_t bwcnt:1;
		uint64_t ddr2t:1;
		uint64_t pocas:1;
		uint64_t fprch2:2;
		uint64_t throttle_rd:1;
		uint64_t throttle_wr:1;
		uint64_t inorder_rd:1;
		uint64_t inorder_wr:1;
		uint64_t elev_prio_dis:1;
		uint64_t nxm_write_en:1;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t auto_dclkdis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t ext_zqcs_dis:1;
		uint64_t bprch:2;
		uint64_t wodt_bprch:1;
		uint64_t rodt_bprch:1;
		uint64_t reserved_24_62:39;
		uint64_t scramble_ena:1;
#endif
	} cn66xx;
	struct cvmx_lmcx_control_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_63_63:1;
		uint64_t thrcnt:12;
		uint64_t persub:8;
		uint64_t thrmax:4;
		uint64_t crm_cnt:5;
		uint64_t crm_thr:5;
		uint64_t crm_max:5;
		uint64_t rodt_bprch:1;
		uint64_t wodt_bprch:1;
		uint64_t bprch:2;
		uint64_t ext_zqcs_dis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t auto_dclkdis:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t nxm_write_en:1;
		uint64_t elev_prio_dis:1;
		uint64_t inorder_wr:1;
		uint64_t inorder_rd:1;
		uint64_t throttle_wr:1;
		uint64_t throttle_rd:1;
		uint64_t fprch2:2;
		uint64_t pocas:1;
		uint64_t ddr2t:1;
		uint64_t bwcnt:1;
		uint64_t rdimm_ena:1;
#else
		uint64_t rdimm_ena:1;
		uint64_t bwcnt:1;
		uint64_t ddr2t:1;
		uint64_t pocas:1;
		uint64_t fprch2:2;
		uint64_t throttle_rd:1;
		uint64_t throttle_wr:1;
		uint64_t inorder_rd:1;
		uint64_t inorder_wr:1;
		uint64_t elev_prio_dis:1;
		uint64_t nxm_write_en:1;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t auto_dclkdis:1;
		uint64_t int_zqcs_dis:1;
		uint64_t ext_zqcs_dis:1;
		uint64_t bprch:2;
		uint64_t wodt_bprch:1;
		uint64_t rodt_bprch:1;
		uint64_t crm_max:5;
		uint64_t crm_thr:5;
		uint64_t crm_cnt:5;
		uint64_t thrmax:4;
		uint64_t persub:8;
		uint64_t thrcnt:12;
		uint64_t reserved_63_63:1;
#endif
	} cn68xx;
	struct cvmx_lmcx_control_cn68xx cn68xxp1;
	struct cvmx_lmcx_control_cn66xx cnf71xx;
};

/*
 * LMC CTL CSR (legacy DDR/DDR2 parts): timing and mode control.  Models
 * differ in bits 10-13 (dreset/mode32b vs set_zero/mode128b vs reserved)
 * and bits 16-17 (pll controls vs reserved).
 */
union cvmx_lmcx_ctl {
	uint64_t u64;
	struct cvmx_lmcx_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t ddr__nctl:4;
		uint64_t ddr__pctl:4;
		uint64_t slow_scf:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t pll_div2:1;
		uint64_t pll_bypass:1;
		uint64_t rdimm_ena:1;
		uint64_t r2r_slot:1;
		uint64_t inorder_mwf:1;
		uint64_t inorder_mrf:1;
		uint64_t reserved_10_11:2;
		uint64_t fprch2:1;
		uint64_t bprch:1;
		uint64_t sil_lat:2;
		uint64_t tskw:2;
		uint64_t qs_dic:2;
		uint64_t dic:2;
#else
		uint64_t dic:2;
		uint64_t qs_dic:2;
		uint64_t tskw:2;
		uint64_t sil_lat:2;
		uint64_t bprch:1;
		uint64_t fprch2:1;
		uint64_t reserved_10_11:2;
		uint64_t inorder_mrf:1;
		uint64_t inorder_mwf:1;
		uint64_t r2r_slot:1;
		uint64_t rdimm_ena:1;
		uint64_t pll_bypass:1;
		uint64_t pll_div2:1;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t slow_scf:1;
		uint64_t ddr__pctl:4;
		uint64_t ddr__nctl:4;
		uint64_t reserved_32_63:32;
#endif
	} s;
	struct cvmx_lmcx_ctl_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t ddr__nctl:4;
		uint64_t ddr__pctl:4;
		uint64_t slow_scf:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t pll_div2:1;
		uint64_t pll_bypass:1;
		uint64_t rdimm_ena:1;
		uint64_t r2r_slot:1;
		uint64_t inorder_mwf:1;
		uint64_t inorder_mrf:1;
		uint64_t dreset:1;
		uint64_t mode32b:1;
		uint64_t fprch2:1;
		uint64_t bprch:1;
		uint64_t sil_lat:2;
		uint64_t tskw:2;
		uint64_t qs_dic:2;
		uint64_t dic:2;
#else
		uint64_t dic:2;
		uint64_t qs_dic:2;
		uint64_t tskw:2;
		uint64_t sil_lat:2;
		uint64_t bprch:1;
		uint64_t fprch2:1;
		uint64_t mode32b:1;
		uint64_t dreset:1;
		uint64_t inorder_mrf:1;
		uint64_t inorder_mwf:1;
		uint64_t r2r_slot:1;
		uint64_t rdimm_ena:1;
		uint64_t pll_bypass:1;
		uint64_t pll_div2:1;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t slow_scf:1;
		uint64_t ddr__pctl:4;
		uint64_t ddr__nctl:4;
		uint64_t reserved_32_63:32;
#endif
	} cn30xx;
	struct cvmx_lmcx_ctl_cn30xx cn31xx;
	struct cvmx_lmcx_ctl_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t ddr__nctl:4;
		uint64_t ddr__pctl:4;
		uint64_t slow_scf:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t reserved_16_17:2;
		uint64_t rdimm_ena:1;
		uint64_t r2r_slot:1;
		uint64_t inorder_mwf:1;
		uint64_t inorder_mrf:1;
		uint64_t set_zero:1;
		uint64_t mode128b:1;
		uint64_t fprch2:1;
		uint64_t bprch:1;
		uint64_t sil_lat:2;
		uint64_t tskw:2;
		uint64_t qs_dic:2;
		uint64_t dic:2;
#else
		uint64_t dic:2;
		uint64_t qs_dic:2;
		uint64_t tskw:2;
		uint64_t sil_lat:2;
		uint64_t bprch:1;
		uint64_t fprch2:1;
		uint64_t mode128b:1;
		uint64_t set_zero:1;
		uint64_t inorder_mrf:1;
		uint64_t inorder_mwf:1;
		uint64_t r2r_slot:1;
		uint64_t rdimm_ena:1;
		uint64_t reserved_16_17:2;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t slow_scf:1;
		uint64_t ddr__pctl:4;
		uint64_t ddr__nctl:4;
		uint64_t reserved_32_63:32;
#endif
	} cn38xx;
	struct cvmx_lmcx_ctl_cn38xx cn38xxp2;
	struct cvmx_lmcx_ctl_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t ddr__nctl:4;
		uint64_t ddr__pctl:4;
		uint64_t slow_scf:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t reserved_17_17:1;
		uint64_t pll_bypass:1;
		uint64_t rdimm_ena:1;
		uint64_t r2r_slot:1;
		uint64_t inorder_mwf:1;
		uint64_t inorder_mrf:1;
		uint64_t dreset:1;
		uint64_t mode32b:1;
		uint64_t fprch2:1;
		uint64_t bprch:1;
		uint64_t sil_lat:2;
		uint64_t tskw:2;
		uint64_t qs_dic:2;
		uint64_t dic:2;
#else
		uint64_t dic:2;
		uint64_t qs_dic:2;
		uint64_t tskw:2;
		uint64_t sil_lat:2;
		uint64_t bprch:1;
		uint64_t fprch2:1;
		uint64_t mode32b:1;
		uint64_t dreset:1;
		uint64_t inorder_mrf:1;
		uint64_t inorder_mwf:1;
		uint64_t r2r_slot:1;
		uint64_t rdimm_ena:1;
		uint64_t pll_bypass:1;
		uint64_t reserved_17_17:1;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t slow_scf:1;
		uint64_t ddr__pctl:4;
		uint64_t ddr__nctl:4;
		uint64_t reserved_32_63:32;
#endif
	} cn50xx;
	struct cvmx_lmcx_ctl_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t ddr__nctl:4;
		uint64_t ddr__pctl:4;
		uint64_t slow_scf:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t reserved_16_17:2;
		uint64_t rdimm_ena:1;
		uint64_t r2r_slot:1;
		uint64_t inorder_mwf:1;
		uint64_t inorder_mrf:1;
		uint64_t dreset:1;
		uint64_t mode32b:1;
		uint64_t fprch2:1;
		uint64_t bprch:1;
		uint64_t sil_lat:2;
		uint64_t tskw:2;
		uint64_t qs_dic:2;
		uint64_t dic:2;
#else
		uint64_t dic:2;
		uint64_t qs_dic:2;
		uint64_t tskw:2;
		uint64_t sil_lat:2;
		uint64_t bprch:1;
		uint64_t fprch2:1;
		uint64_t mode32b:1;
		uint64_t dreset:1;
		uint64_t inorder_mrf:1;
		uint64_t inorder_mwf:1;
		uint64_t r2r_slot:1;
		uint64_t rdimm_ena:1;
		uint64_t reserved_16_17:2;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t slow_scf:1;
		uint64_t ddr__pctl:4;
		uint64_t ddr__nctl:4;
		uint64_t reserved_32_63:32;
#endif
	} cn52xx;
	struct cvmx_lmcx_ctl_cn52xx cn52xxp1;
	struct cvmx_lmcx_ctl_cn52xx cn56xx;
	struct cvmx_lmcx_ctl_cn52xx cn56xxp1;
	struct cvmx_lmcx_ctl_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t ddr__nctl:4;
		uint64_t ddr__pctl:4;
		uint64_t slow_scf:1;
		uint64_t xor_bank:1;
		uint64_t max_write_batch:4;
		uint64_t reserved_16_17:2;
		uint64_t rdimm_ena:1;
		uint64_t r2r_slot:1;
		uint64_t inorder_mwf:1;
		uint64_t inorder_mrf:1;
		uint64_t dreset:1;
		uint64_t mode128b:1;
		uint64_t fprch2:1;
		uint64_t bprch:1;
		uint64_t sil_lat:2;
		uint64_t tskw:2;
		uint64_t qs_dic:2;
		uint64_t dic:2;
#else
		uint64_t dic:2;
		uint64_t qs_dic:2;
		uint64_t tskw:2;
		uint64_t sil_lat:2;
		uint64_t bprch:1;
		uint64_t fprch2:1;
		uint64_t mode128b:1;
		uint64_t dreset:1;
		uint64_t inorder_mrf:1;
		uint64_t inorder_mwf:1;
		uint64_t r2r_slot:1;
		uint64_t rdimm_ena:1;
		uint64_t reserved_16_17:2;
		uint64_t max_write_batch:4;
		uint64_t xor_bank:1;
		uint64_t slow_scf:1;
		uint64_t ddr__pctl:4;
		uint64_t ddr__nctl:4;
		uint64_t reserved_32_63:32;
#endif
	} cn58xx;
	struct cvmx_lmcx_ctl_cn58xx cn58xxp1;
};

/*
 * LMC CTL1 CSR (legacy parts): secondary control.  Earliest parts expose
 * only data_layout; later parts add silo/DCC and sequencing fields.
 */
union cvmx_lmcx_ctl1 {
	uint64_t u64;
	struct cvmx_lmcx_ctl1_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_21_63:43;
		uint64_t ecc_adr:1;
		uint64_t forcewrite:4;
		uint64_t idlepower:3;
		uint64_t sequence:3;
		uint64_t sil_mode:1;
		uint64_t dcc_enable:1;
		uint64_t reserved_2_7:6;
		uint64_t data_layout:2;
#else
		uint64_t data_layout:2;
		uint64_t reserved_2_7:6;
		uint64_t dcc_enable:1;
		uint64_t sil_mode:1;
		uint64_t sequence:3;
		uint64_t idlepower:3;
		uint64_t forcewrite:4;
		uint64_t ecc_adr:1;
		uint64_t reserved_21_63:43;
#endif
	} s;
	struct cvmx_lmcx_ctl1_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_2_63:62;
		uint64_t data_layout:2;
#else
		uint64_t data_layout:2;
		uint64_t reserved_2_63:62;
#endif
	} cn30xx;
	struct cvmx_lmcx_ctl1_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_10_63:54;
		uint64_t sil_mode:1;
		uint64_t dcc_enable:1;
		uint64_t reserved_2_7:6;
		uint64_t data_layout:2;
#else
		uint64_t data_layout:2;
		uint64_t reserved_2_7:6;
		uint64_t dcc_enable:1;
		uint64_t sil_mode:1;
		uint64_t reserved_10_63:54;
#endif
	} cn50xx;
	struct cvmx_lmcx_ctl1_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_21_63:43;
		uint64_t ecc_adr:1;
		uint64_t forcewrite:4;
		uint64_t idlepower:3;
		uint64_t sequence:3;
		uint64_t sil_mode:1;
		uint64_t dcc_enable:1;
		uint64_t reserved_0_7:8;
#else
		uint64_t reserved_0_7:8;
		uint64_t dcc_enable:1;
		uint64_t sil_mode:1;
		uint64_t sequence:3;
		uint64_t idlepower:3;
		uint64_t forcewrite:4;
		uint64_t ecc_adr:1;
		uint64_t reserved_21_63:43;
#endif
	} cn52xx;
	struct cvmx_lmcx_ctl1_cn52xx cn52xxp1;
	struct cvmx_lmcx_ctl1_cn52xx cn56xx;
	struct cvmx_lmcx_ctl1_cn52xx cn56xxp1;
	struct cvmx_lmcx_ctl1_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_10_63:54;
		uint64_t sil_mode:1;
		uint64_t dcc_enable:1;
		uint64_t reserved_0_7:8;
#else
		uint64_t reserved_0_7:8;
		uint64_t dcc_enable:1;
		uint64_t sil_mode:1;
		uint64_t reserved_10_63:54;
#endif
	} cn58xx;
	struct cvmx_lmcx_ctl1_cn58xx cn58xxp1;
};

/* LMC DCLK_CNT CSR: free-running 64-bit DCLK cycle counter (newer parts). */
union cvmx_lmcx_dclk_cnt {
	uint64_t u64;
	struct cvmx_lmcx_dclk_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t dclkcnt:64;
#else
		uint64_t dclkcnt:64;
#endif
	} s;
	struct cvmx_lmcx_dclk_cnt_s cn61xx;
	struct cvmx_lmcx_dclk_cnt_s cn63xx;
	struct cvmx_lmcx_dclk_cnt_s cn63xxp1;
	struct cvmx_lmcx_dclk_cnt_s cn66xx;
	struct cvmx_lmcx_dclk_cnt_s cn68xx;
	struct cvmx_lmcx_dclk_cnt_s cn68xxp1;
	struct cvmx_lmcx_dclk_cnt_s cnf71xx;
};

/* LMC DCLK_CNT_HI CSR: upper 32 bits of the DCLK counter (legacy parts). */
union cvmx_lmcx_dclk_cnt_hi {
	uint64_t u64;
	struct cvmx_lmcx_dclk_cnt_hi_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t dclkcnt_hi:32;
#else
		uint64_t dclkcnt_hi:32;
		uint64_t reserved_32_63:32;
#endif
	} s;
	struct cvmx_lmcx_dclk_cnt_hi_s cn30xx;
	struct cvmx_lmcx_dclk_cnt_hi_s cn31xx;
	struct cvmx_lmcx_dclk_cnt_hi_s cn38xx;
	struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2;
	struct cvmx_lmcx_dclk_cnt_hi_s cn50xx;
	struct cvmx_lmcx_dclk_cnt_hi_s cn52xx;
	struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1;
	struct cvmx_lmcx_dclk_cnt_hi_s cn56xx;
	struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1;
	struct cvmx_lmcx_dclk_cnt_hi_s cn58xx;
	struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1;
};

/* LMC DCLK_CNT_LO CSR: lower 32 bits of the DCLK counter (legacy parts). */
union cvmx_lmcx_dclk_cnt_lo {
	uint64_t u64;
	struct cvmx_lmcx_dclk_cnt_lo_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t dclkcnt_lo:32;
#else
		uint64_t dclkcnt_lo:32;
		uint64_t reserved_32_63:32;
#endif
	} s;
	struct cvmx_lmcx_dclk_cnt_lo_s cn30xx;
	struct cvmx_lmcx_dclk_cnt_lo_s cn31xx;
	struct cvmx_lmcx_dclk_cnt_lo_s cn38xx;
	struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2;
	struct cvmx_lmcx_dclk_cnt_lo_s cn50xx;
	struct cvmx_lmcx_dclk_cnt_lo_s cn52xx;
	struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1;
	struct cvmx_lmcx_dclk_cnt_lo_s cn56xx;
	struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1;
	struct cvmx_lmcx_dclk_cnt_lo_s cn58xx;
	struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1;
};

/* LMC DCLK_CTL CSR: DCLK90 control; present only on cn56xx parts. */
union cvmx_lmcx_dclk_ctl {
	uint64_t u64;
	struct cvmx_lmcx_dclk_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_8_63:56;
		uint64_t off90_ena:1;
		uint64_t dclk90_byp:1;
		uint64_t dclk90_ld:1;
		uint64_t dclk90_vlu:5;
#else
		uint64_t dclk90_vlu:5;
		uint64_t dclk90_ld:1;
		uint64_t dclk90_byp:1;
		uint64_t off90_ena:1;
		uint64_t reserved_8_63:56;
#endif
	} s;
	struct cvmx_lmcx_dclk_ctl_s cn56xx;
	struct cvmx_lmcx_dclk_ctl_s cn56xxp1;
};

/*
 * LMC DDR2_CTL CSR: DDR2 mode/timing control.  cn30xx/cn31xx reserve
 * bit 1 where later parts define rdqs.
 */
union cvmx_lmcx_ddr2_ctl {
	uint64_t u64;
	struct cvmx_lmcx_ddr2_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t bank8:1;
		uint64_t burst8:1;
		uint64_t addlat:3;
		uint64_t pocas:1;
		uint64_t bwcnt:1;
		uint64_t twr:3;
		uint64_t silo_hc:1;
		uint64_t ddr_eof:4;
		uint64_t tfaw:5;
		uint64_t crip_mode:1;
		uint64_t ddr2t:1;
		uint64_t odt_ena:1;
		uint64_t qdll_ena:1;
		uint64_t dll90_vlu:5;
		uint64_t dll90_byp:1;
		uint64_t rdqs:1;
		uint64_t ddr2:1;
#else
		uint64_t ddr2:1;
		uint64_t rdqs:1;
		uint64_t dll90_byp:1;
		uint64_t dll90_vlu:5;
		uint64_t qdll_ena:1;
		uint64_t odt_ena:1;
		uint64_t ddr2t:1;
		uint64_t crip_mode:1;
		uint64_t tfaw:5;
		uint64_t ddr_eof:4;
		uint64_t silo_hc:1;
		uint64_t twr:3;
		uint64_t bwcnt:1;
		uint64_t pocas:1;
		uint64_t addlat:3;
		uint64_t burst8:1;
		uint64_t bank8:1;
		uint64_t reserved_32_63:32;
#endif
	} s;
	struct cvmx_lmcx_ddr2_ctl_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63:32;
		uint64_t bank8:1;
		uint64_t burst8:1;
		uint64_t addlat:3;
		uint64_t pocas:1;
		uint64_t bwcnt:1;
		uint64_t twr:3;
		uint64_t silo_hc:1;
		uint64_t ddr_eof:4;
		uint64_t tfaw:5;
		uint64_t crip_mode:1;
		uint64_t ddr2t:1;
		uint64_t odt_ena:1;
		uint64_t qdll_ena:1;
		uint64_t dll90_vlu:5;
		uint64_t dll90_byp:1;
		uint64_t reserved_1_1:1;
		uint64_t ddr2:1;
#else
		uint64_t ddr2:1;
		uint64_t reserved_1_1:1;
		uint64_t dll90_byp:1;
		uint64_t dll90_vlu:5;
		uint64_t qdll_ena:1;
		uint64_t odt_ena:1;
		uint64_t ddr2t:1;
		uint64_t crip_mode:1;
		uint64_t tfaw:5;
		uint64_t ddr_eof:4;
		uint64_t silo_hc:1;
		uint64_t twr:3;
		uint64_t bwcnt:1;
		uint64_t pocas:1;
		uint64_t addlat:3;
		uint64_t burst8:1;
		uint64_t bank8:1;
		uint64_t reserved_32_63:32;
#endif
	} cn30xx;
	struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx;
	struct cvmx_lmcx_ddr2_ctl_s cn38xx;
	struct cvmx_lmcx_ddr2_ctl_s cn38xxp2;
	struct cvmx_lmcx_ddr2_ctl_s cn50xx;
	struct cvmx_lmcx_ddr2_ctl_s cn52xx;
	struct cvmx_lmcx_ddr2_ctl_s cn52xxp1;
	struct cvmx_lmcx_ddr2_ctl_s cn56xx;
	struct cvmx_lmcx_ddr2_ctl_s cn56xxp1;
	struct cvmx_lmcx_ddr2_ctl_s cn58xx;
	struct cvmx_lmcx_ddr2_ctl_s cn58xxp1;
};

/* LMC DDR_PLL_CTL CSR: DDR/DFM PLL configuration (newer parts). */
union cvmx_lmcx_ddr_pll_ctl {
	uint64_t u64;
	struct cvmx_lmcx_ddr_pll_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_27_63:37;
		uint64_t jtg_test_mode:1;
		uint64_t dfm_div_reset:1;
		uint64_t dfm_ps_en:3;
		uint64_t ddr_div_reset:1;
		uint64_t ddr_ps_en:3;
		uint64_t diffamp:4;
		uint64_t cps:3;
		uint64_t cpb:3;
		uint64_t reset_n:1;
		uint64_t clkf:7;
#else
		uint64_t clkf:7;
		uint64_t reset_n:1;
		uint64_t cpb:3;
		uint64_t cps:3;
		uint64_t diffamp:4;
		uint64_t ddr_ps_en:3;
		uint64_t ddr_div_reset:1;
		uint64_t dfm_ps_en:3;
		uint64_t dfm_div_reset:1;
		uint64_t jtg_test_mode:1;
		uint64_t reserved_27_63:37;
#endif
	} s;
	struct cvmx_lmcx_ddr_pll_ctl_s cn61xx;
	struct cvmx_lmcx_ddr_pll_ctl_s cn63xx;
	struct cvmx_lmcx_ddr_pll_ctl_s cn63xxp1;
	struct cvmx_lmcx_ddr_pll_ctl_s cn66xx;
	struct cvmx_lmcx_ddr_pll_ctl_s cn68xx;
	struct cvmx_lmcx_ddr_pll_ctl_s cn68xxp1;
	struct cvmx_lmcx_ddr_pll_ctl_s cnf71xx;
};

/*
 * LMC DELAY_CFG CSR: per-group (clk/cmd/dq) delay settings.  cn30xx uses
 * 5-bit fields; cn38xx and later use 4-bit fields with padding bits.
 */
union cvmx_lmcx_delay_cfg {
	uint64_t u64;
	struct cvmx_lmcx_delay_cfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_15_63:49;
		uint64_t dq:5;
		uint64_t cmd:5;
		uint64_t clk:5;
#else
		uint64_t clk:5;
		uint64_t cmd:5;
		uint64_t dq:5;
		uint64_t reserved_15_63:49;
#endif
	} s;
	struct cvmx_lmcx_delay_cfg_s cn30xx;
	struct cvmx_lmcx_delay_cfg_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_14_63:50;
		uint64_t dq:4;
		uint64_t reserved_9_9:1;
		uint64_t cmd:4;
		uint64_t reserved_4_4:1;
		uint64_t clk:4;
#else
		uint64_t clk:4;
		uint64_t reserved_4_4:1;
		uint64_t cmd:4;
		uint64_t reserved_9_9:1;
		uint64_t dq:4;
		uint64_t reserved_14_63:50;
#endif
	} cn38xx;
	struct cvmx_lmcx_delay_cfg_cn38xx cn50xx;
	struct cvmx_lmcx_delay_cfg_cn38xx cn52xx;
	struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1;
	struct cvmx_lmcx_delay_cfg_cn38xx cn56xx;
	struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1;
	struct cvmx_lmcx_delay_cfg_cn38xx cn58xx;
	struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1;
};

/*
 * LMC DIMMX_PARAMS CSR: sixteen 4-bit RDIMM register-control words
 * (RC0..RC15) packed into one 64-bit value.
 */
union cvmx_lmcx_dimmx_params {
	uint64_t u64;
	struct cvmx_lmcx_dimmx_params_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t rc15:4;
		uint64_t rc14:4;
		uint64_t rc13:4;
		uint64_t rc12:4;
		uint64_t rc11:4;
		uint64_t rc10:4;
		uint64_t rc9:4;
		uint64_t rc8:4;
		uint64_t rc7:4;
		uint64_t rc6:4;
		uint64_t rc5:4;
		uint64_t rc4:4;
		uint64_t rc3:4;
		uint64_t rc2:4;
		uint64_t rc1:4;
		uint64_t rc0:4;
#else
		uint64_t rc0:4;
		uint64_t rc1:4;
		uint64_t rc2:4;
		uint64_t rc3:4;
		uint64_t rc4:4;
		uint64_t rc5:4;
		uint64_t rc6:4;
		uint64_t rc7:4;
		uint64_t rc8:4;
		uint64_t rc9:4;
		uint64_t rc10:4;
		uint64_t rc11:4;
		uint64_t rc12:4;
		uint64_t rc13:4;
		uint64_t rc14:4;
		uint64_t rc15:4;
#endif
	} s;
	struct cvmx_lmcx_dimmx_params_s cn61xx;
	struct cvmx_lmcx_dimmx_params_s cn63xx;
	struct cvmx_lmcx_dimmx_params_s cn63xxp1;
	struct cvmx_lmcx_dimmx_params_s cn66xx;
	struct cvmx_lmcx_dimmx_params_s cn68xx;
	struct cvmx_lmcx_dimmx_params_s cn68xxp1;
	struct cvmx_lmcx_dimmx_params_s cnf71xx;
};

/*
 * LMC DIMM_CTL CSR: per-DIMM write masks plus register-write timing and
 * parity control.
 */
union cvmx_lmcx_dimm_ctl {
	uint64_t u64;
	struct cvmx_lmcx_dimm_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_46_63:18;
		uint64_t parity:1;
		uint64_t tcws:13;
		uint64_t dimm1_wmask:16;
		uint64_t dimm0_wmask:16;
#else
		uint64_t dimm0_wmask:16;
		uint64_t dimm1_wmask:16;
		uint64_t tcws:13;
		uint64_t parity:1;
		uint64_t reserved_46_63:18;
#endif
	} s;
	struct cvmx_lmcx_dimm_ctl_s cn61xx;
	struct cvmx_lmcx_dimm_ctl_s cn63xx;
	struct cvmx_lmcx_dimm_ctl_s cn63xxp1;
	struct cvmx_lmcx_dimm_ctl_s cn66xx;
	struct cvmx_lmcx_dimm_ctl_s cn68xx;
	struct cvmx_lmcx_dimm_ctl_s cn68xxp1;
	struct cvmx_lmcx_dimm_ctl_s cnf71xx;
};

/* LMC DLL_CTL CSR: DLL control (definition continues past this chunk). */
union cvmx_lmcx_dll_ctl {
	uint64_t u64;
	struct cvmx_lmcx_dll_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_8_63:56;
uint64_t dreset:1; 1649 + uint64_t dll90_byp:1; 1650 + uint64_t dll90_ena:1; 1651 + uint64_t dll90_vlu:5; 1652 + #else 1653 + uint64_t dll90_vlu:5; 1654 + uint64_t dll90_ena:1; 1655 + uint64_t dll90_byp:1; 1656 + uint64_t dreset:1; 1657 + uint64_t reserved_8_63:56; 1658 + #endif 1659 + } s; 1660 + struct cvmx_lmcx_dll_ctl_s cn52xx; 1661 + struct cvmx_lmcx_dll_ctl_s cn52xxp1; 1662 + struct cvmx_lmcx_dll_ctl_s cn56xx; 1663 + struct cvmx_lmcx_dll_ctl_s cn56xxp1; 1664 + }; 1665 + 1666 + union cvmx_lmcx_dll_ctl2 { 1667 + uint64_t u64; 1668 + struct cvmx_lmcx_dll_ctl2_s { 1669 + #ifdef __BIG_ENDIAN_BITFIELD 1670 + uint64_t reserved_16_63:48; 1671 + uint64_t intf_en:1; 1672 + uint64_t dll_bringup:1; 1673 + uint64_t dreset:1; 1674 + uint64_t quad_dll_ena:1; 1675 + uint64_t byp_sel:4; 1676 + uint64_t byp_setting:8; 1677 + #else 1678 + uint64_t byp_setting:8; 1679 + uint64_t byp_sel:4; 1680 + uint64_t quad_dll_ena:1; 1681 + uint64_t dreset:1; 1682 + uint64_t dll_bringup:1; 1683 + uint64_t intf_en:1; 1684 + uint64_t reserved_16_63:48; 1685 + #endif 1686 + } s; 1687 + struct cvmx_lmcx_dll_ctl2_s cn61xx; 1688 + struct cvmx_lmcx_dll_ctl2_cn63xx { 1689 + #ifdef __BIG_ENDIAN_BITFIELD 1690 + uint64_t reserved_15_63:49; 1691 + uint64_t dll_bringup:1; 1692 + uint64_t dreset:1; 1693 + uint64_t quad_dll_ena:1; 1694 + uint64_t byp_sel:4; 1695 + uint64_t byp_setting:8; 1696 + #else 1697 + uint64_t byp_setting:8; 1698 + uint64_t byp_sel:4; 1699 + uint64_t quad_dll_ena:1; 1700 + uint64_t dreset:1; 1701 + uint64_t dll_bringup:1; 1702 + uint64_t reserved_15_63:49; 1703 + #endif 1704 + } cn63xx; 1705 + struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1; 1706 + struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx; 1707 + struct cvmx_lmcx_dll_ctl2_s cn68xx; 1708 + struct cvmx_lmcx_dll_ctl2_s cn68xxp1; 1709 + struct cvmx_lmcx_dll_ctl2_s cnf71xx; 1710 + }; 1711 + 1712 + union cvmx_lmcx_dll_ctl3 { 1713 + uint64_t u64; 1714 + struct cvmx_lmcx_dll_ctl3_s { 1715 + #ifdef __BIG_ENDIAN_BITFIELD 1716 + uint64_t 
reserved_41_63:23; 1717 + uint64_t dclk90_fwd:1; 1718 + uint64_t ddr_90_dly_byp:1; 1719 + uint64_t dclk90_recal_dis:1; 1720 + uint64_t dclk90_byp_sel:1; 1721 + uint64_t dclk90_byp_setting:8; 1722 + uint64_t dll_fast:1; 1723 + uint64_t dll90_setting:8; 1724 + uint64_t fine_tune_mode:1; 1725 + uint64_t dll_mode:1; 1726 + uint64_t dll90_byte_sel:4; 1727 + uint64_t offset_ena:1; 1728 + uint64_t load_offset:1; 1729 + uint64_t mode_sel:2; 1730 + uint64_t byte_sel:4; 1731 + uint64_t offset:6; 1732 + #else 1733 + uint64_t offset:6; 1734 + uint64_t byte_sel:4; 1735 + uint64_t mode_sel:2; 1736 + uint64_t load_offset:1; 1737 + uint64_t offset_ena:1; 1738 + uint64_t dll90_byte_sel:4; 1739 + uint64_t dll_mode:1; 1740 + uint64_t fine_tune_mode:1; 1741 + uint64_t dll90_setting:8; 1742 + uint64_t dll_fast:1; 1743 + uint64_t dclk90_byp_setting:8; 1744 + uint64_t dclk90_byp_sel:1; 1745 + uint64_t dclk90_recal_dis:1; 1746 + uint64_t ddr_90_dly_byp:1; 1747 + uint64_t dclk90_fwd:1; 1748 + uint64_t reserved_41_63:23; 1749 + #endif 1750 + } s; 1751 + struct cvmx_lmcx_dll_ctl3_s cn61xx; 1752 + struct cvmx_lmcx_dll_ctl3_cn63xx { 1753 + #ifdef __BIG_ENDIAN_BITFIELD 1754 + uint64_t reserved_29_63:35; 1755 + uint64_t dll_fast:1; 1756 + uint64_t dll90_setting:8; 1757 + uint64_t fine_tune_mode:1; 1758 + uint64_t dll_mode:1; 1759 + uint64_t dll90_byte_sel:4; 1760 + uint64_t offset_ena:1; 1761 + uint64_t load_offset:1; 1762 + uint64_t mode_sel:2; 1763 + uint64_t byte_sel:4; 1764 + uint64_t offset:6; 1765 + #else 1766 + uint64_t offset:6; 1767 + uint64_t byte_sel:4; 1768 + uint64_t mode_sel:2; 1769 + uint64_t load_offset:1; 1770 + uint64_t offset_ena:1; 1771 + uint64_t dll90_byte_sel:4; 1772 + uint64_t dll_mode:1; 1773 + uint64_t fine_tune_mode:1; 1774 + uint64_t dll90_setting:8; 1775 + uint64_t dll_fast:1; 1776 + uint64_t reserved_29_63:35; 1777 + #endif 1778 + } cn63xx; 1779 + struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1; 1780 + struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx; 1781 + struct 
cvmx_lmcx_dll_ctl3_s cn68xx; 1782 + struct cvmx_lmcx_dll_ctl3_s cn68xxp1; 1783 + struct cvmx_lmcx_dll_ctl3_s cnf71xx; 1784 + }; 1785 + 1786 + union cvmx_lmcx_dual_memcfg { 1787 + uint64_t u64; 1788 + struct cvmx_lmcx_dual_memcfg_s { 1789 + #ifdef __BIG_ENDIAN_BITFIELD 1790 + uint64_t reserved_20_63:44; 1791 + uint64_t bank8:1; 1792 + uint64_t row_lsb:3; 1793 + uint64_t reserved_8_15:8; 1794 + uint64_t cs_mask:8; 1795 + #else 1796 + uint64_t cs_mask:8; 1797 + uint64_t reserved_8_15:8; 1798 + uint64_t row_lsb:3; 1799 + uint64_t bank8:1; 1800 + uint64_t reserved_20_63:44; 1801 + #endif 1802 + } s; 1803 + struct cvmx_lmcx_dual_memcfg_s cn50xx; 1804 + struct cvmx_lmcx_dual_memcfg_s cn52xx; 1805 + struct cvmx_lmcx_dual_memcfg_s cn52xxp1; 1806 + struct cvmx_lmcx_dual_memcfg_s cn56xx; 1807 + struct cvmx_lmcx_dual_memcfg_s cn56xxp1; 1808 + struct cvmx_lmcx_dual_memcfg_s cn58xx; 1809 + struct cvmx_lmcx_dual_memcfg_s cn58xxp1; 1810 + struct cvmx_lmcx_dual_memcfg_cn61xx { 1811 + #ifdef __BIG_ENDIAN_BITFIELD 1812 + uint64_t reserved_19_63:45; 1813 + uint64_t row_lsb:3; 1814 + uint64_t reserved_8_15:8; 1815 + uint64_t cs_mask:8; 1816 + #else 1817 + uint64_t cs_mask:8; 1818 + uint64_t reserved_8_15:8; 1819 + uint64_t row_lsb:3; 1820 + uint64_t reserved_19_63:45; 1821 + #endif 1822 + } cn61xx; 1823 + struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx; 1824 + struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1; 1825 + struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx; 1826 + struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx; 1827 + struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1; 1828 + struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx; 1829 + }; 1830 + 1831 + union cvmx_lmcx_ecc_synd { 1832 + uint64_t u64; 1833 + struct cvmx_lmcx_ecc_synd_s { 1834 + #ifdef __BIG_ENDIAN_BITFIELD 1835 + uint64_t reserved_32_63:32; 1836 + uint64_t mrdsyn3:8; 1837 + uint64_t mrdsyn2:8; 1838 + uint64_t mrdsyn1:8; 1839 + uint64_t mrdsyn0:8; 1840 + #else 1841 + uint64_t mrdsyn0:8; 1842 + uint64_t mrdsyn1:8; 1843 + uint64_t mrdsyn2:8; 1844 + 
uint64_t mrdsyn3:8; 1845 + uint64_t reserved_32_63:32; 1846 + #endif 1847 + } s; 1848 + struct cvmx_lmcx_ecc_synd_s cn30xx; 1849 + struct cvmx_lmcx_ecc_synd_s cn31xx; 1850 + struct cvmx_lmcx_ecc_synd_s cn38xx; 1851 + struct cvmx_lmcx_ecc_synd_s cn38xxp2; 1852 + struct cvmx_lmcx_ecc_synd_s cn50xx; 1853 + struct cvmx_lmcx_ecc_synd_s cn52xx; 1854 + struct cvmx_lmcx_ecc_synd_s cn52xxp1; 1855 + struct cvmx_lmcx_ecc_synd_s cn56xx; 1856 + struct cvmx_lmcx_ecc_synd_s cn56xxp1; 1857 + struct cvmx_lmcx_ecc_synd_s cn58xx; 1858 + struct cvmx_lmcx_ecc_synd_s cn58xxp1; 1859 + struct cvmx_lmcx_ecc_synd_s cn61xx; 1860 + struct cvmx_lmcx_ecc_synd_s cn63xx; 1861 + struct cvmx_lmcx_ecc_synd_s cn63xxp1; 1862 + struct cvmx_lmcx_ecc_synd_s cn66xx; 1863 + struct cvmx_lmcx_ecc_synd_s cn68xx; 1864 + struct cvmx_lmcx_ecc_synd_s cn68xxp1; 1865 + struct cvmx_lmcx_ecc_synd_s cnf71xx; 1866 + }; 1867 + 1868 + union cvmx_lmcx_fadr { 1869 + uint64_t u64; 1870 + struct cvmx_lmcx_fadr_s { 1871 + #ifdef __BIG_ENDIAN_BITFIELD 1872 + uint64_t reserved_0_63:64; 1873 + #else 1874 + uint64_t reserved_0_63:64; 1875 + #endif 1876 + } s; 1877 + struct cvmx_lmcx_fadr_cn30xx { 1878 + #ifdef __BIG_ENDIAN_BITFIELD 1879 + uint64_t reserved_32_63:32; 1880 + uint64_t fdimm:2; 1881 + uint64_t fbunk:1; 1882 + uint64_t fbank:3; 1883 + uint64_t frow:14; 1884 + uint64_t fcol:12; 1885 + #else 1886 + uint64_t fcol:12; 1887 + uint64_t frow:14; 1888 + uint64_t fbank:3; 1889 + uint64_t fbunk:1; 1890 + uint64_t fdimm:2; 1891 + uint64_t reserved_32_63:32; 1892 + #endif 1893 + } cn30xx; 1894 + struct cvmx_lmcx_fadr_cn30xx cn31xx; 1895 + struct cvmx_lmcx_fadr_cn30xx cn38xx; 1896 + struct cvmx_lmcx_fadr_cn30xx cn38xxp2; 1897 + struct cvmx_lmcx_fadr_cn30xx cn50xx; 1898 + struct cvmx_lmcx_fadr_cn30xx cn52xx; 1899 + struct cvmx_lmcx_fadr_cn30xx cn52xxp1; 1900 + struct cvmx_lmcx_fadr_cn30xx cn56xx; 1901 + struct cvmx_lmcx_fadr_cn30xx cn56xxp1; 1902 + struct cvmx_lmcx_fadr_cn30xx cn58xx; 1903 + struct cvmx_lmcx_fadr_cn30xx cn58xxp1; 
1904 + struct cvmx_lmcx_fadr_cn61xx { 1905 + #ifdef __BIG_ENDIAN_BITFIELD 1906 + uint64_t reserved_36_63:28; 1907 + uint64_t fdimm:2; 1908 + uint64_t fbunk:1; 1909 + uint64_t fbank:3; 1910 + uint64_t frow:16; 1911 + uint64_t fcol:14; 1912 + #else 1913 + uint64_t fcol:14; 1914 + uint64_t frow:16; 1915 + uint64_t fbank:3; 1916 + uint64_t fbunk:1; 1917 + uint64_t fdimm:2; 1918 + uint64_t reserved_36_63:28; 1919 + #endif 1920 + } cn61xx; 1921 + struct cvmx_lmcx_fadr_cn61xx cn63xx; 1922 + struct cvmx_lmcx_fadr_cn61xx cn63xxp1; 1923 + struct cvmx_lmcx_fadr_cn61xx cn66xx; 1924 + struct cvmx_lmcx_fadr_cn61xx cn68xx; 1925 + struct cvmx_lmcx_fadr_cn61xx cn68xxp1; 1926 + struct cvmx_lmcx_fadr_cn61xx cnf71xx; 1927 + }; 1928 + 1929 + union cvmx_lmcx_ifb_cnt { 1930 + uint64_t u64; 1931 + struct cvmx_lmcx_ifb_cnt_s { 1932 + #ifdef __BIG_ENDIAN_BITFIELD 1933 + uint64_t ifbcnt:64; 1934 + #else 1935 + uint64_t ifbcnt:64; 1936 + #endif 1937 + } s; 1938 + struct cvmx_lmcx_ifb_cnt_s cn61xx; 1939 + struct cvmx_lmcx_ifb_cnt_s cn63xx; 1940 + struct cvmx_lmcx_ifb_cnt_s cn63xxp1; 1941 + struct cvmx_lmcx_ifb_cnt_s cn66xx; 1942 + struct cvmx_lmcx_ifb_cnt_s cn68xx; 1943 + struct cvmx_lmcx_ifb_cnt_s cn68xxp1; 1944 + struct cvmx_lmcx_ifb_cnt_s cnf71xx; 1945 + }; 1946 + 1947 + union cvmx_lmcx_ifb_cnt_hi { 1948 + uint64_t u64; 1949 + struct cvmx_lmcx_ifb_cnt_hi_s { 1950 + #ifdef __BIG_ENDIAN_BITFIELD 1951 + uint64_t reserved_32_63:32; 1952 + uint64_t ifbcnt_hi:32; 1953 + #else 1954 + uint64_t ifbcnt_hi:32; 1955 + uint64_t reserved_32_63:32; 1956 + #endif 1957 + } s; 1958 + struct cvmx_lmcx_ifb_cnt_hi_s cn30xx; 1959 + struct cvmx_lmcx_ifb_cnt_hi_s cn31xx; 1960 + struct cvmx_lmcx_ifb_cnt_hi_s cn38xx; 1961 + struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2; 1962 + struct cvmx_lmcx_ifb_cnt_hi_s cn50xx; 1963 + struct cvmx_lmcx_ifb_cnt_hi_s cn52xx; 1964 + struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1; 1965 + struct cvmx_lmcx_ifb_cnt_hi_s cn56xx; 1966 + struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1; 1967 + struct 
cvmx_lmcx_ifb_cnt_hi_s cn58xx; 1968 + struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1; 1969 + }; 1970 + 1971 + union cvmx_lmcx_ifb_cnt_lo { 1972 + uint64_t u64; 1973 + struct cvmx_lmcx_ifb_cnt_lo_s { 1974 + #ifdef __BIG_ENDIAN_BITFIELD 1975 + uint64_t reserved_32_63:32; 1976 + uint64_t ifbcnt_lo:32; 1977 + #else 1978 + uint64_t ifbcnt_lo:32; 1979 + uint64_t reserved_32_63:32; 1980 + #endif 1981 + } s; 1982 + struct cvmx_lmcx_ifb_cnt_lo_s cn30xx; 1983 + struct cvmx_lmcx_ifb_cnt_lo_s cn31xx; 1984 + struct cvmx_lmcx_ifb_cnt_lo_s cn38xx; 1985 + struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2; 1986 + struct cvmx_lmcx_ifb_cnt_lo_s cn50xx; 1987 + struct cvmx_lmcx_ifb_cnt_lo_s cn52xx; 1988 + struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1; 1989 + struct cvmx_lmcx_ifb_cnt_lo_s cn56xx; 1990 + struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1; 1991 + struct cvmx_lmcx_ifb_cnt_lo_s cn58xx; 1992 + struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1; 1993 + }; 1994 + 1995 + union cvmx_lmcx_int { 1996 + uint64_t u64; 1997 + struct cvmx_lmcx_int_s { 1998 + #ifdef __BIG_ENDIAN_BITFIELD 1999 + uint64_t reserved_9_63:55; 2000 + uint64_t ded_err:4; 2001 + uint64_t sec_err:4; 2002 + uint64_t nxm_wr_err:1; 2003 + #else 2004 + uint64_t nxm_wr_err:1; 2005 + uint64_t sec_err:4; 2006 + uint64_t ded_err:4; 2007 + uint64_t reserved_9_63:55; 2008 + #endif 2009 + } s; 2010 + struct cvmx_lmcx_int_s cn61xx; 2011 + struct cvmx_lmcx_int_s cn63xx; 2012 + struct cvmx_lmcx_int_s cn63xxp1; 2013 + struct cvmx_lmcx_int_s cn66xx; 2014 + struct cvmx_lmcx_int_s cn68xx; 2015 + struct cvmx_lmcx_int_s cn68xxp1; 2016 + struct cvmx_lmcx_int_s cnf71xx; 2017 + }; 2018 + 2019 + union cvmx_lmcx_int_en { 2020 + uint64_t u64; 2021 + struct cvmx_lmcx_int_en_s { 2022 + #ifdef __BIG_ENDIAN_BITFIELD 2023 + uint64_t reserved_3_63:61; 2024 + uint64_t intr_ded_ena:1; 2025 + uint64_t intr_sec_ena:1; 2026 + uint64_t intr_nxm_wr_ena:1; 2027 + #else 2028 + uint64_t intr_nxm_wr_ena:1; 2029 + uint64_t intr_sec_ena:1; 2030 + uint64_t intr_ded_ena:1; 2031 + uint64_t reserved_3_63:61; 
2032 + #endif 2033 + } s; 2034 + struct cvmx_lmcx_int_en_s cn61xx; 2035 + struct cvmx_lmcx_int_en_s cn63xx; 2036 + struct cvmx_lmcx_int_en_s cn63xxp1; 2037 + struct cvmx_lmcx_int_en_s cn66xx; 2038 + struct cvmx_lmcx_int_en_s cn68xx; 2039 + struct cvmx_lmcx_int_en_s cn68xxp1; 2040 + struct cvmx_lmcx_int_en_s cnf71xx; 2041 + }; 2042 + 2043 + union cvmx_lmcx_mem_cfg0 { 2044 + uint64_t u64; 2045 + struct cvmx_lmcx_mem_cfg0_s { 2046 + #ifdef __BIG_ENDIAN_BITFIELD 2047 + uint64_t reserved_32_63:32; 2048 + uint64_t reset:1; 2049 + uint64_t silo_qc:1; 2050 + uint64_t bunk_ena:1; 2051 + uint64_t ded_err:4; 2052 + uint64_t sec_err:4; 2053 + uint64_t intr_ded_ena:1; 2054 + uint64_t intr_sec_ena:1; 2055 + uint64_t tcl:4; 2056 + uint64_t ref_int:6; 2057 + uint64_t pbank_lsb:4; 2058 + uint64_t row_lsb:3; 2059 + uint64_t ecc_ena:1; 2060 + uint64_t init_start:1; 2061 + #else 2062 + uint64_t init_start:1; 2063 + uint64_t ecc_ena:1; 2064 + uint64_t row_lsb:3; 2065 + uint64_t pbank_lsb:4; 2066 + uint64_t ref_int:6; 2067 + uint64_t tcl:4; 2068 + uint64_t intr_sec_ena:1; 2069 + uint64_t intr_ded_ena:1; 2070 + uint64_t sec_err:4; 2071 + uint64_t ded_err:4; 2072 + uint64_t bunk_ena:1; 2073 + uint64_t silo_qc:1; 2074 + uint64_t reset:1; 2075 + uint64_t reserved_32_63:32; 2076 + #endif 2077 + } s; 2078 + struct cvmx_lmcx_mem_cfg0_s cn30xx; 2079 + struct cvmx_lmcx_mem_cfg0_s cn31xx; 2080 + struct cvmx_lmcx_mem_cfg0_s cn38xx; 2081 + struct cvmx_lmcx_mem_cfg0_s cn38xxp2; 2082 + struct cvmx_lmcx_mem_cfg0_s cn50xx; 2083 + struct cvmx_lmcx_mem_cfg0_s cn52xx; 2084 + struct cvmx_lmcx_mem_cfg0_s cn52xxp1; 2085 + struct cvmx_lmcx_mem_cfg0_s cn56xx; 2086 + struct cvmx_lmcx_mem_cfg0_s cn56xxp1; 2087 + struct cvmx_lmcx_mem_cfg0_s cn58xx; 2088 + struct cvmx_lmcx_mem_cfg0_s cn58xxp1; 2089 + }; 2090 + 2091 + union cvmx_lmcx_mem_cfg1 { 2092 + uint64_t u64; 2093 + struct cvmx_lmcx_mem_cfg1_s { 2094 + #ifdef __BIG_ENDIAN_BITFIELD 2095 + uint64_t reserved_32_63:32; 2096 + uint64_t comp_bypass:1; 2097 + 
uint64_t trrd:3; 2098 + uint64_t caslat:3; 2099 + uint64_t tmrd:3; 2100 + uint64_t trfc:5; 2101 + uint64_t trp:4; 2102 + uint64_t twtr:4; 2103 + uint64_t trcd:4; 2104 + uint64_t tras:5; 2105 + #else 2106 + uint64_t tras:5; 2107 + uint64_t trcd:4; 2108 + uint64_t twtr:4; 2109 + uint64_t trp:4; 2110 + uint64_t trfc:5; 2111 + uint64_t tmrd:3; 2112 + uint64_t caslat:3; 2113 + uint64_t trrd:3; 2114 + uint64_t comp_bypass:1; 2115 + uint64_t reserved_32_63:32; 2116 + #endif 2117 + } s; 2118 + struct cvmx_lmcx_mem_cfg1_s cn30xx; 2119 + struct cvmx_lmcx_mem_cfg1_s cn31xx; 2120 + struct cvmx_lmcx_mem_cfg1_cn38xx { 2121 + #ifdef __BIG_ENDIAN_BITFIELD 2122 + uint64_t reserved_31_63:33; 2123 + uint64_t trrd:3; 2124 + uint64_t caslat:3; 2125 + uint64_t tmrd:3; 2126 + uint64_t trfc:5; 2127 + uint64_t trp:4; 2128 + uint64_t twtr:4; 2129 + uint64_t trcd:4; 2130 + uint64_t tras:5; 2131 + #else 2132 + uint64_t tras:5; 2133 + uint64_t trcd:4; 2134 + uint64_t twtr:4; 2135 + uint64_t trp:4; 2136 + uint64_t trfc:5; 2137 + uint64_t tmrd:3; 2138 + uint64_t caslat:3; 2139 + uint64_t trrd:3; 2140 + uint64_t reserved_31_63:33; 2141 + #endif 2142 + } cn38xx; 2143 + struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2; 2144 + struct cvmx_lmcx_mem_cfg1_s cn50xx; 2145 + struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx; 2146 + struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1; 2147 + struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx; 2148 + struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1; 2149 + struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx; 2150 + struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1; 2151 + }; 2152 + 2153 + union cvmx_lmcx_modereg_params0 { 2154 + uint64_t u64; 2155 + struct cvmx_lmcx_modereg_params0_s { 2156 + #ifdef __BIG_ENDIAN_BITFIELD 2157 + uint64_t reserved_25_63:39; 2158 + uint64_t ppd:1; 2159 + uint64_t wrp:3; 2160 + uint64_t dllr:1; 2161 + uint64_t tm:1; 2162 + uint64_t rbt:1; 2163 + uint64_t cl:4; 2164 + uint64_t bl:2; 2165 + uint64_t qoff:1; 2166 + uint64_t tdqs:1; 2167 + uint64_t wlev:1; 2168 + uint64_t al:2; 2169 + uint64_t 
dll:1; 2170 + uint64_t mpr:1; 2171 + uint64_t mprloc:2; 2172 + uint64_t cwl:3; 2173 + #else 2174 + uint64_t cwl:3; 2175 + uint64_t mprloc:2; 2176 + uint64_t mpr:1; 2177 + uint64_t dll:1; 2178 + uint64_t al:2; 2179 + uint64_t wlev:1; 2180 + uint64_t tdqs:1; 2181 + uint64_t qoff:1; 2182 + uint64_t bl:2; 2183 + uint64_t cl:4; 2184 + uint64_t rbt:1; 2185 + uint64_t tm:1; 2186 + uint64_t dllr:1; 2187 + uint64_t wrp:3; 2188 + uint64_t ppd:1; 2189 + uint64_t reserved_25_63:39; 2190 + #endif 2191 + } s; 2192 + struct cvmx_lmcx_modereg_params0_s cn61xx; 2193 + struct cvmx_lmcx_modereg_params0_s cn63xx; 2194 + struct cvmx_lmcx_modereg_params0_s cn63xxp1; 2195 + struct cvmx_lmcx_modereg_params0_s cn66xx; 2196 + struct cvmx_lmcx_modereg_params0_s cn68xx; 2197 + struct cvmx_lmcx_modereg_params0_s cn68xxp1; 2198 + struct cvmx_lmcx_modereg_params0_s cnf71xx; 2199 + }; 2200 + 2201 + union cvmx_lmcx_modereg_params1 { 2202 + uint64_t u64; 2203 + struct cvmx_lmcx_modereg_params1_s { 2204 + #ifdef __BIG_ENDIAN_BITFIELD 2205 + uint64_t reserved_48_63:16; 2206 + uint64_t rtt_nom_11:3; 2207 + uint64_t dic_11:2; 2208 + uint64_t rtt_wr_11:2; 2209 + uint64_t srt_11:1; 2210 + uint64_t asr_11:1; 2211 + uint64_t pasr_11:3; 2212 + uint64_t rtt_nom_10:3; 2213 + uint64_t dic_10:2; 2214 + uint64_t rtt_wr_10:2; 2215 + uint64_t srt_10:1; 2216 + uint64_t asr_10:1; 2217 + uint64_t pasr_10:3; 2218 + uint64_t rtt_nom_01:3; 2219 + uint64_t dic_01:2; 2220 + uint64_t rtt_wr_01:2; 2221 + uint64_t srt_01:1; 2222 + uint64_t asr_01:1; 2223 + uint64_t pasr_01:3; 2224 + uint64_t rtt_nom_00:3; 2225 + uint64_t dic_00:2; 2226 + uint64_t rtt_wr_00:2; 2227 + uint64_t srt_00:1; 2228 + uint64_t asr_00:1; 2229 + uint64_t pasr_00:3; 2230 + #else 2231 + uint64_t pasr_00:3; 2232 + uint64_t asr_00:1; 2233 + uint64_t srt_00:1; 2234 + uint64_t rtt_wr_00:2; 2235 + uint64_t dic_00:2; 2236 + uint64_t rtt_nom_00:3; 2237 + uint64_t pasr_01:3; 2238 + uint64_t asr_01:1; 2239 + uint64_t srt_01:1; 2240 + uint64_t rtt_wr_01:2; 2241 + 
uint64_t dic_01:2; 2242 + uint64_t rtt_nom_01:3; 2243 + uint64_t pasr_10:3; 2244 + uint64_t asr_10:1; 2245 + uint64_t srt_10:1; 2246 + uint64_t rtt_wr_10:2; 2247 + uint64_t dic_10:2; 2248 + uint64_t rtt_nom_10:3; 2249 + uint64_t pasr_11:3; 2250 + uint64_t asr_11:1; 2251 + uint64_t srt_11:1; 2252 + uint64_t rtt_wr_11:2; 2253 + uint64_t dic_11:2; 2254 + uint64_t rtt_nom_11:3; 2255 + uint64_t reserved_48_63:16; 2256 + #endif 2257 + } s; 2258 + struct cvmx_lmcx_modereg_params1_s cn61xx; 2259 + struct cvmx_lmcx_modereg_params1_s cn63xx; 2260 + struct cvmx_lmcx_modereg_params1_s cn63xxp1; 2261 + struct cvmx_lmcx_modereg_params1_s cn66xx; 2262 + struct cvmx_lmcx_modereg_params1_s cn68xx; 2263 + struct cvmx_lmcx_modereg_params1_s cn68xxp1; 2264 + struct cvmx_lmcx_modereg_params1_s cnf71xx; 2265 + }; 2266 + 2267 + union cvmx_lmcx_nxm { 2268 + uint64_t u64; 2269 + struct cvmx_lmcx_nxm_s { 2270 + #ifdef __BIG_ENDIAN_BITFIELD 2271 + uint64_t reserved_40_63:24; 2272 + uint64_t mem_msb_d3_r1:4; 2273 + uint64_t mem_msb_d3_r0:4; 2274 + uint64_t mem_msb_d2_r1:4; 2275 + uint64_t mem_msb_d2_r0:4; 2276 + uint64_t mem_msb_d1_r1:4; 2277 + uint64_t mem_msb_d1_r0:4; 2278 + uint64_t mem_msb_d0_r1:4; 2279 + uint64_t mem_msb_d0_r0:4; 2280 + uint64_t cs_mask:8; 2281 + #else 2282 + uint64_t cs_mask:8; 2283 + uint64_t mem_msb_d0_r0:4; 2284 + uint64_t mem_msb_d0_r1:4; 2285 + uint64_t mem_msb_d1_r0:4; 2286 + uint64_t mem_msb_d1_r1:4; 2287 + uint64_t mem_msb_d2_r0:4; 2288 + uint64_t mem_msb_d2_r1:4; 2289 + uint64_t mem_msb_d3_r0:4; 2290 + uint64_t mem_msb_d3_r1:4; 2291 + uint64_t reserved_40_63:24; 2292 + #endif 2293 + } s; 2294 + struct cvmx_lmcx_nxm_cn52xx { 2295 + #ifdef __BIG_ENDIAN_BITFIELD 2296 + uint64_t reserved_8_63:56; 2297 + uint64_t cs_mask:8; 2298 + #else 2299 + uint64_t cs_mask:8; 2300 + uint64_t reserved_8_63:56; 2301 + #endif 2302 + } cn52xx; 2303 + struct cvmx_lmcx_nxm_cn52xx cn56xx; 2304 + struct cvmx_lmcx_nxm_cn52xx cn58xx; 2305 + struct cvmx_lmcx_nxm_s cn61xx; 2306 + struct 
cvmx_lmcx_nxm_s cn63xx; 2307 + struct cvmx_lmcx_nxm_s cn63xxp1; 2308 + struct cvmx_lmcx_nxm_s cn66xx; 2309 + struct cvmx_lmcx_nxm_s cn68xx; 2310 + struct cvmx_lmcx_nxm_s cn68xxp1; 2311 + struct cvmx_lmcx_nxm_s cnf71xx; 2312 + }; 2313 + 2314 + union cvmx_lmcx_ops_cnt { 2315 + uint64_t u64; 2316 + struct cvmx_lmcx_ops_cnt_s { 2317 + #ifdef __BIG_ENDIAN_BITFIELD 2318 + uint64_t opscnt:64; 2319 + #else 2320 + uint64_t opscnt:64; 2321 + #endif 2322 + } s; 2323 + struct cvmx_lmcx_ops_cnt_s cn61xx; 2324 + struct cvmx_lmcx_ops_cnt_s cn63xx; 2325 + struct cvmx_lmcx_ops_cnt_s cn63xxp1; 2326 + struct cvmx_lmcx_ops_cnt_s cn66xx; 2327 + struct cvmx_lmcx_ops_cnt_s cn68xx; 2328 + struct cvmx_lmcx_ops_cnt_s cn68xxp1; 2329 + struct cvmx_lmcx_ops_cnt_s cnf71xx; 2330 + }; 2331 + 2332 + union cvmx_lmcx_ops_cnt_hi { 2333 + uint64_t u64; 2334 + struct cvmx_lmcx_ops_cnt_hi_s { 2335 + #ifdef __BIG_ENDIAN_BITFIELD 2336 + uint64_t reserved_32_63:32; 2337 + uint64_t opscnt_hi:32; 2338 + #else 2339 + uint64_t opscnt_hi:32; 2340 + uint64_t reserved_32_63:32; 2341 + #endif 2342 + } s; 2343 + struct cvmx_lmcx_ops_cnt_hi_s cn30xx; 2344 + struct cvmx_lmcx_ops_cnt_hi_s cn31xx; 2345 + struct cvmx_lmcx_ops_cnt_hi_s cn38xx; 2346 + struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2; 2347 + struct cvmx_lmcx_ops_cnt_hi_s cn50xx; 2348 + struct cvmx_lmcx_ops_cnt_hi_s cn52xx; 2349 + struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1; 2350 + struct cvmx_lmcx_ops_cnt_hi_s cn56xx; 2351 + struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1; 2352 + struct cvmx_lmcx_ops_cnt_hi_s cn58xx; 2353 + struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1; 2354 + }; 2355 + 2356 + union cvmx_lmcx_ops_cnt_lo { 2357 + uint64_t u64; 2358 + struct cvmx_lmcx_ops_cnt_lo_s { 2359 + #ifdef __BIG_ENDIAN_BITFIELD 2360 + uint64_t reserved_32_63:32; 2361 + uint64_t opscnt_lo:32; 2362 + #else 2363 + uint64_t opscnt_lo:32; 2364 + uint64_t reserved_32_63:32; 2365 + #endif 2366 + } s; 2367 + struct cvmx_lmcx_ops_cnt_lo_s cn30xx; 2368 + struct cvmx_lmcx_ops_cnt_lo_s cn31xx; 2369 + struct 
cvmx_lmcx_ops_cnt_lo_s cn38xx; 2370 + struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2; 2371 + struct cvmx_lmcx_ops_cnt_lo_s cn50xx; 2372 + struct cvmx_lmcx_ops_cnt_lo_s cn52xx; 2373 + struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1; 2374 + struct cvmx_lmcx_ops_cnt_lo_s cn56xx; 2375 + struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1; 2376 + struct cvmx_lmcx_ops_cnt_lo_s cn58xx; 2377 + struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1; 2378 + }; 2379 + 2380 + union cvmx_lmcx_phy_ctl { 2381 + uint64_t u64; 2382 + struct cvmx_lmcx_phy_ctl_s { 2383 + #ifdef __BIG_ENDIAN_BITFIELD 2384 + uint64_t reserved_15_63:49; 2385 + uint64_t rx_always_on:1; 2386 + uint64_t lv_mode:1; 2387 + uint64_t ck_tune1:1; 2388 + uint64_t ck_dlyout1:4; 2389 + uint64_t ck_tune0:1; 2390 + uint64_t ck_dlyout0:4; 2391 + uint64_t loopback:1; 2392 + uint64_t loopback_pos:1; 2393 + uint64_t ts_stagger:1; 2394 + #else 2395 + uint64_t ts_stagger:1; 2396 + uint64_t loopback_pos:1; 2397 + uint64_t loopback:1; 2398 + uint64_t ck_dlyout0:4; 2399 + uint64_t ck_tune0:1; 2400 + uint64_t ck_dlyout1:4; 2401 + uint64_t ck_tune1:1; 2402 + uint64_t lv_mode:1; 2403 + uint64_t rx_always_on:1; 2404 + uint64_t reserved_15_63:49; 2405 + #endif 2406 + } s; 2407 + struct cvmx_lmcx_phy_ctl_s cn61xx; 2408 + struct cvmx_lmcx_phy_ctl_s cn63xx; 2409 + struct cvmx_lmcx_phy_ctl_cn63xxp1 { 2410 + #ifdef __BIG_ENDIAN_BITFIELD 2411 + uint64_t reserved_14_63:50; 2412 + uint64_t lv_mode:1; 2413 + uint64_t ck_tune1:1; 2414 + uint64_t ck_dlyout1:4; 2415 + uint64_t ck_tune0:1; 2416 + uint64_t ck_dlyout0:4; 2417 + uint64_t loopback:1; 2418 + uint64_t loopback_pos:1; 2419 + uint64_t ts_stagger:1; 2420 + #else 2421 + uint64_t ts_stagger:1; 2422 + uint64_t loopback_pos:1; 2423 + uint64_t loopback:1; 2424 + uint64_t ck_dlyout0:4; 2425 + uint64_t ck_tune0:1; 2426 + uint64_t ck_dlyout1:4; 2427 + uint64_t ck_tune1:1; 2428 + uint64_t lv_mode:1; 2429 + uint64_t reserved_14_63:50; 2430 + #endif 2431 + } cn63xxp1; 2432 + struct cvmx_lmcx_phy_ctl_s cn66xx; 2433 + struct 
cvmx_lmcx_phy_ctl_s cn68xx; 2434 + struct cvmx_lmcx_phy_ctl_s cn68xxp1; 2435 + struct cvmx_lmcx_phy_ctl_s cnf71xx; 2436 + }; 2437 + 2438 + union cvmx_lmcx_pll_bwctl { 2439 + uint64_t u64; 2440 + struct cvmx_lmcx_pll_bwctl_s { 2441 + #ifdef __BIG_ENDIAN_BITFIELD 2442 + uint64_t reserved_5_63:59; 2443 + uint64_t bwupd:1; 2444 + uint64_t bwctl:4; 2445 + #else 2446 + uint64_t bwctl:4; 2447 + uint64_t bwupd:1; 2448 + uint64_t reserved_5_63:59; 2449 + #endif 2450 + } s; 2451 + struct cvmx_lmcx_pll_bwctl_s cn30xx; 2452 + struct cvmx_lmcx_pll_bwctl_s cn31xx; 2453 + struct cvmx_lmcx_pll_bwctl_s cn38xx; 2454 + struct cvmx_lmcx_pll_bwctl_s cn38xxp2; 2455 + }; 2456 + 2457 + union cvmx_lmcx_pll_ctl { 2458 + uint64_t u64; 2459 + struct cvmx_lmcx_pll_ctl_s { 2460 + #ifdef __BIG_ENDIAN_BITFIELD 2461 + uint64_t reserved_30_63:34; 2462 + uint64_t bypass:1; 2463 + uint64_t fasten_n:1; 2464 + uint64_t div_reset:1; 2465 + uint64_t reset_n:1; 2466 + uint64_t clkf:12; 2467 + uint64_t clkr:6; 2468 + uint64_t reserved_6_7:2; 2469 + uint64_t en16:1; 2470 + uint64_t en12:1; 2471 + uint64_t en8:1; 2472 + uint64_t en6:1; 2473 + uint64_t en4:1; 2474 + uint64_t en2:1; 2475 + #else 2476 + uint64_t en2:1; 2477 + uint64_t en4:1; 2478 + uint64_t en6:1; 2479 + uint64_t en8:1; 2480 + uint64_t en12:1; 2481 + uint64_t en16:1; 2482 + uint64_t reserved_6_7:2; 2483 + uint64_t clkr:6; 2484 + uint64_t clkf:12; 2485 + uint64_t reset_n:1; 2486 + uint64_t div_reset:1; 2487 + uint64_t fasten_n:1; 2488 + uint64_t bypass:1; 2489 + uint64_t reserved_30_63:34; 2490 + #endif 2491 + } s; 2492 + struct cvmx_lmcx_pll_ctl_cn50xx { 2493 + #ifdef __BIG_ENDIAN_BITFIELD 2494 + uint64_t reserved_29_63:35; 2495 + uint64_t fasten_n:1; 2496 + uint64_t div_reset:1; 2497 + uint64_t reset_n:1; 2498 + uint64_t clkf:12; 2499 + uint64_t clkr:6; 2500 + uint64_t reserved_6_7:2; 2501 + uint64_t en16:1; 2502 + uint64_t en12:1; 2503 + uint64_t en8:1; 2504 + uint64_t en6:1; 2505 + uint64_t en4:1; 2506 + uint64_t en2:1; 2507 + #else 2508 + 
uint64_t en2:1; 2509 + uint64_t en4:1; 2510 + uint64_t en6:1; 2511 + uint64_t en8:1; 2512 + uint64_t en12:1; 2513 + uint64_t en16:1; 2514 + uint64_t reserved_6_7:2; 2515 + uint64_t clkr:6; 2516 + uint64_t clkf:12; 2517 + uint64_t reset_n:1; 2518 + uint64_t div_reset:1; 2519 + uint64_t fasten_n:1; 2520 + uint64_t reserved_29_63:35; 2521 + #endif 2522 + } cn50xx; 2523 + struct cvmx_lmcx_pll_ctl_s cn52xx; 2524 + struct cvmx_lmcx_pll_ctl_s cn52xxp1; 2525 + struct cvmx_lmcx_pll_ctl_cn50xx cn56xx; 2526 + struct cvmx_lmcx_pll_ctl_cn56xxp1 { 2527 + #ifdef __BIG_ENDIAN_BITFIELD 2528 + uint64_t reserved_28_63:36; 2529 + uint64_t div_reset:1; 2530 + uint64_t reset_n:1; 2531 + uint64_t clkf:12; 2532 + uint64_t clkr:6; 2533 + uint64_t reserved_6_7:2; 2534 + uint64_t en16:1; 2535 + uint64_t en12:1; 2536 + uint64_t en8:1; 2537 + uint64_t en6:1; 2538 + uint64_t en4:1; 2539 + uint64_t en2:1; 2540 + #else 2541 + uint64_t en2:1; 2542 + uint64_t en4:1; 2543 + uint64_t en6:1; 2544 + uint64_t en8:1; 2545 + uint64_t en12:1; 2546 + uint64_t en16:1; 2547 + uint64_t reserved_6_7:2; 2548 + uint64_t clkr:6; 2549 + uint64_t clkf:12; 2550 + uint64_t reset_n:1; 2551 + uint64_t div_reset:1; 2552 + uint64_t reserved_28_63:36; 2553 + #endif 2554 + } cn56xxp1; 2555 + struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx; 2556 + struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1; 2557 + }; 2558 + 2559 + union cvmx_lmcx_pll_status { 2560 + uint64_t u64; 2561 + struct cvmx_lmcx_pll_status_s { 2562 + #ifdef __BIG_ENDIAN_BITFIELD 2563 + uint64_t reserved_32_63:32; 2564 + uint64_t ddr__nctl:5; 2565 + uint64_t ddr__pctl:5; 2566 + uint64_t reserved_2_21:20; 2567 + uint64_t rfslip:1; 2568 + uint64_t fbslip:1; 2569 + #else 2570 + uint64_t fbslip:1; 2571 + uint64_t rfslip:1; 2572 + uint64_t reserved_2_21:20; 2573 + uint64_t ddr__pctl:5; 2574 + uint64_t ddr__nctl:5; 2575 + uint64_t reserved_32_63:32; 2576 + #endif 2577 + } s; 2578 + struct cvmx_lmcx_pll_status_s cn50xx; 2579 + struct cvmx_lmcx_pll_status_s cn52xx; 2580 + struct 
cvmx_lmcx_pll_status_s cn52xxp1; 2581 + struct cvmx_lmcx_pll_status_s cn56xx; 2582 + struct cvmx_lmcx_pll_status_s cn56xxp1; 2583 + struct cvmx_lmcx_pll_status_s cn58xx; 2584 + struct cvmx_lmcx_pll_status_cn58xxp1 { 2585 + #ifdef __BIG_ENDIAN_BITFIELD 2586 + uint64_t reserved_2_63:62; 2587 + uint64_t rfslip:1; 2588 + uint64_t fbslip:1; 2589 + #else 2590 + uint64_t fbslip:1; 2591 + uint64_t rfslip:1; 2592 + uint64_t reserved_2_63:62; 2593 + #endif 2594 + } cn58xxp1; 2595 + }; 2596 + 2597 + union cvmx_lmcx_read_level_ctl { 2598 + uint64_t u64; 2599 + struct cvmx_lmcx_read_level_ctl_s { 2600 + #ifdef __BIG_ENDIAN_BITFIELD 2601 + uint64_t reserved_44_63:20; 2602 + uint64_t rankmask:4; 2603 + uint64_t pattern:8; 2604 + uint64_t row:16; 2605 + uint64_t col:12; 2606 + uint64_t reserved_3_3:1; 2607 + uint64_t bnk:3; 2608 + #else 2609 + uint64_t bnk:3; 2610 + uint64_t reserved_3_3:1; 2611 + uint64_t col:12; 2612 + uint64_t row:16; 2613 + uint64_t pattern:8; 2614 + uint64_t rankmask:4; 2615 + uint64_t reserved_44_63:20; 2616 + #endif 2617 + } s; 2618 + struct cvmx_lmcx_read_level_ctl_s cn52xx; 2619 + struct cvmx_lmcx_read_level_ctl_s cn52xxp1; 2620 + struct cvmx_lmcx_read_level_ctl_s cn56xx; 2621 + struct cvmx_lmcx_read_level_ctl_s cn56xxp1; 2622 + }; 2623 + 2624 + union cvmx_lmcx_read_level_dbg { 2625 + uint64_t u64; 2626 + struct cvmx_lmcx_read_level_dbg_s { 2627 + #ifdef __BIG_ENDIAN_BITFIELD 2628 + uint64_t reserved_32_63:32; 2629 + uint64_t bitmask:16; 2630 + uint64_t reserved_4_15:12; 2631 + uint64_t byte:4; 2632 + #else 2633 + uint64_t byte:4; 2634 + uint64_t reserved_4_15:12; 2635 + uint64_t bitmask:16; 2636 + uint64_t reserved_32_63:32; 2637 + #endif 2638 + } s; 2639 + struct cvmx_lmcx_read_level_dbg_s cn52xx; 2640 + struct cvmx_lmcx_read_level_dbg_s cn52xxp1; 2641 + struct cvmx_lmcx_read_level_dbg_s cn56xx; 2642 + struct cvmx_lmcx_read_level_dbg_s cn56xxp1; 2643 + }; 2644 + 2645 + union cvmx_lmcx_read_level_rankx { 2646 + uint64_t u64; 2647 + struct 
cvmx_lmcx_read_level_rankx_s { 2648 + #ifdef __BIG_ENDIAN_BITFIELD 2649 + uint64_t reserved_38_63:26; 2650 + uint64_t status:2; 2651 + uint64_t byte8:4; 2652 + uint64_t byte7:4; 2653 + uint64_t byte6:4; 2654 + uint64_t byte5:4; 2655 + uint64_t byte4:4; 2656 + uint64_t byte3:4; 2657 + uint64_t byte2:4; 2658 + uint64_t byte1:4; 2659 + uint64_t byte0:4; 2660 + #else 2661 + uint64_t byte0:4; 2662 + uint64_t byte1:4; 2663 + uint64_t byte2:4; 2664 + uint64_t byte3:4; 2665 + uint64_t byte4:4; 2666 + uint64_t byte5:4; 2667 + uint64_t byte6:4; 2668 + uint64_t byte7:4; 2669 + uint64_t byte8:4; 2670 + uint64_t status:2; 2671 + uint64_t reserved_38_63:26; 2672 + #endif 2673 + } s; 2674 + struct cvmx_lmcx_read_level_rankx_s cn52xx; 2675 + struct cvmx_lmcx_read_level_rankx_s cn52xxp1; 2676 + struct cvmx_lmcx_read_level_rankx_s cn56xx; 2677 + struct cvmx_lmcx_read_level_rankx_s cn56xxp1; 2678 + }; 2679 + 2680 + union cvmx_lmcx_reset_ctl { 2681 + uint64_t u64; 2682 + struct cvmx_lmcx_reset_ctl_s { 2683 + #ifdef __BIG_ENDIAN_BITFIELD 2684 + uint64_t reserved_4_63:60; 2685 + uint64_t ddr3psv:1; 2686 + uint64_t ddr3psoft:1; 2687 + uint64_t ddr3pwarm:1; 2688 + uint64_t ddr3rst:1; 2689 + #else 2690 + uint64_t ddr3rst:1; 2691 + uint64_t ddr3pwarm:1; 2692 + uint64_t ddr3psoft:1; 2693 + uint64_t ddr3psv:1; 2694 + uint64_t reserved_4_63:60; 2695 + #endif 2696 + } s; 2697 + struct cvmx_lmcx_reset_ctl_s cn61xx; 2698 + struct cvmx_lmcx_reset_ctl_s cn63xx; 2699 + struct cvmx_lmcx_reset_ctl_s cn63xxp1; 2700 + struct cvmx_lmcx_reset_ctl_s cn66xx; 2701 + struct cvmx_lmcx_reset_ctl_s cn68xx; 2702 + struct cvmx_lmcx_reset_ctl_s cn68xxp1; 2703 + struct cvmx_lmcx_reset_ctl_s cnf71xx; 2704 + }; 2705 + 2706 + union cvmx_lmcx_rlevel_ctl { 2707 + uint64_t u64; 2708 + struct cvmx_lmcx_rlevel_ctl_s { 2709 + #ifdef __BIG_ENDIAN_BITFIELD 2710 + uint64_t reserved_22_63:42; 2711 + uint64_t delay_unload_3:1; 2712 + uint64_t delay_unload_2:1; 2713 + uint64_t delay_unload_1:1; 2714 + uint64_t delay_unload_0:1; 
2715 + uint64_t bitmask:8; 2716 + uint64_t or_dis:1; 2717 + uint64_t offset_en:1; 2718 + uint64_t offset:4; 2719 + uint64_t byte:4; 2720 + #else 2721 + uint64_t byte:4; 2722 + uint64_t offset:4; 2723 + uint64_t offset_en:1; 2724 + uint64_t or_dis:1; 2725 + uint64_t bitmask:8; 2726 + uint64_t delay_unload_0:1; 2727 + uint64_t delay_unload_1:1; 2728 + uint64_t delay_unload_2:1; 2729 + uint64_t delay_unload_3:1; 2730 + uint64_t reserved_22_63:42; 2731 + #endif 2732 + } s; 2733 + struct cvmx_lmcx_rlevel_ctl_s cn61xx; 2734 + struct cvmx_lmcx_rlevel_ctl_s cn63xx; 2735 + struct cvmx_lmcx_rlevel_ctl_cn63xxp1 { 2736 + #ifdef __BIG_ENDIAN_BITFIELD 2737 + uint64_t reserved_9_63:55; 2738 + uint64_t offset_en:1; 2739 + uint64_t offset:4; 2740 + uint64_t byte:4; 2741 + #else 2742 + uint64_t byte:4; 2743 + uint64_t offset:4; 2744 + uint64_t offset_en:1; 2745 + uint64_t reserved_9_63:55; 2746 + #endif 2747 + } cn63xxp1; 2748 + struct cvmx_lmcx_rlevel_ctl_s cn66xx; 2749 + struct cvmx_lmcx_rlevel_ctl_s cn68xx; 2750 + struct cvmx_lmcx_rlevel_ctl_s cn68xxp1; 2751 + struct cvmx_lmcx_rlevel_ctl_s cnf71xx; 2752 + }; 2753 + 2754 + union cvmx_lmcx_rlevel_dbg { 2755 + uint64_t u64; 2756 + struct cvmx_lmcx_rlevel_dbg_s { 2757 + #ifdef __BIG_ENDIAN_BITFIELD 2758 + uint64_t bitmask:64; 2759 + #else 2760 + uint64_t bitmask:64; 2761 + #endif 2762 + } s; 2763 + struct cvmx_lmcx_rlevel_dbg_s cn61xx; 2764 + struct cvmx_lmcx_rlevel_dbg_s cn63xx; 2765 + struct cvmx_lmcx_rlevel_dbg_s cn63xxp1; 2766 + struct cvmx_lmcx_rlevel_dbg_s cn66xx; 2767 + struct cvmx_lmcx_rlevel_dbg_s cn68xx; 2768 + struct cvmx_lmcx_rlevel_dbg_s cn68xxp1; 2769 + struct cvmx_lmcx_rlevel_dbg_s cnf71xx; 2770 + }; 2771 + 2772 + union cvmx_lmcx_rlevel_rankx { 2773 + uint64_t u64; 2774 + struct cvmx_lmcx_rlevel_rankx_s { 2775 + #ifdef __BIG_ENDIAN_BITFIELD 2776 + uint64_t reserved_56_63:8; 2777 + uint64_t status:2; 2778 + uint64_t byte8:6; 2779 + uint64_t byte7:6; 2780 + uint64_t byte6:6; 2781 + uint64_t byte5:6; 2782 + uint64_t 
byte4:6; 2783 + uint64_t byte3:6; 2784 + uint64_t byte2:6; 2785 + uint64_t byte1:6; 2786 + uint64_t byte0:6; 2787 + #else 2788 + uint64_t byte0:6; 2789 + uint64_t byte1:6; 2790 + uint64_t byte2:6; 2791 + uint64_t byte3:6; 2792 + uint64_t byte4:6; 2793 + uint64_t byte5:6; 2794 + uint64_t byte6:6; 2795 + uint64_t byte7:6; 2796 + uint64_t byte8:6; 2797 + uint64_t status:2; 2798 + uint64_t reserved_56_63:8; 2799 + #endif 2800 + } s; 2801 + struct cvmx_lmcx_rlevel_rankx_s cn61xx; 2802 + struct cvmx_lmcx_rlevel_rankx_s cn63xx; 2803 + struct cvmx_lmcx_rlevel_rankx_s cn63xxp1; 2804 + struct cvmx_lmcx_rlevel_rankx_s cn66xx; 2805 + struct cvmx_lmcx_rlevel_rankx_s cn68xx; 2806 + struct cvmx_lmcx_rlevel_rankx_s cn68xxp1; 2807 + struct cvmx_lmcx_rlevel_rankx_s cnf71xx; 2808 + }; 2809 + 2810 + union cvmx_lmcx_rodt_comp_ctl { 2811 + uint64_t u64; 2812 + struct cvmx_lmcx_rodt_comp_ctl_s { 2813 + #ifdef __BIG_ENDIAN_BITFIELD 2814 + uint64_t reserved_17_63:47; 2815 + uint64_t enable:1; 2816 + uint64_t reserved_12_15:4; 2817 + uint64_t nctl:4; 2818 + uint64_t reserved_5_7:3; 2819 + uint64_t pctl:5; 2820 + #else 2821 + uint64_t pctl:5; 2822 + uint64_t reserved_5_7:3; 2823 + uint64_t nctl:4; 2824 + uint64_t reserved_12_15:4; 2825 + uint64_t enable:1; 2826 + uint64_t reserved_17_63:47; 2827 + #endif 2828 + } s; 2829 + struct cvmx_lmcx_rodt_comp_ctl_s cn50xx; 2830 + struct cvmx_lmcx_rodt_comp_ctl_s cn52xx; 2831 + struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1; 2832 + struct cvmx_lmcx_rodt_comp_ctl_s cn56xx; 2833 + struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1; 2834 + struct cvmx_lmcx_rodt_comp_ctl_s cn58xx; 2835 + struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1; 2836 + }; 2837 + 2838 + union cvmx_lmcx_rodt_ctl { 2839 + uint64_t u64; 2840 + struct cvmx_lmcx_rodt_ctl_s { 2841 + #ifdef __BIG_ENDIAN_BITFIELD 2842 + uint64_t reserved_32_63:32; 2843 + uint64_t rodt_hi3:4; 2844 + uint64_t rodt_hi2:4; 2845 + uint64_t rodt_hi1:4; 2846 + uint64_t rodt_hi0:4; 2847 + uint64_t rodt_lo3:4; 2848 + uint64_t 
rodt_lo2:4; 2849 + uint64_t rodt_lo1:4; 2850 + uint64_t rodt_lo0:4; 2851 + #else 2852 + uint64_t rodt_lo0:4; 2853 + uint64_t rodt_lo1:4; 2854 + uint64_t rodt_lo2:4; 2855 + uint64_t rodt_lo3:4; 2856 + uint64_t rodt_hi0:4; 2857 + uint64_t rodt_hi1:4; 2858 + uint64_t rodt_hi2:4; 2859 + uint64_t rodt_hi3:4; 2860 + uint64_t reserved_32_63:32; 2861 + #endif 2862 + } s; 2863 + struct cvmx_lmcx_rodt_ctl_s cn30xx; 2864 + struct cvmx_lmcx_rodt_ctl_s cn31xx; 2865 + struct cvmx_lmcx_rodt_ctl_s cn38xx; 2866 + struct cvmx_lmcx_rodt_ctl_s cn38xxp2; 2867 + struct cvmx_lmcx_rodt_ctl_s cn50xx; 2868 + struct cvmx_lmcx_rodt_ctl_s cn52xx; 2869 + struct cvmx_lmcx_rodt_ctl_s cn52xxp1; 2870 + struct cvmx_lmcx_rodt_ctl_s cn56xx; 2871 + struct cvmx_lmcx_rodt_ctl_s cn56xxp1; 2872 + struct cvmx_lmcx_rodt_ctl_s cn58xx; 2873 + struct cvmx_lmcx_rodt_ctl_s cn58xxp1; 2874 + }; 2875 + 2876 + union cvmx_lmcx_rodt_mask { 2877 + uint64_t u64; 2878 + struct cvmx_lmcx_rodt_mask_s { 2879 + #ifdef __BIG_ENDIAN_BITFIELD 2880 + uint64_t rodt_d3_r1:8; 2881 + uint64_t rodt_d3_r0:8; 2882 + uint64_t rodt_d2_r1:8; 2883 + uint64_t rodt_d2_r0:8; 2884 + uint64_t rodt_d1_r1:8; 2885 + uint64_t rodt_d1_r0:8; 2886 + uint64_t rodt_d0_r1:8; 2887 + uint64_t rodt_d0_r0:8; 2888 + #else 2889 + uint64_t rodt_d0_r0:8; 2890 + uint64_t rodt_d0_r1:8; 2891 + uint64_t rodt_d1_r0:8; 2892 + uint64_t rodt_d1_r1:8; 2893 + uint64_t rodt_d2_r0:8; 2894 + uint64_t rodt_d2_r1:8; 2895 + uint64_t rodt_d3_r0:8; 2896 + uint64_t rodt_d3_r1:8; 2897 + #endif 2898 + } s; 2899 + struct cvmx_lmcx_rodt_mask_s cn61xx; 2900 + struct cvmx_lmcx_rodt_mask_s cn63xx; 2901 + struct cvmx_lmcx_rodt_mask_s cn63xxp1; 2902 + struct cvmx_lmcx_rodt_mask_s cn66xx; 2903 + struct cvmx_lmcx_rodt_mask_s cn68xx; 2904 + struct cvmx_lmcx_rodt_mask_s cn68xxp1; 2905 + struct cvmx_lmcx_rodt_mask_s cnf71xx; 2906 + }; 2907 + 2908 + union cvmx_lmcx_scramble_cfg0 { 2909 + uint64_t u64; 2910 + struct cvmx_lmcx_scramble_cfg0_s { 2911 + #ifdef __BIG_ENDIAN_BITFIELD 2912 + uint64_t 
key:64; 2913 + #else 2914 + uint64_t key:64; 2915 + #endif 2916 + } s; 2917 + struct cvmx_lmcx_scramble_cfg0_s cn61xx; 2918 + struct cvmx_lmcx_scramble_cfg0_s cn66xx; 2919 + struct cvmx_lmcx_scramble_cfg0_s cnf71xx; 2920 + }; 2921 + 2922 + union cvmx_lmcx_scramble_cfg1 { 2923 + uint64_t u64; 2924 + struct cvmx_lmcx_scramble_cfg1_s { 2925 + #ifdef __BIG_ENDIAN_BITFIELD 2926 + uint64_t key:64; 2927 + #else 2928 + uint64_t key:64; 2929 + #endif 2930 + } s; 2931 + struct cvmx_lmcx_scramble_cfg1_s cn61xx; 2932 + struct cvmx_lmcx_scramble_cfg1_s cn66xx; 2933 + struct cvmx_lmcx_scramble_cfg1_s cnf71xx; 2934 + }; 2935 + 2936 + union cvmx_lmcx_scrambled_fadr { 2937 + uint64_t u64; 2938 + struct cvmx_lmcx_scrambled_fadr_s { 2939 + #ifdef __BIG_ENDIAN_BITFIELD 2940 + uint64_t reserved_36_63:28; 2941 + uint64_t fdimm:2; 2942 + uint64_t fbunk:1; 2943 + uint64_t fbank:3; 2944 + uint64_t frow:16; 2945 + uint64_t fcol:14; 2946 + #else 2947 + uint64_t fcol:14; 2948 + uint64_t frow:16; 2949 + uint64_t fbank:3; 2950 + uint64_t fbunk:1; 2951 + uint64_t fdimm:2; 2952 + uint64_t reserved_36_63:28; 2953 + #endif 2954 + } s; 2955 + struct cvmx_lmcx_scrambled_fadr_s cn61xx; 2956 + struct cvmx_lmcx_scrambled_fadr_s cn66xx; 2957 + struct cvmx_lmcx_scrambled_fadr_s cnf71xx; 2958 + }; 2959 + 2960 + union cvmx_lmcx_slot_ctl0 { 2961 + uint64_t u64; 2962 + struct cvmx_lmcx_slot_ctl0_s { 2963 + #ifdef __BIG_ENDIAN_BITFIELD 2964 + uint64_t reserved_24_63:40; 2965 + uint64_t w2w_init:6; 2966 + uint64_t w2r_init:6; 2967 + uint64_t r2w_init:6; 2968 + uint64_t r2r_init:6; 2969 + #else 2970 + uint64_t r2r_init:6; 2971 + uint64_t r2w_init:6; 2972 + uint64_t w2r_init:6; 2973 + uint64_t w2w_init:6; 2974 + uint64_t reserved_24_63:40; 2975 + #endif 2976 + } s; 2977 + struct cvmx_lmcx_slot_ctl0_s cn61xx; 2978 + struct cvmx_lmcx_slot_ctl0_s cn63xx; 2979 + struct cvmx_lmcx_slot_ctl0_s cn63xxp1; 2980 + struct cvmx_lmcx_slot_ctl0_s cn66xx; 2981 + struct cvmx_lmcx_slot_ctl0_s cn68xx; 2982 + struct 
cvmx_lmcx_slot_ctl0_s cn68xxp1; 2983 + struct cvmx_lmcx_slot_ctl0_s cnf71xx; 2984 + }; 2985 + 2986 + union cvmx_lmcx_slot_ctl1 { 2987 + uint64_t u64; 2988 + struct cvmx_lmcx_slot_ctl1_s { 2989 + #ifdef __BIG_ENDIAN_BITFIELD 2990 + uint64_t reserved_24_63:40; 2991 + uint64_t w2w_xrank_init:6; 2992 + uint64_t w2r_xrank_init:6; 2993 + uint64_t r2w_xrank_init:6; 2994 + uint64_t r2r_xrank_init:6; 2995 + #else 2996 + uint64_t r2r_xrank_init:6; 2997 + uint64_t r2w_xrank_init:6; 2998 + uint64_t w2r_xrank_init:6; 2999 + uint64_t w2w_xrank_init:6; 3000 + uint64_t reserved_24_63:40; 3001 + #endif 3002 + } s; 3003 + struct cvmx_lmcx_slot_ctl1_s cn61xx; 3004 + struct cvmx_lmcx_slot_ctl1_s cn63xx; 3005 + struct cvmx_lmcx_slot_ctl1_s cn63xxp1; 3006 + struct cvmx_lmcx_slot_ctl1_s cn66xx; 3007 + struct cvmx_lmcx_slot_ctl1_s cn68xx; 3008 + struct cvmx_lmcx_slot_ctl1_s cn68xxp1; 3009 + struct cvmx_lmcx_slot_ctl1_s cnf71xx; 3010 + }; 3011 + 3012 + union cvmx_lmcx_slot_ctl2 { 3013 + uint64_t u64; 3014 + struct cvmx_lmcx_slot_ctl2_s { 3015 + #ifdef __BIG_ENDIAN_BITFIELD 3016 + uint64_t reserved_24_63:40; 3017 + uint64_t w2w_xdimm_init:6; 3018 + uint64_t w2r_xdimm_init:6; 3019 + uint64_t r2w_xdimm_init:6; 3020 + uint64_t r2r_xdimm_init:6; 3021 + #else 3022 + uint64_t r2r_xdimm_init:6; 3023 + uint64_t r2w_xdimm_init:6; 3024 + uint64_t w2r_xdimm_init:6; 3025 + uint64_t w2w_xdimm_init:6; 3026 + uint64_t reserved_24_63:40; 3027 + #endif 3028 + } s; 3029 + struct cvmx_lmcx_slot_ctl2_s cn61xx; 3030 + struct cvmx_lmcx_slot_ctl2_s cn63xx; 3031 + struct cvmx_lmcx_slot_ctl2_s cn63xxp1; 3032 + struct cvmx_lmcx_slot_ctl2_s cn66xx; 3033 + struct cvmx_lmcx_slot_ctl2_s cn68xx; 3034 + struct cvmx_lmcx_slot_ctl2_s cn68xxp1; 3035 + struct cvmx_lmcx_slot_ctl2_s cnf71xx; 3036 + }; 3037 + 3038 + union cvmx_lmcx_timing_params0 { 3039 + uint64_t u64; 3040 + struct cvmx_lmcx_timing_params0_s { 3041 + #ifdef __BIG_ENDIAN_BITFIELD 3042 + uint64_t reserved_47_63:17; 3043 + uint64_t trp_ext:1; 3044 + uint64_t 
tcksre:4; 3045 + uint64_t trp:4; 3046 + uint64_t tzqinit:4; 3047 + uint64_t tdllk:4; 3048 + uint64_t tmod:4; 3049 + uint64_t tmrd:4; 3050 + uint64_t txpr:4; 3051 + uint64_t tcke:4; 3052 + uint64_t tzqcs:4; 3053 + uint64_t tckeon:10; 3054 + #else 3055 + uint64_t tckeon:10; 3056 + uint64_t tzqcs:4; 3057 + uint64_t tcke:4; 3058 + uint64_t txpr:4; 3059 + uint64_t tmrd:4; 3060 + uint64_t tmod:4; 3061 + uint64_t tdllk:4; 3062 + uint64_t tzqinit:4; 3063 + uint64_t trp:4; 3064 + uint64_t tcksre:4; 3065 + uint64_t trp_ext:1; 3066 + uint64_t reserved_47_63:17; 3067 + #endif 3068 + } s; 3069 + struct cvmx_lmcx_timing_params0_cn61xx { 3070 + #ifdef __BIG_ENDIAN_BITFIELD 3071 + uint64_t reserved_47_63:17; 3072 + uint64_t trp_ext:1; 3073 + uint64_t tcksre:4; 3074 + uint64_t trp:4; 3075 + uint64_t tzqinit:4; 3076 + uint64_t tdllk:4; 3077 + uint64_t tmod:4; 3078 + uint64_t tmrd:4; 3079 + uint64_t txpr:4; 3080 + uint64_t tcke:4; 3081 + uint64_t tzqcs:4; 3082 + uint64_t reserved_0_9:10; 3083 + #else 3084 + uint64_t reserved_0_9:10; 3085 + uint64_t tzqcs:4; 3086 + uint64_t tcke:4; 3087 + uint64_t txpr:4; 3088 + uint64_t tmrd:4; 3089 + uint64_t tmod:4; 3090 + uint64_t tdllk:4; 3091 + uint64_t tzqinit:4; 3092 + uint64_t trp:4; 3093 + uint64_t tcksre:4; 3094 + uint64_t trp_ext:1; 3095 + uint64_t reserved_47_63:17; 3096 + #endif 3097 + } cn61xx; 3098 + struct cvmx_lmcx_timing_params0_cn61xx cn63xx; 3099 + struct cvmx_lmcx_timing_params0_cn63xxp1 { 3100 + #ifdef __BIG_ENDIAN_BITFIELD 3101 + uint64_t reserved_46_63:18; 3102 + uint64_t tcksre:4; 3103 + uint64_t trp:4; 3104 + uint64_t tzqinit:4; 3105 + uint64_t tdllk:4; 3106 + uint64_t tmod:4; 3107 + uint64_t tmrd:4; 3108 + uint64_t txpr:4; 3109 + uint64_t tcke:4; 3110 + uint64_t tzqcs:4; 3111 + uint64_t tckeon:10; 3112 + #else 3113 + uint64_t tckeon:10; 3114 + uint64_t tzqcs:4; 3115 + uint64_t tcke:4; 3116 + uint64_t txpr:4; 3117 + uint64_t tmrd:4; 3118 + uint64_t tmod:4; 3119 + uint64_t tdllk:4; 3120 + uint64_t tzqinit:4; 3121 + uint64_t 
trp:4; 3122 + uint64_t tcksre:4; 3123 + uint64_t reserved_46_63:18; 3124 + #endif 3125 + } cn63xxp1; 3126 + struct cvmx_lmcx_timing_params0_cn61xx cn66xx; 3127 + struct cvmx_lmcx_timing_params0_cn61xx cn68xx; 3128 + struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1; 3129 + struct cvmx_lmcx_timing_params0_cn61xx cnf71xx; 3130 + }; 3131 + 3132 + union cvmx_lmcx_timing_params1 { 3133 + uint64_t u64; 3134 + struct cvmx_lmcx_timing_params1_s { 3135 + #ifdef __BIG_ENDIAN_BITFIELD 3136 + uint64_t reserved_47_63:17; 3137 + uint64_t tras_ext:1; 3138 + uint64_t txpdll:5; 3139 + uint64_t tfaw:5; 3140 + uint64_t twldqsen:4; 3141 + uint64_t twlmrd:4; 3142 + uint64_t txp:3; 3143 + uint64_t trrd:3; 3144 + uint64_t trfc:5; 3145 + uint64_t twtr:4; 3146 + uint64_t trcd:4; 3147 + uint64_t tras:5; 3148 + uint64_t tmprr:4; 3149 + #else 3150 + uint64_t tmprr:4; 3151 + uint64_t tras:5; 3152 + uint64_t trcd:4; 3153 + uint64_t twtr:4; 3154 + uint64_t trfc:5; 3155 + uint64_t trrd:3; 3156 + uint64_t txp:3; 3157 + uint64_t twlmrd:4; 3158 + uint64_t twldqsen:4; 3159 + uint64_t tfaw:5; 3160 + uint64_t txpdll:5; 3161 + uint64_t tras_ext:1; 3162 + uint64_t reserved_47_63:17; 3163 + #endif 3164 + } s; 3165 + struct cvmx_lmcx_timing_params1_s cn61xx; 3166 + struct cvmx_lmcx_timing_params1_s cn63xx; 3167 + struct cvmx_lmcx_timing_params1_cn63xxp1 { 3168 + #ifdef __BIG_ENDIAN_BITFIELD 3169 + uint64_t reserved_46_63:18; 3170 + uint64_t txpdll:5; 3171 + uint64_t tfaw:5; 3172 + uint64_t twldqsen:4; 3173 + uint64_t twlmrd:4; 3174 + uint64_t txp:3; 3175 + uint64_t trrd:3; 3176 + uint64_t trfc:5; 3177 + uint64_t twtr:4; 3178 + uint64_t trcd:4; 3179 + uint64_t tras:5; 3180 + uint64_t tmprr:4; 3181 + #else 3182 + uint64_t tmprr:4; 3183 + uint64_t tras:5; 3184 + uint64_t trcd:4; 3185 + uint64_t twtr:4; 3186 + uint64_t trfc:5; 3187 + uint64_t trrd:3; 3188 + uint64_t txp:3; 3189 + uint64_t twlmrd:4; 3190 + uint64_t twldqsen:4; 3191 + uint64_t tfaw:5; 3192 + uint64_t txpdll:5; 3193 + uint64_t reserved_46_63:18; 
3194 + #endif 3195 + } cn63xxp1; 3196 + struct cvmx_lmcx_timing_params1_s cn66xx; 3197 + struct cvmx_lmcx_timing_params1_s cn68xx; 3198 + struct cvmx_lmcx_timing_params1_s cn68xxp1; 3199 + struct cvmx_lmcx_timing_params1_s cnf71xx; 3200 + }; 3201 + 3202 + union cvmx_lmcx_tro_ctl { 3203 + uint64_t u64; 3204 + struct cvmx_lmcx_tro_ctl_s { 3205 + #ifdef __BIG_ENDIAN_BITFIELD 3206 + uint64_t reserved_33_63:31; 3207 + uint64_t rclk_cnt:32; 3208 + uint64_t treset:1; 3209 + #else 3210 + uint64_t treset:1; 3211 + uint64_t rclk_cnt:32; 3212 + uint64_t reserved_33_63:31; 3213 + #endif 3214 + } s; 3215 + struct cvmx_lmcx_tro_ctl_s cn61xx; 3216 + struct cvmx_lmcx_tro_ctl_s cn63xx; 3217 + struct cvmx_lmcx_tro_ctl_s cn63xxp1; 3218 + struct cvmx_lmcx_tro_ctl_s cn66xx; 3219 + struct cvmx_lmcx_tro_ctl_s cn68xx; 3220 + struct cvmx_lmcx_tro_ctl_s cn68xxp1; 3221 + struct cvmx_lmcx_tro_ctl_s cnf71xx; 3222 + }; 3223 + 3224 + union cvmx_lmcx_tro_stat { 3225 + uint64_t u64; 3226 + struct cvmx_lmcx_tro_stat_s { 3227 + #ifdef __BIG_ENDIAN_BITFIELD 3228 + uint64_t reserved_32_63:32; 3229 + uint64_t ring_cnt:32; 3230 + #else 3231 + uint64_t ring_cnt:32; 3232 + uint64_t reserved_32_63:32; 3233 + #endif 3234 + } s; 3235 + struct cvmx_lmcx_tro_stat_s cn61xx; 3236 + struct cvmx_lmcx_tro_stat_s cn63xx; 3237 + struct cvmx_lmcx_tro_stat_s cn63xxp1; 3238 + struct cvmx_lmcx_tro_stat_s cn66xx; 3239 + struct cvmx_lmcx_tro_stat_s cn68xx; 3240 + struct cvmx_lmcx_tro_stat_s cn68xxp1; 3241 + struct cvmx_lmcx_tro_stat_s cnf71xx; 3242 + }; 3243 + 3244 + union cvmx_lmcx_wlevel_ctl { 3245 + uint64_t u64; 3246 + struct cvmx_lmcx_wlevel_ctl_s { 3247 + #ifdef __BIG_ENDIAN_BITFIELD 3248 + uint64_t reserved_22_63:42; 3249 + uint64_t rtt_nom:3; 3250 + uint64_t bitmask:8; 3251 + uint64_t or_dis:1; 3252 + uint64_t sset:1; 3253 + uint64_t lanemask:9; 3254 + #else 3255 + uint64_t lanemask:9; 3256 + uint64_t sset:1; 3257 + uint64_t or_dis:1; 3258 + uint64_t bitmask:8; 3259 + uint64_t rtt_nom:3; 3260 + uint64_t 
reserved_22_63:42; 3261 + #endif 3262 + } s; 3263 + struct cvmx_lmcx_wlevel_ctl_s cn61xx; 3264 + struct cvmx_lmcx_wlevel_ctl_s cn63xx; 3265 + struct cvmx_lmcx_wlevel_ctl_cn63xxp1 { 3266 + #ifdef __BIG_ENDIAN_BITFIELD 3267 + uint64_t reserved_10_63:54; 3268 + uint64_t sset:1; 3269 + uint64_t lanemask:9; 3270 + #else 3271 + uint64_t lanemask:9; 3272 + uint64_t sset:1; 3273 + uint64_t reserved_10_63:54; 3274 + #endif 3275 + } cn63xxp1; 3276 + struct cvmx_lmcx_wlevel_ctl_s cn66xx; 3277 + struct cvmx_lmcx_wlevel_ctl_s cn68xx; 3278 + struct cvmx_lmcx_wlevel_ctl_s cn68xxp1; 3279 + struct cvmx_lmcx_wlevel_ctl_s cnf71xx; 3280 + }; 3281 + 3282 + union cvmx_lmcx_wlevel_dbg { 3283 + uint64_t u64; 3284 + struct cvmx_lmcx_wlevel_dbg_s { 3285 + #ifdef __BIG_ENDIAN_BITFIELD 3286 + uint64_t reserved_12_63:52; 3287 + uint64_t bitmask:8; 3288 + uint64_t byte:4; 3289 + #else 3290 + uint64_t byte:4; 3291 + uint64_t bitmask:8; 3292 + uint64_t reserved_12_63:52; 3293 + #endif 3294 + } s; 3295 + struct cvmx_lmcx_wlevel_dbg_s cn61xx; 3296 + struct cvmx_lmcx_wlevel_dbg_s cn63xx; 3297 + struct cvmx_lmcx_wlevel_dbg_s cn63xxp1; 3298 + struct cvmx_lmcx_wlevel_dbg_s cn66xx; 3299 + struct cvmx_lmcx_wlevel_dbg_s cn68xx; 3300 + struct cvmx_lmcx_wlevel_dbg_s cn68xxp1; 3301 + struct cvmx_lmcx_wlevel_dbg_s cnf71xx; 3302 + }; 3303 + 3304 + union cvmx_lmcx_wlevel_rankx { 3305 + uint64_t u64; 3306 + struct cvmx_lmcx_wlevel_rankx_s { 3307 + #ifdef __BIG_ENDIAN_BITFIELD 3308 + uint64_t reserved_47_63:17; 3309 + uint64_t status:2; 3310 + uint64_t byte8:5; 3311 + uint64_t byte7:5; 3312 + uint64_t byte6:5; 3313 + uint64_t byte5:5; 3314 + uint64_t byte4:5; 3315 + uint64_t byte3:5; 3316 + uint64_t byte2:5; 3317 + uint64_t byte1:5; 3318 + uint64_t byte0:5; 3319 + #else 3320 + uint64_t byte0:5; 3321 + uint64_t byte1:5; 3322 + uint64_t byte2:5; 3323 + uint64_t byte3:5; 3324 + uint64_t byte4:5; 3325 + uint64_t byte5:5; 3326 + uint64_t byte6:5; 3327 + uint64_t byte7:5; 3328 + uint64_t byte8:5; 3329 + uint64_t 
status:2; 3330 + uint64_t reserved_47_63:17; 3331 + #endif 3332 + } s; 3333 + struct cvmx_lmcx_wlevel_rankx_s cn61xx; 3334 + struct cvmx_lmcx_wlevel_rankx_s cn63xx; 3335 + struct cvmx_lmcx_wlevel_rankx_s cn63xxp1; 3336 + struct cvmx_lmcx_wlevel_rankx_s cn66xx; 3337 + struct cvmx_lmcx_wlevel_rankx_s cn68xx; 3338 + struct cvmx_lmcx_wlevel_rankx_s cn68xxp1; 3339 + struct cvmx_lmcx_wlevel_rankx_s cnf71xx; 3340 + }; 3341 + 3342 + union cvmx_lmcx_wodt_ctl0 { 3343 + uint64_t u64; 3344 + struct cvmx_lmcx_wodt_ctl0_s { 3345 + #ifdef __BIG_ENDIAN_BITFIELD 3346 + uint64_t reserved_0_63:64; 3347 + #else 3348 + uint64_t reserved_0_63:64; 3349 + #endif 3350 + } s; 3351 + struct cvmx_lmcx_wodt_ctl0_cn30xx { 3352 + #ifdef __BIG_ENDIAN_BITFIELD 3353 + uint64_t reserved_32_63:32; 3354 + uint64_t wodt_d1_r1:8; 3355 + uint64_t wodt_d1_r0:8; 3356 + uint64_t wodt_d0_r1:8; 3357 + uint64_t wodt_d0_r0:8; 3358 + #else 3359 + uint64_t wodt_d0_r0:8; 3360 + uint64_t wodt_d0_r1:8; 3361 + uint64_t wodt_d1_r0:8; 3362 + uint64_t wodt_d1_r1:8; 3363 + uint64_t reserved_32_63:32; 3364 + #endif 3365 + } cn30xx; 3366 + struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx; 3367 + struct cvmx_lmcx_wodt_ctl0_cn38xx { 3368 + #ifdef __BIG_ENDIAN_BITFIELD 3369 + uint64_t reserved_32_63:32; 3370 + uint64_t wodt_hi3:4; 3371 + uint64_t wodt_hi2:4; 3372 + uint64_t wodt_hi1:4; 3373 + uint64_t wodt_hi0:4; 3374 + uint64_t wodt_lo3:4; 3375 + uint64_t wodt_lo2:4; 3376 + uint64_t wodt_lo1:4; 3377 + uint64_t wodt_lo0:4; 3378 + #else 3379 + uint64_t wodt_lo0:4; 3380 + uint64_t wodt_lo1:4; 3381 + uint64_t wodt_lo2:4; 3382 + uint64_t wodt_lo3:4; 3383 + uint64_t wodt_hi0:4; 3384 + uint64_t wodt_hi1:4; 3385 + uint64_t wodt_hi2:4; 3386 + uint64_t wodt_hi3:4; 3387 + uint64_t reserved_32_63:32; 3388 + #endif 3389 + } cn38xx; 3390 + struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2; 3391 + struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx; 3392 + struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx; 3393 + struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1; 3394 + struct 
cvmx_lmcx_wodt_ctl0_cn30xx cn56xx; 3395 + struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1; 3396 + struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx; 3397 + struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1; 3398 + }; 3399 + 3400 + union cvmx_lmcx_wodt_ctl1 { 3401 + uint64_t u64; 3402 + struct cvmx_lmcx_wodt_ctl1_s { 3403 + #ifdef __BIG_ENDIAN_BITFIELD 3404 + uint64_t reserved_32_63:32; 3405 + uint64_t wodt_d3_r1:8; 3406 + uint64_t wodt_d3_r0:8; 3407 + uint64_t wodt_d2_r1:8; 3408 + uint64_t wodt_d2_r0:8; 3409 + #else 3410 + uint64_t wodt_d2_r0:8; 3411 + uint64_t wodt_d2_r1:8; 3412 + uint64_t wodt_d3_r0:8; 3413 + uint64_t wodt_d3_r1:8; 3414 + uint64_t reserved_32_63:32; 3415 + #endif 3416 + } s; 3417 + struct cvmx_lmcx_wodt_ctl1_s cn30xx; 3418 + struct cvmx_lmcx_wodt_ctl1_s cn31xx; 3419 + struct cvmx_lmcx_wodt_ctl1_s cn52xx; 3420 + struct cvmx_lmcx_wodt_ctl1_s cn52xxp1; 3421 + struct cvmx_lmcx_wodt_ctl1_s cn56xx; 3422 + struct cvmx_lmcx_wodt_ctl1_s cn56xxp1; 3423 + }; 3424 + 3425 + union cvmx_lmcx_wodt_mask { 3426 + uint64_t u64; 3427 + struct cvmx_lmcx_wodt_mask_s { 3428 + #ifdef __BIG_ENDIAN_BITFIELD 3429 + uint64_t wodt_d3_r1:8; 3430 + uint64_t wodt_d3_r0:8; 3431 + uint64_t wodt_d2_r1:8; 3432 + uint64_t wodt_d2_r0:8; 3433 + uint64_t wodt_d1_r1:8; 3434 + uint64_t wodt_d1_r0:8; 3435 + uint64_t wodt_d0_r1:8; 3436 + uint64_t wodt_d0_r0:8; 3437 + #else 3438 + uint64_t wodt_d0_r0:8; 3439 + uint64_t wodt_d0_r1:8; 3440 + uint64_t wodt_d1_r0:8; 3441 + uint64_t wodt_d1_r1:8; 3442 + uint64_t wodt_d2_r0:8; 3443 + uint64_t wodt_d2_r1:8; 3444 + uint64_t wodt_d3_r0:8; 3445 + uint64_t wodt_d3_r1:8; 3446 + #endif 3447 + } s; 3448 + struct cvmx_lmcx_wodt_mask_s cn61xx; 3449 + struct cvmx_lmcx_wodt_mask_s cn63xx; 3450 + struct cvmx_lmcx_wodt_mask_s cn63xxp1; 3451 + struct cvmx_lmcx_wodt_mask_s cn66xx; 3452 + struct cvmx_lmcx_wodt_mask_s cn68xx; 3453 + struct cvmx_lmcx_wodt_mask_s cn68xxp1; 3454 + struct cvmx_lmcx_wodt_mask_s cnf71xx; 3455 + }; 3456 + 3457 + #endif
+6
arch/mips/include/asm/octeon/octeon-model.h
··· 218 218 #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) 219 219 #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) 220 220 221 + /* These are used to cover entire families of OCTEON processors */ 222 + #define OCTEON_FAM_1 (OCTEON_CN3XXX) 223 + #define OCTEON_FAM_PLUS (OCTEON_CN5XXX) 224 + #define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS) 225 + #define OCTEON_FAM_2 (OCTEON_CN6XXX) 226 + 221 227 /* The revision byte (low byte) has two different encodings. 222 228 * CN3XXX: 223 229 *
-7
arch/mips/include/asm/octeon/octeon.h
··· 209 209 } s; 210 210 }; 211 211 212 - struct octeon_cf_data { 213 - unsigned long base_region_bias; 214 - unsigned int base_region; /* The chip select region used by CF */ 215 - int is16bit; /* 0 - 8bit, !0 - 16bit */ 216 - int dma_engine; /* -1 for no DMA */ 217 - }; 218 - 219 212 extern void octeon_write_lcd(const char *s); 220 213 extern void octeon_check_cpu_bist(void); 221 214 extern int octeon_get_boot_debug_flag(void);
+4 -4
arch/mips/include/asm/page.h
··· 31 31 #define PAGE_SHIFT 16 32 32 #endif 33 33 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 34 - #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) 34 + #define PAGE_MASK (~(PAGE_SIZE - 1)) 35 35 36 - #ifdef CONFIG_HUGETLB_PAGE 36 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 37 37 #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) 38 38 #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) 39 39 #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 40 40 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 41 - #else /* !CONFIG_HUGETLB_PAGE */ 41 + #else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */ 42 42 #define HPAGE_SHIFT ({BUILD_BUG(); 0; }) 43 43 #define HPAGE_SIZE ({BUILD_BUG(); 0; }) 44 44 #define HPAGE_MASK ({BUILD_BUG(); 0; }) 45 45 #define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; }) 46 - #endif /* CONFIG_HUGETLB_PAGE */ 46 + #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 47 47 48 48 #ifndef __ASSEMBLY__ 49 49
+1 -1
arch/mips/include/asm/pgtable-64.h
··· 175 175 176 176 static inline int pmd_bad(pmd_t pmd) 177 177 { 178 - #ifdef CONFIG_HUGETLB_PAGE 178 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 179 179 /* pmd_huge(pmd) but inline */ 180 180 if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) 181 181 return 0;
+78 -49
arch/mips/include/asm/pgtable-bits.h
··· 34 34 */ 35 35 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 36 36 37 - #define _PAGE_PRESENT (1<<6) /* implemented in software */ 38 - #define _PAGE_READ (1<<7) /* implemented in software */ 39 - #define _PAGE_WRITE (1<<8) /* implemented in software */ 40 - #define _PAGE_ACCESSED (1<<9) /* implemented in software */ 41 - #define _PAGE_MODIFIED (1<<10) /* implemented in software */ 42 - #define _PAGE_FILE (1<<10) /* set:pagecache unset:swap */ 37 + /* 38 + * The following bits are directly used by the TLB hardware 39 + */ 40 + #define _PAGE_R4KBUG (1 << 0) /* workaround for r4k bug */ 41 + #define _PAGE_GLOBAL (1 << 0) 42 + #define _PAGE_VALID_SHIFT 1 43 + #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 44 + #define _PAGE_SILENT_READ (1 << 1) /* synonym */ 45 + #define _PAGE_DIRTY_SHIFT 2 46 + #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */ 47 + #define _PAGE_SILENT_WRITE (1 << 2) 48 + #define _CACHE_SHIFT 3 49 + #define _CACHE_MASK (7 << 3) 43 50 44 - #define _PAGE_R4KBUG (1<<0) /* workaround for r4k bug */ 45 - #define _PAGE_GLOBAL (1<<0) 46 - #define _PAGE_VALID (1<<1) 47 - #define _PAGE_SILENT_READ (1<<1) /* synonym */ 48 - #define _PAGE_DIRTY (1<<2) /* The MIPS dirty bit */ 49 - #define _PAGE_SILENT_WRITE (1<<2) 50 - #define _CACHE_SHIFT 3 51 - #define _CACHE_MASK (7<<3) 51 + /* 52 + * The following bits are implemented in software 53 + * 54 + * _PAGE_FILE semantics: set:pagecache unset:swap 55 + */ 56 + #define _PAGE_PRESENT_SHIFT 6 57 + #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 58 + #define _PAGE_READ_SHIFT 7 59 + #define _PAGE_READ (1 << _PAGE_READ_SHIFT) 60 + #define _PAGE_WRITE_SHIFT 8 61 + #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 62 + #define _PAGE_ACCESSED_SHIFT 9 63 + #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 64 + #define _PAGE_MODIFIED_SHIFT 10 65 + #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 66 + 67 + #define _PAGE_FILE (1 << 10) 52 68 53 69 #elif defined(CONFIG_CPU_R3000) || 
defined(CONFIG_CPU_TX39XX) 54 70 55 - #define _PAGE_PRESENT (1<<0) /* implemented in software */ 56 - #define _PAGE_READ (1<<1) /* implemented in software */ 57 - #define _PAGE_WRITE (1<<2) /* implemented in software */ 58 - #define _PAGE_ACCESSED (1<<3) /* implemented in software */ 59 - #define _PAGE_MODIFIED (1<<4) /* implemented in software */ 60 - #define _PAGE_FILE (1<<4) /* set:pagecache unset:swap */ 71 + /* 72 + * The following are implemented by software 73 + * 74 + * _PAGE_FILE semantics: set:pagecache unset:swap 75 + */ 76 + #define _PAGE_PRESENT_SHIFT 0 77 + #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 78 + #define _PAGE_READ_SHIFT 1 79 + #define _PAGE_READ (1 << _PAGE_READ_SHIFT) 80 + #define _PAGE_WRITE_SHIFT 2 81 + #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 82 + #define _PAGE_ACCESSED_SHIFT 3 83 + #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 84 + #define _PAGE_MODIFIED_SHIFT 4 85 + #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 86 + #define _PAGE_FILE_SHIFT 4 87 + #define _PAGE_FILE (1 << _PAGE_FILE_SHIFT) 61 88 62 - #define _PAGE_GLOBAL (1<<8) 63 - #define _PAGE_VALID (1<<9) 64 - #define _PAGE_SILENT_READ (1<<9) /* synonym */ 65 - #define _PAGE_DIRTY (1<<10) /* The MIPS dirty bit */ 66 - #define _PAGE_SILENT_WRITE (1<<10) 67 - #define _CACHE_UNCACHED (1<<11) 68 - #define _CACHE_MASK (1<<11) 89 + /* 90 + * And these are the hardware TLB bits 91 + */ 92 + #define _PAGE_GLOBAL_SHIFT 8 93 + #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 94 + #define _PAGE_VALID_SHIFT 9 95 + #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 96 + #define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */ 97 + #define _PAGE_DIRTY_SHIFT 10 98 + #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 99 + #define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT) 100 + #define _CACHE_UNCACHED_SHIFT 11 101 + #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) 102 + #define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT) 69 103 70 104 #else /* 'Normal' r4K case */ 71 105 /* ··· 110 
76 * which is more than we need right now. 111 77 */ 112 78 113 - /* implemented in software */ 79 + /* 80 + * The following bits are implemented in software 81 + * 82 + * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi. 83 + * _PAGE_FILE semantics: set:pagecache unset:swap 84 + */ 114 85 #define _PAGE_PRESENT_SHIFT (0) 115 86 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 116 - /* implemented in software, should be unused if cpu_has_rixi. */ 117 87 #define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) 118 88 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) 119 - /* implemented in software */ 120 89 #define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) 121 90 #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 122 - /* implemented in software */ 123 91 #define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) 124 92 #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 125 - /* implemented in software */ 126 93 #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) 127 94 #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 128 - /* set:pagecache unset:swap */ 129 95 #define _PAGE_FILE (_PAGE_MODIFIED) 130 96 131 - #ifdef CONFIG_HUGETLB_PAGE 97 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 132 98 /* huge tlb page */ 133 99 #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) 134 100 #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) ··· 137 103 #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ 138 104 #endif 139 105 106 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 107 + /* huge tlb page */ 108 + #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) 109 + #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) 110 + #else 111 + #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) 112 + #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ 113 + #endif 114 + 140 115 /* Page cannot be executed */ 141 - #define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_HUGE_SHIFT + 1 : _PAGE_HUGE_SHIFT) 116 + #define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? 
_PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT) 142 117 #define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; }) 143 118 144 119 /* Page cannot be read */ ··· 234 191 #define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) 235 192 #define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT) 236 193 #define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) 237 - 238 - #elif defined(CONFIG_CPU_RM9000) 239 - 240 - #define _CACHE_WT (0<<_CACHE_SHIFT) 241 - #define _CACHE_WTWA (1<<_CACHE_SHIFT) 242 - #define _CACHE_UC_B (2<<_CACHE_SHIFT) 243 - #define _CACHE_WB (3<<_CACHE_SHIFT) 244 - #define _CACHE_CWBEA (4<<_CACHE_SHIFT) 245 - #define _CACHE_CWB (5<<_CACHE_SHIFT) 246 - #define _CACHE_UCNB (6<<_CACHE_SHIFT) 247 - #define _CACHE_FPC (7<<_CACHE_SHIFT) 248 - 249 - #define _CACHE_UNCACHED _CACHE_UC_B 250 - #define _CACHE_CACHABLE_NONCOHERENT _CACHE_WB 251 194 252 195 #else 253 196
+166 -2
arch/mips/include/asm/pgtable.h
··· 8 8 #ifndef _ASM_PGTABLE_H 9 9 #define _ASM_PGTABLE_H 10 10 11 + #include <linux/mmzone.h> 11 12 #ifdef CONFIG_32BIT 12 13 #include <asm/pgtable-32.h> 13 14 #endif ··· 86 85 * and a page entry and page directory to the page they refer to. 87 86 */ 88 87 #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd)) 89 - #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 88 + 89 + #define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 90 + #ifndef CONFIG_TRANSPARENT_HUGEPAGE 91 + #define pmd_page(pmd) __pmd_page(pmd) 92 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 93 + 90 94 #define pmd_page_vaddr(pmd) pmd_val(pmd) 91 95 92 96 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) ··· 104 98 ptep->pte_high = pte.pte_high; 105 99 smp_wmb(); 106 100 ptep->pte_low = pte.pte_low; 107 - //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low); 108 101 109 102 if (pte.pte_low & _PAGE_GLOBAL) { 110 103 pte_t *buddy = ptep_buddy(ptep); ··· 371 366 __update_cache(vma, address, pte); 372 367 } 373 368 369 + static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, 370 + unsigned long address, pmd_t *pmdp) 371 + { 372 + pte_t pte = *(pte_t *)pmdp; 373 + 374 + __update_tlb(vma, address, pte); 375 + } 376 + 374 377 #define kern_addr_valid(addr) (1) 375 378 376 379 #ifdef CONFIG_64BIT_PHYS_ADDR ··· 397 384 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 398 385 remap_pfn_range(vma, vaddr, pfn, size, prot) 399 386 #endif 387 + 388 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 389 + 390 + extern int has_transparent_hugepage(void); 391 + 392 + static inline int pmd_trans_huge(pmd_t pmd) 393 + { 394 + return !!(pmd_val(pmd) & _PAGE_HUGE); 395 + } 396 + 397 + static inline pmd_t pmd_mkhuge(pmd_t pmd) 398 + { 399 + pmd_val(pmd) |= _PAGE_HUGE; 400 + 401 + return pmd; 402 + } 403 + 404 + static inline int pmd_trans_splitting(pmd_t pmd) 405 + { 406 + return !!(pmd_val(pmd) & _PAGE_SPLITTING); 407 + } 408 + 409 + static inline pmd_t 
pmd_mksplitting(pmd_t pmd) 410 + { 411 + pmd_val(pmd) |= _PAGE_SPLITTING; 412 + 413 + return pmd; 414 + } 415 + 416 + extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, 417 + pmd_t *pmdp, pmd_t pmd); 418 + 419 + #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH 420 + /* Extern to avoid header file madness */ 421 + extern void pmdp_splitting_flush(struct vm_area_struct *vma, 422 + unsigned long address, 423 + pmd_t *pmdp); 424 + 425 + #define __HAVE_ARCH_PMD_WRITE 426 + static inline int pmd_write(pmd_t pmd) 427 + { 428 + return !!(pmd_val(pmd) & _PAGE_WRITE); 429 + } 430 + 431 + static inline pmd_t pmd_wrprotect(pmd_t pmd) 432 + { 433 + pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); 434 + return pmd; 435 + } 436 + 437 + static inline pmd_t pmd_mkwrite(pmd_t pmd) 438 + { 439 + pmd_val(pmd) |= _PAGE_WRITE; 440 + if (pmd_val(pmd) & _PAGE_MODIFIED) 441 + pmd_val(pmd) |= _PAGE_SILENT_WRITE; 442 + 443 + return pmd; 444 + } 445 + 446 + static inline int pmd_dirty(pmd_t pmd) 447 + { 448 + return !!(pmd_val(pmd) & _PAGE_MODIFIED); 449 + } 450 + 451 + static inline pmd_t pmd_mkclean(pmd_t pmd) 452 + { 453 + pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); 454 + return pmd; 455 + } 456 + 457 + static inline pmd_t pmd_mkdirty(pmd_t pmd) 458 + { 459 + pmd_val(pmd) |= _PAGE_MODIFIED; 460 + if (pmd_val(pmd) & _PAGE_WRITE) 461 + pmd_val(pmd) |= _PAGE_SILENT_WRITE; 462 + 463 + return pmd; 464 + } 465 + 466 + static inline int pmd_young(pmd_t pmd) 467 + { 468 + return !!(pmd_val(pmd) & _PAGE_ACCESSED); 469 + } 470 + 471 + static inline pmd_t pmd_mkold(pmd_t pmd) 472 + { 473 + pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); 474 + 475 + return pmd; 476 + } 477 + 478 + static inline pmd_t pmd_mkyoung(pmd_t pmd) 479 + { 480 + pmd_val(pmd) |= _PAGE_ACCESSED; 481 + 482 + if (cpu_has_rixi) { 483 + if (!(pmd_val(pmd) & _PAGE_NO_READ)) 484 + pmd_val(pmd) |= _PAGE_SILENT_READ; 485 + } else { 486 + if (pmd_val(pmd) & _PAGE_READ) 487 + pmd_val(pmd) |= _PAGE_SILENT_READ; 
488 + } 489 + 490 + return pmd; 491 + } 492 + 493 + /* Extern to avoid header file madness */ 494 + extern pmd_t mk_pmd(struct page *page, pgprot_t prot); 495 + 496 + static inline unsigned long pmd_pfn(pmd_t pmd) 497 + { 498 + return pmd_val(pmd) >> _PFN_SHIFT; 499 + } 500 + 501 + static inline struct page *pmd_page(pmd_t pmd) 502 + { 503 + if (pmd_trans_huge(pmd)) 504 + return pfn_to_page(pmd_pfn(pmd)); 505 + 506 + return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT); 507 + } 508 + 509 + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 510 + { 511 + pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); 512 + return pmd; 513 + } 514 + 515 + static inline pmd_t pmd_mknotpresent(pmd_t pmd) 516 + { 517 + pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY); 518 + 519 + return pmd; 520 + } 521 + 522 + /* 523 + * The generic version pmdp_get_and_clear uses a version of pmd_clear() with a 524 + * different prototype. 525 + */ 526 + #define __HAVE_ARCH_PMDP_GET_AND_CLEAR 527 + static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, 528 + unsigned long address, pmd_t *pmdp) 529 + { 530 + pmd_t old = *pmdp; 531 + 532 + pmd_clear(pmdp); 533 + 534 + return old; 535 + } 536 + 537 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 400 538 401 539 #include <asm-generic/pgtable.h> 402 540
-1
arch/mips/include/asm/pmc-sierra/msp71xx/war.h
··· 17 17 #define MIPS4K_ICACHE_REFILL_WAR 0 18 18 #define MIPS_CACHE_SYNC_WAR 0 19 19 #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 - #define RM9000_CDEX_SMP_WAR 0 21 20 #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 21 #define R10000_LLSC_WAR 0 23 22 #if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \
-4
arch/mips/include/asm/processor.h
··· 226 226 unsigned long cp0_badvaddr; /* Last user fault */ 227 227 unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ 228 228 unsigned long error_code; 229 - unsigned long irix_trampoline; /* Wheee... */ 230 - unsigned long irix_oldctx; 231 229 #ifdef CONFIG_CPU_CAVIUM_OCTEON 232 230 struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128))); 233 231 struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128))); ··· 295 297 .cp0_badvaddr = 0, \ 296 298 .cp0_baduaddr = 0, \ 297 299 .error_code = 0, \ 298 - .irix_trampoline = 0, \ 299 - .irix_oldctx = 0, \ 300 300 /* \ 301 301 * Cavium Octeon specifics (null if not Octeon) \ 302 302 */ \
+4 -4
arch/mips/include/asm/sgiarcs.h
··· 366 366 * Macros for calling a 32-bit ARC implementation from 64-bit code 367 367 */ 368 368 369 - #if defined(CONFIG_64BIT) && defined(CONFIG_ARC32) 369 + #if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32) 370 370 371 371 #define __arc_clobbers \ 372 372 "$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \ ··· 475 475 __res; \ 476 476 }) 477 477 478 - #endif /* defined(CONFIG_64BIT) && defined(CONFIG_ARC32) */ 478 + #endif /* defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32) */ 479 479 480 - #if (defined(CONFIG_32BIT) && defined(CONFIG_ARC32)) || \ 481 - (defined(CONFIG_64BIT) && defined(CONFIG_ARC64)) 480 + #if (defined(CONFIG_32BIT) && defined(CONFIG_FW_ARC32)) || \ 481 + (defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC64)) 482 482 483 483 #define ARC_CALL0(dest) \ 484 484 ({ long __res; \
+6
arch/mips/include/asm/smp.h
··· 40 40 #define SMP_CALL_FUNCTION 0x2 41 41 /* Octeon - Tell another core to flush its icache */ 42 42 #define SMP_ICACHE_FLUSH 0x4 43 + /* Used by kexec crashdump to save all cpu's state */ 44 + #define SMP_DUMP 0x8 43 45 44 46 extern volatile cpumask_t cpu_callin_map; 45 47 ··· 93 91 mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); 94 92 } 95 93 94 + #if defined(CONFIG_KEXEC) 95 + extern void (*dump_ipi_function_ptr)(void *); 96 + void dump_send_ipi(void (*dump_ipi_callback)(void *)); 97 + #endif 96 98 #endif /* __ASM_SMP_H */
-19
arch/mips/include/asm/smvp.h
··· 1 - #ifndef _ASM_SMVP_H 2 - #define _ASM_SMVP_H 3 - 4 - /* 5 - * Definitions for SMVP multitasking on MIPS MT cores 6 - */ 7 - struct task_struct; 8 - 9 - extern void smvp_smp_setup(void); 10 - extern void smvp_smp_finish(void); 11 - extern void smvp_boot_secondary(int cpu, struct task_struct *t); 12 - extern void smvp_init_secondary(void); 13 - extern void smvp_smp_finish(void); 14 - extern void smvp_cpus_done(void); 15 - extern void smvp_prepare_cpus(unsigned int max_cpus); 16 - 17 - /* This is platform specific */ 18 - extern void smvp_send_ipi(int cpu, unsigned int action); 19 - #endif /* _ASM_SMVP_H */
+1 -1
arch/mips/include/asm/sparsemem.h
··· 6 6 * SECTION_SIZE_BITS 2^N: how big each section will be 7 7 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space 8 8 */ 9 - #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB) 9 + #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && defined(CONFIG_PAGE_SIZE_64KB) 10 10 # define SECTION_SIZE_BITS 29 11 11 #else 12 12 # define SECTION_SIZE_BITS 28
+1 -3
arch/mips/include/asm/time.h
··· 50 50 /* 51 51 * Initialize the calling CPU's compare interrupt as clockevent device 52 52 */ 53 - #ifdef CONFIG_CEVT_R4K_LIB 54 53 extern unsigned int __weak get_c0_compare_int(void); 55 54 extern int r4k_clockevent_init(void); 56 - #endif 57 55 58 56 static inline int mips_clockevent_init(void) 59 57 { ··· 69 71 /* 70 72 * Initialize the count register as a clocksource 71 73 */ 72 - #ifdef CONFIG_CSRC_R4K_LIB 74 + #ifdef CONFIG_CSRC_R4K 73 75 extern int init_r4k_clocksource(void); 74 76 #endif 75 77
-231
arch/mips/include/asm/titan_dep.h
··· 1 - /* 2 - * Copyright 2003 PMC-Sierra 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * Board specific definititions for the PMC-Sierra Yosemite 6 - * 7 - * This program is free software; you can redistribute it and/or modify it 8 - * under the terms of the GNU General Public License as published by the 9 - * Free Software Foundation; either version 2 of the License, or (at your 10 - * option) any later version. 11 - */ 12 - 13 - #ifndef __TITAN_DEP_H__ 14 - #define __TITAN_DEP_H__ 15 - 16 - #include <asm/addrspace.h> /* for KSEG1ADDR() */ 17 - #include <asm/byteorder.h> /* for cpu_to_le32() */ 18 - 19 - #define TITAN_READ(ofs) \ 20 - (*(volatile u32 *)(ocd_base+(ofs))) 21 - #define TITAN_READ_16(ofs) \ 22 - (*(volatile u16 *)(ocd_base+(ofs))) 23 - #define TITAN_READ_8(ofs) \ 24 - (*(volatile u8 *)(ocd_base+(ofs))) 25 - 26 - #define TITAN_WRITE(ofs, data) \ 27 - do { *(volatile u32 *)(ocd_base+(ofs)) = (data); } while (0) 28 - #define TITAN_WRITE_16(ofs, data) \ 29 - do { *(volatile u16 *)(ocd_base+(ofs)) = (data); } while (0) 30 - #define TITAN_WRITE_8(ofs, data) \ 31 - do { *(volatile u8 *)(ocd_base+(ofs)) = (data); } while (0) 32 - 33 - /* 34 - * PCI specific defines 35 - */ 36 - #define TITAN_PCI_0_CONFIG_ADDRESS 0x780 37 - #define TITAN_PCI_0_CONFIG_DATA 0x784 38 - 39 - /* 40 - * HT specific defines 41 - */ 42 - #define RM9000x2_HTLINK_REG 0xbb000644 43 - #define RM9000x2_BASE_ADDR 0xbb000000 44 - 45 - #define OCD_BASE 0xfb000000UL 46 - #define OCD_SIZE 0x3000UL 47 - 48 - extern unsigned long ocd_base; 49 - 50 - /* 51 - * OCD Registers 52 - */ 53 - #define RM9000x2_OCD_LKB5 0x0128 /* Ethernet */ 54 - #define RM9000x2_OCD_LKM5 0x012c 55 - 56 - #define RM9000x2_OCD_LKB7 0x0138 /* HT Region 0 */ 57 - #define RM9000x2_OCD_LKM7 0x013c 58 - #define RM9000x2_OCD_LKB8 0x0140 /* HT Region 1 */ 59 - #define RM9000x2_OCD_LKM8 0x0144 60 - 61 - #define RM9000x2_OCD_LKB9 0x0148 /* Local Bus */ 62 - #define RM9000x2_OCD_LKM9 0x014c 63 - #define 
RM9000x2_OCD_LKB10 0x0150 64 - #define RM9000x2_OCD_LKM10 0x0154 65 - #define RM9000x2_OCD_LKB11 0x0158 66 - #define RM9000x2_OCD_LKM11 0x015c 67 - #define RM9000x2_OCD_LKB12 0x0160 68 - #define RM9000x2_OCD_LKM12 0x0164 69 - 70 - #define RM9000x2_OCD_LKB13 0x0168 /* Scratch RAM */ 71 - #define RM9000x2_OCD_LKM13 0x016c 72 - 73 - #define RM9000x2_OCD_LPD0 0x0200 /* Local Bus */ 74 - #define RM9000x2_OCD_LPD1 0x0210 75 - #define RM9000x2_OCD_LPD2 0x0220 76 - #define RM9000x2_OCD_LPD3 0x0230 77 - 78 - #define RM9000x2_OCD_HTDVID 0x0600 /* HT Device Header */ 79 - #define RM9000x2_OCD_HTSC 0x0604 80 - #define RM9000x2_OCD_HTCCR 0x0608 81 - #define RM9000x2_OCD_HTBHL 0x060c 82 - #define RM9000x2_OCD_HTBAR0 0x0610 83 - #define RM9000x2_OCD_HTBAR1 0x0614 84 - #define RM9000x2_OCD_HTBAR2 0x0618 85 - #define RM9000x2_OCD_HTBAR3 0x061c 86 - #define RM9000x2_OCD_HTBAR4 0x0620 87 - #define RM9000x2_OCD_HTBAR5 0x0624 88 - #define RM9000x2_OCD_HTCBCPT 0x0628 89 - #define RM9000x2_OCD_HTSDVID 0x062c 90 - #define RM9000x2_OCD_HTXRA 0x0630 91 - #define RM9000x2_OCD_HTCAP1 0x0634 92 - #define RM9000x2_OCD_HTIL 0x063c 93 - 94 - #define RM9000x2_OCD_HTLCC 0x0640 /* HT Capability Block */ 95 - #define RM9000x2_OCD_HTLINK 0x0644 96 - #define RM9000x2_OCD_HTFQREV 0x0648 97 - 98 - #define RM9000x2_OCD_HTERCTL 0x0668 /* HT Controller */ 99 - #define RM9000x2_OCD_HTRXDB 0x066c 100 - #define RM9000x2_OCD_HTIMPED 0x0670 101 - #define RM9000x2_OCD_HTSWIMP 0x0674 102 - #define RM9000x2_OCD_HTCAL 0x0678 103 - 104 - #define RM9000x2_OCD_HTBAA30 0x0680 105 - #define RM9000x2_OCD_HTBAA54 0x0684 106 - #define RM9000x2_OCD_HTMASK0 0x0688 107 - #define RM9000x2_OCD_HTMASK1 0x068c 108 - #define RM9000x2_OCD_HTMASK2 0x0690 109 - #define RM9000x2_OCD_HTMASK3 0x0694 110 - #define RM9000x2_OCD_HTMASK4 0x0698 111 - #define RM9000x2_OCD_HTMASK5 0x069c 112 - 113 - #define RM9000x2_OCD_HTIFCTL 0x06a0 114 - #define RM9000x2_OCD_HTPLL 0x06a4 115 - 116 - #define RM9000x2_OCD_HTSRI 0x06b0 117 - #define 
RM9000x2_OCD_HTRXNUM 0x06b4 118 - #define RM9000x2_OCD_HTTXNUM 0x06b8 119 - 120 - #define RM9000x2_OCD_HTTXCNT 0x06c8 121 - 122 - #define RM9000x2_OCD_HTERROR 0x06d8 123 - #define RM9000x2_OCD_HTRCRCE 0x06dc 124 - #define RM9000x2_OCD_HTEOI 0x06e0 125 - 126 - #define RM9000x2_OCD_CRCR 0x06f0 127 - 128 - #define RM9000x2_OCD_HTCFGA 0x06f8 129 - #define RM9000x2_OCD_HTCFGD 0x06fc 130 - 131 - #define RM9000x2_OCD_INTMSG 0x0a00 132 - 133 - #define RM9000x2_OCD_INTPIN0 0x0a40 134 - #define RM9000x2_OCD_INTPIN1 0x0a44 135 - #define RM9000x2_OCD_INTPIN2 0x0a48 136 - #define RM9000x2_OCD_INTPIN3 0x0a4c 137 - #define RM9000x2_OCD_INTPIN4 0x0a50 138 - #define RM9000x2_OCD_INTPIN5 0x0a54 139 - #define RM9000x2_OCD_INTPIN6 0x0a58 140 - #define RM9000x2_OCD_INTPIN7 0x0a5c 141 - #define RM9000x2_OCD_SEM 0x0a60 142 - #define RM9000x2_OCD_SEMSET 0x0a64 143 - #define RM9000x2_OCD_SEMCLR 0x0a68 144 - 145 - #define RM9000x2_OCD_TKT 0x0a70 146 - #define RM9000x2_OCD_TKTINC 0x0a74 147 - 148 - #define RM9000x2_OCD_NMICONFIG 0x0ac0 /* Interrupts */ 149 - #define RM9000x2_OCD_INTP0PRI 0x1a80 150 - #define RM9000x2_OCD_INTP1PRI 0x1a80 151 - #define RM9000x2_OCD_INTP0STATUS0 0x1b00 152 - #define RM9000x2_OCD_INTP0MASK0 0x1b04 153 - #define RM9000x2_OCD_INTP0SET0 0x1b08 154 - #define RM9000x2_OCD_INTP0CLEAR0 0x1b0c 155 - #define RM9000x2_OCD_INTP0STATUS1 0x1b10 156 - #define RM9000x2_OCD_INTP0MASK1 0x1b14 157 - #define RM9000x2_OCD_INTP0SET1 0x1b18 158 - #define RM9000x2_OCD_INTP0CLEAR1 0x1b1c 159 - #define RM9000x2_OCD_INTP0STATUS2 0x1b20 160 - #define RM9000x2_OCD_INTP0MASK2 0x1b24 161 - #define RM9000x2_OCD_INTP0SET2 0x1b28 162 - #define RM9000x2_OCD_INTP0CLEAR2 0x1b2c 163 - #define RM9000x2_OCD_INTP0STATUS3 0x1b30 164 - #define RM9000x2_OCD_INTP0MASK3 0x1b34 165 - #define RM9000x2_OCD_INTP0SET3 0x1b38 166 - #define RM9000x2_OCD_INTP0CLEAR3 0x1b3c 167 - #define RM9000x2_OCD_INTP0STATUS4 0x1b40 168 - #define RM9000x2_OCD_INTP0MASK4 0x1b44 169 - #define RM9000x2_OCD_INTP0SET4 0x1b48 170 - 
#define RM9000x2_OCD_INTP0CLEAR4 0x1b4c 171 - #define RM9000x2_OCD_INTP0STATUS5 0x1b50 172 - #define RM9000x2_OCD_INTP0MASK5 0x1b54 173 - #define RM9000x2_OCD_INTP0SET5 0x1b58 174 - #define RM9000x2_OCD_INTP0CLEAR5 0x1b5c 175 - #define RM9000x2_OCD_INTP0STATUS6 0x1b60 176 - #define RM9000x2_OCD_INTP0MASK6 0x1b64 177 - #define RM9000x2_OCD_INTP0SET6 0x1b68 178 - #define RM9000x2_OCD_INTP0CLEAR6 0x1b6c 179 - #define RM9000x2_OCD_INTP0STATUS7 0x1b70 180 - #define RM9000x2_OCD_INTP0MASK7 0x1b74 181 - #define RM9000x2_OCD_INTP0SET7 0x1b78 182 - #define RM9000x2_OCD_INTP0CLEAR7 0x1b7c 183 - #define RM9000x2_OCD_INTP1STATUS0 0x2b00 184 - #define RM9000x2_OCD_INTP1MASK0 0x2b04 185 - #define RM9000x2_OCD_INTP1SET0 0x2b08 186 - #define RM9000x2_OCD_INTP1CLEAR0 0x2b0c 187 - #define RM9000x2_OCD_INTP1STATUS1 0x2b10 188 - #define RM9000x2_OCD_INTP1MASK1 0x2b14 189 - #define RM9000x2_OCD_INTP1SET1 0x2b18 190 - #define RM9000x2_OCD_INTP1CLEAR1 0x2b1c 191 - #define RM9000x2_OCD_INTP1STATUS2 0x2b20 192 - #define RM9000x2_OCD_INTP1MASK2 0x2b24 193 - #define RM9000x2_OCD_INTP1SET2 0x2b28 194 - #define RM9000x2_OCD_INTP1CLEAR2 0x2b2c 195 - #define RM9000x2_OCD_INTP1STATUS3 0x2b30 196 - #define RM9000x2_OCD_INTP1MASK3 0x2b34 197 - #define RM9000x2_OCD_INTP1SET3 0x2b38 198 - #define RM9000x2_OCD_INTP1CLEAR3 0x2b3c 199 - #define RM9000x2_OCD_INTP1STATUS4 0x2b40 200 - #define RM9000x2_OCD_INTP1MASK4 0x2b44 201 - #define RM9000x2_OCD_INTP1SET4 0x2b48 202 - #define RM9000x2_OCD_INTP1CLEAR4 0x2b4c 203 - #define RM9000x2_OCD_INTP1STATUS5 0x2b50 204 - #define RM9000x2_OCD_INTP1MASK5 0x2b54 205 - #define RM9000x2_OCD_INTP1SET5 0x2b58 206 - #define RM9000x2_OCD_INTP1CLEAR5 0x2b5c 207 - #define RM9000x2_OCD_INTP1STATUS6 0x2b60 208 - #define RM9000x2_OCD_INTP1MASK6 0x2b64 209 - #define RM9000x2_OCD_INTP1SET6 0x2b68 210 - #define RM9000x2_OCD_INTP1CLEAR6 0x2b6c 211 - #define RM9000x2_OCD_INTP1STATUS7 0x2b70 212 - #define RM9000x2_OCD_INTP1MASK7 0x2b74 213 - #define RM9000x2_OCD_INTP1SET7 0x2b78 214 
- #define RM9000x2_OCD_INTP1CLEAR7 0x2b7c 215 - 216 - #define OCD_READ(reg) (*(volatile unsigned int *)(ocd_base + (reg))) 217 - #define OCD_WRITE(reg, val) \ 218 - do { *(volatile unsigned int *)(ocd_base + (reg)) = (val); } while (0) 219 - 220 - /* 221 - * Hypertransport specific macros 222 - */ 223 - #define RM9K_WRITE(ofs, data) *(volatile u_int32_t *)(RM9000x2_BASE_ADDR+ofs) = data 224 - #define RM9K_WRITE_8(ofs, data) *(volatile u8 *)(RM9000x2_BASE_ADDR+ofs) = data 225 - #define RM9K_WRITE_16(ofs, data) *(volatile u16 *)(RM9000x2_BASE_ADDR+ofs) = data 226 - 227 - #define RM9K_READ(ofs, val) *(val) = *(volatile u_int32_t *)(RM9000x2_BASE_ADDR+ofs) 228 - #define RM9K_READ_8(ofs, val) *(val) = *(volatile u8 *)(RM9000x2_BASE_ADDR+ofs) 229 - #define RM9K_READ_16(ofs, val) *(val) = *(volatile u16 *)(RM9000x2_BASE_ADDR+ofs) 230 - 231 - #endif
-8
arch/mips/include/asm/war.h
··· 209 209 #endif 210 210 211 211 /* 212 - * On the RM9000 there is a problem which makes the CreateDirtyExclusive 213 - * eache operation unusable on SMP systems. 214 - */ 215 - #ifndef RM9000_CDEX_SMP_WAR 216 - #error Check setting of RM9000_CDEX_SMP_WAR for your platform 217 - #endif 218 - 219 - /* 220 212 * The RM7000 processors and the E9000 cores have a bug (though PMC-Sierra 221 213 * opposes it being called that) where invalid instructions in the same 222 214 * I-cache line worth of instructions being fetched may case spurious
+4 -4
arch/mips/kernel/Makefile
··· 16 16 endif 17 17 18 18 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o 19 - obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o 19 + obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 20 20 obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o 21 21 obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o 22 22 obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o ··· 25 25 obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o 26 26 obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o 27 27 obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o 28 - obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o 28 + obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o 29 29 obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o 30 30 obj-$(CONFIG_SYNC_R4K) += sync-r4k.o 31 31 ··· 58 58 obj-$(CONFIG_I8259) += i8259.o 59 59 obj-$(CONFIG_IRQ_CPU) += irq_cpu.o 60 60 obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o 61 - obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o 62 61 obj-$(CONFIG_MIPS_MSC) += irq-msc01.o 63 62 obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o 64 63 obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o ··· 79 80 80 81 obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o 81 82 82 - obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 83 + obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o 84 + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 83 85 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 84 86 obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 85 87 obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
-4
arch/mips/kernel/asm-offsets.c
··· 125 125 thread.cp0_baduaddr); 126 126 OFFSET(THREAD_ECODE, task_struct, \ 127 127 thread.error_code); 128 - OFFSET(THREAD_TRAMP, task_struct, \ 129 - thread.irix_trampoline); 130 - OFFSET(THREAD_OLDCTX, task_struct, \ 131 - thread.irix_oldctx); 132 128 BLANK(); 133 129 } 134 130
+71
arch/mips/kernel/crash.c
··· 1 + #include <linux/kernel.h> 2 + #include <linux/smp.h> 3 + #include <linux/reboot.h> 4 + #include <linux/kexec.h> 5 + #include <linux/bootmem.h> 6 + #include <linux/crash_dump.h> 7 + #include <linux/delay.h> 8 + #include <linux/init.h> 9 + #include <linux/irq.h> 10 + #include <linux/types.h> 11 + #include <linux/sched.h> 12 + 13 + /* This keeps a track of which one is crashing cpu. */ 14 + static int crashing_cpu = -1; 15 + static cpumask_t cpus_in_crash = CPU_MASK_NONE; 16 + 17 + #ifdef CONFIG_SMP 18 + static void crash_shutdown_secondary(void *ignore) 19 + { 20 + struct pt_regs *regs; 21 + int cpu = smp_processor_id(); 22 + 23 + regs = task_pt_regs(current); 24 + 25 + if (!cpu_online(cpu)) 26 + return; 27 + 28 + local_irq_disable(); 29 + if (!cpu_isset(cpu, cpus_in_crash)) 30 + crash_save_cpu(regs, cpu); 31 + cpu_set(cpu, cpus_in_crash); 32 + 33 + while (!atomic_read(&kexec_ready_to_reboot)) 34 + cpu_relax(); 35 + relocated_kexec_smp_wait(NULL); 36 + /* NOTREACHED */ 37 + } 38 + 39 + static void crash_kexec_prepare_cpus(void) 40 + { 41 + unsigned int msecs; 42 + 43 + unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ 44 + 45 + dump_send_ipi(crash_shutdown_secondary); 46 + smp_wmb(); 47 + 48 + /* 49 + * The crash CPU sends an IPI and wait for other CPUs to 50 + * respond. Delay of at least 10 seconds. 51 + */ 52 + pr_emerg("Sending IPI to other cpus...\n"); 53 + msecs = 10000; 54 + while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { 55 + cpu_relax(); 56 + mdelay(1); 57 + } 58 + } 59 + 60 + #else /* !defined(CONFIG_SMP) */ 61 + static void crash_kexec_prepare_cpus(void) {} 62 + #endif /* !defined(CONFIG_SMP) */ 63 + 64 + void default_machine_crash_shutdown(struct pt_regs *regs) 65 + { 66 + local_irq_disable(); 67 + crashing_cpu = smp_processor_id(); 68 + crash_save_cpu(regs, crashing_cpu); 69 + crash_kexec_prepare_cpus(); 70 + cpu_set(crashing_cpu, cpus_in_crash); 71 + }
+75
arch/mips/kernel/crash_dump.c
··· 1 + #include <linux/highmem.h> 2 + #include <linux/bootmem.h> 3 + #include <linux/crash_dump.h> 4 + #include <asm/uaccess.h> 5 + 6 + static int __init parse_savemaxmem(char *p) 7 + { 8 + if (p) 9 + saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; 10 + 11 + return 1; 12 + } 13 + __setup("savemaxmem=", parse_savemaxmem); 14 + 15 + 16 + static void *kdump_buf_page; 17 + 18 + /** 19 + * copy_oldmem_page - copy one page from "oldmem" 20 + * @pfn: page frame number to be copied 21 + * @buf: target memory address for the copy; this can be in kernel address 22 + * space or user address space (see @userbuf) 23 + * @csize: number of bytes to copy 24 + * @offset: offset in bytes into the page (based on pfn) to begin the copy 25 + * @userbuf: if set, @buf is in user address space, use copy_to_user(), 26 + * otherwise @buf is in kernel address space, use memcpy(). 27 + * 28 + * Copy a page from "oldmem". For this page, there is no pte mapped 29 + * in the current kernel. 30 + * 31 + * Calling copy_to_user() in atomic context is not desirable. Hence first 32 + * copying the data to a pre-allocated kernel page and then copying to user 33 + * space in non-atomic context. 
34 + */ 35 + ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 36 + size_t csize, unsigned long offset, int userbuf) 37 + { 38 + void *vaddr; 39 + 40 + if (!csize) 41 + return 0; 42 + 43 + vaddr = kmap_atomic_pfn(pfn); 44 + 45 + if (!userbuf) { 46 + memcpy(buf, (vaddr + offset), csize); 47 + kunmap_atomic(vaddr); 48 + } else { 49 + if (!kdump_buf_page) { 50 + pr_warning("Kdump: Kdump buffer page not allocated\n"); 51 + 52 + return -EFAULT; 53 + } 54 + copy_page(kdump_buf_page, vaddr); 55 + kunmap_atomic(vaddr); 56 + if (copy_to_user(buf, (kdump_buf_page + offset), csize)) 57 + return -EFAULT; 58 + } 59 + 60 + return csize; 61 + } 62 + 63 + static int __init kdump_buf_page_init(void) 64 + { 65 + int ret = 0; 66 + 67 + kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); 68 + if (!kdump_buf_page) { 69 + pr_warning("Kdump: Failed to allocate kdump buffer page\n"); 70 + ret = -ENOMEM; 71 + } 72 + 73 + return ret; 74 + } 75 + arch_initcall(kdump_buf_page_init);
-106
arch/mips/kernel/irq-rm9000.c
··· 1 - /* 2 - * Copyright (C) 2003 Ralf Baechle 3 - * 4 - * This program is free software; you can redistribute it and/or modify it 5 - * under the terms of the GNU General Public License as published by the 6 - * Free Software Foundation; either version 2 of the License, or (at your 7 - * option) any later version. 8 - * 9 - * Handler for RM9000 extended interrupts. These are a non-standard 10 - * feature so we handle them separately from standard interrupts. 11 - */ 12 - #include <linux/init.h> 13 - #include <linux/interrupt.h> 14 - #include <linux/irq.h> 15 - #include <linux/kernel.h> 16 - #include <linux/module.h> 17 - 18 - #include <asm/irq_cpu.h> 19 - #include <asm/mipsregs.h> 20 - 21 - static inline void unmask_rm9k_irq(struct irq_data *d) 22 - { 23 - set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); 24 - } 25 - 26 - static inline void mask_rm9k_irq(struct irq_data *d) 27 - { 28 - clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); 29 - } 30 - 31 - static inline void rm9k_cpu_irq_enable(struct irq_data *d) 32 - { 33 - unsigned long flags; 34 - 35 - local_irq_save(flags); 36 - unmask_rm9k_irq(d); 37 - local_irq_restore(flags); 38 - } 39 - 40 - /* 41 - * Performance counter interrupts are global on all processors. 
42 - */ 43 - static void local_rm9k_perfcounter_irq_startup(void *args) 44 - { 45 - rm9k_cpu_irq_enable(args); 46 - } 47 - 48 - static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d) 49 - { 50 - on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1); 51 - 52 - return 0; 53 - } 54 - 55 - static void local_rm9k_perfcounter_irq_shutdown(void *args) 56 - { 57 - unsigned long flags; 58 - 59 - local_irq_save(flags); 60 - mask_rm9k_irq(args); 61 - local_irq_restore(flags); 62 - } 63 - 64 - static void rm9k_perfcounter_irq_shutdown(struct irq_data *d) 65 - { 66 - on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1); 67 - } 68 - 69 - static struct irq_chip rm9k_irq_controller = { 70 - .name = "RM9000", 71 - .irq_ack = mask_rm9k_irq, 72 - .irq_mask = mask_rm9k_irq, 73 - .irq_mask_ack = mask_rm9k_irq, 74 - .irq_unmask = unmask_rm9k_irq, 75 - .irq_eoi = unmask_rm9k_irq 76 - }; 77 - 78 - static struct irq_chip rm9k_perfcounter_irq = { 79 - .name = "RM9000", 80 - .irq_startup = rm9k_perfcounter_irq_startup, 81 - .irq_shutdown = rm9k_perfcounter_irq_shutdown, 82 - .irq_ack = mask_rm9k_irq, 83 - .irq_mask = mask_rm9k_irq, 84 - .irq_mask_ack = mask_rm9k_irq, 85 - .irq_unmask = unmask_rm9k_irq, 86 - }; 87 - 88 - unsigned int rm9000_perfcount_irq; 89 - 90 - EXPORT_SYMBOL(rm9000_perfcount_irq); 91 - 92 - void __init rm9k_cpu_irq_init(void) 93 - { 94 - int base = RM9K_CPU_IRQ_BASE; 95 - int i; 96 - 97 - clear_c0_intcontrol(0x0000f000); /* Mask all */ 98 - 99 - for (i = base; i < base + 4; i++) 100 - irq_set_chip_and_handler(i, &rm9k_irq_controller, 101 - handle_level_irq); 102 - 103 - rm9000_perfcount_irq = base + 1; 104 - irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 105 - handle_percpu_irq); 106 - }
+29 -4
arch/mips/kernel/machine_kexec.c
··· 5 5 * This source code is licensed under the GNU General Public License, 6 6 * Version 2. See the file COPYING for more details. 7 7 */ 8 - 8 + #include <linux/compiler.h> 9 9 #include <linux/kexec.h> 10 10 #include <linux/mm.h> 11 11 #include <linux/delay.h> ··· 19 19 extern unsigned long kexec_start_address; 20 20 extern unsigned long kexec_indirection_page; 21 21 22 + int (*_machine_kexec_prepare)(struct kimage *) = NULL; 23 + void (*_machine_kexec_shutdown)(void) = NULL; 24 + void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; 25 + #ifdef CONFIG_SMP 26 + void (*relocated_kexec_smp_wait) (void *); 27 + atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); 28 + #endif 29 + 22 30 int 23 31 machine_kexec_prepare(struct kimage *kimage) 24 32 { 33 + if (_machine_kexec_prepare) 34 + return _machine_kexec_prepare(kimage); 25 35 return 0; 26 36 } 27 37 ··· 43 33 void 44 34 machine_shutdown(void) 45 35 { 36 + if (_machine_kexec_shutdown) 37 + _machine_kexec_shutdown(); 46 38 } 47 39 48 40 void 49 41 machine_crash_shutdown(struct pt_regs *regs) 50 42 { 43 + if (_machine_crash_shutdown) 44 + _machine_crash_shutdown(regs); 45 + else 46 + default_machine_crash_shutdown(regs); 51 47 } 52 48 53 - typedef void (*noretfun_t)(void) __attribute__((noreturn)); 49 + typedef void (*noretfun_t)(void) __noreturn; 54 50 55 51 void 56 52 machine_kexec(struct kimage *image) ··· 68 52 reboot_code_buffer = 69 53 (unsigned long)page_address(image->control_code_page); 70 54 71 - kexec_start_address = image->start; 55 + kexec_start_address = 56 + (unsigned long) phys_to_virt(image->start); 57 + 72 58 kexec_indirection_page = 73 59 (unsigned long) phys_to_virt(image->head & PAGE_MASK); 74 60 ··· 81 63 * The generic kexec code builds a page list with physical 82 64 * addresses. they are directly accessible through KSEG0 (or 83 65 * CKSEG0 or XPHYS if on 64bit system), hence the 84 - * pys_to_virt() call. 66 + * phys_to_virt() call. 
85 67 */ 86 68 for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); 87 69 ptr = (entry & IND_INDIRECTION) ? ··· 99 81 printk("Will call new kernel at %08lx\n", image->start); 100 82 printk("Bye ...\n"); 101 83 __flush_cache_all(); 84 + #ifdef CONFIG_SMP 85 + /* All secondary cpus now may jump to kexec_wait cycle */ 86 + relocated_kexec_smp_wait = reboot_code_buffer + 87 + (void *)(kexec_smp_wait - relocate_new_kernel); 88 + smp_wmb(); 89 + atomic_set(&kexec_ready_to_reboot, 1); 90 + #endif 102 91 ((noretfun_t) reboot_code_buffer)(); 103 92 }
+2 -2
arch/mips/kernel/mips-mt-fpaff.c
··· 50 50 51 51 rcu_read_lock(); 52 52 pcred = __task_cred(p); 53 - match = (cred->euid == pcred->euid || 54 - cred->euid == pcred->uid); 53 + match = (uid_eq(cred->euid, pcred->euid) || 54 + uid_eq(cred->euid, pcred->uid)); 55 55 rcu_read_unlock(); 56 56 return match; 57 57 }
+1 -1
arch/mips/kernel/mips_ksyms.c
··· 11 11 #include <linux/interrupt.h> 12 12 #include <linux/export.h> 13 13 #include <asm/checksum.h> 14 - #include <asm/pgtable.h> 14 + #include <linux/mm.h> 15 15 #include <asm/uaccess.h> 16 16 #include <asm/ftrace.h> 17 17
+124
arch/mips/kernel/perf_event_mipsxx.c
··· 840 840 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, 841 841 }; 842 842 843 + static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = { 844 + [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, 845 + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */ 846 + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ 847 + [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ 848 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */ 849 + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */ 850 + [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, 851 + }; 852 + 843 853 /* 24K/34K/1004K cores can share the same cache event map. */ 844 854 static const struct mips_perf_event mipsxxcore_cache_map 845 855 [PERF_COUNT_HW_CACHE_MAX] ··· 1098 1088 [C(ITLB)] = { 1099 1089 [C(OP_READ)] = { 1100 1090 [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, 1091 + }, 1092 + }, 1093 + }; 1094 + 1095 + static const struct mips_perf_event xlp_cache_map 1096 + [PERF_COUNT_HW_CACHE_MAX] 1097 + [PERF_COUNT_HW_CACHE_OP_MAX] 1098 + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1099 + [C(L1D)] = { 1100 + [C(OP_READ)] = { 1101 + [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */ 1102 + [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */ 1103 + }, 1104 + [C(OP_WRITE)] = { 1105 + [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */ 1106 + [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */ 1107 + }, 1108 + [C(OP_PREFETCH)] = { 1109 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1110 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1111 + }, 1112 + }, 1113 + [C(L1I)] = { 1114 + [C(OP_READ)] = { 1115 + [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ 1116 + [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ 1117 + }, 1118 + [C(OP_WRITE)] = { 1119 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1120 + [C(RESULT_MISS)] = { 
UNSUPPORTED_PERF_EVENT_ID }, 1121 + }, 1122 + [C(OP_PREFETCH)] = { 1123 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1124 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1125 + }, 1126 + }, 1127 + [C(LL)] = { 1128 + [C(OP_READ)] = { 1129 + [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */ 1130 + [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */ 1131 + }, 1132 + [C(OP_WRITE)] = { 1133 + [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */ 1134 + [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */ 1135 + }, 1136 + [C(OP_PREFETCH)] = { 1137 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1138 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1139 + }, 1140 + }, 1141 + [C(DTLB)] = { 1142 + /* 1143 + * Only general DTLB misses are counted use the same event for 1144 + * read and write. 1145 + */ 1146 + [C(OP_READ)] = { 1147 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1148 + [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ 1149 + }, 1150 + [C(OP_WRITE)] = { 1151 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1152 + [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ 1153 + }, 1154 + [C(OP_PREFETCH)] = { 1155 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1156 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1157 + }, 1158 + }, 1159 + [C(ITLB)] = { 1160 + [C(OP_READ)] = { 1161 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1162 + [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ 1163 + }, 1164 + [C(OP_WRITE)] = { 1165 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1166 + [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ 1167 + }, 1168 + [C(OP_PREFETCH)] = { 1169 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1170 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1171 + }, 1172 + }, 1173 + [C(BPU)] = { 1174 + [C(OP_READ)] = { 1175 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1176 + [C(RESULT_MISS)] = { 0x25, CNTR_ALL }, 1177 + }, 1178 + 
[C(OP_WRITE)] = { 1179 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1180 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1181 + }, 1182 + [C(OP_PREFETCH)] = { 1183 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1184 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1101 1185 }, 1102 1186 }, 1103 1187 }; ··· 1548 1444 return &raw_event; 1549 1445 } 1550 1446 1447 + static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config) 1448 + { 1449 + unsigned int raw_id = config & 0xff; 1450 + 1451 + /* Only 1-63 are defined */ 1452 + if ((raw_id < 0x01) || (raw_id > 0x3f)) 1453 + return ERR_PTR(-EOPNOTSUPP); 1454 + 1455 + raw_event.cntr_mask = CNTR_ALL; 1456 + raw_event.event_id = raw_id; 1457 + 1458 + return &raw_event; 1459 + } 1460 + 1551 1461 static int __init 1552 1462 init_hw_perf_events(void) 1553 1463 { ··· 1639 1521 mipspmu.name = "BMIPS5000"; 1640 1522 mipspmu.general_event_map = &bmips5000_event_map; 1641 1523 mipspmu.cache_event_map = &bmips5000_cache_map; 1524 + break; 1525 + case CPU_XLP: 1526 + mipspmu.name = "xlp"; 1527 + mipspmu.general_event_map = &xlp_event_map; 1528 + mipspmu.cache_event_map = &xlp_cache_map; 1529 + mipspmu.map_raw_event = xlp_pmu_map_raw_event; 1642 1530 break; 1643 1531 default: 1644 1532 pr_cont("Either hardware does not support performance "
+1 -3
arch/mips/kernel/process.c
··· 72 72 } 73 73 } 74 74 #ifdef CONFIG_HOTPLUG_CPU 75 - if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && 76 - (system_state == SYSTEM_RUNNING || 77 - system_state == SYSTEM_BOOTING)) 75 + if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) 78 76 play_dead(); 79 77 #endif 80 78 rcu_idle_exit();
+106 -1
arch/mips/kernel/relocate_kernel.S
··· 15 15 #include <asm/addrspace.h> 16 16 17 17 LEAF(relocate_new_kernel) 18 + PTR_L a0, arg0 19 + PTR_L a1, arg1 20 + PTR_L a2, arg2 21 + PTR_L a3, arg3 22 + 18 23 PTR_L s0, kexec_indirection_page 19 24 PTR_L s1, kexec_start_address 20 25 ··· 31 26 and s3, s2, 0x1 32 27 beq s3, zero, 1f 33 28 and s4, s2, ~0x1 /* store destination addr in s4 */ 34 - move a0, s4 35 29 b process_entry 36 30 37 31 1: ··· 64 60 b process_entry 65 61 66 62 done: 63 + #ifdef CONFIG_SMP 64 + /* kexec_flag reset is signal to other CPUs what kernel 65 + was moved to it's location. Note - we need relocated address 66 + of kexec_flag. */ 67 + 68 + bal 1f 69 + 1: move t1,ra; 70 + PTR_LA t2,1b 71 + PTR_LA t0,kexec_flag 72 + PTR_SUB t0,t0,t2; 73 + PTR_ADD t0,t1,t0; 74 + LONG_S zero,(t0) 75 + #endif 76 + 77 + #ifdef CONFIG_CPU_CAVIUM_OCTEON 78 + /* We need to flush I-cache before jumping to new kernel. 79 + * Unfortunatelly, this code is cpu-specific. 80 + */ 81 + .set push 82 + .set noreorder 83 + syncw 84 + syncw 85 + synci 0($0) 86 + .set pop 87 + #else 88 + sync 89 + #endif 67 90 /* jump to kexec_start_address */ 68 91 j s1 69 92 END(relocate_new_kernel) 93 + 94 + #ifdef CONFIG_SMP 95 + /* 96 + * Other CPUs should wait until code is relocated and 97 + * then start at entry (?) point. 98 + */ 99 + LEAF(kexec_smp_wait) 100 + PTR_L a0, s_arg0 101 + PTR_L a1, s_arg1 102 + PTR_L a2, s_arg2 103 + PTR_L a3, s_arg3 104 + PTR_L s1, kexec_start_address 105 + 106 + /* Non-relocated address works for args and kexec_start_address ( old 107 + * kernel is not overwritten). But we need relocated address of 108 + * kexec_flag. 
109 + */ 110 + 111 + bal 1f 112 + 1: move t1,ra; 113 + PTR_LA t2,1b 114 + PTR_LA t0,kexec_flag 115 + PTR_SUB t0,t0,t2; 116 + PTR_ADD t0,t1,t0; 117 + 118 + 1: LONG_L s0, (t0) 119 + bne s0, zero,1b 120 + 121 + #ifdef CONFIG_CPU_CAVIUM_OCTEON 122 + .set push 123 + .set noreorder 124 + synci 0($0) 125 + .set pop 126 + #else 127 + sync 128 + #endif 129 + j s1 130 + END(kexec_smp_wait) 131 + #endif 132 + 133 + #ifdef __mips64 134 + /* all PTR's must be aligned to 8 byte in 64-bit mode */ 135 + .align 3 136 + #endif 137 + 138 + /* All parameters to new kernel are passed in registers a0-a3. 139 + * kexec_args[0..3] are uses to prepare register values. 140 + */ 141 + 142 + kexec_args: 143 + EXPORT(kexec_args) 144 + arg0: PTR 0x0 145 + arg1: PTR 0x0 146 + arg2: PTR 0x0 147 + arg3: PTR 0x0 148 + .size kexec_args,PTRSIZE*4 149 + 150 + #ifdef CONFIG_SMP 151 + /* 152 + * Secondary CPUs may have different kernel parameters in 153 + * their registers a0-a3. secondary_kexec_args[0..3] are used 154 + * to prepare register values. 155 + */ 156 + secondary_kexec_args: 157 + EXPORT(secondary_kexec_args) 158 + s_arg0: PTR 0x0 159 + s_arg1: PTR 0x0 160 + s_arg2: PTR 0x0 161 + s_arg3: PTR 0x0 162 + .size secondary_kexec_args,PTRSIZE*4 163 + kexec_flag: 164 + LONG 0x1 165 + 166 + #endif 70 167 71 168 kexec_start_address: 72 169 EXPORT(kexec_start_address)
-6
arch/mips/kernel/scall64-n32.S
··· 17 17 #include <asm/thread_info.h> 18 18 #include <asm/unistd.h> 19 19 20 - /* This duplicates the definition from <linux/sched.h> */ 21 - #define PT_TRACESYS 0x00000002 /* tracing system calls */ 22 - 23 - /* This duplicates the definition from <asm/signal.h> */ 24 - #define SIGILL 4 /* Illegal instruction (ANSI). */ 25 - 26 20 #ifndef CONFIG_MIPS32_O32 27 21 /* No O32, so define handle_sys here */ 28 22 #define handle_sysn32 handle_sys
+56
arch/mips/kernel/setup.c
··· 22 22 #include <linux/console.h> 23 23 #include <linux/pfn.h> 24 24 #include <linux/debugfs.h> 25 + #include <linux/kexec.h> 25 26 26 27 #include <asm/addrspace.h> 27 28 #include <asm/bootinfo.h> ··· 537 536 } 538 537 539 538 bootmem_init(); 539 + #ifdef CONFIG_KEXEC 540 + if (crashk_res.start != crashk_res.end) 541 + reserve_bootmem(crashk_res.start, 542 + crashk_res.end - crashk_res.start + 1, 543 + BOOTMEM_DEFAULT); 544 + #endif 540 545 device_tree_init(); 541 546 sparse_init(); 542 547 plat_swiotlb_setup(); 543 548 paging_init(); 544 549 } 550 + 551 + #ifdef CONFIG_KEXEC 552 + static inline unsigned long long get_total_mem(void) 553 + { 554 + unsigned long long total; 555 + 556 + total = max_pfn - min_low_pfn; 557 + return total << PAGE_SHIFT; 558 + } 559 + 560 + static void __init mips_parse_crashkernel(void) 561 + { 562 + unsigned long long total_mem; 563 + unsigned long long crash_size, crash_base; 564 + int ret; 565 + 566 + total_mem = get_total_mem(); 567 + ret = parse_crashkernel(boot_command_line, total_mem, 568 + &crash_size, &crash_base); 569 + if (ret != 0 || crash_size <= 0) 570 + return; 571 + 572 + crashk_res.start = crash_base; 573 + crashk_res.end = crash_base + crash_size - 1; 574 + } 575 + 576 + static void __init request_crashkernel(struct resource *res) 577 + { 578 + int ret; 579 + 580 + ret = request_resource(res, &crashk_res); 581 + if (!ret) 582 + pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", 583 + (unsigned long)((crashk_res.end - 584 + crashk_res.start + 1) >> 20), 585 + (unsigned long)(crashk_res.start >> 20)); 586 + } 587 + #else /* !defined(CONFIG_KEXEC) */ 588 + static void __init mips_parse_crashkernel(void) 589 + { 590 + } 591 + 592 + static void __init request_crashkernel(struct resource *res) 593 + { 594 + } 595 + #endif /* !defined(CONFIG_KEXEC) */ 545 596 546 597 static void __init resource_init(void) 547 598 { ··· 610 557 /* 611 558 * Request address space for all standard RAM. 
612 559 */ 560 + mips_parse_crashkernel(); 561 + 613 562 for (i = 0; i < boot_mem_map.nr_map; i++) { 614 563 struct resource *res; 615 564 unsigned long start, end; ··· 648 593 */ 649 594 request_resource(res, &code_resource); 650 595 request_resource(res, &data_resource); 596 + request_crashkernel(res); 651 597 } 652 598 } 653 599
+8 -5
arch/mips/kernel/signal.c
··· 568 568 } 569 569 570 570 if (regs->regs[0]) { 571 - if (regs->regs[2] == ERESTARTNOHAND || 572 - regs->regs[2] == ERESTARTSYS || 573 - regs->regs[2] == ERESTARTNOINTR) { 571 + switch (regs->regs[2]) { 572 + case ERESTARTNOHAND: 573 + case ERESTARTSYS: 574 + case ERESTARTNOINTR: 574 575 regs->regs[2] = regs->regs[0]; 575 576 regs->regs[7] = regs->regs[26]; 576 577 regs->cp0_epc -= 4; 577 - } 578 - if (regs->regs[2] == ERESTART_RESTARTBLOCK) { 578 + break; 579 + 580 + case ERESTART_RESTARTBLOCK: 579 581 regs->regs[2] = current->thread.abi->restart; 580 582 regs->regs[7] = regs->regs[26]; 581 583 regs->cp0_epc -= 4; 584 + break; 582 585 } 583 586 regs->regs[0] = 0; /* Don't deal with this again. */ 584 587 }
+17
arch/mips/kernel/smp.c
··· 386 386 387 387 EXPORT_SYMBOL(flush_tlb_page); 388 388 EXPORT_SYMBOL(flush_tlb_one); 389 + 390 + #if defined(CONFIG_KEXEC) 391 + void (*dump_ipi_function_ptr)(void *) = NULL; 392 + void dump_send_ipi(void (*dump_ipi_callback)(void *)) 393 + { 394 + int i; 395 + int cpu = smp_processor_id(); 396 + 397 + dump_ipi_function_ptr = dump_ipi_callback; 398 + smp_mb(); 399 + for_each_online_cpu(i) 400 + if (i != cpu) 401 + mp_ops->send_ipi_single(i, SMP_DUMP); 402 + 403 + } 404 + EXPORT_SYMBOL(dump_send_ipi); 405 + #endif
+22 -3
arch/mips/kernel/traps.c
··· 13 13 */ 14 14 #include <linux/bug.h> 15 15 #include <linux/compiler.h> 16 + #include <linux/kexec.h> 16 17 #include <linux/init.h> 17 18 #include <linux/kernel.h> 18 19 #include <linux/module.h> ··· 409 408 ssleep(5); 410 409 panic("Fatal exception"); 411 410 } 411 + 412 + if (regs && kexec_should_crash(current)) 413 + crash_kexec(regs); 412 414 413 415 do_exit(sig); 414 416 } ··· 1025 1021 1026 1022 return; 1027 1023 1024 + case 3: 1025 + /* 1026 + * Old (MIPS I and MIPS II) processors will set this code 1027 + * for COP1X opcode instructions that replaced the original 1028 + * COP3 space. We don't limit COP1 space instructions in 1029 + * the emulator according to the CPU ISA, so we want to 1030 + * treat COP1X instructions consistently regardless of which 1031 + * code the CPU chose. Therefore we redirect this trap to 1032 + * the FP emulator too. 1033 + * 1034 + * Then some newer FPU-less processors use this code 1035 + * erroneously too, so they are covered by this choice 1036 + * as well. 1037 + */ 1038 + if (raw_cpu_has_fpu) 1039 + break; 1040 + /* Fall through. */ 1041 + 1028 1042 case 1: 1029 1043 if (used_math()) /* Using the FPU again. */ 1030 1044 own_fpu(1); ··· 1066 1044 case 2: 1067 1045 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); 1068 1046 return; 1069 - 1070 - case 3: 1071 - break; 1072 1047 } 1073 1048 1074 1049 force_sig(SIGILL, current);
+4
arch/mips/lantiq/Kconfig
··· 36 36 bool "PCI Support" 37 37 depends on SOC_XWAY && PCI 38 38 39 + config XRX200_PHY_FW 40 + bool "XRX200 PHY firmware loader" 41 + depends on SOC_XWAY 42 + 39 43 endif
+1 -4
arch/mips/lantiq/prom.c
··· 87 87 reserve_bootmem(base, size, BOOTMEM_DEFAULT); 88 88 89 89 unflatten_device_tree(); 90 - 91 - /* free the space reserved for the dt blob */ 92 - free_bootmem(base, size); 93 90 } 94 91 95 92 void __init prom_init(void) ··· 116 119 sizeof(of_ids[0].compatible)); 117 120 strncpy(of_ids[1].compatible, "simple-bus", 118 121 sizeof(of_ids[1].compatible)); 119 - return of_platform_bus_probe(NULL, of_ids, NULL); 122 + return of_platform_populate(NULL, of_ids, NULL, NULL); 120 123 } 121 124 122 125 arch_initcall(plat_of_setup);
+2
arch/mips/lantiq/xway/Makefile
··· 1 1 obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o 2 + 3 + obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o
+11 -4
arch/mips/lantiq/xway/dma.c
··· 25 25 #include <lantiq_soc.h> 26 26 #include <xway_dma.h> 27 27 28 + #define LTQ_DMA_ID 0x08 28 29 #define LTQ_DMA_CTRL 0x10 29 30 #define LTQ_DMA_CPOLL 0x14 30 31 #define LTQ_DMA_CS 0x18 ··· 49 48 #define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ 50 49 #define DMA_2W_BURST BIT(1) /* 2 word burst length */ 51 50 #define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ 52 - #define DMA_ETOP_ENDIANESS (0xf << 8) /* endianess swap etop channels */ 51 + #define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */ 53 52 #define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel wheight */ 54 53 55 54 #define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x)) ··· 192 191 switch (p) { 193 192 case DMA_PORT_ETOP: 194 193 /* 195 - * Tell the DMA engine to swap the endianess of data frames and 194 + * Tell the DMA engine to swap the endianness of data frames and 196 195 * drop packets if the channel arbitration fails. 197 196 */ 198 - ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN, 197 + ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN, 199 198 LTQ_DMA_PCTRL); 200 199 break; 201 200 ··· 215 214 { 216 215 struct clk *clk; 217 216 struct resource *res; 217 + unsigned id; 218 218 int i; 219 219 220 220 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 245 243 ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); 246 244 ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); 247 245 } 248 - dev_info(&pdev->dev, "init done\n"); 246 + 247 + id = ltq_dma_r32(LTQ_DMA_ID); 248 + dev_info(&pdev->dev, 249 + "Init done - hw rev: %X, ports: %d, channels: %d\n", 250 + id & 0x1f, (id >> 16) & 0xf, id >> 20); 251 + 249 252 return 0; 250 253 } 251 254
+51 -7
arch/mips/lantiq/xway/reset.c
··· 28 28 #define RCU_RST_REQ 0x0010 29 29 /* reset status register */ 30 30 #define RCU_RST_STAT 0x0014 31 + /* vr9 gphy registers */ 32 + #define RCU_GFS_ADD0_XRX200 0x0020 33 + #define RCU_GFS_ADD1_XRX200 0x0068 31 34 32 35 /* reboot bit */ 36 + #define RCU_RD_GPHY0_XRX200 BIT(31) 33 37 #define RCU_RD_SRST BIT(30) 38 + #define RCU_RD_GPHY1_XRX200 BIT(29) 39 + 34 40 /* reset cause */ 35 41 #define RCU_STAT_SHIFT 26 36 42 /* boot selection */ 37 - #define RCU_BOOT_SEL_SHIFT 26 38 - #define RCU_BOOT_SEL_MASK 0x7 43 + #define RCU_BOOT_SEL(x) ((x >> 18) & 0x7) 44 + #define RCU_BOOT_SEL_XRX200(x) (((x >> 17) & 0xf) | ((x >> 8) & 0x10)) 39 45 40 46 /* remapped base addr of the reset control unit */ 41 47 static void __iomem *ltq_rcu_membase; 48 + static struct device_node *ltq_rcu_np; 42 49 43 50 /* This function is used by the watchdog driver */ 44 51 int ltq_reset_cause(void) ··· 59 52 unsigned char ltq_boot_select(void) 60 53 { 61 54 u32 val = ltq_rcu_r32(RCU_RST_STAT); 62 - return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK; 55 + 56 + if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) 57 + return RCU_BOOT_SEL_XRX200(val); 58 + 59 + return RCU_BOOT_SEL(val); 60 + } 61 + 62 + /* reset / boot a gphy */ 63 + static struct ltq_xrx200_gphy_reset { 64 + u32 rd; 65 + u32 addr; 66 + } xrx200_gphy[] = { 67 + {RCU_RD_GPHY0_XRX200, RCU_GFS_ADD0_XRX200}, 68 + {RCU_RD_GPHY1_XRX200, RCU_GFS_ADD1_XRX200}, 69 + }; 70 + 71 + /* reset and boot a gphy. 
these phys only exist on xrx200 SoC */ 72 + int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr) 73 + { 74 + if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) { 75 + dev_err(dev, "this SoC has no GPHY\n"); 76 + return -EINVAL; 77 + } 78 + if (id > 1) { 79 + dev_err(dev, "%u is an invalid gphy id\n", id); 80 + return -EINVAL; 81 + } 82 + dev_info(dev, "booting GPHY%u firmware at %X\n", id, dev_addr); 83 + 84 + ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | xrx200_gphy[id].rd, 85 + RCU_RST_REQ); 86 + ltq_rcu_w32(dev_addr, xrx200_gphy[id].addr); 87 + ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~xrx200_gphy[id].rd, 88 + RCU_RST_REQ); 89 + return 0; 63 90 } 64 91 65 92 /* reset a io domain for u micro seconds */ ··· 126 85 static int __init mips_reboot_setup(void) 127 86 { 128 87 struct resource res; 129 - struct device_node *np = 130 - of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway"); 88 + 89 + ltq_rcu_np = of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway"); 90 + if (!ltq_rcu_np) 91 + ltq_rcu_np = of_find_compatible_node(NULL, NULL, 92 + "lantiq,rcu-xrx200"); 131 93 132 94 /* check if all the reset register range is available */ 133 - if (!np) 95 + if (!ltq_rcu_np) 134 96 panic("Failed to load reset resources from devicetree"); 135 97 136 - if (of_address_to_resource(np, 0, &res)) 98 + if (of_address_to_resource(ltq_rcu_np, 0, &res)) 137 99 panic("Failed to get rcu memory range"); 138 100 139 101 if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+4
arch/mips/lantiq/xway/sysctrl.c
··· 370 370 clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI); 371 371 clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL); 372 372 clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS); 373 + clkdev_add_pmu("1e108000.eth", NULL, 0, 374 + PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | 375 + PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | 376 + PMU_PPE_QSB | PMU_PPE_TOP); 373 377 } else if (of_machine_is_compatible("lantiq,ar9")) { 374 378 clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(), 375 379 ltq_ar9_fpi_hz());
+97
arch/mips/lantiq/xway/xrx200_phy_fw.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org> 7 + */ 8 + 9 + #include <linux/delay.h> 10 + #include <linux/dma-mapping.h> 11 + #include <linux/module.h> 12 + #include <linux/firmware.h> 13 + #include <linux/of_platform.h> 14 + 15 + #include <lantiq_soc.h> 16 + 17 + #define XRX200_GPHY_FW_ALIGN (16 * 1024) 18 + 19 + static dma_addr_t xway_gphy_load(struct platform_device *pdev) 20 + { 21 + const struct firmware *fw; 22 + dma_addr_t dev_addr = 0; 23 + const char *fw_name; 24 + void *fw_addr; 25 + size_t size; 26 + 27 + if (of_property_read_string(pdev->dev.of_node, "firmware", &fw_name)) { 28 + dev_err(&pdev->dev, "failed to load firmware filename\n"); 29 + return 0; 30 + } 31 + 32 + dev_info(&pdev->dev, "requesting %s\n", fw_name); 33 + if (request_firmware(&fw, fw_name, &pdev->dev)) { 34 + dev_err(&pdev->dev, "failed to load firmware: %s\n", fw_name); 35 + return 0; 36 + } 37 + 38 + /* 39 + * GPHY cores need the firmware code in a persistent and contiguous 40 + * memory area with a 16 kB boundary aligned start address 41 + */ 42 + size = fw->size + XRX200_GPHY_FW_ALIGN; 43 + 44 + fw_addr = dma_alloc_coherent(&pdev->dev, size, &dev_addr, GFP_KERNEL); 45 + if (fw_addr) { 46 + fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); 47 + dev_addr = ALIGN(dev_addr, XRX200_GPHY_FW_ALIGN); 48 + memcpy(fw_addr, fw->data, fw->size); 49 + } else { 50 + dev_err(&pdev->dev, "failed to alloc firmware memory\n"); 51 + } 52 + 53 + release_firmware(fw); 54 + return dev_addr; 55 + } 56 + 57 + static int __devinit xway_phy_fw_probe(struct platform_device *pdev) 58 + { 59 + dma_addr_t fw_addr; 60 + struct property *pp; 61 + unsigned char *phyids; 62 + int i, ret = 0; 63 + 64 + fw_addr = xway_gphy_load(pdev); 65 + if (!fw_addr) 66 + return -EINVAL; 67 + pp = 
of_find_property(pdev->dev.of_node, "phys", NULL); 68 + if (!pp) 69 + return -ENOENT; 70 + phyids = pp->value; 71 + for (i = 0; i < pp->length && !ret; i++) 72 + ret = xrx200_gphy_boot(&pdev->dev, phyids[i], fw_addr); 73 + if (!ret) 74 + mdelay(100); 75 + return ret; 76 + } 77 + 78 + static const struct of_device_id xway_phy_match[] = { 79 + { .compatible = "lantiq,phy-xrx200" }, 80 + {}, 81 + }; 82 + MODULE_DEVICE_TABLE(of, xway_phy_match); 83 + 84 + static struct platform_driver xway_phy_driver = { 85 + .probe = xway_phy_fw_probe, 86 + .driver = { 87 + .name = "phy-xrx200", 88 + .owner = THIS_MODULE, 89 + .of_match_table = xway_phy_match, 90 + }, 91 + }; 92 + 93 + module_platform_driver(xway_phy_driver); 94 + 95 + MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); 96 + MODULE_DESCRIPTION("Lantiq XRX200 PHY Firmware Loader"); 97 + MODULE_LICENSE("GPL");
+1 -1
arch/mips/loongson1/Kconfig
··· 15 15 select SYS_SUPPORTS_LITTLE_ENDIAN 16 16 select SYS_SUPPORTS_HIGHMEM 17 17 select SYS_HAS_EARLY_PRINTK 18 - select HAVE_CLK 18 + select COMMON_CLK 19 19 20 20 endchoice 21 21
+3 -156
arch/mips/loongson1/common/clock.c
··· 7 7 * option) any later version. 8 8 */ 9 9 10 - #include <linux/module.h> 11 - #include <linux/list.h> 12 - #include <linux/mutex.h> 13 10 #include <linux/clk.h> 14 11 #include <linux/err.h> 15 - #include <asm/clock.h> 16 12 #include <asm/time.h> 17 - 18 - #include <loongson1.h> 19 - 20 - static LIST_HEAD(clocks); 21 - static DEFINE_MUTEX(clocks_mutex); 22 - 23 - struct clk *clk_get(struct device *dev, const char *name) 24 - { 25 - struct clk *c; 26 - struct clk *ret = NULL; 27 - 28 - mutex_lock(&clocks_mutex); 29 - list_for_each_entry(c, &clocks, node) { 30 - if (!strcmp(c->name, name)) { 31 - ret = c; 32 - break; 33 - } 34 - } 35 - mutex_unlock(&clocks_mutex); 36 - 37 - return ret; 38 - } 39 - EXPORT_SYMBOL(clk_get); 40 - 41 - int clk_enable(struct clk *clk) 42 - { 43 - return 0; 44 - } 45 - EXPORT_SYMBOL(clk_enable); 46 - 47 - void clk_disable(struct clk *clk) 48 - { 49 - } 50 - EXPORT_SYMBOL(clk_disable); 51 - 52 - unsigned long clk_get_rate(struct clk *clk) 53 - { 54 - return clk->rate; 55 - } 56 - EXPORT_SYMBOL(clk_get_rate); 57 - 58 - void clk_put(struct clk *clk) 59 - { 60 - } 61 - EXPORT_SYMBOL(clk_put); 62 - 63 - static void pll_clk_init(struct clk *clk) 64 - { 65 - u32 pll; 66 - 67 - pll = __raw_readl(LS1X_CLK_PLL_FREQ); 68 - clk->rate = (12 + (pll & 0x3f)) * 33 / 2 69 - + ((pll >> 8) & 0x3ff) * 33 / 1024 / 2; 70 - clk->rate *= 1000000; 71 - } 72 - 73 - static void cpu_clk_init(struct clk *clk) 74 - { 75 - u32 pll, ctrl; 76 - 77 - pll = clk_get_rate(clk->parent); 78 - ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_CPU; 79 - clk->rate = pll / (ctrl >> DIV_CPU_SHIFT); 80 - } 81 - 82 - static void ddr_clk_init(struct clk *clk) 83 - { 84 - u32 pll, ctrl; 85 - 86 - pll = clk_get_rate(clk->parent); 87 - ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_DDR; 88 - clk->rate = pll / (ctrl >> DIV_DDR_SHIFT); 89 - } 90 - 91 - static void dc_clk_init(struct clk *clk) 92 - { 93 - u32 pll, ctrl; 94 - 95 - pll = clk_get_rate(clk->parent); 96 - ctrl = 
__raw_readl(LS1X_CLK_PLL_DIV) & DIV_DC; 97 - clk->rate = pll / (ctrl >> DIV_DC_SHIFT); 98 - } 99 - 100 - static struct clk_ops pll_clk_ops = { 101 - .init = pll_clk_init, 102 - }; 103 - 104 - static struct clk_ops cpu_clk_ops = { 105 - .init = cpu_clk_init, 106 - }; 107 - 108 - static struct clk_ops ddr_clk_ops = { 109 - .init = ddr_clk_init, 110 - }; 111 - 112 - static struct clk_ops dc_clk_ops = { 113 - .init = dc_clk_init, 114 - }; 115 - 116 - static struct clk pll_clk = { 117 - .name = "pll", 118 - .ops = &pll_clk_ops, 119 - }; 120 - 121 - static struct clk cpu_clk = { 122 - .name = "cpu", 123 - .parent = &pll_clk, 124 - .ops = &cpu_clk_ops, 125 - }; 126 - 127 - static struct clk ddr_clk = { 128 - .name = "ddr", 129 - .parent = &pll_clk, 130 - .ops = &ddr_clk_ops, 131 - }; 132 - 133 - static struct clk dc_clk = { 134 - .name = "dc", 135 - .parent = &pll_clk, 136 - .ops = &dc_clk_ops, 137 - }; 138 - 139 - int clk_register(struct clk *clk) 140 - { 141 - mutex_lock(&clocks_mutex); 142 - list_add(&clk->node, &clocks); 143 - if (clk->ops->init) 144 - clk->ops->init(clk); 145 - mutex_unlock(&clocks_mutex); 146 - 147 - return 0; 148 - } 149 - EXPORT_SYMBOL(clk_register); 150 - 151 - static struct clk *ls1x_clks[] = { 152 - &pll_clk, 153 - &cpu_clk, 154 - &ddr_clk, 155 - &dc_clk, 156 - }; 157 - 158 - int __init ls1x_clock_init(void) 159 - { 160 - int i; 161 - 162 - for (i = 0; i < ARRAY_SIZE(ls1x_clks); i++) 163 - clk_register(ls1x_clks[i]); 164 - 165 - return 0; 166 - } 13 + #include <platform.h> 167 14 168 15 void __init plat_time_init(void) 169 16 { 170 17 struct clk *clk; 171 18 172 19 /* Initialize LS1X clocks */ 173 - ls1x_clock_init(); 20 + ls1x_clk_init(); 174 21 175 22 /* setup mips r4k timer */ 176 23 clk = clk_get(NULL, "cpu"); 177 24 if (IS_ERR(clk)) 178 - panic("unable to get dc clock, err=%ld", PTR_ERR(clk)); 25 + panic("unable to get cpu clock, err=%ld", PTR_ERR(clk)); 179 26 180 27 mips_hpt_frequency = clk_get_rate(clk) / 2; 181 28 }
+5 -5
arch/mips/loongson1/common/platform.c
··· 43 43 }, 44 44 }; 45 45 46 - void __init ls1x_serial_setup(void) 46 + void __init ls1x_serial_setup(struct platform_device *pdev) 47 47 { 48 48 struct clk *clk; 49 49 struct plat_serial8250_port *p; 50 50 51 - clk = clk_get(NULL, "dc"); 51 + clk = clk_get(NULL, pdev->name); 52 52 if (IS_ERR(clk)) 53 - panic("unable to get dc clock, err=%ld", PTR_ERR(clk)); 53 + panic("unable to get %s clock, err=%ld", 54 + pdev->name, PTR_ERR(clk)); 54 55 55 - for (p = ls1x_serial8250_port; p->flags != 0; ++p) 56 + for (p = pdev->dev.platform_data; p->flags != 0; ++p) 56 57 p->uartclk = clk_get_rate(clk); 57 58 } 58 59 ··· 72 71 }; 73 72 74 73 static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = { 75 - .bus_id = 0, 76 74 .phy_mask = 0, 77 75 }; 78 76
+1 -4
arch/mips/loongson1/ls1b/board.c
··· 9 9 10 10 #include <platform.h> 11 11 12 - #include <linux/serial_8250.h> 13 - #include <loongson1.h> 14 - 15 12 static struct platform_device *ls1b_platform_devices[] __initdata = { 16 13 &ls1x_uart_device, 17 14 &ls1x_eth0_device, ··· 20 23 { 21 24 int err; 22 25 23 - ls1x_serial_setup(); 26 + ls1x_serial_setup(&ls1x_uart_device); 24 27 25 28 err = platform_add_devices(ls1b_platform_devices, 26 29 ARRAY_SIZE(ls1b_platform_devices));
+8 -7
arch/mips/math-emu/cp1emu.c
··· 171 171 * In the Linux kernel, we support selection of FPR format on the 172 172 * basis of the Status.FR bit. If an FPU is not present, the FR bit 173 173 * is hardwired to zero, which would imply a 32-bit FPU even for 174 - * 64-bit CPUs. For 64-bit kernels with no FPU we use TIF_32BIT_REGS 175 - * as a proxy for the FR bit so that a 64-bit FPU is emulated. In any 176 - * case, for a 32-bit kernel which uses the O32 MIPS ABI, only the 177 - * even FPRs are used (Status.FR = 0). 174 + * 64-bit CPUs so we rather look at TIF_32BIT_REGS. 175 + * FPU emu is slow and bulky and optimizing this function offers fairly 176 + * sizeable benefits so we try to be clever and make this function return 177 + * a constant whenever possible, that is on 64-bit kernels without O32 178 + * compatibility enabled and on 32-bit kernels. 178 179 */ 179 180 static inline int cop1_64bit(struct pt_regs *xcp) 180 181 { 181 - if (cpu_has_fpu) 182 - return xcp->cp0_status & ST0_FR; 183 - #ifdef CONFIG_64BIT 182 + #if defined(CONFIG_64BIT) && !defined(CONFIG_MIPS32_O32) 183 + return 1; 184 + #elif defined(CONFIG_64BIT) && defined(CONFIG_MIPS32_O32) 184 185 return !test_thread_flag(TIF_32BIT_REGS); 185 186 #else 186 187 return 0;
+44 -21
arch/mips/mm/c-octeon.c
··· 5 5 * 6 6 * Copyright (C) 2005-2007 Cavium Networks 7 7 */ 8 + #include <linux/export.h> 8 9 #include <linux/init.h> 9 10 #include <linux/kernel.h> 10 11 #include <linux/sched.h> ··· 29 28 #include <asm/octeon/octeon.h> 30 29 31 30 unsigned long long cache_err_dcache[NR_CPUS]; 31 + EXPORT_SYMBOL_GPL(cache_err_dcache); 32 32 33 33 /** 34 34 * Octeon automatically flushes the dcache on tlb changes, so ··· 286 284 board_cache_error_setup = octeon_cache_error_setup; 287 285 } 288 286 289 - /** 287 + /* 290 288 * Handle a cache error exception 291 289 */ 290 + static RAW_NOTIFIER_HEAD(co_cache_error_chain); 292 291 293 - static void cache_parity_error_octeon(int non_recoverable) 292 + int register_co_cache_error_notifier(struct notifier_block *nb) 294 293 { 295 - unsigned long coreid = cvmx_get_core_num(); 296 - uint64_t icache_err = read_octeon_c0_icacheerr(); 294 + return raw_notifier_chain_register(&co_cache_error_chain, nb); 295 + } 296 + EXPORT_SYMBOL_GPL(register_co_cache_error_notifier); 297 297 298 - pr_err("Cache error exception:\n"); 299 - pr_err("cp0_errorepc == %lx\n", read_c0_errorepc()); 300 - if (icache_err & 1) { 301 - pr_err("CacheErr (Icache) == %llx\n", 302 - (unsigned long long)icache_err); 303 - write_octeon_c0_icacheerr(0); 304 - } 305 - if (cache_err_dcache[coreid] & 1) { 306 - pr_err("CacheErr (Dcache) == %llx\n", 307 - (unsigned long long)cache_err_dcache[coreid]); 308 - cache_err_dcache[coreid] = 0; 309 - } 298 + int unregister_co_cache_error_notifier(struct notifier_block *nb) 299 + { 300 + return raw_notifier_chain_unregister(&co_cache_error_chain, nb); 301 + } 302 + EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier); 310 303 311 - if (non_recoverable) 312 - panic("Can't handle cache error: nested exception"); 304 + static void co_cache_error_call_notifiers(unsigned long val) 305 + { 306 + int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL); 307 + if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) { 308 + u64 dcache_err; 309 + 
unsigned long coreid = cvmx_get_core_num(); 310 + u64 icache_err = read_octeon_c0_icacheerr(); 311 + 312 + if (val) { 313 + dcache_err = cache_err_dcache[coreid]; 314 + cache_err_dcache[coreid] = 0; 315 + } else { 316 + dcache_err = read_octeon_c0_dcacheerr(); 317 + } 318 + 319 + pr_err("Core%lu: Cache error exception:\n", coreid); 320 + pr_err("cp0_errorepc == %lx\n", read_c0_errorepc()); 321 + if (icache_err & 1) { 322 + pr_err("CacheErr (Icache) == %llx\n", 323 + (unsigned long long)icache_err); 324 + write_octeon_c0_icacheerr(0); 325 + } 326 + if (dcache_err & 1) { 327 + pr_err("CacheErr (Dcache) == %llx\n", 328 + (unsigned long long)dcache_err); 329 + } 330 + } 313 331 } 314 332 315 - /** 333 + /* 316 334 * Called when the the exception is recoverable 317 335 */ 318 336 319 337 asmlinkage void cache_parity_error_octeon_recoverable(void) 320 338 { 321 - cache_parity_error_octeon(0); 339 + co_cache_error_call_notifiers(0); 322 340 } 323 341 324 342 /** ··· 347 325 348 326 asmlinkage void cache_parity_error_octeon_non_recoverable(void) 349 327 { 350 - cache_parity_error_octeon(1); 328 + co_cache_error_call_notifiers(1); 329 + panic("Can't handle cache error: nested exception"); 351 330 }
+4 -19
arch/mips/mm/c-r4k.c
··· 632 632 if (size >= scache_size) 633 633 r4k_blast_scache(); 634 634 else { 635 - unsigned long lsize = cpu_scache_line_size(); 636 - unsigned long almask = ~(lsize - 1); 637 - 638 635 /* 639 636 * There is no clearly documented alignment requirement 640 637 * for the cache instruction on MIPS processors and ··· 640 643 * hit ops with insufficient alignment. Solved by 641 644 * aligning the address to cache line size. 642 645 */ 643 - cache_op(Hit_Writeback_Inv_SD, addr & almask); 644 - cache_op(Hit_Writeback_Inv_SD, 645 - (addr + size - 1) & almask); 646 646 blast_inv_scache_range(addr, addr + size); 647 647 } 648 648 __sync(); ··· 649 655 if (cpu_has_safe_index_cacheops && size >= dcache_size) { 650 656 r4k_blast_dcache(); 651 657 } else { 652 - unsigned long lsize = cpu_dcache_line_size(); 653 - unsigned long almask = ~(lsize - 1); 654 - 655 658 R4600_HIT_CACHEOP_WAR_IMPL; 656 - cache_op(Hit_Writeback_Inv_D, addr & almask); 657 - cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask); 658 659 blast_inv_dcache_range(addr, addr + size); 659 660 } 660 661 ··· 936 947 case CPU_RM7000: 937 948 rm7k_erratum31(); 938 949 939 - case CPU_RM9000: 940 950 icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); 941 951 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 942 952 c->icache.ways = 4; ··· 946 958 c->dcache.ways = 4; 947 959 c->dcache.waybit = __ffs(dcache_size / c->dcache.ways); 948 960 949 - #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR) 950 961 c->options |= MIPS_CPU_CACHE_CDEX_P; 951 - #endif 952 962 c->options |= MIPS_CPU_PREFETCH; 953 963 break; 954 964 ··· 1231 1245 return; 1232 1246 1233 1247 case CPU_RM7000: 1234 - case CPU_RM9000: 1235 1248 #ifdef CONFIG_RM7000_CPU_SCACHE 1236 1249 rm7k_sc_init(); 1237 1250 #endif ··· 1333 1348 { 1334 1349 get_option(&str, &cca); 1335 1350 1336 - return 1; 1351 + return 0; 1337 1352 } 1338 1353 1339 - __setup("cca=", cca_setup); 1354 + early_param("cca", cca_setup); 1340 1355 1341 1356 static void 
__cpuinit coherency_setup(void) 1342 1357 { ··· 1386 1401 { 1387 1402 coherentio = 1; 1388 1403 1389 - return 1; 1404 + return 0; 1390 1405 } 1391 1406 1392 - __setup("coherentio", setcoherentio); 1407 + early_param("coherentio", setcoherentio); 1393 1408 #endif 1394 1409 1395 1410 static void __cpuinit r4k_cache_error_setup(void)
+2 -1
arch/mips/mm/highmem.c
··· 1 + #include <linux/compiler.h> 1 2 #include <linux/module.h> 2 3 #include <linux/highmem.h> 3 4 #include <linux/sched.h> ··· 68 67 void __kunmap_atomic(void *kvaddr) 69 68 { 70 69 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 71 - int type; 70 + int type __maybe_unused; 72 71 73 72 if (vaddr < FIXADDR_START) { // FIXME 74 73 pagefault_enable();
-9
arch/mips/mm/page.c
··· 140 140 pref_bias_copy_load = 256; 141 141 break; 142 142 143 - case CPU_RM9000: 144 - /* 145 - * As a workaround for erratum G105 which make the 146 - * PrepareForStore hint unusable we fall back to 147 - * StoreRetained on the RM9000. Once it is known which 148 - * versions of the RM9000 we'll be able to condition- 149 - * alize this. 150 - */ 151 - 152 143 case CPU_R10000: 153 144 case CPU_R12000: 154 145 case CPU_R14000:
+31
arch/mips/mm/pgtable-64.c
··· 11 11 #include <asm/fixmap.h> 12 12 #include <asm/pgtable.h> 13 13 #include <asm/pgalloc.h> 14 + #include <asm/tlbflush.h> 14 15 15 16 void pgd_init(unsigned long page) 16 17 { ··· 61 60 } while (p != end); 62 61 } 63 62 #endif 63 + 64 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 65 + 66 + void pmdp_splitting_flush(struct vm_area_struct *vma, 67 + unsigned long address, 68 + pmd_t *pmdp) 69 + { 70 + if (!pmd_trans_splitting(*pmdp)) { 71 + pmd_t pmd = pmd_mksplitting(*pmdp); 72 + set_pmd_at(vma->vm_mm, address, pmdp, pmd); 73 + } 74 + } 75 + 76 + #endif 77 + 78 + pmd_t mk_pmd(struct page *page, pgprot_t prot) 79 + { 80 + pmd_t pmd; 81 + 82 + pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); 83 + 84 + return pmd; 85 + } 86 + 87 + void set_pmd_at(struct mm_struct *mm, unsigned long addr, 88 + pmd_t *pmdp, pmd_t pmd) 89 + { 90 + *pmdp = pmd; 91 + flush_tlb_all(); 92 + } 64 93 65 94 void __init pagetable_init(void) 66 95 {
+21 -1
arch/mips/mm/tlb-r4k.c
··· 295 295 pudp = pud_offset(pgdp, address); 296 296 pmdp = pmd_offset(pudp, address); 297 297 idx = read_c0_index(); 298 - #ifdef CONFIG_HUGETLB_PAGE 298 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 299 299 /* this could be a huge page */ 300 300 if (pmd_huge(*pmdp)) { 301 301 unsigned long lo; ··· 366 366 local_flush_tlb_all(); 367 367 EXIT_CRITICAL(flags); 368 368 } 369 + 370 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 371 + 372 + int __init has_transparent_hugepage(void) 373 + { 374 + unsigned int mask; 375 + unsigned long flags; 376 + 377 + ENTER_CRITICAL(flags); 378 + write_c0_pagemask(PM_HUGE_MASK); 379 + back_to_back_c0_hazard(); 380 + mask = read_c0_pagemask(); 381 + write_c0_pagemask(PM_DEFAULT_MASK); 382 + 383 + EXIT_CRITICAL(flags); 384 + 385 + return mask == PM_HUGE_MASK; 386 + } 387 + 388 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 369 389 370 390 static int __cpuinitdata ntlb; 371 391 static int __init set_ntlb(char *str)
+75 -48
arch/mips/mm/tlbex.c
··· 158 158 label_smp_pgtable_change, 159 159 label_r3000_write_probe_fail, 160 160 label_large_segbits_fault, 161 - #ifdef CONFIG_HUGETLB_PAGE 161 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 162 162 label_tlb_huge_update, 163 163 #endif 164 164 }; ··· 177 177 UASM_L_LA(_smp_pgtable_change) 178 178 UASM_L_LA(_r3000_write_probe_fail) 179 179 UASM_L_LA(_large_segbits_fault) 180 - #ifdef CONFIG_HUGETLB_PAGE 180 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 181 181 UASM_L_LA(_tlb_huge_update) 182 182 #endif 183 183 184 184 static int __cpuinitdata hazard_instance; 185 185 186 - static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance) 186 + static void __cpuinit uasm_bgezl_hazard(u32 **p, 187 + struct uasm_reloc **r, 188 + int instance) 187 189 { 188 190 switch (instance) { 189 191 case 0 ... 7: ··· 196 194 } 197 195 } 198 196 199 - static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance) 197 + static void __cpuinit uasm_bgezl_label(struct uasm_label **l, 198 + u32 **p, 199 + int instance) 200 200 { 201 201 switch (instance) { 202 202 case 0 ... 7: ··· 210 206 } 211 207 212 208 /* 213 - * For debug purposes. 209 + * pgtable bits are assigned dynamically depending on processor feature 210 + * and statically based on kernel configuration. This spits out the actual 211 + * values the kernel is using. Required to make sense from disassembled 212 + * TLB exception handlers. 214 213 */ 215 - static inline void dump_handler(const u32 *handler, int count) 214 + static void output_pgtable_bits_defines(void) 215 + { 216 + #define pr_define(fmt, ...) 
\ 217 + pr_debug("#define " fmt, ##__VA_ARGS__) 218 + 219 + pr_debug("#include <asm/asm.h>\n"); 220 + pr_debug("#include <asm/regdef.h>\n"); 221 + pr_debug("\n"); 222 + 223 + pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); 224 + pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT); 225 + pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); 226 + pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT); 227 + pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT); 228 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 229 + pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); 230 + pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT); 231 + #endif 232 + if (cpu_has_rixi) { 233 + #ifdef _PAGE_NO_EXEC_SHIFT 234 + pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); 235 + #endif 236 + #ifdef _PAGE_NO_READ_SHIFT 237 + pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); 238 + #endif 239 + } 240 + pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); 241 + pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); 242 + pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); 243 + pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); 244 + pr_debug("\n"); 245 + } 246 + 247 + static inline void dump_handler(const char *symbol, const u32 *handler, int count) 216 248 { 217 249 int i; 250 + 251 + pr_debug("LEAF(%s)\n", symbol); 218 252 219 253 pr_debug("\t.set push\n"); 220 254 pr_debug("\t.set noreorder\n"); 221 255 222 256 for (i = 0; i < count; i++) 223 - pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]); 257 + pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]); 224 258 225 - pr_debug("\t.set pop\n"); 259 + pr_debug("\t.set\tpop\n"); 260 + 261 + pr_debug("\tEND(%s)\n", symbol); 226 262 } 227 263 228 264 /* The only general purpose registers allowed in TLB handlers. 
*/ ··· 445 401 446 402 memcpy((void *)ebase, tlb_handler, 0x80); 447 403 448 - dump_handler((u32 *)ebase, 32); 404 + dump_handler("r3000_tlb_refill", (u32 *)ebase, 32); 449 405 } 450 406 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 451 407 ··· 487 443 case CPU_R4600: 488 444 case CPU_R4700: 489 445 case CPU_R5000: 490 - case CPU_R5000A: 491 446 case CPU_NEVADA: 492 447 uasm_i_nop(p); 493 448 uasm_i_tlbp(p); ··· 560 517 break; 561 518 562 519 case CPU_R5000: 563 - case CPU_R5000A: 564 520 case CPU_NEVADA: 565 521 uasm_i_nop(p); /* QED specifies 2 nops hazard */ 566 522 uasm_i_nop(p); /* QED specifies 2 nops hazard */ ··· 605 563 uasm_i_nop(p); 606 564 uasm_i_nop(p); 607 565 tlbw(p); 608 - break; 609 - 610 - case CPU_RM9000: 611 - /* 612 - * When the JTLB is updated by tlbwi or tlbwr, a subsequent 613 - * use of the JTLB for instructions should not occur for 4 614 - * cpu cycles and use for data translations should not occur 615 - * for 3 cpu cycles. 616 - */ 617 - uasm_i_ssnop(p); 618 - uasm_i_ssnop(p); 619 - uasm_i_ssnop(p); 620 - uasm_i_ssnop(p); 621 - tlbw(p); 622 - uasm_i_ssnop(p); 623 - uasm_i_ssnop(p); 624 - uasm_i_ssnop(p); 625 - uasm_i_ssnop(p); 626 566 break; 627 567 628 568 case CPU_VR4111: ··· 653 629 } 654 630 } 655 631 656 - #ifdef CONFIG_HUGETLB_PAGE 632 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 657 633 658 634 static __cpuinit void build_restore_pagemask(u32 **p, 659 635 struct uasm_reloc **r, ··· 779 755 build_huge_update_entries(p, pte, ptr); 780 756 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 781 757 } 782 - #endif /* CONFIG_HUGETLB_PAGE */ 758 + #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 783 759 784 760 #ifdef CONFIG_64BIT 785 761 /* ··· 1224 1200 /* Adjust the context during the load latency. 
*/ 1225 1201 build_adjust_context(p, tmp); 1226 1202 1227 - #ifdef CONFIG_HUGETLB_PAGE 1203 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1228 1204 uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); 1229 1205 /* 1230 1206 * The in the LWX case we don't want to do the load in the ··· 1233 1209 */ 1234 1210 if (use_lwx_insns()) 1235 1211 uasm_i_nop(p); 1236 - #endif /* CONFIG_HUGETLB_PAGE */ 1212 + #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 1237 1213 1238 1214 1239 1215 /* build_update_entries */ ··· 1336 1312 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1337 1313 #endif 1338 1314 1339 - #ifdef CONFIG_HUGETLB_PAGE 1315 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1340 1316 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1341 1317 #endif 1342 1318 ··· 1346 1322 uasm_l_leave(&l, p); 1347 1323 uasm_i_eret(&p); /* return from trap */ 1348 1324 } 1349 - #ifdef CONFIG_HUGETLB_PAGE 1325 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1350 1326 uasm_l_tlb_huge_update(&l, p); 1351 1327 build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1352 1328 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, ··· 1391 1367 uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1392 1368 final_len = p - tlb_handler; 1393 1369 } else { 1394 - #if defined(CONFIG_HUGETLB_PAGE) 1370 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1395 1371 const enum label_id ls = label_tlb_huge_update; 1396 1372 #else 1397 1373 const enum label_id ls = label_vmalloc; ··· 1460 1436 1461 1437 memcpy((void *)ebase, final_handler, 0x100); 1462 1438 1463 - dump_handler((u32 *)ebase, 64); 1439 + dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); 1464 1440 } 1465 1441 1466 1442 /* ··· 1517 1493 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1518 1494 (unsigned int)(p - tlbmiss_handler_setup_pgd)); 1519 1495 1520 - dump_handler(tlbmiss_handler_setup_pgd, 1496 + dump_handler("tlbmiss_handler", 1497 + tlbmiss_handler_setup_pgd, 1521 1498 ARRAY_SIZE(tlbmiss_handler_setup_pgd)); 1522 1499 } 1523 
1500 #endif ··· 1788 1763 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 1789 1764 (unsigned int)(p - handle_tlbl)); 1790 1765 1791 - dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); 1766 + dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); 1792 1767 } 1793 1768 1794 1769 static void __cpuinit build_r3000_tlb_store_handler(void) ··· 1818 1793 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 1819 1794 (unsigned int)(p - handle_tlbs)); 1820 1795 1821 - dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); 1796 + dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); 1822 1797 } 1823 1798 1824 1799 static void __cpuinit build_r3000_tlb_modify_handler(void) ··· 1848 1823 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 1849 1824 (unsigned int)(p - handle_tlbm)); 1850 1825 1851 - dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); 1826 + dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); 1852 1827 } 1853 1828 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ 1854 1829 ··· 1867 1842 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ 1868 1843 #endif 1869 1844 1870 - #ifdef CONFIG_HUGETLB_PAGE 1845 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1871 1846 /* 1872 1847 * For huge tlb entries, pmd doesn't contain an address but 1873 1848 * instead contains the tlb pte. Check the PAGE_HUGE bit and ··· 1983 1958 build_make_valid(&p, &r, wr.r1, wr.r2); 1984 1959 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 1985 1960 1986 - #ifdef CONFIG_HUGETLB_PAGE 1961 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1987 1962 /* 1988 1963 * This is the entry point when build_r4000_tlbchange_handler_head 1989 1964 * spots a huge page. 
··· 2055 2030 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 2056 2031 (unsigned int)(p - handle_tlbl)); 2057 2032 2058 - dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); 2033 + dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); 2059 2034 } 2060 2035 2061 2036 static void __cpuinit build_r4000_tlb_store_handler(void) ··· 2076 2051 build_make_write(&p, &r, wr.r1, wr.r2); 2077 2052 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2078 2053 2079 - #ifdef CONFIG_HUGETLB_PAGE 2054 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2080 2055 /* 2081 2056 * This is the entry point when 2082 2057 * build_r4000_tlbchange_handler_head spots a huge page. ··· 2102 2077 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 2103 2078 (unsigned int)(p - handle_tlbs)); 2104 2079 2105 - dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); 2080 + dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); 2106 2081 } 2107 2082 2108 2083 static void __cpuinit build_r4000_tlb_modify_handler(void) ··· 2124 2099 build_make_write(&p, &r, wr.r1, wr.r2); 2125 2100 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); 2126 2101 2127 - #ifdef CONFIG_HUGETLB_PAGE 2102 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 2128 2103 /* 2129 2104 * This is the entry point when 2130 2105 * build_r4000_tlbchange_handler_head spots a huge page. ··· 2150 2125 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 2151 2126 (unsigned int)(p - handle_tlbm)); 2152 2127 2153 - dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); 2128 + dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); 2154 2129 } 2155 2130 2156 2131 void __cpuinit build_tlb_refill_handler(void) ··· 2161 2136 * needed once. 
2162 2137 */ 2163 2138 static int run_once = 0; 2139 + 2140 + output_pgtable_bits_defines(); 2164 2141 2165 2142 #ifdef CONFIG_64BIT 2166 2143 check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+28
arch/mips/netlogic/Kconfig
··· 9 9 This DTB will be used if the firmware does not pass in a DTB 10 10 pointer to the kernel. The corresponding DTS file is at 11 11 arch/mips/netlogic/dts/xlp_evp.dts 12 + 13 + config NLM_MULTINODE 14 + bool "Support for multi-chip boards" 15 + depends on NLM_XLP_BOARD 16 + default n 17 + help 18 + Add support for boards with 2 or 4 XLPs connected over ICI. 19 + 20 + if NLM_MULTINODE 21 + choice 22 + prompt "Number of XLPs on the board" 23 + default NLM_MULTINODE_2 24 + help 25 + In the multi-node case, specify the number of SoCs on the board. 26 + 27 + config NLM_MULTINODE_2 28 + bool "Dual-XLP board" 29 + help 30 + Support boards with up to two XLPs connected over ICI. 31 + 32 + config NLM_MULTINODE_4 33 + bool "Quad-XLP board" 34 + help 35 + Support boards with up to four XLPs connected over ICI. 36 + 37 + endchoice 38 + 39 + endif 12 40 endif 13 41 14 42 config NLM_COMMON
+105 -60
arch/mips/netlogic/common/irq.c
··· 36 36 #include <linux/init.h> 37 37 #include <linux/linkage.h> 38 38 #include <linux/interrupt.h> 39 - #include <linux/spinlock.h> 40 39 #include <linux/mm.h> 41 40 #include <linux/slab.h> 42 41 #include <linux/irq.h> ··· 58 59 #elif defined(CONFIG_CPU_XLR) 59 60 #include <asm/netlogic/xlr/iomap.h> 60 61 #include <asm/netlogic/xlr/pic.h> 62 + #include <asm/netlogic/xlr/fmn.h> 61 63 #else 62 64 #error "Unknown CPU" 63 65 #endif 64 - /* 65 - * These are the routines that handle all the low level interrupt stuff. 66 - * Actions handled here are: initialization of the interrupt map, requesting of 67 - * interrupt lines by handlers, dispatching if interrupts to handlers, probing 68 - * for interrupt lines 69 - */ 70 66 71 - /* Globals */ 72 - static uint64_t nlm_irq_mask; 73 - static DEFINE_SPINLOCK(nlm_pic_lock); 67 + #ifdef CONFIG_SMP 68 + #define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \ 69 + (1ULL << IRQ_IPI_SMP_RESCHEDULE)) 70 + #else 71 + #define SMP_IRQ_MASK 0 72 + #endif 73 + #define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \ 74 + (1ull << IRQ_FMN)) 75 + 76 + struct nlm_pic_irq { 77 + void (*extra_ack)(struct irq_data *); 78 + struct nlm_soc_info *node; 79 + int picirq; 80 + int irt; 81 + int flags; 82 + }; 74 83 75 84 static void xlp_pic_enable(struct irq_data *d) 76 85 { 77 86 unsigned long flags; 78 - int irt; 87 + struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); 79 88 80 - irt = nlm_irq_to_irt(d->irq); 81 - if (irt == -1) 82 - return; 83 - spin_lock_irqsave(&nlm_pic_lock, flags); 84 - nlm_pic_enable_irt(nlm_pic_base, irt); 85 - spin_unlock_irqrestore(&nlm_pic_lock, flags); 89 + BUG_ON(!pd); 90 + spin_lock_irqsave(&pd->node->piclock, flags); 91 + nlm_pic_enable_irt(pd->node->picbase, pd->irt); 92 + spin_unlock_irqrestore(&pd->node->piclock, flags); 86 93 } 87 94 88 95 static void xlp_pic_disable(struct irq_data *d) 89 96 { 97 + struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); 90 98 unsigned long flags; 91 - int irt; 
92 99 93 - irt = nlm_irq_to_irt(d->irq); 94 - if (irt == -1) 95 - return; 96 - spin_lock_irqsave(&nlm_pic_lock, flags); 97 - nlm_pic_disable_irt(nlm_pic_base, irt); 98 - spin_unlock_irqrestore(&nlm_pic_lock, flags); 100 + BUG_ON(!pd); 101 + spin_lock_irqsave(&pd->node->piclock, flags); 102 + nlm_pic_disable_irt(pd->node->picbase, pd->irt); 103 + spin_unlock_irqrestore(&pd->node->piclock, flags); 99 104 } 100 105 101 106 static void xlp_pic_mask_ack(struct irq_data *d) 102 107 { 103 - uint64_t mask = 1ull << d->irq; 108 + struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); 109 + uint64_t mask = 1ull << pd->picirq; 104 110 105 111 write_c0_eirr(mask); /* ack by writing EIRR */ 106 112 } 107 113 108 114 static void xlp_pic_unmask(struct irq_data *d) 109 115 { 110 - void *hd = irq_data_get_irq_handler_data(d); 111 - int irt; 116 + struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); 112 117 113 - irt = nlm_irq_to_irt(d->irq); 114 - if (irt == -1) 118 + if (!pd) 115 119 return; 116 120 117 - if (hd) { 118 - void (*extra_ack)(void *) = hd; 119 - extra_ack(d); 120 - } 121 + if (pd->extra_ack) 122 + pd->extra_ack(d); 123 + 121 124 /* Ack is a single write, no need to lock */ 122 - nlm_pic_ack(nlm_pic_base, irt); 125 + nlm_pic_ack(pd->node->picbase, pd->irt); 123 126 } 124 127 125 128 static struct irq_chip xlp_pic = { ··· 175 174 .irq_eoi = cpuintr_ack, 176 175 }; 177 176 178 - void __init init_nlm_common_irqs(void) 177 + static void __init nlm_init_percpu_irqs(void) 179 178 { 180 - int i, irq, irt; 179 + int i; 181 180 182 181 for (i = 0; i < PIC_IRT_FIRST_IRQ; i++) 183 182 irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq); 184 - 185 - for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ ; i++) 186 - irq_set_chip_and_handler(i, &xlp_pic, handle_level_irq); 187 - 188 183 #ifdef CONFIG_SMP 189 184 irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, 190 185 nlm_smp_function_ipi_handler); 191 186 irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, 
&nlm_cpu_intr, 192 187 nlm_smp_resched_ipi_handler); 193 - nlm_irq_mask |= 194 - ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE)); 195 188 #endif 189 + } 196 190 197 - for (irq = PIC_IRT_FIRST_IRQ; irq <= PIC_IRT_LAST_IRQ; irq++) { 198 - irt = nlm_irq_to_irt(irq); 191 + void nlm_setup_pic_irq(int node, int picirq, int irq, int irt) 192 + { 193 + struct nlm_pic_irq *pic_data; 194 + int xirq; 195 + 196 + xirq = nlm_irq_to_xirq(node, irq); 197 + pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL); 198 + BUG_ON(pic_data == NULL); 199 + pic_data->irt = irt; 200 + pic_data->picirq = picirq; 201 + pic_data->node = nlm_get_node(node); 202 + irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq); 203 + irq_set_handler_data(xirq, pic_data); 204 + } 205 + 206 + void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *)) 207 + { 208 + struct nlm_pic_irq *pic_data; 209 + int xirq; 210 + 211 + xirq = nlm_irq_to_xirq(node, irq); 212 + pic_data = irq_get_handler_data(xirq); 213 + pic_data->extra_ack = xack; 214 + } 215 + 216 + static void nlm_init_node_irqs(int node) 217 + { 218 + int i, irt; 219 + uint64_t irqmask; 220 + struct nlm_soc_info *nodep; 221 + 222 + pr_info("Init IRQ for node %d\n", node); 223 + nodep = nlm_get_node(node); 224 + irqmask = PERCPU_IRQ_MASK; 225 + for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) { 226 + irt = nlm_irq_to_irt(i); 199 227 if (irt == -1) 200 228 continue; 201 - nlm_irq_mask |= (1ULL << irq); 202 - nlm_pic_init_irt(nlm_pic_base, irt, irq, 0); 229 + nlm_setup_pic_irq(node, i, i, irt); 230 + /* set interrupts to first cpu in node */ 231 + nlm_pic_init_irt(nodep->picbase, irt, i, 232 + node * NLM_CPUS_PER_NODE); 233 + irqmask |= (1ull << i); 203 234 } 204 - 205 - nlm_irq_mask |= (1ULL << IRQ_TIMER); 235 + nodep->irqmask = irqmask; 206 236 } 207 237 208 238 void __init arch_init_irq(void) 209 239 { 210 240 /* Initialize the irq descriptors */ 211 - init_nlm_common_irqs(); 212 - 213 - 
write_c0_eimr(nlm_irq_mask); 241 + nlm_init_percpu_irqs(); 242 + nlm_init_node_irqs(0); 243 + write_c0_eimr(nlm_current_node()->irqmask); 244 + #if defined(CONFIG_CPU_XLR) 245 + nlm_setup_fmn_irq(); 246 + #endif 214 247 } 215 248 216 - void __cpuinit nlm_smp_irq_init(void) 249 + void nlm_smp_irq_init(int hwcpuid) 217 250 { 218 - /* set interrupt mask for non-zero cpus */ 219 - write_c0_eimr(nlm_irq_mask); 251 + int node, cpu; 252 + 253 + node = hwcpuid / NLM_CPUS_PER_NODE; 254 + cpu = hwcpuid % NLM_CPUS_PER_NODE; 255 + 256 + if (cpu == 0 && node != 0) 257 + nlm_init_node_irqs(node); 258 + write_c0_eimr(nlm_current_node()->irqmask); 220 259 } 221 260 222 261 asmlinkage void plat_irq_dispatch(void) 223 262 { 224 263 uint64_t eirr; 225 - int i; 264 + int i, node; 226 265 266 + node = nlm_nodeid(); 227 267 eirr = read_c0_eirr() & read_c0_eimr(); 228 - if (eirr & (1 << IRQ_TIMER)) { 229 - do_IRQ(IRQ_TIMER); 230 - return; 231 - } 232 268 233 269 i = __ilog2_u64(eirr); 234 270 if (i == -1) 235 271 return; 236 272 237 - do_IRQ(i); 273 + /* per-CPU IRQs don't need translation */ 274 + if (eirr & PERCPU_IRQ_MASK) { 275 + do_IRQ(i); 276 + return; 277 + } 278 + 279 + /* top level irq handling */ 280 + do_IRQ(nlm_irq_to_xirq(node, i)); 238 281 }
+51 -38
arch/mips/netlogic/common/smp.c
··· 59 59 60 60 void nlm_send_ipi_single(int logical_cpu, unsigned int action) 61 61 { 62 - int cpu = cpu_logical_map(logical_cpu); 62 + int cpu, node; 63 + uint64_t picbase; 64 + 65 + cpu = cpu_logical_map(logical_cpu); 66 + node = cpu / NLM_CPUS_PER_NODE; 67 + picbase = nlm_get_node(node)->picbase; 63 68 64 69 if (action & SMP_CALL_FUNCTION) 65 - nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0); 70 + nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0); 66 71 if (action & SMP_RESCHEDULE_YOURSELF) 67 - nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0); 72 + nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0); 68 73 } 69 74 70 75 void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) ··· 101 96 void nlm_early_init_secondary(int cpu) 102 97 { 103 98 change_c0_config(CONF_CM_CMASK, 0x3); 104 - write_c0_ebase((uint32_t)nlm_common_ebase); 105 99 #ifdef CONFIG_CPU_XLP 106 - if (hard_smp_processor_id() % 4 == 0) 100 + /* mmu init, once per core */ 101 + if (cpu % NLM_THREADS_PER_CORE == 0) 107 102 xlp_mmu_init(); 108 103 #endif 104 + write_c0_ebase(nlm_current_node()->ebase); 109 105 } 110 106 111 107 /* ··· 114 108 */ 115 109 static void __cpuinit nlm_init_secondary(void) 116 110 { 117 - current_cpu_data.core = hard_smp_processor_id() / 4; 118 - nlm_smp_irq_init(); 111 + int hwtid; 112 + 113 + hwtid = hard_smp_processor_id(); 114 + current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE; 115 + nlm_percpu_init(hwtid); 116 + nlm_smp_irq_init(hwtid); 119 117 } 120 118 121 119 void nlm_prepare_cpus(unsigned int max_cpus) ··· 130 120 131 121 void nlm_smp_finish(void) 132 122 { 133 - #ifdef notyet 134 - nlm_common_msgring_cpu_init(); 135 - #endif 136 123 local_irq_enable(); 137 124 } 138 125 ··· 149 142 150 143 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) 151 144 { 152 - unsigned long gp = (unsigned long)task_thread_info(idle); 153 - unsigned long sp = (unsigned long)__KSTK_TOS(idle); 154 - int cpu = 
cpu_logical_map(logical_cpu); 145 + int cpu, node; 155 146 156 - nlm_next_sp = sp; 157 - nlm_next_gp = gp; 147 + cpu = cpu_logical_map(logical_cpu); 148 + node = cpu / NLM_CPUS_PER_NODE; 149 + nlm_next_sp = (unsigned long)__KSTK_TOS(idle); 150 + nlm_next_gp = (unsigned long)task_thread_info(idle); 158 151 159 - /* barrier */ 152 + /* barrier for sp/gp store above */ 160 153 __sync(); 161 - nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1); 154 + nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */ 162 155 } 163 156 164 157 void __init nlm_smp_setup(void) 165 158 { 166 159 unsigned int boot_cpu; 167 - int num_cpus, i; 160 + int num_cpus, i, ncore; 168 161 169 162 boot_cpu = hard_smp_processor_id(); 170 - cpus_clear(phys_cpu_present_map); 163 + cpumask_clear(&phys_cpu_present_map); 171 164 172 - cpu_set(boot_cpu, phys_cpu_present_map); 165 + cpumask_set_cpu(boot_cpu, &phys_cpu_present_map); 173 166 __cpu_number_map[boot_cpu] = 0; 174 167 __cpu_logical_map[0] = boot_cpu; 175 168 set_cpu_possible(0, true); ··· 181 174 * it is only set for ASPs (see smpboot.S) 182 175 */ 183 176 if (nlm_cpu_ready[i]) { 184 - cpu_set(i, phys_cpu_present_map); 177 + cpumask_set_cpu(i, &phys_cpu_present_map); 185 178 __cpu_number_map[i] = num_cpus; 186 179 __cpu_logical_map[num_cpus] = i; 187 180 set_cpu_possible(num_cpus, true); ··· 189 182 } 190 183 } 191 184 185 + /* check with the cores we have worken up */ 186 + for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) 187 + ncore += hweight32(nlm_get_node(i)->coremask); 188 + 192 189 pr_info("Phys CPU present map: %lx, possible map %lx\n", 193 - (unsigned long)phys_cpu_present_map.bits[0], 190 + (unsigned long)cpumask_bits(&phys_cpu_present_map)[0], 194 191 (unsigned long)cpumask_bits(cpu_possible_mask)[0]); 195 192 196 - pr_info("Detected %i Slave CPU(s)\n", num_cpus); 193 + pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore, 194 + nlm_threads_per_core, num_cpus); 197 195 nlm_set_nmi_handler(nlm_boot_secondary_cpus); 198 196 } 199 197 
200 - static int nlm_parse_cpumask(u32 cpu_mask) 198 + static int nlm_parse_cpumask(cpumask_t *wakeup_mask) 201 199 { 202 200 uint32_t core0_thr_mask, core_thr_mask; 203 - int threadmode, i; 201 + int threadmode, i, j; 204 202 205 - core0_thr_mask = cpu_mask & 0xf; 203 + core0_thr_mask = 0; 204 + for (i = 0; i < NLM_THREADS_PER_CORE; i++) 205 + if (cpumask_test_cpu(i, wakeup_mask)) 206 + core0_thr_mask |= (1 << i); 206 207 switch (core0_thr_mask) { 207 208 case 1: 208 209 nlm_threads_per_core = 1; ··· 229 214 } 230 215 231 216 /* Verify other cores CPU masks */ 232 - nlm_coremask = 1; 233 - nlm_cpumask = core0_thr_mask; 234 - for (i = 1; i < 8; i++) { 235 - core_thr_mask = (cpu_mask >> (i * 4)) & 0xf; 236 - if (core_thr_mask) { 237 - if (core_thr_mask != core0_thr_mask) 217 + for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) { 218 + core_thr_mask = 0; 219 + for (j = 0; j < NLM_THREADS_PER_CORE; j++) 220 + if (cpumask_test_cpu(i + j, wakeup_mask)) 221 + core_thr_mask |= (1 << j); 222 + if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask) 238 223 goto unsupp; 239 - nlm_coremask |= 1 << i; 240 - nlm_cpumask |= core0_thr_mask << (4 * i); 241 - } 242 224 } 243 225 return threadmode; 244 226 245 227 unsupp: 246 - panic("Unsupported CPU mask %x\n", cpu_mask); 228 + panic("Unsupported CPU mask %lx\n", 229 + (unsigned long)cpumask_bits(wakeup_mask)[0]); 247 230 return 0; 248 231 } 249 232 250 - int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask) 233 + int __cpuinit nlm_wakeup_secondary_cpus(void) 251 234 { 252 235 unsigned long reset_vec; 253 236 char *reset_data; ··· 257 244 (nlm_reset_entry_end - nlm_reset_entry)); 258 245 259 246 /* verify the mask and setup core config variables */ 260 - threadmode = nlm_parse_cpumask(wakeup_mask); 247 + threadmode = nlm_parse_cpumask(&nlm_cpumask); 261 248 262 249 /* Setup CPU init parameters */ 263 250 reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
+4 -2
arch/mips/netlogic/common/smpboot.S
··· 61 61 li t0, LSU_DEFEATURE 62 62 mfcr t1, t0 63 63 64 - lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */ 64 + lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */ 65 65 or t1, t1, t2 66 66 #ifdef XLP_AX_WORKAROUND 67 67 li t2, ~0xe /* S1RCM */ ··· 186 186 * jump to the secondary wait function. 187 187 */ 188 188 mfc0 v0, CP0_EBASE, 1 189 - andi v0, 0x7f /* v0 <- node/core */ 189 + andi v0, 0x3ff /* v0 <- node/core */ 190 190 191 191 /* Init MMU in the first thread after changing THREAD_MODE 192 192 * register (Ax Errata?) ··· 263 263 PTR_L gp, 0(t1) 264 264 265 265 /* a0 has the processor id */ 266 + mfc0 a0, CP0_EBASE, 1 267 + andi a0, 0x3ff /* a0 <- node/core */ 266 268 PTR_LA t0, nlm_early_init_secondary 267 269 jalr t0 268 270 nop
+15 -52
arch/mips/netlogic/xlp/nlm_hal.c
··· 40 40 #include <asm/mipsregs.h> 41 41 #include <asm/time.h> 42 42 43 + #include <asm/netlogic/common.h> 43 44 #include <asm/netlogic/haldefs.h> 44 45 #include <asm/netlogic/xlp-hal/iomap.h> 45 46 #include <asm/netlogic/xlp-hal/xlp.h> 46 47 #include <asm/netlogic/xlp-hal/pic.h> 47 48 #include <asm/netlogic/xlp-hal/sys.h> 48 49 49 - /* These addresses are computed by the nlm_hal_init() */ 50 - uint64_t nlm_io_base; 51 - uint64_t nlm_sys_base; 52 - uint64_t nlm_pic_base; 53 - 54 50 /* Main initialization */ 55 - void nlm_hal_init(void) 51 + void nlm_node_init(int node) 56 52 { 57 - nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE); 58 - nlm_sys_base = nlm_get_sys_regbase(0); /* node 0 */ 59 - nlm_pic_base = nlm_get_pic_regbase(0); /* node 0 */ 53 + struct nlm_soc_info *nodep; 54 + 55 + nodep = nlm_get_node(node); 56 + nodep->sysbase = nlm_get_sys_regbase(node); 57 + nodep->picbase = nlm_get_pic_regbase(node); 58 + nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1)); 59 + spin_lock_init(&nodep->piclock); 60 60 } 61 61 62 62 int nlm_irq_to_irt(int irq) ··· 100 100 } 101 101 } 102 102 103 - int nlm_irt_to_irq(int irt) 104 - { 105 - switch (irt) { 106 - case PIC_IRT_UART_0_INDEX: 107 - return PIC_UART_0_IRQ; 108 - case PIC_IRT_UART_1_INDEX: 109 - return PIC_UART_1_IRQ; 110 - case PIC_IRT_PCIE_LINK_0_INDEX: 111 - return PIC_PCIE_LINK_0_IRQ; 112 - case PIC_IRT_PCIE_LINK_1_INDEX: 113 - return PIC_PCIE_LINK_1_IRQ; 114 - case PIC_IRT_PCIE_LINK_2_INDEX: 115 - return PIC_PCIE_LINK_2_IRQ; 116 - case PIC_IRT_PCIE_LINK_3_INDEX: 117 - return PIC_PCIE_LINK_3_IRQ; 118 - case PIC_IRT_EHCI_0_INDEX: 119 - return PIC_EHCI_0_IRQ; 120 - case PIC_IRT_EHCI_1_INDEX: 121 - return PIC_EHCI_1_IRQ; 122 - case PIC_IRT_OHCI_0_INDEX: 123 - return PIC_OHCI_0_IRQ; 124 - case PIC_IRT_OHCI_1_INDEX: 125 - return PIC_OHCI_1_IRQ; 126 - case PIC_IRT_OHCI_2_INDEX: 127 - return PIC_OHCI_2_IRQ; 128 - case PIC_IRT_OHCI_3_INDEX: 129 - return PIC_OHCI_3_IRQ; 130 - case PIC_IRT_MMC_INDEX: 131 - return PIC_MMC_IRQ; 
132 - case PIC_IRT_I2C_0_INDEX: 133 - return PIC_I2C_0_IRQ; 134 - case PIC_IRT_I2C_1_INDEX: 135 - return PIC_I2C_1_IRQ; 136 - default: 137 - return -1; 138 - } 139 - } 140 - 141 - unsigned int nlm_get_core_frequency(int core) 103 + unsigned int nlm_get_core_frequency(int node, int core) 142 104 { 143 105 unsigned int pll_divf, pll_divr, dfs_div, ext_div; 144 106 unsigned int rstval, dfsval, denom; 145 - uint64_t num; 107 + uint64_t num, sysbase; 146 108 147 - rstval = nlm_read_sys_reg(nlm_sys_base, SYS_POWER_ON_RESET_CFG); 148 - dfsval = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIV_VALUE); 109 + sysbase = nlm_get_node(node)->sysbase; 110 + rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG); 111 + dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE); 149 112 pll_divf = ((rstval >> 10) & 0x7f) + 1; 150 113 pll_divr = ((rstval >> 8) & 0x3) + 1; 151 114 ext_div = ((rstval >> 30) & 0x3) + 1; ··· 122 159 123 160 unsigned int nlm_get_cpu_frequency(void) 124 161 { 125 - return nlm_get_core_frequency(0); 162 + return nlm_get_core_frequency(0, 0); 126 163 }
+28 -22
arch/mips/netlogic/xlp/setup.c
··· 52 52 #include <asm/netlogic/xlp-hal/xlp.h> 53 53 #include <asm/netlogic/xlp-hal/sys.h> 54 54 55 - unsigned long nlm_common_ebase = 0x0; 56 - 57 - /* default to uniprocessor */ 58 - uint32_t nlm_coremask = 1, nlm_cpumask = 1; 59 - int nlm_threads_per_core = 1; 55 + uint64_t nlm_io_base; 56 + struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 57 + cpumask_t nlm_cpumask = CPU_MASK_CPU0; 58 + unsigned int nlm_threads_per_core; 60 59 extern u32 __dtb_start[]; 61 60 62 61 static void nlm_linux_exit(void) 63 62 { 64 - nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1); 63 + uint64_t sysbase = nlm_get_node(0)->sysbase; 64 + 65 + nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1); 65 66 for ( ; ; ) 66 67 cpu_wait(); 67 68 } 68 69 69 70 void __init plat_mem_setup(void) 70 71 { 72 + void *fdtp; 73 + 71 74 panic_timeout = 5; 72 75 _machine_restart = (void (*)(char *))nlm_linux_exit; 73 76 _machine_halt = nlm_linux_exit; 74 77 pm_power_off = nlm_linux_exit; 78 + 79 + /* 80 + * If no FDT pointer is passed in, use the built-in FDT. 81 + * device_tree_init() does not handle CKSEG0 pointers in 82 + * 64-bit, so convert pointer. 83 + */ 84 + fdtp = (void *)(long)fw_arg0; 85 + if (!fdtp) 86 + fdtp = __dtb_start; 87 + fdtp = phys_to_virt(__pa(fdtp)); 88 + early_init_devtree(fdtp); 75 89 } 76 90 77 91 const char *get_system_type(void) ··· 108 94 (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2))); 109 95 } 110 96 97 + void nlm_percpu_init(int hwcpuid) 98 + { 99 + } 100 + 111 101 void __init prom_init(void) 112 102 { 113 - void *fdtp; 114 - 103 + nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE); 115 104 xlp_mmu_init(); 116 - nlm_hal_init(); 105 + nlm_node_init(0); 117 106 118 - /* 119 - * If no FDT pointer is passed in, use the built-in FDT. 120 - * device_tree_init() does not handle CKSEG0 pointers in 121 - * 64-bit, so convert pointer. 
122 - */ 123 - fdtp = (void *)(long)fw_arg0; 124 - if (!fdtp) 125 - fdtp = __dtb_start; 126 - fdtp = phys_to_virt(__pa(fdtp)); 127 - early_init_devtree(fdtp); 128 - 129 - nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); 130 107 #ifdef CONFIG_SMP 131 - nlm_wakeup_secondary_cpus(0xffffffff); 108 + cpumask_setall(&nlm_cpumask); 109 + nlm_wakeup_secondary_cpus(); 132 110 133 111 /* update TLB size after waking up threads */ 134 112 current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+55 -28
arch/mips/netlogic/xlp/wakeup.c
··· 51 51 #include <asm/netlogic/xlp-hal/xlp.h> 52 52 #include <asm/netlogic/xlp-hal/sys.h> 53 53 54 - static void xlp_enable_secondary_cores(void) 54 + static int xlp_wakeup_core(uint64_t sysbase, int core) 55 55 { 56 - uint32_t core, value, coremask, syscoremask; 56 + uint32_t coremask, value; 57 57 int count; 58 58 59 - /* read cores in reset from SYS block */ 60 - syscoremask = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET); 59 + coremask = (1 << core); 61 60 62 - /* update user specified */ 63 - nlm_coremask = nlm_coremask & (syscoremask | 1); 61 + /* Enable CPU clock */ 62 + value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL); 63 + value &= ~coremask; 64 + nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value); 64 65 65 - for (core = 1; core < 8; core++) { 66 - coremask = 1 << core; 67 - if ((nlm_coremask & coremask) == 0) 68 - continue; 66 + /* Remove CPU Reset */ 67 + value = nlm_read_sys_reg(sysbase, SYS_CPU_RESET); 68 + value &= ~coremask; 69 + nlm_write_sys_reg(sysbase, SYS_CPU_RESET, value); 69 70 70 - /* Enable CPU clock */ 71 - value = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL); 72 - value &= ~coremask; 73 - nlm_write_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL, value); 71 + /* Poll for CPU to mark itself coherent */ 72 + count = 100000; 73 + do { 74 + value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE); 75 + } while ((value & coremask) != 0 && --count > 0); 74 76 75 - /* Remove CPU Reset */ 76 - value = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET); 77 - value &= ~coremask; 78 - nlm_write_sys_reg(nlm_sys_base, SYS_CPU_RESET, value); 77 + return count != 0; 78 + } 79 79 80 - /* Poll for CPU to mark itself coherent */ 81 - count = 100000; 82 - do { 83 - value = nlm_read_sys_reg(nlm_sys_base, 84 - SYS_CPU_NONCOHERENT_MODE); 85 - } while ((value & coremask) != 0 && count-- > 0); 80 + static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) 81 + { 82 + struct nlm_soc_info *nodep; 83 + uint64_t syspcibase; 84 + uint32_t 
syscoremask; 85 + int core, n, cpu; 86 86 87 - if (count == 0) 88 - pr_err("Failed to enable core %d\n", core); 87 + for (n = 0; n < NLM_NR_NODES; n++) { 88 + syspcibase = nlm_get_sys_pcibase(n); 89 + if (nlm_read_reg(syspcibase, 0) == 0xffffffff) 90 + break; 91 + 92 + /* read cores in reset from SYS and account for boot cpu */ 93 + nlm_node_init(n); 94 + nodep = nlm_get_node(n); 95 + syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET); 96 + if (n == 0) 97 + syscoremask |= 1; 98 + 99 + for (core = 0; core < NLM_CORES_PER_NODE; core++) { 100 + /* see if the core exists */ 101 + if ((syscoremask & (1 << core)) == 0) 102 + continue; 103 + 104 + /* see if at least the first thread is enabled */ 105 + cpu = (n * NLM_CORES_PER_NODE + core) 106 + * NLM_THREADS_PER_CORE; 107 + if (!cpumask_test_cpu(cpu, wakeup_mask)) 108 + continue; 109 + 110 + /* wake up the core */ 111 + if (xlp_wakeup_core(nodep->sysbase, core)) 112 + nodep->coremask |= 1u << core; 113 + else 114 + pr_err("Failed to enable core %d\n", core); 115 + } 89 116 } 90 117 } 91 118 92 - void xlp_wakeup_secondary_cpus(void) 119 + void xlp_wakeup_secondary_cpus() 93 120 { 94 121 /* 95 122 * In case of u-boot, the secondaries are in reset ··· 125 98 xlp_boot_core0_siblings(); 126 99 127 100 /* now get other cores out of reset */ 128 - xlp_enable_secondary_cores(); 101 + xlp_enable_secondary_cores(&nlm_cpumask); 129 102 }
+2 -2
arch/mips/netlogic/xlr/Makefile
··· 1 - obj-y += setup.o platform.o platform-flash.o 2 - obj-$(CONFIG_SMP) += wakeup.o 1 + obj-y += fmn.o fmn-config.o setup.o platform.o platform-flash.o 2 + obj-$(CONFIG_SMP) += wakeup.o
+290
arch/mips/netlogic/xlr/fmn-config.c
··· 1 + /* 2 + * Copyright (c) 2003-2012 Broadcom Corporation 3 + * All Rights Reserved 4 + * 5 + * This software is available to you under a choice of one of two 6 + * licenses. You may choose to be licensed under the terms of the GNU 7 + * General Public License (GPL) Version 2, available from the file 8 + * COPYING in the main directory of this source tree, or the Broadcom 9 + * license below: 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 15 + * 1. Redistributions of source code must retain the above copyright 16 + * notice, this list of conditions and the following disclaimer. 17 + * 2. Redistributions in binary form must reproduce the above copyright 18 + * notice, this list of conditions and the following disclaimer in 19 + * the documentation and/or other materials provided with the 20 + * distribution. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 23 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 26 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 31 + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 32 + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <asm/cpu-info.h> 36 + #include <linux/irq.h> 37 + #include <linux/interrupt.h> 38 + 39 + #include <asm/mipsregs.h> 40 + #include <asm/netlogic/xlr/fmn.h> 41 + #include <asm/netlogic/xlr/xlr.h> 42 + #include <asm/netlogic/common.h> 43 + #include <asm/netlogic/haldefs.h> 44 + 45 + struct xlr_board_fmn_config xlr_board_fmn_config; 46 + 47 + static void __maybe_unused print_credit_config(struct xlr_fmn_info *fmn_info) 48 + { 49 + int bkt; 50 + 51 + pr_info("Bucket size :\n"); 52 + pr_info("Station\t: Size\n"); 53 + for (bkt = 0; bkt < 16; bkt++) 54 + pr_info(" %d %d %d %d %d %d %d %d\n", 55 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 0], 56 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 1], 57 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 2], 58 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 3], 59 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 4], 60 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 5], 61 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 6], 62 + xlr_board_fmn_config.bucket_size[(bkt * 8) + 7]); 63 + pr_info("\n"); 64 + 65 + pr_info("Credits distribution :\n"); 66 + pr_info("Station\t: Size\n"); 67 + for (bkt = 0; bkt < 16; bkt++) 68 + pr_info(" %d %d %d %d %d %d %d %d\n", 69 + fmn_info->credit_config[(bkt * 8) + 0], 70 + fmn_info->credit_config[(bkt * 8) + 1], 71 + fmn_info->credit_config[(bkt * 8) + 2], 72 + fmn_info->credit_config[(bkt * 8) + 3], 73 + fmn_info->credit_config[(bkt * 8) + 4], 74 + fmn_info->credit_config[(bkt * 8) + 5], 75 + fmn_info->credit_config[(bkt * 8) + 6], 76 + fmn_info->credit_config[(bkt * 8) + 7]); 77 + pr_info("\n"); 78 + } 79 + 80 + static void check_credit_distribution(void) 81 + { 82 + struct xlr_board_fmn_config *cfg = &xlr_board_fmn_config; 83 + int bkt, n, total_credits, ncores; 84 + 85 + ncores = hweight32(nlm_current_node()->coremask); 86 + for (bkt = 0; bkt < 128; bkt++) { 87 + total_credits = 0; 88 + for (n = 0; n < ncores; n++) 89 + total_credits += cfg->cpu[n].credit_config[bkt]; 90 + 
total_credits += cfg->gmac[0].credit_config[bkt]; 91 + total_credits += cfg->gmac[1].credit_config[bkt]; 92 + total_credits += cfg->dma.credit_config[bkt]; 93 + total_credits += cfg->cmp.credit_config[bkt]; 94 + total_credits += cfg->sae.credit_config[bkt]; 95 + total_credits += cfg->xgmac[0].credit_config[bkt]; 96 + total_credits += cfg->xgmac[1].credit_config[bkt]; 97 + if (total_credits > cfg->bucket_size[bkt]) 98 + pr_err("ERROR: Bucket %d: credits (%d) > size (%d)\n", 99 + bkt, total_credits, cfg->bucket_size[bkt]); 100 + } 101 + pr_info("Credit distribution complete.\n"); 102 + } 103 + 104 + /** 105 + * Configure bucket size and credits for a device. 'size' is the size of 106 + * the buckets for the device. This size is distributed among all the CPUs 107 + * so that all of them can send messages to the device. 108 + * 109 + * The device is also given 'cpu_credits' to send messages to the CPUs 110 + * 111 + * @dev_info: FMN information structure for each devices 112 + * @start_stn_id: Starting station id of dev_info 113 + * @end_stn_id: End station id of dev_info 114 + * @num_buckets: Total number of buckets for den_info 115 + * @cpu_credits: Allowed credits to cpu for each devices pointing by dev_info 116 + * @size: Size of the each buckets in the device station 117 + */ 118 + static void setup_fmn_cc(struct xlr_fmn_info *dev_info, int start_stn_id, 119 + int end_stn_id, int num_buckets, int cpu_credits, int size) 120 + { 121 + int i, j, num_core, n, credits_per_cpu; 122 + struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu; 123 + 124 + num_core = hweight32(nlm_current_node()->coremask); 125 + dev_info->num_buckets = num_buckets; 126 + dev_info->start_stn_id = start_stn_id; 127 + dev_info->end_stn_id = end_stn_id; 128 + 129 + n = num_core; 130 + if (num_core == 3) 131 + n = 4; 132 + 133 + for (i = start_stn_id; i <= end_stn_id; i++) { 134 + xlr_board_fmn_config.bucket_size[i] = size; 135 + 136 + /* Dividing device credits equally to cpus */ 137 + 
credits_per_cpu = size / n; 138 + for (j = 0; j < num_core; j++) 139 + cpu[j].credit_config[i] = credits_per_cpu; 140 + 141 + /* credits left to distribute */ 142 + credits_per_cpu = size - (credits_per_cpu * num_core); 143 + 144 + /* distribute the remaining credits (if any), among cores */ 145 + for (j = 0; (j < num_core) && (credits_per_cpu >= 4); j++) { 146 + cpu[j].credit_config[i] += 4; 147 + credits_per_cpu -= 4; 148 + } 149 + } 150 + 151 + /* Distributing cpu per bucket credits to devices */ 152 + for (i = 0; i < num_core; i++) { 153 + for (j = 0; j < FMN_CORE_NBUCKETS; j++) 154 + dev_info->credit_config[(i * 8) + j] = cpu_credits; 155 + } 156 + } 157 + 158 + /* 159 + * Each core has 256 slots and 8 buckets, 160 + * Configure the 8 buckets each with 32 slots 161 + */ 162 + static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core) 163 + { 164 + int i, j; 165 + 166 + for (i = 0; i < num_core; i++) { 167 + cpu[i].start_stn_id = (8 * i); 168 + cpu[i].end_stn_id = (8 * i + 8); 169 + 170 + for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++) 171 + xlr_board_fmn_config.bucket_size[j] = 32; 172 + } 173 + } 174 + 175 + /** 176 + * Setup the FMN details for each devices according to the device available 177 + * in each variant of XLR/XLS processor 178 + */ 179 + void xlr_board_info_setup(void) 180 + { 181 + struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu; 182 + struct xlr_fmn_info *gmac = xlr_board_fmn_config.gmac; 183 + struct xlr_fmn_info *xgmac = xlr_board_fmn_config.xgmac; 184 + struct xlr_fmn_info *dma = &xlr_board_fmn_config.dma; 185 + struct xlr_fmn_info *cmp = &xlr_board_fmn_config.cmp; 186 + struct xlr_fmn_info *sae = &xlr_board_fmn_config.sae; 187 + int processor_id, num_core; 188 + 189 + num_core = hweight32(nlm_current_node()->coremask); 190 + processor_id = read_c0_prid() & 0xff00; 191 + 192 + setup_cpu_fmninfo(cpu, num_core); 193 + switch (processor_id) { 194 + case PRID_IMP_NETLOGIC_XLS104: 195 + case PRID_IMP_NETLOGIC_XLS108: 196 
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 197 + FMN_STNID_GMAC0_TX3, 8, 16, 32); 198 + setup_fmn_cc(dma, FMN_STNID_DMA_0, 199 + FMN_STNID_DMA_3, 4, 8, 64); 200 + setup_fmn_cc(sae, FMN_STNID_SEC0, 201 + FMN_STNID_SEC1, 2, 8, 128); 202 + break; 203 + 204 + case PRID_IMP_NETLOGIC_XLS204: 205 + case PRID_IMP_NETLOGIC_XLS208: 206 + setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 207 + FMN_STNID_GMAC0_TX3, 8, 16, 32); 208 + setup_fmn_cc(dma, FMN_STNID_DMA_0, 209 + FMN_STNID_DMA_3, 4, 8, 64); 210 + setup_fmn_cc(sae, FMN_STNID_SEC0, 211 + FMN_STNID_SEC1, 2, 8, 128); 212 + break; 213 + 214 + case PRID_IMP_NETLOGIC_XLS404: 215 + case PRID_IMP_NETLOGIC_XLS408: 216 + case PRID_IMP_NETLOGIC_XLS404B: 217 + case PRID_IMP_NETLOGIC_XLS408B: 218 + case PRID_IMP_NETLOGIC_XLS416B: 219 + setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 220 + FMN_STNID_GMAC0_TX3, 8, 8, 32); 221 + setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0, 222 + FMN_STNID_GMAC1_TX3, 8, 8, 32); 223 + setup_fmn_cc(dma, FMN_STNID_DMA_0, 224 + FMN_STNID_DMA_3, 4, 4, 64); 225 + setup_fmn_cc(cmp, FMN_STNID_CMP_0, 226 + FMN_STNID_CMP_3, 4, 4, 64); 227 + setup_fmn_cc(sae, FMN_STNID_SEC0, 228 + FMN_STNID_SEC1, 2, 8, 128); 229 + break; 230 + 231 + case PRID_IMP_NETLOGIC_XLS412B: 232 + setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 233 + FMN_STNID_GMAC0_TX3, 8, 8, 32); 234 + setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0, 235 + FMN_STNID_GMAC1_TX3, 8, 8, 32); 236 + setup_fmn_cc(dma, FMN_STNID_DMA_0, 237 + FMN_STNID_DMA_3, 4, 4, 64); 238 + setup_fmn_cc(cmp, FMN_STNID_CMP_0, 239 + FMN_STNID_CMP_3, 4, 4, 64); 240 + setup_fmn_cc(sae, FMN_STNID_SEC0, 241 + FMN_STNID_SEC1, 2, 8, 128); 242 + break; 243 + 244 + case PRID_IMP_NETLOGIC_XLR308: 245 + case PRID_IMP_NETLOGIC_XLR308C: 246 + setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 247 + FMN_STNID_GMAC0_TX3, 8, 16, 32); 248 + setup_fmn_cc(dma, FMN_STNID_DMA_0, 249 + FMN_STNID_DMA_3, 4, 8, 64); 250 + setup_fmn_cc(sae, FMN_STNID_SEC0, 251 + FMN_STNID_SEC1, 2, 4, 128); 252 + break; 253 + 254 + case PRID_IMP_NETLOGIC_XLR532: 
255 + case PRID_IMP_NETLOGIC_XLR532C: 256 + case PRID_IMP_NETLOGIC_XLR516C: 257 + case PRID_IMP_NETLOGIC_XLR508C: 258 + setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 259 + FMN_STNID_GMAC0_TX3, 8, 16, 32); 260 + setup_fmn_cc(dma, FMN_STNID_DMA_0, 261 + FMN_STNID_DMA_3, 4, 8, 64); 262 + setup_fmn_cc(sae, FMN_STNID_SEC0, 263 + FMN_STNID_SEC1, 2, 4, 128); 264 + break; 265 + 266 + case PRID_IMP_NETLOGIC_XLR732: 267 + case PRID_IMP_NETLOGIC_XLR716: 268 + setup_fmn_cc(&xgmac[0], FMN_STNID_XMAC0_00_TX, 269 + FMN_STNID_XMAC0_15_TX, 8, 0, 32); 270 + setup_fmn_cc(&xgmac[1], FMN_STNID_XMAC1_00_TX, 271 + FMN_STNID_XMAC1_15_TX, 8, 0, 32); 272 + setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 273 + FMN_STNID_GMAC0_TX3, 8, 24, 32); 274 + setup_fmn_cc(dma, FMN_STNID_DMA_0, 275 + FMN_STNID_DMA_3, 4, 4, 64); 276 + setup_fmn_cc(sae, FMN_STNID_SEC0, 277 + FMN_STNID_SEC1, 2, 4, 128); 278 + break; 279 + default: 280 + pr_err("Unknown CPU with processor ID [%d]\n", processor_id); 281 + pr_err("Error: Cannot initialize FMN credits.\n"); 282 + } 283 + 284 + check_credit_distribution(); 285 + 286 + #if 0 /* debug */ 287 + print_credit_config(&cpu[0]); 288 + print_credit_config(&gmac[0]); 289 + #endif 290 + }
+204
arch/mips/netlogic/xlr/fmn.c
··· 1 + /* 2 + * Copyright (c) 2003-2012 Broadcom Corporation 3 + * All Rights Reserved 4 + * 5 + * This software is available to you under a choice of one of two 6 + * licenses. You may choose to be licensed under the terms of the GNU 7 + * General Public License (GPL) Version 2, available from the file 8 + * COPYING in the main directory of this source tree, or the Broadcom 9 + * license below: 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 15 + * 1. Redistributions of source code must retain the above copyright 16 + * notice, this list of conditions and the following disclaimer. 17 + * 2. Redistributions in binary form must reproduce the above copyright 18 + * notice, this list of conditions and the following disclaimer in 19 + * the documentation and/or other materials provided with the 20 + * distribution. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 23 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 26 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 31 + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 32 + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/irqreturn.h> 37 + #include <linux/irq.h> 38 + #include <linux/interrupt.h> 39 + 40 + #include <asm/mipsregs.h> 41 + #include <asm/netlogic/interrupt.h> 42 + #include <asm/netlogic/xlr/fmn.h> 43 + #include <asm/netlogic/common.h> 44 + 45 + #define COP2_CC_INIT_CPU_DEST(dest, conf) \ 46 + do { \ 47 + nlm_write_c2_cc##dest(0, conf[(dest * 8) + 0]); \ 48 + nlm_write_c2_cc##dest(1, conf[(dest * 8) + 1]); \ 49 + nlm_write_c2_cc##dest(2, conf[(dest * 8) + 2]); \ 50 + nlm_write_c2_cc##dest(3, conf[(dest * 8) + 3]); \ 51 + nlm_write_c2_cc##dest(4, conf[(dest * 8) + 4]); \ 52 + nlm_write_c2_cc##dest(5, conf[(dest * 8) + 5]); \ 53 + nlm_write_c2_cc##dest(6, conf[(dest * 8) + 6]); \ 54 + nlm_write_c2_cc##dest(7, conf[(dest * 8) + 7]); \ 55 + } while (0) 56 + 57 + struct fmn_message_handler { 58 + void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *); 59 + void *arg; 60 + } msg_handlers[128]; 61 + 62 + /* 63 + * FMN interrupt handler. We configure the FMN so that any messages in 64 + * any of the CPU buckets will trigger an interrupt on the CPU. 65 + * The message can be from any device on the FMN (like NAE/SAE/DMA). 66 + * The source station id is used to figure out which of the registered 67 + * handlers have to be called. 
68 + */ 69 + static irqreturn_t fmn_message_handler(int irq, void *data) 70 + { 71 + struct fmn_message_handler *hndlr; 72 + int bucket, rv; 73 + int size = 0, code = 0, src_stnid = 0; 74 + struct nlm_fmn_msg msg; 75 + uint32_t mflags, bkt_status; 76 + 77 + mflags = nlm_cop2_enable(); 78 + /* Disable message ring interrupt */ 79 + nlm_fmn_setup_intr(irq, 0); 80 + while (1) { 81 + /* 8 bkts per core, [24:31] each bit represents one bucket 82 + * Bit is Zero if bucket is not empty */ 83 + bkt_status = (nlm_read_c2_status() >> 24) & 0xff; 84 + if (bkt_status == 0xff) 85 + break; 86 + for (bucket = 0; bucket < 8; bucket++) { 87 + /* Continue on empty bucket */ 88 + if (bkt_status & (1 << bucket)) 89 + continue; 90 + rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid, 91 + &msg); 92 + if (rv != 0) 93 + continue; 94 + 95 + hndlr = &msg_handlers[src_stnid]; 96 + if (hndlr->action == NULL) 97 + pr_warn("No msgring handler for stnid %d\n", 98 + src_stnid); 99 + else { 100 + nlm_cop2_restore(mflags); 101 + hndlr->action(bucket, src_stnid, size, code, 102 + &msg, hndlr->arg); 103 + mflags = nlm_cop2_enable(); 104 + } 105 + } 106 + }; 107 + /* Enable message ring intr, to any thread in core */ 108 + nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1); 109 + nlm_cop2_restore(mflags); 110 + return IRQ_HANDLED; 111 + } 112 + 113 + struct irqaction fmn_irqaction = { 114 + .handler = fmn_message_handler, 115 + .flags = IRQF_PERCPU, 116 + .name = "fmn", 117 + }; 118 + 119 + void xlr_percpu_fmn_init(void) 120 + { 121 + struct xlr_fmn_info *cpu_fmn_info; 122 + int *bucket_sizes; 123 + uint32_t flags; 124 + int id; 125 + 126 + BUG_ON(nlm_thread_id() != 0); 127 + id = nlm_core_id(); 128 + 129 + bucket_sizes = xlr_board_fmn_config.bucket_size; 130 + cpu_fmn_info = &xlr_board_fmn_config.cpu[id]; 131 + flags = nlm_cop2_enable(); 132 + 133 + /* Setup bucket sizes for the core. 
*/ 134 + nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]); 135 + nlm_write_c2_bucksize(1, bucket_sizes[id * 8 + 1]); 136 + nlm_write_c2_bucksize(2, bucket_sizes[id * 8 + 2]); 137 + nlm_write_c2_bucksize(3, bucket_sizes[id * 8 + 3]); 138 + nlm_write_c2_bucksize(4, bucket_sizes[id * 8 + 4]); 139 + nlm_write_c2_bucksize(5, bucket_sizes[id * 8 + 5]); 140 + nlm_write_c2_bucksize(6, bucket_sizes[id * 8 + 6]); 141 + nlm_write_c2_bucksize(7, bucket_sizes[id * 8 + 7]); 142 + 143 + /* 144 + * For sending FMN messages, we need credits on the destination 145 + * bucket. Program the credits this core has on the 128 possible 146 + * destination buckets. 147 + * We cannot use a loop here, because the the first argument has 148 + * to be a constant integer value. 149 + */ 150 + COP2_CC_INIT_CPU_DEST(0, cpu_fmn_info->credit_config); 151 + COP2_CC_INIT_CPU_DEST(1, cpu_fmn_info->credit_config); 152 + COP2_CC_INIT_CPU_DEST(2, cpu_fmn_info->credit_config); 153 + COP2_CC_INIT_CPU_DEST(3, cpu_fmn_info->credit_config); 154 + COP2_CC_INIT_CPU_DEST(4, cpu_fmn_info->credit_config); 155 + COP2_CC_INIT_CPU_DEST(5, cpu_fmn_info->credit_config); 156 + COP2_CC_INIT_CPU_DEST(6, cpu_fmn_info->credit_config); 157 + COP2_CC_INIT_CPU_DEST(7, cpu_fmn_info->credit_config); 158 + COP2_CC_INIT_CPU_DEST(8, cpu_fmn_info->credit_config); 159 + COP2_CC_INIT_CPU_DEST(9, cpu_fmn_info->credit_config); 160 + COP2_CC_INIT_CPU_DEST(10, cpu_fmn_info->credit_config); 161 + COP2_CC_INIT_CPU_DEST(11, cpu_fmn_info->credit_config); 162 + COP2_CC_INIT_CPU_DEST(12, cpu_fmn_info->credit_config); 163 + COP2_CC_INIT_CPU_DEST(13, cpu_fmn_info->credit_config); 164 + COP2_CC_INIT_CPU_DEST(14, cpu_fmn_info->credit_config); 165 + COP2_CC_INIT_CPU_DEST(15, cpu_fmn_info->credit_config); 166 + 167 + /* enable FMN interrupts on this CPU */ 168 + nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1); 169 + nlm_cop2_restore(flags); 170 + } 171 + 172 + 173 + /* 174 + * Register a FMN message handler with respect to the source 
station id 175 + * @stnid: source station id 176 + * @action: Handler function pointer 177 + */ 178 + int nlm_register_fmn_handler(int start_stnid, int end_stnid, 179 + void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *), 180 + void *arg) 181 + { 182 + int sstnid; 183 + 184 + for (sstnid = start_stnid; sstnid <= end_stnid; sstnid++) { 185 + msg_handlers[sstnid].arg = arg; 186 + smp_wmb(); 187 + msg_handlers[sstnid].action = action; 188 + } 189 + pr_debug("Registered FMN msg handler for stnid %d-%d\n", 190 + start_stnid, end_stnid); 191 + return 0; 192 + } 193 + 194 + void nlm_setup_fmn_irq(void) 195 + { 196 + uint32_t flags; 197 + 198 + /* setup irq only once */ 199 + setup_irq(IRQ_FMN, &fmn_irqaction); 200 + 201 + flags = nlm_cop2_enable(); 202 + nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1); 203 + nlm_cop2_restore(flags); 204 + }
+28 -9
arch/mips/netlogic/xlr/setup.c
··· 49 49 #include <asm/netlogic/xlr/iomap.h> 50 50 #include <asm/netlogic/xlr/pic.h> 51 51 #include <asm/netlogic/xlr/gpio.h> 52 + #include <asm/netlogic/xlr/fmn.h> 52 53 53 54 uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE; 54 - uint64_t nlm_pic_base; 55 55 struct psb_info nlm_prom_info; 56 56 57 - unsigned long nlm_common_ebase = 0x0; 58 - 59 57 /* default to uniprocessor */ 60 - uint32_t nlm_coremask = 1, nlm_cpumask = 1; 61 - int nlm_threads_per_core = 1; 58 + unsigned int nlm_threads_per_core = 1; 59 + struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 60 + cpumask_t nlm_cpumask = CPU_MASK_CPU0; 62 61 63 62 static void __init nlm_early_serial_setup(void) 64 63 { ··· 110 111 void __init prom_free_prom_memory(void) 111 112 { 112 113 /* Nothing yet */ 114 + } 115 + 116 + void nlm_percpu_init(int hwcpuid) 117 + { 118 + if (hwcpuid % 4 == 0) 119 + xlr_percpu_fmn_init(); 113 120 } 114 121 115 122 static void __init build_arcs_cmdline(int *argv) ··· 181 176 } 182 177 } 183 178 179 + static void nlm_init_node(void) 180 + { 181 + struct nlm_soc_info *nodep; 182 + 183 + nodep = nlm_current_node(); 184 + nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET); 185 + nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1)); 186 + spin_lock_init(&nodep->piclock); 187 + } 188 + 184 189 void __init prom_init(void) 185 190 { 186 - int *argv, *envp; /* passed as 32 bit ptrs */ 191 + int i, *argv, *envp; /* passed as 32 bit ptrs */ 187 192 struct psb_info *prom_infop; 188 193 189 194 /* truncate to 32 bit and sign extend all args */ ··· 202 187 prom_infop = (struct psb_info *)(long)(int)fw_arg3; 203 188 204 189 nlm_prom_info = *prom_infop; 205 - nlm_pic_base = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET); 190 + nlm_init_node(); 206 191 207 192 nlm_early_serial_setup(); 208 193 build_arcs_cmdline(argv); 209 - nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); 210 194 prom_add_memory(); 211 195 212 196 #ifdef CONFIG_SMP 213 - nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map); 197 + 
for (i = 0; i < 32; i++) 198 + if (nlm_prom_info.online_cpu_map & (1 << i)) 199 + cpumask_set_cpu(i, &nlm_cpumask); 200 + nlm_wakeup_secondary_cpus(); 214 201 register_smp_ops(&nlm_smp_ops); 215 202 #endif 203 + xlr_board_info_setup(); 204 + xlr_percpu_fmn_init(); 216 205 }
+20 -3
arch/mips/netlogic/xlr/wakeup.c
··· 33 33 */ 34 34 35 35 #include <linux/init.h> 36 + #include <linux/delay.h> 36 37 #include <linux/threads.h> 37 38 38 39 #include <asm/asm.h> ··· 51 50 52 51 int __cpuinit xlr_wakeup_secondary_cpus(void) 53 52 { 54 - unsigned int i, boot_cpu; 53 + struct nlm_soc_info *nodep; 54 + unsigned int i, j, boot_cpu; 55 55 56 56 /* 57 57 * In case of RMI boot, hit with NMI to get the cores 58 58 * from bootloader to linux code. 59 59 */ 60 + nodep = nlm_get_node(0); 60 61 boot_cpu = hard_smp_processor_id(); 61 62 nlm_set_nmi_handler(nlm_rmiboot_preboot); 62 63 for (i = 0; i < NR_CPUS; i++) { 63 - if (i == boot_cpu || (nlm_cpumask & (1u << i)) == 0) 64 + if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask)) 64 65 continue; 65 - nlm_pic_send_ipi(nlm_pic_base, i, 1, 1); /* send NMI */ 66 + nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */ 67 + } 68 + 69 + /* Fill up the coremask early */ 70 + nodep->coremask = 1; 71 + for (i = 1; i < NLM_CORES_PER_NODE; i++) { 72 + for (j = 1000000; j > 0; j--) { 73 + if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE]) 74 + break; 75 + udelay(10); 76 + } 77 + if (j != 0) 78 + nodep->coremask |= (1u << i); 79 + else 80 + pr_err("Failed to wakeup core %d\n", i); 66 81 } 67 82 68 83 return 0;
+1 -1
arch/mips/oprofile/Makefile
··· 12 12 oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o 13 13 oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o 14 14 oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o 15 - oprofile-$(CONFIG_CPU_RM9000) += op_model_rm9000.o 15 + oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o 16 16 oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o
+1 -4
arch/mips/oprofile/common.c
··· 16 16 #include "op_impl.h" 17 17 18 18 extern struct op_mips_model op_model_mipsxx_ops __weak; 19 - extern struct op_mips_model op_model_rm9000_ops __weak; 20 19 extern struct op_mips_model op_model_loongson2_ops __weak; 21 20 22 21 static struct op_mips_model *model; ··· 90 91 case CPU_R10000: 91 92 case CPU_R12000: 92 93 case CPU_R14000: 94 + case CPU_XLR: 93 95 lmodel = &op_model_mipsxx_ops; 94 96 break; 95 97 96 - case CPU_RM9000: 97 - lmodel = &op_model_rm9000_ops; 98 - break; 99 98 case CPU_LOONGSON2: 100 99 lmodel = &op_model_loongson2_ops; 101 100 break;
+29
arch/mips/oprofile/op_model_mipsxx.c
··· 31 31 32 32 #define M_COUNTER_OVERFLOW (1UL << 31) 33 33 34 + /* Netlogic XLR specific, count events in all threads in a core */ 35 + #define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13) 36 + 34 37 static int (*save_perf_irq)(void); 38 + 39 + /* 40 + * XLR has only one set of counters per core. Designate the 41 + * first hardware thread in the core for setup and init. 42 + * Skip CPUs with non-zero hardware thread id (4 hwt per core) 43 + */ 44 + #ifdef CONFIG_CPU_XLR 45 + #define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0) 46 + #else 47 + #define oprofile_skip_cpu(c) 0 48 + #endif 35 49 36 50 #ifdef CONFIG_MIPS_MT_SMP 37 51 static int cpu_has_mipsmt_pertccounters; ··· 166 152 reg.control[i] |= M_PERFCTL_USER; 167 153 if (ctr[i].exl) 168 154 reg.control[i] |= M_PERFCTL_EXL; 155 + if (current_cpu_type() == CPU_XLR) 156 + reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; 169 157 reg.counter[i] = 0x80000000 - ctr[i].count; 170 158 } 171 159 } ··· 177 161 static void mipsxx_cpu_setup(void *args) 178 162 { 179 163 unsigned int counters = op_model_mipsxx_ops.num_counters; 164 + 165 + if (oprofile_skip_cpu(smp_processor_id())) 166 + return; 180 167 181 168 switch (counters) { 182 169 case 4: ··· 202 183 { 203 184 unsigned int counters = op_model_mipsxx_ops.num_counters; 204 185 186 + if (oprofile_skip_cpu(smp_processor_id())) 187 + return; 188 + 205 189 switch (counters) { 206 190 case 4: 207 191 w_c0_perfctrl3(WHAT | reg.control[3]); ··· 221 199 static void mipsxx_cpu_stop(void *args) 222 200 { 223 201 unsigned int counters = op_model_mipsxx_ops.num_counters; 202 + 203 + if (oprofile_skip_cpu(smp_processor_id())) 204 + return; 224 205 225 206 switch (counters) { 226 207 case 4: ··· 395 370 396 371 case CPU_LOONGSON1: 397 372 op_model_mipsxx_ops.cpu_type = "mips/loongson1"; 373 + break; 374 + 375 + case CPU_XLR: 376 + op_model_mipsxx_ops.cpu_type = "mips/xlr"; 398 377 break; 399 378 400 379 default:
-138
arch/mips/oprofile/op_model_rm9000.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2004 by Ralf Baechle 7 - */ 8 - #include <linux/init.h> 9 - #include <linux/oprofile.h> 10 - #include <linux/interrupt.h> 11 - #include <linux/smp.h> 12 - 13 - #include "op_impl.h" 14 - 15 - #define RM9K_COUNTER1_EVENT(event) ((event) << 0) 16 - #define RM9K_COUNTER1_SUPERVISOR (1ULL << 7) 17 - #define RM9K_COUNTER1_KERNEL (1ULL << 8) 18 - #define RM9K_COUNTER1_USER (1ULL << 9) 19 - #define RM9K_COUNTER1_ENABLE (1ULL << 10) 20 - #define RM9K_COUNTER1_OVERFLOW (1ULL << 15) 21 - 22 - #define RM9K_COUNTER2_EVENT(event) ((event) << 16) 23 - #define RM9K_COUNTER2_SUPERVISOR (1ULL << 23) 24 - #define RM9K_COUNTER2_KERNEL (1ULL << 24) 25 - #define RM9K_COUNTER2_USER (1ULL << 25) 26 - #define RM9K_COUNTER2_ENABLE (1ULL << 26) 27 - #define RM9K_COUNTER2_OVERFLOW (1ULL << 31) 28 - 29 - extern unsigned int rm9000_perfcount_irq; 30 - 31 - static struct rm9k_register_config { 32 - unsigned int control; 33 - unsigned int reset_counter1; 34 - unsigned int reset_counter2; 35 - } reg; 36 - 37 - /* Compute all of the registers in preparation for enabling profiling. */ 38 - 39 - static void rm9000_reg_setup(struct op_counter_config *ctr) 40 - { 41 - unsigned int control = 0; 42 - 43 - /* Compute the performance counter control word. 
*/ 44 - /* For now count kernel and user mode */ 45 - if (ctr[0].enabled) 46 - control |= RM9K_COUNTER1_EVENT(ctr[0].event) | 47 - RM9K_COUNTER1_KERNEL | 48 - RM9K_COUNTER1_USER | 49 - RM9K_COUNTER1_ENABLE; 50 - if (ctr[1].enabled) 51 - control |= RM9K_COUNTER2_EVENT(ctr[1].event) | 52 - RM9K_COUNTER2_KERNEL | 53 - RM9K_COUNTER2_USER | 54 - RM9K_COUNTER2_ENABLE; 55 - reg.control = control; 56 - 57 - reg.reset_counter1 = 0x80000000 - ctr[0].count; 58 - reg.reset_counter2 = 0x80000000 - ctr[1].count; 59 - } 60 - 61 - /* Program all of the registers in preparation for enabling profiling. */ 62 - 63 - static void rm9000_cpu_setup(void *args) 64 - { 65 - uint64_t perfcount; 66 - 67 - perfcount = ((uint64_t) reg.reset_counter2 << 32) | reg.reset_counter1; 68 - write_c0_perfcount(perfcount); 69 - } 70 - 71 - static void rm9000_cpu_start(void *args) 72 - { 73 - /* Start all counters on current CPU */ 74 - write_c0_perfcontrol(reg.control); 75 - } 76 - 77 - static void rm9000_cpu_stop(void *args) 78 - { 79 - /* Stop all counters on current CPU */ 80 - write_c0_perfcontrol(0); 81 - } 82 - 83 - static irqreturn_t rm9000_perfcount_handler(int irq, void *dev_id) 84 - { 85 - unsigned int control = read_c0_perfcontrol(); 86 - struct pt_regs *regs = get_irq_regs(); 87 - uint32_t counter1, counter2; 88 - uint64_t counters; 89 - 90 - /* 91 - * RM9000 combines two 32-bit performance counters into a single 92 - * 64-bit coprocessor zero register. To avoid a race updating the 93 - * registers we need to stop the counters while we're messing with 94 - * them ... 
95 - */ 96 - write_c0_perfcontrol(0); 97 - 98 - counters = read_c0_perfcount(); 99 - counter1 = counters; 100 - counter2 = counters >> 32; 101 - 102 - if (control & RM9K_COUNTER1_OVERFLOW) { 103 - oprofile_add_sample(regs, 0); 104 - counter1 = reg.reset_counter1; 105 - } 106 - if (control & RM9K_COUNTER2_OVERFLOW) { 107 - oprofile_add_sample(regs, 1); 108 - counter2 = reg.reset_counter2; 109 - } 110 - 111 - counters = ((uint64_t)counter2 << 32) | counter1; 112 - write_c0_perfcount(counters); 113 - write_c0_perfcontrol(reg.control); 114 - 115 - return IRQ_HANDLED; 116 - } 117 - 118 - static int __init rm9000_init(void) 119 - { 120 - return request_irq(rm9000_perfcount_irq, rm9000_perfcount_handler, 121 - 0, "Perfcounter", NULL); 122 - } 123 - 124 - static void rm9000_exit(void) 125 - { 126 - free_irq(rm9000_perfcount_irq, NULL); 127 - } 128 - 129 - struct op_mips_model op_model_rm9000_ops = { 130 - .reg_setup = rm9000_reg_setup, 131 - .cpu_setup = rm9000_cpu_setup, 132 - .init = rm9000_init, 133 - .exit = rm9000_exit, 134 - .cpu_start = rm9000_cpu_start, 135 - .cpu_stop = rm9000_cpu_stop, 136 - .cpu_type = "mips/rm9000", 137 - .num_counters = 2 138 - };
-2
arch/mips/pci/Makefile
··· 34 34 obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o 35 35 obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o 36 36 obj-$(CONFIG_PMC_MSP7120_FPGA) += fixup-pmcmsp.o ops-pmcmsp.o 37 - obj-$(CONFIG_PMC_YOSEMITE) += fixup-yosemite.o ops-titan.o ops-titan-ht.o \ 38 - pci-yosemite.o 39 37 obj-$(CONFIG_SGI_IP27) += ops-bridge.o pci-ip27.o 40 38 obj-$(CONFIG_SGI_IP32) += fixup-ip32.o ops-mace.o pci-ip32.o 41 39 obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
-41
arch/mips/pci/fixup-yosemite.c
··· 1 - /* 2 - * Copyright 2003 PMC-Sierra 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License as published by the 7 - * Free Software Foundation; either version 2 of the License, or (at your 8 - * option) any later version. 9 - * 10 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 11 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 12 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 13 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 14 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 15 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 16 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 17 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 18 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 19 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 20 - * 21 - * You should have received a copy of the GNU General Public License along 22 - * with this program; if not, write to the Free Software Foundation, Inc., 23 - * 675 Mass Ave, Cambridge, MA 02139, USA. 24 - */ 25 - #include <linux/kernel.h> 26 - #include <linux/init.h> 27 - #include <linux/pci.h> 28 - 29 - int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 30 - { 31 - if (pin == 0) 32 - return -1; 33 - 34 - return 3; /* Everything goes to one irq bit */ 35 - } 36 - 37 - /* Do platform specific device initialization at pci_enable_device() time */ 38 - int pcibios_plat_dev_init(struct pci_dev *dev) 39 - { 40 - return 0; 41 - }
+12 -12
arch/mips/pci/ops-bridge.c
··· 56 56 return PCIBIOS_DEVICE_NOT_FOUND; 57 57 58 58 /* 59 - * IOC3 is fucked fucked beyond believe ... Don't even give the 59 + * IOC3 is fucking fucked beyond belief ... Don't even give the 60 60 * generic PCI code a chance to look at it for real ... 61 61 */ 62 62 if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) ··· 76 76 oh_my_gawd: 77 77 78 78 /* 79 - * IOC3 is fucked fucked beyond believe ... Don't even give the 79 + * IOC3 is fucking fucked beyond belief ... Don't even give the 80 80 * generic PCI code a chance to look at the wrong register. 81 81 */ 82 82 if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) { ··· 85 85 } 86 86 87 87 /* 88 - * IOC3 is fucked fucked beyond believe ... Don't try to access 88 + * IOC3 is fucking fucked beyond belief ... Don't try to access 89 89 * anything but 32-bit words ... 90 90 */ 91 91 addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; ··· 118 118 return PCIBIOS_DEVICE_NOT_FOUND; 119 119 120 120 /* 121 - * IOC3 is fucked fucked beyond believe ... Don't even give the 121 + * IOC3 is fucking fucked beyond belief ... Don't even give the 122 122 * generic PCI code a chance to look at it for real ... 123 123 */ 124 124 if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) ··· 139 139 oh_my_gawd: 140 140 141 141 /* 142 - * IOC3 is fucked fucked beyond believe ... Don't even give the 142 + * IOC3 is fucking fucked beyond belief ... Don't even give the 143 143 * generic PCI code a chance to look at the wrong register. 144 144 */ 145 145 if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) { ··· 148 148 } 149 149 150 150 /* 151 - * IOC3 is fucked fucked beyond believe ... Don't try to access 151 + * IOC3 is fucking fucked beyond belief ... Don't try to access 152 152 * anything but 32-bit words ... 153 153 */ 154 154 bridge->b_pci_cfg = (busno << 16) | (slot << 11); ··· 189 189 return PCIBIOS_DEVICE_NOT_FOUND; 190 190 191 191 /* 192 - * IOC3 is fucked fucked beyond believe ... 
Don't even give the 192 + * IOC3 is fucking fucked beyond belief ... Don't even give the 193 193 * generic PCI code a chance to look at it for real ... 194 194 */ 195 195 if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) ··· 213 213 oh_my_gawd: 214 214 215 215 /* 216 - * IOC3 is fucked fucked beyond believe ... Don't even give the 216 + * IOC3 is fucking fucked beyond belief ... Don't even give the 217 217 * generic PCI code a chance to touch the wrong register. 218 218 */ 219 219 if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) 220 220 return PCIBIOS_SUCCESSFUL; 221 221 222 222 /* 223 - * IOC3 is fucked fucked beyond believe ... Don't try to access 223 + * IOC3 is fucking fucked beyond belief ... Don't try to access 224 224 * anything but 32-bit words ... 225 225 */ 226 226 addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; ··· 257 257 return PCIBIOS_DEVICE_NOT_FOUND; 258 258 259 259 /* 260 - * IOC3 is fucked fucked beyond believe ... Don't even give the 260 + * IOC3 is fucking fucked beyond belief ... Don't even give the 261 261 * generic PCI code a chance to look at it for real ... 262 262 */ 263 263 if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) ··· 281 281 oh_my_gawd: 282 282 283 283 /* 284 - * IOC3 is fucked fucked beyond believe ... Don't even give the 284 + * IOC3 is fucking fucked beyond belief ... Don't even give the 285 285 * generic PCI code a chance to touch the wrong register. 286 286 */ 287 287 if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) 288 288 return PCIBIOS_SUCCESSFUL; 289 289 290 290 /* 291 - * IOC3 is fucked fucked beyond believe ... Don't try to access 291 + * IOC3 is fucking fucked beyond belief ... Don't try to access 292 292 * anything but 32-bit words ... 293 293 */ 294 294 addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-124
arch/mips/pci/ops-titan-ht.c
··· 1 - /* 2 - * Copyright 2003 PMC-Sierra 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License as published by the 7 - * Free Software Foundation; either version 2 of the License, or (at your 8 - * option) any later version. 9 - * 10 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 11 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 12 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 13 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 14 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 15 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 16 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 17 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 18 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 19 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 20 - * 21 - * You should have received a copy of the GNU General Public License along 22 - * with this program; if not, write to the Free Software Foundation, Inc., 23 - * 675 Mass Ave, Cambridge, MA 02139, USA. 24 - */ 25 - 26 - #include <linux/types.h> 27 - #include <linux/pci.h> 28 - #include <linux/kernel.h> 29 - #include <linux/delay.h> 30 - #include <asm/io.h> 31 - 32 - #include <asm/titan_dep.h> 33 - 34 - static int titan_ht_config_read_dword(struct pci_bus *bus, unsigned int devfn, 35 - int offset, u32 *val) 36 - { 37 - volatile uint32_t address; 38 - int busno; 39 - 40 - busno = bus->number; 41 - 42 - address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; 43 - if (busno != 0) 44 - address |= 1; 45 - 46 - /* 47 - * RM9000 HT Errata: Issue back to back HT config 48 - * transcations. 
Issue a BIU sync before and 49 - * after the HT cycle 50 - */ 51 - 52 - *(volatile int32_t *) 0xfb0000f0 |= 0x2; 53 - 54 - udelay(30); 55 - 56 - *(volatile int32_t *) 0xfb0006f8 = address; 57 - *(val) = *(volatile int32_t *) 0xfb0006fc; 58 - 59 - udelay(30); 60 - 61 - * (volatile int32_t *) 0xfb0000f0 |= 0x2; 62 - 63 - return PCIBIOS_SUCCESSFUL; 64 - } 65 - 66 - static int titan_ht_config_read(struct pci_bus *bus, unsigned int devfn, 67 - int offset, int size, u32 *val) 68 - { 69 - uint32_t dword; 70 - 71 - titan_ht_config_read_dword(bus, devfn, offset, &dword); 72 - 73 - dword >>= ((offset & 3) << 3); 74 - dword &= (0xffffffffU >> ((4 - size) << 8)); 75 - 76 - return PCIBIOS_SUCCESSFUL; 77 - } 78 - 79 - static inline int titan_ht_config_write_dword(struct pci_bus *bus, 80 - unsigned int devfn, int offset, u32 val) 81 - { 82 - volatile uint32_t address; 83 - int busno; 84 - 85 - busno = bus->number; 86 - 87 - address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; 88 - if (busno != 0) 89 - address |= 1; 90 - 91 - *(volatile int32_t *) 0xfb0000f0 |= 0x2; 92 - 93 - udelay(30); 94 - 95 - *(volatile int32_t *) 0xfb0006f8 = address; 96 - *(volatile int32_t *) 0xfb0006fc = val; 97 - 98 - udelay(30); 99 - 100 - *(volatile int32_t *) 0xfb0000f0 |= 0x2; 101 - 102 - return PCIBIOS_SUCCESSFUL; 103 - } 104 - 105 - static int titan_ht_config_write(struct pci_bus *bus, unsigned int devfn, 106 - int offset, int size, u32 val) 107 - { 108 - uint32_t val1, val2, mask; 109 - 110 - titan_ht_config_read_dword(bus, devfn, offset, &val2); 111 - 112 - val1 = val << ((offset & 3) << 3); 113 - mask = ~(0xffffffffU >> ((4 - size) << 8)); 114 - val2 &= ~(mask << ((offset & 3) << 8)); 115 - 116 - titan_ht_config_write_dword(bus, devfn, offset, val1 | val2); 117 - 118 - return PCIBIOS_SUCCESSFUL; 119 - } 120 - 121 - struct pci_ops titan_ht_pci_ops = { 122 - .read = titan_ht_config_read, 123 - .write = titan_ht_config_write, 124 - };
-111
arch/mips/pci/ops-titan.c
··· 1 - /* 2 - * Copyright 2003 PMC-Sierra 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License as published by the 7 - * Free Software Foundation; either version 2 of the License, or (at your 8 - * option) any later version. 9 - * 10 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 11 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 12 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 13 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 14 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 15 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 16 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 17 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 18 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 19 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 20 - * 21 - * You should have received a copy of the GNU General Public License along 22 - * with this program; if not, write to the Free Software Foundation, Inc., 23 - * 675 Mass Ave, Cambridge, MA 02139, USA. 
24 - */ 25 - #include <linux/types.h> 26 - #include <linux/pci.h> 27 - #include <linux/kernel.h> 28 - 29 - #include <asm/pci.h> 30 - #include <asm/io.h> 31 - #include <asm/rm9k-ocd.h> 32 - 33 - /* 34 - * PCI specific defines 35 - */ 36 - #define TITAN_PCI_0_CONFIG_ADDRESS 0x780 37 - #define TITAN_PCI_0_CONFIG_DATA 0x784 38 - 39 - /* 40 - * Titan PCI Config Read Byte 41 - */ 42 - static int titan_read_config(struct pci_bus *bus, unsigned int devfn, int reg, 43 - int size, u32 * val) 44 - { 45 - uint32_t address, tmp; 46 - int dev, busno, func; 47 - 48 - busno = bus->number; 49 - dev = PCI_SLOT(devfn); 50 - func = PCI_FUNC(devfn); 51 - 52 - address = (busno << 16) | (dev << 11) | (func << 8) | 53 - (reg & 0xfc) | 0x80000000; 54 - 55 - 56 - /* start the configuration cycle */ 57 - ocd_writel(address, TITAN_PCI_0_CONFIG_ADDRESS); 58 - tmp = ocd_readl(TITAN_PCI_0_CONFIG_DATA) >> ((reg & 3) << 3); 59 - 60 - switch (size) { 61 - case 1: 62 - tmp &= 0xff; 63 - case 2: 64 - tmp &= 0xffff; 65 - } 66 - *val = tmp; 67 - 68 - return PCIBIOS_SUCCESSFUL; 69 - } 70 - 71 - static int titan_write_config(struct pci_bus *bus, unsigned int devfn, int reg, 72 - int size, u32 val) 73 - { 74 - uint32_t address; 75 - int dev, busno, func; 76 - 77 - busno = bus->number; 78 - dev = PCI_SLOT(devfn); 79 - func = PCI_FUNC(devfn); 80 - 81 - address = (busno << 16) | (dev << 11) | (func << 8) | 82 - (reg & 0xfc) | 0x80000000; 83 - 84 - /* start the configuration cycle */ 85 - ocd_writel(address, TITAN_PCI_0_CONFIG_ADDRESS); 86 - 87 - /* write the data */ 88 - switch (size) { 89 - case 1: 90 - ocd_writeb(val, TITAN_PCI_0_CONFIG_DATA + (~reg & 0x3)); 91 - break; 92 - 93 - case 2: 94 - ocd_writew(val, TITAN_PCI_0_CONFIG_DATA + (~reg & 0x2)); 95 - break; 96 - 97 - case 4: 98 - ocd_writel(val, TITAN_PCI_0_CONFIG_DATA); 99 - break; 100 - } 101 - 102 - return PCIBIOS_SUCCESSFUL; 103 - } 104 - 105 - /* 106 - * Titan PCI structure 107 - */ 108 - struct pci_ops titan_pci_ops = { 109 - titan_read_config, 
110 - titan_write_config, 111 - };
+16 -18
arch/mips/pci/pci-bcm63xx.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/init.h> 13 13 #include <linux/delay.h> 14 + #include <linux/clk.h> 14 15 #include <asm/bootinfo.h> 16 + 17 + #include <bcm63xx_reset.h> 15 18 16 19 #include "pci-bcm63xx.h" 17 20 ··· 122 119 { 123 120 u32 val; 124 121 125 - /* enable clock */ 126 - val = bcm_perf_readl(PERF_CKCTL_REG); 127 - val |= CKCTL_6328_PCIE_EN; 128 - bcm_perf_writel(val, PERF_CKCTL_REG); 129 - 130 122 /* enable SERDES */ 131 123 val = bcm_misc_readl(MISC_SERDES_CTRL_REG); 132 124 val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN; 133 125 bcm_misc_writel(val, MISC_SERDES_CTRL_REG); 134 126 135 127 /* reset the PCIe core */ 136 - val = bcm_perf_readl(PERF_SOFTRESET_6328_REG); 137 - 138 - val &= ~SOFTRESET_6328_PCIE_MASK; 139 - val &= ~SOFTRESET_6328_PCIE_CORE_MASK; 140 - val &= ~SOFTRESET_6328_PCIE_HARD_MASK; 141 - val &= ~SOFTRESET_6328_PCIE_EXT_MASK; 142 - bcm_perf_writel(val, PERF_SOFTRESET_6328_REG); 128 + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1); 129 + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 1); 143 130 mdelay(10); 144 131 145 - val |= SOFTRESET_6328_PCIE_MASK; 146 - val |= SOFTRESET_6328_PCIE_CORE_MASK; 147 - val |= SOFTRESET_6328_PCIE_HARD_MASK; 148 - bcm_perf_writel(val, PERF_SOFTRESET_6328_REG); 132 + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 0); 149 133 mdelay(10); 150 134 151 - val |= SOFTRESET_6328_PCIE_EXT_MASK; 152 - bcm_perf_writel(val, PERF_SOFTRESET_6328_REG); 135 + bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 0); 153 136 mdelay(200); 154 137 } 138 + 139 + static struct clk *pcie_clk; 155 140 156 141 static int __init bcm63xx_register_pcie(void) 157 142 { 158 143 u32 val; 144 + 145 + /* enable clock */ 146 + pcie_clk = clk_get(NULL, "pcie"); 147 + if (IS_ERR_OR_NULL(pcie_clk)) 148 + return -ENODEV; 149 + 150 + clk_prepare_enable(pcie_clk); 159 151 160 152 bcm63xx_reset_pcie(); 161 153
+5
arch/mips/pci/pci-octeon.c
··· 11 11 #include <linux/interrupt.h> 12 12 #include <linux/time.h> 13 13 #include <linux/delay.h> 14 + #include <linux/platform_device.h> 14 15 #include <linux/swiotlb.h> 15 16 16 17 #include <asm/time.h> ··· 704 703 * was setup properly. 705 704 */ 706 705 cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1); 706 + 707 + if (IS_ERR(platform_device_register_simple("octeon_pci_edac", 708 + -1, NULL, 0))) 709 + pr_err("Registation of co_pci_edac failed!\n"); 707 710 708 711 octeon_pci_dma_init(); 709 712
+34 -35
arch/mips/pci/pci-xlr.c
··· 47 47 48 48 #include <asm/netlogic/interrupt.h> 49 49 #include <asm/netlogic/haldefs.h> 50 + #include <asm/netlogic/common.h> 50 51 51 52 #include <asm/netlogic/xlr/msidef.h> 52 53 #include <asm/netlogic/xlr/iomap.h> ··· 175 174 return p ? bus->self : NULL; 176 175 } 177 176 178 - static int get_irq_vector(const struct pci_dev *dev) 177 + static int nlm_pci_link_to_irq(int link) 179 178 { 180 - struct pci_dev *lnk; 181 - 182 - if (!nlm_chip_is_xls()) 183 - return PIC_PCIX_IRQ; /* for XLR just one IRQ */ 184 - 185 - /* 186 - * For XLS PCIe, there is an IRQ per Link, find out which 187 - * link the device is on to assign interrupts 188 - */ 189 - lnk = xls_get_pcie_link(dev); 190 - if (lnk == NULL) 191 - return 0; 192 - 193 - switch (PCI_SLOT(lnk->devfn)) { 179 + switch (link) { 194 180 case 0: 195 181 return PIC_PCIE_LINK0_IRQ; 196 182 case 1: ··· 193 205 else 194 206 return PIC_PCIE_LINK3_IRQ; 195 207 } 196 - WARN(1, "Unexpected devfn %d\n", lnk->devfn); 208 + WARN(1, "Unexpected link %d\n", link); 197 209 return 0; 210 + } 211 + 212 + static int get_irq_vector(const struct pci_dev *dev) 213 + { 214 + struct pci_dev *lnk; 215 + int link; 216 + 217 + if (!nlm_chip_is_xls()) 218 + return PIC_PCIX_IRQ; /* for XLR just one IRQ */ 219 + 220 + lnk = xls_get_pcie_link(dev); 221 + if (lnk == NULL) 222 + return 0; 223 + 224 + link = PCI_SLOT(lnk->devfn); 225 + return nlm_pci_link_to_irq(link); 198 226 } 199 227 200 228 #ifdef CONFIG_PCI_MSI ··· 336 332 337 333 static int __init pcibios_init(void) 338 334 { 335 + void (*extra_ack)(struct irq_data *); 336 + int link, irq; 337 + 339 338 /* PSB assigns PCI resources */ 340 339 pci_set_flags(PCI_PROBE_ONLY); 341 340 pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20); ··· 357 350 * For PCI interrupts, we need to ack the PCI controller too, overload 358 351 * irq handler data to do this 359 352 */ 360 - if (nlm_chip_is_xls()) { 361 - if (nlm_chip_is_xls_b()) { 362 - irq_set_handler_data(PIC_PCIE_LINK0_IRQ, 363 - 
xls_pcie_ack_b); 364 - irq_set_handler_data(PIC_PCIE_LINK1_IRQ, 365 - xls_pcie_ack_b); 366 - irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ, 367 - xls_pcie_ack_b); 368 - irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, 369 - xls_pcie_ack_b); 370 - } else { 371 - irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack); 372 - irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack); 373 - irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack); 374 - irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack); 375 - } 376 - } else { 353 + if (!nlm_chip_is_xls()) { 377 354 /* XLR PCI controller ACK */ 378 - irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack); 355 + nlm_set_pic_extra_ack(0, PIC_PCIX_IRQ, xlr_pci_ack); 356 + } else { 357 + if (nlm_chip_is_xls_b()) 358 + extra_ack = xls_pcie_ack_b; 359 + else 360 + extra_ack = xls_pcie_ack; 361 + for (link = 0; link < 4; link++) { 362 + irq = nlm_pci_link_to_irq(link); 363 + nlm_set_pic_extra_ack(0, irq, extra_ack); 364 + } 379 365 } 380 - 381 366 return 0; 382 367 } 383 368
-67
arch/mips/pci/pci-yosemite.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) 7 - */ 8 - #include <linux/init.h> 9 - #include <linux/kernel.h> 10 - #include <linux/types.h> 11 - #include <linux/pci.h> 12 - #include <asm/titan_dep.h> 13 - 14 - extern struct pci_ops titan_pci_ops; 15 - 16 - static struct resource py_mem_resource = { 17 - .start = 0xe0000000UL, 18 - .end = 0xe3ffffffUL, 19 - .name = "Titan PCI MEM", 20 - .flags = IORESOURCE_MEM 21 - }; 22 - 23 - /* 24 - * PMON really reserves 16MB of I/O port space but that's stupid, nothing 25 - * needs that much since allocations are limited to 256 bytes per device 26 - * anyway. So we just claim 64kB here. 27 - */ 28 - #define TITAN_IO_SIZE 0x0000ffffUL 29 - #define TITAN_IO_BASE 0xe8000000UL 30 - 31 - static struct resource py_io_resource = { 32 - .start = 0x00001000UL, 33 - .end = TITAN_IO_SIZE - 1, 34 - .name = "Titan IO MEM", 35 - .flags = IORESOURCE_IO, 36 - }; 37 - 38 - static struct pci_controller py_controller = { 39 - .pci_ops = &titan_pci_ops, 40 - .mem_resource = &py_mem_resource, 41 - .mem_offset = 0x00000000UL, 42 - .io_resource = &py_io_resource, 43 - .io_offset = 0x00000000UL 44 - }; 45 - 46 - static char ioremap_failed[] __initdata = "Could not ioremap I/O port range"; 47 - 48 - static int __init pmc_yosemite_setup(void) 49 - { 50 - unsigned long io_v_base; 51 - 52 - io_v_base = (unsigned long) ioremap(TITAN_IO_BASE, TITAN_IO_SIZE); 53 - if (!io_v_base) 54 - panic(ioremap_failed); 55 - 56 - set_io_port_base(io_v_base); 57 - py_controller.io_map_base = io_v_base; 58 - TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1); 59 - 60 - ioport_resource.end = TITAN_IO_SIZE - 1; 61 - 62 - register_pci_controller(&py_controller); 63 - 64 - return 0; 65 - } 66 - 67 - arch_initcall(pmc_yosemite_setup);
-4
arch/mips/pmc-sierra/Kconfig
··· 34 34 35 35 endchoice 36 36 37 - config HYPERTRANSPORT 38 - bool "Hypertransport Support for PMC-Sierra Yosemite" 39 - depends on PMC_YOSEMITE 40 - 41 37 config MSP_HAS_USB 42 38 boolean 43 39 depends on PMC_MSP
-7
arch/mips/pmc-sierra/Platform
··· 5 5 cflags-$(CONFIG_PMC_MSP) += -I$(srctree)/arch/mips/include/asm/pmc-sierra/msp71xx \ 6 6 -mno-branch-likely 7 7 load-$(CONFIG_PMC_MSP) += 0xffffffff80100000 8 - 9 - # 10 - # PMC-Sierra Yosemite 11 - # 12 - platform-$(CONFIG_PMC_YOSEMITE) += pmc-sierra/yosemite/ 13 - cflags-$(CONFIG_PMC_YOSEMITE) += -I$(srctree)/arch/mips/include/asm/mach-yosemite 14 - load-$(CONFIG_PMC_YOSEMITE) += 0xffffffff80100000
-7
arch/mips/pmc-sierra/yosemite/Makefile
··· 1 - # 2 - # Makefile for the PMC-Sierra Titan 3 - # 4 - 5 - obj-y += irq.o prom.o py-console.o setup.o 6 - 7 - obj-$(CONFIG_SMP) += smp.o
-169
arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c
··· 1 - /* 2 - * Copyright (C) 2003 PMC-Sierra Inc. 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License as published by the 7 - * Free Software Foundation; either version 2 of the License, or (at your 8 - * option) any later version. 9 - * 10 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 11 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 12 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 13 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 14 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 15 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 16 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 17 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 18 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 19 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 20 - * 21 - * You should have received a copy of the GNU General Public License along 22 - * with this program; if not, write to the Free Software Foundation, Inc., 23 - * 675 Mass Ave, Cambridge, MA 02139, USA. 24 - */ 25 - 26 - /* 27 - * Description: 28 - * 29 - * This code reads the ATMEL 24CXX EEPROM. The PMC-Sierra Yosemite board uses the ATMEL 30 - * 24C32/24C64 which uses two byte addressing as compared to 24C16. Note that this program 31 - * uses the serial port like /dev/ttyS0, to communicate with the EEPROM. Hence, you are 32 - * expected to have a connectivity from the EEPROM to the serial port. 
This program does 33 - * __not__ communicate using the I2C protocol 34 - */ 35 - 36 - #include "atmel_read_eeprom.h" 37 - 38 - static void delay(int delay) 39 - { 40 - while (delay--); 41 - } 42 - 43 - static void send_bit(unsigned char bit) 44 - { 45 - scl_lo; 46 - delay(TXX); 47 - if (bit) 48 - sda_hi; 49 - else 50 - sda_lo; 51 - 52 - delay(TXX); 53 - scl_hi; 54 - delay(TXX); 55 - } 56 - 57 - static void send_ack(void) 58 - { 59 - send_bit(0); 60 - } 61 - 62 - static void send_byte(unsigned char byte) 63 - { 64 - int i = 0; 65 - 66 - for (i = 7; i >= 0; i--) 67 - send_bit((byte >> i) & 0x01); 68 - } 69 - 70 - static void send_start(void) 71 - { 72 - sda_hi; 73 - delay(TXX); 74 - scl_hi; 75 - delay(TXX); 76 - sda_lo; 77 - delay(TXX); 78 - } 79 - 80 - static void send_stop(void) 81 - { 82 - sda_lo; 83 - delay(TXX); 84 - scl_hi; 85 - delay(TXX); 86 - sda_hi; 87 - delay(TXX); 88 - } 89 - 90 - static void do_idle(void) 91 - { 92 - sda_hi; 93 - scl_hi; 94 - vcc_off; 95 - } 96 - 97 - static int recv_bit(void) 98 - { 99 - int status; 100 - 101 - scl_lo; 102 - delay(TXX); 103 - sda_hi; 104 - delay(TXX); 105 - scl_hi; 106 - delay(TXX); 107 - 108 - return 1; 109 - } 110 - 111 - static unsigned char recv_byte(void) { 112 - int i; 113 - unsigned char byte=0; 114 - 115 - for (i=7;i>=0;i--) 116 - byte |= (recv_bit() << i); 117 - 118 - return byte; 119 - } 120 - 121 - static int recv_ack(void) 122 - { 123 - unsigned int ack; 124 - 125 - ack = (unsigned int)recv_bit(); 126 - scl_lo; 127 - 128 - if (ack) { 129 - do_idle(); 130 - printk(KERN_ERR "Error reading the Atmel 24C32/24C64 EEPROM\n"); 131 - return -1; 132 - } 133 - 134 - return ack; 135 - } 136 - 137 - /* 138 - * This function does the actual read of the EEPROM. 
It needs the buffer into which the 139 - * read data is copied, the size of the EEPROM being read and the buffer size 140 - */ 141 - int read_eeprom(char *buffer, int eeprom_size, int size) 142 - { 143 - int i = 0, err; 144 - 145 - send_start(); 146 - send_byte(W_HEADER); 147 - recv_ack(); 148 - 149 - /* EEPROM with size of more than 2K need two byte addressing */ 150 - if (eeprom_size > 2048) { 151 - send_byte(0x00); 152 - recv_ack(); 153 - } 154 - 155 - send_start(); 156 - send_byte(R_HEADER); 157 - err = recv_ack(); 158 - if (err == -1) 159 - return err; 160 - 161 - for (i = 0; i < size; i++) { 162 - *buffer++ = recv_byte(); 163 - send_ack(); 164 - } 165 - 166 - /* Note : We should do some check if the buffer contains correct information */ 167 - 168 - send_stop(); 169 - }
-67
arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.h
··· 1 - /* 2 - * arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c 3 - * 4 - * Copyright (C) 2003 PMC-Sierra Inc. 5 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 6 - * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org) 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License as published by the 10 - * Free Software Foundation; either version 2 of the License, or (at your 11 - * option) any later version. 12 - * 13 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 14 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 15 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 16 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 17 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 18 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 19 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 22 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 - * 24 - * You should have received a copy of the GNU General Public License along 25 - * with this program; if not, write to the Free Software Foundation, Inc., 26 - * 675 Mass Ave, Cambridge, MA 02139, USA. 
27 - */ 28 - 29 - /* 30 - * Header file for atmel_read_eeprom.c 31 - */ 32 - 33 - #include <linux/types.h> 34 - #include <linux/pci.h> 35 - #include <linux/kernel.h> 36 - #include <linux/slab.h> 37 - #include <asm/pci.h> 38 - #include <asm/io.h> 39 - #include <linux/init.h> 40 - #include <asm/termios.h> 41 - #include <asm/ioctls.h> 42 - #include <linux/ioctl.h> 43 - #include <linux/fcntl.h> 44 - 45 - #define DEFAULT_PORT "/dev/ttyS0" /* Port to open */ 46 - #define TXX 0 /* Dummy loop for spinning */ 47 - 48 - #define BLOCK_SEL 0x00 49 - #define SLAVE_ADDR 0xa0 50 - #define READ_BIT 0x01 51 - #define WRITE_BIT 0x00 52 - #define R_HEADER SLAVE_ADDR + BLOCK_SEL + READ_BIT 53 - #define W_HEADER SLAVE_ADDR + BLOCK_SEL + WRITE_BIT 54 - 55 - /* 56 - * Clock, Voltages and Data 57 - */ 58 - #define vcc_off (ioctl(fd, TIOCSBRK, 0)) 59 - #define vcc_on (ioctl(fd, TIOCCBRK, 0)) 60 - #define sda_hi (ioctl(fd, TIOCMBIS, &dtr)) 61 - #define sda_lo (ioctl(fd, TIOCMBIC, &dtr)) 62 - #define scl_lo (ioctl(fd, TIOCMBIC, &rts)) 63 - #define scl_hi (ioctl(fd, TIOCMBIS, &rts)) 64 - 65 - const char rts = TIOCM_RTS; 66 - const char dtr = TIOCM_DTR; 67 - int fd;
-41
arch/mips/pmc-sierra/yosemite/ht-irq.c
··· 1 - /* 2 - * Copyright 2003 PMC-Sierra 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License as published by the 7 - * Free Software Foundation; either version 2 of the License, or (at your 8 - * option) any later version. 9 - * 10 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 11 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 12 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 13 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 14 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 15 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 16 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 17 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 18 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 19 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 20 - * 21 - * You should have received a copy of the GNU General Public License along 22 - * with this program; if not, write to the Free Software Foundation, Inc., 23 - * 675 Mass Ave, Cambridge, MA 02139, USA. 24 - */ 25 - 26 - #include <linux/types.h> 27 - #include <linux/pci.h> 28 - #include <linux/kernel.h> 29 - #include <linux/init.h> 30 - #include <asm/pci.h> 31 - 32 - /* 33 - * HT Bus fixup for the Titan 34 - * XXX IRQ values need to change based on the board layout 35 - */ 36 - void __init titan_ht_pcibios_fixup_bus(struct pci_bus *bus) 37 - { 38 - /* 39 - * PLX and SPKT related changes go here 40 - */ 41 - }
-404
arch/mips/pmc-sierra/yosemite/ht.c
··· 1 - /* 2 - * Copyright 2003 PMC-Sierra 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License as published by the 7 - * Free Software Foundation; either version 2 of the License, or (at your 8 - * option) any later version. 9 - * 10 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 11 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 12 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 13 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 14 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 15 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 16 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 17 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 18 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 19 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 20 - * 21 - * You should have received a copy of the GNU General Public License along 22 - * with this program; if not, write to the Free Software Foundation, Inc., 23 - * 675 Mass Ave, Cambridge, MA 02139, USA. 24 - */ 25 - 26 - #include <linux/types.h> 27 - #include <linux/pci.h> 28 - #include <linux/kernel.h> 29 - #include <asm/pci.h> 30 - #include <asm/io.h> 31 - 32 - #include <linux/init.h> 33 - #include <asm/titan_dep.h> 34 - 35 - #ifdef CONFIG_HYPERTRANSPORT 36 - 37 - 38 - /* 39 - * This function check if the Hypertransport Link Initialization completed. 
If 40 - * it did, then proceed further with scanning bus #2 41 - */ 42 - static __inline__ int check_titan_htlink(void) 43 - { 44 - u32 val; 45 - 46 - val = *(volatile uint32_t *)(RM9000x2_HTLINK_REG); 47 - if (val & 0x00000020) 48 - /* HT Link Initialization completed */ 49 - return 1; 50 - else 51 - return 0; 52 - } 53 - 54 - static int titan_ht_config_read_dword(struct pci_dev *device, 55 - int offset, u32* val) 56 - { 57 - int dev, bus, func; 58 - uint32_t address_reg, data_reg; 59 - uint32_t address; 60 - 61 - bus = device->bus->number; 62 - dev = PCI_SLOT(device->devfn); 63 - func = PCI_FUNC(device->devfn); 64 - 65 - /* XXX Need to change the Bus # */ 66 - if (bus > 2) 67 - address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 68 - 0x80000000 | 0x1; 69 - else 70 - address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; 71 - 72 - address_reg = RM9000x2_OCD_HTCFGA; 73 - data_reg = RM9000x2_OCD_HTCFGD; 74 - 75 - RM9K_WRITE(address_reg, address); 76 - RM9K_READ(data_reg, val); 77 - 78 - return PCIBIOS_SUCCESSFUL; 79 - } 80 - 81 - 82 - static int titan_ht_config_read_word(struct pci_dev *device, 83 - int offset, u16* val) 84 - { 85 - int dev, bus, func; 86 - uint32_t address_reg, data_reg; 87 - uint32_t address; 88 - 89 - bus = device->bus->number; 90 - dev = PCI_SLOT(device->devfn); 91 - func = PCI_FUNC(device->devfn); 92 - 93 - /* XXX Need to change the Bus # */ 94 - if (bus > 2) 95 - address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 96 - 0x80000000 | 0x1; 97 - else 98 - address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; 99 - 100 - address_reg = RM9000x2_OCD_HTCFGA; 101 - data_reg = RM9000x2_OCD_HTCFGD; 102 - 103 - if ((offset & 0x3) == 0) 104 - offset = 0x2; 105 - else 106 - offset = 0x0; 107 - 108 - RM9K_WRITE(address_reg, address); 109 - RM9K_READ_16(data_reg + offset, val); 110 - 111 - return PCIBIOS_SUCCESSFUL; 112 - } 113 - 114 - 115 - u32 longswap(unsigned long l) 116 - { 117 - unsigned char 
b1, b2, b3, b4; 118 - 119 - b1 = l&255; 120 - b2 = (l>>8)&255; 121 - b3 = (l>>16)&255; 122 - b4 = (l>>24)&255; 123 - 124 - return ((b1<<24) + (b2<<16) + (b3<<8) + b4); 125 - } 126 - 127 - 128 - static int titan_ht_config_read_byte(struct pci_dev *device, 129 - int offset, u8* val) 130 - { 131 - int dev, bus, func; 132 - uint32_t address_reg, data_reg; 133 - uint32_t address; 134 - int offset1; 135 - 136 - bus = device->bus->number; 137 - dev = PCI_SLOT(device->devfn); 138 - func = PCI_FUNC(device->devfn); 139 - 140 - /* XXX Need to change the Bus # */ 141 - if (bus > 2) 142 - address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 143 - 0x80000000 | 0x1; 144 - else 145 - address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; 146 - 147 - address_reg = RM9000x2_OCD_HTCFGA; 148 - data_reg = RM9000x2_OCD_HTCFGD; 149 - 150 - RM9K_WRITE(address_reg, address); 151 - 152 - if ((offset & 0x3) == 0) { 153 - offset1 = 0x3; 154 - } 155 - if ((offset & 0x3) == 1) { 156 - offset1 = 0x2; 157 - } 158 - if ((offset & 0x3) == 2) { 159 - offset1 = 0x1; 160 - } 161 - if ((offset & 0x3) == 3) { 162 - offset1 = 0x0; 163 - } 164 - RM9K_READ_8(data_reg + offset1, val); 165 - 166 - return PCIBIOS_SUCCESSFUL; 167 - } 168 - 169 - 170 - static int titan_ht_config_write_dword(struct pci_dev *device, 171 - int offset, u8 val) 172 - { 173 - int dev, bus, func; 174 - uint32_t address_reg, data_reg; 175 - uint32_t address; 176 - 177 - bus = device->bus->number; 178 - dev = PCI_SLOT(device->devfn); 179 - func = PCI_FUNC(device->devfn); 180 - 181 - /* XXX Need to change the Bus # */ 182 - if (bus > 2) 183 - address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 184 - 0x80000000 | 0x1; 185 - else 186 - address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; 187 - 188 - address_reg = RM9000x2_OCD_HTCFGA; 189 - data_reg = RM9000x2_OCD_HTCFGD; 190 - 191 - RM9K_WRITE(address_reg, address); 192 - RM9K_WRITE(data_reg, val); 193 - 194 - return 
PCIBIOS_SUCCESSFUL; 195 - } 196 - 197 - static int titan_ht_config_write_word(struct pci_dev *device, 198 - int offset, u8 val) 199 - { 200 - int dev, bus, func; 201 - uint32_t address_reg, data_reg; 202 - uint32_t address; 203 - 204 - bus = device->bus->number; 205 - dev = PCI_SLOT(device->devfn); 206 - func = PCI_FUNC(device->devfn); 207 - 208 - /* XXX Need to change the Bus # */ 209 - if (bus > 2) 210 - address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 211 - 0x80000000 | 0x1; 212 - else 213 - address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; 214 - 215 - address_reg = RM9000x2_OCD_HTCFGA; 216 - data_reg = RM9000x2_OCD_HTCFGD; 217 - 218 - if ((offset & 0x3) == 0) 219 - offset = 0x2; 220 - else 221 - offset = 0x0; 222 - 223 - RM9K_WRITE(address_reg, address); 224 - RM9K_WRITE_16(data_reg + offset, val); 225 - 226 - return PCIBIOS_SUCCESSFUL; 227 - } 228 - 229 - static int titan_ht_config_write_byte(struct pci_dev *device, 230 - int offset, u8 val) 231 - { 232 - int dev, bus, func; 233 - uint32_t address_reg, data_reg; 234 - uint32_t address; 235 - int offset1; 236 - 237 - bus = device->bus->number; 238 - dev = PCI_SLOT(device->devfn); 239 - func = PCI_FUNC(device->devfn); 240 - 241 - /* XXX Need to change the Bus # */ 242 - if (bus > 2) 243 - address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 244 - 0x80000000 | 0x1; 245 - else 246 - address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; 247 - 248 - address_reg = RM9000x2_OCD_HTCFGA; 249 - data_reg = RM9000x2_OCD_HTCFGD; 250 - 251 - RM9K_WRITE(address_reg, address); 252 - 253 - if ((offset & 0x3) == 0) { 254 - offset1 = 0x3; 255 - } 256 - if ((offset & 0x3) == 1) { 257 - offset1 = 0x2; 258 - } 259 - if ((offset & 0x3) == 2) { 260 - offset1 = 0x1; 261 - } 262 - if ((offset & 0x3) == 3) { 263 - offset1 = 0x0; 264 - } 265 - 266 - RM9K_WRITE_8(data_reg + offset1, val); 267 - return PCIBIOS_SUCCESSFUL; 268 - } 269 - 270 - 271 - static void 
titan_pcibios_set_master(struct pci_dev *dev) 272 - { 273 - u16 cmd; 274 - int bus = dev->bus->number; 275 - 276 - if (check_titan_htlink()) 277 - titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); 278 - 279 - cmd |= PCI_COMMAND_MASTER; 280 - 281 - if (check_titan_htlink()) 282 - titan_ht_config_write_word(dev, PCI_COMMAND, cmd); 283 - } 284 - 285 - 286 - int pcibios_enable_resources(struct pci_dev *dev) 287 - { 288 - u16 cmd, old_cmd; 289 - u8 tmp1; 290 - int idx; 291 - struct resource *r; 292 - int bus = dev->bus->number; 293 - 294 - if (check_titan_htlink()) 295 - titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); 296 - 297 - old_cmd = cmd; 298 - for (idx = 0; idx < 6; idx++) { 299 - r = &dev->resource[idx]; 300 - if (!r->start && r->end) { 301 - printk(KERN_ERR 302 - "PCI: Device %s not available because of " 303 - "resource collisions\n", pci_name(dev)); 304 - return -EINVAL; 305 - } 306 - if (r->flags & IORESOURCE_IO) 307 - cmd |= PCI_COMMAND_IO; 308 - if (r->flags & IORESOURCE_MEM) 309 - cmd |= PCI_COMMAND_MEMORY; 310 - } 311 - if (cmd != old_cmd) { 312 - if (check_titan_htlink()) 313 - titan_ht_config_write_word(dev, PCI_COMMAND, cmd); 314 - } 315 - 316 - if (check_titan_htlink()) 317 - titan_ht_config_read_byte(dev, PCI_CACHE_LINE_SIZE, &tmp1); 318 - 319 - if (tmp1 != 8) { 320 - printk(KERN_WARNING "PCI setting cache line size to 8 from " 321 - "%d\n", tmp1); 322 - } 323 - 324 - if (check_titan_htlink()) 325 - titan_ht_config_write_byte(dev, PCI_CACHE_LINE_SIZE, 8); 326 - 327 - if (check_titan_htlink()) 328 - titan_ht_config_read_byte(dev, PCI_LATENCY_TIMER, &tmp1); 329 - 330 - if (tmp1 < 32 || tmp1 == 0xff) { 331 - printk(KERN_WARNING "PCI setting latency timer to 32 from %d\n", 332 - tmp1); 333 - } 334 - 335 - if (check_titan_htlink()) 336 - titan_ht_config_write_byte(dev, PCI_LATENCY_TIMER, 32); 337 - 338 - return 0; 339 - } 340 - 341 - 342 - int pcibios_enable_device(struct pci_dev *dev, int mask) 343 - { 344 - return pcibios_enable_resources(dev); 345 
- } 346 - 347 - resource_size_t pcibios_align_resource(void *data, const struct resource *res, 348 - resource_size_t size, resource_size_t align) 349 - { 350 - struct pci_dev *dev = data; 351 - resource_size_t start = res->start; 352 - 353 - if (res->flags & IORESOURCE_IO) { 354 - /* We need to avoid collisions with `mirrored' VGA ports 355 - and other strange ISA hardware, so we always want the 356 - addresses kilobyte aligned. */ 357 - if (size > 0x100) { 358 - printk(KERN_ERR "PCI: I/O Region %s/%d too large" 359 - " (%ld bytes)\n", pci_name(dev), 360 - dev->resource - res, size); 361 - } 362 - 363 - start = (start + 1024 - 1) & ~(1024 - 1); 364 - } 365 - 366 - return start; 367 - } 368 - 369 - struct pci_ops titan_pci_ops = { 370 - titan_ht_config_read_byte, 371 - titan_ht_config_read_word, 372 - titan_ht_config_read_dword, 373 - titan_ht_config_write_byte, 374 - titan_ht_config_write_word, 375 - titan_ht_config_write_dword 376 - }; 377 - 378 - void __init pcibios_fixup_bus(struct pci_bus *c) 379 - { 380 - titan_ht_pcibios_fixup_bus(c); 381 - } 382 - 383 - void __init pcibios_init(void) 384 - { 385 - 386 - /* Reset PCI I/O and PCI MEM values */ 387 - /* XXX Need to add the proper values here */ 388 - ioport_resource.start = 0xe0000000; 389 - ioport_resource.end = 0xe0000000 + 0x20000000 - 1; 390 - iomem_resource.start = 0xc0000000; 391 - iomem_resource.end = 0xc0000000 + 0x20000000 - 1; 392 - 393 - /* XXX Need to add bus values */ 394 - pci_scan_bus(2, &titan_pci_ops, NULL); 395 - pci_scan_bus(3, &titan_pci_ops, NULL); 396 - } 397 - 398 - unsigned __init int pcibios_assign_all_busses(void) 399 - { 400 - /* We want to use the PCI bus detection done by PMON */ 401 - return 0; 402 - } 403 - 404 - #endif /* CONFIG_HYPERTRANSPORT */
-152
arch/mips/pmc-sierra/yosemite/irq.c
··· 1 - /* 2 - * Copyright (C) 2003 PMC-Sierra Inc. 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 6 - * 7 - * This program is free software; you can redistribute it and/or modify it 8 - * under the terms of the GNU General Public License as published by the 9 - * Free Software Foundation; either version 2 of the License, or (at your 10 - * option) any later version. 11 - * 12 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 13 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 14 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 15 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 16 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 17 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 18 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 20 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 21 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 - * 23 - * You should have received a copy of the GNU General Public License along 24 - * with this program; if not, write to the Free Software Foundation, Inc., 25 - * 675 Mass Ave, Cambridge, MA 02139, USA. 
26 - * 27 - * Second level Interrupt handlers for the PMC-Sierra Titan/Yosemite board 28 - */ 29 - #include <linux/errno.h> 30 - #include <linux/init.h> 31 - #include <linux/kernel_stat.h> 32 - #include <linux/module.h> 33 - #include <linux/signal.h> 34 - #include <linux/sched.h> 35 - #include <linux/types.h> 36 - #include <linux/interrupt.h> 37 - #include <linux/ioport.h> 38 - #include <linux/irq.h> 39 - #include <linux/timex.h> 40 - #include <linux/random.h> 41 - #include <linux/bitops.h> 42 - #include <asm/bootinfo.h> 43 - #include <asm/io.h> 44 - #include <asm/irq.h> 45 - #include <asm/irq_cpu.h> 46 - #include <asm/mipsregs.h> 47 - #include <asm/titan_dep.h> 48 - 49 - /* Hypertransport specific */ 50 - #define IRQ_ACK_BITS 0x00000000 /* Ack bits */ 51 - 52 - #define HYPERTRANSPORT_INTA 0x78 /* INTA# */ 53 - #define HYPERTRANSPORT_INTB 0x79 /* INTB# */ 54 - #define HYPERTRANSPORT_INTC 0x7a /* INTC# */ 55 - #define HYPERTRANSPORT_INTD 0x7b /* INTD# */ 56 - 57 - extern void titan_mailbox_irq(void); 58 - 59 - #ifdef CONFIG_HYPERTRANSPORT 60 - /* 61 - * Handle hypertransport & SMP interrupts. The interrupt lines are scarce. 62 - * For interprocessor interrupts, the best thing to do is to use the INTMSG 63 - * register. We use the same external interrupt line, i.e. INTB3 and monitor 64 - * another status bit 65 - */ 66 - static void ll_ht_smp_irq_handler(int irq) 67 - { 68 - u32 status = OCD_READ(RM9000x2_OCD_INTP0STATUS4); 69 - 70 - /* Ack all the bits that correspond to the interrupt sources */ 71 - if (status != 0) 72 - OCD_WRITE(RM9000x2_OCD_INTP0STATUS4, IRQ_ACK_BITS); 73 - 74 - status = OCD_READ(RM9000x2_OCD_INTP1STATUS4); 75 - if (status != 0) 76 - OCD_WRITE(RM9000x2_OCD_INTP1STATUS4, IRQ_ACK_BITS); 77 - 78 - #ifdef CONFIG_HT_LEVEL_TRIGGER 79 - /* 80 - * Level Trigger Mode only. Send the HT EOI message back to the source. 
81 - */ 82 - switch (status) { 83 - case 0x1000000: 84 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTA); 85 - break; 86 - case 0x2000000: 87 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTB); 88 - break; 89 - case 0x4000000: 90 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTC); 91 - break; 92 - case 0x8000000: 93 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTD); 94 - break; 95 - case 0x0000001: 96 - /* PLX */ 97 - OCD_WRITE(RM9000x2_OCD_HTEOI, 0x20); 98 - OCD_WRITE(IRQ_CLEAR_REG, IRQ_ACK_BITS); 99 - break; 100 - case 0xf000000: 101 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTA); 102 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTB); 103 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTC); 104 - OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTD); 105 - break; 106 - } 107 - #endif /* CONFIG_HT_LEVEL_TRIGGER */ 108 - 109 - do_IRQ(irq); 110 - } 111 - #endif 112 - 113 - asmlinkage void plat_irq_dispatch(void) 114 - { 115 - unsigned int cause = read_c0_cause(); 116 - unsigned int status = read_c0_status(); 117 - unsigned int pending = cause & status; 118 - 119 - if (pending & STATUSF_IP7) { 120 - do_IRQ(7); 121 - } else if (pending & STATUSF_IP2) { 122 - #ifdef CONFIG_HYPERTRANSPORT 123 - ll_ht_smp_irq_handler(2); 124 - #else 125 - do_IRQ(2); 126 - #endif 127 - } else if (pending & STATUSF_IP3) { 128 - do_IRQ(3); 129 - } else if (pending & STATUSF_IP4) { 130 - do_IRQ(4); 131 - } else if (pending & STATUSF_IP5) { 132 - #ifdef CONFIG_SMP 133 - titan_mailbox_irq(); 134 - #else 135 - do_IRQ(5); 136 - #endif 137 - } else if (pending & STATUSF_IP6) { 138 - do_IRQ(4); 139 - } 140 - } 141 - 142 - /* 143 - * Initialize the next level interrupt handler 144 - */ 145 - void __init arch_init_irq(void) 146 - { 147 - clear_c0_status(ST0_IM); 148 - 149 - mips_cpu_irq_init(); 150 - rm7k_cpu_irq_init(); 151 - rm9k_cpu_irq_init(); 152 - }
-142
arch/mips/pmc-sierra/yosemite/prom.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify it 3 - * under the terms of the GNU General Public License as published by the 4 - * Free Software Foundation; either version 2 of the License, or (at your 5 - * option) any later version. 6 - * 7 - * Copyright (C) 2003, 2004 PMC-Sierra Inc. 8 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 9 - * Copyright (C) 2004 Ralf Baechle 10 - */ 11 - #include <linux/init.h> 12 - #include <linux/sched.h> 13 - #include <linux/mm.h> 14 - #include <linux/delay.h> 15 - #include <linux/pm.h> 16 - #include <linux/smp.h> 17 - 18 - #include <asm/io.h> 19 - #include <asm/pgtable.h> 20 - #include <asm/processor.h> 21 - #include <asm/reboot.h> 22 - #include <asm/smp-ops.h> 23 - #include <asm/bootinfo.h> 24 - #include <asm/pmon.h> 25 - 26 - #ifdef CONFIG_SMP 27 - extern void prom_grab_secondary(void); 28 - #else 29 - #define prom_grab_secondary() do { } while (0) 30 - #endif 31 - 32 - #include "setup.h" 33 - 34 - struct callvectors *debug_vectors; 35 - 36 - extern unsigned long yosemite_base; 37 - extern unsigned long cpu_clock_freq; 38 - 39 - const char *get_system_type(void) 40 - { 41 - return "PMC-Sierra Yosemite"; 42 - } 43 - 44 - static void prom_cpu0_exit(void *arg) 45 - { 46 - void *nvram = (void *) YOSEMITE_RTC_BASE; 47 - 48 - /* Ask the NVRAM/RTC/watchdog chip to assert reset in 1/16 second */ 49 - writeb(0x84, nvram + 0xff7); 50 - 51 - /* wait for the watchdog to go off */ 52 - mdelay(100 + (1000 / 16)); 53 - 54 - /* if the watchdog fails for some reason, let people know */ 55 - printk(KERN_NOTICE "Watchdog reset failed\n"); 56 - } 57 - 58 - /* 59 - * Reset the NVRAM over the local bus 60 - */ 61 - static void prom_exit(void) 62 - { 63 - #ifdef CONFIG_SMP 64 - if (smp_processor_id()) 65 - /* CPU 1 */ 66 - smp_call_function(prom_cpu0_exit, NULL, 1); 67 - #endif 68 - prom_cpu0_exit(NULL); 69 - } 70 - 71 - /* 72 - * Halt the system 73 - */ 74 - static void prom_halt(void) 75 - { 76 - 
printk(KERN_NOTICE "\n** You can safely turn off the power\n"); 77 - while (1) 78 - __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0"); 79 - } 80 - 81 - extern struct plat_smp_ops yos_smp_ops; 82 - 83 - /* 84 - * Init routine which accepts the variables from PMON 85 - */ 86 - void __init prom_init(void) 87 - { 88 - int argc = fw_arg0; 89 - char **arg = (char **) fw_arg1; 90 - char **env = (char **) fw_arg2; 91 - struct callvectors *cv = (struct callvectors *) fw_arg3; 92 - int i = 0; 93 - 94 - /* Callbacks for halt, restart */ 95 - _machine_restart = (void (*)(char *)) prom_exit; 96 - _machine_halt = prom_halt; 97 - pm_power_off = prom_halt; 98 - 99 - debug_vectors = cv; 100 - arcs_cmdline[0] = '\0'; 101 - 102 - /* Get the boot parameters */ 103 - for (i = 1; i < argc; i++) { 104 - if (strlen(arcs_cmdline) + strlen(arg[i]) + 1 >= 105 - sizeof(arcs_cmdline)) 106 - break; 107 - 108 - strcat(arcs_cmdline, arg[i]); 109 - strcat(arcs_cmdline, " "); 110 - } 111 - 112 - #ifdef CONFIG_SERIAL_8250_CONSOLE 113 - if ((strstr(arcs_cmdline, "console=ttyS")) == NULL) 114 - strcat(arcs_cmdline, "console=ttyS0,115200"); 115 - #endif 116 - 117 - while (*env) { 118 - if (strncmp("ocd_base", *env, strlen("ocd_base")) == 0) 119 - yosemite_base = 120 - simple_strtol(*env + strlen("ocd_base="), NULL, 121 - 16); 122 - 123 - if (strncmp("cpuclock", *env, strlen("cpuclock")) == 0) 124 - cpu_clock_freq = 125 - simple_strtol(*env + strlen("cpuclock="), NULL, 126 - 10); 127 - 128 - env++; 129 - } 130 - 131 - prom_grab_secondary(); 132 - 133 - register_smp_ops(&yos_smp_ops); 134 - } 135 - 136 - void __init prom_free_prom_memory(void) 137 - { 138 - } 139 - 140 - void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 141 - { 142 - }
-109
arch/mips/pmc-sierra/yosemite/py-console.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2001, 2002, 2004 Ralf Baechle 7 - */ 8 - #include <linux/init.h> 9 - #include <linux/console.h> 10 - #include <linux/kdev_t.h> 11 - #include <linux/major.h> 12 - #include <linux/termios.h> 13 - #include <linux/sched.h> 14 - #include <linux/tty.h> 15 - 16 - #include <linux/serial.h> 17 - #include <linux/serial_core.h> 18 - #include <asm/serial.h> 19 - #include <asm/io.h> 20 - 21 - /* SUPERIO uart register map */ 22 - struct yo_uartregs { 23 - union { 24 - volatile u8 rbr; /* read only, DLAB == 0 */ 25 - volatile u8 thr; /* write only, DLAB == 0 */ 26 - volatile u8 dll; /* DLAB == 1 */ 27 - } u1; 28 - union { 29 - volatile u8 ier; /* DLAB == 0 */ 30 - volatile u8 dlm; /* DLAB == 1 */ 31 - } u2; 32 - union { 33 - volatile u8 iir; /* read only */ 34 - volatile u8 fcr; /* write only */ 35 - } u3; 36 - volatile u8 iu_lcr; 37 - volatile u8 iu_mcr; 38 - volatile u8 iu_lsr; 39 - volatile u8 iu_msr; 40 - volatile u8 iu_scr; 41 - } yo_uregs_t; 42 - 43 - #define iu_rbr u1.rbr 44 - #define iu_thr u1.thr 45 - #define iu_dll u1.dll 46 - #define iu_ier u2.ier 47 - #define iu_dlm u2.dlm 48 - #define iu_iir u3.iir 49 - #define iu_fcr u3.fcr 50 - 51 - #define ssnop() __asm__ __volatile__("sll $0, $0, 1\n"); 52 - #define ssnop_4() do { ssnop(); ssnop(); ssnop(); ssnop(); } while (0) 53 - 54 - #define IO_BASE_64 0x9000000000000000ULL 55 - 56 - static unsigned char readb_outer_space(unsigned long long phys) 57 - { 58 - unsigned long long vaddr = IO_BASE_64 | phys; 59 - unsigned char res; 60 - unsigned int sr; 61 - 62 - sr = read_c0_status(); 63 - write_c0_status((sr | ST0_KX) & ~ ST0_IE); 64 - ssnop_4(); 65 - 66 - __asm__ __volatile__ ( 67 - " .set mips3 \n" 68 - " ld %0, %1 \n" 69 - " lbu %0, (%0) \n" 70 - " .set mips0 \n" 71 - : "=r" (res) 72 - : "m" (vaddr)); 73 - 74 - 
write_c0_status(sr); 75 - ssnop_4(); 76 - 77 - return res; 78 - } 79 - 80 - static void writeb_outer_space(unsigned long long phys, unsigned char c) 81 - { 82 - unsigned long long vaddr = IO_BASE_64 | phys; 83 - unsigned long tmp; 84 - unsigned int sr; 85 - 86 - sr = read_c0_status(); 87 - write_c0_status((sr | ST0_KX) & ~ ST0_IE); 88 - ssnop_4(); 89 - 90 - __asm__ __volatile__ ( 91 - " .set mips3 \n" 92 - " ld %0, %1 \n" 93 - " sb %2, (%0) \n" 94 - " .set mips0 \n" 95 - : "=&r" (tmp) 96 - : "m" (vaddr), "r" (c)); 97 - 98 - write_c0_status(sr); 99 - ssnop_4(); 100 - } 101 - 102 - void prom_putchar(char c) 103 - { 104 - unsigned long lsr = 0xfd000008ULL + offsetof(struct yo_uartregs, iu_lsr); 105 - unsigned long thr = 0xfd000008ULL + offsetof(struct yo_uartregs, iu_thr); 106 - 107 - while ((readb_outer_space(lsr) & 0x20) == 0); 108 - writeb_outer_space(thr, c); 109 - }
-224
arch/mips/pmc-sierra/yosemite/setup.c
··· 1 - /* 2 - * Copyright (C) 2003 PMC-Sierra Inc. 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * 5 - * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) 6 - * 7 - * This program is free software; you can redistribute it and/or modify it 8 - * under the terms of the GNU General Public License as published by the 9 - * Free Software Foundation; either version 2 of the License, or (at your 10 - * option) any later version. 11 - * 12 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 13 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 14 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 15 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 16 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 17 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 18 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 20 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 21 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 - * 23 - * You should have received a copy of the GNU General Public License along 24 - * with this program; if not, write to the Free Software Foundation, Inc., 25 - * 675 Mass Ave, Cambridge, MA 02139, USA. 
26 - */ 27 - #include <linux/bcd.h> 28 - #include <linux/init.h> 29 - #include <linux/kernel.h> 30 - #include <linux/export.h> 31 - #include <linux/types.h> 32 - #include <linux/mm.h> 33 - #include <linux/bootmem.h> 34 - #include <linux/swap.h> 35 - #include <linux/ioport.h> 36 - #include <linux/sched.h> 37 - #include <linux/interrupt.h> 38 - #include <linux/timex.h> 39 - #include <linux/termios.h> 40 - #include <linux/tty.h> 41 - #include <linux/serial.h> 42 - #include <linux/serial_core.h> 43 - #include <linux/serial_8250.h> 44 - 45 - #include <asm/time.h> 46 - #include <asm/bootinfo.h> 47 - #include <asm/page.h> 48 - #include <asm/io.h> 49 - #include <asm/irq.h> 50 - #include <asm/processor.h> 51 - #include <asm/reboot.h> 52 - #include <asm/serial.h> 53 - #include <asm/titan_dep.h> 54 - #include <asm/m48t37.h> 55 - 56 - #include "setup.h" 57 - 58 - unsigned char titan_ge_mac_addr_base[6] = { 59 - // 0x00, 0x03, 0xcc, 0x1d, 0x22, 0x00 60 - 0x00, 0xe0, 0x04, 0x00, 0x00, 0x21 61 - }; 62 - 63 - unsigned long cpu_clock_freq; 64 - unsigned long yosemite_base; 65 - 66 - static struct m48t37_rtc *m48t37_base; 67 - 68 - void __init bus_error_init(void) 69 - { 70 - /* Do nothing */ 71 - } 72 - 73 - 74 - void read_persistent_clock(struct timespec *ts) 75 - { 76 - unsigned int year, month, day, hour, min, sec; 77 - unsigned long flags; 78 - 79 - spin_lock_irqsave(&rtc_lock, flags); 80 - /* Stop the update to the time */ 81 - m48t37_base->control = 0x40; 82 - 83 - year = bcd2bin(m48t37_base->year); 84 - year += bcd2bin(m48t37_base->century) * 100; 85 - 86 - month = bcd2bin(m48t37_base->month); 87 - day = bcd2bin(m48t37_base->date); 88 - hour = bcd2bin(m48t37_base->hour); 89 - min = bcd2bin(m48t37_base->min); 90 - sec = bcd2bin(m48t37_base->sec); 91 - 92 - /* Start the update to the time again */ 93 - m48t37_base->control = 0x00; 94 - spin_unlock_irqrestore(&rtc_lock, flags); 95 - 96 - ts->tv_sec = mktime(year, month, day, hour, min, sec); 97 - ts->tv_nsec = 0; 98 - } 99 - 
100 - int rtc_mips_set_time(unsigned long tim) 101 - { 102 - struct rtc_time tm; 103 - unsigned long flags; 104 - 105 - /* 106 - * Convert to a more useful format -- note months count from 0 107 - * and years from 1900 108 - */ 109 - rtc_time_to_tm(tim, &tm); 110 - tm.tm_year += 1900; 111 - tm.tm_mon += 1; 112 - 113 - spin_lock_irqsave(&rtc_lock, flags); 114 - /* enable writing */ 115 - m48t37_base->control = 0x80; 116 - 117 - /* year */ 118 - m48t37_base->year = bin2bcd(tm.tm_year % 100); 119 - m48t37_base->century = bin2bcd(tm.tm_year / 100); 120 - 121 - /* month */ 122 - m48t37_base->month = bin2bcd(tm.tm_mon); 123 - 124 - /* day */ 125 - m48t37_base->date = bin2bcd(tm.tm_mday); 126 - 127 - /* hour/min/sec */ 128 - m48t37_base->hour = bin2bcd(tm.tm_hour); 129 - m48t37_base->min = bin2bcd(tm.tm_min); 130 - m48t37_base->sec = bin2bcd(tm.tm_sec); 131 - 132 - /* day of week -- not really used, but let's keep it up-to-date */ 133 - m48t37_base->day = bin2bcd(tm.tm_wday + 1); 134 - 135 - /* disable writing */ 136 - m48t37_base->control = 0x00; 137 - spin_unlock_irqrestore(&rtc_lock, flags); 138 - 139 - return 0; 140 - } 141 - 142 - void __init plat_time_init(void) 143 - { 144 - mips_hpt_frequency = cpu_clock_freq / 2; 145 - mips_hpt_frequency = 33000000 * 3 * 5; 146 - } 147 - 148 - unsigned long ocd_base; 149 - 150 - EXPORT_SYMBOL(ocd_base); 151 - 152 - /* 153 - * Common setup before any secondaries are started 154 - */ 155 - 156 - #define TITAN_UART_CLK 3686400 157 - #define TITAN_SERIAL_BASE_BAUD (TITAN_UART_CLK / 16) 158 - #define TITAN_SERIAL_IRQ 4 159 - #define TITAN_SERIAL_BASE 0xfd000008UL 160 - 161 - static void __init py_map_ocd(void) 162 - { 163 - ocd_base = (unsigned long) ioremap(OCD_BASE, OCD_SIZE); 164 - if (!ocd_base) 165 - panic("Mapping OCD failed - game over. Your score is 0."); 166 - 167 - /* Kludge for PMON bug ... 
*/ 168 - OCD_WRITE(0x0710, 0x0ffff029); 169 - } 170 - 171 - static void __init py_uart_setup(void) 172 - { 173 - #ifdef CONFIG_SERIAL_8250 174 - struct uart_port up; 175 - 176 - /* 177 - * Register to interrupt zero because we share the interrupt with 178 - * the serial driver which we don't properly support yet. 179 - */ 180 - memset(&up, 0, sizeof(up)); 181 - up.membase = (unsigned char *) ioremap(TITAN_SERIAL_BASE, 8); 182 - up.irq = TITAN_SERIAL_IRQ; 183 - up.uartclk = TITAN_UART_CLK; 184 - up.regshift = 0; 185 - up.iotype = UPIO_MEM; 186 - up.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST; 187 - up.line = 0; 188 - 189 - if (early_serial_setup(&up)) 190 - printk(KERN_ERR "Early serial init of port 0 failed\n"); 191 - #endif /* CONFIG_SERIAL_8250 */ 192 - } 193 - 194 - static void __init py_rtc_setup(void) 195 - { 196 - m48t37_base = ioremap(YOSEMITE_RTC_BASE, YOSEMITE_RTC_SIZE); 197 - if (!m48t37_base) 198 - printk(KERN_ERR "Mapping the RTC failed\n"); 199 - } 200 - 201 - /* Not only time init but that's what the hook it's called through is named */ 202 - static void __init py_late_time_init(void) 203 - { 204 - py_map_ocd(); 205 - py_uart_setup(); 206 - py_rtc_setup(); 207 - } 208 - 209 - void __init plat_mem_setup(void) 210 - { 211 - late_time_init = py_late_time_init; 212 - 213 - /* Add memory regions */ 214 - add_memory_region(0x00000000, 0x10000000, BOOT_MEM_RAM); 215 - 216 - #if 0 /* XXX Crash ... */ 217 - OCD_WRITE(RM9000x2_OCD_HTSC, 218 - OCD_READ(RM9000x2_OCD_HTSC) | HYPERTRANSPORT_ENABLE); 219 - 220 - /* Set the BAR. Shifted mode */ 221 - OCD_WRITE(RM9000x2_OCD_HTBAR0, HYPERTRANSPORT_BAR0_ADDR); 222 - OCD_WRITE(RM9000x2_OCD_HTMASK0, HYPERTRANSPORT_SIZE0); 223 - #endif 224 - }
-32
arch/mips/pmc-sierra/yosemite/setup.h
··· 1 - /* 2 - * Copyright 2003, 04 PMC-Sierra 3 - * Author: Manish Lachwani (lachwani@pmc-sierra.com) 4 - * Copyright 2004 Ralf Baechle <ralf@linux-mips.org> 5 - * 6 - * Board specific definititions for the PMC-Sierra Yosemite 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License as published by the 10 - * Free Software Foundation; either version 2 of the License, or (at your 11 - * option) any later version. 12 - */ 13 - #ifndef __SETUP_H__ 14 - #define __SETUP_H__ 15 - 16 - /* M48T37 RTC + NVRAM */ 17 - #define YOSEMITE_RTC_BASE 0xfc800000 18 - #define YOSEMITE_RTC_SIZE 0x00800000 19 - 20 - #define HYPERTRANSPORT_BAR0_ADDR 0x00000006 21 - #define HYPERTRANSPORT_SIZE0 0x0fffffff 22 - #define HYPERTRANSPORT_BAR0_ATTR 0x00002000 23 - 24 - #define HYPERTRANSPORT_ENABLE 0x6 25 - 26 - /* 27 - * EEPROM Size 28 - */ 29 - #define TITAN_ATMEL_24C32_SIZE 32768 30 - #define TITAN_ATMEL_24C64_SIZE 65536 31 - 32 - #endif /* __SETUP_H__ */
-185
arch/mips/pmc-sierra/yosemite/smp.c
··· 1 - #include <linux/linkage.h> 2 - #include <linux/sched.h> 3 - #include <linux/smp.h> 4 - 5 - #include <asm/pmon.h> 6 - #include <asm/titan_dep.h> 7 - #include <asm/time.h> 8 - 9 - #define LAUNCHSTACK_SIZE 256 10 - 11 - static __cpuinitdata arch_spinlock_t launch_lock = __ARCH_SPIN_LOCK_UNLOCKED; 12 - 13 - static unsigned long secondary_sp __cpuinitdata; 14 - static unsigned long secondary_gp __cpuinitdata; 15 - 16 - static unsigned char launchstack[LAUNCHSTACK_SIZE] __initdata 17 - __attribute__((aligned(2 * sizeof(long)))); 18 - 19 - static void __init prom_smp_bootstrap(void) 20 - { 21 - local_irq_disable(); 22 - 23 - while (arch_spin_is_locked(&launch_lock)); 24 - 25 - __asm__ __volatile__( 26 - " move $sp, %0 \n" 27 - " move $gp, %1 \n" 28 - " j smp_bootstrap \n" 29 - : 30 - : "r" (secondary_sp), "r" (secondary_gp)); 31 - } 32 - 33 - /* 34 - * PMON is a fragile beast. It'll blow up once the mappings it's littering 35 - * right into the middle of KSEG3 are blown away so we have to grab the slave 36 - * core early and keep it in a waiting loop. 
37 - */ 38 - void __init prom_grab_secondary(void) 39 - { 40 - arch_spin_lock(&launch_lock); 41 - 42 - pmon_cpustart(1, &prom_smp_bootstrap, 43 - launchstack + LAUNCHSTACK_SIZE, 0); 44 - } 45 - 46 - void titan_mailbox_irq(void) 47 - { 48 - int cpu = smp_processor_id(); 49 - unsigned long status; 50 - 51 - switch (cpu) { 52 - case 0: 53 - status = OCD_READ(RM9000x2_OCD_INTP0STATUS3); 54 - OCD_WRITE(RM9000x2_OCD_INTP0CLEAR3, status); 55 - 56 - if (status & 0x2) 57 - smp_call_function_interrupt(); 58 - if (status & 0x4) 59 - scheduler_ipi(); 60 - break; 61 - 62 - case 1: 63 - status = OCD_READ(RM9000x2_OCD_INTP1STATUS3); 64 - OCD_WRITE(RM9000x2_OCD_INTP1CLEAR3, status); 65 - 66 - if (status & 0x2) 67 - smp_call_function_interrupt(); 68 - if (status & 0x4) 69 - scheduler_ipi(); 70 - break; 71 - } 72 - } 73 - 74 - /* 75 - * Send inter-processor interrupt 76 - */ 77 - static void yos_send_ipi_single(int cpu, unsigned int action) 78 - { 79 - /* 80 - * Generate an INTMSG so that it can be sent over to the 81 - * destination CPU. The INTMSG will put the STATUS bits 82 - * based on the action desired. An alternative strategy 83 - * is to write to the Interrupt Set register, read the 84 - * Interrupt Status register and clear the Interrupt 85 - * Clear register. The latter is preffered. 
86 - */ 87 - switch (action) { 88 - case SMP_RESCHEDULE_YOURSELF: 89 - if (cpu == 1) 90 - OCD_WRITE(RM9000x2_OCD_INTP1SET3, 4); 91 - else 92 - OCD_WRITE(RM9000x2_OCD_INTP0SET3, 4); 93 - break; 94 - 95 - case SMP_CALL_FUNCTION: 96 - if (cpu == 1) 97 - OCD_WRITE(RM9000x2_OCD_INTP1SET3, 2); 98 - else 99 - OCD_WRITE(RM9000x2_OCD_INTP0SET3, 2); 100 - break; 101 - } 102 - } 103 - 104 - static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action) 105 - { 106 - unsigned int i; 107 - 108 - for_each_cpu(i, mask) 109 - yos_send_ipi_single(i, action); 110 - } 111 - 112 - /* 113 - * After we've done initial boot, this function is called to allow the 114 - * board code to clean up state, if needed 115 - */ 116 - static void __cpuinit yos_init_secondary(void) 117 - { 118 - } 119 - 120 - static void __cpuinit yos_smp_finish(void) 121 - { 122 - set_c0_status(ST0_CO | ST0_IM | ST0_IE); 123 - } 124 - 125 - /* Hook for after all CPUs are online */ 126 - static void yos_cpus_done(void) 127 - { 128 - } 129 - 130 - /* 131 - * Firmware CPU startup hook 132 - * Complicated by PMON's weird interface which tries to minimic the UNIX fork. 133 - * It launches the next * available CPU and copies some information on the 134 - * stack so the first thing we do is throw away that stuff and load useful 135 - * values into the registers ... 136 - */ 137 - static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle) 138 - { 139 - unsigned long gp = (unsigned long) task_thread_info(idle); 140 - unsigned long sp = __KSTK_TOS(idle); 141 - 142 - secondary_sp = sp; 143 - secondary_gp = gp; 144 - 145 - arch_spin_unlock(&launch_lock); 146 - } 147 - 148 - /* 149 - * Detect available CPUs, populate cpu_possible_mask before smp_init 150 - * 151 - * We don't want to start the secondary CPU yet nor do we have a nice probing 152 - * feature in PMON so we just assume presence of the secondary core. 
153 - */ 154 - static void __init yos_smp_setup(void) 155 - { 156 - int i; 157 - 158 - init_cpu_possible(cpu_none_mask); 159 - 160 - for (i = 0; i < 2; i++) { 161 - set_cpu_possible(i, true); 162 - __cpu_number_map[i] = i; 163 - __cpu_logical_map[i] = i; 164 - } 165 - } 166 - 167 - static void __init yos_prepare_cpus(unsigned int max_cpus) 168 - { 169 - /* 170 - * Be paranoid. Enable the IPI only if we're really about to go SMP. 171 - */ 172 - if (num_possible_cpus()) 173 - set_c0_status(STATUSF_IP5); 174 - } 175 - 176 - struct plat_smp_ops yos_smp_ops = { 177 - .send_ipi_single = yos_send_ipi_single, 178 - .send_ipi_mask = yos_send_ipi_mask, 179 - .init_secondary = yos_init_secondary, 180 - .smp_finish = yos_smp_finish, 181 - .cpus_done = yos_cpus_done, 182 - .boot_secondary = yos_boot_secondary, 183 - .smp_setup = yos_smp_setup, 184 - .prepare_cpus = yos_prepare_cpus, 185 - };
-37
arch/mips/powertv/init.c
··· 69 69 return result; 70 70 } 71 71 72 - /* TODO: Verify on linux-mips mailing list that the following two */ 73 - /* functions are correct */ 74 - /* TODO: Copy NMI and EJTAG exception vectors to memory from the */ 75 - /* BootROM exception vectors. Flush their cache entries. test it. */ 76 - 77 - static void __init mips_nmi_setup(void) 78 - { 79 - void *base; 80 - #if defined(CONFIG_CPU_MIPS32_R1) 81 - base = cpu_has_veic ? 82 - (void *)(CAC_BASE + 0xa80) : 83 - (void *)(CAC_BASE + 0x380); 84 - #elif defined(CONFIG_CPU_MIPS32_R2) 85 - base = (void *)0xbfc00000; 86 - #else 87 - #error NMI exception handler address not defined 88 - #endif 89 - } 90 - 91 - static void __init mips_ejtag_setup(void) 92 - { 93 - void *base; 94 - 95 - #if defined(CONFIG_CPU_MIPS32_R1) 96 - base = cpu_has_veic ? 97 - (void *)(CAC_BASE + 0xa00) : 98 - (void *)(CAC_BASE + 0x300); 99 - #elif defined(CONFIG_CPU_MIPS32_R2) 100 - base = (void *)0xbfc00480; 101 - #else 102 - #error EJTAG exception handler address not defined 103 - #endif 104 - } 105 - 106 72 void __init prom_init(void) 107 73 { 108 74 int prom_argc; ··· 78 112 prom_argv = (char *) fw_arg1; 79 113 _prom_envp = (int *) fw_arg2; 80 114 _prom_memsize = (unsigned long) fw_arg3; 81 - 82 - board_nmi_handler_setup = mips_nmi_setup; 83 - board_ejtag_handler_setup = mips_ejtag_setup; 84 115 85 116 if (prom_argc == 1) { 86 117 strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+1 -2
arch/mips/rb532/prom.c
··· 72 72 static char cmd_line[COMMAND_LINE_SIZE] __initdata; 73 73 char *cp, *board; 74 74 int prom_argc; 75 - char **prom_argv, **prom_envp; 75 + char **prom_argv; 76 76 int i; 77 77 78 78 prom_argc = fw_arg0; 79 79 prom_argv = (char **) fw_arg1; 80 - prom_envp = (char **) fw_arg2; 81 80 82 81 cp = cmd_line; 83 82 /* Note: it is common that parameters start
+3 -5
arch/mips/sgi-ip22/ip22-eisa.c
··· 73 73 74 74 static irqreturn_t ip22_eisa_intr(int irq, void *dev_id) 75 75 { 76 - u8 eisa_irq; 77 - u8 dma1, dma2; 76 + u8 eisa_irq = inb(EIU_INTRPT_ACK); 78 77 79 - eisa_irq = inb(EIU_INTRPT_ACK); 80 - dma1 = inb(EISA_DMA1_STATUS); 81 - dma2 = inb(EISA_DMA2_STATUS); 78 + inb(EISA_DMA1_STATUS); 79 + inb(EISA_DMA2_STATUS); 82 80 83 81 if (eisa_irq < EISA_MAX_IRQ) { 84 82 do_IRQ(eisa_irq);
+1 -1
arch/mips/sibyte/Kconfig
··· 74 74 select SWAP_IO_SPACE 75 75 select SYS_SUPPORTS_32BIT_KERNEL 76 76 select SYS_SUPPORTS_64BIT_KERNEL 77 - select CFE 77 + select FW_CFE 78 78 select SYS_HAS_EARLY_PRINTK 79 79 80 80 choice
+4 -4
arch/mips/sni/setup.c
··· 15 15 #include <linux/fb.h> 16 16 #include <linux/screen_info.h> 17 17 18 - #ifdef CONFIG_ARC 18 + #ifdef CONFIG_FW_ARC 19 19 #include <asm/fw/arc/types.h> 20 20 #include <asm/sgialib.h> 21 21 #endif 22 22 23 - #ifdef CONFIG_SNIPROM 23 + #ifdef CONFIG_FW_SNIPROM 24 24 #include <asm/mipsprom.h> 25 25 #endif 26 26 ··· 37 37 38 38 static void __init sni_display_setup(void) 39 39 { 40 - #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_ARC) 40 + #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC) 41 41 struct screen_info *si = &screen_info; 42 42 DISPLAY_STATUS *di; 43 43 ··· 56 56 57 57 static void __init sni_console_setup(void) 58 58 { 59 - #ifndef CONFIG_ARC 59 + #ifndef CONFIG_FW_ARC 60 60 char *ctype; 61 61 char *cdev; 62 62 char *baud;
+2 -4
arch/mips/wrppmc/pci.c
··· 38 38 39 39 static int __init gt64120_pci_init(void) 40 40 { 41 - u32 tmp; 42 - 43 - tmp = GT_READ(GT_PCI0_CMD_OFS); /* Huh??? -- Ralf */ 44 - tmp = GT_READ(GT_PCI0_BARE_OFS); 41 + (void) GT_READ(GT_PCI0_CMD_OFS); /* Huh??? -- Ralf */ 42 + (void) GT_READ(GT_PCI0_BARE_OFS); 45 43 46 44 /* reset the whole PCI I/O space range */ 47 45 ioport_resource.start = GT_PCI_IO_BASE;
+292 -139
drivers/ata/pata_octeon_cf.c
··· 5 5 * License. See the file "COPYING" in the main directory of this archive 6 6 * for more details. 7 7 * 8 - * Copyright (C) 2005 - 2009 Cavium Networks 8 + * Copyright (C) 2005 - 2012 Cavium Inc. 9 9 * Copyright (C) 2008 Wind River Systems 10 10 */ 11 11 12 12 #include <linux/kernel.h> 13 13 #include <linux/module.h> 14 14 #include <linux/libata.h> 15 - #include <linux/irq.h> 15 + #include <linux/hrtimer.h> 16 16 #include <linux/slab.h> 17 + #include <linux/irq.h> 18 + #include <linux/of.h> 19 + #include <linux/of_platform.h> 17 20 #include <linux/platform_device.h> 18 - #include <linux/workqueue.h> 19 21 #include <scsi/scsi_host.h> 20 22 23 + #include <asm/byteorder.h> 21 24 #include <asm/octeon/octeon.h> 22 25 23 26 /* ··· 37 34 */ 38 35 39 36 #define DRV_NAME "pata_octeon_cf" 40 - #define DRV_VERSION "2.1" 37 + #define DRV_VERSION "2.2" 41 38 39 + /* Poll interval in nS. */ 40 + #define OCTEON_CF_BUSY_POLL_INTERVAL 500000 41 + 42 + #define DMA_CFG 0 43 + #define DMA_TIM 0x20 44 + #define DMA_INT 0x38 45 + #define DMA_INT_EN 0x50 42 46 43 47 struct octeon_cf_port { 44 - struct workqueue_struct *wq; 45 - struct delayed_work delayed_finish; 48 + struct hrtimer delayed_finish; 46 49 struct ata_port *ap; 47 50 int dma_finished; 51 + void *c0; 52 + unsigned int cs0; 53 + unsigned int cs1; 54 + bool is_true_ide; 55 + u64 dma_base; 48 56 }; 49 57 50 58 static struct scsi_host_template octeon_cf_sht = { 51 59 ATA_PIO_SHT(DRV_NAME), 52 60 }; 61 + 62 + static int enable_dma; 63 + module_param(enable_dma, int, 0444); 64 + MODULE_PARM_DESC(enable_dma, 65 + "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)"); 53 66 54 67 /** 55 68 * Convert nanosecond based time to setting used in the ··· 85 66 return val; 86 67 } 87 68 88 - static void octeon_cf_set_boot_reg_cfg(int cs) 69 + static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier) 89 70 { 90 71 union cvmx_mio_boot_reg_cfgx reg_cfg; 72 + unsigned int tim_mult; 73 + 74 + 
switch (multiplier) { 75 + case 8: 76 + tim_mult = 3; 77 + break; 78 + case 4: 79 + tim_mult = 0; 80 + break; 81 + case 2: 82 + tim_mult = 2; 83 + break; 84 + default: 85 + tim_mult = 1; 86 + break; 87 + } 88 + 91 89 reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs)); 92 90 reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */ 93 - reg_cfg.s.tim_mult = 2; /* Timing mutiplier 2x */ 91 + reg_cfg.s.tim_mult = tim_mult; /* Timing mutiplier */ 94 92 reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */ 95 93 reg_cfg.s.sam = 0; /* Don't combine write and output enable */ 96 94 reg_cfg.s.we_ext = 0; /* No write enable extension */ ··· 128 92 */ 129 93 static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev) 130 94 { 131 - struct octeon_cf_data *ocd = ap->dev->platform_data; 95 + struct octeon_cf_port *cf_port = ap->private_data; 132 96 union cvmx_mio_boot_reg_timx reg_tim; 133 - int cs = ocd->base_region; 134 97 int T; 135 98 struct ata_timing timing; 136 99 100 + unsigned int div; 137 101 int use_iordy; 138 102 int trh; 139 103 int pause; ··· 142 106 int t2; 143 107 int t2i; 144 108 145 - T = (int)(2000000000000LL / octeon_get_clock_rate()); 109 + /* 110 + * A divisor value of four will overflow the timing fields at 111 + * clock rates greater than 800MHz 112 + */ 113 + if (octeon_get_io_clock_rate() <= 800000000) 114 + div = 4; 115 + else 116 + div = 8; 117 + T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate()); 146 118 147 119 if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T)) 148 120 BUG(); ··· 165 121 if (t2i) 166 122 t2i--; 167 123 168 - trh = ns_to_tim_reg(2, 20); 124 + trh = ns_to_tim_reg(div, 20); 169 125 if (trh) 170 126 trh--; 171 127 172 - pause = timing.cycle - timing.active - timing.setup - trh; 128 + pause = (int)timing.cycle - (int)timing.active - 129 + (int)timing.setup - trh; 130 + if (pause < 0) 131 + pause = 0; 173 132 if (pause) 174 133 pause--; 175 134 176 - octeon_cf_set_boot_reg_cfg(cs); 177 - 
if (ocd->dma_engine >= 0) 135 + octeon_cf_set_boot_reg_cfg(cf_port->cs0, div); 136 + if (cf_port->is_true_ide) 178 137 /* True IDE mode, program both chip selects. */ 179 - octeon_cf_set_boot_reg_cfg(cs + 1); 138 + octeon_cf_set_boot_reg_cfg(cf_port->cs1, div); 180 139 181 140 182 141 use_iordy = ata_pio_need_iordy(dev); 183 142 184 - reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs)); 143 + reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0)); 185 144 /* Disable page mode */ 186 145 reg_tim.s.pagem = 0; 187 146 /* Enable dynamic timing */ ··· 208 161 /* How long read enable is asserted */ 209 162 reg_tim.s.oe = t2; 210 163 /* Time after CE that read/write starts */ 211 - reg_tim.s.ce = ns_to_tim_reg(2, 5); 164 + reg_tim.s.ce = ns_to_tim_reg(div, 5); 212 165 /* Time before CE that address is valid */ 213 166 reg_tim.s.adr = 0; 214 167 215 168 /* Program the bootbus region timing for the data port chip select. */ 216 - cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64); 217 - if (ocd->dma_engine >= 0) 169 + cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64); 170 + if (cf_port->is_true_ide) 218 171 /* True IDE mode, program both chip selects. 
*/ 219 - cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64); 172 + cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1), 173 + reg_tim.u64); 220 174 } 221 175 222 176 static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev) 223 177 { 224 - struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data; 178 + struct octeon_cf_port *cf_port = ap->private_data; 179 + union cvmx_mio_boot_pin_defs pin_defs; 225 180 union cvmx_mio_boot_dma_timx dma_tim; 226 181 unsigned int oe_a; 227 182 unsigned int oe_n; ··· 232 183 unsigned int pause; 233 184 unsigned int T0, Tkr, Td; 234 185 unsigned int tim_mult; 186 + int c; 235 187 236 188 const struct ata_timing *timing; 237 189 ··· 249 199 /* not spec'ed, value in eclocks, not affected by tim_mult */ 250 200 dma_arq = 8; 251 201 pause = 25 - dma_arq * 1000 / 252 - (octeon_get_clock_rate() / 1000000); /* Tz */ 202 + (octeon_get_io_clock_rate() / 1000000); /* Tz */ 253 203 254 204 oe_a = Td; 255 205 /* Tkr from cf spec, lengthened to meet T0 */ 256 206 oe_n = max(T0 - oe_a, Tkr); 257 207 258 - dma_tim.s.dmack_pi = 1; 208 + pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS); 209 + 210 + /* DMA channel number. */ 211 + c = (cf_port->dma_base & 8) >> 3; 212 + 213 + /* Invert the polarity if the default is 0*/ 214 + dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 
0 : 1; 259 215 260 216 dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n); 261 217 dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a); ··· 284 228 285 229 pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60, 286 230 ns_to_tim_reg(tim_mult, 60)); 287 - pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: " 288 - "%d, dmarq: %d, pause: %d\n", 231 + pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n", 289 232 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, 290 233 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause); 291 234 292 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine), 293 - dma_tim.u64); 294 - 235 + cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64); 295 236 } 296 237 297 238 /** ··· 542 489 ata_wait_idle(ap); 543 490 } 544 491 545 - static void octeon_cf_irq_on(struct ata_port *ap) 492 + static void octeon_cf_ata_port_noaction(struct ata_port *ap) 546 493 { 547 - } 548 - 549 - static void octeon_cf_irq_clear(struct ata_port *ap) 550 - { 551 - return; 552 494 } 553 495 554 496 static void octeon_cf_dma_setup(struct ata_queued_cmd *qc) ··· 567 519 */ 568 520 static void octeon_cf_dma_start(struct ata_queued_cmd *qc) 569 521 { 570 - struct octeon_cf_data *ocd = qc->ap->dev->platform_data; 522 + struct octeon_cf_port *cf_port = qc->ap->private_data; 571 523 union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg; 572 524 union cvmx_mio_boot_dma_intx mio_boot_dma_int; 573 525 struct scatterlist *sg; ··· 583 535 */ 584 536 mio_boot_dma_int.u64 = 0; 585 537 mio_boot_dma_int.s.done = 1; 586 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), 587 - mio_boot_dma_int.u64); 538 + cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64); 588 539 589 540 /* Enable the interrupt. 
*/ 590 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), 591 - mio_boot_dma_int.u64); 541 + cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64); 592 542 593 543 /* Set the direction of the DMA */ 594 544 mio_boot_dma_cfg.u64 = 0; 545 + #ifdef __LITTLE_ENDIAN 546 + mio_boot_dma_cfg.s.endian = 1; 547 + #endif 595 548 mio_boot_dma_cfg.s.en = 1; 596 549 mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0); 597 550 ··· 618 569 (mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length, 619 570 (void *)(unsigned long)mio_boot_dma_cfg.s.adr); 620 571 621 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), 622 - mio_boot_dma_cfg.u64); 572 + cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64); 623 573 } 624 574 625 575 /** ··· 631 583 struct ata_queued_cmd *qc) 632 584 { 633 585 struct ata_eh_info *ehi = &ap->link.eh_info; 634 - struct octeon_cf_data *ocd = ap->dev->platform_data; 586 + struct octeon_cf_port *cf_port = ap->private_data; 635 587 union cvmx_mio_boot_dma_cfgx dma_cfg; 636 588 union cvmx_mio_boot_dma_intx dma_int; 637 - struct octeon_cf_port *cf_port; 638 589 u8 status; 639 590 640 591 VPRINTK("ata%u: protocol %d task_state %d\n", ··· 643 596 if (ap->hsm_task_state != HSM_ST_LAST) 644 597 return 0; 645 598 646 - cf_port = ap->private_data; 647 - 648 - dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine)); 599 + dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG); 649 600 if (dma_cfg.s.size != 0xfffff) { 650 601 /* Error, the transfer was not complete. */ 651 602 qc->err_mask |= AC_ERR_HOST_BUS; ··· 653 608 /* Stop and clear the dma engine. */ 654 609 dma_cfg.u64 = 0; 655 610 dma_cfg.s.size = -1; 656 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64); 611 + cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64); 657 612 658 613 /* Disable the interrupt. 
*/ 659 614 dma_int.u64 = 0; 660 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64); 615 + cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64); 661 616 662 617 /* Clear the DMA complete status */ 663 618 dma_int.s.done = 1; 664 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64); 619 + cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64); 665 620 666 621 status = ap->ops->sff_check_status(ap); 667 622 ··· 694 649 struct ata_queued_cmd *qc; 695 650 union cvmx_mio_boot_dma_intx dma_int; 696 651 union cvmx_mio_boot_dma_cfgx dma_cfg; 697 - struct octeon_cf_data *ocd; 698 652 699 653 ap = host->ports[i]; 700 - ocd = ap->dev->platform_data; 701 654 cf_port = ap->private_data; 702 - dma_int.u64 = 703 - cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); 704 - dma_cfg.u64 = 705 - cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine)); 655 + 656 + dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT); 657 + dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG); 706 658 707 659 qc = ata_qc_from_tag(ap, ap->link.active_tag); 708 660 709 - if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) { 710 - if (dma_int.s.done && !dma_cfg.s.en) { 711 - if (!sg_is_last(qc->cursg)) { 712 - qc->cursg = sg_next(qc->cursg); 713 - handled = 1; 714 - octeon_cf_dma_start(qc); 715 - continue; 716 - } else { 717 - cf_port->dma_finished = 1; 718 - } 719 - } 720 - if (!cf_port->dma_finished) 721 - continue; 722 - status = ioread8(ap->ioaddr.altstatus_addr); 723 - if (status & (ATA_BUSY | ATA_DRQ)) { 724 - /* 725 - * We are busy, try to handle it 726 - * later. This is the DMA finished 727 - * interrupt, and it could take a 728 - * little while for the card to be 729 - * ready for more commands. 730 - */ 731 - /* Clear DMA irq. 
*/ 732 - dma_int.u64 = 0; 733 - dma_int.s.done = 1; 734 - cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), 735 - dma_int.u64); 661 + if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING)) 662 + continue; 736 663 737 - queue_delayed_work(cf_port->wq, 738 - &cf_port->delayed_finish, 1); 664 + if (dma_int.s.done && !dma_cfg.s.en) { 665 + if (!sg_is_last(qc->cursg)) { 666 + qc->cursg = sg_next(qc->cursg); 739 667 handled = 1; 668 + octeon_cf_dma_start(qc); 669 + continue; 740 670 } else { 741 - handled |= octeon_cf_dma_finished(ap, qc); 671 + cf_port->dma_finished = 1; 742 672 } 673 + } 674 + if (!cf_port->dma_finished) 675 + continue; 676 + status = ioread8(ap->ioaddr.altstatus_addr); 677 + if (status & (ATA_BUSY | ATA_DRQ)) { 678 + /* 679 + * We are busy, try to handle it later. This 680 + * is the DMA finished interrupt, and it could 681 + * take a little while for the card to be 682 + * ready for more commands. 683 + */ 684 + /* Clear DMA irq. */ 685 + dma_int.u64 = 0; 686 + dma_int.s.done = 1; 687 + cvmx_write_csr(cf_port->dma_base + DMA_INT, 688 + dma_int.u64); 689 + hrtimer_start_range_ns(&cf_port->delayed_finish, 690 + ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL), 691 + OCTEON_CF_BUSY_POLL_INTERVAL / 5, 692 + HRTIMER_MODE_REL); 693 + handled = 1; 694 + } else { 695 + handled |= octeon_cf_dma_finished(ap, qc); 743 696 } 744 697 } 745 698 spin_unlock_irqrestore(&host->lock, flags); ··· 745 702 return IRQ_RETVAL(handled); 746 703 } 747 704 748 - static void octeon_cf_delayed_finish(struct work_struct *work) 705 + static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt) 749 706 { 750 - struct octeon_cf_port *cf_port = container_of(work, 707 + struct octeon_cf_port *cf_port = container_of(hrt, 751 708 struct octeon_cf_port, 752 - delayed_finish.work); 709 + delayed_finish); 753 710 struct ata_port *ap = cf_port->ap; 754 711 struct ata_host *host = ap->host; 755 712 struct ata_queued_cmd *qc; 756 713 unsigned long flags; 757 714 u8 status; 715 + enum 
hrtimer_restart rv = HRTIMER_NORESTART; 758 716 759 717 spin_lock_irqsave(&host->lock, flags); 760 718 ··· 770 726 status = ioread8(ap->ioaddr.altstatus_addr); 771 727 if (status & (ATA_BUSY | ATA_DRQ)) { 772 728 /* Still busy, try again. */ 773 - queue_delayed_work(cf_port->wq, 774 - &cf_port->delayed_finish, 1); 729 + hrtimer_forward_now(hrt, 730 + ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL)); 731 + rv = HRTIMER_RESTART; 775 732 goto out; 776 733 } 777 734 qc = ata_qc_from_tag(ap, ap->link.active_tag); 778 - if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) 735 + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 779 736 octeon_cf_dma_finished(ap, qc); 780 737 out: 781 738 spin_unlock_irqrestore(&host->lock, flags); 739 + return rv; 782 740 } 783 741 784 742 static void octeon_cf_dev_config(struct ata_device *dev) ··· 832 786 .qc_prep = ata_noop_qc_prep, 833 787 .qc_issue = octeon_cf_qc_issue, 834 788 .sff_dev_select = octeon_cf_dev_select, 835 - .sff_irq_on = octeon_cf_irq_on, 836 - .sff_irq_clear = octeon_cf_irq_clear, 789 + .sff_irq_on = octeon_cf_ata_port_noaction, 790 + .sff_irq_clear = octeon_cf_ata_port_noaction, 837 791 .cable_detect = ata_cable_40wire, 838 792 .set_piomode = octeon_cf_set_piomode, 839 793 .set_dmamode = octeon_cf_set_dmamode, ··· 844 798 { 845 799 struct resource *res_cs0, *res_cs1; 846 800 801 + bool is_16bit; 802 + const __be32 *cs_num; 803 + struct property *reg_prop; 804 + int n_addr, n_size, reg_len; 805 + struct device_node *node; 806 + const void *prop; 847 807 void __iomem *cs0; 848 808 void __iomem *cs1 = NULL; 849 809 struct ata_host *host; 850 810 struct ata_port *ap; 851 - struct octeon_cf_data *ocd; 852 811 int irq = 0; 853 812 irq_handler_t irq_handler = NULL; 854 813 void __iomem *base; 855 814 struct octeon_cf_port *cf_port; 856 - char version[32]; 815 + int rv = -ENOMEM; 816 + 817 + 818 + node = pdev->dev.of_node; 819 + if (node == NULL) 820 + return -EINVAL; 821 + 822 + cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL); 823 + if 
(!cf_port) 824 + return -ENOMEM; 825 + 826 + cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL); 827 + 828 + prop = of_get_property(node, "cavium,bus-width", NULL); 829 + if (prop) 830 + is_16bit = (be32_to_cpup(prop) == 16); 831 + else 832 + is_16bit = false; 833 + 834 + n_addr = of_n_addr_cells(node); 835 + n_size = of_n_size_cells(node); 836 + 837 + reg_prop = of_find_property(node, "reg", &reg_len); 838 + if (!reg_prop || reg_len < sizeof(__be32)) { 839 + rv = -EINVAL; 840 + goto free_cf_port; 841 + } 842 + cs_num = reg_prop->value; 843 + cf_port->cs0 = be32_to_cpup(cs_num); 844 + 845 + if (cf_port->is_true_ide) { 846 + struct device_node *dma_node; 847 + dma_node = of_parse_phandle(node, 848 + "cavium,dma-engine-handle", 0); 849 + if (dma_node) { 850 + struct platform_device *dma_dev; 851 + dma_dev = of_find_device_by_node(dma_node); 852 + if (dma_dev) { 853 + struct resource *res_dma; 854 + int i; 855 + res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); 856 + if (!res_dma) { 857 + of_node_put(dma_node); 858 + rv = -EINVAL; 859 + goto free_cf_port; 860 + } 861 + cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start, 862 + resource_size(res_dma)); 863 + 864 + if (!cf_port->dma_base) { 865 + of_node_put(dma_node); 866 + rv = -EINVAL; 867 + goto free_cf_port; 868 + } 869 + 870 + irq_handler = octeon_cf_interrupt; 871 + i = platform_get_irq(dma_dev, 0); 872 + if (i > 0) 873 + irq = i; 874 + } 875 + of_node_put(dma_node); 876 + } 877 + res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); 878 + if (!res_cs1) { 879 + rv = -EINVAL; 880 + goto free_cf_port; 881 + } 882 + cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start, 883 + res_cs1->end - res_cs1->start + 1); 884 + 885 + if (!cs1) 886 + goto free_cf_port; 887 + 888 + if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) { 889 + rv = -EINVAL; 890 + goto free_cf_port; 891 + } 892 + cs_num += n_addr + n_size; 893 + cf_port->cs1 = be32_to_cpup(cs_num); 
894 + } 857 895 858 896 res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); 859 897 860 - if (!res_cs0) 861 - return -EINVAL; 862 - 863 - ocd = pdev->dev.platform_data; 898 + if (!res_cs0) { 899 + rv = -EINVAL; 900 + goto free_cf_port; 901 + } 864 902 865 903 cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, 866 904 resource_size(res_cs0)); 867 905 868 906 if (!cs0) 869 - return -ENOMEM; 870 - 871 - /* Determine from availability of DMA if True IDE mode or not */ 872 - if (ocd->dma_engine >= 0) { 873 - res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); 874 - if (!res_cs1) 875 - return -EINVAL; 876 - 877 - cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start, 878 - resource_size(res_cs1)); 879 - 880 - if (!cs1) 881 - return -ENOMEM; 882 - } 883 - 884 - cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL); 885 - if (!cf_port) 886 - return -ENOMEM; 907 + goto free_cf_port; 887 908 888 909 /* allocate host */ 889 910 host = ata_host_alloc(&pdev->dev, 1); ··· 959 846 960 847 ap = host->ports[0]; 961 848 ap->private_data = cf_port; 849 + pdev->dev.platform_data = cf_port; 962 850 cf_port->ap = ap; 963 851 ap->ops = &octeon_cf_ops; 964 852 ap->pio_mask = ATA_PIO6; 965 853 ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING; 966 854 967 - base = cs0 + ocd->base_region_bias; 968 - if (!ocd->is16bit) { 855 + if (!is_16bit) { 856 + base = cs0 + 0x800; 969 857 ap->ioaddr.cmd_addr = base; 970 858 ata_sff_std_ports(&ap->ioaddr); 971 859 972 860 ap->ioaddr.altstatus_addr = base + 0xe; 973 861 ap->ioaddr.ctl_addr = base + 0xe; 974 862 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8; 975 - } else if (cs1) { 976 - /* Presence of cs1 indicates True IDE mode. 
*/ 863 + } else if (cf_port->is_true_ide) { 864 + base = cs0; 977 865 ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1; 978 866 ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1); 979 867 ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1; ··· 990 876 ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1; 991 877 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16; 992 878 993 - ap->mwdma_mask = ATA_MWDMA4; 994 - irq = platform_get_irq(pdev, 0); 995 - irq_handler = octeon_cf_interrupt; 879 + ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0; 996 880 997 - /* True IDE mode needs delayed work to poll for not-busy. */ 998 - cf_port->wq = create_singlethread_workqueue(DRV_NAME); 999 - if (!cf_port->wq) 1000 - goto free_cf_port; 1001 - INIT_DELAYED_WORK(&cf_port->delayed_finish, 1002 - octeon_cf_delayed_finish); 1003 - 881 + /* True IDE mode needs a timer to poll for not-busy. */ 882 + hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC, 883 + HRTIMER_MODE_REL); 884 + cf_port->delayed_finish.function = octeon_cf_delayed_finish; 1004 885 } else { 1005 886 /* 16 bit but not True IDE */ 887 + base = cs0 + 0x800; 1006 888 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16; 1007 889 octeon_cf_ops.softreset = octeon_cf_softreset16; 1008 890 octeon_cf_ops.sff_check_status = octeon_cf_check_status16; ··· 1012 902 ap->ioaddr.ctl_addr = base + 0xe; 1013 903 ap->ioaddr.altstatus_addr = base + 0xe; 1014 904 } 905 + cf_port->c0 = ap->ioaddr.ctl_addr; 906 + 907 + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 908 + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 1015 909 1016 910 ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr); 1017 911 1018 912 1019 - snprintf(version, sizeof(version), "%s %d bit%s", 1020 - DRV_VERSION, 1021 - (ocd->is16bit) ? 16 : 8, 1022 - (cs1) ? ", True IDE" : ""); 1023 - ata_print_version_once(&pdev->dev, version); 913 + dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n", 914 + is_16bit ? 16 : 8, 915 + cf_port->is_true_ide ? 
", True IDE" : ""); 1024 916 1025 - return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht); 917 + return ata_host_activate(host, irq, irq_handler, 918 + IRQF_SHARED, &octeon_cf_sht); 1026 919 1027 920 free_cf_port: 1028 921 kfree(cf_port); 1029 - return -ENOMEM; 922 + return rv; 1030 923 } 924 + 925 + static void octeon_cf_shutdown(struct device *dev) 926 + { 927 + union cvmx_mio_boot_dma_cfgx dma_cfg; 928 + union cvmx_mio_boot_dma_intx dma_int; 929 + 930 + struct octeon_cf_port *cf_port = dev->platform_data; 931 + 932 + if (cf_port->dma_base) { 933 + /* Stop and clear the dma engine. */ 934 + dma_cfg.u64 = 0; 935 + dma_cfg.s.size = -1; 936 + cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64); 937 + 938 + /* Disable the interrupt. */ 939 + dma_int.u64 = 0; 940 + cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64); 941 + 942 + /* Clear the DMA complete status */ 943 + dma_int.s.done = 1; 944 + cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64); 945 + 946 + __raw_writeb(0, cf_port->c0); 947 + udelay(20); 948 + __raw_writeb(ATA_SRST, cf_port->c0); 949 + udelay(20); 950 + __raw_writeb(0, cf_port->c0); 951 + mdelay(100); 952 + } 953 + } 954 + 955 + static struct of_device_id octeon_cf_match[] = { 956 + { 957 + .compatible = "cavium,ebt3000-compact-flash", 958 + }, 959 + {}, 960 + }; 961 + MODULE_DEVICE_TABLE(of, octeon_i2c_match); 1031 962 1032 963 static struct platform_driver octeon_cf_driver = { 1033 964 .probe = octeon_cf_probe, 1034 965 .driver = { 1035 966 .name = DRV_NAME, 1036 967 .owner = THIS_MODULE, 968 + .of_match_table = octeon_cf_match, 969 + .shutdown = octeon_cf_shutdown 1037 970 }, 1038 971 }; 1039 972
+9
drivers/bcma/Kconfig
··· 65 65 66 66 If unsure, say N 67 67 68 + config BCMA_DRIVER_GPIO 69 + bool "BCMA GPIO driver" 70 + depends on BCMA 71 + select GPIOLIB 72 + help 73 + Driver to provide access to the GPIO pins of the bcma bus. 74 + 75 + If unsure, say N 76 + 68 77 config BCMA_DEBUG 69 78 bool "BCMA debugging" 70 79 depends on BCMA
+1
drivers/bcma/Makefile
··· 6 6 bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o 7 7 bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o 8 8 bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o 9 + bcma-$(CONFIG_BCMA_DRIVER_GPIO) += driver_gpio.o 9 10 bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o 10 11 bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o 11 12 obj-$(CONFIG_BCMA) += bcma.o
+10
drivers/bcma/bcma_private.h
··· 91 91 void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc); 92 92 #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */ 93 93 94 + #ifdef CONFIG_BCMA_DRIVER_GPIO 95 + /* driver_gpio.c */ 96 + int bcma_gpio_init(struct bcma_drv_cc *cc); 97 + #else 98 + static inline int bcma_gpio_init(struct bcma_drv_cc *cc) 99 + { 100 + return -ENOTSUPP; 101 + } 102 + #endif /* CONFIG_BCMA_DRIVER_GPIO */ 103 + 94 104 #endif
+76 -5
drivers/bcma/driver_chipcommon.c
··· 114 114 if (cc->early_setup_done) 115 115 return; 116 116 117 + spin_lock_init(&cc->gpio_lock); 118 + 117 119 if (cc->core->id.rev >= 11) 118 120 cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT); 119 121 cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP); ··· 204 202 205 203 u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value) 206 204 { 207 - return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value); 205 + unsigned long flags; 206 + u32 res; 207 + 208 + spin_lock_irqsave(&cc->gpio_lock, flags); 209 + res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value); 210 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 211 + 212 + return res; 208 213 } 209 214 210 215 u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value) 211 216 { 212 - return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value); 217 + unsigned long flags; 218 + u32 res; 219 + 220 + spin_lock_irqsave(&cc->gpio_lock, flags); 221 + res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value); 222 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 223 + 224 + return res; 213 225 } 214 226 227 + /* 228 + * If the bit is set to 0, chipcommon controlls this GPIO, 229 + * if the bit is set to 1, it is used by some part of the chip and not our code. 
230 + */ 215 231 u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value) 216 232 { 217 - return bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value); 233 + unsigned long flags; 234 + u32 res; 235 + 236 + spin_lock_irqsave(&cc->gpio_lock, flags); 237 + res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value); 238 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 239 + 240 + return res; 218 241 } 219 242 EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control); 220 243 221 244 u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value) 222 245 { 223 - return bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value); 246 + unsigned long flags; 247 + u32 res; 248 + 249 + spin_lock_irqsave(&cc->gpio_lock, flags); 250 + res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value); 251 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 252 + 253 + return res; 224 254 } 225 255 226 256 u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value) 227 257 { 228 - return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value); 258 + unsigned long flags; 259 + u32 res; 260 + 261 + spin_lock_irqsave(&cc->gpio_lock, flags); 262 + res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value); 263 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 264 + 265 + return res; 266 + } 267 + 268 + u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value) 269 + { 270 + unsigned long flags; 271 + u32 res; 272 + 273 + if (cc->core->id.rev < 20) 274 + return 0; 275 + 276 + spin_lock_irqsave(&cc->gpio_lock, flags); 277 + res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLUP, mask, value); 278 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 279 + 280 + return res; 281 + } 282 + 283 + u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value) 284 + { 285 + unsigned long flags; 286 + u32 res; 287 + 288 + if (cc->core->id.rev < 20) 289 + return 0; 290 + 291 + spin_lock_irqsave(&cc->gpio_lock, flags); 292 + res = 
bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLDOWN, mask, value); 293 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 294 + 295 + return res; 229 296 } 230 297 231 298 #ifdef CONFIG_BCMA_DRIVER_MIPS
+98
drivers/bcma/driver_gpio.c
··· 1 + /* 2 + * Broadcom specific AMBA 3 + * GPIO driver 4 + * 5 + * Copyright 2011, Broadcom Corporation 6 + * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de> 7 + * 8 + * Licensed under the GNU/GPL. See COPYING for details. 9 + */ 10 + 11 + #include <linux/gpio.h> 12 + #include <linux/export.h> 13 + #include <linux/bcma/bcma.h> 14 + 15 + #include "bcma_private.h" 16 + 17 + static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip) 18 + { 19 + return container_of(chip, struct bcma_drv_cc, gpio); 20 + } 21 + 22 + static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio) 23 + { 24 + struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); 25 + 26 + return !!bcma_chipco_gpio_in(cc, 1 << gpio); 27 + } 28 + 29 + static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio, 30 + int value) 31 + { 32 + struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); 33 + 34 + bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0); 35 + } 36 + 37 + static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) 38 + { 39 + struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); 40 + 41 + bcma_chipco_gpio_outen(cc, 1 << gpio, 0); 42 + return 0; 43 + } 44 + 45 + static int bcma_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, 46 + int value) 47 + { 48 + struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); 49 + 50 + bcma_chipco_gpio_outen(cc, 1 << gpio, 1 << gpio); 51 + bcma_chipco_gpio_out(cc, 1 << gpio, value ? 
1 << gpio : 0); 52 + return 0; 53 + } 54 + 55 + static int bcma_gpio_request(struct gpio_chip *chip, unsigned gpio) 56 + { 57 + struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); 58 + 59 + bcma_chipco_gpio_control(cc, 1 << gpio, 0); 60 + /* clear pulldown */ 61 + bcma_chipco_gpio_pulldown(cc, 1 << gpio, 0); 62 + /* Set pullup */ 63 + bcma_chipco_gpio_pullup(cc, 1 << gpio, 1 << gpio); 64 + 65 + return 0; 66 + } 67 + 68 + static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio) 69 + { 70 + struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); 71 + 72 + /* clear pullup */ 73 + bcma_chipco_gpio_pullup(cc, 1 << gpio, 0); 74 + } 75 + 76 + int bcma_gpio_init(struct bcma_drv_cc *cc) 77 + { 78 + struct gpio_chip *chip = &cc->gpio; 79 + 80 + chip->label = "bcma_gpio"; 81 + chip->owner = THIS_MODULE; 82 + chip->request = bcma_gpio_request; 83 + chip->free = bcma_gpio_free; 84 + chip->get = bcma_gpio_get_value; 85 + chip->set = bcma_gpio_set_value; 86 + chip->direction_input = bcma_gpio_direction_input; 87 + chip->direction_output = bcma_gpio_direction_output; 88 + chip->ngpio = 16; 89 + /* There is just one SoC in one device and its GPIO addresses should be 90 + * deterministic to address them more easily. The other buses could get 91 + * a random base number. */ 92 + if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) 93 + chip->base = 0; 94 + else 95 + chip->base = -1; 96 + 97 + return gpiochip_add(chip); 98 + }
+5
drivers/bcma/main.c
··· 164 164 bcma_err(bus, "Error registering NAND flash\n"); 165 165 } 166 166 #endif 167 + err = bcma_gpio_init(&bus->drv_cc); 168 + if (err == -ENOTSUPP) 169 + bcma_debug(bus, "GPIO driver not activated\n"); 170 + else if (err) 171 + bcma_err(bus, "Error registering GPIO driver: %i\n", err); 167 172 168 173 if (bus->hosttype == BCMA_HOSTTYPE_SOC) { 169 174 err = bcma_chipco_watchdog_register(&bus->drv_cc);
+32 -1
drivers/edac/Kconfig
··· 7 7 menuconfig EDAC 8 8 bool "EDAC (Error Detection And Correction) reporting" 9 9 depends on HAS_IOMEM 10 - depends on X86 || PPC || TILE || ARM 10 + depends on X86 || PPC || TILE || ARM || EDAC_SUPPORT 11 11 help 12 12 EDAC is designed to report errors in the core system. 13 13 These are low-level errors that are reported in the CPU or ··· 26 26 27 27 There is also a mailing list for the EDAC project, which can 28 28 be found via the sourceforge page. 29 + 30 + config EDAC_SUPPORT 31 + bool 29 32 30 33 if EDAC 31 34 ··· 318 315 help 319 316 Support for error detection and correction on the 320 317 Calxeda Highbank memory controller. 318 + 319 + config EDAC_OCTEON_PC 320 + tristate "Cavium Octeon Primary Caches" 321 + depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON 322 + help 323 + Support for error detection and correction on the primary caches of 324 + the cnMIPS cores of Cavium Octeon family SOCs. 325 + 326 + config EDAC_OCTEON_L2C 327 + tristate "Cavium Octeon Secondary Caches (L2C)" 328 + depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON 329 + help 330 + Support for error detection and correction on the 331 + Cavium Octeon family of SOCs. 332 + 333 + config EDAC_OCTEON_LMC 334 + tristate "Cavium Octeon DRAM Memory Controller (LMC)" 335 + depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON 336 + help 337 + Support for error detection and correction on the 338 + Cavium Octeon family of SOCs. 339 + 340 + config EDAC_OCTEON_PCI 341 + tristate "Cavium Octeon PCI Controller" 342 + depends on EDAC_MM_EDAC && PCI && CPU_CAVIUM_OCTEON 343 + help 344 + Support for error detection and correction on the 345 + Cavium Octeon family of SOCs. 321 346 322 347 endif # EDAC
+5
drivers/edac/Makefile
··· 58 58 59 59 obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o 60 60 obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o 61 + 62 + obj-$(CONFIG_EDAC_OCTEON_PC) += octeon_edac-pc.o 63 + obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o 64 + obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o 65 + obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
+208
drivers/edac/octeon_edac-l2c.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2012 Cavium, Inc. 7 + * 8 + * Copyright (C) 2009 Wind River Systems, 9 + * written by Ralf Baechle <ralf@linux-mips.org> 10 + */ 11 + #include <linux/module.h> 12 + #include <linux/init.h> 13 + #include <linux/slab.h> 14 + #include <linux/io.h> 15 + #include <linux/edac.h> 16 + 17 + #include <asm/octeon/cvmx.h> 18 + 19 + #include "edac_core.h" 20 + #include "edac_module.h" 21 + 22 + #define EDAC_MOD_STR "octeon-l2c" 23 + 24 + static void octeon_l2c_poll_oct1(struct edac_device_ctl_info *l2c) 25 + { 26 + union cvmx_l2t_err l2t_err, l2t_err_reset; 27 + union cvmx_l2d_err l2d_err, l2d_err_reset; 28 + 29 + l2t_err_reset.u64 = 0; 30 + l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); 31 + if (l2t_err.s.sec_err) { 32 + edac_device_handle_ce(l2c, 0, 0, 33 + "Tag Single bit error (corrected)"); 34 + l2t_err_reset.s.sec_err = 1; 35 + } 36 + if (l2t_err.s.ded_err) { 37 + edac_device_handle_ue(l2c, 0, 0, 38 + "Tag Double bit error (detected)"); 39 + l2t_err_reset.s.ded_err = 1; 40 + } 41 + if (l2t_err_reset.u64) 42 + cvmx_write_csr(CVMX_L2T_ERR, l2t_err_reset.u64); 43 + 44 + l2d_err_reset.u64 = 0; 45 + l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR); 46 + if (l2d_err.s.sec_err) { 47 + edac_device_handle_ce(l2c, 0, 1, 48 + "Data Single bit error (corrected)"); 49 + l2d_err_reset.s.sec_err = 1; 50 + } 51 + if (l2d_err.s.ded_err) { 52 + edac_device_handle_ue(l2c, 0, 1, 53 + "Data Double bit error (detected)"); 54 + l2d_err_reset.s.ded_err = 1; 55 + } 56 + if (l2d_err_reset.u64) 57 + cvmx_write_csr(CVMX_L2D_ERR, l2d_err_reset.u64); 58 + 59 + } 60 + 61 + static void _octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c, int tad) 62 + { 63 + union cvmx_l2c_err_tdtx err_tdtx, err_tdtx_reset; 64 + union cvmx_l2c_err_ttgx err_ttgx, err_ttgx_reset; 65 + char buf1[64]; 66 + char 
buf2[80]; 67 + 68 + err_tdtx_reset.u64 = 0; 69 + err_tdtx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad)); 70 + if (err_tdtx.s.dbe || err_tdtx.s.sbe || 71 + err_tdtx.s.vdbe || err_tdtx.s.vsbe) 72 + snprintf(buf1, sizeof(buf1), 73 + "type:%d, syn:0x%x, way:%d", 74 + err_tdtx.s.type, err_tdtx.s.syn, err_tdtx.s.wayidx); 75 + 76 + if (err_tdtx.s.dbe) { 77 + snprintf(buf2, sizeof(buf2), 78 + "L2D Double bit error (detected):%s", buf1); 79 + err_tdtx_reset.s.dbe = 1; 80 + edac_device_handle_ue(l2c, tad, 1, buf2); 81 + } 82 + if (err_tdtx.s.sbe) { 83 + snprintf(buf2, sizeof(buf2), 84 + "L2D Single bit error (corrected):%s", buf1); 85 + err_tdtx_reset.s.sbe = 1; 86 + edac_device_handle_ce(l2c, tad, 1, buf2); 87 + } 88 + if (err_tdtx.s.vdbe) { 89 + snprintf(buf2, sizeof(buf2), 90 + "VBF Double bit error (detected):%s", buf1); 91 + err_tdtx_reset.s.vdbe = 1; 92 + edac_device_handle_ue(l2c, tad, 1, buf2); 93 + } 94 + if (err_tdtx.s.vsbe) { 95 + snprintf(buf2, sizeof(buf2), 96 + "VBF Single bit error (corrected):%s", buf1); 97 + err_tdtx_reset.s.vsbe = 1; 98 + edac_device_handle_ce(l2c, tad, 1, buf2); 99 + } 100 + if (err_tdtx_reset.u64) 101 + cvmx_write_csr(CVMX_L2C_ERR_TDTX(tad), err_tdtx_reset.u64); 102 + 103 + err_ttgx_reset.u64 = 0; 104 + err_ttgx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TTGX(tad)); 105 + 106 + if (err_ttgx.s.dbe || err_ttgx.s.sbe) 107 + snprintf(buf1, sizeof(buf1), 108 + "type:%d, syn:0x%x, way:%d", 109 + err_ttgx.s.type, err_ttgx.s.syn, err_ttgx.s.wayidx); 110 + 111 + if (err_ttgx.s.dbe) { 112 + snprintf(buf2, sizeof(buf2), 113 + "Tag Double bit error (detected):%s", buf1); 114 + err_ttgx_reset.s.dbe = 1; 115 + edac_device_handle_ue(l2c, tad, 0, buf2); 116 + } 117 + if (err_ttgx.s.sbe) { 118 + snprintf(buf2, sizeof(buf2), 119 + "Tag Single bit error (corrected):%s", buf1); 120 + err_ttgx_reset.s.sbe = 1; 121 + edac_device_handle_ce(l2c, tad, 0, buf2); 122 + } 123 + if (err_ttgx_reset.u64) 124 + cvmx_write_csr(CVMX_L2C_ERR_TTGX(tad), err_ttgx_reset.u64); 125 + } 126 
+ 127 + static void octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c) 128 + { 129 + int i; 130 + for (i = 0; i < l2c->nr_instances; i++) 131 + _octeon_l2c_poll_oct2(l2c, i); 132 + } 133 + 134 + static int __devinit octeon_l2c_probe(struct platform_device *pdev) 135 + { 136 + struct edac_device_ctl_info *l2c; 137 + 138 + int num_tads = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 1; 139 + 140 + /* 'Tags' are block 0, 'Data' is block 1*/ 141 + l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0, 142 + NULL, 0, edac_device_alloc_index()); 143 + if (!l2c) 144 + return -ENOMEM; 145 + 146 + l2c->dev = &pdev->dev; 147 + platform_set_drvdata(pdev, l2c); 148 + l2c->dev_name = dev_name(&pdev->dev); 149 + 150 + l2c->mod_name = "octeon-l2c"; 151 + l2c->ctl_name = "octeon_l2c_err"; 152 + 153 + 154 + if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) { 155 + union cvmx_l2t_err l2t_err; 156 + union cvmx_l2d_err l2d_err; 157 + 158 + l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); 159 + l2t_err.s.sec_intena = 0; /* We poll */ 160 + l2t_err.s.ded_intena = 0; 161 + cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64); 162 + 163 + l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR); 164 + l2d_err.s.sec_intena = 0; /* We poll */ 165 + l2d_err.s.ded_intena = 0; 166 + cvmx_write_csr(CVMX_L2T_ERR, l2d_err.u64); 167 + 168 + l2c->edac_check = octeon_l2c_poll_oct1; 169 + } else { 170 + /* OCTEON II */ 171 + l2c->edac_check = octeon_l2c_poll_oct2; 172 + } 173 + 174 + if (edac_device_add_device(l2c) > 0) { 175 + pr_err("%s: edac_device_add_device() failed\n", __func__); 176 + goto err; 177 + } 178 + 179 + 180 + return 0; 181 + 182 + err: 183 + edac_device_free_ctl_info(l2c); 184 + 185 + return -ENXIO; 186 + } 187 + 188 + static int octeon_l2c_remove(struct platform_device *pdev) 189 + { 190 + struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev); 191 + 192 + edac_device_del_device(&pdev->dev); 193 + edac_device_free_ctl_info(l2c); 194 + 195 + return 0; 196 + } 197 + 198 + static struct platform_driver 
octeon_l2c_driver = { 199 + .probe = octeon_l2c_probe, 200 + .remove = octeon_l2c_remove, 201 + .driver = { 202 + .name = "octeon_l2c_edac", 203 + } 204 + }; 205 + module_platform_driver(octeon_l2c_driver); 206 + 207 + MODULE_LICENSE("GPL"); 208 + MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+186
drivers/edac/octeon_edac-lmc.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2009 Wind River Systems, 7 + * written by Ralf Baechle <ralf@linux-mips.org> 8 + */ 9 + #include <linux/module.h> 10 + #include <linux/init.h> 11 + #include <linux/slab.h> 12 + #include <linux/io.h> 13 + #include <linux/edac.h> 14 + 15 + #include <asm/octeon/octeon.h> 16 + #include <asm/octeon/cvmx-lmcx-defs.h> 17 + 18 + #include "edac_core.h" 19 + #include "edac_module.h" 20 + 21 + #define OCTEON_MAX_MC 4 22 + 23 + static void octeon_lmc_edac_poll(struct mem_ctl_info *mci) 24 + { 25 + union cvmx_lmcx_mem_cfg0 cfg0; 26 + bool do_clear = false; 27 + char msg[64]; 28 + 29 + cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx)); 30 + if (cfg0.s.sec_err || cfg0.s.ded_err) { 31 + union cvmx_lmcx_fadr fadr; 32 + fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx)); 33 + snprintf(msg, sizeof(msg), 34 + "DIMM %d rank %d bank %d row %d col %d", 35 + fadr.cn30xx.fdimm, fadr.cn30xx.fbunk, 36 + fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol); 37 + } 38 + 39 + if (cfg0.s.sec_err) { 40 + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, 41 + -1, -1, -1, msg, ""); 42 + cfg0.s.sec_err = -1; /* Done, re-arm */ 43 + do_clear = true; 44 + } 45 + 46 + if (cfg0.s.ded_err) { 47 + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, 48 + -1, -1, -1, msg, ""); 49 + cfg0.s.ded_err = -1; /* Done, re-arm */ 50 + do_clear = true; 51 + } 52 + if (do_clear) 53 + cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64); 54 + } 55 + 56 + static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci) 57 + { 58 + union cvmx_lmcx_int int_reg; 59 + bool do_clear = false; 60 + char msg[64]; 61 + 62 + int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); 63 + if (int_reg.s.sec_err || int_reg.s.ded_err) { 64 + union cvmx_lmcx_fadr fadr; 65 + fadr.u64 = 
cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx)); 66 + snprintf(msg, sizeof(msg), 67 + "DIMM %d rank %d bank %d row %d col %d", 68 + fadr.cn61xx.fdimm, fadr.cn61xx.fbunk, 69 + fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol); 70 + } 71 + 72 + if (int_reg.s.sec_err) { 73 + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, 74 + -1, -1, -1, msg, ""); 75 + int_reg.s.sec_err = -1; /* Done, re-arm */ 76 + do_clear = true; 77 + } 78 + 79 + if (int_reg.s.ded_err) { 80 + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, 81 + -1, -1, -1, msg, ""); 82 + int_reg.s.ded_err = -1; /* Done, re-arm */ 83 + do_clear = true; 84 + } 85 + if (do_clear) 86 + cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64); 87 + } 88 + 89 + static int __devinit octeon_lmc_edac_probe(struct platform_device *pdev) 90 + { 91 + struct mem_ctl_info *mci; 92 + struct edac_mc_layer layers[1]; 93 + int mc = pdev->id; 94 + 95 + layers[0].type = EDAC_MC_LAYER_CHANNEL; 96 + layers[0].size = 1; 97 + layers[0].is_virt_csrow = false; 98 + 99 + if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) { 100 + union cvmx_lmcx_mem_cfg0 cfg0; 101 + 102 + cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0)); 103 + if (!cfg0.s.ecc_ena) { 104 + dev_info(&pdev->dev, "Disabled (ECC not enabled)\n"); 105 + return 0; 106 + } 107 + 108 + mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0); 109 + if (!mci) 110 + return -ENXIO; 111 + 112 + mci->pdev = &pdev->dev; 113 + mci->dev_name = dev_name(&pdev->dev); 114 + 115 + mci->mod_name = "octeon-lmc"; 116 + mci->ctl_name = "octeon-lmc-err"; 117 + mci->edac_check = octeon_lmc_edac_poll; 118 + 119 + if (edac_mc_add_mc(mci)) { 120 + dev_err(&pdev->dev, "edac_mc_add_mc() failed\n"); 121 + edac_mc_free(mci); 122 + return -ENXIO; 123 + } 124 + 125 + cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc)); 126 + cfg0.s.intr_ded_ena = 0; /* We poll */ 127 + cfg0.s.intr_sec_ena = 0; 128 + cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64); 129 + } else { 130 + /* OCTEON II */ 131 + union 
cvmx_lmcx_int_en en; 132 + union cvmx_lmcx_config config; 133 + 134 + config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0)); 135 + if (!config.s.ecc_ena) { 136 + dev_info(&pdev->dev, "Disabled (ECC not enabled)\n"); 137 + return 0; 138 + } 139 + 140 + mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0); 141 + if (!mci) 142 + return -ENXIO; 143 + 144 + mci->pdev = &pdev->dev; 145 + mci->dev_name = dev_name(&pdev->dev); 146 + 147 + mci->mod_name = "octeon-lmc"; 148 + mci->ctl_name = "co_lmc_err"; 149 + mci->edac_check = octeon_lmc_edac_poll_o2; 150 + 151 + if (edac_mc_add_mc(mci)) { 152 + dev_err(&pdev->dev, "edac_mc_add_mc() failed\n"); 153 + edac_mc_free(mci); 154 + return -ENXIO; 155 + } 156 + 157 + en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc)); 158 + en.s.intr_ded_ena = 0; /* We poll */ 159 + en.s.intr_sec_ena = 0; 160 + cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), en.u64); 161 + } 162 + platform_set_drvdata(pdev, mci); 163 + 164 + return 0; 165 + } 166 + 167 + static int octeon_lmc_edac_remove(struct platform_device *pdev) 168 + { 169 + struct mem_ctl_info *mci = platform_get_drvdata(pdev); 170 + 171 + edac_mc_del_mc(&pdev->dev); 172 + edac_mc_free(mci); 173 + return 0; 174 + } 175 + 176 + static struct platform_driver octeon_lmc_edac_driver = { 177 + .probe = octeon_lmc_edac_probe, 178 + .remove = octeon_lmc_edac_remove, 179 + .driver = { 180 + .name = "octeon_lmc_edac", 181 + } 182 + }; 183 + module_platform_driver(octeon_lmc_edac_driver); 184 + 185 + MODULE_LICENSE("GPL"); 186 + MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+143
drivers/edac/octeon_edac-pc.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2012 Cavium, Inc. 7 + * 8 + * Copyright (C) 2009 Wind River Systems, 9 + * written by Ralf Baechle <ralf@linux-mips.org> 10 + */ 11 + #include <linux/module.h> 12 + #include <linux/init.h> 13 + #include <linux/slab.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/io.h> 16 + #include <linux/edac.h> 17 + 18 + #include "edac_core.h" 19 + #include "edac_module.h" 20 + 21 + #include <asm/octeon/cvmx.h> 22 + #include <asm/mipsregs.h> 23 + 24 + extern int register_co_cache_error_notifier(struct notifier_block *nb); 25 + extern int unregister_co_cache_error_notifier(struct notifier_block *nb); 26 + 27 + extern unsigned long long cache_err_dcache[NR_CPUS]; 28 + 29 + struct co_cache_error { 30 + struct notifier_block notifier; 31 + struct edac_device_ctl_info *ed; 32 + }; 33 + 34 + /** 35 + * EDAC CPU cache error callback 36 + * 37 + * @event: non-zero if unrecoverable. 
38 + */ 39 + static int co_cache_error_event(struct notifier_block *this, 40 + unsigned long event, void *ptr) 41 + { 42 + struct co_cache_error *p = container_of(this, struct co_cache_error, 43 + notifier); 44 + 45 + unsigned int core = cvmx_get_core_num(); 46 + unsigned int cpu = smp_processor_id(); 47 + u64 icache_err = read_octeon_c0_icacheerr(); 48 + u64 dcache_err; 49 + 50 + if (event) { 51 + dcache_err = cache_err_dcache[core]; 52 + cache_err_dcache[core] = 0; 53 + } else { 54 + dcache_err = read_octeon_c0_dcacheerr(); 55 + } 56 + 57 + if (icache_err & 1) { 58 + edac_device_printk(p->ed, KERN_ERR, 59 + "CacheErr (Icache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n", 60 + (unsigned long long)icache_err, core, cpu, 61 + read_c0_errorepc()); 62 + write_octeon_c0_icacheerr(0); 63 + edac_device_handle_ce(p->ed, cpu, 1, "icache"); 64 + } 65 + if (dcache_err & 1) { 66 + edac_device_printk(p->ed, KERN_ERR, 67 + "CacheErr (Dcache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n", 68 + (unsigned long long)dcache_err, core, cpu, 69 + read_c0_errorepc()); 70 + if (event) 71 + edac_device_handle_ue(p->ed, cpu, 0, "dcache"); 72 + else 73 + edac_device_handle_ce(p->ed, cpu, 0, "dcache"); 74 + 75 + /* Clear the error indication */ 76 + if (OCTEON_IS_MODEL(OCTEON_FAM_2)) 77 + write_octeon_c0_dcacheerr(1); 78 + else 79 + write_octeon_c0_dcacheerr(0); 80 + } 81 + 82 + return NOTIFY_STOP; 83 + } 84 + 85 + static int __devinit co_cache_error_probe(struct platform_device *pdev) 86 + { 87 + struct co_cache_error *p = devm_kzalloc(&pdev->dev, sizeof(*p), 88 + GFP_KERNEL); 89 + if (!p) 90 + return -ENOMEM; 91 + 92 + p->notifier.notifier_call = co_cache_error_event; 93 + platform_set_drvdata(pdev, p); 94 + 95 + p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(), 96 + "cache", 2, 0, NULL, 0, 97 + edac_device_alloc_index()); 98 + if (!p->ed) 99 + goto err; 100 + 101 + p->ed->dev = &pdev->dev; 102 + 103 + p->ed->dev_name = dev_name(&pdev->dev); 104 + 105 + p->ed->mod_name = 
"octeon-cpu"; 106 + p->ed->ctl_name = "cache"; 107 + 108 + if (edac_device_add_device(p->ed)) { 109 + pr_err("%s: edac_device_add_device() failed\n", __func__); 110 + goto err1; 111 + } 112 + 113 + register_co_cache_error_notifier(&p->notifier); 114 + 115 + return 0; 116 + 117 + err1: 118 + edac_device_free_ctl_info(p->ed); 119 + err: 120 + return -ENXIO; 121 + } 122 + 123 + static int co_cache_error_remove(struct platform_device *pdev) 124 + { 125 + struct co_cache_error *p = platform_get_drvdata(pdev); 126 + 127 + unregister_co_cache_error_notifier(&p->notifier); 128 + edac_device_del_device(&pdev->dev); 129 + edac_device_free_ctl_info(p->ed); 130 + return 0; 131 + } 132 + 133 + static struct platform_driver co_cache_error_driver = { 134 + .probe = co_cache_error_probe, 135 + .remove = co_cache_error_remove, 136 + .driver = { 137 + .name = "octeon_pc_edac", 138 + } 139 + }; 140 + module_platform_driver(co_cache_error_driver); 141 + 142 + MODULE_LICENSE("GPL"); 143 + MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+111
drivers/edac/octeon_edac-pci.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2012 Cavium, Inc. 7 + * Copyright (C) 2009 Wind River Systems, 8 + * written by Ralf Baechle <ralf@linux-mips.org> 9 + */ 10 + #include <linux/module.h> 11 + #include <linux/init.h> 12 + #include <linux/slab.h> 13 + #include <linux/io.h> 14 + #include <linux/edac.h> 15 + 16 + #include <asm/octeon/cvmx.h> 17 + #include <asm/octeon/cvmx-npi-defs.h> 18 + #include <asm/octeon/cvmx-pci-defs.h> 19 + #include <asm/octeon/octeon.h> 20 + 21 + #include "edac_core.h" 22 + #include "edac_module.h" 23 + 24 + static void octeon_pci_poll(struct edac_pci_ctl_info *pci) 25 + { 26 + union cvmx_pci_cfg01 cfg01; 27 + 28 + cfg01.u32 = octeon_npi_read32(CVMX_NPI_PCI_CFG01); 29 + if (cfg01.s.dpe) { /* Detected parity error */ 30 + edac_pci_handle_pe(pci, pci->ctl_name); 31 + cfg01.s.dpe = 1; /* Reset */ 32 + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); 33 + } 34 + if (cfg01.s.sse) { 35 + edac_pci_handle_npe(pci, "Signaled System Error"); 36 + cfg01.s.sse = 1; /* Reset */ 37 + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); 38 + } 39 + if (cfg01.s.rma) { 40 + edac_pci_handle_npe(pci, "Received Master Abort"); 41 + cfg01.s.rma = 1; /* Reset */ 42 + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); 43 + } 44 + if (cfg01.s.rta) { 45 + edac_pci_handle_npe(pci, "Received Target Abort"); 46 + cfg01.s.rta = 1; /* Reset */ 47 + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); 48 + } 49 + if (cfg01.s.sta) { 50 + edac_pci_handle_npe(pci, "Signaled Target Abort"); 51 + cfg01.s.sta = 1; /* Reset */ 52 + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); 53 + } 54 + if (cfg01.s.mdpe) { 55 + edac_pci_handle_npe(pci, "Master Data Parity Error"); 56 + cfg01.s.mdpe = 1; /* Reset */ 57 + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); 58 + } 59 + } 60 + 61 + static int 
__devinit octeon_pci_probe(struct platform_device *pdev) 62 + { 63 + struct edac_pci_ctl_info *pci; 64 + int res = 0; 65 + 66 + pci = edac_pci_alloc_ctl_info(0, "octeon_pci_err"); 67 + if (!pci) 68 + return -ENOMEM; 69 + 70 + pci->dev = &pdev->dev; 71 + platform_set_drvdata(pdev, pci); 72 + pci->dev_name = dev_name(&pdev->dev); 73 + 74 + pci->mod_name = "octeon-pci"; 75 + pci->ctl_name = "octeon_pci_err"; 76 + pci->edac_check = octeon_pci_poll; 77 + 78 + if (edac_pci_add_device(pci, 0) > 0) { 79 + pr_err("%s: edac_pci_add_device() failed\n", __func__); 80 + goto err; 81 + } 82 + 83 + return 0; 84 + 85 + err: 86 + edac_pci_free_ctl_info(pci); 87 + 88 + return res; 89 + } 90 + 91 + static int octeon_pci_remove(struct platform_device *pdev) 92 + { 93 + struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); 94 + 95 + edac_pci_del_device(&pdev->dev); 96 + edac_pci_free_ctl_info(pci); 97 + 98 + return 0; 99 + } 100 + 101 + static struct platform_driver octeon_pci_driver = { 102 + .probe = octeon_pci_probe, 103 + .remove = octeon_pci_remove, 104 + .driver = { 105 + .name = "octeon_pci_edac", 106 + } 107 + }; 108 + module_platform_driver(octeon_pci_driver); 109 + 110 + MODULE_LICENSE("GPL"); 111 + MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+9
drivers/ssb/Kconfig
··· 160 160 161 161 If unsure, say N 162 162 163 + config SSB_DRIVER_GPIO 164 + bool "SSB GPIO driver" 165 + depends on SSB 166 + select GPIOLIB 167 + help 168 + Driver to provide access to the GPIO pins on the bus. 169 + 170 + If unsure, say N 171 + 163 172 endmenu
+1
drivers/ssb/Makefile
··· 15 15 ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o 16 16 ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o 17 17 ssb-$(CONFIG_SSB_DRIVER_GIGE) += driver_gige.o 18 + ssb-$(CONFIG_SSB_DRIVER_GPIO) += driver_gpio.o 18 19 19 20 # b43 pci-ssb-bridge driver 20 21 # Not strictly a part of SSB, but kept here for convenience
+73 -5
drivers/ssb/driver_chipcommon.c
··· 349 349 { 350 350 if (!cc->dev) 351 351 return; /* We don't have a ChipCommon */ 352 + 353 + spin_lock_init(&cc->gpio_lock); 354 + 352 355 if (cc->dev->id.revision >= 11) 353 356 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT); 354 357 ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status); ··· 508 505 509 506 u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value) 510 507 { 511 - return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value); 508 + unsigned long flags; 509 + u32 res = 0; 510 + 511 + spin_lock_irqsave(&cc->gpio_lock, flags); 512 + res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value); 513 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 514 + 515 + return res; 512 516 } 513 517 514 518 u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value) 515 519 { 516 - return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value); 520 + unsigned long flags; 521 + u32 res = 0; 522 + 523 + spin_lock_irqsave(&cc->gpio_lock, flags); 524 + res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value); 525 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 526 + 527 + return res; 517 528 } 518 529 519 530 u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value) 520 531 { 521 - return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value); 532 + unsigned long flags; 533 + u32 res = 0; 534 + 535 + spin_lock_irqsave(&cc->gpio_lock, flags); 536 + res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value); 537 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 538 + 539 + return res; 522 540 } 523 541 EXPORT_SYMBOL(ssb_chipco_gpio_control); 524 542 525 543 u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value) 526 544 { 527 - return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value); 545 + unsigned long flags; 546 + u32 res = 0; 547 + 548 + spin_lock_irqsave(&cc->gpio_lock, flags); 549 + res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, 
mask, value); 550 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 551 + 552 + return res; 528 553 } 529 554 530 555 u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value) 531 556 { 532 - return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value); 557 + unsigned long flags; 558 + u32 res = 0; 559 + 560 + spin_lock_irqsave(&cc->gpio_lock, flags); 561 + res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value); 562 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 563 + 564 + return res; 565 + } 566 + 567 + u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value) 568 + { 569 + unsigned long flags; 570 + u32 res = 0; 571 + 572 + if (cc->dev->id.revision < 20) 573 + return 0xffffffff; 574 + 575 + spin_lock_irqsave(&cc->gpio_lock, flags); 576 + res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLUP, mask, value); 577 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 578 + 579 + return res; 580 + } 581 + 582 + u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value) 583 + { 584 + unsigned long flags; 585 + u32 res = 0; 586 + 587 + if (cc->dev->id.revision < 20) 588 + return 0xffffffff; 589 + 590 + spin_lock_irqsave(&cc->gpio_lock, flags); 591 + res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLDOWN, mask, value); 592 + spin_unlock_irqrestore(&cc->gpio_lock, flags); 593 + 594 + return res; 533 595 } 534 596 535 597 #ifdef CONFIG_SSB_SERIAL
+39 -4
drivers/ssb/driver_extif.c
··· 138 138 return ticks; 139 139 } 140 140 141 + void ssb_extif_init(struct ssb_extif *extif) 142 + { 143 + if (!extif->dev) 144 + return; /* We don't have a Extif core */ 145 + spin_lock_init(&extif->gpio_lock); 146 + } 147 + 141 148 u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) 142 149 { 143 150 return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask; ··· 152 145 153 146 u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value) 154 147 { 155 - return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0), 148 + unsigned long flags; 149 + u32 res = 0; 150 + 151 + spin_lock_irqsave(&extif->gpio_lock, flags); 152 + res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0), 156 153 mask, value); 154 + spin_unlock_irqrestore(&extif->gpio_lock, flags); 155 + 156 + return res; 157 157 } 158 158 159 159 u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value) 160 160 { 161 - return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0), 161 + unsigned long flags; 162 + u32 res = 0; 163 + 164 + spin_lock_irqsave(&extif->gpio_lock, flags); 165 + res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0), 162 166 mask, value); 167 + spin_unlock_irqrestore(&extif->gpio_lock, flags); 168 + 169 + return res; 163 170 } 164 171 165 172 u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value) 166 173 { 167 - return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value); 174 + unsigned long flags; 175 + u32 res = 0; 176 + 177 + spin_lock_irqsave(&extif->gpio_lock, flags); 178 + res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value); 179 + spin_unlock_irqrestore(&extif->gpio_lock, flags); 180 + 181 + return res; 168 182 } 169 183 170 184 u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value) 171 185 { 172 - return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value); 186 + unsigned long flags; 187 + u32 res = 0; 188 + 189 + spin_lock_irqsave(&extif->gpio_lock, flags); 190 + res = 
extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value); 191 + spin_unlock_irqrestore(&extif->gpio_lock, flags); 192 + 193 + return res; 173 194 }
+176
drivers/ssb/driver_gpio.c
··· 1 + /* 2 + * Sonics Silicon Backplane 3 + * GPIO driver 4 + * 5 + * Copyright 2011, Broadcom Corporation 6 + * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de> 7 + * 8 + * Licensed under the GNU/GPL. See COPYING for details. 9 + */ 10 + 11 + #include <linux/gpio.h> 12 + #include <linux/export.h> 13 + #include <linux/ssb/ssb.h> 14 + 15 + #include "ssb_private.h" 16 + 17 + static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip) 18 + { 19 + return container_of(chip, struct ssb_bus, gpio); 20 + } 21 + 22 + static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio) 23 + { 24 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 25 + 26 + return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio); 27 + } 28 + 29 + static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio, 30 + int value) 31 + { 32 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 33 + 34 + ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0); 35 + } 36 + 37 + static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip, 38 + unsigned gpio) 39 + { 40 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 41 + 42 + ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0); 43 + return 0; 44 + } 45 + 46 + static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip, 47 + unsigned gpio, int value) 48 + { 49 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 50 + 51 + ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio); 52 + ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 
1 << gpio : 0); 53 + return 0; 54 + } 55 + 56 + static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio) 57 + { 58 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 59 + 60 + ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0); 61 + /* clear pulldown */ 62 + ssb_chipco_gpio_pulldown(&bus->chipco, 1 << gpio, 0); 63 + /* Set pullup */ 64 + ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 1 << gpio); 65 + 66 + return 0; 67 + } 68 + 69 + static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio) 70 + { 71 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 72 + 73 + /* clear pullup */ 74 + ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0); 75 + } 76 + 77 + static int ssb_gpio_chipco_init(struct ssb_bus *bus) 78 + { 79 + struct gpio_chip *chip = &bus->gpio; 80 + 81 + chip->label = "ssb_chipco_gpio"; 82 + chip->owner = THIS_MODULE; 83 + chip->request = ssb_gpio_chipco_request; 84 + chip->free = ssb_gpio_chipco_free; 85 + chip->get = ssb_gpio_chipco_get_value; 86 + chip->set = ssb_gpio_chipco_set_value; 87 + chip->direction_input = ssb_gpio_chipco_direction_input; 88 + chip->direction_output = ssb_gpio_chipco_direction_output; 89 + chip->ngpio = 16; 90 + /* There is just one SoC in one device and its GPIO addresses should be 91 + * deterministic to address them more easily. The other buses could get 92 + * a random base number. */ 93 + if (bus->bustype == SSB_BUSTYPE_SSB) 94 + chip->base = 0; 95 + else 96 + chip->base = -1; 97 + 98 + return gpiochip_add(chip); 99 + } 100 + 101 + #ifdef CONFIG_SSB_DRIVER_EXTIF 102 + 103 + static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio) 104 + { 105 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 106 + 107 + return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio); 108 + } 109 + 110 + static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio, 111 + int value) 112 + { 113 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 114 + 115 + ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 
1 << gpio : 0); 116 + } 117 + 118 + static int ssb_gpio_extif_direction_input(struct gpio_chip *chip, 119 + unsigned gpio) 120 + { 121 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 122 + 123 + ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0); 124 + return 0; 125 + } 126 + 127 + static int ssb_gpio_extif_direction_output(struct gpio_chip *chip, 128 + unsigned gpio, int value) 129 + { 130 + struct ssb_bus *bus = ssb_gpio_get_bus(chip); 131 + 132 + ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio); 133 + ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0); 134 + return 0; 135 + } 136 + 137 + static int ssb_gpio_extif_init(struct ssb_bus *bus) 138 + { 139 + struct gpio_chip *chip = &bus->gpio; 140 + 141 + chip->label = "ssb_extif_gpio"; 142 + chip->owner = THIS_MODULE; 143 + chip->get = ssb_gpio_extif_get_value; 144 + chip->set = ssb_gpio_extif_set_value; 145 + chip->direction_input = ssb_gpio_extif_direction_input; 146 + chip->direction_output = ssb_gpio_extif_direction_output; 147 + chip->ngpio = 5; 148 + /* There is just one SoC in one device and its GPIO addresses should be 149 + * deterministic to address them more easily. The other buses could get 150 + * a random base number. */ 151 + if (bus->bustype == SSB_BUSTYPE_SSB) 152 + chip->base = 0; 153 + else 154 + chip->base = -1; 155 + 156 + return gpiochip_add(chip); 157 + } 158 + 159 + #else 160 + static int ssb_gpio_extif_init(struct ssb_bus *bus) 161 + { 162 + return -ENOTSUPP; 163 + } 164 + #endif 165 + 166 + int ssb_gpio_init(struct ssb_bus *bus) 167 + { 168 + if (ssb_chipco_available(&bus->chipco)) 169 + return ssb_gpio_chipco_init(bus); 170 + else if (ssb_extif_available(&bus->extif)) 171 + return ssb_gpio_extif_init(bus); 172 + else 173 + SSB_WARN_ON(1); 174 + 175 + return -1; 176 + }
+7
drivers/ssb/main.c
··· 804 804 if (err) 805 805 goto err_pcmcia_exit; 806 806 ssb_chipcommon_init(&bus->chipco); 807 + ssb_extif_init(&bus->extif); 807 808 ssb_mipscore_init(&bus->mipscore); 809 + err = ssb_gpio_init(bus); 810 + if (err == -ENOTSUPP) 811 + ssb_dprintk(KERN_DEBUG PFX "GPIO driver not activated\n"); 812 + else if (err) 813 + ssb_dprintk(KERN_ERR PFX 814 + "Error registering GPIO driver: %i\n", err); 808 815 err = ssb_fetch_invariants(bus, get_invariants); 809 816 if (err) { 810 817 ssb_bus_may_powerdown(bus);
+17
drivers/ssb/ssb_private.h
··· 242 242 } 243 243 #endif /* CONFIG_SSB_EMBEDDED */ 244 244 245 + #ifdef CONFIG_SSB_DRIVER_EXTIF 246 + extern void ssb_extif_init(struct ssb_extif *extif); 247 + #else 248 + static inline void ssb_extif_init(struct ssb_extif *extif) 249 + { 250 + } 251 + #endif 252 + 253 + #ifdef CONFIG_SSB_DRIVER_GPIO 254 + extern int ssb_gpio_init(struct ssb_bus *bus); 255 + #else /* CONFIG_SSB_DRIVER_GPIO */ 256 + static inline int ssb_gpio_init(struct ssb_bus *bus) 257 + { 258 + return -ENOTSUPP; 259 + } 260 + #endif /* CONFIG_SSB_DRIVER_GPIO */ 261 + 245 262 #endif /* LINUX_SSB_PRIVATE_H_ */
+9 -2
drivers/video/console/newport_con.c
··· 327 327 328 328 static void newport_init(struct vc_data *vc, int init) 329 329 { 330 - vc->vc_cols = newport_xsize / 8; 331 - vc->vc_rows = newport_ysize / 16; 330 + int cols, rows; 331 + 332 + cols = newport_xsize / 8; 333 + rows = newport_ysize / 16; 332 334 vc->vc_can_do_color = 1; 335 + if (init) { 336 + vc->vc_cols = cols; 337 + vc->vc_rows = rows; 338 + } else 339 + vc_resize(vc, cols, rows); 333 340 } 334 341 335 342 static void newport_deinit(struct vc_data *c)
+9
include/linux/bcma/bcma_driver_chipcommon.h
··· 2 2 #define LINUX_BCMA_DRIVER_CC_H_ 3 3 4 4 #include <linux/platform_device.h> 5 + #include <linux/gpio.h> 5 6 6 7 /** ChipCommon core registers. **/ 7 8 #define BCMA_CC_ID 0x0000 ··· 575 574 #endif /* CONFIG_BCMA_DRIVER_MIPS */ 576 575 u32 ticks_per_ms; 577 576 struct platform_device *watchdog; 577 + 578 + /* Lock for GPIO register access. */ 579 + spinlock_t gpio_lock; 580 + #ifdef CONFIG_BCMA_DRIVER_GPIO 581 + struct gpio_chip gpio; 582 + #endif 578 583 }; 579 584 580 585 /* Register access */ ··· 617 610 u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value); 618 611 u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value); 619 612 u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value); 613 + u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value); 614 + u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value); 620 615 621 616 /* PMU support */ 622 617 extern void bcma_pmu_init(struct bcma_drv_cc *cc);
+4
include/linux/ssb/ssb.h
··· 6 6 #include <linux/types.h> 7 7 #include <linux/spinlock.h> 8 8 #include <linux/pci.h> 9 + #include <linux/gpio.h> 9 10 #include <linux/mod_devicetable.h> 10 11 #include <linux/dma-mapping.h> 11 12 #include <linux/platform_device.h> ··· 436 435 spinlock_t gpio_lock; 437 436 struct platform_device *watchdog; 438 437 #endif /* EMBEDDED */ 438 + #ifdef CONFIG_SSB_DRIVER_GPIO 439 + struct gpio_chip gpio; 440 + #endif /* DRIVER_GPIO */ 439 441 440 442 /* Internal-only stuff follows. Do not touch. */ 441 443 struct list_head list;
+3
include/linux/ssb/ssb_driver_chipcommon.h
··· 590 590 u32 status; 591 591 /* Fast Powerup Delay constant */ 592 592 u16 fast_pwrup_delay; 593 + spinlock_t gpio_lock; 593 594 struct ssb_chipcommon_pmu pmu; 594 595 u32 ticks_per_ms; 595 596 u32 max_timer_ms; ··· 646 645 u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value); 647 646 u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value); 648 647 u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value); 648 + u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value); 649 + u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value); 649 650 650 651 #ifdef CONFIG_SSB_SERIAL 651 652 extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
+1
include/linux/ssb/ssb_driver_extif.h
··· 161 161 162 162 struct ssb_extif { 163 163 struct ssb_device *dev; 164 + spinlock_t gpio_lock; 164 165 }; 165 166 166 167 static inline bool ssb_extif_available(struct ssb_extif *extif)