Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc updates from Ben Herrenschmidt:
"So here's my next branch for powerpc. A bit late as I was on vacation
last week. It's mostly the same stuff that was in next already, I
just added two patches today which are the wiring up of lockref for
powerpc, which for some reason fell through the cracks last time and
is trivial.

The highlights are, in addition to a bunch of bug fixes:

- Reworked Machine Check handling on kernels running without a
hypervisor (or acting as a hypervisor). Provides hooks to handle
some errors in real mode such as TLB errors, handle SLB errors,
etc...

- Support for retrieving memory error information from the service
processor on IBM servers running without a hypervisor and routing
them to the memory poison infrastructure.

- _PAGE_NUMA support on server processors

- 32-bit BookE relocatable kernel support

- FSL e6500 hardware tablewalk support

- A bunch of new/revived board support

- FSL e6500 deeper idle states and altivec powerdown support

You'll notice a generic mm change here, it has been acked by the
relevant authorities and is a pre-req for our _PAGE_NUMA support"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits)
powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked()
powerpc: Add support for the optimised lockref implementation
powerpc/powernv: Call OPAL sync before kexec'ing
powerpc/eeh: Escalate error on non-existing PE
powerpc/eeh: Handle multiple EEH errors
powerpc: Fix transactional FP/VMX/VSX unavailable handlers
powerpc: Don't corrupt transactional state when using FP/VMX in kernel
powerpc: Reclaim two unused thread_info flag bits
powerpc: Fix races with irq_work
Move processing of MCE queued event out from syscall exit path.
pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines
powerpc: Make add_system_ram_resources() __init
powerpc: add SATA_MV to ppc64_defconfig
powerpc/powernv: Increase candidate fw image size
powerpc: Add debug checks to catch invalid cpu-to-node mappings
powerpc: Fix the setup of CPU-to-Node mappings during CPU online
powerpc/iommu: Don't detach device without IOMMU group
powerpc/eeh: Hotplug improvement
powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space
powerpc/eeh: Add restore_config operation
...

+6167 -1645
+13
Documentation/devicetree/bindings/video/ssd1289fb.txt
··· 1 + * Solomon SSD1289 Framebuffer Driver 2 + 3 + Required properties: 4 + - compatible: Should be "solomon,ssd1289fb". The only supported bus for 5 + now is lbc. 6 + - reg: Should contain address of the controller on the LBC bus. The detail 7 + was described in Documentation/devicetree/bindings/powerpc/fsl/lbc.txt 8 + 9 + Examples: 10 + display@2,0 { 11 + compatible = "solomon,ssd1289fb"; 12 + reg = <0x2 0x0000 0x0004>; 13 + };
+9 -6
arch/powerpc/Kconfig
··· 140 140 select OLD_SIGACTION if PPC32 141 141 select HAVE_DEBUG_STACKOVERFLOW 142 142 select HAVE_IRQ_EXIT_ON_IRQ_STACK 143 + select ARCH_USE_CMPXCHG_LOCKREF if PPC64 143 144 144 145 config GENERIC_CSUM 145 146 def_bool CPU_LITTLE_ENDIAN ··· 214 213 help 215 214 Used to allow a board to specify it wants a uImage built by default 216 215 default n 217 - 218 - config REDBOOT 219 - bool 220 216 221 217 config ARCH_HIBERNATION_POSSIBLE 222 218 bool ··· 382 384 config ARCH_ENABLE_MEMORY_HOTREMOVE 383 385 def_bool y 384 386 387 + config PPC64_SUPPORTS_MEMORY_FAILURE 388 + bool "Add support for memory hwpoison" 389 + depends on PPC_BOOK3S_64 390 + default "y" if PPC_POWERNV 391 + select ARCH_SUPPORTS_MEMORY_FAILURE 392 + 385 393 config KEXEC 386 394 bool "kexec system call" 387 395 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) ··· 408 404 config CRASH_DUMP 409 405 bool "Build a kdump crash kernel" 410 406 depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) 411 - select RELOCATABLE if PPC64 || 44x 412 - select DYNAMIC_MEMSTART if FSL_BOOKE 407 + select RELOCATABLE if PPC64 || 44x || FSL_BOOKE 413 408 help 414 409 Build a kernel suitable for use as a kdump capture kernel. 415 410 The same kernel binary can be used as production kernel and dump ··· 889 886 890 887 config RELOCATABLE 891 888 bool "Build a relocatable kernel" 892 - depends on ADVANCED_OPTIONS && FLATMEM && 44x 889 + depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE) 893 890 select NONSTATIC_KERNEL 894 891 help 895 892 This builds a kernel image that is capable of running at the
+1
arch/powerpc/boot/.gitignore
··· 16 16 uImage 17 17 cuImage.* 18 18 dtbImage.* 19 + *.dtb 19 20 treeImage.* 20 21 zImage 21 22 zImage.initrd
+4 -3
arch/powerpc/boot/Makefile
··· 71 71 uartlite.c mpc52xx-psc.c 72 72 src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c 73 73 src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c 74 - src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c 74 + src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c 75 75 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c 76 - src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c 76 + src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c 77 77 78 78 src-plat-y := of.c epapr.c 79 79 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ ··· 95 95 src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \ 96 96 cuboot-c2k.c gamecube-head.S \ 97 97 gamecube.c wii-head.S wii.c holly.c \ 98 - prpmc2800.c 98 + prpmc2800.c fixed-head.S mvme5100.c 99 99 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c 100 100 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c 101 101 src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c ··· 286 286 image-$(CONFIG_PPC_C2K) += cuImage.c2k 287 287 image-$(CONFIG_GAMECUBE) += dtbImage.gamecube 288 288 image-$(CONFIG_WII) += dtbImage.wii 289 + image-$(CONFIG_MVME5100) += dtbImage.mvme5100 289 290 290 291 # Board port in arch/powerpc/platform/amigaone/Kconfig 291 292 image-$(CONFIG_AMIGAONE) += cuImage.amigaone
+82
arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi
··· 1 + /* 2 + * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x102300 ] 3 + * 4 + * Copyright 2013 Freescale Semiconductor Inc. 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * * Redistributions of source code must retain the above copyright 9 + * notice, this list of conditions and the following disclaimer. 10 + * * Redistributions in binary form must reproduce the above copyright 11 + * notice, this list of conditions and the following disclaimer in the 12 + * documentation and/or other materials provided with the distribution. 13 + * * Neither the name of Freescale Semiconductor nor the 14 + * names of its contributors may be used to endorse or promote products 15 + * derived from this software without specific prior written permission. 16 + * 17 + * 18 + * ALTERNATIVELY, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") as published by the Free Software 20 + * Foundation, either version 2 of that License or (at your option) any 21 + * later version. 22 + * 23 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + dma2: dma@102300 { 36 + #address-cells = <1>; 37 + #size-cells = <1>; 38 + compatible = "fsl,elo3-dma"; 39 + reg = <0x102300 0x4>, 40 + <0x102600 0x4>; 41 + ranges = <0x0 0x102100 0x500>; 42 + dma-channel@0 { 43 + compatible = "fsl,eloplus-dma-channel"; 44 + reg = <0x0 0x80>; 45 + interrupts = <464 2 0 0>; 46 + }; 47 + dma-channel@80 { 48 + compatible = "fsl,eloplus-dma-channel"; 49 + reg = <0x80 0x80>; 50 + interrupts = <465 2 0 0>; 51 + }; 52 + dma-channel@100 { 53 + compatible = "fsl,eloplus-dma-channel"; 54 + reg = <0x100 0x80>; 55 + interrupts = <466 2 0 0>; 56 + }; 57 + dma-channel@180 { 58 + compatible = "fsl,eloplus-dma-channel"; 59 + reg = <0x180 0x80>; 60 + interrupts = <467 2 0 0>; 61 + }; 62 + dma-channel@300 { 63 + compatible = "fsl,eloplus-dma-channel"; 64 + reg = <0x300 0x80>; 65 + interrupts = <468 2 0 0>; 66 + }; 67 + dma-channel@380 { 68 + compatible = "fsl,eloplus-dma-channel"; 69 + reg = <0x380 0x80>; 70 + interrupts = <469 2 0 0>; 71 + }; 72 + dma-channel@400 { 73 + compatible = "fsl,eloplus-dma-channel"; 74 + reg = <0x400 0x80>; 75 + interrupts = <470 2 0 0>; 76 + }; 77 + dma-channel@480 { 78 + compatible = "fsl,eloplus-dma-channel"; 79 + reg = <0x480 0x80>; 80 + interrupts = <471 2 0 0>; 81 + }; 82 + };
+2 -1
arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
··· 36 36 #address-cells = <2>; 37 37 #size-cells = <1>; 38 38 compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; 39 - interrupts = <19 2 0 0>; 39 + interrupts = <19 2 0 0>, 40 + <16 2 0 0>; 40 41 }; 41 42 42 43 /* controller at 0x9000 */
+2 -1
arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
··· 36 36 #address-cells = <2>; 37 37 #size-cells = <1>; 38 38 compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus"; 39 - interrupts = <19 2 0 0>; 39 + interrupts = <19 2 0 0>, 40 + <16 2 0 0>; 40 41 }; 41 42 42 43 /* controller at 0x9000 */
+2 -1
arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
··· 40 40 * pin muxing when the DIU is enabled. 41 41 */ 42 42 compatible = "fsl,p1022-elbc", "fsl,elbc"; 43 - interrupts = <19 2 0 0>; 43 + interrupts = <19 2 0 0>, 44 + <16 2 0 0>; 44 45 }; 45 46 46 47 /* controller at 0x9000 */
+2 -1
arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
··· 36 36 #address-cells = <2>; 37 37 #size-cells = <1>; 38 38 compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus"; 39 - interrupts = <19 2 0 0>; 39 + interrupts = <19 2 0 0>, 40 + <16 2 0 0>; 40 41 }; 41 42 42 43 /* controller at 0xa000 */
+1 -1
arch/powerpc/boot/dts/kilauea.dts
··· 406 406 407 407 MSI: ppc4xx-msi@C10000000 { 408 408 compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; 409 - reg = < 0x0 0xEF620000 0x100>; 409 + reg = <0xEF620000 0x100>; 410 410 sdr-base = <0x4B0>; 411 411 msi-data = <0x00000000>; 412 412 msi-mask = <0x44440000>;
+185
arch/powerpc/boot/dts/mvme5100.dts
··· 1 + /* 2 + * Device Tree Source for Motorola/Emerson MVME5100. 3 + * 4 + * Copyright 2013 CSC Australia Pty. Ltd. 5 + * 6 + * This file is licensed under the terms of the GNU General Public 7 + * License version 2. This program is licensed "as is" without 8 + * any warranty of any kind, whether express or implied. 9 + */ 10 + 11 + /dts-v1/; 12 + 13 + / { 14 + model = "MVME5100"; 15 + compatible = "MVME5100"; 16 + #address-cells = <1>; 17 + #size-cells = <1>; 18 + 19 + aliases { 20 + serial0 = &serial0; 21 + pci0 = &pci0; 22 + }; 23 + 24 + cpus { 25 + #address-cells = <1>; 26 + #size-cells = <0>; 27 + 28 + PowerPC,7410 { 29 + device_type = "cpu"; 30 + reg = <0x0>; 31 + /* Following required by dtc but not used */ 32 + d-cache-line-size = <32>; 33 + i-cache-line-size = <32>; 34 + i-cache-size = <32768>; 35 + d-cache-size = <32768>; 36 + timebase-frequency = <25000000>; 37 + clock-frequency = <500000000>; 38 + bus-frequency = <100000000>; 39 + }; 40 + }; 41 + 42 + memory { 43 + device_type = "memory"; 44 + reg = <0x0 0x20000000>; 45 + }; 46 + 47 + hawk@fef80000 { 48 + #address-cells = <1>; 49 + #size-cells = <1>; 50 + compatible = "hawk-bridge", "simple-bus"; 51 + ranges = <0x0 0xfef80000 0x10000>; 52 + reg = <0xfef80000 0x10000>; 53 + 54 + serial0: serial@8000 { 55 + device_type = "serial"; 56 + compatible = "ns16550"; 57 + reg = <0x8000 0x80>; 58 + reg-shift = <4>; 59 + clock-frequency = <1843200>; 60 + current-speed = <9600>; 61 + interrupts = <1 1>; // IRQ1 Level Active Low. 62 + interrupt-parent = <&mpic>; 63 + }; 64 + 65 + serial1: serial@8200 { 66 + device_type = "serial"; 67 + compatible = "ns16550"; 68 + reg = <0x8200 0x80>; 69 + reg-shift = <4>; 70 + clock-frequency = <1843200>; 71 + current-speed = <9600>; 72 + interrupts = <1 1>; // IRQ1 Level Active Low. 
73 + interrupt-parent = <&mpic>; 74 + }; 75 + 76 + mpic: interrupt-controller@f3f80000 { 77 + #interrupt-cells = <2>; 78 + #address-cells = <0>; 79 + device_type = "open-pic"; 80 + compatible = "chrp,open-pic"; 81 + interrupt-controller; 82 + reg = <0xf3f80000 0x40000>; 83 + }; 84 + }; 85 + 86 + pci0: pci@feff0000 { 87 + #address-cells = <3>; 88 + #size-cells = <2>; 89 + #interrupt-cells = <1>; 90 + device_type = "pci"; 91 + compatible = "hawk-pci"; 92 + reg = <0xfec00000 0x400000>; 93 + 8259-interrupt-acknowledge = <0xfeff0030>; 94 + ranges = <0x1000000 0x0 0x0 0xfe000000 0x0 0x800000 95 + 0x2000000 0x0 0x80000000 0x80000000 0x0 0x74000000>; 96 + bus-range = <0 255>; 97 + clock-frequency = <33333333>; 98 + interrupt-parent = <&mpic>; 99 + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; 100 + interrupt-map = < 101 + 102 + /* 103 + * This definition (IDSEL 11) duplicates the 104 + * interrupts definition in the i8259 105 + * interrupt controller below. 106 + * 107 + * Do not change the interrupt sense/polarity from 108 + * 0x2 to anything else, doing so will cause endless 109 + * "spurious" i8259 interrupts to be fielded. 
110 + */ 111 + // IDSEL 11 - iPMC712 PCI/ISA Bridge 112 + 0x5800 0x0 0x0 0x1 &mpic 0x0 0x2 113 + 0x5800 0x0 0x0 0x2 &mpic 0x0 0x2 114 + 0x5800 0x0 0x0 0x3 &mpic 0x0 0x2 115 + 0x5800 0x0 0x0 0x4 &mpic 0x0 0x2 116 + 117 + /* IDSEL 12 - Not Used */ 118 + 119 + /* IDSEL 13 - Universe VME Bridge */ 120 + 0x6800 0x0 0x0 0x1 &mpic 0x5 0x1 121 + 0x6800 0x0 0x0 0x2 &mpic 0x6 0x1 122 + 0x6800 0x0 0x0 0x3 &mpic 0x7 0x1 123 + 0x6800 0x0 0x0 0x4 &mpic 0x8 0x1 124 + 125 + /* IDSEL 14 - ENET 1 */ 126 + 0x7000 0x0 0x0 0x1 &mpic 0x2 0x1 127 + 128 + /* IDSEL 15 - Not Used */ 129 + 130 + /* IDSEL 16 - PMC Slot 1 */ 131 + 0x8000 0x0 0x0 0x1 &mpic 0x9 0x1 132 + 0x8000 0x0 0x0 0x2 &mpic 0xa 0x1 133 + 0x8000 0x0 0x0 0x3 &mpic 0xb 0x1 134 + 0x8000 0x0 0x0 0x4 &mpic 0xc 0x1 135 + 136 + /* IDSEL 17 - PMC Slot 2 */ 137 + 0x8800 0x0 0x0 0x1 &mpic 0xc 0x1 138 + 0x8800 0x0 0x0 0x2 &mpic 0x9 0x1 139 + 0x8800 0x0 0x0 0x3 &mpic 0xa 0x1 140 + 0x8800 0x0 0x0 0x4 &mpic 0xb 0x1 141 + 142 + /* IDSEL 18 - Not Used */ 143 + 144 + /* IDSEL 19 - ENET 2 */ 145 + 0x9800 0x0 0x0 0x1 &mpic 0xd 0x1 146 + 147 + /* IDSEL 20 - PMCSPAN (PCI-X) */ 148 + 0xa000 0x0 0x0 0x1 &mpic 0x9 0x1 149 + 0xa000 0x0 0x0 0x2 &mpic 0xa 0x1 150 + 0xa000 0x0 0x0 0x3 &mpic 0xb 0x1 151 + 0xa000 0x0 0x0 0x4 &mpic 0xc 0x1 152 + 153 + >; 154 + 155 + isa { 156 + #address-cells = <2>; 157 + #size-cells = <1>; 158 + #interrupt-cells = <2>; 159 + device_type = "isa"; 160 + compatible = "isa"; 161 + ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>; 162 + interrupt-parent = <&i8259>; 163 + 164 + i8259: interrupt-controller@20 { 165 + #interrupt-cells = <2>; 166 + #address-cells = <0>; 167 + interrupts = <0 2>; 168 + device_type = "interrupt-controller"; 169 + compatible = "chrp,iic"; 170 + interrupt-controller; 171 + reg = <1 0x00000020 0x00000002 172 + 1 0x000000a0 0x00000002 173 + 1 0x000004d0 0x00000002>; 174 + interrupt-parent = <&mpic>; 175 + }; 176 + 177 + }; 178 + 179 + }; 180 + 181 + chosen { 182 + linux,stdout-path = 
&serial0; 183 + }; 184 + 185 + };
+23
arch/powerpc/boot/dts/p1010rdb-pa.dts
··· 1 + /* 2 + * P1010 RDB Device Tree Source 3 + * 4 + * Copyright 2011 Freescale Semiconductor Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; either version 2 of the License, or (at your 9 + * option) any later version. 10 + */ 11 + 12 + /include/ "fsl/p1010si-pre.dtsi" 13 + 14 + / { 15 + model = "fsl,P1010RDB"; 16 + compatible = "fsl,P1010RDB"; 17 + 18 + /include/ "p1010rdb_32b.dtsi" 19 + }; 20 + 21 + /include/ "p1010rdb.dtsi" 22 + /include/ "p1010rdb-pa.dtsi" 23 + /include/ "fsl/p1010si-post.dtsi"
+85
arch/powerpc/boot/dts/p1010rdb-pa.dtsi
··· 1 + /* 2 + * P1010 RDB Device Tree Source stub (no addresses or top-level ranges) 3 + * 4 + * Copyright 2013 Freescale Semiconductor Inc. 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * * Redistributions of source code must retain the above copyright 9 + * notice, this list of conditions and the following disclaimer. 10 + * * Redistributions in binary form must reproduce the above copyright 11 + * notice, this list of conditions and the following disclaimer in the 12 + * documentation and/or other materials provided with the distribution. 13 + * * Neither the name of Freescale Semiconductor nor the 14 + * names of its contributors may be used to endorse or promote products 15 + * derived from this software without specific prior written permission. 16 + * 17 + * 18 + * ALTERNATIVELY, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") as published by the Free Software 20 + * Foundation, either version 2 of that License or (at your option) any 21 + * later version. 22 + * 23 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + &ifc_nand { 36 + partition@0 { 37 + /* This location must not be altered */ 38 + /* 1MB for u-boot Bootloader Image */ 39 + reg = <0x0 0x00100000>; 40 + label = "NAND U-Boot Image"; 41 + read-only; 42 + }; 43 + 44 + partition@100000 { 45 + /* 1MB for DTB Image */ 46 + reg = <0x00100000 0x00100000>; 47 + label = "NAND DTB Image"; 48 + }; 49 + 50 + partition@200000 { 51 + /* 4MB for Linux Kernel Image */ 52 + reg = <0x00200000 0x00400000>; 53 + label = "NAND Linux Kernel Image"; 54 + }; 55 + 56 + partition@600000 { 57 + /* 4MB for Compressed Root file System Image */ 58 + reg = <0x00600000 0x00400000>; 59 + label = "NAND Compressed RFS Image"; 60 + }; 61 + 62 + partition@a00000 { 63 + /* 15MB for JFFS2 based Root file System */ 64 + reg = <0x00a00000 0x00f00000>; 65 + label = "NAND JFFS2 Root File System"; 66 + }; 67 + 68 + partition@1900000 { 69 + /* 7MB for User Area */ 70 + reg = <0x01900000 0x00700000>; 71 + label = "NAND User area"; 72 + }; 73 + }; 74 + 75 + &phy0 { 76 + interrupts = <1 1 0 0>; 77 + }; 78 + 79 + &phy1 { 80 + interrupts = <2 1 0 0>; 81 + }; 82 + 83 + &phy2 { 84 + interrupts = <4 1 0 0>; 85 + };
+35
arch/powerpc/boot/dts/p1010rdb-pb.dts
··· 1 + /* 2 + * P1010 RDB Device Tree Source 3 + * 4 + * Copyright 2011 Freescale Semiconductor Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; either version 2 of the License, or (at your 9 + * option) any later version. 10 + */ 11 + 12 + /include/ "fsl/p1010si-pre.dtsi" 13 + 14 + / { 15 + model = "fsl,P1010RDB-PB"; 16 + compatible = "fsl,P1010RDB-PB"; 17 + 18 + /include/ "p1010rdb_32b.dtsi" 19 + }; 20 + 21 + /include/ "p1010rdb.dtsi" 22 + 23 + &phy0 { 24 + interrupts = <0 1 0 0>; 25 + }; 26 + 27 + &phy1 { 28 + interrupts = <2 1 0 0>; 29 + }; 30 + 31 + &phy2 { 32 + interrupts = <1 1 0 0>; 33 + }; 34 + 35 + /include/ "fsl/p1010si-post.dtsi"
+58
arch/powerpc/boot/dts/p1010rdb-pb_36b.dts
··· 1 + /* 2 + * P1010 RDB Device Tree Source (36-bit address map) 3 + * 4 + * Copyright 2011 Freescale Semiconductor Inc. 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * * Redistributions of source code must retain the above copyright 9 + * notice, this list of conditions and the following disclaimer. 10 + * * Redistributions in binary form must reproduce the above copyright 11 + * notice, this list of conditions and the following disclaimer in the 12 + * documentation and/or other materials provided with the distribution. 13 + * * Neither the name of Freescale Semiconductor nor the 14 + * names of its contributors may be used to endorse or promote products 15 + * derived from this software without specific prior written permission. 16 + * 17 + * 18 + * ALTERNATIVELY, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") as published by the Free Software 20 + * Foundation, either version 2 of that License or (at your option) any 21 + * later version. 22 + * 23 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + /include/ "fsl/p1010si-pre.dtsi" 36 + 37 + / { 38 + model = "fsl,P1010RDB-PB"; 39 + compatible = "fsl,P1010RDB-PB"; 40 + 41 + /include/ "p1010rdb_36b.dtsi" 42 + }; 43 + 44 + /include/ "p1010rdb.dtsi" 45 + 46 + &phy0 { 47 + interrupts = <0 1 0 0>; 48 + }; 49 + 50 + &phy1 { 51 + interrupts = <2 1 0 0>; 52 + }; 53 + 54 + &phy2 { 55 + interrupts = <1 1 0 0>; 56 + }; 57 + 58 + /include/ "fsl/p1010si-post.dtsi"
-66
arch/powerpc/boot/dts/p1010rdb.dts
··· 1 - /* 2 - * P1010 RDB Device Tree Source 3 - * 4 - * Copyright 2011 Freescale Semiconductor Inc. 5 - * 6 - * This program is free software; you can redistribute it and/or modify it 7 - * under the terms of the GNU General Public License as published by the 8 - * Free Software Foundation; either version 2 of the License, or (at your 9 - * option) any later version. 10 - */ 11 - 12 - /include/ "fsl/p1010si-pre.dtsi" 13 - 14 - / { 15 - model = "fsl,P1010RDB"; 16 - compatible = "fsl,P1010RDB"; 17 - 18 - memory { 19 - device_type = "memory"; 20 - }; 21 - 22 - board_ifc: ifc: ifc@ffe1e000 { 23 - /* NOR, NAND Flashes and CPLD on board */ 24 - ranges = <0x0 0x0 0x0 0xee000000 0x02000000 25 - 0x1 0x0 0x0 0xff800000 0x00010000 26 - 0x3 0x0 0x0 0xffb00000 0x00000020>; 27 - reg = <0x0 0xffe1e000 0 0x2000>; 28 - }; 29 - 30 - board_soc: soc: soc@ffe00000 { 31 - ranges = <0x0 0x0 0xffe00000 0x100000>; 32 - }; 33 - 34 - pci0: pcie@ffe09000 { 35 - reg = <0 0xffe09000 0 0x1000>; 36 - ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 37 - 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; 38 - pcie@0 { 39 - ranges = <0x2000000 0x0 0xa0000000 40 - 0x2000000 0x0 0xa0000000 41 - 0x0 0x20000000 42 - 43 - 0x1000000 0x0 0x0 44 - 0x1000000 0x0 0x0 45 - 0x0 0x100000>; 46 - }; 47 - }; 48 - 49 - pci1: pcie@ffe0a000 { 50 - reg = <0 0xffe0a000 0 0x1000>; 51 - ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 52 - 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; 53 - pcie@0 { 54 - ranges = <0x2000000 0x0 0x80000000 55 - 0x2000000 0x0 0x80000000 56 - 0x0 0x20000000 57 - 58 - 0x1000000 0x0 0x0 59 - 0x1000000 0x0 0x0 60 - 0x0 0x100000>; 61 - }; 62 - }; 63 - }; 64 - 65 - /include/ "p1010rdb.dtsi" 66 - /include/ "fsl/p1010si-post.dtsi"
+1 -42
arch/powerpc/boot/dts/p1010rdb.dtsi
··· 69 69 }; 70 70 }; 71 71 72 - nand@1,0 { 72 + ifc_nand: nand@1,0 { 73 73 #address-cells = <1>; 74 74 #size-cells = <1>; 75 75 compatible = "fsl,ifc-nand"; 76 76 reg = <0x1 0x0 0x10000>; 77 - 78 - partition@0 { 79 - /* This location must not be altered */ 80 - /* 1MB for u-boot Bootloader Image */ 81 - reg = <0x0 0x00100000>; 82 - label = "NAND U-Boot Image"; 83 - read-only; 84 - }; 85 - 86 - partition@100000 { 87 - /* 1MB for DTB Image */ 88 - reg = <0x00100000 0x00100000>; 89 - label = "NAND DTB Image"; 90 - }; 91 - 92 - partition@200000 { 93 - /* 4MB for Linux Kernel Image */ 94 - reg = <0x00200000 0x00400000>; 95 - label = "NAND Linux Kernel Image"; 96 - }; 97 - 98 - partition@600000 { 99 - /* 4MB for Compressed Root file System Image */ 100 - reg = <0x00600000 0x00400000>; 101 - label = "NAND Compressed RFS Image"; 102 - }; 103 - 104 - partition@a00000 { 105 - /* 15MB for JFFS2 based Root file System */ 106 - reg = <0x00a00000 0x00f00000>; 107 - label = "NAND JFFS2 Root File System"; 108 - }; 109 - 110 - partition@1900000 { 111 - /* 7MB for User Area */ 112 - reg = <0x01900000 0x00700000>; 113 - label = "NAND User area"; 114 - }; 115 77 }; 116 78 117 79 cpld@3,0 { ··· 155 193 156 194 mdio@24000 { 157 195 phy0: ethernet-phy@0 { 158 - interrupts = <3 1 0 0>; 159 196 reg = <0x1>; 160 197 }; 161 198 162 199 phy1: ethernet-phy@1 { 163 - interrupts = <2 1 0 0>; 164 200 reg = <0x0>; 165 201 }; 166 202 167 203 phy2: ethernet-phy@2 { 168 - interrupts = <2 1 0 0>; 169 204 reg = <0x2>; 170 205 }; 171 206
+79
arch/powerpc/boot/dts/p1010rdb_32b.dtsi
··· 1 + /* 2 + * P1010 RDB Device Tree Source stub (no addresses or top-level ranges) 3 + * 4 + * Copyright 2013 Freescale Semiconductor Inc. 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * * Redistributions of source code must retain the above copyright 9 + * notice, this list of conditions and the following disclaimer. 10 + * * Redistributions in binary form must reproduce the above copyright 11 + * notice, this list of conditions and the following disclaimer in the 12 + * documentation and/or other materials provided with the distribution. 13 + * * Neither the name of Freescale Semiconductor nor the 14 + * names of its contributors may be used to endorse or promote products 15 + * derived from this software without specific prior written permission. 16 + * 17 + * 18 + * ALTERNATIVELY, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") as published by the Free Software 20 + * Foundation, either version 2 of that License or (at your option) any 21 + * later version. 22 + * 23 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + memory { 36 + device_type = "memory"; 37 + }; 38 + 39 + board_ifc: ifc: ifc@ffe1e000 { 40 + /* NOR, NAND Flashes and CPLD on board */ 41 + ranges = <0x0 0x0 0x0 0xee000000 0x02000000 42 + 0x1 0x0 0x0 0xff800000 0x00010000 43 + 0x3 0x0 0x0 0xffb00000 0x00000020>; 44 + reg = <0x0 0xffe1e000 0 0x2000>; 45 + }; 46 + 47 + board_soc: soc: soc@ffe00000 { 48 + ranges = <0x0 0x0 0xffe00000 0x100000>; 49 + }; 50 + 51 + pci0: pcie@ffe09000 { 52 + reg = <0 0xffe09000 0 0x1000>; 53 + ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 54 + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; 55 + pcie@0 { 56 + ranges = <0x2000000 0x0 0xa0000000 57 + 0x2000000 0x0 0xa0000000 58 + 0x0 0x20000000 59 + 60 + 0x1000000 0x0 0x0 61 + 0x1000000 0x0 0x0 62 + 0x0 0x100000>; 63 + }; 64 + }; 65 + 66 + pci1: pcie@ffe0a000 { 67 + reg = <0 0xffe0a000 0 0x1000>; 68 + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 69 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; 70 + pcie@0 { 71 + ranges = <0x2000000 0x0 0x80000000 72 + 0x2000000 0x0 0x80000000 73 + 0x0 0x20000000 74 + 75 + 0x1000000 0x0 0x0 76 + 0x1000000 0x0 0x0 77 + 0x0 0x100000>; 78 + }; 79 + };
+2 -45
arch/powerpc/boot/dts/p1010rdb_36b.dts arch/powerpc/boot/dts/p1010rdb-pa_36b.dts
··· 38 38 model = "fsl,P1010RDB"; 39 39 compatible = "fsl,P1010RDB"; 40 40 41 - memory { 42 - device_type = "memory"; 43 - }; 44 - 45 - board_ifc: ifc: ifc@fffe1e000 { 46 - /* NOR, NAND Flashes and CPLD on board */ 47 - ranges = <0x0 0x0 0xf 0xee000000 0x02000000 48 - 0x1 0x0 0xf 0xff800000 0x00010000 49 - 0x3 0x0 0xf 0xffb00000 0x00000020>; 50 - reg = <0xf 0xffe1e000 0 0x2000>; 51 - }; 52 - 53 - board_soc: soc: soc@fffe00000 { 54 - ranges = <0x0 0xf 0xffe00000 0x100000>; 55 - }; 56 - 57 - pci0: pcie@fffe09000 { 58 - reg = <0xf 0xffe09000 0 0x1000>; 59 - ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000 60 - 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; 61 - pcie@0 { 62 - ranges = <0x2000000 0x0 0xc0000000 63 - 0x2000000 0x0 0xc0000000 64 - 0x0 0x20000000 65 - 66 - 0x1000000 0x0 0x0 67 - 0x1000000 0x0 0x0 68 - 0x0 0x100000>; 69 - }; 70 - }; 71 - 72 - pci1: pcie@fffe0a000 { 73 - reg = <0xf 0xffe0a000 0 0x1000>; 74 - ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000 75 - 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; 76 - pcie@0 { 77 - ranges = <0x2000000 0x0 0xc0000000 78 - 0x2000000 0x0 0xc0000000 79 - 0x0 0x20000000 80 - 81 - 0x1000000 0x0 0x0 82 - 0x1000000 0x0 0x0 83 - 0x0 0x100000>; 84 - }; 85 - }; 41 + /include/ "p1010rdb_36b.dtsi" 86 42 }; 87 43 88 44 /include/ "p1010rdb.dtsi" 45 + /include/ "p1010rdb-pa.dtsi" 89 46 /include/ "fsl/p1010si-post.dtsi"
+79
arch/powerpc/boot/dts/p1010rdb_36b.dtsi
··· 1 + /* 2 + * P1010 RDB Device Tree Source stub (no addresses or top-level ranges) 3 + * 4 + * Copyright 2013 Freescale Semiconductor Inc. 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * * Redistributions of source code must retain the above copyright 9 + * notice, this list of conditions and the following disclaimer. 10 + * * Redistributions in binary form must reproduce the above copyright 11 + * notice, this list of conditions and the following disclaimer in the 12 + * documentation and/or other materials provided with the distribution. 13 + * * Neither the name of Freescale Semiconductor nor the 14 + * names of its contributors may be used to endorse or promote products 15 + * derived from this software without specific prior written permission. 16 + * 17 + * 18 + * ALTERNATIVELY, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") as published by the Free Software 20 + * Foundation, either version 2 of that License or (at your option) any 21 + * later version. 22 + * 23 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + memory { 36 + device_type = "memory"; 37 + }; 38 + 39 + board_ifc: ifc: ifc@fffe1e000 { 40 + /* NOR, NAND Flashes and CPLD on board */ 41 + ranges = <0x0 0x0 0xf 0xee000000 0x02000000 42 + 0x1 0x0 0xf 0xff800000 0x00010000 43 + 0x3 0x0 0xf 0xffb00000 0x00000020>; 44 + reg = <0xf 0xffe1e000 0 0x2000>; 45 + }; 46 + 47 + board_soc: soc: soc@fffe00000 { 48 + ranges = <0x0 0xf 0xffe00000 0x100000>; 49 + }; 50 + 51 + pci0: pcie@fffe09000 { 52 + reg = <0xf 0xffe09000 0 0x1000>; 53 + ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000 54 + 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; 55 + pcie@0 { 56 + ranges = <0x2000000 0x0 0xc0000000 57 + 0x2000000 0x0 0xc0000000 58 + 0x0 0x20000000 59 + 60 + 0x1000000 0x0 0x0 61 + 0x1000000 0x0 0x0 62 + 0x0 0x100000>; 63 + }; 64 + }; 65 + 66 + pci1: pcie@fffe0a000 { 67 + reg = <0xf 0xffe0a000 0 0x1000>; 68 + ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000 69 + 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; 70 + pcie@0 { 71 + ranges = <0x2000000 0x0 0xc0000000 72 + 0x2000000 0x0 0xc0000000 73 + 0x0 0x20000000 74 + 75 + 0x1000000 0x0 0x0 76 + 0x1000000 0x0 0x0 77 + 0x0 0x100000>; 78 + }; 79 + };
+2 -1
arch/powerpc/boot/dts/p1022ds.dtsi
··· 146 146 */ 147 147 }; 148 148 rtc@68 { 149 - compatible = "dallas,ds1339"; 149 + compatible = "dallas,ds3232"; 150 150 reg = <0x68>; 151 + interrupts = <0x1 0x1 0 0>; 151 152 }; 152 153 adt7461@4c { 153 154 compatible = "adi,adt7461";
+95
arch/powerpc/boot/dts/p1025twr.dts
··· 1 + /* 2 + * P1025 TWR Device Tree Source (32-bit address map) 3 + * 4 + * Copyright 2013 Freescale Semiconductor Inc. 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * * Redistributions of source code must retain the above copyright 9 + * notice, this list of conditions and the following disclaimer. 10 + * * Redistributions in binary form must reproduce the above copyright 11 + * notice, this list of conditions and the following disclaimer in the 12 + * documentation and/or other materials provided with the distribution. 13 + * * Neither the name of Freescale Semiconductor nor the 14 + * names of its contributors may be used to endorse or promote products 15 + * derived from this software without specific prior written permission. 16 + * 17 + * 18 + * ALTERNATIVELY, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") as published by the Free Software 20 + * Foundation, either version 2 of that License or (at your option) any 21 + * later version. 22 + * 23 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + /include/ "fsl/p1021si-pre.dtsi" 36 + / { 37 + model = "fsl,P1025"; 38 + compatible = "fsl,TWR-P1025"; 39 + 40 + memory { 41 + device_type = "memory"; 42 + }; 43 + 44 + lbc: localbus@ffe05000 { 45 + reg = <0 0xffe05000 0 0x1000>; 46 + 47 + /* NOR Flash and SSD1289 */ 48 + ranges = <0x0 0x0 0x0 0xec000000 0x04000000 49 + 0x2 0x0 0x0 0xe0000000 0x00020000>; 50 + }; 51 + 52 + soc: soc@ffe00000 { 53 + ranges = <0x0 0x0 0xffe00000 0x100000>; 54 + }; 55 + 56 + pci0: pcie@ffe09000 { 57 + ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 58 + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; 59 + reg = <0 0xffe09000 0 0x1000>; 60 + pcie@0 { 61 + ranges = <0x2000000 0x0 0xa0000000 62 + 0x2000000 0x0 0xa0000000 63 + 0x0 0x20000000 64 + 65 + 0x1000000 0x0 0x0 66 + 0x1000000 0x0 0x0 67 + 0x0 0x100000>; 68 + }; 69 + }; 70 + 71 + pci1: pcie@ffe0a000 { 72 + reg = <0 0xffe0a000 0 0x1000>; 73 + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 74 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; 75 + pcie@0 { 76 + ranges = <0x2000000 0x0 0x80000000 77 + 0x2000000 0x0 0x80000000 78 + 0x0 0x20000000 79 + 80 + 0x1000000 0x0 0x0 81 + 0x1000000 0x0 0x0 82 + 0x0 0x100000>; 83 + }; 84 + }; 85 + 86 + qe: qe@ffe80000 { 87 + ranges = <0x0 0x0 0xffe80000 0x40000>; 88 + reg = <0 0xffe80000 0 0x480>; 89 + brg-frequency = <0>; 90 + bus-frequency = <0>; 91 + }; 92 + }; 93 + 94 + /include/ "p1025twr.dtsi" 95 + /include/ "fsl/p1021si-post.dtsi"
+280
arch/powerpc/boot/dts/p1025twr.dtsi
··· 1 + /* 2 + * P1025 TWR Device Tree Source stub (no addresses or top-level ranges) 3 + * 4 + * Copyright 2013 Freescale Semiconductor Inc. 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * * Redistributions of source code must retain the above copyright 9 + * notice, this list of conditions and the following disclaimer. 10 + * * Redistributions in binary form must reproduce the above copyright 11 + * notice, this list of conditions and the following disclaimer in the 12 + * documentation and/or other materials provided with the distribution. 13 + * * Neither the name of Freescale Semiconductor nor the 14 + * names of its contributors may be used to endorse or promote products 15 + * derived from this software without specific prior written permission. 16 + * 17 + * 18 + * ALTERNATIVELY, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") as published by the Free Software 20 + * Foundation, either version 2 of that License or (at your option) any 21 + * later version. 22 + * 23 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + /{ 36 + aliases { 37 + ethernet3 = &enet3; 38 + ethernet4 = &enet4; 39 + }; 40 + }; 41 + 42 + &lbc { 43 + nor@0,0 { 44 + #address-cells = <1>; 45 + #size-cells = <1>; 46 + compatible = "cfi-flash"; 47 + reg = <0x0 0x0 0x4000000>; 48 + bank-width = <2>; 49 + device-width = <1>; 50 + 51 + partition@0 { 52 + /* This location must not be altered */ 53 + /* 256KB for Vitesse 7385 Switch firmware */ 54 + reg = <0x0 0x00040000>; 55 + label = "NOR Vitesse-7385 Firmware"; 56 + read-only; 57 + }; 58 + 59 + partition@40000 { 60 + /* 256KB for DTB Image */ 61 + reg = <0x00040000 0x00040000>; 62 + label = "NOR DTB Image"; 63 + }; 64 + 65 + partition@80000 { 66 + /* 5.5 MB for Linux Kernel Image */ 67 + reg = <0x00080000 0x00580000>; 68 + label = "NOR Linux Kernel Image"; 69 + }; 70 + 71 + partition@400000 { 72 + /* 56.75MB for Root file System */ 73 + reg = <0x00600000 0x038c0000>; 74 + label = "NOR Root File System"; 75 + }; 76 + 77 + partition@ec0000 { 78 + /* This location must not be altered */ 79 + /* 256KB for QE ucode firmware*/ 80 + reg = <0x03ec0000 0x00040000>; 81 + label = "NOR QE microcode firmware"; 82 + read-only; 83 + }; 84 + 85 + partition@f00000 { 86 + /* This location must not be altered */ 87 + /* 512KB for u-boot Bootloader Image */ 88 + /* 512KB for u-boot Environment Variables */ 89 + reg = <0x03f00000 0x00100000>; 90 + label = "NOR U-Boot Image"; 91 + read-only; 92 + }; 93 + }; 94 + 95 + /* CS2 for Display */ 96 + display@2,0 { 97 + compatible = "solomon,ssd1289fb"; 98 + reg = <0x2 0x0000 0x0004>; 99 + }; 100 + 101 + }; 102 + 103 + &soc { 104 + usb@22000 { 105 + phy_type = "ulpi"; 106 + }; 107 + 108 + mdio@24000 { 109 + phy0: ethernet-phy@2 { 110 + interrupt-parent = <&mpic>; 111 + interrupts = <1 1 0 0>; 112 + reg = <0x2>; 113 + }; 114 + 115 + phy1: ethernet-phy@1 { 116 + interrupt-parent = <&mpic>; 117 + interrupts = <2 1 0 0>; 118 + reg = <0x1>; 119 + }; 120 + 121 + tbi0: tbi-phy@11 { 122 + reg = <0x11>; 123 + device_type = "tbi-phy"; 
124 + }; 125 + }; 126 + 127 + mdio@25000 { 128 + tbi1: tbi-phy@11 { 129 + reg = <0x11>; 130 + device_type = "tbi-phy"; 131 + }; 132 + }; 133 + 134 + mdio@26000 { 135 + tbi2: tbi-phy@11 { 136 + reg = <0x11>; 137 + device_type = "tbi-phy"; 138 + }; 139 + }; 140 + 141 + enet0: ethernet@b0000 { 142 + phy-handle = <&phy0>; 143 + phy-connection-type = "rgmii-id"; 144 + 145 + }; 146 + 147 + enet1: ethernet@b1000 { 148 + status = "disabled"; 149 + }; 150 + 151 + enet2: ethernet@b2000 { 152 + phy-handle = <&phy1>; 153 + phy-connection-type = "rgmii-id"; 154 + }; 155 + 156 + par_io@e0100 { 157 + #address-cells = <1>; 158 + #size-cells = <1>; 159 + reg = <0xe0100 0x60>; 160 + ranges = <0x0 0xe0100 0x60>; 161 + device_type = "par_io"; 162 + num-ports = <3>; 163 + pio1: ucc_pin@01 { 164 + pio-map = < 165 + /* port pin dir open_drain assignment has_irq */ 166 + 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */ 167 + 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */ 168 + 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */ 169 + 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */ 170 + 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */ 171 + 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */ 172 + 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */ 173 + 0x0 0xc 0x1 0x0 0x2 0x0 /* ENET1_TXD3_SER1_TXD3 */ 174 + 0x0 0x6 0x2 0x0 0x2 0x0 /* ENET1_RXD0_SER1_RXD0 */ 175 + 0x0 0xa 0x2 0x0 0x2 0x0 /* ENET1_RXD1_SER1_RXD1 */ 176 + 0x0 0xe 0x2 0x0 0x2 0x0 /* ENET1_RXD2_SER1_RXD2 */ 177 + 0x0 0xf 0x2 0x0 0x2 0x0 /* ENET1_RXD3_SER1_RXD3 */ 178 + 0x0 0x5 0x1 0x0 0x2 0x0 /* ENET1_TX_EN_SER1_RTS_B */ 179 + 0x0 0xd 0x1 0x0 0x2 0x0 /* ENET1_TX_ER */ 180 + 0x0 0x4 0x2 0x0 0x2 0x0 /* ENET1_RX_DV_SER1_CTS_B */ 181 + 0x0 0x8 0x2 0x0 0x2 0x0 /* ENET1_RX_ER_SER1_CD_B */ 182 + 0x0 0x11 0x2 0x0 0x2 0x0 /* ENET1_CRS */ 183 + 0x0 0x10 0x2 0x0 0x2 0x0>; /* ENET1_COL */ 184 + }; 185 + 186 + pio2: ucc_pin@02 { 187 + pio-map = < 188 + /* port pin dir open_drain assignment has_irq */ 189 + 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */ 190 + 0x1 0x14 0x3 0x0 0x1 
0x0 /* QE_MUX_MDIO */ 191 + 0x1 0xb 0x2 0x0 0x1 0x0 /* CLK13 */ 192 + 0x1 0x7 0x1 0x0 0x2 0x0 /* ENET5_TXD0_SER5_TXD0 */ 193 + 0x1 0xa 0x1 0x0 0x2 0x0 /* ENET5_TXD1_SER5_TXD1 */ 194 + 0x1 0x6 0x2 0x0 0x2 0x0 /* ENET5_RXD0_SER5_RXD0 */ 195 + 0x1 0x9 0x2 0x0 0x2 0x0 /* ENET5_RXD1_SER5_RXD1 */ 196 + 0x1 0x5 0x1 0x0 0x2 0x0 /* ENET5_TX_EN_SER5_RTS_B */ 197 + 0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */ 198 + 0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */ 199 + }; 200 + 201 + pio3: ucc_pin@03 { 202 + pio-map = < 203 + /* port pin dir open_drain assignment has_irq */ 204 + 0x0 0x16 0x2 0x0 0x2 0x0 /* SER7_CD_B*/ 205 + 0x0 0x12 0x2 0x0 0x2 0x0 /* SER7_CTS_B*/ 206 + 0x0 0x13 0x1 0x0 0x2 0x0 /* SER7_RTS_B*/ 207 + 0x0 0x14 0x2 0x0 0x2 0x0 /* SER7_RXD0*/ 208 + 0x0 0x15 0x1 0x0 0x2 0x0>; /* SER7_TXD0*/ 209 + }; 210 + 211 + pio4: ucc_pin@04 { 212 + pio-map = < 213 + /* port pin dir open_drain assignment has_irq */ 214 + 0x1 0x0 0x2 0x0 0x2 0x0 /* SER3_CD_B*/ 215 + 0x0 0x1c 0x2 0x0 0x2 0x0 /* SER3_CTS_B*/ 216 + 0x0 0x1d 0x1 0x0 0x2 0x0 /* SER3_RTS_B*/ 217 + 0x0 0x1e 0x2 0x0 0x2 0x0 /* SER3_RXD0*/ 218 + 0x0 0x1f 0x1 0x0 0x2 0x0>; /* SER3_TXD0*/ 219 + }; 220 + }; 221 + }; 222 + 223 + &qe { 224 + enet3: ucc@2000 { 225 + device_type = "network"; 226 + compatible = "ucc_geth"; 227 + rx-clock-name = "clk12"; 228 + tx-clock-name = "clk9"; 229 + pio-handle = <&pio1>; 230 + phy-handle = <&qe_phy0>; 231 + phy-connection-type = "mii"; 232 + }; 233 + 234 + mdio@2120 { 235 + qe_phy0: ethernet-phy@18 { 236 + interrupt-parent = <&mpic>; 237 + interrupts = <4 1 0 0>; 238 + reg = <0x18>; 239 + device_type = "ethernet-phy"; 240 + }; 241 + qe_phy1: ethernet-phy@19 { 242 + interrupt-parent = <&mpic>; 243 + interrupts = <5 1 0 0>; 244 + reg = <0x19>; 245 + device_type = "ethernet-phy"; 246 + }; 247 + tbi-phy@11 { 248 + reg = <0x11>; 249 + device_type = "tbi-phy"; 250 + }; 251 + }; 252 + 253 + enet4: ucc@2400 { 254 + device_type = "network"; 255 + compatible = "ucc_geth"; 256 + 
rx-clock-name = "none"; 257 + tx-clock-name = "clk13"; 258 + pio-handle = <&pio2>; 259 + phy-handle = <&qe_phy1>; 260 + phy-connection-type = "rmii"; 261 + }; 262 + 263 + serial2: ucc@2600 { 264 + device_type = "serial"; 265 + compatible = "ucc_uart"; 266 + port-number = <0>; 267 + rx-clock-name = "brg6"; 268 + tx-clock-name = "brg6"; 269 + pio-handle = <&pio3>; 270 + }; 271 + 272 + serial3: ucc@2200 { 273 + device_type = "serial"; 274 + compatible = "ucc_uart"; 275 + port-number = <1>; 276 + rx-clock-name = "brg2"; 277 + tx-clock-name = "brg2"; 278 + pio-handle = <&pio4>; 279 + }; 280 + };
+2
arch/powerpc/boot/dts/virtex440-ml507.dts
··· 257 257 #size-cells = <1>; 258 258 compatible = "xlnx,compound"; 259 259 ethernet@81c00000 { 260 + #address-cells = <1>; 261 + #size-cells = <0>; 260 262 compatible = "xlnx,xps-ll-temac-1.01.b"; 261 263 device_type = "network"; 262 264 interrupt-parent = <&xps_intc_0>;
+27
arch/powerpc/boot/mvme5100.c
··· 1 + /* 2 + * Motorola/Emerson MVME5100 with PPCBug firmware. 3 + * 4 + * Author: Stephen Chivers <schivers@csc.com> 5 + * 6 + * Copyright 2013 CSC Australia Pty. Ltd. 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License 10 + * version 2 as published by the Free Software Foundation. 11 + * 12 + */ 13 + #include "types.h" 14 + #include "ops.h" 15 + #include "io.h" 16 + 17 + BSS_STACK(4096); 18 + 19 + void platform_init(unsigned long r3, unsigned long r4, unsigned long r5) 20 + { 21 + u32 heapsize; 22 + 23 + heapsize = 0x8000000 - (u32)_end; /* 128M */ 24 + simple_alloc_init(_end, heapsize, 32, 64); 25 + fdt_init(_dtb_start); 26 + serial_console_init(); 27 + }
+4
arch/powerpc/boot/wrapper
··· 265 265 link_address='0x20000000' 266 266 pie=-pie 267 267 ;; 268 + mvme5100) 269 + platformo="$object/fixed-head.o $object/mvme5100.o" 270 + binary=y 271 + ;; 268 272 esac 269 273 270 274 vmz="$tmpdir/`basename \"$kernel\"`.$ext"
-188
arch/powerpc/configs/85xx/p1023_defconfig
··· 1 - CONFIG_PPC_85xx=y 2 - CONFIG_SMP=y 3 - CONFIG_NR_CPUS=2 4 - CONFIG_SYSVIPC=y 5 - CONFIG_POSIX_MQUEUE=y 6 - CONFIG_BSD_PROCESS_ACCT=y 7 - CONFIG_AUDIT=y 8 - CONFIG_NO_HZ=y 9 - CONFIG_HIGH_RES_TIMERS=y 10 - CONFIG_RCU_FANOUT=32 11 - CONFIG_IKCONFIG=y 12 - CONFIG_IKCONFIG_PROC=y 13 - CONFIG_LOG_BUF_SHIFT=14 14 - CONFIG_BLK_DEV_INITRD=y 15 - CONFIG_KALLSYMS_ALL=y 16 - CONFIG_EMBEDDED=y 17 - CONFIG_MODULES=y 18 - CONFIG_MODULE_UNLOAD=y 19 - CONFIG_MODULE_FORCE_UNLOAD=y 20 - CONFIG_MODVERSIONS=y 21 - # CONFIG_BLK_DEV_BSG is not set 22 - CONFIG_PARTITION_ADVANCED=y 23 - CONFIG_MAC_PARTITION=y 24 - CONFIG_PHYSICAL_START=0x00000000 25 - CONFIG_P1023_RDB=y 26 - CONFIG_P1023_RDS=y 27 - CONFIG_QUICC_ENGINE=y 28 - CONFIG_QE_GPIO=y 29 - CONFIG_CPM2=y 30 - CONFIG_HIGHMEM=y 31 - # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 32 - CONFIG_BINFMT_MISC=m 33 - CONFIG_MATH_EMULATION=y 34 - CONFIG_SWIOTLB=y 35 - CONFIG_PCI=y 36 - CONFIG_PCIEPORTBUS=y 37 - # CONFIG_PCIEAER is not set 38 - # CONFIG_PCIEASPM is not set 39 - CONFIG_PCI_MSI=y 40 - CONFIG_NET=y 41 - CONFIG_PACKET=y 42 - CONFIG_UNIX=y 43 - CONFIG_XFRM_USER=y 44 - CONFIG_NET_KEY=y 45 - CONFIG_INET=y 46 - CONFIG_IP_MULTICAST=y 47 - CONFIG_IP_ADVANCED_ROUTER=y 48 - CONFIG_IP_MULTIPLE_TABLES=y 49 - CONFIG_IP_ROUTE_MULTIPATH=y 50 - CONFIG_IP_ROUTE_VERBOSE=y 51 - CONFIG_IP_PNP=y 52 - CONFIG_IP_PNP_DHCP=y 53 - CONFIG_IP_PNP_BOOTP=y 54 - CONFIG_IP_PNP_RARP=y 55 - CONFIG_NET_IPIP=y 56 - CONFIG_IP_MROUTE=y 57 - CONFIG_IP_PIMSM_V1=y 58 - CONFIG_IP_PIMSM_V2=y 59 - CONFIG_ARPD=y 60 - CONFIG_INET_ESP=y 61 - # CONFIG_INET_XFRM_MODE_BEET is not set 62 - # CONFIG_INET_LRO is not set 63 - CONFIG_IPV6=y 64 - CONFIG_IP_SCTP=m 65 - CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 66 - CONFIG_DEVTMPFS=y 67 - CONFIG_DEVTMPFS_MOUNT=y 68 - CONFIG_MTD=y 69 - CONFIG_MTD_CMDLINE_PARTS=y 70 - CONFIG_MTD_CHAR=y 71 - CONFIG_MTD_BLOCK=y 72 - CONFIG_MTD_CFI=y 73 - CONFIG_MTD_CFI_AMDSTD=y 74 - CONFIG_MTD_PHYSMAP_OF=y 75 - CONFIG_MTD_NAND=y 76 - 
CONFIG_MTD_NAND_FSL_ELBC=y 77 - CONFIG_PROC_DEVICETREE=y 78 - CONFIG_BLK_DEV_LOOP=y 79 - CONFIG_BLK_DEV_RAM=y 80 - CONFIG_BLK_DEV_RAM_SIZE=131072 81 - CONFIG_EEPROM_AT24=y 82 - CONFIG_EEPROM_LEGACY=y 83 - CONFIG_BLK_DEV_SD=y 84 - CONFIG_CHR_DEV_ST=y 85 - CONFIG_BLK_DEV_SR=y 86 - CONFIG_CHR_DEV_SG=y 87 - CONFIG_SCSI_MULTI_LUN=y 88 - CONFIG_SCSI_LOGGING=y 89 - CONFIG_ATA=y 90 - CONFIG_SATA_FSL=y 91 - CONFIG_SATA_SIL24=y 92 - CONFIG_NETDEVICES=y 93 - CONFIG_DUMMY=y 94 - CONFIG_FS_ENET=y 95 - CONFIG_FSL_PQ_MDIO=y 96 - CONFIG_E1000E=y 97 - CONFIG_PHYLIB=y 98 - CONFIG_AT803X_PHY=y 99 - CONFIG_MARVELL_PHY=y 100 - CONFIG_DAVICOM_PHY=y 101 - CONFIG_CICADA_PHY=y 102 - CONFIG_VITESSE_PHY=y 103 - CONFIG_FIXED_PHY=y 104 - CONFIG_INPUT_FF_MEMLESS=m 105 - # CONFIG_INPUT_MOUSEDEV is not set 106 - # CONFIG_INPUT_KEYBOARD is not set 107 - # CONFIG_INPUT_MOUSE is not set 108 - CONFIG_SERIO_LIBPS2=y 109 - CONFIG_SERIAL_8250=y 110 - CONFIG_SERIAL_8250_CONSOLE=y 111 - CONFIG_SERIAL_8250_NR_UARTS=2 112 - CONFIG_SERIAL_8250_RUNTIME_UARTS=2 113 - CONFIG_SERIAL_8250_EXTENDED=y 114 - CONFIG_SERIAL_8250_MANY_PORTS=y 115 - CONFIG_SERIAL_8250_SHARE_IRQ=y 116 - CONFIG_SERIAL_8250_DETECT_IRQ=y 117 - CONFIG_SERIAL_8250_RSA=y 118 - CONFIG_HW_RANDOM=y 119 - CONFIG_NVRAM=y 120 - CONFIG_I2C=y 121 - CONFIG_I2C_CHARDEV=y 122 - CONFIG_I2C_CPM=m 123 - CONFIG_I2C_MPC=y 124 - CONFIG_GPIO_MPC8XXX=y 125 - # CONFIG_HWMON is not set 126 - CONFIG_VIDEO_OUTPUT_CONTROL=y 127 - CONFIG_SOUND=y 128 - CONFIG_SND=y 129 - CONFIG_SND_MIXER_OSS=y 130 - CONFIG_SND_PCM_OSS=y 131 - # CONFIG_SND_SUPPORT_OLD_API is not set 132 - CONFIG_USB=y 133 - CONFIG_USB_DEVICEFS=y 134 - CONFIG_USB_MON=y 135 - CONFIG_USB_EHCI_HCD=y 136 - CONFIG_USB_EHCI_FSL=y 137 - CONFIG_USB_STORAGE=y 138 - CONFIG_EDAC=y 139 - CONFIG_EDAC_MM_EDAC=y 140 - CONFIG_RTC_CLASS=y 141 - CONFIG_RTC_DRV_DS1307=y 142 - CONFIG_RTC_DRV_CMOS=y 143 - CONFIG_DMADEVICES=y 144 - CONFIG_FSL_DMA=y 145 - # CONFIG_NET_DMA is not set 146 - CONFIG_STAGING=y 147 - 
CONFIG_EXT2_FS=y 148 - CONFIG_EXT3_FS=y 149 - # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 150 - CONFIG_ISO9660_FS=m 151 - CONFIG_JOLIET=y 152 - CONFIG_ZISOFS=y 153 - CONFIG_UDF_FS=m 154 - CONFIG_MSDOS_FS=m 155 - CONFIG_VFAT_FS=y 156 - CONFIG_NTFS_FS=y 157 - CONFIG_PROC_KCORE=y 158 - CONFIG_TMPFS=y 159 - CONFIG_ADFS_FS=m 160 - CONFIG_AFFS_FS=m 161 - CONFIG_HFS_FS=m 162 - CONFIG_HFSPLUS_FS=m 163 - CONFIG_BEFS_FS=m 164 - CONFIG_BFS_FS=m 165 - CONFIG_EFS_FS=m 166 - CONFIG_CRAMFS=y 167 - CONFIG_VXFS_FS=m 168 - CONFIG_HPFS_FS=m 169 - CONFIG_QNX4FS_FS=m 170 - CONFIG_SYSV_FS=m 171 - CONFIG_UFS_FS=m 172 - CONFIG_NFS_FS=y 173 - CONFIG_NFS_V4=y 174 - CONFIG_ROOT_NFS=y 175 - CONFIG_NFSD=y 176 - CONFIG_CRC_T10DIF=y 177 - CONFIG_FRAME_WARN=8092 178 - CONFIG_DEBUG_FS=y 179 - CONFIG_DETECT_HUNG_TASK=y 180 - # CONFIG_DEBUG_BUGVERBOSE is not set 181 - CONFIG_DEBUG_INFO=y 182 - CONFIG_STRICT_DEVMEM=y 183 - CONFIG_CRYPTO_PCBC=m 184 - CONFIG_CRYPTO_SHA256=y 185 - CONFIG_CRYPTO_SHA512=y 186 - CONFIG_CRYPTO_AES=y 187 - # CONFIG_CRYPTO_ANSI_CPRNG is not set 188 - CONFIG_CRYPTO_DEV_FSL_CAAM=y
+1
arch/powerpc/configs/adder875_defconfig
··· 70 70 CONFIG_DETECT_HUNG_TASK=y 71 71 CONFIG_DEBUG_INFO=y 72 72 # CONFIG_RCU_CPU_STALL_DETECTOR is not set 73 + CONFIG_CRC32_SLICEBY4=y
+1
arch/powerpc/configs/ep88xc_defconfig
··· 72 72 CONFIG_DETECT_HUNG_TASK=y 73 73 CONFIG_DEBUG_INFO=y 74 74 # CONFIG_RCU_CPU_STALL_DETECTOR is not set 75 + CONFIG_CRC32_SLICEBY4=y
+3
arch/powerpc/configs/mpc85xx_defconfig
··· 31 31 CONFIG_P1010_RDB=y 32 32 CONFIG_P1022_DS=y 33 33 CONFIG_P1022_RDK=y 34 + CONFIG_P1023_RDB=y 34 35 CONFIG_P1023_RDS=y 35 36 CONFIG_SOCRATES=y 36 37 CONFIG_KSI8560=y ··· 114 113 CONFIG_BLK_DEV_NBD=y 115 114 CONFIG_BLK_DEV_RAM=y 116 115 CONFIG_BLK_DEV_RAM_SIZE=131072 116 + CONFIG_EEPROM_AT24=y 117 117 CONFIG_EEPROM_LEGACY=y 118 118 CONFIG_BLK_DEV_SD=y 119 119 CONFIG_CHR_DEV_ST=y ··· 213 211 CONFIG_EDAC_MM_EDAC=y 214 212 CONFIG_RTC_CLASS=y 215 213 CONFIG_RTC_DRV_CMOS=y 214 + CONFIG_RTC_DRV_DS1307=y 216 215 CONFIG_DMADEVICES=y 217 216 CONFIG_FSL_DMA=y 218 217 # CONFIG_NET_DMA is not set
+3
arch/powerpc/configs/mpc85xx_smp_defconfig
··· 34 34 CONFIG_P1010_RDB=y 35 35 CONFIG_P1022_DS=y 36 36 CONFIG_P1022_RDK=y 37 + CONFIG_P1023_RDB=y 37 38 CONFIG_P1023_RDS=y 38 39 CONFIG_SOCRATES=y 39 40 CONFIG_KSI8560=y ··· 117 116 CONFIG_BLK_DEV_NBD=y 118 117 CONFIG_BLK_DEV_RAM=y 119 118 CONFIG_BLK_DEV_RAM_SIZE=131072 119 + CONFIG_EEPROM_AT24=y 120 120 CONFIG_EEPROM_LEGACY=y 121 121 CONFIG_BLK_DEV_SD=y 122 122 CONFIG_CHR_DEV_ST=y ··· 214 212 CONFIG_EDAC_MM_EDAC=y 215 213 CONFIG_RTC_CLASS=y 216 214 CONFIG_RTC_DRV_CMOS=y 215 + CONFIG_RTC_DRV_DS1307=y 217 216 CONFIG_DMADEVICES=y 218 217 CONFIG_FSL_DMA=y 219 218 # CONFIG_NET_DMA is not set
+1
arch/powerpc/configs/mpc866_ads_defconfig
··· 55 55 CONFIG_CRC_CCITT=y 56 56 # CONFIG_RCU_CPU_STALL_DETECTOR is not set 57 57 # CONFIG_CRYPTO_ANSI_CPRNG is not set 58 + CONFIG_CRC32_SLICEBY4=y
+1
arch/powerpc/configs/mpc885_ads_defconfig
··· 78 78 CONFIG_DETECT_HUNG_TASK=y 79 79 CONFIG_DEBUG_INFO=y 80 80 # CONFIG_RCU_CPU_STALL_DETECTOR is not set 81 + CONFIG_CRC32_SLICEBY4=y
+144
arch/powerpc/configs/mvme5100_defconfig
··· 1 + CONFIG_SYSVIPC=y 2 + CONFIG_POSIX_MQUEUE=y 3 + CONFIG_NO_HZ=y 4 + CONFIG_HIGH_RES_TIMERS=y 5 + CONFIG_IKCONFIG=y 6 + CONFIG_IKCONFIG_PROC=y 7 + CONFIG_LOG_BUF_SHIFT=14 8 + # CONFIG_UTS_NS is not set 9 + # CONFIG_IPC_NS is not set 10 + # CONFIG_PID_NS is not set 11 + # CONFIG_NET_NS is not set 12 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 13 + # CONFIG_COMPAT_BRK is not set 14 + CONFIG_MODULES=y 15 + CONFIG_MODULE_UNLOAD=y 16 + # CONFIG_BLK_DEV_BSG is not set 17 + # CONFIG_PPC_CHRP is not set 18 + # CONFIG_PPC_PMAC is not set 19 + CONFIG_EMBEDDED6xx=y 20 + CONFIG_MVME5100=y 21 + CONFIG_KVM_GUEST=y 22 + CONFIG_HZ_100=y 23 + # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 24 + # CONFIG_COMPACTION is not set 25 + CONFIG_CMDLINE_BOOL=y 26 + CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs" 27 + CONFIG_NET=y 28 + CONFIG_PACKET=y 29 + CONFIG_UNIX=y 30 + CONFIG_INET=y 31 + CONFIG_IP_MULTICAST=y 32 + CONFIG_IP_PNP=y 33 + CONFIG_IP_PNP_DHCP=y 34 + CONFIG_IP_PNP_BOOTP=y 35 + # CONFIG_INET_LRO is not set 36 + # CONFIG_IPV6 is not set 37 + CONFIG_NETFILTER=y 38 + CONFIG_NF_CONNTRACK=m 39 + CONFIG_NF_CT_PROTO_SCTP=m 40 + CONFIG_NF_CONNTRACK_AMANDA=m 41 + CONFIG_NF_CONNTRACK_FTP=m 42 + CONFIG_NF_CONNTRACK_H323=m 43 + CONFIG_NF_CONNTRACK_IRC=m 44 + CONFIG_NF_CONNTRACK_NETBIOS_NS=m 45 + CONFIG_NF_CONNTRACK_PPTP=m 46 + CONFIG_NF_CONNTRACK_SIP=m 47 + CONFIG_NF_CONNTRACK_TFTP=m 48 + CONFIG_NETFILTER_XT_MATCH_MAC=m 49 + CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 50 + CONFIG_NETFILTER_XT_MATCH_STATE=m 51 + CONFIG_NF_CONNTRACK_IPV4=m 52 + CONFIG_IP_NF_IPTABLES=m 53 + CONFIG_IP_NF_FILTER=m 54 + CONFIG_IP_NF_TARGET_REJECT=m 55 + CONFIG_IP_NF_MANGLE=m 56 + CONFIG_IP_NF_TARGET_ECN=m 57 + CONFIG_IP_NF_TARGET_TTL=m 58 + CONFIG_IP_NF_RAW=m 59 + CONFIG_IP_NF_ARPTABLES=m 60 + CONFIG_IP_NF_ARPFILTER=m 61 + CONFIG_IP_NF_ARP_MANGLE=m 62 + CONFIG_LAPB=m 63 + CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 64 + CONFIG_PROC_DEVICETREE=y 65 + CONFIG_BLK_DEV_LOOP=y 66 + CONFIG_BLK_DEV_RAM=y 67 + 
CONFIG_BLK_DEV_RAM_COUNT=2 68 + CONFIG_BLK_DEV_RAM_SIZE=8192 69 + CONFIG_EEPROM_LEGACY=m 70 + CONFIG_NETDEVICES=y 71 + CONFIG_TUN=m 72 + # CONFIG_NET_VENDOR_3COM is not set 73 + CONFIG_E100=y 74 + # CONFIG_WLAN is not set 75 + # CONFIG_INPUT_MOUSEDEV_PSAUX is not set 76 + # CONFIG_INPUT_KEYBOARD is not set 77 + # CONFIG_INPUT_MOUSE is not set 78 + # CONFIG_SERIO is not set 79 + CONFIG_SERIAL_8250=y 80 + CONFIG_SERIAL_8250_CONSOLE=y 81 + CONFIG_SERIAL_8250_NR_UARTS=10 82 + CONFIG_SERIAL_8250_EXTENDED=y 83 + CONFIG_SERIAL_8250_MANY_PORTS=y 84 + CONFIG_SERIAL_8250_SHARE_IRQ=y 85 + CONFIG_SERIAL_OF_PLATFORM=y 86 + CONFIG_HW_RANDOM=y 87 + CONFIG_I2C=y 88 + CONFIG_I2C_CHARDEV=y 89 + CONFIG_I2C_MPC=y 90 + # CONFIG_HWMON is not set 91 + CONFIG_VIDEO_OUTPUT_CONTROL=m 92 + # CONFIG_VGA_CONSOLE is not set 93 + # CONFIG_HID is not set 94 + # CONFIG_USB_SUPPORT is not set 95 + # CONFIG_IOMMU_SUPPORT is not set 96 + CONFIG_VME_BUS=m 97 + CONFIG_VME_CA91CX42=m 98 + CONFIG_EXT2_FS=m 99 + CONFIG_EXT3_FS=m 100 + # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 101 + CONFIG_XFS_FS=m 102 + CONFIG_ISO9660_FS=m 103 + CONFIG_JOLIET=y 104 + CONFIG_ZISOFS=y 105 + CONFIG_UDF_FS=m 106 + CONFIG_MSDOS_FS=m 107 + CONFIG_VFAT_FS=m 108 + CONFIG_PROC_KCORE=y 109 + CONFIG_TMPFS=y 110 + CONFIG_NFS_FS=y 111 + CONFIG_NFS_V3_ACL=y 112 + CONFIG_NFS_V4=y 113 + CONFIG_ROOT_NFS=y 114 + CONFIG_NFSD=m 115 + CONFIG_NFSD_V3=y 116 + CONFIG_CIFS=m 117 + CONFIG_NLS=y 118 + CONFIG_NLS_CODEPAGE_437=m 119 + CONFIG_NLS_CODEPAGE_932=m 120 + CONFIG_NLS_ISO8859_1=m 121 + CONFIG_NLS_UTF8=m 122 + CONFIG_CRC_CCITT=m 123 + CONFIG_CRC_T10DIF=y 124 + CONFIG_XZ_DEC=y 125 + CONFIG_XZ_DEC_X86=y 126 + CONFIG_XZ_DEC_IA64=y 127 + CONFIG_XZ_DEC_ARM=y 128 + CONFIG_XZ_DEC_ARMTHUMB=y 129 + CONFIG_XZ_DEC_SPARC=y 130 + CONFIG_MAGIC_SYSRQ=y 131 + CONFIG_DEBUG_KERNEL=y 132 + CONFIG_DETECT_HUNG_TASK=y 133 + CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=20 134 + CONFIG_CRYPTO_CBC=y 135 + CONFIG_CRYPTO_PCBC=m 136 + CONFIG_CRYPTO_MD5=y 137 + 
CONFIG_CRYPTO_MICHAEL_MIC=m 138 + CONFIG_CRYPTO_SHA1=m 139 + CONFIG_CRYPTO_BLOWFISH=m 140 + CONFIG_CRYPTO_DES=y 141 + CONFIG_CRYPTO_SERPENT=m 142 + CONFIG_CRYPTO_TWOFISH=m 143 + CONFIG_CRYPTO_DEFLATE=m 144 + # CONFIG_CRYPTO_ANSI_CPRNG is not set
+1
arch/powerpc/configs/ppc64_defconfig
··· 186 186 CONFIG_SCSI_DH_ALUA=m 187 187 CONFIG_ATA=y 188 188 CONFIG_SATA_SIL24=y 189 + CONFIG_SATA_MV=y 189 190 CONFIG_SATA_SVW=y 190 191 CONFIG_MD=y 191 192 CONFIG_BLK_DEV_MD=y
+1
arch/powerpc/configs/tqm8xx_defconfig
··· 84 84 CONFIG_DETECT_HUNG_TASK=y 85 85 CONFIG_DEBUG_INFO=y 86 86 # CONFIG_RCU_CPU_STALL_DETECTOR is not set 87 + CONFIG_CRC32_SLICEBY4=y
+5
arch/powerpc/include/asm/bitops.h
··· 46 46 #include <asm/asm-compat.h> 47 47 #include <asm/synch.h> 48 48 49 + /* PPC bit number conversion */ 50 + #define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be)) 51 + #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) 52 + #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) 53 + 49 54 /* 50 55 * clear_bit doesn't imply a memory barrier 51 56 */
+13 -1
arch/powerpc/include/asm/cache.h
··· 41 41 extern struct ppc64_caches ppc64_caches; 42 42 #endif /* __powerpc64__ && ! __ASSEMBLY__ */ 43 43 44 - #if !defined(__ASSEMBLY__) 44 + #if defined(__ASSEMBLY__) 45 + /* 46 + * For a snooping icache, we still need a dummy icbi to purge all the 47 + * prefetched instructions from the ifetch buffers. We also need a sync 48 + * before the icbi to order the the actual stores to memory that might 49 + * have modified instructions with the icbi. 50 + */ 51 + #define PURGE_PREFETCHED_INS \ 52 + sync; \ 53 + icbi 0,r3; \ 54 + sync; \ 55 + isync 45 56 57 + #else 46 58 #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 47 59 48 60 #ifdef CONFIG_6xx
+1
arch/powerpc/include/asm/cmpxchg.h
··· 300 300 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 301 301 cmpxchg_local((ptr), (o), (n)); \ 302 302 }) 303 + #define cmpxchg64_relaxed cmpxchg64_local 303 304 #else 304 305 #include <asm-generic/cmpxchg-local.h> 305 306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+7
arch/powerpc/include/asm/code-patching.h
··· 34 34 unsigned long branch_target(const unsigned int *instr); 35 35 unsigned int translate_branch(const unsigned int *dest, 36 36 const unsigned int *src); 37 + #ifdef CONFIG_PPC_BOOK3E_64 38 + void __patch_exception(int exc, unsigned long addr); 39 + #define patch_exception(exc, name) do { \ 40 + extern unsigned int name; \ 41 + __patch_exception((exc), (unsigned long)&name); \ 42 + } while (0) 43 + #endif 37 44 38 45 static inline unsigned long ppc_function_entry(void *func) 39 46 {
+12
arch/powerpc/include/asm/cputable.h
··· 90 90 * if the error is fatal, 1 if it was fully recovered and 0 to 91 91 * pass up (not CPU originated) */ 92 92 int (*machine_check)(struct pt_regs *regs); 93 + 94 + /* 95 + * Processor specific early machine check handler which is 96 + * called in real mode to handle SLB and TLB errors. 97 + */ 98 + long (*machine_check_early)(struct pt_regs *regs); 99 + 100 + /* 101 + * Processor specific routine to flush tlbs. 102 + */ 103 + void (*flush_tlb)(unsigned long inval_selector); 104 + 93 105 }; 94 106 95 107 extern struct cpu_spec *cur_cpu_spec;
+13 -1
arch/powerpc/include/asm/eeh.h
··· 90 90 #define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */ 91 91 #define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */ 92 92 93 - #define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */ 93 + #define EEH_DEV_NO_HANDLER (1 << 8) /* No error handler */ 94 + #define EEH_DEV_SYSFS (1 << 9) /* Sysfs created */ 94 95 95 96 struct eeh_dev { 96 97 int mode; /* EEH mode */ ··· 117 116 { 118 117 return edev ? edev->pdev : NULL; 119 118 } 119 + 120 + /* Return values from eeh_ops::next_error */ 121 + enum { 122 + EEH_NEXT_ERR_NONE = 0, 123 + EEH_NEXT_ERR_INF, 124 + EEH_NEXT_ERR_FROZEN_PE, 125 + EEH_NEXT_ERR_FENCED_PHB, 126 + EEH_NEXT_ERR_DEAD_PHB, 127 + EEH_NEXT_ERR_DEAD_IOC 128 + }; 120 129 121 130 /* 122 131 * The struct is used to trace the registered EEH operation ··· 168 157 int (*read_config)(struct device_node *dn, int where, int size, u32 *val); 169 158 int (*write_config)(struct device_node *dn, int where, int size, u32 val); 170 159 int (*next_error)(struct eeh_pe **pe); 160 + int (*restore_config)(struct device_node *dn); 171 161 }; 172 162 173 163 extern struct eeh_ops *eeh_ops;
+14 -7
arch/powerpc/include/asm/exception-64s.h
··· 301 301 beq 4f; /* if from kernel mode */ \ 302 302 ACCOUNT_CPU_USER_ENTRY(r9, r10); \ 303 303 SAVE_PPR(area, r9, r10); \ 304 - 4: std r2,GPR2(r1); /* save r2 in stackframe */ \ 305 - SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ 306 - SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ 304 + 4: EXCEPTION_PROLOG_COMMON_2(area) \ 305 + EXCEPTION_PROLOG_COMMON_3(n) \ 306 + ACCOUNT_STOLEN_TIME 307 + 308 + /* Save original regs values from save area to stack frame. */ 309 + #define EXCEPTION_PROLOG_COMMON_2(area) \ 307 310 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ 308 311 ld r10,area+EX_R10(r13); \ 309 312 std r9,GPR9(r1); \ ··· 321 318 ld r10,area+EX_CFAR(r13); \ 322 319 std r10,ORIG_GPR3(r1); \ 323 320 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ 321 + GET_CTR(r10, area); \ 322 + std r10,_CTR(r1); 323 + 324 + #define EXCEPTION_PROLOG_COMMON_3(n) \ 325 + std r2,GPR2(r1); /* save r2 in stackframe */ \ 326 + SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ 327 + SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ 324 328 mflr r9; /* Get LR, later save to stack */ \ 325 329 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 326 330 std r9,_LINK(r1); \ 327 - GET_CTR(r10, area); \ 328 - std r10,_CTR(r1); \ 329 331 lbz r10,PACASOFTIRQEN(r13); \ 330 332 mfspr r11,SPRN_XER; /* save XER in stackframe */ \ 331 333 std r10,SOFTE(r1); \ ··· 340 332 li r10,0; \ 341 333 ld r11,exception_marker@toc(r2); \ 342 334 std r10,RESULT(r1); /* clear regs->result */ \ 343 - std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ 344 - ACCOUNT_STOLEN_TIME 335 + std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ 345 336 346 337 /* 347 338 * Exception vectors.
+1 -1
arch/powerpc/include/asm/fsl_lbc.h
··· 285 285 /* device info */ 286 286 struct device *dev; 287 287 struct fsl_lbc_regs __iomem *regs; 288 - int irq; 288 + int irq[2]; 289 289 wait_queue_head_t irq_wait; 290 290 spinlock_t lock; 291 291 void *nand;
+2 -1
arch/powerpc/include/asm/hardirq.h
··· 6 6 7 7 typedef struct { 8 8 unsigned int __softirq_pending; 9 - unsigned int timer_irqs; 9 + unsigned int timer_irqs_event; 10 + unsigned int timer_irqs_others; 10 11 unsigned int pmu_irqs; 11 12 unsigned int mce_exceptions; 12 13 unsigned int spurious_irqs;
+16
arch/powerpc/include/asm/io.h
··· 191 191 192 192 #endif /* __BIG_ENDIAN */ 193 193 194 + /* 195 + * Cache inhibitied accessors for use in real mode, you don't want to use these 196 + * unless you know what you're doing. 197 + * 198 + * NB. These use the cpu byte ordering. 199 + */ 200 + DEF_MMIO_OUT_X(out_rm8, 8, stbcix); 201 + DEF_MMIO_OUT_X(out_rm16, 16, sthcix); 202 + DEF_MMIO_OUT_X(out_rm32, 32, stwcix); 203 + DEF_MMIO_IN_X(in_rm8, 8, lbzcix); 204 + DEF_MMIO_IN_X(in_rm16, 16, lhzcix); 205 + DEF_MMIO_IN_X(in_rm32, 32, lwzcix); 206 + 194 207 #ifdef __powerpc64__ 208 + 209 + DEF_MMIO_OUT_X(out_rm64, 64, stdcix); 210 + DEF_MMIO_IN_X(in_rm64, 64, ldcix); 195 211 196 212 #ifdef __BIG_ENDIAN__ 197 213 DEF_MMIO_OUT_D(out_be64, 64, std);
+43 -11
arch/powerpc/include/asm/iommu.h
··· 30 30 #include <asm/machdep.h> 31 31 #include <asm/types.h> 32 32 33 - #define IOMMU_PAGE_SHIFT 12 34 - #define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT) 35 - #define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1)) 36 - #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE) 33 + #define IOMMU_PAGE_SHIFT_4K 12 34 + #define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K) 35 + #define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1)) 36 + #define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K) 37 + 38 + #define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift) 39 + #define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1)) 40 + #define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr)) 37 41 38 42 /* Boot time flags */ 39 43 extern int iommu_is_off; 40 44 extern int iommu_force_on; 41 - 42 - /* Pure 2^n version of get_order */ 43 - static __inline__ __attribute_const__ int get_iommu_order(unsigned long size) 44 - { 45 - return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1; 46 - } 47 - 48 45 49 46 /* 50 47 * IOMAP_MAX_ORDER defines the largest contiguous block ··· 73 76 struct iommu_pool large_pool; 74 77 struct iommu_pool pools[IOMMU_NR_POOLS]; 75 78 unsigned long *it_map; /* A simple allocation bitmap for now */ 79 + unsigned long it_page_shift;/* table iommu page size */ 76 80 #ifdef CONFIG_IOMMU_API 77 81 struct iommu_group *it_group; 78 82 #endif 79 83 }; 84 + 85 + /* Pure 2^n version of get_order */ 86 + static inline __attribute_const__ 87 + int get_iommu_order(unsigned long size, struct iommu_table *tbl) 88 + { 89 + return __ilog2((size - 1) >> tbl->it_page_shift) + 1; 90 + } 91 + 80 92 81 93 struct scatterlist; 82 94 ··· 107 101 */ 108 102 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, 109 103 int nid); 104 + #ifdef CONFIG_IOMMU_API 110 105 extern void iommu_register_group(struct iommu_table *tbl, 111 106 int pci_domain_number, unsigned 
long pe_num); 107 + extern int iommu_add_device(struct device *dev); 108 + extern void iommu_del_device(struct device *dev); 109 + #else 110 + static inline void iommu_register_group(struct iommu_table *tbl, 111 + int pci_domain_number, 112 + unsigned long pe_num) 113 + { 114 + } 115 + 116 + static inline int iommu_add_device(struct device *dev) 117 + { 118 + return 0; 119 + } 120 + 121 + static inline void iommu_del_device(struct device *dev) 122 + { 123 + } 124 + #endif /* !CONFIG_IOMMU_API */ 125 + 126 + static inline void set_iommu_table_base_and_group(struct device *dev, 127 + void *base) 128 + { 129 + set_iommu_table_base(dev, base); 130 + iommu_add_device(dev); 131 + } 112 132 113 133 extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, 114 134 struct scatterlist *sglist, int nelems,
+1
arch/powerpc/include/asm/kvm_asm.h
··· 74 74 #define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39 75 75 #define BOOKE_INTERRUPT_HV_SYSCALL 40 76 76 #define BOOKE_INTERRUPT_HV_PRIV 41 77 + #define BOOKE_INTERRUPT_LRAT_ERROR 42 77 78 78 79 /* book3s */ 79 80
-2
arch/powerpc/include/asm/lppaca.h
··· 132 132 } save_area[SLB_NUM_BOLTED]; 133 133 } ____cacheline_aligned; 134 134 135 - extern struct slb_shadow slb_shadow[]; 136 - 137 135 /* 138 136 * Layout of entries in the hypervisor's dispatch trace log buffer. 139 137 */
+197
arch/powerpc/include/asm/mce.h
··· 1 + /* 2 + * Machine check exception header file. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 + * 18 + * Copyright 2013 IBM Corporation 19 + * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> 20 + */ 21 + 22 + #ifndef __ASM_PPC64_MCE_H__ 23 + #define __ASM_PPC64_MCE_H__ 24 + 25 + #include <linux/bitops.h> 26 + 27 + /* 28 + * Machine Check bits on power7 and power8 29 + */ 30 + #define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */ 31 + 32 + /* SRR1 bits for machine check (On Power7 and Power8) */ 33 + #define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */ 34 + 35 + #define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */ 36 + #define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */ 37 + #define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */ 38 + #define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45)) 39 + #define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */ 40 + #define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */ 41 + #define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45)) 42 + 43 + /* SRR1 bits for machine check (On Power8) */ 44 + #define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << 
PPC_BITLSHIFT(45)) 45 + 46 + /* DSISR bits for machine check (On Power7 and Power8) */ 47 + #define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */ 48 + #define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */ 49 + #define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */ 50 + #define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */ 51 + #define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */ 52 + #define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */ 53 + #define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */ 54 + 55 + /* 56 + * DSISR bits for machine check (Power8) in addition to above. 57 + * Secondary DERAT Multihit 58 + */ 59 + #define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54)) 60 + 61 + /* SLB error bits */ 62 + #define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \ 63 + P7_DSISR_MC_SLB_PARITY_MFSLB | \ 64 + P7_DSISR_MC_SLB_MULTIHIT | \ 65 + P7_DSISR_MC_SLB_MULTIHIT_PARITY) 66 + 67 + #define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \ 68 + P8_DSISR_MC_ERAT_MULTIHIT_SEC) 69 + enum MCE_Version { 70 + MCE_V1 = 1, 71 + }; 72 + 73 + enum MCE_Severity { 74 + MCE_SEV_NO_ERROR = 0, 75 + MCE_SEV_WARNING = 1, 76 + MCE_SEV_ERROR_SYNC = 2, 77 + MCE_SEV_FATAL = 3, 78 + }; 79 + 80 + enum MCE_Disposition { 81 + MCE_DISPOSITION_RECOVERED = 0, 82 + MCE_DISPOSITION_NOT_RECOVERED = 1, 83 + }; 84 + 85 + enum MCE_Initiator { 86 + MCE_INITIATOR_UNKNOWN = 0, 87 + MCE_INITIATOR_CPU = 1, 88 + }; 89 + 90 + enum MCE_ErrorType { 91 + MCE_ERROR_TYPE_UNKNOWN = 0, 92 + MCE_ERROR_TYPE_UE = 1, 93 + MCE_ERROR_TYPE_SLB = 2, 94 + MCE_ERROR_TYPE_ERAT = 3, 95 + MCE_ERROR_TYPE_TLB = 4, 96 + }; 97 + 98 + enum MCE_UeErrorType { 99 + MCE_UE_ERROR_INDETERMINATE = 0, 100 + MCE_UE_ERROR_IFETCH = 1, 101 + MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2, 102 + MCE_UE_ERROR_LOAD_STORE = 3, 103 + MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4, 104 + }; 105 + 106 + enum MCE_SlbErrorType { 107 + MCE_SLB_ERROR_INDETERMINATE = 0, 108 + MCE_SLB_ERROR_PARITY = 1, 109 + 
MCE_SLB_ERROR_MULTIHIT = 2, 110 + }; 111 + 112 + enum MCE_EratErrorType { 113 + MCE_ERAT_ERROR_INDETERMINATE = 0, 114 + MCE_ERAT_ERROR_PARITY = 1, 115 + MCE_ERAT_ERROR_MULTIHIT = 2, 116 + }; 117 + 118 + enum MCE_TlbErrorType { 119 + MCE_TLB_ERROR_INDETERMINATE = 0, 120 + MCE_TLB_ERROR_PARITY = 1, 121 + MCE_TLB_ERROR_MULTIHIT = 2, 122 + }; 123 + 124 + struct machine_check_event { 125 + enum MCE_Version version:8; /* 0x00 */ 126 + uint8_t in_use; /* 0x01 */ 127 + enum MCE_Severity severity:8; /* 0x02 */ 128 + enum MCE_Initiator initiator:8; /* 0x03 */ 129 + enum MCE_ErrorType error_type:8; /* 0x04 */ 130 + enum MCE_Disposition disposition:8; /* 0x05 */ 131 + uint8_t reserved_1[2]; /* 0x06 */ 132 + uint64_t gpr3; /* 0x08 */ 133 + uint64_t srr0; /* 0x10 */ 134 + uint64_t srr1; /* 0x18 */ 135 + union { /* 0x20 */ 136 + struct { 137 + enum MCE_UeErrorType ue_error_type:8; 138 + uint8_t effective_address_provided; 139 + uint8_t physical_address_provided; 140 + uint8_t reserved_1[5]; 141 + uint64_t effective_address; 142 + uint64_t physical_address; 143 + uint8_t reserved_2[8]; 144 + } ue_error; 145 + 146 + struct { 147 + enum MCE_SlbErrorType slb_error_type:8; 148 + uint8_t effective_address_provided; 149 + uint8_t reserved_1[6]; 150 + uint64_t effective_address; 151 + uint8_t reserved_2[16]; 152 + } slb_error; 153 + 154 + struct { 155 + enum MCE_EratErrorType erat_error_type:8; 156 + uint8_t effective_address_provided; 157 + uint8_t reserved_1[6]; 158 + uint64_t effective_address; 159 + uint8_t reserved_2[16]; 160 + } erat_error; 161 + 162 + struct { 163 + enum MCE_TlbErrorType tlb_error_type:8; 164 + uint8_t effective_address_provided; 165 + uint8_t reserved_1[6]; 166 + uint64_t effective_address; 167 + uint8_t reserved_2[16]; 168 + } tlb_error; 169 + } u; 170 + }; 171 + 172 + struct mce_error_info { 173 + enum MCE_ErrorType error_type:8; 174 + union { 175 + enum MCE_UeErrorType ue_error_type:8; 176 + enum MCE_SlbErrorType slb_error_type:8; 177 + enum MCE_EratErrorType 
erat_error_type:8; 178 + enum MCE_TlbErrorType tlb_error_type:8; 179 + } u; 180 + uint8_t reserved[2]; 181 + }; 182 + 183 + #define MAX_MC_EVT 100 184 + 185 + /* Release flags for get_mce_event() */ 186 + #define MCE_EVENT_RELEASE true 187 + #define MCE_EVENT_DONTRELEASE false 188 + 189 + extern void save_mce_event(struct pt_regs *regs, long handled, 190 + struct mce_error_info *mce_err, uint64_t addr); 191 + extern int get_mce_event(struct machine_check_event *mce, bool release); 192 + extern void release_mce_event(void); 193 + extern void machine_check_queue_event(void); 194 + extern void machine_check_print_event_info(struct machine_check_event *evt); 195 + extern uint64_t get_mce_fault_addr(struct machine_check_event *evt); 196 + 197 + #endif /* __ASM_PPC64_MCE_H__ */
+13
arch/powerpc/include/asm/mmu-book3e.h
··· 286 286 extern int mmu_linear_psize; 287 287 extern int mmu_vmemmap_psize; 288 288 289 + struct tlb_core_data { 290 + /* For software way selection, as on Freescale TLB1 */ 291 + u8 esel_next, esel_max, esel_first; 292 + 293 + /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */ 294 + u8 lock; 295 + }; 296 + 289 297 #ifdef CONFIG_PPC64 290 298 extern unsigned long linear_map_top; 299 + extern int book3e_htw_mode; 300 + 301 + #define PPC_HTW_NONE 0 302 + #define PPC_HTW_IBM 1 303 + #define PPC_HTW_E6500 2 291 304 292 305 /* 293 306 * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+10 -9
arch/powerpc/include/asm/mmu.h
··· 180 180 #define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ 181 181 #define MMU_PAGE_256K 4 182 182 #define MMU_PAGE_1M 5 183 - #define MMU_PAGE_4M 6 184 - #define MMU_PAGE_8M 7 185 - #define MMU_PAGE_16M 8 186 - #define MMU_PAGE_64M 9 187 - #define MMU_PAGE_256M 10 188 - #define MMU_PAGE_1G 11 189 - #define MMU_PAGE_16G 12 190 - #define MMU_PAGE_64G 13 183 + #define MMU_PAGE_2M 6 184 + #define MMU_PAGE_4M 7 185 + #define MMU_PAGE_8M 8 186 + #define MMU_PAGE_16M 9 187 + #define MMU_PAGE_64M 10 188 + #define MMU_PAGE_256M 11 189 + #define MMU_PAGE_1G 12 190 + #define MMU_PAGE_16G 13 191 + #define MMU_PAGE_64G 14 191 192 192 - #define MMU_PAGE_COUNT 14 193 + #define MMU_PAGE_COUNT 15 193 194 194 195 #if defined(CONFIG_PPC_STD_MMU_64) 195 196 /* 64-bit classic hash table MMU */
+105 -3
arch/powerpc/include/asm/opal.h
··· 33 33 u64 rd_loc; /* r11 */ 34 34 }; 35 35 36 + /* 37 + * SG entry 38 + * 39 + * WARNING: The current implementation requires each entry 40 + * to represent a block that is 4k aligned *and* each block 41 + * size except the last one in the list to be as well. 42 + */ 43 + struct opal_sg_entry { 44 + void *data; 45 + long length; 46 + }; 47 + 48 + /* sg list */ 49 + struct opal_sg_list { 50 + unsigned long num_entries; 51 + struct opal_sg_list *next; 52 + struct opal_sg_entry entry[]; 53 + }; 54 + 55 + /* We calculate number of sg entries based on PAGE_SIZE */ 56 + #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) 57 + 36 58 extern long opal_query_takeover(u64 *hal_size, u64 *hal_align); 37 59 38 60 extern long opal_do_takeover(struct opal_takeover_args *args); ··· 154 132 #define OPAL_FLASH_VALIDATE 76 155 133 #define OPAL_FLASH_MANAGE 77 156 134 #define OPAL_FLASH_UPDATE 78 135 + #define OPAL_GET_MSG 85 136 + #define OPAL_CHECK_ASYNC_COMPLETION 86 137 + #define OPAL_SYNC_HOST_REBOOT 87 157 138 158 139 #ifndef __ASSEMBLY__ 159 140 ··· 236 211 OPAL_EVENT_ERROR_LOG = 0x40, 237 212 OPAL_EVENT_EPOW = 0x80, 238 213 OPAL_EVENT_LED_STATUS = 0x100, 239 - OPAL_EVENT_PCI_ERROR = 0x200 214 + OPAL_EVENT_PCI_ERROR = 0x200, 215 + OPAL_EVENT_MSG_PENDING = 0x800, 216 + }; 217 + 218 + enum OpalMessageType { 219 + OPAL_MSG_ASYNC_COMP = 0, 220 + OPAL_MSG_MEM_ERR, 221 + OPAL_MSG_EPOW, 222 + OPAL_MSG_SHUTDOWN, 223 + OPAL_MSG_TYPE_MAX, 240 224 }; 241 225 242 226 /* Machine check related definitions */ ··· 345 311 OPAL_ENABLE_MVE = 1 346 312 }; 347 313 348 - enum OpalPciResetAndReinitScope { 314 + enum OpalPciResetScope { 349 315 OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3, 350 316 OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5, 351 317 OPAL_PCI_IODA_TABLE_RESET = 6, 318 + }; 319 + 320 + enum OpalPciReinitScope { 321 + OPAL_REINIT_PCI_DEV = 1000 352 322 }; 353 323 354 324 enum OpalPciResetState { ··· 392 354 OPAL_LPC_MEM = 0, 393 
355 OPAL_LPC_IO = 1, 394 356 OPAL_LPC_FW = 2, 357 + }; 358 + 359 + struct opal_msg { 360 + uint32_t msg_type; 361 + uint32_t reserved; 362 + uint64_t params[8]; 395 363 }; 396 364 397 365 struct opal_machine_check_event { ··· 445 401 uint64_t effective_address; 446 402 uint8_t reserved_2[16]; 447 403 } tlb_error; 404 + } u; 405 + }; 406 + 407 + /* FSP memory errors handling */ 408 + enum OpalMemErr_Version { 409 + OpalMemErr_V1 = 1, 410 + }; 411 + 412 + enum OpalMemErrType { 413 + OPAL_MEM_ERR_TYPE_RESILIENCE = 0, 414 + OPAL_MEM_ERR_TYPE_DYN_DALLOC, 415 + OPAL_MEM_ERR_TYPE_SCRUB, 416 + }; 417 + 418 + /* Memory Reilience error type */ 419 + enum OpalMemErr_ResilErrType { 420 + OPAL_MEM_RESILIENCE_CE = 0, 421 + OPAL_MEM_RESILIENCE_UE, 422 + OPAL_MEM_RESILIENCE_UE_SCRUB, 423 + }; 424 + 425 + /* Dynamic Memory Deallocation type */ 426 + enum OpalMemErr_DynErrType { 427 + OPAL_MEM_DYNAMIC_DEALLOC = 0, 428 + }; 429 + 430 + /* OpalMemoryErrorData->flags */ 431 + #define OPAL_MEM_CORRECTED_ERROR 0x0001 432 + #define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002 433 + #define OPAL_MEM_ACK_REQUIRED 0x8000 434 + 435 + struct OpalMemoryErrorData { 436 + enum OpalMemErr_Version version:8; /* 0x00 */ 437 + enum OpalMemErrType type:8; /* 0x01 */ 438 + uint16_t flags; /* 0x02 */ 439 + uint8_t reserved_1[4]; /* 0x04 */ 440 + 441 + union { 442 + /* Memory Resilience corrected/uncorrected error info */ 443 + struct { 444 + enum OpalMemErr_ResilErrType resil_err_type:8; 445 + uint8_t reserved_1[7]; 446 + uint64_t physical_address_start; 447 + uint64_t physical_address_end; 448 + } resilience; 449 + /* Dynamic memory deallocation error info */ 450 + struct { 451 + enum OpalMemErr_DynErrType dyn_err_type:8; 452 + uint8_t reserved_1[7]; 453 + uint64_t physical_address_start; 454 + uint64_t physical_address_end; 455 + } dyn_dealloc; 448 456 } u; 449 457 }; 450 458 ··· 806 710 int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer, 807 711 uint64_t diag_buffer_len); 808 712 int64_t 
opal_pci_fence_phb(uint64_t phb_id); 809 - int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope); 713 + int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data); 810 714 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); 811 715 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); 812 716 int64_t opal_get_epow_status(__be64 *status); ··· 827 731 int64_t opal_manage_flash(uint8_t op); 828 732 int64_t opal_update_flash(uint64_t blk_list); 829 733 734 + int64_t opal_get_msg(uint64_t buffer, size_t size); 735 + int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token); 736 + int64_t opal_sync_host_reboot(void); 737 + 830 738 /* Internal functions */ 831 739 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); 832 740 ··· 844 744 int depth, void *data); 845 745 846 746 extern int opal_notifier_register(struct notifier_block *nb); 747 + extern int opal_message_notifier_register(enum OpalMessageType msg_type, 748 + struct notifier_block *nb); 847 749 extern void opal_notifier_enable(void); 848 750 extern void opal_notifier_disable(void); 849 751 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
+15 -1
arch/powerpc/include/asm/paca.h
··· 16 16 17 17 #ifdef CONFIG_PPC64 18 18 19 - #include <linux/init.h> 20 19 #include <asm/types.h> 21 20 #include <asm/lppaca.h> 22 21 #include <asm/mmu.h> ··· 112 113 /* Keep pgd in the same cacheline as the start of extlb */ 113 114 pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */ 114 115 pgd_t *kernel_pgd; /* Kernel PGD */ 116 + 117 + /* Shared by all threads of a core -- points to tcd of first thread */ 118 + struct tlb_core_data *tcd_ptr; 119 + 115 120 /* We can have up to 3 levels of reentrancy in the TLB miss handler */ 116 121 u64 extlb[3][EX_TLB_SIZE / sizeof(u64)]; 117 122 u64 exmc[8]; /* used for machine checks */ ··· 126 123 void *mc_kstack; 127 124 void *crit_kstack; 128 125 void *dbg_kstack; 126 + 127 + struct tlb_core_data tcd; 129 128 #endif /* CONFIG_PPC_BOOK3E */ 130 129 131 130 mm_context_t context; ··· 156 151 * early exception handler for use by high level C handler 157 152 */ 158 153 struct opal_machine_check_event *opal_mc_evt; 154 + #endif 155 + #ifdef CONFIG_PPC_BOOK3S_64 156 + /* Exclusive emergency stack pointer for machine check exception. */ 157 + void *mc_emergency_sp; 158 + /* 159 + * Flag to check whether we are in machine check early handler 160 + * and already using emergency stack. 161 + */ 162 + u16 in_mce; 159 163 #endif 160 164 161 165 /* Stuff for accurate time accounting */
+65 -1
arch/powerpc/include/asm/pgtable.h
··· 3 3 #ifdef __KERNEL__ 4 4 5 5 #ifndef __ASSEMBLY__ 6 + #include <linux/mmdebug.h> 6 7 #include <asm/processor.h> /* For TASK_SIZE */ 7 8 #include <asm/mmu.h> 8 9 #include <asm/page.h> ··· 34 33 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 35 34 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 36 35 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } 37 - static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; } 38 36 static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } 39 37 static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } 38 + 39 + #ifdef CONFIG_NUMA_BALANCING 40 + 41 + static inline int pte_present(pte_t pte) 42 + { 43 + return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA); 44 + } 45 + 46 + #define pte_numa pte_numa 47 + static inline int pte_numa(pte_t pte) 48 + { 49 + return (pte_val(pte) & 50 + (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; 51 + } 52 + 53 + #define pte_mknonnuma pte_mknonnuma 54 + static inline pte_t pte_mknonnuma(pte_t pte) 55 + { 56 + pte_val(pte) &= ~_PAGE_NUMA; 57 + pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED; 58 + return pte; 59 + } 60 + 61 + #define pte_mknuma pte_mknuma 62 + static inline pte_t pte_mknuma(pte_t pte) 63 + { 64 + /* 65 + * We should not set _PAGE_NUMA on non present ptes. Also clear the 66 + * present bit so that hash_page will return 1 and we collect this 67 + * as numa fault. 
68 + */ 69 + if (pte_present(pte)) { 70 + pte_val(pte) |= _PAGE_NUMA; 71 + pte_val(pte) &= ~_PAGE_PRESENT; 72 + } else 73 + VM_BUG_ON(1); 74 + return pte; 75 + } 76 + 77 + #define pmd_numa pmd_numa 78 + static inline int pmd_numa(pmd_t pmd) 79 + { 80 + return pte_numa(pmd_pte(pmd)); 81 + } 82 + 83 + #define pmd_mknonnuma pmd_mknonnuma 84 + static inline pmd_t pmd_mknonnuma(pmd_t pmd) 85 + { 86 + return pte_pmd(pte_mknonnuma(pmd_pte(pmd))); 87 + } 88 + 89 + #define pmd_mknuma pmd_mknuma 90 + static inline pmd_t pmd_mknuma(pmd_t pmd) 91 + { 92 + return pte_pmd(pte_mknuma(pmd_pte(pmd))); 93 + } 94 + 95 + # else 96 + 97 + static inline int pte_present(pte_t pte) 98 + { 99 + return pte_val(pte) & _PAGE_PRESENT; 100 + } 101 + #endif /* CONFIG_NUMA_BALANCING */ 40 102 41 103 /* Conversion functions: convert a page and protection to a page entry, 42 104 * and a page entry and page directory to the page they refer to.
+13 -1
arch/powerpc/include/asm/ppc_asm.h
··· 4 4 #ifndef _ASM_POWERPC_PPC_ASM_H 5 5 #define _ASM_POWERPC_PPC_ASM_H 6 6 7 - #include <linux/init.h> 8 7 #include <linux/stringify.h> 9 8 #include <asm/asm-compat.h> 10 9 #include <asm/processor.h> ··· 294 295 * you want to access various offsets within it). On ppc32 this is 295 296 * identical to LOAD_REG_IMMEDIATE. 296 297 * 298 + * LOAD_REG_ADDR_PIC(rn, name) 299 + * Loads the address of label 'name' into register 'run'. Use this when 300 + * the kernel doesn't run at the linked or relocated address. Please 301 + * note that this macro will clobber the lr register. 302 + * 297 303 * LOAD_REG_ADDRBASE(rn, name) 298 304 * ADDROFF(name) 299 305 * LOAD_REG_ADDRBASE loads part of the address of label 'name' into ··· 309 305 * LOAD_REG_ADDRBASE(rX, name) 310 306 * ld rY,ADDROFF(name)(rX) 311 307 */ 308 + 309 + /* Be careful, this will clobber the lr register. */ 310 + #define LOAD_REG_ADDR_PIC(reg, name) \ 311 + bl 0f; \ 312 + 0: mflr reg; \ 313 + addis reg,reg,(name - 0b)@ha; \ 314 + addi reg,reg,(name - 0b)@l; 315 + 312 316 #ifdef __powerpc64__ 313 317 #define LOAD_REG_IMMEDIATE(reg,expr) \ 314 318 lis reg,(expr)@highest; \
+7 -1
arch/powerpc/include/asm/processor.h
··· 256 256 unsigned long evr[32]; /* upper 32-bits of SPE regs */ 257 257 u64 acc; /* Accumulator */ 258 258 unsigned long spefscr; /* SPE & eFP status */ 259 + unsigned long spefscr_last; /* SPEFSCR value on last prctl 260 + call or trap return */ 259 261 int used_spe; /* set if process has used spe */ 260 262 #endif /* CONFIG_SPE */ 261 263 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM ··· 319 317 (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) 320 318 321 319 #ifdef CONFIG_SPE 322 - #define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, 320 + #define SPEFSCR_INIT \ 321 + .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \ 322 + .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, 323 323 #else 324 324 #define SPEFSCR_INIT 325 325 #endif ··· 377 373 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); 378 374 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); 379 375 376 + extern void fp_enable(void); 377 + extern void vec_enable(void); 380 378 extern void load_fp_state(struct thread_fp_state *fp); 381 379 extern void store_fp_state(struct thread_fp_state *fp); 382 380 extern void load_vr_state(struct thread_vr_state *vr);
-1
arch/powerpc/include/asm/ps3.h
··· 21 21 #if !defined(_ASM_POWERPC_PS3_H) 22 22 #define _ASM_POWERPC_PS3_H 23 23 24 - #include <linux/init.h> 25 24 #include <linux/types.h> 26 25 #include <linux/device.h> 27 26 #include <asm/cell-pmu.h>
+7 -1
arch/powerpc/include/asm/pte-hash64.h
··· 19 19 #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ 20 20 #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ 21 21 #define _PAGE_GUARDED 0x0008 22 - #define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ 22 + /* We can derive Memory coherence from _PAGE_NO_CACHE */ 23 23 #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ 24 24 #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ 25 25 #define _PAGE_DIRTY 0x0080 /* C: page changed */ 26 26 #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ 27 27 #define _PAGE_RW 0x0200 /* software: user write access allowed */ 28 28 #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ 29 + 30 + /* 31 + * Used for tracking numa faults 32 + */ 33 + #define _PAGE_NUMA 0x00000010 /* Gather numa placement stats */ 34 + 29 35 30 36 /* No separate kernel read-only */ 31 37 #define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+2
arch/powerpc/include/asm/reg.h
··· 1075 1075 #define PVR_8560 0x80200000 1076 1076 #define PVR_VER_E500V1 0x8020 1077 1077 #define PVR_VER_E500V2 0x8021 1078 + #define PVR_VER_E6500 0x8040 1079 + 1078 1080 /* 1079 1081 * For the 8xx processors, all of them report the same PVR family for 1080 1082 * the PowerPC core. The various versions of these processors must be
+10
arch/powerpc/include/asm/reg_booke.h
··· 101 101 #define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ 102 102 #define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ 103 103 #define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ 104 + #define SPRN_IVOR42 0x1B4 /* Interrupt Vector Offset Register 42 */ 104 105 #define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */ 105 106 #define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */ 106 107 #define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */ ··· 171 170 #define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */ 172 171 #define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ 173 172 #define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */ 173 + #define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */ 174 174 #define SPRN_SVR 0x3FF /* System Version Register */ 175 175 176 176 /* ··· 217 215 /* Bit definitions for CCR1. */ 218 216 #define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */ 219 217 #define CCR1_TCS 0x00000080 /* Timer Clock Select */ 218 + 219 + /* Bit definitions for PWRMGTCR0. */ 220 + #define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */ 221 + #define PWRMGTCR0_PW20_ENT_SHIFT 8 222 + #define PWRMGTCR0_PW20_ENT 0x3F00 223 + #define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */ 224 + #define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16 225 + #define PWRMGTCR0_AV_IDLE_CNT 0x3F0000 220 226 221 227 /* Bit definitions for the MCSR. */ 222 228 #define MCSR_MCS 0x80000000 /* Machine Check Summary */
+10 -2
arch/powerpc/include/asm/spinlock.h
··· 30 30 31 31 #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ 32 32 33 - #define arch_spin_is_locked(x) ((x)->slock != 0) 34 - 35 33 #ifdef CONFIG_PPC64 36 34 /* use 0x800000yy when locked, where yy == CPU number */ 37 35 #ifdef __BIG_ENDIAN__ ··· 53 55 #define CLEAR_IO_SYNC 54 56 #define SYNC_IO 55 57 #endif 58 + 59 + static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) 60 + { 61 + return lock.slock == 0; 62 + } 63 + 64 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 65 + { 66 + return !arch_spin_value_unlocked(*lock); 67 + } 56 68 57 69 /* 58 70 * This returns the old value in the lock, so we succeeded
+4 -5
arch/powerpc/include/asm/thread_info.h
··· 91 91 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling 92 92 TIF_NEED_RESCHED */ 93 93 #define TIF_32BIT 4 /* 32 bit binary */ 94 - #define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */ 95 - #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ 94 + #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ 96 95 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 97 96 #define TIF_SINGLESTEP 8 /* singlestepping active */ 98 97 #define TIF_NOHZ 9 /* in adaptive nohz mode */ ··· 114 115 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 115 116 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 116 117 #define _TIF_32BIT (1<<TIF_32BIT) 117 - #define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK) 118 - #define _TIF_PERFMON_CTXSW (1<<TIF_PERFMON_CTXSW) 118 + #define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM) 119 119 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 120 120 #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) 121 121 #define _TIF_SECCOMP (1<<TIF_SECCOMP) ··· 130 132 _TIF_NOHZ) 131 133 132 134 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 133 - _TIF_NOTIFY_RESUME | _TIF_UPROBE) 135 + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ 136 + _TIF_RESTORE_TM) 134 137 #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 135 138 136 139 /* Bits in local_flags */
+1
arch/powerpc/include/asm/tm.h
··· 15 15 extern void tm_enable(void); 16 16 extern void tm_reclaim(struct thread_struct *thread, 17 17 unsigned long orig_msr, uint8_t cause); 18 + extern void tm_reclaim_current(uint8_t cause); 18 19 extern void tm_recheckpoint(struct thread_struct *thread, 19 20 unsigned long orig_msr); 20 21 extern void tm_abort(uint8_t cause);
+9 -1
arch/powerpc/include/asm/topology.h
··· 22 22 23 23 static inline int cpu_to_node(int cpu) 24 24 { 25 - return numa_cpu_lookup_table[cpu]; 25 + int nid; 26 + 27 + nid = numa_cpu_lookup_table[cpu]; 28 + 29 + /* 30 + * During early boot, the numa-cpu lookup table might not have been 31 + * setup for all CPUs yet. In such cases, default to node 0. 32 + */ 33 + return (nid < 0) ? 0 : nid; 26 34 } 27 35 28 36 #define parent_node(node) (node)
-1
arch/powerpc/include/asm/vio.h
··· 15 15 #define _ASM_POWERPC_VIO_H 16 16 #ifdef __KERNEL__ 17 17 18 - #include <linux/init.h> 19 18 #include <linux/errno.h> 20 19 #include <linux/device.h> 21 20 #include <linux/dma-mapping.h>
+1
arch/powerpc/kernel/Makefile
··· 39 39 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 40 40 obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o 41 41 obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o 42 + obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o 42 43 obj64-$(CONFIG_RELOCATABLE) += reloc_64.o 43 44 obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o 44 45 obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
+13
arch/powerpc/kernel/asm-offsets.c
··· 203 203 DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); 204 204 DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); 205 205 DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); 206 + DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr)); 207 + 208 + DEFINE(TCD_ESEL_NEXT, 209 + offsetof(struct tlb_core_data, esel_next)); 210 + DEFINE(TCD_ESEL_MAX, 211 + offsetof(struct tlb_core_data, esel_max)); 212 + DEFINE(TCD_ESEL_FIRST, 213 + offsetof(struct tlb_core_data, esel_first)); 214 + DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock)); 206 215 #endif /* CONFIG_PPC_BOOK3E */ 207 216 208 217 #ifdef CONFIG_PPC_STD_MMU_64 ··· 241 232 DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); 242 233 #endif /* CONFIG_PPC_STD_MMU_64 */ 243 234 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); 235 + #ifdef CONFIG_PPC_BOOK3S_64 236 + DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); 237 + DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); 238 + #endif 244 239 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 245 240 DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); 246 241 DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
-1
arch/powerpc/kernel/cacheinfo.c
··· 12 12 13 13 #include <linux/cpu.h> 14 14 #include <linux/cpumask.h> 15 - #include <linux/init.h> 16 15 #include <linux/kernel.h> 17 16 #include <linux/kobject.h> 18 17 #include <linux/list.h>
+54
arch/powerpc/kernel/cpu_setup_fsl_booke.S
··· 53 53 isync 54 54 blr 55 55 56 + /* 57 + * FIXME - we haven't yet done testing to determine a reasonable default 58 + * value for PW20_WAIT_IDLE_BIT. 59 + */ 60 + #define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ 61 + _GLOBAL(setup_pw20_idle) 62 + mfspr r3, SPRN_PWRMGTCR0 63 + 64 + /* Set PW20_WAIT bit, enable pw20 state*/ 65 + ori r3, r3, PWRMGTCR0_PW20_WAIT 66 + li r11, PW20_WAIT_IDLE_BIT 67 + 68 + /* Set Automatic PW20 Core Idle Count */ 69 + rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT 70 + 71 + mtspr SPRN_PWRMGTCR0, r3 72 + 73 + blr 74 + 75 + /* 76 + * FIXME - we haven't yet done testing to determine a reasonable default 77 + * value for AV_WAIT_IDLE_BIT. 78 + */ 79 + #define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ 80 + _GLOBAL(setup_altivec_idle) 81 + mfspr r3, SPRN_PWRMGTCR0 82 + 83 + /* Enable Altivec Idle */ 84 + oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h 85 + li r11, AV_WAIT_IDLE_BIT 86 + 87 + /* Set Automatic AltiVec Idle Count */ 88 + rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT 89 + 90 + mtspr SPRN_PWRMGTCR0, r3 91 + 92 + blr 93 + 56 94 _GLOBAL(__setup_cpu_e6500) 57 95 mflr r6 58 96 #ifdef CONFIG_PPC64 59 97 bl .setup_altivec_ivors 98 + /* Touch IVOR42 only if the CPU supports E.HV category */ 99 + mfspr r10,SPRN_MMUCFG 100 + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE 101 + beq 1f 102 + bl .setup_lrat_ivor 103 + 1: 60 104 #endif 105 + bl setup_pw20_idle 106 + bl setup_altivec_idle 61 107 bl __setup_cpu_e5500 62 108 mtlr r6 63 109 blr ··· 165 119 _GLOBAL(__restore_cpu_e6500) 166 120 mflr r5 167 121 bl .setup_altivec_ivors 122 + /* Touch IVOR42 only if the CPU supports E.HV category */ 123 + mfspr r10,SPRN_MMUCFG 124 + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE 125 + beq 1f 126 + bl .setup_lrat_ivor 127 + 1: 128 + bl .setup_pw20_idle 129 + bl .setup_altivec_idle 168 130 bl __restore_cpu_e5500 169 131 mtlr r5 170 132 blr
+27 -11
arch/powerpc/kernel/cpu_setup_power.S
··· 29 29 mtspr SPRN_LPID,r0 30 30 mfspr r3,SPRN_LPCR 31 31 bl __init_LPCR 32 - bl __init_TLB 32 + bl __init_tlb_power7 33 33 mtlr r11 34 34 blr 35 35 ··· 42 42 mtspr SPRN_LPID,r0 43 43 mfspr r3,SPRN_LPCR 44 44 bl __init_LPCR 45 - bl __init_TLB 45 + bl __init_tlb_power7 46 46 mtlr r11 47 47 blr 48 48 ··· 59 59 oris r3, r3, LPCR_AIL_3@h 60 60 bl __init_LPCR 61 61 bl __init_HFSCR 62 - bl __init_TLB 62 + bl __init_tlb_power8 63 63 bl __init_PMU_HV 64 64 mtlr r11 65 65 blr ··· 78 78 oris r3, r3, LPCR_AIL_3@h 79 79 bl __init_LPCR 80 80 bl __init_HFSCR 81 - bl __init_TLB 81 + bl __init_tlb_power8 82 82 bl __init_PMU_HV 83 83 mtlr r11 84 84 blr ··· 134 134 mtspr SPRN_HFSCR,r3 135 135 blr 136 136 137 - __init_TLB: 138 - /* 139 - * Clear the TLB using the "IS 3" form of tlbiel instruction 140 - * (invalidate by congruence class). P7 has 128 CCs, P8 has 512 141 - * so we just always do 512 142 - */ 137 + /* 138 + * Clear the TLB using the specified IS form of tlbiel instruction 139 + * (invalidate by congruence class). P7 has 128 CCs., P8 has 512. 140 + * 141 + * r3 = IS field 142 + */ 143 + __init_tlb_power7: 144 + li r3,0xc00 /* IS field = 0b11 */ 145 + _GLOBAL(__flush_tlb_power7) 146 + li r6,128 147 + mtctr r6 148 + mr r7,r3 /* IS field */ 149 + ptesync 150 + 2: tlbiel r7 151 + addi r7,r7,0x1000 152 + bdnz 2b 153 + ptesync 154 + 1: blr 155 + 156 + __init_tlb_power8: 157 + li r3,0xc00 /* IS field = 0b11 */ 158 + _GLOBAL(__flush_tlb_power8) 143 159 li r6,512 144 160 mtctr r6 145 - li r7,0xc00 /* IS field = 0b11 */ 161 + mr r7,r3 /* IS field */ 146 162 ptesync 147 163 2: tlbiel r7 148 164 addi r7,r7,0x1000
+16
arch/powerpc/kernel/cputable.c
··· 71 71 extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); 72 72 extern void __restore_cpu_power8(void); 73 73 extern void __restore_cpu_a2(void); 74 + extern void __flush_tlb_power7(unsigned long inval_selector); 75 + extern void __flush_tlb_power8(unsigned long inval_selector); 76 + extern long __machine_check_early_realmode_p7(struct pt_regs *regs); 77 + extern long __machine_check_early_realmode_p8(struct pt_regs *regs); 74 78 #endif /* CONFIG_PPC64 */ 75 79 #if defined(CONFIG_E500) 76 80 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); ··· 444 440 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 445 441 .cpu_setup = __setup_cpu_power7, 446 442 .cpu_restore = __restore_cpu_power7, 443 + .flush_tlb = __flush_tlb_power7, 444 + .machine_check_early = __machine_check_early_realmode_p7, 447 445 .platform = "power7", 448 446 }, 449 447 { /* 2.07-compliant processor, i.e. Power8 "architected" mode */ ··· 462 456 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 463 457 .cpu_setup = __setup_cpu_power8, 464 458 .cpu_restore = __restore_cpu_power8, 459 + .flush_tlb = __flush_tlb_power8, 460 + .machine_check_early = __machine_check_early_realmode_p8, 465 461 .platform = "power8", 466 462 }, 467 463 { /* Power7 */ ··· 482 474 .oprofile_type = PPC_OPROFILE_POWER4, 483 475 .cpu_setup = __setup_cpu_power7, 484 476 .cpu_restore = __restore_cpu_power7, 477 + .flush_tlb = __flush_tlb_power7, 478 + .machine_check_early = __machine_check_early_realmode_p7, 485 479 .platform = "power7", 486 480 }, 487 481 { /* Power7+ */ ··· 502 492 .oprofile_type = PPC_OPROFILE_POWER4, 503 493 .cpu_setup = __setup_cpu_power7, 504 494 .cpu_restore = __restore_cpu_power7, 495 + .flush_tlb = __flush_tlb_power7, 496 + .machine_check_early = __machine_check_early_realmode_p7, 505 497 .platform = "power7+", 506 498 }, 507 499 { /* Power8E */ ··· 522 510 .oprofile_type = PPC_OPROFILE_INVALID, 523 511 .cpu_setup = __setup_cpu_power8, 524 512 .cpu_restore = 
__restore_cpu_power8, 513 + .flush_tlb = __flush_tlb_power8, 514 + .machine_check_early = __machine_check_early_realmode_p8, 525 515 .platform = "power8", 526 516 }, 527 517 { /* Power8 */ ··· 542 528 .oprofile_type = PPC_OPROFILE_INVALID, 543 529 .cpu_setup = __setup_cpu_power8, 544 530 .cpu_restore = __restore_cpu_power8, 531 + .flush_tlb = __flush_tlb_power8, 532 + .machine_check_early = __machine_check_early_realmode_p8, 545 533 .platform = "power8", 546 534 }, 547 535 { /* Cell Broadband Engine */
-1
arch/powerpc/kernel/crash.c
··· 17 17 #include <linux/export.h> 18 18 #include <linux/crash_dump.h> 19 19 #include <linux/delay.h> 20 - #include <linux/init.h> 21 20 #include <linux/irq.h> 22 21 #include <linux/types.h> 23 22
+2 -2
arch/powerpc/kernel/dma-iommu.c
··· 83 83 return 0; 84 84 } 85 85 86 - if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) { 86 + if (tbl->it_offset > (mask >> tbl->it_page_shift)) { 87 87 dev_info(dev, "Warning: IOMMU offset too big for device mask\n"); 88 88 dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n", 89 - mask, tbl->it_offset << IOMMU_PAGE_SHIFT); 89 + mask, tbl->it_offset << tbl->it_page_shift); 90 90 return 0; 91 91 } else 92 92 return 1;
+16 -1
arch/powerpc/kernel/eeh.c
··· 84 84 #define EEH_MAX_FAILS 2100000 85 85 86 86 /* Time to wait for a PCI slot to report status, in milliseconds */ 87 - #define PCI_BUS_RESET_WAIT_MSEC (60*1000) 87 + #define PCI_BUS_RESET_WAIT_MSEC (5*60*1000) 88 88 89 89 /* Platform dependent EEH operations */ 90 90 struct eeh_ops *eeh_ops = NULL; ··· 921 921 eeh_sysfs_remove_device(edev->pdev); 922 922 edev->mode &= ~EEH_DEV_SYSFS; 923 923 924 + /* 925 + * We definitely should have the PCI device removed 926 + * though it wasn't correctly. So we needn't call 927 + * into error handler afterwards. 928 + */ 929 + edev->mode |= EEH_DEV_NO_HANDLER; 930 + 924 931 edev->pdev = NULL; 925 932 dev->dev.archdata.edev = NULL; 926 933 } ··· 1029 1022 eeh_rmv_from_parent_pe(edev); 1030 1023 else 1031 1024 edev->mode |= EEH_DEV_DISCONNECTED; 1025 + 1026 + /* 1027 + * We're removing from the PCI subsystem, that means 1028 + * the PCI device driver can't support EEH or not 1029 + * well. So we rely on hotplug completely to do recovery 1030 + * for the specific PCI device. 1031 + */ 1032 + edev->mode |= EEH_DEV_NO_HANDLER; 1032 1033 1033 1034 eeh_addr_cache_rmv_dev(dev); 1034 1035 eeh_sysfs_remove_device(dev);
+86 -76
arch/powerpc/kernel/eeh_driver.c
··· 217 217 if (!driver) return NULL; 218 218 219 219 if (!driver->err_handler || 220 - !driver->err_handler->mmio_enabled) { 220 + !driver->err_handler->mmio_enabled || 221 + (edev->mode & EEH_DEV_NO_HANDLER)) { 221 222 eeh_pcid_put(dev); 222 223 return NULL; 223 224 } ··· 259 258 eeh_enable_irq(dev); 260 259 261 260 if (!driver->err_handler || 262 - !driver->err_handler->slot_reset) { 261 + !driver->err_handler->slot_reset || 262 + (edev->mode & EEH_DEV_NO_HANDLER)) { 263 263 eeh_pcid_put(dev); 264 264 return NULL; 265 265 } ··· 299 297 eeh_enable_irq(dev); 300 298 301 299 if (!driver->err_handler || 302 - !driver->err_handler->resume) { 300 + !driver->err_handler->resume || 301 + (edev->mode & EEH_DEV_NO_HANDLER)) { 302 + edev->mode &= ~EEH_DEV_NO_HANDLER; 303 303 eeh_pcid_put(dev); 304 304 return NULL; 305 305 } ··· 480 476 /* The longest amount of time to wait for a pci device 481 477 * to come back on line, in seconds. 482 478 */ 483 - #define MAX_WAIT_FOR_RECOVERY 150 479 + #define MAX_WAIT_FOR_RECOVERY 300 484 480 485 481 static void eeh_handle_normal_event(struct eeh_pe *pe) 486 482 { ··· 641 637 { 642 638 struct eeh_pe *pe, *phb_pe; 643 639 struct pci_bus *bus; 644 - struct pci_controller *hose, *tmp; 640 + struct pci_controller *hose; 645 641 unsigned long flags; 646 - int rc = 0; 642 + int rc; 647 643 648 - /* 649 - * The return value from next_error() has been classified as follows. 650 - * It might be good to enumerate them. However, next_error() is only 651 - * supported by PowerNV platform for now. 
So it would be fine to use 652 - * integer directly: 653 - * 654 - * 4 - Dead IOC 3 - Dead PHB 655 - * 2 - Fenced PHB 1 - Frozen PE 656 - * 0 - No error found 657 - * 658 - */ 659 - rc = eeh_ops->next_error(&pe); 660 - if (rc <= 0) 661 - return; 662 644 663 - switch (rc) { 664 - case 4: 665 - /* Mark all PHBs in dead state */ 666 - eeh_serialize_lock(&flags); 667 - list_for_each_entry_safe(hose, tmp, 668 - &hose_list, list_node) { 669 - phb_pe = eeh_phb_pe_get(hose); 670 - if (!phb_pe) continue; 645 + do { 646 + rc = eeh_ops->next_error(&pe); 671 647 672 - eeh_pe_state_mark(phb_pe, 673 - EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); 648 + switch (rc) { 649 + case EEH_NEXT_ERR_DEAD_IOC: 650 + /* Mark all PHBs in dead state */ 651 + eeh_serialize_lock(&flags); 652 + 653 + /* Purge all events */ 654 + eeh_remove_event(NULL); 655 + 656 + list_for_each_entry(hose, &hose_list, list_node) { 657 + phb_pe = eeh_phb_pe_get(hose); 658 + if (!phb_pe) continue; 659 + 660 + eeh_pe_state_mark(phb_pe, 661 + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); 662 + } 663 + 664 + eeh_serialize_unlock(flags); 665 + 666 + break; 667 + case EEH_NEXT_ERR_FROZEN_PE: 668 + case EEH_NEXT_ERR_FENCED_PHB: 669 + case EEH_NEXT_ERR_DEAD_PHB: 670 + /* Mark the PE in fenced state */ 671 + eeh_serialize_lock(&flags); 672 + 673 + /* Purge all events of the PHB */ 674 + eeh_remove_event(pe); 675 + 676 + if (rc == EEH_NEXT_ERR_DEAD_PHB) 677 + eeh_pe_state_mark(pe, 678 + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); 679 + else 680 + eeh_pe_state_mark(pe, 681 + EEH_PE_ISOLATED | EEH_PE_RECOVERING); 682 + 683 + eeh_serialize_unlock(flags); 684 + 685 + break; 686 + case EEH_NEXT_ERR_NONE: 687 + return; 688 + default: 689 + pr_warn("%s: Invalid value %d from next_error()\n", 690 + __func__, rc); 691 + return; 674 692 } 675 - eeh_serialize_unlock(flags); 676 693 677 - /* Purge all events */ 678 - eeh_remove_event(NULL); 679 - break; 680 - case 3: 681 - case 2: 682 - case 1: 683 - /* Mark the PE in fenced state */ 684 - 
eeh_serialize_lock(&flags); 685 - if (rc == 3) 686 - eeh_pe_state_mark(pe, 687 - EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); 688 - else 689 - eeh_pe_state_mark(pe, 690 - EEH_PE_ISOLATED | EEH_PE_RECOVERING); 691 - eeh_serialize_unlock(flags); 694 + /* 695 + * For fenced PHB and frozen PE, it's handled as normal 696 + * event. We have to remove the affected PHBs for dead 697 + * PHB and IOC 698 + */ 699 + if (rc == EEH_NEXT_ERR_FROZEN_PE || 700 + rc == EEH_NEXT_ERR_FENCED_PHB) { 701 + eeh_handle_normal_event(pe); 702 + } else { 703 + pci_lock_rescan_remove(); 704 + list_for_each_entry(hose, &hose_list, list_node) { 705 + phb_pe = eeh_phb_pe_get(hose); 706 + if (!phb_pe || 707 + !(phb_pe->state & EEH_PE_PHB_DEAD)) 708 + continue; 692 709 693 - /* Purge all events of the PHB */ 694 - eeh_remove_event(pe); 695 - break; 696 - default: 697 - pr_err("%s: Invalid value %d from next_error()\n", 698 - __func__, rc); 699 - return; 700 - } 701 - 702 - /* 703 - * For fenced PHB and frozen PE, it's handled as normal 704 - * event. We have to remove the affected PHBs for dead 705 - * PHB and IOC 706 - */ 707 - if (rc == 2 || rc == 1) 708 - eeh_handle_normal_event(pe); 709 - else { 710 - pci_lock_rescan_remove(); 711 - list_for_each_entry_safe(hose, tmp, 712 - &hose_list, list_node) { 713 - phb_pe = eeh_phb_pe_get(hose); 714 - if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD)) 715 - continue; 716 - 717 - bus = eeh_pe_bus_get(phb_pe); 718 - /* Notify all devices that they're about to go down. 
*/ 719 - eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); 720 - pcibios_remove_pci_devices(bus); 710 + /* Notify all devices to be down */ 711 + bus = eeh_pe_bus_get(phb_pe); 712 + eeh_pe_dev_traverse(pe, 713 + eeh_report_failure, NULL); 714 + pcibios_remove_pci_devices(bus); 715 + } 716 + pci_unlock_rescan_remove(); 721 717 } 722 - pci_unlock_rescan_remove(); 723 - } 718 + 719 + /* 720 + * If we have detected dead IOC, we needn't proceed 721 + * any more since all PHBs would have been removed 722 + */ 723 + if (rc == EEH_NEXT_ERR_DEAD_IOC) 724 + break; 725 + } while (rc != EEH_NEXT_ERR_NONE); 724 726 } 725 727 726 728 /**
+3 -1
arch/powerpc/kernel/eeh_pe.c
··· 25 25 #include <linux/delay.h> 26 26 #include <linux/export.h> 27 27 #include <linux/gfp.h> 28 - #include <linux/init.h> 29 28 #include <linux/kernel.h> 30 29 #include <linux/pci.h> 31 30 #include <linux/string.h> ··· 735 736 eeh_restore_bridge_bars(edev, dn); 736 737 else 737 738 eeh_restore_device_bars(edev, dn); 739 + 740 + if (eeh_ops->restore_config) 741 + eeh_ops->restore_config(dn); 738 742 739 743 return NULL; 740 744 }
+10 -2
arch/powerpc/kernel/entry_64.S
··· 664 664 bl .restore_interrupts 665 665 SCHEDULE_USER 666 666 b .ret_from_except_lite 667 - 668 - 2: bl .save_nvgprs 667 + 2: 668 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 669 + andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM 670 + bne 3f /* only restore TM if nothing else to do */ 671 + addi r3,r1,STACK_FRAME_OVERHEAD 672 + bl .restore_tm_state 673 + b restore 674 + 3: 675 + #endif 676 + bl .save_nvgprs 669 677 bl .restore_interrupts 670 678 addi r3,r1,STACK_FRAME_OVERHEAD 671 679 bl .do_notify_resume
+19 -8
arch/powerpc/kernel/exceptions-64e.S
··· 308 308 EXCEPTION_STUB(0x2e0, guest_doorbell_crit) 309 309 EXCEPTION_STUB(0x300, hypercall) 310 310 EXCEPTION_STUB(0x320, ehpriv) 311 + EXCEPTION_STUB(0x340, lrat_error) 311 312 312 313 .globl interrupt_end_book3e 313 314 interrupt_end_book3e: ··· 678 677 bl .unknown_exception 679 678 b .ret_from_except 680 679 680 + /* LRAT Error interrupt */ 681 + START_EXCEPTION(lrat_error); 682 + NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR, 683 + PROLOG_ADDITION_NONE) 684 + EXCEPTION_COMMON(0x340, PACA_EXGEN, INTS_KEEP) 685 + addi r3,r1,STACK_FRAME_OVERHEAD 686 + bl .save_nvgprs 687 + INTS_RESTORE_HARD 688 + bl .unknown_exception 689 + b .ret_from_except 690 + 681 691 /* 682 692 * An interrupt came in while soft-disabled; We mark paca->irq_happened 683 693 * accordingly and if the interrupt is level sensitive, we hard disable ··· 871 859 BAD_STACK_TRAMPOLINE(0x300) 872 860 BAD_STACK_TRAMPOLINE(0x310) 873 861 BAD_STACK_TRAMPOLINE(0x320) 862 + BAD_STACK_TRAMPOLINE(0x340) 874 863 BAD_STACK_TRAMPOLINE(0x400) 875 864 BAD_STACK_TRAMPOLINE(0x500) 876 865 BAD_STACK_TRAMPOLINE(0x600) ··· 1068 1055 mtspr SPRN_MAS0,r3 1069 1056 tlbre 1070 1057 mfspr r6,SPRN_MAS1 1071 - rlwinm r6,r6,0,2,0 /* clear IPROT */ 1058 + rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */ 1072 1059 mtspr SPRN_MAS1,r6 1073 1060 tlbwe 1074 - 1075 - /* Invalidate TLB1 */ 1076 - PPC_TLBILX_ALL(0,R0) 1077 1061 sync 1078 1062 isync 1079 1063 ··· 1124 1114 mtspr SPRN_MAS0,r4 1125 1115 tlbre 1126 1116 mfspr r5,SPRN_MAS1 1127 - rlwinm r5,r5,0,2,0 /* clear IPROT */ 1117 + rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */ 1128 1118 mtspr SPRN_MAS1,r5 1129 1119 tlbwe 1130 - 1131 - /* Invalidate TLB1 */ 1132 - PPC_TLBILX_ALL(0,R0) 1133 1120 sync 1134 1121 isync 1135 1122 ··· 1420 1413 SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ 1421 1414 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */ 1422 1415 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */ 1416 + blr 1417 + 1418 + 
_GLOBAL(setup_lrat_ivor) 1419 + SET_IVOR(42, 0x340) /* LRAT Error */ 1423 1420 blr
+218 -24
arch/powerpc/kernel/exceptions-64s.S
··· 155 155 */ 156 156 HMT_MEDIUM_PPR_DISCARD 157 157 SET_SCRATCH0(r13) /* save r13 */ 158 + #ifdef CONFIG_PPC_P7_NAP 159 + BEGIN_FTR_SECTION 160 + /* Running native on arch 2.06 or later, check if we are 161 + * waking up from nap. We only handle no state loss and 162 + * supervisor state loss. We do -not- handle hypervisor 163 + * state loss at this time. 164 + */ 165 + mfspr r13,SPRN_SRR1 166 + rlwinm. r13,r13,47-31,30,31 167 + beq 9f 168 + 169 + /* waking up from powersave (nap) state */ 170 + cmpwi cr1,r13,2 171 + /* Total loss of HV state is fatal. let's just stay stuck here */ 172 + bgt cr1,. 173 + 9: 174 + END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 175 + #endif /* CONFIG_PPC_P7_NAP */ 158 176 EXCEPTION_PROLOG_0(PACA_EXMC) 177 + BEGIN_FTR_SECTION 178 + b machine_check_pSeries_early 179 + FTR_SECTION_ELSE 159 180 b machine_check_pSeries_0 181 + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) 160 182 161 183 . = 0x300 162 184 .globl data_access_pSeries ··· 427 405 428 406 .align 7 429 407 /* moved from 0x200 */ 408 + machine_check_pSeries_early: 409 + BEGIN_FTR_SECTION 410 + EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200) 411 + /* 412 + * Register contents: 413 + * R13 = PACA 414 + * R9 = CR 415 + * Original R9 to R13 is saved on PACA_EXMC 416 + * 417 + * Switch to mc_emergency stack and handle re-entrancy (though we 418 + * currently don't test for overflow). Save MCE registers srr1, 419 + * srr0, dar and dsisr and then set ME=1 420 + * 421 + * We use paca->in_mce to check whether this is the first entry or 422 + * nested machine check. We increment paca->in_mce to track nested 423 + * machine checks. 424 + * 425 + * If this is the first entry then set stack pointer to 426 + * paca->mc_emergency_sp, otherwise r1 is already pointing to 427 + * stack frame on mc_emergency stack. 428 + * 429 + * NOTE: We are here with MSR_ME=0 (off), which means we risk a 430 + * checkstop if we get another machine check exception before we do 431 + * rfid with MSR_ME=1. 
432 + */ 433 + mr r11,r1 /* Save r1 */ 434 + lhz r10,PACA_IN_MCE(r13) 435 + cmpwi r10,0 /* Are we in nested machine check */ 436 + bne 0f /* Yes, we are. */ 437 + /* First machine check entry */ 438 + ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ 439 + 0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 440 + addi r10,r10,1 /* increment paca->in_mce */ 441 + sth r10,PACA_IN_MCE(r13) 442 + std r11,GPR1(r1) /* Save r1 on the stack. */ 443 + std r11,0(r1) /* make stack chain pointer */ 444 + mfspr r11,SPRN_SRR0 /* Save SRR0 */ 445 + std r11,_NIP(r1) 446 + mfspr r11,SPRN_SRR1 /* Save SRR1 */ 447 + std r11,_MSR(r1) 448 + mfspr r11,SPRN_DAR /* Save DAR */ 449 + std r11,_DAR(r1) 450 + mfspr r11,SPRN_DSISR /* Save DSISR */ 451 + std r11,_DSISR(r1) 452 + std r9,_CCR(r1) /* Save CR in stackframe */ 453 + /* Save r9 through r13 from EXMC save area to stack frame. */ 454 + EXCEPTION_PROLOG_COMMON_2(PACA_EXMC) 455 + mfmsr r11 /* get MSR value */ 456 + ori r11,r11,MSR_ME /* turn on ME bit */ 457 + ori r11,r11,MSR_RI /* turn on RI bit */ 458 + ld r12,PACAKBASE(r13) /* get high part of &label */ 459 + LOAD_HANDLER(r12, machine_check_handle_early) 460 + mtspr SPRN_SRR0,r12 461 + mtspr SPRN_SRR1,r11 462 + rfid 463 + b . /* prevent speculative execution */ 464 + END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 465 + 430 466 machine_check_pSeries: 431 467 .globl machine_check_fwnmi 432 468 machine_check_fwnmi: ··· 767 687 /*** Common interrupt handlers ***/ 768 688 769 689 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) 770 - 771 - /* 772 - * Machine check is different because we use a different 773 - * save area: PACA_EXMC instead of PACA_EXGEN. 
774 - */ 775 - .align 7 776 - .globl machine_check_common 777 - machine_check_common: 778 - 779 - mfspr r10,SPRN_DAR 780 - std r10,PACA_EXGEN+EX_DAR(r13) 781 - mfspr r10,SPRN_DSISR 782 - stw r10,PACA_EXGEN+EX_DSISR(r13) 783 - EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) 784 - FINISH_NAP 785 - DISABLE_INTS 786 - ld r3,PACA_EXGEN+EX_DAR(r13) 787 - lwz r4,PACA_EXGEN+EX_DSISR(r13) 788 - std r3,_DAR(r1) 789 - std r4,_DSISR(r1) 790 - bl .save_nvgprs 791 - addi r3,r1,STACK_FRAME_OVERHEAD 792 - bl .machine_check_exception 793 - b .ret_from_except 794 690 795 691 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) 796 692 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) ··· 1136 1080 #endif /* __DISABLED__ */ 1137 1081 1138 1082 1083 + /* 1084 + * Machine check is different because we use a different 1085 + * save area: PACA_EXMC instead of PACA_EXGEN. 1086 + */ 1087 + .align 7 1088 + .globl machine_check_common 1089 + machine_check_common: 1090 + 1091 + mfspr r10,SPRN_DAR 1092 + std r10,PACA_EXGEN+EX_DAR(r13) 1093 + mfspr r10,SPRN_DSISR 1094 + stw r10,PACA_EXGEN+EX_DSISR(r13) 1095 + EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) 1096 + FINISH_NAP 1097 + DISABLE_INTS 1098 + ld r3,PACA_EXGEN+EX_DAR(r13) 1099 + lwz r4,PACA_EXGEN+EX_DSISR(r13) 1100 + std r3,_DAR(r1) 1101 + std r4,_DSISR(r1) 1102 + bl .save_nvgprs 1103 + addi r3,r1,STACK_FRAME_OVERHEAD 1104 + bl .machine_check_exception 1105 + b .ret_from_except 1106 + 1139 1107 .align 7 1140 1108 .globl alignment_common 1141 1109 alignment_common: ··· 1342 1262 b machine_check_pSeries 1343 1263 #endif /* CONFIG_PPC_POWERNV */ 1344 1264 1265 + 1266 + #define MACHINE_CHECK_HANDLER_WINDUP \ 1267 + /* Clear MSR_RI before setting SRR0 and SRR1. 
*/\ 1268 + li r0,MSR_RI; \ 1269 + mfmsr r9; /* get MSR value */ \ 1270 + andc r9,r9,r0; \ 1271 + mtmsrd r9,1; /* Clear MSR_RI */ \ 1272 + /* Move original SRR0 and SRR1 into the respective regs */ \ 1273 + ld r9,_MSR(r1); \ 1274 + mtspr SPRN_SRR1,r9; \ 1275 + ld r3,_NIP(r1); \ 1276 + mtspr SPRN_SRR0,r3; \ 1277 + ld r9,_CTR(r1); \ 1278 + mtctr r9; \ 1279 + ld r9,_XER(r1); \ 1280 + mtxer r9; \ 1281 + ld r9,_LINK(r1); \ 1282 + mtlr r9; \ 1283 + REST_GPR(0, r1); \ 1284 + REST_8GPRS(2, r1); \ 1285 + REST_GPR(10, r1); \ 1286 + ld r11,_CCR(r1); \ 1287 + mtcr r11; \ 1288 + /* Decrement paca->in_mce. */ \ 1289 + lhz r12,PACA_IN_MCE(r13); \ 1290 + subi r12,r12,1; \ 1291 + sth r12,PACA_IN_MCE(r13); \ 1292 + REST_GPR(11, r1); \ 1293 + REST_2GPRS(12, r1); \ 1294 + /* restore original r1. */ \ 1295 + ld r1,GPR1(r1) 1296 + 1297 + /* 1298 + * Handle machine check early in real mode. We come here with 1299 + * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack. 1300 + */ 1301 + .align 7 1302 + .globl machine_check_handle_early 1303 + machine_check_handle_early: 1304 + std r0,GPR0(r1) /* Save r0 */ 1305 + EXCEPTION_PROLOG_COMMON_3(0x200) 1306 + bl .save_nvgprs 1307 + addi r3,r1,STACK_FRAME_OVERHEAD 1308 + bl .machine_check_early 1309 + ld r12,_MSR(r1) 1310 + #ifdef CONFIG_PPC_P7_NAP 1311 + /* 1312 + * Check if thread was in power saving mode. We come here when any 1313 + * of the following is true: 1314 + * a. thread wasn't in power saving mode 1315 + * b. thread was in power saving mode with no state loss or 1316 + * supervisor state loss 1317 + * 1318 + * Go back to nap again if (b) is true. 1319 + */ 1320 + rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ 1321 + beq 4f /* No, it wasn;t */ 1322 + /* Thread was in power saving mode. Go back to nap again. 
*/ 1323 + cmpwi r11,2 1324 + bne 3f 1325 + /* Supervisor state loss */ 1326 + li r0,1 1327 + stb r0,PACA_NAPSTATELOST(r13) 1328 + 3: bl .machine_check_queue_event 1329 + MACHINE_CHECK_HANDLER_WINDUP 1330 + GET_PACA(r13) 1331 + ld r1,PACAR1(r13) 1332 + b .power7_enter_nap_mode 1333 + 4: 1334 + #endif 1335 + /* 1336 + * Check if we are coming from hypervisor userspace. If yes then we 1337 + * continue in host kernel in V mode to deliver the MC event. 1338 + */ 1339 + rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */ 1340 + beq 5f 1341 + andi. r11,r12,MSR_PR /* See if coming from user. */ 1342 + bne 9f /* continue in V mode if we are. */ 1343 + 1344 + 5: 1345 + #ifdef CONFIG_KVM_BOOK3S_64_HV 1346 + /* 1347 + * We are coming from kernel context. Check if we are coming from 1348 + * guest. if yes, then we can continue. We will fall through 1349 + * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest. 1350 + */ 1351 + lbz r11,HSTATE_IN_GUEST(r13) 1352 + cmpwi r11,0 /* Check if coming from guest */ 1353 + bne 9f /* continue if we are. */ 1354 + #endif 1355 + /* 1356 + * At this point we are not sure about what context we come from. 1357 + * Queue up the MCE event and return from the interrupt. 1358 + * But before that, check if this is an un-recoverable exception. 1359 + * If yes, then stay on emergency stack and panic. 1360 + */ 1361 + andi. r11,r12,MSR_RI 1362 + bne 2f 1363 + 1: addi r3,r1,STACK_FRAME_OVERHEAD 1364 + bl .unrecoverable_exception 1365 + b 1b 1366 + 2: 1367 + /* 1368 + * Return from MC interrupt. 1369 + * Queue up the MCE event so that we can log it later, while 1370 + * returning from kernel or opal call. 1371 + */ 1372 + bl .machine_check_queue_event 1373 + MACHINE_CHECK_HANDLER_WINDUP 1374 + rfid 1375 + 9: 1376 + /* Deliver the machine check to host kernel in V mode. */ 1377 + MACHINE_CHECK_HANDLER_WINDUP 1378 + b machine_check_pSeries 1345 1379 1346 1380 /* 1347 1381 * r13 points to the PACA, r9 contains the saved CR,
+16
arch/powerpc/kernel/fpu.S
··· 81 81 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 82 82 83 83 /* 84 + * Enable use of the FPU, and VSX if possible, for the caller. 85 + */ 86 + _GLOBAL(fp_enable) 87 + mfmsr r3 88 + ori r3,r3,MSR_FP 89 + #ifdef CONFIG_VSX 90 + BEGIN_FTR_SECTION 91 + oris r3,r3,MSR_VSX@h 92 + END_FTR_SECTION_IFSET(CPU_FTR_VSX) 93 + #endif 94 + SYNC 95 + MTMSRD(r3) 96 + isync /* (not necessary for arch 2.02 and later) */ 97 + blr 98 + 99 + /* 84 100 * Load state from memory into FP registers including FPSCR. 85 101 * Assumes the caller has enabled FP in the MSR. 86 102 */
+2
arch/powerpc/kernel/fsl_booke_entry_mapping.S
··· 176 176 /* 7. Jump to KERNELBASE mapping */ 177 177 lis r6,(KERNELBASE & ~0xfff)@h 178 178 ori r6,r6,(KERNELBASE & ~0xfff)@l 179 + rlwinm r7,r25,0,0x03ffffff 180 + add r6,r7,r6 179 181 180 182 #elif defined(ENTRY_MAPPING_KEXEC_SETUP) 181 183 /*
+1
arch/powerpc/kernel/head_64.S
··· 23 23 */ 24 24 25 25 #include <linux/threads.h> 26 + #include <linux/init.h> 26 27 #include <asm/reg.h> 27 28 #include <asm/page.h> 28 29 #include <asm/mmu.h>
+237 -31
arch/powerpc/kernel/head_fsl_booke.S
··· 65 65 nop 66 66 67 67 /* Translate device tree address to physical, save in r30/r31 */ 68 - mfmsr r16 69 - mfspr r17,SPRN_PID 70 - rlwinm r17,r17,16,0x3fff0000 /* turn PID into MAS6[SPID] */ 71 - rlwimi r17,r16,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */ 72 - mtspr SPRN_MAS6,r17 73 - 74 - tlbsx 0,r3 /* must succeed */ 75 - 76 - mfspr r16,SPRN_MAS1 77 - mfspr r20,SPRN_MAS3 78 - rlwinm r17,r16,25,0x1f /* r17 = log2(page size) */ 79 - li r18,1024 80 - slw r18,r18,r17 /* r18 = page size */ 81 - addi r18,r18,-1 82 - and r19,r3,r18 /* r19 = page offset */ 83 - andc r31,r20,r18 /* r31 = page base */ 84 - or r31,r31,r19 /* r31 = devtree phys addr */ 85 - mfspr r30,SPRN_MAS7 68 + bl get_phys_addr 69 + mr r30,r3 70 + mr r31,r4 86 71 87 72 li r25,0 /* phys kernel start (low) */ 88 73 li r24,0 /* CPU number */ 89 74 li r23,0 /* phys kernel start (high) */ 75 + 76 + #ifdef CONFIG_RELOCATABLE 77 + LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */ 78 + 79 + /* Translate _stext address to physical, save in r23/r25 */ 80 + bl get_phys_addr 81 + mr r23,r3 82 + mr r25,r4 83 + 84 + bl 0f 85 + 0: mflr r8 86 + addis r3,r8,(is_second_reloc - 0b)@ha 87 + lwz r19,(is_second_reloc - 0b)@l(r3) 88 + 89 + /* Check if this is the second relocation. */ 90 + cmpwi r19,1 91 + bne 1f 92 + 93 + /* 94 + * For the second relocation, we already get the real memstart_addr 95 + * from device tree. So we will map PAGE_OFFSET to memstart_addr, 96 + * then the virtual address of start kernel should be: 97 + * PAGE_OFFSET + (kernstart_addr - memstart_addr) 98 + * Since the offset between kernstart_addr and memstart_addr should 99 + * never be beyond 1G, so we can just use the lower 32bit of them 100 + * for the calculation. 
101 + */ 102 + lis r3,PAGE_OFFSET@h 103 + 104 + addis r4,r8,(kernstart_addr - 0b)@ha 105 + addi r4,r4,(kernstart_addr - 0b)@l 106 + lwz r5,4(r4) 107 + 108 + addis r6,r8,(memstart_addr - 0b)@ha 109 + addi r6,r6,(memstart_addr - 0b)@l 110 + lwz r7,4(r6) 111 + 112 + subf r5,r7,r5 113 + add r3,r3,r5 114 + b 2f 115 + 116 + 1: 117 + /* 118 + * We have the runtime (virutal) address of our base. 119 + * We calculate our shift of offset from a 64M page. 120 + * We could map the 64M page we belong to at PAGE_OFFSET and 121 + * get going from there. 122 + */ 123 + lis r4,KERNELBASE@h 124 + ori r4,r4,KERNELBASE@l 125 + rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */ 126 + rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */ 127 + subf r3,r5,r6 /* r3 = r6 - r5 */ 128 + add r3,r4,r3 /* Required Virtual Address */ 129 + 130 + 2: bl relocate 131 + 132 + /* 133 + * For the second relocation, we already set the right tlb entries 134 + * for the kernel space, so skip the code in fsl_booke_entry_mapping.S 135 + */ 136 + cmpwi r19,1 137 + beq set_ivor 138 + #endif 90 139 91 140 /* We try to not make any assumptions about how the boot loader 92 141 * setup or used the TLBs. 
We invalidate all mappings from the ··· 162 113 #include "fsl_booke_entry_mapping.S" 163 114 #undef ENTRY_MAPPING_BOOT_SETUP 164 115 116 + set_ivor: 165 117 /* Establish the interrupt vector offsets */ 166 118 SET_IVOR(0, CriticalInput); 167 119 SET_IVOR(1, MachineCheck); ··· 216 166 /* Check to see if we're the second processor, and jump 217 167 * to the secondary_start code if so 218 168 */ 219 - lis r24, boot_cpuid@h 220 - ori r24, r24, boot_cpuid@l 169 + LOAD_REG_ADDR_PIC(r24, boot_cpuid) 221 170 lwz r24, 0(r24) 222 171 cmpwi r24, -1 223 172 mfspr r24,SPRN_PIR ··· 245 196 stw r24, TI_CPU(r22) 246 197 247 198 bl early_init 199 + 200 + #ifdef CONFIG_RELOCATABLE 201 + mr r3,r30 202 + mr r4,r31 203 + #ifdef CONFIG_PHYS_64BIT 204 + mr r5,r23 205 + mr r6,r25 206 + #else 207 + mr r5,r25 208 + #endif 209 + bl relocate_init 210 + #endif 248 211 249 212 #ifdef CONFIG_DYNAMIC_MEMSTART 250 213 lis r3,kernstart_addr@ha ··· 917 856 #endif /* CONFIG_SPE */ 918 857 919 858 /* 859 + * Translate the effec addr in r3 to phys addr. 
The phys addr will be put 860 + * into r3(higher 32bit) and r4(lower 32bit) 861 + */ 862 + get_phys_addr: 863 + mfmsr r8 864 + mfspr r9,SPRN_PID 865 + rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */ 866 + rlwimi r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */ 867 + mtspr SPRN_MAS6,r9 868 + 869 + tlbsx 0,r3 /* must succeed */ 870 + 871 + mfspr r8,SPRN_MAS1 872 + mfspr r12,SPRN_MAS3 873 + rlwinm r9,r8,25,0x1f /* r9 = log2(page size) */ 874 + li r10,1024 875 + slw r10,r10,r9 /* r10 = page size */ 876 + addi r10,r10,-1 877 + and r11,r3,r10 /* r11 = page offset */ 878 + andc r4,r12,r10 /* r4 = page base */ 879 + or r4,r4,r11 /* r4 = devtree phys addr */ 880 + #ifdef CONFIG_PHYS_64BIT 881 + mfspr r3,SPRN_MAS7 882 + #endif 883 + blr 884 + 885 + /* 920 886 * Global functions 921 887 */ 922 888 ··· 1145 1057 /* When we get here, r24 needs to hold the CPU # */ 1146 1058 .globl __secondary_start 1147 1059 __secondary_start: 1060 + LOAD_REG_ADDR_PIC(r3, tlbcam_index) 1061 + lwz r3,0(r3) 1062 + mtctr r3 1063 + li r26,0 /* r26 safe? */ 1064 + 1065 + bl switch_to_as1 1066 + mr r27,r3 /* tlb entry */ 1067 + /* Load each CAM entry */ 1068 + 1: mr r3,r26 1069 + bl loadcam_entry 1070 + addi r26,r26,1 1071 + bdnz 1b 1072 + mr r3,r27 /* tlb entry */ 1073 + LOAD_REG_ADDR_PIC(r4, memstart_addr) 1074 + lwz r4,0(r4) 1075 + mr r5,r25 /* phys kernel start */ 1076 + rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */ 1077 + subf r4,r5,r4 /* memstart_addr - phys kernel start */ 1078 + li r5,0 /* no device tree */ 1079 + li r6,0 /* not boot cpu */ 1080 + bl restore_to_as0 1081 + 1082 + 1148 1083 lis r3,__secondary_hold_acknowledge@h 1149 1084 ori r3,r3,__secondary_hold_acknowledge@l 1150 1085 stw r24,0(r3) ··· 1175 1064 li r3,0 1176 1065 mr r4,r24 /* Why? */ 1177 1066 bl call_setup_cpu 1178 - 1179 - lis r3,tlbcam_index@ha 1180 - lwz r3,tlbcam_index@l(r3) 1181 - mtctr r3 1182 - li r26,0 /* r26 safe? 
*/ 1183 - 1184 - /* Load each CAM entry */ 1185 - 1: mr r3,r26 1186 - bl loadcam_entry 1187 - addi r26,r26,1 1188 - bdnz 1b 1189 1067 1190 1068 /* get current_thread_info and current */ 1191 1069 lis r1,secondary_ti@ha ··· 1209 1109 __secondary_hold_acknowledge: 1210 1110 .long -1 1211 1111 #endif 1112 + 1113 + /* 1114 + * Create a tlb entry with the same effective and physical address as 1115 + * the tlb entry used by the current running code. But set the TS to 1. 1116 + * Then switch to the address space 1. It will return with the r3 set to 1117 + * the ESEL of the new created tlb. 1118 + */ 1119 + _GLOBAL(switch_to_as1) 1120 + mflr r5 1121 + 1122 + /* Find a entry not used */ 1123 + mfspr r3,SPRN_TLB1CFG 1124 + andi. r3,r3,0xfff 1125 + mfspr r4,SPRN_PID 1126 + rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */ 1127 + mtspr SPRN_MAS6,r4 1128 + 1: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */ 1129 + addi r3,r3,-1 1130 + rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ 1131 + mtspr SPRN_MAS0,r4 1132 + tlbre 1133 + mfspr r4,SPRN_MAS1 1134 + andis. r4,r4,MAS1_VALID@h 1135 + bne 1b 1136 + 1137 + /* Get the tlb entry used by the current running code */ 1138 + bl 0f 1139 + 0: mflr r4 1140 + tlbsx 0,r4 1141 + 1142 + mfspr r4,SPRN_MAS1 1143 + ori r4,r4,MAS1_TS /* Set the TS = 1 */ 1144 + mtspr SPRN_MAS1,r4 1145 + 1146 + mfspr r4,SPRN_MAS0 1147 + rlwinm r4,r4,0,~MAS0_ESEL_MASK 1148 + rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ 1149 + mtspr SPRN_MAS0,r4 1150 + tlbwe 1151 + isync 1152 + sync 1153 + 1154 + mfmsr r4 1155 + ori r4,r4,MSR_IS | MSR_DS 1156 + mtspr SPRN_SRR0,r5 1157 + mtspr SPRN_SRR1,r4 1158 + sync 1159 + rfi 1160 + 1161 + /* 1162 + * Restore to the address space 0 and also invalidate the tlb entry created 1163 + * by switch_to_as1. 1164 + * r3 - the tlb entry which should be invalidated 1165 + * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) 1166 + * r5 - device tree virtual address. If r4 is 0, r5 is ignored. 
1167 + * r6 - boot cpu 1168 + */ 1169 + _GLOBAL(restore_to_as0) 1170 + mflr r0 1171 + 1172 + bl 0f 1173 + 0: mflr r9 1174 + addi r9,r9,1f - 0b 1175 + 1176 + /* 1177 + * We may map the PAGE_OFFSET in AS0 to a different physical address, 1178 + * so we need calculate the right jump and device tree address based 1179 + * on the offset passed by r4. 1180 + */ 1181 + add r9,r9,r4 1182 + add r5,r5,r4 1183 + add r0,r0,r4 1184 + 1185 + 2: mfmsr r7 1186 + li r8,(MSR_IS | MSR_DS) 1187 + andc r7,r7,r8 1188 + 1189 + mtspr SPRN_SRR0,r9 1190 + mtspr SPRN_SRR1,r7 1191 + sync 1192 + rfi 1193 + 1194 + /* Invalidate the temporary tlb entry for AS1 */ 1195 + 1: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */ 1196 + rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ 1197 + mtspr SPRN_MAS0,r9 1198 + tlbre 1199 + mfspr r9,SPRN_MAS1 1200 + rlwinm r9,r9,0,2,31 /* Clear MAS1 Valid and IPPROT */ 1201 + mtspr SPRN_MAS1,r9 1202 + tlbwe 1203 + isync 1204 + 1205 + cmpwi r4,0 1206 + cmpwi cr1,r6,0 1207 + cror eq,4*cr1+eq,eq 1208 + bne 3f /* offset != 0 && is_boot_cpu */ 1209 + mtlr r0 1210 + blr 1211 + 1212 + /* 1213 + * The PAGE_OFFSET will map to a different physical address, 1214 + * jump to _start to do another relocation again. 1215 + */ 1216 + 3: mr r3,r5 1217 + bl _start 1212 1218 1213 1219 /* 1214 1220 * We put a few things here that have to be page-aligned. This stuff
-1
arch/powerpc/kernel/hw_breakpoint.c
··· 28 28 #include <linux/percpu.h> 29 29 #include <linux/kernel.h> 30 30 #include <linux/sched.h> 31 - #include <linux/init.h> 32 31 #include <linux/smp.h> 33 32 34 33 #include <asm/hw_breakpoint.h>
+1
arch/powerpc/kernel/idle_power7.S
··· 84 84 std r9,_MSR(r1) 85 85 std r1,PACAR1(r13) 86 86 87 + _GLOBAL(power7_enter_nap_mode) 87 88 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 88 89 /* Tell KVM we're napping */ 89 90 li r4,KVM_HWTHREAD_IN_NAP
-1
arch/powerpc/kernel/iomap.c
··· 3 3 * 4 4 * (C) Copyright 2004 Linus Torvalds 5 5 */ 6 - #include <linux/init.h> 7 6 #include <linux/pci.h> 8 7 #include <linux/mm.h> 9 8 #include <linux/export.h>
+62 -85
arch/powerpc/kernel/iommu.c
··· 251 251 252 252 if (dev) 253 253 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 254 - 1 << IOMMU_PAGE_SHIFT); 254 + 1 << tbl->it_page_shift); 255 255 else 256 - boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT); 256 + boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); 257 257 /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */ 258 258 259 - n = iommu_area_alloc(tbl->it_map, limit, start, npages, 260 - tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT, 261 - align_mask); 259 + n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, 260 + boundary_size >> tbl->it_page_shift, align_mask); 262 261 if (n == -1) { 263 262 if (likely(pass == 0)) { 264 263 /* First try the pool from the start */ ··· 319 320 return DMA_ERROR_CODE; 320 321 321 322 entry += tbl->it_offset; /* Offset into real TCE table */ 322 - ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */ 323 + ret = entry << tbl->it_page_shift; /* Set the return dma address */ 323 324 324 325 /* Put the TCEs in the HW table */ 325 326 build_fail = ppc_md.tce_build(tbl, entry, npages, 326 - (unsigned long)page & IOMMU_PAGE_MASK, 327 - direction, attrs); 327 + (unsigned long)page & 328 + IOMMU_PAGE_MASK(tbl), direction, attrs); 328 329 329 330 /* ppc_md.tce_build() only returns non-zero for transient errors. 
330 331 * Clean up the table bitmap in this case and return ··· 351 352 { 352 353 unsigned long entry, free_entry; 353 354 354 - entry = dma_addr >> IOMMU_PAGE_SHIFT; 355 + entry = dma_addr >> tbl->it_page_shift; 355 356 free_entry = entry - tbl->it_offset; 356 357 357 358 if (((free_entry + npages) > tbl->it_size) || ··· 400 401 unsigned long flags; 401 402 struct iommu_pool *pool; 402 403 403 - entry = dma_addr >> IOMMU_PAGE_SHIFT; 404 + entry = dma_addr >> tbl->it_page_shift; 404 405 free_entry = entry - tbl->it_offset; 405 406 406 407 pool = get_pool(tbl, free_entry); ··· 467 468 } 468 469 /* Allocate iommu entries for that segment */ 469 470 vaddr = (unsigned long) sg_virt(s); 470 - npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE); 471 + npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); 471 472 align = 0; 472 - if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE && 473 + if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && 473 474 (vaddr & ~PAGE_MASK) == 0) 474 - align = PAGE_SHIFT - IOMMU_PAGE_SHIFT; 475 + align = PAGE_SHIFT - tbl->it_page_shift; 475 476 entry = iommu_range_alloc(dev, tbl, npages, &handle, 476 - mask >> IOMMU_PAGE_SHIFT, align); 477 + mask >> tbl->it_page_shift, align); 477 478 478 479 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); 479 480 ··· 488 489 489 490 /* Convert entry to a dma_addr_t */ 490 491 entry += tbl->it_offset; 491 - dma_addr = entry << IOMMU_PAGE_SHIFT; 492 - dma_addr |= (s->offset & ~IOMMU_PAGE_MASK); 492 + dma_addr = entry << tbl->it_page_shift; 493 + dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); 493 494 494 495 DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n", 495 496 npages, entry, dma_addr); 496 497 497 498 /* Insert into HW table */ 498 499 build_fail = ppc_md.tce_build(tbl, entry, npages, 499 - vaddr & IOMMU_PAGE_MASK, 500 - direction, attrs); 500 + vaddr & IOMMU_PAGE_MASK(tbl), 501 + direction, attrs); 501 502 if(unlikely(build_fail)) 502 503 goto failure; 503 504 ··· 558 559 if 
(s->dma_length != 0) { 559 560 unsigned long vaddr, npages; 560 561 561 - vaddr = s->dma_address & IOMMU_PAGE_MASK; 562 + vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); 562 563 npages = iommu_num_pages(s->dma_address, s->dma_length, 563 - IOMMU_PAGE_SIZE); 564 + IOMMU_PAGE_SIZE(tbl)); 564 565 __iommu_free(tbl, vaddr, npages); 565 566 s->dma_address = DMA_ERROR_CODE; 566 567 s->dma_length = 0; ··· 591 592 if (sg->dma_length == 0) 592 593 break; 593 594 npages = iommu_num_pages(dma_handle, sg->dma_length, 594 - IOMMU_PAGE_SIZE); 595 + IOMMU_PAGE_SIZE(tbl)); 595 596 __iommu_free(tbl, dma_handle, npages); 596 597 sg = sg_next(sg); 597 598 } ··· 675 676 set_bit(0, tbl->it_map); 676 677 677 678 /* We only split the IOMMU table if we have 1GB or more of space */ 678 - if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024)) 679 + if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) 679 680 tbl->nr_pools = IOMMU_NR_POOLS; 680 681 else 681 682 tbl->nr_pools = 1; ··· 767 768 768 769 vaddr = page_address(page) + offset; 769 770 uaddr = (unsigned long)vaddr; 770 - npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE); 771 + npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); 771 772 772 773 if (tbl) { 773 774 align = 0; 774 - if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE && 775 + if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && 775 776 ((unsigned long)vaddr & ~PAGE_MASK) == 0) 776 - align = PAGE_SHIFT - IOMMU_PAGE_SHIFT; 777 + align = PAGE_SHIFT - tbl->it_page_shift; 777 778 778 779 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, 779 - mask >> IOMMU_PAGE_SHIFT, align, 780 + mask >> tbl->it_page_shift, align, 780 781 attrs); 781 782 if (dma_handle == DMA_ERROR_CODE) { 782 783 if (printk_ratelimit()) { ··· 785 786 npages); 786 787 } 787 788 } else 788 - dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); 789 + dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); 789 790 } 790 791 791 792 return dma_handle; ··· 800 801 
BUG_ON(direction == DMA_NONE); 801 802 802 803 if (tbl) { 803 - npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE); 804 + npages = iommu_num_pages(dma_handle, size, 805 + IOMMU_PAGE_SIZE(tbl)); 804 806 iommu_free(tbl, dma_handle, npages); 805 807 } 806 808 } ··· 845 845 memset(ret, 0, size); 846 846 847 847 /* Set up tces to cover the allocated range */ 848 - nio_pages = size >> IOMMU_PAGE_SHIFT; 849 - io_order = get_iommu_order(size); 848 + nio_pages = size >> tbl->it_page_shift; 849 + io_order = get_iommu_order(size, tbl); 850 850 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, 851 - mask >> IOMMU_PAGE_SHIFT, io_order, NULL); 851 + mask >> tbl->it_page_shift, io_order, NULL); 852 852 if (mapping == DMA_ERROR_CODE) { 853 853 free_pages((unsigned long)ret, order); 854 854 return NULL; ··· 864 864 unsigned int nio_pages; 865 865 866 866 size = PAGE_ALIGN(size); 867 - nio_pages = size >> IOMMU_PAGE_SHIFT; 867 + nio_pages = size >> tbl->it_page_shift; 868 868 iommu_free(tbl, dma_handle, nio_pages); 869 869 size = PAGE_ALIGN(size); 870 870 free_pages((unsigned long)vaddr, get_order(size)); ··· 935 935 if (tce_value) 936 936 return -EINVAL; 937 937 938 - if (ioba & ~IOMMU_PAGE_MASK) 938 + if (ioba & ~IOMMU_PAGE_MASK(tbl)) 939 939 return -EINVAL; 940 940 941 - ioba >>= IOMMU_PAGE_SHIFT; 941 + ioba >>= tbl->it_page_shift; 942 942 if (ioba < tbl->it_offset) 943 943 return -EINVAL; 944 944 ··· 955 955 if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ))) 956 956 return -EINVAL; 957 957 958 - if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ)) 958 + if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ)) 959 959 return -EINVAL; 960 960 961 - if (ioba & ~IOMMU_PAGE_MASK) 961 + if (ioba & ~IOMMU_PAGE_MASK(tbl)) 962 962 return -EINVAL; 963 963 964 - ioba >>= IOMMU_PAGE_SHIFT; 964 + ioba >>= tbl->it_page_shift; 965 965 if (ioba < tbl->it_offset) 966 966 return -EINVAL; 967 967 ··· 1037 1037 1038 1038 /* if (unlikely(ret)) 1039 1039 
pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n", 1040 - __func__, hwaddr, entry << IOMMU_PAGE_SHIFT, 1040 + __func__, hwaddr, entry << IOMMU_PAGE_SHIFT(tbl), 1041 1041 hwaddr, ret); */ 1042 1042 1043 1043 return ret; ··· 1049 1049 { 1050 1050 int ret; 1051 1051 struct page *page = NULL; 1052 - unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK; 1052 + unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK; 1053 1053 enum dma_data_direction direction = iommu_tce_direction(tce); 1054 1054 1055 1055 ret = get_user_pages_fast(tce & PAGE_MASK, 1, 1056 1056 direction != DMA_TO_DEVICE, &page); 1057 1057 if (unlikely(ret != 1)) { 1058 1058 /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n", 1059 - tce, entry << IOMMU_PAGE_SHIFT, ret); */ 1059 + tce, entry << IOMMU_PAGE_SHIFT(tbl), ret); */ 1060 1060 return -EFAULT; 1061 1061 } 1062 1062 hwaddr = (unsigned long) page_address(page) + offset; ··· 1067 1067 1068 1068 if (ret < 0) 1069 1069 pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n", 1070 - __func__, entry << IOMMU_PAGE_SHIFT, tce, ret); 1070 + __func__, entry << tbl->it_page_shift, tce, ret); 1071 1071 1072 1072 return ret; 1073 1073 } ··· 1105 1105 } 1106 1106 EXPORT_SYMBOL_GPL(iommu_release_ownership); 1107 1107 1108 - static int iommu_add_device(struct device *dev) 1108 + int iommu_add_device(struct device *dev) 1109 1109 { 1110 1110 struct iommu_table *tbl; 1111 1111 int ret = 0; ··· 1127 1127 pr_debug("iommu_tce: adding %s to iommu group %d\n", 1128 1128 dev_name(dev), iommu_group_id(tbl->it_group)); 1129 1129 1130 + if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) { 1131 + pr_err("iommu_tce: unsupported iommu page size."); 1132 + pr_err("%s has not been added\n", dev_name(dev)); 1133 + return -EINVAL; 1134 + } 1135 + 1130 1136 ret = iommu_group_add_device(tbl->it_group, dev); 1131 1137 if (ret < 0) 1132 1138 pr_err("iommu_tce: %s has not been added, ret=%d\n", ··· 1140 1134 1141 1135 return 
ret; 1142 1136 } 1137 + EXPORT_SYMBOL_GPL(iommu_add_device); 1143 1138 1144 - static void iommu_del_device(struct device *dev) 1139 + void iommu_del_device(struct device *dev) 1145 1140 { 1141 + /* 1142 + * Some devices might not have IOMMU table and group 1143 + * and we needn't detach them from the associated 1144 + * IOMMU groups 1145 + */ 1146 + if (!dev->iommu_group) { 1147 + pr_debug("iommu_tce: skipping device %s with no tbl\n", 1148 + dev_name(dev)); 1149 + return; 1150 + } 1151 + 1146 1152 iommu_group_remove_device(dev); 1147 1153 } 1148 - 1149 - static int iommu_bus_notifier(struct notifier_block *nb, 1150 - unsigned long action, void *data) 1151 - { 1152 - struct device *dev = data; 1153 - 1154 - switch (action) { 1155 - case BUS_NOTIFY_ADD_DEVICE: 1156 - return iommu_add_device(dev); 1157 - case BUS_NOTIFY_DEL_DEVICE: 1158 - iommu_del_device(dev); 1159 - return 0; 1160 - default: 1161 - return 0; 1162 - } 1163 - } 1164 - 1165 - static struct notifier_block tce_iommu_bus_nb = { 1166 - .notifier_call = iommu_bus_notifier, 1167 - }; 1168 - 1169 - static int __init tce_iommu_init(void) 1170 - { 1171 - struct pci_dev *pdev = NULL; 1172 - 1173 - BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE); 1174 - 1175 - for_each_pci_dev(pdev) 1176 - iommu_add_device(&pdev->dev); 1177 - 1178 - bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); 1179 - return 0; 1180 - } 1181 - 1182 - subsys_initcall_sync(tce_iommu_init); 1183 - 1184 - #else 1185 - 1186 - void iommu_register_group(struct iommu_table *tbl, 1187 - int pci_domain_number, unsigned long pe_num) 1188 - { 1189 - } 1154 + EXPORT_SYMBOL_GPL(iommu_del_device); 1190 1155 1191 1156 #endif /* CONFIG_IOMMU_API */
+9 -3
arch/powerpc/kernel/irq.c
··· 354 354 355 355 seq_printf(p, "%*s: ", prec, "LOC"); 356 356 for_each_online_cpu(j) 357 - seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); 358 - seq_printf(p, " Local timer interrupts\n"); 357 + seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); 358 + seq_printf(p, " Local timer interrupts for timer event device\n"); 359 + 360 + seq_printf(p, "%*s: ", prec, "LOC"); 361 + for_each_online_cpu(j) 362 + seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); 363 + seq_printf(p, " Local timer interrupts for others\n"); 359 364 360 365 seq_printf(p, "%*s: ", prec, "SPU"); 361 366 for_each_online_cpu(j) ··· 394 389 */ 395 390 u64 arch_irq_stat_cpu(unsigned int cpu) 396 391 { 397 - u64 sum = per_cpu(irq_stat, cpu).timer_irqs; 392 + u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; 398 393 399 394 sum += per_cpu(irq_stat, cpu).pmu_irqs; 400 395 sum += per_cpu(irq_stat, cpu).mce_exceptions; 401 396 sum += per_cpu(irq_stat, cpu).spurious_irqs; 397 + sum += per_cpu(irq_stat, cpu).timer_irqs_others; 402 398 #ifdef CONFIG_PPC_DOORBELL 403 399 sum += per_cpu(irq_stat, cpu).doorbell_irqs; 404 400 #endif
-1
arch/powerpc/kernel/kgdb.c
··· 15 15 */ 16 16 17 17 #include <linux/kernel.h> 18 - #include <linux/init.h> 19 18 #include <linux/kgdb.h> 20 19 #include <linux/smp.h> 21 20 #include <linux/signal.h>
+352
arch/powerpc/kernel/mce.c
··· 1 + /* 2 + * Machine check exception handling. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 + * 18 + * Copyright 2013 IBM Corporation 19 + * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> 20 + */ 21 + 22 + #undef DEBUG 23 + #define pr_fmt(fmt) "mce: " fmt 24 + 25 + #include <linux/types.h> 26 + #include <linux/ptrace.h> 27 + #include <linux/percpu.h> 28 + #include <linux/export.h> 29 + #include <linux/irq_work.h> 30 + #include <asm/mce.h> 31 + 32 + static DEFINE_PER_CPU(int, mce_nest_count); 33 + static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event); 34 + 35 + /* Queue for delayed MCE events. 
*/ 36 + static DEFINE_PER_CPU(int, mce_queue_count); 37 + static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue); 38 + 39 + static void machine_check_process_queued_event(struct irq_work *work); 40 + struct irq_work mce_event_process_work = { 41 + .func = machine_check_process_queued_event, 42 + }; 43 + 44 + static void mce_set_error_info(struct machine_check_event *mce, 45 + struct mce_error_info *mce_err) 46 + { 47 + mce->error_type = mce_err->error_type; 48 + switch (mce_err->error_type) { 49 + case MCE_ERROR_TYPE_UE: 50 + mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type; 51 + break; 52 + case MCE_ERROR_TYPE_SLB: 53 + mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type; 54 + break; 55 + case MCE_ERROR_TYPE_ERAT: 56 + mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type; 57 + break; 58 + case MCE_ERROR_TYPE_TLB: 59 + mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type; 60 + break; 61 + case MCE_ERROR_TYPE_UNKNOWN: 62 + default: 63 + break; 64 + } 65 + } 66 + 67 + /* 68 + * Decode and save high level MCE information into per cpu buffer which 69 + * is an array of machine_check_event structure. 70 + */ 71 + void save_mce_event(struct pt_regs *regs, long handled, 72 + struct mce_error_info *mce_err, 73 + uint64_t addr) 74 + { 75 + uint64_t srr1; 76 + int index = __get_cpu_var(mce_nest_count)++; 77 + struct machine_check_event *mce = &__get_cpu_var(mce_event[index]); 78 + 79 + /* 80 + * Return if we don't have enough space to log mce event. 81 + * mce_nest_count may go beyond MAX_MC_EVT but that's ok, 82 + * the check below will stop buffer overrun. 
83 + */ 84 + if (index >= MAX_MC_EVT) 85 + return; 86 + 87 + /* Populate generic machine check info */ 88 + mce->version = MCE_V1; 89 + mce->srr0 = regs->nip; 90 + mce->srr1 = regs->msr; 91 + mce->gpr3 = regs->gpr[3]; 92 + mce->in_use = 1; 93 + 94 + mce->initiator = MCE_INITIATOR_CPU; 95 + if (handled) 96 + mce->disposition = MCE_DISPOSITION_RECOVERED; 97 + else 98 + mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; 99 + mce->severity = MCE_SEV_ERROR_SYNC; 100 + 101 + srr1 = regs->msr; 102 + 103 + /* 104 + * Populate the mce error_type and type-specific error_type. 105 + */ 106 + mce_set_error_info(mce, mce_err); 107 + 108 + if (!addr) 109 + return; 110 + 111 + if (mce->error_type == MCE_ERROR_TYPE_TLB) { 112 + mce->u.tlb_error.effective_address_provided = true; 113 + mce->u.tlb_error.effective_address = addr; 114 + } else if (mce->error_type == MCE_ERROR_TYPE_SLB) { 115 + mce->u.slb_error.effective_address_provided = true; 116 + mce->u.slb_error.effective_address = addr; 117 + } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) { 118 + mce->u.erat_error.effective_address_provided = true; 119 + mce->u.erat_error.effective_address = addr; 120 + } else if (mce->error_type == MCE_ERROR_TYPE_UE) { 121 + mce->u.ue_error.effective_address_provided = true; 122 + mce->u.ue_error.effective_address = addr; 123 + } 124 + return; 125 + } 126 + 127 + /* 128 + * get_mce_event: 129 + * mce Pointer to machine_check_event structure to be filled. 130 + * release Flag to indicate whether to free the event slot or not. 131 + * 0 <= do not release the mce event. Caller will invoke 132 + * release_mce_event() once event has been consumed. 133 + * 1 <= release the slot. 134 + * 135 + * return 1 = success 136 + * 0 = failure 137 + * 138 + * get_mce_event() will be called by platform specific machine check 139 + * handle routine and in KVM. 
140 + * When we call get_mce_event(), we are still in interrupt context and 141 + * preemption will not be scheduled until ret_from_expect() routine 142 + * is called. 143 + */ 144 + int get_mce_event(struct machine_check_event *mce, bool release) 145 + { 146 + int index = __get_cpu_var(mce_nest_count) - 1; 147 + struct machine_check_event *mc_evt; 148 + int ret = 0; 149 + 150 + /* Sanity check */ 151 + if (index < 0) 152 + return ret; 153 + 154 + /* Check if we have MCE info to process. */ 155 + if (index < MAX_MC_EVT) { 156 + mc_evt = &__get_cpu_var(mce_event[index]); 157 + /* Copy the event structure and release the original */ 158 + if (mce) 159 + *mce = *mc_evt; 160 + if (release) 161 + mc_evt->in_use = 0; 162 + ret = 1; 163 + } 164 + /* Decrement the count to free the slot. */ 165 + if (release) 166 + __get_cpu_var(mce_nest_count)--; 167 + 168 + return ret; 169 + } 170 + 171 + void release_mce_event(void) 172 + { 173 + get_mce_event(NULL, true); 174 + } 175 + 176 + /* 177 + * Queue up the MCE event which then can be handled later. 178 + */ 179 + void machine_check_queue_event(void) 180 + { 181 + int index; 182 + struct machine_check_event evt; 183 + 184 + if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 185 + return; 186 + 187 + index = __get_cpu_var(mce_queue_count)++; 188 + /* If queue is full, just return for now. */ 189 + if (index >= MAX_MC_EVT) { 190 + __get_cpu_var(mce_queue_count)--; 191 + return; 192 + } 193 + __get_cpu_var(mce_event_queue[index]) = evt; 194 + 195 + /* Queue irq work to process this event later. */ 196 + irq_work_queue(&mce_event_process_work); 197 + } 198 + 199 + /* 200 + * process pending MCE event from the mce event queue. This function will be 201 + * called during syscall exit. 202 + */ 203 + static void machine_check_process_queued_event(struct irq_work *work) 204 + { 205 + int index; 206 + 207 + /* 208 + * For now just print it to console. 209 + * TODO: log this error event to FSP or nvram. 
210 + */ 211 + while (__get_cpu_var(mce_queue_count) > 0) { 212 + index = __get_cpu_var(mce_queue_count) - 1; 213 + machine_check_print_event_info( 214 + &__get_cpu_var(mce_event_queue[index])); 215 + __get_cpu_var(mce_queue_count)--; 216 + } 217 + } 218 + 219 + void machine_check_print_event_info(struct machine_check_event *evt) 220 + { 221 + const char *level, *sevstr, *subtype; 222 + static const char *mc_ue_types[] = { 223 + "Indeterminate", 224 + "Instruction fetch", 225 + "Page table walk ifetch", 226 + "Load/Store", 227 + "Page table walk Load/Store", 228 + }; 229 + static const char *mc_slb_types[] = { 230 + "Indeterminate", 231 + "Parity", 232 + "Multihit", 233 + }; 234 + static const char *mc_erat_types[] = { 235 + "Indeterminate", 236 + "Parity", 237 + "Multihit", 238 + }; 239 + static const char *mc_tlb_types[] = { 240 + "Indeterminate", 241 + "Parity", 242 + "Multihit", 243 + }; 244 + 245 + /* Print things out */ 246 + if (evt->version != MCE_V1) { 247 + pr_err("Machine Check Exception, Unknown event version %d !\n", 248 + evt->version); 249 + return; 250 + } 251 + switch (evt->severity) { 252 + case MCE_SEV_NO_ERROR: 253 + level = KERN_INFO; 254 + sevstr = "Harmless"; 255 + break; 256 + case MCE_SEV_WARNING: 257 + level = KERN_WARNING; 258 + sevstr = ""; 259 + break; 260 + case MCE_SEV_ERROR_SYNC: 261 + level = KERN_ERR; 262 + sevstr = "Severe"; 263 + break; 264 + case MCE_SEV_FATAL: 265 + default: 266 + level = KERN_ERR; 267 + sevstr = "Fatal"; 268 + break; 269 + } 270 + 271 + printk("%s%s Machine check interrupt [%s]\n", level, sevstr, 272 + evt->disposition == MCE_DISPOSITION_RECOVERED ? 273 + "Recovered" : "[Not recovered"); 274 + printk("%s Initiator: %s\n", level, 275 + evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown"); 276 + switch (evt->error_type) { 277 + case MCE_ERROR_TYPE_UE: 278 + subtype = evt->u.ue_error.ue_error_type < 279 + ARRAY_SIZE(mc_ue_types) ? 
280 + mc_ue_types[evt->u.ue_error.ue_error_type] 281 + : "Unknown"; 282 + printk("%s Error type: UE [%s]\n", level, subtype); 283 + if (evt->u.ue_error.effective_address_provided) 284 + printk("%s Effective address: %016llx\n", 285 + level, evt->u.ue_error.effective_address); 286 + if (evt->u.ue_error.physical_address_provided) 287 + printk("%s Physial address: %016llx\n", 288 + level, evt->u.ue_error.physical_address); 289 + break; 290 + case MCE_ERROR_TYPE_SLB: 291 + subtype = evt->u.slb_error.slb_error_type < 292 + ARRAY_SIZE(mc_slb_types) ? 293 + mc_slb_types[evt->u.slb_error.slb_error_type] 294 + : "Unknown"; 295 + printk("%s Error type: SLB [%s]\n", level, subtype); 296 + if (evt->u.slb_error.effective_address_provided) 297 + printk("%s Effective address: %016llx\n", 298 + level, evt->u.slb_error.effective_address); 299 + break; 300 + case MCE_ERROR_TYPE_ERAT: 301 + subtype = evt->u.erat_error.erat_error_type < 302 + ARRAY_SIZE(mc_erat_types) ? 303 + mc_erat_types[evt->u.erat_error.erat_error_type] 304 + : "Unknown"; 305 + printk("%s Error type: ERAT [%s]\n", level, subtype); 306 + if (evt->u.erat_error.effective_address_provided) 307 + printk("%s Effective address: %016llx\n", 308 + level, evt->u.erat_error.effective_address); 309 + break; 310 + case MCE_ERROR_TYPE_TLB: 311 + subtype = evt->u.tlb_error.tlb_error_type < 312 + ARRAY_SIZE(mc_tlb_types) ? 
313 + mc_tlb_types[evt->u.tlb_error.tlb_error_type] 314 + : "Unknown"; 315 + printk("%s Error type: TLB [%s]\n", level, subtype); 316 + if (evt->u.tlb_error.effective_address_provided) 317 + printk("%s Effective address: %016llx\n", 318 + level, evt->u.tlb_error.effective_address); 319 + break; 320 + default: 321 + case MCE_ERROR_TYPE_UNKNOWN: 322 + printk("%s Error type: Unknown\n", level); 323 + break; 324 + } 325 + } 326 + 327 + uint64_t get_mce_fault_addr(struct machine_check_event *evt) 328 + { 329 + switch (evt->error_type) { 330 + case MCE_ERROR_TYPE_UE: 331 + if (evt->u.ue_error.effective_address_provided) 332 + return evt->u.ue_error.effective_address; 333 + break; 334 + case MCE_ERROR_TYPE_SLB: 335 + if (evt->u.slb_error.effective_address_provided) 336 + return evt->u.slb_error.effective_address; 337 + break; 338 + case MCE_ERROR_TYPE_ERAT: 339 + if (evt->u.erat_error.effective_address_provided) 340 + return evt->u.erat_error.effective_address; 341 + break; 342 + case MCE_ERROR_TYPE_TLB: 343 + if (evt->u.tlb_error.effective_address_provided) 344 + return evt->u.tlb_error.effective_address; 345 + break; 346 + default: 347 + case MCE_ERROR_TYPE_UNKNOWN: 348 + break; 349 + } 350 + return 0; 351 + } 352 + EXPORT_SYMBOL(get_mce_fault_addr);
+284
arch/powerpc/kernel/mce_power.c
··· 1 + /* 2 + * Machine check exception handling CPU-side for power7 and power8 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 + * 18 + * Copyright 2013 IBM Corporation 19 + * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> 20 + */ 21 + 22 + #undef DEBUG 23 + #define pr_fmt(fmt) "mce_power: " fmt 24 + 25 + #include <linux/types.h> 26 + #include <linux/ptrace.h> 27 + #include <asm/mmu.h> 28 + #include <asm/mce.h> 29 + 30 + /* flush SLBs and reload */ 31 + static void flush_and_reload_slb(void) 32 + { 33 + struct slb_shadow *slb; 34 + unsigned long i, n; 35 + 36 + /* Invalidate all SLBs */ 37 + asm volatile("slbmte %0,%0; slbia" : : "r" (0)); 38 + 39 + #ifdef CONFIG_KVM_BOOK3S_HANDLER 40 + /* 41 + * If machine check is hit when in guest or in transition, we will 42 + * only flush the SLBs and continue. 43 + */ 44 + if (get_paca()->kvm_hstate.in_guest) 45 + return; 46 + #endif 47 + 48 + /* For host kernel, reload the SLBs from shadow SLB buffer. 
*/ 49 + slb = get_slb_shadow(); 50 + if (!slb) 51 + return; 52 + 53 + n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE); 54 + 55 + /* Load up the SLB entries from shadow SLB */ 56 + for (i = 0; i < n; i++) { 57 + unsigned long rb = be64_to_cpu(slb->save_area[i].esid); 58 + unsigned long rs = be64_to_cpu(slb->save_area[i].vsid); 59 + 60 + rb = (rb & ~0xFFFul) | i; 61 + asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); 62 + } 63 + } 64 + 65 + static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) 66 + { 67 + long handled = 1; 68 + 69 + /* 70 + * flush and reload SLBs for SLB errors and flush TLBs for TLB errors. 71 + * reset the error bits whenever we handle them so that at the end 72 + * we can check whether we handled all of them or not. 73 + * */ 74 + if (dsisr & slb_error_bits) { 75 + flush_and_reload_slb(); 76 + /* reset error bits */ 77 + dsisr &= ~(slb_error_bits); 78 + } 79 + if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { 80 + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) 81 + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); 82 + /* reset error bits */ 83 + dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; 84 + } 85 + /* Any other errors we don't understand? */ 86 + if (dsisr & 0xffffffffUL) 87 + handled = 0; 88 + 89 + return handled; 90 + } 91 + 92 + static long mce_handle_derror_p7(uint64_t dsisr) 93 + { 94 + return mce_handle_derror(dsisr, P7_DSISR_MC_SLB_ERRORS); 95 + } 96 + 97 + static long mce_handle_common_ierror(uint64_t srr1) 98 + { 99 + long handled = 0; 100 + 101 + switch (P7_SRR1_MC_IFETCH(srr1)) { 102 + case 0: 103 + break; 104 + case P7_SRR1_MC_IFETCH_SLB_PARITY: 105 + case P7_SRR1_MC_IFETCH_SLB_MULTIHIT: 106 + /* flush and reload SLBs for SLB errors. 
*/ 107 + flush_and_reload_slb(); 108 + handled = 1; 109 + break; 110 + case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: 111 + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { 112 + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); 113 + handled = 1; 114 + } 115 + break; 116 + default: 117 + break; 118 + } 119 + 120 + return handled; 121 + } 122 + 123 + static long mce_handle_ierror_p7(uint64_t srr1) 124 + { 125 + long handled = 0; 126 + 127 + handled = mce_handle_common_ierror(srr1); 128 + 129 + if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) { 130 + flush_and_reload_slb(); 131 + handled = 1; 132 + } 133 + return handled; 134 + } 135 + 136 + static void mce_get_common_ierror(struct mce_error_info *mce_err, uint64_t srr1) 137 + { 138 + switch (P7_SRR1_MC_IFETCH(srr1)) { 139 + case P7_SRR1_MC_IFETCH_SLB_PARITY: 140 + mce_err->error_type = MCE_ERROR_TYPE_SLB; 141 + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; 142 + break; 143 + case P7_SRR1_MC_IFETCH_SLB_MULTIHIT: 144 + mce_err->error_type = MCE_ERROR_TYPE_SLB; 145 + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; 146 + break; 147 + case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: 148 + mce_err->error_type = MCE_ERROR_TYPE_TLB; 149 + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; 150 + break; 151 + case P7_SRR1_MC_IFETCH_UE: 152 + case P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL: 153 + mce_err->error_type = MCE_ERROR_TYPE_UE; 154 + mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH; 155 + break; 156 + case P7_SRR1_MC_IFETCH_UE_TLB_RELOAD: 157 + mce_err->error_type = MCE_ERROR_TYPE_UE; 158 + mce_err->u.ue_error_type = 159 + MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH; 160 + break; 161 + } 162 + } 163 + 164 + static void mce_get_ierror_p7(struct mce_error_info *mce_err, uint64_t srr1) 165 + { 166 + mce_get_common_ierror(mce_err, srr1); 167 + if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) { 168 + mce_err->error_type = MCE_ERROR_TYPE_SLB; 169 + mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE; 170 + } 171 + } 172 + 173 + static 
void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr) 174 + { 175 + if (dsisr & P7_DSISR_MC_UE) { 176 + mce_err->error_type = MCE_ERROR_TYPE_UE; 177 + mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE; 178 + } else if (dsisr & P7_DSISR_MC_UE_TABLEWALK) { 179 + mce_err->error_type = MCE_ERROR_TYPE_UE; 180 + mce_err->u.ue_error_type = 181 + MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE; 182 + } else if (dsisr & P7_DSISR_MC_ERAT_MULTIHIT) { 183 + mce_err->error_type = MCE_ERROR_TYPE_ERAT; 184 + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; 185 + } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT) { 186 + mce_err->error_type = MCE_ERROR_TYPE_SLB; 187 + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; 188 + } else if (dsisr & P7_DSISR_MC_SLB_PARITY_MFSLB) { 189 + mce_err->error_type = MCE_ERROR_TYPE_SLB; 190 + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; 191 + } else if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { 192 + mce_err->error_type = MCE_ERROR_TYPE_TLB; 193 + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; 194 + } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT_PARITY) { 195 + mce_err->error_type = MCE_ERROR_TYPE_SLB; 196 + mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE; 197 + } 198 + } 199 + 200 + long __machine_check_early_realmode_p7(struct pt_regs *regs) 201 + { 202 + uint64_t srr1, addr; 203 + long handled = 1; 204 + struct mce_error_info mce_error_info = { 0 }; 205 + 206 + srr1 = regs->msr; 207 + 208 + /* 209 + * Handle memory errors depending whether this was a load/store or 210 + * ifetch exception. 
Also, populate the mce error_type and 211 + * type-specific error_type from either SRR1 or DSISR, depending 212 + * whether this was a load/store or ifetch exception 213 + */ 214 + if (P7_SRR1_MC_LOADSTORE(srr1)) { 215 + handled = mce_handle_derror_p7(regs->dsisr); 216 + mce_get_derror_p7(&mce_error_info, regs->dsisr); 217 + addr = regs->dar; 218 + } else { 219 + handled = mce_handle_ierror_p7(srr1); 220 + mce_get_ierror_p7(&mce_error_info, srr1); 221 + addr = regs->nip; 222 + } 223 + 224 + save_mce_event(regs, handled, &mce_error_info, addr); 225 + return handled; 226 + } 227 + 228 + static void mce_get_ierror_p8(struct mce_error_info *mce_err, uint64_t srr1) 229 + { 230 + mce_get_common_ierror(mce_err, srr1); 231 + if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) { 232 + mce_err->error_type = MCE_ERROR_TYPE_ERAT; 233 + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; 234 + } 235 + } 236 + 237 + static void mce_get_derror_p8(struct mce_error_info *mce_err, uint64_t dsisr) 238 + { 239 + mce_get_derror_p7(mce_err, dsisr); 240 + if (dsisr & P8_DSISR_MC_ERAT_MULTIHIT_SEC) { 241 + mce_err->error_type = MCE_ERROR_TYPE_ERAT; 242 + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; 243 + } 244 + } 245 + 246 + static long mce_handle_ierror_p8(uint64_t srr1) 247 + { 248 + long handled = 0; 249 + 250 + handled = mce_handle_common_ierror(srr1); 251 + 252 + if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) { 253 + flush_and_reload_slb(); 254 + handled = 1; 255 + } 256 + return handled; 257 + } 258 + 259 + static long mce_handle_derror_p8(uint64_t dsisr) 260 + { 261 + return mce_handle_derror(dsisr, P8_DSISR_MC_SLB_ERRORS); 262 + } 263 + 264 + long __machine_check_early_realmode_p8(struct pt_regs *regs) 265 + { 266 + uint64_t srr1, addr; 267 + long handled = 1; 268 + struct mce_error_info mce_error_info = { 0 }; 269 + 270 + srr1 = regs->msr; 271 + 272 + if (P7_SRR1_MC_LOADSTORE(srr1)) { 273 + handled = mce_handle_derror_p8(regs->dsisr); 274 
+ mce_get_derror_p8(&mce_error_info, regs->dsisr); 275 + addr = regs->dar; 276 + } else { 277 + handled = mce_handle_ierror_p8(srr1); 278 + mce_get_ierror_p8(&mce_error_info, srr1); 279 + addr = regs->nip; 280 + } 281 + 282 + save_mce_event(regs, handled, &mce_error_info, addr); 283 + return handled; 284 + }
+3 -1
arch/powerpc/kernel/misc_32.S
··· 344 344 */ 345 345 _KPROBE(flush_icache_range) 346 346 BEGIN_FTR_SECTION 347 - isync 347 + PURGE_PREFETCHED_INS 348 348 blr /* for 601, do nothing */ 349 349 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 350 350 li r5,L1_CACHE_BYTES-1 ··· 448 448 */ 449 449 _GLOBAL(__flush_dcache_icache) 450 450 BEGIN_FTR_SECTION 451 + PURGE_PREFETCHED_INS 451 452 blr 452 453 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 453 454 rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */ ··· 490 489 */ 491 490 _GLOBAL(__flush_dcache_icache_phys) 492 491 BEGIN_FTR_SECTION 492 + PURGE_PREFETCHED_INS 493 493 blr /* for 601, do nothing */ 494 494 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 495 495 mfmsr r10
+6
arch/powerpc/kernel/misc_64.S
··· 67 67 68 68 _KPROBE(flush_icache_range) 69 69 BEGIN_FTR_SECTION 70 + PURGE_PREFETCHED_INS 70 71 blr 71 72 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 72 73 /* ··· 211 210 * 212 211 * Different systems have different cache line sizes 213 212 */ 213 + 214 + BEGIN_FTR_SECTION 215 + PURGE_PREFETCHED_INS 216 + blr 217 + END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 214 218 215 219 /* Flush the dcache */ 216 220 ld r7,PPC64_CACHES@toc(r2)
+30 -7
arch/powerpc/kernel/paca.c
··· 99 99 * 3 persistent SLBs are registered here. The buffer will be zero 100 100 * initially, hence will all be invaild until we actually write them. 101 101 */ 102 - struct slb_shadow slb_shadow[] __cacheline_aligned = { 103 - [0 ... (NR_CPUS-1)] = { 104 - .persistent = cpu_to_be32(SLB_NUM_BOLTED), 105 - .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)), 106 - }, 107 - }; 102 + static struct slb_shadow *slb_shadow; 103 + 104 + static void __init allocate_slb_shadows(int nr_cpus, int limit) 105 + { 106 + int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus); 107 + slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit)); 108 + memset(slb_shadow, 0, size); 109 + } 110 + 111 + static struct slb_shadow * __init init_slb_shadow(int cpu) 112 + { 113 + struct slb_shadow *s = &slb_shadow[cpu]; 114 + 115 + s->persistent = cpu_to_be32(SLB_NUM_BOLTED); 116 + s->buffer_length = cpu_to_be32(sizeof(*s)); 117 + 118 + return s; 119 + } 120 + 121 + #else /* CONFIG_PPC_STD_MMU_64 */ 122 + 123 + static void __init allocate_slb_shadows(int nr_cpus, int limit) { } 108 124 109 125 #endif /* CONFIG_PPC_STD_MMU_64 */ 110 126 ··· 158 142 new_paca->__current = &init_task; 159 143 new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL; 160 144 #ifdef CONFIG_PPC_STD_MMU_64 161 - new_paca->slb_shadow_ptr = &slb_shadow[cpu]; 145 + new_paca->slb_shadow_ptr = init_slb_shadow(cpu); 162 146 #endif /* CONFIG_PPC_STD_MMU_64 */ 147 + 148 + #ifdef CONFIG_PPC_BOOK3E 149 + /* For now -- if we have threads this will be adjusted later */ 150 + new_paca->tcd_ptr = &new_paca->tcd; 151 + #endif 163 152 } 164 153 165 154 /* Put the paca pointer into r13 and SPRG_PACA */ ··· 210 189 paca_size, nr_cpu_ids, paca); 211 190 212 191 allocate_lppacas(nr_cpu_ids, limit); 192 + 193 + allocate_slb_shadows(nr_cpu_ids, limit); 213 194 214 195 /* Can't use for_each_*_cpu, as they aren't functional yet */ 215 196 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+162 -15
arch/powerpc/kernel/process.c
··· 25 25 #include <linux/slab.h> 26 26 #include <linux/user.h> 27 27 #include <linux/elf.h> 28 - #include <linux/init.h> 29 28 #include <linux/prctl.h> 30 29 #include <linux/init_task.h> 31 30 #include <linux/export.h> ··· 73 74 struct task_struct *last_task_used_spe = NULL; 74 75 #endif 75 76 77 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 78 + void giveup_fpu_maybe_transactional(struct task_struct *tsk) 79 + { 80 + /* 81 + * If we are saving the current thread's registers, and the 82 + * thread is in a transactional state, set the TIF_RESTORE_TM 83 + * bit so that we know to restore the registers before 84 + * returning to userspace. 85 + */ 86 + if (tsk == current && tsk->thread.regs && 87 + MSR_TM_ACTIVE(tsk->thread.regs->msr) && 88 + !test_thread_flag(TIF_RESTORE_TM)) { 89 + tsk->thread.tm_orig_msr = tsk->thread.regs->msr; 90 + set_thread_flag(TIF_RESTORE_TM); 91 + } 92 + 93 + giveup_fpu(tsk); 94 + } 95 + 96 + void giveup_altivec_maybe_transactional(struct task_struct *tsk) 97 + { 98 + /* 99 + * If we are saving the current thread's registers, and the 100 + * thread is in a transactional state, set the TIF_RESTORE_TM 101 + * bit so that we know to restore the registers before 102 + * returning to userspace. 
103 + */ 104 + if (tsk == current && tsk->thread.regs && 105 + MSR_TM_ACTIVE(tsk->thread.regs->msr) && 106 + !test_thread_flag(TIF_RESTORE_TM)) { 107 + tsk->thread.tm_orig_msr = tsk->thread.regs->msr; 108 + set_thread_flag(TIF_RESTORE_TM); 109 + } 110 + 111 + giveup_altivec(tsk); 112 + } 113 + 114 + #else 115 + #define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk) 116 + #define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk) 117 + #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 118 + 76 119 #ifdef CONFIG_PPC_FPU 77 120 /* 78 121 * Make sure the floating-point register state in the ··· 143 102 */ 144 103 BUG_ON(tsk != current); 145 104 #endif 146 - giveup_fpu(tsk); 105 + giveup_fpu_maybe_transactional(tsk); 147 106 } 148 107 preempt_enable(); 149 108 } 150 109 } 151 110 EXPORT_SYMBOL_GPL(flush_fp_to_thread); 152 - #endif 111 + #endif /* CONFIG_PPC_FPU */ 153 112 154 113 void enable_kernel_fp(void) 155 114 { ··· 157 116 158 117 #ifdef CONFIG_SMP 159 118 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) 160 - giveup_fpu(current); 119 + giveup_fpu_maybe_transactional(current); 161 120 else 162 121 giveup_fpu(NULL); /* just enables FP for kernel */ 163 122 #else 164 - giveup_fpu(last_task_used_math); 123 + giveup_fpu_maybe_transactional(last_task_used_math); 165 124 #endif /* CONFIG_SMP */ 166 125 } 167 126 EXPORT_SYMBOL(enable_kernel_fp); ··· 173 132 174 133 #ifdef CONFIG_SMP 175 134 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) 176 - giveup_altivec(current); 135 + giveup_altivec_maybe_transactional(current); 177 136 else 178 137 giveup_altivec_notask(); 179 138 #else 180 - giveup_altivec(last_task_used_altivec); 139 + giveup_altivec_maybe_transactional(last_task_used_altivec); 181 140 #endif /* CONFIG_SMP */ 182 141 } 183 142 EXPORT_SYMBOL(enable_kernel_altivec); ··· 194 153 #ifdef CONFIG_SMP 195 154 BUG_ON(tsk != current); 196 155 #endif 197 - giveup_altivec(tsk); 156 + giveup_altivec_maybe_transactional(tsk); 198 157 
} 199 158 preempt_enable(); 200 159 } ··· 223 182 224 183 void giveup_vsx(struct task_struct *tsk) 225 184 { 226 - giveup_fpu(tsk); 227 - giveup_altivec(tsk); 185 + giveup_fpu_maybe_transactional(tsk); 186 + giveup_altivec_maybe_transactional(tsk); 228 187 __giveup_vsx(tsk); 229 188 } 230 189 ··· 520 479 return false; 521 480 return true; 522 481 } 482 + 523 483 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 484 + static void tm_reclaim_thread(struct thread_struct *thr, 485 + struct thread_info *ti, uint8_t cause) 486 + { 487 + unsigned long msr_diff = 0; 488 + 489 + /* 490 + * If FP/VSX registers have been already saved to the 491 + * thread_struct, move them to the transact_fp array. 492 + * We clear the TIF_RESTORE_TM bit since after the reclaim 493 + * the thread will no longer be transactional. 494 + */ 495 + if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) { 496 + msr_diff = thr->tm_orig_msr & ~thr->regs->msr; 497 + if (msr_diff & MSR_FP) 498 + memcpy(&thr->transact_fp, &thr->fp_state, 499 + sizeof(struct thread_fp_state)); 500 + if (msr_diff & MSR_VEC) 501 + memcpy(&thr->transact_vr, &thr->vr_state, 502 + sizeof(struct thread_vr_state)); 503 + clear_ti_thread_flag(ti, TIF_RESTORE_TM); 504 + msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; 505 + } 506 + 507 + tm_reclaim(thr, thr->regs->msr, cause); 508 + 509 + /* Having done the reclaim, we now have the checkpointed 510 + * FP/VSX values in the registers. These might be valid 511 + * even if we have previously called enable_kernel_fp() or 512 + * flush_fp_to_thread(), so update thr->regs->msr to 513 + * indicate their current validity. 
514 + */ 515 + thr->regs->msr |= msr_diff; 516 + } 517 + 518 + void tm_reclaim_current(uint8_t cause) 519 + { 520 + tm_enable(); 521 + tm_reclaim_thread(&current->thread, current_thread_info(), cause); 522 + } 523 + 524 524 static inline void tm_reclaim_task(struct task_struct *tsk) 525 525 { 526 526 /* We have to work out if we're switching from/to a task that's in the ··· 584 502 585 503 /* Stash the original thread MSR, as giveup_fpu et al will 586 504 * modify it. We hold onto it to see whether the task used 587 - * FP & vector regs. 505 + * FP & vector regs. If the TIF_RESTORE_TM flag is set, 506 + * tm_orig_msr is already set. 588 507 */ 589 - thr->tm_orig_msr = thr->regs->msr; 508 + if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM)) 509 + thr->tm_orig_msr = thr->regs->msr; 590 510 591 511 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " 592 512 "ccr=%lx, msr=%lx, trap=%lx)\n", ··· 596 512 thr->regs->ccr, thr->regs->msr, 597 513 thr->regs->trap); 598 514 599 - tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED); 515 + tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED); 600 516 601 517 TM_DEBUG("--- tm_reclaim on pid %d complete\n", 602 518 tsk->pid); ··· 672 588 tm_reclaim_task(prev); 673 589 } 674 590 } 591 + 592 + /* 593 + * This is called if we are on the way out to userspace and the 594 + * TIF_RESTORE_TM flag is set. It checks if we need to reload 595 + * FP and/or vector state and does so if necessary. 596 + * If userspace is inside a transaction (whether active or 597 + * suspended) and FP/VMX/VSX instructions have ever been enabled 598 + * inside that transaction, then we have to keep them enabled 599 + * and keep the FP/VMX/VSX state loaded while ever the transaction 600 + * continues. 
The reason is that if we didn't, and subsequently 601 + * got a FP/VMX/VSX unavailable interrupt inside a transaction, 602 + * we don't know whether it's the same transaction, and thus we 603 + * don't know which of the checkpointed state and the transactional 604 + * state to use. 605 + */ 606 + void restore_tm_state(struct pt_regs *regs) 607 + { 608 + unsigned long msr_diff; 609 + 610 + clear_thread_flag(TIF_RESTORE_TM); 611 + if (!MSR_TM_ACTIVE(regs->msr)) 612 + return; 613 + 614 + msr_diff = current->thread.tm_orig_msr & ~regs->msr; 615 + msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; 616 + if (msr_diff & MSR_FP) { 617 + fp_enable(); 618 + load_fp_state(&current->thread.fp_state); 619 + regs->msr |= current->thread.fpexc_mode; 620 + } 621 + if (msr_diff & MSR_VEC) { 622 + vec_enable(); 623 + load_vr_state(&current->thread.vr_state); 624 + } 625 + regs->msr |= msr_diff; 626 + } 627 + 675 628 #else 676 629 #define tm_recheckpoint_new_task(new) 677 630 #define __switch_to_tm(prev) ··· 1296 1175 if (val & PR_FP_EXC_SW_ENABLE) { 1297 1176 #ifdef CONFIG_SPE 1298 1177 if (cpu_has_feature(CPU_FTR_SPE)) { 1178 + /* 1179 + * When the sticky exception bits are set 1180 + * directly by userspace, it must call prctl 1181 + * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE 1182 + * in the existing prctl settings) or 1183 + * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in 1184 + * the bits being set). <fenv.h> functions 1185 + * saving and restoring the whole 1186 + * floating-point environment need to do so 1187 + * anyway to restore the prctl settings from 1188 + * the saved environment. 
1189 + */ 1190 + tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); 1299 1191 tsk->thread.fpexc_mode = val & 1300 1192 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); 1301 1193 return 0; ··· 1340 1206 1341 1207 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 1342 1208 #ifdef CONFIG_SPE 1343 - if (cpu_has_feature(CPU_FTR_SPE)) 1209 + if (cpu_has_feature(CPU_FTR_SPE)) { 1210 + /* 1211 + * When the sticky exception bits are set 1212 + * directly by userspace, it must call prctl 1213 + * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE 1214 + * in the existing prctl settings) or 1215 + * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in 1216 + * the bits being set). <fenv.h> functions 1217 + * saving and restoring the whole 1218 + * floating-point environment need to do so 1219 + * anyway to restore the prctl settings from 1220 + * the saved environment. 1221 + */ 1222 + tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); 1344 1223 val = tsk->thread.fpexc_mode; 1345 - else 1224 + } else 1346 1225 return -EINVAL; 1347 1226 #else 1348 1227 return -EINVAL;
+40 -1
arch/powerpc/kernel/prom.c
··· 523 523 return early_init_dt_scan_memory(node, uname, depth, data); 524 524 } 525 525 526 + /* 527 + * For a relocatable kernel, we need to get the memstart_addr first, 528 + * then use it to calculate the virtual kernel start address. This has 529 + * to happen at a very early stage (before machine_init). In this case, 530 + * we just want to get the memstart_address and would not like to mess the 531 + * memblock at this stage. So introduce a variable to skip the memblock_add() 532 + * for this reason. 533 + */ 534 + #ifdef CONFIG_RELOCATABLE 535 + static int add_mem_to_memblock = 1; 536 + #else 537 + #define add_mem_to_memblock 1 538 + #endif 539 + 526 540 void __init early_init_dt_add_memory_arch(u64 base, u64 size) 527 541 { 528 542 #ifdef CONFIG_PPC64 ··· 557 543 } 558 544 559 545 /* Add the chunk to the MEMBLOCK list */ 560 - memblock_add(base, size); 546 + if (add_mem_to_memblock) 547 + memblock_add(base, size); 561 548 } 562 549 563 550 static void __init early_reserve_mem_dt(void) ··· 754 739 755 740 DBG(" <- early_init_devtree()\n"); 756 741 } 742 + 743 + #ifdef CONFIG_RELOCATABLE 744 + /* 745 + * This function run before early_init_devtree, so we have to init 746 + * initial_boot_params. 747 + */ 748 + void __init early_get_first_memblock_info(void *params, phys_addr_t *size) 749 + { 750 + /* Setup flat device-tree pointer */ 751 + initial_boot_params = params; 752 + 753 + /* 754 + * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid 755 + * mess the memblock. 756 + */ 757 + add_mem_to_memblock = 0; 758 + of_scan_flat_dt(early_init_dt_scan_root, NULL); 759 + of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); 760 + add_mem_to_memblock = 1; 761 + 762 + if (size) 763 + *size = first_memblock_size; 764 + } 765 + #endif 757 766 758 767 /******* 759 768 *
+41 -6
arch/powerpc/kernel/setup_64.c
··· 97 97 int icache_bsize; 98 98 int ucache_bsize; 99 99 100 + #if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) 101 + static void setup_tlb_core_data(void) 102 + { 103 + int cpu; 104 + 105 + for_each_possible_cpu(cpu) { 106 + int first = cpu_first_thread_sibling(cpu); 107 + 108 + paca[cpu].tcd_ptr = &paca[first].tcd; 109 + 110 + /* 111 + * If we have threads, we need either tlbsrx. 112 + * or e6500 tablewalk mode, or else TLB handlers 113 + * will be racy and could produce duplicate entries. 114 + */ 115 + if (smt_enabled_at_boot >= 2 && 116 + !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && 117 + book3e_htw_mode != PPC_HTW_E6500) { 118 + /* Should we panic instead? */ 119 + WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n", 120 + __func__); 121 + } 122 + } 123 + } 124 + #else 125 + static void setup_tlb_core_data(void) 126 + { 127 + } 128 + #endif 129 + 100 130 #ifdef CONFIG_SMP 101 131 102 132 static char *smt_enabled_cmdline; ··· 475 445 476 446 smp_setup_cpu_maps(); 477 447 check_smt_enabled(); 448 + setup_tlb_core_data(); 478 449 479 450 #ifdef CONFIG_SMP 480 451 /* Release secondary cpus out of their spinloops at 0x60 now that ··· 551 520 #ifdef CONFIG_PPC_BOOK3E 552 521 static void __init exc_lvl_early_init(void) 553 522 { 554 - extern unsigned int interrupt_base_book3e; 555 - extern unsigned int exc_debug_debug_book3e; 556 - 557 523 unsigned int i; 558 524 559 525 for_each_possible_cpu(i) { ··· 563 535 } 564 536 565 537 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) 566 - patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1, 567 - (unsigned long)&exc_debug_debug_book3e, 0); 538 + patch_exception(0x040, exc_debug_debug_book3e); 568 539 } 569 540 #else 570 541 #define exc_lvl_early_init() ··· 571 544 572 545 /* 573 546 * Stack space used when we detect a bad kernel stack pointer, and 574 - * early in SMP boots before relocation is enabled. 547 + * early in SMP boots before relocation is enabled. 
Exclusive emergency 548 + * stack for machine checks. 575 549 */ 576 550 static void __init emergency_stack_init(void) 577 551 { ··· 595 567 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); 596 568 sp += THREAD_SIZE; 597 569 paca[i].emergency_sp = __va(sp); 570 + 571 + #ifdef CONFIG_PPC_BOOK3S_64 572 + /* emergency stack for machine check exception handling. */ 573 + sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); 574 + sp += THREAD_SIZE; 575 + paca[i].mc_emergency_sp = __va(sp); 576 + #endif 598 577 } 599 578 } 600 579
+1 -2
arch/powerpc/kernel/signal.c
··· 203 203 204 204 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 205 205 if (MSR_TM_ACTIVE(regs->msr)) { 206 - tm_enable(); 207 - tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL); 206 + tm_reclaim_current(TM_CAUSE_SIGNAL); 208 207 if (MSR_TM_TRANSACTIONAL(regs->msr)) 209 208 return current->thread.ckpt_regs.gpr[1]; 210 209 }
+7 -14
arch/powerpc/kernel/signal_32.c
··· 519 519 { 520 520 unsigned long msr = regs->msr; 521 521 522 + /* Remove TM bits from thread's MSR. The MSR in the sigcontext 523 + * just indicates to userland that we were doing a transaction, but we 524 + * don't want to return in transactional state. This also ensures 525 + * that flush_fp_to_thread won't set TIF_RESTORE_TM again. 526 + */ 527 + regs->msr &= ~MSR_TS_MASK; 528 + 522 529 /* Make sure floating point registers are stored in regs */ 523 530 flush_fp_to_thread(current); 524 531 ··· 1063 1056 /* enter the signal handler in native-endian mode */ 1064 1057 regs->msr &= ~MSR_LE; 1065 1058 regs->msr |= (MSR_KERNEL & MSR_LE); 1066 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1067 - /* Remove TM bits from thread's MSR. The MSR in the sigcontext 1068 - * just indicates to userland that we were doing a transaction, but we 1069 - * don't want to return in transactional state: 1070 - */ 1071 - regs->msr &= ~MSR_TS_MASK; 1072 - #endif 1073 1059 return 1; 1074 1060 1075 1061 badframe: ··· 1484 1484 regs->nip = (unsigned long) ka->sa.sa_handler; 1485 1485 /* enter the signal handler in big-endian mode */ 1486 1486 regs->msr &= ~MSR_LE; 1487 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1488 - /* Remove TM bits from thread's MSR. The MSR in the sigcontext 1489 - * just indicates to userland that we were doing a transaction, but we 1490 - * don't want to return in transactional state: 1491 - */ 1492 - regs->msr &= ~MSR_TS_MASK; 1493 - #endif 1494 1487 return 1; 1495 1488 1496 1489 badframe:
+7 -7
arch/powerpc/kernel/signal_64.c
··· 192 192 193 193 BUG_ON(!MSR_TM_ACTIVE(regs->msr)); 194 194 195 + /* Remove TM bits from thread's MSR. The MSR in the sigcontext 196 + * just indicates to userland that we were doing a transaction, but we 197 + * don't want to return in transactional state. This also ensures 198 + * that flush_fp_to_thread won't set TIF_RESTORE_TM again. 199 + */ 200 + regs->msr &= ~MSR_TS_MASK; 201 + 195 202 flush_fp_to_thread(current); 196 203 197 204 #ifdef CONFIG_ALTIVEC ··· 756 749 757 750 /* Make sure signal handler doesn't get spurious FP exceptions */ 758 751 current->thread.fp_state.fpscr = 0; 759 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 760 - /* Remove TM bits from thread's MSR. The MSR in the sigcontext 761 - * just indicates to userland that we were doing a transaction, but we 762 - * don't want to return in transactional state: 763 - */ 764 - regs->msr &= ~MSR_TS_MASK; 765 - #endif 766 752 767 753 /* Set up to return from userspace. */ 768 754 if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
-1
arch/powerpc/kernel/smp-tbsync.c
··· 9 9 #include <linux/sched.h> 10 10 #include <linux/smp.h> 11 11 #include <linux/unistd.h> 12 - #include <linux/init.h> 13 12 #include <linux/slab.h> 14 13 #include <linux/atomic.h> 15 14 #include <asm/smp.h>
+2 -7
arch/powerpc/kernel/smp.c
··· 369 369 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); 370 370 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); 371 371 372 - if (smp_ops) 373 - if (smp_ops->probe) 374 - max_cpus = smp_ops->probe(); 375 - else 376 - max_cpus = NR_CPUS; 377 - else 378 - max_cpus = 1; 372 + if (smp_ops && smp_ops->probe) 373 + smp_ops->probe(); 379 374 } 380 375 381 376 void smp_prepare_boot_cpu(void)
+16 -16
arch/powerpc/kernel/swsusp_booke.S
··· 74 74 bne 1b 75 75 76 76 /* Save SPRGs */ 77 - mfsprg r4,0 77 + mfspr r4,SPRN_SPRG0 78 78 stw r4,SL_SPRG0(r11) 79 - mfsprg r4,1 79 + mfspr r4,SPRN_SPRG1 80 80 stw r4,SL_SPRG1(r11) 81 - mfsprg r4,2 81 + mfspr r4,SPRN_SPRG2 82 82 stw r4,SL_SPRG2(r11) 83 - mfsprg r4,3 83 + mfspr r4,SPRN_SPRG3 84 84 stw r4,SL_SPRG3(r11) 85 - mfsprg r4,4 85 + mfspr r4,SPRN_SPRG4 86 86 stw r4,SL_SPRG4(r11) 87 - mfsprg r4,5 87 + mfspr r4,SPRN_SPRG5 88 88 stw r4,SL_SPRG5(r11) 89 - mfsprg r4,6 89 + mfspr r4,SPRN_SPRG6 90 90 stw r4,SL_SPRG6(r11) 91 - mfsprg r4,7 91 + mfspr r4,SPRN_SPRG7 92 92 stw r4,SL_SPRG7(r11) 93 93 94 94 /* Call the low level suspend stuff (we should probably have made ··· 150 150 bl _tlbil_all 151 151 152 152 lwz r4,SL_SPRG0(r11) 153 - mtsprg 0,r4 153 + mtspr SPRN_SPRG0,r4 154 154 lwz r4,SL_SPRG1(r11) 155 - mtsprg 1,r4 155 + mtspr SPRN_SPRG1,r4 156 156 lwz r4,SL_SPRG2(r11) 157 - mtsprg 2,r4 157 + mtspr SPRN_SPRG2,r4 158 158 lwz r4,SL_SPRG3(r11) 159 - mtsprg 3,r4 159 + mtspr SPRN_SPRG3,r4 160 160 lwz r4,SL_SPRG4(r11) 161 - mtsprg 4,r4 161 + mtspr SPRN_SPRG4,r4 162 162 lwz r4,SL_SPRG5(r11) 163 - mtsprg 5,r4 163 + mtspr SPRN_SPRG5,r4 164 164 lwz r4,SL_SPRG6(r11) 165 - mtsprg 6,r4 165 + mtspr SPRN_SPRG6,r4 166 166 lwz r4,SL_SPRG7(r11) 167 - mtsprg 7,r4 167 + mtspr SPRN_SPRG7,r4 168 168 169 169 /* restore the MSR */ 170 170 lwz r3,SL_MSR(r11)
-1
arch/powerpc/kernel/syscalls.c
··· 34 34 #include <linux/ipc.h> 35 35 #include <linux/utsname.h> 36 36 #include <linux/file.h> 37 - #include <linux/init.h> 38 37 #include <linux/personality.h> 39 38 40 39 #include <asm/uaccess.h>
+354 -34
arch/powerpc/kernel/sysfs.c
··· 86 86 87 87 #endif /* CONFIG_PPC64 */ 88 88 89 + #ifdef CONFIG_PPC_FSL_BOOK3E 90 + #define MAX_BIT 63 91 + 92 + static u64 pw20_wt; 93 + static u64 altivec_idle_wt; 94 + 95 + static unsigned int get_idle_ticks_bit(u64 ns) 96 + { 97 + u64 cycle; 98 + 99 + if (ns >= 10000) 100 + cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec; 101 + else 102 + cycle = div_u64(ns * tb_ticks_per_usec, 1000); 103 + 104 + if (!cycle) 105 + return 0; 106 + 107 + return ilog2(cycle); 108 + } 109 + 110 + static void do_show_pwrmgtcr0(void *val) 111 + { 112 + u32 *value = val; 113 + 114 + *value = mfspr(SPRN_PWRMGTCR0); 115 + } 116 + 117 + static ssize_t show_pw20_state(struct device *dev, 118 + struct device_attribute *attr, char *buf) 119 + { 120 + u32 value; 121 + unsigned int cpu = dev->id; 122 + 123 + smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); 124 + 125 + value &= PWRMGTCR0_PW20_WAIT; 126 + 127 + return sprintf(buf, "%u\n", value ? 1 : 0); 128 + } 129 + 130 + static void do_store_pw20_state(void *val) 131 + { 132 + u32 *value = val; 133 + u32 pw20_state; 134 + 135 + pw20_state = mfspr(SPRN_PWRMGTCR0); 136 + 137 + if (*value) 138 + pw20_state |= PWRMGTCR0_PW20_WAIT; 139 + else 140 + pw20_state &= ~PWRMGTCR0_PW20_WAIT; 141 + 142 + mtspr(SPRN_PWRMGTCR0, pw20_state); 143 + } 144 + 145 + static ssize_t store_pw20_state(struct device *dev, 146 + struct device_attribute *attr, 147 + const char *buf, size_t count) 148 + { 149 + u32 value; 150 + unsigned int cpu = dev->id; 151 + 152 + if (kstrtou32(buf, 0, &value)) 153 + return -EINVAL; 154 + 155 + if (value > 1) 156 + return -EINVAL; 157 + 158 + smp_call_function_single(cpu, do_store_pw20_state, &value, 1); 159 + 160 + return count; 161 + } 162 + 163 + static ssize_t show_pw20_wait_time(struct device *dev, 164 + struct device_attribute *attr, char *buf) 165 + { 166 + u32 value; 167 + u64 tb_cycle = 1; 168 + u64 time; 169 + 170 + unsigned int cpu = dev->id; 171 + 172 + if (!pw20_wt) { 173 + smp_call_function_single(cpu, 
do_show_pwrmgtcr0, &value, 1); 174 + value = (value & PWRMGTCR0_PW20_ENT) >> 175 + PWRMGTCR0_PW20_ENT_SHIFT; 176 + 177 + tb_cycle = (tb_cycle << (MAX_BIT - value + 1)); 178 + /* convert ms to ns */ 179 + if (tb_ticks_per_usec > 1000) { 180 + time = div_u64(tb_cycle, tb_ticks_per_usec / 1000); 181 + } else { 182 + u32 rem_us; 183 + 184 + time = div_u64_rem(tb_cycle, tb_ticks_per_usec, 185 + &rem_us); 186 + time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec; 187 + } 188 + } else { 189 + time = pw20_wt; 190 + } 191 + 192 + return sprintf(buf, "%llu\n", time > 0 ? time : 0); 193 + } 194 + 195 + static void set_pw20_wait_entry_bit(void *val) 196 + { 197 + u32 *value = val; 198 + u32 pw20_idle; 199 + 200 + pw20_idle = mfspr(SPRN_PWRMGTCR0); 201 + 202 + /* Set Automatic PW20 Core Idle Count */ 203 + /* clear count */ 204 + pw20_idle &= ~PWRMGTCR0_PW20_ENT; 205 + 206 + /* set count */ 207 + pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT); 208 + 209 + mtspr(SPRN_PWRMGTCR0, pw20_idle); 210 + } 211 + 212 + static ssize_t store_pw20_wait_time(struct device *dev, 213 + struct device_attribute *attr, 214 + const char *buf, size_t count) 215 + { 216 + u32 entry_bit; 217 + u64 value; 218 + 219 + unsigned int cpu = dev->id; 220 + 221 + if (kstrtou64(buf, 0, &value)) 222 + return -EINVAL; 223 + 224 + if (!value) 225 + return -EINVAL; 226 + 227 + entry_bit = get_idle_ticks_bit(value); 228 + if (entry_bit > MAX_BIT) 229 + return -EINVAL; 230 + 231 + pw20_wt = value; 232 + 233 + smp_call_function_single(cpu, set_pw20_wait_entry_bit, 234 + &entry_bit, 1); 235 + 236 + return count; 237 + } 238 + 239 + static ssize_t show_altivec_idle(struct device *dev, 240 + struct device_attribute *attr, char *buf) 241 + { 242 + u32 value; 243 + unsigned int cpu = dev->id; 244 + 245 + smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); 246 + 247 + value &= PWRMGTCR0_AV_IDLE_PD_EN; 248 + 249 + return sprintf(buf, "%u\n", value ? 
1 : 0); 250 + } 251 + 252 + static void do_store_altivec_idle(void *val) 253 + { 254 + u32 *value = val; 255 + u32 altivec_idle; 256 + 257 + altivec_idle = mfspr(SPRN_PWRMGTCR0); 258 + 259 + if (*value) 260 + altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN; 261 + else 262 + altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN; 263 + 264 + mtspr(SPRN_PWRMGTCR0, altivec_idle); 265 + } 266 + 267 + static ssize_t store_altivec_idle(struct device *dev, 268 + struct device_attribute *attr, 269 + const char *buf, size_t count) 270 + { 271 + u32 value; 272 + unsigned int cpu = dev->id; 273 + 274 + if (kstrtou32(buf, 0, &value)) 275 + return -EINVAL; 276 + 277 + if (value > 1) 278 + return -EINVAL; 279 + 280 + smp_call_function_single(cpu, do_store_altivec_idle, &value, 1); 281 + 282 + return count; 283 + } 284 + 285 + static ssize_t show_altivec_idle_wait_time(struct device *dev, 286 + struct device_attribute *attr, char *buf) 287 + { 288 + u32 value; 289 + u64 tb_cycle = 1; 290 + u64 time; 291 + 292 + unsigned int cpu = dev->id; 293 + 294 + if (!altivec_idle_wt) { 295 + smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); 296 + value = (value & PWRMGTCR0_AV_IDLE_CNT) >> 297 + PWRMGTCR0_AV_IDLE_CNT_SHIFT; 298 + 299 + tb_cycle = (tb_cycle << (MAX_BIT - value + 1)); 300 + /* convert ms to ns */ 301 + if (tb_ticks_per_usec > 1000) { 302 + time = div_u64(tb_cycle, tb_ticks_per_usec / 1000); 303 + } else { 304 + u32 rem_us; 305 + 306 + time = div_u64_rem(tb_cycle, tb_ticks_per_usec, 307 + &rem_us); 308 + time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec; 309 + } 310 + } else { 311 + time = altivec_idle_wt; 312 + } 313 + 314 + return sprintf(buf, "%llu\n", time > 0 ? 
time : 0); 315 + } 316 + 317 + static void set_altivec_idle_wait_entry_bit(void *val) 318 + { 319 + u32 *value = val; 320 + u32 altivec_idle; 321 + 322 + altivec_idle = mfspr(SPRN_PWRMGTCR0); 323 + 324 + /* Set Automatic AltiVec Idle Count */ 325 + /* clear count */ 326 + altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT; 327 + 328 + /* set count */ 329 + altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT); 330 + 331 + mtspr(SPRN_PWRMGTCR0, altivec_idle); 332 + } 333 + 334 + static ssize_t store_altivec_idle_wait_time(struct device *dev, 335 + struct device_attribute *attr, 336 + const char *buf, size_t count) 337 + { 338 + u32 entry_bit; 339 + u64 value; 340 + 341 + unsigned int cpu = dev->id; 342 + 343 + if (kstrtou64(buf, 0, &value)) 344 + return -EINVAL; 345 + 346 + if (!value) 347 + return -EINVAL; 348 + 349 + entry_bit = get_idle_ticks_bit(value); 350 + if (entry_bit > MAX_BIT) 351 + return -EINVAL; 352 + 353 + altivec_idle_wt = value; 354 + 355 + smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit, 356 + &entry_bit, 1); 357 + 358 + return count; 359 + } 360 + 361 + /* 362 + * Enable/Disable interface: 363 + * 0, disable. 1, enable. 364 + */ 365 + static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state); 366 + static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle); 367 + 368 + /* 369 + * Set wait time interface:(Nanosecond) 370 + * Example: Base on TBfreq is 41MHZ. 371 + * 1~48(ns): TB[63] 372 + * 49~97(ns): TB[62] 373 + * 98~195(ns): TB[61] 374 + * 196~390(ns): TB[60] 375 + * 391~780(ns): TB[59] 376 + * 781~1560(ns): TB[58] 377 + * ... 
378 + */ 379 + static DEVICE_ATTR(pw20_wait_time, 0600, 380 + show_pw20_wait_time, 381 + store_pw20_wait_time); 382 + static DEVICE_ATTR(altivec_idle_wait_time, 0600, 383 + show_altivec_idle_wait_time, 384 + store_altivec_idle_wait_time); 385 + #endif 386 + 89 387 /* 90 388 * Enabling PMCs will slow partition context switch times so we only do 91 389 * it the first time we write to the PMCs. ··· 406 108 } 407 109 EXPORT_SYMBOL(ppc_enable_pmcs); 408 110 409 - #define SYSFS_PMCSETUP(NAME, ADDRESS) \ 111 + #define __SYSFS_SPRSETUP(NAME, ADDRESS, EXTRA) \ 410 112 static void read_##NAME(void *val) \ 411 113 { \ 412 114 *(unsigned long *)val = mfspr(ADDRESS); \ 413 115 } \ 414 116 static void write_##NAME(void *val) \ 415 117 { \ 416 - ppc_enable_pmcs(); \ 118 + EXTRA; \ 417 119 mtspr(ADDRESS, *(unsigned long *)val); \ 418 120 } \ 419 121 static ssize_t show_##NAME(struct device *dev, \ ··· 438 140 return count; \ 439 141 } 440 142 143 + #define SYSFS_PMCSETUP(NAME, ADDRESS) \ 144 + __SYSFS_SPRSETUP(NAME, ADDRESS, ppc_enable_pmcs()) 145 + #define SYSFS_SPRSETUP(NAME, ADDRESS) \ 146 + __SYSFS_SPRSETUP(NAME, ADDRESS, ) 441 147 442 148 /* Let's define all possible registers, we'll only hook up the ones 443 149 * that are implemented on the current processor ··· 477 175 SYSFS_PMCSETUP(pmc8, SPRN_PMC8); 478 176 479 177 SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); 480 - SYSFS_PMCSETUP(purr, SPRN_PURR); 481 - SYSFS_PMCSETUP(spurr, SPRN_SPURR); 482 - SYSFS_PMCSETUP(dscr, SPRN_DSCR); 483 - SYSFS_PMCSETUP(pir, SPRN_PIR); 178 + SYSFS_SPRSETUP(purr, SPRN_PURR); 179 + SYSFS_SPRSETUP(spurr, SPRN_SPURR); 180 + SYSFS_SPRSETUP(dscr, SPRN_DSCR); 181 + SYSFS_SPRSETUP(pir, SPRN_PIR); 484 182 485 183 /* 486 184 Lets only enable read for phyp resources and ··· 551 249 SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4); 552 250 SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5); 553 251 #ifdef CONFIG_DEBUG_KERNEL 554 - SYSFS_PMCSETUP(hid0, SPRN_HID0); 555 - SYSFS_PMCSETUP(hid1, SPRN_HID1); 556 - SYSFS_PMCSETUP(hid4, 
SPRN_HID4); 557 - SYSFS_PMCSETUP(hid5, SPRN_HID5); 558 - SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0); 559 - SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1); 560 - SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2); 561 - SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3); 562 - SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4); 563 - SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5); 564 - SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6); 565 - SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7); 566 - SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8); 567 - SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9); 568 - SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT); 569 - SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR); 570 - SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR); 571 - SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR); 572 - SYSFS_PMCSETUP(der, SPRN_PA6T_DER); 573 - SYSFS_PMCSETUP(mer, SPRN_PA6T_MER); 574 - SYSFS_PMCSETUP(ber, SPRN_PA6T_BER); 575 - SYSFS_PMCSETUP(ier, SPRN_PA6T_IER); 576 - SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER); 577 - SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR); 578 - SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0); 579 - SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1); 580 - SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2); 581 - SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3); 252 + SYSFS_SPRSETUP(hid0, SPRN_HID0); 253 + SYSFS_SPRSETUP(hid1, SPRN_HID1); 254 + SYSFS_SPRSETUP(hid4, SPRN_HID4); 255 + SYSFS_SPRSETUP(hid5, SPRN_HID5); 256 + SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0); 257 + SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1); 258 + SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2); 259 + SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3); 260 + SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4); 261 + SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5); 262 + SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6); 263 + SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7); 264 + SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8); 265 + SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9); 266 + SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT); 267 + SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR); 268 + SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR); 269 + SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR); 270 + SYSFS_SPRSETUP(der, SPRN_PA6T_DER); 271 + SYSFS_SPRSETUP(mer, SPRN_PA6T_MER); 272 + SYSFS_SPRSETUP(ber, 
SPRN_PA6T_BER); 273 + SYSFS_SPRSETUP(ier, SPRN_PA6T_IER); 274 + SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER); 275 + SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR); 276 + SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0); 277 + SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1); 278 + SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2); 279 + SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3); 582 280 #endif /* CONFIG_DEBUG_KERNEL */ 583 281 #endif /* HAS_PPC_PMC_PA6T */ 584 282 ··· 723 421 device_create_file(s, &dev_attr_pir); 724 422 #endif /* CONFIG_PPC64 */ 725 423 424 + #ifdef CONFIG_PPC_FSL_BOOK3E 425 + if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { 426 + device_create_file(s, &dev_attr_pw20_state); 427 + device_create_file(s, &dev_attr_pw20_wait_time); 428 + 429 + device_create_file(s, &dev_attr_altivec_idle); 430 + device_create_file(s, &dev_attr_altivec_idle_wait_time); 431 + } 432 + #endif 726 433 cacheinfo_cpu_online(cpu); 727 434 } 728 435 ··· 804 493 device_remove_file(s, &dev_attr_pir); 805 494 #endif /* CONFIG_PPC64 */ 806 495 496 + #ifdef CONFIG_PPC_FSL_BOOK3E 497 + if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { 498 + device_remove_file(s, &dev_attr_pw20_state); 499 + device_remove_file(s, &dev_attr_pw20_wait_time); 500 + 501 + device_remove_file(s, &dev_attr_altivec_idle); 502 + device_remove_file(s, &dev_attr_altivec_idle_wait_time); 503 + } 504 + #endif 807 505 cacheinfo_cpu_offline(cpu); 808 506 } 809 507
+13 -1
arch/powerpc/kernel/time.c
··· 510 510 */ 511 511 may_hard_irq_enable(); 512 512 513 - __get_cpu_var(irq_stat).timer_irqs++; 514 513 515 514 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 516 515 if (atomic_read(&ppc_n_lost_interrupts) != 0) ··· 531 532 *next_tb = ~(u64)0; 532 533 if (evt->event_handler) 533 534 evt->event_handler(evt); 535 + __get_cpu_var(irq_stat).timer_irqs_event++; 534 536 } else { 535 537 now = *next_tb - now; 536 538 if (now <= DECREMENTER_MAX) 537 539 set_dec((int)now); 540 + /* We may have raced with new irq work */ 541 + if (test_irq_work_pending()) 542 + set_dec(1); 543 + __get_cpu_var(irq_stat).timer_irqs_others++; 538 544 } 539 545 540 546 #ifdef CONFIG_PPC64 ··· 805 801 static int decrementer_set_next_event(unsigned long evt, 806 802 struct clock_event_device *dev) 807 803 { 804 + /* Don't adjust the decrementer if some irq work is pending */ 805 + if (test_irq_work_pending()) 806 + return 0; 808 807 __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; 809 808 set_dec(evt); 809 + 810 + /* We may have raced with new irq work */ 811 + if (test_irq_work_pending()) 812 + set_dec(1); 813 + 810 814 return 0; 811 815 } 812 816
+55 -19
arch/powerpc/kernel/traps.c
··· 285 285 286 286 /* What should we do here? We could issue a shutdown or hard reset. */ 287 287 } 288 + 289 + /* 290 + * This function is called in real mode. Strictly no printk's please. 291 + * 292 + * regs->nip and regs->msr contains srr0 and ssr1. 293 + */ 294 + long machine_check_early(struct pt_regs *regs) 295 + { 296 + long handled = 0; 297 + 298 + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 299 + handled = cur_cpu_spec->machine_check_early(regs); 300 + return handled; 301 + } 302 + 288 303 #endif 289 304 290 305 /* ··· 1399 1384 1400 1385 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1401 1386 regs->nip, regs->msr); 1402 - tm_enable(); 1403 1387 1404 1388 /* We can only have got here if the task started using FP after 1405 1389 * beginning the transaction. So, the transactional regs are just a ··· 1407 1393 * transaction, and probably retry but now with FP enabled. So the 1408 1394 * checkpointed FP registers need to be loaded. 1409 1395 */ 1410 - tm_reclaim(&current->thread, current->thread.regs->msr, 1411 - TM_CAUSE_FAC_UNAV); 1396 + tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1412 1397 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1413 1398 1414 1399 /* Enable FP for the task: */ ··· 1416 1403 /* This loads and recheckpoints the FP registers from 1417 1404 * thread.fpr[]. They will remain in registers after the 1418 1405 * checkpoint so we don't need to reload them after. 1406 + * If VMX is in use, the VRs now hold checkpointed values, 1407 + * so we don't want to load the VRs from the thread_struct. 
1419 1408 */ 1420 - tm_recheckpoint(&current->thread, regs->msr); 1409 + tm_recheckpoint(&current->thread, MSR_FP); 1410 + 1411 + /* If VMX is in use, get the transactional values back */ 1412 + if (regs->msr & MSR_VEC) { 1413 + do_load_up_transact_altivec(&current->thread); 1414 + /* At this point all the VSX state is loaded, so enable it */ 1415 + regs->msr |= MSR_VSX; 1416 + } 1421 1417 } 1422 1418 1423 - #ifdef CONFIG_ALTIVEC 1424 1419 void altivec_unavailable_tm(struct pt_regs *regs) 1425 1420 { 1426 1421 /* See the comments in fp_unavailable_tm(). This function operates ··· 1438 1417 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1439 1418 "MSR=%lx\n", 1440 1419 regs->nip, regs->msr); 1441 - tm_enable(); 1442 - tm_reclaim(&current->thread, current->thread.regs->msr, 1443 - TM_CAUSE_FAC_UNAV); 1420 + tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1444 1421 regs->msr |= MSR_VEC; 1445 - tm_recheckpoint(&current->thread, regs->msr); 1422 + tm_recheckpoint(&current->thread, MSR_VEC); 1446 1423 current->thread.used_vr = 1; 1447 - } 1448 - #endif 1449 1424 1450 - #ifdef CONFIG_VSX 1425 + if (regs->msr & MSR_FP) { 1426 + do_load_up_transact_fpu(&current->thread); 1427 + regs->msr |= MSR_VSX; 1428 + } 1429 + } 1430 + 1451 1431 void vsx_unavailable_tm(struct pt_regs *regs) 1452 1432 { 1433 + unsigned long orig_msr = regs->msr; 1434 + 1453 1435 /* See the comments in fp_unavailable_tm(). This works similarly, 1454 1436 * though we're loading both FP and VEC registers in here. 
1455 1437 * ··· 1464 1440 "MSR=%lx\n", 1465 1441 regs->nip, regs->msr); 1466 1442 1467 - tm_enable(); 1443 + current->thread.used_vsr = 1; 1444 + 1445 + /* If FP and VMX are already loaded, we have all the state we need */ 1446 + if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { 1447 + regs->msr |= MSR_VSX; 1448 + return; 1449 + } 1450 + 1468 1451 /* This reclaims FP and/or VR regs if they're already enabled */ 1469 - tm_reclaim(&current->thread, current->thread.regs->msr, 1470 - TM_CAUSE_FAC_UNAV); 1452 + tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1471 1453 1472 1454 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1473 1455 MSR_VSX; 1474 - /* This loads & recheckpoints FP and VRs. */ 1475 - tm_recheckpoint(&current->thread, regs->msr); 1476 - current->thread.used_vsr = 1; 1456 + 1457 + /* This loads & recheckpoints FP and VRs; but we have 1458 + * to be sure not to overwrite previously-valid state. 1459 + */ 1460 + tm_recheckpoint(&current->thread, regs->msr & ~orig_msr); 1461 + 1462 + if (orig_msr & MSR_FP) 1463 + do_load_up_transact_fpu(&current->thread); 1464 + if (orig_msr & MSR_VEC) 1465 + do_load_up_transact_altivec(&current->thread); 1477 1466 } 1478 - #endif 1479 1467 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1480 1468 1481 1469 void performance_monitor_exception(struct pt_regs *regs)
-1
arch/powerpc/kernel/vdso32/vdso32_wrapper.S
··· 1 - #include <linux/init.h> 2 1 #include <linux/linkage.h> 3 2 #include <asm/page.h> 4 3
-1
arch/powerpc/kernel/vdso64/vdso64_wrapper.S
··· 1 - #include <linux/init.h> 2 1 #include <linux/linkage.h> 3 2 #include <asm/page.h> 4 3
+10
arch/powerpc/kernel/vector.S
··· 37 37 #endif 38 38 39 39 /* 40 + * Enable use of VMX/Altivec for the caller. 41 + */ 42 + _GLOBAL(vec_enable) 43 + mfmsr r3 44 + oris r3,r3,MSR_VEC@h 45 + MTMSRD(r3) 46 + isync 47 + blr 48 + 49 + /* 40 50 * Load state from memory into VMX registers including VSCR. 41 51 * Assumes the caller has enabled VMX in the MSR. 42 52 */
+22 -9
arch/powerpc/kernel/vio.c
··· 518 518 struct dma_attrs *attrs) 519 519 { 520 520 struct vio_dev *viodev = to_vio_dev(dev); 521 + struct iommu_table *tbl; 521 522 dma_addr_t ret = DMA_ERROR_CODE; 522 523 523 - if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) { 524 + tbl = get_iommu_table_base(dev); 525 + if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { 524 526 atomic_inc(&viodev->cmo.allocs_failed); 525 527 return ret; 526 528 } 527 529 528 530 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); 529 531 if (unlikely(dma_mapping_error(dev, ret))) { 530 - vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 532 + vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); 531 533 atomic_inc(&viodev->cmo.allocs_failed); 532 534 } 533 535 ··· 542 540 struct dma_attrs *attrs) 543 541 { 544 542 struct vio_dev *viodev = to_vio_dev(dev); 543 + struct iommu_table *tbl; 545 544 545 + tbl = get_iommu_table_base(dev); 546 546 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); 547 547 548 - vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 548 + vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); 549 549 } 550 550 551 551 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, ··· 555 551 struct dma_attrs *attrs) 556 552 { 557 553 struct vio_dev *viodev = to_vio_dev(dev); 554 + struct iommu_table *tbl; 558 555 struct scatterlist *sgl; 559 556 int ret, count = 0; 560 557 size_t alloc_size = 0; 561 558 559 + tbl = get_iommu_table_base(dev); 562 560 for (sgl = sglist; count < nelems; count++, sgl++) 563 - alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE); 561 + alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); 564 562 565 563 if (vio_cmo_alloc(viodev, alloc_size)) { 566 564 atomic_inc(&viodev->cmo.allocs_failed); ··· 578 572 } 579 573 580 574 for (sgl = sglist, count = 0; count < ret; count++, sgl++) 581 - alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 575 + alloc_size -= 
roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); 582 576 if (alloc_size) 583 577 vio_cmo_dealloc(viodev, alloc_size); 584 578 ··· 591 585 struct dma_attrs *attrs) 592 586 { 593 587 struct vio_dev *viodev = to_vio_dev(dev); 588 + struct iommu_table *tbl; 594 589 struct scatterlist *sgl; 595 590 size_t alloc_size = 0; 596 591 int count = 0; 597 592 593 + tbl = get_iommu_table_base(dev); 598 594 for (sgl = sglist; count < nelems; count++, sgl++) 599 - alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 595 + alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); 600 596 601 597 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs); 602 598 ··· 714 706 { 715 707 struct vio_cmo_dev_entry *dev_ent; 716 708 struct device *dev = &viodev->dev; 709 + struct iommu_table *tbl; 717 710 struct vio_driver *viodrv = to_vio_driver(dev->driver); 718 711 unsigned long flags; 719 712 size_t size; 720 713 bool dma_capable = false; 714 + 715 + tbl = get_iommu_table_base(dev); 721 716 722 717 /* A device requires entitlement if it has a DMA window property */ 723 718 switch (viodev->family) { ··· 747 736 return -EINVAL; 748 737 } 749 738 750 - viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev)); 739 + viodev->cmo.desired = 740 + IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl); 751 741 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) 752 742 viodev->cmo.desired = VIO_CMO_MIN_ENT; 753 743 size = VIO_CMO_MIN_ENT; ··· 1188 1176 &tbl->it_index, &offset, &size); 1189 1177 1190 1178 /* TCE table size - measured in tce entries */ 1191 - tbl->it_size = size >> IOMMU_PAGE_SHIFT; 1179 + tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; 1180 + tbl->it_size = size >> tbl->it_page_shift; 1192 1181 /* offset for VIO should always be 0 */ 1193 - tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; 1182 + tbl->it_offset = offset >> tbl->it_page_shift; 1194 1183 tbl->it_busno = 0; 1195 1184 tbl->it_type = TCE_VB; 1196 1185 tbl->it_blocksize = 16;
+24 -26
arch/powerpc/kvm/book3s_hv_ras.c
··· 12 12 #include <linux/kvm_host.h> 13 13 #include <linux/kernel.h> 14 14 #include <asm/opal.h> 15 + #include <asm/mce.h> 15 16 16 17 /* SRR1 bits for machine check on POWER7 */ 17 18 #define SRR1_MC_LDSTERR (1ul << (63-42)) ··· 59 58 } 60 59 } 61 60 62 - /* POWER7 TLB flush */ 63 - static void flush_tlb_power7(struct kvm_vcpu *vcpu) 64 - { 65 - unsigned long i, rb; 66 - 67 - rb = TLBIEL_INVAL_SET_LPID; 68 - for (i = 0; i < POWER7_TLB_SETS; ++i) { 69 - asm volatile("tlbiel %0" : : "r" (rb)); 70 - rb += 1 << TLBIEL_INVAL_SET_SHIFT; 71 - } 72 - } 73 - 74 61 /* 75 62 * On POWER7, see if we can handle a machine check that occurred inside 76 63 * the guest in real mode, without switching to the host partition. ··· 68 79 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) 69 80 { 70 81 unsigned long srr1 = vcpu->arch.shregs.msr; 71 - #ifdef CONFIG_PPC_POWERNV 72 - struct opal_machine_check_event *opal_evt; 73 - #endif 82 + struct machine_check_event mce_evt; 74 83 long handled = 1; 75 84 76 85 if (srr1 & SRR1_MC_LDSTERR) { ··· 83 96 DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI); 84 97 } 85 98 if (dsisr & DSISR_MC_TLB_MULTI) { 86 - flush_tlb_power7(vcpu); 99 + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) 100 + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID); 87 101 dsisr &= ~DSISR_MC_TLB_MULTI; 88 102 } 89 103 /* Any other errors we don't understand? */ ··· 101 113 reload_slb(vcpu); 102 114 break; 103 115 case SRR1_MC_IFETCH_TLBMULTI: 104 - flush_tlb_power7(vcpu); 116 + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) 117 + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID); 105 118 break; 106 119 default: 107 120 handled = 0; 108 121 } 109 122 110 - #ifdef CONFIG_PPC_POWERNV 111 123 /* 112 - * See if OPAL has already handled the condition. 113 - * We assume that if the condition is recovered then OPAL 124 + * See if we have already handled the condition in the linux host. 
125 + * We assume that if the condition is recovered then linux host 114 126 * will have generated an error log event that we will pick 115 127 * up and log later. 128 + * Don't release mce event now. In case if condition is not 129 + * recovered we do guest exit and go back to linux host machine 130 + * check handler. Hence we need make sure that current mce event 131 + * is available for linux host to consume. 116 132 */ 117 - opal_evt = local_paca->opal_mc_evt; 118 - if (opal_evt->version == OpalMCE_V1 && 119 - (opal_evt->severity == OpalMCE_SEV_NO_ERROR || 120 - opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED)) 133 + if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE)) 134 + goto out; 135 + 136 + if (mce_evt.version == MCE_V1 && 137 + (mce_evt.severity == MCE_SEV_NO_ERROR || 138 + mce_evt.disposition == MCE_DISPOSITION_RECOVERED)) 121 139 handled = 1; 122 140 141 + out: 142 + /* 143 + * If we have handled the error, then release the mce event because 144 + * we will be delivering machine check to guest. 145 + */ 123 146 if (handled) 124 - opal_evt->in_use = 0; 125 - #endif 147 + release_mce_event(); 126 148 127 149 return handled; 128 150 }
+2
arch/powerpc/kvm/bookehv_interrupts.S
··· 319 319 SPRN_DSRR0, SPRN_DSRR1, 0 320 320 kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \ 321 321 SPRN_CSRR0, SPRN_CSRR1, 0 322 + kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \ 323 + SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) 322 324 #else 323 325 /* 324 326 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
+15
arch/powerpc/lib/code-patching.c
··· 159 159 return 0; 160 160 } 161 161 162 + #ifdef CONFIG_PPC_BOOK3E_64 163 + void __patch_exception(int exc, unsigned long addr) 164 + { 165 + extern unsigned int interrupt_base_book3e; 166 + unsigned int *ibase = &interrupt_base_book3e; 167 + 168 + /* Our exceptions vectors start with a NOP and -then- a branch 169 + * to deal with single stepping from userspace which stops on 170 + * the second instruction. Thus we need to patch the second 171 + * instruction of the exception, not the first one 172 + */ 173 + 174 + patch_branch(ibase + (exc / 4) + 1, addr, 0); 175 + } 176 + #endif 162 177 163 178 #ifdef CONFIG_CODE_PATCHING_SELFTEST 164 179
+186
arch/powerpc/lib/crtsavres.S
··· 231 231 mr 1,11 232 232 blr 233 233 234 + #ifdef CONFIG_ALTIVEC 235 + /* Called with r0 pointing just beyond the end of the vector save area. */ 236 + 237 + _GLOBAL(_savevr_20) 238 + li r11,-192 239 + stvx vr20,r11,r0 240 + _GLOBAL(_savevr_21) 241 + li r11,-176 242 + stvx vr21,r11,r0 243 + _GLOBAL(_savevr_22) 244 + li r11,-160 245 + stvx vr22,r11,r0 246 + _GLOBAL(_savevr_23) 247 + li r11,-144 248 + stvx vr23,r11,r0 249 + _GLOBAL(_savevr_24) 250 + li r11,-128 251 + stvx vr24,r11,r0 252 + _GLOBAL(_savevr_25) 253 + li r11,-112 254 + stvx vr25,r11,r0 255 + _GLOBAL(_savevr_26) 256 + li r11,-96 257 + stvx vr26,r11,r0 258 + _GLOBAL(_savevr_27) 259 + li r11,-80 260 + stvx vr27,r11,r0 261 + _GLOBAL(_savevr_28) 262 + li r11,-64 263 + stvx vr28,r11,r0 264 + _GLOBAL(_savevr_29) 265 + li r11,-48 266 + stvx vr29,r11,r0 267 + _GLOBAL(_savevr_30) 268 + li r11,-32 269 + stvx vr30,r11,r0 270 + _GLOBAL(_savevr_31) 271 + li r11,-16 272 + stvx vr31,r11,r0 273 + blr 274 + 275 + _GLOBAL(_restvr_20) 276 + li r11,-192 277 + lvx vr20,r11,r0 278 + _GLOBAL(_restvr_21) 279 + li r11,-176 280 + lvx vr21,r11,r0 281 + _GLOBAL(_restvr_22) 282 + li r11,-160 283 + lvx vr22,r11,r0 284 + _GLOBAL(_restvr_23) 285 + li r11,-144 286 + lvx vr23,r11,r0 287 + _GLOBAL(_restvr_24) 288 + li r11,-128 289 + lvx vr24,r11,r0 290 + _GLOBAL(_restvr_25) 291 + li r11,-112 292 + lvx vr25,r11,r0 293 + _GLOBAL(_restvr_26) 294 + li r11,-96 295 + lvx vr26,r11,r0 296 + _GLOBAL(_restvr_27) 297 + li r11,-80 298 + lvx vr27,r11,r0 299 + _GLOBAL(_restvr_28) 300 + li r11,-64 301 + lvx vr28,r11,r0 302 + _GLOBAL(_restvr_29) 303 + li r11,-48 304 + lvx vr29,r11,r0 305 + _GLOBAL(_restvr_30) 306 + li r11,-32 307 + lvx vr30,r11,r0 308 + _GLOBAL(_restvr_31) 309 + li r11,-16 310 + lvx vr31,r11,r0 311 + blr 312 + 313 + #endif /* CONFIG_ALTIVEC */ 314 + 234 315 #else /* CONFIG_PPC64 */ 235 316 236 317 .section ".text.save.restore","ax",@progbits ··· 436 355 ld r31,-8(r1) 437 356 mtlr r0 438 357 blr 358 + 359 + #ifdef CONFIG_ALTIVEC 360 + 
/* Called with r0 pointing just beyond the end of the vector save area. */ 361 + 362 + .globl _savevr_20 363 + _savevr_20: 364 + li r12,-192 365 + stvx vr20,r12,r0 366 + .globl _savevr_21 367 + _savevr_21: 368 + li r12,-176 369 + stvx vr21,r12,r0 370 + .globl _savevr_22 371 + _savevr_22: 372 + li r12,-160 373 + stvx vr22,r12,r0 374 + .globl _savevr_23 375 + _savevr_23: 376 + li r12,-144 377 + stvx vr23,r12,r0 378 + .globl _savevr_24 379 + _savevr_24: 380 + li r12,-128 381 + stvx vr24,r12,r0 382 + .globl _savevr_25 383 + _savevr_25: 384 + li r12,-112 385 + stvx vr25,r12,r0 386 + .globl _savevr_26 387 + _savevr_26: 388 + li r12,-96 389 + stvx vr26,r12,r0 390 + .globl _savevr_27 391 + _savevr_27: 392 + li r12,-80 393 + stvx vr27,r12,r0 394 + .globl _savevr_28 395 + _savevr_28: 396 + li r12,-64 397 + stvx vr28,r12,r0 398 + .globl _savevr_29 399 + _savevr_29: 400 + li r12,-48 401 + stvx vr29,r12,r0 402 + .globl _savevr_30 403 + _savevr_30: 404 + li r12,-32 405 + stvx vr30,r12,r0 406 + .globl _savevr_31 407 + _savevr_31: 408 + li r12,-16 409 + stvx vr31,r12,r0 410 + blr 411 + 412 + .globl _restvr_20 413 + _restvr_20: 414 + li r12,-192 415 + lvx vr20,r12,r0 416 + .globl _restvr_21 417 + _restvr_21: 418 + li r12,-176 419 + lvx vr21,r12,r0 420 + .globl _restvr_22 421 + _restvr_22: 422 + li r12,-160 423 + lvx vr22,r12,r0 424 + .globl _restvr_23 425 + _restvr_23: 426 + li r12,-144 427 + lvx vr23,r12,r0 428 + .globl _restvr_24 429 + _restvr_24: 430 + li r12,-128 431 + lvx vr24,r12,r0 432 + .globl _restvr_25 433 + _restvr_25: 434 + li r12,-112 435 + lvx vr25,r12,r0 436 + .globl _restvr_26 437 + _restvr_26: 438 + li r12,-96 439 + lvx vr26,r12,r0 440 + .globl _restvr_27 441 + _restvr_27: 442 + li r12,-80 443 + lvx vr27,r12,r0 444 + .globl _restvr_28 445 + _restvr_28: 446 + li r12,-64 447 + lvx vr28,r12,r0 448 + .globl _restvr_29 449 + _restvr_29: 450 + li r12,-48 451 + lvx vr29,r12,r0 452 + .globl _restvr_30 453 + _restvr_30: 454 + li r12,-32 455 + lvx vr30,r12,r0 456 + .globl 
_restvr_31 457 + _restvr_31: 458 + li r12,-16 459 + lvx vr31,r12,r0 460 + blr 461 + 462 + #endif /* CONFIG_ALTIVEC */ 439 463 440 464 #endif /* CONFIG_PPC64 */ 441 465
+239 -79
arch/powerpc/math-emu/math_efp.c
··· 20 20 */ 21 21 22 22 #include <linux/types.h> 23 + #include <linux/prctl.h> 23 24 24 25 #include <asm/uaccess.h> 25 26 #include <asm/reg.h> ··· 276 275 277 276 case EFSCTSF: 278 277 case EFSCTUF: 279 - if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) { 280 - /* NaN */ 281 - if (((vb.wp[1] >> 23) & 0xff) == 0) { 282 - /* denorm */ 283 - vc.wp[1] = 0x0; 284 - } else if ((vb.wp[1] >> 31) == 0) { 285 - /* positive normal */ 286 - vc.wp[1] = (func == EFSCTSF) ? 287 - 0x7fffffff : 0xffffffff; 288 - } else { /* negative normal */ 289 - vc.wp[1] = (func == EFSCTSF) ? 290 - 0x80000000 : 0x0; 291 - } 292 - } else { /* rB is NaN */ 293 - vc.wp[1] = 0x0; 278 + if (SB_c == FP_CLS_NAN) { 279 + vc.wp[1] = 0; 280 + FP_SET_EXCEPTION(FP_EX_INVALID); 281 + } else { 282 + SB_e += (func == EFSCTSF ? 31 : 32); 283 + FP_TO_INT_ROUND_S(vc.wp[1], SB, 32, 284 + (func == EFSCTSF)); 294 285 } 295 286 goto update_regs; 296 287 ··· 299 306 } 300 307 301 308 case EFSCTSI: 302 - case EFSCTSIZ: 303 309 case EFSCTUI: 304 - case EFSCTUIZ: 305 - if (func & 0x4) { 306 - _FP_ROUND(1, SB); 310 + if (SB_c == FP_CLS_NAN) { 311 + vc.wp[1] = 0; 312 + FP_SET_EXCEPTION(FP_EX_INVALID); 307 313 } else { 308 - _FP_ROUND_ZERO(1, SB); 314 + FP_TO_INT_ROUND_S(vc.wp[1], SB, 32, 315 + ((func & 0x3) != 0)); 309 316 } 310 - FP_TO_INT_S(vc.wp[1], SB, 32, 311 - (((func & 0x3) != 0) || SB_s)); 317 + goto update_regs; 318 + 319 + case EFSCTSIZ: 320 + case EFSCTUIZ: 321 + if (SB_c == FP_CLS_NAN) { 322 + vc.wp[1] = 0; 323 + FP_SET_EXCEPTION(FP_EX_INVALID); 324 + } else { 325 + FP_TO_INT_S(vc.wp[1], SB, 32, 326 + ((func & 0x3) != 0)); 327 + } 312 328 goto update_regs; 313 329 314 330 default: ··· 406 404 407 405 case EFDCTSF: 408 406 case EFDCTUF: 409 - if (!((vb.wp[0] >> 20) == 0x7ff && 410 - ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) { 411 - /* not a NaN */ 412 - if (((vb.wp[0] >> 20) & 0x7ff) == 0) { 413 - /* denorm */ 414 - vc.wp[1] = 0x0; 415 - } else if ((vb.wp[0] >> 31) == 0) { 416 - /* positive 
normal */ 417 - vc.wp[1] = (func == EFDCTSF) ? 418 - 0x7fffffff : 0xffffffff; 419 - } else { /* negative normal */ 420 - vc.wp[1] = (func == EFDCTSF) ? 421 - 0x80000000 : 0x0; 422 - } 423 - } else { /* NaN */ 424 - vc.wp[1] = 0x0; 407 + if (DB_c == FP_CLS_NAN) { 408 + vc.wp[1] = 0; 409 + FP_SET_EXCEPTION(FP_EX_INVALID); 410 + } else { 411 + DB_e += (func == EFDCTSF ? 31 : 32); 412 + FP_TO_INT_ROUND_D(vc.wp[1], DB, 32, 413 + (func == EFDCTSF)); 425 414 } 426 415 goto update_regs; 427 416 ··· 430 437 431 438 case EFDCTUIDZ: 432 439 case EFDCTSIDZ: 433 - _FP_ROUND_ZERO(2, DB); 434 - FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0)); 440 + if (DB_c == FP_CLS_NAN) { 441 + vc.dp[0] = 0; 442 + FP_SET_EXCEPTION(FP_EX_INVALID); 443 + } else { 444 + FP_TO_INT_D(vc.dp[0], DB, 64, 445 + ((func & 0x1) == 0)); 446 + } 435 447 goto update_regs; 436 448 437 449 case EFDCTUI: 438 450 case EFDCTSI: 451 + if (DB_c == FP_CLS_NAN) { 452 + vc.wp[1] = 0; 453 + FP_SET_EXCEPTION(FP_EX_INVALID); 454 + } else { 455 + FP_TO_INT_ROUND_D(vc.wp[1], DB, 32, 456 + ((func & 0x3) != 0)); 457 + } 458 + goto update_regs; 459 + 439 460 case EFDCTUIZ: 440 461 case EFDCTSIZ: 441 - if (func & 0x4) { 442 - _FP_ROUND(2, DB); 462 + if (DB_c == FP_CLS_NAN) { 463 + vc.wp[1] = 0; 464 + FP_SET_EXCEPTION(FP_EX_INVALID); 443 465 } else { 444 - _FP_ROUND_ZERO(2, DB); 466 + FP_TO_INT_D(vc.wp[1], DB, 32, 467 + ((func & 0x3) != 0)); 445 468 } 446 - FP_TO_INT_D(vc.wp[1], DB, 32, 447 - (((func & 0x3) != 0) || DB_s)); 448 469 goto update_regs; 449 470 450 471 default: ··· 563 556 cmp = -1; 564 557 goto cmp_vs; 565 558 566 - case EVFSCTSF: 567 - __asm__ __volatile__ ("mtspr 512, %4\n" 568 - "efsctsf %0, %2\n" 569 - "efsctsf %1, %3\n" 570 - : "=r" (vc.wp[0]), "=r" (vc.wp[1]) 571 - : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0)); 572 - goto update_regs; 573 - 574 559 case EVFSCTUF: 575 - __asm__ __volatile__ ("mtspr 512, %4\n" 576 - "efsctuf %0, %2\n" 577 - "efsctuf %1, %3\n" 578 - : "=r" (vc.wp[0]), "=r" (vc.wp[1]) 579 - : "r" 
(vb.wp[0]), "r" (vb.wp[1]), "r" (0)); 560 + case EVFSCTSF: 561 + if (SB0_c == FP_CLS_NAN) { 562 + vc.wp[0] = 0; 563 + FP_SET_EXCEPTION(FP_EX_INVALID); 564 + } else { 565 + SB0_e += (func == EVFSCTSF ? 31 : 32); 566 + FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32, 567 + (func == EVFSCTSF)); 568 + } 569 + if (SB1_c == FP_CLS_NAN) { 570 + vc.wp[1] = 0; 571 + FP_SET_EXCEPTION(FP_EX_INVALID); 572 + } else { 573 + SB1_e += (func == EVFSCTSF ? 31 : 32); 574 + FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32, 575 + (func == EVFSCTSF)); 576 + } 580 577 goto update_regs; 581 578 582 579 case EVFSCTUI: 583 580 case EVFSCTSI: 581 + if (SB0_c == FP_CLS_NAN) { 582 + vc.wp[0] = 0; 583 + FP_SET_EXCEPTION(FP_EX_INVALID); 584 + } else { 585 + FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32, 586 + ((func & 0x3) != 0)); 587 + } 588 + if (SB1_c == FP_CLS_NAN) { 589 + vc.wp[1] = 0; 590 + FP_SET_EXCEPTION(FP_EX_INVALID); 591 + } else { 592 + FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32, 593 + ((func & 0x3) != 0)); 594 + } 595 + goto update_regs; 596 + 584 597 case EVFSCTUIZ: 585 598 case EVFSCTSIZ: 586 - if (func & 0x4) { 587 - _FP_ROUND(1, SB0); 588 - _FP_ROUND(1, SB1); 599 + if (SB0_c == FP_CLS_NAN) { 600 + vc.wp[0] = 0; 601 + FP_SET_EXCEPTION(FP_EX_INVALID); 589 602 } else { 590 - _FP_ROUND_ZERO(1, SB0); 591 - _FP_ROUND_ZERO(1, SB1); 603 + FP_TO_INT_S(vc.wp[0], SB0, 32, 604 + ((func & 0x3) != 0)); 592 605 } 593 - FP_TO_INT_S(vc.wp[0], SB0, 32, 594 - (((func & 0x3) != 0) || SB0_s)); 595 - FP_TO_INT_S(vc.wp[1], SB1, 32, 596 - (((func & 0x3) != 0) || SB1_s)); 606 + if (SB1_c == FP_CLS_NAN) { 607 + vc.wp[1] = 0; 608 + FP_SET_EXCEPTION(FP_EX_INVALID); 609 + } else { 610 + FP_TO_INT_S(vc.wp[1], SB1, 32, 611 + ((func & 0x3) != 0)); 612 + } 597 613 goto update_regs; 598 614 599 615 default: ··· 660 630 regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2)); 661 631 662 632 update_regs: 663 - __FPU_FPSCR &= ~FP_EX_MASK; 633 + /* 634 + * If the "invalid" exception sticky bit was set by the 635 + * processor for non-finite input, but 
was not set before the 636 + * instruction being emulated, clear it. Likewise for the 637 + * "underflow" bit, which may have been set by the processor 638 + * for exact underflow, not just inexact underflow when the 639 + * flag should be set for IEEE 754 semantics. Other sticky 640 + * exceptions will only be set by the processor when they are 641 + * correct according to IEEE 754 semantics, and we must not 642 + * clear sticky bits that were already set before the emulated 643 + * instruction as they represent the user-visible sticky 644 + * exception status. "inexact" traps to kernel are not 645 + * required for IEEE semantics and are not enabled by default, 646 + * so the "inexact" sticky bit may have been set by a previous 647 + * instruction without the kernel being aware of it. 648 + */ 649 + __FPU_FPSCR 650 + &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last; 664 651 __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK); 665 652 mtspr(SPRN_SPEFSCR, __FPU_FPSCR); 653 + current->thread.spefscr_last = __FPU_FPSCR; 666 654 667 655 current->thread.evr[fc] = vc.wp[0]; 668 656 regs->gpr[fc] = vc.wp[1]; ··· 692 644 pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]); 693 645 pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]); 694 646 647 + if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) { 648 + if ((FP_CUR_EXCEPTIONS & FP_EX_DIVZERO) 649 + && (current->thread.fpexc_mode & PR_FP_EXC_DIV)) 650 + return 1; 651 + if ((FP_CUR_EXCEPTIONS & FP_EX_OVERFLOW) 652 + && (current->thread.fpexc_mode & PR_FP_EXC_OVF)) 653 + return 1; 654 + if ((FP_CUR_EXCEPTIONS & FP_EX_UNDERFLOW) 655 + && (current->thread.fpexc_mode & PR_FP_EXC_UND)) 656 + return 1; 657 + if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT) 658 + && (current->thread.fpexc_mode & PR_FP_EXC_RES)) 659 + return 1; 660 + if ((FP_CUR_EXCEPTIONS & FP_EX_INVALID) 661 + && (current->thread.fpexc_mode & PR_FP_EXC_INV)) 662 + return 1; 663 + } 695 664 return 0; 696 665 697 666 illegal: ··· 727 662 { 728 663 union dw_union 
fgpr; 729 664 int s_lo, s_hi; 730 - unsigned long speinsn, type, fc; 665 + int lo_inexact, hi_inexact; 666 + int fp_result; 667 + unsigned long speinsn, type, fb, fc, fptype, func; 731 668 732 669 if (get_user(speinsn, (unsigned int __user *) regs->nip)) 733 670 return -EFAULT; 734 671 if ((speinsn >> 26) != 4) 735 672 return -EINVAL; /* not an spe instruction */ 736 673 737 - type = insn_type(speinsn & 0x7ff); 674 + func = speinsn & 0x7ff; 675 + type = insn_type(func); 738 676 if (type == XCR) return -ENOSYS; 739 677 740 678 __FPU_FPSCR = mfspr(SPRN_SPEFSCR); 741 679 pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR); 742 680 681 + fptype = (speinsn >> 5) & 0x7; 682 + 743 683 /* No need to round if the result is exact */ 744 - if (!(__FPU_FPSCR & FP_EX_INEXACT)) 684 + lo_inexact = __FPU_FPSCR & (SPEFSCR_FG | SPEFSCR_FX); 685 + hi_inexact = __FPU_FPSCR & (SPEFSCR_FGH | SPEFSCR_FXH); 686 + if (!(lo_inexact || (hi_inexact && fptype == VCT))) 745 687 return 0; 746 688 747 689 fc = (speinsn >> 21) & 0x1f; ··· 757 685 fgpr.wp[0] = current->thread.evr[fc]; 758 686 fgpr.wp[1] = regs->gpr[fc]; 759 687 688 + fb = (speinsn >> 11) & 0x1f; 689 + switch (func) { 690 + case EFSCTUIZ: 691 + case EFSCTSIZ: 692 + case EVFSCTUIZ: 693 + case EVFSCTSIZ: 694 + case EFDCTUIDZ: 695 + case EFDCTSIDZ: 696 + case EFDCTUIZ: 697 + case EFDCTSIZ: 698 + /* 699 + * These instructions always round to zero, 700 + * independent of the rounding mode. 701 + */ 702 + return 0; 703 + 704 + case EFSCTUI: 705 + case EFSCTUF: 706 + case EVFSCTUI: 707 + case EVFSCTUF: 708 + case EFDCTUI: 709 + case EFDCTUF: 710 + fp_result = 0; 711 + s_lo = 0; 712 + s_hi = 0; 713 + break; 714 + 715 + case EFSCTSI: 716 + case EFSCTSF: 717 + fp_result = 0; 718 + /* Recover the sign of a zero result if possible. 
*/ 719 + if (fgpr.wp[1] == 0) 720 + s_lo = regs->gpr[fb] & SIGN_BIT_S; 721 + break; 722 + 723 + case EVFSCTSI: 724 + case EVFSCTSF: 725 + fp_result = 0; 726 + /* Recover the sign of a zero result if possible. */ 727 + if (fgpr.wp[1] == 0) 728 + s_lo = regs->gpr[fb] & SIGN_BIT_S; 729 + if (fgpr.wp[0] == 0) 730 + s_hi = current->thread.evr[fb] & SIGN_BIT_S; 731 + break; 732 + 733 + case EFDCTSI: 734 + case EFDCTSF: 735 + fp_result = 0; 736 + s_hi = s_lo; 737 + /* Recover the sign of a zero result if possible. */ 738 + if (fgpr.wp[1] == 0) 739 + s_hi = current->thread.evr[fb] & SIGN_BIT_S; 740 + break; 741 + 742 + default: 743 + fp_result = 1; 744 + break; 745 + } 746 + 760 747 pr_debug("round fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]); 761 748 762 - switch ((speinsn >> 5) & 0x7) { 749 + switch (fptype) { 763 750 /* Since SPE instructions on E500 core can handle round to nearest 764 751 * and round toward zero with IEEE-754 complied, we just need 765 752 * to handle round toward +Inf and round toward -Inf by software. 
··· 827 696 if ((FP_ROUNDMODE) == FP_RND_PINF) { 828 697 if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */ 829 698 } else { /* round to -Inf */ 830 - if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */ 699 + if (s_lo) { 700 + if (fp_result) 701 + fgpr.wp[1]++; /* Z < 0, choose Z2 */ 702 + else 703 + fgpr.wp[1]--; /* Z < 0, choose Z2 */ 704 + } 831 705 } 832 706 break; 833 707 834 708 case DPFP: 835 709 if (FP_ROUNDMODE == FP_RND_PINF) { 836 - if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */ 710 + if (!s_hi) { 711 + if (fp_result) 712 + fgpr.dp[0]++; /* Z > 0, choose Z1 */ 713 + else 714 + fgpr.wp[1]++; /* Z > 0, choose Z1 */ 715 + } 837 716 } else { /* round to -Inf */ 838 - if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */ 717 + if (s_hi) { 718 + if (fp_result) 719 + fgpr.dp[0]++; /* Z < 0, choose Z2 */ 720 + else 721 + fgpr.wp[1]--; /* Z < 0, choose Z2 */ 722 + } 839 723 } 840 724 break; 841 725 842 726 case VCT: 843 727 if (FP_ROUNDMODE == FP_RND_PINF) { 844 - if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */ 845 - if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */ 728 + if (lo_inexact && !s_lo) 729 + fgpr.wp[1]++; /* Z_low > 0, choose Z1 */ 730 + if (hi_inexact && !s_hi) 731 + fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */ 846 732 } else { /* round to -Inf */ 847 - if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */ 848 - if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */ 733 + if (lo_inexact && s_lo) { 734 + if (fp_result) 735 + fgpr.wp[1]++; /* Z_low < 0, choose Z2 */ 736 + else 737 + fgpr.wp[1]--; /* Z_low < 0, choose Z2 */ 738 + } 739 + if (hi_inexact && s_hi) { 740 + if (fp_result) 741 + fgpr.wp[0]++; /* Z_high < 0, choose Z2 */ 742 + else 743 + fgpr.wp[0]--; /* Z_high < 0, choose Z2 */ 744 + } 849 745 } 850 746 break; 851 747 ··· 885 727 886 728 pr_debug(" to fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]); 887 729 730 + if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 731 + return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 
1 : 0; 888 732 return 0; 889 733 } 890 734
+77 -3
arch/powerpc/mm/fsl_booke_mmu.c
··· 52 52 #include <asm/smp.h> 53 53 #include <asm/machdep.h> 54 54 #include <asm/setup.h> 55 + #include <asm/paca.h> 55 56 56 57 #include "mmu_decl.h" 57 58 ··· 172 171 return 1UL << camsize; 173 172 } 174 173 175 - unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) 174 + static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, 175 + unsigned long ram, int max_cam_idx) 176 176 { 177 177 int i; 178 - unsigned long virt = PAGE_OFFSET; 179 - phys_addr_t phys = memstart_addr; 180 178 unsigned long amount_mapped = 0; 181 179 182 180 /* Calculate CAM values */ ··· 192 192 } 193 193 tlbcam_index = i; 194 194 195 + #ifdef CONFIG_PPC64 196 + get_paca()->tcd.esel_next = i; 197 + get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; 198 + get_paca()->tcd.esel_first = i; 199 + #endif 200 + 195 201 return amount_mapped; 202 + } 203 + 204 + unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) 205 + { 206 + unsigned long virt = PAGE_OFFSET; 207 + phys_addr_t phys = memstart_addr; 208 + 209 + return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx); 196 210 } 197 211 198 212 #ifdef CONFIG_PPC32 ··· 236 222 /* adjust lowmem size to __max_low_memory */ 237 223 ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); 238 224 225 + i = switch_to_as1(); 239 226 __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); 227 + restore_to_as0(i, 0, 0, 1); 240 228 241 229 pr_info("Memory CAM mapping: "); 242 230 for (i = 0; i < tlbcam_index - 1; i++) ··· 257 241 /* 64M mapped initially according to head_fsl_booke.S */ 258 242 memblock_set_current_limit(min_t(u64, limit, 0x04000000)); 259 243 } 244 + 245 + #ifdef CONFIG_RELOCATABLE 246 + int __initdata is_second_reloc; 247 + notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) 248 + { 249 + unsigned long base = KERNELBASE; 250 + 251 + kernstart_addr = start; 252 + if (is_second_reloc) { 253 + virt_phys_offset = PAGE_OFFSET - memstart_addr; 254 + 
return; 255 + } 256 + 257 + /* 258 + * Relocatable kernel support based on processing of dynamic 259 + * relocation entries. Before we get the real memstart_addr, 260 + * We will compute the virt_phys_offset like this: 261 + * virt_phys_offset = stext.run - kernstart_addr 262 + * 263 + * stext.run = (KERNELBASE & ~0x3ffffff) + 264 + * (kernstart_addr & 0x3ffffff) 265 + * When we relocate, we have : 266 + * 267 + * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) 268 + * 269 + * hence: 270 + * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - 271 + * (kernstart_addr & ~0x3ffffff) 272 + * 273 + */ 274 + start &= ~0x3ffffff; 275 + base &= ~0x3ffffff; 276 + virt_phys_offset = base - start; 277 + early_get_first_memblock_info(__va(dt_ptr), NULL); 278 + /* 279 + * We now get the memstart_addr, then we should check if this 280 + * address is the same as what the PAGE_OFFSET map to now. If 281 + * not we have to change the map of PAGE_OFFSET to memstart_addr 282 + * and do a second relocation. 283 + */ 284 + if (start != memstart_addr) { 285 + int n; 286 + long offset = start - memstart_addr; 287 + 288 + is_second_reloc = 1; 289 + n = switch_to_as1(); 290 + /* map a 64M area for the second relocation */ 291 + if (memstart_addr > start) 292 + map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM); 293 + else 294 + map_mem_in_cams_addr(start, PAGE_OFFSET + offset, 295 + 0x4000000, CONFIG_LOWMEM_CAM_NUM); 296 + restore_to_as0(n, offset, __va(dt_ptr), 1); 297 + /* We should never reach here */ 298 + panic("Relocation error"); 299 + } 300 + } 301 + #endif 260 302 #endif
+12 -3
arch/powerpc/mm/hash_low_64.S
··· 148 148 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ 149 149 andc r0,r30,r0 /* r0 = pte & ~r0 */ 150 150 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ 151 - ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ 151 + /* 152 + * Always add "C" bit for perf. Memory coherence is always enabled 153 + */ 154 + ori r3,r3,HPTE_R_C | HPTE_R_M 152 155 153 156 /* We eventually do the icache sync here (maybe inline that 154 157 * code rather than call a C function...) ··· 460 457 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ 461 458 andc r0,r3,r0 /* r0 = pte & ~r0 */ 462 459 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ 463 - ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ 460 + /* 461 + * Always add "C" bit for perf. Memory coherence is always enabled 462 + */ 463 + ori r3,r3,HPTE_R_C | HPTE_R_M 464 464 465 465 /* We eventually do the icache sync here (maybe inline that 466 466 * code rather than call a C function...) ··· 801 795 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ 802 796 andc r0,r30,r0 /* r0 = pte & ~r0 */ 803 797 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ 804 - ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ 798 + /* 799 + * Always add "C" bit for perf. Memory coherence is always enabled 800 + */ 801 + ori r3,r3,HPTE_R_C | HPTE_R_M 805 802 806 803 /* We eventually do the icache sync here (maybe inline that 807 804 * code rather than call a C function...)
+4 -3
arch/powerpc/mm/hash_utils_64.c
··· 169 169 if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && 170 170 (pteflags & _PAGE_DIRTY))) 171 171 rflags |= 1; 172 - 173 - /* Always add C */ 174 - return rflags | HPTE_R_C; 172 + /* 173 + * Always add "C" bit for perf. Memory coherence is always enabled 174 + */ 175 + return rflags | HPTE_R_C | HPTE_R_M; 175 176 } 176 177 177 178 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+5 -1
arch/powerpc/mm/hugepage-hash64.c
··· 127 127 128 128 /* Add in WIMG bits */ 129 129 rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | 130 - _PAGE_COHERENT | _PAGE_GUARDED)); 130 + _PAGE_GUARDED)); 131 + /* 132 + * enable the memory coherence always 133 + */ 134 + rflags |= HPTE_R_M; 131 135 132 136 /* Insert into the hash table, primary slot */ 133 137 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+43 -11
arch/powerpc/mm/hugetlbpage-book3e.c
··· 8 8 #include <linux/mm.h> 9 9 #include <linux/hugetlb.h> 10 10 11 + #ifdef CONFIG_PPC_FSL_BOOK3E 12 + #ifdef CONFIG_PPC64 13 + static inline int tlb1_next(void) 14 + { 15 + struct paca_struct *paca = get_paca(); 16 + struct tlb_core_data *tcd; 17 + int this, next; 18 + 19 + tcd = paca->tcd_ptr; 20 + this = tcd->esel_next; 21 + 22 + next = this + 1; 23 + if (next >= tcd->esel_max) 24 + next = tcd->esel_first; 25 + 26 + tcd->esel_next = next; 27 + return this; 28 + } 29 + #else 30 + static inline int tlb1_next(void) 31 + { 32 + int index, ncams; 33 + 34 + ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; 35 + 36 + index = __get_cpu_var(next_tlbcam_idx); 37 + 38 + /* Just round-robin the entries and wrap when we hit the end */ 39 + if (unlikely(index == ncams - 1)) 40 + __get_cpu_var(next_tlbcam_idx) = tlbcam_index; 41 + else 42 + __get_cpu_var(next_tlbcam_idx)++; 43 + 44 + return index; 45 + } 46 + #endif /* !PPC64 */ 47 + #endif /* FSL */ 48 + 11 49 static inline int mmu_get_tsize(int psize) 12 50 { 13 51 return mmu_psize_defs[psize].enc; ··· 85 47 struct mm_struct *mm; 86 48 87 49 #ifdef CONFIG_PPC_FSL_BOOK3E 88 - int index, ncams; 50 + int index; 89 51 #endif 90 52 91 53 if (unlikely(is_kernel_addr(ea))) ··· 115 77 } 116 78 117 79 #ifdef CONFIG_PPC_FSL_BOOK3E 118 - ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; 119 - 120 80 /* We have to use the CAM(TLB1) on FSL parts for hugepages */ 121 - index = __get_cpu_var(next_tlbcam_idx); 81 + index = tlb1_next(); 122 82 mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); 123 - 124 - /* Just round-robin the entries and wrap when we hit the end */ 125 - if (unlikely(index == ncams - 1)) 126 - __get_cpu_var(next_tlbcam_idx) = tlbcam_index; 127 - else 128 - __get_cpu_var(next_tlbcam_idx)++; 129 83 #endif 84 + 130 85 mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); 131 86 mas2 = ea & ~((1UL << shift) - 1); 132 87 mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; ··· 134 103 if 
(mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) { 135 104 mtspr(SPRN_MAS7_MAS3, mas7_3); 136 105 } else { 137 - mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); 106 + if (mmu_has_feature(MMU_FTR_BIG_PHYS)) 107 + mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); 138 108 mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); 139 109 } 140 110
+4
arch/powerpc/mm/hugetlbpage-hash64.c
··· 99 99 /* Add in WIMG bits */ 100 100 rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | 101 101 _PAGE_COHERENT | _PAGE_GUARDED)); 102 + /* 103 + * enable the memory coherence always 104 + */ 105 + rflags |= HPTE_R_M; 102 106 103 107 slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, 104 108 mmu_psize, ssize);
+7 -1
arch/powerpc/mm/mem.c
··· 307 307 308 308 void __init mem_init(void) 309 309 { 310 + /* 311 + * book3s is limited to 16 page sizes due to encoding this in 312 + * a 4-bit field for slices. 313 + */ 314 + BUILD_BUG_ON(MMU_PAGE_COUNT > 16); 315 + 310 316 #ifdef CONFIG_SWIOTLB 311 317 swiotlb_init(0); 312 318 #endif ··· 513 507 * System memory should not be in /proc/iomem but various tools expect it 514 508 * (eg kdump). 515 509 */ 516 - static int add_system_ram_resources(void) 510 + static int __init add_system_ram_resources(void) 517 511 { 518 512 struct memblock_region *reg; 519 513
+2
arch/powerpc/mm/mmu_decl.h
··· 148 148 extern void MMU_init_hw(void); 149 149 extern unsigned long mmu_mapin_ram(unsigned long top); 150 150 extern void adjust_total_lowmem(void); 151 + extern int switch_to_as1(void); 152 + extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu); 151 153 #endif 152 154 extern void loadcam_entry(unsigned int index); 153 155
+91 -5
arch/powerpc/mm/numa.c
··· 31 31 #include <asm/sparsemem.h> 32 32 #include <asm/prom.h> 33 33 #include <asm/smp.h> 34 + #include <asm/cputhreads.h> 35 + #include <asm/topology.h> 34 36 #include <asm/firmware.h> 35 37 #include <asm/paca.h> 36 38 #include <asm/hvcall.h> ··· 154 152 } 155 153 } 156 154 157 - static void map_cpu_to_node(int cpu, int node) 155 + static void reset_numa_cpu_lookup_table(void) 156 + { 157 + unsigned int cpu; 158 + 159 + for_each_possible_cpu(cpu) 160 + numa_cpu_lookup_table[cpu] = -1; 161 + } 162 + 163 + static void update_numa_cpu_lookup_table(unsigned int cpu, int node) 158 164 { 159 165 numa_cpu_lookup_table[cpu] = node; 166 + } 167 + 168 + static void map_cpu_to_node(int cpu, int node) 169 + { 170 + update_numa_cpu_lookup_table(cpu, node); 160 171 161 172 dbg("adding cpu %d to node %d\n", cpu, node); 162 173 ··· 537 522 */ 538 523 static int numa_setup_cpu(unsigned long lcpu) 539 524 { 540 - int nid = 0; 541 - struct device_node *cpu = of_get_cpu_node(lcpu, NULL); 525 + int nid; 526 + struct device_node *cpu; 527 + 528 + /* 529 + * If a valid cpu-to-node mapping is already available, use it 530 + * directly instead of querying the firmware, since it represents 531 + * the most recent mapping notified to us by the platform (eg: VPHN). 
532 + */ 533 + if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) { 534 + map_cpu_to_node(lcpu, nid); 535 + return nid; 536 + } 537 + 538 + cpu = of_get_cpu_node(lcpu, NULL); 542 539 543 540 if (!cpu) { 544 541 WARN_ON(1); 542 + nid = 0; 545 543 goto out; 546 544 } 547 545 ··· 570 542 return nid; 571 543 } 572 544 545 + static void verify_cpu_node_mapping(int cpu, int node) 546 + { 547 + int base, sibling, i; 548 + 549 + /* Verify that all the threads in the core belong to the same node */ 550 + base = cpu_first_thread_sibling(cpu); 551 + 552 + for (i = 0; i < threads_per_core; i++) { 553 + sibling = base + i; 554 + 555 + if (sibling == cpu || cpu_is_offline(sibling)) 556 + continue; 557 + 558 + if (cpu_to_node(sibling) != node) { 559 + WARN(1, "CPU thread siblings %d and %d don't belong" 560 + " to the same node!\n", cpu, sibling); 561 + break; 562 + } 563 + } 564 + } 565 + 573 566 static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action, 574 567 void *hcpu) 575 568 { 576 569 unsigned long lcpu = (unsigned long)hcpu; 577 - int ret = NOTIFY_DONE; 570 + int ret = NOTIFY_DONE, nid; 578 571 579 572 switch (action) { 580 573 case CPU_UP_PREPARE: 581 574 case CPU_UP_PREPARE_FROZEN: 582 - numa_setup_cpu(lcpu); 575 + nid = numa_setup_cpu(lcpu); 576 + verify_cpu_node_mapping((int)lcpu, nid); 583 577 ret = NOTIFY_OK; 584 578 break; 585 579 #ifdef CONFIG_HOTPLUG_CPU ··· 1119 1069 */ 1120 1070 setup_node_to_cpumask_map(); 1121 1071 1072 + reset_numa_cpu_lookup_table(); 1122 1073 register_cpu_notifier(&ppc64_numa_nb); 1123 1074 cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, 1124 1075 (void *)(unsigned long)boot_cpuid); ··· 1498 1447 return 0; 1499 1448 } 1500 1449 1450 + static int update_lookup_table(void *data) 1451 + { 1452 + struct topology_update_data *update; 1453 + 1454 + if (!data) 1455 + return -EINVAL; 1456 + 1457 + /* 1458 + * Upon topology update, the numa-cpu lookup table needs to be updated 1459 + * for all threads in the core, including 
offline CPUs, to ensure that 1460 + * future hotplug operations respect the cpu-to-node associativity 1461 + * properly. 1462 + */ 1463 + for (update = data; update; update = update->next) { 1464 + int nid, base, j; 1465 + 1466 + nid = update->new_nid; 1467 + base = cpu_first_thread_sibling(update->cpu); 1468 + 1469 + for (j = 0; j < threads_per_core; j++) { 1470 + update_numa_cpu_lookup_table(base + j, nid); 1471 + } 1472 + } 1473 + 1474 + return 0; 1475 + } 1476 + 1501 1477 /* 1502 1478 * Update the node maps and sysfs entries for each cpu whose home node 1503 1479 * has changed. Returns 1 when the topology has changed, and 0 otherwise. ··· 1592 1514 } 1593 1515 1594 1516 stop_machine(update_cpu_topology, &updates[0], &updated_cpus); 1517 + 1518 + /* 1519 + * Update the numa-cpu lookup table with the new mappings, even for 1520 + * offline CPUs. It is best to perform this update from the stop- 1521 + * machine context. 1522 + */ 1523 + stop_machine(update_lookup_table, &updates[0], 1524 + cpumask_of(raw_smp_processor_id())); 1595 1525 1596 1526 for (ud = &updates[0]; ud; ud = ud->next) { 1597 1527 unregister_cpu_under_node(ud->cpu, ud->old_nid);
+1 -2
arch/powerpc/mm/pgtable.c
··· 24 24 #include <linux/kernel.h> 25 25 #include <linux/gfp.h> 26 26 #include <linux/mm.h> 27 - #include <linux/init.h> 28 27 #include <linux/percpu.h> 29 28 #include <linux/hardirq.h> 30 29 #include <linux/hugetlb.h> ··· 173 174 pte_t pte) 174 175 { 175 176 #ifdef CONFIG_DEBUG_VM 176 - WARN_ON(pte_present(*ptep)); 177 + WARN_ON(pte_val(*ptep) & _PAGE_PRESENT); 177 178 #endif 178 179 /* Note: mm->context.id might not yet have been assigned as 179 180 * this context might not have been activated yet when this
+1
arch/powerpc/mm/pgtable_32.c
··· 299 299 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 300 300 __pgprot(flags))); 301 301 } 302 + smp_wmb(); 302 303 return err; 303 304 } 304 305
+13 -2
arch/powerpc/mm/pgtable_64.c
··· 33 33 #include <linux/swap.h> 34 34 #include <linux/stddef.h> 35 35 #include <linux/vmalloc.h> 36 - #include <linux/init.h> 37 36 #include <linux/bootmem.h> 38 37 #include <linux/memblock.h> 39 38 #include <linux/slab.h> ··· 152 153 } 153 154 #endif /* !CONFIG_PPC_MMU_NOHASH */ 154 155 } 156 + 157 + #ifdef CONFIG_PPC_BOOK3E_64 158 + /* 159 + * With hardware tablewalk, a sync is needed to ensure that 160 + * subsequent accesses see the PTE we just wrote. Unlike userspace 161 + * mappings, we can't tolerate spurious faults, so make sure 162 + * the new PTE will be seen the first time. 163 + */ 164 + mb(); 165 + #else 166 + smp_wmb(); 167 + #endif 155 168 return 0; 156 169 } 157 170 ··· 698 687 pmd_t *pmdp, pmd_t pmd) 699 688 { 700 689 #ifdef CONFIG_DEBUG_VM 701 - WARN_ON(!pmd_none(*pmdp)); 690 + WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT); 702 691 assert_spin_locked(&mm->page_table_lock); 703 692 WARN_ON(!pmd_trans_huge(pmd)); 704 693 #endif
-1
arch/powerpc/mm/tlb_hash64.c
··· 23 23 24 24 #include <linux/kernel.h> 25 25 #include <linux/mm.h> 26 - #include <linux/init.h> 27 26 #include <linux/percpu.h> 28 27 #include <linux/hardirq.h> 29 28 #include <asm/pgalloc.h>
+173 -1
arch/powerpc/mm/tlb_low_64e.S
··· 136 136 */ 137 137 PPC_TLBSRX_DOT(0,R16) 138 138 ldx r14,r14,r15 /* grab pgd entry */ 139 - beq normal_tlb_miss_done /* tlb exists already, bail */ 139 + beq tlb_miss_done_bolted /* tlb exists already, bail */ 140 140 MMU_FTR_SECTION_ELSE 141 141 ldx r14,r14,r15 /* grab pgd entry */ 142 142 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) ··· 192 192 mtspr SPRN_MAS7_MAS3,r15 193 193 tlbwe 194 194 195 + tlb_miss_done_bolted: 195 196 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) 196 197 tlb_epilog_bolted 197 198 rfi ··· 239 238 oris r11,r11,_PAGE_ACCESSED@h 240 239 beq tlb_miss_common_bolted 241 240 b itlb_miss_kernel_bolted 241 + 242 + /* 243 + * TLB miss handling for e6500 and derivatives, using hardware tablewalk. 244 + * 245 + * Linear mapping is bolted: no virtual page table or nested TLB misses 246 + * Indirect entries in TLB1, hardware loads resulting direct entries 247 + * into TLB0 248 + * No HES or NV hint on TLB1, so we need to do software round-robin 249 + * No tlbsrx. so we need a spinlock, and we have to deal 250 + * with MAS-damage caused by tlbsx 251 + * 4K pages only 252 + */ 253 + 254 + START_EXCEPTION(instruction_tlb_miss_e6500) 255 + tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0 256 + 257 + ld r11,PACA_TCD_PTR(r13) 258 + srdi. r15,r16,60 /* get region */ 259 + ori r16,r16,1 260 + 261 + TLB_MISS_STATS_SAVE_INFO_BOLTED 262 + bne tlb_miss_kernel_e6500 /* user/kernel test */ 263 + 264 + b tlb_miss_common_e6500 265 + 266 + START_EXCEPTION(data_tlb_miss_e6500) 267 + tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR 268 + 269 + ld r11,PACA_TCD_PTR(r13) 270 + srdi. r15,r16,60 /* get region */ 271 + rldicr r16,r16,0,62 272 + 273 + TLB_MISS_STATS_SAVE_INFO_BOLTED 274 + bne tlb_miss_kernel_e6500 /* user vs kernel check */ 275 + 276 + /* 277 + * This is the guts of the TLB miss handler for e6500 and derivatives. 
278 + * We are entered with: 279 + * 280 + * r16 = page of faulting address (low bit 0 if data, 1 if instruction) 281 + * r15 = crap (free to use) 282 + * r14 = page table base 283 + * r13 = PACA 284 + * r11 = tlb_per_core ptr 285 + * r10 = crap (free to use) 286 + */ 287 + tlb_miss_common_e6500: 288 + /* 289 + * Search if we already have an indirect entry for that virtual 290 + * address, and if we do, bail out. 291 + * 292 + * MAS6:IND should be already set based on MAS4 293 + */ 294 + addi r10,r11,TCD_LOCK 295 + 1: lbarx r15,0,r10 296 + cmpdi r15,0 297 + bne 2f 298 + li r15,1 299 + stbcx. r15,0,r10 300 + bne 1b 301 + .subsection 1 302 + 2: lbz r15,0(r10) 303 + cmpdi r15,0 304 + bne 2b 305 + b 1b 306 + .previous 307 + 308 + mfspr r15,SPRN_MAS2 309 + 310 + tlbsx 0,r16 311 + mfspr r10,SPRN_MAS1 312 + andis. r10,r10,MAS1_VALID@h 313 + bne tlb_miss_done_e6500 314 + 315 + /* Undo MAS-damage from the tlbsx */ 316 + mfspr r10,SPRN_MAS1 317 + oris r10,r10,MAS1_VALID@h 318 + mtspr SPRN_MAS1,r10 319 + mtspr SPRN_MAS2,r15 320 + 321 + /* Now, we need to walk the page tables. First check if we are in 322 + * range. 323 + */ 324 + rldicl. 
r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 325 + bne- tlb_miss_fault_e6500 326 + 327 + rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3 328 + cmpldi cr0,r14,0 329 + clrrdi r15,r15,3 330 + beq- tlb_miss_fault_e6500 /* No PGDIR, bail */ 331 + ldx r14,r14,r15 /* grab pgd entry */ 332 + 333 + rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3 334 + clrrdi r15,r15,3 335 + cmpdi cr0,r14,0 336 + bge tlb_miss_fault_e6500 /* Bad pgd entry or hugepage; bail */ 337 + ldx r14,r14,r15 /* grab pud entry */ 338 + 339 + rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3 340 + clrrdi r15,r15,3 341 + cmpdi cr0,r14,0 342 + bge tlb_miss_fault_e6500 343 + ldx r14,r14,r15 /* Grab pmd entry */ 344 + 345 + mfspr r10,SPRN_MAS0 346 + cmpdi cr0,r14,0 347 + bge tlb_miss_fault_e6500 348 + 349 + /* Now we build the MAS for a 2M indirect page: 350 + * 351 + * MAS 0 : ESEL needs to be filled by software round-robin 352 + * MAS 1 : Fully set up 353 + * - PID already updated by caller if necessary 354 + * - TSIZE for now is base ind page size always 355 + * - TID already cleared if necessary 356 + * MAS 2 : Default not 2M-aligned, need to be redone 357 + * MAS 3+7 : Needs to be done 358 + */ 359 + 360 + ori r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT) 361 + mtspr SPRN_MAS7_MAS3,r14 362 + 363 + clrrdi r15,r16,21 /* make EA 2M-aligned */ 364 + mtspr SPRN_MAS2,r15 365 + 366 + lbz r15,TCD_ESEL_NEXT(r11) 367 + lbz r16,TCD_ESEL_MAX(r11) 368 + lbz r14,TCD_ESEL_FIRST(r11) 369 + rlwimi r10,r15,16,0x00ff0000 /* insert esel_next into MAS0 */ 370 + addi r15,r15,1 /* increment esel_next */ 371 + mtspr SPRN_MAS0,r10 372 + cmpw r15,r16 373 + iseleq r15,r14,r15 /* if next == last use first */ 374 + stb r15,TCD_ESEL_NEXT(r11) 375 + 376 + tlbwe 377 + 378 + tlb_miss_done_e6500: 379 + .macro tlb_unlock_e6500 380 + li r15,0 381 + isync 382 + stb r15,TCD_LOCK(r11) 383 + .endm 384 + 385 + tlb_unlock_e6500 386 + TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) 387 + tlb_epilog_bolted 388 + rfi 389 + 390 + 
tlb_miss_kernel_e6500: 391 + mfspr r10,SPRN_MAS1 392 + ld r14,PACA_KERNELPGD(r13) 393 + cmpldi cr0,r15,8 /* Check for vmalloc region */ 394 + rlwinm r10,r10,0,16,1 /* Clear TID */ 395 + mtspr SPRN_MAS1,r10 396 + beq+ tlb_miss_common_e6500 397 + 398 + tlb_miss_fault_e6500: 399 + tlb_unlock_e6500 400 + /* We need to check if it was an instruction miss */ 401 + andi. r16,r16,1 402 + bne itlb_miss_fault_e6500 403 + dtlb_miss_fault_e6500: 404 + TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT) 405 + tlb_epilog_bolted 406 + b exc_data_storage_book3e 407 + itlb_miss_fault_e6500: 408 + TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT) 409 + tlb_epilog_bolted 410 + b exc_instruction_storage_book3e 411 + 242 412 243 413 /********************************************************************** 244 414 * *
+67 -45
arch/powerpc/mm/tlb_nohash.c
··· 43 43 #include <asm/tlb.h> 44 44 #include <asm/code-patching.h> 45 45 #include <asm/hugetlb.h> 46 + #include <asm/paca.h> 46 47 47 48 #include "mmu_decl.h" 48 49 ··· 58 57 [MMU_PAGE_4K] = { 59 58 .shift = 12, 60 59 .enc = BOOK3E_PAGESZ_4K, 60 + }, 61 + [MMU_PAGE_2M] = { 62 + .shift = 21, 63 + .enc = BOOK3E_PAGESZ_2M, 61 64 }, 62 65 [MMU_PAGE_4M] = { 63 66 .shift = 22, ··· 141 136 int mmu_linear_psize; /* Page size used for the linear mapping */ 142 137 int mmu_pte_psize; /* Page size used for PTE pages */ 143 138 int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ 144 - int book3e_htw_enabled; /* Is HW tablewalk enabled ? */ 139 + int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ 145 140 unsigned long linear_map_top; /* Top of linear mapping */ 146 141 147 142 #endif /* CONFIG_PPC64 */ ··· 382 377 { 383 378 int tsize = mmu_psize_defs[mmu_pte_psize].enc; 384 379 385 - if (book3e_htw_enabled) { 380 + if (book3e_htw_mode != PPC_HTW_NONE) { 386 381 unsigned long start = address & PMD_MASK; 387 382 unsigned long end = address + PMD_SIZE; 388 383 unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; ··· 435 430 def = &mmu_psize_defs[psize]; 436 431 shift = def->shift; 437 432 438 - if (shift == 0) 433 + if (shift == 0 || shift & 1) 439 434 continue; 440 435 441 436 /* adjust to be in terms of 4^shift Kb */ ··· 445 440 def->flags |= MMU_PAGE_SIZE_DIRECT; 446 441 } 447 442 448 - goto no_indirect; 443 + goto out; 449 444 } 450 445 451 446 if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { 452 - u32 tlb1ps = mfspr(SPRN_TLB1PS); 447 + u32 tlb1cfg, tlb1ps; 448 + 449 + tlb0cfg = mfspr(SPRN_TLB0CFG); 450 + tlb1cfg = mfspr(SPRN_TLB1CFG); 451 + tlb1ps = mfspr(SPRN_TLB1PS); 452 + eptcfg = mfspr(SPRN_EPTCFG); 453 + 454 + if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) 455 + book3e_htw_mode = PPC_HTW_E6500; 456 + 457 + /* 458 + * We expect 4K subpage size and unrestricted indirect size. 
459 + * The lack of a restriction on indirect size is a Freescale 460 + * extension, indicated by PSn = 0 but SPSn != 0. 461 + */ 462 + if (eptcfg != 2) 463 + book3e_htw_mode = PPC_HTW_NONE; 453 464 454 465 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 455 466 struct mmu_psize_def *def = &mmu_psize_defs[psize]; 456 467 457 468 if (tlb1ps & (1U << (def->shift - 10))) { 458 469 def->flags |= MMU_PAGE_SIZE_DIRECT; 470 + 471 + if (book3e_htw_mode && psize == MMU_PAGE_2M) 472 + def->flags |= MMU_PAGE_SIZE_INDIRECT; 459 473 } 460 474 } 461 475 462 - goto no_indirect; 476 + goto out; 463 477 } 464 478 #endif 465 479 ··· 495 471 } 496 472 497 473 /* Indirect page sizes supported ? */ 498 - if ((tlb0cfg & TLBnCFG_IND) == 0) 499 - goto no_indirect; 474 + if ((tlb0cfg & TLBnCFG_IND) == 0 || 475 + (tlb0cfg & TLBnCFG_PT) == 0) 476 + goto out; 477 + 478 + book3e_htw_mode = PPC_HTW_IBM; 500 479 501 480 /* Now, we only deal with one IND page size for each 502 481 * direct size. Hopefully all implementations today are ··· 524 497 def->ind = ps + 10; 525 498 } 526 499 } 527 - no_indirect: 528 500 501 + out: 529 502 /* Cleanup array and print summary */ 530 503 pr_info("MMU: Supported page sizes\n"); 531 504 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { ··· 545 518 } 546 519 } 547 520 548 - static void __patch_exception(int exc, unsigned long addr) 549 - { 550 - extern unsigned int interrupt_base_book3e; 551 - unsigned int *ibase = &interrupt_base_book3e; 552 - 553 - /* Our exceptions vectors start with a NOP and -then- a branch 554 - * to deal with single stepping from userspace which stops on 555 - * the second instruction. 
Thus we need to patch the second 556 - * instruction of the exception, not the first one 557 - */ 558 - 559 - patch_branch(ibase + (exc / 4) + 1, addr, 0); 560 - } 561 - 562 - #define patch_exception(exc, name) do { \ 563 - extern unsigned int name; \ 564 - __patch_exception((exc), (unsigned long)&name); \ 565 - } while (0) 566 - 567 521 static void setup_mmu_htw(void) 568 522 { 569 - /* Check if HW tablewalk is present, and if yes, enable it by: 570 - * 571 - * - patching the TLB miss handlers to branch to the 572 - * one dedicates to it 573 - * 574 - * - setting the global book3e_htw_enabled 575 - */ 576 - unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG); 523 + /* 524 + * If we want to use HW tablewalk, enable it by patching the TLB miss 525 + * handlers to branch to the one dedicated to it. 526 + */ 577 527 578 - if ((tlb0cfg & TLBnCFG_IND) && 579 - (tlb0cfg & TLBnCFG_PT)) { 528 + switch (book3e_htw_mode) { 529 + case PPC_HTW_IBM: 580 530 patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); 581 531 patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); 582 - book3e_htw_enabled = 1; 532 + break; 533 + case PPC_HTW_E6500: 534 + patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); 535 + patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); 536 + break; 583 537 } 584 538 pr_info("MMU: Book3E HW tablewalk %s\n", 585 - book3e_htw_enabled ? "enabled" : "not supported"); 539 + book3e_htw_mode != PPC_HTW_NONE ? 
"enabled" : "not supported"); 586 540 } 587 541 588 542 /* ··· 603 595 /* Set MAS4 based on page table setting */ 604 596 605 597 mas4 = 0x4 << MAS4_WIMGED_SHIFT; 606 - if (book3e_htw_enabled) { 607 - mas4 |= mas4 | MAS4_INDD; 598 + switch (book3e_htw_mode) { 599 + case PPC_HTW_E6500: 600 + mas4 |= MAS4_INDD; 601 + mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; 602 + mas4 |= MAS4_TLBSELD(1); 603 + mmu_pte_psize = MMU_PAGE_2M; 604 + break; 605 + 606 + case PPC_HTW_IBM: 607 + mas4 |= MAS4_INDD; 608 608 #ifdef CONFIG_PPC_64K_PAGES 609 609 mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT; 610 610 mmu_pte_psize = MMU_PAGE_256M; ··· 620 604 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; 621 605 mmu_pte_psize = MMU_PAGE_1M; 622 606 #endif 623 - } else { 607 + break; 608 + 609 + case PPC_HTW_NONE: 624 610 #ifdef CONFIG_PPC_64K_PAGES 625 611 mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT; 626 612 #else 627 613 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; 628 614 #endif 629 615 mmu_pte_psize = mmu_virtual_psize; 616 + break; 630 617 } 631 618 mtspr(SPRN_MAS4, mas4); 632 619 ··· 649 630 /* limit memory so we dont have linear faults */ 650 631 memblock_enforce_memory_limit(linear_map_top); 651 632 652 - patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); 653 - patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); 633 + if (book3e_htw_mode == PPC_HTW_NONE) { 634 + patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); 635 + patch_exception(0x1e0, 636 + exc_instruction_tlb_miss_bolted_book3e); 637 + } 654 638 } 655 639 #endif 656 640
+3 -1
arch/powerpc/mm/tlb_nohash_low.S
··· 402 402 * Load TLBCAM[index] entry in to the L2 CAM MMU 403 403 */ 404 404 _GLOBAL(loadcam_entry) 405 - LOAD_REG_ADDR(r4, TLBCAM) 405 + mflr r5 406 + LOAD_REG_ADDR_PIC(r4, TLBCAM) 407 + mtlr r5 406 408 mulli r5,r3,TLBCAM_SIZE 407 409 add r3,r5,r4 408 410 lwz r4,TLBCAM_MAS0(r3)
-1
arch/powerpc/oprofile/op_model_7450.c
··· 16 16 */ 17 17 18 18 #include <linux/oprofile.h> 19 - #include <linux/init.h> 20 19 #include <linux/smp.h> 21 20 #include <asm/ptrace.h> 22 21 #include <asm/processor.h>
-1
arch/powerpc/oprofile/op_model_cell.c
··· 16 16 17 17 #include <linux/cpufreq.h> 18 18 #include <linux/delay.h> 19 - #include <linux/init.h> 20 19 #include <linux/jiffies.h> 21 20 #include <linux/kthread.h> 22 21 #include <linux/oprofile.h>
-1
arch/powerpc/oprofile/op_model_fsl_emb.c
··· 14 14 */ 15 15 16 16 #include <linux/oprofile.h> 17 - #include <linux/init.h> 18 17 #include <linux/smp.h> 19 18 #include <asm/ptrace.h> 20 19 #include <asm/processor.h>
-1
arch/powerpc/oprofile/op_model_pa6t.c
··· 22 22 */ 23 23 24 24 #include <linux/oprofile.h> 25 - #include <linux/init.h> 26 25 #include <linux/smp.h> 27 26 #include <linux/percpu.h> 28 27 #include <asm/processor.h>
-1
arch/powerpc/oprofile/op_model_power4.c
··· 10 10 */ 11 11 12 12 #include <linux/oprofile.h> 13 - #include <linux/init.h> 14 13 #include <linux/smp.h> 15 14 #include <asm/firmware.h> 16 15 #include <asm/ptrace.h>
-1
arch/powerpc/oprofile/op_model_rs64.c
··· 8 8 */ 9 9 10 10 #include <linux/oprofile.h> 11 - #include <linux/init.h> 12 11 #include <linux/smp.h> 13 12 #include <asm/ptrace.h> 14 13 #include <asm/processor.h>
-1
arch/powerpc/platforms/83xx/Kconfig
··· 99 99 config ASP834x 100 100 bool "Analogue & Micro ASP 834x" 101 101 select PPC_MPC834x 102 - select REDBOOT 103 102 help 104 103 This enables support for the Analogue & Micro ASP 83xx 105 104 board.
-1
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
··· 11 11 * (at your option) any later version. 12 12 */ 13 13 14 - #include <linux/init.h> 15 14 #include <linux/kernel.h> 16 15 #include <linux/module.h> 17 16 #include <linux/device.h>
-1
arch/powerpc/platforms/83xx/suspend.c
··· 10 10 * by the Free Software Foundation. 11 11 */ 12 12 13 - #include <linux/init.h> 14 13 #include <linux/pm.h> 15 14 #include <linux/types.h> 16 15 #include <linux/ioport.h>
+6
arch/powerpc/platforms/85xx/Kconfig
··· 123 123 help 124 124 This option enables support for the P1023 RDS and RDB boards 125 125 126 + config TWR_P102x 127 + bool "Freescale TWR-P102x" 128 + select DEFAULT_UIMAGE 129 + help 130 + This option enables support for the TWR-P1025 board. 131 + 126 132 config SOCRATES 127 133 bool "Socrates" 128 134 select DEFAULT_UIMAGE
+1
arch/powerpc/platforms/85xx/Makefile
··· 18 18 obj-$(CONFIG_P1022_DS) += p1022_ds.o 19 19 obj-$(CONFIG_P1022_RDK) += p1022_rdk.o 20 20 obj-$(CONFIG_P1023_RDS) += p1023_rds.o 21 + obj-$(CONFIG_TWR_P102x) += twr_p102x.o 21 22 obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o 22 23 obj-$(CONFIG_STX_GP3) += stx_gp3.o 23 24 obj-$(CONFIG_TQM85xx) += tqm85xx.o
+38
arch/powerpc/platforms/85xx/common.c
··· 9 9 #include <linux/of_irq.h> 10 10 #include <linux/of_platform.h> 11 11 12 + #include <asm/qe.h> 12 13 #include <sysdev/cpm2_pic.h> 13 14 14 15 #include "mpc85xx.h" ··· 81 80 cpm2_pic_init(np); 82 81 of_node_put(np); 83 82 irq_set_chained_handler(irq, cpm2_cascade); 83 + } 84 + #endif 85 + 86 + #ifdef CONFIG_QUICC_ENGINE 87 + void __init mpc85xx_qe_init(void) 88 + { 89 + struct device_node *np; 90 + 91 + np = of_find_compatible_node(NULL, NULL, "fsl,qe"); 92 + if (!np) { 93 + np = of_find_node_by_name(NULL, "qe"); 94 + if (!np) { 95 + pr_err("%s: Could not find Quicc Engine node\n", 96 + __func__); 97 + return; 98 + } 99 + } 100 + 101 + if (!of_device_is_available(np)) { 102 + of_node_put(np); 103 + return; 104 + } 105 + 106 + qe_reset(); 107 + of_node_put(np); 108 + 109 + np = of_find_node_by_name(NULL, "par_io"); 110 + if (np) { 111 + struct device_node *ucc; 112 + 113 + par_io_init(np); 114 + of_node_put(np); 115 + 116 + for_each_node_by_name(ucc, "ucc") 117 + par_io_of_config(ucc); 118 + 119 + } 84 120 } 85 121 #endif
+6
arch/powerpc/platforms/85xx/mpc85xx.h
··· 8 8 static inline void __init mpc85xx_cpm2_pic_init(void) {} 9 9 #endif /* CONFIG_CPM2 */ 10 10 11 + #ifdef CONFIG_QUICC_ENGINE 12 + extern void mpc85xx_qe_init(void); 13 + #else 14 + static inline void __init mpc85xx_qe_init(void) {} 15 + #endif 16 + 11 17 #endif
+2 -27
arch/powerpc/platforms/85xx/mpc85xx_mds.c
··· 1 1 /* 2 - * Copyright (C) 2006-2010, 2012 Freescale Semiconductor, Inc. 2 + * Copyright (C) 2006-2010, 2012-2013 Freescale Semiconductor, Inc. 3 3 * All rights reserved. 4 4 * 5 5 * Author: Andy Fleming <afleming@freescale.com> ··· 238 238 { 239 239 struct device_node *np; 240 240 241 - np = of_find_compatible_node(NULL, NULL, "fsl,qe"); 242 - if (!np) { 243 - np = of_find_node_by_name(NULL, "qe"); 244 - if (!np) 245 - return; 246 - } 247 - 248 - if (!of_device_is_available(np)) { 249 - of_node_put(np); 250 - return; 251 - } 252 - 253 - qe_reset(); 254 - of_node_put(np); 255 - 256 - np = of_find_node_by_name(NULL, "par_io"); 257 - if (np) { 258 - struct device_node *ucc; 259 - 260 - par_io_init(np); 261 - of_node_put(np); 262 - 263 - for_each_node_by_name(ucc, "ucc") 264 - par_io_of_config(ucc); 265 - } 266 - 241 + mpc85xx_qe_init(); 267 242 mpc85xx_mds_reset_ucc_phys(); 268 243 269 244 if (machine_is(p1021_mds)) {
+2 -23
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
··· 1 1 /* 2 2 * MPC85xx RDB Board Setup 3 3 * 4 - * Copyright 2009,2012 Freescale Semiconductor Inc. 4 + * Copyright 2009,2012-2013 Freescale Semiconductor Inc. 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify it 7 7 * under the terms of the GNU General Public License as published by the ··· 98 98 fsl_pci_assign_primary(); 99 99 100 100 #ifdef CONFIG_QUICC_ENGINE 101 - np = of_find_compatible_node(NULL, NULL, "fsl,qe"); 102 - if (!np) { 103 - pr_err("%s: Could not find Quicc Engine node\n", __func__); 104 - goto qe_fail; 105 - } 106 - 107 - qe_reset(); 108 - of_node_put(np); 109 - 110 - np = of_find_node_by_name(NULL, "par_io"); 111 - if (np) { 112 - struct device_node *ucc; 113 - 114 - par_io_init(np); 115 - of_node_put(np); 116 - 117 - for_each_node_by_name(ucc, "ucc") 118 - par_io_of_config(ucc); 119 - 120 - } 101 + mpc85xx_qe_init(); 121 102 #if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE) 122 103 if (machine_is(p1025_rdb)) { 123 104 ··· 129 148 130 149 } 131 150 #endif 132 - 133 - qe_fail: 134 151 #endif /* CONFIG_QUICC_ENGINE */ 135 152 136 153 printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
-1
arch/powerpc/platforms/85xx/sgy_cts1000.c
··· 14 14 #include <linux/platform_device.h> 15 15 #include <linux/device.h> 16 16 #include <linux/module.h> 17 - #include <linux/init.h> 18 17 #include <linux/of_gpio.h> 19 18 #include <linux/of_irq.h> 20 19 #include <linux/workqueue.h>
+11 -6
arch/powerpc/platforms/85xx/smp.c
··· 389 389 } 390 390 #endif /* CONFIG_KEXEC */ 391 391 392 - static void smp_85xx_setup_cpu(int cpu_nr) 392 + static void smp_85xx_basic_setup(int cpu_nr) 393 393 { 394 - if (smp_85xx_ops.probe == smp_mpic_probe) 395 - mpic_setup_this_cpu(); 396 - 397 394 if (cpu_has_feature(CPU_FTR_DBELL)) 398 395 doorbell_setup_this_cpu(); 396 + } 397 + 398 + static void smp_85xx_setup_cpu(int cpu_nr) 399 + { 400 + mpic_setup_this_cpu(); 401 + smp_85xx_basic_setup(cpu_nr); 399 402 } 400 403 401 404 static const struct of_device_id mpc85xx_smp_guts_ids[] = { ··· 415 412 { 416 413 struct device_node *np; 417 414 418 - smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu; 419 415 420 416 np = of_find_node_by_type(NULL, "open-pic"); 421 417 if (np) { 422 418 smp_85xx_ops.probe = smp_mpic_probe; 419 + smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu; 423 420 smp_85xx_ops.message_pass = smp_mpic_message_pass; 424 - } 421 + } else 422 + smp_85xx_ops.setup_cpu = smp_85xx_basic_setup; 425 423 426 424 if (cpu_has_feature(CPU_FTR_DBELL)) { 427 425 /* ··· 431 427 */ 432 428 smp_85xx_ops.message_pass = NULL; 433 429 smp_85xx_ops.cause_ipi = doorbell_cause_ipi; 430 + smp_85xx_ops.probe = NULL; 434 431 } 435 432 436 433 np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
+147
arch/powerpc/platforms/85xx/twr_p102x.c
··· 1 + /* 2 + * Copyright 2010-2011, 2013 Freescale Semiconductor, Inc. 3 + * 4 + * Author: Michael Johnston <michael.johnston@freescale.com> 5 + * 6 + * Description: 7 + * TWR-P102x Board Setup 8 + * 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms of the GNU General Public License as published by the 11 + * Free Software Foundation; either version 2 of the License, or (at your 12 + * option) any later version. 13 + */ 14 + 15 + #include <linux/kernel.h> 16 + #include <linux/init.h> 17 + #include <linux/errno.h> 18 + #include <linux/pci.h> 19 + #include <linux/of_platform.h> 20 + 21 + #include <asm/pci-bridge.h> 22 + #include <asm/udbg.h> 23 + #include <asm/mpic.h> 24 + #include <asm/qe.h> 25 + #include <asm/qe_ic.h> 26 + #include <asm/fsl_guts.h> 27 + 28 + #include <sysdev/fsl_soc.h> 29 + #include <sysdev/fsl_pci.h> 30 + #include "smp.h" 31 + 32 + #include "mpc85xx.h" 33 + 34 + static void __init twr_p1025_pic_init(void) 35 + { 36 + struct mpic *mpic; 37 + 38 + #ifdef CONFIG_QUICC_ENGINE 39 + struct device_node *np; 40 + #endif 41 + 42 + mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | 43 + MPIC_SINGLE_DEST_CPU, 44 + 0, 256, " OpenPIC "); 45 + 46 + BUG_ON(mpic == NULL); 47 + mpic_init(mpic); 48 + 49 + #ifdef CONFIG_QUICC_ENGINE 50 + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); 51 + if (np) { 52 + qe_ic_init(np, 0, qe_ic_cascade_low_mpic, 53 + qe_ic_cascade_high_mpic); 54 + of_node_put(np); 55 + } else 56 + pr_err("Could not find qe-ic node\n"); 57 + #endif 58 + } 59 + 60 + /* ************************************************************************ 61 + * 62 + * Setup the architecture 63 + * 64 + */ 65 + static void __init twr_p1025_setup_arch(void) 66 + { 67 + #ifdef CONFIG_QUICC_ENGINE 68 + struct device_node *np; 69 + #endif 70 + 71 + if (ppc_md.progress) 72 + ppc_md.progress("twr_p1025_setup_arch()", 0); 73 + 74 + mpc85xx_smp_init(); 75 + 76 + fsl_pci_assign_primary(); 77 + 78 + #ifdef 
CONFIG_QUICC_ENGINE 79 + mpc85xx_qe_init(); 80 + 81 + #if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE) 82 + if (machine_is(twr_p1025)) { 83 + struct ccsr_guts __iomem *guts; 84 + 85 + np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts"); 86 + if (np) { 87 + guts = of_iomap(np, 0); 88 + if (!guts) 89 + pr_err("twr_p1025: could not map global utilities register\n"); 90 + else { 91 + /* P1025 has pins muxed for QE and other functions. To 92 + * enable QE UEC mode, we need to set bit QE0 for UCC1 93 + * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 94 + * and QE12 for QE MII management signals in PMUXCR 95 + * register. 96 + * Set QE mux bits in PMUXCR */ 97 + setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) | 98 + MPC85xx_PMUXCR_QE(3) | 99 + MPC85xx_PMUXCR_QE(9) | 100 + MPC85xx_PMUXCR_QE(12)); 101 + iounmap(guts); 102 + 103 + #if defined(CONFIG_SERIAL_QE) 104 + /* On P1025TWR board, the UCC7 acted as UART port. 105 + * However, The UCC7's CTS pin is low level in default, 106 + * it will impact the transmission in full duplex 107 + * communication. So disable the Flow control pin PA18. 108 + * The UCC7 UART just can use RXD and TXD pins. 
109 + */ 110 + par_io_config_pin(0, 18, 0, 0, 0, 0); 111 + #endif 112 + /* Drive PB29 to CPLD low - CPLD will then change 113 + * muxing from LBC to QE */ 114 + par_io_config_pin(1, 29, 1, 0, 0, 0); 115 + par_io_data_set(1, 29, 0); 116 + } 117 + of_node_put(np); 118 + } 119 + } 120 + #endif 121 + #endif /* CONFIG_QUICC_ENGINE */ 122 + 123 + pr_info("TWR-P1025 board from Freescale Semiconductor\n"); 124 + } 125 + 126 + machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices); 127 + 128 + static int __init twr_p1025_probe(void) 129 + { 130 + unsigned long root = of_get_flat_dt_root(); 131 + 132 + return of_flat_dt_is_compatible(root, "fsl,TWR-P1025"); 133 + } 134 + 135 + define_machine(twr_p1025) { 136 + .name = "TWR-P1025", 137 + .probe = twr_p1025_probe, 138 + .setup_arch = twr_p1025_setup_arch, 139 + .init_IRQ = twr_p1025_pic_init, 140 + #ifdef CONFIG_PCI 141 + .pcibios_fixup_bus = fsl_pcibios_fixup_bus, 142 + #endif 143 + .get_irq = mpic_get_irq, 144 + .restart = fsl_rstcr_restart, 145 + .calibrate_decr = generic_calibrate_decr, 146 + .progress = udbg_progress, 147 + };
-1
arch/powerpc/platforms/8xx/Kconfig
··· 45 45 config PPC_ADDER875 46 46 bool "Analogue & Micro Adder 875" 47 47 select CPM1 48 - select REDBOOT 49 48 help 50 49 This enables support for the Analogue & Micro Adder 875 51 50 board.
+1
arch/powerpc/platforms/Kconfig.cputype
··· 72 72 select PPC_HAVE_PMU_SUPPORT 73 73 select SYS_SUPPORTS_HUGETLBFS 74 74 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES 75 + select ARCH_SUPPORTS_NUMA_BALANCING 75 76 76 77 config PPC_BOOK3E_64 77 78 bool "Embedded processors"
+2 -2
arch/powerpc/platforms/cell/beat_htab.c
··· 111 111 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); 112 112 113 113 if (rflags & _PAGE_NO_CACHE) 114 - hpte_r &= ~_PAGE_COHERENT; 114 + hpte_r &= ~HPTE_R_M; 115 115 116 116 raw_spin_lock(&beat_htab_lock); 117 117 lpar_rc = beat_read_mask(hpte_group); ··· 337 337 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); 338 338 339 339 if (rflags & _PAGE_NO_CACHE) 340 - hpte_r &= ~_PAGE_COHERENT; 340 + hpte_r &= ~HPTE_R_M; 341 341 342 342 /* insert into not-volted entry */ 343 343 lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
+8 -6
arch/powerpc/platforms/cell/iommu.c
··· 197 197 198 198 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); 199 199 200 - for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE) 200 + for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift) 201 201 io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); 202 202 203 203 mb(); ··· 430 430 { 431 431 cell_iommu_setup_stab(iommu, base, size, 0, 0); 432 432 iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, 433 - IOMMU_PAGE_SHIFT); 433 + IOMMU_PAGE_SHIFT_4K); 434 434 cell_iommu_enable_hardware(iommu); 435 435 } 436 436 ··· 487 487 window->table.it_blocksize = 16; 488 488 window->table.it_base = (unsigned long)iommu->ptab; 489 489 window->table.it_index = iommu->nid; 490 - window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset; 491 - window->table.it_size = size >> IOMMU_PAGE_SHIFT; 490 + window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; 491 + window->table.it_offset = 492 + (offset >> window->table.it_page_shift) + pte_offset; 493 + window->table.it_size = size >> window->table.it_page_shift; 492 494 493 495 iommu_init_table(&window->table, iommu->nid); 494 496 ··· 775 773 776 774 /* Setup the iommu_table */ 777 775 cell_iommu_setup_window(iommu, np, base, size, 778 - offset >> IOMMU_PAGE_SHIFT); 776 + offset >> IOMMU_PAGE_SHIFT_4K); 779 777 } 780 778 781 779 static void __init cell_disable_iommus(void) ··· 1124 1122 1125 1123 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); 1126 1124 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, 1127 - IOMMU_PAGE_SHIFT); 1125 + IOMMU_PAGE_SHIFT_4K); 1128 1126 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, 1129 1127 fbase, fsize); 1130 1128 cell_iommu_enable_hardware(iommu);
-1
arch/powerpc/platforms/chrp/smp.c
··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/kernel_stat.h> 16 16 #include <linux/delay.h> 17 - #include <linux/init.h> 18 17 #include <linux/spinlock.h> 19 18 20 19 #include <asm/ptrace.h>
+12 -1
arch/powerpc/platforms/embedded6xx/Kconfig
··· 67 67 This option enables support for the GE Fanuc C2K board (formerly 68 68 an SBS board). 69 69 70 + config MVME5100 71 + bool "Motorola/Emerson MVME5100" 72 + depends on EMBEDDED6xx 73 + select MPIC 74 + select PCI 75 + select PPC_INDIRECT_PCI 76 + select PPC_I8259 77 + select PPC_NATIVE 78 + help 79 + This option enables support for the Motorola (now Emerson) MVME5100 80 + board. 81 + 70 82 config TSI108_BRIDGE 71 83 bool 72 84 select PCI ··· 125 113 help 126 114 Select WII if configuring for the Nintendo Wii. 127 115 More information at: <http://gc-linux.sourceforge.net/> 128 -
+1
arch/powerpc/platforms/embedded6xx/Makefile
··· 11 11 obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o 12 12 obj-$(CONFIG_GAMECUBE) += gamecube.o 13 13 obj-$(CONFIG_WII) += wii.o hlwd-pic.o 14 + obj-$(CONFIG_MVME5100) += mvme5100.o
-1
arch/powerpc/platforms/embedded6xx/hlwd-pic.c
··· 15 15 #define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt 16 16 17 17 #include <linux/kernel.h> 18 - #include <linux/init.h> 19 18 #include <linux/irq.h> 20 19 #include <linux/of.h> 21 20 #include <linux/of_address.h>
+221
arch/powerpc/platforms/embedded6xx/mvme5100.c
··· 1 + /* 2 + * Board setup routines for the Motorola/Emerson MVME5100. 3 + * 4 + * Copyright 2013 CSC Australia Pty. Ltd. 5 + * 6 + * Based on earlier code by: 7 + * 8 + * Matt Porter, MontaVista Software Inc. 9 + * Copyright 2001 MontaVista Software Inc. 10 + * 11 + * This program is free software; you can redistribute it and/or modify it 12 + * under the terms of the GNU General Public License as published by the 13 + * Free Software Foundation; either version 2 of the License, or (at your 14 + * option) any later version. 15 + * 16 + * Author: Stephen Chivers <schivers@csc.com> 17 + * 18 + */ 19 + 20 + #include <linux/of_platform.h> 21 + 22 + #include <asm/i8259.h> 23 + #include <asm/pci-bridge.h> 24 + #include <asm/mpic.h> 25 + #include <asm/prom.h> 26 + #include <mm/mmu_decl.h> 27 + #include <asm/udbg.h> 28 + 29 + #define HAWK_MPIC_SIZE 0x00040000U 30 + #define MVME5100_PCI_MEM_OFFSET 0x00000000 31 + 32 + /* Board register addresses. */ 33 + #define BOARD_STATUS_REG 0xfef88080 34 + #define BOARD_MODFAIL_REG 0xfef88090 35 + #define BOARD_MODRST_REG 0xfef880a0 36 + #define BOARD_TBEN_REG 0xfef880c0 37 + #define BOARD_SW_READ_REG 0xfef880e0 38 + #define BOARD_GEO_ADDR_REG 0xfef880e8 39 + #define BOARD_EXT_FEATURE1_REG 0xfef880f0 40 + #define BOARD_EXT_FEATURE2_REG 0xfef88100 41 + 42 + static phys_addr_t pci_membase; 43 + static u_char *restart; 44 + 45 + static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc) 46 + { 47 + struct irq_chip *chip = irq_desc_get_chip(desc); 48 + unsigned int cascade_irq = i8259_irq(); 49 + 50 + if (cascade_irq != NO_IRQ) 51 + generic_handle_irq(cascade_irq); 52 + 53 + chip->irq_eoi(&desc->irq_data); 54 + } 55 + 56 + static void __init mvme5100_pic_init(void) 57 + { 58 + struct mpic *mpic; 59 + struct device_node *np; 60 + struct device_node *cp = NULL; 61 + unsigned int cirq; 62 + unsigned long intack = 0; 63 + const u32 *prop = NULL; 64 + 65 + np = of_find_node_by_type(NULL, "open-pic"); 66 + if (!np) { 67 + 
pr_err("Could not find open-pic node\n"); 68 + return; 69 + } 70 + 71 + mpic = mpic_alloc(np, pci_membase, 0, 16, 256, " OpenPIC "); 72 + 73 + BUG_ON(mpic == NULL); 74 + of_node_put(np); 75 + 76 + mpic_assign_isu(mpic, 0, pci_membase + 0x10000); 77 + 78 + mpic_init(mpic); 79 + 80 + cp = of_find_compatible_node(NULL, NULL, "chrp,iic"); 81 + if (cp == NULL) { 82 + pr_warn("mvme5100_pic_init: couldn't find i8259\n"); 83 + return; 84 + } 85 + 86 + cirq = irq_of_parse_and_map(cp, 0); 87 + if (cirq == NO_IRQ) { 88 + pr_warn("mvme5100_pic_init: no cascade interrupt?\n"); 89 + return; 90 + } 91 + 92 + np = of_find_compatible_node(NULL, "pci", "mpc10x-pci"); 93 + if (np) { 94 + prop = of_get_property(np, "8259-interrupt-acknowledge", NULL); 95 + 96 + if (prop) 97 + intack = prop[0]; 98 + 99 + of_node_put(np); 100 + } 101 + 102 + if (intack) 103 + pr_debug("mvme5100_pic_init: PCI 8259 intack at 0x%016lx\n", 104 + intack); 105 + 106 + i8259_init(cp, intack); 107 + of_node_put(cp); 108 + irq_set_chained_handler(cirq, mvme5100_8259_cascade); 109 + } 110 + 111 + static int __init mvme5100_add_bridge(struct device_node *dev) 112 + { 113 + const int *bus_range; 114 + int len; 115 + struct pci_controller *hose; 116 + unsigned short devid; 117 + 118 + pr_info("Adding PCI host bridge %s\n", dev->full_name); 119 + 120 + bus_range = of_get_property(dev, "bus-range", &len); 121 + 122 + hose = pcibios_alloc_controller(dev); 123 + if (hose == NULL) 124 + return -ENOMEM; 125 + 126 + hose->first_busno = bus_range ? bus_range[0] : 0; 127 + hose->last_busno = bus_range ? 
bus_range[1] : 0xff; 128 + 129 + setup_indirect_pci(hose, 0xfe000cf8, 0xfe000cfc, 0); 130 + 131 + pci_process_bridge_OF_ranges(hose, dev, 1); 132 + 133 + early_read_config_word(hose, 0, 0, PCI_DEVICE_ID, &devid); 134 + 135 + if (devid != PCI_DEVICE_ID_MOTOROLA_HAWK) { 136 + pr_err("HAWK PHB not present?\n"); 137 + return 0; 138 + } 139 + 140 + early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase); 141 + 142 + if (pci_membase == 0) { 143 + pr_err("HAWK PHB mibar not correctly set?\n"); 144 + return 0; 145 + } 146 + 147 + pr_info("mvme5100_pic_init: pci_membase: %x\n", pci_membase); 148 + 149 + return 0; 150 + } 151 + 152 + static struct of_device_id mvme5100_of_bus_ids[] __initdata = { 153 + { .compatible = "hawk-bridge", }, 154 + {}, 155 + }; 156 + 157 + /* 158 + * Setup the architecture 159 + */ 160 + static void __init mvme5100_setup_arch(void) 161 + { 162 + struct device_node *np; 163 + 164 + if (ppc_md.progress) 165 + ppc_md.progress("mvme5100_setup_arch()", 0); 166 + 167 + for_each_compatible_node(np, "pci", "hawk-pci") 168 + mvme5100_add_bridge(np); 169 + 170 + restart = ioremap(BOARD_MODRST_REG, 4); 171 + } 172 + 173 + 174 + static void mvme5100_show_cpuinfo(struct seq_file *m) 175 + { 176 + seq_puts(m, "Vendor\t\t: Motorola/Emerson\n"); 177 + seq_puts(m, "Machine\t\t: MVME5100\n"); 178 + } 179 + 180 + static void mvme5100_restart(char *cmd) 181 + { 182 + 183 + local_irq_disable(); 184 + mtmsr(mfmsr() | MSR_IP); 185 + 186 + out_8((u_char *) restart, 0x01); 187 + 188 + while (1) 189 + ; 190 + } 191 + 192 + /* 193 + * Called very early, device-tree isn't unflattened 194 + */ 195 + static int __init mvme5100_probe(void) 196 + { 197 + unsigned long root = of_get_flat_dt_root(); 198 + 199 + return of_flat_dt_is_compatible(root, "MVME5100"); 200 + } 201 + 202 + static int __init probe_of_platform_devices(void) 203 + { 204 + 205 + of_platform_bus_probe(NULL, mvme5100_of_bus_ids, NULL); 206 + return 0; 207 + } 208 + 209 + 
machine_device_initcall(mvme5100, probe_of_platform_devices); 210 + 211 + define_machine(mvme5100) { 212 + .name = "MVME5100", 213 + .probe = mvme5100_probe, 214 + .setup_arch = mvme5100_setup_arch, 215 + .init_IRQ = mvme5100_pic_init, 216 + .show_cpuinfo = mvme5100_show_cpuinfo, 217 + .get_irq = mpic_get_irq, 218 + .restart = mvme5100_restart, 219 + .calibrate_decr = generic_calibrate_decr, 220 + .progress = udbg_progress, 221 + };
-1
arch/powerpc/platforms/pasemi/dma_lib.c
··· 18 18 */ 19 19 20 20 #include <linux/kernel.h> 21 - #include <linux/init.h> 22 21 #include <linux/export.h> 23 22 #include <linux/pci.h> 24 23 #include <linux/slab.h>
+4 -1
arch/powerpc/platforms/pasemi/iommu.c
··· 138 138 pr_debug(" -> %s\n", __func__); 139 139 iommu_table_iobmap.it_busno = 0; 140 140 iommu_table_iobmap.it_offset = 0; 141 + iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT; 142 + 141 143 /* it_size is in number of entries */ 142 - iommu_table_iobmap.it_size = 0x80000000 >> IOBMAP_PAGE_SHIFT; 144 + iommu_table_iobmap.it_size = 145 + 0x80000000 >> iommu_table_iobmap.it_page_shift; 143 146 144 147 /* Initialize the common IOMMU code */ 145 148 iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
-1
arch/powerpc/platforms/powermac/pfunc_core.c
··· 5 5 * FIXME: LOCKING !!! 6 6 */ 7 7 8 - #include <linux/init.h> 9 8 #include <linux/delay.h> 10 9 #include <linux/kernel.h> 11 10 #include <linux/spinlock.h>
-5
arch/powerpc/platforms/powernv/Kconfig
··· 13 13 select ARCH_RANDOM 14 14 default y 15 15 16 - config POWERNV_MSI 17 - bool "Support PCI MSI on PowerNV platform" 18 - depends on PCI_MSI 19 - default y 20 - 21 16 config PPC_POWERNV_RTAS 22 17 depends on PPC_POWERNV 23 18 bool "Support for RTAS based PowerNV platforms such as BML"
+1
arch/powerpc/platforms/powernv/Makefile
··· 6 6 obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o 7 7 obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o 8 8 obj-$(CONFIG_PPC_SCOM) += opal-xscom.o 9 + obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
+47 -176
arch/powerpc/platforms/powernv/eeh-ioda.c
··· 14 14 #include <linux/bootmem.h> 15 15 #include <linux/debugfs.h> 16 16 #include <linux/delay.h> 17 - #include <linux/init.h> 18 17 #include <linux/io.h> 19 18 #include <linux/irq.h> 20 19 #include <linux/kernel.h> ··· 577 578 return -EIO; 578 579 } 579 580 580 - /* 581 - * FIXME: We probably need log the error in somewhere. 582 - * Lets make it up in future. 583 - */ 584 - /* pr_info("%s", phb->diag.blob); */ 581 + /* The PHB diag-data is always indicative */ 582 + pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); 585 583 586 584 spin_unlock_irqrestore(&phb->lock, flags); 587 585 ··· 666 670 } 667 671 } 668 672 669 - static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose, 670 - struct OpalIoPhbErrorCommon *common) 671 - { 672 - struct OpalIoP7IOCPhbErrorData *data; 673 - int i; 674 - 675 - data = (struct OpalIoP7IOCPhbErrorData *)common; 676 - 677 - pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n", 678 - hose->global_number, common->version); 679 - 680 - pr_info(" brdgCtl: %08x\n", data->brdgCtl); 681 - 682 - pr_info(" portStatusReg: %08x\n", data->portStatusReg); 683 - pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); 684 - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); 685 - 686 - pr_info(" deviceStatus: %08x\n", data->deviceStatus); 687 - pr_info(" slotStatus: %08x\n", data->slotStatus); 688 - pr_info(" linkStatus: %08x\n", data->linkStatus); 689 - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); 690 - pr_info(" devSecStatus: %08x\n", data->devSecStatus); 691 - 692 - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); 693 - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); 694 - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); 695 - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); 696 - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); 697 - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); 698 - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); 699 - pr_info(" sourceId: %08x\n", data->sourceId); 700 - 701 - 
pr_info(" errorClass: %016llx\n", data->errorClass); 702 - pr_info(" correlator: %016llx\n", data->correlator); 703 - pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); 704 - pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); 705 - pr_info(" lemFir: %016llx\n", data->lemFir); 706 - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); 707 - pr_info(" lemWOF: %016llx\n", data->lemWOF); 708 - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); 709 - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); 710 - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); 711 - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); 712 - pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); 713 - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); 714 - pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); 715 - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); 716 - pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); 717 - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); 718 - pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); 719 - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); 720 - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); 721 - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); 722 - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); 723 - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); 724 - 725 - for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { 726 - if ((data->pestA[i] >> 63) == 0 && 727 - (data->pestB[i] >> 63) == 0) 728 - continue; 729 - 730 - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); 731 - pr_info(" PESTB: %016llx\n", data->pestB[i]); 732 - } 733 - } 734 - 735 - static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose, 736 - struct OpalIoPhbErrorCommon *common) 737 - { 738 - struct OpalIoPhb3ErrorData *data; 739 - int i; 740 - 741 - data = (struct 
OpalIoPhb3ErrorData*)common; 742 - pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n", 743 - hose->global_number, common->version); 744 - 745 - pr_info(" brdgCtl: %08x\n", data->brdgCtl); 746 - 747 - pr_info(" portStatusReg: %08x\n", data->portStatusReg); 748 - pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); 749 - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); 750 - 751 - pr_info(" deviceStatus: %08x\n", data->deviceStatus); 752 - pr_info(" slotStatus: %08x\n", data->slotStatus); 753 - pr_info(" linkStatus: %08x\n", data->linkStatus); 754 - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); 755 - pr_info(" devSecStatus: %08x\n", data->devSecStatus); 756 - 757 - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); 758 - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); 759 - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); 760 - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); 761 - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); 762 - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); 763 - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); 764 - pr_info(" sourceId: %08x\n", data->sourceId); 765 - pr_info(" errorClass: %016llx\n", data->errorClass); 766 - pr_info(" correlator: %016llx\n", data->correlator); 767 - pr_info(" nFir: %016llx\n", data->nFir); 768 - pr_info(" nFirMask: %016llx\n", data->nFirMask); 769 - pr_info(" nFirWOF: %016llx\n", data->nFirWOF); 770 - pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); 771 - pr_info(" PhbCsr: %016llx\n", data->phbCsr); 772 - pr_info(" lemFir: %016llx\n", data->lemFir); 773 - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); 774 - pr_info(" lemWOF: %016llx\n", data->lemWOF); 775 - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); 776 - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); 777 - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); 778 - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); 779 - pr_info(" mmioErrorStatus: 
%016llx\n", data->mmioErrorStatus); 780 - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); 781 - pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); 782 - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); 783 - pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); 784 - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); 785 - pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); 786 - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); 787 - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); 788 - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); 789 - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); 790 - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); 791 - 792 - for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { 793 - if ((data->pestA[i] >> 63) == 0 && 794 - (data->pestB[i] >> 63) == 0) 795 - continue; 796 - 797 - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); 798 - pr_info(" PESTB: %016llx\n", data->pestB[i]); 799 - } 800 - } 801 - 802 673 static void ioda_eeh_phb_diag(struct pci_controller *hose) 803 674 { 804 675 struct pnv_phb *phb = hose->private_data; 805 - struct OpalIoPhbErrorCommon *common; 806 676 long rc; 807 677 808 678 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, ··· 679 817 return; 680 818 } 681 819 682 - common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; 683 - switch (common->ioType) { 684 - case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: 685 - ioda_eeh_p7ioc_phb_diag(hose, common); 686 - break; 687 - case OPAL_PHB_ERROR_DATA_TYPE_PHB3: 688 - ioda_eeh_phb3_phb_diag(hose, common); 689 - break; 690 - default: 691 - pr_warning("%s: Unrecognized I/O chip %d\n", 692 - __func__, common->ioType); 693 - } 820 + pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); 694 821 } 695 822 696 823 static int ioda_eeh_get_phb_pe(struct pci_controller *hose, ··· 713 862 dev.phb = hose; 714 863 dev.pe_config_addr = 
pe_no; 715 864 dev_pe = eeh_pe_get(&dev); 716 - if (!dev_pe) { 717 - pr_warning("%s: Can't find PE for PHB#%x - PE#%x\n", 718 - __func__, hose->global_number, pe_no); 719 - return -EEXIST; 720 - } 865 + if (!dev_pe) return -EEXIST; 721 866 722 867 *pe = dev_pe; 723 868 return 0; ··· 731 884 */ 732 885 static int ioda_eeh_next_error(struct eeh_pe **pe) 733 886 { 734 - struct pci_controller *hose, *tmp; 887 + struct pci_controller *hose; 735 888 struct pnv_phb *phb; 736 889 u64 frozen_pe_no; 737 890 u16 err_type, severity; 738 891 long rc; 739 - int ret = 1; 892 + int ret = EEH_NEXT_ERR_NONE; 740 893 741 894 /* 742 895 * While running here, it's safe to purge the event queue. ··· 746 899 eeh_remove_event(NULL); 747 900 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); 748 901 749 - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 902 + list_for_each_entry(hose, &hose_list, list_node) { 750 903 /* 751 904 * If the subordinate PCI buses of the PHB has been 752 905 * removed, we needn't take care of it any more. 
··· 785 938 switch (err_type) { 786 939 case OPAL_EEH_IOC_ERROR: 787 940 if (severity == OPAL_EEH_SEV_IOC_DEAD) { 788 - list_for_each_entry_safe(hose, tmp, 789 - &hose_list, list_node) { 941 + list_for_each_entry(hose, &hose_list, 942 + list_node) { 790 943 phb = hose->private_data; 791 944 phb->eeh_state |= PNV_EEH_STATE_REMOVED; 792 945 } 793 946 794 947 pr_err("EEH: dead IOC detected\n"); 795 - ret = 4; 796 - goto out; 948 + ret = EEH_NEXT_ERR_DEAD_IOC; 797 949 } else if (severity == OPAL_EEH_SEV_INF) { 798 950 pr_info("EEH: IOC informative error " 799 951 "detected\n"); 800 952 ioda_eeh_hub_diag(hose); 953 + ret = EEH_NEXT_ERR_NONE; 801 954 } 802 955 803 956 break; ··· 809 962 pr_err("EEH: dead PHB#%x detected\n", 810 963 hose->global_number); 811 964 phb->eeh_state |= PNV_EEH_STATE_REMOVED; 812 - ret = 3; 813 - goto out; 965 + ret = EEH_NEXT_ERR_DEAD_PHB; 814 966 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) { 815 967 if (ioda_eeh_get_phb_pe(hose, pe)) 816 968 break; 817 969 818 970 pr_err("EEH: fenced PHB#%x detected\n", 819 971 hose->global_number); 820 - ret = 2; 821 - goto out; 972 + ret = EEH_NEXT_ERR_FENCED_PHB; 822 973 } else if (severity == OPAL_EEH_SEV_INF) { 823 974 pr_info("EEH: PHB#%x informative error " 824 975 "detected\n", 825 976 hose->global_number); 826 977 ioda_eeh_phb_diag(hose); 978 + ret = EEH_NEXT_ERR_NONE; 827 979 } 828 980 829 981 break; 830 982 case OPAL_EEH_PE_ERROR: 831 - if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) 832 - break; 983 + /* 984 + * If we can't find the corresponding PE, the 985 + * PEEV / PEST would be messy. So we force an 986 + * fenced PHB so that it can be recovered. 
987 + */ 988 + if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) { 989 + if (!ioda_eeh_get_phb_pe(hose, pe)) { 990 + pr_err("EEH: Escalated fenced PHB#%x " 991 + "detected for PE#%llx\n", 992 + hose->global_number, 993 + frozen_pe_no); 994 + ret = EEH_NEXT_ERR_FENCED_PHB; 995 + } else { 996 + ret = EEH_NEXT_ERR_NONE; 997 + } 998 + } else { 999 + pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", 1000 + (*pe)->addr, (*pe)->phb->global_number); 1001 + ret = EEH_NEXT_ERR_FROZEN_PE; 1002 + } 833 1003 834 - pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", 835 - (*pe)->addr, (*pe)->phb->global_number); 836 - ret = 1; 837 - goto out; 1004 + break; 1005 + default: 1006 + pr_warn("%s: Unexpected error type %d\n", 1007 + __func__, err_type); 838 1008 } 1009 + 1010 + /* 1011 + * If we have no errors on the specific PHB or only 1012 + * informative error there, we continue poking it. 1013 + * Otherwise, we need actions to be taken by upper 1014 + * layer. 1015 + */ 1016 + if (ret > EEH_NEXT_ERR_INF) 1017 + break; 839 1018 } 840 1019 841 - ret = 0; 842 - out: 843 1020 return ret; 844 1021 } 845 1022
+23 -1
arch/powerpc/platforms/powernv/eeh-powernv.c
··· 344 344 return -EEXIST; 345 345 } 346 346 347 + static int powernv_eeh_restore_config(struct device_node *dn) 348 + { 349 + struct eeh_dev *edev = of_node_to_eeh_dev(dn); 350 + struct pnv_phb *phb; 351 + s64 ret; 352 + 353 + if (!edev) 354 + return -EEXIST; 355 + 356 + phb = edev->phb->private_data; 357 + ret = opal_pci_reinit(phb->opal_id, 358 + OPAL_REINIT_PCI_DEV, edev->config_addr); 359 + if (ret) { 360 + pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n", 361 + __func__, edev->config_addr, ret); 362 + return -EIO; 363 + } 364 + 365 + return 0; 366 + } 367 + 347 368 static struct eeh_ops powernv_eeh_ops = { 348 369 .name = "powernv", 349 370 .init = powernv_eeh_init, ··· 380 359 .configure_bridge = powernv_eeh_configure_bridge, 381 360 .read_config = pnv_pci_cfg_read, 382 361 .write_config = pnv_pci_cfg_write, 383 - .next_error = powernv_eeh_next_error 362 + .next_error = powernv_eeh_next_error, 363 + .restore_config = powernv_eeh_restore_config 384 364 }; 385 365 386 366 /**
+8 -27
arch/powerpc/platforms/powernv/opal-flash.c
··· 76 76 /* Validate buffer size */ 77 77 #define VALIDATE_BUF_SIZE 4096 78 78 79 - /* XXX: Assume candidate image size is <= 256MB */ 80 - #define MAX_IMAGE_SIZE 0x10000000 79 + /* XXX: Assume candidate image size is <= 1GB */ 80 + #define MAX_IMAGE_SIZE 0x40000000 81 81 82 82 /* Flash sg list version */ 83 83 #define SG_LIST_VERSION (1UL) ··· 101 101 uint16_t magic; 102 102 uint16_t version; 103 103 uint32_t size; 104 - }; 105 - 106 - /* Scatter/gather entry */ 107 - struct opal_sg_entry { 108 - void *data; 109 - long length; 110 - }; 111 - 112 - /* We calculate number of entries based on PAGE_SIZE */ 113 - #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) 114 - 115 - /* 116 - * This struct is very similar but not identical to that 117 - * needed by the opal flash update. All we need to do for 118 - * opal is rewrite num_entries into a version/length and 119 - * translate the pointers to absolute. 120 - */ 121 - struct opal_sg_list { 122 - unsigned long num_entries; 123 - struct opal_sg_list *next; 124 - struct opal_sg_entry entry[SG_ENTRIES_PER_NODE]; 125 104 }; 126 105 127 106 struct validate_flash_t { ··· 312 333 addr = image_data.data; 313 334 size = image_data.size; 314 335 315 - sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL); 336 + sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 316 337 if (!sg1) 317 338 return NULL; 318 339 ··· 330 351 331 352 sg1->num_entries++; 332 353 if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { 333 - sg1->next = kzalloc((sizeof(struct opal_sg_list)), 334 - GFP_KERNEL); 354 + sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL); 335 355 if (!sg1->next) { 336 356 pr_err("%s : Failed to allocate memory\n", 337 357 __func__); ··· 380 402 else 381 403 sg->next = NULL; 382 404 383 - /* Make num_entries into the version/length field */ 405 + /* 406 + * Convert num_entries to version/length format 407 + * to satisfy OPAL. 
408 + */ 384 409 sg->num_entries = (SG_LIST_VERSION << 56) | 385 410 (sg->num_entries * sizeof(struct opal_sg_entry) + 16); 386 411 }
+146
arch/powerpc/platforms/powernv/opal-memory-errors.c
··· 1 + /* 2 + * OPAL asynchronus Memory error handling support in PowreNV. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 + * 18 + * Copyright 2013 IBM Corporation 19 + * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> 20 + */ 21 + 22 + #undef DEBUG 23 + 24 + #include <linux/kernel.h> 25 + #include <linux/init.h> 26 + #include <linux/of.h> 27 + #include <linux/mm.h> 28 + #include <linux/slab.h> 29 + 30 + #include <asm/opal.h> 31 + #include <asm/cputable.h> 32 + 33 + static int opal_mem_err_nb_init; 34 + static LIST_HEAD(opal_memory_err_list); 35 + static DEFINE_SPINLOCK(opal_mem_err_lock); 36 + 37 + struct OpalMsgNode { 38 + struct list_head list; 39 + struct opal_msg msg; 40 + }; 41 + 42 + static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt) 43 + { 44 + uint64_t paddr_start, paddr_end; 45 + 46 + pr_debug("%s: Retrived memory error event, type: 0x%x\n", 47 + __func__, merr_evt->type); 48 + switch (merr_evt->type) { 49 + case OPAL_MEM_ERR_TYPE_RESILIENCE: 50 + paddr_start = merr_evt->u.resilience.physical_address_start; 51 + paddr_end = merr_evt->u.resilience.physical_address_end; 52 + break; 53 + case OPAL_MEM_ERR_TYPE_DYN_DALLOC: 54 + paddr_start = merr_evt->u.dyn_dealloc.physical_address_start; 55 + paddr_end = 
merr_evt->u.dyn_dealloc.physical_address_end; 56 + break; 57 + default: 58 + return; 59 + } 60 + 61 + for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) { 62 + memory_failure(paddr_start >> PAGE_SHIFT, 0, 0); 63 + } 64 + } 65 + 66 + static void handle_memory_error(void) 67 + { 68 + unsigned long flags; 69 + struct OpalMemoryErrorData *merr_evt; 70 + struct OpalMsgNode *msg_node; 71 + 72 + spin_lock_irqsave(&opal_mem_err_lock, flags); 73 + while (!list_empty(&opal_memory_err_list)) { 74 + msg_node = list_entry(opal_memory_err_list.next, 75 + struct OpalMsgNode, list); 76 + list_del(&msg_node->list); 77 + spin_unlock_irqrestore(&opal_mem_err_lock, flags); 78 + 79 + merr_evt = (struct OpalMemoryErrorData *) 80 + &msg_node->msg.params[0]; 81 + handle_memory_error_event(merr_evt); 82 + kfree(msg_node); 83 + spin_lock_irqsave(&opal_mem_err_lock, flags); 84 + } 85 + spin_unlock_irqrestore(&opal_mem_err_lock, flags); 86 + } 87 + 88 + static void mem_error_handler(struct work_struct *work) 89 + { 90 + handle_memory_error(); 91 + } 92 + 93 + static DECLARE_WORK(mem_error_work, mem_error_handler); 94 + 95 + /* 96 + * opal_memory_err_event - notifier handler that queues up the opal message 97 + * to be preocessed later. 
98 + */ 99 + static int opal_memory_err_event(struct notifier_block *nb, 100 + unsigned long msg_type, void *msg) 101 + { 102 + unsigned long flags; 103 + struct OpalMsgNode *msg_node; 104 + 105 + if (msg_type != OPAL_MSG_MEM_ERR) 106 + return 0; 107 + 108 + msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC); 109 + if (!msg_node) { 110 + pr_err("MEMORY_ERROR: out of memory, Opal message event not" 111 + "handled\n"); 112 + return -ENOMEM; 113 + } 114 + memcpy(&msg_node->msg, msg, sizeof(struct opal_msg)); 115 + 116 + spin_lock_irqsave(&opal_mem_err_lock, flags); 117 + list_add(&msg_node->list, &opal_memory_err_list); 118 + spin_unlock_irqrestore(&opal_mem_err_lock, flags); 119 + 120 + schedule_work(&mem_error_work); 121 + return 0; 122 + } 123 + 124 + static struct notifier_block opal_mem_err_nb = { 125 + .notifier_call = opal_memory_err_event, 126 + .next = NULL, 127 + .priority = 0, 128 + }; 129 + 130 + static int __init opal_mem_err_init(void) 131 + { 132 + int ret; 133 + 134 + if (!opal_mem_err_nb_init) { 135 + ret = opal_message_notifier_register( 136 + OPAL_MSG_MEM_ERR, &opal_mem_err_nb); 137 + if (ret) { 138 + pr_err("%s: Can't register OPAL event notifier (%d)\n", 139 + __func__, ret); 140 + return ret; 141 + } 142 + opal_mem_err_nb_init = 1; 143 + } 144 + return 0; 145 + } 146 + subsys_initcall(opal_mem_err_init);
+5 -1
arch/powerpc/platforms/powernv/opal-rtc.c
··· 18 18 19 19 #include <asm/opal.h> 20 20 #include <asm/firmware.h> 21 + #include <asm/machdep.h> 21 22 22 23 static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm) 23 24 { ··· 49 48 else 50 49 mdelay(10); 51 50 } 52 - if (rc != OPAL_SUCCESS) 51 + if (rc != OPAL_SUCCESS) { 52 + ppc_md.get_rtc_time = NULL; 53 + ppc_md.set_rtc_time = NULL; 53 54 return 0; 55 + } 54 56 y_m_d = be32_to_cpu(__y_m_d); 55 57 h_m_s_ms = be64_to_cpu(__h_m_s_ms); 56 58 opal_to_tm(y_m_d, h_m_s_ms, &tm);
+3
arch/powerpc/platforms/powernv/opal-wrappers.S
··· 126 126 OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); 127 127 OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); 128 128 OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); 129 + OPAL_CALL(opal_get_msg, OPAL_GET_MSG); 130 + OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION); 131 + OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
+156 -109
arch/powerpc/platforms/powernv/opal.c
··· 18 18 #include <linux/interrupt.h> 19 19 #include <linux/notifier.h> 20 20 #include <linux/slab.h> 21 + #include <linux/sched.h> 21 22 #include <linux/kobject.h> 23 + #include <linux/delay.h> 22 24 #include <asm/opal.h> 23 25 #include <asm/firmware.h> 26 + #include <asm/mce.h> 24 27 25 28 #include "powernv.h" 26 29 ··· 41 38 static unsigned int *opal_irqs; 42 39 static unsigned int opal_irq_count; 43 40 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head); 41 + static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX]; 44 42 static DEFINE_SPINLOCK(opal_notifier_lock); 45 43 static uint64_t last_notified_mask = 0x0ul; 46 44 static atomic_t opal_notifier_hold = ATOMIC_INIT(0); ··· 92 88 if (!(powerpc_firmware_features & FW_FEATURE_OPAL)) 93 89 return -ENODEV; 94 90 95 - /* Hookup some exception handlers. We use the fwnmi area at 0x7000 96 - * to provide the glue space to OPAL 91 + /* Hookup some exception handlers except machine check. We use the 92 + * fwnmi area at 0x7000 to provide the glue space to OPAL 97 93 */ 98 94 glue = 0x7000; 99 - opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER, 100 - __pa(opal_mc_secondary_handler[0]), 101 - glue); 102 - glue += 128; 103 95 opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER, 104 96 0, glue); 105 97 glue += 128; ··· 168 168 { 169 169 atomic_set(&opal_notifier_hold, 1); 170 170 } 171 + 172 + /* 173 + * Opal message notifier based on message type. Allow subscribers to get 174 + * notified for specific messgae type. 
175 + */ 176 + int opal_message_notifier_register(enum OpalMessageType msg_type, 177 + struct notifier_block *nb) 178 + { 179 + if (!nb) { 180 + pr_warning("%s: Invalid argument (%p)\n", 181 + __func__, nb); 182 + return -EINVAL; 183 + } 184 + if (msg_type > OPAL_MSG_TYPE_MAX) { 185 + pr_warning("%s: Invalid message type argument (%d)\n", 186 + __func__, msg_type); 187 + return -EINVAL; 188 + } 189 + return atomic_notifier_chain_register( 190 + &opal_msg_notifier_head[msg_type], nb); 191 + } 192 + 193 + static void opal_message_do_notify(uint32_t msg_type, void *msg) 194 + { 195 + /* notify subscribers */ 196 + atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type], 197 + msg_type, msg); 198 + } 199 + 200 + static void opal_handle_message(void) 201 + { 202 + s64 ret; 203 + /* 204 + * TODO: pre-allocate a message buffer depending on opal-msg-size 205 + * value in /proc/device-tree. 206 + */ 207 + static struct opal_msg msg; 208 + 209 + ret = opal_get_msg(__pa(&msg), sizeof(msg)); 210 + /* No opal message pending. */ 211 + if (ret == OPAL_RESOURCE) 212 + return; 213 + 214 + /* check for errors. 
*/ 215 + if (ret) { 216 + pr_warning("%s: Failed to retrive opal message, err=%lld\n", 217 + __func__, ret); 218 + return; 219 + } 220 + 221 + /* Sanity check */ 222 + if (msg.msg_type > OPAL_MSG_TYPE_MAX) { 223 + pr_warning("%s: Unknown message type: %u\n", 224 + __func__, msg.msg_type); 225 + return; 226 + } 227 + opal_message_do_notify(msg.msg_type, (void *)&msg); 228 + } 229 + 230 + static int opal_message_notify(struct notifier_block *nb, 231 + unsigned long events, void *change) 232 + { 233 + if (events & OPAL_EVENT_MSG_PENDING) 234 + opal_handle_message(); 235 + return 0; 236 + } 237 + 238 + static struct notifier_block opal_message_nb = { 239 + .notifier_call = opal_message_notify, 240 + .next = NULL, 241 + .priority = 0, 242 + }; 243 + 244 + static int __init opal_message_init(void) 245 + { 246 + int ret, i; 247 + 248 + for (i = 0; i < OPAL_MSG_TYPE_MAX; i++) 249 + ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]); 250 + 251 + ret = opal_notifier_register(&opal_message_nb); 252 + if (ret) { 253 + pr_err("%s: Can't register OPAL event notifier (%d)\n", 254 + __func__, ret); 255 + return ret; 256 + } 257 + return 0; 258 + } 259 + early_initcall(opal_message_init); 171 260 172 261 int opal_get_chars(uint32_t vtermno, char *buf, int count) 173 262 { ··· 343 254 return written; 344 255 } 345 256 257 + static int opal_recover_mce(struct pt_regs *regs, 258 + struct machine_check_event *evt) 259 + { 260 + int recovered = 0; 261 + uint64_t ea = get_mce_fault_addr(evt); 262 + 263 + if (!(regs->msr & MSR_RI)) { 264 + /* If MSR_RI isn't set, we cannot recover */ 265 + recovered = 0; 266 + } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { 267 + /* Platform corrected itself */ 268 + recovered = 1; 269 + } else if (ea && !is_kernel_addr(ea)) { 270 + /* 271 + * Faulting address is not in kernel text. We should be fine. 272 + * We need to find which process uses this address. 
273 + * For now, kill the task if we have received exception when 274 + * in userspace. 275 + * 276 + * TODO: Queue up this address for hwpoisioning later. 277 + */ 278 + if (user_mode(regs) && !is_global_init(current)) { 279 + _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 280 + recovered = 1; 281 + } else 282 + recovered = 0; 283 + } else if (user_mode(regs) && !is_global_init(current) && 284 + evt->severity == MCE_SEV_ERROR_SYNC) { 285 + /* 286 + * If we have received a synchronous error when in userspace 287 + * kill the task. 288 + */ 289 + _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 290 + recovered = 1; 291 + } 292 + return recovered; 293 + } 294 + 346 295 int opal_machine_check(struct pt_regs *regs) 347 296 { 348 - struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt; 349 - struct opal_machine_check_event evt; 350 - const char *level, *sevstr, *subtype; 351 - static const char *opal_mc_ue_types[] = { 352 - "Indeterminate", 353 - "Instruction fetch", 354 - "Page table walk ifetch", 355 - "Load/Store", 356 - "Page table walk Load/Store", 357 - }; 358 - static const char *opal_mc_slb_types[] = { 359 - "Indeterminate", 360 - "Parity", 361 - "Multihit", 362 - }; 363 - static const char *opal_mc_erat_types[] = { 364 - "Indeterminate", 365 - "Parity", 366 - "Multihit", 367 - }; 368 - static const char *opal_mc_tlb_types[] = { 369 - "Indeterminate", 370 - "Parity", 371 - "Multihit", 372 - }; 297 + struct machine_check_event evt; 373 298 374 - /* Copy the event structure and release the original */ 375 - evt = *opal_evt; 376 - opal_evt->in_use = 0; 299 + if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 300 + return 0; 377 301 378 302 /* Print things out */ 379 - if (evt.version != OpalMCE_V1) { 303 + if (evt.version != MCE_V1) { 380 304 pr_err("Machine Check Exception, Unknown event version %d !\n", 381 305 evt.version); 382 306 return 0; 383 307 } 384 - switch(evt.severity) { 385 - case OpalMCE_SEV_NO_ERROR: 386 - level = KERN_INFO; 387 - 
sevstr = "Harmless"; 388 - break; 389 - case OpalMCE_SEV_WARNING: 390 - level = KERN_WARNING; 391 - sevstr = ""; 392 - break; 393 - case OpalMCE_SEV_ERROR_SYNC: 394 - level = KERN_ERR; 395 - sevstr = "Severe"; 396 - break; 397 - case OpalMCE_SEV_FATAL: 398 - default: 399 - level = KERN_ERR; 400 - sevstr = "Fatal"; 401 - break; 402 - } 308 + machine_check_print_event_info(&evt); 403 309 404 - printk("%s%s Machine check interrupt [%s]\n", level, sevstr, 405 - evt.disposition == OpalMCE_DISPOSITION_RECOVERED ? 406 - "Recovered" : "[Not recovered"); 407 - printk("%s Initiator: %s\n", level, 408 - evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown"); 409 - switch(evt.error_type) { 410 - case OpalMCE_ERROR_TYPE_UE: 411 - subtype = evt.u.ue_error.ue_error_type < 412 - ARRAY_SIZE(opal_mc_ue_types) ? 413 - opal_mc_ue_types[evt.u.ue_error.ue_error_type] 414 - : "Unknown"; 415 - printk("%s Error type: UE [%s]\n", level, subtype); 416 - if (evt.u.ue_error.effective_address_provided) 417 - printk("%s Effective address: %016llx\n", 418 - level, evt.u.ue_error.effective_address); 419 - if (evt.u.ue_error.physical_address_provided) 420 - printk("%s Physial address: %016llx\n", 421 - level, evt.u.ue_error.physical_address); 422 - break; 423 - case OpalMCE_ERROR_TYPE_SLB: 424 - subtype = evt.u.slb_error.slb_error_type < 425 - ARRAY_SIZE(opal_mc_slb_types) ? 426 - opal_mc_slb_types[evt.u.slb_error.slb_error_type] 427 - : "Unknown"; 428 - printk("%s Error type: SLB [%s]\n", level, subtype); 429 - if (evt.u.slb_error.effective_address_provided) 430 - printk("%s Effective address: %016llx\n", 431 - level, evt.u.slb_error.effective_address); 432 - break; 433 - case OpalMCE_ERROR_TYPE_ERAT: 434 - subtype = evt.u.erat_error.erat_error_type < 435 - ARRAY_SIZE(opal_mc_erat_types) ? 
436 - opal_mc_erat_types[evt.u.erat_error.erat_error_type] 437 - : "Unknown"; 438 - printk("%s Error type: ERAT [%s]\n", level, subtype); 439 - if (evt.u.erat_error.effective_address_provided) 440 - printk("%s Effective address: %016llx\n", 441 - level, evt.u.erat_error.effective_address); 442 - break; 443 - case OpalMCE_ERROR_TYPE_TLB: 444 - subtype = evt.u.tlb_error.tlb_error_type < 445 - ARRAY_SIZE(opal_mc_tlb_types) ? 446 - opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type] 447 - : "Unknown"; 448 - printk("%s Error type: TLB [%s]\n", level, subtype); 449 - if (evt.u.tlb_error.effective_address_provided) 450 - printk("%s Effective address: %016llx\n", 451 - level, evt.u.tlb_error.effective_address); 452 - break; 453 - default: 454 - case OpalMCE_ERROR_TYPE_UNKNOWN: 455 - printk("%s Error type: Unknown\n", level); 456 - break; 457 - } 458 - return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1; 310 + if (opal_recover_mce(regs, &evt)) 311 + return 1; 312 + return 0; 459 313 } 460 314 461 315 static irqreturn_t opal_interrupt(int irq, void *data) ··· 483 451 void opal_shutdown(void) 484 452 { 485 453 unsigned int i; 454 + long rc = OPAL_BUSY; 486 455 456 + /* First free interrupts, which will also mask them */ 487 457 for (i = 0; i < opal_irq_count; i++) { 488 458 if (opal_irqs[i]) 489 459 free_irq(opal_irqs[i], NULL); 490 460 opal_irqs[i] = 0; 461 + } 462 + 463 + /* 464 + * Then sync with OPAL which ensure anything that can 465 + * potentially write to our memory has completed such 466 + * as an ongoing dump retrieval 467 + */ 468 + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 469 + rc = opal_sync_host_reboot(); 470 + if (rc == OPAL_BUSY) 471 + opal_poll_events(NULL); 472 + else 473 + mdelay(10); 491 474 } 492 475 }
+5 -6
arch/powerpc/platforms/powernv/pci-ioda.c
··· 460 460 return; 461 461 462 462 pe = &phb->ioda.pe_array[pdn->pe_number]; 463 - set_iommu_table_base(&pdev->dev, &pe->tce32_table); 463 + set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); 464 464 } 465 465 466 466 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) ··· 468 468 struct pci_dev *dev; 469 469 470 470 list_for_each_entry(dev, &bus->devices, bus_list) { 471 - set_iommu_table_base(&dev->dev, &pe->tce32_table); 471 + set_iommu_table_base_and_group(&dev->dev, &pe->tce32_table); 472 472 if (dev->subordinate) 473 473 pnv_ioda_setup_bus_dma(pe, dev->subordinate); 474 474 } ··· 644 644 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number); 645 645 646 646 if (pe->pdev) 647 - set_iommu_table_base(&pe->pdev->dev, tbl); 647 + set_iommu_table_base_and_group(&pe->pdev->dev, tbl); 648 648 else 649 649 pnv_ioda_setup_bus_dma(pe, pe->pbus); 650 650 ··· 723 723 iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number); 724 724 725 725 if (pe->pdev) 726 - set_iommu_table_base(&pe->pdev->dev, tbl); 726 + set_iommu_table_base_and_group(&pe->pdev->dev, tbl); 727 727 else 728 728 pnv_ioda_setup_bus_dma(pe, pe->pbus); 729 729 ··· 1144 1144 { 1145 1145 struct pci_controller *hose; 1146 1146 struct pnv_phb *phb; 1147 - unsigned long size, m32map_off, iomap_off, pemap_off; 1147 + unsigned long size, m32map_off, pemap_off, iomap_off = 0; 1148 1148 const __be64 *prop64; 1149 1149 const __be32 *prop32; 1150 1150 int len; ··· 1231 1231 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long)); 1232 1232 m32map_off = size; 1233 1233 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]); 1234 - iomap_off = size; 1235 1234 if (phb->type == PNV_PHB_IODA1) { 1236 1235 iomap_off = size; 1237 1236 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
+1 -1
arch/powerpc/platforms/powernv/pci-p5ioc2.c
··· 92 92 pci_domain_nr(phb->hose->bus), phb->opal_id); 93 93 } 94 94 95 - set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table); 95 + set_iommu_table_base_and_group(&pdev->dev, &phb->p5ioc2.iommu_table); 96 96 } 97 97 98 98 static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
+166 -56
arch/powerpc/platforms/powernv/pci.c
··· 124 124 } 125 125 #endif /* CONFIG_PCI_MSI */ 126 126 127 - static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb) 127 + static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, 128 + struct OpalIoPhbErrorCommon *common) 128 129 { 129 - struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc; 130 + struct OpalIoP7IOCPhbErrorData *data; 130 131 int i; 131 132 132 - pr_info("PHB %d diagnostic data:\n", phb->hose->global_number); 133 + data = (struct OpalIoP7IOCPhbErrorData *)common; 134 + pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", 135 + hose->global_number, common->version); 133 136 134 - pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl); 137 + pr_info(" brdgCtl: %08x\n", data->brdgCtl); 135 138 136 - pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg); 137 - pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus); 138 - pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus); 139 + pr_info(" portStatusReg: %08x\n", data->portStatusReg); 140 + pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); 141 + pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); 139 142 140 - pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus); 141 - pr_info(" slotStatus = 0x%08x\n", data->slotStatus); 142 - pr_info(" linkStatus = 0x%08x\n", data->linkStatus); 143 - pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus); 144 - pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus); 143 + pr_info(" deviceStatus: %08x\n", data->deviceStatus); 144 + pr_info(" slotStatus: %08x\n", data->slotStatus); 145 + pr_info(" linkStatus: %08x\n", data->linkStatus); 146 + pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); 147 + pr_info(" devSecStatus: %08x\n", data->devSecStatus); 145 148 146 - pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus); 147 - pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus); 148 - pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus); 149 - pr_info(" tlpHdr1 = 0x%08x\n", 
data->tlpHdr1); 150 - pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2); 151 - pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3); 152 - pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4); 153 - pr_info(" sourceId = 0x%08x\n", data->sourceId); 154 - 155 - pr_info(" errorClass = 0x%016llx\n", data->errorClass); 156 - pr_info(" correlator = 0x%016llx\n", data->correlator); 157 - 158 - pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr); 159 - pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr); 160 - pr_info(" lemFir = 0x%016llx\n", data->lemFir); 161 - pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask); 162 - pr_info(" lemWOF = 0x%016llx\n", data->lemWOF); 163 - pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus); 164 - pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus); 165 - pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0); 166 - pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1); 167 - pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus); 168 - pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus); 169 - pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0); 170 - pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1); 171 - pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus); 172 - pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus); 173 - pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0); 174 - pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1); 175 - pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus); 176 - pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus); 177 - pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0); 178 - pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1); 149 + pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); 150 + pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); 151 + pr_info(" corrErrorStatus: %08x\n", 
data->corrErrorStatus); 152 + pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); 153 + pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); 154 + pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); 155 + pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); 156 + pr_info(" sourceId: %08x\n", data->sourceId); 157 + pr_info(" errorClass: %016llx\n", data->errorClass); 158 + pr_info(" correlator: %016llx\n", data->correlator); 159 + pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); 160 + pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); 161 + pr_info(" lemFir: %016llx\n", data->lemFir); 162 + pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); 163 + pr_info(" lemWOF: %016llx\n", data->lemWOF); 164 + pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); 165 + pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); 166 + pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); 167 + pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); 168 + pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); 169 + pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); 170 + pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); 171 + pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); 172 + pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); 173 + pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); 174 + pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); 175 + pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); 176 + pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); 177 + pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); 178 + pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); 179 + pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); 179 180 180 181 for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { 181 182 if ((data->pestA[i] >> 63) == 0 && 182 183 (data->pestB[i] >> 63) == 0) 183 184 continue; 184 - pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, 
data->pestA[i]); 185 - pr_info(" PESTB = 0x%016llx\n", data->pestB[i]); 185 + 186 + pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); 187 + pr_info(" PESTB: %016llx\n", data->pestB[i]); 186 188 } 187 189 } 188 190 189 - static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb) 191 + static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, 192 + struct OpalIoPhbErrorCommon *common) 190 193 { 191 - switch(phb->model) { 192 - case PNV_PHB_MODEL_P7IOC: 193 - pnv_pci_dump_p7ioc_diag_data(phb); 194 + struct OpalIoPhb3ErrorData *data; 195 + int i; 196 + 197 + data = (struct OpalIoPhb3ErrorData*)common; 198 + pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", 199 + hose->global_number, common->version); 200 + 201 + pr_info(" brdgCtl: %08x\n", data->brdgCtl); 202 + 203 + pr_info(" portStatusReg: %08x\n", data->portStatusReg); 204 + pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); 205 + pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); 206 + 207 + pr_info(" deviceStatus: %08x\n", data->deviceStatus); 208 + pr_info(" slotStatus: %08x\n", data->slotStatus); 209 + pr_info(" linkStatus: %08x\n", data->linkStatus); 210 + pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); 211 + pr_info(" devSecStatus: %08x\n", data->devSecStatus); 212 + 213 + pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); 214 + pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); 215 + pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); 216 + pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); 217 + pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); 218 + pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); 219 + pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); 220 + pr_info(" sourceId: %08x\n", data->sourceId); 221 + pr_info(" errorClass: %016llx\n", data->errorClass); 222 + pr_info(" correlator: %016llx\n", data->correlator); 223 + 224 + pr_info(" nFir: %016llx\n", data->nFir); 225 + pr_info(" nFirMask: %016llx\n", data->nFirMask); 226 + pr_info(" nFirWOF: 
%016llx\n", data->nFirWOF); 227 + pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); 228 + pr_info(" PhbCsr: %016llx\n", data->phbCsr); 229 + pr_info(" lemFir: %016llx\n", data->lemFir); 230 + pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); 231 + pr_info(" lemWOF: %016llx\n", data->lemWOF); 232 + pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); 233 + pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); 234 + pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); 235 + pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); 236 + pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); 237 + pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); 238 + pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); 239 + pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); 240 + pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); 241 + pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); 242 + pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); 243 + pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); 244 + pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); 245 + pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); 246 + pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); 247 + pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); 248 + 249 + for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { 250 + if ((data->pestA[i] >> 63) == 0 && 251 + (data->pestB[i] >> 63) == 0) 252 + continue; 253 + 254 + pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); 255 + pr_info(" PESTB: %016llx\n", data->pestB[i]); 256 + } 257 + } 258 + 259 + void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, 260 + unsigned char *log_buff) 261 + { 262 + struct OpalIoPhbErrorCommon *common; 263 + 264 + if (!hose || !log_buff) 265 + return; 266 + 267 + common = (struct OpalIoPhbErrorCommon *)log_buff; 268 + switch (common->ioType) { 269 + 
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: 270 + pnv_pci_dump_p7ioc_diag_data(hose, common); 271 + break; 272 + case OPAL_PHB_ERROR_DATA_TYPE_PHB3: 273 + pnv_pci_dump_phb3_diag_data(hose, common); 194 274 break; 195 275 default: 196 - pr_warning("PCI %d: Can't decode this PHB diag data\n", 197 - phb->hose->global_number); 276 + pr_warn("%s: Unrecognized ioType %d\n", 277 + __func__, common->ioType); 198 278 } 199 279 } 200 280 ··· 302 222 * with the normal errors generated when probing empty slots 303 223 */ 304 224 if (has_diag) 305 - pnv_pci_dump_phb_diag_data(phb); 225 + pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob); 306 226 else 307 227 pr_warning("PCI %d: No diag data available\n", 308 228 phb->hose->global_number); ··· 564 484 { 565 485 tbl->it_blocksize = 16; 566 486 tbl->it_base = (unsigned long)tce_mem; 567 - tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT; 487 + tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; 488 + tbl->it_offset = dma_offset >> tbl->it_page_shift; 568 489 tbl->it_index = 0; 569 490 tbl->it_size = tce_size >> 3; 570 491 tbl->it_busno = 0; ··· 617 536 pdn->iommu_table = pnv_pci_setup_bml_iommu(hose); 618 537 if (!pdn->iommu_table) 619 538 return; 620 - set_iommu_table_base(&pdev->dev, pdn->iommu_table); 539 + set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table); 621 540 } 622 541 623 542 static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) ··· 738 657 ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs; 739 658 #endif 740 659 } 660 + 661 + static int tce_iommu_bus_notifier(struct notifier_block *nb, 662 + unsigned long action, void *data) 663 + { 664 + struct device *dev = data; 665 + 666 + switch (action) { 667 + case BUS_NOTIFY_ADD_DEVICE: 668 + return iommu_add_device(dev); 669 + case BUS_NOTIFY_DEL_DEVICE: 670 + if (dev->iommu_group) 671 + iommu_del_device(dev); 672 + return 0; 673 + default: 674 + return 0; 675 + } 676 + } 677 + 678 + static struct notifier_block tce_iommu_bus_nb = { 679 + .notifier_call = tce_iommu_bus_notifier, 
680 + }; 681 + 682 + static int __init tce_iommu_bus_notifier_init(void) 683 + { 684 + bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); 685 + return 0; 686 + } 687 + 688 + subsys_initcall_sync(tce_iommu_bus_notifier_init);
+3
arch/powerpc/platforms/powernv/pci.h
··· 176 176 union { 177 177 unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; 178 178 struct OpalIoP7IOCPhbErrorData p7ioc; 179 + struct OpalIoPhb3ErrorData phb3; 179 180 struct OpalIoP7IOCErrorData hub_diag; 180 181 } diag; 181 182 ··· 187 186 extern struct pnv_eeh_ops ioda_eeh_ops; 188 187 #endif 189 188 189 + void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, 190 + unsigned char *log_buff); 190 191 int pnv_pci_cfg_read(struct device_node *dn, 191 192 int where, int size, u32 *val); 192 193 int pnv_pci_cfg_write(struct device_node *dn,
+4 -2
arch/powerpc/platforms/powernv/setup.c
··· 145 145 /* Let the PCI code clear up IODA tables */ 146 146 pnv_pci_shutdown(); 147 147 148 - /* And unregister all OPAL interrupts so they don't fire 149 - * up while we kexec 148 + /* 149 + * Stop OPAL activity: Unregister all OPAL interrupts so they 150 + * don't fire up while we kexec and make sure all potentially 151 + * DMA'ing ops are complete (such as dump retrieval). 150 152 */ 151 153 opal_shutdown(); 152 154 }
+1 -1
arch/powerpc/platforms/ps3/spu.c
··· 143 143 pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow); 144 144 } 145 145 146 - inline u64 ps3_get_spe_id(void *arg) 146 + u64 ps3_get_spe_id(void *arg) 147 147 { 148 148 return spu_pdata(arg)->spe_id; 149 149 }
+1 -1
arch/powerpc/platforms/pseries/Kconfig
··· 34 34 35 35 config PSERIES_MSI 36 36 bool 37 - depends on PCI_MSI && EEH 37 + depends on PCI_MSI && PPC_PSERIES && EEH 38 38 default y 39 39 40 40 config PSERIES_ENERGY
-1
arch/powerpc/platforms/pseries/cmm.c
··· 25 25 #include <linux/errno.h> 26 26 #include <linux/fs.h> 27 27 #include <linux/gfp.h> 28 - #include <linux/init.h> 29 28 #include <linux/kthread.h> 30 29 #include <linux/module.h> 31 30 #include <linux/oom.h>
-1
arch/powerpc/platforms/pseries/dtl.c
··· 20 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 21 */ 22 22 23 - #include <linux/init.h> 24 23 #include <linux/slab.h> 25 24 #include <linux/debugfs.h> 26 25 #include <linux/spinlock.h>
+3 -1
arch/powerpc/platforms/pseries/eeh_pseries.c
··· 689 689 .get_log = pseries_eeh_get_log, 690 690 .configure_bridge = pseries_eeh_configure_bridge, 691 691 .read_config = pseries_eeh_read_config, 692 - .write_config = pseries_eeh_write_config 692 + .write_config = pseries_eeh_write_config, 693 + .next_error = NULL, 694 + .restore_config = NULL 693 695 }; 694 696 695 697 /**
+42 -125
arch/powerpc/platforms/pseries/iommu.c
··· 486 486 memset((void *)tbl->it_base, 0, *sizep); 487 487 488 488 tbl->it_busno = phb->bus->number; 489 + tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; 489 490 490 491 /* Units of tce entries */ 491 - tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT; 492 + tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift; 492 493 493 494 /* Test if we are going over 2GB of DMA space */ 494 495 if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { ··· 500 499 phb->dma_window_base_cur += phb->dma_window_size; 501 500 502 501 /* Set the tce table size - measured in entries */ 503 - tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT; 502 + tbl->it_size = phb->dma_window_size >> tbl->it_page_shift; 504 503 505 504 tbl->it_index = 0; 506 505 tbl->it_blocksize = 16; ··· 538 537 of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); 539 538 540 539 tbl->it_busno = phb->bus->number; 540 + tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; 541 541 tbl->it_base = 0; 542 542 tbl->it_blocksize = 16; 543 543 tbl->it_type = TCE_PCI; 544 - tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; 545 - tbl->it_size = size >> IOMMU_PAGE_SHIFT; 544 + tbl->it_offset = offset >> tbl->it_page_shift; 545 + tbl->it_size = size >> tbl->it_page_shift; 546 546 } 547 547 548 548 static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) ··· 689 687 iommu_table_setparms(phb, dn, tbl); 690 688 PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node); 691 689 iommu_register_group(tbl, pci_domain_nr(phb->bus), 0); 692 - set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table); 690 + set_iommu_table_base_and_group(&dev->dev, 691 + PCI_DN(dn)->iommu_table); 693 692 return; 694 693 } 695 694 ··· 702 699 dn = dn->parent; 703 700 704 701 if (dn && PCI_DN(dn)) 705 - set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table); 702 + set_iommu_table_base_and_group(&dev->dev, 703 + PCI_DN(dn)->iommu_table); 706 704 else 707 705 printk(KERN_WARNING "iommu: Device %s has no iommu 
table\n", 708 706 pci_name(dev)); ··· 720 716 } 721 717 722 718 early_param("disable_ddw", disable_ddw_setup); 723 - 724 - static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn) 725 - { 726 - int ret; 727 - 728 - ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); 729 - if (ret) 730 - pr_warning("%s: failed to remove DMA window: rtas returned " 731 - "%d to ibm,remove-pe-dma-window(%x) %llx\n", 732 - np->full_name, ret, ddw_avail[2], liobn); 733 - else 734 - pr_debug("%s: successfully removed DMA window: rtas returned " 735 - "%d to ibm,remove-pe-dma-window(%x) %llx\n", 736 - np->full_name, ret, ddw_avail[2], liobn); 737 - } 738 719 739 720 static void remove_ddw(struct device_node *np) 740 721 { ··· 750 761 pr_debug("%s successfully cleared tces in window.\n", 751 762 np->full_name); 752 763 753 - __remove_ddw(np, ddw_avail, liobn); 764 + ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); 765 + if (ret) 766 + pr_warning("%s: failed to remove direct window: rtas returned " 767 + "%d to ibm,remove-pe-dma-window(%x) %llx\n", 768 + np->full_name, ret, ddw_avail[2], liobn); 769 + else 770 + pr_debug("%s: successfully removed direct window: rtas returned " 771 + "%d to ibm,remove-pe-dma-window(%x) %llx\n", 772 + np->full_name, ret, ddw_avail[2], liobn); 754 773 755 774 delprop: 756 775 ret = of_remove_property(np, win64); ··· 787 790 return dma_addr; 788 791 } 789 792 790 - static void __restore_default_window(struct eeh_dev *edev, 791 - u32 ddw_restore_token) 792 - { 793 - u32 cfg_addr; 794 - u64 buid; 795 - int ret; 796 - 797 - /* 798 - * Get the config address and phb buid of the PE window. 799 - * Rely on eeh to retrieve this for us. 
800 - * Retrieve them from the pci device, not the node with the 801 - * dma-window property 802 - */ 803 - cfg_addr = edev->config_addr; 804 - if (edev->pe_config_addr) 805 - cfg_addr = edev->pe_config_addr; 806 - buid = edev->phb->buid; 807 - 808 - do { 809 - ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr, 810 - BUID_HI(buid), BUID_LO(buid)); 811 - } while (rtas_busy_delay(ret)); 812 - pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n", 813 - ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret); 814 - } 815 - 816 793 static int find_existing_ddw_windows(void) 817 794 { 795 + int len; 818 796 struct device_node *pdn; 797 + struct direct_window *window; 819 798 const struct dynamic_dma_window_prop *direct64; 820 - const u32 *ddw_extensions; 821 799 822 800 if (!firmware_has_feature(FW_FEATURE_LPAR)) 823 801 return 0; 824 802 825 803 for_each_node_with_property(pdn, DIRECT64_PROPNAME) { 826 - direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL); 804 + direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); 827 805 if (!direct64) 828 806 continue; 829 807 830 - /* 831 - * We need to ensure the IOMMU table is active when we 832 - * return from the IOMMU setup so that the common code 833 - * can clear the table or find the holes. To that end, 834 - * first, remove any existing DDW configuration. 835 - */ 836 - remove_ddw(pdn); 808 + window = kzalloc(sizeof(*window), GFP_KERNEL); 809 + if (!window || len < sizeof(struct dynamic_dma_window_prop)) { 810 + kfree(window); 811 + remove_ddw(pdn); 812 + continue; 813 + } 837 814 838 - /* 839 - * Second, if we are running on a new enough level of 840 - * firmware where the restore API is present, use it to 841 - * restore the 32-bit window, which was removed in 842 - * create_ddw. 843 - * If the API is not present, then create_ddw couldn't 844 - * have removed the 32-bit window in the first place, so 845 - * removing the DDW configuration should be sufficient. 
846 - */ 847 - ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", 848 - NULL); 849 - if (ddw_extensions && ddw_extensions[0] > 0) 850 - __restore_default_window(of_node_to_eeh_dev(pdn), 851 - ddw_extensions[1]); 815 + window->device = pdn; 816 + window->prop = direct64; 817 + spin_lock(&direct_window_list_lock); 818 + list_add(&window->list, &direct_window_list); 819 + spin_unlock(&direct_window_list_lock); 852 820 } 853 821 854 822 return 0; ··· 883 921 return ret; 884 922 } 885 923 886 - static void restore_default_window(struct pci_dev *dev, 887 - u32 ddw_restore_token) 888 - { 889 - __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token); 890 - } 891 - 892 924 struct failed_ddw_pdn { 893 925 struct device_node *pdn; 894 926 struct list_head list; ··· 910 954 u64 dma_addr, max_addr; 911 955 struct device_node *dn; 912 956 const u32 *uninitialized_var(ddw_avail); 913 - const u32 *uninitialized_var(ddw_extensions); 914 - u32 ddw_restore_token = 0; 915 957 struct direct_window *window; 916 958 struct property *win64; 917 959 struct dynamic_dma_window_prop *ddwprop; 918 - const void *dma_window = NULL; 919 - unsigned long liobn, offset, size; 920 960 struct failed_ddw_pdn *fpdn; 921 961 922 962 mutex_lock(&direct_window_init_mutex); ··· 943 991 */ 944 992 ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); 945 993 if (!ddw_avail || len < 3 * sizeof(u32)) 946 - goto out_unlock; 994 + goto out_failed; 947 995 948 - /* 949 - * the extensions property is only required to exist in certain 950 - * levels of firmware and later 951 - * the ibm,ddw-extensions property is a list with the first 952 - * element containing the number of extensions and each 953 - * subsequent entry is a value corresponding to that extension 954 - */ 955 - ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len); 956 - if (ddw_extensions) { 957 - /* 958 - * each new defined extension length should be added to 959 - * the top of the switch so the "earlier" 
entries also 960 - * get picked up 961 - */ 962 - switch (ddw_extensions[0]) { 963 - /* ibm,reset-pe-dma-windows */ 964 - case 1: 965 - ddw_restore_token = ddw_extensions[1]; 966 - break; 967 - } 968 - } 969 - 970 - /* 971 - * Only remove the existing DMA window if we can restore back to 972 - * the default state. Removing the existing window maximizes the 973 - * resources available to firmware for dynamic window creation. 974 - */ 975 - if (ddw_restore_token) { 976 - dma_window = of_get_property(pdn, "ibm,dma-window", NULL); 977 - of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size); 978 - __remove_ddw(pdn, ddw_avail, liobn); 979 - } 980 - 981 - /* 996 + /* 982 997 * Query if there is a second window of size to map the 983 998 * whole partition. Query returns number of windows, largest 984 999 * block assigned to PE (partition endpoint), and two bitmasks ··· 954 1035 dn = pci_device_to_OF_node(dev); 955 1036 ret = query_ddw(dev, ddw_avail, &query); 956 1037 if (ret != 0) 957 - goto out_restore_window; 1038 + goto out_failed; 958 1039 959 1040 if (query.windows_available == 0) { 960 1041 /* ··· 963 1044 * trading in for a larger page size. 
964 1045 */ 965 1046 dev_dbg(&dev->dev, "no free dynamic windows"); 966 - goto out_restore_window; 1047 + goto out_failed; 967 1048 } 968 1049 if (be32_to_cpu(query.page_size) & 4) { 969 1050 page_shift = 24; /* 16MB */ ··· 974 1055 } else { 975 1056 dev_dbg(&dev->dev, "no supported direct page size in mask %x", 976 1057 query.page_size); 977 - goto out_restore_window; 1058 + goto out_failed; 978 1059 } 979 1060 /* verify the window * number of ptes will map the partition */ 980 1061 /* check largest block * page size > max memory hotplug addr */ ··· 983 1064 dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " 984 1065 "%llu-sized pages\n", max_addr, query.largest_available_block, 985 1066 1ULL << page_shift); 986 - goto out_restore_window; 1067 + goto out_failed; 987 1068 } 988 1069 len = order_base_2(max_addr); 989 1070 win64 = kzalloc(sizeof(struct property), GFP_KERNEL); 990 1071 if (!win64) { 991 1072 dev_info(&dev->dev, 992 1073 "couldn't allocate property for 64bit dma window\n"); 993 - goto out_restore_window; 1074 + goto out_failed; 994 1075 } 995 1076 win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); 996 1077 win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); ··· 1052 1133 kfree(win64->value); 1053 1134 kfree(win64); 1054 1135 1055 - out_restore_window: 1056 - if (ddw_restore_token) 1057 - restore_default_window(dev, ddw_restore_token); 1136 + out_failed: 1058 1137 1059 1138 fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); 1060 1139 if (!fpdn) ··· 1110 1193 pr_debug(" found DMA window, table: %p\n", pci->iommu_table); 1111 1194 } 1112 1195 1113 - set_iommu_table_base(&dev->dev, pci->iommu_table); 1196 + set_iommu_table_base_and_group(&dev->dev, pci->iommu_table); 1114 1197 } 1115 1198 1116 1199 static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
+3 -2
arch/powerpc/platforms/pseries/lpar.c
··· 92 92 * PAPR says this feature is SLB-Buffer but firmware never 93 93 * reports that. All SPLPAR support SLB shadow buffer. 94 94 */ 95 - addr = __pa(&slb_shadow[cpu]); 95 + addr = __pa(paca[cpu].slb_shadow_ptr); 96 96 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 97 97 ret = register_slb_shadow(hwcpu, addr); 98 98 if (ret) ··· 153 153 154 154 /* Make pHyp happy */ 155 155 if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU)) 156 - hpte_r &= ~_PAGE_COHERENT; 156 + hpte_r &= ~HPTE_R_M; 157 + 157 158 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) 158 159 flags |= H_COALESCE_CAND; 159 160
-3
arch/powerpc/platforms/pseries/processor_idle.c
··· 17 17 #include <asm/reg.h> 18 18 #include <asm/machdep.h> 19 19 #include <asm/firmware.h> 20 - #include <asm/runlatch.h> 21 20 #include <asm/plpar_wrappers.h> 22 21 23 22 struct cpuidle_driver pseries_idle_driver = { ··· 61 62 set_thread_flag(TIF_POLLING_NRFLAG); 62 63 63 64 while ((!need_resched()) && cpu_online(cpu)) { 64 - ppc64_runlatch_off(); 65 65 HMT_low(); 66 66 HMT_very_low(); 67 67 } ··· 100 102 idle_loop_prolog(&in_purr); 101 103 get_lppaca()->donate_dedicated_cpu = 1; 102 104 103 - ppc64_runlatch_off(); 104 105 HMT_medium(); 105 106 check_and_cede_processor(); 106 107
+2 -2
arch/powerpc/platforms/pseries/setup.c
··· 72 72 73 73 int CMO_PrPSP = -1; 74 74 int CMO_SecPSP = -1; 75 - unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT); 75 + unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); 76 76 EXPORT_SYMBOL(CMO_PageSize); 77 77 78 78 int fwnmi_active; /* TRUE if an FWNMI handler is present */ ··· 569 569 { 570 570 char *ptr, *key, *value, *end; 571 571 int call_status; 572 - int page_order = IOMMU_PAGE_SHIFT; 572 + int page_order = IOMMU_PAGE_SHIFT_4K; 573 573 574 574 pr_debug(" -> fw_cmo_feature_init()\n"); 575 575 spin_lock(&rtas_data_buf_lock);
+6 -5
arch/powerpc/platforms/wsp/wsp_pci.c
··· 260 260 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; 261 261 262 262 dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n", 263 - tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT); 263 + tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K); 264 264 265 265 uaddr += TCE_PAGE_SIZE; 266 266 index++; ··· 381 381 382 382 /* Init bits and pieces */ 383 383 tbl->table.it_blocksize = 16; 384 - tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT; 385 - tbl->table.it_size = size >> IOMMU_PAGE_SHIFT; 384 + tbl->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; 385 + tbl->table.it_offset = addr >> tbl->table.it_page_shift; 386 + tbl->table.it_size = size >> tbl->table.it_page_shift; 386 387 387 388 /* 388 389 * It's already blank but we clear it anyway. ··· 450 449 if (table) { 451 450 pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n", 452 451 pci_name(pdev), 453 - table->table.it_offset << IOMMU_PAGE_SHIFT, 454 - (table->table.it_offset << IOMMU_PAGE_SHIFT) 452 + table->table.it_offset << IOMMU_PAGE_SHIFT_4K, 453 + (table->table.it_offset << IOMMU_PAGE_SHIFT_4K) 455 454 + phb->dma32_region_size - 1); 456 455 archdata->dma_data.iommu_table_base = &table->table; 457 456 return;
+1 -1
arch/powerpc/sysdev/Kconfig
··· 19 19 default y if MPIC 20 20 default y if FSL_PCI 21 21 default y if PPC4xx_MSI 22 - default y if POWERNV_MSI 22 + default y if PPC_POWERNV 23 23 24 24 source "arch/powerpc/sysdev/xics/Kconfig" 25 25
-1
arch/powerpc/sysdev/cpm2_pic.c
··· 27 27 */ 28 28 29 29 #include <linux/stddef.h> 30 - #include <linux/init.h> 31 30 #include <linux/sched.h> 32 31 #include <linux/signal.h> 33 32 #include <linux/irq.h>
-1
arch/powerpc/sysdev/fsl_ifc.c
··· 19 19 * along with this program; if not, write to the Free Software 20 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 21 */ 22 - #include <linux/init.h> 23 22 #include <linux/module.h> 24 23 #include <linux/kernel.h> 25 24 #include <linux/compiler.h>
+25 -6
arch/powerpc/sysdev/fsl_lbc.c
··· 214 214 struct fsl_lbc_ctrl *ctrl = data; 215 215 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 216 216 u32 status; 217 + unsigned long flags; 217 218 219 + spin_lock_irqsave(&fsl_lbc_lock, flags); 218 220 status = in_be32(&lbc->ltesr); 219 - if (!status) 221 + if (!status) { 222 + spin_unlock_irqrestore(&fsl_lbc_lock, flags); 220 223 return IRQ_NONE; 224 + } 221 225 222 226 out_be32(&lbc->ltesr, LTESR_CLEAR); 223 227 out_be32(&lbc->lteatr, 0); ··· 264 260 if (status & ~LTESR_MASK) 265 261 dev_err(ctrl->dev, "Unknown error: " 266 262 "LTESR 0x%08X\n", status); 263 + spin_unlock_irqrestore(&fsl_lbc_lock, flags); 267 264 return IRQ_HANDLED; 268 265 } 269 266 ··· 303 298 goto err; 304 299 } 305 300 306 - fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 307 - if (fsl_lbc_ctrl_dev->irq == NO_IRQ) { 301 + fsl_lbc_ctrl_dev->irq[0] = irq_of_parse_and_map(dev->dev.of_node, 0); 302 + if (!fsl_lbc_ctrl_dev->irq[0]) { 308 303 dev_err(&dev->dev, "failed to get irq resource\n"); 309 304 ret = -ENODEV; 310 305 goto err; ··· 316 311 if (ret < 0) 317 312 goto err; 318 313 319 - ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0, 314 + ret = request_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_irq, 0, 320 315 "fsl-lbc", fsl_lbc_ctrl_dev); 321 316 if (ret != 0) { 322 317 dev_err(&dev->dev, "failed to install irq (%d)\n", 323 - fsl_lbc_ctrl_dev->irq); 324 - ret = fsl_lbc_ctrl_dev->irq; 318 + fsl_lbc_ctrl_dev->irq[0]); 319 + ret = fsl_lbc_ctrl_dev->irq[0]; 325 320 goto err; 321 + } 322 + 323 + fsl_lbc_ctrl_dev->irq[1] = irq_of_parse_and_map(dev->dev.of_node, 1); 324 + if (fsl_lbc_ctrl_dev->irq[1]) { 325 + ret = request_irq(fsl_lbc_ctrl_dev->irq[1], fsl_lbc_ctrl_irq, 326 + IRQF_SHARED, "fsl-lbc-err", fsl_lbc_ctrl_dev); 327 + if (ret) { 328 + dev_err(&dev->dev, "failed to install irq (%d)\n", 329 + fsl_lbc_ctrl_dev->irq[1]); 330 + ret = fsl_lbc_ctrl_dev->irq[1]; 331 + goto err1; 332 + } 326 333 } 327 334 328 335 /* Enable interrupts for any detected events 
*/ ··· 342 325 343 326 return 0; 344 327 328 + err1: 329 + free_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_dev); 345 330 err: 346 331 iounmap(fsl_lbc_ctrl_dev->regs); 347 332 kfree(fsl_lbc_ctrl_dev);
+3 -2
arch/powerpc/sysdev/fsl_pci.c
··· 122 122 * address width of the SoC such that we can address any internal 123 123 * SoC address from across PCI if needed 124 124 */ 125 - if ((dev->bus == &pci_bus_type) && 125 + if ((dev_is_pci(dev)) && 126 126 dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) { 127 127 set_dma_ops(dev, &dma_direct_ops); 128 128 set_dma_offset(dev, pci64_dma_offset); ··· 454 454 } 455 455 } 456 456 457 - int __init fsl_add_bridge(struct platform_device *pdev, int is_primary) 457 + int fsl_add_bridge(struct platform_device *pdev, int is_primary) 458 458 { 459 459 int len; 460 460 struct pci_controller *hose; ··· 1035 1035 { .compatible = "fsl,mpc8548-pcie", }, 1036 1036 { .compatible = "fsl,mpc8610-pci", }, 1037 1037 { .compatible = "fsl,mpc8641-pcie", }, 1038 + { .compatible = "fsl,qoriq-pcie", }, 1038 1039 { .compatible = "fsl,qoriq-pcie-v2.1", }, 1039 1040 { .compatible = "fsl,qoriq-pcie-v2.2", }, 1040 1041 { .compatible = "fsl,qoriq-pcie-v2.3", },
-1
arch/powerpc/sysdev/ge/ge_pic.h
··· 1 1 #ifndef __GEF_PIC_H__ 2 2 #define __GEF_PIC_H__ 3 3 4 - #include <linux/init.h> 5 4 6 5 void gef_pic_cascade(unsigned int, struct irq_desc *); 7 6 unsigned int gef_pic_get_irq(void);
-1
arch/powerpc/sysdev/i8259.c
··· 8 8 */ 9 9 #undef DEBUG 10 10 11 - #include <linux/init.h> 12 11 #include <linux/ioport.h> 13 12 #include <linux/interrupt.h> 14 13 #include <linux/kernel.h>
+2 -4
arch/powerpc/sysdev/indirect_pci.c
··· 152 152 .write = indirect_write_config, 153 153 }; 154 154 155 - void __init 156 - setup_indirect_pci(struct pci_controller* hose, 157 - resource_size_t cfg_addr, 158 - resource_size_t cfg_data, u32 flags) 155 + void setup_indirect_pci(struct pci_controller *hose, resource_size_t cfg_addr, 156 + resource_size_t cfg_data, u32 flags) 159 157 { 160 158 resource_size_t base = cfg_addr & PAGE_MASK; 161 159 void __iomem *mbase;
-1
arch/powerpc/sysdev/mpc8xx_pic.c
··· 1 1 #include <linux/kernel.h> 2 2 #include <linux/stddef.h> 3 - #include <linux/init.h> 4 3 #include <linux/sched.h> 5 4 #include <linux/signal.h> 6 5 #include <linux/irq.h>
+8 -2
arch/powerpc/sysdev/mpic_timer.c
··· 41 41 #define MPIC_TIMER_TCR_ROVR_OFFSET 24 42 42 43 43 #define TIMER_STOP 0x80000000 44 + #define GTCCR_TOG 0x80000000 44 45 #define TIMERS_PER_GROUP 4 45 46 #define MAX_TICKS (~0U >> 1) 46 47 #define MAX_TICKS_CASCADE (~0U) ··· 97 96 time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq); 98 97 tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq; 99 98 100 - time->tv_usec = (__kernel_suseconds_t) 101 - div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq); 99 + time->tv_usec = 0; 100 + 101 + if (tmp_sec <= ticks) 102 + time->tv_usec = (__kernel_suseconds_t) 103 + div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq); 102 104 103 105 return; 104 106 } ··· 331 327 casc_priv = priv->timer[handle->num].cascade_handle; 332 328 if (casc_priv) { 333 329 tmp_ticks = in_be32(&priv->regs[handle->num].gtccr); 330 + tmp_ticks &= ~GTCCR_TOG; 334 331 ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE; 335 332 tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr); 336 333 ticks += tmp_ticks; 337 334 } else { 338 335 ticks = in_be32(&priv->regs[handle->num].gtccr); 336 + ticks &= ~GTCCR_TOG; 339 337 } 340 338 341 339 convert_ticks_to_time(priv, ticks, time);
-1
arch/powerpc/sysdev/qe_lib/qe_io.c
··· 16 16 17 17 #include <linux/stddef.h> 18 18 #include <linux/kernel.h> 19 - #include <linux/init.h> 20 19 #include <linux/errno.h> 21 20 #include <linux/module.h> 22 21 #include <linux/ioport.h>
-1
arch/powerpc/sysdev/qe_lib/ucc.c
··· 14 14 * option) any later version. 15 15 */ 16 16 #include <linux/kernel.h> 17 - #include <linux/init.h> 18 17 #include <linux/errno.h> 19 18 #include <linux/stddef.h> 20 19 #include <linux/spinlock.h>
-1
arch/powerpc/sysdev/qe_lib/ucc_fast.c
··· 13 13 * option) any later version. 14 14 */ 15 15 #include <linux/kernel.h> 16 - #include <linux/init.h> 17 16 #include <linux/errno.h> 18 17 #include <linux/slab.h> 19 18 #include <linux/stddef.h>
-1
arch/powerpc/sysdev/qe_lib/ucc_slow.c
··· 13 13 * option) any later version. 14 14 */ 15 15 #include <linux/kernel.h> 16 - #include <linux/init.h> 17 16 #include <linux/errno.h> 18 17 #include <linux/slab.h> 19 18 #include <linux/stddef.h>
-1
arch/powerpc/sysdev/udbg_memcons.c
··· 18 18 * 2 of the License, or (at your option) any later version. 19 19 */ 20 20 21 - #include <linux/init.h> 22 21 #include <linux/kernel.h> 23 22 #include <asm/barrier.h> 24 23 #include <asm/page.h>
-1
arch/powerpc/sysdev/xics/icp-hv.c
··· 12 12 #include <linux/irq.h> 13 13 #include <linux/smp.h> 14 14 #include <linux/interrupt.h> 15 - #include <linux/init.h> 16 15 #include <linux/cpu.h> 17 16 #include <linux/of.h> 18 17
+4
arch/powerpc/xmon/xmon.c
··· 2051 2051 DUMP(p, stab_addr, "lx"); 2052 2052 #endif 2053 2053 DUMP(p, emergency_sp, "p"); 2054 + #ifdef CONFIG_PPC_BOOK3S_64 2055 + DUMP(p, mc_emergency_sp, "p"); 2056 + DUMP(p, in_mce, "x"); 2057 + #endif 2054 2058 DUMP(p, data_offset, "lx"); 2055 2059 DUMP(p, hw_cpu_id, "x"); 2056 2060 DUMP(p, cpu_start, "x");
+1 -1
drivers/macintosh/windfarm_lm75_sensor.c
··· 133 133 lm->inited = 0; 134 134 lm->ds1775 = ds1775; 135 135 lm->i2c = client; 136 - lm->sens.name = (char *)name; /* XXX fix constness in structure */ 136 + lm->sens.name = name; 137 137 lm->sens.ops = &wf_lm75_ops; 138 138 i2c_set_clientdata(client, lm); 139 139
+1 -1
drivers/macintosh/windfarm_max6690_sensor.c
··· 95 95 } 96 96 97 97 max->i2c = client; 98 - max->sens.name = (char *)name; /* XXX fix constness in structure */ 98 + max->sens.name = name; 99 99 max->sens.ops = &wf_max6690_ops; 100 100 i2c_set_clientdata(client, max); 101 101
+8 -4
drivers/net/ethernet/ibm/ibmveth.c
··· 1275 1275 { 1276 1276 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 1277 1277 struct ibmveth_adapter *adapter; 1278 + struct iommu_table *tbl; 1278 1279 unsigned long ret; 1279 1280 int i; 1280 1281 int rxqentries = 1; 1281 1282 1283 + tbl = get_iommu_table_base(&vdev->dev); 1284 + 1282 1285 /* netdev inits at probe time along with the structures we need below*/ 1283 1286 if (netdev == NULL) 1284 - return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT); 1287 + return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); 1285 1288 1286 1289 adapter = netdev_priv(netdev); 1287 1290 1288 1291 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1289 - ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1292 + ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); 1290 1293 1291 1294 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 1292 1295 /* add the size of the active receive buffers */ ··· 1297 1294 ret += 1298 1295 adapter->rx_buff_pool[i].size * 1299 1296 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 1300 - buff_size); 1297 + buff_size, tbl); 1301 1298 rxqentries += adapter->rx_buff_pool[i].size; 1302 1299 } 1303 1300 /* add the size of the receive queue entries */ 1304 - ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry)); 1301 + ret += IOMMU_PAGE_ALIGN( 1302 + rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); 1305 1303 1306 1304 return ret; 1307 1305 }
+1 -1
drivers/tty/Kconfig
··· 366 366 "Trace data router for MIPI P1149.7 cJTAG standard". 367 367 368 368 config PPC_EPAPR_HV_BYTECHAN 369 - tristate "ePAPR hypervisor byte channel driver" 369 + bool "ePAPR hypervisor byte channel driver" 370 370 depends on PPC 371 371 select EPAPR_PARAVIRT 372 372 help
+14 -14
drivers/vfio/vfio_iommu_spapr_tce.c
··· 81 81 * enforcing the limit based on the max that the guest can map. 82 82 */ 83 83 down_write(&current->mm->mmap_sem); 84 - npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; 84 + npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT; 85 85 locked = current->mm->locked_vm + npages; 86 86 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 87 87 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { ··· 110 110 111 111 down_write(&current->mm->mmap_sem); 112 112 current->mm->locked_vm -= (container->tbl->it_size << 113 - IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; 113 + IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT; 114 114 up_write(&current->mm->mmap_sem); 115 115 } 116 116 ··· 174 174 if (info.argsz < minsz) 175 175 return -EINVAL; 176 176 177 - info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT; 178 - info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT; 177 + info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K; 178 + info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K; 179 179 info.flags = 0; 180 180 181 181 if (copy_to_user((void __user *)arg, &info, minsz)) ··· 205 205 VFIO_DMA_MAP_FLAG_WRITE)) 206 206 return -EINVAL; 207 207 208 - if ((param.size & ~IOMMU_PAGE_MASK) || 209 - (param.vaddr & ~IOMMU_PAGE_MASK)) 208 + if ((param.size & ~IOMMU_PAGE_MASK_4K) || 209 + (param.vaddr & ~IOMMU_PAGE_MASK_4K)) 210 210 return -EINVAL; 211 211 212 212 /* iova is checked by the IOMMU API */ ··· 220 220 if (ret) 221 221 return ret; 222 222 223 - for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) { 223 + for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) { 224 224 ret = iommu_put_tce_user_mode(tbl, 225 - (param.iova >> IOMMU_PAGE_SHIFT) + i, 225 + (param.iova >> IOMMU_PAGE_SHIFT_4K) + i, 226 226 tce); 227 227 if (ret) 228 228 break; 229 - tce += IOMMU_PAGE_SIZE; 229 + tce += IOMMU_PAGE_SIZE_4K; 230 230 } 231 231 if (ret) 232 232 iommu_clear_tces_and_put_pages(tbl, 233 - param.iova >> IOMMU_PAGE_SHIFT, i); 233 + param.iova >> 
IOMMU_PAGE_SHIFT_4K, i); 234 234 235 235 iommu_flush_tce(tbl); 236 236 ··· 256 256 if (param.flags) 257 257 return -EINVAL; 258 258 259 - if (param.size & ~IOMMU_PAGE_MASK) 259 + if (param.size & ~IOMMU_PAGE_MASK_4K) 260 260 return -EINVAL; 261 261 262 262 ret = iommu_tce_clear_param_check(tbl, param.iova, 0, 263 - param.size >> IOMMU_PAGE_SHIFT); 263 + param.size >> IOMMU_PAGE_SHIFT_4K); 264 264 if (ret) 265 265 return ret; 266 266 267 267 ret = iommu_clear_tces_and_put_pages(tbl, 268 - param.iova >> IOMMU_PAGE_SHIFT, 269 - param.size >> IOMMU_PAGE_SHIFT); 268 + param.iova >> IOMMU_PAGE_SHIFT_4K, 269 + param.size >> IOMMU_PAGE_SHIFT_4K); 270 270 iommu_flush_tce(tbl); 271 271 272 272 return ret;
+1 -1
include/linux/mm.h
··· 1895 1895 } 1896 1896 #endif 1897 1897 1898 - #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE 1898 + #ifdef CONFIG_NUMA_BALANCING 1899 1899 unsigned long change_prot_numa(struct vm_area_struct *vma, 1900 1900 unsigned long start, unsigned long end); 1901 1901 #endif
+1
include/linux/of_fdt.h
··· 116 116 extern void unflatten_device_tree(void); 117 117 extern void unflatten_and_copy_device_tree(void); 118 118 extern void early_init_devtree(void *); 119 + extern void early_get_first_memblock_info(void *, phys_addr_t *); 119 120 #else /* CONFIG_OF_FLATTREE */ 120 121 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } 121 122 static inline void unflatten_device_tree(void) {}
+7 -2
include/math-emu/op-common.h
··· 685 685 else \ 686 686 { \ 687 687 r = 0; \ 688 - if (X##_s) \ 688 + if (!X##_s) \ 689 689 r = ~r; \ 690 690 } \ 691 691 FP_SET_EXCEPTION(FP_EX_INVALID); \ ··· 743 743 } \ 744 744 else \ 745 745 { \ 746 + int _lz0, _lz1; \ 746 747 if (X##_e <= -_FP_WORKBITS - 1) \ 747 748 _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \ 748 749 else \ 749 750 _FP_FRAC_SRS_##wc(X, _FP_FRACBITS_##fs - 1 - X##_e, \ 750 751 _FP_WFRACBITS_##fs); \ 752 + _FP_FRAC_CLZ_##wc(_lz0, X); \ 751 753 _FP_ROUND(wc, X); \ 754 + _FP_FRAC_CLZ_##wc(_lz1, X); \ 755 + if (_lz1 < _lz0) \ 756 + X##_e++; /* For overflow detection. */ \ 752 757 _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \ 753 758 _FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \ 754 759 } \ ··· 767 762 if (!rsigned) \ 768 763 { \ 769 764 r = 0; \ 770 - if (X##_s) \ 765 + if (!X##_s) \ 771 766 r = ~r; \ 772 767 } \ 773 768 else if (rsigned != 2) \
+2 -3
mm/mempolicy.c
··· 613 613 return 0; 614 614 } 615 615 616 - #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE 616 + #ifdef CONFIG_NUMA_BALANCING 617 617 /* 618 618 * This is used to mark a range of virtual addresses to be inaccessible. 619 619 * These are later cleared by a NUMA hinting fault. Depending on these ··· 627 627 unsigned long addr, unsigned long end) 628 628 { 629 629 int nr_updated; 630 - BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE); 631 630 632 631 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1); 633 632 if (nr_updated) ··· 640 641 { 641 642 return 0; 642 643 } 643 - #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ 644 + #endif /* CONFIG_NUMA_BALANCING */ 644 645 645 646 /* 646 647 * Walk through page tables and collect pages to be migrated.
+6 -2
scripts/mod/modpost.c
··· 584 584 if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 || 585 585 strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 || 586 586 strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 || 587 - strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0) 587 + strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 || 588 + strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || 589 + strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) 588 590 return 1; 589 591 if (info->hdr->e_machine == EM_PPC64) 590 592 /* Special register function linked on all modules during final link of .ko */ 591 593 if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || 592 - strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0) 594 + strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || 595 + strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || 596 + strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) 593 597 return 1; 594 598 /* Do not ignore this symbol */ 595 599 return 0;