Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm-soc/for-4.2/soc-take2' of http://github.com/broadcom/stblinux into next/soc

Merge mach-bcm changes from Florian Fainelli:

This pull request contains the following changes:

- Rafal adds an additional fault code to be ignored by the kernel on BCM5301X SoC

- BCM63138 SMP support, which includes:
* common code to control the PMB bus, to be shared with a reset
controller driver in drivers/reset
* secondary CPU initialization sequence using PMB helpers
* small changes suggested by Russell King to allow platforms to disable VFP

* tag 'arm-soc/for-4.2/soc-take2' of http://github.com/broadcom/stblinux:
ARM: BCM63xx: Add SMP support for BCM63138
ARM: vfp: Add vfp_disable for problematic platforms
ARM: vfp: Add include guards
ARM: BCM63xx: Add secondary CPU PMB initialization sequence
ARM: BCM63xx: Add Broadcom BCM63xx PMB controller helpers
ARM: BCM5301X: Ignore another (BCM4709 specific) fault code

+543 -5
+9
arch/arm/include/asm/vfp.h
··· 5 5 * First, the standard VFP set. 6 6 */ 7 7 8 + #ifndef __ASM_VFP_H 9 + #define __ASM_VFP_H 10 + 8 11 #define FPSID cr0 9 12 #define FPSCR cr1 10 13 #define MVFR1 cr6 ··· 90 87 #define VFPOPDESC_UNUSED_BIT (24) 91 88 #define VFPOPDESC_UNUSED_MASK (0xFF << VFPOPDESC_UNUSED_BIT) 92 89 #define VFPOPDESC_OPDESC_MASK (~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK)) 90 + 91 + #ifndef __ASSEMBLY__ 92 + void vfp_disable(void); 93 + #endif 94 + 95 + #endif /* __ASM_VFP_H */
+6 -1
arch/arm/mach-bcm/Makefile
··· 38 38 obj-$(CONFIG_ARCH_BCM_5301X) += bcm_5301x.o 39 39 40 40 # BCM63XXx 41 - obj-$(CONFIG_ARCH_BCM_63XX) := bcm63xx.o 41 + ifeq ($(CONFIG_ARCH_BCM_63XX),y) 42 + CFLAGS_bcm63xx_headsmp.o += -march=armv7-a 43 + obj-y += bcm63xx.o 44 + obj-$(CONFIG_SMP) += bcm63xx_smp.o bcm63xx_headsmp.o \ 45 + bcm63xx_pmb.o 46 + endif 42 47 43 48 ifeq ($(CONFIG_ARCH_BRCMSTB),y) 44 49 CFLAGS_platsmp-brcmstb.o += -march=armv7-a
+23
arch/arm/mach-bcm/bcm63xx_headsmp.S
··· 1 + /* 2 + * Copyright (C) 2015, Broadcom Corporation 3 + * All Rights Reserved 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + #include <linux/linkage.h> 10 + #include <linux/init.h> 11 + #include <asm/assembler.h> 12 + 13 + ENTRY(bcm63138_secondary_startup) 14 + ARM_BE8(setend be) 15 + /* 16 + * L1 cache does have unpredictable contents at power-up clean its 17 + * contents without flushing 18 + */ 19 + bl v7_invalidate_l1 20 + nop 21 + 22 + b secondary_startup 23 + ENDPROC(bcm63138_secondary_startup)
+221
arch/arm/mach-bcm/bcm63xx_pmb.c
··· 1 + /* 2 + * Broadcom BCM63138 PMB initialization for secondary CPU(s) 3 + * 4 + * Copyright (C) 2015 Broadcom Corporation 5 + * Author: Florian Fainelli <f.fainelli@gmail.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + */ 12 + #include <linux/kernel.h> 13 + #include <linux/io.h> 14 + #include <linux/spinlock.h> 15 + #include <linux/reset/bcm63xx_pmb.h> 16 + #include <linux/of.h> 17 + #include <linux/of_address.h> 18 + 19 + #include "bcm63xx_smp.h" 20 + 21 + /* ARM Control register definitions */ 22 + #define CORE_PWR_CTRL_SHIFT 0 23 + #define CORE_PWR_CTRL_MASK 0x3 24 + #define PLL_PWR_ON BIT(8) 25 + #define PLL_LDO_PWR_ON BIT(9) 26 + #define PLL_CLAMP_ON BIT(10) 27 + #define CPU_RESET_N(x) BIT(13 + (x)) 28 + #define NEON_RESET_N BIT(15) 29 + #define PWR_CTRL_STATUS_SHIFT 28 30 + #define PWR_CTRL_STATUS_MASK 0x3 31 + #define PWR_DOWN_SHIFT 30 32 + #define PWR_DOWN_MASK 0x3 33 + 34 + /* CPU Power control register definitions */ 35 + #define MEM_PWR_OK BIT(0) 36 + #define MEM_PWR_ON BIT(1) 37 + #define MEM_CLAMP_ON BIT(2) 38 + #define MEM_PWR_OK_STATUS BIT(4) 39 + #define MEM_PWR_ON_STATUS BIT(5) 40 + #define MEM_PDA_SHIFT 8 41 + #define MEM_PDA_MASK 0xf 42 + #define MEM_PDA_CPU_MASK 0x1 43 + #define MEM_PDA_NEON_MASK 0xf 44 + #define CLAMP_ON BIT(15) 45 + #define PWR_OK_SHIFT 16 46 + #define PWR_OK_MASK 0xf 47 + #define PWR_ON_SHIFT 20 48 + #define PWR_CPU_MASK 0x03 49 + #define PWR_NEON_MASK 0x01 50 + #define PWR_ON_MASK 0xf 51 + #define PWR_OK_STATUS_SHIFT 24 52 + #define PWR_OK_STATUS_MASK 0xf 53 + #define PWR_ON_STATUS_SHIFT 28 54 + #define PWR_ON_STATUS_MASK 0xf 55 + 56 + #define ARM_CONTROL 0x30 57 + #define ARM_PWR_CONTROL_BASE 0x34 58 + #define ARM_PWR_CONTROL(x) (ARM_PWR_CONTROL_BASE + (x) * 0x4) 59 + #define ARM_NEON_L2 
0x3c 60 + 61 + /* Perform a value write, then spin until the value shifted by 62 + * shift is seen, masked with mask and is different from cond. 63 + */ 64 + static int bpcm_wr_rd_mask(void __iomem *master, 65 + unsigned int addr, u32 off, u32 *val, 66 + u32 shift, u32 mask, u32 cond) 67 + { 68 + int ret; 69 + 70 + ret = bpcm_wr(master, addr, off, *val); 71 + if (ret) 72 + return ret; 73 + 74 + do { 75 + ret = bpcm_rd(master, addr, off, val); 76 + if (ret) 77 + return ret; 78 + 79 + cpu_relax(); 80 + } while (((*val >> shift) & mask) != cond); 81 + 82 + return ret; 83 + } 84 + 85 + /* Global lock to serialize accesses to the PMB registers while we 86 + * are bringing up the secondary CPU 87 + */ 88 + static DEFINE_SPINLOCK(pmb_lock); 89 + 90 + static int bcm63xx_pmb_get_resources(struct device_node *dn, 91 + void __iomem **base, 92 + unsigned int *cpu, 93 + unsigned int *addr) 94 + { 95 + struct device_node *pmb_dn; 96 + struct of_phandle_args args; 97 + int ret; 98 + 99 + ret = of_property_read_u32(dn, "reg", cpu); 100 + if (ret) { 101 + pr_err("CPU is missing a reg node\n"); 102 + return ret; 103 + } 104 + 105 + ret = of_parse_phandle_with_args(dn, "resets", "#reset-cells", 106 + 0, &args); 107 + if (ret) { 108 + pr_err("CPU is missing a resets phandle\n"); 109 + return ret; 110 + } 111 + 112 + pmb_dn = args.np; 113 + if (args.args_count != 2) { 114 + pr_err("reset-controller does not conform to reset-cells\n"); 115 + return -EINVAL; 116 + } 117 + 118 + *base = of_iomap(args.np, 0); 119 + if (!*base) { 120 + pr_err("failed remapping PMB register\n"); 121 + return -ENOMEM; 122 + } 123 + 124 + /* We do not need the number of zones */ 125 + *addr = args.args[0]; 126 + 127 + return 0; 128 + } 129 + 130 + int bcm63xx_pmb_power_on_cpu(struct device_node *dn) 131 + { 132 + void __iomem *base; 133 + unsigned int cpu, addr; 134 + unsigned long flags; 135 + u32 val, ctrl; 136 + int ret; 137 + 138 + ret = bcm63xx_pmb_get_resources(dn, &base, &cpu, &addr); 139 + if (ret) 140 
+ return ret; 141 + 142 + /* We would not know how to enable a third and greater CPU */ 143 + WARN_ON(cpu > 1); 144 + 145 + spin_lock_irqsave(&pmb_lock, flags); 146 + 147 + /* Check if the CPU is already on and save the ARM_CONTROL register 148 + * value since we will use it later for CPU de-assert once done with 149 + * the CPU-specific power sequence 150 + */ 151 + ret = bpcm_rd(base, addr, ARM_CONTROL, &ctrl); 152 + if (ret) 153 + return ret; 154 + 155 + if (ctrl & CPU_RESET_N(cpu)) { 156 + pr_info("PMB: CPU%d is already powered on\n", cpu); 157 + ret = 0; 158 + goto out; 159 + } 160 + 161 + /* Power on PLL */ 162 + ret = bpcm_rd(base, addr, ARM_PWR_CONTROL(cpu), &val); 163 + if (ret) 164 + goto out; 165 + 166 + val |= (PWR_CPU_MASK << PWR_ON_SHIFT); 167 + 168 + ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val, 169 + PWR_ON_STATUS_SHIFT, PWR_CPU_MASK, PWR_CPU_MASK); 170 + if (ret) 171 + goto out; 172 + 173 + val |= (PWR_CPU_MASK << PWR_OK_SHIFT); 174 + 175 + ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val, 176 + PWR_OK_STATUS_SHIFT, PWR_CPU_MASK, PWR_CPU_MASK); 177 + if (ret) 178 + goto out; 179 + 180 + val &= ~CLAMP_ON; 181 + 182 + ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val); 183 + if (ret) 184 + goto out; 185 + 186 + /* Power on CPU<N> RAM */ 187 + val &= ~(MEM_PDA_MASK << MEM_PDA_SHIFT); 188 + 189 + ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val); 190 + if (ret) 191 + goto out; 192 + 193 + val |= MEM_PWR_ON; 194 + 195 + ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val, 196 + 0, MEM_PWR_ON_STATUS, MEM_PWR_ON_STATUS); 197 + if (ret) 198 + goto out; 199 + 200 + val |= MEM_PWR_OK; 201 + 202 + ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val, 203 + 0, MEM_PWR_OK_STATUS, MEM_PWR_OK_STATUS); 204 + if (ret) 205 + goto out; 206 + 207 + val &= ~MEM_CLAMP_ON; 208 + 209 + ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val); 210 + if (ret) 211 + goto out; 212 + 213 + /* De-assert CPU reset */ 214 + ctrl |= 
CPU_RESET_N(cpu); 215 + 216 + ret = bpcm_wr(base, addr, ARM_CONTROL, ctrl); 217 + out: 218 + spin_unlock_irqrestore(&pmb_lock, flags); 219 + iounmap(base); 220 + return ret; 221 + }
+169
arch/arm/mach-bcm/bcm63xx_smp.c
··· 1 + /* 2 + * Broadcom BCM63138 DSL SoCs SMP support code 3 + * 4 + * Copyright (C) 2015, Broadcom Corporation 5 + * 6 + * Licensed under the terms of the GPLv2 7 + */ 8 + 9 + #include <linux/delay.h> 10 + #include <linux/init.h> 11 + #include <linux/smp.h> 12 + #include <linux/io.h> 13 + #include <linux/of.h> 14 + #include <linux/of_address.h> 15 + 16 + #include <asm/cacheflush.h> 17 + #include <asm/smp_scu.h> 18 + #include <asm/smp_plat.h> 19 + #include <asm/vfp.h> 20 + 21 + #include "bcm63xx_smp.h" 22 + 23 + /* Size of mapped Cortex A9 SCU address space */ 24 + #define CORTEX_A9_SCU_SIZE 0x58 25 + 26 + /* 27 + * Enable the Cortex A9 Snoop Control Unit 28 + * 29 + * By the time this is called we already know there are multiple 30 + * cores present. We assume we're running on a Cortex A9 processor, 31 + * so any trouble getting the base address register or getting the 32 + * SCU base is a problem. 33 + * 34 + * Return 0 if successful or an error code otherwise. 35 + */ 36 + static int __init scu_a9_enable(void) 37 + { 38 + unsigned long config_base; 39 + void __iomem *scu_base; 40 + unsigned int i, ncores; 41 + 42 + if (!scu_a9_has_base()) { 43 + pr_err("no configuration base address register!\n"); 44 + return -ENXIO; 45 + } 46 + 47 + /* Config base address register value is zero for uniprocessor */ 48 + config_base = scu_a9_get_base(); 49 + if (!config_base) { 50 + pr_err("hardware reports only one core\n"); 51 + return -ENOENT; 52 + } 53 + 54 + scu_base = ioremap((phys_addr_t)config_base, CORTEX_A9_SCU_SIZE); 55 + if (!scu_base) { 56 + pr_err("failed to remap config base (%lu/%u) for SCU\n", 57 + config_base, CORTEX_A9_SCU_SIZE); 58 + return -ENOMEM; 59 + } 60 + 61 + scu_enable(scu_base); 62 + 63 + ncores = scu_base ? 
scu_get_core_count(scu_base) : 1; 64 + 65 + if (ncores > nr_cpu_ids) { 66 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n", 67 + ncores, nr_cpu_ids); 68 + ncores = nr_cpu_ids; 69 + } 70 + 71 + /* The BCM63138 SoC has two Cortex-A9 CPUs, CPU0 features a complete 72 + * and fully functional VFP unit that can be used, but CPU1 does not. 73 + * Since we will not be able to trap kernel-mode NEON to force 74 + * migration to CPU0, just do not advertise VFP support at all. 75 + * 76 + * This will make vfp_init bail out and do not attempt to use VFP at 77 + * all, for kernel-mode NEON, we do not want to introduce any 78 + * conditionals in hot-paths, so we just restrict the system to UP. 79 + */ 80 + #ifdef CONFIG_VFP 81 + if (ncores > 1) { 82 + pr_warn("SMP: secondary CPUs lack VFP unit, disabling VFP\n"); 83 + vfp_disable(); 84 + 85 + #ifdef CONFIG_KERNEL_MODE_NEON 86 + WARN(1, "SMP: kernel-mode NEON enabled, restricting to UP\n"); 87 + ncores = 1; 88 + #endif 89 + } 90 + #endif 91 + 92 + for (i = 0; i < ncores; i++) 93 + set_cpu_possible(i, true); 94 + 95 + iounmap(scu_base); /* That's the last we'll need of this */ 96 + 97 + return 0; 98 + } 99 + 100 + static const struct of_device_id bcm63138_bootlut_ids[] = { 101 + { .compatible = "brcm,bcm63138-bootlut", }, 102 + { /* sentinel */ }, 103 + }; 104 + 105 + #define BOOTLUT_RESET_VECT 0x20 106 + 107 + static int bcm63138_smp_boot_secondary(unsigned int cpu, 108 + struct task_struct *idle) 109 + { 110 + void __iomem *bootlut_base; 111 + struct device_node *dn; 112 + int ret = 0; 113 + u32 val; 114 + 115 + dn = of_find_matching_node(NULL, bcm63138_bootlut_ids); 116 + if (!dn) { 117 + pr_err("SMP: unable to find bcm63138 boot LUT node\n"); 118 + return -ENODEV; 119 + } 120 + 121 + bootlut_base = of_iomap(dn, 0); 122 + of_node_put(dn); 123 + 124 + if (!bootlut_base) { 125 + pr_err("SMP: unable to remap boot LUT base register\n"); 126 + return -ENOMEM; 127 + } 128 + 129 + /* Locate the secondary CPU node */ 130 
+ dn = of_get_cpu_node(cpu_logical_map(cpu), NULL); 131 + if (!dn) { 132 + pr_err("SMP: failed to locate secondary CPU%d node\n", cpu); 133 + ret = -ENODEV; 134 + goto out; 135 + } 136 + 137 + /* Write the secondary init routine to the BootLUT reset vector */ 138 + val = virt_to_phys(bcm63138_secondary_startup); 139 + writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT); 140 + 141 + /* Power up the core, will jump straight to its reset vector when we 142 + * return 143 + */ 144 + ret = bcm63xx_pmb_power_on_cpu(dn); 145 + if (ret) 146 + goto out; 147 + out: 148 + iounmap(bootlut_base); 149 + 150 + return ret; 151 + } 152 + 153 + static void __init bcm63138_smp_prepare_cpus(unsigned int max_cpus) 154 + { 155 + int ret; 156 + 157 + ret = scu_a9_enable(); 158 + if (ret) { 159 + pr_warn("SMP: Cortex-A9 SCU setup failed\n"); 160 + return; 161 + } 162 + } 163 + 164 + struct smp_operations bcm63138_smp_ops __initdata = { 165 + .smp_prepare_cpus = bcm63138_smp_prepare_cpus, 166 + .smp_boot_secondary = bcm63138_smp_boot_secondary, 167 + }; 168 + 169 + CPU_METHOD_OF_DECLARE(bcm63138_smp, "brcm,bcm63138", &bcm63138_smp_ops);
+9
arch/arm/mach-bcm/bcm63xx_smp.h
··· 1 + #ifndef __BCM63XX_SMP_H 2 + #define __BCM63XX_SMP_H 3 + 4 + struct device_node; 5 + 6 + extern void bcm63138_secondary_startup(void); 7 + extern int bcm63xx_pmb_power_on_cpu(struct device_node *dn); 8 + 9 + #endif /* __BCM63XX_SMP_H */
+5 -4
arch/arm/mach-bcm/bcm_5301x.c
··· 18 18 static int bcm5301x_abort_handler(unsigned long addr, unsigned int fsr, 19 19 struct pt_regs *regs) 20 20 { 21 - if (fsr == 0x1c06 && first_fault) { 21 + if ((fsr == 0x1406 || fsr == 0x1c06) && first_fault) { 22 22 first_fault = false; 23 23 24 24 /* 25 - * These faults with code 0x1c06 happens for no good reason, 26 - * possibly left over from the CFE boot loader. 25 + * These faults with codes 0x1406 (BCM4709) or 0x1c06 happens 26 + * for no good reason, possibly left over from the CFE boot 27 + * loader. 27 28 */ 28 29 pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n", 29 - addr, fsr); 30 + addr, fsr); 30 31 31 32 /* Returning non-zero causes fault display and panic */ 32 33 return 0;
+13
arch/arm/vfp/vfpmodule.c
··· 445 445 set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); 446 446 } 447 447 448 + /* Called by platforms on which we want to disable VFP because it may not be 449 + * present on all CPUs within a SMP complex. Needs to be called prior to 450 + * vfp_init(). 451 + */ 452 + void vfp_disable(void) 453 + { 454 + if (VFP_arch) { 455 + pr_debug("%s: should be called prior to vfp_init\n", __func__); 456 + return; 457 + } 458 + VFP_arch = 1; 459 + } 460 + 448 461 #ifdef CONFIG_CPU_PM 449 462 static int vfp_pm_suspend(void) 450 463 {
+88
include/linux/reset/bcm63xx_pmb.h
··· 1 + /* 2 + * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset) 3 + * 4 + * Copyright (C) 2015, Broadcom Corporation 5 + * Author: Florian Fainelli <f.fainelli@gmail.com> 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License as 9 + * published by the Free Software Foundation version 2. 10 + * 11 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 12 + * kind, whether express or implied; without even the implied warranty 13 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + #ifndef __BCM63XX_PMB_H 17 + #define __BCM63XX_PMB_H 18 + 19 + #include <linux/io.h> 20 + #include <linux/types.h> 21 + #include <linux/delay.h> 22 + #include <linux/err.h> 23 + 24 + /* PMB Master controller register */ 25 + #define PMB_CTRL 0x00 26 + #define PMC_PMBM_START (1 << 31) 27 + #define PMC_PMBM_TIMEOUT (1 << 30) 28 + #define PMC_PMBM_SLAVE_ERR (1 << 29) 29 + #define PMC_PMBM_BUSY (1 << 28) 30 + #define PMC_PMBM_READ (0 << 20) 31 + #define PMC_PMBM_WRITE (1 << 20) 32 + #define PMB_WR_DATA 0x04 33 + #define PMB_TIMEOUT 0x08 34 + #define PMB_RD_DATA 0x0C 35 + 36 + #define PMB_BUS_ID_SHIFT 8 37 + 38 + /* Perform the low-level PMB master operation, shared between reads and 39 + * writes. 
40 + */ 41 + static inline int __bpcm_do_op(void __iomem *master, unsigned int addr, 42 + u32 off, u32 op) 43 + { 44 + unsigned int timeout = 1000; 45 + u32 cmd; 46 + 47 + cmd = (PMC_PMBM_START | op | (addr & 0xff) << 12 | off); 48 + writel(cmd, master + PMB_CTRL); 49 + do { 50 + cmd = readl(master + PMB_CTRL); 51 + if (!(cmd & PMC_PMBM_START)) 52 + return 0; 53 + 54 + if (cmd & PMC_PMBM_SLAVE_ERR) 55 + return -EIO; 56 + 57 + if (cmd & PMC_PMBM_TIMEOUT) 58 + return -ETIMEDOUT; 59 + 60 + udelay(1); 61 + } while (timeout-- > 0); 62 + 63 + return -ETIMEDOUT; 64 + } 65 + 66 + static inline int bpcm_rd(void __iomem *master, unsigned int addr, 67 + u32 off, u32 *val) 68 + { 69 + int ret = 0; 70 + 71 + ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_READ); 72 + *val = readl(master + PMB_RD_DATA); 73 + 74 + return ret; 75 + } 76 + 77 + static inline int bpcm_wr(void __iomem *master, unsigned int addr, 78 + u32 off, u32 val) 79 + { 80 + int ret = 0; 81 + 82 + writel(val, master + PMB_WR_DATA); 83 + ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_WRITE); 84 + 85 + return ret; 86 + } 87 + 88 + #endif /* __BCM63XX_PMB_H */