Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull ARM SoC-related driver updates from Olof Johansson:
"Various driver updates for platforms and a couple of the small driver
subsystems we merge through our tree:

Among the larger pieces:

- Power management improvements for TI am335x and am437x (RTC
suspend/wake)

- Misc new additions for Amlogic (socinfo updates)

- ZynqMP FPGA manager

- Nvidia improvements for reset/powergate handling

- PMIC wrapper for Mediatek MT8516

- Misc fixes/improvements for ARM SCMI, TEE, NXP i.MX SCU drivers"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (57 commits)
soc: aspeed: fix Kconfig
soc: add aspeed folder and misc drivers
spi: zynqmp: Fix build break
soc: imx: Add generic i.MX8 SoC driver
MAINTAINERS: Update email for Qualcomm SoC maintainer
memory: tegra: Fix a typo for "fdcdwr2" mc client
Revert "ARM: tegra: Restore memory arbitration on resume from LP1 on Tegra30+"
memory: tegra: Replace readl-writel with mc_readl-mc_writel
memory: tegra: Fix integer overflow on tick value calculation
memory: tegra: Fix missed registers values latching
ARM: tegra: cpuidle: Handle tick broadcasting within cpuidle core on Tegra20/30
optee: allow to work without static shared memory
soc/tegra: pmc: Move powergate initialisation to probe
soc/tegra: pmc: Remove reset sysfs entries on error
soc/tegra: pmc: Fix reset sources and levels
soc: amlogic: meson-gx-pwrc-vpu: Add support for G12A
soc: amlogic: meson-gx-pwrc-vpu: Fix power on/off register bitmask
fpga manager: Adding FPGA Manager support for Xilinx zynqmp
dt-bindings: fpga: Add bindings for ZynqMP fpga driver
firmware: xilinx: Add fpga API's
...

+1715 -432
+25
Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
··· 1 + Devicetree bindings for Zynq Ultrascale MPSoC FPGA Manager. 2 + The ZynqMP SoC uses the PCAP (Processor configuration Port) to configure the 3 + Programmable Logic (PL). The configuration uses the firmware interface. 4 + 5 + Required properties: 6 + - compatible: should contain "xlnx,zynqmp-pcap-fpga" 7 + 8 + Example for full FPGA configuration: 9 + 10 + fpga-region0 { 11 + compatible = "fpga-region"; 12 + fpga-mgr = <&zynqmp_pcap>; 13 + #address-cells = <0x1>; 14 + #size-cells = <0x1>; 15 + }; 16 + 17 + firmware { 18 + zynqmp_firmware: zynqmp-firmware { 19 + compatible = "xlnx,zynqmp-firmware"; 20 + method = "smc"; 21 + zynqmp_pcap: pcap { 22 + compatible = "xlnx,zynqmp-pcap-fpga"; 23 + }; 24 + }; 25 + };
+1
Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
··· 25 25 "mediatek,mt8135-pwrap" for MT8135 SoCs 26 26 "mediatek,mt8173-pwrap" for MT8173 SoCs 27 27 "mediatek,mt8183-pwrap" for MT8183 SoCs 28 + "mediatek,mt8516-pwrap" for MT8516 SoCs 28 29 - interrupts: IRQ for pwrap in SOC 29 30 - reg-names: Must include the following entries: 30 31 "pwrap": Main registers base
+2 -2
Documentation/xilinx/eemi.txt
··· 41 41 int ret; 42 42 43 43 eemi_ops = zynqmp_pm_get_eemi_ops(); 44 - if (!eemi_ops) 45 - return -ENXIO; 44 + if (IS_ERR(eemi_ops)) 45 + return PTR_ERR(eemi_ops); 46 46 47 47 ret = eemi_ops->query_data(qdata, ret_payload); 48 48
+1 -1
MAINTAINERS
··· 2043 2043 S: Maintained 2044 2044 2045 2045 ARM/QUALCOMM SUPPORT 2046 - M: Andy Gross <andy.gross@linaro.org> 2046 + M: Andy Gross <agross@kernel.org> 2047 2047 M: David Brown <david.brown@linaro.org> 2048 2048 L: linux-arm-msm@vger.kernel.org 2049 2049 S: Maintained
+75 -1
arch/arm/mach-omap2/pm33xx-core.c
··· 10 10 #include <asm/suspend.h> 11 11 #include <linux/errno.h> 12 12 #include <linux/platform_data/pm33xx.h> 13 + #include <linux/clk.h> 14 + #include <linux/platform_data/gpio-omap.h> 15 + #include <linux/pinctrl/pinmux.h> 16 + #include <linux/wkup_m3_ipc.h> 17 + #include <linux/of.h> 18 + #include <linux/rtc.h> 13 19 14 20 #include "cm33xx.h" 15 21 #include "common.h" ··· 40 34 41 35 if (!scu_base) 42 36 return -ENOMEM; 37 + 38 + return 0; 39 + } 40 + 41 + static int am33xx_check_off_mode_enable(void) 42 + { 43 + if (enable_off_mode) 44 + pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n"); 45 + 46 + /* off mode not supported on am335x so return 0 always */ 47 + return 0; 48 + } 49 + 50 + static int am43xx_check_off_mode_enable(void) 51 + { 52 + /* 53 + * Check for am437x-gp-evm which has the right Hardware design to 54 + * support this mode reliably. 55 + */ 56 + if (of_machine_is_compatible("ti,am437x-gp-evm") && enable_off_mode) 57 + return enable_off_mode; 58 + else if (enable_off_mode) 59 + pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n"); 43 60 44 61 return 0; 45 62 } ··· 170 141 scu_power_mode(scu_base, SCU_PM_POWEROFF); 171 142 ret = cpu_suspend(args, fn); 172 143 scu_power_mode(scu_base, SCU_PM_NORMAL); 173 - amx3_post_suspend_common(); 144 + 145 + if (!am43xx_check_off_mode_enable()) 146 + amx3_post_suspend_common(); 174 147 175 148 return ret; 176 149 } ··· 194 163 return omap_hwmod_get_mpu_rt_va(rtc_oh); 195 164 } 196 165 166 + static void am43xx_save_context(void) 167 + { 168 + } 169 + 170 + static void am33xx_save_context(void) 171 + { 172 + omap_intc_save_context(); 173 + } 174 + 175 + static void am33xx_restore_context(void) 176 + { 177 + omap_intc_restore_context(); 178 + } 179 + 180 + static void am43xx_restore_context(void) 181 + { 182 + /* 183 + * HACK: restore dpll_per_clkdcoldo register contents, to avoid 184 + * breaking suspend-resume 185 + */ 186 + 
writel_relaxed(0x0, AM33XX_L4_WK_IO_ADDRESS(0x44df2e14)); 187 + } 188 + 189 + static void am43xx_prepare_rtc_suspend(void) 190 + { 191 + omap_hwmod_enable(rtc_oh); 192 + } 193 + 194 + static void am43xx_prepare_rtc_resume(void) 195 + { 196 + omap_hwmod_idle(rtc_oh); 197 + } 198 + 197 199 static struct am33xx_pm_platform_data am33xx_ops = { 198 200 .init = am33xx_suspend_init, 199 201 .soc_suspend = am33xx_suspend, 200 202 .get_sram_addrs = amx3_get_sram_addrs, 203 + .save_context = am33xx_save_context, 204 + .restore_context = am33xx_restore_context, 205 + .prepare_rtc_suspend = am43xx_prepare_rtc_suspend, 206 + .prepare_rtc_resume = am43xx_prepare_rtc_resume, 207 + .check_off_mode_enable = am33xx_check_off_mode_enable, 201 208 .get_rtc_base_addr = am43xx_get_rtc_base_addr, 202 209 }; 203 210 ··· 243 174 .init = am43xx_suspend_init, 244 175 .soc_suspend = am43xx_suspend, 245 176 .get_sram_addrs = amx3_get_sram_addrs, 177 + .save_context = am43xx_save_context, 178 + .restore_context = am43xx_restore_context, 179 + .prepare_rtc_suspend = am43xx_prepare_rtc_suspend, 180 + .prepare_rtc_resume = am43xx_prepare_rtc_resume, 181 + .check_off_mode_enable = am43xx_check_off_mode_enable, 246 182 .get_rtc_base_addr = am43xx_get_rtc_base_addr, 247 183 }; 248 184
+3
arch/arm/mach-omap2/sleep43xx.S
··· 368 368 mov r1, #AM43XX_EMIF_POWEROFF_DISABLE 369 369 str r1, [r2, #0x0] 370 370 371 + ldr r1, [r9, #EMIF_PM_RUN_HW_LEVELING] 372 + blx r1 373 + 371 374 #ifdef CONFIG_CACHE_L2X0 372 375 ldr r2, l2_cache_base 373 376 ldr r0, [r2, #L2X0_CTRL]
+1
arch/arm/mach-tegra/Kconfig
··· 10 10 select HAVE_ARM_SCU if SMP 11 11 select HAVE_ARM_TWD if SMP 12 12 select PINCTRL 13 + select PM 13 14 select PM_OPP 14 15 select RESET_CONTROLLER 15 16 select SOC_BUS
+2 -9
arch/arm/mach-tegra/cpuidle-tegra20.c
··· 61 61 .exit_latency = 5000, 62 62 .target_residency = 10000, 63 63 .power_usage = 0, 64 - .flags = CPUIDLE_FLAG_COUPLED, 64 + .flags = CPUIDLE_FLAG_COUPLED | 65 + CPUIDLE_FLAG_TIMER_STOP, 65 66 .name = "powered-down", 66 67 .desc = "CPU power gated", 67 68 }, ··· 137 136 if (tegra20_reset_cpu_1() || !tegra_cpu_rail_off_ready()) 138 137 return false; 139 138 140 - tick_broadcast_enter(); 141 - 142 139 tegra_idle_lp2_last(); 143 - 144 - tick_broadcast_exit(); 145 140 146 141 if (cpu_online(1)) 147 142 tegra20_wake_cpu1_from_reset(); ··· 150 153 struct cpuidle_driver *drv, 151 154 int index) 152 155 { 153 - tick_broadcast_enter(); 154 - 155 156 cpu_suspend(0, tegra20_sleep_cpu_secondary_finish); 156 157 157 158 tegra20_cpu_clear_resettable(); 158 - 159 - tick_broadcast_exit(); 160 159 161 160 return true; 162 161 }
+1 -8
arch/arm/mach-tegra/cpuidle-tegra30.c
··· 56 56 .exit_latency = 2000, 57 57 .target_residency = 2200, 58 58 .power_usage = 0, 59 + .flags = CPUIDLE_FLAG_TIMER_STOP, 59 60 .name = "powered-down", 60 61 .desc = "CPU power gated", 61 62 }, ··· 77 76 return false; 78 77 } 79 78 80 - tick_broadcast_enter(); 81 - 82 79 tegra_idle_lp2_last(); 83 - 84 - tick_broadcast_exit(); 85 80 86 81 return true; 87 82 } ··· 87 90 struct cpuidle_driver *drv, 88 91 int index) 89 92 { 90 - tick_broadcast_enter(); 91 - 92 93 smp_wmb(); 93 94 94 95 cpu_suspend(0, tegra30_sleep_cpu_secondary_finish); 95 - 96 - tick_broadcast_exit(); 97 96 98 97 return true; 99 98 }
-9
arch/arm/mach-tegra/iomap.h
··· 79 79 #define TEGRA_PMC_BASE 0x7000E400 80 80 #define TEGRA_PMC_SIZE SZ_256 81 81 82 - #define TEGRA_MC_BASE 0x7000F000 83 - #define TEGRA_MC_SIZE SZ_1K 84 - 85 82 #define TEGRA_EMC_BASE 0x7000F400 86 83 #define TEGRA_EMC_SIZE SZ_1K 87 - 88 - #define TEGRA114_MC_BASE 0x70019000 89 - #define TEGRA114_MC_SIZE SZ_4K 90 84 91 85 #define TEGRA_EMC0_BASE 0x7001A000 92 86 #define TEGRA_EMC0_SIZE SZ_2K 93 87 94 88 #define TEGRA_EMC1_BASE 0x7001A800 95 89 #define TEGRA_EMC1_SIZE SZ_2K 96 - 97 - #define TEGRA124_MC_BASE 0x70019000 98 - #define TEGRA124_MC_SIZE SZ_4K 99 90 100 91 #define TEGRA124_EMC_BASE 0x7001B000 101 92 #define TEGRA124_EMC_SIZE SZ_2K
-21
arch/arm/mach-tegra/sleep-tegra30.S
··· 44 44 #define EMC_XM2VTTGENPADCTRL 0x310 45 45 #define EMC_XM2VTTGENPADCTRL2 0x314 46 46 47 - #define MC_EMEM_ARB_CFG 0x90 48 - 49 47 #define PMC_CTRL 0x0 50 48 #define PMC_CTRL_SIDE_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */ 51 49 ··· 418 420 movweq r0, #:lower16:TEGRA124_EMC_BASE 419 421 movteq r0, #:upper16:TEGRA124_EMC_BASE 420 422 421 - cmp r10, #TEGRA30 422 - moveq r2, #0x20 423 - movweq r4, #:lower16:TEGRA_MC_BASE 424 - movteq r4, #:upper16:TEGRA_MC_BASE 425 - cmp r10, #TEGRA114 426 - moveq r2, #0x34 427 - movweq r4, #:lower16:TEGRA114_MC_BASE 428 - movteq r4, #:upper16:TEGRA114_MC_BASE 429 - cmp r10, #TEGRA124 430 - moveq r2, #0x20 431 - movweq r4, #:lower16:TEGRA124_MC_BASE 432 - movteq r4, #:upper16:TEGRA124_MC_BASE 433 - 434 - ldr r1, [r5, r2] @ restore MC_EMEM_ARB_CFG 435 - str r1, [r4, #MC_EMEM_ARB_CFG] 436 - 437 423 exit_self_refresh: 438 424 ldr r1, [r5, #0xC] @ restore EMC_XM2VTTGENPADCTRL 439 425 str r1, [r0, #EMC_XM2VTTGENPADCTRL] ··· 546 564 .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14 547 565 .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18 548 566 .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c 549 - .word TEGRA_MC_BASE + MC_EMEM_ARB_CFG @0x20 550 567 tegra30_sdram_pad_address_end: 551 568 552 569 tegra114_sdram_pad_address: ··· 562 581 .word TEGRA_EMC1_BASE + EMC_AUTO_CAL_INTERVAL @0x28 563 582 .word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL @0x2c 564 583 .word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL2 @0x30 565 - .word TEGRA114_MC_BASE + MC_EMEM_ARB_CFG @0x34 566 584 tegra114_sdram_pad_adress_end: 567 585 568 586 tegra124_sdram_pad_address: ··· 573 593 .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14 574 594 .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18 575 595 .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c 576 - .word TEGRA124_MC_BASE + MC_EMEM_ARB_CFG @0x20 577 596 tegra124_sdram_pad_address_end: 578 597 579 598 tegra30_sdram_pad_size:
+46 -20
drivers/bus/tegra-aconnect.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/of_platform.h> 14 14 #include <linux/platform_device.h> 15 - #include <linux/pm_clock.h> 16 15 #include <linux/pm_runtime.h> 16 + 17 + struct tegra_aconnect { 18 + struct clk *ape_clk; 19 + struct clk *apb2ape_clk; 20 + }; 17 21 18 22 static int tegra_aconnect_probe(struct platform_device *pdev) 19 23 { 20 - int ret; 24 + struct tegra_aconnect *aconnect; 21 25 22 26 if (!pdev->dev.of_node) 23 27 return -EINVAL; 24 28 25 - ret = pm_clk_create(&pdev->dev); 26 - if (ret) 27 - return ret; 29 + aconnect = devm_kzalloc(&pdev->dev, sizeof(struct tegra_aconnect), 30 + GFP_KERNEL); 31 + if (!aconnect) 32 + return -ENOMEM; 28 33 29 - ret = of_pm_clk_add_clk(&pdev->dev, "ape"); 30 - if (ret) 31 - goto clk_destroy; 34 + aconnect->ape_clk = devm_clk_get(&pdev->dev, "ape"); 35 + if (IS_ERR(aconnect->ape_clk)) { 36 + dev_err(&pdev->dev, "Can't retrieve ape clock\n"); 37 + return PTR_ERR(aconnect->ape_clk); 38 + } 32 39 33 - ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape"); 34 - if (ret) 35 - goto clk_destroy; 40 + aconnect->apb2ape_clk = devm_clk_get(&pdev->dev, "apb2ape"); 41 + if (IS_ERR(aconnect->apb2ape_clk)) { 42 + dev_err(&pdev->dev, "Can't retrieve apb2ape clock\n"); 43 + return PTR_ERR(aconnect->apb2ape_clk); 44 + } 36 45 46 + dev_set_drvdata(&pdev->dev, aconnect); 37 47 pm_runtime_enable(&pdev->dev); 38 48 39 49 of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); ··· 51 41 dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n"); 52 42 53 43 return 0; 54 - 55 - clk_destroy: 56 - pm_clk_destroy(&pdev->dev); 57 - 58 - return ret; 59 44 } 60 45 61 46 static int tegra_aconnect_remove(struct platform_device *pdev) 62 47 { 63 48 pm_runtime_disable(&pdev->dev); 64 49 65 - pm_clk_destroy(&pdev->dev); 66 - 67 50 return 0; 68 51 } 69 52 70 53 static int tegra_aconnect_runtime_resume(struct device *dev) 71 54 { 72 - return pm_clk_resume(dev); 55 + struct tegra_aconnect *aconnect = dev_get_drvdata(dev); 56 + int 
ret; 57 + 58 + ret = clk_prepare_enable(aconnect->ape_clk); 59 + if (ret) { 60 + dev_err(dev, "ape clk_enable failed: %d\n", ret); 61 + return ret; 62 + } 63 + 64 + ret = clk_prepare_enable(aconnect->apb2ape_clk); 65 + if (ret) { 66 + clk_disable_unprepare(aconnect->ape_clk); 67 + dev_err(dev, "apb2ape clk_enable failed: %d\n", ret); 68 + return ret; 69 + } 70 + 71 + return 0; 73 72 } 74 73 75 74 static int tegra_aconnect_runtime_suspend(struct device *dev) 76 75 { 77 - return pm_clk_suspend(dev); 76 + struct tegra_aconnect *aconnect = dev_get_drvdata(dev); 77 + 78 + clk_disable_unprepare(aconnect->ape_clk); 79 + clk_disable_unprepare(aconnect->apb2ape_clk); 80 + 81 + return 0; 78 82 } 79 83 80 84 static const struct dev_pm_ops tegra_aconnect_pm_ops = { 81 85 SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend, 82 86 tegra_aconnect_runtime_resume, NULL) 87 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 88 + pm_runtime_force_resume) 83 89 }; 84 90 85 91 static const struct of_device_id tegra_aconnect_of_match[] = {
+2 -2
drivers/clk/zynqmp/clkc.c
··· 739 739 struct device *dev = &pdev->dev; 740 740 741 741 eemi_ops = zynqmp_pm_get_eemi_ops(); 742 - if (!eemi_ops) 743 - return -ENXIO; 742 + if (IS_ERR(eemi_ops)) 743 + return PTR_ERR(eemi_ops); 744 744 745 745 ret = zynqmp_clk_setup(dev->of_node); 746 746
+4 -4
drivers/firmware/arm_scmi/driver.c
··· 654 654 655 655 static int scmi_mailbox_check(struct device_node *np) 656 656 { 657 - struct of_phandle_args arg; 658 - 659 - return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg); 657 + return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL); 660 658 } 661 659 662 660 static int scmi_mbox_free_channel(int id, void *p, void *data) ··· 796 798 return -EINVAL; 797 799 } 798 800 799 - desc = of_match_device(scmi_of_match, dev)->data; 801 + desc = of_device_get_match_data(dev); 802 + if (!desc) 803 + return -EINVAL; 800 804 801 805 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); 802 806 if (!info)
+1 -1
drivers/firmware/imx/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o 2 + obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o 3 3 obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
+168
drivers/firmware/imx/imx-scu-irq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright 2019 NXP 4 + * 5 + * Implementation of the SCU IRQ functions using MU. 6 + * 7 + */ 8 + 9 + #include <dt-bindings/firmware/imx/rsrc.h> 10 + #include <linux/firmware/imx/ipc.h> 11 + #include <linux/mailbox_client.h> 12 + 13 + #define IMX_SC_IRQ_FUNC_ENABLE 1 14 + #define IMX_SC_IRQ_FUNC_STATUS 2 15 + #define IMX_SC_IRQ_NUM_GROUP 4 16 + 17 + static u32 mu_resource_id; 18 + 19 + struct imx_sc_msg_irq_get_status { 20 + struct imx_sc_rpc_msg hdr; 21 + union { 22 + struct { 23 + u16 resource; 24 + u8 group; 25 + u8 reserved; 26 + } __packed req; 27 + struct { 28 + u32 status; 29 + } resp; 30 + } data; 31 + }; 32 + 33 + struct imx_sc_msg_irq_enable { 34 + struct imx_sc_rpc_msg hdr; 35 + u32 mask; 36 + u16 resource; 37 + u8 group; 38 + u8 enable; 39 + } __packed; 40 + 41 + static struct imx_sc_ipc *imx_sc_irq_ipc_handle; 42 + static struct work_struct imx_sc_irq_work; 43 + static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain); 44 + 45 + int imx_scu_irq_register_notifier(struct notifier_block *nb) 46 + { 47 + return atomic_notifier_chain_register( 48 + &imx_scu_irq_notifier_chain, nb); 49 + } 50 + EXPORT_SYMBOL(imx_scu_irq_register_notifier); 51 + 52 + int imx_scu_irq_unregister_notifier(struct notifier_block *nb) 53 + { 54 + return atomic_notifier_chain_unregister( 55 + &imx_scu_irq_notifier_chain, nb); 56 + } 57 + EXPORT_SYMBOL(imx_scu_irq_unregister_notifier); 58 + 59 + static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group) 60 + { 61 + return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain, 62 + status, (void *)group); 63 + } 64 + 65 + static void imx_scu_irq_work_handler(struct work_struct *work) 66 + { 67 + struct imx_sc_msg_irq_get_status msg; 68 + struct imx_sc_rpc_msg *hdr = &msg.hdr; 69 + u32 irq_status; 70 + int ret; 71 + u8 i; 72 + 73 + for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) { 74 + hdr->ver = IMX_SC_RPC_VERSION; 75 + hdr->svc = IMX_SC_RPC_SVC_IRQ; 76 + hdr->func 
= IMX_SC_IRQ_FUNC_STATUS; 77 + hdr->size = 2; 78 + 79 + msg.data.req.resource = mu_resource_id; 80 + msg.data.req.group = i; 81 + 82 + ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true); 83 + if (ret) { 84 + pr_err("get irq group %d status failed, ret %d\n", 85 + i, ret); 86 + return; 87 + } 88 + 89 + irq_status = msg.data.resp.status; 90 + if (!irq_status) 91 + continue; 92 + 93 + imx_scu_irq_notifier_call_chain(irq_status, &i); 94 + } 95 + } 96 + 97 + int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable) 98 + { 99 + struct imx_sc_msg_irq_enable msg; 100 + struct imx_sc_rpc_msg *hdr = &msg.hdr; 101 + int ret; 102 + 103 + hdr->ver = IMX_SC_RPC_VERSION; 104 + hdr->svc = IMX_SC_RPC_SVC_IRQ; 105 + hdr->func = IMX_SC_IRQ_FUNC_ENABLE; 106 + hdr->size = 3; 107 + 108 + msg.resource = mu_resource_id; 109 + msg.group = group; 110 + msg.mask = mask; 111 + msg.enable = enable; 112 + 113 + ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true); 114 + if (ret) 115 + pr_err("enable irq failed, group %d, mask %d, ret %d\n", 116 + group, mask, ret); 117 + 118 + return ret; 119 + } 120 + EXPORT_SYMBOL(imx_scu_irq_group_enable); 121 + 122 + static void imx_scu_irq_callback(struct mbox_client *c, void *msg) 123 + { 124 + schedule_work(&imx_sc_irq_work); 125 + } 126 + 127 + int imx_scu_enable_general_irq_channel(struct device *dev) 128 + { 129 + struct of_phandle_args spec; 130 + struct mbox_client *cl; 131 + struct mbox_chan *ch; 132 + int ret = 0, i = 0; 133 + 134 + ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle); 135 + if (ret) 136 + return ret; 137 + 138 + cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL); 139 + if (!cl) 140 + return -ENOMEM; 141 + 142 + cl->dev = dev; 143 + cl->rx_callback = imx_scu_irq_callback; 144 + 145 + /* SCU general IRQ uses general interrupt channel 3 */ 146 + ch = mbox_request_channel_byname(cl, "gip3"); 147 + if (IS_ERR(ch)) { 148 + ret = PTR_ERR(ch); 149 + dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret); 150 + 
devm_kfree(dev, cl); 151 + return ret; 152 + } 153 + 154 + INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); 155 + 156 + if (!of_parse_phandle_with_args(dev->of_node, "mboxes", 157 + "#mbox-cells", 0, &spec)) 158 + i = of_alias_get_id(spec.np, "mu"); 159 + 160 + /* use mu1 as general mu irq channel if failed */ 161 + if (i < 0) 162 + i = 1; 163 + 164 + mu_resource_id = IMX_SC_R_MU_0A + i; 165 + 166 + return ret; 167 + } 168 + EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
+6
drivers/firmware/imx/imx-scu.c
··· 10 10 #include <linux/err.h> 11 11 #include <linux/firmware/imx/types.h> 12 12 #include <linux/firmware/imx/ipc.h> 13 + #include <linux/firmware/imx/sci.h> 13 14 #include <linux/interrupt.h> 14 15 #include <linux/irq.h> 15 16 #include <linux/kernel.h> ··· 246 245 init_completion(&sc_ipc->done); 247 246 248 247 imx_sc_ipc_handle = sc_ipc; 248 + 249 + ret = imx_scu_enable_general_irq_channel(dev); 250 + if (ret) 251 + dev_warn(dev, 252 + "failed to enable general irq channel: %d\n", ret); 249 253 250 254 dev_info(dev, "NXP i.MX SCU Initialized\n"); 251 255
+62 -55
drivers/firmware/imx/scu-pd.c
··· 74 74 char *name; 75 75 u32 rsrc; 76 76 u8 num; 77 + 78 + /* add domain index */ 77 79 bool postfix; 80 + u8 start_from; 78 81 }; 79 82 80 83 struct imx_sc_pd_soc { ··· 87 84 88 85 static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { 89 86 /* LSIO SS */ 90 - { "lsio-pwm", IMX_SC_R_PWM_0, 8, 1 }, 91 - { "lsio-gpio", IMX_SC_R_GPIO_0, 8, 1 }, 92 - { "lsio-gpt", IMX_SC_R_GPT_0, 5, 1 }, 93 - { "lsio-kpp", IMX_SC_R_KPP, 1, 0 }, 94 - { "lsio-fspi", IMX_SC_R_FSPI_0, 2, 1 }, 95 - { "lsio-mu", IMX_SC_R_MU_0A, 14, 1 }, 87 + { "pwm", IMX_SC_R_PWM_0, 8, true, 0 }, 88 + { "gpio", IMX_SC_R_GPIO_0, 8, true, 0 }, 89 + { "gpt", IMX_SC_R_GPT_0, 5, true, 0 }, 90 + { "kpp", IMX_SC_R_KPP, 1, false, 0 }, 91 + { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 }, 92 + { "mu", IMX_SC_R_MU_0A, 14, true, 0 }, 96 93 97 94 /* CONN SS */ 98 - { "con-usb", IMX_SC_R_USB_0, 2, 1 }, 99 - { "con-usb0phy", IMX_SC_R_USB_0_PHY, 1, 0 }, 100 - { "con-usb2", IMX_SC_R_USB_2, 1, 0 }, 101 - { "con-usb2phy", IMX_SC_R_USB_2_PHY, 1, 0 }, 102 - { "con-sdhc", IMX_SC_R_SDHC_0, 3, 1 }, 103 - { "con-enet", IMX_SC_R_ENET_0, 2, 1 }, 104 - { "con-nand", IMX_SC_R_NAND, 1, 0 }, 105 - { "con-mlb", IMX_SC_R_MLB_0, 1, 1 }, 95 + { "usb", IMX_SC_R_USB_0, 2, true, 0 }, 96 + { "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 }, 97 + { "usb2", IMX_SC_R_USB_2, 1, false, 0 }, 98 + { "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 }, 99 + { "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 }, 100 + { "enet", IMX_SC_R_ENET_0, 2, true, 0 }, 101 + { "nand", IMX_SC_R_NAND, 1, false, 0 }, 102 + { "mlb", IMX_SC_R_MLB_0, 1, true, 0 }, 106 103 107 - /* Audio DMA SS */ 108 - { "adma-audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, 0 }, 109 - { "adma-audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, 0 }, 110 - { "adma-audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, 0 }, 111 - { "adma-dma0-ch", IMX_SC_R_DMA_0_CH0, 16, 1 }, 112 - { "adma-dma1-ch", IMX_SC_R_DMA_1_CH0, 16, 1 }, 113 - { "adma-dma2-ch", IMX_SC_R_DMA_2_CH0, 5, 1 }, 114 - { "adma-asrc0", IMX_SC_R_ASRC_0, 1, 0 }, 115 - { "adma-asrc1", 
IMX_SC_R_ASRC_1, 1, 0 }, 116 - { "adma-esai0", IMX_SC_R_ESAI_0, 1, 0 }, 117 - { "adma-spdif0", IMX_SC_R_SPDIF_0, 1, 0 }, 118 - { "adma-sai", IMX_SC_R_SAI_0, 3, 1 }, 119 - { "adma-amix", IMX_SC_R_AMIX, 1, 0 }, 120 - { "adma-mqs0", IMX_SC_R_MQS_0, 1, 0 }, 121 - { "adma-dsp", IMX_SC_R_DSP, 1, 0 }, 122 - { "adma-dsp-ram", IMX_SC_R_DSP_RAM, 1, 0 }, 123 - { "adma-can", IMX_SC_R_CAN_0, 3, 1 }, 124 - { "adma-ftm", IMX_SC_R_FTM_0, 2, 1 }, 125 - { "adma-lpi2c", IMX_SC_R_I2C_0, 4, 1 }, 126 - { "adma-adc", IMX_SC_R_ADC_0, 1, 1 }, 127 - { "adma-lcd", IMX_SC_R_LCD_0, 1, 1 }, 128 - { "adma-lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, 1 }, 129 - { "adma-lpuart", IMX_SC_R_UART_0, 4, 1 }, 130 - { "adma-lpspi", IMX_SC_R_SPI_0, 4, 1 }, 104 + /* AUDIO SS */ 105 + { "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 }, 106 + { "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 }, 107 + { "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 }, 108 + { "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 }, 109 + { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 }, 110 + { "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 }, 111 + { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 }, 112 + { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 }, 113 + { "esai0", IMX_SC_R_ESAI_0, 1, false, 0 }, 114 + { "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 }, 115 + { "sai", IMX_SC_R_SAI_0, 3, true, 0 }, 116 + { "amix", IMX_SC_R_AMIX, 1, false, 0 }, 117 + { "mqs0", IMX_SC_R_MQS_0, 1, false, 0 }, 118 + { "dsp", IMX_SC_R_DSP, 1, false, 0 }, 119 + { "dsp-ram", IMX_SC_R_DSP_RAM, 1, false, 0 }, 131 120 132 - /* VPU SS */ 133 - { "vpu", IMX_SC_R_VPU, 1, 0 }, 134 - { "vpu-pid", IMX_SC_R_VPU_PID0, 8, 1 }, 135 - { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, 0 }, 136 - { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, 0 }, 121 + /* DMA SS */ 122 + { "can", IMX_SC_R_CAN_0, 3, true, 0 }, 123 + { "ftm", IMX_SC_R_FTM_0, 2, true, 0 }, 124 + { "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 }, 125 + { "adc", IMX_SC_R_ADC_0, 1, true, 0 }, 126 + { "lcd", IMX_SC_R_LCD_0, 1, true, 0 }, 127 + { "lcd0-pwm", 
IMX_SC_R_LCD_0_PWM_0, 1, true, 0 }, 128 + { "lpuart", IMX_SC_R_UART_0, 4, true, 0 }, 129 + { "lpspi", IMX_SC_R_SPI_0, 4, true, 0 }, 130 + 131 + /* VPU SS */ 132 + { "vpu", IMX_SC_R_VPU, 1, false, 0 }, 133 + { "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 }, 134 + { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 }, 135 + { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 }, 137 136 138 137 /* GPU SS */ 139 - { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, 1 }, 138 + { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 }, 140 139 141 140 /* HSIO SS */ 142 - { "hsio-pcie-b", IMX_SC_R_PCIE_B, 1, 0 }, 143 - { "hsio-serdes-1", IMX_SC_R_SERDES_1, 1, 0 }, 144 - { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, 0 }, 141 + { "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 }, 142 + { "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 }, 143 + { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 }, 145 144 146 - /* MIPI/LVDS SS */ 147 - { "mipi0", IMX_SC_R_MIPI_0, 1, 0 }, 148 - { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, 0 }, 149 - { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, 1 }, 150 - { "lvds0", IMX_SC_R_LVDS_0, 1, 0 }, 145 + /* MIPI SS */ 146 + { "mipi0", IMX_SC_R_MIPI_0, 1, false, 0 }, 147 + { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 }, 148 + { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 }, 149 + 150 + /* LVDS SS */ 151 + { "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 }, 151 152 152 153 /* DC SS */ 153 - { "dc0", IMX_SC_R_DC_0, 1, 0 }, 154 - { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, 1 }, 154 + { "dc0", IMX_SC_R_DC_0, 1, false, 0 }, 155 + { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 }, 155 156 }; 156 157 157 158 static const struct imx_sc_pd_soc imx8qxp_scu_pd = { ··· 243 236 244 237 if (pd_ranges->postfix) 245 238 snprintf(sc_pd->name, sizeof(sc_pd->name), 246 - "%s%i", pd_ranges->name, idx); 239 + "%s%i", pd_ranges->name, pd_ranges->start_from + idx); 247 240 else 248 241 snprintf(sc_pd->name, sizeof(sc_pd->name), 249 242 "%s", pd_ranges->name);
+4 -14
drivers/firmware/xilinx/zynqmp-debug.c
··· 90 90 int ret; 91 91 struct zynqmp_pm_query_data qdata = {0}; 92 92 93 - if (!eemi_ops) 94 - return -ENXIO; 95 - 96 93 switch (pm_id) { 97 94 case PM_GET_API_VERSION: 98 95 ret = eemi_ops->get_api_version(&pm_api_version); ··· 160 163 161 164 strcpy(debugfs_buf, ""); 162 165 163 - if (*off != 0 || len == 0) 166 + if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1) 164 167 return -EINVAL; 165 168 166 - kern_buff = kzalloc(len, GFP_KERNEL); 167 - if (!kern_buff) 168 - return -ENOMEM; 169 - 169 + kern_buff = memdup_user_nul(ptr, len); 170 + if (IS_ERR(kern_buff)) 171 + return PTR_ERR(kern_buff); 170 172 tmp_buff = kern_buff; 171 - 172 - ret = strncpy_from_user(kern_buff, ptr, len); 173 - if (ret < 0) { 174 - ret = -EFAULT; 175 - goto err; 176 - } 177 173 178 174 /* Read the API name from a user request */ 179 175 pm_api_req = strsep(&kern_buff, " ");
+55 -1
drivers/firmware/xilinx/zynqmp.c
··· 24 24 #include <linux/firmware/xlnx-zynqmp.h> 25 25 #include "zynqmp-debug.h" 26 26 27 + static const struct zynqmp_eemi_ops *eemi_ops_tbl; 28 + 27 29 static const struct mfd_cell firmware_devs[] = { 28 30 { 29 31 .name = "zynqmp_power_controller", ··· 540 538 } 541 539 542 540 /** 541 + * zynqmp_pm_fpga_load - Perform the fpga load 542 + * @address: Address to write to 543 + * @size: pl bitstream size 544 + * @flags: Bitstream type 545 + * -XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration 546 + * -XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration 547 + * 548 + * This function provides access to pmufw. To transfer 549 + * the required bitstream into PL. 550 + * 551 + * Return: Returns status, either success or error+reason 552 + */ 553 + static int zynqmp_pm_fpga_load(const u64 address, const u32 size, 554 + const u32 flags) 555 + { 556 + return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address), 557 + upper_32_bits(address), size, flags, NULL); 558 + } 559 + 560 + /** 561 + * zynqmp_pm_fpga_get_status - Read value from PCAP status register 562 + * @value: Value to read 563 + * 564 + * This function provides access to the pmufw to get the PCAP 565 + * status 566 + * 567 + * Return: Returns status, either success or error+reason 568 + */ 569 + static int zynqmp_pm_fpga_get_status(u32 *value) 570 + { 571 + u32 ret_payload[PAYLOAD_ARG_CNT]; 572 + int ret; 573 + 574 + if (!value) 575 + return -EINVAL; 576 + 577 + ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload); 578 + *value = ret_payload[1]; 579 + 580 + return ret; 581 + } 582 + 583 + /** 543 584 * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller 544 585 * master has initialized its own power management 545 586 * ··· 685 640 .request_node = zynqmp_pm_request_node, 686 641 .release_node = zynqmp_pm_release_node, 687 642 .set_requirement = zynqmp_pm_set_requirement, 643 + .fpga_load = zynqmp_pm_fpga_load, 644 + .fpga_get_status = 
zynqmp_pm_fpga_get_status, 688 645 }; 689 646 690 647 /** ··· 696 649 */ 697 650 const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) 698 651 { 699 - return &eemi_ops; 652 + if (eemi_ops_tbl) 653 + return eemi_ops_tbl; 654 + else 655 + return ERR_PTR(-EPROBE_DEFER); 656 + 700 657 } 701 658 EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops); 702 659 ··· 744 693 745 694 pr_info("%s Trustzone version v%d.%d\n", __func__, 746 695 pm_tz_version >> 16, pm_tz_version & 0xFFFF); 696 + 697 + /* Assign eemi_ops_table */ 698 + eemi_ops_tbl = &eemi_ops; 747 699 748 700 zynqmp_pm_api_debugfs_init(); 749 701
+9
drivers/fpga/Kconfig
··· 204 204 205 205 To compile this as a module, choose M here. 206 206 207 + config FPGA_MGR_ZYNQMP_FPGA 208 + tristate "Xilinx ZynqMP FPGA" 209 + depends on ARCH_ZYNQMP || COMPILE_TEST 210 + help 211 + FPGA manager driver support for Xilinx ZynqMP FPGAs. 212 + This driver uses the processor configuration port(PCAP) 213 + to configure the programmable logic(PL) through PS 214 + on ZynqMP SoC. 215 + 207 216 endif # FPGA
+1
drivers/fpga/Makefile
··· 17 17 obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o 18 18 obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o 19 19 obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o 20 + obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o 20 21 obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o 21 22 obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o 22 23
+159
drivers/fpga/zynqmp-fpga.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (C) 2019 Xilinx, Inc. 4 + */ 5 + 6 + #include <linux/dma-mapping.h> 7 + #include <linux/fpga/fpga-mgr.h> 8 + #include <linux/io.h> 9 + #include <linux/kernel.h> 10 + #include <linux/module.h> 11 + #include <linux/of_address.h> 12 + #include <linux/string.h> 13 + #include <linux/firmware/xlnx-zynqmp.h> 14 + 15 + /* Constant Definitions */ 16 + #define IXR_FPGA_DONE_MASK BIT(3) 17 + 18 + /** 19 + * struct zynqmp_fpga_priv - Private data structure 20 + * @dev: Device data structure 21 + * @flags: flags which is used to identify the bitfile type 22 + */ 23 + struct zynqmp_fpga_priv { 24 + struct device *dev; 25 + u32 flags; 26 + }; 27 + 28 + static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr, 29 + struct fpga_image_info *info, 30 + const char *buf, size_t size) 31 + { 32 + struct zynqmp_fpga_priv *priv; 33 + 34 + priv = mgr->priv; 35 + priv->flags = info->flags; 36 + 37 + return 0; 38 + } 39 + 40 + static int zynqmp_fpga_ops_write(struct fpga_manager *mgr, 41 + const char *buf, size_t size) 42 + { 43 + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 44 + struct zynqmp_fpga_priv *priv; 45 + dma_addr_t dma_addr; 46 + u32 eemi_flags = 0; 47 + char *kbuf; 48 + int ret; 49 + 50 + if (!eemi_ops || !eemi_ops->fpga_load) 51 + return -ENXIO; 52 + 53 + priv = mgr->priv; 54 + 55 + kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL); 56 + if (!kbuf) 57 + return -ENOMEM; 58 + 59 + memcpy(kbuf, buf, size); 60 + 61 + wmb(); /* ensure all writes are done before initiate FW call */ 62 + 63 + if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG) 64 + eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL; 65 + 66 + ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags); 67 + 68 + dma_free_coherent(priv->dev, size, kbuf, dma_addr); 69 + 70 + return ret; 71 + } 72 + 73 + static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr, 74 + struct fpga_image_info *info) 75 + { 76 + return 0; 77 + 
} 78 + 79 + static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr) 80 + { 81 + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 82 + u32 status; 83 + 84 + if (!eemi_ops || !eemi_ops->fpga_get_status) 85 + return FPGA_MGR_STATE_UNKNOWN; 86 + 87 + eemi_ops->fpga_get_status(&status); 88 + if (status & IXR_FPGA_DONE_MASK) 89 + return FPGA_MGR_STATE_OPERATING; 90 + 91 + return FPGA_MGR_STATE_UNKNOWN; 92 + } 93 + 94 + static const struct fpga_manager_ops zynqmp_fpga_ops = { 95 + .state = zynqmp_fpga_ops_state, 96 + .write_init = zynqmp_fpga_ops_write_init, 97 + .write = zynqmp_fpga_ops_write, 98 + .write_complete = zynqmp_fpga_ops_write_complete, 99 + }; 100 + 101 + static int zynqmp_fpga_probe(struct platform_device *pdev) 102 + { 103 + struct device *dev = &pdev->dev; 104 + struct zynqmp_fpga_priv *priv; 105 + struct fpga_manager *mgr; 106 + int ret; 107 + 108 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 109 + if (!priv) 110 + return -ENOMEM; 111 + 112 + priv->dev = dev; 113 + 114 + mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager", 115 + &zynqmp_fpga_ops, priv); 116 + if (!mgr) 117 + return -ENOMEM; 118 + 119 + platform_set_drvdata(pdev, mgr); 120 + 121 + ret = fpga_mgr_register(mgr); 122 + if (ret) { 123 + dev_err(dev, "unable to register FPGA manager"); 124 + return ret; 125 + } 126 + 127 + return 0; 128 + } 129 + 130 + static int zynqmp_fpga_remove(struct platform_device *pdev) 131 + { 132 + struct fpga_manager *mgr = platform_get_drvdata(pdev); 133 + 134 + fpga_mgr_unregister(mgr); 135 + 136 + return 0; 137 + } 138 + 139 + static const struct of_device_id zynqmp_fpga_of_match[] = { 140 + { .compatible = "xlnx,zynqmp-pcap-fpga", }, 141 + {}, 142 + }; 143 + 144 + MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match); 145 + 146 + static struct platform_driver zynqmp_fpga_driver = { 147 + .probe = zynqmp_fpga_probe, 148 + .remove = zynqmp_fpga_remove, 149 + .driver = { 150 + .name = "zynqmp_fpga_manager", 151 + 
.of_match_table = of_match_ptr(zynqmp_fpga_of_match), 152 + }, 153 + }; 154 + 155 + module_platform_driver(zynqmp_fpga_driver); 156 + 157 + MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>"); 158 + MODULE_DESCRIPTION("Xilinx ZynqMp FPGA Manager"); 159 + MODULE_LICENSE("GPL");
+4
drivers/memory/emif.h
··· 537 537 #define MCONNID_SHIFT 0 538 538 #define MCONNID_MASK (0xff << 0) 539 539 540 + /* READ_WRITE_LEVELING_CONTROL */ 541 + #define RDWRLVLFULL_START 0x80000000 542 + 540 543 /* DDR_PHY_CTRL_1 - EMIF4D */ 541 544 #define DLL_SLAVE_DLY_CTRL_SHIFT_4D 4 542 545 #define DLL_SLAVE_DLY_CTRL_MASK_4D (0xFF << 4) ··· 601 598 602 599 void ti_emif_save_context(void); 603 600 void ti_emif_restore_context(void); 601 + void ti_emif_run_hw_leveling(void); 604 602 void ti_emif_enter_sr(void); 605 603 void ti_emif_exit_sr(void); 606 604 void ti_emif_abort_sr(void);
+20 -14
drivers/memory/tegra/mc.c
··· 51 51 #define MC_EMEM_ADR_CFG 0x54 52 52 #define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0) 53 53 54 + #define MC_TIMING_CONTROL 0xfc 55 + #define MC_TIMING_UPDATE BIT(0) 56 + 54 57 static const struct of_device_id tegra_mc_of_match[] = { 55 58 #ifdef CONFIG_ARCH_TEGRA_2x_SOC 56 59 { .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc }, ··· 77 74 }; 78 75 MODULE_DEVICE_TABLE(of, tegra_mc_of_match); 79 76 80 - static int terga_mc_block_dma_common(struct tegra_mc *mc, 77 + static int tegra_mc_block_dma_common(struct tegra_mc *mc, 81 78 const struct tegra_mc_reset *rst) 82 79 { 83 80 unsigned long flags; ··· 93 90 return 0; 94 91 } 95 92 96 - static bool terga_mc_dma_idling_common(struct tegra_mc *mc, 93 + static bool tegra_mc_dma_idling_common(struct tegra_mc *mc, 97 94 const struct tegra_mc_reset *rst) 98 95 { 99 96 return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0; 100 97 } 101 98 102 - static int terga_mc_unblock_dma_common(struct tegra_mc *mc, 99 + static int tegra_mc_unblock_dma_common(struct tegra_mc *mc, 103 100 const struct tegra_mc_reset *rst) 104 101 { 105 102 unsigned long flags; ··· 115 112 return 0; 116 113 } 117 114 118 - static int terga_mc_reset_status_common(struct tegra_mc *mc, 115 + static int tegra_mc_reset_status_common(struct tegra_mc *mc, 119 116 const struct tegra_mc_reset *rst) 120 117 { 121 118 return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0; 122 119 } 123 120 124 - const struct tegra_mc_reset_ops terga_mc_reset_ops_common = { 125 - .block_dma = terga_mc_block_dma_common, 126 - .dma_idling = terga_mc_dma_idling_common, 127 - .unblock_dma = terga_mc_unblock_dma_common, 128 - .reset_status = terga_mc_reset_status_common, 121 + const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = { 122 + .block_dma = tegra_mc_block_dma_common, 123 + .dma_idling = tegra_mc_dma_idling_common, 124 + .unblock_dma = tegra_mc_unblock_dma_common, 125 + .reset_status = tegra_mc_reset_status_common, 129 126 }; 130 127 131 128 static inline 
struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev) ··· 285 282 u32 value; 286 283 287 284 /* compute the number of MC clock cycles per tick */ 288 - tick = mc->tick * clk_get_rate(mc->clk); 285 + tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk); 289 286 do_div(tick, NSEC_PER_SEC); 290 287 291 - value = readl(mc->regs + MC_EMEM_ARB_CFG); 288 + value = mc_readl(mc, MC_EMEM_ARB_CFG); 292 289 value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK; 293 290 value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick); 294 - writel(value, mc->regs + MC_EMEM_ARB_CFG); 291 + mc_writel(mc, value, MC_EMEM_ARB_CFG); 295 292 296 293 /* write latency allowance defaults */ 297 294 for (i = 0; i < mc->soc->num_clients; i++) { 298 295 const struct tegra_mc_la *la = &mc->soc->clients[i].la; 299 296 u32 value; 300 297 301 - value = readl(mc->regs + la->reg); 298 + value = mc_readl(mc, la->reg); 302 299 value &= ~(la->mask << la->shift); 303 300 value |= (la->def & la->mask) << la->shift; 304 - writel(value, mc->regs + la->reg); 301 + mc_writel(mc, value, la->reg); 305 302 } 303 + 304 + /* latch new values */ 305 + mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL); 306 306 307 307 return 0; 308 308 }
+1 -1
drivers/memory/tegra/mc.h
··· 35 35 writel_relaxed(value, mc->regs + offset); 36 36 } 37 37 38 - extern const struct tegra_mc_reset_ops terga_mc_reset_ops_common; 38 + extern const struct tegra_mc_reset_ops tegra_mc_reset_ops_common; 39 39 40 40 #ifdef CONFIG_ARCH_TEGRA_2x_SOC 41 41 extern const struct tegra_mc_soc tegra20_mc_soc;
+2 -2
drivers/memory/tegra/tegra114.c
··· 572 572 }, 573 573 }, { 574 574 .id = 0x34, 575 - .name = "fdcwr2", 575 + .name = "fdcdwr2", 576 576 .swgroup = TEGRA_SWGROUP_NV, 577 577 .smmu = { 578 578 .reg = 0x22c, ··· 975 975 .smmu = &tegra114_smmu_soc, 976 976 .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | 977 977 MC_INT_DECERR_EMEM, 978 - .reset_ops = &terga_mc_reset_ops_common, 978 + .reset_ops = &tegra_mc_reset_ops_common, 979 979 .resets = tegra114_mc_resets, 980 980 .num_resets = ARRAY_SIZE(tegra114_mc_resets), 981 981 };
+2 -2
drivers/memory/tegra/tegra124.c
··· 1074 1074 .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 1075 1075 MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | 1076 1076 MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, 1077 - .reset_ops = &terga_mc_reset_ops_common, 1077 + .reset_ops = &tegra_mc_reset_ops_common, 1078 1078 .resets = tegra124_mc_resets, 1079 1079 .num_resets = ARRAY_SIZE(tegra124_mc_resets), 1080 1080 }; ··· 1104 1104 .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 1105 1105 MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | 1106 1106 MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, 1107 - .reset_ops = &terga_mc_reset_ops_common, 1107 + .reset_ops = &tegra_mc_reset_ops_common, 1108 1108 .resets = tegra124_mc_resets, 1109 1109 .num_resets = ARRAY_SIZE(tegra124_mc_resets), 1110 1110 };
+14 -14
drivers/memory/tegra/tegra20.c
··· 198 198 TEGRA20_MC_RESET(VI, 0x100, 0x178, 0x104, 14), 199 199 }; 200 200 201 - static int terga20_mc_hotreset_assert(struct tegra_mc *mc, 201 + static int tegra20_mc_hotreset_assert(struct tegra_mc *mc, 202 202 const struct tegra_mc_reset *rst) 203 203 { 204 204 unsigned long flags; ··· 214 214 return 0; 215 215 } 216 216 217 - static int terga20_mc_hotreset_deassert(struct tegra_mc *mc, 217 + static int tegra20_mc_hotreset_deassert(struct tegra_mc *mc, 218 218 const struct tegra_mc_reset *rst) 219 219 { 220 220 unsigned long flags; ··· 230 230 return 0; 231 231 } 232 232 233 - static int terga20_mc_block_dma(struct tegra_mc *mc, 233 + static int tegra20_mc_block_dma(struct tegra_mc *mc, 234 234 const struct tegra_mc_reset *rst) 235 235 { 236 236 unsigned long flags; ··· 246 246 return 0; 247 247 } 248 248 249 - static bool terga20_mc_dma_idling(struct tegra_mc *mc, 249 + static bool tegra20_mc_dma_idling(struct tegra_mc *mc, 250 250 const struct tegra_mc_reset *rst) 251 251 { 252 252 return mc_readl(mc, rst->status) == 0; 253 253 } 254 254 255 - static int terga20_mc_reset_status(struct tegra_mc *mc, 255 + static int tegra20_mc_reset_status(struct tegra_mc *mc, 256 256 const struct tegra_mc_reset *rst) 257 257 { 258 258 return (mc_readl(mc, rst->reset) & BIT(rst->bit)) == 0; 259 259 } 260 260 261 - static int terga20_mc_unblock_dma(struct tegra_mc *mc, 261 + static int tegra20_mc_unblock_dma(struct tegra_mc *mc, 262 262 const struct tegra_mc_reset *rst) 263 263 { 264 264 unsigned long flags; ··· 274 274 return 0; 275 275 } 276 276 277 - const struct tegra_mc_reset_ops terga20_mc_reset_ops = { 278 - .hotreset_assert = terga20_mc_hotreset_assert, 279 - .hotreset_deassert = terga20_mc_hotreset_deassert, 280 - .block_dma = terga20_mc_block_dma, 281 - .dma_idling = terga20_mc_dma_idling, 282 - .unblock_dma = terga20_mc_unblock_dma, 283 - .reset_status = terga20_mc_reset_status, 277 + static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = { 278 + 
.hotreset_assert = tegra20_mc_hotreset_assert, 279 + .hotreset_deassert = tegra20_mc_hotreset_deassert, 280 + .block_dma = tegra20_mc_block_dma, 281 + .dma_idling = tegra20_mc_dma_idling, 282 + .unblock_dma = tegra20_mc_unblock_dma, 283 + .reset_status = tegra20_mc_reset_status, 284 284 }; 285 285 286 286 const struct tegra_mc_soc tegra20_mc_soc = { ··· 290 290 .client_id_mask = 0x3f, 291 291 .intmask = MC_INT_SECURITY_VIOLATION | MC_INT_INVALID_GART_PAGE | 292 292 MC_INT_DECERR_EMEM, 293 - .reset_ops = &terga20_mc_reset_ops, 293 + .reset_ops = &tegra20_mc_reset_ops, 294 294 .resets = tegra20_mc_resets, 295 295 .num_resets = ARRAY_SIZE(tegra20_mc_resets), 296 296 };
+1 -1
drivers/memory/tegra/tegra210.c
··· 1132 1132 .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 1133 1133 MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | 1134 1134 MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, 1135 - .reset_ops = &terga_mc_reset_ops_common, 1135 + .reset_ops = &tegra_mc_reset_ops_common, 1136 1136 .resets = tegra210_mc_resets, 1137 1137 .num_resets = ARRAY_SIZE(tegra210_mc_resets), 1138 1138 };
+2 -2
drivers/memory/tegra/tegra30.c
··· 726 726 }, 727 727 }, { 728 728 .id = 0x34, 729 - .name = "fdcwr2", 729 + .name = "fdcdwr2", 730 730 .swgroup = TEGRA_SWGROUP_NV2, 731 731 .smmu = { 732 732 .reg = 0x22c, ··· 999 999 .smmu = &tegra30_smmu_soc, 1000 1000 .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | 1001 1001 MC_INT_DECERR_EMEM, 1002 - .reset_ops = &terga_mc_reset_ops_common, 1002 + .reset_ops = &tegra_mc_reset_ops_common, 1003 1003 .resets = tegra30_mc_resets, 1004 1004 .num_resets = ARRAY_SIZE(tegra30_mc_resets), 1005 1005 };
+3
drivers/memory/ti-emif-pm.c
··· 138 138 emif_data->pm_functions.exit_sr = 139 139 sram_resume_address(emif_data, 140 140 (unsigned long)ti_emif_exit_sr); 141 + emif_data->pm_functions.run_hw_leveling = 142 + sram_resume_address(emif_data, 143 + (unsigned long)ti_emif_run_hw_leveling); 141 144 142 145 emif_data->pm_data.regs_virt = 143 146 (struct emif_regs_amx3 *)emif_data->ti_emif_sram_data_virt;
+41
drivers/memory/ti-emif-sram-pm.S
··· 27 27 #define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK 0x0700 28 28 29 29 #define EMIF_SDCFG_TYPE_DDR2 0x2 << SDRAM_TYPE_SHIFT 30 + #define EMIF_SDCFG_TYPE_DDR3 0x3 << SDRAM_TYPE_SHIFT 30 31 #define EMIF_STATUS_READY 0x4 31 32 32 33 #define AM43XX_EMIF_PHY_CTRL_REG_COUNT 0x120 ··· 244 243 245 244 mov pc, lr 246 245 ENDPROC(ti_emif_restore_context) 246 + 247 + /* 248 + * void ti_emif_run_hw_leveling(void) 249 + * 250 + * Used during resume to run hardware leveling again and restore the 251 + * configuration of the EMIF PHY, only for DDR3. 252 + */ 253 + ENTRY(ti_emif_run_hw_leveling) 254 + adr r4, ti_emif_pm_sram_data 255 + ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET] 256 + 257 + ldr r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL] 258 + orr r3, r3, #RDWRLVLFULL_START 259 + ldr r2, [r0, #EMIF_SDRAM_CONFIG] 260 + and r2, r2, #SDRAM_TYPE_MASK 261 + cmp r2, #EMIF_SDCFG_TYPE_DDR3 262 + bne skip_hwlvl 263 + 264 + str r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL] 265 + 266 + /* 267 + * If EMIF registers are touched during initial stage of HW 268 + * leveling sequence there will be an L3 NOC timeout error issued 269 + * as the EMIF will not respond, which is not fatal, but it is 270 + * avoidable. This small wait loop is enough time for this condition 271 + * to clear, even at worst case of CPU running at max speed of 1Ghz. 272 + */ 273 + mov r2, #0x2000 274 + 1: 275 + subs r2, r2, #0x1 276 + bne 1b 277 + 278 + /* Bit clears when operation is complete */ 279 + 2: ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL] 280 + tst r1, #RDWRLVLFULL_START 281 + bne 2b 282 + 283 + skip_hwlvl: 284 + mov pc, lr 285 + ENDPROC(ti_emif_run_hw_leveling) 247 286 248 287 /* 249 288 * void ti_emif_enter_sr(void)
-24
drivers/misc/Kconfig
··· 496 496 bus. System Configuration interface is one of the possible means 497 497 of generating transactions on this bus. 498 498 499 - config ASPEED_P2A_CTRL 500 - depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON 501 - tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control" 502 - help 503 - Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through 504 - ioctl()s, the driver also provides an interface for userspace mappings to 505 - a pre-defined region. 506 - 507 - config ASPEED_LPC_CTRL 508 - depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON 509 - tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control" 510 - ---help--- 511 - Control Aspeed ast2400/2500 HOST LPC to BMC mappings through 512 - ioctl()s, the driver also provides a read/write interface to a BMC ram 513 - region where the host LPC read/write region can be buffered. 514 - 515 - config ASPEED_LPC_SNOOP 516 - tristate "Aspeed ast2500 HOST LPC snoop support" 517 - depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON 518 - help 519 - Provides a driver to control the LPC snoop interface which 520 - allows the BMC to listen on and save the data written by 521 - the host to an arbitrary LPC I/O port. 522 - 523 499 config PCI_ENDPOINT_TEST 524 500 depends on PCI 525 501 select CRC32
-3
drivers/misc/Makefile
··· 54 54 obj-$(CONFIG_ECHO) += echo/ 55 55 obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o 56 56 obj-$(CONFIG_CXL_BASE) += cxl/ 57 - obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o 58 - obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o 59 - obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o 60 57 obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o 61 58 obj-$(CONFIG_OCXL) += ocxl/ 62 59 obj-y += cardreader/
drivers/misc/aspeed-lpc-ctrl.c drivers/soc/aspeed/aspeed-lpc-ctrl.c
drivers/misc/aspeed-lpc-snoop.c drivers/soc/aspeed/aspeed-lpc-snoop.c
drivers/misc/aspeed-p2a-ctrl.c drivers/soc/aspeed/aspeed-p2a-ctrl.c
+7 -3
drivers/nvmem/zynqmp_nvmem.c
··· 16 16 struct nvmem_device *nvmem; 17 17 }; 18 18 19 + static const struct zynqmp_eemi_ops *eemi_ops; 20 + 19 21 static int zynqmp_nvmem_read(void *context, unsigned int offset, 20 22 void *val, size_t bytes) 21 23 { ··· 25 23 int idcode, version; 26 24 struct zynqmp_nvmem_data *priv = context; 27 25 28 - const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 29 - 30 - if (!eemi_ops || !eemi_ops->get_chipid) 26 + if (!eemi_ops->get_chipid) 31 27 return -ENXIO; 32 28 33 29 ret = eemi_ops->get_chipid(&idcode, &version); ··· 60 60 priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL); 61 61 if (!priv) 62 62 return -ENOMEM; 63 + 64 + eemi_ops = zynqmp_pm_get_eemi_ops(); 65 + if (IS_ERR(eemi_ops)) 66 + return PTR_ERR(eemi_ops); 63 67 64 68 priv->dev = dev; 65 69 econfig.dev = dev;
+4 -4
drivers/reset/reset-zynqmp.c
··· 79 79 if (!priv) 80 80 return -ENOMEM; 81 81 82 - platform_set_drvdata(pdev, priv); 83 - 84 82 priv->eemi_ops = zynqmp_pm_get_eemi_ops(); 85 - if (!priv->eemi_ops) 86 - return -ENXIO; 83 + if (IS_ERR(priv->eemi_ops)) 84 + return PTR_ERR(priv->eemi_ops); 85 + 86 + platform_set_drvdata(pdev, priv); 87 87 88 88 priv->rcdev.ops = &zynqmp_reset_ops; 89 89 priv->rcdev.owner = THIS_MODULE;
+41 -8
drivers/rtc/rtc-omap.c
··· 403 403 404 404 static struct omap_rtc *omap_rtc_power_off_rtc; 405 405 406 - /* 407 - * omap_rtc_poweroff: RTC-controlled power off 408 - * 409 - * The RTC can be used to control an external PMIC via the pmic_power_en pin, 410 - * which can be configured to transition to OFF on ALARM2 events. 411 - * 412 - * Called with local interrupts disabled. 406 + /** 407 + * omap_rtc_power_off_program: Set the pmic power off sequence. The RTC 408 + * generates pmic_pwr_enable control, which can be used to control an external 409 + * PMIC. 413 410 */ 414 - static void omap_rtc_power_off(void) 411 + int omap_rtc_power_off_program(struct device *dev) 415 412 { 416 413 struct omap_rtc *rtc = omap_rtc_power_off_rtc; 417 414 struct rtc_time tm; ··· 422 425 rtc_writel(rtc, OMAP_RTC_PMIC_REG, val | OMAP_RTC_PMIC_POWER_EN_EN); 423 426 424 427 again: 428 + /* Clear any existing ALARM2 event */ 429 + rtc_writel(rtc, OMAP_RTC_STATUS_REG, OMAP_RTC_STATUS_ALARM2); 430 + 425 431 /* set alarm one second from now */ 426 432 omap_rtc_read_time_raw(rtc, &tm); 427 433 seconds = tm.tm_sec; ··· 460 460 } 461 461 462 462 rtc->type->lock(rtc); 463 + 464 + return 0; 465 + } 466 + EXPORT_SYMBOL(omap_rtc_power_off_program); 467 + 468 + /* 469 + * omap_rtc_poweroff: RTC-controlled power off 470 + * 471 + * The RTC can be used to control an external PMIC via the pmic_power_en pin, 472 + * which can be configured to transition to OFF on ALARM2 events. 473 + * 474 + * Notes: 475 + * The one-second alarm offset is the shortest offset possible as the alarm 476 + * registers must be set before the next timer update and the offset 477 + * calculation is too heavy for everything to be done within a single access 478 + * period (~15 us). 479 + * 480 + * Called with local interrupts disabled. 
481 + */ 482 + static void omap_rtc_power_off(void) 483 + { 484 + struct rtc_device *rtc = omap_rtc_power_off_rtc->rtc; 485 + u32 val; 486 + 487 + omap_rtc_power_off_program(rtc->dev.parent); 488 + 489 + /* Set PMIC power enable and EXT_WAKEUP in case PB power on is used */ 490 + omap_rtc_power_off_rtc->type->unlock(omap_rtc_power_off_rtc); 491 + val = rtc_readl(omap_rtc_power_off_rtc, OMAP_RTC_PMIC_REG); 492 + val |= OMAP_RTC_PMIC_POWER_EN_EN | OMAP_RTC_PMIC_EXT_WKUP_POL(0) | 493 + OMAP_RTC_PMIC_EXT_WKUP_EN(0); 494 + rtc_writel(omap_rtc_power_off_rtc, OMAP_RTC_PMIC_REG, val); 495 + omap_rtc_power_off_rtc->type->lock(omap_rtc_power_off_rtc); 463 496 464 497 /* 465 498 * Wait for alarm to trigger (within one second) and external PMIC to
+1
drivers/soc/Kconfig
··· 2 2 3 3 source "drivers/soc/actions/Kconfig" 4 4 source "drivers/soc/amlogic/Kconfig" 5 + source "drivers/soc/aspeed/Kconfig" 5 6 source "drivers/soc/atmel/Kconfig" 6 7 source "drivers/soc/bcm/Kconfig" 7 8 source "drivers/soc/fsl/Kconfig"
+1
drivers/soc/Makefile
··· 4 4 # 5 5 6 6 obj-$(CONFIG_ARCH_ACTIONS) += actions/ 7 + obj-$(CONFIG_SOC_ASPEED) += aspeed/ 7 8 obj-$(CONFIG_ARCH_AT91) += atmel/ 8 9 obj-y += bcm/ 9 10 obj-$(CONFIG_ARCH_DOVE) += dove/
+144 -16
drivers/soc/amlogic/meson-gx-pwrc-vpu.c
··· 11 11 #include <linux/bitfield.h> 12 12 #include <linux/regmap.h> 13 13 #include <linux/mfd/syscon.h> 14 + #include <linux/of_device.h> 14 15 #include <linux/reset.h> 15 16 #include <linux/clk.h> 16 17 ··· 27 26 #define HHI_MEM_PD_REG0 (0x40 << 2) 28 27 #define HHI_VPU_MEM_PD_REG0 (0x41 << 2) 29 28 #define HHI_VPU_MEM_PD_REG1 (0x42 << 2) 29 + #define HHI_VPU_MEM_PD_REG2 (0x4d << 2) 30 30 31 31 struct meson_gx_pwrc_vpu { 32 32 struct generic_pm_domain genpd; ··· 56 54 /* Power Down Memories */ 57 55 for (i = 0; i < 32; i += 2) { 58 56 regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 59 - 0x2 << i, 0x3 << i); 57 + 0x3 << i, 0x3 << i); 60 58 udelay(5); 61 59 } 62 60 for (i = 0; i < 32; i += 2) { 63 61 regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 64 - 0x2 << i, 0x3 << i); 62 + 0x3 << i, 0x3 << i); 63 + udelay(5); 64 + } 65 + for (i = 8; i < 16; i++) { 66 + regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, 67 + BIT(i), BIT(i)); 68 + udelay(5); 69 + } 70 + udelay(20); 71 + 72 + regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 73 + GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI); 74 + 75 + msleep(20); 76 + 77 + clk_disable_unprepare(pd->vpu_clk); 78 + clk_disable_unprepare(pd->vapb_clk); 79 + 80 + return 0; 81 + } 82 + 83 + static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd) 84 + { 85 + struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); 86 + int i; 87 + 88 + regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 89 + GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO); 90 + udelay(20); 91 + 92 + /* Power Down Memories */ 93 + for (i = 0; i < 32; i += 2) { 94 + regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 95 + 0x3 << i, 0x3 << i); 96 + udelay(5); 97 + } 98 + for (i = 0; i < 32; i += 2) { 99 + regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 100 + 0x3 << i, 0x3 << i); 101 + udelay(5); 102 + } 103 + for (i = 0; i < 32; i += 2) { 104 + regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2, 105 + 0x3 << i, 0x3 << i); 65 
106 udelay(5); 66 107 } 67 108 for (i = 8; i < 16; i++) { ··· 153 108 /* Power Up Memories */ 154 109 for (i = 0; i < 32; i += 2) { 155 110 regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 156 - 0x2 << i, 0); 111 + 0x3 << i, 0); 157 112 udelay(5); 158 113 } 159 114 160 115 for (i = 0; i < 32; i += 2) { 161 116 regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 162 - 0x2 << i, 0); 117 + 0x3 << i, 0); 118 + udelay(5); 119 + } 120 + 121 + for (i = 8; i < 16; i++) { 122 + regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, 123 + BIT(i), 0); 124 + udelay(5); 125 + } 126 + udelay(20); 127 + 128 + ret = reset_control_assert(pd->rstc); 129 + if (ret) 130 + return ret; 131 + 132 + regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 133 + GEN_PWR_VPU_HDMI_ISO, 0); 134 + 135 + ret = reset_control_deassert(pd->rstc); 136 + if (ret) 137 + return ret; 138 + 139 + ret = meson_gx_pwrc_vpu_setup_clk(pd); 140 + if (ret) 141 + return ret; 142 + 143 + return 0; 144 + } 145 + 146 + static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd) 147 + { 148 + struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); 149 + int ret; 150 + int i; 151 + 152 + regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, 153 + GEN_PWR_VPU_HDMI, 0); 154 + udelay(20); 155 + 156 + /* Power Up Memories */ 157 + for (i = 0; i < 32; i += 2) { 158 + regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, 159 + 0x3 << i, 0); 160 + udelay(5); 161 + } 162 + 163 + for (i = 0; i < 32; i += 2) { 164 + regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, 165 + 0x3 << i, 0); 166 + udelay(5); 167 + } 168 + 169 + for (i = 0; i < 32; i += 2) { 170 + regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2, 171 + 0x3 << i, 0); 163 172 udelay(5); 164 173 } 165 174 ··· 259 160 }, 260 161 }; 261 162 163 + static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = { 164 + .genpd = { 165 + .name = "vpu_hdmi", 166 + .power_off = meson_g12a_pwrc_vpu_power_off, 167 + .power_on = meson_g12a_pwrc_vpu_power_on, 
168 + }, 169 + }; 170 + 262 171 static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev) 263 172 { 173 + const struct meson_gx_pwrc_vpu *vpu_pd_match; 264 174 struct regmap *regmap_ao, *regmap_hhi; 175 + struct meson_gx_pwrc_vpu *vpu_pd; 265 176 struct reset_control *rstc; 266 177 struct clk *vpu_clk; 267 178 struct clk *vapb_clk; 268 179 bool powered_off; 269 180 int ret; 181 + 182 + vpu_pd_match = of_device_get_match_data(&pdev->dev); 183 + if (!vpu_pd_match) { 184 + dev_err(&pdev->dev, "failed to get match data\n"); 185 + return -ENODEV; 186 + } 187 + 188 + vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL); 189 + if (!vpu_pd) 190 + return -ENOMEM; 191 + 192 + memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd)); 270 193 271 194 regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node)); 272 195 if (IS_ERR(regmap_ao)) { ··· 322 201 return PTR_ERR(vapb_clk); 323 202 } 324 203 325 - vpu_hdmi_pd.regmap_ao = regmap_ao; 326 - vpu_hdmi_pd.regmap_hhi = regmap_hhi; 327 - vpu_hdmi_pd.rstc = rstc; 328 - vpu_hdmi_pd.vpu_clk = vpu_clk; 329 - vpu_hdmi_pd.vapb_clk = vapb_clk; 204 + vpu_pd->regmap_ao = regmap_ao; 205 + vpu_pd->regmap_hhi = regmap_hhi; 206 + vpu_pd->rstc = rstc; 207 + vpu_pd->vpu_clk = vpu_clk; 208 + vpu_pd->vapb_clk = vapb_clk; 330 209 331 - powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd); 210 + platform_set_drvdata(pdev, vpu_pd); 211 + 212 + powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd); 332 213 333 214 /* If already powered, sync the clock states */ 334 215 if (!powered_off) { 335 - ret = meson_gx_pwrc_vpu_setup_clk(&vpu_hdmi_pd); 216 + ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd); 336 217 if (ret) 337 218 return ret; 338 219 } 339 220 340 - pm_genpd_init(&vpu_hdmi_pd.genpd, &pm_domain_always_on_gov, 221 + pm_genpd_init(&vpu_pd->genpd, &pm_domain_always_on_gov, 341 222 powered_off); 342 223 343 224 return of_genpd_add_provider_simple(pdev->dev.of_node, 344 - &vpu_hdmi_pd.genpd); 225 + &vpu_pd->genpd); 345 226 } 346 227 347 228 
static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev) 348 229 { 230 + struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev); 349 231 bool powered_off; 350 232 351 - powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd); 233 + powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd); 352 234 if (!powered_off) 353 - meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd); 235 + vpu_pd->genpd.power_off(&vpu_pd->genpd); 354 236 } 355 237 356 238 static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = { 357 - { .compatible = "amlogic,meson-gx-pwrc-vpu" }, 239 + { .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd }, 240 + { 241 + .compatible = "amlogic,meson-g12a-pwrc-vpu", 242 + .data = &vpu_hdmi_pd_g12a 243 + }, 358 244 { /* sentinel */ } 359 245 }; 360 246
+27 -16
drivers/soc/amlogic/meson-gx-socinfo.c
··· 37 37 { "AXG", 0x25 }, 38 38 { "GXLX", 0x26 }, 39 39 { "TXHD", 0x27 }, 40 + { "G12A", 0x28 }, 41 + { "G12B", 0x29 }, 40 42 }; 41 43 42 44 static const struct meson_gx_package_id { 43 45 const char *name; 44 46 unsigned int major_id; 45 47 unsigned int pack_id; 48 + unsigned int pack_mask; 46 49 } soc_packages[] = { 47 - { "S905", 0x1f, 0 }, 48 - { "S905H", 0x1f, 0x13 }, 49 - { "S905M", 0x1f, 0x20 }, 50 - { "S905D", 0x21, 0 }, 51 - { "S905X", 0x21, 0x80 }, 52 - { "S905W", 0x21, 0xa0 }, 53 - { "S905L", 0x21, 0xc0 }, 54 - { "S905M2", 0x21, 0xe0 }, 55 - { "S912", 0x22, 0 }, 56 - { "962X", 0x24, 0x10 }, 57 - { "962E", 0x24, 0x20 }, 58 - { "A113X", 0x25, 0x37 }, 59 - { "A113D", 0x25, 0x22 }, 50 + { "S905", 0x1f, 0, 0x20 }, /* pack_id != 0x20 */ 51 + { "S905H", 0x1f, 0x3, 0xf }, /* pack_id & 0xf == 0x3 */ 52 + { "S905M", 0x1f, 0x20, 0xf0 }, /* pack_id == 0x20 */ 53 + { "S905D", 0x21, 0, 0xf0 }, 54 + { "S905X", 0x21, 0x80, 0xf0 }, 55 + { "S905W", 0x21, 0xa0, 0xf0 }, 56 + { "S905L", 0x21, 0xc0, 0xf0 }, 57 + { "S905M2", 0x21, 0xe0, 0xf0 }, 58 + { "S805X", 0x21, 0x30, 0xf0 }, 59 + { "S805Y", 0x21, 0xb0, 0xf0 }, 60 + { "S912", 0x22, 0, 0x0 }, /* Only S912 is known for GXM */ 61 + { "962X", 0x24, 0x10, 0xf0 }, 62 + { "962E", 0x24, 0x20, 0xf0 }, 63 + { "A113X", 0x25, 0x37, 0xff }, 64 + { "A113D", 0x25, 0x22, 0xff }, 65 + { "S905D2", 0x28, 0x10, 0xf0 }, 66 + { "S905X2", 0x28, 0x40, 0xf0 }, 67 + { "S922X", 0x29, 0x40, 0xf0 }, 60 68 }; 61 69 62 70 static inline unsigned int socinfo_to_major(u32 socinfo) ··· 89 81 90 82 static const char *socinfo_to_package_id(u32 socinfo) 91 83 { 92 - unsigned int pack = socinfo_to_pack(socinfo) & 0xf0; 84 + unsigned int pack = socinfo_to_pack(socinfo); 93 85 unsigned int major = socinfo_to_major(socinfo); 94 86 int i; 95 87 96 88 for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) { 97 89 if (soc_packages[i].major_id == major && 98 - soc_packages[i].pack_id == pack) 90 + soc_packages[i].pack_id == 91 + (pack & soc_packages[i].pack_mask)) 99 92 
return soc_packages[i].name; 100 93 } 101 94 ··· 132 123 return -ENODEV; 133 124 134 125 /* check if interface is enabled */ 135 - if (!of_device_is_available(np)) 126 + if (!of_device_is_available(np)) { 127 + of_node_put(np); 136 128 return -ENODEV; 129 + } 137 130 138 131 /* check if chip-id is available */ 139 132 if (!of_property_read_bool(np, "amlogic,has-chip-id"))
+31
drivers/soc/aspeed/Kconfig
··· 1 + menu "Aspeed SoC drivers" 2 + 3 + config SOC_ASPEED 4 + def_bool y 5 + depends on ARCH_ASPEED || COMPILE_TEST 6 + 7 + config ASPEED_LPC_CTRL 8 + depends on SOC_ASPEED && REGMAP && MFD_SYSCON 9 + tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control" 10 + ---help--- 11 + Control Aspeed ast2400/2500 HOST LPC to BMC mappings through 12 + ioctl()s, the driver also provides a read/write interface to a BMC ram 13 + region where the host LPC read/write region can be buffered. 14 + 15 + config ASPEED_LPC_SNOOP 16 + tristate "Aspeed ast2500 HOST LPC snoop support" 17 + depends on SOC_ASPEED && REGMAP && MFD_SYSCON 18 + help 19 + Provides a driver to control the LPC snoop interface which 20 + allows the BMC to listen on and save the data written by 21 + the host to an arbitrary LPC I/O port. 22 + 23 + config ASPEED_P2A_CTRL 24 + depends on SOC_ASPEED && REGMAP && MFD_SYSCON 25 + tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control" 26 + help 27 + Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through 28 + ioctl()s, the driver also provides an interface for userspace mappings to 29 + a pre-defined region. 30 + 31 + endmenu
+3
drivers/soc/aspeed/Makefile
··· 1 + obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o 2 + obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o 3 + obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+1
drivers/soc/imx/Makefile
··· 1 1 obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o 2 2 obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o 3 + obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
+1 -3
drivers/soc/imx/gpc.c
··· 406 406 const struct imx_gpc_dt_data *of_id_data = of_id->data; 407 407 struct device_node *pgc_node; 408 408 struct regmap *regmap; 409 - struct resource *res; 410 409 void __iomem *base; 411 410 int ret; 412 411 ··· 416 417 !pgc_node) 417 418 return 0; 418 419 419 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 420 - base = devm_ioremap_resource(&pdev->dev, res); 420 + base = devm_platform_ioremap_resource(pdev, 0); 421 421 if (IS_ERR(base)) 422 422 return PTR_ERR(base); 423 423
+15 -28
drivers/soc/imx/gpcv2.c
··· 136 136 GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ; 137 137 const bool enable_power_control = !on; 138 138 const bool has_regulator = !IS_ERR(domain->regulator); 139 - unsigned long deadline; 140 139 int i, ret = 0; 140 + u32 pxx_req; 141 141 142 142 regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING, 143 143 domain->bits.map, domain->bits.map); ··· 169 169 * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait 170 170 * for PUP_REQ/PDN_REQ bit to be cleared 171 171 */ 172 - deadline = jiffies + msecs_to_jiffies(1); 173 - while (true) { 174 - u32 pxx_req; 175 - 176 - regmap_read(domain->regmap, offset, &pxx_req); 177 - 178 - if (!(pxx_req & domain->bits.pxx)) 179 - break; 180 - 181 - if (time_after(jiffies, deadline)) { 182 - dev_err(domain->dev, "falied to command PGC\n"); 183 - ret = -ETIMEDOUT; 184 - /* 185 - * If we were in a process of enabling a 186 - * domain and failed we might as well disable 187 - * the regulator we just enabled. And if it 188 - * was the opposite situation and we failed to 189 - * power down -- keep the regulator on 190 - */ 191 - on = !on; 192 - break; 193 - } 194 - 195 - cpu_relax(); 172 + ret = regmap_read_poll_timeout(domain->regmap, offset, pxx_req, 173 + !(pxx_req & domain->bits.pxx), 174 + 0, USEC_PER_MSEC); 175 + if (ret) { 176 + dev_err(domain->dev, "failed to command PGC\n"); 177 + /* 178 + * If we were in a process of enabling a 179 + * domain and failed we might as well disable 180 + * the regulator we just enabled. 
And if it 181 + * was the opposite situation and we failed to 182 + * power down -- keep the regulator on 183 + */ 184 + on = !on; 196 185 } 197 186 198 187 if (enable_power_control) ··· 563 574 struct device *dev = &pdev->dev; 564 575 struct device_node *pgc_np, *np; 565 576 struct regmap *regmap; 566 - struct resource *res; 567 577 void __iomem *base; 568 578 int ret; 569 579 ··· 572 584 return -EINVAL; 573 585 } 574 586 575 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 576 - base = devm_ioremap_resource(dev, res); 587 + base = devm_platform_ioremap_resource(pdev, 0); 577 588 if (IS_ERR(base)) 578 589 return PTR_ERR(base); 579 590
+115
drivers/soc/imx/soc-imx8.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright 2019 NXP. 4 + */ 5 + 6 + #include <linux/init.h> 7 + #include <linux/io.h> 8 + #include <linux/of_address.h> 9 + #include <linux/slab.h> 10 + #include <linux/sys_soc.h> 11 + #include <linux/platform_device.h> 12 + #include <linux/of.h> 13 + 14 + #define REV_B1 0x21 15 + 16 + #define IMX8MQ_SW_INFO_B1 0x40 17 + #define IMX8MQ_SW_MAGIC_B1 0xff0055aa 18 + 19 + struct imx8_soc_data { 20 + char *name; 21 + u32 (*soc_revision)(void); 22 + }; 23 + 24 + static u32 __init imx8mq_soc_revision(void) 25 + { 26 + struct device_node *np; 27 + void __iomem *ocotp_base; 28 + u32 magic; 29 + u32 rev = 0; 30 + 31 + np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp"); 32 + if (!np) 33 + goto out; 34 + 35 + ocotp_base = of_iomap(np, 0); 36 + WARN_ON(!ocotp_base); 37 + 38 + magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1); 39 + if (magic == IMX8MQ_SW_MAGIC_B1) 40 + rev = REV_B1; 41 + 42 + iounmap(ocotp_base); 43 + 44 + out: 45 + of_node_put(np); 46 + return rev; 47 + } 48 + 49 + static const struct imx8_soc_data imx8mq_soc_data = { 50 + .name = "i.MX8MQ", 51 + .soc_revision = imx8mq_soc_revision, 52 + }; 53 + 54 + static const struct of_device_id imx8_soc_match[] = { 55 + { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, }, 56 + { } 57 + }; 58 + 59 + #define imx8_revision(soc_rev) \ 60 + soc_rev ? 
\ 61 + kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \ 62 + "unknown" 63 + 64 + static int __init imx8_soc_init(void) 65 + { 66 + struct soc_device_attribute *soc_dev_attr; 67 + struct soc_device *soc_dev; 68 + struct device_node *root; 69 + const struct of_device_id *id; 70 + u32 soc_rev = 0; 71 + const struct imx8_soc_data *data; 72 + int ret; 73 + 74 + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 75 + if (!soc_dev_attr) 76 + return -ENODEV; 77 + 78 + soc_dev_attr->family = "Freescale i.MX"; 79 + 80 + root = of_find_node_by_path("/"); 81 + ret = of_property_read_string(root, "model", &soc_dev_attr->machine); 82 + if (ret) 83 + goto free_soc; 84 + 85 + id = of_match_node(imx8_soc_match, root); 86 + if (!id) 87 + goto free_soc; 88 + 89 + of_node_put(root); 90 + 91 + data = id->data; 92 + if (data) { 93 + soc_dev_attr->soc_id = data->name; 94 + if (data->soc_revision) 95 + soc_rev = data->soc_revision(); 96 + } 97 + 98 + soc_dev_attr->revision = imx8_revision(soc_rev); 99 + if (!soc_dev_attr->revision) 100 + goto free_soc; 101 + 102 + soc_dev = soc_device_register(soc_dev_attr); 103 + if (IS_ERR(soc_dev)) 104 + goto free_rev; 105 + 106 + return 0; 107 + 108 + free_rev: 109 + kfree(soc_dev_attr->revision); 110 + free_soc: 111 + kfree(soc_dev_attr); 112 + of_node_put(root); 113 + return -ENODEV; 114 + } 115 + device_initcall(imx8_soc_init);
+109 -2
drivers/soc/mediatek/mtk-pmic-wrap.c
··· 381 381 PWRAP_EXT_GPS_AUXADC_RDATA_ADDR, 382 382 PWRAP_GPSINF_0_STA, 383 383 PWRAP_GPSINF_1_STA, 384 + 385 + /* MT8516 only regs */ 386 + PWRAP_OP_TYPE, 387 + PWRAP_MSB_FIRST, 384 388 }; 385 389 386 390 static int mt2701_regs[] = { ··· 856 852 [PWRAP_WACS2_VLDCLR] = 0xC28, 857 853 }; 858 854 855 + static int mt8516_regs[] = { 856 + [PWRAP_MUX_SEL] = 0x0, 857 + [PWRAP_WRAP_EN] = 0x4, 858 + [PWRAP_DIO_EN] = 0x8, 859 + [PWRAP_SIDLY] = 0xc, 860 + [PWRAP_RDDMY] = 0x10, 861 + [PWRAP_SI_CK_CON] = 0x14, 862 + [PWRAP_CSHEXT_WRITE] = 0x18, 863 + [PWRAP_CSHEXT_READ] = 0x1c, 864 + [PWRAP_CSLEXT_START] = 0x20, 865 + [PWRAP_CSLEXT_END] = 0x24, 866 + [PWRAP_STAUPD_PRD] = 0x28, 867 + [PWRAP_STAUPD_GRPEN] = 0x2c, 868 + [PWRAP_STAUPD_MAN_TRIG] = 0x40, 869 + [PWRAP_STAUPD_STA] = 0x44, 870 + [PWRAP_WRAP_STA] = 0x48, 871 + [PWRAP_HARB_INIT] = 0x4c, 872 + [PWRAP_HARB_HPRIO] = 0x50, 873 + [PWRAP_HIPRIO_ARB_EN] = 0x54, 874 + [PWRAP_HARB_STA0] = 0x58, 875 + [PWRAP_HARB_STA1] = 0x5c, 876 + [PWRAP_MAN_EN] = 0x60, 877 + [PWRAP_MAN_CMD] = 0x64, 878 + [PWRAP_MAN_RDATA] = 0x68, 879 + [PWRAP_MAN_VLDCLR] = 0x6c, 880 + [PWRAP_WACS0_EN] = 0x70, 881 + [PWRAP_INIT_DONE0] = 0x74, 882 + [PWRAP_WACS0_CMD] = 0x78, 883 + [PWRAP_WACS0_RDATA] = 0x7c, 884 + [PWRAP_WACS0_VLDCLR] = 0x80, 885 + [PWRAP_WACS1_EN] = 0x84, 886 + [PWRAP_INIT_DONE1] = 0x88, 887 + [PWRAP_WACS1_CMD] = 0x8c, 888 + [PWRAP_WACS1_RDATA] = 0x90, 889 + [PWRAP_WACS1_VLDCLR] = 0x94, 890 + [PWRAP_WACS2_EN] = 0x98, 891 + [PWRAP_INIT_DONE2] = 0x9c, 892 + [PWRAP_WACS2_CMD] = 0xa0, 893 + [PWRAP_WACS2_RDATA] = 0xa4, 894 + [PWRAP_WACS2_VLDCLR] = 0xa8, 895 + [PWRAP_INT_EN] = 0xac, 896 + [PWRAP_INT_FLG_RAW] = 0xb0, 897 + [PWRAP_INT_FLG] = 0xb4, 898 + [PWRAP_INT_CLR] = 0xb8, 899 + [PWRAP_SIG_ADR] = 0xbc, 900 + [PWRAP_SIG_MODE] = 0xc0, 901 + [PWRAP_SIG_VALUE] = 0xc4, 902 + [PWRAP_SIG_ERRVAL] = 0xc8, 903 + [PWRAP_CRC_EN] = 0xcc, 904 + [PWRAP_TIMER_EN] = 0xd0, 905 + [PWRAP_TIMER_STA] = 0xd4, 906 + [PWRAP_WDT_UNIT] = 0xd8, 907 + [PWRAP_WDT_SRC_EN] = 
0xdc, 908 + [PWRAP_WDT_FLG] = 0xe0, 909 + [PWRAP_DEBUG_INT_SEL] = 0xe4, 910 + [PWRAP_DVFS_ADR0] = 0xe8, 911 + [PWRAP_DVFS_WDATA0] = 0xec, 912 + [PWRAP_DVFS_ADR1] = 0xf0, 913 + [PWRAP_DVFS_WDATA1] = 0xf4, 914 + [PWRAP_DVFS_ADR2] = 0xf8, 915 + [PWRAP_DVFS_WDATA2] = 0xfc, 916 + [PWRAP_DVFS_ADR3] = 0x100, 917 + [PWRAP_DVFS_WDATA3] = 0x104, 918 + [PWRAP_DVFS_ADR4] = 0x108, 919 + [PWRAP_DVFS_WDATA4] = 0x10c, 920 + [PWRAP_DVFS_ADR5] = 0x110, 921 + [PWRAP_DVFS_WDATA5] = 0x114, 922 + [PWRAP_DVFS_ADR6] = 0x118, 923 + [PWRAP_DVFS_WDATA6] = 0x11c, 924 + [PWRAP_DVFS_ADR7] = 0x120, 925 + [PWRAP_DVFS_WDATA7] = 0x124, 926 + [PWRAP_SPMINF_STA] = 0x128, 927 + [PWRAP_CIPHER_KEY_SEL] = 0x12c, 928 + [PWRAP_CIPHER_IV_SEL] = 0x130, 929 + [PWRAP_CIPHER_EN] = 0x134, 930 + [PWRAP_CIPHER_RDY] = 0x138, 931 + [PWRAP_CIPHER_MODE] = 0x13c, 932 + [PWRAP_CIPHER_SWRST] = 0x140, 933 + [PWRAP_DCM_EN] = 0x144, 934 + [PWRAP_DCM_DBC_PRD] = 0x148, 935 + [PWRAP_SW_RST] = 0x168, 936 + [PWRAP_OP_TYPE] = 0x16c, 937 + [PWRAP_MSB_FIRST] = 0x170, 938 + }; 939 + 859 940 enum pmic_type { 860 941 PMIC_MT6323, 861 942 PMIC_MT6351, ··· 958 869 PWRAP_MT8135, 959 870 PWRAP_MT8173, 960 871 PWRAP_MT8183, 872 + PWRAP_MT8516, 961 873 }; 962 874 963 875 struct pmic_wrapper; ··· 1371 1281 static int pwrap_init_cipher(struct pmic_wrapper *wrp) 1372 1282 { 1373 1283 int ret; 1374 - u32 rdata; 1284 + u32 rdata = 0; 1375 1285 1376 1286 pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); 1377 1287 pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST); ··· 1387 1297 case PWRAP_MT6765: 1388 1298 case PWRAP_MT6797: 1389 1299 case PWRAP_MT8173: 1300 + case PWRAP_MT8516: 1390 1301 pwrap_writel(wrp, 1, PWRAP_CIPHER_EN); 1391 1302 break; 1392 1303 case PWRAP_MT7622: ··· 1569 1478 { 1570 1479 int ret; 1571 1480 1572 - reset_control_reset(wrp->rstc); 1481 + if (wrp->rstc) 1482 + reset_control_reset(wrp->rstc); 1573 1483 if (wrp->rstc_bridge) 1574 1484 reset_control_reset(wrp->rstc_bridge); 1575 1485 ··· 1856 1764 .init_soc_specific = 
pwrap_mt8183_init_soc_specific, 1857 1765 }; 1858 1766 1767 + static struct pmic_wrapper_type pwrap_mt8516 = { 1768 + .regs = mt8516_regs, 1769 + .type = PWRAP_MT8516, 1770 + .arb_en_all = 0xff, 1771 + .int_en_all = ~(u32)(BIT(31) | BIT(2)), 1772 + .spi_w = PWRAP_MAN_CMD_SPI_WRITE, 1773 + .wdt_src = PWRAP_WDT_SRC_MASK_ALL, 1774 + .caps = PWRAP_CAP_DCM, 1775 + .init_reg_clock = pwrap_mt2701_init_reg_clock, 1776 + .init_soc_specific = NULL, 1777 + }; 1778 + 1859 1779 static const struct of_device_id of_pwrap_match_tbl[] = { 1860 1780 { 1861 1781 .compatible = "mediatek,mt2701-pwrap", ··· 1890 1786 }, { 1891 1787 .compatible = "mediatek,mt8183-pwrap", 1892 1788 .data = &pwrap_mt8183, 1789 + }, { 1790 + .compatible = "mediatek,mt8516-pwrap", 1791 + .data = &pwrap_mt8516, 1893 1792 }, { 1894 1793 /* sentinel */ 1895 1794 }
+2 -2
drivers/soc/qcom/cmd-db.c
··· 248 248 } 249 249 250 250 cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB); 251 - if (IS_ERR_OR_NULL(cmd_db_header)) { 252 - ret = PTR_ERR(cmd_db_header); 251 + if (!cmd_db_header) { 252 + ret = -ENOMEM; 253 253 cmd_db_header = NULL; 254 254 return ret; 255 255 }
+2 -5
drivers/soc/qcom/qmi_interface.c
··· 345 345 struct qmi_handle *qmi = txn->qmi; 346 346 int ret; 347 347 348 - ret = wait_for_completion_interruptible_timeout(&txn->completion, 349 - timeout); 348 + ret = wait_for_completion_timeout(&txn->completion, timeout); 350 349 351 350 mutex_lock(&qmi->txn_lock); 352 351 mutex_lock(&txn->lock); ··· 353 354 mutex_unlock(&txn->lock); 354 355 mutex_unlock(&qmi->txn_lock); 355 356 356 - if (ret < 0) 357 - return ret; 358 - else if (ret == 0) 357 + if (ret == 0) 359 358 return -ETIMEDOUT; 360 359 else 361 360 return txn->result;
+21
drivers/soc/qcom/rmtfs_mem.c
··· 137 137 .name = "rmtfs", 138 138 }; 139 139 140 + static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma) 141 + { 142 + struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data; 143 + 144 + if (vma->vm_end - vma->vm_start > rmtfs_mem->size) { 145 + dev_dbg(&rmtfs_mem->dev, 146 + "vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n", 147 + vma->vm_end, vma->vm_start, 148 + (vma->vm_end - vma->vm_start), &rmtfs_mem->size); 149 + return -EINVAL; 150 + } 151 + 152 + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 153 + return remap_pfn_range(vma, 154 + vma->vm_start, 155 + rmtfs_mem->addr >> PAGE_SHIFT, 156 + vma->vm_end - vma->vm_start, 157 + vma->vm_page_prot); 158 + } 159 + 140 160 static const struct file_operations qcom_rmtfs_mem_fops = { 141 161 .owner = THIS_MODULE, 142 162 .open = qcom_rmtfs_mem_open, ··· 164 144 .write = qcom_rmtfs_mem_write, 165 145 .release = qcom_rmtfs_mem_release, 166 146 .llseek = default_llseek, 147 + .mmap = qcom_rmtfs_mem_mmap, 167 148 }; 168 149 169 150 static void qcom_rmtfs_mem_release_device(struct device *dev)
+1 -1
drivers/soc/qcom/rpmh-rsc.c
··· 459 459 do { 460 460 slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS, 461 461 i, msg->num_cmds, 0); 462 - if (slot == tcs->num_tcs * tcs->ncpt) 462 + if (slot >= tcs->num_tcs * tcs->ncpt) 463 463 return -ENOMEM; 464 464 i += tcs->ncpt; 465 465 } while (slot + msg->num_cmds - 1 >= i);
+3
drivers/soc/renesas/renesas-soc.c
··· 335 335 /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ 336 336 if ((product & 0x7fff) == 0x5210) 337 337 product ^= 0x11; 338 + /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ 339 + if ((product & 0x7fff) == 0x5211) 340 + product ^= 0x12; 338 341 if (soc->id && ((product >> 8) & 0xff) != soc->id) { 339 342 pr_warn("SoC mismatch (product = 0x%x)\n", product); 340 343 return -ENODEV;
+2
drivers/soc/rockchip/grf.c
··· 66 66 }; 67 67 68 68 #define RK3288_GRF_SOC_CON0 0x244 69 + #define RK3288_GRF_SOC_CON2 0x24c 69 70 70 71 static const struct rockchip_grf_value rk3288_defaults[] __initconst = { 71 72 { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) }, 73 + { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) }, 72 74 }; 73 75 74 76 static const struct rockchip_grf_info rk3288_grf __initconst = {
+136 -39
drivers/soc/tegra/pmc.c
··· 272 272 "WATCHDOG", 273 273 "SENSOR", 274 274 "SW_MAIN", 275 + "LP0" 276 + }; 277 + 278 + static const char * const tegra210_reset_sources[] = { 279 + "POWER_ON_RESET", 280 + "WATCHDOG", 281 + "SENSOR", 282 + "SW_MAIN", 275 283 "LP0", 276 284 "AOTAG" 277 285 }; ··· 664 656 int err; 665 657 666 658 err = tegra_powergate_power_up(pg, true); 667 - if (err) 659 + if (err) { 668 660 dev_err(dev, "failed to turn on PM domain %s: %d\n", 669 661 pg->genpd.name, err); 662 + goto out; 663 + } 670 664 665 + reset_control_release(pg->reset); 666 + 667 + out: 671 668 return err; 672 669 } 673 670 ··· 682 669 struct device *dev = pg->pmc->dev; 683 670 int err; 684 671 672 + err = reset_control_acquire(pg->reset); 673 + if (err < 0) { 674 + pr_err("failed to acquire resets: %d\n", err); 675 + return err; 676 + } 677 + 685 678 err = tegra_powergate_power_down(pg); 686 - if (err) 679 + if (err) { 687 680 dev_err(dev, "failed to turn off PM domain %s: %d\n", 688 681 pg->genpd.name, err); 682 + reset_control_release(pg->reset); 683 + } 689 684 690 685 return err; 691 686 } ··· 958 937 struct device *dev = pg->pmc->dev; 959 938 int err; 960 939 961 - pg->reset = of_reset_control_array_get_exclusive(np); 940 + pg->reset = of_reset_control_array_get_exclusive_released(np); 962 941 if (IS_ERR(pg->reset)) { 963 942 err = PTR_ERR(pg->reset); 964 943 dev_err(dev, "failed to get device resets: %d\n", err); 965 944 return err; 966 945 } 967 946 968 - if (off) 969 - err = reset_control_assert(pg->reset); 970 - else 971 - err = reset_control_deassert(pg->reset); 947 + err = reset_control_acquire(pg->reset); 948 + if (err < 0) { 949 + pr_err("failed to acquire resets: %d\n", err); 950 + goto out; 951 + } 972 952 973 - if (err) 953 + if (off) { 954 + err = reset_control_assert(pg->reset); 955 + } else { 956 + err = reset_control_deassert(pg->reset); 957 + if (err < 0) 958 + goto out; 959 + 960 + reset_control_release(pg->reset); 961 + } 962 + 963 + out: 964 + if (err) { 965 + 
reset_control_release(pg->reset); 974 966 reset_control_put(pg->reset); 967 + } 975 968 976 969 return err; 977 970 } 978 971 979 - static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) 972 + static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) 980 973 { 981 974 struct device *dev = pmc->dev; 982 975 struct tegra_powergate *pg; 983 - int id, err; 976 + int id, err = 0; 984 977 bool off; 985 978 986 979 pg = kzalloc(sizeof(*pg), GFP_KERNEL); 987 980 if (!pg) 988 - return; 981 + return -ENOMEM; 989 982 990 983 id = tegra_powergate_lookup(pmc, np->name); 991 984 if (id < 0) { 992 985 dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id); 986 + err = -ENODEV; 993 987 goto free_mem; 994 988 } 995 989 ··· 1057 1021 1058 1022 dev_dbg(dev, "added PM domain %s\n", pg->genpd.name); 1059 1023 1060 - return; 1024 + return 0; 1061 1025 1062 1026 remove_genpd: 1063 1027 pm_genpd_remove(&pg->genpd); ··· 1076 1040 1077 1041 free_mem: 1078 1042 kfree(pg); 1043 + 1044 + return err; 1079 1045 } 1080 1046 1081 - static void tegra_powergate_init(struct tegra_pmc *pmc, 1082 - struct device_node *parent) 1047 + static int tegra_powergate_init(struct tegra_pmc *pmc, 1048 + struct device_node *parent) 1083 1049 { 1084 1050 struct device_node *np, *child; 1085 - unsigned int i; 1051 + int err = 0; 1086 1052 1087 - /* Create a bitmap of the available and valid partitions */ 1088 - for (i = 0; i < pmc->soc->num_powergates; i++) 1089 - if (pmc->soc->powergates[i]) 1090 - set_bit(i, pmc->powergates_available); 1053 + np = of_get_child_by_name(parent, "powergates"); 1054 + if (!np) 1055 + return 0; 1056 + 1057 + for_each_child_of_node(np, child) { 1058 + err = tegra_powergate_add(pmc, child); 1059 + if (err < 0) { 1060 + of_node_put(child); 1061 + break; 1062 + } 1063 + } 1064 + 1065 + of_node_put(np); 1066 + 1067 + return err; 1068 + } 1069 + 1070 + static void tegra_powergate_remove(struct generic_pm_domain *genpd) 1071 + { 1072 + 
struct tegra_powergate *pg = to_powergate(genpd); 1073 + 1074 + reset_control_put(pg->reset); 1075 + 1076 + while (pg->num_clks--) 1077 + clk_put(pg->clks[pg->num_clks]); 1078 + 1079 + kfree(pg->clks); 1080 + 1081 + set_bit(pg->id, pmc->powergates_available); 1082 + 1083 + kfree(pg); 1084 + } 1085 + 1086 + static void tegra_powergate_remove_all(struct device_node *parent) 1087 + { 1088 + struct generic_pm_domain *genpd; 1089 + struct device_node *np, *child; 1091 1090 1092 1091 np = of_get_child_by_name(parent, "powergates"); 1093 1092 if (!np) 1094 1093 return; 1095 1094 1096 - for_each_child_of_node(np, child) 1097 - tegra_powergate_add(pmc, child); 1095 + for_each_child_of_node(np, child) { 1096 + of_genpd_del_provider(child); 1097 + 1098 + genpd = of_genpd_remove_last(child); 1099 + if (IS_ERR(genpd)) 1100 + continue; 1101 + 1102 + tegra_powergate_remove(genpd); 1103 + } 1098 1104 1099 1105 of_node_put(np); 1100 1106 } ··· 1787 1709 static ssize_t reset_reason_show(struct device *dev, 1788 1710 struct device_attribute *attr, char *buf) 1789 1711 { 1790 - u32 value, rst_src; 1712 + u32 value; 1791 1713 1792 1714 value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status); 1793 - rst_src = (value & pmc->soc->regs->rst_source_mask) >> 1794 - pmc->soc->regs->rst_source_shift; 1715 + value &= pmc->soc->regs->rst_source_mask; 1716 + value >>= pmc->soc->regs->rst_source_shift; 1795 1717 1796 - return sprintf(buf, "%s\n", pmc->soc->reset_sources[rst_src]); 1718 + if (WARN_ON(value >= pmc->soc->num_reset_sources)) 1719 + return sprintf(buf, "%s\n", "UNKNOWN"); 1720 + 1721 + return sprintf(buf, "%s\n", pmc->soc->reset_sources[value]); 1797 1722 } 1798 1723 1799 1724 static DEVICE_ATTR_RO(reset_reason); ··· 1804 1723 static ssize_t reset_level_show(struct device *dev, 1805 1724 struct device_attribute *attr, char *buf) 1806 1725 { 1807 - u32 value, rst_lvl; 1726 + u32 value; 1808 1727 1809 1728 value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status); 1810 - rst_lvl = (value 
& pmc->soc->regs->rst_level_mask) >> 1811 - pmc->soc->regs->rst_level_shift; 1729 + value &= pmc->soc->regs->rst_level_mask; 1730 + value >>= pmc->soc->regs->rst_level_shift; 1812 1731 1813 - return sprintf(buf, "%s\n", pmc->soc->reset_levels[rst_lvl]); 1732 + if (WARN_ON(value >= pmc->soc->num_reset_levels)) 1733 + return sprintf(buf, "%s\n", "UNKNOWN"); 1734 + 1735 + return sprintf(buf, "%s\n", pmc->soc->reset_levels[value]); 1814 1736 } 1815 1737 1816 1738 static DEVICE_ATTR_RO(reset_level); ··· 2083 1999 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 2084 2000 err = tegra_powergate_debugfs_init(); 2085 2001 if (err < 0) 2086 - return err; 2002 + goto cleanup_sysfs; 2087 2003 } 2088 2004 2089 2005 err = register_restart_handler(&tegra_pmc_restart_handler); ··· 2097 2013 if (err) 2098 2014 goto cleanup_restart_handler; 2099 2015 2016 + err = tegra_powergate_init(pmc, pdev->dev.of_node); 2017 + if (err < 0) 2018 + goto cleanup_powergates; 2019 + 2100 2020 err = tegra_pmc_irq_init(pmc); 2101 2021 if (err < 0) 2102 - goto cleanup_restart_handler; 2022 + goto cleanup_powergates; 2103 2023 2104 2024 mutex_lock(&pmc->powergates_lock); 2105 2025 iounmap(pmc->base); ··· 2114 2026 2115 2027 return 0; 2116 2028 2029 + cleanup_powergates: 2030 + tegra_powergate_remove_all(pdev->dev.of_node); 2117 2031 cleanup_restart_handler: 2118 2032 unregister_restart_handler(&tegra_pmc_restart_handler); 2119 2033 cleanup_debugfs: 2120 2034 debugfs_remove(pmc->debugfs); 2035 + cleanup_sysfs: 2036 + device_remove_file(&pdev->dev, &dev_attr_reset_reason); 2037 + device_remove_file(&pdev->dev, &dev_attr_reset_level); 2121 2038 return err; 2122 2039 } 2123 2040 ··· 2278 2185 .init = tegra20_pmc_init, 2279 2186 .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, 2280 2187 .reset_sources = tegra30_reset_sources, 2281 - .num_reset_sources = 5, 2188 + .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources), 2282 2189 .reset_levels = NULL, 2283 2190 .num_reset_levels = 0, 2284 2191 }; ··· 2329 2236 .init = 
tegra20_pmc_init, 2330 2237 .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, 2331 2238 .reset_sources = tegra30_reset_sources, 2332 - .num_reset_sources = 5, 2239 + .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources), 2333 2240 .reset_levels = NULL, 2334 2241 .num_reset_levels = 0, 2335 2242 }; ··· 2440 2347 .init = tegra20_pmc_init, 2441 2348 .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, 2442 2349 .reset_sources = tegra30_reset_sources, 2443 - .num_reset_sources = 5, 2350 + .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources), 2444 2351 .reset_levels = NULL, 2445 2352 .num_reset_levels = 0, 2446 2353 }; ··· 2545 2452 .regs = &tegra20_pmc_regs, 2546 2453 .init = tegra20_pmc_init, 2547 2454 .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, 2548 - .reset_sources = tegra30_reset_sources, 2549 - .num_reset_sources = 5, 2455 + .reset_sources = tegra210_reset_sources, 2456 + .num_reset_sources = ARRAY_SIZE(tegra210_reset_sources), 2550 2457 .reset_levels = NULL, 2551 2458 .num_reset_levels = 0, 2552 2459 }; ··· 2671 2578 .init = NULL, 2672 2579 .setup_irq_polarity = tegra186_pmc_setup_irq_polarity, 2673 2580 .reset_sources = tegra186_reset_sources, 2674 - .num_reset_sources = 14, 2581 + .num_reset_sources = ARRAY_SIZE(tegra186_reset_sources), 2675 2582 .reset_levels = tegra186_reset_levels, 2676 - .num_reset_levels = 3, 2583 + .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels), 2677 2584 .num_wake_events = ARRAY_SIZE(tegra186_wake_events), 2678 2585 .wake_events = tegra186_wake_events, 2679 2586 }; ··· 2812 2719 const struct of_device_id *match; 2813 2720 struct device_node *np; 2814 2721 struct resource regs; 2722 + unsigned int i; 2815 2723 bool invert; 2816 2724 2817 2725 mutex_init(&pmc->powergates_lock); ··· 2869 2775 if (pmc->soc->maybe_tz_only) 2870 2776 pmc->tz_only = tegra_pmc_detect_tz_only(pmc); 2871 2777 2872 - tegra_powergate_init(pmc, np); 2778 + /* Create a bitmap of the available and valid partitions */ 2779 + for (i = 0; i < 
pmc->soc->num_powergates; i++) 2780 + if (pmc->soc->powergates[i]) 2781 + set_bit(i, pmc->powergates_available); 2873 2782 2874 2783 /* 2875 2784 * Invert the interrupt polarity if a PMC device tree node
+3 -2
drivers/soc/ti/Kconfig
··· 45 45 config AMX3_PM 46 46 tristate "AMx3 Power Management" 47 47 depends on SOC_AM33XX || SOC_AM43XX 48 - depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM 48 + depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM && RTC_DRV_OMAP 49 49 help 50 50 Enable power management on AM335x and AM437x. Required for suspend to mem 51 51 and standby states on both AM335x and AM437x platforms and for deeper cpuidle 52 - c-states on AM335x. 52 + c-states on AM335x. Also required for rtc and ddr in self-refresh low 53 + power mode on AM437x platforms. 53 54 54 55 config WKUP_M3_IPC 55 56 tristate "TI AMx3 Wkup-M3 IPC Driver"
+222 -49
drivers/soc/ti/pm33xx.c
··· 6 6 * Vaibhav Bedia, Dave Gerlach 7 7 */ 8 8 9 + #include <linux/clk.h> 9 10 #include <linux/cpu.h> 10 11 #include <linux/err.h> 11 12 #include <linux/genalloc.h> ··· 14 13 #include <linux/init.h> 15 14 #include <linux/io.h> 16 15 #include <linux/module.h> 16 + #include <linux/nvmem-consumer.h> 17 17 #include <linux/of.h> 18 18 #include <linux/platform_data/pm33xx.h> 19 19 #include <linux/platform_device.h> 20 + #include <linux/rtc.h> 21 + #include <linux/rtc/rtc-omap.h> 20 22 #include <linux/sizes.h> 21 23 #include <linux/sram.h> 22 24 #include <linux/suspend.h> ··· 33 29 #define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \ 34 30 (unsigned long)pm_sram->do_wfi) 35 31 32 + #define RTC_SCRATCH_RESUME_REG 0 33 + #define RTC_SCRATCH_MAGIC_REG 1 34 + #define RTC_REG_BOOT_MAGIC 0x8cd0 /* RTC */ 35 + #define GIC_INT_SET_PENDING_BASE 0x200 36 + #define AM43XX_GIC_DIST_BASE 0x48241000 37 + 38 + static u32 rtc_magic_val; 39 + 36 40 static int (*am33xx_do_wfi_sram)(unsigned long unused); 37 41 static phys_addr_t am33xx_do_wfi_sram_phys; 38 42 39 43 static struct gen_pool *sram_pool, *sram_pool_data; 40 44 static unsigned long ocmcram_location, ocmcram_location_data; 45 + 46 + static struct rtc_device *omap_rtc; 47 + static void __iomem *gic_dist_base; 41 48 42 49 static struct am33xx_pm_platform_data *pm_ops; 43 50 static struct am33xx_pm_sram_addr *pm_sram; ··· 56 41 static struct device *pm33xx_dev; 57 42 static struct wkup_m3_ipc *m3_ipc; 58 43 44 + #ifdef CONFIG_SUSPEND 45 + static int rtc_only_idle; 46 + static int retrigger_irq; 59 47 static unsigned long suspend_wfi_flags; 48 + 49 + static struct wkup_m3_wakeup_src wakeup_src = {.irq_nr = 0, 50 + .src = "Unknown", 51 + }; 52 + 53 + static struct wkup_m3_wakeup_src rtc_alarm_wakeup = { 54 + .irq_nr = 108, .src = "RTC Alarm", 55 + }; 56 + 57 + static struct wkup_m3_wakeup_src rtc_ext_wakeup = { 58 + .irq_nr = 0, .src = "Ext wakeup", 59 + }; 60 + #endif 60 61 61 62 static u32 
sram_suspend_address(unsigned long addr) 62 63 { ··· 80 49 AMX3_PM_SRAM_SYMBOL_OFFSET(addr)); 81 50 } 82 51 52 + static int am33xx_push_sram_idle(void) 53 + { 54 + struct am33xx_pm_ro_sram_data ro_sram_data; 55 + int ret; 56 + u32 table_addr, ro_data_addr; 57 + void *copy_addr; 58 + 59 + ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data; 60 + ro_sram_data.amx3_pm_sram_data_phys = 61 + gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data); 62 + ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr(); 63 + 64 + /* Save physical address to calculate resume offset during pm init */ 65 + am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool, 66 + ocmcram_location); 67 + 68 + am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location, 69 + pm_sram->do_wfi, 70 + *pm_sram->do_wfi_sz); 71 + if (!am33xx_do_wfi_sram) { 72 + dev_err(pm33xx_dev, 73 + "PM: %s: am33xx_do_wfi copy to sram failed\n", 74 + __func__); 75 + return -ENODEV; 76 + } 77 + 78 + table_addr = 79 + sram_suspend_address((unsigned long)pm_sram->emif_sram_table); 80 + ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr); 81 + if (ret) { 82 + dev_dbg(pm33xx_dev, 83 + "PM: %s: EMIF function copy failed\n", __func__); 84 + return -EPROBE_DEFER; 85 + } 86 + 87 + ro_data_addr = 88 + sram_suspend_address((unsigned long)pm_sram->ro_sram_data); 89 + copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr, 90 + &ro_sram_data, 91 + sizeof(ro_sram_data)); 92 + if (!copy_addr) { 93 + dev_err(pm33xx_dev, 94 + "PM: %s: ro_sram_data copy to sram failed\n", 95 + __func__); 96 + return -ENODEV; 97 + } 98 + 99 + return 0; 100 + } 101 + 102 + static int __init am43xx_map_gic(void) 103 + { 104 + gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K); 105 + 106 + if (!gic_dist_base) 107 + return -ENOMEM; 108 + 109 + return 0; 110 + } 111 + 83 112 #ifdef CONFIG_SUSPEND 113 + struct wkup_m3_wakeup_src rtc_wake_src(void) 114 + { 115 + u32 i; 116 + 117 + i = 
__raw_readl(pm_ops->get_rtc_base_addr() + 0x44) & 0x40; 118 + 119 + if (i) { 120 + retrigger_irq = rtc_alarm_wakeup.irq_nr; 121 + return rtc_alarm_wakeup; 122 + } 123 + 124 + retrigger_irq = rtc_ext_wakeup.irq_nr; 125 + 126 + return rtc_ext_wakeup; 127 + } 128 + 129 + int am33xx_rtc_only_idle(unsigned long wfi_flags) 130 + { 131 + omap_rtc_power_off_program(&omap_rtc->dev); 132 + am33xx_do_wfi_sram(wfi_flags); 133 + return 0; 134 + } 135 + 84 136 static int am33xx_pm_suspend(suspend_state_t suspend_state) 85 137 { 86 138 int i, ret = 0; 87 139 88 - ret = pm_ops->soc_suspend((unsigned long)suspend_state, 89 - am33xx_do_wfi_sram, suspend_wfi_flags); 140 + if (suspend_state == PM_SUSPEND_MEM && 141 + pm_ops->check_off_mode_enable()) { 142 + pm_ops->prepare_rtc_suspend(); 143 + pm_ops->save_context(); 144 + suspend_wfi_flags |= WFI_FLAG_RTC_ONLY; 145 + clk_save_context(); 146 + ret = pm_ops->soc_suspend(suspend_state, am33xx_rtc_only_idle, 147 + suspend_wfi_flags); 148 + 149 + suspend_wfi_flags &= ~WFI_FLAG_RTC_ONLY; 150 + 151 + if (!ret) { 152 + clk_restore_context(); 153 + pm_ops->restore_context(); 154 + m3_ipc->ops->set_rtc_only(m3_ipc); 155 + am33xx_push_sram_idle(); 156 + } 157 + } else { 158 + ret = pm_ops->soc_suspend(suspend_state, am33xx_do_wfi_sram, 159 + suspend_wfi_flags); 160 + } 90 161 91 162 if (ret) { 92 163 dev_err(pm33xx_dev, "PM: Kernel suspend failure\n"); ··· 210 77 "PM: CM3 returned unknown result = %d\n", i); 211 78 ret = -1; 212 79 } 80 + 81 + /* print the wakeup reason */ 82 + if (rtc_only_idle) { 83 + wakeup_src = rtc_wake_src(); 84 + pr_info("PM: Wakeup source %s\n", wakeup_src.src); 85 + } else { 86 + pr_info("PM: Wakeup source %s\n", 87 + m3_ipc->ops->request_wake_src(m3_ipc)); 88 + } 213 89 } 90 + 91 + if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) 92 + pm_ops->prepare_rtc_resume(); 214 93 215 94 return ret; 216 95 } ··· 246 101 static int am33xx_pm_begin(suspend_state_t state) 247 102 { 248 103 int ret = -EINVAL; 
104 + struct nvmem_device *nvmem; 105 + 106 + if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) { 107 + nvmem = devm_nvmem_device_get(&omap_rtc->dev, 108 + "omap_rtc_scratch0"); 109 + if (nvmem) 110 + nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, 111 + (void *)&rtc_magic_val); 112 + rtc_only_idle = 1; 113 + } else { 114 + rtc_only_idle = 0; 115 + } 249 116 250 117 switch (state) { 251 118 case PM_SUSPEND_MEM: ··· 273 116 274 117 static void am33xx_pm_end(void) 275 118 { 119 + u32 val = 0; 120 + struct nvmem_device *nvmem; 121 + 122 + nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); 276 123 m3_ipc->ops->finish_low_power(m3_ipc); 124 + if (rtc_only_idle) { 125 + if (retrigger_irq) 126 + /* 127 + * 32 bits of Interrupt Set-Pending correspond to 32 128 + * 32 interrupts. Compute the bit offset of the 129 + * Interrupt and set that particular bit 130 + * Compute the register offset by dividing interrupt 131 + * number by 32 and mutiplying by 4 132 + */ 133 + writel_relaxed(1 << (retrigger_irq & 31), 134 + gic_dist_base + GIC_INT_SET_PENDING_BASE 135 + + retrigger_irq / 32 * 4); 136 + nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, 137 + (void *)&val); 138 + } 139 + 140 + rtc_only_idle = 0; 277 141 } 278 142 279 143 static int am33xx_pm_valid(suspend_state_t state) ··· 397 219 return ret; 398 220 } 399 221 400 - static int am33xx_push_sram_idle(void) 222 + static int am33xx_pm_rtc_setup(void) 401 223 { 402 - struct am33xx_pm_ro_sram_data ro_sram_data; 403 - int ret; 404 - u32 table_addr, ro_data_addr; 405 - void *copy_addr; 224 + struct device_node *np; 225 + unsigned long val = 0; 226 + struct nvmem_device *nvmem; 406 227 407 - ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data; 408 - ro_sram_data.amx3_pm_sram_data_phys = 409 - gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data); 410 - ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr(); 228 + np = of_find_node_by_name(NULL, "rtc"); 411 229 412 - 
/* Save physical address to calculate resume offset during pm init */ 413 - am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool, 414 - ocmcram_location); 230 + if (of_device_is_available(np)) { 231 + omap_rtc = rtc_class_open("rtc0"); 232 + if (!omap_rtc) { 233 + pr_warn("PM: rtc0 not available"); 234 + return -EPROBE_DEFER; 235 + } 415 236 416 - am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location, 417 - pm_sram->do_wfi, 418 - *pm_sram->do_wfi_sz); 419 - if (!am33xx_do_wfi_sram) { 420 - dev_err(pm33xx_dev, 421 - "PM: %s: am33xx_do_wfi copy to sram failed\n", 422 - __func__); 423 - return -ENODEV; 424 - } 237 + nvmem = devm_nvmem_device_get(&omap_rtc->dev, 238 + "omap_rtc_scratch0"); 239 + if (nvmem) { 240 + nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 241 + 4, (void *)&rtc_magic_val); 242 + if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC) 243 + pr_warn("PM: bootloader does not support rtc-only!\n"); 425 244 426 - table_addr = 427 - sram_suspend_address((unsigned long)pm_sram->emif_sram_table); 428 - ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr); 429 - if (ret) { 430 - dev_dbg(pm33xx_dev, 431 - "PM: %s: EMIF function copy failed\n", __func__); 432 - return -EPROBE_DEFER; 433 - } 434 - 435 - ro_data_addr = 436 - sram_suspend_address((unsigned long)pm_sram->ro_sram_data); 437 - copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr, 438 - &ro_sram_data, 439 - sizeof(ro_sram_data)); 440 - if (!copy_addr) { 441 - dev_err(pm33xx_dev, 442 - "PM: %s: ro_sram_data copy to sram failed\n", 443 - __func__); 444 - return -ENODEV; 245 + nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 246 + 4, (void *)&val); 247 + val = pm_sram->resume_address; 248 + nvmem_device_write(nvmem, RTC_SCRATCH_RESUME_REG * 4, 249 + 4, (void *)&val); 250 + } 251 + } else { 252 + pr_warn("PM: no-rtc available, rtc-only mode disabled.\n"); 445 253 } 446 254 447 255 return 0; ··· 448 284 return -ENODEV; 449 285 } 450 286 287 + ret = 
am43xx_map_gic(); 288 + if (ret) { 289 + pr_err("PM: Could not ioremap GIC base\n"); 290 + return ret; 291 + } 292 + 451 293 pm_sram = pm_ops->get_sram_addrs(); 452 294 if (!pm_sram) { 453 295 dev_err(dev, "PM: Cannot get PM asm function addresses!!\n"); 454 296 return -ENODEV; 297 + } 298 + 299 + m3_ipc = wkup_m3_ipc_get(); 300 + if (!m3_ipc) { 301 + pr_err("PM: Cannot get wkup_m3_ipc handle\n"); 302 + return -EPROBE_DEFER; 455 303 } 456 304 457 305 pm33xx_dev = dev; ··· 472 296 if (ret) 473 297 return ret; 474 298 475 - ret = am33xx_push_sram_idle(); 299 + ret = am33xx_pm_rtc_setup(); 476 300 if (ret) 477 301 goto err_free_sram; 478 302 479 - m3_ipc = wkup_m3_ipc_get(); 480 - if (!m3_ipc) { 481 - dev_dbg(dev, "PM: Cannot get wkup_m3_ipc handle\n"); 482 - ret = -EPROBE_DEFER; 303 + ret = am33xx_push_sram_idle(); 304 + if (ret) 483 305 goto err_free_sram; 484 - } 485 306 486 307 am33xx_pm_set_ipc_ops(); 487 308 488 309 #ifdef CONFIG_SUSPEND 489 310 suspend_set_ops(&am33xx_pm_ops); 490 - #endif /* CONFIG_SUSPEND */ 491 311 492 312 /* 493 313 * For a system suspend we must flush the caches, we want ··· 495 323 suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH; 496 324 suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF; 497 325 suspend_wfi_flags |= WFI_FLAG_WAKE_M3; 326 + #endif /* CONFIG_SUSPEND */ 498 327 499 328 ret = pm_ops->init(); 500 329 if (ret) {
+10 -8
drivers/soc/xilinx/zynqmp_pm_domains.c
··· 23 23 /* Flag stating if PM nodes mapped to the PM domain has been requested */ 24 24 #define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0) 25 25 26 + static const struct zynqmp_eemi_ops *eemi_ops; 27 + 26 28 /** 27 29 * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain 28 30 * @gpd: Generic power domain ··· 73 71 { 74 72 int ret; 75 73 struct zynqmp_pm_domain *pd; 76 - const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 77 74 78 - if (!eemi_ops || !eemi_ops->set_requirement) 75 + if (!eemi_ops->set_requirement) 79 76 return -ENXIO; 80 77 81 78 pd = container_of(domain, struct zynqmp_pm_domain, gpd); ··· 108 107 struct zynqmp_pm_domain *pd; 109 108 u32 capabilities = 0; 110 109 bool may_wakeup; 111 - const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 112 110 113 - if (!eemi_ops || !eemi_ops->set_requirement) 111 + if (!eemi_ops->set_requirement) 114 112 return -ENXIO; 115 113 116 114 pd = container_of(domain, struct zynqmp_pm_domain, gpd); ··· 160 160 { 161 161 int ret; 162 162 struct zynqmp_pm_domain *pd; 163 - const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 164 163 165 - if (!eemi_ops || !eemi_ops->request_node) 164 + if (!eemi_ops->request_node) 166 165 return -ENXIO; 167 166 168 167 pd = container_of(domain, struct zynqmp_pm_domain, gpd); ··· 196 197 { 197 198 int ret; 198 199 struct zynqmp_pm_domain *pd; 199 - const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 200 200 201 - if (!eemi_ops || !eemi_ops->release_node) 201 + if (!eemi_ops->release_node) 202 202 return; 203 203 204 204 pd = container_of(domain, struct zynqmp_pm_domain, gpd); ··· 263 265 struct generic_pm_domain **domains; 264 266 struct zynqmp_pm_domain *pd; 265 267 struct device *dev = &pdev->dev; 268 + 269 + eemi_ops = zynqmp_pm_get_eemi_ops(); 270 + if (IS_ERR(eemi_ops)) 271 + return PTR_ERR(eemi_ops); 266 272 267 273 pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL); 268 274 if (!pd)
+6 -4
drivers/soc/xilinx/zynqmp_power.c
··· 31 31 }; 32 32 33 33 static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD; 34 + static const struct zynqmp_eemi_ops *eemi_ops; 34 35 35 36 enum pm_api_cb_id { 36 37 PM_INIT_SUSPEND_CB = 30, ··· 93 92 const char *buf, size_t count) 94 93 { 95 94 int md, ret = -EINVAL; 96 - const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 97 95 98 - if (!eemi_ops || !eemi_ops->set_suspend_mode) 96 + if (!eemi_ops->set_suspend_mode) 99 97 return ret; 100 98 101 99 for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++) ··· 120 120 int ret, irq; 121 121 u32 pm_api_version; 122 122 123 - const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 123 + eemi_ops = zynqmp_pm_get_eemi_ops(); 124 + if (IS_ERR(eemi_ops)) 125 + return PTR_ERR(eemi_ops); 124 126 125 - if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize) 127 + if (!eemi_ops->get_api_version || !eemi_ops->init_finalize) 126 128 return -ENXIO; 127 129 128 130 eemi_ops->init_finalize();
+6
drivers/spi/spi-zynqmp-gqspi.c
··· 14 14 #include <linux/delay.h> 15 15 #include <linux/dma-mapping.h> 16 16 #include <linux/dmaengine.h> 17 + #include <linux/firmware/xlnx-zynqmp.h> 17 18 #include <linux/interrupt.h> 18 19 #include <linux/io.h> 19 20 #include <linux/module.h> ··· 139 138 140 139 #define SPI_AUTOSUSPEND_TIMEOUT 3000 141 140 enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA}; 141 + static const struct zynqmp_eemi_ops *eemi_ops; 142 142 143 143 /** 144 144 * struct zynqmp_qspi - Defines qspi driver instance ··· 1022 1020 struct zynqmp_qspi *xqspi; 1023 1021 struct resource *res; 1024 1022 struct device *dev = &pdev->dev; 1023 + 1024 + eemi_ops = zynqmp_pm_get_eemi_ops(); 1025 + if (IS_ERR(eemi_ops)) 1026 + return PTR_ERR(eemi_ops); 1025 1027 1026 1028 master = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); 1027 1029 if (!master)
+48 -30
drivers/tee/optee/core.c
··· 419 419 return true; 420 420 } 421 421 422 + static struct tee_shm_pool *optee_config_dyn_shm(void) 423 + { 424 + struct tee_shm_pool_mgr *priv_mgr; 425 + struct tee_shm_pool_mgr *dmabuf_mgr; 426 + void *rc; 427 + 428 + rc = optee_shm_pool_alloc_pages(); 429 + if (IS_ERR(rc)) 430 + return rc; 431 + priv_mgr = rc; 432 + 433 + rc = optee_shm_pool_alloc_pages(); 434 + if (IS_ERR(rc)) { 435 + tee_shm_pool_mgr_destroy(priv_mgr); 436 + return rc; 437 + } 438 + dmabuf_mgr = rc; 439 + 440 + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); 441 + if (IS_ERR(rc)) { 442 + tee_shm_pool_mgr_destroy(priv_mgr); 443 + tee_shm_pool_mgr_destroy(dmabuf_mgr); 444 + } 445 + 446 + return rc; 447 + } 448 + 422 449 static struct tee_shm_pool * 423 - optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm, 424 - u32 sec_caps) 450 + optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) 425 451 { 426 452 union { 427 453 struct arm_smccc_res smccc; ··· 462 436 struct tee_shm_pool_mgr *priv_mgr; 463 437 struct tee_shm_pool_mgr *dmabuf_mgr; 464 438 void *rc; 439 + const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; 465 440 466 441 invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); 467 442 if (res.result.status != OPTEE_SMC_RETURN_OK) { 468 - pr_info("shm service not available\n"); 443 + pr_err("static shm service not available\n"); 469 444 return ERR_PTR(-ENOENT); 470 445 } 471 446 ··· 492 465 } 493 466 vaddr = (unsigned long)va; 494 467 495 - /* 496 - * If OP-TEE can work with unregistered SHM, we will use own pool 497 - * for private shm 498 - */ 499 - if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) { 500 - rc = optee_shm_pool_alloc_pages(); 501 - if (IS_ERR(rc)) 502 - goto err_memunmap; 503 - priv_mgr = rc; 504 - } else { 505 - const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; 468 + rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, 469 + 3 /* 8 bytes aligned */); 470 + if (IS_ERR(rc)) 471 + goto err_memunmap; 472 + 
priv_mgr = rc; 506 473 507 - rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, 508 - 3 /* 8 bytes aligned */); 509 - if (IS_ERR(rc)) 510 - goto err_memunmap; 511 - priv_mgr = rc; 512 - 513 - vaddr += sz; 514 - paddr += sz; 515 - size -= sz; 516 - } 474 + vaddr += sz; 475 + paddr += sz; 476 + size -= sz; 517 477 518 478 rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT); 519 479 if (IS_ERR(rc)) ··· 566 552 static struct optee *optee_probe(struct device_node *np) 567 553 { 568 554 optee_invoke_fn *invoke_fn; 569 - struct tee_shm_pool *pool; 555 + struct tee_shm_pool *pool = ERR_PTR(-EINVAL); 570 556 struct optee *optee = NULL; 571 557 void *memremaped_shm = NULL; 572 558 struct tee_device *teedev; ··· 595 581 } 596 582 597 583 /* 598 - * We have no other option for shared memory, if secure world 599 - * doesn't have any reserved memory we can use we can't continue. 584 + * Try to use dynamic shared memory if possible 600 585 */ 601 - if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) 602 - return ERR_PTR(-EINVAL); 586 + if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 587 + pool = optee_config_dyn_shm(); 603 588 604 - pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps); 589 + /* 590 + * If dynamic shared memory is not available or failed - try static one 591 + */ 592 + if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) 593 + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); 594 + 605 595 if (IS_ERR(pool)) 606 596 return (void *)pool; 607 597
+5
include/linux/firmware/imx/sci.h
··· 15 15 16 16 #include <linux/firmware/imx/svc/misc.h> 17 17 #include <linux/firmware/imx/svc/pm.h> 18 + 19 + int imx_scu_enable_general_irq_channel(struct device *dev); 20 + int imx_scu_irq_register_notifier(struct notifier_block *nb); 21 + int imx_scu_irq_unregister_notifier(struct notifier_block *nb); 22 + int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable); 18 23 #endif /* _SC_SCI_H */
+13 -1
include/linux/firmware/xlnx-zynqmp.h
··· 48 48 #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U 49 49 #define ZYNQMP_PM_CAPABILITY_POWER 0x8U 50 50 51 + /* 52 + * Firmware FPGA Manager flags 53 + * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration 54 + * XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration 55 + */ 56 + #define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U 57 + #define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0) 58 + 51 59 enum pm_api_id { 52 60 PM_GET_API_VERSION = 1, 53 61 PM_REQUEST_NODE = 13, ··· 64 56 PM_RESET_ASSERT = 17, 65 57 PM_RESET_GET_STATUS, 66 58 PM_PM_INIT_FINALIZE = 21, 59 + PM_FPGA_LOAD, 60 + PM_FPGA_GET_STATUS, 67 61 PM_GET_CHIPID = 24, 68 62 PM_IOCTL = 34, 69 63 PM_QUERY_DATA, ··· 268 258 struct zynqmp_eemi_ops { 269 259 int (*get_api_version)(u32 *version); 270 260 int (*get_chipid)(u32 *idcode, u32 *version); 261 + int (*fpga_load)(const u64 address, const u32 size, const u32 flags); 262 + int (*fpga_get_status)(u32 *value); 271 263 int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); 272 264 int (*clock_enable)(u32 clock_id); 273 265 int (*clock_disable)(u32 clock_id); ··· 305 293 #else 306 294 static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) 307 295 { 308 - return NULL; 296 + return ERR_PTR(-ENODEV); 309 297 } 310 298 #endif 311 299
+5
include/linux/platform_data/pm33xx.h
··· 51 51 unsigned long args); 52 52 struct am33xx_pm_sram_addr *(*get_sram_addrs)(void); 53 53 void __iomem *(*get_rtc_base_addr)(void); 54 + void (*save_context)(void); 55 + void (*restore_context)(void); 56 + void (*prepare_rtc_suspend)(void); 57 + void (*prepare_rtc_resume)(void); 58 + int (*check_off_mode_enable)(void); 54 59 }; 55 60 56 61 struct am33xx_pm_sram_data {
+2
include/linux/reset.h
··· 2 2 #ifndef _LINUX_RESET_H_ 3 3 #define _LINUX_RESET_H_ 4 4 5 + #include <linux/err.h> 6 + #include <linux/errno.h> 5 7 #include <linux/types.h> 6 8 7 9 struct device;
+7
include/linux/rtc/rtc-omap.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _LINUX_RTCOMAP_H_ 4 + #define _LINUX_RTCOMAP_H_ 5 + 6 + int omap_rtc_power_off_program(struct device *dev); 7 + #endif /* _LINUX_RTCOMAP_H_ */
+3
include/linux/ti-emif-sram.h
··· 55 55 struct ti_emif_pm_functions { 56 56 u32 save_context; 57 57 u32 restore_context; 58 + u32 run_hw_leveling; 58 59 u32 enter_sr; 59 60 u32 exit_sr; 60 61 u32 abort_sr; ··· 127 126 offsetof(struct ti_emif_pm_functions, save_context)); 128 127 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET, 129 128 offsetof(struct ti_emif_pm_functions, restore_context)); 129 + DEFINE(EMIF_PM_RUN_HW_LEVELING, 130 + offsetof(struct ti_emif_pm_functions, run_hw_leveling)); 130 131 DEFINE(EMIF_PM_ENTER_SR_OFFSET, 131 132 offsetof(struct ti_emif_pm_functions, enter_sr)); 132 133 DEFINE(EMIF_PM_EXIT_SR_OFFSET,