Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'soc-drivers-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull SoC driver updates from Arnd Bergmann:
"This is the first half of the driver changes:

- A treewide interface change to the "syscore" operations for power
management, as a preparation for future Tegra specific changes

- Reset controller updates with added drivers for LAN969x, eic7700 and
RZ/G3S SoCs

- Protection of system controller registers on Renesas and Google
SoCs, to prevent trivially triggering a system crash from e.g.
debugfs access

- soc_device identification updates on Nvidia, Exynos and Mediatek

- debugfs support in the ST STM32 firewall driver

- Minor updates for SoC drivers on AMD/Xilinx, Renesas, Allwinner, TI

- Cleanups for memory controller support on Nvidia and Renesas"

* tag 'soc-drivers-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (114 commits)
memory: tegra186-emc: Fix missing put_bpmp
Documentation: reset: Remove reset_controller_add_lookup()
reset: fix BIT macro reference
reset: rzg2l-usbphy-ctrl: Fix a NULL vs IS_ERR() bug in probe
reset: th1520: Support reset controllers in more subsystems
reset: th1520: Prepare for supporting multiple controllers
dt-bindings: reset: thead,th1520-reset: Add controllers for more subsys
dt-bindings: reset: thead,th1520-reset: Remove non-VO-subsystem resets
reset: remove legacy reset lookup code
clk: davinci: psc: drop unused reset lookup
reset: rzg2l-usbphy-ctrl: Add support for RZ/G3S SoC
reset: rzg2l-usbphy-ctrl: Add support for USB PWRRDY
dt-bindings: reset: renesas,rzg2l-usbphy-ctrl: Document RZ/G3S support
reset: eswin: Add eic7700 reset driver
dt-bindings: reset: eswin: Documentation for eic7700 SoC
reset: sparx5: add LAN969x support
dt-bindings: reset: microchip: Add LAN969x support
soc: rockchip: grf: Add select correct PWM implementation on RK3368
soc/tegra: pmc: Add USB wake events for Tegra234
amba: tegra-ahb: Fix device leak on SMMU enable
...

+5897 -1291
+6 -2
Documentation/devicetree/bindings/bus/st,stm32mp25-rifsc.yaml
··· 33 33 properties: 34 34 compatible: 35 35 contains: 36 - const: st,stm32mp25-rifsc 36 + enum: 37 + - st,stm32mp21-rifsc 38 + - st,stm32mp25-rifsc 37 39 required: 38 40 - compatible 39 41 40 42 properties: 41 43 compatible: 42 44 items: 43 - - const: st,stm32mp25-rifsc 45 + - enum: 46 + - st,stm32mp21-rifsc 47 + - st,stm32mp25-rifsc 44 48 - const: simple-bus 45 49 46 50 reg:
+2
Documentation/devicetree/bindings/cache/qcom,llcc.yaml
··· 21 21 compatible: 22 22 enum: 23 23 - qcom,ipq5424-llcc 24 + - qcom,kaanapali-llcc 24 25 - qcom,qcs615-llcc 25 26 - qcom,qcs8300-llcc 26 27 - qcom,qdu1000-llcc ··· 273 272 compatible: 274 273 contains: 275 274 enum: 275 + - qcom,kaanapali-llcc 276 276 - qcom,sm8450-llcc 277 277 - qcom,sm8550-llcc 278 278 - qcom,sm8650-llcc
+3
Documentation/devicetree/bindings/firmware/qcom,scm.yaml
··· 23 23 - enum: 24 24 - qcom,scm-apq8064 25 25 - qcom,scm-apq8084 26 + - qcom,scm-glymur 26 27 - qcom,scm-ipq4019 27 28 - qcom,scm-ipq5018 28 29 - qcom,scm-ipq5332 ··· 32 31 - qcom,scm-ipq806x 33 32 - qcom,scm-ipq8074 34 33 - qcom,scm-ipq9574 34 + - qcom,scm-kaanapali 35 35 - qcom,scm-mdm9607 36 36 - qcom,scm-milos 37 37 - qcom,scm-msm8226 ··· 204 202 compatible: 205 203 contains: 206 204 enum: 205 + - qcom,scm-kaanapali 207 206 - qcom,scm-milos 208 207 - qcom,scm-sm8450 209 208 - qcom,scm-sm8550
+2
Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml
··· 20 20 - samsung,exynos5433-chipid 21 21 - samsung,exynos7-chipid 22 22 - samsung,exynos7870-chipid 23 + - samsung,exynos8890-chipid 23 24 - const: samsung,exynos4210-chipid 24 25 - items: 25 26 - enum: 26 27 - samsung,exynos2200-chipid 27 28 - samsung,exynos7885-chipid 28 29 - samsung,exynos8895-chipid 30 + - samsung,exynos9610-chipid 29 31 - samsung,exynos9810-chipid 30 32 - samsung,exynos990-chipid 31 33 - samsung,exynosautov9-chipid
+42
Documentation/devicetree/bindings/reset/eswin,eic7700-reset.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/reset/eswin,eic7700-reset.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: ESWIN EIC7700 SoC reset controller 8 + 9 + maintainers: 10 + - Yifeng Huang <huangyifeng@eswincomputing.com> 11 + - Xuyang Dong <dongxuyang@eswincomputing.com> 12 + 13 + description: 14 + The system reset controller can be used to reset various peripheral 15 + controllers in ESWIN eic7700 SoC. 16 + 17 + properties: 18 + compatible: 19 + const: eswin,eic7700-reset 20 + 21 + reg: 22 + maxItems: 1 23 + 24 + '#reset-cells': 25 + const: 1 26 + 27 + required: 28 + - compatible 29 + - reg 30 + - '#reset-cells' 31 + 32 + additionalProperties: false 33 + 34 + examples: 35 + - | 36 + #include <dt-bindings/reset/eswin,eic7700-reset.h> 37 + 38 + reset-controller@51828300 { 39 + compatible = "eswin,eic7700-reset"; 40 + reg = <0x51828300 0x200>; 41 + #reset-cells = <1>; 42 + };
+8 -3
Documentation/devicetree/bindings/reset/microchip,rst.yaml
··· 20 20 pattern: "^reset-controller@[0-9a-f]+$" 21 21 22 22 compatible: 23 - enum: 24 - - microchip,sparx5-switch-reset 25 - - microchip,lan966x-switch-reset 23 + oneOf: 24 + - enum: 25 + - microchip,sparx5-switch-reset 26 + - microchip,lan966x-switch-reset 27 + - items: 28 + - enum: 29 + - microchip,lan9691-switch-reset 30 + - const: microchip,lan966x-switch-reset 26 31 27 32 reg: 28 33 items:
+35 -6
Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml
··· 15 15 16 16 properties: 17 17 compatible: 18 - items: 19 - - enum: 20 - - renesas,r9a07g043-usbphy-ctrl # RZ/G2UL and RZ/Five 21 - - renesas,r9a07g044-usbphy-ctrl # RZ/G2{L,LC} 22 - - renesas,r9a07g054-usbphy-ctrl # RZ/V2L 23 - - const: renesas,rzg2l-usbphy-ctrl 18 + oneOf: 19 + - items: 20 + - enum: 21 + - renesas,r9a07g043-usbphy-ctrl # RZ/G2UL and RZ/Five 22 + - renesas,r9a07g044-usbphy-ctrl # RZ/G2{L,LC} 23 + - renesas,r9a07g054-usbphy-ctrl # RZ/V2L 24 + - const: renesas,rzg2l-usbphy-ctrl 25 + - const: renesas,r9a08g045-usbphy-ctrl # RZ/G3S 24 26 25 27 reg: 26 28 maxItems: 1 ··· 50 48 $ref: /schemas/regulator/regulator.yaml# 51 49 unevaluatedProperties: false 52 50 51 + renesas,sysc-pwrrdy: 52 + description: 53 + The system controller PWRRDY indicates to the USB PHY if the power supply 54 + is ready. PWRRDY needs to be set during power-on before applying any 55 + other settings. It also needs to be set before powering off the USB. 56 + $ref: /schemas/types.yaml#/definitions/phandle-array 57 + items: 58 + - items: 59 + - description: 60 + System controller phandle required by USB PHY CTRL driver to set 61 + PWRRDY 62 + - description: Register offset associated with PWRRDY 63 + - description: Register bitmask associated with PWRRDY 64 + 53 65 required: 54 66 - compatible 55 67 - reg ··· 72 56 - power-domains 73 57 - '#reset-cells' 74 58 - regulator-vbus 59 + 60 + allOf: 61 + - if: 62 + properties: 63 + compatible: 64 + contains: 65 + const: renesas,r9a08g045-usbphy-ctrl 66 + then: 67 + required: 68 + - renesas,sysc-pwrrdy 69 + else: 70 + properties: 71 + renesas,sysc-pwrrdy: false 75 72 76 73 additionalProperties: false 77 74
+7 -1
Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml
··· 16 16 properties: 17 17 compatible: 18 18 enum: 19 - - thead,th1520-reset 19 + - thead,th1520-reset # Reset controller for VO subsystem 20 + - thead,th1520-reset-ao 21 + - thead,th1520-reset-ap 22 + - thead,th1520-reset-dsp 23 + - thead,th1520-reset-misc 24 + - thead,th1520-reset-vi 25 + - thead,th1520-reset-vp 20 26 21 27 reg: 22 28 maxItems: 1
+1
Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml
··· 52 52 - items: 53 53 - enum: 54 54 - mediatek,mt8188-pwrap 55 + - mediatek,mt8189-pwrap 55 56 - const: mediatek,mt8195-pwrap 56 57 - const: syscon 57 58
+2
Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
··· 25 25 compatible: 26 26 items: 27 27 - enum: 28 + - qcom,glymur-aoss-qmp 29 + - qcom,kaanapali-aoss-qmp 28 30 - qcom,milos-aoss-qmp 29 31 - qcom,qcs615-aoss-qmp 30 32 - qcom,qcs8300-aoss-qmp
+2
Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
··· 55 55 - samsung,exynos2200-pmu 56 56 - samsung,exynos7870-pmu 57 57 - samsung,exynos7885-pmu 58 + - samsung,exynos8890-pmu 58 59 - samsung,exynos8895-pmu 59 60 - samsung,exynos9810-pmu 60 61 - samsung,exynos990-pmu ··· 173 172 - samsung,exynos5250-pmu 174 173 - samsung,exynos5420-pmu 175 174 - samsung,exynos5433-pmu 175 + - samsung,exynos7870-pmu 176 176 then: 177 177 properties: 178 178 mipi-phy: true
+23
Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
··· 15 15 - items: 16 16 - enum: 17 17 - google,gs101-apm-sysreg 18 + - google,gs101-hsi0-sysreg 18 19 - google,gs101-hsi2-sysreg 20 + - google,gs101-misc-sysreg 19 21 - google,gs101-peric0-sysreg 20 22 - google,gs101-peric1-sysreg 21 23 - samsung,exynos2200-cmgp-sysreg ··· 28 26 - samsung,exynos3-sysreg 29 27 - samsung,exynos4-sysreg 30 28 - samsung,exynos5-sysreg 29 + - samsung,exynos7870-cam0-sysreg 30 + - samsung,exynos7870-disp-sysreg 31 31 - samsung,exynos8895-fsys0-sysreg 32 32 - samsung,exynos8895-fsys1-sysreg 33 33 - samsung,exynos8895-peric0-sysreg 34 34 - samsung,exynos8895-peric1-sysreg 35 + - samsung,exynos990-peric0-sysreg 36 + - samsung,exynos990-peric1-sysreg 35 37 - samsung,exynosautov920-hsi2-sysreg 36 38 - samsung,exynosautov920-peric0-sysreg 37 39 - samsung,exynosautov920-peric1-sysreg ··· 79 73 clocks: 80 74 maxItems: 1 81 75 76 + power-domains: 77 + maxItems: 1 78 + 82 79 required: 83 80 - compatible 84 81 - reg ··· 92 83 compatible: 93 84 contains: 94 85 enum: 86 + - google,gs101-hsi0-sysreg 95 87 - google,gs101-hsi2-sysreg 88 + - google,gs101-misc-sysreg 96 89 - google,gs101-peric0-sysreg 97 90 - google,gs101-peric1-sysreg 98 91 - samsung,exynos850-cmgp-sysreg ··· 104 93 - samsung,exynos8895-fsys1-sysreg 105 94 - samsung,exynos8895-peric0-sysreg 106 95 - samsung,exynos8895-peric1-sysreg 96 + - samsung,exynos990-peric0-sysreg 97 + - samsung,exynos990-peric1-sysreg 107 98 then: 108 99 required: 109 100 - clocks 110 101 else: 111 102 properties: 112 103 clocks: false 104 + 105 + - if: 106 + properties: 107 + compatible: 108 + not: 109 + contains: 110 + pattern: "^google,gs101-[^-]+-sysreg$" 111 + then: 112 + properties: 113 + power-domains: false 113 114 114 115 additionalProperties: false 115 116
-1
Documentation/driver-api/reset.rst
··· 218 218 reset_controller_register 219 219 reset_controller_unregister 220 220 devm_reset_controller_register 221 - reset_controller_add_lookup
+1
MAINTAINERS
··· 10679 10679 F: Documentation/devicetree/bindings/soc/google/google,gs101-pmu-intr-gen.yaml 10680 10680 F: arch/arm64/boot/dts/exynos/google/ 10681 10681 F: drivers/clk/samsung/clk-gs101.c 10682 + F: drivers/soc/samsung/gs101-pmu.c 10682 10683 F: drivers/phy/samsung/phy-gs101-ufs.c 10683 10684 F: include/dt-bindings/clock/google,gs101.h 10684 10685 K: [gG]oogle.?[tT]ensor
+8 -4
arch/arm/mach-exynos/mcpm-exynos.c
··· 215 215 {}, 216 216 }; 217 217 218 - static void exynos_mcpm_setup_entry_point(void) 218 + static void exynos_mcpm_setup_entry_point(void *data) 219 219 { 220 220 /* 221 221 * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr ··· 228 228 __raw_writel(__pa_symbol(mcpm_entry_point), ns_sram_base_addr + 8); 229 229 } 230 230 231 - static struct syscore_ops exynos_mcpm_syscore_ops = { 231 + static const struct syscore_ops exynos_mcpm_syscore_ops = { 232 232 .resume = exynos_mcpm_setup_entry_point, 233 + }; 234 + 235 + static struct syscore exynos_mcpm_syscore = { 236 + .ops = &exynos_mcpm_syscore_ops, 233 237 }; 234 238 235 239 static int __init exynos_mcpm_init(void) ··· 304 300 pmu_raw_writel(value, EXYNOS_COMMON_OPTION(i)); 305 301 } 306 302 307 - exynos_mcpm_setup_entry_point(); 303 + exynos_mcpm_setup_entry_point(NULL); 308 304 309 - register_syscore_ops(&exynos_mcpm_syscore_ops); 305 + register_syscore(&exynos_mcpm_syscore); 310 306 311 307 return ret; 312 308 }
+29 -19
arch/arm/mach-exynos/suspend.c
··· 53 53 54 54 void (*pm_prepare)(void); 55 55 void (*pm_resume_prepare)(void); 56 - void (*pm_resume)(void); 57 - int (*pm_suspend)(void); 58 56 int (*cpu_suspend)(unsigned long); 57 + 58 + const struct syscore_ops *syscore_ops; 59 59 }; 60 60 61 61 /* Used only on Exynos542x/5800 */ ··· 376 376 } 377 377 378 378 379 - static int exynos_pm_suspend(void) 379 + static int exynos_pm_suspend(void *data) 380 380 { 381 381 exynos_pm_central_suspend(); 382 382 ··· 390 390 return 0; 391 391 } 392 392 393 - static int exynos5420_pm_suspend(void) 393 + static int exynos5420_pm_suspend(void *data) 394 394 { 395 395 u32 this_cluster; 396 396 ··· 408 408 return 0; 409 409 } 410 410 411 - static void exynos_pm_resume(void) 411 + static void exynos_pm_resume(void *data) 412 412 { 413 413 u32 cpuid = read_cpuid_part(); 414 414 ··· 429 429 exynos_set_delayed_reset_assertion(true); 430 430 } 431 431 432 - static void exynos3250_pm_resume(void) 432 + static void exynos3250_pm_resume(void *data) 433 433 { 434 434 u32 cpuid = read_cpuid_part(); 435 435 ··· 473 473 } 474 474 } 475 475 476 - static void exynos5420_pm_resume(void) 476 + static void exynos5420_pm_resume(void *data) 477 477 { 478 478 unsigned long tmp; 479 479 ··· 596 596 .valid = suspend_valid_only_mem, 597 597 }; 598 598 599 + static const struct syscore_ops exynos3250_syscore_ops = { 600 + .suspend = exynos_pm_suspend, 601 + .resume = exynos3250_pm_resume, 602 + }; 603 + 599 604 static const struct exynos_pm_data exynos3250_pm_data = { 600 605 .wkup_irq = exynos3250_wkup_irq, 601 606 .wake_disable_mask = ((0xFF << 8) | (0x1F << 1)), 602 - .pm_suspend = exynos_pm_suspend, 603 - .pm_resume = exynos3250_pm_resume, 604 607 .pm_prepare = exynos3250_pm_prepare, 605 608 .cpu_suspend = exynos3250_cpu_suspend, 609 + .syscore_ops = &exynos3250_syscore_ops, 610 + }; 611 + 612 + static const struct syscore_ops exynos_syscore_ops = { 613 + .suspend = exynos_pm_suspend, 614 + .resume = exynos_pm_resume, 606 615 }; 607 616 608 617 
static const struct exynos_pm_data exynos4_pm_data = { 609 618 .wkup_irq = exynos4_wkup_irq, 610 619 .wake_disable_mask = ((0xFF << 8) | (0x1F << 1)), 611 - .pm_suspend = exynos_pm_suspend, 612 - .pm_resume = exynos_pm_resume, 613 620 .pm_prepare = exynos_pm_prepare, 614 621 .cpu_suspend = exynos_cpu_suspend, 622 + .syscore_ops = &exynos_syscore_ops, 615 623 }; 616 624 617 625 static const struct exynos_pm_data exynos5250_pm_data = { 618 626 .wkup_irq = exynos5250_wkup_irq, 619 627 .wake_disable_mask = ((0xFF << 8) | (0x1F << 1)), 620 - .pm_suspend = exynos_pm_suspend, 621 - .pm_resume = exynos_pm_resume, 622 628 .pm_prepare = exynos_pm_prepare, 623 629 .cpu_suspend = exynos_cpu_suspend, 630 + .syscore_ops = &exynos_syscore_ops, 631 + }; 632 + 633 + static const struct syscore_ops exynos5420_syscore_ops = { 634 + .resume = exynos5420_pm_resume, 635 + .suspend = exynos5420_pm_suspend, 624 636 }; 625 637 626 638 static const struct exynos_pm_data exynos5420_pm_data = { 627 639 .wkup_irq = exynos5250_wkup_irq, 628 640 .wake_disable_mask = (0x7F << 7) | (0x1F << 1), 629 641 .pm_resume_prepare = exynos5420_prepare_pm_resume, 630 - .pm_resume = exynos5420_pm_resume, 631 - .pm_suspend = exynos5420_pm_suspend, 632 642 .pm_prepare = exynos5420_pm_prepare, 633 643 .cpu_suspend = exynos5420_cpu_suspend, 644 + .syscore_ops = &exynos5420_syscore_ops, 634 645 }; 635 646 636 647 static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = { ··· 667 656 { /*sentinel*/ }, 668 657 }; 669 658 670 - static struct syscore_ops exynos_pm_syscore_ops; 659 + static struct syscore exynos_pm_syscore; 671 660 672 661 void __init exynos_pm_init(void) 673 662 { ··· 695 684 tmp |= pm_data->wake_disable_mask; 696 685 pmu_raw_writel(tmp, S5P_WAKEUP_MASK); 697 686 698 - exynos_pm_syscore_ops.suspend = pm_data->pm_suspend; 699 - exynos_pm_syscore_ops.resume = pm_data->pm_resume; 687 + exynos_pm_syscore.ops = pm_data->syscore_ops; 700 688 701 - 
register_syscore_ops(&exynos_pm_syscore_ops); 689 + register_syscore(&exynos_pm_syscore); 702 690 suspend_set_ops(&exynos_suspend_ops); 703 691 704 692 /*
+3 -3
arch/arm/mach-pxa/generic.h
··· 34 34 extern void __init pxa3xx_init_irq(void); 35 35 extern void __init pxa3xx_map_io(void); 36 36 37 - extern struct syscore_ops pxa_irq_syscore_ops; 38 - extern struct syscore_ops pxa2xx_mfp_syscore_ops; 39 - extern struct syscore_ops pxa3xx_mfp_syscore_ops; 37 + extern struct syscore pxa_irq_syscore; 38 + extern struct syscore pxa2xx_mfp_syscore; 39 + extern struct syscore pxa3xx_mfp_syscore; 40 40 41 41 void __init pxa_set_ffuart_info(void *info); 42 42 void __init pxa_set_btuart_info(void *info);
+7 -3
arch/arm/mach-pxa/irq.c
··· 178 178 static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; 179 179 static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; 180 180 181 - static int pxa_irq_suspend(void) 181 + static int pxa_irq_suspend(void *data) 182 182 { 183 183 int i; 184 184 ··· 197 197 return 0; 198 198 } 199 199 200 - static void pxa_irq_resume(void) 200 + static void pxa_irq_resume(void *data) 201 201 { 202 202 int i; 203 203 ··· 219 219 #define pxa_irq_resume NULL 220 220 #endif 221 221 222 - struct syscore_ops pxa_irq_syscore_ops = { 222 + static const struct syscore_ops pxa_irq_syscore_ops = { 223 223 .suspend = pxa_irq_suspend, 224 224 .resume = pxa_irq_resume, 225 + }; 226 + 227 + struct syscore pxa_irq_syscore = { 228 + .ops = &pxa_irq_syscore_ops, 225 229 }; 226 230 227 231 #ifdef CONFIG_OF
+7 -3
arch/arm/mach-pxa/mfp-pxa2xx.c
··· 346 346 static unsigned long saved_gplr[4]; 347 347 static unsigned long saved_pgsr[4]; 348 348 349 - static int pxa2xx_mfp_suspend(void) 349 + static int pxa2xx_mfp_suspend(void *data) 350 350 { 351 351 int i; 352 352 ··· 385 385 return 0; 386 386 } 387 387 388 - static void pxa2xx_mfp_resume(void) 388 + static void pxa2xx_mfp_resume(void *data) 389 389 { 390 390 int i; 391 391 ··· 404 404 #define pxa2xx_mfp_resume NULL 405 405 #endif 406 406 407 - struct syscore_ops pxa2xx_mfp_syscore_ops = { 407 + static const struct syscore_ops pxa2xx_mfp_syscore_ops = { 408 408 .suspend = pxa2xx_mfp_suspend, 409 409 .resume = pxa2xx_mfp_resume, 410 + }; 411 + 412 + struct syscore pxa2xx_mfp_syscore = { 413 + .ops = &pxa2xx_mfp_syscore_ops, 410 414 }; 411 415 412 416 static int __init pxa2xx_mfp_init(void)
+7 -3
arch/arm/mach-pxa/mfp-pxa3xx.c
··· 27 27 * a pull-down mode if they're an active low chip select, and we're 28 28 * just entering standby. 29 29 */ 30 - static int pxa3xx_mfp_suspend(void) 30 + static int pxa3xx_mfp_suspend(void *data) 31 31 { 32 32 mfp_config_lpm(); 33 33 return 0; 34 34 } 35 35 36 - static void pxa3xx_mfp_resume(void) 36 + static void pxa3xx_mfp_resume(void *data) 37 37 { 38 38 mfp_config_run(); 39 39 ··· 49 49 #define pxa3xx_mfp_resume NULL 50 50 #endif 51 51 52 - struct syscore_ops pxa3xx_mfp_syscore_ops = { 52 + static const struct syscore_ops pxa3xx_mfp_syscore_ops = { 53 53 .suspend = pxa3xx_mfp_suspend, 54 54 .resume = pxa3xx_mfp_resume, 55 + }; 56 + 57 + struct syscore pxa3xx_mfp_syscore = { 58 + .ops = &pxa3xx_mfp_syscore_ops, 55 59 };
+2 -2
arch/arm/mach-pxa/pxa25x.c
··· 235 235 236 236 pxa25x_init_pm(); 237 237 238 - register_syscore_ops(&pxa_irq_syscore_ops); 239 - register_syscore_ops(&pxa2xx_mfp_syscore_ops); 238 + register_syscore(&pxa_irq_syscore); 239 + register_syscore(&pxa2xx_mfp_syscore); 240 240 241 241 if (!of_have_populated_dt()) { 242 242 software_node_register(&pxa2xx_gpiochip_node);
+2 -2
arch/arm/mach-pxa/pxa27x.c
··· 337 337 338 338 pxa27x_init_pm(); 339 339 340 - register_syscore_ops(&pxa_irq_syscore_ops); 341 - register_syscore_ops(&pxa2xx_mfp_syscore_ops); 340 + register_syscore(&pxa_irq_syscore); 341 + register_syscore(&pxa2xx_mfp_syscore); 342 342 343 343 if (!of_have_populated_dt()) { 344 344 software_node_register(&pxa2xx_gpiochip_node);
+2 -2
arch/arm/mach-pxa/pxa3xx.c
··· 424 424 if (cpu_is_pxa320()) 425 425 enable_irq_wake(IRQ_WAKEUP1); 426 426 427 - register_syscore_ops(&pxa_irq_syscore_ops); 428 - register_syscore_ops(&pxa3xx_mfp_syscore_ops); 427 + register_syscore(&pxa_irq_syscore); 428 + register_syscore(&pxa3xx_mfp_syscore); 429 429 } 430 430 431 431 return ret;
+8 -4
arch/arm/mach-pxa/smemc.c
··· 18 18 static unsigned long sxcnfg, memclkcfg; 19 19 static unsigned long csadrcfg[4]; 20 20 21 - static int pxa3xx_smemc_suspend(void) 21 + static int pxa3xx_smemc_suspend(void *data) 22 22 { 23 23 msc[0] = __raw_readl(MSC0); 24 24 msc[1] = __raw_readl(MSC1); ··· 32 32 return 0; 33 33 } 34 34 35 - static void pxa3xx_smemc_resume(void) 35 + static void pxa3xx_smemc_resume(void *data) 36 36 { 37 37 __raw_writel(msc[0], MSC0); 38 38 __raw_writel(msc[1], MSC1); ··· 46 46 __raw_writel(0x2, CSMSADRCFG); 47 47 } 48 48 49 - static struct syscore_ops smemc_syscore_ops = { 49 + static const struct syscore_ops smemc_syscore_ops = { 50 50 .suspend = pxa3xx_smemc_suspend, 51 51 .resume = pxa3xx_smemc_resume, 52 + }; 53 + 54 + static struct syscore smemc_syscore = { 55 + .ops = &smemc_syscore_ops, 52 56 }; 53 57 54 58 static int __init smemc_init(void) ··· 68 64 */ 69 65 __raw_writel(0x2, CSMSADRCFG); 70 66 71 - register_syscore_ops(&smemc_syscore_ops); 67 + register_syscore(&smemc_syscore); 72 68 } 73 69 74 70 return 0;
+8 -4
arch/arm/mach-s3c/irq-pm-s3c64xx.c
··· 58 58 59 59 static u32 irq_uart_mask[SERIAL_SAMSUNG_UARTS]; 60 60 61 - static int s3c64xx_irq_pm_suspend(void) 61 + static int s3c64xx_irq_pm_suspend(void *data) 62 62 { 63 63 struct irq_grp_save *grp = eint_grp_save; 64 64 int i; ··· 79 79 return 0; 80 80 } 81 81 82 - static void s3c64xx_irq_pm_resume(void) 82 + static void s3c64xx_irq_pm_resume(void *data) 83 83 { 84 84 struct irq_grp_save *grp = eint_grp_save; 85 85 int i; ··· 100 100 S3C_PMDBG("%s: IRQ configuration restored\n", __func__); 101 101 } 102 102 103 - static struct syscore_ops s3c64xx_irq_syscore_ops = { 103 + static const struct syscore_ops s3c64xx_irq_syscore_ops = { 104 104 .suspend = s3c64xx_irq_pm_suspend, 105 105 .resume = s3c64xx_irq_pm_resume, 106 + }; 107 + 108 + static struct syscore s3c64xx_irq_syscore = { 109 + .ops = &s3c64xx_irq_syscore_ops, 106 110 }; 107 111 108 112 static __init int s3c64xx_syscore_init(void) ··· 115 111 if (of_have_populated_dt() || !soc_is_s3c64xx()) 116 112 return 0; 117 113 118 - register_syscore_ops(&s3c64xx_irq_syscore_ops); 114 + register_syscore(&s3c64xx_irq_syscore); 119 115 120 116 return 0; 121 117 }
+7 -3
arch/arm/mach-s5pv210/pm.c
··· 195 195 /* 196 196 * Syscore operations used to delay restore of certain registers. 197 197 */ 198 - static void s5pv210_pm_resume(void) 198 + static void s5pv210_pm_resume(void *data) 199 199 { 200 200 s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); 201 201 } 202 202 203 - static struct syscore_ops s5pv210_pm_syscore_ops = { 203 + static const struct syscore_ops s5pv210_pm_syscore_ops = { 204 204 .resume = s5pv210_pm_resume, 205 + }; 206 + 207 + static struct syscore s5pv210_pm_syscore = { 208 + .ops = &s5pv210_pm_syscore_ops, 205 209 }; 206 210 207 211 /* ··· 213 209 */ 214 210 void __init s5pv210_pm_init(void) 215 211 { 216 - register_syscore_ops(&s5pv210_pm_syscore_ops); 212 + register_syscore(&s5pv210_pm_syscore); 217 213 suspend_set_ops(&s5pv210_suspend_ops); 218 214 }
+8 -4
arch/arm/mach-versatile/integrator_ap.c
··· 63 63 #ifdef CONFIG_PM 64 64 static unsigned long ic_irq_enable; 65 65 66 - static int irq_suspend(void) 66 + static int irq_suspend(void *data) 67 67 { 68 68 ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); 69 69 return 0; 70 70 } 71 71 72 - static void irq_resume(void) 72 + static void irq_resume(void *data) 73 73 { 74 74 /* disable all irq sources */ 75 75 cm_clear_irqs(); ··· 83 83 #define irq_resume NULL 84 84 #endif 85 85 86 - static struct syscore_ops irq_syscore_ops = { 86 + static const struct syscore_ops irq_syscore_ops = { 87 87 .suspend = irq_suspend, 88 88 .resume = irq_resume, 89 89 }; 90 90 91 + static struct syscore irq_syscore = { 92 + .ops = &irq_syscore_ops, 93 + }; 94 + 91 95 static int __init irq_syscore_init(void) 92 96 { 93 - register_syscore_ops(&irq_syscore_ops); 97 + register_syscore(&irq_syscore); 94 98 95 99 return 0; 96 100 }
+8 -4
arch/arm/mm/cache-b15-rac.c
··· 256 256 return 0; 257 257 } 258 258 259 - static int b15_rac_suspend(void) 259 + static int b15_rac_suspend(void *data) 260 260 { 261 261 /* Suspend the read-ahead cache oeprations, forcing our cache 262 262 * implementation to fallback to the regular ARMv7 calls. ··· 271 271 return 0; 272 272 } 273 273 274 - static void b15_rac_resume(void) 274 + static void b15_rac_resume(void *data) 275 275 { 276 276 /* Coming out of a S3 suspend/resume cycle, the read-ahead cache 277 277 * register RAC_CONFIG0_REG will be restored to its default value, make ··· 282 282 clear_bit(RAC_SUSPENDED, &b15_rac_flags); 283 283 } 284 284 285 - static struct syscore_ops b15_rac_syscore_ops = { 285 + static const struct syscore_ops b15_rac_syscore_ops = { 286 286 .suspend = b15_rac_suspend, 287 287 .resume = b15_rac_resume, 288 + }; 289 + 290 + static struct syscore b15_rac_syscore = { 291 + .ops = &b15_rac_syscore_ops, 288 292 }; 289 293 290 294 static int __init b15_rac_init(void) ··· 351 347 } 352 348 353 349 if (IS_ENABLED(CONFIG_PM_SLEEP)) 354 - register_syscore_ops(&b15_rac_syscore_ops); 350 + register_syscore(&b15_rac_syscore); 355 351 356 352 spin_lock(&rac_lock); 357 353 reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
+8 -4
arch/loongarch/kernel/smp.c
··· 535 535 */ 536 536 #ifdef CONFIG_PM 537 537 538 - static int loongson_ipi_suspend(void) 538 + static int loongson_ipi_suspend(void *data) 539 539 { 540 540 return 0; 541 541 } 542 542 543 - static void loongson_ipi_resume(void) 543 + static void loongson_ipi_resume(void *data) 544 544 { 545 545 iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); 546 546 } 547 547 548 - static struct syscore_ops loongson_ipi_syscore_ops = { 548 + static const struct syscore_ops loongson_ipi_syscore_ops = { 549 549 .resume = loongson_ipi_resume, 550 550 .suspend = loongson_ipi_suspend, 551 + }; 552 + 553 + static struct syscore loongson_ipi_syscore = { 554 + .ops = &loongson_ipi_syscore_ops, 551 555 }; 552 556 553 557 /* ··· 560 556 */ 561 557 static int __init ipi_pm_init(void) 562 558 { 563 - register_syscore_ops(&loongson_ipi_syscore_ops); 559 + register_syscore(&loongson_ipi_syscore); 564 560 return 0; 565 561 } 566 562
+8 -4
arch/mips/alchemy/common/dbdma.c
··· 982 982 983 983 static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6]; 984 984 985 - static int alchemy_dbdma_suspend(void) 985 + static int alchemy_dbdma_suspend(void *data) 986 986 { 987 987 int i; 988 988 void __iomem *addr; ··· 1019 1019 return 0; 1020 1020 } 1021 1021 1022 - static void alchemy_dbdma_resume(void) 1022 + static void alchemy_dbdma_resume(void *data) 1023 1023 { 1024 1024 int i; 1025 1025 void __iomem *addr; ··· 1044 1044 } 1045 1045 } 1046 1046 1047 - static struct syscore_ops alchemy_dbdma_syscore_ops = { 1047 + static const struct syscore_ops alchemy_dbdma_syscore_ops = { 1048 1048 .suspend = alchemy_dbdma_suspend, 1049 1049 .resume = alchemy_dbdma_resume, 1050 + }; 1051 + 1052 + static struct syscore alchemy_dbdma_syscore = { 1053 + .ops = &alchemy_dbdma_syscore_ops, 1050 1054 }; 1051 1055 1052 1056 static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable) ··· 1075 1071 printk(KERN_ERR "Cannot grab DBDMA interrupt!\n"); 1076 1072 else { 1077 1073 dbdma_initialized = 1; 1078 - register_syscore_ops(&alchemy_dbdma_syscore_ops); 1074 + register_syscore(&alchemy_dbdma_syscore); 1079 1075 } 1080 1076 1081 1077 return ret;
+16 -8
arch/mips/alchemy/common/irq.c
··· 758 758 wmb(); 759 759 } 760 760 761 - static int alchemy_ic_suspend(void) 761 + static int alchemy_ic_suspend(void *data) 762 762 { 763 763 alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), 764 764 alchemy_gpic_pmdata); ··· 767 767 return 0; 768 768 } 769 769 770 - static void alchemy_ic_resume(void) 770 + static void alchemy_ic_resume(void *data) 771 771 { 772 772 alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), 773 773 &alchemy_gpic_pmdata[7]); ··· 775 775 alchemy_gpic_pmdata); 776 776 } 777 777 778 - static int alchemy_gpic_suspend(void) 778 + static int alchemy_gpic_suspend(void *data) 779 779 { 780 780 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR); 781 781 int i; ··· 806 806 return 0; 807 807 } 808 808 809 - static void alchemy_gpic_resume(void) 809 + static void alchemy_gpic_resume(void *data) 810 810 { 811 811 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR); 812 812 int i; ··· 837 837 wmb(); 838 838 } 839 839 840 - static struct syscore_ops alchemy_ic_pmops = { 840 + static const struct syscore_ops alchemy_ic_pmops = { 841 841 .suspend = alchemy_ic_suspend, 842 842 .resume = alchemy_ic_resume, 843 843 }; 844 844 845 - static struct syscore_ops alchemy_gpic_pmops = { 845 + static struct syscore alchemy_ic_pm = { 846 + .ops = &alchemy_ic_pmops, 847 + }; 848 + 849 + static const struct syscore_ops alchemy_gpic_pmops = { 846 850 .suspend = alchemy_gpic_suspend, 847 851 .resume = alchemy_gpic_resume, 852 + }; 853 + 854 + static struct syscore alchemy_gpic_pm = { 855 + .ops = &alchemy_gpic_pmops, 848 856 }; 849 857 850 858 /******************************************************************************/ ··· 888 880 889 881 ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR)); 890 882 ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR)); 891 - register_syscore_ops(&alchemy_ic_pmops); 883 + register_syscore(&alchemy_ic_pm); 892 884 mips_cpu_irq_init(); 893 885 894 
886 /* register all 64 possible IC0+IC1 irq sources as type "none". ··· 933 925 int i; 934 926 void __iomem *bank_base; 935 927 936 - register_syscore_ops(&alchemy_gpic_pmops); 928 + register_syscore(&alchemy_gpic_pm); 937 929 mips_cpu_irq_init(); 938 930 939 931 /* disable & ack all possible interrupt sources */
+8 -4
arch/mips/alchemy/common/usb.c
··· 580 580 } 581 581 } 582 582 583 - static int alchemy_usb_suspend(void) 583 + static int alchemy_usb_suspend(void *data) 584 584 { 585 585 alchemy_usb_pm(1); 586 586 return 0; 587 587 } 588 588 589 - static void alchemy_usb_resume(void) 589 + static void alchemy_usb_resume(void *data) 590 590 { 591 591 alchemy_usb_pm(0); 592 592 } 593 593 594 - static struct syscore_ops alchemy_usb_pm_ops = { 594 + static const struct syscore_ops alchemy_usb_pm_syscore_ops = { 595 595 .suspend = alchemy_usb_suspend, 596 596 .resume = alchemy_usb_resume, 597 + }; 598 + 599 + static struct syscore alchemy_usb_pm_syscore = { 600 + .ops = &alchemy_usb_pm_syscore_ops, 597 601 }; 598 602 599 603 static int __init alchemy_usb_init(void) ··· 624 620 } 625 621 626 622 if (!ret) 627 - register_syscore_ops(&alchemy_usb_pm_ops); 623 + register_syscore(&alchemy_usb_pm_syscore); 628 624 629 625 return ret; 630 626 }
+10 -6
arch/mips/pci/pci-alchemy.c
··· 304 304 } 305 305 306 306 /* save PCI controller register contents. */ 307 - static int alchemy_pci_suspend(void) 307 + static int alchemy_pci_suspend(void *data) 308 308 { 309 309 struct alchemy_pci_context *ctx = __alchemy_pci_ctx; 310 310 if (!ctx) ··· 326 326 return 0; 327 327 } 328 328 329 - static void alchemy_pci_resume(void) 329 + static void alchemy_pci_resume(void *data) 330 330 { 331 331 struct alchemy_pci_context *ctx = __alchemy_pci_ctx; 332 332 if (!ctx) ··· 354 354 alchemy_pci_wired_entry(ctx); /* install it */ 355 355 } 356 356 357 - static struct syscore_ops alchemy_pci_pmops = { 358 - .suspend = alchemy_pci_suspend, 359 - .resume = alchemy_pci_resume, 357 + static const struct syscore_ops alchemy_pci_syscore_ops = { 358 + .suspend = alchemy_pci_suspend, 359 + .resume = alchemy_pci_resume, 360 + }; 361 + 362 + static struct syscore alchemy_pci_syscore = { 363 + .ops = &alchemy_pci_syscore_ops, 360 364 }; 361 365 362 366 static int alchemy_pci_probe(struct platform_device *pdev) ··· 482 478 483 479 __alchemy_pci_ctx = ctx; 484 480 platform_set_drvdata(pdev, ctx); 485 - register_syscore_ops(&alchemy_pci_pmops); 481 + register_syscore(&alchemy_pci_syscore); 486 482 register_pci_controller(&ctx->alchemy_pci_ctrl); 487 483 488 484 dev_info(&pdev->dev, "PCI controller at %ld MHz\n",
+7 -3
arch/powerpc/platforms/cell/spu_base.c
··· 726 726 } 727 727 #endif 728 728 729 - static void spu_shutdown(void) 729 + static void spu_shutdown(void *data) 730 730 { 731 731 struct spu *spu; 732 732 ··· 738 738 mutex_unlock(&spu_full_list_mutex); 739 739 } 740 740 741 - static struct syscore_ops spu_syscore_ops = { 741 + static const struct syscore_ops spu_syscore_ops = { 742 742 .shutdown = spu_shutdown, 743 + }; 744 + 745 + static struct syscore spu_syscore = { 746 + .ops = &spu_syscore_ops, 743 747 }; 744 748 745 749 static int __init init_spu_base(void) ··· 778 774 crash_register_spus(&spu_full_list); 779 775 mutex_unlock(&spu_full_list_mutex); 780 776 spu_add_dev_attr(&dev_attr_stat); 781 - register_syscore_ops(&spu_syscore_ops); 777 + register_syscore(&spu_syscore); 782 778 783 779 spu_init_affinity(); 784 780
+8 -4
arch/powerpc/platforms/powermac/pic.c
··· 600 600 return viaint; 601 601 } 602 602 603 - static int pmacpic_suspend(void) 603 + static int pmacpic_suspend(void *data) 604 604 { 605 605 int viaint = pmacpic_find_viaint(); 606 606 ··· 621 621 return 0; 622 622 } 623 623 624 - static void pmacpic_resume(void) 624 + static void pmacpic_resume(void *data) 625 625 { 626 626 int i; 627 627 ··· 634 634 pmac_unmask_irq(irq_get_irq_data(i)); 635 635 } 636 636 637 - static struct syscore_ops pmacpic_syscore_ops = { 637 + static const struct syscore_ops pmacpic_syscore_ops = { 638 638 .suspend = pmacpic_suspend, 639 639 .resume = pmacpic_resume, 640 + }; 641 + 642 + static struct syscore pmacpic_syscore = { 643 + .ops = &pmacpic_syscore_ops, 640 644 }; 641 645 642 646 static int __init init_pmacpic_syscore(void) 643 647 { 644 648 if (pmac_irq_hw[0]) 645 - register_syscore_ops(&pmacpic_syscore_ops); 649 + register_syscore(&pmacpic_syscore); 646 650 return 0; 647 651 } 648 652
+8 -4
arch/powerpc/sysdev/fsl_lbc.c
··· 350 350 #ifdef CONFIG_SUSPEND 351 351 352 352 /* save lbc registers */ 353 - static int fsl_lbc_syscore_suspend(void) 353 + static int fsl_lbc_syscore_suspend(void *data) 354 354 { 355 355 struct fsl_lbc_ctrl *ctrl; 356 356 struct fsl_lbc_regs __iomem *lbc; ··· 374 374 } 375 375 376 376 /* restore lbc registers */ 377 - static void fsl_lbc_syscore_resume(void) 377 + static void fsl_lbc_syscore_resume(void *data) 378 378 { 379 379 struct fsl_lbc_ctrl *ctrl; 380 380 struct fsl_lbc_regs __iomem *lbc; ··· 408 408 }; 409 409 410 410 #ifdef CONFIG_SUSPEND 411 - static struct syscore_ops lbc_syscore_pm_ops = { 411 + static const struct syscore_ops lbc_syscore_pm_ops = { 412 412 .suspend = fsl_lbc_syscore_suspend, 413 413 .resume = fsl_lbc_syscore_resume, 414 + }; 415 + 416 + static struct syscore lbc_syscore_pm = { 417 + .ops = &lbc_syscore_pm_ops, 414 418 }; 415 419 #endif 416 420 ··· 429 425 static int __init fsl_lbc_init(void) 430 426 { 431 427 #ifdef CONFIG_SUSPEND 432 - register_syscore_ops(&lbc_syscore_pm_ops); 428 + register_syscore(&lbc_syscore_pm); 433 429 #endif 434 430 return platform_driver_register(&fsl_lbc_ctrl_driver); 435 431 }
+8 -4
arch/powerpc/sysdev/fsl_pci.c
··· 1258 1258 send_pme_turnoff_message(hose); 1259 1259 } 1260 1260 1261 - static int fsl_pci_syscore_suspend(void) 1261 + static int fsl_pci_syscore_suspend(void *data) 1262 1262 { 1263 1263 struct pci_controller *hose, *tmp; 1264 1264 ··· 1291 1291 setup_pci_atmu(hose); 1292 1292 } 1293 1293 1294 - static void fsl_pci_syscore_resume(void) 1294 + static void fsl_pci_syscore_resume(void *data) 1295 1295 { 1296 1296 struct pci_controller *hose, *tmp; 1297 1297 ··· 1299 1299 fsl_pci_syscore_do_resume(hose); 1300 1300 } 1301 1301 1302 - static struct syscore_ops pci_syscore_pm_ops = { 1302 + static const struct syscore_ops pci_syscore_pm_ops = { 1303 1303 .suspend = fsl_pci_syscore_suspend, 1304 1304 .resume = fsl_pci_syscore_resume, 1305 + }; 1306 + 1307 + static struct syscore pci_syscore_pm = { 1308 + .ops = &pci_syscore_pm_ops, 1305 1309 }; 1306 1310 #endif 1307 1311 ··· 1363 1359 static int __init fsl_pci_init(void) 1364 1360 { 1365 1361 #ifdef CONFIG_PM_SLEEP 1366 - register_syscore_ops(&pci_syscore_pm_ops); 1362 + register_syscore(&pci_syscore_pm); 1367 1363 #endif 1368 1364 return platform_driver_register(&fsl_pci_driver); 1369 1365 }
+8 -4
arch/powerpc/sysdev/ipic.c
··· 817 817 u32 sercr; 818 818 } ipic_saved_state; 819 819 820 - static int ipic_suspend(void) 820 + static int ipic_suspend(void *data) 821 821 { 822 822 struct ipic *ipic = primary_ipic; 823 823 ··· 848 848 return 0; 849 849 } 850 850 851 - static void ipic_resume(void) 851 + static void ipic_resume(void *data) 852 852 { 853 853 struct ipic *ipic = primary_ipic; 854 854 ··· 870 870 #define ipic_resume NULL 871 871 #endif 872 872 873 - static struct syscore_ops ipic_syscore_ops = { 873 + static const struct syscore_ops ipic_syscore_ops = { 874 874 .suspend = ipic_suspend, 875 875 .resume = ipic_resume, 876 + }; 877 + 878 + static struct syscore ipic_syscore = { 879 + .ops = &ipic_syscore_ops, 876 880 }; 877 881 878 882 static int __init init_ipic_syscore(void) ··· 885 881 return -ENODEV; 886 882 887 883 printk(KERN_DEBUG "Registering ipic system core operations\n"); 888 - register_syscore_ops(&ipic_syscore_ops); 884 + register_syscore(&ipic_syscore); 889 885 890 886 return 0; 891 887 }
+9 -5
arch/powerpc/sysdev/mpic.c
··· 1944 1944 } 1945 1945 } 1946 1946 1947 - static int mpic_suspend(void) 1947 + static int mpic_suspend(void *data) 1948 1948 { 1949 1949 struct mpic *mpic = mpics; 1950 1950 ··· 1986 1986 } /* end for loop */ 1987 1987 } 1988 1988 1989 - static void mpic_resume(void) 1989 + static void mpic_resume(void *data) 1990 1990 { 1991 1991 struct mpic *mpic = mpics; 1992 1992 ··· 1996 1996 } 1997 1997 } 1998 1998 1999 - static struct syscore_ops mpic_syscore_ops = { 1999 + static const struct syscore_ops mpic_syscore_ops = { 2000 2000 .resume = mpic_resume, 2001 2001 .suspend = mpic_suspend, 2002 + }; 2003 + 2004 + static struct syscore mpic_syscore = { 2005 + .ops = &mpic_syscore_ops, 2002 2006 }; 2003 2007 2004 2008 static int mpic_init_sys(void) 2005 2009 { 2006 2010 int rc; 2007 2011 2008 - register_syscore_ops(&mpic_syscore_ops); 2012 + register_syscore(&mpic_syscore); 2009 2013 rc = subsys_system_register(&mpic_subsys, NULL); 2010 2014 if (rc) { 2011 - unregister_syscore_ops(&mpic_syscore_ops); 2015 + unregister_syscore(&mpic_syscore); 2012 2016 pr_err("mpic: Failed to register subsystem!\n"); 2013 2017 return rc; 2014 2018 }
+7 -3
arch/powerpc/sysdev/mpic_timer.c
··· 519 519 kfree(priv); 520 520 } 521 521 522 - static void mpic_timer_resume(void) 522 + static void mpic_timer_resume(void *data) 523 523 { 524 524 struct timer_group_priv *priv; 525 525 ··· 535 535 {}, 536 536 }; 537 537 538 - static struct syscore_ops mpic_timer_syscore_ops = { 538 + static const struct syscore_ops mpic_timer_syscore_ops = { 539 539 .resume = mpic_timer_resume, 540 + }; 541 + 542 + static struct syscore mpic_timer_syscore = { 543 + .ops = &mpic_timer_syscore_ops, 540 544 }; 541 545 542 546 static int __init mpic_timer_init(void) ··· 550 546 for_each_matching_node(np, mpic_timer_ids) 551 547 timer_group_init(np); 552 548 553 - register_syscore_ops(&mpic_timer_syscore_ops); 549 + register_syscore(&mpic_timer_syscore); 554 550 555 551 if (list_empty(&timer_group_list)) 556 552 return -ENODEV;
+7 -3
arch/sh/mm/pmb.c
··· 857 857 subsys_initcall(pmb_debugfs_init); 858 858 859 859 #ifdef CONFIG_PM 860 - static void pmb_syscore_resume(void) 860 + static void pmb_syscore_resume(void *data) 861 861 { 862 862 struct pmb_entry *pmbe; 863 863 int i; ··· 874 874 read_unlock(&pmb_rwlock); 875 875 } 876 876 877 - static struct syscore_ops pmb_syscore_ops = { 877 + static const struct syscore_ops pmb_syscore_ops = { 878 878 .resume = pmb_syscore_resume, 879 + }; 880 + 881 + static struct syscore pmb_syscore = { 882 + .ops = &pmb_syscore_ops, 879 883 }; 880 884 881 885 static int __init pmb_sysdev_init(void) 882 886 { 883 - register_syscore_ops(&pmb_syscore_ops); 887 + register_syscore(&pmb_syscore); 884 888 return 0; 885 889 } 886 890 subsys_initcall(pmb_sysdev_init);
+8 -4
arch/x86/events/amd/ibs.c
··· 1718 1718 1719 1719 #ifdef CONFIG_PM 1720 1720 1721 - static int perf_ibs_suspend(void) 1721 + static int perf_ibs_suspend(void *data) 1722 1722 { 1723 1723 clear_APIC_ibs(); 1724 1724 return 0; 1725 1725 } 1726 1726 1727 - static void perf_ibs_resume(void) 1727 + static void perf_ibs_resume(void *data) 1728 1728 { 1729 1729 ibs_eilvt_setup(); 1730 1730 setup_APIC_ibs(); 1731 1731 } 1732 1732 1733 - static struct syscore_ops perf_ibs_syscore_ops = { 1733 + static const struct syscore_ops perf_ibs_syscore_ops = { 1734 1734 .resume = perf_ibs_resume, 1735 1735 .suspend = perf_ibs_suspend, 1736 1736 }; 1737 1737 1738 + static struct syscore perf_ibs_syscore = { 1739 + .ops = &perf_ibs_syscore_ops, 1740 + }; 1741 + 1738 1742 static void perf_ibs_pm_init(void) 1739 1743 { 1740 - register_syscore_ops(&perf_ibs_syscore_ops); 1744 + register_syscore(&perf_ibs_syscore); 1741 1745 } 1742 1746 1743 1747 #else
+8 -4
arch/x86/hyperv/hv_init.c
··· 351 351 return 1; 352 352 } 353 353 354 - static int hv_suspend(void) 354 + static int hv_suspend(void *data) 355 355 { 356 356 union hv_x64_msr_hypercall_contents hypercall_msr; 357 357 int ret; ··· 378 378 return ret; 379 379 } 380 380 381 - static void hv_resume(void) 381 + static void hv_resume(void *data) 382 382 { 383 383 union hv_x64_msr_hypercall_contents hypercall_msr; 384 384 int ret; ··· 405 405 } 406 406 407 407 /* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */ 408 - static struct syscore_ops hv_syscore_ops = { 408 + static const struct syscore_ops hv_syscore_ops = { 409 409 .suspend = hv_suspend, 410 410 .resume = hv_resume, 411 + }; 412 + 413 + static struct syscore hv_syscore = { 414 + .ops = &hv_syscore_ops, 411 415 }; 412 416 413 417 static void (* __initdata old_setup_percpu_clockev)(void); ··· 573 569 574 570 x86_init.pci.arch_init = hv_pci_init; 575 571 576 - register_syscore_ops(&hv_syscore_ops); 572 + register_syscore(&hv_syscore); 577 573 578 574 if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID) 579 575 hv_get_partition_id();
+7 -3
arch/x86/kernel/amd_gart_64.c
··· 591 591 } 592 592 } 593 593 594 - static void gart_resume(void) 594 + static void gart_resume(void *data) 595 595 { 596 596 pr_info("PCI-DMA: Resuming GART IOMMU\n"); 597 597 ··· 600 600 enable_gart_translations(); 601 601 } 602 602 603 - static struct syscore_ops gart_syscore_ops = { 603 + static const struct syscore_ops gart_syscore_ops = { 604 604 .resume = gart_resume, 605 605 606 + }; 607 + 608 + static struct syscore gart_syscore = { 609 + .ops = &gart_syscore_ops, 606 610 }; 607 611 608 612 /* ··· 654 650 655 651 agp_gatt_table = gatt; 656 652 657 - register_syscore_ops(&gart_syscore_ops); 653 + register_syscore(&gart_syscore); 658 654 659 655 flush_gart(); 660 656
+8 -4
arch/x86/kernel/apic/apic.c
··· 2385 2385 unsigned int apic_cmci; 2386 2386 } apic_pm_state; 2387 2387 2388 - static int lapic_suspend(void) 2388 + static int lapic_suspend(void *data) 2389 2389 { 2390 2390 unsigned long flags; 2391 2391 int maxlvt; ··· 2433 2433 return 0; 2434 2434 } 2435 2435 2436 - static void lapic_resume(void) 2436 + static void lapic_resume(void *data) 2437 2437 { 2438 2438 unsigned int l, h; 2439 2439 unsigned long flags; ··· 2508 2508 * are needed on every CPU up until machine_halt/restart/poweroff. 2509 2509 */ 2510 2510 2511 - static struct syscore_ops lapic_syscore_ops = { 2511 + static const struct syscore_ops lapic_syscore_ops = { 2512 2512 .resume = lapic_resume, 2513 2513 .suspend = lapic_suspend, 2514 + }; 2515 + 2516 + static struct syscore lapic_syscore = { 2517 + .ops = &lapic_syscore_ops, 2514 2518 }; 2515 2519 2516 2520 static void apic_pm_activate(void) ··· 2526 2522 { 2527 2523 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ 2528 2524 if (boot_cpu_has(X86_FEATURE_APIC)) 2529 - register_syscore_ops(&lapic_syscore_ops); 2525 + register_syscore(&lapic_syscore); 2530 2526 2531 2527 return 0; 2532 2528 }
+13 -4
arch/x86/kernel/apic/io_apic.c
··· 2308 2308 } 2309 2309 } 2310 2310 2311 - static void ioapic_resume(void) 2311 + static int ioapic_suspend(void *data) 2312 + { 2313 + return save_ioapic_entries(); 2314 + } 2315 + 2316 + static void ioapic_resume(void *data) 2312 2317 { 2313 2318 int ioapic_idx; 2314 2319 ··· 2323 2318 restore_ioapic_entries(); 2324 2319 } 2325 2320 2326 - static struct syscore_ops ioapic_syscore_ops = { 2327 - .suspend = save_ioapic_entries, 2321 + static const struct syscore_ops ioapic_syscore_ops = { 2322 + .suspend = ioapic_suspend, 2328 2323 .resume = ioapic_resume, 2324 + }; 2325 + 2326 + static struct syscore ioapic_syscore = { 2327 + .ops = &ioapic_syscore_ops, 2329 2328 }; 2330 2329 2331 2330 static int __init ioapic_init_ops(void) 2332 2331 { 2333 - register_syscore_ops(&ioapic_syscore_ops); 2332 + register_syscore(&ioapic_syscore); 2334 2333 2335 2334 return 0; 2336 2335 }
+12 -8
arch/x86/kernel/cpu/aperfmperf.c
··· 37 37 .seq = SEQCNT_ZERO(cpu_samples.seq) 38 38 }; 39 39 40 - static void init_counter_refs(void) 40 + static void init_counter_refs(void *data) 41 41 { 42 42 u64 aperf, mperf; 43 43 ··· 289 289 } 290 290 291 291 #ifdef CONFIG_PM_SLEEP 292 - static struct syscore_ops freq_invariance_syscore_ops = { 292 + static const struct syscore_ops freq_invariance_syscore_ops = { 293 293 .resume = init_counter_refs, 294 294 }; 295 295 296 - static void register_freq_invariance_syscore_ops(void) 296 + static struct syscore freq_invariance_syscore = { 297 + .ops = &freq_invariance_syscore_ops, 298 + }; 299 + 300 + static void register_freq_invariance_syscore(void) 297 301 { 298 - register_syscore_ops(&freq_invariance_syscore_ops); 302 + register_syscore(&freq_invariance_syscore); 299 303 } 300 304 #else 301 - static inline void register_freq_invariance_syscore_ops(void) {} 305 + static inline void register_freq_invariance_syscore(void) {} 302 306 #endif 303 307 304 308 static void freq_invariance_enable(void) ··· 312 308 return; 313 309 } 314 310 static_branch_enable_cpuslocked(&arch_scale_freq_key); 315 - register_freq_invariance_syscore_ops(); 311 + register_freq_invariance_syscore(); 316 312 pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio); 317 313 } 318 314 ··· 539 535 if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF)) 540 536 return 0; 541 537 542 - init_counter_refs(); 538 + init_counter_refs(NULL); 543 539 bp_init_freq_invariance(); 544 540 return 0; 545 541 } ··· 548 544 void ap_init_aperfmperf(void) 549 545 { 550 546 if (cpu_feature_enabled(X86_FEATURE_APERFMPERF)) 551 - init_counter_refs(); 547 + init_counter_refs(NULL); 552 548 }
+10 -6
arch/x86/kernel/cpu/intel_epb.c
··· 75 75 [EPB_INDEX_POWERSAVE] = ENERGY_PERF_BIAS_POWERSAVE, 76 76 }; 77 77 78 - static int intel_epb_save(void) 78 + static int intel_epb_save(void *data) 79 79 { 80 80 u64 epb; 81 81 ··· 89 89 return 0; 90 90 } 91 91 92 - static void intel_epb_restore(void) 92 + static void intel_epb_restore(void *data) 93 93 { 94 94 u64 val = this_cpu_read(saved_epb); 95 95 u64 epb; ··· 114 114 wrmsrq(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); 115 115 } 116 116 117 - static struct syscore_ops intel_epb_syscore_ops = { 117 + static const struct syscore_ops intel_epb_syscore_ops = { 118 118 .suspend = intel_epb_save, 119 119 .resume = intel_epb_restore, 120 + }; 121 + 122 + static struct syscore intel_epb_syscore = { 123 + .ops = &intel_epb_syscore_ops, 120 124 }; 121 125 122 126 static const char * const energy_perf_strings[] = { ··· 189 185 { 190 186 struct device *cpu_dev = get_cpu_device(cpu); 191 187 192 - intel_epb_restore(); 188 + intel_epb_restore(NULL); 193 189 if (!cpuhp_tasks_frozen) 194 190 sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group); 195 191 ··· 203 199 if (!cpuhp_tasks_frozen) 204 200 sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group); 205 201 206 - intel_epb_save(); 202 + intel_epb_save(NULL); 207 203 return 0; 208 204 } 209 205 ··· 234 230 if (ret < 0) 235 231 goto err_out_online; 236 232 237 - register_syscore_ops(&intel_epb_syscore_ops); 233 + register_syscore(&intel_epb_syscore); 238 234 return 0; 239 235 240 236 err_out_online:
+9 -5
arch/x86/kernel/cpu/mce/core.c
··· 2439 2439 mce_disable_error_reporting(); 2440 2440 } 2441 2441 2442 - static int mce_syscore_suspend(void) 2442 + static int mce_syscore_suspend(void *data) 2443 2443 { 2444 2444 vendor_disable_error_reporting(); 2445 2445 return 0; 2446 2446 } 2447 2447 2448 - static void mce_syscore_shutdown(void) 2448 + static void mce_syscore_shutdown(void *data) 2449 2449 { 2450 2450 vendor_disable_error_reporting(); 2451 2451 } ··· 2455 2455 * Only one CPU is active at this time, the others get re-added later using 2456 2456 * CPU hotplug: 2457 2457 */ 2458 - static void mce_syscore_resume(void) 2458 + static void mce_syscore_resume(void *data) 2459 2459 { 2460 2460 __mcheck_cpu_init_generic(); 2461 2461 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); ··· 2463 2463 cr4_set_bits(X86_CR4_MCE); 2464 2464 } 2465 2465 2466 - static struct syscore_ops mce_syscore_ops = { 2466 + static const struct syscore_ops mce_syscore_ops = { 2467 2467 .suspend = mce_syscore_suspend, 2468 2468 .shutdown = mce_syscore_shutdown, 2469 2469 .resume = mce_syscore_resume, 2470 + }; 2471 + 2472 + static struct syscore mce_syscore = { 2473 + .ops = &mce_syscore_ops, 2470 2474 }; 2471 2475 2472 2476 /* ··· 2873 2869 if (err < 0) 2874 2870 goto err_out_online; 2875 2871 2876 - register_syscore_ops(&mce_syscore_ops); 2872 + register_syscore(&mce_syscore); 2877 2873 2878 2874 return 0; 2879 2875
+12 -3
arch/x86/kernel/cpu/microcode/core.c
··· 823 823 reload_early_microcode(cpu); 824 824 } 825 825 826 - static struct syscore_ops mc_syscore_ops = { 827 - .resume = microcode_bsp_resume, 826 + static void microcode_bsp_syscore_resume(void *data) 827 + { 828 + microcode_bsp_resume(); 829 + } 830 + 831 + static const struct syscore_ops mc_syscore_ops = { 832 + .resume = microcode_bsp_syscore_resume, 833 + }; 834 + 835 + static struct syscore mc_syscore = { 836 + .ops = &mc_syscore_ops, 828 837 }; 829 838 830 839 static int mc_cpu_online(unsigned int cpu) ··· 912 903 } 913 904 } 914 905 915 - register_syscore_ops(&mc_syscore_ops); 906 + register_syscore(&mc_syscore); 916 907 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", 917 908 mc_cpu_online, mc_cpu_down_prep); 918 909
+8 -4
arch/x86/kernel/cpu/mtrr/legacy.c
··· 41 41 42 42 static struct mtrr_value *mtrr_value; 43 43 44 - static int mtrr_save(void) 44 + static int mtrr_save(void *data) 45 45 { 46 46 int i; 47 47 ··· 56 56 return 0; 57 57 } 58 58 59 - static void mtrr_restore(void) 59 + static void mtrr_restore(void *data) 60 60 { 61 61 int i; 62 62 ··· 69 69 } 70 70 } 71 71 72 - static struct syscore_ops mtrr_syscore_ops = { 72 + static const struct syscore_ops mtrr_syscore_ops = { 73 73 .suspend = mtrr_save, 74 74 .resume = mtrr_restore, 75 + }; 76 + 77 + static struct syscore mtrr_syscore = { 78 + .ops = &mtrr_syscore_ops, 75 79 }; 76 80 77 81 void mtrr_register_syscore(void) ··· 90 86 * TBD: is there any system with such CPU which supports 91 87 * suspend/resume? If no, we should remove the code. 92 88 */ 93 - register_syscore_ops(&mtrr_syscore_ops); 89 + register_syscore(&mtrr_syscore); 94 90 }
+7 -3
arch/x86/kernel/cpu/umwait.c
··· 86 86 * trust the firmware nor does it matter if the same value is written 87 87 * again. 88 88 */ 89 - static void umwait_syscore_resume(void) 89 + static void umwait_syscore_resume(void *data) 90 90 { 91 91 umwait_update_control_msr(NULL); 92 92 } 93 93 94 - static struct syscore_ops umwait_syscore_ops = { 94 + static const struct syscore_ops umwait_syscore_ops = { 95 95 .resume = umwait_syscore_resume, 96 + }; 97 + 98 + static struct syscore umwait_syscore = { 99 + .ops = &umwait_syscore_ops, 96 100 }; 97 101 98 102 /* sysfs interface */ ··· 230 226 return ret; 231 227 } 232 228 233 - register_syscore_ops(&umwait_syscore_ops); 229 + register_syscore(&umwait_syscore); 234 230 235 231 /* 236 232 * Add umwait control interface. Ignore failure, so at least the
+7 -3
arch/x86/kernel/i8237.c
··· 19 19 * in asm/dma.h. 20 20 */ 21 21 22 - static void i8237A_resume(void) 22 + static void i8237A_resume(void *data) 23 23 { 24 24 unsigned long flags; 25 25 int i; ··· 41 41 release_dma_lock(flags); 42 42 } 43 43 44 - static struct syscore_ops i8237_syscore_ops = { 44 + static const struct syscore_ops i8237_syscore_ops = { 45 45 .resume = i8237A_resume, 46 + }; 47 + 48 + static struct syscore i8237_syscore = { 49 + .ops = &i8237_syscore_ops, 46 50 }; 47 51 48 52 static int __init i8237A_init_ops(void) ··· 74 70 if (x86_pnpbios_disabled() && dmi_get_bios_year() >= 2017) 75 71 return -ENODEV; 76 72 77 - register_syscore_ops(&i8237_syscore_ops); 73 + register_syscore(&i8237_syscore); 78 74 return 0; 79 75 } 80 76 device_initcall(i8237A_init_ops);
+9 -5
arch/x86/kernel/i8259.c
··· 247 247 trigger[1] = inb(PIC_ELCR2) & 0xDE; 248 248 } 249 249 250 - static void i8259A_resume(void) 250 + static void i8259A_resume(void *data) 251 251 { 252 252 init_8259A(i8259A_auto_eoi); 253 253 restore_ELCR(irq_trigger); 254 254 } 255 255 256 - static int i8259A_suspend(void) 256 + static int i8259A_suspend(void *data) 257 257 { 258 258 save_ELCR(irq_trigger); 259 259 return 0; 260 260 } 261 261 262 - static void i8259A_shutdown(void) 262 + static void i8259A_shutdown(void *data) 263 263 { 264 264 /* Put the i8259A into a quiescent state that 265 265 * the kernel initialization code can get it ··· 269 269 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ 270 270 } 271 271 272 - static struct syscore_ops i8259_syscore_ops = { 272 + static const struct syscore_ops i8259_syscore_ops = { 273 273 .suspend = i8259A_suspend, 274 274 .resume = i8259A_resume, 275 275 .shutdown = i8259A_shutdown, 276 + }; 277 + 278 + static struct syscore i8259_syscore = { 279 + .ops = &i8259_syscore_ops, 276 280 }; 277 281 278 282 static void mask_8259A(void) ··· 448 444 static int __init i8259A_init_ops(void) 449 445 { 450 446 if (legacy_pic == &default_legacy_pic) 451 - register_syscore_ops(&i8259_syscore_ops); 447 + register_syscore(&i8259_syscore); 452 448 453 449 return 0; 454 450 }
+8 -4
arch/x86/kernel/kvm.c
··· 721 721 722 722 #endif 723 723 724 - static int kvm_suspend(void) 724 + static int kvm_suspend(void *data) 725 725 { 726 726 u64 val = 0; 727 727 ··· 735 735 return 0; 736 736 } 737 737 738 - static void kvm_resume(void) 738 + static void kvm_resume(void *data) 739 739 { 740 740 kvm_cpu_online(raw_smp_processor_id()); 741 741 ··· 745 745 #endif 746 746 } 747 747 748 - static struct syscore_ops kvm_syscore_ops = { 748 + static const struct syscore_ops kvm_syscore_ops = { 749 749 .suspend = kvm_suspend, 750 750 .resume = kvm_resume, 751 + }; 752 + 753 + static struct syscore kvm_syscore = { 754 + .ops = &kvm_syscore_ops, 751 755 }; 752 756 753 757 static void kvm_pv_guest_cpu_reboot(void *unused) ··· 863 859 machine_ops.crash_shutdown = kvm_crash_shutdown; 864 860 #endif 865 861 866 - register_syscore_ops(&kvm_syscore_ops); 862 + register_syscore(&kvm_syscore); 867 863 868 864 /* 869 865 * Hard lockup detection is enabled by default. Disable it, as guests
+7 -3
drivers/acpi/pci_link.c
··· 761 761 return 0; 762 762 } 763 763 764 - static void irqrouter_resume(void) 764 + static void irqrouter_resume(void *data) 765 765 { 766 766 struct acpi_pci_link *link; 767 767 ··· 888 888 889 889 __setup("acpi_irq_balance", acpi_irq_balance_set); 890 890 891 - static struct syscore_ops irqrouter_syscore_ops = { 891 + static const struct syscore_ops irqrouter_syscore_ops = { 892 892 .resume = irqrouter_resume, 893 + }; 894 + 895 + static struct syscore irqrouter_syscore = { 896 + .ops = &irqrouter_syscore_ops, 893 897 }; 894 898 895 899 void __init acpi_pci_link_init(void) ··· 908 904 else 909 905 acpi_irq_balance = 0; 910 906 } 911 - register_syscore_ops(&irqrouter_syscore_ops); 907 + register_syscore(&irqrouter_syscore); 912 908 acpi_scan_add_handler(&pci_link_handler); 913 909 }
+8 -4
drivers/acpi/sleep.c
··· 884 884 #ifdef CONFIG_PM_SLEEP 885 885 static u32 saved_bm_rld; 886 886 887 - static int acpi_save_bm_rld(void) 887 + static int acpi_save_bm_rld(void *data) 888 888 { 889 889 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); 890 890 return 0; 891 891 } 892 892 893 - static void acpi_restore_bm_rld(void) 893 + static void acpi_restore_bm_rld(void *data) 894 894 { 895 895 u32 resumed_bm_rld = 0; 896 896 ··· 901 901 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); 902 902 } 903 903 904 - static struct syscore_ops acpi_sleep_syscore_ops = { 904 + static const struct syscore_ops acpi_sleep_syscore_ops = { 905 905 .suspend = acpi_save_bm_rld, 906 906 .resume = acpi_restore_bm_rld, 907 907 }; 908 908 909 + static struct syscore acpi_sleep_syscore = { 910 + .ops = &acpi_sleep_syscore_ops, 911 + }; 912 + 909 913 static void acpi_sleep_syscore_init(void) 910 914 { 911 - register_syscore_ops(&acpi_sleep_syscore_ops); 915 + register_syscore(&acpi_sleep_syscore); 912 916 } 913 917 #else 914 918 static inline void acpi_sleep_syscore_init(void) {}
+1
drivers/amba/tegra-ahb.c
··· 144 144 if (!dev) 145 145 return -EPROBE_DEFER; 146 146 ahb = dev_get_drvdata(dev); 147 + put_device(dev); 147 148 val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL); 148 149 val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE; 149 150 gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
+8 -4
drivers/base/firmware_loader/main.c
··· 1576 1576 } 1577 1577 1578 1578 /* stop caching firmware once syscore_suspend is reached */ 1579 - static int fw_suspend(void) 1579 + static int fw_suspend(void *data) 1580 1580 { 1581 1581 fw_cache.state = FW_LOADER_NO_CACHE; 1582 1582 return 0; 1583 1583 } 1584 1584 1585 - static struct syscore_ops fw_syscore_ops = { 1585 + static const struct syscore_ops fw_syscore_ops = { 1586 1586 .suspend = fw_suspend, 1587 + }; 1588 + 1589 + static struct syscore fw_syscore = { 1590 + .ops = &fw_syscore_ops, 1587 1591 }; 1588 1592 1589 1593 static int __init register_fw_pm_ops(void) ··· 1605 1601 if (ret) 1606 1602 return ret; 1607 1603 1608 - register_syscore_ops(&fw_syscore_ops); 1604 + register_syscore(&fw_syscore); 1609 1605 1610 1606 return ret; 1611 1607 } 1612 1608 1613 1609 static inline void unregister_fw_pm_ops(void) 1614 1610 { 1615 - unregister_syscore_ops(&fw_syscore_ops); 1611 + unregister_syscore(&fw_syscore); 1616 1612 unregister_pm_notifier(&fw_cache.pm_notify); 1617 1613 } 1618 1614 #else
+43 -39
drivers/base/syscore.c
··· 11 11 #include <linux/suspend.h> 12 12 #include <trace/events/power.h> 13 13 14 - static LIST_HEAD(syscore_ops_list); 15 - static DEFINE_MUTEX(syscore_ops_lock); 14 + static LIST_HEAD(syscore_list); 15 + static DEFINE_MUTEX(syscore_lock); 16 16 17 17 /** 18 - * register_syscore_ops - Register a set of system core operations. 19 - * @ops: System core operations to register. 18 + * register_syscore - Register a set of system core operations. 19 + * @syscore: System core operations to register. 20 20 */ 21 - void register_syscore_ops(struct syscore_ops *ops) 21 + void register_syscore(struct syscore *syscore) 22 22 { 23 - mutex_lock(&syscore_ops_lock); 24 - list_add_tail(&ops->node, &syscore_ops_list); 25 - mutex_unlock(&syscore_ops_lock); 23 + mutex_lock(&syscore_lock); 24 + list_add_tail(&syscore->node, &syscore_list); 25 + mutex_unlock(&syscore_lock); 26 26 } 27 - EXPORT_SYMBOL_GPL(register_syscore_ops); 27 + EXPORT_SYMBOL_GPL(register_syscore); 28 28 29 29 /** 30 - * unregister_syscore_ops - Unregister a set of system core operations. 31 - * @ops: System core operations to unregister. 30 + * unregister_syscore - Unregister a set of system core operations. 31 + * @syscore: System core operations to unregister. 
32 32 */ 33 - void unregister_syscore_ops(struct syscore_ops *ops) 33 + void unregister_syscore(struct syscore *syscore) 34 34 { 35 - mutex_lock(&syscore_ops_lock); 36 - list_del(&ops->node); 37 - mutex_unlock(&syscore_ops_lock); 35 + mutex_lock(&syscore_lock); 36 + list_del(&syscore->node); 37 + mutex_unlock(&syscore_lock); 38 38 } 39 - EXPORT_SYMBOL_GPL(unregister_syscore_ops); 39 + EXPORT_SYMBOL_GPL(unregister_syscore); 40 40 41 41 #ifdef CONFIG_PM_SLEEP 42 42 /** ··· 46 46 */ 47 47 int syscore_suspend(void) 48 48 { 49 - struct syscore_ops *ops; 49 + struct syscore *syscore; 50 50 int ret = 0; 51 51 52 52 trace_suspend_resume(TPS("syscore_suspend"), 0, true); ··· 59 59 WARN_ONCE(!irqs_disabled(), 60 60 "Interrupts enabled before system core suspend.\n"); 61 61 62 - list_for_each_entry_reverse(ops, &syscore_ops_list, node) 63 - if (ops->suspend) { 64 - pm_pr_dbg("Calling %pS\n", ops->suspend); 65 - ret = ops->suspend(); 62 + list_for_each_entry_reverse(syscore, &syscore_list, node) 63 + if (syscore->ops->suspend) { 64 + pm_pr_dbg("Calling %pS\n", syscore->ops->suspend); 65 + ret = syscore->ops->suspend(syscore->data); 66 66 if (ret) 67 67 goto err_out; 68 68 WARN_ONCE(!irqs_disabled(), 69 - "Interrupts enabled after %pS\n", ops->suspend); 69 + "Interrupts enabled after %pS\n", 70 + syscore->ops->suspend); 70 71 } 71 72 72 73 trace_suspend_resume(TPS("syscore_suspend"), 0, false); 73 74 return 0; 74 75 75 76 err_out: 76 - pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend); 77 + pr_err("PM: System core suspend callback %pS failed.\n", 78 + syscore->ops->suspend); 77 79 78 - list_for_each_entry_continue(ops, &syscore_ops_list, node) 79 - if (ops->resume) 80 - ops->resume(); 80 + list_for_each_entry_continue(syscore, &syscore_list, node) 81 + if (syscore->ops->resume) 82 + syscore->ops->resume(syscore->data); 81 83 82 84 return ret; 83 85 } ··· 92 90 */ 93 91 void syscore_resume(void) 94 92 { 95 - struct syscore_ops *ops; 93 + struct syscore 
*syscore; 96 94 97 95 trace_suspend_resume(TPS("syscore_resume"), 0, true); 98 96 WARN_ONCE(!irqs_disabled(), 99 97 "Interrupts enabled before system core resume.\n"); 100 98 101 - list_for_each_entry(ops, &syscore_ops_list, node) 102 - if (ops->resume) { 103 - pm_pr_dbg("Calling %pS\n", ops->resume); 104 - ops->resume(); 99 + list_for_each_entry(syscore, &syscore_list, node) 100 + if (syscore->ops->resume) { 101 + pm_pr_dbg("Calling %pS\n", syscore->ops->resume); 102 + syscore->ops->resume(syscore->data); 105 103 WARN_ONCE(!irqs_disabled(), 106 - "Interrupts enabled after %pS\n", ops->resume); 104 + "Interrupts enabled after %pS\n", 105 + syscore->ops->resume); 107 106 } 108 107 trace_suspend_resume(TPS("syscore_resume"), 0, false); 109 108 } ··· 116 113 */ 117 114 void syscore_shutdown(void) 118 115 { 119 - struct syscore_ops *ops; 116 + struct syscore *syscore; 120 117 121 - mutex_lock(&syscore_ops_lock); 118 + mutex_lock(&syscore_lock); 122 119 123 - list_for_each_entry_reverse(ops, &syscore_ops_list, node) 124 - if (ops->shutdown) { 120 + list_for_each_entry_reverse(syscore, &syscore_list, node) 121 + if (syscore->ops->shutdown) { 125 122 if (initcall_debug) 126 - pr_info("PM: Calling %pS\n", ops->shutdown); 127 - ops->shutdown(); 123 + pr_info("PM: Calling %pS\n", 124 + syscore->ops->shutdown); 125 + syscore->ops->shutdown(syscore->data); 128 126 } 129 127 130 - mutex_unlock(&syscore_ops_lock); 128 + mutex_unlock(&syscore_lock); 131 129 }
+10 -6
drivers/bus/mvebu-mbus.c
··· 1006 1006 } 1007 1007 fs_initcall(mvebu_mbus_debugfs_init); 1008 1008 1009 - static int mvebu_mbus_suspend(void) 1009 + static int mvebu_mbus_suspend(void *data) 1010 1010 { 1011 1011 struct mvebu_mbus_state *s = &mbus_state; 1012 1012 int win; ··· 1040 1040 return 0; 1041 1041 } 1042 1042 1043 - static void mvebu_mbus_resume(void) 1043 + static void mvebu_mbus_resume(void *data) 1044 1044 { 1045 1045 struct mvebu_mbus_state *s = &mbus_state; 1046 1046 int win; ··· 1069 1069 } 1070 1070 } 1071 1071 1072 - static struct syscore_ops mvebu_mbus_syscore_ops = { 1073 - .suspend = mvebu_mbus_suspend, 1074 - .resume = mvebu_mbus_resume, 1072 + static const struct syscore_ops mvebu_mbus_syscore_ops = { 1073 + .suspend = mvebu_mbus_suspend, 1074 + .resume = mvebu_mbus_resume, 1075 + }; 1076 + 1077 + static struct syscore mvebu_mbus_syscore = { 1078 + .ops = &mvebu_mbus_syscore_ops, 1075 1079 }; 1076 1080 1077 1081 static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, ··· 1122 1118 writel(UNIT_SYNC_BARRIER_ALL, 1123 1119 mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF); 1124 1120 1125 - register_syscore_ops(&mvebu_mbus_syscore_ops); 1121 + register_syscore(&mvebu_mbus_syscore); 1126 1122 1127 1123 return 0; 1128 1124 }
+593 -4
drivers/bus/stm32_rifsc.c
··· 5 5 6 6 #include <linux/bitfield.h> 7 7 #include <linux/bits.h> 8 + #include <linux/debugfs.h> 8 9 #include <linux/device.h> 9 10 #include <linux/err.h> 10 11 #include <linux/init.h> ··· 26 25 #define RIFSC_RISC_PRIVCFGR0 0x30 27 26 #define RIFSC_RISC_PER0_CIDCFGR 0x100 28 27 #define RIFSC_RISC_PER0_SEMCR 0x104 28 + #define RIFSC_RISC_REG0_ACFGR 0x900 29 + #define RIFSC_RISC_REG3_AADDR 0x924 29 30 #define RIFSC_RISC_HWCFGR2 0xFEC 30 31 31 32 /* ··· 72 69 /* Compartiment IDs */ 73 70 #define RIF_CID0 0x0 74 71 #define RIF_CID1 0x1 72 + 73 + #if defined(CONFIG_DEBUG_FS) 74 + #define RIFSC_RISUP_ENTRIES 128 75 + #define RIFSC_RIMU_ENTRIES 16 76 + #define RIFSC_RISAL_SUBREGIONS 2 77 + #define RIFSC_RISAL_GRANULARITY 8 78 + 79 + #define RIFSC_RIMC_ATTR0 0xC10 80 + 81 + #define RIFSC_RIMC_CIDSEL BIT(2) 82 + #define RIFSC_RIMC_MCID_MASK GENMASK(6, 4) 83 + #define RIFSC_RIMC_MSEC BIT(8) 84 + #define RIFSC_RIMC_MPRIV BIT(9) 85 + 86 + #define RIFSC_RISC_SRCID_MASK GENMASK(6, 4) 87 + #define RIFSC_RISC_SRPRIV BIT(9) 88 + #define RIFSC_RISC_SRSEC BIT(8) 89 + #define RIFSC_RISC_SRRLOCK BIT(1) 90 + #define RIFSC_RISC_SREN BIT(0) 91 + #define RIFSC_RISC_SRLENGTH_MASK GENMASK(27, 16) 92 + #define RIFSC_RISC_SRSTART_MASK GENMASK(10, 0) 93 + 94 + static const char *stm32mp21_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = { 95 + "ETR", 96 + "SDMMC1", 97 + "SDMMC2", 98 + "SDMMC3", 99 + "OTG_HS", 100 + "USBH", 101 + "ETH1", 102 + "ETH2", 103 + "RESERVED", 104 + "RESERVED", 105 + "DCMIPP", 106 + "LTDC_L1/L2", 107 + "LTDC_L3", 108 + "RESERVED", 109 + "RESERVED", 110 + "RESERVED", 111 + }; 112 + 113 + static const char *stm32mp25_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = { 114 + "ETR", 115 + "SDMMC1", 116 + "SDMMC2", 117 + "SDMMC3", 118 + "USB3DR", 119 + "USBH", 120 + "ETH1", 121 + "ETH2", 122 + "PCIE", 123 + "GPU", 124 + "DMCIPP", 125 + "LTDC_L0/L1", 126 + "LTDC_L2", 127 + "LTDC_ROT", 128 + "VDEC", 129 + "VENC" 130 + }; 131 + 132 + static const char 
*stm32mp21_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = { 133 + "TIM1", 134 + "TIM2", 135 + "TIM3", 136 + "TIM4", 137 + "TIM5", 138 + "TIM6", 139 + "TIM7", 140 + "TIM8", 141 + "TIM10", 142 + "TIM11", 143 + "TIM12", 144 + "TIM13", 145 + "TIM14", 146 + "TIM15", 147 + "TIM16", 148 + "TIM17", 149 + "RESERVED", 150 + "LPTIM1", 151 + "LPTIM2", 152 + "LPTIM3", 153 + "LPTIM4", 154 + "LPTIM5", 155 + "SPI1", 156 + "SPI2", 157 + "SPI3", 158 + "SPI4", 159 + "SPI5", 160 + "SPI6", 161 + "RESERVED", 162 + "RESERVED", 163 + "SPDIFRX", 164 + "USART1", 165 + "USART2", 166 + "USART3", 167 + "UART4", 168 + "UART5", 169 + "USART6", 170 + "UART7", 171 + "RESERVED", 172 + "RESERVED", 173 + "LPUART1", 174 + "I2C1", 175 + "I2C2", 176 + "I2C3", 177 + "RESERVED", 178 + "RESERVED", 179 + "RESERVED", 180 + "RESERVED", 181 + "RESERVED", 182 + "SAI1", 183 + "SAI2", 184 + "SAI3", 185 + "SAI4", 186 + "RESERVED", 187 + "MDF1", 188 + "RESERVED", 189 + "FDCAN", 190 + "HDP", 191 + "ADC1", 192 + "ADC2", 193 + "ETH1", 194 + "ETH2", 195 + "RESERVED", 196 + "USBH", 197 + "RESERVED", 198 + "RESERVED", 199 + "OTG_HS", 200 + "DDRPERFM", 201 + "RESERVED", 202 + "RESERVED", 203 + "RESERVED", 204 + "RESERVED", 205 + "RESERVED", 206 + "STGEN", 207 + "OCTOSPI1", 208 + "RESERVED", 209 + "SDMMC1", 210 + "SDMMC2", 211 + "SDMMC3", 212 + "RESERVED", 213 + "LTDC_CMN", 214 + "RESERVED", 215 + "RESERVED", 216 + "RESERVED", 217 + "RESERVED", 218 + "RESERVED", 219 + "CSI", 220 + "DCMIPP", 221 + "DCMI_PSSI", 222 + "RESERVED", 223 + "RESERVED", 224 + "RESERVED", 225 + "RNG1", 226 + "RNG2", 227 + "PKA", 228 + "SAES", 229 + "HASH1", 230 + "HASH2", 231 + "CRYP1", 232 + "CRYP2", 233 + "IWDG1", 234 + "IWDG2", 235 + "IWDG3", 236 + "IWDG4", 237 + "WWDG1", 238 + "RESERVED", 239 + "VREFBUF", 240 + "DTS", 241 + "RAMCFG", 242 + "CRC", 243 + "SERC", 244 + "RESERVED", 245 + "RESERVED", 246 + "RESERVED", 247 + "I3C1", 248 + "I3C2", 249 + "I3C3", 250 + "RESERVED", 251 + "ICACHE_DCACHE", 252 + "LTDC_L1L2", 253 + "LTDC_L3", 254 + "RESERVED", 255 
+ "RESERVED", 256 + "RESERVED", 257 + "RESERVED", 258 + "OTFDEC1", 259 + "RESERVED", 260 + "IAC", 261 + }; 262 + 263 + static const char *stm32mp25_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = { 264 + "TIM1", 265 + "TIM2", 266 + "TIM3", 267 + "TIM4", 268 + "TIM5", 269 + "TIM6", 270 + "TIM7", 271 + "TIM8", 272 + "TIM10", 273 + "TIM11", 274 + "TIM12", 275 + "TIM13", 276 + "TIM14", 277 + "TIM15", 278 + "TIM16", 279 + "TIM17", 280 + "TIM20", 281 + "LPTIM1", 282 + "LPTIM2", 283 + "LPTIM3", 284 + "LPTIM4", 285 + "LPTIM5", 286 + "SPI1", 287 + "SPI2", 288 + "SPI3", 289 + "SPI4", 290 + "SPI5", 291 + "SPI6", 292 + "SPI7", 293 + "SPI8", 294 + "SPDIFRX", 295 + "USART1", 296 + "USART2", 297 + "USART3", 298 + "UART4", 299 + "UART5", 300 + "USART6", 301 + "UART7", 302 + "UART8", 303 + "UART9", 304 + "LPUART1", 305 + "I2C1", 306 + "I2C2", 307 + "I2C3", 308 + "I2C4", 309 + "I2C5", 310 + "I2C6", 311 + "I2C7", 312 + "I2C8", 313 + "SAI1", 314 + "SAI2", 315 + "SAI3", 316 + "SAI4", 317 + "RESERVED", 318 + "MDF1", 319 + "ADF1", 320 + "FDCAN", 321 + "HDP", 322 + "ADC12", 323 + "ADC3", 324 + "ETH1", 325 + "ETH2", 326 + "RESERVED", 327 + "USBH", 328 + "RESERVED", 329 + "RESERVED", 330 + "USB3DR", 331 + "COMBOPHY", 332 + "PCIE", 333 + "UCPD1", 334 + "ETHSW_DEIP", 335 + "ETHSW_ACM_CF", 336 + "ETHSW_ACM_MSGBU", 337 + "STGEN", 338 + "OCTOSPI1", 339 + "OCTOSPI2", 340 + "SDMMC1", 341 + "SDMMC2", 342 + "SDMMC3", 343 + "GPU", 344 + "LTDC_CMN", 345 + "DSI_CMN", 346 + "RESERVED", 347 + "RESERVED", 348 + "LVDS", 349 + "RESERVED", 350 + "CSI", 351 + "DCMIPP", 352 + "DCMI_PSSI", 353 + "VDEC", 354 + "VENC", 355 + "RESERVED", 356 + "RNG", 357 + "PKA", 358 + "SAES", 359 + "HASH", 360 + "CRYP1", 361 + "CRYP2", 362 + "IWDG1", 363 + "IWDG2", 364 + "IWDG3", 365 + "IWDG4", 366 + "IWDG5", 367 + "WWDG1", 368 + "WWDG2", 369 + "RESERVED", 370 + "VREFBUF", 371 + "DTS", 372 + "RAMCFG", 373 + "CRC", 374 + "SERC", 375 + "OCTOSPIM", 376 + "GICV2M", 377 + "RESERVED", 378 + "I3C1", 379 + "I3C2", 380 + "I3C3", 381 + "I3C4", 
382 + "ICACHE_DCACHE", 383 + "LTDC_L0L1", 384 + "LTDC_L2", 385 + "LTDC_ROT", 386 + "DSI_TRIG", 387 + "DSI_RDFIFO", 388 + "RESERVED", 389 + "OTFDEC1", 390 + "OTFDEC2", 391 + "IAC", 392 + }; 393 + struct rifsc_risup_debug_data { 394 + char dev_name[15]; 395 + u8 dev_cid; 396 + u8 dev_sem_cids; 397 + u8 dev_id; 398 + bool dev_cid_filt_en; 399 + bool dev_sem_en; 400 + bool dev_priv; 401 + bool dev_sec; 402 + }; 403 + 404 + struct rifsc_rimu_debug_data { 405 + char m_name[11]; 406 + u8 m_cid; 407 + bool cidsel; 408 + bool m_sec; 409 + bool m_priv; 410 + }; 411 + 412 + struct rifsc_subreg_debug_data { 413 + bool sr_sec; 414 + bool sr_priv; 415 + u8 sr_cid; 416 + bool sr_rlock; 417 + bool sr_enable; 418 + u16 sr_start; 419 + u16 sr_length; 420 + }; 421 + 422 + struct stm32_rifsc_resources_names { 423 + const char **device_names; 424 + const char **initiator_names; 425 + }; 426 + struct rifsc_dbg_private { 427 + const struct stm32_rifsc_resources_names *res_names; 428 + void __iomem *mmio; 429 + unsigned int nb_risup; 430 + unsigned int nb_rimu; 431 + unsigned int nb_risal; 432 + }; 433 + 434 + static const struct stm32_rifsc_resources_names rifsc_mp21_res_names = { 435 + .device_names = stm32mp21_rifsc_risup_names, 436 + .initiator_names = stm32mp21_rifsc_rimu_names, 437 + }; 438 + 439 + static const struct stm32_rifsc_resources_names rifsc_mp25_res_names = { 440 + .device_names = stm32mp25_rifsc_risup_names, 441 + .initiator_names = stm32mp25_rifsc_rimu_names, 442 + }; 443 + 444 + static void stm32_rifsc_fill_rimu_dbg_entry(struct rifsc_dbg_private *rifsc, 445 + struct rifsc_rimu_debug_data *dbg_entry, int i) 446 + { 447 + const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names; 448 + u32 rimc_attr = readl_relaxed(rifsc->mmio + RIFSC_RIMC_ATTR0 + 0x4 * i); 449 + 450 + snprintf(dbg_entry->m_name, sizeof(dbg_entry->m_name), "%s", dbg_names->initiator_names[i]); 451 + dbg_entry->m_cid = FIELD_GET(RIFSC_RIMC_MCID_MASK, rimc_attr); 452 + dbg_entry->cidsel = 
rimc_attr & RIFSC_RIMC_CIDSEL; 453 + dbg_entry->m_sec = rimc_attr & RIFSC_RIMC_MSEC; 454 + dbg_entry->m_priv = rimc_attr & RIFSC_RIMC_MPRIV; 455 + } 456 + 457 + static void stm32_rifsc_fill_dev_dbg_entry(struct rifsc_dbg_private *rifsc, 458 + struct rifsc_risup_debug_data *dbg_entry, int i) 459 + { 460 + const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names; 461 + u32 cid_cfgr, sec_cfgr, priv_cfgr; 462 + u8 reg_id = i / IDS_PER_RISC_SEC_PRIV_REGS; 463 + u8 reg_offset = i % IDS_PER_RISC_SEC_PRIV_REGS; 464 + 465 + cid_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * i); 466 + sec_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id); 467 + priv_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PRIVCFGR0 + 0x4 * reg_id); 468 + 469 + snprintf(dbg_entry->dev_name, sizeof(dbg_entry->dev_name), "%s", 470 + dbg_names->device_names[i]); 471 + dbg_entry->dev_id = i; 472 + dbg_entry->dev_cid_filt_en = cid_cfgr & CIDCFGR_CFEN; 473 + dbg_entry->dev_sem_en = cid_cfgr & CIDCFGR_SEMEN; 474 + dbg_entry->dev_cid = FIELD_GET(RIFSC_RISC_SCID_MASK, cid_cfgr); 475 + dbg_entry->dev_sem_cids = FIELD_GET(RIFSC_RISC_SEMWL_MASK, cid_cfgr); 476 + dbg_entry->dev_sec = sec_cfgr & BIT(reg_offset) ? true : false; 477 + dbg_entry->dev_priv = priv_cfgr & BIT(reg_offset) ? 
true : false; 478 + } 479 + 480 + 481 + static void stm32_rifsc_fill_subreg_dbg_entry(struct rifsc_dbg_private *rifsc, 482 + struct rifsc_subreg_debug_data *dbg_entry, int i, 483 + int j) 484 + { 485 + u32 risc_xcfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG0_ACFGR + 0x10 * i + 0x8 * j); 486 + u32 risc_xaddr; 487 + 488 + dbg_entry->sr_sec = risc_xcfgr & RIFSC_RISC_SRSEC; 489 + dbg_entry->sr_priv = risc_xcfgr & RIFSC_RISC_SRPRIV; 490 + dbg_entry->sr_cid = FIELD_GET(RIFSC_RISC_SRCID_MASK, risc_xcfgr); 491 + dbg_entry->sr_rlock = risc_xcfgr & RIFSC_RISC_SRRLOCK; 492 + dbg_entry->sr_enable = risc_xcfgr & RIFSC_RISC_SREN; 493 + if (i == 2) { 494 + risc_xaddr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG3_AADDR + 0x8 * j); 495 + dbg_entry->sr_length = FIELD_GET(RIFSC_RISC_SRLENGTH_MASK, risc_xaddr); 496 + dbg_entry->sr_start = FIELD_GET(RIFSC_RISC_SRSTART_MASK, risc_xaddr); 497 + } else { 498 + dbg_entry->sr_start = 0; 499 + dbg_entry->sr_length = U16_MAX; 500 + } 501 + } 502 + 503 + static int stm32_rifsc_conf_dump_show(struct seq_file *s, void *data) 504 + { 505 + struct rifsc_dbg_private *rifsc = (struct rifsc_dbg_private *)s->private; 506 + int i, j; 507 + 508 + seq_puts(s, "\n=============================================\n"); 509 + seq_puts(s, " RIFSC dump\n"); 510 + seq_puts(s, "=============================================\n\n"); 511 + 512 + seq_puts(s, "\n=============================================\n"); 513 + seq_puts(s, " RISUP dump\n"); 514 + seq_puts(s, "=============================================\n"); 515 + 516 + seq_printf(s, "\n| %-15s |", "Peripheral name"); 517 + seq_puts(s, "| Firewall ID |"); 518 + seq_puts(s, "| N/SECURE |"); 519 + seq_puts(s, "| N/PRIVILEGED |"); 520 + seq_puts(s, "| CID filtering |"); 521 + seq_puts(s, "| Semaphore mode |"); 522 + seq_puts(s, "| SCID |"); 523 + seq_printf(s, "| %7s |\n", "SEMWL"); 524 + 525 + for (i = 0; i < RIFSC_RISUP_ENTRIES && i < rifsc->nb_risup; i++) { 526 + struct rifsc_risup_debug_data d_dbg_entry; 527 
+ 528 + stm32_rifsc_fill_dev_dbg_entry(rifsc, &d_dbg_entry, i); 529 + 530 + seq_printf(s, "| %-15s |", d_dbg_entry.dev_name); 531 + seq_printf(s, "| %-11d |", d_dbg_entry.dev_id); 532 + seq_printf(s, "| %-8s |", d_dbg_entry.dev_sec ? "SEC" : "NSEC"); 533 + seq_printf(s, "| %-12s |", d_dbg_entry.dev_priv ? "PRIV" : "NPRIV"); 534 + seq_printf(s, "| %-13s |", str_enabled_disabled(d_dbg_entry.dev_cid_filt_en)); 535 + seq_printf(s, "| %-14s |", str_enabled_disabled(d_dbg_entry.dev_sem_en)); 536 + seq_printf(s, "| %-4d |", d_dbg_entry.dev_cid); 537 + seq_printf(s, "| %#-7x |\n", d_dbg_entry.dev_sem_cids); 538 + } 539 + 540 + seq_puts(s, "\n=============================================\n"); 541 + seq_puts(s, " RIMU dump\n"); 542 + seq_puts(s, "=============================================\n"); 543 + 544 + seq_puts(s, "| RIMU's name |"); 545 + seq_puts(s, "| CIDSEL |"); 546 + seq_puts(s, "| MCID |"); 547 + seq_puts(s, "| N/SECURE |"); 548 + seq_puts(s, "| N/PRIVILEGED |\n"); 549 + 550 + for (i = 0; i < RIFSC_RIMU_ENTRIES && rifsc->nb_rimu; i++) { 551 + struct rifsc_rimu_debug_data m_dbg_entry; 552 + 553 + stm32_rifsc_fill_rimu_dbg_entry(rifsc, &m_dbg_entry, i); 554 + 555 + seq_printf(s, "| %-11s |", m_dbg_entry.m_name); 556 + seq_printf(s, "| %-6s |", m_dbg_entry.cidsel ? "CIDSEL" : ""); 557 + seq_printf(s, "| %-4d |", m_dbg_entry.m_cid); 558 + seq_printf(s, "| %-8s |", m_dbg_entry.m_sec ? "SEC" : "NSEC"); 559 + seq_printf(s, "| %-12s |\n", m_dbg_entry.m_priv ? "PRIV" : "NPRIV"); 560 + } 561 + 562 + if (rifsc->nb_risal > 0) { 563 + seq_puts(s, "\n=============================================\n"); 564 + seq_puts(s, " RISAL dump\n"); 565 + seq_puts(s, "=============================================\n"); 566 + 567 + seq_puts(s, "| Memory |"); 568 + seq_puts(s, "| Subreg. |"); 569 + seq_puts(s, "| N/SECURE |"); 570 + seq_puts(s, "| N/PRIVILEGED |"); 571 + seq_puts(s, "| Subreg. CID |"); 572 + seq_puts(s, "| Resource lock |"); 573 + seq_puts(s, "| Subreg. 
enable |"); 574 + seq_puts(s, "| Subreg. start |"); 575 + seq_puts(s, "| Subreg. end |\n"); 576 + 577 + for (i = 0; i < rifsc->nb_risal; i++) { 578 + for (j = 0; j < RIFSC_RISAL_SUBREGIONS; j++) { 579 + struct rifsc_subreg_debug_data sr_dbg_entry; 580 + 581 + stm32_rifsc_fill_subreg_dbg_entry(rifsc, &sr_dbg_entry, i, j); 582 + 583 + seq_printf(s, "| LPSRAM%1d |", i + 1); 584 + seq_printf(s, "| %1s |", (j == 0) ? "A" : "B"); 585 + seq_printf(s, "| %-8s |", sr_dbg_entry.sr_sec ? "SEC" : "NSEC"); 586 + seq_printf(s, "| %-12s |", sr_dbg_entry.sr_priv ? "PRIV" : "NPRIV"); 587 + seq_printf(s, "| 0x%-9x |", sr_dbg_entry.sr_cid); 588 + seq_printf(s, "| %-13s |", 589 + sr_dbg_entry.sr_rlock ? "locked (1)" : "unlocked (0)"); 590 + seq_printf(s, "| %-14s |", 591 + str_enabled_disabled(sr_dbg_entry.sr_enable)); 592 + seq_printf(s, "| 0x%-11x |", sr_dbg_entry.sr_start); 593 + seq_printf(s, "| 0x%-11x |\n", sr_dbg_entry.sr_start + 594 + sr_dbg_entry.sr_length - 1); 595 + } 596 + } 597 + } 598 + 599 + return 0; 600 + } 601 + DEFINE_SHOW_ATTRIBUTE(stm32_rifsc_conf_dump); 602 + 603 + static int stm32_rifsc_register_debugfs(struct stm32_firewall_controller *rifsc_controller, 604 + u32 nb_risup, u32 nb_rimu, u32 nb_risal) 605 + { 606 + struct rifsc_dbg_private *rifsc_priv; 607 + struct dentry *root = NULL; 608 + 609 + rifsc_priv = devm_kzalloc(rifsc_controller->dev, sizeof(*rifsc_priv), GFP_KERNEL); 610 + if (!rifsc_priv) 611 + return -ENOMEM; 612 + 613 + rifsc_priv->mmio = rifsc_controller->mmio; 614 + rifsc_priv->nb_risup = nb_risup; 615 + rifsc_priv->nb_rimu = nb_rimu; 616 + rifsc_priv->nb_risal = nb_risal; 617 + rifsc_priv->res_names = of_device_get_match_data(rifsc_controller->dev); 618 + 619 + root = debugfs_lookup("stm32_firewall", NULL); 620 + if (!root) 621 + root = debugfs_create_dir("stm32_firewall", NULL); 622 + 623 + if (IS_ERR(root)) 624 + return PTR_ERR(root); 625 + 626 + debugfs_create_file("rifsc", 0444, root, rifsc_priv, &stm32_rifsc_conf_dump_fops); 627 + 628 + 
return 0; 629 + } 630 + #endif /* defined(CONFIG_DEBUG_FS) */ 75 631 76 632 static bool stm32_rifsc_is_semaphore_available(void __iomem *addr) 77 633 { ··· 769 207 rifsc_controller->release_access = stm32_rifsc_release_access; 770 208 771 209 /* Get number of RIFSC entries*/ 772 - nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK; 773 - nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK; 774 - nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK; 210 + nb_risup = FIELD_GET(HWCFGR2_CONF1_MASK, 211 + readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2)); 212 + nb_rimu = FIELD_GET(HWCFGR2_CONF2_MASK, 213 + readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2)); 214 + nb_risal = FIELD_GET(HWCFGR2_CONF3_MASK, 215 + readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2)); 216 + /* 217 + * On STM32MP21, RIFSC_RISC_HWCFGR2 shows an incorrect number of RISAL (NUM_RISAL is 3 218 + * instead of 0). A software workaround is implemented using the st,mem-map property in the 219 + * device tree. This property is absent or left empty if there is no RISAL. 
220 + */ 221 + if (of_device_is_compatible(np, "st,stm32mp21-rifsc")) 222 + nb_risal = 0; 775 223 rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal; 776 224 777 225 platform_set_drvdata(pdev, rifsc_controller); ··· 800 228 return rc; 801 229 } 802 230 231 + #if defined(CONFIG_DEBUG_FS) 232 + rc = stm32_rifsc_register_debugfs(rifsc_controller, nb_risup, nb_rimu, nb_risal); 233 + if (rc) 234 + return dev_err_probe(rifsc_controller->dev, rc, "Failed creating debugfs entry\n"); 235 + #endif 236 + 803 237 /* Populate all allowed nodes */ 804 238 return of_platform_populate(np, NULL, NULL, &pdev->dev); 805 239 } 806 240 807 241 static const struct of_device_id stm32_rifsc_of_match[] = { 808 - { .compatible = "st,stm32mp25-rifsc" }, 242 + { 243 + .compatible = "st,stm32mp25-rifsc", 244 + #if defined(CONFIG_DEBUG_FS) 245 + .data = &rifsc_mp25_res_names, 246 + #endif 247 + }, 248 + { 249 + .compatible = "st,stm32mp21-rifsc", 250 + #if defined(CONFIG_DEBUG_FS) 251 + .data = &rifsc_mp21_res_names, 252 + #endif 253 + }, 809 254 {} 810 255 }; 811 256 MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
-2
drivers/bus/sunxi-rsb.c
··· 373 373 unlock: 374 374 mutex_unlock(&rsb->lock); 375 375 376 - pm_runtime_mark_last_busy(rsb->dev); 377 376 pm_runtime_put_autosuspend(rsb->dev); 378 377 379 378 return ret; ··· 416 417 417 418 mutex_unlock(&rsb->lock); 418 419 419 - pm_runtime_mark_last_busy(rsb->dev); 420 420 pm_runtime_put_autosuspend(rsb->dev); 421 421 422 422 return ret;
+9 -2
drivers/bus/ti-sysc.c
··· 48 48 SOC_UNKNOWN, 49 49 SOC_2420, 50 50 SOC_2430, 51 + SOC_AM33, 51 52 SOC_3430, 52 53 SOC_AM35, 53 54 SOC_3630, ··· 2913 2912 static const struct soc_device_attribute sysc_soc_match[] = { 2914 2913 SOC_FLAG("OMAP242*", SOC_2420), 2915 2914 SOC_FLAG("OMAP243*", SOC_2430), 2915 + SOC_FLAG("AM33*", SOC_AM33), 2916 2916 SOC_FLAG("AM35*", SOC_AM35), 2917 2917 SOC_FLAG("OMAP3[45]*", SOC_3430), 2918 2918 SOC_FLAG("OMAP3[67]*", SOC_3630), ··· 3119 3117 * can be dropped if we stop supporting old beagleboard revisions 3120 3118 * A to B4 at some point. 3121 3119 */ 3122 - if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) 3120 + switch (sysc_soc->soc) { 3121 + case SOC_AM33: 3122 + case SOC_3430: 3123 + case SOC_AM35: 3123 3124 error = -ENXIO; 3124 - else 3125 + break; 3126 + default: 3125 3127 error = -EBUSY; 3128 + } 3126 3129 3127 3130 if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && 3128 3131 (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+8 -4
drivers/clk/at91/pmc.c
··· 115 115 /* Address in SECURAM that say if we suspend to backup mode. */ 116 116 static void __iomem *at91_pmc_backup_suspend; 117 117 118 - static int at91_pmc_suspend(void) 118 + static int at91_pmc_suspend(void *data) 119 119 { 120 120 unsigned int backup; 121 121 ··· 129 129 return clk_save_context(); 130 130 } 131 131 132 - static void at91_pmc_resume(void) 132 + static void at91_pmc_resume(void *data) 133 133 { 134 134 unsigned int backup; 135 135 ··· 143 143 clk_restore_context(); 144 144 } 145 145 146 - static struct syscore_ops pmc_syscore_ops = { 146 + static const struct syscore_ops pmc_syscore_ops = { 147 147 .suspend = at91_pmc_suspend, 148 148 .resume = at91_pmc_resume, 149 + }; 150 + 151 + static struct syscore pmc_syscore = { 152 + .ops = &pmc_syscore_ops, 149 153 }; 150 154 151 155 static const struct of_device_id pmc_dt_ids[] = { ··· 189 185 return -ENOMEM; 190 186 } 191 187 192 - register_syscore_ops(&pmc_syscore_ops); 188 + register_syscore(&pmc_syscore); 193 189 194 190 return 0; 195 191 }
-7
drivers/clk/davinci/psc-da850.c
··· 6 6 */ 7 7 8 8 #include <linux/clk-provider.h> 9 - #include <linux/reset-controller.h> 10 9 #include <linux/clk.h> 11 10 #include <linux/clkdev.h> 12 11 #include <linux/init.h> ··· 65 66 "fck", "ecap.1", 66 67 "fck", "ecap.2"); 67 68 68 - static struct reset_control_lookup da850_psc0_reset_lookup_table[] = { 69 - RESET_LOOKUP("da850-psc0", 15, "davinci-rproc.0", NULL), 70 - }; 71 - 72 69 static int da850_psc0_init(struct device *dev, void __iomem *base) 73 70 { 74 - reset_controller_add_lookup(da850_psc0_reset_lookup_table, 75 - ARRAY_SIZE(da850_psc0_reset_lookup_table)); 76 71 return davinci_psc_register_clocks(dev, da850_psc0_info, 16, base); 77 72 } 78 73
+8 -4
drivers/clk/imx/clk-vf610.c
··· 139 139 return clk; 140 140 }; 141 141 142 - static int vf610_clk_suspend(void) 142 + static int vf610_clk_suspend(void *data) 143 143 { 144 144 int i; 145 145 ··· 156 156 return 0; 157 157 } 158 158 159 - static void vf610_clk_resume(void) 159 + static void vf610_clk_resume(void *data) 160 160 { 161 161 int i; 162 162 ··· 171 171 writel_relaxed(ccgr[i], CCM_CCGRx(i)); 172 172 } 173 173 174 - static struct syscore_ops vf610_clk_syscore_ops = { 174 + static const struct syscore_ops vf610_clk_syscore_ops = { 175 175 .suspend = vf610_clk_suspend, 176 176 .resume = vf610_clk_resume, 177 + }; 178 + 179 + static struct syscore vf610_clk_syscore = { 180 + .ops = &vf610_clk_syscore_ops, 177 181 }; 178 182 179 183 static void __init vf610_clocks_init(struct device_node *ccm_node) ··· 466 462 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) 467 463 clk_prepare_enable(clk[clks_init_on[i]]); 468 464 469 - register_syscore_ops(&vf610_clk_syscore_ops); 465 + register_syscore(&vf610_clk_syscore); 470 466 471 467 /* Add the clocks to provider list */ 472 468 clk_data.clks = clk;
+1 -1
drivers/clk/ingenic/jz4725b-cgu.c
··· 268 268 if (retval) 269 269 pr_err("%s: failed to register CGU Clocks\n", __func__); 270 270 271 - ingenic_cgu_register_syscore_ops(cgu); 271 + ingenic_cgu_register_syscore(cgu); 272 272 } 273 273 CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init);
+1 -1
drivers/clk/ingenic/jz4740-cgu.c
··· 266 266 if (retval) 267 267 pr_err("%s: failed to register CGU Clocks\n", __func__); 268 268 269 - ingenic_cgu_register_syscore_ops(cgu); 269 + ingenic_cgu_register_syscore(cgu); 270 270 } 271 271 CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init);
+1 -1
drivers/clk/ingenic/jz4755-cgu.c
··· 337 337 if (retval) 338 338 pr_err("%s: failed to register CGU Clocks\n", __func__); 339 339 340 - ingenic_cgu_register_syscore_ops(cgu); 340 + ingenic_cgu_register_syscore(cgu); 341 341 } 342 342 /* 343 343 * CGU has some children devices, this is useful for probing children devices
+1 -1
drivers/clk/ingenic/jz4760-cgu.c
··· 436 436 if (retval) 437 437 pr_err("%s: failed to register CGU Clocks\n", __func__); 438 438 439 - ingenic_cgu_register_syscore_ops(cgu); 439 + ingenic_cgu_register_syscore(cgu); 440 440 } 441 441 442 442 /* We only probe via devicetree, no need for a platform driver */
+1 -1
drivers/clk/ingenic/jz4770-cgu.c
··· 456 456 if (retval) 457 457 pr_err("%s: failed to register CGU Clocks\n", __func__); 458 458 459 - ingenic_cgu_register_syscore_ops(cgu); 459 + ingenic_cgu_register_syscore(cgu); 460 460 } 461 461 462 462 /* We only probe via devicetree, no need for a platform driver */
+1 -1
drivers/clk/ingenic/jz4780-cgu.c
··· 803 803 return; 804 804 } 805 805 806 - ingenic_cgu_register_syscore_ops(cgu); 806 + ingenic_cgu_register_syscore(cgu); 807 807 } 808 808 CLK_OF_DECLARE_DRIVER(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init);
+9 -5
drivers/clk/ingenic/pm.c
··· 15 15 16 16 static void __iomem * __maybe_unused ingenic_cgu_base; 17 17 18 - static int __maybe_unused ingenic_cgu_pm_suspend(void) 18 + static int __maybe_unused ingenic_cgu_pm_suspend(void *data) 19 19 { 20 20 u32 val = readl(ingenic_cgu_base + CGU_REG_LCR); 21 21 ··· 24 24 return 0; 25 25 } 26 26 27 - static void __maybe_unused ingenic_cgu_pm_resume(void) 27 + static void __maybe_unused ingenic_cgu_pm_resume(void *data) 28 28 { 29 29 u32 val = readl(ingenic_cgu_base + CGU_REG_LCR); 30 30 31 31 writel(val & ~LCR_LOW_POWER_MODE, ingenic_cgu_base + CGU_REG_LCR); 32 32 } 33 33 34 - static struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = { 34 + static const struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = { 35 35 .suspend = ingenic_cgu_pm_suspend, 36 36 .resume = ingenic_cgu_pm_resume, 37 37 }; 38 38 39 - void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu) 39 + static struct syscore __maybe_unused ingenic_cgu_pm = { 40 + .ops = &ingenic_cgu_pm_ops, 41 + }; 42 + 43 + void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu) 40 44 { 41 45 if (IS_ENABLED(CONFIG_PM_SLEEP)) { 42 46 ingenic_cgu_base = cgu->base; 43 - register_syscore_ops(&ingenic_cgu_pm_ops); 47 + register_syscore(&ingenic_cgu_pm); 44 48 } 45 49 }
+1 -1
drivers/clk/ingenic/pm.h
··· 7 7 8 8 struct ingenic_cgu; 9 9 10 - void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu); 10 + void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu); 11 11 12 12 #endif /* DRIVERS_CLK_INGENIC_PM_H */
+8 -4
drivers/clk/ingenic/tcu.c
··· 455 455 return ret; 456 456 } 457 457 458 - static int __maybe_unused tcu_pm_suspend(void) 458 + static int __maybe_unused tcu_pm_suspend(void *data) 459 459 { 460 460 struct ingenic_tcu *tcu = ingenic_tcu; 461 461 ··· 465 465 return 0; 466 466 } 467 467 468 - static void __maybe_unused tcu_pm_resume(void) 468 + static void __maybe_unused tcu_pm_resume(void *data) 469 469 { 470 470 struct ingenic_tcu *tcu = ingenic_tcu; 471 471 ··· 473 473 clk_enable(tcu->clk); 474 474 } 475 475 476 - static struct syscore_ops __maybe_unused tcu_pm_ops = { 476 + static const struct syscore_ops __maybe_unused tcu_pm_ops = { 477 477 .suspend = tcu_pm_suspend, 478 478 .resume = tcu_pm_resume, 479 + }; 480 + 481 + static struct syscore __maybe_unused tcu_pm = { 482 + .ops = &tcu_pm_ops, 479 483 }; 480 484 481 485 static void __init ingenic_tcu_init(struct device_node *np) ··· 490 486 pr_crit("Failed to initialize TCU clocks: %d\n", ret); 491 487 492 488 if (IS_ENABLED(CONFIG_PM_SLEEP)) 493 - register_syscore_ops(&tcu_pm_ops); 489 + register_syscore(&tcu_pm); 494 490 } 495 491 496 492 CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init);
+1 -1
drivers/clk/ingenic/x1000-cgu.c
··· 556 556 return; 557 557 } 558 558 559 - ingenic_cgu_register_syscore_ops(cgu); 559 + ingenic_cgu_register_syscore(cgu); 560 560 } 561 561 /* 562 562 * CGU has some children devices, this is useful for probing children devices
+1 -1
drivers/clk/ingenic/x1830-cgu.c
··· 463 463 return; 464 464 } 465 465 466 - ingenic_cgu_register_syscore_ops(cgu); 466 + ingenic_cgu_register_syscore(cgu); 467 467 } 468 468 /* 469 469 * CGU has some children devices, this is useful for probing children devices
+8 -4
drivers/clk/mvebu/common.c
··· 215 215 return ERR_PTR(-ENODEV); 216 216 } 217 217 218 - static int mvebu_clk_gating_suspend(void) 218 + static int mvebu_clk_gating_suspend(void *data) 219 219 { 220 220 ctrl->saved_reg = readl(ctrl->base); 221 221 return 0; 222 222 } 223 223 224 - static void mvebu_clk_gating_resume(void) 224 + static void mvebu_clk_gating_resume(void *data) 225 225 { 226 226 writel(ctrl->saved_reg, ctrl->base); 227 227 } 228 228 229 - static struct syscore_ops clk_gate_syscore_ops = { 229 + static const struct syscore_ops clk_gate_syscore_ops = { 230 230 .suspend = mvebu_clk_gating_suspend, 231 231 .resume = mvebu_clk_gating_resume, 232 + }; 233 + 234 + static struct syscore clk_gate_syscore = { 235 + .ops = &clk_gate_syscore_ops, 232 236 }; 233 237 234 238 void __init mvebu_clk_gating_setup(struct device_node *np, ··· 288 284 289 285 of_clk_add_provider(np, clk_gating_get_src, ctrl); 290 286 291 - register_syscore_ops(&clk_gate_syscore_ops); 287 + register_syscore(&clk_gate_syscore); 292 288 293 289 return; 294 290 gates_out:
+8 -4
drivers/clk/rockchip/clk-rk3288.c
··· 871 871 872 872 static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)]; 873 873 874 - static int rk3288_clk_suspend(void) 874 + static int rk3288_clk_suspend(void *data) 875 875 { 876 876 int i, reg_id; 877 877 ··· 906 906 return 0; 907 907 } 908 908 909 - static void rk3288_clk_resume(void) 909 + static void rk3288_clk_resume(void *data) 910 910 { 911 911 int i, reg_id; 912 912 ··· 923 923 writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON); 924 924 } 925 925 926 - static struct syscore_ops rk3288_clk_syscore_ops = { 926 + static const struct syscore_ops rk3288_clk_syscore_ops = { 927 927 .suspend = rk3288_clk_suspend, 928 928 .resume = rk3288_clk_resume, 929 + }; 930 + 931 + static struct syscore rk3288_clk_syscore = { 932 + .ops = &rk3288_clk_syscore_ops, 929 933 }; 930 934 931 935 static void __init rk3288_common_init(struct device_node *np, ··· 980 976 981 977 rockchip_register_restart_notifier(ctx, RK3288_GLB_SRST_FST, 982 978 rk3288_clk_shutdown); 983 - register_syscore_ops(&rk3288_clk_syscore_ops); 979 + register_syscore(&rk3288_clk_syscore); 984 980 985 981 rockchip_clk_of_add_provider(np, ctx); 986 982 }
+8 -4
drivers/clk/samsung/clk-s5pv210-audss.c
··· 36 36 {ASS_CLK_GATE, 0}, 37 37 }; 38 38 39 - static int s5pv210_audss_clk_suspend(void) 39 + static int s5pv210_audss_clk_suspend(void *data) 40 40 { 41 41 int i; 42 42 ··· 46 46 return 0; 47 47 } 48 48 49 - static void s5pv210_audss_clk_resume(void) 49 + static void s5pv210_audss_clk_resume(void *data) 50 50 { 51 51 int i; 52 52 ··· 54 54 writel(reg_save[i][1], reg_base + reg_save[i][0]); 55 55 } 56 56 57 - static struct syscore_ops s5pv210_audss_clk_syscore_ops = { 57 + static const struct syscore_ops s5pv210_audss_clk_syscore_ops = { 58 58 .suspend = s5pv210_audss_clk_suspend, 59 59 .resume = s5pv210_audss_clk_resume, 60 + }; 61 + 62 + static struct syscore s5pv210_audss_clk_syscore = { 63 + .ops = &s5pv210_audss_clk_syscore_ops, 60 64 }; 61 65 #endif /* CONFIG_PM_SLEEP */ 62 66 ··· 179 175 } 180 176 181 177 #ifdef CONFIG_PM_SLEEP 182 - register_syscore_ops(&s5pv210_audss_clk_syscore_ops); 178 + register_syscore(&s5pv210_audss_clk_syscore); 183 179 #endif 184 180 185 181 return 0;
+8 -4
drivers/clk/samsung/clk.c
··· 271 271 } 272 272 273 273 #ifdef CONFIG_PM_SLEEP 274 - static int samsung_clk_suspend(void) 274 + static int samsung_clk_suspend(void *data) 275 275 { 276 276 struct samsung_clock_reg_cache *reg_cache; 277 277 ··· 284 284 return 0; 285 285 } 286 286 287 - static void samsung_clk_resume(void) 287 + static void samsung_clk_resume(void *data) 288 288 { 289 289 struct samsung_clock_reg_cache *reg_cache; 290 290 ··· 293 293 reg_cache->rd_num); 294 294 } 295 295 296 - static struct syscore_ops samsung_clk_syscore_ops = { 296 + static const struct syscore_ops samsung_clk_syscore_ops = { 297 297 .suspend = samsung_clk_suspend, 298 298 .resume = samsung_clk_resume, 299 + }; 300 + 301 + static struct syscore samsung_clk_syscore = { 302 + .ops = &samsung_clk_syscore_ops, 299 303 }; 300 304 301 305 void samsung_clk_extended_sleep_init(void __iomem *reg_base, ··· 320 316 panic("could not allocate register dump storage.\n"); 321 317 322 318 if (list_empty(&clock_reg_cache_list)) 323 - register_syscore_ops(&samsung_clk_syscore_ops); 319 + register_syscore(&samsung_clk_syscore); 324 320 325 321 reg_cache->reg_base = reg_base; 326 322 reg_cache->rd_num = nr_rdump;
+8 -4
drivers/clk/tegra/clk-tegra210.c
··· 3444 3444 static u32 spare_reg_ctx, misc_clk_enb_ctx, clk_msk_arm_ctx; 3445 3445 static u32 cpu_softrst_ctx[3]; 3446 3446 3447 - static int tegra210_clk_suspend(void) 3447 + static int tegra210_clk_suspend(void *data) 3448 3448 { 3449 3449 unsigned int i; 3450 3450 ··· 3465 3465 return 0; 3466 3466 } 3467 3467 3468 - static void tegra210_clk_resume(void) 3468 + static void tegra210_clk_resume(void *data) 3469 3469 { 3470 3470 unsigned int i; 3471 3471 ··· 3523 3523 } 3524 3524 #endif 3525 3525 3526 - static struct syscore_ops tegra_clk_syscore_ops = { 3526 + static const struct syscore_ops tegra_clk_syscore_ops = { 3527 3527 #ifdef CONFIG_PM_SLEEP 3528 3528 .suspend = tegra210_clk_suspend, 3529 3529 .resume = tegra210_clk_resume, 3530 3530 #endif 3531 + }; 3532 + 3533 + static struct syscore tegra_clk_syscore = { 3534 + .ops = &tegra_clk_syscore_ops, 3531 3535 }; 3532 3536 3533 3537 static struct tegra_cpu_car_ops tegra210_cpu_car_ops = { ··· 3817 3813 3818 3814 tegra_cpu_car_ops = &tegra210_cpu_car_ops; 3819 3815 3820 - register_syscore_ops(&tegra_clk_syscore_ops); 3816 + register_syscore(&tegra_clk_syscore); 3821 3817 } 3822 3818 CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
+8 -4
drivers/clocksource/timer-armada-370-xp.c
··· 207 207 208 208 static u32 timer0_ctrl_reg, timer0_local_ctrl_reg; 209 209 210 - static int armada_370_xp_timer_suspend(void) 210 + static int armada_370_xp_timer_suspend(void *data) 211 211 { 212 212 timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF); 213 213 timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF); 214 214 return 0; 215 215 } 216 216 217 - static void armada_370_xp_timer_resume(void) 217 + static void armada_370_xp_timer_resume(void *data) 218 218 { 219 219 writel(0xffffffff, timer_base + TIMER0_VAL_OFF); 220 220 writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); ··· 222 222 writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF); 223 223 } 224 224 225 - static struct syscore_ops armada_370_xp_timer_syscore_ops = { 225 + static const struct syscore_ops armada_370_xp_timer_syscore_ops = { 226 226 .suspend = armada_370_xp_timer_suspend, 227 227 .resume = armada_370_xp_timer_resume, 228 + }; 229 + 230 + static struct syscore armada_370_xp_timer_syscore = { 231 + .ops = &armada_370_xp_timer_syscore_ops, 228 232 }; 229 233 230 234 static unsigned long armada_370_delay_timer_read(void) ··· 328 324 return res; 329 325 } 330 326 331 - register_syscore_ops(&armada_370_xp_timer_syscore_ops); 327 + register_syscore(&armada_370_xp_timer_syscore); 332 328 333 329 return 0; 334 330 }
+8 -4
drivers/cpuidle/cpuidle-psci.c
··· 177 177 } 178 178 } 179 179 180 - static int psci_idle_syscore_suspend(void) 180 + static int psci_idle_syscore_suspend(void *data) 181 181 { 182 182 psci_idle_syscore_switch(true); 183 183 return 0; 184 184 } 185 185 186 - static void psci_idle_syscore_resume(void) 186 + static void psci_idle_syscore_resume(void *data) 187 187 { 188 188 psci_idle_syscore_switch(false); 189 189 } 190 190 191 - static struct syscore_ops psci_idle_syscore_ops = { 191 + static const struct syscore_ops psci_idle_syscore_ops = { 192 192 .suspend = psci_idle_syscore_suspend, 193 193 .resume = psci_idle_syscore_resume, 194 + }; 195 + 196 + static struct syscore psci_idle_syscore = { 197 + .ops = &psci_idle_syscore_ops, 194 198 }; 195 199 196 200 static void psci_idle_init_syscore(void) 197 201 { 198 202 if (psci_cpuidle_use_syscore) 199 - register_syscore_ops(&psci_idle_syscore_ops); 203 + register_syscore(&psci_idle_syscore); 200 204 } 201 205 202 206 static void psci_idle_init_cpuhp(void)
+17 -15
drivers/firmware/imx/imx-scu-irq.c
··· 203 203 struct mbox_chan *ch; 204 204 int ret = 0, i = 0; 205 205 206 + if (!of_parse_phandle_with_args(dev->of_node, "mboxes", 207 + "#mbox-cells", 0, &spec)) { 208 + i = of_alias_get_id(spec.np, "mu"); 209 + of_node_put(spec.np); 210 + } 211 + 212 + /* use mu1 as general mu irq channel if failed */ 213 + if (i < 0) 214 + i = 1; 215 + 216 + mu_resource_id = IMX_SC_R_MU_0A + i; 217 + 206 218 ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle); 207 219 if (ret) 208 220 return ret; ··· 226 214 cl->dev = dev; 227 215 cl->rx_callback = imx_scu_irq_callback; 228 216 217 + INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); 218 + 229 219 /* SCU general IRQ uses general interrupt channel 3 */ 230 220 ch = mbox_request_channel_byname(cl, "gip3"); 231 221 if (IS_ERR(ch)) { 232 222 ret = PTR_ERR(ch); 233 223 dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret); 234 - devm_kfree(dev, cl); 235 - return ret; 224 + goto free_cl; 236 225 } 237 - 238 - INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); 239 - 240 - if (!of_parse_phandle_with_args(dev->of_node, "mboxes", 241 - "#mbox-cells", 0, &spec)) 242 - i = of_alias_get_id(spec.np, "mu"); 243 - 244 - /* use mu1 as general mu irq channel if failed */ 245 - if (i < 0) 246 - i = 1; 247 - 248 - mu_resource_id = IMX_SC_R_MU_0A + i; 249 226 250 227 /* Create directory under /sysfs/firmware */ 251 228 wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj); ··· 254 253 255 254 free_ch: 256 255 mbox_free_channel(ch); 256 + free_cl: 257 + devm_kfree(dev, cl); 257 258 258 259 return ret; 259 260 } 260 - EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
+7 -4
drivers/firmware/imx/imx-scu.c
··· 73 73 -EACCES, /* IMX_SC_ERR_NOACCESS */ 74 74 -EACCES, /* IMX_SC_ERR_LOCKED */ 75 75 -ERANGE, /* IMX_SC_ERR_UNAVAILABLE */ 76 - -EEXIST, /* IMX_SC_ERR_NOTFOUND */ 77 - -EPERM, /* IMX_SC_ERR_NOPOWER */ 78 - -EPIPE, /* IMX_SC_ERR_IPC */ 76 + -ENOENT, /* IMX_SC_ERR_NOTFOUND */ 77 + -ENODEV, /* IMX_SC_ERR_NOPOWER */ 78 + -ECOMM, /* IMX_SC_ERR_IPC */ 79 79 -EBUSY, /* IMX_SC_ERR_BUSY */ 80 80 -EIO, /* IMX_SC_ERR_FAIL */ 81 81 }; ··· 324 324 } 325 325 326 326 sc_ipc->dev = dev; 327 - mutex_init(&sc_ipc->lock); 327 + ret = devm_mutex_init(dev, &sc_ipc->lock); 328 + if (ret) 329 + return ret; 328 330 init_completion(&sc_ipc->done); 329 331 330 332 imx_sc_ipc_handle = sc_ipc; ··· 354 352 .driver = { 355 353 .name = "imx-scu", 356 354 .of_match_table = imx_scu_match, 355 + .suppress_bind_attrs = true, 357 356 }, 358 357 .probe = imx_scu_probe, 359 358 };
+128 -27
drivers/firmware/ti_sci.c
··· 398 398 static inline int ti_sci_do_xfer(struct ti_sci_info *info, 399 399 struct ti_sci_xfer *xfer) 400 400 { 401 + struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 402 + bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED | 403 + TI_SCI_FLAG_REQ_ACK_ON_RECEIVED)); 401 404 int ret; 402 405 int timeout; 403 406 struct device *dev = info->dev; ··· 412 409 413 410 ret = 0; 414 411 415 - if (system_state <= SYSTEM_RUNNING) { 412 + if (response_expected && system_state <= SYSTEM_RUNNING) { 416 413 /* And we wait for the response. */ 417 414 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 418 415 if (!wait_for_completion_timeout(&xfer->done, timeout)) 419 416 ret = -ETIMEDOUT; 420 - } else { 417 + } else if (response_expected) { 421 418 /* 422 419 * If we are !running, we cannot use wait_for_completion_timeout 423 420 * during noirq phase, so we must manually poll the completion. ··· 1673 1670 static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, 1674 1671 u32 ctx_lo, u32 ctx_hi, u32 debug_flags) 1675 1672 { 1673 + u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ? 
1674 + TI_SCI_FLAG_REQ_GENERIC_NORESPONSE : 1675 + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED; 1676 1676 struct ti_sci_info *info; 1677 1677 struct ti_sci_msg_req_prepare_sleep *req; 1678 1678 struct ti_sci_msg_hdr *resp; ··· 1692 1686 dev = info->dev; 1693 1687 1694 1688 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP, 1695 - TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1689 + msg_flags, 1696 1690 sizeof(*req), sizeof(*resp)); 1697 1691 if (IS_ERR(xfer)) { 1698 1692 ret = PTR_ERR(xfer); ··· 1712 1706 goto fail; 1713 1707 } 1714 1708 1715 - resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1716 - 1717 - if (!ti_sci_is_response_ack(resp)) { 1718 - dev_err(dev, "Failed to prepare sleep\n"); 1719 - ret = -ENODEV; 1709 + if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) { 1710 + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1711 + if (!ti_sci_is_response_ack(resp)) { 1712 + dev_err(dev, "Failed to prepare sleep\n"); 1713 + ret = -ENODEV; 1714 + } 1720 1715 } 1721 1716 1722 1717 fail: ··· 3671 3664 } 3672 3665 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); 3673 3666 3667 + /* 3668 + * Iterate all device nodes that have a wakeup-source property and check if one 3669 + * of the possible phandles points to a Partial-IO system state. If it 3670 + * does resolve the device node to an actual device and check if wakeup is 3671 + * enabled. 3672 + */ 3673 + static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info) 3674 + { 3675 + struct device_node *wakeup_node = NULL; 3676 + 3677 + for_each_node_with_property(wakeup_node, "wakeup-source") { 3678 + struct of_phandle_iterator it; 3679 + int err; 3680 + 3681 + of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) { 3682 + struct platform_device *pdev; 3683 + bool may_wakeup; 3684 + 3685 + /* 3686 + * Continue if idle-state-name is not off-wake. Return 3687 + * value is the index of the string which should be 0 if 3688 + * off-wake is present. 
3689 + */ 3690 + if (of_property_match_string(it.node, "idle-state-name", "off-wake")) 3691 + continue; 3692 + 3693 + pdev = of_find_device_by_node(wakeup_node); 3694 + if (!pdev) 3695 + continue; 3696 + 3697 + may_wakeup = device_may_wakeup(&pdev->dev); 3698 + put_device(&pdev->dev); 3699 + 3700 + if (may_wakeup) { 3701 + dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n", 3702 + wakeup_node); 3703 + of_node_put(it.node); 3704 + of_node_put(wakeup_node); 3705 + return true; 3706 + } 3707 + } 3708 + } 3709 + 3710 + return false; 3711 + } 3712 + 3713 + static int ti_sci_sys_off_handler(struct sys_off_data *data) 3714 + { 3715 + struct ti_sci_info *info = data->cb_data; 3716 + const struct ti_sci_handle *handle = &info->handle; 3717 + bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info); 3718 + int ret; 3719 + 3720 + if (!enter_partial_io) 3721 + return NOTIFY_DONE; 3722 + 3723 + dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n"); 3724 + 3725 + ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0); 3726 + if (ret) { 3727 + dev_err(info->dev, 3728 + "Failed to enter Partial-IO %pe, trying to do an emergency restart\n", 3729 + ERR_PTR(ret)); 3730 + emergency_restart(); 3731 + } 3732 + 3733 + mdelay(5000); 3734 + emergency_restart(); 3735 + 3736 + return NOTIFY_DONE; 3737 + } 3738 + 3674 3739 static int tisci_reboot_handler(struct sys_off_data *data) 3675 3740 { 3676 3741 struct ti_sci_info *info = data->cb_data; ··· 3785 3706 } 3786 3707 } 3787 3708 3788 - static int __maybe_unused ti_sci_suspend(struct device *dev) 3709 + static int ti_sci_suspend(struct device *dev) 3789 3710 { 3790 3711 struct ti_sci_info *info = dev_get_drvdata(dev); 3791 3712 struct device *cpu_dev, *cpu_dev_max = NULL; ··· 3825 3746 return 0; 3826 3747 } 3827 3748 3828 - static int __maybe_unused ti_sci_suspend_noirq(struct device *dev) 3749 + static int ti_sci_suspend_noirq(struct device 
*dev) 3829 3750 { 3830 3751 struct ti_sci_info *info = dev_get_drvdata(dev); 3831 3752 int ret = 0; 3832 3753 3833 - ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); 3834 - if (ret) 3835 - return ret; 3754 + if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) { 3755 + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); 3756 + if (ret) 3757 + return ret; 3758 + } 3836 3759 3837 3760 return 0; 3838 3761 } 3839 3762 3840 - static int __maybe_unused ti_sci_resume_noirq(struct device *dev) 3763 + static int ti_sci_resume_noirq(struct device *dev) 3841 3764 { 3842 3765 struct ti_sci_info *info = dev_get_drvdata(dev); 3843 3766 int ret = 0; ··· 3848 3767 u8 pin; 3849 3768 u8 mode; 3850 3769 3851 - ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); 3852 - if (ret) 3853 - return ret; 3770 + if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) { 3771 + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); 3772 + if (ret) 3773 + return ret; 3774 + } 3854 3775 3855 3776 ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode); 3856 3777 /* Do not fail to resume on error as the wake reason is not critical */ ··· 3863 3780 return 0; 3864 3781 } 3865 3782 3866 - static void __maybe_unused ti_sci_pm_complete(struct device *dev) 3783 + static void ti_sci_pm_complete(struct device *dev) 3867 3784 { 3868 3785 struct ti_sci_info *info = dev_get_drvdata(dev); 3869 3786 ··· 3874 3791 } 3875 3792 3876 3793 static const struct dev_pm_ops ti_sci_pm_ops = { 3877 - #ifdef CONFIG_PM_SLEEP 3878 - .suspend = ti_sci_suspend, 3879 - .suspend_noirq = ti_sci_suspend_noirq, 3880 - .resume_noirq = ti_sci_resume_noirq, 3881 - .complete = ti_sci_pm_complete, 3882 - #endif 3794 + .suspend = pm_sleep_ptr(ti_sci_suspend), 3795 + .suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq), 3796 + .resume_noirq = pm_sleep_ptr(ti_sci_resume_noirq), 3797 + .complete = 
pm_sleep_ptr(ti_sci_pm_complete), 3883 3798 }; 3884 3799 3885 3800 /* Description for K2G */ ··· 4009 3928 } 4010 3929 4011 3930 ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps); 4012 - dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n", 3931 + dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n", 4013 3932 info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "", 4014 3933 info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "", 4015 3934 info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "", 4016 - info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "" 3935 + info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "", 3936 + info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : "" 4017 3937 ); 4018 3938 4019 3939 ti_sci_setup_ops(info); ··· 4023 3941 if (ret) { 4024 3942 dev_err(dev, "reboot registration fail(%d)\n", ret); 4025 3943 goto out; 3944 + } 3945 + 3946 + if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) { 3947 + ret = devm_register_sys_off_handler(dev, 3948 + SYS_OFF_MODE_POWER_OFF, 3949 + SYS_OFF_PRIO_FIRMWARE, 3950 + ti_sci_sys_off_handler, 3951 + info); 3952 + if (ret) { 3953 + dev_err(dev, "Failed to register sys_off_handler %pe\n", 3954 + ERR_PTR(ret)); 3955 + goto out; 3956 + } 4026 3957 } 4027 3958 4028 3959 dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", ··· 4047 3952 list_add_tail(&info->node, &ti_sci_list); 4048 3953 mutex_unlock(&ti_sci_list_mutex); 4049 3954 4050 - return of_platform_populate(dev->of_node, NULL, NULL, dev); 3955 + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); 3956 + if (ret) { 3957 + dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret)); 3958 + goto out; 3959 + } 3960 + return 0; 3961 + 4051 3962 out: 4052 3963 if (!IS_ERR(info->chan_tx)) 4053 3964 mbox_free_channel(info->chan_tx);
+7
drivers/firmware/ti_sci.h
··· 149 149 * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM 150 150 * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM 151 151 * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM 152 + * MSG_FLAG_CAPS_IO_ISOLATION: IO Isolation support 152 153 * 153 154 * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS 154 155 * providing currently available SOC/firmware capabilities. SoC that don't ··· 161 160 #define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4) 162 161 #define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5) 163 162 #define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9) 163 + #define MSG_FLAG_CAPS_IO_ISOLATION TI_SCI_MSG_FLAG(7) 164 164 #define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1) 165 165 u64 fw_caps; 166 166 } __packed; ··· 597 595 struct ti_sci_msg_req_prepare_sleep { 598 596 struct ti_sci_msg_hdr hdr; 599 597 598 + /* 599 + * When sending prepare_sleep with MODE_PARTIAL_IO no response will be sent, 600 + * no further steps are required. 601 + */ 602 + #define TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO 0x03 600 603 #define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd 601 604 u8 mode; 602 605 u32 ctx_lo;
+13
drivers/firmware/xilinx/zynqmp-debug.c
··· 3 3 * Xilinx Zynq MPSoC Firmware layer for debugfs APIs 4 4 * 5 5 * Copyright (C) 2014-2018 Xilinx, Inc. 6 + * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc. 6 7 * 7 8 * Michal Simek <michal.simek@amd.com> 8 9 * Davorin Mista <davorin.mista@aggios.com> ··· 39 38 PM_API(PM_RELEASE_NODE), 40 39 PM_API(PM_SET_REQUIREMENT), 41 40 PM_API(PM_GET_API_VERSION), 41 + PM_API(PM_GET_NODE_STATUS), 42 42 PM_API(PM_REGISTER_NOTIFIER), 43 43 PM_API(PM_RESET_ASSERT), 44 44 PM_API(PM_RESET_GET_STATUS), ··· 168 166 pm_api_arg[2] : 0, 169 167 pm_api_arg[3] ? pm_api_arg[3] : 170 168 ZYNQMP_PM_REQUEST_ACK_BLOCKING); 169 + break; 170 + case PM_GET_NODE_STATUS: 171 + ret = zynqmp_pm_get_node_status(pm_api_arg[0], 172 + &pm_api_ret[0], 173 + &pm_api_ret[1], 174 + &pm_api_ret[2]); 175 + if (!ret) 176 + sprintf(debugfs_buf, 177 + "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n", 178 + pm_api_arg[0], pm_api_ret[0], 179 + pm_api_ret[1], pm_api_ret[2]); 171 180 break; 172 181 case PM_REGISTER_NOTIFIER: 173 182 ret = zynqmp_pm_register_notifier(pm_api_arg[0],
+85 -29
drivers/firmware/xilinx/zynqmp.c
··· 3 3 * Xilinx Zynq MPSoC Firmware layer 4 4 * 5 5 * Copyright (C) 2014-2022 Xilinx, Inc. 6 - * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. 6 + * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc. 7 7 * 8 8 * Michal Simek <michal.simek@amd.com> 9 9 * Davorin Mista <davorin.mista@aggios.com> ··· 71 71 int feature_status; 72 72 struct hlist_node hentry; 73 73 }; 74 + 75 + struct platform_fw_data { 76 + /* 77 + * Family code for platform. 78 + */ 79 + const u32 family_code; 80 + }; 81 + 82 + static struct platform_fw_data *active_platform_fw_data; 74 83 75 84 static const struct mfd_cell firmware_devs[] = { 76 85 { ··· 473 464 474 465 static u32 pm_api_version; 475 466 static u32 pm_tz_version; 476 - static u32 pm_family_code; 477 - static u32 pm_sub_family_code; 478 467 479 468 int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) 480 469 { ··· 539 532 /** 540 533 * zynqmp_pm_get_family_info() - Get family info of platform 541 534 * @family: Returned family code value 542 - * @subfamily: Returned sub-family code value 543 535 * 544 536 * Return: Returns status, either success or error+reason 545 537 */ 546 - int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) 538 + int zynqmp_pm_get_family_info(u32 *family) 547 539 { 548 - u32 ret_payload[PAYLOAD_ARG_CNT]; 549 - u32 idcode; 550 - int ret; 540 + if (!active_platform_fw_data) 541 + return -ENODEV; 551 542 552 - /* Check is family or sub-family code already received */ 553 - if (pm_family_code && pm_sub_family_code) { 554 - *family = pm_family_code; 555 - *subfamily = pm_sub_family_code; 556 - return 0; 557 - } 543 + if (!family) 544 + return -EINVAL; 558 545 559 - ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0); 560 - if (ret < 0) 561 - return ret; 562 - 563 - idcode = ret_payload[1]; 564 - pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode); 565 - pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode); 566 - *family = pm_family_code; 567 - *subfamily = pm_sub_family_code; 546 + 
*family = active_platform_fw_data->family_code; 568 547 569 548 return 0; 570 549 } ··· 1231 1238 u32 value) 1232 1239 { 1233 1240 int ret; 1241 + u32 pm_family_code; 1234 1242 1235 - if (pm_family_code == ZYNQMP_FAMILY_CODE && 1243 + ret = zynqmp_pm_get_family_info(&pm_family_code); 1244 + if (ret) 1245 + return ret; 1246 + 1247 + if (pm_family_code == PM_ZYNQMP_FAMILY_CODE && 1236 1248 param == PM_PINCTRL_CONFIG_TRI_STATE) { 1237 1249 ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET); 1238 1250 if (ret < PM_PINCTRL_PARAM_SET_VERSION) { ··· 1410 1412 (u32)tcm_mode); 1411 1413 } 1412 1414 EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config); 1415 + 1416 + /** 1417 + * zynqmp_pm_get_node_status - PM call to request a node's current power state 1418 + * @node: ID of the component or sub-system in question 1419 + * @status: Current operating state of the requested node 1420 + * @requirements: Current requirements asserted on the node, 1421 + * used for slave nodes only. 1422 + * @usage: Usage information, used for slave nodes only: 1423 + * PM_USAGE_NO_MASTER - No master is currently using 1424 + * the node 1425 + * PM_USAGE_CURRENT_MASTER - Only requesting master is 1426 + * currently using the node 1427 + * PM_USAGE_OTHER_MASTER - Only other masters are 1428 + * currently using the node 1429 + * PM_USAGE_BOTH_MASTERS - Both the current and at least 1430 + * one other master is currently 1431 + * using the node 1432 + * 1433 + * Return: Returns status, either success or error+reason 1434 + */ 1435 + int zynqmp_pm_get_node_status(const u32 node, u32 *const status, 1436 + u32 *const requirements, u32 *const usage) 1437 + { 1438 + u32 ret_payload[PAYLOAD_ARG_CNT]; 1439 + int ret; 1440 + 1441 + if (!status || !requirements || !usage) 1442 + return -EINVAL; 1443 + 1444 + ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, ret_payload, 1, node); 1445 + if (ret_payload[0] == XST_PM_SUCCESS) { 1446 + *status = ret_payload[1]; 1447 + *requirements = ret_payload[2]; 1448 + *usage = 
ret_payload[3]; 1449 + } 1450 + 1451 + return ret; 1452 + } 1453 + EXPORT_SYMBOL_GPL(zynqmp_pm_get_node_status); 1413 1454 1414 1455 /** 1415 1456 * zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to ··· 2044 2007 { 2045 2008 struct device *dev = &pdev->dev; 2046 2009 struct zynqmp_devinfo *devinfo; 2010 + u32 pm_family_code; 2047 2011 int ret; 2048 2012 2049 2013 ret = get_set_conduit_method(dev->of_node); 2050 2014 if (ret) 2051 2015 return ret; 2016 + 2017 + /* Get platform-specific firmware data from device tree match */ 2018 + active_platform_fw_data = (struct platform_fw_data *)device_get_match_data(dev); 2019 + if (!active_platform_fw_data) 2020 + return -EINVAL; 2052 2021 2053 2022 /* Get SiP SVC version number */ 2054 2023 ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version); ··· 2088 2045 pr_info("%s Platform Management API v%d.%d\n", __func__, 2089 2046 pm_api_version >> 16, pm_api_version & 0xFFFF); 2090 2047 2091 - /* Get the Family code and sub family code of platform */ 2092 - ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); 2048 + /* Get the Family code of platform */ 2049 + ret = zynqmp_pm_get_family_info(&pm_family_code); 2093 2050 if (ret < 0) 2094 2051 return ret; 2095 2052 ··· 2116 2073 2117 2074 zynqmp_pm_api_debugfs_init(); 2118 2075 2119 - if (pm_family_code == VERSAL_FAMILY_CODE) { 2076 + if (pm_family_code != PM_ZYNQMP_FAMILY_CODE) { 2120 2077 em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager", 2121 2078 -1, NULL, 0); 2122 2079 if (IS_ERR(em_dev)) ··· 2156 2113 dev_warn(dev, "failed to release power management to firmware\n"); 2157 2114 } 2158 2115 2116 + static const struct platform_fw_data platform_fw_data_versal = { 2117 + .family_code = PM_VERSAL_FAMILY_CODE, 2118 + }; 2119 + 2120 + static const struct platform_fw_data platform_fw_data_versal_net = { 2121 + .family_code = PM_VERSAL_NET_FAMILY_CODE, 2122 + }; 2123 + 2124 + static const struct platform_fw_data 
platform_fw_data_zynqmp = { 2125 + .family_code = PM_ZYNQMP_FAMILY_CODE, 2126 + }; 2127 + 2159 2128 static const struct of_device_id zynqmp_firmware_of_match[] = { 2160 - {.compatible = "xlnx,zynqmp-firmware"}, 2161 - {.compatible = "xlnx,versal-firmware"}, 2129 + {.compatible = "xlnx,zynqmp-firmware", .data = &platform_fw_data_zynqmp}, 2130 + {.compatible = "xlnx,versal-firmware", .data = &platform_fw_data_versal}, 2131 + {.compatible = "xlnx,versal-net-firmware", .data = &platform_fw_data_versal_net}, 2162 2132 {}, 2163 2133 }; 2164 2134 MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
+8 -4
drivers/gpio/gpio-mxc.c
··· 667 667 RUNTIME_PM_OPS(mxc_gpio_runtime_suspend, mxc_gpio_runtime_resume, NULL) 668 668 }; 669 669 670 - static int mxc_gpio_syscore_suspend(void) 670 + static int mxc_gpio_syscore_suspend(void *data) 671 671 { 672 672 struct mxc_gpio_port *port; 673 673 int ret; ··· 684 684 return 0; 685 685 } 686 686 687 - static void mxc_gpio_syscore_resume(void) 687 + static void mxc_gpio_syscore_resume(void *data) 688 688 { 689 689 struct mxc_gpio_port *port; 690 690 int ret; ··· 701 701 } 702 702 } 703 703 704 - static struct syscore_ops mxc_gpio_syscore_ops = { 704 + static const struct syscore_ops mxc_gpio_syscore_ops = { 705 705 .suspend = mxc_gpio_syscore_suspend, 706 706 .resume = mxc_gpio_syscore_resume, 707 + }; 708 + 709 + static struct syscore mxc_gpio_syscore = { 710 + .ops = &mxc_gpio_syscore_ops, 707 711 }; 708 712 709 713 static struct platform_driver mxc_gpio_driver = { ··· 722 718 723 719 static int __init gpio_mxc_init(void) 724 720 { 725 - register_syscore_ops(&mxc_gpio_syscore_ops); 721 + register_syscore(&mxc_gpio_syscore); 726 722 727 723 return platform_driver_register(&mxc_gpio_driver); 728 724 }
+8 -4
drivers/gpio/gpio-pxa.c
··· 747 747 device_initcall(pxa_gpio_dt_init); 748 748 749 749 #ifdef CONFIG_PM 750 - static int pxa_gpio_suspend(void) 750 + static int pxa_gpio_suspend(void *data) 751 751 { 752 752 struct pxa_gpio_chip *pchip = pxa_gpio_chip; 753 753 struct pxa_gpio_bank *c; ··· 768 768 return 0; 769 769 } 770 770 771 - static void pxa_gpio_resume(void) 771 + static void pxa_gpio_resume(void *data) 772 772 { 773 773 struct pxa_gpio_chip *pchip = pxa_gpio_chip; 774 774 struct pxa_gpio_bank *c; ··· 792 792 #define pxa_gpio_resume NULL 793 793 #endif 794 794 795 - static struct syscore_ops pxa_gpio_syscore_ops = { 795 + static const struct syscore_ops pxa_gpio_syscore_ops = { 796 796 .suspend = pxa_gpio_suspend, 797 797 .resume = pxa_gpio_resume, 798 798 }; 799 799 800 + static struct syscore pxa_gpio_syscore = { 801 + .ops = &pxa_gpio_syscore_ops, 802 + }; 803 + 800 804 static int __init pxa_gpio_sysinit(void) 801 805 { 802 - register_syscore_ops(&pxa_gpio_syscore_ops); 806 + register_syscore(&pxa_gpio_syscore); 803 807 return 0; 804 808 } 805 809 postcore_initcall(pxa_gpio_sysinit);
+8 -4
drivers/gpio/gpio-sa1100.c
··· 256 256 } while (mask); 257 257 } 258 258 259 - static int sa1100_gpio_suspend(void) 259 + static int sa1100_gpio_suspend(void *data) 260 260 { 261 261 struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip; 262 262 ··· 275 275 return 0; 276 276 } 277 277 278 - static void sa1100_gpio_resume(void) 278 + static void sa1100_gpio_resume(void *data) 279 279 { 280 280 sa1100_update_edge_regs(&sa1100_gpio_chip); 281 281 } 282 282 283 - static struct syscore_ops sa1100_gpio_syscore_ops = { 283 + static const struct syscore_ops sa1100_gpio_syscore_ops = { 284 284 .suspend = sa1100_gpio_suspend, 285 285 .resume = sa1100_gpio_resume, 286 286 }; 287 287 288 + static struct syscore sa1100_gpio_syscore = { 289 + .ops = &sa1100_gpio_syscore_ops, 290 + }; 291 + 288 292 static int __init sa1100_gpio_init_devicefs(void) 289 293 { 290 - register_syscore_ops(&sa1100_gpio_syscore_ops); 294 + register_syscore(&sa1100_gpio_syscore); 291 295 return 0; 292 296 } 293 297
+9 -5
drivers/hv/vmbus_drv.c
··· 2801 2801 hv_synic_disable_regs(cpu); 2802 2802 }; 2803 2803 2804 - static int hv_synic_suspend(void) 2804 + static int hv_synic_suspend(void *data) 2805 2805 { 2806 2806 /* 2807 2807 * When we reach here, all the non-boot CPUs have been offlined. ··· 2828 2828 return 0; 2829 2829 } 2830 2830 2831 - static void hv_synic_resume(void) 2831 + static void hv_synic_resume(void *data) 2832 2832 { 2833 2833 hv_synic_enable_regs(0); 2834 2834 ··· 2840 2840 } 2841 2841 2842 2842 /* The callbacks run only on CPU0, with irqs_disabled. */ 2843 - static struct syscore_ops hv_synic_syscore_ops = { 2843 + static const struct syscore_ops hv_synic_syscore_ops = { 2844 2844 .suspend = hv_synic_suspend, 2845 2845 .resume = hv_synic_resume, 2846 + }; 2847 + 2848 + static struct syscore hv_synic_syscore = { 2849 + .ops = &hv_synic_syscore_ops, 2846 2850 }; 2847 2851 2848 2852 static int __init hv_acpi_init(void) ··· 2891 2887 hv_setup_kexec_handler(hv_kexec_handler); 2892 2888 hv_setup_crash_handler(hv_crash_handler); 2893 2889 2894 - register_syscore_ops(&hv_synic_syscore_ops); 2890 + register_syscore(&hv_synic_syscore); 2895 2891 2896 2892 return 0; 2897 2893 ··· 2905 2901 { 2906 2902 int cpu; 2907 2903 2908 - unregister_syscore_ops(&hv_synic_syscore_ops); 2904 + unregister_syscore(&hv_synic_syscore); 2909 2905 2910 2906 hv_remove_kexec_handler(); 2911 2907 hv_remove_crash_handler();
+10 -6
drivers/iommu/amd/init.c
··· 3033 3033 * disable suspend until real resume implemented 3034 3034 */ 3035 3035 3036 - static void amd_iommu_resume(void) 3036 + static void amd_iommu_resume(void *data) 3037 3037 { 3038 3038 struct amd_iommu *iommu; 3039 3039 ··· 3047 3047 amd_iommu_enable_interrupts(); 3048 3048 } 3049 3049 3050 - static int amd_iommu_suspend(void) 3050 + static int amd_iommu_suspend(void *data) 3051 3051 { 3052 3052 /* disable IOMMUs to go out of the way for BIOS */ 3053 3053 disable_iommus(); ··· 3055 3055 return 0; 3056 3056 } 3057 3057 3058 - static struct syscore_ops amd_iommu_syscore_ops = { 3058 + static const struct syscore_ops amd_iommu_syscore_ops = { 3059 3059 .suspend = amd_iommu_suspend, 3060 3060 .resume = amd_iommu_resume, 3061 + }; 3062 + 3063 + static struct syscore amd_iommu_syscore = { 3064 + .ops = &amd_iommu_syscore_ops, 3061 3065 }; 3062 3066 3063 3067 static void __init free_iommu_resources(void) ··· 3408 3404 init_state = IOMMU_ENABLED; 3409 3405 break; 3410 3406 case IOMMU_ENABLED: 3411 - register_syscore_ops(&amd_iommu_syscore_ops); 3407 + register_syscore(&amd_iommu_syscore); 3412 3408 iommu_snp_enable(); 3413 3409 ret = amd_iommu_init_pci(); 3414 3410 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; ··· 3511 3507 3512 3508 void amd_iommu_disable(void) 3513 3509 { 3514 - amd_iommu_suspend(); 3510 + amd_iommu_suspend(NULL); 3515 3511 } 3516 3512 3517 3513 int amd_iommu_reenable(int mode) 3518 3514 { 3519 - amd_iommu_resume(); 3515 + amd_iommu_resume(NULL); 3520 3516 3521 3517 return 0; 3522 3518 }
+8 -4
drivers/iommu/intel/iommu.c
··· 1825 1825 } 1826 1826 } 1827 1827 1828 - static int iommu_suspend(void) 1828 + static int iommu_suspend(void *data) 1829 1829 { 1830 1830 struct dmar_drhd_unit *drhd; 1831 1831 struct intel_iommu *iommu = NULL; ··· 1852 1852 return 0; 1853 1853 } 1854 1854 1855 - static void iommu_resume(void) 1855 + static void iommu_resume(void *data) 1856 1856 { 1857 1857 struct dmar_drhd_unit *drhd; 1858 1858 struct intel_iommu *iommu = NULL; ··· 1883 1883 } 1884 1884 } 1885 1885 1886 - static struct syscore_ops iommu_syscore_ops = { 1886 + static const struct syscore_ops iommu_syscore_ops = { 1887 1887 .resume = iommu_resume, 1888 1888 .suspend = iommu_suspend, 1889 1889 }; 1890 1890 1891 + static struct syscore iommu_syscore = { 1892 + .ops = &iommu_syscore_ops, 1893 + }; 1894 + 1891 1895 static void __init init_iommu_pm_ops(void) 1892 1896 { 1893 - register_syscore_ops(&iommu_syscore_ops); 1897 + register_syscore(&iommu_syscore); 1894 1898 } 1895 1899 1896 1900 #else
+10 -4
drivers/irqchip/exynos-combiner.c
··· 200 200 201 201 /** 202 202 * combiner_suspend - save interrupt combiner state before suspend 203 + * @data: syscore context 203 204 * 204 205 * Save the interrupt enable set register for all combiner groups since 205 206 * the state is lost when the system enters into a sleep state. 206 207 * 207 208 */ 208 - static int combiner_suspend(void) 209 + static int combiner_suspend(void *data) 209 210 { 210 211 int i; 211 212 ··· 219 218 220 219 /** 221 220 * combiner_resume - restore interrupt combiner state after resume 221 + * @data: syscore context 222 222 * 223 223 * Restore the interrupt enable set register for all combiner groups since 224 224 * the state is lost when the system enters into a sleep state on suspend. 225 225 * 226 226 */ 227 - static void combiner_resume(void) 227 + static void combiner_resume(void *data) 228 228 { 229 229 int i; 230 230 ··· 242 240 #define combiner_resume NULL 243 241 #endif 244 242 245 - static struct syscore_ops combiner_syscore_ops = { 243 + static const struct syscore_ops combiner_syscore_ops = { 246 244 .suspend = combiner_suspend, 247 245 .resume = combiner_resume, 246 + }; 247 + 248 + static struct syscore combiner_syscore = { 249 + .ops = &combiner_syscore_ops, 248 250 }; 249 251 250 252 static int __init combiner_of_init(struct device_node *np, ··· 270 264 271 265 combiner_init(combiner_base, np); 272 266 273 - register_syscore_ops(&combiner_syscore_ops); 267 + register_syscore(&combiner_syscore); 274 268 275 269 return 0; 276 270 }
+8 -4
drivers/irqchip/irq-armada-370-xp.c
··· 726 726 } while (1); 727 727 } 728 728 729 - static int mpic_suspend(void) 729 + static int mpic_suspend(void *data) 730 730 { 731 731 struct mpic *mpic = mpic_data; 732 732 ··· 735 735 return 0; 736 736 } 737 737 738 - static void mpic_resume(void) 738 + static void mpic_resume(void *data) 739 739 { 740 740 struct mpic *mpic = mpic_data; 741 741 bool src0, src1; ··· 788 788 mpic_ipi_resume(mpic); 789 789 } 790 790 791 - static struct syscore_ops mpic_syscore_ops = { 791 + static const struct syscore_ops mpic_syscore_ops = { 792 792 .suspend = mpic_suspend, 793 793 .resume = mpic_resume, 794 + }; 795 + 796 + static struct syscore mpic_syscore = { 797 + .ops = &mpic_syscore_ops, 794 798 }; 795 799 796 800 static int __init mpic_map_region(struct device_node *np, int index, ··· 909 905 mpic_handle_cascade_irq, mpic); 910 906 } 911 907 912 - register_syscore_ops(&mpic_syscore_ops); 908 + register_syscore(&mpic_syscore); 913 909 914 910 return 0; 915 911 }
+8 -4
drivers/irqchip/irq-bcm7038-l1.c
··· 285 285 static LIST_HEAD(bcm7038_l1_intcs_list); 286 286 static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock); 287 287 288 - static int bcm7038_l1_suspend(void) 288 + static int bcm7038_l1_suspend(void *data) 289 289 { 290 290 struct bcm7038_l1_chip *intc; 291 291 int boot_cpu, word; ··· 311 311 return 0; 312 312 } 313 313 314 - static void bcm7038_l1_resume(void) 314 + static void bcm7038_l1_resume(void *data) 315 315 { 316 316 struct bcm7038_l1_chip *intc; 317 317 int boot_cpu, word; ··· 332 332 } 333 333 } 334 334 335 - static struct syscore_ops bcm7038_l1_syscore_ops = { 335 + static const struct syscore_ops bcm7038_l1_syscore_ops = { 336 336 .suspend = bcm7038_l1_suspend, 337 337 .resume = bcm7038_l1_resume, 338 + }; 339 + 340 + static struct syscore bcm7038_l1_syscore = { 341 + .ops = &bcm7038_l1_syscore_ops, 338 342 }; 339 343 340 344 static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on) ··· 428 424 raw_spin_unlock(&bcm7038_l1_intcs_lock); 429 425 430 426 if (list_is_singular(&bcm7038_l1_intcs_list)) 431 - register_syscore_ops(&bcm7038_l1_syscore_ops); 427 + register_syscore(&bcm7038_l1_syscore); 432 428 #endif 433 429 434 430 pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
+8 -4
drivers/irqchip/irq-gic-v3-its.c
··· 4992 4992 its_quirks, its); 4993 4993 } 4994 4994 4995 - static int its_save_disable(void) 4995 + static int its_save_disable(void *data) 4996 4996 { 4997 4997 struct its_node *its; 4998 4998 int err = 0; ··· 5028 5028 return err; 5029 5029 } 5030 5030 5031 - static void its_restore_enable(void) 5031 + static void its_restore_enable(void *data) 5032 5032 { 5033 5033 struct its_node *its; 5034 5034 int ret; ··· 5088 5088 raw_spin_unlock(&its_lock); 5089 5089 } 5090 5090 5091 - static struct syscore_ops its_syscore_ops = { 5091 + static const struct syscore_ops its_syscore_ops = { 5092 5092 .suspend = its_save_disable, 5093 5093 .resume = its_restore_enable, 5094 + }; 5095 + 5096 + static struct syscore its_syscore = { 5097 + .ops = &its_syscore_ops, 5094 5098 }; 5095 5099 5096 5100 static void __init __iomem *its_map_one(struct resource *res, int *err) ··· 5868 5864 } 5869 5865 } 5870 5866 5871 - register_syscore_ops(&its_syscore_ops); 5867 + register_syscore(&its_syscore); 5872 5868 5873 5869 return 0; 5874 5870 }
+8 -4
drivers/irqchip/irq-i8259.c
··· 202 202 } 203 203 } 204 204 205 - static void i8259A_resume(void) 205 + static void i8259A_resume(void *data) 206 206 { 207 207 if (i8259A_auto_eoi >= 0) 208 208 init_8259A(i8259A_auto_eoi); 209 209 } 210 210 211 - static void i8259A_shutdown(void) 211 + static void i8259A_shutdown(void *data) 212 212 { 213 213 /* Put the i8259A into a quiescent state that 214 214 * the kernel initialization code can get it ··· 220 220 } 221 221 } 222 222 223 - static struct syscore_ops i8259_syscore_ops = { 223 + static const struct syscore_ops i8259_syscore_ops = { 224 224 .resume = i8259A_resume, 225 225 .shutdown = i8259A_shutdown, 226 + }; 227 + 228 + static struct syscore i8259_syscore = { 229 + .ops = &i8259_syscore_ops, 226 230 }; 227 231 228 232 static void init_8259A(int auto_eoi) ··· 324 320 325 321 if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL)) 326 322 pr_err("Failed to register cascade interrupt\n"); 327 - register_syscore_ops(&i8259_syscore_ops); 323 + register_syscore(&i8259_syscore); 328 324 return domain; 329 325 } 330 326
+10 -6
drivers/irqchip/irq-imx-gpcv2.c
··· 33 33 return cd->gpc_base + cd->cpu2wakeup + i * 4; 34 34 } 35 35 36 - static int gpcv2_wakeup_source_save(void) 36 + static int gpcv2_wakeup_source_save(void *data) 37 37 { 38 38 struct gpcv2_irqchip_data *cd; 39 39 void __iomem *reg; ··· 52 52 return 0; 53 53 } 54 54 55 - static void gpcv2_wakeup_source_restore(void) 55 + static void gpcv2_wakeup_source_restore(void *data) 56 56 { 57 57 struct gpcv2_irqchip_data *cd; 58 58 int i; ··· 65 65 writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i)); 66 66 } 67 67 68 - static struct syscore_ops imx_gpcv2_syscore_ops = { 69 - .suspend = gpcv2_wakeup_source_save, 70 - .resume = gpcv2_wakeup_source_restore, 68 + static const struct syscore_ops gpcv2_syscore_ops = { 69 + .suspend = gpcv2_wakeup_source_save, 70 + .resume = gpcv2_wakeup_source_restore, 71 + }; 72 + 73 + static struct syscore gpcv2_syscore = { 74 + .ops = &gpcv2_syscore_ops, 71 75 }; 72 76 73 77 static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on) ··· 280 276 writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup); 281 277 282 278 imx_gpcv2_instance = cd; 283 - register_syscore_ops(&imx_gpcv2_syscore_ops); 279 + register_syscore(&gpcv2_syscore); 284 280 285 281 /* 286 282 * Clear the OF_POPULATED flag set in of_irq_init so that
+8 -4
drivers/irqchip/irq-loongson-eiointc.c
··· 407 407 return NULL; 408 408 } 409 409 410 - static int eiointc_suspend(void) 410 + static int eiointc_suspend(void *data) 411 411 { 412 412 return 0; 413 413 } 414 414 415 - static void eiointc_resume(void) 415 + static void eiointc_resume(void *data) 416 416 { 417 417 eiointc_router_init(0); 418 418 } 419 419 420 - static struct syscore_ops eiointc_syscore_ops = { 420 + static const struct syscore_ops eiointc_syscore_ops = { 421 421 .suspend = eiointc_suspend, 422 422 .resume = eiointc_resume, 423 + }; 424 + 425 + static struct syscore eiointc_syscore = { 426 + .ops = &eiointc_syscore_ops, 423 427 }; 424 428 425 429 static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, ··· 544 540 eiointc_router_init(0); 545 541 546 542 if (nr_pics == 1) { 547 - register_syscore_ops(&eiointc_syscore_ops); 543 + register_syscore(&eiointc_syscore); 548 544 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING, 549 545 "irqchip/loongarch/eiointc:starting", 550 546 eiointc_router_init, NULL);
+7 -3
drivers/irqchip/irq-loongson-htpic.c
··· 71 71 writel(0xffff, htpic->base + HTINT_EN_OFF); 72 72 } 73 73 74 - static void htpic_resume(void) 74 + static void htpic_resume(void *data) 75 75 { 76 76 htpic_reg_init(); 77 77 } 78 78 79 - struct syscore_ops htpic_syscore_ops = { 79 + static const struct syscore_ops htpic_syscore_ops = { 80 80 .resume = htpic_resume, 81 + }; 82 + 83 + static struct syscore htpic_syscore = { 84 + .ops = &htpic_syscore_ops, 81 85 }; 82 86 83 87 static int __init htpic_of_init(struct device_node *node, struct device_node *parent) ··· 134 130 htpic_irq_dispatch, htpic); 135 131 } 136 132 137 - register_syscore_ops(&htpic_syscore_ops); 133 + register_syscore(&htpic_syscore); 138 134 139 135 return 0; 140 136
+8 -4
drivers/irqchip/irq-loongson-htvec.c
··· 159 159 } 160 160 } 161 161 162 - static int htvec_suspend(void) 162 + static int htvec_suspend(void *data) 163 163 { 164 164 int i; 165 165 ··· 169 169 return 0; 170 170 } 171 171 172 - static void htvec_resume(void) 172 + static void htvec_resume(void *data) 173 173 { 174 174 int i; 175 175 ··· 177 177 writel(htvec_priv->saved_vec_en[i], htvec_priv->base + HTVEC_EN_OFF + 4 * i); 178 178 } 179 179 180 - static struct syscore_ops htvec_syscore_ops = { 180 + static const struct syscore_ops htvec_syscore_ops = { 181 181 .suspend = htvec_suspend, 182 182 .resume = htvec_resume, 183 + }; 184 + 185 + static struct syscore htvec_syscore = { 186 + .ops = &htvec_syscore_ops, 183 187 }; 184 188 185 189 static int htvec_init(phys_addr_t addr, unsigned long size, ··· 218 214 219 215 htvec_priv = priv; 220 216 221 - register_syscore_ops(&htvec_syscore_ops); 217 + register_syscore(&htvec_syscore); 222 218 223 219 return 0; 224 220
+8 -4
drivers/irqchip/irq-loongson-pch-lpc.c
··· 151 151 (readl(priv->base + LPC_INT_STS) == 0xffffffff); 152 152 } 153 153 154 - static int pch_lpc_suspend(void) 154 + static int pch_lpc_suspend(void *data) 155 155 { 156 156 pch_lpc_priv->saved_reg_ctl = readl(pch_lpc_priv->base + LPC_INT_CTL); 157 157 pch_lpc_priv->saved_reg_ena = readl(pch_lpc_priv->base + LPC_INT_ENA); ··· 159 159 return 0; 160 160 } 161 161 162 - static void pch_lpc_resume(void) 162 + static void pch_lpc_resume(void *data) 163 163 { 164 164 writel(pch_lpc_priv->saved_reg_ctl, pch_lpc_priv->base + LPC_INT_CTL); 165 165 writel(pch_lpc_priv->saved_reg_ena, pch_lpc_priv->base + LPC_INT_ENA); 166 166 writel(pch_lpc_priv->saved_reg_pol, pch_lpc_priv->base + LPC_INT_POL); 167 167 } 168 168 169 - static struct syscore_ops pch_lpc_syscore_ops = { 169 + static const struct syscore_ops pch_lpc_syscore_ops = { 170 170 .suspend = pch_lpc_suspend, 171 171 .resume = pch_lpc_resume, 172 + }; 173 + 174 + static struct syscore pch_lpc_syscore = { 175 + .ops = &pch_lpc_syscore_ops, 172 176 }; 173 177 174 178 int __init pch_lpc_acpi_init(struct irq_domain *parent, ··· 226 222 227 223 pch_lpc_priv = priv; 228 224 pch_lpc_handle = irq_handle; 229 - register_syscore_ops(&pch_lpc_syscore_ops); 225 + register_syscore(&pch_lpc_syscore); 230 226 231 227 return 0; 232 228
+8 -4
drivers/irqchip/irq-loongson-pch-pic.c
··· 278 278 } 279 279 } 280 280 281 - static int pch_pic_suspend(void) 281 + static int pch_pic_suspend(void *data) 282 282 { 283 283 int i, j; 284 284 ··· 296 296 return 0; 297 297 } 298 298 299 - static void pch_pic_resume(void) 299 + static void pch_pic_resume(void *data) 300 300 { 301 301 int i, j; 302 302 ··· 313 313 } 314 314 } 315 315 316 - static struct syscore_ops pch_pic_syscore_ops = { 316 + static const struct syscore_ops pch_pic_syscore_ops = { 317 317 .suspend = pch_pic_suspend, 318 318 .resume = pch_pic_resume, 319 + }; 320 + 321 + static struct syscore pch_pic_syscore = { 322 + .ops = &pch_pic_syscore_ops, 319 323 }; 320 324 321 325 static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, ··· 360 356 pch_pic_priv[nr_pics++] = priv; 361 357 362 358 if (nr_pics == 1) 363 - register_syscore_ops(&pch_pic_syscore_ops); 359 + register_syscore(&pch_pic_syscore); 364 360 365 361 return 0; 366 362
+8 -4
drivers/irqchip/irq-mchp-eic.c
··· 109 109 return 0; 110 110 } 111 111 112 - static int mchp_eic_irq_suspend(void) 112 + static int mchp_eic_irq_suspend(void *data) 113 113 { 114 114 unsigned int hwirq; 115 115 ··· 123 123 return 0; 124 124 } 125 125 126 - static void mchp_eic_irq_resume(void) 126 + static void mchp_eic_irq_resume(void *data) 127 127 { 128 128 unsigned int hwirq; 129 129 ··· 135 135 MCHP_EIC_SCFG(hwirq)); 136 136 } 137 137 138 - static struct syscore_ops mchp_eic_syscore_ops = { 138 + static const struct syscore_ops mchp_eic_syscore_ops = { 139 139 .suspend = mchp_eic_irq_suspend, 140 140 .resume = mchp_eic_irq_resume, 141 + }; 142 + 143 + static struct syscore mchp_eic_syscore = { 144 + .ops = &mchp_eic_syscore_ops, 141 145 }; 142 146 143 147 static struct irq_chip mchp_eic_chip = { ··· 262 258 goto clk_unprepare; 263 259 } 264 260 265 - register_syscore_ops(&mchp_eic_syscore_ops); 261 + register_syscore(&mchp_eic_syscore); 266 262 267 263 pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ); 268 264
+8 -4
drivers/irqchip/irq-mst-intc.c
··· 143 143 writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4); 144 144 } 145 145 146 - static void mst_irq_resume(void) 146 + static void mst_irq_resume(void *data) 147 147 { 148 148 struct mst_intc_chip_data *cd; 149 149 ··· 151 151 mst_intc_polarity_restore(cd); 152 152 } 153 153 154 - static int mst_irq_suspend(void) 154 + static int mst_irq_suspend(void *data) 155 155 { 156 156 struct mst_intc_chip_data *cd; 157 157 ··· 160 160 return 0; 161 161 } 162 162 163 - static struct syscore_ops mst_irq_syscore_ops = { 163 + static const struct syscore_ops mst_irq_syscore_ops = { 164 164 .suspend = mst_irq_suspend, 165 165 .resume = mst_irq_resume, 166 166 }; 167 167 168 + static struct syscore mst_irq_syscore = { 169 + .ops = &mst_irq_syscore_ops, 170 + }; 171 + 168 172 static int __init mst_irq_pm_init(void) 169 173 { 170 - register_syscore_ops(&mst_irq_syscore_ops); 174 + register_syscore(&mst_irq_syscore); 171 175 return 0; 172 176 } 173 177 late_initcall(mst_irq_pm_init);
+8 -4
drivers/irqchip/irq-mtk-cirq.c
··· 199 199 }; 200 200 201 201 #ifdef CONFIG_PM_SLEEP 202 - static int mtk_cirq_suspend(void) 202 + static int mtk_cirq_suspend(void *data) 203 203 { 204 204 void __iomem *reg; 205 205 u32 value, mask; ··· 257 257 return 0; 258 258 } 259 259 260 - static void mtk_cirq_resume(void) 260 + static void mtk_cirq_resume(void *data) 261 261 { 262 262 void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL); 263 263 u32 value; ··· 272 272 writel_relaxed(value, reg); 273 273 } 274 274 275 - static struct syscore_ops mtk_cirq_syscore_ops = { 275 + static const struct syscore_ops mtk_cirq_syscore_ops = { 276 276 .suspend = mtk_cirq_suspend, 277 277 .resume = mtk_cirq_resume, 278 278 }; 279 279 280 + static struct syscore mtk_cirq_syscore = { 281 + .ops = &mtk_cirq_syscore_ops, 282 + }; 283 + 280 284 static void mtk_cirq_syscore_init(void) 281 285 { 282 - register_syscore_ops(&mtk_cirq_syscore_ops); 286 + register_syscore(&mtk_cirq_syscore); 283 287 } 284 288 #else 285 289 static inline void mtk_cirq_syscore_init(void) {}
+8 -4
drivers/irqchip/irq-renesas-rzg2l.c
··· 398 398 return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); 399 399 } 400 400 401 - static int rzg2l_irqc_irq_suspend(void) 401 + static int rzg2l_irqc_irq_suspend(void *data) 402 402 { 403 403 struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache; 404 404 void __iomem *base = rzg2l_irqc_data->base; ··· 410 410 return 0; 411 411 } 412 412 413 - static void rzg2l_irqc_irq_resume(void) 413 + static void rzg2l_irqc_irq_resume(void *data) 414 414 { 415 415 struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache; 416 416 void __iomem *base = rzg2l_irqc_data->base; ··· 425 425 writel_relaxed(cache->iitsr, base + IITSR); 426 426 } 427 427 428 - static struct syscore_ops rzg2l_irqc_syscore_ops = { 428 + static const struct syscore_ops rzg2l_irqc_syscore_ops = { 429 429 .suspend = rzg2l_irqc_irq_suspend, 430 430 .resume = rzg2l_irqc_irq_resume, 431 + }; 432 + 433 + static struct syscore rzg2l_irqc_syscore = { 434 + .ops = &rzg2l_irqc_syscore_ops, 431 435 }; 432 436 433 437 static const struct irq_chip rzg2l_irqc_chip = { ··· 581 577 return -ENOMEM; 582 578 } 583 579 584 - register_syscore_ops(&rzg2l_irqc_syscore_ops); 580 + register_syscore(&rzg2l_irqc_syscore); 585 581 586 582 return 0; 587 583 }
+8 -4
drivers/irqchip/irq-sa11x0.c
··· 85 85 unsigned int iccr; 86 86 } sa1100irq_state; 87 87 88 - static int sa1100irq_suspend(void) 88 + static int sa1100irq_suspend(void *data) 89 89 { 90 90 struct sa1100irq_state *st = &sa1100irq_state; 91 91 ··· 102 102 return 0; 103 103 } 104 104 105 - static void sa1100irq_resume(void) 105 + static void sa1100irq_resume(void *data) 106 106 { 107 107 struct sa1100irq_state *st = &sa1100irq_state; 108 108 ··· 114 114 } 115 115 } 116 116 117 - static struct syscore_ops sa1100irq_syscore_ops = { 117 + static const struct syscore_ops sa1100irq_syscore_ops = { 118 118 .suspend = sa1100irq_suspend, 119 119 .resume = sa1100irq_resume, 120 120 }; 121 121 122 + static struct syscore sa1100irq_syscore = { 123 + .ops = &sa1100irq_syscore_ops, 124 + }; 125 + 122 126 static int __init sa1100irq_init_devicefs(void) 123 127 { 124 - register_syscore_ops(&sa1100irq_syscore_ops); 128 + register_syscore(&sa1100irq_syscore); 125 129 return 0; 126 130 } 127 131
+8 -4
drivers/irqchip/irq-sifive-plic.c
··· 255 255 return IRQ_SET_MASK_OK; 256 256 } 257 257 258 - static int plic_irq_suspend(void) 258 + static int plic_irq_suspend(void *data) 259 259 { 260 260 struct plic_priv *priv; 261 261 ··· 270 270 return 0; 271 271 } 272 272 273 - static void plic_irq_resume(void) 273 + static void plic_irq_resume(void *data) 274 274 { 275 275 unsigned int i, index, cpu; 276 276 unsigned long flags; ··· 301 301 } 302 302 } 303 303 304 - static struct syscore_ops plic_irq_syscore_ops = { 304 + static const struct syscore_ops plic_irq_syscore_ops = { 305 305 .suspend = plic_irq_suspend, 306 306 .resume = plic_irq_resume, 307 + }; 308 + 309 + static struct syscore plic_irq_syscore = { 310 + .ops = &plic_irq_syscore_ops, 307 311 }; 308 312 309 313 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, ··· 773 769 cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, 774 770 "irqchip/sifive/plic:starting", 775 771 plic_starting_cpu, plic_dying_cpu); 776 - register_syscore_ops(&plic_irq_syscore_ops); 772 + register_syscore(&plic_irq_syscore); 777 773 plic_global_setup_done = true; 778 774 } 779 775 }
+11 -7
drivers/irqchip/irq-sun6i-r.c
··· 268 268 .free = irq_domain_free_irqs_common, 269 269 }; 270 270 271 - static int sun6i_r_intc_suspend(void) 271 + static int sun6i_r_intc_suspend(void *data) 272 272 { 273 273 u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))]; 274 274 int i; ··· 284 284 return 0; 285 285 } 286 286 287 - static void sun6i_r_intc_resume(void) 287 + static void sun6i_r_intc_resume(void *data) 288 288 { 289 289 int i; 290 290 ··· 294 294 writel_relaxed(0, base + SUN6I_IRQ_ENABLE(i)); 295 295 } 296 296 297 - static void sun6i_r_intc_shutdown(void) 297 + static void sun6i_r_intc_shutdown(void *data) 298 298 { 299 - sun6i_r_intc_suspend(); 299 + sun6i_r_intc_suspend(data); 300 300 } 301 301 302 - static struct syscore_ops sun6i_r_intc_syscore_ops = { 302 + static const struct syscore_ops sun6i_r_intc_syscore_ops = { 303 303 .suspend = sun6i_r_intc_suspend, 304 304 .resume = sun6i_r_intc_resume, 305 305 .shutdown = sun6i_r_intc_shutdown, 306 + }; 307 + 308 + static struct syscore sun6i_r_intc_syscore = { 309 + .ops = &sun6i_r_intc_syscore_ops, 306 310 }; 307 311 308 312 static int __init sun6i_r_intc_init(struct device_node *node, ··· 350 346 return -ENOMEM; 351 347 } 352 348 353 - register_syscore_ops(&sun6i_r_intc_syscore_ops); 349 + register_syscore(&sun6i_r_intc_syscore); 354 350 355 351 sun6i_r_intc_ack_nmi(); 356 - sun6i_r_intc_resume(); 352 + sun6i_r_intc_resume(NULL); 357 353 358 354 return 0; 359 355 }
+8 -4
drivers/irqchip/irq-tegra.c
··· 132 132 return 0; 133 133 } 134 134 135 - static int tegra_ictlr_suspend(void) 135 + static int tegra_ictlr_suspend(void *data) 136 136 { 137 137 unsigned long flags; 138 138 unsigned int i; ··· 161 161 return 0; 162 162 } 163 163 164 - static void tegra_ictlr_resume(void) 164 + static void tegra_ictlr_resume(void *data) 165 165 { 166 166 unsigned long flags; 167 167 unsigned int i; ··· 184 184 local_irq_restore(flags); 185 185 } 186 186 187 - static struct syscore_ops tegra_ictlr_syscore_ops = { 187 + static const struct syscore_ops tegra_ictlr_syscore_ops = { 188 188 .suspend = tegra_ictlr_suspend, 189 189 .resume = tegra_ictlr_resume, 190 190 }; 191 191 192 + static struct syscore tegra_ictlr_syscore = { 193 + .ops = &tegra_ictlr_syscore_ops, 194 + }; 195 + 192 196 static void tegra_ictlr_syscore_init(void) 193 197 { 194 - register_syscore_ops(&tegra_ictlr_syscore_ops); 198 + register_syscore(&tegra_ictlr_syscore); 195 199 } 196 200 #else 197 201 #define tegra_set_wake NULL
+8 -4
drivers/irqchip/irq-vic.c
··· 120 120 writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); 121 121 } 122 122 123 - static void vic_resume(void) 123 + static void vic_resume(void *data) 124 124 { 125 125 int id; 126 126 ··· 146 146 writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); 147 147 } 148 148 149 - static int vic_suspend(void) 149 + static int vic_suspend(void *data) 150 150 { 151 151 int id; 152 152 ··· 156 156 return 0; 157 157 } 158 158 159 - static struct syscore_ops vic_syscore_ops = { 159 + static const struct syscore_ops vic_syscore_ops = { 160 160 .suspend = vic_suspend, 161 161 .resume = vic_resume, 162 + }; 163 + 164 + static struct syscore vic_syscore = { 165 + .ops = &vic_syscore_ops, 162 166 }; 163 167 164 168 /** ··· 175 171 static int __init vic_pm_init(void) 176 172 { 177 173 if (vic_id > 0) 178 - register_syscore_ops(&vic_syscore_ops); 174 + register_syscore(&vic_syscore); 179 175 180 176 return 0; 181 177 }
+9 -5
drivers/leds/trigger/ledtrig-cpu.c
··· 94 94 } 95 95 EXPORT_SYMBOL(ledtrig_cpu); 96 96 97 - static int ledtrig_cpu_syscore_suspend(void) 97 + static int ledtrig_cpu_syscore_suspend(void *data) 98 98 { 99 99 ledtrig_cpu(CPU_LED_STOP); 100 100 return 0; 101 101 } 102 102 103 - static void ledtrig_cpu_syscore_resume(void) 103 + static void ledtrig_cpu_syscore_resume(void *data) 104 104 { 105 105 ledtrig_cpu(CPU_LED_START); 106 106 } 107 107 108 - static void ledtrig_cpu_syscore_shutdown(void) 108 + static void ledtrig_cpu_syscore_shutdown(void *data) 109 109 { 110 110 ledtrig_cpu(CPU_LED_HALTED); 111 111 } 112 112 113 - static struct syscore_ops ledtrig_cpu_syscore_ops = { 113 + static const struct syscore_ops ledtrig_cpu_syscore_ops = { 114 114 .shutdown = ledtrig_cpu_syscore_shutdown, 115 115 .suspend = ledtrig_cpu_syscore_suspend, 116 116 .resume = ledtrig_cpu_syscore_resume, 117 + }; 118 + 119 + static struct syscore ledtrig_cpu_syscore = { 120 + .ops = &ledtrig_cpu_syscore_ops, 117 121 }; 118 122 119 123 static int ledtrig_online_cpu(unsigned int cpu) ··· 161 157 led_trigger_register_simple(trig->name, &trig->_trig); 162 158 } 163 159 164 - register_syscore_ops(&ledtrig_cpu_syscore_ops); 160 + register_syscore(&ledtrig_cpu_syscore); 165 161 166 162 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting", 167 163 ledtrig_online_cpu, ledtrig_prepare_down_cpu);
+8 -4
drivers/macintosh/via-pmu.c
··· 2600 2600 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 2601 2601 int pmu_sys_suspended; 2602 2602 2603 - static int pmu_syscore_suspend(void) 2603 + static int pmu_syscore_suspend(void *data) 2604 2604 { 2605 2605 /* Suspend PMU event interrupts */ 2606 2606 pmu_suspend(); ··· 2614 2614 return 0; 2615 2615 } 2616 2616 2617 - static void pmu_syscore_resume(void) 2617 + static void pmu_syscore_resume(void *data) 2618 2618 { 2619 2619 struct adb_request req; 2620 2620 ··· 2634 2634 pmu_sys_suspended = 0; 2635 2635 } 2636 2636 2637 - static struct syscore_ops pmu_syscore_ops = { 2637 + static const struct syscore_ops pmu_syscore_ops = { 2638 2638 .suspend = pmu_syscore_suspend, 2639 2639 .resume = pmu_syscore_resume, 2640 2640 }; 2641 2641 2642 + static struct syscore pmu_syscore = { 2643 + .ops = &pmu_syscore_ops, 2644 + }; 2645 + 2642 2646 static int pmu_syscore_register(void) 2643 2647 { 2644 - register_syscore_ops(&pmu_syscore_ops); 2648 + register_syscore(&pmu_syscore); 2645 2649 2646 2650 return 0; 2647 2651 }
+44 -12
drivers/memory/renesas-rpc-if.c
··· 67 67 void __iomem *dirmap; 68 68 struct regmap *regmap; 69 69 struct reset_control *rstc; 70 + struct clk *spi_clk; 71 + struct clk *spix2_clk; 70 72 struct platform_device *vdev; 71 73 size_t size; 72 74 const struct rpcif_info *info; ··· 1026 1024 * flash write failure. So, enable these clocks during probe() and 1027 1025 * disable it in remove(). 1028 1026 */ 1029 - if (rpc->info->type == XSPI_RZ_G3E) { 1030 - struct clk *spi_clk; 1027 + rpc->spix2_clk = devm_clk_get_optional_enabled(dev, "spix2"); 1028 + if (IS_ERR(rpc->spix2_clk)) 1029 + return dev_err_probe(dev, PTR_ERR(rpc->spix2_clk), 1030 + "cannot get enabled spix2 clk\n"); 1031 1031 1032 - spi_clk = devm_clk_get_enabled(dev, "spix2"); 1033 - if (IS_ERR(spi_clk)) 1034 - return dev_err_probe(dev, PTR_ERR(spi_clk), 1035 - "cannot get enabled spix2 clk\n"); 1036 - 1037 - spi_clk = devm_clk_get_enabled(dev, "spi"); 1038 - if (IS_ERR(spi_clk)) 1039 - return dev_err_probe(dev, PTR_ERR(spi_clk), 1040 - "cannot get enabled spi clk\n"); 1041 - } 1032 + rpc->spi_clk = devm_clk_get_optional_enabled(dev, "spi"); 1033 + if (IS_ERR(rpc->spi_clk)) 1034 + return dev_err_probe(dev, PTR_ERR(rpc->spi_clk), 1035 + "cannot get enabled spi clk\n"); 1042 1036 1043 1037 vdev = platform_device_alloc(name, pdev->id); 1044 1038 if (!vdev) ··· 1059 1061 struct rpcif_priv *rpc = platform_get_drvdata(pdev); 1060 1062 1061 1063 platform_device_unregister(rpc->vdev); 1064 + } 1065 + 1066 + static int rpcif_suspend(struct device *dev) 1067 + { 1068 + struct rpcif_priv *rpc = dev_get_drvdata(dev); 1069 + 1070 + clk_disable_unprepare(rpc->spi_clk); 1071 + clk_disable_unprepare(rpc->spix2_clk); 1072 + 1073 + return 0; 1074 + } 1075 + 1076 + static int rpcif_resume(struct device *dev) 1077 + { 1078 + struct rpcif_priv *rpc = dev_get_drvdata(dev); 1079 + int ret; 1080 + 1081 + ret = clk_prepare_enable(rpc->spix2_clk); 1082 + if (ret) { 1083 + dev_err(dev, "failed to enable spix2 clock: %pe\n", ERR_PTR(ret)); 1084 + return ret; 1085 + } 
1086 + 1087 + ret = clk_prepare_enable(rpc->spi_clk); 1088 + if (ret) { 1089 + clk_disable_unprepare(rpc->spix2_clk); 1090 + dev_err(dev, "failed to enable spi clock: %pe\n", ERR_PTR(ret)); 1091 + return ret; 1092 + } 1093 + 1094 + return 0; 1062 1095 } 1063 1096 1064 1097 static const struct rpcif_impl rpcif_impl = { ··· 1154 1125 }; 1155 1126 MODULE_DEVICE_TABLE(of, rpcif_of_match); 1156 1127 1128 + static DEFINE_SIMPLE_DEV_PM_OPS(rpcif_pm_ops, rpcif_suspend, rpcif_resume); 1129 + 1157 1130 static struct platform_driver rpcif_driver = { 1158 1131 .probe = rpcif_probe, 1159 1132 .remove = rpcif_remove, 1160 1133 .driver = { 1161 1134 .name = "rpc-if", 1162 1135 .of_match_table = rpcif_of_match, 1136 + .pm = pm_sleep_ptr(&rpcif_pm_ops), 1163 1137 }, 1164 1138 }; 1165 1139 module_platform_driver(rpcif_driver);
+64 -76
drivers/memory/tegra/tegra124-emc.c
··· 571 571 dev_err(emc->dev, "clock change timed out\n"); 572 572 } 573 573 574 - static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc, 575 - unsigned long rate) 574 + static struct emc_timing *tegra124_emc_find_timing(struct tegra_emc *emc, 575 + unsigned long rate) 576 576 { 577 577 struct emc_timing *timing = NULL; 578 578 unsigned int i; ··· 592 592 return timing; 593 593 } 594 594 595 - static int tegra_emc_prepare_timing_change(struct tegra_emc *emc, 596 - unsigned long rate) 595 + static int tegra124_emc_prepare_timing_change(struct tegra_emc *emc, 596 + unsigned long rate) 597 597 { 598 - struct emc_timing *timing = tegra_emc_find_timing(emc, rate); 598 + struct emc_timing *timing = tegra124_emc_find_timing(emc, rate); 599 599 struct emc_timing *last = &emc->last_timing; 600 600 enum emc_dll_change dll_change; 601 601 unsigned int pre_wait = 0; ··· 820 820 return 0; 821 821 } 822 822 823 - static void tegra_emc_complete_timing_change(struct tegra_emc *emc, 824 - unsigned long rate) 823 + static void tegra124_emc_complete_timing_change(struct tegra_emc *emc, 824 + unsigned long rate) 825 825 { 826 - struct emc_timing *timing = tegra_emc_find_timing(emc, rate); 826 + struct emc_timing *timing = tegra124_emc_find_timing(emc, rate); 827 827 struct emc_timing *last = &emc->last_timing; 828 828 u32 val; 829 829 ··· 896 896 timing->emc_mode_reset = 0; 897 897 } 898 898 899 - static int emc_init(struct tegra_emc *emc) 899 + static void emc_init(struct tegra_emc *emc) 900 900 { 901 901 emc->dram_type = readl(emc->regs + EMC_FBIO_CFG5); 902 902 ··· 913 913 emc->dram_num = tegra_mc_get_emem_device_count(emc->mc); 914 914 915 915 emc_read_current_timing(emc, &emc->last_timing); 916 - 917 - return 0; 918 916 } 919 917 920 918 static int load_one_timing_from_dt(struct tegra_emc *emc, ··· 986 988 return 1; 987 989 } 988 990 989 - static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, 990 - struct device_node *node) 991 + static int 
tegra124_emc_load_timings_from_dt(struct tegra_emc *emc, 992 + struct device_node *node) 991 993 { 992 994 int child_count = of_get_child_count(node); 993 995 struct emc_timing *timing; ··· 1015 1017 return 0; 1016 1018 } 1017 1019 1018 - static const struct of_device_id tegra_emc_of_match[] = { 1020 + static const struct of_device_id tegra124_emc_of_match[] = { 1019 1021 { .compatible = "nvidia,tegra124-emc" }, 1020 1022 { .compatible = "nvidia,tegra132-emc" }, 1021 1023 {} 1022 1024 }; 1023 - MODULE_DEVICE_TABLE(of, tegra_emc_of_match); 1025 + MODULE_DEVICE_TABLE(of, tegra124_emc_of_match); 1024 1026 1025 1027 static struct device_node * 1026 - tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code) 1028 + tegra124_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code) 1027 1029 { 1028 1030 struct device_node *np; 1029 1031 int err; ··· 1041 1043 return NULL; 1042 1044 } 1043 1045 1044 - static void tegra_emc_rate_requests_init(struct tegra_emc *emc) 1046 + static void tegra124_emc_rate_requests_init(struct tegra_emc *emc) 1045 1047 { 1046 1048 unsigned int i; 1047 1049 ··· 1143 1145 * valid range. 
1144 1146 */ 1145 1147 1146 - static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) 1148 + static bool tegra124_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) 1147 1149 { 1148 1150 unsigned int i; 1149 1151 ··· 1154 1156 return false; 1155 1157 } 1156 1158 1157 - static int tegra_emc_debug_available_rates_show(struct seq_file *s, 1158 - void *data) 1159 + static int tegra124_emc_debug_available_rates_show(struct seq_file *s, 1160 + void *data) 1159 1161 { 1160 1162 struct tegra_emc *emc = s->private; 1161 1163 const char *prefix = ""; ··· 1171 1173 return 0; 1172 1174 } 1173 1175 1174 - DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates); 1176 + DEFINE_SHOW_ATTRIBUTE(tegra124_emc_debug_available_rates); 1175 1177 1176 - static int tegra_emc_debug_min_rate_get(void *data, u64 *rate) 1178 + static int tegra124_emc_debug_min_rate_get(void *data, u64 *rate) 1177 1179 { 1178 1180 struct tegra_emc *emc = data; 1179 1181 ··· 1182 1184 return 0; 1183 1185 } 1184 1186 1185 - static int tegra_emc_debug_min_rate_set(void *data, u64 rate) 1187 + static int tegra124_emc_debug_min_rate_set(void *data, u64 rate) 1186 1188 { 1187 1189 struct tegra_emc *emc = data; 1188 1190 int err; 1189 1191 1190 - if (!tegra_emc_validate_rate(emc, rate)) 1192 + if (!tegra124_emc_validate_rate(emc, rate)) 1191 1193 return -EINVAL; 1192 1194 1193 1195 err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG); ··· 1199 1201 return 0; 1200 1202 } 1201 1203 1202 - DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops, 1203 - tegra_emc_debug_min_rate_get, 1204 - tegra_emc_debug_min_rate_set, "%llu\n"); 1204 + DEFINE_DEBUGFS_ATTRIBUTE(tegra124_emc_debug_min_rate_fops, 1205 + tegra124_emc_debug_min_rate_get, 1206 + tegra124_emc_debug_min_rate_set, "%llu\n"); 1205 1207 1206 - static int tegra_emc_debug_max_rate_get(void *data, u64 *rate) 1208 + static int tegra124_emc_debug_max_rate_get(void *data, u64 *rate) 1207 1209 { 1208 1210 struct tegra_emc *emc = data; 1209 1211 
··· 1212 1214 return 0; 1213 1215 } 1214 1216 1215 - static int tegra_emc_debug_max_rate_set(void *data, u64 rate) 1217 + static int tegra124_emc_debug_max_rate_set(void *data, u64 rate) 1216 1218 { 1217 1219 struct tegra_emc *emc = data; 1218 1220 int err; 1219 1221 1220 - if (!tegra_emc_validate_rate(emc, rate)) 1222 + if (!tegra124_emc_validate_rate(emc, rate)) 1221 1223 return -EINVAL; 1222 1224 1223 1225 err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG); ··· 1229 1231 return 0; 1230 1232 } 1231 1233 1232 - DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops, 1233 - tegra_emc_debug_max_rate_get, 1234 - tegra_emc_debug_max_rate_set, "%llu\n"); 1234 + DEFINE_DEBUGFS_ATTRIBUTE(tegra124_emc_debug_max_rate_fops, 1235 + tegra124_emc_debug_max_rate_get, 1236 + tegra124_emc_debug_max_rate_set, "%llu\n"); 1235 1237 1236 1238 static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc) 1237 1239 { ··· 1266 1268 emc->debugfs.root = debugfs_create_dir("emc", NULL); 1267 1269 1268 1270 debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc, 1269 - &tegra_emc_debug_available_rates_fops); 1271 + &tegra124_emc_debug_available_rates_fops); 1270 1272 debugfs_create_file("min_rate", 0644, emc->debugfs.root, 1271 - emc, &tegra_emc_debug_min_rate_fops); 1273 + emc, &tegra124_emc_debug_min_rate_fops); 1272 1274 debugfs_create_file("max_rate", 0644, emc->debugfs.root, 1273 - emc, &tegra_emc_debug_max_rate_fops); 1275 + emc, &tegra124_emc_debug_max_rate_fops); 1274 1276 } 1275 1277 1276 1278 static inline struct tegra_emc * ··· 1334 1336 return 0; 1335 1337 } 1336 1338 1337 - static int tegra_emc_interconnect_init(struct tegra_emc *emc) 1339 + static int tegra124_emc_interconnect_init(struct tegra_emc *emc) 1338 1340 { 1339 1341 const struct tegra_mc_soc *soc = emc->mc->soc; 1340 1342 struct icc_node *node; ··· 1350 1352 1351 1353 /* create External Memory Controller node */ 1352 1354 node = icc_node_create(TEGRA_ICC_EMC); 1353 - if (IS_ERR(node)) { 1354 - 
err = PTR_ERR(node); 1355 - goto err_msg; 1356 - } 1355 + if (IS_ERR(node)) 1356 + return PTR_ERR(node); 1357 1357 1358 1358 node->name = "External Memory Controller"; 1359 1359 icc_node_add(node, &emc->provider); ··· 1379 1383 1380 1384 remove_nodes: 1381 1385 icc_nodes_remove(&emc->provider); 1382 - err_msg: 1383 - dev_err(emc->dev, "failed to initialize ICC: %d\n", err); 1384 1386 1385 - return err; 1387 + return dev_err_probe(emc->dev, err, "failed to initialize ICC\n"); 1386 1388 } 1387 1389 1388 - static int tegra_emc_opp_table_init(struct tegra_emc *emc) 1390 + static int tegra124_emc_opp_table_init(struct tegra_emc *emc) 1389 1391 { 1390 1392 u32 hw_version = BIT(tegra_sku_info.soc_speedo_id); 1391 1393 int opp_token, err; 1392 1394 1393 1395 err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1); 1394 - if (err < 0) { 1395 - dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err); 1396 - return err; 1397 - } 1396 + if (err < 0) 1397 + return dev_err_probe(emc->dev, err, "failed to set OPP supported HW\n"); 1398 + 1398 1399 opp_token = err; 1399 1400 1400 1401 err = dev_pm_opp_of_add_table(emc->dev); 1401 1402 if (err) { 1402 1403 if (err == -ENODEV) 1403 - dev_err(emc->dev, "OPP table not found, please update your device tree\n"); 1404 + dev_err_probe(emc->dev, err, 1405 + "OPP table not found, please update your device tree\n"); 1404 1406 else 1405 - dev_err(emc->dev, "failed to add OPP table: %d\n", err); 1407 + dev_err_probe(emc->dev, err, "failed to add OPP table\n"); 1406 1408 1407 1409 goto put_hw_table; 1408 1410 } ··· 1411 1417 /* first dummy rate-set initializes voltage state */ 1412 1418 err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk)); 1413 1419 if (err) { 1414 - dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err); 1420 + dev_err_probe(emc->dev, err, "failed to initialize OPP clock\n"); 1415 1421 goto remove_table; 1416 1422 } 1417 1423 ··· 1425 1431 return err; 1426 1432 } 1427 1433 1428 - static void 
devm_tegra_emc_unset_callback(void *data) 1434 + static void devm_tegra124_emc_unset_callback(void *data) 1429 1435 { 1430 1436 tegra124_clk_set_emc_callbacks(NULL, NULL); 1431 1437 } 1432 1438 1433 - static int tegra_emc_probe(struct platform_device *pdev) 1439 + static int tegra124_emc_probe(struct platform_device *pdev) 1434 1440 { 1435 1441 struct device_node *np; 1436 1442 struct tegra_emc *emc; ··· 1454 1460 1455 1461 ram_code = tegra_read_ram_code(); 1456 1462 1457 - np = tegra_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code); 1463 + np = tegra124_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code); 1458 1464 if (np) { 1459 - err = tegra_emc_load_timings_from_dt(emc, np); 1465 + err = tegra124_emc_load_timings_from_dt(emc, np); 1460 1466 of_node_put(np); 1461 1467 if (err) 1462 1468 return err; ··· 1466 1472 ram_code); 1467 1473 } 1468 1474 1469 - err = emc_init(emc); 1470 - if (err) { 1471 - dev_err(&pdev->dev, "EMC initialization failed: %d\n", err); 1472 - return err; 1473 - } 1475 + emc_init(emc); 1474 1476 1475 1477 platform_set_drvdata(pdev, emc); 1476 1478 1477 - tegra124_clk_set_emc_callbacks(tegra_emc_prepare_timing_change, 1478 - tegra_emc_complete_timing_change); 1479 + tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change, 1480 + tegra124_emc_complete_timing_change); 1479 1481 1480 - err = devm_add_action_or_reset(&pdev->dev, devm_tegra_emc_unset_callback, 1482 + err = devm_add_action_or_reset(&pdev->dev, devm_tegra124_emc_unset_callback, 1481 1483 NULL); 1482 1484 if (err) 1483 1485 return err; 1484 1486 1485 1487 emc->clk = devm_clk_get(&pdev->dev, "emc"); 1486 - if (IS_ERR(emc->clk)) { 1487 - err = PTR_ERR(emc->clk); 1488 - dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err); 1489 - return err; 1490 - } 1488 + if (IS_ERR(emc->clk)) 1489 + return dev_err_probe(&pdev->dev, PTR_ERR(emc->clk), 1490 + "failed to get EMC clock\n"); 1491 1491 1492 - err = tegra_emc_opp_table_init(emc); 1492 + err = 
tegra124_emc_opp_table_init(emc); 1493 1493 if (err) 1494 1494 return err; 1495 1495 1496 - tegra_emc_rate_requests_init(emc); 1496 + tegra124_emc_rate_requests_init(emc); 1497 1497 1498 1498 if (IS_ENABLED(CONFIG_DEBUG_FS)) 1499 1499 emc_debugfs_init(&pdev->dev, emc); 1500 1500 1501 - tegra_emc_interconnect_init(emc); 1501 + tegra124_emc_interconnect_init(emc); 1502 1502 1503 1503 /* 1504 1504 * Don't allow the kernel module to be unloaded. Unloading adds some ··· 1504 1516 return 0; 1505 1517 }; 1506 1518 1507 - static struct platform_driver tegra_emc_driver = { 1508 - .probe = tegra_emc_probe, 1519 + static struct platform_driver tegra124_emc_driver = { 1520 + .probe = tegra124_emc_probe, 1509 1521 .driver = { 1510 1522 .name = "tegra-emc", 1511 - .of_match_table = tegra_emc_of_match, 1523 + .of_match_table = tegra124_emc_of_match, 1512 1524 .suppress_bind_attrs = true, 1513 1525 .sync_state = icc_sync_state, 1514 1526 }, 1515 1527 }; 1516 - module_platform_driver(tegra_emc_driver); 1528 + module_platform_driver(tegra124_emc_driver); 1517 1529 1518 1530 MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>"); 1519 1531 MODULE_DESCRIPTION("NVIDIA Tegra124 EMC driver");
+16 -19
drivers/memory/tegra/tegra186-emc.c
··· 218 218 } 219 219 220 220 /* 221 - * tegra_emc_icc_set_bw() - Set BW api for EMC provider 221 + * tegra186_emc_icc_set_bw() - Set BW api for EMC provider 222 222 * @src: ICC node for External Memory Controller (EMC) 223 223 * @dst: ICC node for External Memory (DRAM) 224 224 * 225 225 * Do nothing here as info to BPMP-FW is now passed in the BW set function 226 226 * of the MC driver. BPMP-FW sets the final Freq based on the passed values. 227 227 */ 228 - static int tegra_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst) 228 + static int tegra186_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst) 229 229 { 230 230 return 0; 231 231 } 232 232 233 233 static struct icc_node * 234 - tegra_emc_of_icc_xlate(const struct of_phandle_args *spec, void *data) 234 + tegra186_emc_of_icc_xlate(const struct of_phandle_args *spec, void *data) 235 235 { 236 236 struct icc_provider *provider = data; 237 237 struct icc_node *node; ··· 247 247 return ERR_PTR(-EPROBE_DEFER); 248 248 } 249 249 250 - static int tegra_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak) 250 + static int tegra186_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak) 251 251 { 252 252 *avg = 0; 253 253 *peak = 0; ··· 255 255 return 0; 256 256 } 257 257 258 - static int tegra_emc_interconnect_init(struct tegra186_emc *emc) 258 + static int tegra186_emc_interconnect_init(struct tegra186_emc *emc) 259 259 { 260 260 struct tegra_mc *mc = dev_get_drvdata(emc->dev->parent); 261 261 const struct tegra_mc_soc *soc = mc->soc; ··· 263 263 int err; 264 264 265 265 emc->provider.dev = emc->dev; 266 - emc->provider.set = tegra_emc_icc_set_bw; 266 + emc->provider.set = tegra186_emc_icc_set_bw; 267 267 emc->provider.data = &emc->provider; 268 268 emc->provider.aggregate = soc->icc_ops->aggregate; 269 - emc->provider.xlate = tegra_emc_of_icc_xlate; 270 - emc->provider.get_bw = tegra_emc_icc_get_init_bw; 269 + emc->provider.xlate = tegra186_emc_of_icc_xlate; 270 + 
emc->provider.get_bw = tegra186_emc_icc_get_init_bw; 271 271 272 272 icc_provider_init(&emc->provider); 273 273 274 274 /* create External Memory Controller node */ 275 275 node = icc_node_create(TEGRA_ICC_EMC); 276 - if (IS_ERR(node)) { 277 - err = PTR_ERR(node); 278 - goto err_msg; 279 - } 276 + if (IS_ERR(node)) 277 + return PTR_ERR(node); 280 278 281 279 node->name = "External Memory Controller"; 282 280 icc_node_add(node, &emc->provider); ··· 302 304 303 305 remove_nodes: 304 306 icc_nodes_remove(&emc->provider); 305 - err_msg: 306 - dev_err(emc->dev, "failed to initialize ICC: %d\n", err); 307 307 308 - return err; 308 + return dev_err_probe(emc->dev, err, "failed to initialize ICC\n"); 309 309 } 310 310 311 311 static int tegra186_emc_probe(struct platform_device *pdev) ··· 318 322 319 323 emc->bpmp = tegra_bpmp_get(&pdev->dev); 320 324 if (IS_ERR(emc->bpmp)) 321 - return dev_err_probe(&pdev->dev, PTR_ERR(emc->bpmp), "failed to get BPMP\n"); 325 + return dev_err_probe(&pdev->dev, PTR_ERR(emc->bpmp), 326 + "failed to get BPMP\n"); 322 327 323 328 emc->clk = devm_clk_get(&pdev->dev, "emc"); 324 329 if (IS_ERR(emc->clk)) { 325 - err = PTR_ERR(emc->clk); 326 - dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err); 330 + err = dev_err_probe(&pdev->dev, PTR_ERR(emc->clk), 331 + "failed to get EMC clock\n"); 327 332 goto put_bpmp; 328 333 } 329 334 ··· 356 359 * EINVAL instead of passing the request to BPMP-FW later when the BW 357 360 * request is made by client with 'icc_set_bw()' call. 358 361 */ 359 - err = tegra_emc_interconnect_init(emc); 362 + err = tegra186_emc_interconnect_init(emc); 360 363 if (err) { 361 364 mc->bpmp = NULL; 362 365 goto put_bpmp;
+71 -79
drivers/memory/tegra/tegra20-emc.c
··· 232 232 bool mrr_error; 233 233 }; 234 234 235 - static irqreturn_t tegra_emc_isr(int irq, void *data) 235 + static irqreturn_t tegra20_emc_isr(int irq, void *data) 236 236 { 237 237 struct tegra_emc *emc = data; 238 238 u32 intmask = EMC_REFRESH_OVERFLOW_INT; ··· 253 253 return IRQ_HANDLED; 254 254 } 255 255 256 - static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc, 257 - unsigned long rate) 256 + static struct emc_timing *tegra20_emc_find_timing(struct tegra_emc *emc, 257 + unsigned long rate) 258 258 { 259 259 struct emc_timing *timing = NULL; 260 260 unsigned int i; ··· 276 276 277 277 static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate) 278 278 { 279 - struct emc_timing *timing = tegra_emc_find_timing(emc, rate); 279 + struct emc_timing *timing = tegra20_emc_find_timing(emc, rate); 280 280 unsigned int i; 281 281 282 282 if (!timing) ··· 321 321 return 0; 322 322 } 323 323 324 - static int tegra_emc_clk_change_notify(struct notifier_block *nb, 325 - unsigned long msg, void *data) 324 + static int tegra20_emc_clk_change_notify(struct notifier_block *nb, 325 + unsigned long msg, void *data) 326 326 { 327 327 struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb); 328 328 struct clk_notifier_data *cnd = data; ··· 407 407 return 0; 408 408 } 409 409 410 - static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, 411 - struct device_node *node) 410 + static int tegra20_emc_load_timings_from_dt(struct tegra_emc *emc, 411 + struct device_node *node) 412 412 { 413 413 struct emc_timing *timing; 414 414 int child_count; ··· 452 452 } 453 453 454 454 static struct device_node * 455 - tegra_emc_find_node_by_ram_code(struct tegra_emc *emc) 455 + tegra20_emc_find_node_by_ram_code(struct tegra_emc *emc) 456 456 { 457 457 struct device *dev = emc->dev; 458 458 struct device_node *np; ··· 710 710 return timing->rate; 711 711 } 712 712 713 - static void tegra_emc_rate_requests_init(struct tegra_emc *emc) 713 + 
static void tegra20_emc_rate_requests_init(struct tegra_emc *emc) 714 714 { 715 715 unsigned int i; 716 716 ··· 812 812 * valid range. 813 813 */ 814 814 815 - static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) 815 + static bool tegra20_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) 816 816 { 817 817 unsigned int i; 818 818 ··· 823 823 return false; 824 824 } 825 825 826 - static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data) 826 + static int tegra20_emc_debug_available_rates_show(struct seq_file *s, void *data) 827 827 { 828 828 struct tegra_emc *emc = s->private; 829 829 const char *prefix = ""; ··· 838 838 839 839 return 0; 840 840 } 841 - DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates); 841 + DEFINE_SHOW_ATTRIBUTE(tegra20_emc_debug_available_rates); 842 842 843 - static int tegra_emc_debug_min_rate_get(void *data, u64 *rate) 843 + static int tegra20_emc_debug_min_rate_get(void *data, u64 *rate) 844 844 { 845 845 struct tegra_emc *emc = data; 846 846 ··· 849 849 return 0; 850 850 } 851 851 852 - static int tegra_emc_debug_min_rate_set(void *data, u64 rate) 852 + static int tegra20_emc_debug_min_rate_set(void *data, u64 rate) 853 853 { 854 854 struct tegra_emc *emc = data; 855 855 int err; 856 856 857 - if (!tegra_emc_validate_rate(emc, rate)) 857 + if (!tegra20_emc_validate_rate(emc, rate)) 858 858 return -EINVAL; 859 859 860 860 err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG); ··· 866 866 return 0; 867 867 } 868 868 869 - DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops, 870 - tegra_emc_debug_min_rate_get, 871 - tegra_emc_debug_min_rate_set, "%llu\n"); 869 + DEFINE_SIMPLE_ATTRIBUTE(tegra20_emc_debug_min_rate_fops, 870 + tegra20_emc_debug_min_rate_get, 871 + tegra20_emc_debug_min_rate_set, "%llu\n"); 872 872 873 - static int tegra_emc_debug_max_rate_get(void *data, u64 *rate) 873 + static int tegra20_emc_debug_max_rate_get(void *data, u64 *rate) 874 874 { 875 875 struct tegra_emc 
*emc = data; 876 876 ··· 879 879 return 0; 880 880 } 881 881 882 - static int tegra_emc_debug_max_rate_set(void *data, u64 rate) 882 + static int tegra20_emc_debug_max_rate_set(void *data, u64 rate) 883 883 { 884 884 struct tegra_emc *emc = data; 885 885 int err; 886 886 887 - if (!tegra_emc_validate_rate(emc, rate)) 887 + if (!tegra20_emc_validate_rate(emc, rate)) 888 888 return -EINVAL; 889 889 890 890 err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG); ··· 896 896 return 0; 897 897 } 898 898 899 - DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops, 900 - tegra_emc_debug_max_rate_get, 901 - tegra_emc_debug_max_rate_set, "%llu\n"); 899 + DEFINE_SIMPLE_ATTRIBUTE(tegra20_emc_debug_max_rate_fops, 900 + tegra20_emc_debug_max_rate_get, 901 + tegra20_emc_debug_max_rate_set, "%llu\n"); 902 902 903 - static void tegra_emc_debugfs_init(struct tegra_emc *emc) 903 + static void tegra20_emc_debugfs_init(struct tegra_emc *emc) 904 904 { 905 905 struct device *dev = emc->dev; 906 906 unsigned int i; ··· 933 933 emc->debugfs.root = debugfs_create_dir("emc", NULL); 934 934 935 935 debugfs_create_file("available_rates", 0444, emc->debugfs.root, 936 - emc, &tegra_emc_debug_available_rates_fops); 936 + emc, &tegra20_emc_debug_available_rates_fops); 937 937 debugfs_create_file("min_rate", 0644, emc->debugfs.root, 938 - emc, &tegra_emc_debug_min_rate_fops); 938 + emc, &tegra20_emc_debug_min_rate_fops); 939 939 debugfs_create_file("max_rate", 0644, emc->debugfs.root, 940 - emc, &tegra_emc_debug_max_rate_fops); 940 + emc, &tegra20_emc_debug_max_rate_fops); 941 941 } 942 942 943 943 static inline struct tegra_emc * ··· 1000 1000 return 0; 1001 1001 } 1002 1002 1003 - static int tegra_emc_interconnect_init(struct tegra_emc *emc) 1003 + static int tegra20_emc_interconnect_init(struct tegra_emc *emc) 1004 1004 { 1005 1005 const struct tegra_mc_soc *soc; 1006 1006 struct icc_node *node; ··· 1022 1022 1023 1023 /* create External Memory Controller node */ 1024 1024 node = 
icc_node_create(TEGRA_ICC_EMC); 1025 - if (IS_ERR(node)) { 1026 - err = PTR_ERR(node); 1027 - goto err_msg; 1028 - } 1025 + if (IS_ERR(node)) 1026 + return PTR_ERR(node); 1029 1027 1030 1028 node->name = "External Memory Controller"; 1031 1029 icc_node_add(node, &emc->provider); ··· 1051 1053 1052 1054 remove_nodes: 1053 1055 icc_nodes_remove(&emc->provider); 1054 - err_msg: 1055 - dev_err(emc->dev, "failed to initialize ICC: %d\n", err); 1056 1056 1057 - return err; 1057 + return dev_err_probe(emc->dev, err, "failed to initialize ICC\n"); 1058 1058 } 1059 1059 1060 - static void devm_tegra_emc_unset_callback(void *data) 1060 + static void devm_tegra20_emc_unset_callback(void *data) 1061 1061 { 1062 1062 tegra20_clk_set_emc_round_callback(NULL, NULL); 1063 1063 } 1064 1064 1065 - static void devm_tegra_emc_unreg_clk_notifier(void *data) 1065 + static void devm_tegra20_emc_unreg_clk_notifier(void *data) 1066 1066 { 1067 1067 struct tegra_emc *emc = data; 1068 1068 1069 1069 clk_notifier_unregister(emc->clk, &emc->clk_nb); 1070 1070 } 1071 1071 1072 - static int tegra_emc_init_clk(struct tegra_emc *emc) 1072 + static int tegra20_emc_init_clk(struct tegra_emc *emc) 1073 1073 { 1074 1074 int err; 1075 1075 1076 1076 tegra20_clk_set_emc_round_callback(emc_round_rate, emc); 1077 1077 1078 - err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback, 1078 + err = devm_add_action_or_reset(emc->dev, devm_tegra20_emc_unset_callback, 1079 1079 NULL); 1080 1080 if (err) 1081 1081 return err; 1082 1082 1083 1083 emc->clk = devm_clk_get(emc->dev, NULL); 1084 - if (IS_ERR(emc->clk)) { 1085 - dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk); 1086 - return PTR_ERR(emc->clk); 1087 - } 1084 + if (IS_ERR(emc->clk)) 1085 + return dev_err_probe(emc->dev, PTR_ERR(emc->clk), 1086 + "failed to get EMC clock\n"); 1088 1087 1089 1088 err = clk_notifier_register(emc->clk, &emc->clk_nb); 1090 - if (err) { 1091 - dev_err(emc->dev, "failed to register clk notifier: 
%d\n", err); 1092 - return err; 1093 - } 1089 + if (err) 1090 + return dev_err_probe(emc->dev, err, "failed to register clk notifier\n"); 1094 1091 1095 1092 err = devm_add_action_or_reset(emc->dev, 1096 - devm_tegra_emc_unreg_clk_notifier, emc); 1093 + devm_tegra20_emc_unreg_clk_notifier, emc); 1097 1094 if (err) 1098 1095 return err; 1099 1096 1100 1097 return 0; 1101 1098 } 1102 1099 1103 - static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq, 1104 - u32 flags) 1100 + static int tegra20_emc_devfreq_target(struct device *dev, unsigned long *freq, 1101 + u32 flags) 1105 1102 { 1106 1103 struct tegra_emc *emc = dev_get_drvdata(dev); 1107 1104 struct dev_pm_opp *opp; ··· 1114 1121 return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ); 1115 1122 } 1116 1123 1117 - static int tegra_emc_devfreq_get_dev_status(struct device *dev, 1118 - struct devfreq_dev_status *stat) 1124 + static int tegra20_emc_devfreq_get_dev_status(struct device *dev, 1125 + struct devfreq_dev_status *stat) 1119 1126 { 1120 1127 struct tegra_emc *emc = dev_get_drvdata(dev); 1121 1128 ··· 1137 1144 return 0; 1138 1145 } 1139 1146 1140 - static struct devfreq_dev_profile tegra_emc_devfreq_profile = { 1147 + static struct devfreq_dev_profile tegra20_emc_devfreq_profile = { 1141 1148 .polling_ms = 30, 1142 - .target = tegra_emc_devfreq_target, 1143 - .get_dev_status = tegra_emc_devfreq_get_dev_status, 1149 + .target = tegra20_emc_devfreq_target, 1150 + .get_dev_status = tegra20_emc_devfreq_get_dev_status, 1144 1151 }; 1145 1152 1146 - static int tegra_emc_devfreq_init(struct tegra_emc *emc) 1153 + static int tegra20_emc_devfreq_init(struct tegra_emc *emc) 1147 1154 { 1148 1155 struct devfreq *devfreq; 1149 1156 ··· 1165 1172 writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL); 1166 1173 writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT); 1167 1174 1168 - devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile, 1175 + devfreq = 
devm_devfreq_add_device(emc->dev, &tegra20_emc_devfreq_profile, 1169 1176 DEVFREQ_GOV_SIMPLE_ONDEMAND, 1170 1177 &emc->ondemand_data); 1171 - if (IS_ERR(devfreq)) { 1172 - dev_err(emc->dev, "failed to initialize devfreq: %pe", devfreq); 1173 - return PTR_ERR(devfreq); 1174 - } 1178 + if (IS_ERR(devfreq)) 1179 + return dev_err_probe(emc->dev, PTR_ERR(devfreq), 1180 + "failed to initialize devfreq\n"); 1175 1181 1176 1182 return 0; 1177 1183 } 1178 1184 1179 - static int tegra_emc_probe(struct platform_device *pdev) 1185 + static int tegra20_emc_probe(struct platform_device *pdev) 1180 1186 { 1181 1187 struct tegra_core_opp_params opp_params = {}; 1182 1188 struct device_node *np; ··· 1191 1199 return -ENOMEM; 1192 1200 1193 1201 mutex_init(&emc->rate_lock); 1194 - emc->clk_nb.notifier_call = tegra_emc_clk_change_notify; 1202 + emc->clk_nb.notifier_call = tegra20_emc_clk_change_notify; 1195 1203 emc->dev = &pdev->dev; 1196 1204 1197 1205 emc->regs = devm_platform_ioremap_resource(pdev, 0); ··· 1202 1210 if (err) 1203 1211 return err; 1204 1212 1205 - np = tegra_emc_find_node_by_ram_code(emc); 1213 + np = tegra20_emc_find_node_by_ram_code(emc); 1206 1214 if (np) { 1207 - err = tegra_emc_load_timings_from_dt(emc, np); 1215 + err = tegra20_emc_load_timings_from_dt(emc, np); 1208 1216 of_node_put(np); 1209 1217 if (err) 1210 1218 return err; 1211 1219 } 1212 1220 1213 - err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0, 1221 + err = devm_request_irq(&pdev->dev, irq, tegra20_emc_isr, 0, 1214 1222 dev_name(&pdev->dev), emc); 1215 1223 if (err) { 1216 1224 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); 1217 1225 return err; 1218 1226 } 1219 1227 1220 - err = tegra_emc_init_clk(emc); 1228 + err = tegra20_emc_init_clk(emc); 1221 1229 if (err) 1222 1230 return err; 1223 1231 ··· 1228 1236 return err; 1229 1237 1230 1238 platform_set_drvdata(pdev, emc); 1231 - tegra_emc_rate_requests_init(emc); 1232 - tegra_emc_debugfs_init(emc); 1233 - 
tegra_emc_interconnect_init(emc); 1234 - tegra_emc_devfreq_init(emc); 1239 + tegra20_emc_rate_requests_init(emc); 1240 + tegra20_emc_debugfs_init(emc); 1241 + tegra20_emc_interconnect_init(emc); 1242 + tegra20_emc_devfreq_init(emc); 1235 1243 1236 1244 /* 1237 1245 * Don't allow the kernel module to be unloaded. Unloading adds some ··· 1243 1251 return 0; 1244 1252 } 1245 1253 1246 - static const struct of_device_id tegra_emc_of_match[] = { 1254 + static const struct of_device_id tegra20_emc_of_match[] = { 1247 1255 { .compatible = "nvidia,tegra20-emc", }, 1248 1256 {}, 1249 1257 }; 1250 - MODULE_DEVICE_TABLE(of, tegra_emc_of_match); 1258 + MODULE_DEVICE_TABLE(of, tegra20_emc_of_match); 1251 1259 1252 - static struct platform_driver tegra_emc_driver = { 1253 - .probe = tegra_emc_probe, 1260 + static struct platform_driver tegra20_emc_driver = { 1261 + .probe = tegra20_emc_probe, 1254 1262 .driver = { 1255 1263 .name = "tegra20-emc", 1256 - .of_match_table = tegra_emc_of_match, 1264 + .of_match_table = tegra20_emc_of_match, 1257 1265 .suppress_bind_attrs = true, 1258 1266 .sync_state = icc_sync_state, 1259 1267 }, 1260 1268 }; 1261 - module_platform_driver(tegra_emc_driver); 1269 + module_platform_driver(tegra20_emc_driver); 1262 1270 1263 1271 MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>"); 1264 1272 MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+55 -64
drivers/memory/tegra/tegra30-emc.c
··· 413 413 return 0; 414 414 } 415 415 416 - static irqreturn_t tegra_emc_isr(int irq, void *data) 416 + static irqreturn_t tegra30_emc_isr(int irq, void *data) 417 417 { 418 418 struct tegra_emc *emc = data; 419 419 u32 intmask = EMC_REFRESH_OVERFLOW_INT; ··· 1228 1228 return timing->rate; 1229 1229 } 1230 1230 1231 - static void tegra_emc_rate_requests_init(struct tegra_emc *emc) 1231 + static void tegra30_emc_rate_requests_init(struct tegra_emc *emc) 1232 1232 { 1233 1233 unsigned int i; 1234 1234 ··· 1330 1330 * valid range. 1331 1331 */ 1332 1332 1333 - static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) 1333 + static bool tegra30_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) 1334 1334 { 1335 1335 unsigned int i; 1336 1336 ··· 1341 1341 return false; 1342 1342 } 1343 1343 1344 - static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data) 1344 + static int tegra30_emc_debug_available_rates_show(struct seq_file *s, void *data) 1345 1345 { 1346 1346 struct tegra_emc *emc = s->private; 1347 1347 const char *prefix = ""; ··· 1356 1356 1357 1357 return 0; 1358 1358 } 1359 - DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates); 1359 + DEFINE_SHOW_ATTRIBUTE(tegra30_emc_debug_available_rates); 1360 1360 1361 - static int tegra_emc_debug_min_rate_get(void *data, u64 *rate) 1361 + static int tegra30_emc_debug_min_rate_get(void *data, u64 *rate) 1362 1362 { 1363 1363 struct tegra_emc *emc = data; 1364 1364 ··· 1367 1367 return 0; 1368 1368 } 1369 1369 1370 - static int tegra_emc_debug_min_rate_set(void *data, u64 rate) 1370 + static int tegra30_emc_debug_min_rate_set(void *data, u64 rate) 1371 1371 { 1372 1372 struct tegra_emc *emc = data; 1373 1373 int err; 1374 1374 1375 - if (!tegra_emc_validate_rate(emc, rate)) 1375 + if (!tegra30_emc_validate_rate(emc, rate)) 1376 1376 return -EINVAL; 1377 1377 1378 1378 err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG); ··· 1384 1384 return 0; 1385 1385 } 1386 1386 
1387 - DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops, 1388 - tegra_emc_debug_min_rate_get, 1389 - tegra_emc_debug_min_rate_set, "%llu\n"); 1387 + DEFINE_DEBUGFS_ATTRIBUTE(tegra30_emc_debug_min_rate_fops, 1388 + tegra30_emc_debug_min_rate_get, 1389 + tegra30_emc_debug_min_rate_set, "%llu\n"); 1390 1390 1391 - static int tegra_emc_debug_max_rate_get(void *data, u64 *rate) 1391 + static int tegra30_emc_debug_max_rate_get(void *data, u64 *rate) 1392 1392 { 1393 1393 struct tegra_emc *emc = data; 1394 1394 ··· 1397 1397 return 0; 1398 1398 } 1399 1399 1400 - static int tegra_emc_debug_max_rate_set(void *data, u64 rate) 1400 + static int tegra30_emc_debug_max_rate_set(void *data, u64 rate) 1401 1401 { 1402 1402 struct tegra_emc *emc = data; 1403 1403 int err; 1404 1404 1405 - if (!tegra_emc_validate_rate(emc, rate)) 1405 + if (!tegra30_emc_validate_rate(emc, rate)) 1406 1406 return -EINVAL; 1407 1407 1408 1408 err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG); ··· 1414 1414 return 0; 1415 1415 } 1416 1416 1417 - DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops, 1418 - tegra_emc_debug_max_rate_get, 1419 - tegra_emc_debug_max_rate_set, "%llu\n"); 1417 + DEFINE_DEBUGFS_ATTRIBUTE(tegra30_emc_debug_max_rate_fops, 1418 + tegra30_emc_debug_max_rate_get, 1419 + tegra30_emc_debug_max_rate_set, "%llu\n"); 1420 1420 1421 - static void tegra_emc_debugfs_init(struct tegra_emc *emc) 1421 + static void tegra30_emc_debugfs_init(struct tegra_emc *emc) 1422 1422 { 1423 1423 struct device *dev = emc->dev; 1424 1424 unsigned int i; ··· 1451 1451 emc->debugfs.root = debugfs_create_dir("emc", NULL); 1452 1452 1453 1453 debugfs_create_file("available_rates", 0444, emc->debugfs.root, 1454 - emc, &tegra_emc_debug_available_rates_fops); 1454 + emc, &tegra30_emc_debug_available_rates_fops); 1455 1455 debugfs_create_file("min_rate", 0644, emc->debugfs.root, 1456 - emc, &tegra_emc_debug_min_rate_fops); 1456 + emc, &tegra30_emc_debug_min_rate_fops); 1457 1457 
debugfs_create_file("max_rate", 0644, emc->debugfs.root, 1458 - emc, &tegra_emc_debug_max_rate_fops); 1458 + emc, &tegra30_emc_debug_max_rate_fops); 1459 1459 } 1460 1460 1461 1461 static inline struct tegra_emc * ··· 1518 1518 return 0; 1519 1519 } 1520 1520 1521 - static int tegra_emc_interconnect_init(struct tegra_emc *emc) 1521 + static int tegra30_emc_interconnect_init(struct tegra_emc *emc) 1522 1522 { 1523 1523 const struct tegra_mc_soc *soc = emc->mc->soc; 1524 1524 struct icc_node *node; ··· 1534 1534 1535 1535 /* create External Memory Controller node */ 1536 1536 node = icc_node_create(TEGRA_ICC_EMC); 1537 - if (IS_ERR(node)) { 1538 - err = PTR_ERR(node); 1539 - goto err_msg; 1540 - } 1537 + if (IS_ERR(node)) 1538 + return PTR_ERR(node); 1541 1539 1542 1540 node->name = "External Memory Controller"; 1543 1541 icc_node_add(node, &emc->provider); ··· 1563 1565 1564 1566 remove_nodes: 1565 1567 icc_nodes_remove(&emc->provider); 1566 - err_msg: 1567 - dev_err(emc->dev, "failed to initialize ICC: %d\n", err); 1568 1568 1569 - return err; 1569 + return dev_err_probe(emc->dev, err, "failed to initialize ICC\n"); 1570 1570 } 1571 1571 1572 - static void devm_tegra_emc_unset_callback(void *data) 1572 + static void devm_tegra30_emc_unset_callback(void *data) 1573 1573 { 1574 1574 tegra20_clk_set_emc_round_callback(NULL, NULL); 1575 1575 } 1576 1576 1577 - static void devm_tegra_emc_unreg_clk_notifier(void *data) 1577 + static void devm_tegra30_emc_unreg_clk_notifier(void *data) 1578 1578 { 1579 1579 struct tegra_emc *emc = data; 1580 1580 1581 1581 clk_notifier_unregister(emc->clk, &emc->clk_nb); 1582 1582 } 1583 1583 1584 - static int tegra_emc_init_clk(struct tegra_emc *emc) 1584 + static int tegra30_emc_init_clk(struct tegra_emc *emc) 1585 1585 { 1586 1586 int err; 1587 1587 1588 1588 tegra20_clk_set_emc_round_callback(emc_round_rate, emc); 1589 1589 1590 - err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback, 1590 + err = 
devm_add_action_or_reset(emc->dev, devm_tegra30_emc_unset_callback, 1591 1591 NULL); 1592 1592 if (err) 1593 1593 return err; 1594 1594 1595 1595 emc->clk = devm_clk_get(emc->dev, NULL); 1596 - if (IS_ERR(emc->clk)) { 1597 - dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk); 1598 - return PTR_ERR(emc->clk); 1599 - } 1596 + if (IS_ERR(emc->clk)) 1597 + return dev_err_probe(emc->dev, PTR_ERR(emc->clk), 1598 + "failed to get EMC clock\n"); 1600 1599 1601 1600 err = clk_notifier_register(emc->clk, &emc->clk_nb); 1602 - if (err) { 1603 - dev_err(emc->dev, "failed to register clk notifier: %d\n", err); 1604 - return err; 1605 - } 1601 + if (err) 1602 + return dev_err_probe(emc->dev, err, "failed to register clk notifier\n"); 1606 1603 1607 1604 err = devm_add_action_or_reset(emc->dev, 1608 - devm_tegra_emc_unreg_clk_notifier, emc); 1605 + devm_tegra30_emc_unreg_clk_notifier, emc); 1609 1606 if (err) 1610 1607 return err; 1611 1608 1612 1609 return 0; 1613 1610 } 1614 1611 1615 - static int tegra_emc_probe(struct platform_device *pdev) 1612 + static int tegra30_emc_probe(struct platform_device *pdev) 1616 1613 { 1617 1614 struct tegra_core_opp_params opp_params = {}; 1618 1615 struct device_node *np; ··· 1648 1655 1649 1656 emc->irq = err; 1650 1657 1651 - err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0, 1658 + err = devm_request_irq(&pdev->dev, emc->irq, tegra30_emc_isr, 0, 1652 1659 dev_name(&pdev->dev), emc); 1653 - if (err) { 1654 - dev_err(&pdev->dev, "failed to request irq: %d\n", err); 1655 - return err; 1656 - } 1660 + if (err) 1661 + return dev_err_probe(&pdev->dev, err, "failed to request irq\n"); 1657 1662 1658 - err = tegra_emc_init_clk(emc); 1663 + err = tegra30_emc_init_clk(emc); 1659 1664 if (err) 1660 1665 return err; 1661 1666 ··· 1664 1673 return err; 1665 1674 1666 1675 platform_set_drvdata(pdev, emc); 1667 - tegra_emc_rate_requests_init(emc); 1668 - tegra_emc_debugfs_init(emc); 1669 - tegra_emc_interconnect_init(emc); 1676 + 
tegra30_emc_rate_requests_init(emc); 1677 + tegra30_emc_debugfs_init(emc); 1678 + tegra30_emc_interconnect_init(emc); 1670 1679 1671 1680 /* 1672 1681 * Don't allow the kernel module to be unloaded. Unloading adds some ··· 1678 1687 return 0; 1679 1688 } 1680 1689 1681 - static int tegra_emc_suspend(struct device *dev) 1690 + static int tegra30_emc_suspend(struct device *dev) 1682 1691 { 1683 1692 struct tegra_emc *emc = dev_get_drvdata(dev); 1684 1693 int err; ··· 1699 1708 return 0; 1700 1709 } 1701 1710 1702 - static int tegra_emc_resume(struct device *dev) 1711 + static int tegra30_emc_resume(struct device *dev) 1703 1712 { 1704 1713 struct tegra_emc *emc = dev_get_drvdata(dev); 1705 1714 ··· 1711 1720 return 0; 1712 1721 } 1713 1722 1714 - static const struct dev_pm_ops tegra_emc_pm_ops = { 1715 - .suspend = tegra_emc_suspend, 1716 - .resume = tegra_emc_resume, 1723 + static const struct dev_pm_ops tegra30_emc_pm_ops = { 1724 + .suspend = tegra30_emc_suspend, 1725 + .resume = tegra30_emc_resume, 1717 1726 }; 1718 1727 1719 - static const struct of_device_id tegra_emc_of_match[] = { 1728 + static const struct of_device_id tegra30_emc_of_match[] = { 1720 1729 { .compatible = "nvidia,tegra30-emc", }, 1721 1730 {}, 1722 1731 }; 1723 - MODULE_DEVICE_TABLE(of, tegra_emc_of_match); 1732 + MODULE_DEVICE_TABLE(of, tegra30_emc_of_match); 1724 1733 1725 - static struct platform_driver tegra_emc_driver = { 1726 - .probe = tegra_emc_probe, 1734 + static struct platform_driver tegra30_emc_driver = { 1735 + .probe = tegra30_emc_probe, 1727 1736 .driver = { 1728 1737 .name = "tegra30-emc", 1729 - .of_match_table = tegra_emc_of_match, 1730 - .pm = &tegra_emc_pm_ops, 1738 + .of_match_table = tegra30_emc_of_match, 1739 + .pm = &tegra30_emc_pm_ops, 1731 1740 .suppress_bind_attrs = true, 1732 1741 .sync_state = icc_sync_state, 1733 1742 }, 1734 1743 }; 1735 - module_platform_driver(tegra_emc_driver); 1744 + module_platform_driver(tegra30_emc_driver); 1736 1745 1737 1746 
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>"); 1738 1747 MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
+3 -4
drivers/pinctrl/pinctrl-zynqmp.c
··· 100 100 101 101 static struct pinctrl_desc zynqmp_desc; 102 102 static u32 family_code; 103 - static u32 sub_family_code; 104 103 105 104 static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev) 106 105 { ··· 604 605 return -ENOMEM; 605 606 606 607 for (pin = 0; pin < groups[resp[i]].npins; pin++) { 607 - if (family_code == ZYNQMP_FAMILY_CODE) 608 + if (family_code == PM_ZYNQMP_FAMILY_CODE) 608 609 __set_bit(groups[resp[i]].pins[pin], used_pins); 609 610 else 610 611 __set_bit((u8)groups[resp[i]].pins[pin] - 1, used_pins); ··· 957 958 if (!pctrl) 958 959 return -ENOMEM; 959 960 960 - ret = zynqmp_pm_get_family_info(&family_code, &sub_family_code); 961 + ret = zynqmp_pm_get_family_info(&family_code); 961 962 if (ret < 0) 962 963 return ret; 963 964 964 - if (family_code == ZYNQMP_FAMILY_CODE) { 965 + if (family_code == PM_ZYNQMP_FAMILY_CODE) { 965 966 ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev, &zynqmp_desc.pins, 966 967 &zynqmp_desc.npins); 967 968 } else {
+7 -3
drivers/power/reset/sc27xx-poweroff.c
··· 28 28 * taking cpus down to avoid racing regmap or spi mutex lock when poweroff 29 29 * system through PMIC. 30 30 */ 31 - static void sc27xx_poweroff_shutdown(void) 31 + static void sc27xx_poweroff_shutdown(void *data) 32 32 { 33 33 #ifdef CONFIG_HOTPLUG_CPU 34 34 int cpu; ··· 40 40 #endif 41 41 } 42 42 43 - static struct syscore_ops poweroff_syscore_ops = { 43 + static const struct syscore_ops poweroff_syscore_ops = { 44 44 .shutdown = sc27xx_poweroff_shutdown, 45 + }; 46 + 47 + static struct syscore poweroff_syscore = { 48 + .ops = &poweroff_syscore_ops, 45 49 }; 46 50 47 51 static void sc27xx_poweroff_do_poweroff(void) ··· 66 62 return -ENODEV; 67 63 68 64 pm_power_off = sc27xx_poweroff_do_poweroff; 69 - register_syscore_ops(&poweroff_syscore_ops); 65 + register_syscore(&poweroff_syscore); 70 66 return 0; 71 67 } 72 68
+12 -1
drivers/reset/Kconfig
··· 73 73 This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on 74 74 BCM7216 or the BCM2712. 75 75 76 + config RESET_EIC7700 77 + bool "Reset controller driver for ESWIN SoCs" 78 + depends on ARCH_ESWIN || COMPILE_TEST 79 + default ARCH_ESWIN 80 + help 81 + This enables the reset controller driver for ESWIN SoCs. This driver is 82 + specific to ESWIN SoCs and should only be enabled if using such hardware. 83 + The driver supports eic7700 series chips and provides functionality for 84 + asserting and deasserting resets on the chip. 85 + 76 86 config RESET_EYEQ 77 87 bool "Mobileye EyeQ reset controller" 78 88 depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST ··· 181 171 182 172 config RESET_MCHP_SPARX5 183 173 tristate "Microchip Sparx5 reset driver" 184 - depends on ARCH_SPARX5 || SOC_LAN966 || MCHP_LAN966X_PCI || COMPILE_TEST 174 + depends on ARCH_SPARX5 || ARCH_LAN969X || SOC_LAN966 || MCHP_LAN966X_PCI || COMPILE_TEST 185 175 default y if SPARX5_SWITCH 186 176 select MFD_SYSCON 187 177 help ··· 248 238 config RESET_RZG2L_USBPHY_CTRL 249 239 tristate "Renesas RZ/G2L USBPHY control driver" 250 240 depends on ARCH_RZG2L || COMPILE_TEST 241 + select MFD_SYSCON 251 242 help 252 243 Support for USBPHY Control found on RZ/G2L family. It mainly 253 244 controls reset and power down of the USB/PHY.
+1
drivers/reset/Makefile
··· 13 13 obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o 14 14 obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o 15 15 obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o 16 + obj-$(CONFIG_RESET_EIC7700) += reset-eic7700.o 16 17 obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o 17 18 obj-$(CONFIG_RESET_GPIO) += reset-gpio.o 18 19 obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
+3 -121
drivers/reset/core.c
··· 27 27 static DEFINE_MUTEX(reset_list_mutex); 28 28 static LIST_HEAD(reset_controller_list); 29 29 30 - static DEFINE_MUTEX(reset_lookup_mutex); 31 - static LIST_HEAD(reset_lookup_list); 32 - 33 30 /* Protects reset_gpio_lookup_list */ 34 31 static DEFINE_MUTEX(reset_gpio_lookup_mutex); 35 32 static LIST_HEAD(reset_gpio_lookup_list); ··· 190 193 return ret; 191 194 } 192 195 EXPORT_SYMBOL_GPL(devm_reset_controller_register); 193 - 194 - /** 195 - * reset_controller_add_lookup - register a set of lookup entries 196 - * @lookup: array of reset lookup entries 197 - * @num_entries: number of entries in the lookup array 198 - */ 199 - void reset_controller_add_lookup(struct reset_control_lookup *lookup, 200 - unsigned int num_entries) 201 - { 202 - struct reset_control_lookup *entry; 203 - unsigned int i; 204 - 205 - mutex_lock(&reset_lookup_mutex); 206 - for (i = 0; i < num_entries; i++) { 207 - entry = &lookup[i]; 208 - 209 - if (!entry->dev_id || !entry->provider) { 210 - pr_warn("%s(): reset lookup entry badly specified, skipping\n", 211 - __func__); 212 - continue; 213 - } 214 - 215 - list_add_tail(&entry->list, &reset_lookup_list); 216 - } 217 - mutex_unlock(&reset_lookup_mutex); 218 - } 219 - EXPORT_SYMBOL_GPL(reset_controller_add_lookup); 220 196 221 197 static inline struct reset_control_array * 222 198 rstc_to_array(struct reset_control *rstc) { ··· 1073 1103 } 1074 1104 EXPORT_SYMBOL_GPL(__of_reset_control_get); 1075 1105 1076 - static struct reset_controller_dev * 1077 - __reset_controller_by_name(const char *name) 1078 - { 1079 - struct reset_controller_dev *rcdev; 1080 - 1081 - lockdep_assert_held(&reset_list_mutex); 1082 - 1083 - list_for_each_entry(rcdev, &reset_controller_list, list) { 1084 - if (!rcdev->dev) 1085 - continue; 1086 - 1087 - if (!strcmp(name, dev_name(rcdev->dev))) 1088 - return rcdev; 1089 - } 1090 - 1091 - return NULL; 1092 - } 1093 - 1094 - static struct reset_control * 1095 - __reset_control_get_from_lookup(struct device *dev, 
const char *con_id, 1096 - enum reset_control_flags flags) 1097 - { 1098 - bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; 1099 - const struct reset_control_lookup *lookup; 1100 - struct reset_controller_dev *rcdev; 1101 - const char *dev_id = dev_name(dev); 1102 - struct reset_control *rstc = NULL; 1103 - 1104 - mutex_lock(&reset_lookup_mutex); 1105 - 1106 - list_for_each_entry(lookup, &reset_lookup_list, list) { 1107 - if (strcmp(lookup->dev_id, dev_id)) 1108 - continue; 1109 - 1110 - if ((!con_id && !lookup->con_id) || 1111 - ((con_id && lookup->con_id) && 1112 - !strcmp(con_id, lookup->con_id))) { 1113 - mutex_lock(&reset_list_mutex); 1114 - rcdev = __reset_controller_by_name(lookup->provider); 1115 - if (!rcdev) { 1116 - mutex_unlock(&reset_list_mutex); 1117 - mutex_unlock(&reset_lookup_mutex); 1118 - /* Reset provider may not be ready yet. */ 1119 - return ERR_PTR(-EPROBE_DEFER); 1120 - } 1121 - 1122 - flags &= ~RESET_CONTROL_FLAGS_BIT_OPTIONAL; 1123 - 1124 - rstc = __reset_control_get_internal(rcdev, 1125 - lookup->index, 1126 - flags); 1127 - mutex_unlock(&reset_list_mutex); 1128 - break; 1129 - } 1130 - } 1131 - 1132 - mutex_unlock(&reset_lookup_mutex); 1133 - 1134 - if (!rstc) 1135 - return optional ? NULL : ERR_PTR(-ENOENT); 1136 - 1137 - return rstc; 1138 - } 1139 - 1140 1106 struct reset_control *__reset_control_get(struct device *dev, const char *id, 1141 1107 int index, enum reset_control_flags flags) 1142 1108 { 1143 1109 bool shared = flags & RESET_CONTROL_FLAGS_BIT_SHARED; 1144 1110 bool acquired = flags & RESET_CONTROL_FLAGS_BIT_ACQUIRED; 1111 + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; 1145 1112 1146 1113 if (WARN_ON(shared && acquired)) 1147 1114 return ERR_PTR(-EINVAL); ··· 1086 1179 if (dev->of_node) 1087 1180 return __of_reset_control_get(dev->of_node, id, index, flags); 1088 1181 1089 - return __reset_control_get_from_lookup(dev, id, flags); 1182 + return optional ? 
NULL : ERR_PTR(-ENOENT); 1090 1183 } 1091 1184 EXPORT_SYMBOL_GPL(__reset_control_get); 1092 1185 ··· 1421 1514 } 1422 1515 EXPORT_SYMBOL_GPL(devm_reset_control_array_get); 1423 1516 1424 - static int reset_control_get_count_from_lookup(struct device *dev) 1425 - { 1426 - const struct reset_control_lookup *lookup; 1427 - const char *dev_id; 1428 - int count = 0; 1429 - 1430 - if (!dev) 1431 - return -EINVAL; 1432 - 1433 - dev_id = dev_name(dev); 1434 - mutex_lock(&reset_lookup_mutex); 1435 - 1436 - list_for_each_entry(lookup, &reset_lookup_list, list) { 1437 - if (!strcmp(lookup->dev_id, dev_id)) 1438 - count++; 1439 - } 1440 - 1441 - mutex_unlock(&reset_lookup_mutex); 1442 - 1443 - if (count == 0) 1444 - count = -ENOENT; 1445 - 1446 - return count; 1447 - } 1448 - 1449 1517 /** 1450 1518 * reset_control_get_count - Count number of resets available with a device 1451 1519 * ··· 1434 1552 if (dev->of_node) 1435 1553 return of_reset_control_get_count(dev->of_node); 1436 1554 1437 - return reset_control_get_count_from_lookup(dev); 1555 + return -ENOENT; 1438 1556 } 1439 1557 EXPORT_SYMBOL_GPL(reset_control_get_count);
+429
drivers/reset/reset-eic7700.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.. 4 + * All rights reserved. 5 + * 6 + * ESWIN Reset Driver 7 + * 8 + * Authors: 9 + * Yifeng Huang <huangyifeng@eswincomputing.com> 10 + * Xuyang Dong <dongxuyang@eswincomputing.com> 11 + */ 12 + 13 + #include <linux/err.h> 14 + #include <linux/init.h> 15 + #include <linux/of.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/regmap.h> 18 + #include <linux/reset-controller.h> 19 + #include <linux/slab.h> 20 + #include <linux/types.h> 21 + 22 + #include <dt-bindings/reset/eswin,eic7700-reset.h> 23 + 24 + #define SYSCRG_CLEAR_BOOT_INFO_OFFSET 0xC 25 + #define CLEAR_BOOT_FLAG_BIT BIT(0) 26 + #define SYSCRG_RESET_OFFSET 0x100 27 + 28 + /** 29 + * struct eic7700_reset_data - reset controller information structure 30 + * @rcdev: reset controller entity 31 + * @regmap: regmap handle containing the memory-mapped reset registers 32 + */ 33 + struct eic7700_reset_data { 34 + struct reset_controller_dev rcdev; 35 + struct regmap *regmap; 36 + }; 37 + 38 + static const struct regmap_config eic7700_regmap_config = { 39 + .reg_bits = 32, 40 + .val_bits = 32, 41 + .reg_stride = 4, 42 + .max_register = 0x1fc, 43 + }; 44 + 45 + struct eic7700_reg { 46 + u32 reg; 47 + u32 bit; 48 + }; 49 + 50 + static inline struct eic7700_reset_data * 51 + to_eic7700_reset_data(struct reset_controller_dev *rcdev) 52 + { 53 + return container_of(rcdev, struct eic7700_reset_data, rcdev); 54 + } 55 + 56 + #define EIC7700_RESET(id, reg, bit)[id] = \ 57 + { SYSCRG_RESET_OFFSET + (reg) * sizeof(u32), BIT(bit) } 58 + 59 + /* mapping table for reset ID to register offset and reset bit */ 60 + static const struct eic7700_reg eic7700_reset[] = { 61 + EIC7700_RESET(EIC7700_RESET_NOC_NSP, 0, 0), 62 + EIC7700_RESET(EIC7700_RESET_NOC_CFG, 0, 1), 63 + EIC7700_RESET(EIC7700_RESET_RNOC_NSP, 0, 2), 64 + EIC7700_RESET(EIC7700_RESET_SNOC_TCU, 0, 3), 65 + 
EIC7700_RESET(EIC7700_RESET_SNOC_U84, 0, 4), 66 + EIC7700_RESET(EIC7700_RESET_SNOC_PCIE_XSR, 0, 5), 67 + EIC7700_RESET(EIC7700_RESET_SNOC_PCIE_XMR, 0, 6), 68 + EIC7700_RESET(EIC7700_RESET_SNOC_PCIE_PR, 0, 7), 69 + EIC7700_RESET(EIC7700_RESET_SNOC_NPU, 0, 8), 70 + EIC7700_RESET(EIC7700_RESET_SNOC_JTAG, 0, 9), 71 + EIC7700_RESET(EIC7700_RESET_SNOC_DSP, 0, 10), 72 + EIC7700_RESET(EIC7700_RESET_SNOC_DDRC1_P2, 0, 11), 73 + EIC7700_RESET(EIC7700_RESET_SNOC_DDRC1_P1, 0, 12), 74 + EIC7700_RESET(EIC7700_RESET_SNOC_DDRC0_P2, 0, 13), 75 + EIC7700_RESET(EIC7700_RESET_SNOC_DDRC0_P1, 0, 14), 76 + EIC7700_RESET(EIC7700_RESET_SNOC_D2D, 0, 15), 77 + EIC7700_RESET(EIC7700_RESET_SNOC_AON, 0, 16), 78 + EIC7700_RESET(EIC7700_RESET_GPU_AXI, 1, 0), 79 + EIC7700_RESET(EIC7700_RESET_GPU_CFG, 1, 1), 80 + EIC7700_RESET(EIC7700_RESET_GPU_GRAY, 1, 2), 81 + EIC7700_RESET(EIC7700_RESET_GPU_JONES, 1, 3), 82 + EIC7700_RESET(EIC7700_RESET_GPU_SPU, 1, 4), 83 + EIC7700_RESET(EIC7700_RESET_DSP_AXI, 2, 0), 84 + EIC7700_RESET(EIC7700_RESET_DSP_CFG, 2, 1), 85 + EIC7700_RESET(EIC7700_RESET_DSP_DIV4, 2, 2), 86 + EIC7700_RESET(EIC7700_RESET_DSP_DIV0, 2, 4), 87 + EIC7700_RESET(EIC7700_RESET_DSP_DIV1, 2, 5), 88 + EIC7700_RESET(EIC7700_RESET_DSP_DIV2, 2, 6), 89 + EIC7700_RESET(EIC7700_RESET_DSP_DIV3, 2, 7), 90 + EIC7700_RESET(EIC7700_RESET_D2D_AXI, 3, 0), 91 + EIC7700_RESET(EIC7700_RESET_D2D_CFG, 3, 1), 92 + EIC7700_RESET(EIC7700_RESET_D2D_PRST, 3, 2), 93 + EIC7700_RESET(EIC7700_RESET_D2D_RAW_PCS, 3, 4), 94 + EIC7700_RESET(EIC7700_RESET_D2D_RX, 3, 5), 95 + EIC7700_RESET(EIC7700_RESET_D2D_TX, 3, 6), 96 + EIC7700_RESET(EIC7700_RESET_D2D_CORE, 3, 7), 97 + EIC7700_RESET(EIC7700_RESET_DDR1_ARST, 4, 0), 98 + EIC7700_RESET(EIC7700_RESET_DDR1_TRACE, 4, 6), 99 + EIC7700_RESET(EIC7700_RESET_DDR0_ARST, 4, 16), 100 + EIC7700_RESET(EIC7700_RESET_DDR_CFG, 4, 21), 101 + EIC7700_RESET(EIC7700_RESET_DDR0_TRACE, 4, 22), 102 + EIC7700_RESET(EIC7700_RESET_DDR_CORE, 4, 23), 103 + EIC7700_RESET(EIC7700_RESET_DDR_PRST, 4, 26), 104 + 
EIC7700_RESET(EIC7700_RESET_TCU_AXI, 5, 0), 105 + EIC7700_RESET(EIC7700_RESET_TCU_CFG, 5, 1), 106 + EIC7700_RESET(EIC7700_RESET_TCU_TBU0, 5, 4), 107 + EIC7700_RESET(EIC7700_RESET_TCU_TBU1, 5, 5), 108 + EIC7700_RESET(EIC7700_RESET_TCU_TBU2, 5, 6), 109 + EIC7700_RESET(EIC7700_RESET_TCU_TBU3, 5, 7), 110 + EIC7700_RESET(EIC7700_RESET_TCU_TBU4, 5, 8), 111 + EIC7700_RESET(EIC7700_RESET_TCU_TBU5, 5, 9), 112 + EIC7700_RESET(EIC7700_RESET_TCU_TBU6, 5, 10), 113 + EIC7700_RESET(EIC7700_RESET_TCU_TBU7, 5, 11), 114 + EIC7700_RESET(EIC7700_RESET_TCU_TBU8, 5, 12), 115 + EIC7700_RESET(EIC7700_RESET_TCU_TBU9, 5, 13), 116 + EIC7700_RESET(EIC7700_RESET_TCU_TBU10, 5, 14), 117 + EIC7700_RESET(EIC7700_RESET_TCU_TBU11, 5, 15), 118 + EIC7700_RESET(EIC7700_RESET_TCU_TBU12, 5, 16), 119 + EIC7700_RESET(EIC7700_RESET_TCU_TBU13, 5, 17), 120 + EIC7700_RESET(EIC7700_RESET_TCU_TBU14, 5, 18), 121 + EIC7700_RESET(EIC7700_RESET_TCU_TBU15, 5, 19), 122 + EIC7700_RESET(EIC7700_RESET_TCU_TBU16, 5, 20), 123 + EIC7700_RESET(EIC7700_RESET_NPU_AXI, 6, 0), 124 + EIC7700_RESET(EIC7700_RESET_NPU_CFG, 6, 1), 125 + EIC7700_RESET(EIC7700_RESET_NPU_CORE, 6, 2), 126 + EIC7700_RESET(EIC7700_RESET_NPU_E31CORE, 6, 3), 127 + EIC7700_RESET(EIC7700_RESET_NPU_E31BUS, 6, 4), 128 + EIC7700_RESET(EIC7700_RESET_NPU_E31DBG, 6, 5), 129 + EIC7700_RESET(EIC7700_RESET_NPU_LLC, 6, 6), 130 + EIC7700_RESET(EIC7700_RESET_HSP_AXI, 7, 0), 131 + EIC7700_RESET(EIC7700_RESET_HSP_CFG, 7, 1), 132 + EIC7700_RESET(EIC7700_RESET_HSP_POR, 7, 2), 133 + EIC7700_RESET(EIC7700_RESET_MSHC0_PHY, 7, 3), 134 + EIC7700_RESET(EIC7700_RESET_MSHC1_PHY, 7, 4), 135 + EIC7700_RESET(EIC7700_RESET_MSHC2_PHY, 7, 5), 136 + EIC7700_RESET(EIC7700_RESET_MSHC0_TXRX, 7, 6), 137 + EIC7700_RESET(EIC7700_RESET_MSHC1_TXRX, 7, 7), 138 + EIC7700_RESET(EIC7700_RESET_MSHC2_TXRX, 7, 8), 139 + EIC7700_RESET(EIC7700_RESET_SATA_ASIC0, 7, 9), 140 + EIC7700_RESET(EIC7700_RESET_SATA_OOB, 7, 10), 141 + EIC7700_RESET(EIC7700_RESET_SATA_PMALIVE, 7, 11), 142 + 
EIC7700_RESET(EIC7700_RESET_SATA_RBC, 7, 12), 143 + EIC7700_RESET(EIC7700_RESET_DMA0, 7, 13), 144 + EIC7700_RESET(EIC7700_RESET_HSP_DMA, 7, 14), 145 + EIC7700_RESET(EIC7700_RESET_USB0_VAUX, 7, 15), 146 + EIC7700_RESET(EIC7700_RESET_USB1_VAUX, 7, 16), 147 + EIC7700_RESET(EIC7700_RESET_HSP_SD1_PRST, 7, 17), 148 + EIC7700_RESET(EIC7700_RESET_HSP_SD0_PRST, 7, 18), 149 + EIC7700_RESET(EIC7700_RESET_HSP_EMMC_PRST, 7, 19), 150 + EIC7700_RESET(EIC7700_RESET_HSP_DMA_PRST, 7, 20), 151 + EIC7700_RESET(EIC7700_RESET_HSP_SD1_ARST, 7, 21), 152 + EIC7700_RESET(EIC7700_RESET_HSP_SD0_ARST, 7, 22), 153 + EIC7700_RESET(EIC7700_RESET_HSP_EMMC_ARST, 7, 23), 154 + EIC7700_RESET(EIC7700_RESET_HSP_DMA_ARST, 7, 24), 155 + EIC7700_RESET(EIC7700_RESET_HSP_ETH1_ARST, 7, 25), 156 + EIC7700_RESET(EIC7700_RESET_HSP_ETH0_ARST, 7, 26), 157 + EIC7700_RESET(EIC7700_RESET_SATA_ARST, 7, 27), 158 + EIC7700_RESET(EIC7700_RESET_PCIE_CFG, 8, 0), 159 + EIC7700_RESET(EIC7700_RESET_PCIE_POWEUP, 8, 1), 160 + EIC7700_RESET(EIC7700_RESET_PCIE_PERST, 8, 2), 161 + EIC7700_RESET(EIC7700_RESET_I2C0, 9, 0), 162 + EIC7700_RESET(EIC7700_RESET_I2C1, 9, 1), 163 + EIC7700_RESET(EIC7700_RESET_I2C2, 9, 2), 164 + EIC7700_RESET(EIC7700_RESET_I2C3, 9, 3), 165 + EIC7700_RESET(EIC7700_RESET_I2C4, 9, 4), 166 + EIC7700_RESET(EIC7700_RESET_I2C5, 9, 5), 167 + EIC7700_RESET(EIC7700_RESET_I2C6, 9, 6), 168 + EIC7700_RESET(EIC7700_RESET_I2C7, 9, 7), 169 + EIC7700_RESET(EIC7700_RESET_I2C8, 9, 8), 170 + EIC7700_RESET(EIC7700_RESET_I2C9, 9, 9), 171 + EIC7700_RESET(EIC7700_RESET_FAN, 10, 0), 172 + EIC7700_RESET(EIC7700_RESET_PVT0, 11, 0), 173 + EIC7700_RESET(EIC7700_RESET_PVT1, 11, 1), 174 + EIC7700_RESET(EIC7700_RESET_MBOX0, 12, 0), 175 + EIC7700_RESET(EIC7700_RESET_MBOX1, 12, 1), 176 + EIC7700_RESET(EIC7700_RESET_MBOX2, 12, 2), 177 + EIC7700_RESET(EIC7700_RESET_MBOX3, 12, 3), 178 + EIC7700_RESET(EIC7700_RESET_MBOX4, 12, 4), 179 + EIC7700_RESET(EIC7700_RESET_MBOX5, 12, 5), 180 + EIC7700_RESET(EIC7700_RESET_MBOX6, 12, 6), 181 + 
EIC7700_RESET(EIC7700_RESET_MBOX7, 12, 7), 182 + EIC7700_RESET(EIC7700_RESET_MBOX8, 12, 8), 183 + EIC7700_RESET(EIC7700_RESET_MBOX9, 12, 9), 184 + EIC7700_RESET(EIC7700_RESET_MBOX10, 12, 10), 185 + EIC7700_RESET(EIC7700_RESET_MBOX11, 12, 11), 186 + EIC7700_RESET(EIC7700_RESET_MBOX12, 12, 12), 187 + EIC7700_RESET(EIC7700_RESET_MBOX13, 12, 13), 188 + EIC7700_RESET(EIC7700_RESET_MBOX14, 12, 14), 189 + EIC7700_RESET(EIC7700_RESET_MBOX15, 12, 15), 190 + EIC7700_RESET(EIC7700_RESET_UART0, 13, 0), 191 + EIC7700_RESET(EIC7700_RESET_UART1, 13, 1), 192 + EIC7700_RESET(EIC7700_RESET_UART2, 13, 2), 193 + EIC7700_RESET(EIC7700_RESET_UART3, 13, 3), 194 + EIC7700_RESET(EIC7700_RESET_UART4, 13, 4), 195 + EIC7700_RESET(EIC7700_RESET_GPIO0, 14, 0), 196 + EIC7700_RESET(EIC7700_RESET_GPIO1, 14, 1), 197 + EIC7700_RESET(EIC7700_RESET_TIMER, 15, 0), 198 + EIC7700_RESET(EIC7700_RESET_SSI0, 16, 0), 199 + EIC7700_RESET(EIC7700_RESET_SSI1, 16, 1), 200 + EIC7700_RESET(EIC7700_RESET_WDT0, 17, 0), 201 + EIC7700_RESET(EIC7700_RESET_WDT1, 17, 1), 202 + EIC7700_RESET(EIC7700_RESET_WDT2, 17, 2), 203 + EIC7700_RESET(EIC7700_RESET_WDT3, 17, 3), 204 + EIC7700_RESET(EIC7700_RESET_LSP_CFG, 18, 0), 205 + EIC7700_RESET(EIC7700_RESET_U84_CORE0, 19, 0), 206 + EIC7700_RESET(EIC7700_RESET_U84_CORE1, 19, 1), 207 + EIC7700_RESET(EIC7700_RESET_U84_CORE2, 19, 2), 208 + EIC7700_RESET(EIC7700_RESET_U84_CORE3, 19, 3), 209 + EIC7700_RESET(EIC7700_RESET_U84_BUS, 19, 4), 210 + EIC7700_RESET(EIC7700_RESET_U84_DBG, 19, 5), 211 + EIC7700_RESET(EIC7700_RESET_U84_TRACECOM, 19, 6), 212 + EIC7700_RESET(EIC7700_RESET_U84_TRACE0, 19, 8), 213 + EIC7700_RESET(EIC7700_RESET_U84_TRACE1, 19, 9), 214 + EIC7700_RESET(EIC7700_RESET_U84_TRACE2, 19, 10), 215 + EIC7700_RESET(EIC7700_RESET_U84_TRACE3, 19, 11), 216 + EIC7700_RESET(EIC7700_RESET_SCPU_CORE, 20, 0), 217 + EIC7700_RESET(EIC7700_RESET_SCPU_BUS, 20, 1), 218 + EIC7700_RESET(EIC7700_RESET_SCPU_DBG, 20, 2), 219 + EIC7700_RESET(EIC7700_RESET_LPCPU_CORE, 21, 0), 220 + 
EIC7700_RESET(EIC7700_RESET_LPCPU_BUS, 21, 1), 221 + EIC7700_RESET(EIC7700_RESET_LPCPU_DBG, 21, 2), 222 + EIC7700_RESET(EIC7700_RESET_VC_CFG, 22, 0), 223 + EIC7700_RESET(EIC7700_RESET_VC_AXI, 22, 1), 224 + EIC7700_RESET(EIC7700_RESET_VC_MONCFG, 22, 2), 225 + EIC7700_RESET(EIC7700_RESET_JD_CFG, 23, 0), 226 + EIC7700_RESET(EIC7700_RESET_JD_AXI, 23, 1), 227 + EIC7700_RESET(EIC7700_RESET_JE_CFG, 24, 0), 228 + EIC7700_RESET(EIC7700_RESET_JE_AXI, 24, 1), 229 + EIC7700_RESET(EIC7700_RESET_VD_CFG, 25, 0), 230 + EIC7700_RESET(EIC7700_RESET_VD_AXI, 25, 1), 231 + EIC7700_RESET(EIC7700_RESET_VE_AXI, 26, 0), 232 + EIC7700_RESET(EIC7700_RESET_VE_CFG, 26, 1), 233 + EIC7700_RESET(EIC7700_RESET_G2D_CORE, 27, 0), 234 + EIC7700_RESET(EIC7700_RESET_G2D_CFG, 27, 1), 235 + EIC7700_RESET(EIC7700_RESET_G2D_AXI, 27, 2), 236 + EIC7700_RESET(EIC7700_RESET_VI_AXI, 28, 0), 237 + EIC7700_RESET(EIC7700_RESET_VI_CFG, 28, 1), 238 + EIC7700_RESET(EIC7700_RESET_VI_DWE, 28, 2), 239 + EIC7700_RESET(EIC7700_RESET_DVP, 29, 0), 240 + EIC7700_RESET(EIC7700_RESET_ISP0, 30, 0), 241 + EIC7700_RESET(EIC7700_RESET_ISP1, 31, 0), 242 + EIC7700_RESET(EIC7700_RESET_SHUTTR0, 32, 0), 243 + EIC7700_RESET(EIC7700_RESET_SHUTTR1, 32, 1), 244 + EIC7700_RESET(EIC7700_RESET_SHUTTR2, 32, 2), 245 + EIC7700_RESET(EIC7700_RESET_SHUTTR3, 32, 3), 246 + EIC7700_RESET(EIC7700_RESET_SHUTTR4, 32, 4), 247 + EIC7700_RESET(EIC7700_RESET_SHUTTR5, 32, 5), 248 + EIC7700_RESET(EIC7700_RESET_VO_MIPI, 33, 0), 249 + EIC7700_RESET(EIC7700_RESET_VO_PRST, 33, 1), 250 + EIC7700_RESET(EIC7700_RESET_VO_HDMI_PRST, 33, 3), 251 + EIC7700_RESET(EIC7700_RESET_VO_HDMI_PHY, 33, 4), 252 + EIC7700_RESET(EIC7700_RESET_VO_HDMI, 33, 5), 253 + EIC7700_RESET(EIC7700_RESET_VO_I2S, 34, 0), 254 + EIC7700_RESET(EIC7700_RESET_VO_I2S_PRST, 34, 1), 255 + EIC7700_RESET(EIC7700_RESET_VO_AXI, 35, 0), 256 + EIC7700_RESET(EIC7700_RESET_VO_CFG, 35, 1), 257 + EIC7700_RESET(EIC7700_RESET_VO_DC, 35, 2), 258 + EIC7700_RESET(EIC7700_RESET_VO_DC_PRST, 35, 3), 259 + 
EIC7700_RESET(EIC7700_RESET_BOOTSPI_HRST, 36, 0), 260 + EIC7700_RESET(EIC7700_RESET_BOOTSPI, 36, 1), 261 + EIC7700_RESET(EIC7700_RESET_ANO1, 37, 0), 262 + EIC7700_RESET(EIC7700_RESET_ANO0, 38, 0), 263 + EIC7700_RESET(EIC7700_RESET_DMA1_ARST, 39, 0), 264 + EIC7700_RESET(EIC7700_RESET_DMA1_HRST, 39, 1), 265 + EIC7700_RESET(EIC7700_RESET_FPRT, 40, 0), 266 + EIC7700_RESET(EIC7700_RESET_HBLOCK, 41, 0), 267 + EIC7700_RESET(EIC7700_RESET_SECSR, 42, 0), 268 + EIC7700_RESET(EIC7700_RESET_OTP, 43, 0), 269 + EIC7700_RESET(EIC7700_RESET_PKA, 44, 0), 270 + EIC7700_RESET(EIC7700_RESET_SPACC, 45, 0), 271 + EIC7700_RESET(EIC7700_RESET_TRNG, 46, 0), 272 + EIC7700_RESET(EIC7700_RESET_TIMER0_0, 48, 0), 273 + EIC7700_RESET(EIC7700_RESET_TIMER0_1, 48, 1), 274 + EIC7700_RESET(EIC7700_RESET_TIMER0_2, 48, 2), 275 + EIC7700_RESET(EIC7700_RESET_TIMER0_3, 48, 3), 276 + EIC7700_RESET(EIC7700_RESET_TIMER0_4, 48, 4), 277 + EIC7700_RESET(EIC7700_RESET_TIMER0_5, 48, 5), 278 + EIC7700_RESET(EIC7700_RESET_TIMER0_6, 48, 6), 279 + EIC7700_RESET(EIC7700_RESET_TIMER0_7, 48, 7), 280 + EIC7700_RESET(EIC7700_RESET_TIMER0_N, 48, 8), 281 + EIC7700_RESET(EIC7700_RESET_TIMER1_0, 49, 0), 282 + EIC7700_RESET(EIC7700_RESET_TIMER1_1, 49, 1), 283 + EIC7700_RESET(EIC7700_RESET_TIMER1_2, 49, 2), 284 + EIC7700_RESET(EIC7700_RESET_TIMER1_3, 49, 3), 285 + EIC7700_RESET(EIC7700_RESET_TIMER1_4, 49, 4), 286 + EIC7700_RESET(EIC7700_RESET_TIMER1_5, 49, 5), 287 + EIC7700_RESET(EIC7700_RESET_TIMER1_6, 49, 6), 288 + EIC7700_RESET(EIC7700_RESET_TIMER1_7, 49, 7), 289 + EIC7700_RESET(EIC7700_RESET_TIMER1_N, 49, 8), 290 + EIC7700_RESET(EIC7700_RESET_TIMER2_0, 50, 0), 291 + EIC7700_RESET(EIC7700_RESET_TIMER2_1, 50, 1), 292 + EIC7700_RESET(EIC7700_RESET_TIMER2_2, 50, 2), 293 + EIC7700_RESET(EIC7700_RESET_TIMER2_3, 50, 3), 294 + EIC7700_RESET(EIC7700_RESET_TIMER2_4, 50, 4), 295 + EIC7700_RESET(EIC7700_RESET_TIMER2_5, 50, 5), 296 + EIC7700_RESET(EIC7700_RESET_TIMER2_6, 50, 6), 297 + EIC7700_RESET(EIC7700_RESET_TIMER2_7, 50, 7), 298 + 
EIC7700_RESET(EIC7700_RESET_TIMER2_N, 50, 8), 299 + EIC7700_RESET(EIC7700_RESET_TIMER3_0, 51, 0), 300 + EIC7700_RESET(EIC7700_RESET_TIMER3_1, 51, 1), 301 + EIC7700_RESET(EIC7700_RESET_TIMER3_2, 51, 2), 302 + EIC7700_RESET(EIC7700_RESET_TIMER3_3, 51, 3), 303 + EIC7700_RESET(EIC7700_RESET_TIMER3_4, 51, 4), 304 + EIC7700_RESET(EIC7700_RESET_TIMER3_5, 51, 5), 305 + EIC7700_RESET(EIC7700_RESET_TIMER3_6, 51, 6), 306 + EIC7700_RESET(EIC7700_RESET_TIMER3_7, 51, 7), 307 + EIC7700_RESET(EIC7700_RESET_TIMER3_N, 51, 8), 308 + EIC7700_RESET(EIC7700_RESET_RTC, 52, 0), 309 + EIC7700_RESET(EIC7700_RESET_MNOC_SNOC_NSP, 53, 0), 310 + EIC7700_RESET(EIC7700_RESET_MNOC_VC, 53, 1), 311 + EIC7700_RESET(EIC7700_RESET_MNOC_CFG, 53, 2), 312 + EIC7700_RESET(EIC7700_RESET_MNOC_HSP, 53, 3), 313 + EIC7700_RESET(EIC7700_RESET_MNOC_GPU, 53, 4), 314 + EIC7700_RESET(EIC7700_RESET_MNOC_DDRC1_P3, 53, 5), 315 + EIC7700_RESET(EIC7700_RESET_MNOC_DDRC0_P3, 53, 6), 316 + EIC7700_RESET(EIC7700_RESET_RNOC_VO, 54, 0), 317 + EIC7700_RESET(EIC7700_RESET_RNOC_VI, 54, 1), 318 + EIC7700_RESET(EIC7700_RESET_RNOC_SNOC_NSP, 54, 2), 319 + EIC7700_RESET(EIC7700_RESET_RNOC_CFG, 54, 3), 320 + EIC7700_RESET(EIC7700_RESET_MNOC_DDRC1_P4, 54, 4), 321 + EIC7700_RESET(EIC7700_RESET_MNOC_DDRC0_P4, 54, 5), 322 + EIC7700_RESET(EIC7700_RESET_CNOC_VO_CFG, 55, 0), 323 + EIC7700_RESET(EIC7700_RESET_CNOC_VI_CFG, 55, 1), 324 + EIC7700_RESET(EIC7700_RESET_CNOC_VC_CFG, 55, 2), 325 + EIC7700_RESET(EIC7700_RESET_CNOC_TCU_CFG, 55, 3), 326 + EIC7700_RESET(EIC7700_RESET_CNOC_PCIE_CFG, 55, 4), 327 + EIC7700_RESET(EIC7700_RESET_CNOC_NPU_CFG, 55, 5), 328 + EIC7700_RESET(EIC7700_RESET_CNOC_LSP_CFG, 55, 6), 329 + EIC7700_RESET(EIC7700_RESET_CNOC_HSP_CFG, 55, 7), 330 + EIC7700_RESET(EIC7700_RESET_CNOC_GPU_CFG, 55, 8), 331 + EIC7700_RESET(EIC7700_RESET_CNOC_DSPT_CFG, 55, 9), 332 + EIC7700_RESET(EIC7700_RESET_CNOC_DDRT1_CFG, 55, 10), 333 + EIC7700_RESET(EIC7700_RESET_CNOC_DDRT0_CFG, 55, 11), 334 + EIC7700_RESET(EIC7700_RESET_CNOC_D2D_CFG, 55, 12), 
335 + EIC7700_RESET(EIC7700_RESET_CNOC_CFG, 55, 13), 336 + EIC7700_RESET(EIC7700_RESET_CNOC_CLMM_CFG, 55, 14), 337 + EIC7700_RESET(EIC7700_RESET_CNOC_AON_CFG, 55, 15), 338 + EIC7700_RESET(EIC7700_RESET_LNOC_CFG, 56, 0), 339 + EIC7700_RESET(EIC7700_RESET_LNOC_NPU_LLC, 56, 1), 340 + EIC7700_RESET(EIC7700_RESET_LNOC_DDRC1_P0, 56, 2), 341 + EIC7700_RESET(EIC7700_RESET_LNOC_DDRC0_P0, 56, 3), 342 + }; 343 + 344 + static int eic7700_reset_assert(struct reset_controller_dev *rcdev, 345 + unsigned long id) 346 + { 347 + struct eic7700_reset_data *data = to_eic7700_reset_data(rcdev); 348 + 349 + return regmap_clear_bits(data->regmap, eic7700_reset[id].reg, 350 + eic7700_reset[id].bit); 351 + } 352 + 353 + static int eic7700_reset_deassert(struct reset_controller_dev *rcdev, 354 + unsigned long id) 355 + { 356 + struct eic7700_reset_data *data = to_eic7700_reset_data(rcdev); 357 + 358 + return regmap_set_bits(data->regmap, eic7700_reset[id].reg, 359 + eic7700_reset[id].bit); 360 + } 361 + 362 + static int eic7700_reset_reset(struct reset_controller_dev *rcdev, 363 + unsigned long id) 364 + { 365 + int ret; 366 + 367 + ret = eic7700_reset_assert(rcdev, id); 368 + if (ret) 369 + return ret; 370 + 371 + usleep_range(10, 15); 372 + 373 + return eic7700_reset_deassert(rcdev, id); 374 + } 375 + 376 + static const struct reset_control_ops eic7700_reset_ops = { 377 + .reset = eic7700_reset_reset, 378 + .assert = eic7700_reset_assert, 379 + .deassert = eic7700_reset_deassert, 380 + }; 381 + 382 + static const struct of_device_id eic7700_reset_dt_ids[] = { 383 + { .compatible = "eswin,eic7700-reset", }, 384 + { /* sentinel */ } 385 + }; 386 + 387 + static int eic7700_reset_probe(struct platform_device *pdev) 388 + { 389 + struct eic7700_reset_data *data; 390 + struct device *dev = &pdev->dev; 391 + void __iomem *base; 392 + 393 + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 394 + if (!data) 395 + return -ENOMEM; 396 + 397 + base = devm_platform_ioremap_resource(pdev, 0); 398 + 
if (IS_ERR(base)) 399 + return PTR_ERR(base); 400 + 401 + data->regmap = devm_regmap_init_mmio(dev, base, &eic7700_regmap_config); 402 + if (IS_ERR(data->regmap)) 403 + return dev_err_probe(dev, PTR_ERR(data->regmap), 404 + "failed to get regmap!\n"); 405 + 406 + data->rcdev.owner = THIS_MODULE; 407 + data->rcdev.ops = &eic7700_reset_ops; 408 + data->rcdev.of_node = dev->of_node; 409 + data->rcdev.of_reset_n_cells = 1; 410 + data->rcdev.dev = dev; 411 + data->rcdev.nr_resets = ARRAY_SIZE(eic7700_reset); 412 + 413 + /* clear boot flag so u84 and scpu could be reseted by software */ 414 + regmap_set_bits(data->regmap, SYSCRG_CLEAR_BOOT_INFO_OFFSET, 415 + CLEAR_BOOT_FLAG_BIT); 416 + msleep(50); 417 + 418 + return devm_reset_controller_register(dev, &data->rcdev); 419 + } 420 + 421 + static struct platform_driver eic7700_reset_driver = { 422 + .probe = eic7700_reset_probe, 423 + .driver = { 424 + .name = "eic7700-reset", 425 + .of_match_table = eic7700_reset_dt_ids, 426 + }, 427 + }; 428 + 429 + builtin_platform_driver(eic7700_reset_driver);
+60
drivers/reset/reset-rzg2l-usbphy-ctrl.c
··· 13 13 #include <linux/regmap.h> 14 14 #include <linux/reset.h> 15 15 #include <linux/reset-controller.h> 16 + #include <linux/mfd/syscon.h> 16 17 17 18 #define RESET 0x000 18 19 #define VBENCTL 0x03c ··· 92 91 return !!(readl(priv->base + RESET) & port_mask); 93 92 } 94 93 94 + #define RZG2L_USBPHY_CTRL_PWRRDY 1 95 + 95 96 static const struct of_device_id rzg2l_usbphy_ctrl_match_table[] = { 96 97 { .compatible = "renesas,rzg2l-usbphy-ctrl" }, 98 + { 99 + .compatible = "renesas,r9a08g045-usbphy-ctrl", 100 + .data = (void *)RZG2L_USBPHY_CTRL_PWRRDY 101 + }, 97 102 { /* Sentinel */ } 98 103 }; 99 104 MODULE_DEVICE_TABLE(of, rzg2l_usbphy_ctrl_match_table); ··· 116 109 .reg_stride = 4, 117 110 .max_register = 1, 118 111 }; 112 + 113 + static void rzg2l_usbphy_ctrl_set_pwrrdy(struct regmap_field *pwrrdy, 114 + bool power_on) 115 + { 116 + u32 val = power_on ? 0 : 1; 117 + 118 + /* The initialization path guarantees that the mask is 1 bit long. */ 119 + regmap_field_update_bits(pwrrdy, 1, val); 120 + } 121 + 122 + static void rzg2l_usbphy_ctrl_pwrrdy_off(void *data) 123 + { 124 + rzg2l_usbphy_ctrl_set_pwrrdy(data, false); 125 + } 126 + 127 + static int rzg2l_usbphy_ctrl_pwrrdy_init(struct device *dev) 128 + { 129 + struct regmap_field *pwrrdy; 130 + struct reg_field field; 131 + struct regmap *regmap; 132 + const int *data; 133 + u32 args[2]; 134 + 135 + data = device_get_match_data(dev); 136 + if ((uintptr_t)data != RZG2L_USBPHY_CTRL_PWRRDY) 137 + return 0; 138 + 139 + regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, 140 + "renesas,sysc-pwrrdy", 141 + ARRAY_SIZE(args), args); 142 + if (IS_ERR(regmap)) 143 + return PTR_ERR(regmap); 144 + 145 + /* Don't allow more than one bit in mask. 
*/ 146 + if (hweight32(args[1]) != 1) 147 + return -EINVAL; 148 + 149 + field.reg = args[0]; 150 + field.lsb = __ffs(args[1]); 151 + field.msb = __fls(args[1]); 152 + 153 + pwrrdy = devm_regmap_field_alloc(dev, regmap, field); 154 + if (IS_ERR(pwrrdy)) 155 + return PTR_ERR(pwrrdy); 156 + 157 + rzg2l_usbphy_ctrl_set_pwrrdy(pwrrdy, true); 158 + 159 + return devm_add_action_or_reset(dev, rzg2l_usbphy_ctrl_pwrrdy_off, pwrrdy); 160 + } 119 161 120 162 static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) 121 163 { ··· 187 131 regmap = devm_regmap_init_mmio(dev, priv->base + VBENCTL, &rzg2l_usb_regconf); 188 132 if (IS_ERR(regmap)) 189 133 return PTR_ERR(regmap); 134 + 135 + error = rzg2l_usbphy_ctrl_pwrrdy_init(dev); 136 + if (error) 137 + return error; 190 138 191 139 priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); 192 140 if (IS_ERR(priv->rstc))
+824 -13
drivers/reset/reset-th1520.c
··· 11 11 12 12 #include <dt-bindings/reset/thead,th1520-reset.h> 13 13 14 + /* register offset in RSTGEN_R */ 15 + #define TH1520_BROM_RST_CFG 0x0 16 + #define TH1520_C910_RST_CFG 0x4 17 + #define TH1520_CHIP_DBG_RST_CFG 0xc 18 + #define TH1520_AXI4_CPUSYS2_RST_CFG 0x10 19 + #define TH1520_X2H_CPUSYS_RST_CFG 0x18 20 + #define TH1520_AHB2_CPUSYS_RST_CFG 0x1c 21 + #define TH1520_APB3_CPUSYS_RST_CFG 0x20 22 + #define TH1520_MBOX0_RST_CFG 0x24 23 + #define TH1520_MBOX1_RST_CFG 0x28 24 + #define TH1520_MBOX2_RST_CFG 0x2c 25 + #define TH1520_MBOX3_RST_CFG 0x30 26 + #define TH1520_WDT0_RST_CFG 0x34 27 + #define TH1520_WDT1_RST_CFG 0x38 28 + #define TH1520_TIMER0_RST_CFG 0x3c 29 + #define TH1520_TIMER1_RST_CFG 0x40 30 + #define TH1520_PERISYS_AHB_RST_CFG 0x44 31 + #define TH1520_PERISYS_APB1_RST_CFG 0x48 32 + #define TH1520_PERISYS_APB2_RST_CFG 0x4c 33 + #define TH1520_GMAC0_RST_CFG 0x68 34 + #define TH1520_UART0_RST_CFG 0x70 35 + #define TH1520_UART1_RST_CFG 0x74 36 + #define TH1520_UART2_RST_CFG 0x78 37 + #define TH1520_UART3_RST_CFG 0x7c 38 + #define TH1520_UART4_RST_CFG 0x80 39 + #define TH1520_UART5_RST_CFG 0x84 40 + #define TH1520_QSPI0_RST_CFG 0x8c 41 + #define TH1520_QSPI1_RST_CFG 0x90 42 + #define TH1520_SPI_RST_CFG 0x94 43 + #define TH1520_I2C0_RST_CFG 0x98 44 + #define TH1520_I2C1_RST_CFG 0x9c 45 + #define TH1520_I2C2_RST_CFG 0xa0 46 + #define TH1520_I2C3_RST_CFG 0xa4 47 + #define TH1520_I2C4_RST_CFG 0xa8 48 + #define TH1520_I2C5_RST_CFG 0xac 49 + #define TH1520_GPIO0_RST_CFG 0xb0 50 + #define TH1520_GPIO1_RST_CFG 0xb4 51 + #define TH1520_GPIO2_RST_CFG 0xb8 52 + #define TH1520_PWM_RST_CFG 0xc0 53 + #define TH1520_PADCTRL0_APSYS_RST_CFG 0xc4 54 + #define TH1520_CPU2PERI_X2H_RST_CFG 0xcc 55 + #define TH1520_CPU2AON_X2H_RST_CFG 0xe4 56 + #define TH1520_AON2CPU_A2X_RST_CFG 0xfc 57 + #define TH1520_NPUSYS_AXI_RST_CFG 0x128 58 + #define TH1520_CPU2VP_X2P_RST_CFG 0x12c 59 + #define TH1520_CPU2VI_X2H_RST_CFG 0x138 60 + #define TH1520_BMU_C910_RST_CFG 0x148 61 + #define 
TH1520_DMAC_CPUSYS_RST_CFG 0x14c 62 + #define TH1520_SPINLOCK_RST_CFG 0x178 63 + #define TH1520_CFG2TEE_X2H_RST_CFG 0x188 64 + #define TH1520_DSMART_RST_CFG 0x18c 65 + #define TH1520_GPIO3_RST_CFG 0x1a8 66 + #define TH1520_I2S_RST_CFG 0x1ac 67 + #define TH1520_IMG_NNA_RST_CFG 0x1b0 68 + #define TH1520_PERI_APB3_RST_CFG 0x1dc 69 + #define TH1520_VP_SUBSYS_RST_CFG 0x1ec 70 + #define TH1520_PERISYS_APB4_RST_CFG 0x1f8 71 + #define TH1520_GMAC1_RST_CFG 0x204 72 + #define TH1520_GMAC_AXI_RST_CFG 0x208 73 + #define TH1520_PADCTRL1_APSYS_RST_CFG 0x20c 74 + #define TH1520_VOSYS_AXI_RST_CFG 0x210 75 + #define TH1520_VOSYS_X2X_RST_CFG 0x214 76 + #define TH1520_MISC2VP_X2X_RST_CFG 0x218 77 + #define TH1520_SUBSYS_RST_CFG 0x220 78 + 79 + /* register offset in DSP_REGMAP */ 80 + #define TH1520_DSPSYS_RST_CFG 0x0 81 + 82 + /* register offset in MISCSYS_REGMAP */ 83 + #define TH1520_EMMC_RST_CFG 0x0 84 + #define TH1520_MISCSYS_AXI_RST_CFG 0x8 85 + #define TH1520_SDIO0_RST_CFG 0xc 86 + #define TH1520_SDIO1_RST_CFG 0x10 87 + #define TH1520_USB3_DRD_RST_CFG 0x14 88 + 89 + /* register offset in VISYS_REGMAP */ 90 + #define TH1520_VISYS_RST_CFG 0x0 91 + #define TH1520_VISYS_2_RST_CFG 0x4 92 + 14 93 /* register offset in VOSYS_REGMAP */ 15 94 #define TH1520_GPU_RST_CFG 0x0 16 95 #define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0) ··· 97 18 #define TH1520_DSI0_RST_CFG 0x8 98 19 #define TH1520_DSI1_RST_CFG 0xc 99 20 #define TH1520_HDMI_RST_CFG 0x14 21 + #define TH1520_AXI4_VO_DW_AXI_RST_CFG 0x18 22 + #define TH1520_X2H_X4_VOSYS_DW_RST_CFG 0x20 100 23 101 24 /* register values */ 102 25 #define TH1520_GPU_SW_GPU_RST BIT(0) ··· 110 29 #define TH1520_HDMI_SW_MAIN_RST BIT(0) 111 30 #define TH1520_HDMI_SW_PRST BIT(1) 112 31 113 - struct th1520_reset_priv { 114 - struct reset_controller_dev rcdev; 115 - struct regmap *map; 116 - }; 32 + /* register offset in VPSYS_REGMAP */ 33 + #define TH1520_AXIBUS_RST_CFG 0x0 34 + #define TH1520_FCE_RST_CFG 0x4 35 + #define TH1520_G2D_RST_CFG 0x8 36 + #define 
TH1520_VDEC_RST_CFG 0xc 37 + #define TH1520_VENC_RST_CFG 0x10 117 38 118 39 struct th1520_reset_map { 119 40 u32 bit; 120 41 u32 reg; 42 + }; 43 + 44 + struct th1520_reset_priv { 45 + struct reset_controller_dev rcdev; 46 + struct regmap *map; 47 + const struct th1520_reset_map *resets; 48 + }; 49 + 50 + struct th1520_reset_data { 51 + const struct th1520_reset_map *resets; 52 + size_t num; 121 53 }; 122 54 123 55 static const struct th1520_reset_map th1520_resets[] = { ··· 170 76 .bit = TH1520_HDMI_SW_PRST, 171 77 .reg = TH1520_HDMI_RST_CFG, 172 78 }, 79 + [TH1520_RESET_ID_VOAXI] = { 80 + .bit = BIT(0), 81 + .reg = TH1520_AXI4_VO_DW_AXI_RST_CFG, 82 + }, 83 + [TH1520_RESET_ID_VOAXI_APB] = { 84 + .bit = BIT(1), 85 + .reg = TH1520_AXI4_VO_DW_AXI_RST_CFG, 86 + }, 87 + [TH1520_RESET_ID_X2H_DPU_AXI] = { 88 + .bit = BIT(0), 89 + .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG, 90 + }, 91 + [TH1520_RESET_ID_X2H_DPU_AHB] = { 92 + .bit = BIT(1), 93 + .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG, 94 + }, 95 + [TH1520_RESET_ID_X2H_DPU1_AXI] = { 96 + .bit = BIT(2), 97 + .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG, 98 + }, 99 + [TH1520_RESET_ID_X2H_DPU1_AHB] = { 100 + .bit = BIT(3), 101 + .reg = TH1520_X2H_X4_VOSYS_DW_RST_CFG, 102 + }, 103 + }; 104 + 105 + static const struct th1520_reset_map th1520_ap_resets[] = { 106 + [TH1520_RESET_ID_BROM] = { 107 + .bit = BIT(0), 108 + .reg = TH1520_BROM_RST_CFG, 109 + }, 110 + [TH1520_RESET_ID_C910_TOP] = { 111 + .bit = BIT(0), 112 + .reg = TH1520_C910_RST_CFG, 113 + }, 114 + [TH1520_RESET_ID_NPU] = { 115 + .bit = BIT(0), 116 + .reg = TH1520_IMG_NNA_RST_CFG, 117 + }, 118 + [TH1520_RESET_ID_WDT0] = { 119 + .bit = BIT(0), 120 + .reg = TH1520_WDT0_RST_CFG, 121 + }, 122 + [TH1520_RESET_ID_WDT1] = { 123 + .bit = BIT(0), 124 + .reg = TH1520_WDT1_RST_CFG, 125 + }, 126 + [TH1520_RESET_ID_C910_C0] = { 127 + .bit = BIT(1), 128 + .reg = TH1520_C910_RST_CFG, 129 + }, 130 + [TH1520_RESET_ID_C910_C1] = { 131 + .bit = BIT(2), 132 + .reg = TH1520_C910_RST_CFG, 133 + }, 134 + 
[TH1520_RESET_ID_C910_C2] = { 135 + .bit = BIT(3), 136 + .reg = TH1520_C910_RST_CFG, 137 + }, 138 + [TH1520_RESET_ID_C910_C3] = { 139 + .bit = BIT(4), 140 + .reg = TH1520_C910_RST_CFG, 141 + }, 142 + [TH1520_RESET_ID_CHIP_DBG_CORE] = { 143 + .bit = BIT(0), 144 + .reg = TH1520_CHIP_DBG_RST_CFG, 145 + }, 146 + [TH1520_RESET_ID_CHIP_DBG_AXI] = { 147 + .bit = BIT(1), 148 + .reg = TH1520_CHIP_DBG_RST_CFG, 149 + }, 150 + [TH1520_RESET_ID_AXI4_CPUSYS2_AXI] = { 151 + .bit = BIT(0), 152 + .reg = TH1520_AXI4_CPUSYS2_RST_CFG, 153 + }, 154 + [TH1520_RESET_ID_AXI4_CPUSYS2_APB] = { 155 + .bit = BIT(1), 156 + .reg = TH1520_AXI4_CPUSYS2_RST_CFG, 157 + }, 158 + [TH1520_RESET_ID_X2H_CPUSYS] = { 159 + .bit = BIT(0), 160 + .reg = TH1520_X2H_CPUSYS_RST_CFG, 161 + }, 162 + [TH1520_RESET_ID_AHB2_CPUSYS] = { 163 + .bit = BIT(0), 164 + .reg = TH1520_AHB2_CPUSYS_RST_CFG, 165 + }, 166 + [TH1520_RESET_ID_APB3_CPUSYS] = { 167 + .bit = BIT(0), 168 + .reg = TH1520_APB3_CPUSYS_RST_CFG, 169 + }, 170 + [TH1520_RESET_ID_MBOX0_APB] = { 171 + .bit = BIT(0), 172 + .reg = TH1520_MBOX0_RST_CFG, 173 + }, 174 + [TH1520_RESET_ID_MBOX1_APB] = { 175 + .bit = BIT(0), 176 + .reg = TH1520_MBOX1_RST_CFG, 177 + }, 178 + [TH1520_RESET_ID_MBOX2_APB] = { 179 + .bit = BIT(0), 180 + .reg = TH1520_MBOX2_RST_CFG, 181 + }, 182 + [TH1520_RESET_ID_MBOX3_APB] = { 183 + .bit = BIT(0), 184 + .reg = TH1520_MBOX3_RST_CFG, 185 + }, 186 + [TH1520_RESET_ID_TIMER0_APB] = { 187 + .bit = BIT(0), 188 + .reg = TH1520_TIMER0_RST_CFG, 189 + }, 190 + [TH1520_RESET_ID_TIMER0_CORE] = { 191 + .bit = BIT(1), 192 + .reg = TH1520_TIMER0_RST_CFG, 193 + }, 194 + [TH1520_RESET_ID_TIMER1_APB] = { 195 + .bit = BIT(0), 196 + .reg = TH1520_TIMER1_RST_CFG, 197 + }, 198 + [TH1520_RESET_ID_TIMER1_CORE] = { 199 + .bit = BIT(1), 200 + .reg = TH1520_TIMER1_RST_CFG, 201 + }, 202 + [TH1520_RESET_ID_PERISYS_AHB] = { 203 + .bit = BIT(0), 204 + .reg = TH1520_PERISYS_AHB_RST_CFG, 205 + }, 206 + [TH1520_RESET_ID_PERISYS_APB1] = { 207 + .bit = BIT(0), 208 + .reg = 
TH1520_PERISYS_APB1_RST_CFG, 209 + }, 210 + [TH1520_RESET_ID_PERISYS_APB2] = { 211 + .bit = BIT(0), 212 + .reg = TH1520_PERISYS_APB2_RST_CFG, 213 + }, 214 + [TH1520_RESET_ID_GMAC0_APB] = { 215 + .bit = BIT(0), 216 + .reg = TH1520_GMAC0_RST_CFG, 217 + }, 218 + [TH1520_RESET_ID_GMAC0_AHB] = { 219 + .bit = BIT(1), 220 + .reg = TH1520_GMAC0_RST_CFG, 221 + }, 222 + [TH1520_RESET_ID_GMAC0_CLKGEN] = { 223 + .bit = BIT(2), 224 + .reg = TH1520_GMAC0_RST_CFG, 225 + }, 226 + [TH1520_RESET_ID_GMAC0_AXI] = { 227 + .bit = BIT(3), 228 + .reg = TH1520_GMAC0_RST_CFG, 229 + }, 230 + [TH1520_RESET_ID_UART0_APB] = { 231 + .bit = BIT(0), 232 + .reg = TH1520_UART0_RST_CFG, 233 + }, 234 + [TH1520_RESET_ID_UART0_IF] = { 235 + .bit = BIT(1), 236 + .reg = TH1520_UART0_RST_CFG, 237 + }, 238 + [TH1520_RESET_ID_UART1_APB] = { 239 + .bit = BIT(0), 240 + .reg = TH1520_UART1_RST_CFG, 241 + }, 242 + [TH1520_RESET_ID_UART1_IF] = { 243 + .bit = BIT(1), 244 + .reg = TH1520_UART1_RST_CFG, 245 + }, 246 + [TH1520_RESET_ID_UART2_APB] = { 247 + .bit = BIT(0), 248 + .reg = TH1520_UART2_RST_CFG, 249 + }, 250 + [TH1520_RESET_ID_UART2_IF] = { 251 + .bit = BIT(1), 252 + .reg = TH1520_UART2_RST_CFG, 253 + }, 254 + [TH1520_RESET_ID_UART3_APB] = { 255 + .bit = BIT(0), 256 + .reg = TH1520_UART3_RST_CFG, 257 + }, 258 + [TH1520_RESET_ID_UART3_IF] = { 259 + .bit = BIT(1), 260 + .reg = TH1520_UART3_RST_CFG, 261 + }, 262 + [TH1520_RESET_ID_UART4_APB] = { 263 + .bit = BIT(0), 264 + .reg = TH1520_UART4_RST_CFG, 265 + }, 266 + [TH1520_RESET_ID_UART4_IF] = { 267 + .bit = BIT(1), 268 + .reg = TH1520_UART4_RST_CFG, 269 + }, 270 + [TH1520_RESET_ID_UART5_APB] = { 271 + .bit = BIT(0), 272 + .reg = TH1520_UART5_RST_CFG, 273 + }, 274 + [TH1520_RESET_ID_UART5_IF] = { 275 + .bit = BIT(1), 276 + .reg = TH1520_UART5_RST_CFG, 277 + }, 278 + [TH1520_RESET_ID_QSPI0_IF] = { 279 + .bit = BIT(0), 280 + .reg = TH1520_QSPI0_RST_CFG, 281 + }, 282 + [TH1520_RESET_ID_QSPI0_APB] = { 283 + .bit = BIT(1), 284 + .reg = TH1520_QSPI0_RST_CFG, 285 + 
}, 286 + [TH1520_RESET_ID_QSPI1_IF] = { 287 + .bit = BIT(0), 288 + .reg = TH1520_QSPI1_RST_CFG, 289 + }, 290 + [TH1520_RESET_ID_QSPI1_APB] = { 291 + .bit = BIT(1), 292 + .reg = TH1520_QSPI1_RST_CFG, 293 + }, 294 + [TH1520_RESET_ID_SPI_IF] = { 295 + .bit = BIT(0), 296 + .reg = TH1520_SPI_RST_CFG, 297 + }, 298 + [TH1520_RESET_ID_SPI_APB] = { 299 + .bit = BIT(1), 300 + .reg = TH1520_SPI_RST_CFG, 301 + }, 302 + [TH1520_RESET_ID_I2C0_APB] = { 303 + .bit = BIT(0), 304 + .reg = TH1520_I2C0_RST_CFG, 305 + }, 306 + [TH1520_RESET_ID_I2C0_CORE] = { 307 + .bit = BIT(1), 308 + .reg = TH1520_I2C0_RST_CFG, 309 + }, 310 + [TH1520_RESET_ID_I2C1_APB] = { 311 + .bit = BIT(0), 312 + .reg = TH1520_I2C1_RST_CFG, 313 + }, 314 + [TH1520_RESET_ID_I2C1_CORE] = { 315 + .bit = BIT(1), 316 + .reg = TH1520_I2C1_RST_CFG, 317 + }, 318 + [TH1520_RESET_ID_I2C2_APB] = { 319 + .bit = BIT(0), 320 + .reg = TH1520_I2C2_RST_CFG, 321 + }, 322 + [TH1520_RESET_ID_I2C2_CORE] = { 323 + .bit = BIT(1), 324 + .reg = TH1520_I2C2_RST_CFG, 325 + }, 326 + [TH1520_RESET_ID_I2C3_APB] = { 327 + .bit = BIT(0), 328 + .reg = TH1520_I2C3_RST_CFG, 329 + }, 330 + [TH1520_RESET_ID_I2C3_CORE] = { 331 + .bit = BIT(1), 332 + .reg = TH1520_I2C3_RST_CFG, 333 + }, 334 + [TH1520_RESET_ID_I2C4_APB] = { 335 + .bit = BIT(0), 336 + .reg = TH1520_I2C4_RST_CFG, 337 + }, 338 + [TH1520_RESET_ID_I2C4_CORE] = { 339 + .bit = BIT(1), 340 + .reg = TH1520_I2C4_RST_CFG, 341 + }, 342 + [TH1520_RESET_ID_I2C5_APB] = { 343 + .bit = BIT(0), 344 + .reg = TH1520_I2C5_RST_CFG, 345 + }, 346 + [TH1520_RESET_ID_I2C5_CORE] = { 347 + .bit = BIT(1), 348 + .reg = TH1520_I2C5_RST_CFG, 349 + }, 350 + [TH1520_RESET_ID_GPIO0_DB] = { 351 + .bit = BIT(0), 352 + .reg = TH1520_GPIO0_RST_CFG, 353 + }, 354 + [TH1520_RESET_ID_GPIO0_APB] = { 355 + .bit = BIT(1), 356 + .reg = TH1520_GPIO0_RST_CFG, 357 + }, 358 + [TH1520_RESET_ID_GPIO1_DB] = { 359 + .bit = BIT(0), 360 + .reg = TH1520_GPIO1_RST_CFG, 361 + }, 362 + [TH1520_RESET_ID_GPIO1_APB] = { 363 + .bit = BIT(1), 364 + .reg 
= TH1520_GPIO1_RST_CFG, 365 + }, 366 + [TH1520_RESET_ID_GPIO2_DB] = { 367 + .bit = BIT(0), 368 + .reg = TH1520_GPIO2_RST_CFG, 369 + }, 370 + [TH1520_RESET_ID_GPIO2_APB] = { 371 + .bit = BIT(1), 372 + .reg = TH1520_GPIO2_RST_CFG, 373 + }, 374 + [TH1520_RESET_ID_PWM_COUNTER] = { 375 + .bit = BIT(0), 376 + .reg = TH1520_PWM_RST_CFG, 377 + }, 378 + [TH1520_RESET_ID_PWM_APB] = { 379 + .bit = BIT(1), 380 + .reg = TH1520_PWM_RST_CFG, 381 + }, 382 + [TH1520_RESET_ID_PADCTRL0_APB] = { 383 + .bit = BIT(0), 384 + .reg = TH1520_PADCTRL0_APSYS_RST_CFG, 385 + }, 386 + [TH1520_RESET_ID_CPU2PERI_X2H] = { 387 + .bit = BIT(1), 388 + .reg = TH1520_CPU2PERI_X2H_RST_CFG, 389 + }, 390 + [TH1520_RESET_ID_CPU2AON_X2H] = { 391 + .bit = BIT(0), 392 + .reg = TH1520_CPU2AON_X2H_RST_CFG, 393 + }, 394 + [TH1520_RESET_ID_AON2CPU_A2X] = { 395 + .bit = BIT(0), 396 + .reg = TH1520_AON2CPU_A2X_RST_CFG, 397 + }, 398 + [TH1520_RESET_ID_NPUSYS_AXI] = { 399 + .bit = BIT(0), 400 + .reg = TH1520_NPUSYS_AXI_RST_CFG, 401 + }, 402 + [TH1520_RESET_ID_NPUSYS_AXI_APB] = { 403 + .bit = BIT(1), 404 + .reg = TH1520_NPUSYS_AXI_RST_CFG, 405 + }, 406 + [TH1520_RESET_ID_CPU2VP_X2P] = { 407 + .bit = BIT(0), 408 + .reg = TH1520_CPU2VP_X2P_RST_CFG, 409 + }, 410 + [TH1520_RESET_ID_CPU2VI_X2H] = { 411 + .bit = BIT(0), 412 + .reg = TH1520_CPU2VI_X2H_RST_CFG, 413 + }, 414 + [TH1520_RESET_ID_BMU_AXI] = { 415 + .bit = BIT(0), 416 + .reg = TH1520_BMU_C910_RST_CFG, 417 + }, 418 + [TH1520_RESET_ID_BMU_APB] = { 419 + .bit = BIT(1), 420 + .reg = TH1520_BMU_C910_RST_CFG, 421 + }, 422 + [TH1520_RESET_ID_DMAC_CPUSYS_AXI] = { 423 + .bit = BIT(0), 424 + .reg = TH1520_DMAC_CPUSYS_RST_CFG, 425 + }, 426 + [TH1520_RESET_ID_DMAC_CPUSYS_AHB] = { 427 + .bit = BIT(1), 428 + .reg = TH1520_DMAC_CPUSYS_RST_CFG, 429 + }, 430 + [TH1520_RESET_ID_SPINLOCK] = { 431 + .bit = BIT(0), 432 + .reg = TH1520_SPINLOCK_RST_CFG, 433 + }, 434 + [TH1520_RESET_ID_CFG2TEE] = { 435 + .bit = BIT(0), 436 + .reg = TH1520_CFG2TEE_X2H_RST_CFG, 437 + }, 438 + 
[TH1520_RESET_ID_DSMART] = { 439 + .bit = BIT(0), 440 + .reg = TH1520_DSMART_RST_CFG, 441 + }, 442 + [TH1520_RESET_ID_GPIO3_DB] = { 443 + .bit = BIT(0), 444 + .reg = TH1520_GPIO3_RST_CFG, 445 + }, 446 + [TH1520_RESET_ID_GPIO3_APB] = { 447 + .bit = BIT(1), 448 + .reg = TH1520_GPIO3_RST_CFG, 449 + }, 450 + [TH1520_RESET_ID_PERI_I2S] = { 451 + .bit = BIT(0), 452 + .reg = TH1520_I2S_RST_CFG, 453 + }, 454 + [TH1520_RESET_ID_PERI_APB3] = { 455 + .bit = BIT(0), 456 + .reg = TH1520_PERI_APB3_RST_CFG, 457 + }, 458 + [TH1520_RESET_ID_PERI2PERI1_APB] = { 459 + .bit = BIT(1), 460 + .reg = TH1520_PERI_APB3_RST_CFG, 461 + }, 462 + [TH1520_RESET_ID_VPSYS_APB] = { 463 + .bit = BIT(0), 464 + .reg = TH1520_VP_SUBSYS_RST_CFG, 465 + }, 466 + [TH1520_RESET_ID_PERISYS_APB4] = { 467 + .bit = BIT(0), 468 + .reg = TH1520_PERISYS_APB4_RST_CFG, 469 + }, 470 + [TH1520_RESET_ID_GMAC1_APB] = { 471 + .bit = BIT(0), 472 + .reg = TH1520_GMAC1_RST_CFG, 473 + }, 474 + [TH1520_RESET_ID_GMAC1_AHB] = { 475 + .bit = BIT(1), 476 + .reg = TH1520_GMAC1_RST_CFG, 477 + }, 478 + [TH1520_RESET_ID_GMAC1_CLKGEN] = { 479 + .bit = BIT(2), 480 + .reg = TH1520_GMAC1_RST_CFG, 481 + }, 482 + [TH1520_RESET_ID_GMAC1_AXI] = { 483 + .bit = BIT(3), 484 + .reg = TH1520_GMAC1_RST_CFG, 485 + }, 486 + [TH1520_RESET_ID_GMAC_AXI] = { 487 + .bit = BIT(0), 488 + .reg = TH1520_GMAC_AXI_RST_CFG, 489 + }, 490 + [TH1520_RESET_ID_GMAC_AXI_APB] = { 491 + .bit = BIT(1), 492 + .reg = TH1520_GMAC_AXI_RST_CFG, 493 + }, 494 + [TH1520_RESET_ID_PADCTRL1_APB] = { 495 + .bit = BIT(0), 496 + .reg = TH1520_PADCTRL1_APSYS_RST_CFG, 497 + }, 498 + [TH1520_RESET_ID_VOSYS_AXI] = { 499 + .bit = BIT(0), 500 + .reg = TH1520_VOSYS_AXI_RST_CFG, 501 + }, 502 + [TH1520_RESET_ID_VOSYS_AXI_APB] = { 503 + .bit = BIT(1), 504 + .reg = TH1520_VOSYS_AXI_RST_CFG, 505 + }, 506 + [TH1520_RESET_ID_VOSYS_AXI_X2X] = { 507 + .bit = BIT(0), 508 + .reg = TH1520_VOSYS_X2X_RST_CFG, 509 + }, 510 + [TH1520_RESET_ID_MISC2VP_X2X] = { 511 + .bit = BIT(0), 512 + .reg = 
TH1520_MISC2VP_X2X_RST_CFG, 513 + }, 514 + [TH1520_RESET_ID_DSPSYS] = { 515 + .bit = BIT(0), 516 + .reg = TH1520_SUBSYS_RST_CFG, 517 + }, 518 + [TH1520_RESET_ID_VISYS] = { 519 + .bit = BIT(1), 520 + .reg = TH1520_SUBSYS_RST_CFG, 521 + }, 522 + [TH1520_RESET_ID_VOSYS] = { 523 + .bit = BIT(2), 524 + .reg = TH1520_SUBSYS_RST_CFG, 525 + }, 526 + [TH1520_RESET_ID_VPSYS] = { 527 + .bit = BIT(3), 528 + .reg = TH1520_SUBSYS_RST_CFG, 529 + }, 530 + }; 531 + 532 + static const struct th1520_reset_map th1520_dsp_resets[] = { 533 + [TH1520_RESET_ID_X2X_DSP1] = { 534 + .bit = BIT(0), 535 + .reg = TH1520_DSPSYS_RST_CFG, 536 + }, 537 + [TH1520_RESET_ID_X2X_DSP0] = { 538 + .bit = BIT(1), 539 + .reg = TH1520_DSPSYS_RST_CFG, 540 + }, 541 + [TH1520_RESET_ID_X2X_SLAVE_DSP1] = { 542 + .bit = BIT(2), 543 + .reg = TH1520_DSPSYS_RST_CFG, 544 + }, 545 + [TH1520_RESET_ID_X2X_SLAVE_DSP0] = { 546 + .bit = BIT(3), 547 + .reg = TH1520_DSPSYS_RST_CFG, 548 + }, 549 + [TH1520_RESET_ID_DSP0_CORE] = { 550 + .bit = BIT(8), 551 + .reg = TH1520_DSPSYS_RST_CFG, 552 + }, 553 + [TH1520_RESET_ID_DSP0_DEBUG] = { 554 + .bit = BIT(9), 555 + .reg = TH1520_DSPSYS_RST_CFG, 556 + }, 557 + [TH1520_RESET_ID_DSP0_APB] = { 558 + .bit = BIT(10), 559 + .reg = TH1520_DSPSYS_RST_CFG, 560 + }, 561 + [TH1520_RESET_ID_DSP1_CORE] = { 562 + .bit = BIT(12), 563 + .reg = TH1520_DSPSYS_RST_CFG, 564 + }, 565 + [TH1520_RESET_ID_DSP1_DEBUG] = { 566 + .bit = BIT(13), 567 + .reg = TH1520_DSPSYS_RST_CFG, 568 + }, 569 + [TH1520_RESET_ID_DSP1_APB] = { 570 + .bit = BIT(14), 571 + .reg = TH1520_DSPSYS_RST_CFG, 572 + }, 573 + [TH1520_RESET_ID_DSPSYS_APB] = { 574 + .bit = BIT(16), 575 + .reg = TH1520_DSPSYS_RST_CFG, 576 + }, 577 + [TH1520_RESET_ID_AXI4_DSPSYS_SLV] = { 578 + .bit = BIT(20), 579 + .reg = TH1520_DSPSYS_RST_CFG, 580 + }, 581 + [TH1520_RESET_ID_AXI4_DSPSYS] = { 582 + .bit = BIT(24), 583 + .reg = TH1520_DSPSYS_RST_CFG, 584 + }, 585 + [TH1520_RESET_ID_AXI4_DSP_RS] = { 586 + .bit = BIT(26), 587 + .reg = TH1520_DSPSYS_RST_CFG, 588 + 
}, 589 + }; 590 + 591 + static const struct th1520_reset_map th1520_misc_resets[] = { 592 + [TH1520_RESET_ID_EMMC_SDIO_CLKGEN] = { 593 + .bit = BIT(0), 594 + .reg = TH1520_EMMC_RST_CFG, 595 + }, 596 + [TH1520_RESET_ID_EMMC] = { 597 + .bit = BIT(1), 598 + .reg = TH1520_EMMC_RST_CFG, 599 + }, 600 + [TH1520_RESET_ID_MISCSYS_AXI] = { 601 + .bit = BIT(0), 602 + .reg = TH1520_MISCSYS_AXI_RST_CFG, 603 + }, 604 + [TH1520_RESET_ID_MISCSYS_AXI_APB] = { 605 + .bit = BIT(1), 606 + .reg = TH1520_MISCSYS_AXI_RST_CFG, 607 + }, 608 + [TH1520_RESET_ID_SDIO0] = { 609 + .bit = BIT(0), 610 + .reg = TH1520_SDIO0_RST_CFG, 611 + }, 612 + [TH1520_RESET_ID_SDIO1] = { 613 + .bit = BIT(1), 614 + .reg = TH1520_SDIO1_RST_CFG, 615 + }, 616 + [TH1520_RESET_ID_USB3_APB] = { 617 + .bit = BIT(0), 618 + .reg = TH1520_USB3_DRD_RST_CFG, 619 + }, 620 + [TH1520_RESET_ID_USB3_PHY] = { 621 + .bit = BIT(1), 622 + .reg = TH1520_USB3_DRD_RST_CFG, 623 + }, 624 + [TH1520_RESET_ID_USB3_VCC] = { 625 + .bit = BIT(2), 626 + .reg = TH1520_USB3_DRD_RST_CFG, 627 + }, 628 + }; 629 + 630 + static const struct th1520_reset_map th1520_vi_resets[] = { 631 + [TH1520_RESET_ID_ISP0] = { 632 + .bit = BIT(0), 633 + .reg = TH1520_VISYS_RST_CFG, 634 + }, 635 + [TH1520_RESET_ID_ISP1] = { 636 + .bit = BIT(4), 637 + .reg = TH1520_VISYS_RST_CFG, 638 + }, 639 + [TH1520_RESET_ID_CSI0_APB] = { 640 + .bit = BIT(16), 641 + .reg = TH1520_VISYS_RST_CFG, 642 + }, 643 + [TH1520_RESET_ID_CSI1_APB] = { 644 + .bit = BIT(17), 645 + .reg = TH1520_VISYS_RST_CFG, 646 + }, 647 + [TH1520_RESET_ID_CSI2_APB] = { 648 + .bit = BIT(18), 649 + .reg = TH1520_VISYS_RST_CFG, 650 + }, 651 + [TH1520_RESET_ID_MIPI_FIFO] = { 652 + .bit = BIT(20), 653 + .reg = TH1520_VISYS_RST_CFG, 654 + }, 655 + [TH1520_RESET_ID_ISP_VENC_APB] = { 656 + .bit = BIT(24), 657 + .reg = TH1520_VISYS_RST_CFG, 658 + }, 659 + [TH1520_RESET_ID_VIPRE_APB] = { 660 + .bit = BIT(28), 661 + .reg = TH1520_VISYS_RST_CFG, 662 + }, 663 + [TH1520_RESET_ID_VIPRE_AXI] = { 664 + .bit = BIT(29), 665 + 
.reg = TH1520_VISYS_RST_CFG, 666 + }, 667 + [TH1520_RESET_ID_DW200_APB] = { 668 + .bit = BIT(31), 669 + .reg = TH1520_VISYS_RST_CFG, 670 + }, 671 + [TH1520_RESET_ID_VISYS3_AXI] = { 672 + .bit = BIT(8), 673 + .reg = TH1520_VISYS_2_RST_CFG, 674 + }, 675 + [TH1520_RESET_ID_VISYS2_AXI] = { 676 + .bit = BIT(9), 677 + .reg = TH1520_VISYS_2_RST_CFG, 678 + }, 679 + [TH1520_RESET_ID_VISYS1_AXI] = { 680 + .bit = BIT(10), 681 + .reg = TH1520_VISYS_2_RST_CFG, 682 + }, 683 + [TH1520_RESET_ID_VISYS_AXI] = { 684 + .bit = BIT(12), 685 + .reg = TH1520_VISYS_2_RST_CFG, 686 + }, 687 + [TH1520_RESET_ID_VISYS_APB] = { 688 + .bit = BIT(16), 689 + .reg = TH1520_VISYS_2_RST_CFG, 690 + }, 691 + [TH1520_RESET_ID_ISP_VENC_AXI] = { 692 + .bit = BIT(20), 693 + .reg = TH1520_VISYS_2_RST_CFG, 694 + }, 695 + }; 696 + 697 + static const struct th1520_reset_map th1520_vp_resets[] = { 698 + [TH1520_RESET_ID_VPSYS_AXI_APB] = { 699 + .bit = BIT(0), 700 + .reg = TH1520_AXIBUS_RST_CFG, 701 + }, 702 + [TH1520_RESET_ID_VPSYS_AXI] = { 703 + .bit = BIT(1), 704 + .reg = TH1520_AXIBUS_RST_CFG, 705 + }, 706 + [TH1520_RESET_ID_FCE_APB] = { 707 + .bit = BIT(0), 708 + .reg = TH1520_FCE_RST_CFG, 709 + }, 710 + [TH1520_RESET_ID_FCE_CORE] = { 711 + .bit = BIT(1), 712 + .reg = TH1520_FCE_RST_CFG, 713 + }, 714 + [TH1520_RESET_ID_FCE_X2X_MASTER] = { 715 + .bit = BIT(4), 716 + .reg = TH1520_FCE_RST_CFG, 717 + }, 718 + [TH1520_RESET_ID_FCE_X2X_SLAVE] = { 719 + .bit = BIT(5), 720 + .reg = TH1520_FCE_RST_CFG, 721 + }, 722 + [TH1520_RESET_ID_G2D_APB] = { 723 + .bit = BIT(0), 724 + .reg = TH1520_G2D_RST_CFG, 725 + }, 726 + [TH1520_RESET_ID_G2D_ACLK] = { 727 + .bit = BIT(1), 728 + .reg = TH1520_G2D_RST_CFG, 729 + }, 730 + [TH1520_RESET_ID_G2D_CORE] = { 731 + .bit = BIT(2), 732 + .reg = TH1520_G2D_RST_CFG, 733 + }, 734 + [TH1520_RESET_ID_VDEC_APB] = { 735 + .bit = BIT(0), 736 + .reg = TH1520_VDEC_RST_CFG, 737 + }, 738 + [TH1520_RESET_ID_VDEC_ACLK] = { 739 + .bit = BIT(1), 740 + .reg = TH1520_VDEC_RST_CFG, 741 + }, 742 + 
[TH1520_RESET_ID_VDEC_CORE] = { 743 + .bit = BIT(2), 744 + .reg = TH1520_VDEC_RST_CFG, 745 + }, 746 + [TH1520_RESET_ID_VENC_APB] = { 747 + .bit = BIT(0), 748 + .reg = TH1520_VENC_RST_CFG, 749 + }, 750 + [TH1520_RESET_ID_VENC_CORE] = { 751 + .bit = BIT(1), 752 + .reg = TH1520_VENC_RST_CFG, 753 + }, 173 754 }; 174 755 175 756 static inline struct th1520_reset_priv * ··· 859 90 struct th1520_reset_priv *priv = to_th1520_reset(rcdev); 860 91 const struct th1520_reset_map *reset; 861 92 862 - reset = &th1520_resets[id]; 93 + reset = &priv->resets[id]; 863 94 864 95 return regmap_update_bits(priv->map, reset->reg, reset->bit, 0); 865 96 } ··· 870 101 struct th1520_reset_priv *priv = to_th1520_reset(rcdev); 871 102 const struct th1520_reset_map *reset; 872 103 873 - reset = &th1520_resets[id]; 104 + reset = &priv->resets[id]; 874 105 875 106 return regmap_update_bits(priv->map, reset->reg, reset->bit, 876 107 reset->bit); ··· 889 120 890 121 static int th1520_reset_probe(struct platform_device *pdev) 891 122 { 123 + const struct th1520_reset_data *data; 892 124 struct device *dev = &pdev->dev; 893 125 struct th1520_reset_priv *priv; 894 126 void __iomem *base; 895 127 int ret; 128 + 129 + data = device_get_match_data(dev); 896 130 897 131 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 898 132 if (!priv) ··· 910 138 if (IS_ERR(priv->map)) 911 139 return PTR_ERR(priv->map); 912 140 913 - /* Initialize GPU resets to asserted state */ 914 - ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG, 915 - TH1520_GPU_RST_CFG_MASK, 0); 916 - if (ret) 917 - return ret; 141 + if (of_device_is_compatible(dev->of_node, "thead,th1520-reset")) { 142 + /* Initialize GPU resets to asserted state */ 143 + ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG, 144 + TH1520_GPU_RST_CFG_MASK, 0); 145 + if (ret) 146 + return ret; 147 + } 918 148 919 149 priv->rcdev.owner = THIS_MODULE; 920 - priv->rcdev.nr_resets = ARRAY_SIZE(th1520_resets); 150 + priv->rcdev.nr_resets = data->num; 921 
151 priv->rcdev.ops = &th1520_reset_ops; 922 152 priv->rcdev.of_node = dev->of_node; 153 + 154 + priv->resets = data->resets; 923 155 924 156 return devm_reset_controller_register(dev, &priv->rcdev); 925 157 } 926 158 159 + static const struct th1520_reset_data th1520_reset_data = { 160 + .resets = th1520_resets, 161 + .num = ARRAY_SIZE(th1520_resets), 162 + }; 163 + 164 + static const struct th1520_reset_data th1520_ap_reset_data = { 165 + .resets = th1520_ap_resets, 166 + .num = ARRAY_SIZE(th1520_ap_resets), 167 + }; 168 + 169 + static const struct th1520_reset_data th1520_dsp_reset_data = { 170 + .resets = th1520_dsp_resets, 171 + .num = ARRAY_SIZE(th1520_dsp_resets), 172 + }; 173 + 174 + static const struct th1520_reset_data th1520_misc_reset_data = { 175 + .resets = th1520_misc_resets, 176 + .num = ARRAY_SIZE(th1520_misc_resets), 177 + }; 178 + 179 + static const struct th1520_reset_data th1520_vi_reset_data = { 180 + .resets = th1520_vi_resets, 181 + .num = ARRAY_SIZE(th1520_vi_resets), 182 + }; 183 + 184 + static const struct th1520_reset_data th1520_vp_reset_data = { 185 + .resets = th1520_vp_resets, 186 + .num = ARRAY_SIZE(th1520_vp_resets), 187 + }; 188 + 927 189 static const struct of_device_id th1520_reset_match[] = { 928 - { .compatible = "thead,th1520-reset" }, 190 + { .compatible = "thead,th1520-reset", .data = &th1520_reset_data }, 191 + { .compatible = "thead,th1520-reset-ap", .data = &th1520_ap_reset_data }, 192 + { .compatible = "thead,th1520-reset-dsp", .data = &th1520_dsp_reset_data }, 193 + { .compatible = "thead,th1520-reset-misc", .data = &th1520_misc_reset_data }, 194 + { .compatible = "thead,th1520-reset-vi", .data = &th1520_vi_reset_data }, 195 + { .compatible = "thead,th1520-reset-vp", .data = &th1520_vp_reset_data }, 929 196 { /* sentinel */ } 930 197 }; 931 198 MODULE_DEVICE_TABLE(of, th1520_reset_match);
+7 -3
drivers/sh/clk/core.c
··· 569 569 EXPORT_SYMBOL_GPL(clk_round_rate); 570 570 571 571 #ifdef CONFIG_PM 572 - static void clks_core_resume(void) 572 + static void clks_core_resume(void *data) 573 573 { 574 574 struct clk *clkp; 575 575 ··· 588 588 } 589 589 } 590 590 591 - static struct syscore_ops clks_syscore_ops = { 591 + static const struct syscore_ops clks_syscore_ops = { 592 592 .resume = clks_core_resume, 593 + }; 594 + 595 + static struct syscore clks_syscore = { 596 + .ops = &clks_syscore_ops, 593 597 }; 594 598 595 599 static int __init clk_syscore_init(void) 596 600 { 597 - register_syscore_ops(&clks_syscore_ops); 601 + register_syscore(&clks_syscore); 598 602 599 603 return 0; 600 604 }
+8 -4
drivers/sh/intc/core.c
··· 394 394 return -ENOMEM; 395 395 } 396 396 397 - static int intc_suspend(void) 397 + static int intc_suspend(void *data) 398 398 { 399 399 struct intc_desc_int *d; 400 400 ··· 420 420 return 0; 421 421 } 422 422 423 - static void intc_resume(void) 423 + static void intc_resume(void *data) 424 424 { 425 425 struct intc_desc_int *d; 426 426 ··· 450 450 } 451 451 } 452 452 453 - struct syscore_ops intc_syscore_ops = { 453 + static const struct syscore_ops intc_syscore_ops = { 454 454 .suspend = intc_suspend, 455 455 .resume = intc_resume, 456 + }; 457 + 458 + static struct syscore intc_syscore = { 459 + .ops = &intc_syscore_ops, 456 460 }; 457 461 458 462 const struct bus_type intc_subsys = { ··· 481 477 struct intc_desc_int *d; 482 478 int error; 483 479 484 - register_syscore_ops(&intc_syscore_ops); 480 + register_syscore(&intc_syscore); 485 481 486 482 error = subsys_system_register(&intc_subsys, NULL); 487 483 if (!error) {
+8 -4
drivers/soc/bcm/brcmstb/biuctrl.c
··· 298 298 #ifdef CONFIG_PM_SLEEP 299 299 static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS]; 300 300 301 - static int brcmstb_cpu_credit_reg_suspend(void) 301 + static int brcmstb_cpu_credit_reg_suspend(void *data) 302 302 { 303 303 unsigned int i; 304 304 ··· 311 311 return 0; 312 312 } 313 313 314 - static void brcmstb_cpu_credit_reg_resume(void) 314 + static void brcmstb_cpu_credit_reg_resume(void *data) 315 315 { 316 316 unsigned int i; 317 317 ··· 322 322 cbc_writel(cpubiuctrl_reg_save[i], i); 323 323 } 324 324 325 - static struct syscore_ops brcmstb_cpu_credit_syscore_ops = { 325 + static const struct syscore_ops brcmstb_cpu_credit_syscore_ops = { 326 326 .suspend = brcmstb_cpu_credit_reg_suspend, 327 327 .resume = brcmstb_cpu_credit_reg_resume, 328 + }; 329 + 330 + static struct syscore brcmstb_cpu_credit_syscore = { 331 + .ops = &brcmstb_cpu_credit_syscore_ops, 328 332 }; 329 333 #endif 330 334 ··· 358 354 a72_b53_rac_enable_all(np); 359 355 mcp_a72_b53_set(); 360 356 #ifdef CONFIG_PM_SLEEP 361 - register_syscore_ops(&brcmstb_cpu_credit_syscore_ops); 357 + register_syscore(&brcmstb_cpu_credit_syscore); 362 358 #endif 363 359 ret = 0; 364 360 out_put:
+3
drivers/soc/mediatek/mtk-socinfo.c
··· 50 50 MTK_SOCINFO_ENTRY("MT8186T", "MT8186TV/AZA", "Kompanio 528", 0x81862001, CELL_NOT_USED), 51 51 MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 838", 0x81880000, 0x00000010), 52 52 MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 838", 0x81880000, 0x00000011), 53 + MTK_SOCINFO_ENTRY("MT8189", "MT8189GV/AZA", "Kompanio 540", 0x81890000, 0x00000020), 54 + MTK_SOCINFO_ENTRY("MT8189", "MT8189HV/AZA", "Kompanio 540", 0x81890000, 0x00000021), 53 55 MTK_SOCINFO_ENTRY("MT8192", "MT8192V/AZA", "Kompanio 820", 0x00001100, 0x00040080), 54 56 MTK_SOCINFO_ENTRY("MT8192T", "MT8192V/ATZA", "Kompanio 828", 0x00000100, 0x000400C0), 55 57 MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EZA", "Kompanio 1200", 0x81950300, CELL_NOT_USED), ··· 60 58 MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED), 61 59 MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081), 62 60 MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080), 61 + MTK_SOCINFO_ENTRY("MT8391", "MT8391AV/AZA", "Genio 720", 0x83910000, 0x00000080), 63 62 MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED), 64 63 MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950800, CELL_NOT_USED), 65 64 };
+58 -23
drivers/soc/qcom/ice.c
··· 22 22 #include <soc/qcom/ice.h> 23 23 24 24 #define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */ 25 - #define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE 100 /* assuming HWKM v2 */ 25 + 26 + #define QCOM_ICE_HWKM_V1 1 /* HWKM version 1 */ 27 + #define QCOM_ICE_HWKM_V2 2 /* HWKM version 2 */ 28 + 29 + #define QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE 100 /* Maximum HWKM wrapped key size */ 30 + 31 + /* 32 + * Wrapped key size depends upon HWKM version: 33 + * HWKM version 1 supports 68 bytes 34 + * HWKM version 2 supports 100 bytes 35 + */ 36 + #define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(v) ((v) == QCOM_ICE_HWKM_V1 ? 68 : 100) 26 37 27 38 /* QCOM ICE registers */ 28 39 ··· 73 62 74 63 #define QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000) 75 64 #define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2)) 65 + /* In HWKM v1 the ICE legacy mode is controlled from HWKM register space */ 66 + #define QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED BIT(5) 76 67 77 68 #define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004) 78 69 #define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0) 79 70 #define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1) 80 71 #define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2) 81 - #define QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 BIT(7) 82 - #define QCOM_ICE_HWKM_BIST_DONE_V2 BIT(9) 72 + #define QCOM_ICE_HWKM_CRYPTO_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(14) : BIT(7)) 73 + #define QCOM_ICE_HWKM_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(16) : BIT(9)) 83 74 84 75 #define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008) 85 76 #define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3) ··· 110 97 struct clk *core_clk; 111 98 bool use_hwkm; 112 99 bool hwkm_init_complete; 100 + u8 hwkm_version; 113 101 }; 114 102 115 103 static bool qcom_ice_check_supported(struct qcom_ice *ice) ··· 128 114 return false; 129 115 } 130 116 117 + /* HWKM version v2 is present from ICE 3.2.1 onwards while version v1 118 + * is present only in ICE 3.2.0. Earlier ICE version don't have HWKM. 
119 + */ 120 + if (major > 3 || 121 + (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) 122 + ice->hwkm_version = QCOM_ICE_HWKM_V2; 123 + else if ((major == 3) && (minor == 2)) 124 + ice->hwkm_version = QCOM_ICE_HWKM_V1; 125 + else 126 + ice->hwkm_version = 0; 127 + 131 128 dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", 132 129 major, minor, step); 130 + 131 + if (ice->hwkm_version) 132 + dev_info(dev, "QC Hardware Key Manager (HWKM) version v%d\n", 133 + ice->hwkm_version); 133 134 134 135 /* If fuses are blown, ICE might not work in the standard way. */ 135 136 regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING); ··· 160 131 * v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE 161 132 * versions don't have HWKM at all. However, for HWKM to be fully 162 133 * usable by Linux, the TrustZone software also needs to support certain 163 - * SCM calls including the ones to generate and prepare keys. That 164 - * effectively makes the earliest supported SoC be SM8650, which has 165 - * HWKM v2. Therefore, this driver doesn't include support for HWKM v1, 166 - * and it checks for the SCM call support before it decides to use HWKM. 134 + * SCM calls including the ones to generate and prepare keys. Support 135 + * for these SCM calls is present for SoCs with HWKM v2 and is being 136 + * added for SoCs with HWKM v1 as well but not every SoC with HWKM v1 137 + * currently supports this. So, this driver checks for the SCM call 138 + * support before it decides to use HWKM. 167 139 * 168 140 * Also, since HWKM and legacy mode are mutually exclusive, and 169 141 * ICE-capable storage driver(s) need to know early on whether to 170 142 * advertise support for raw keys or wrapped keys, HWKM cannot be used 171 143 * unconditionally. A module parameter is used to opt into using it. 
172 144 */ 173 - if ((major >= 4 || 174 - (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) && 175 - qcom_scm_has_wrapped_key_support()) { 145 + if (ice->hwkm_version && qcom_scm_has_wrapped_key_support()) { 176 146 if (qcom_ice_use_wrapped_keys) { 177 147 dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n"); 178 148 ice->use_hwkm = true; ··· 240 212 (QCOM_ICE_HWKM_KT_CLEAR_DONE | 241 213 QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE | 242 214 QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE | 243 - QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 | 244 - QCOM_ICE_HWKM_BIST_DONE_V2)) { 215 + QCOM_ICE_HWKM_CRYPTO_BIST_DONE(ice->hwkm_version) | 216 + QCOM_ICE_HWKM_BIST_DONE(ice->hwkm_version))) { 245 217 dev_err(ice->dev, "HWKM self-test error!\n"); 246 218 /* 247 219 * Too late to revoke use_hwkm here, as it was already ··· 258 230 if (!ice->use_hwkm) 259 231 return; 260 232 261 - BUILD_BUG_ON(QCOM_ICE_HWKM_WRAPPED_KEY_SIZE > 233 + BUILD_BUG_ON(QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE > 262 234 BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE); 263 235 /* 264 236 * When ICE is in HWKM mode, it only supports wrapped keys. ··· 266 238 * 267 239 * Put ICE in HWKM mode. ICE defaults to legacy mode. 268 240 */ 269 - regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL); 270 - regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED; 271 - qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL); 241 + if (ice->hwkm_version == QCOM_ICE_HWKM_V2) { 242 + regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL); 243 + regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED; 244 + qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL); 245 + } else if (ice->hwkm_version == QCOM_ICE_HWKM_V1) { 246 + regval = qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_CTL); 247 + regval &= ~QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED; 248 + qcom_ice_writel(ice, regval, QCOM_ICE_REG_HWKM_TZ_KM_CTL); 249 + } 272 250 273 251 /* Disable CRC checks. This HWKM feature is not used. 
*/ 274 252 qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL, ··· 332 298 333 299 static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot) 334 300 { 335 - return slot * 2; 301 + return ice->hwkm_version == QCOM_ICE_HWKM_V1 ? slot : slot * 2; 336 302 } 337 303 338 304 static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot, ··· 485 451 { 486 452 int err; 487 453 488 - err = qcom_scm_generate_ice_key(lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); 454 + err = qcom_scm_generate_ice_key(lt_key, 455 + QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version)); 489 456 if (err) 490 457 return err; 491 458 492 - return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; 459 + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version); 493 460 } 494 461 EXPORT_SYMBOL_GPL(qcom_ice_generate_key); 495 462 ··· 513 478 int err; 514 479 515 480 err = qcom_scm_prepare_ice_key(lt_key, lt_key_size, 516 - eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); 481 + eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version)); 517 482 if (err == -EIO || err == -EINVAL) 518 483 err = -EBADMSG; /* probably invalid key */ 519 484 if (err) 520 485 return err; 521 486 522 - return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; 487 + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version); 523 488 } 524 489 EXPORT_SYMBOL_GPL(qcom_ice_prepare_key); 525 490 ··· 541 506 int err; 542 507 543 508 err = qcom_scm_import_ice_key(raw_key, raw_key_size, 544 - lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); 509 + lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version)); 545 510 if (err) 546 511 return err; 547 512 548 - return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; 513 + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version); 549 514 } 550 515 EXPORT_SYMBOL_GPL(qcom_ice_import_key); 551 516
+373
drivers/soc/qcom/llcc-qcom.c
··· 214 214 }, 215 215 }; 216 216 217 + static const struct llcc_slice_config kaanapali_data[] = { 218 + { 219 + .usecase_id = LLCC_CPUSS, 220 + .slice_id = 1, 221 + .max_cap = 5120, 222 + .priority = 1, 223 + .bonus_ways = 0xffffffff, 224 + .activate_on_init = true, 225 + .write_scid_en = true, 226 + .stale_en = true, 227 + .mru_uncap_en = true, 228 + .vict_prio = true, 229 + }, { 230 + .usecase_id = LLCC_VIDSC0, 231 + .slice_id = 2, 232 + .max_cap = 512, 233 + .priority = 4, 234 + .fixed_size = true, 235 + .bonus_ways = 0xffffffff, 236 + .mru_uncap_en = true, 237 + .vict_prio = true, 238 + }, { 239 + .usecase_id = LLCC_AUDIO, 240 + .slice_id = 35, 241 + .max_cap = 512, 242 + .priority = 1, 243 + .fixed_size = true, 244 + .bonus_ways = 0xffffffff, 245 + .mru_uncap_en = true, 246 + .vict_prio = true, 247 + }, { 248 + .usecase_id = LLCC_MDMHPGRW, 249 + .slice_id = 25, 250 + .max_cap = 1024, 251 + .priority = 5, 252 + .bonus_ways = 0xffffffff, 253 + .mru_uncap_en = true, 254 + .vict_prio = true, 255 + }, { 256 + .usecase_id = LLCC_CMPT, 257 + .slice_id = 34, 258 + .max_cap = 4096, 259 + .priority = 1, 260 + .fixed_size = true, 261 + .bonus_ways = 0xffffffff, 262 + .mru_uncap_en = true, 263 + .vict_prio = true, 264 + }, { 265 + .usecase_id = LLCC_GPUHTW, 266 + .slice_id = 11, 267 + .max_cap = 512, 268 + .priority = 1, 269 + .fixed_size = true, 270 + .bonus_ways = 0xffffffff, 271 + .mru_uncap_en = true, 272 + .vict_prio = true, 273 + }, { 274 + .usecase_id = LLCC_GPU, 275 + .slice_id = 9, 276 + .max_cap = 5632, 277 + .priority = 1, 278 + .fixed_size = true, 279 + .bonus_ways = 0xffffffff, 280 + .write_scid_cacheable_en = true, 281 + .mru_uncap_en = true, 282 + .vict_prio = true, 283 + }, { 284 + .usecase_id = LLCC_MMUHWT, 285 + .slice_id = 18, 286 + .max_cap = 768, 287 + .priority = 1, 288 + .fixed_size = true, 289 + .bonus_ways = 0xffffffff, 290 + .activate_on_init = true, 291 + .mru_uncap_en = true, 292 + .vict_prio = true, 293 + }, { 294 + .usecase_id = LLCC_DISP, 
295 + .slice_id = 16, 296 + .max_cap = 7168, 297 + .priority = 1, 298 + .fixed_size = true, 299 + .bonus_ways = 0xffffffff, 300 + .cache_mode = 2, 301 + .stale_en = true, 302 + .mru_uncap_en = true, 303 + .vict_prio = true, 304 + }, { 305 + .usecase_id = LLCC_MDMHPFX, 306 + .slice_id = 24, 307 + .max_cap = 1024, 308 + .priority = 5, 309 + .fixed_size = true, 310 + .bonus_ways = 0xffffffff, 311 + .mru_uncap_en = true, 312 + .vict_prio = true, 313 + }, { 314 + .usecase_id = LLCC_MDMPNG, 315 + .slice_id = 27, 316 + .max_cap = 256, 317 + .priority = 5, 318 + .bonus_ways = 0xfffff, 319 + .mru_uncap_en = true, 320 + .vict_prio = true, 321 + }, { 322 + .usecase_id = LLCC_CVP, 323 + .slice_id = 8, 324 + .max_cap = 800, 325 + .priority = 5, 326 + .fixed_size = true, 327 + .bonus_ways = 0xffffffff, 328 + .mru_uncap_en = true, 329 + .ovcap_en = true, 330 + .vict_prio = true, 331 + .parent_slice_id = 33, 332 + }, { 333 + .usecase_id = LLCC_MODPE, 334 + .slice_id = 29, 335 + .max_cap = 256, 336 + .priority = 1, 337 + .fixed_size = true, 338 + .bonus_ways = 0xf0000000, 339 + .mru_uncap_en = true, 340 + .alloc_oneway_en = true, 341 + .vict_prio = true, 342 + }, { 343 + .usecase_id = LLCC_WRCACHE, 344 + .slice_id = 31, 345 + .max_cap = 512, 346 + .priority = 1, 347 + .fixed_size = true, 348 + .bonus_ways = 0xffffffff, 349 + .activate_on_init = true, 350 + .mru_uncap_en = true, 351 + .vict_prio = true, 352 + }, { 353 + .usecase_id = LLCC_CVPFW, 354 + .slice_id = 19, 355 + .max_cap = 512, 356 + .priority = 5, 357 + .fixed_size = true, 358 + .bonus_ways = 0xffffffff, 359 + .mru_uncap_en = true, 360 + .vict_prio = true, 361 + .parent_slice_id = 33, 362 + }, { 363 + .usecase_id = LLCC_CPUMTE, 364 + .slice_id = 7, 365 + .max_cap = 256, 366 + .priority = 1, 367 + .fixed_size = true, 368 + .bonus_ways = 0xffffffff, 369 + .mru_uncap_en = true, 370 + .vict_prio = true, 371 + }, { 372 + .usecase_id = LLCC_CMPTHCP, 373 + .slice_id = 15, 374 + .max_cap = 256, 375 + .priority = 4, 376 + 
.fixed_size = true, 377 + .bonus_ways = 0xffffffff, 378 + .mru_uncap_en = true, 379 + .vict_prio = true, 380 + }, { 381 + .usecase_id = LLCC_LCPDARE, 382 + .slice_id = 30, 383 + .max_cap = 128, 384 + .priority = 5, 385 + .fixed_size = true, 386 + .bonus_ways = 0xffffffff, 387 + .activate_on_init = true, 388 + .mru_uncap_en = true, 389 + .alloc_oneway_en = true, 390 + .vict_prio = true, 391 + }, { 392 + .usecase_id = LLCC_AENPU, 393 + .slice_id = 3, 394 + .max_cap = 3072, 395 + .priority = 1, 396 + .fixed_size = true, 397 + .bonus_ways = 0xffffffff, 398 + .cache_mode = 2, 399 + .mru_uncap_en = true, 400 + .vict_prio = true, 401 + }, { 402 + .usecase_id = LLCC_ISLAND1, 403 + .slice_id = 12, 404 + .max_cap = 7936, 405 + .priority = 7, 406 + .fixed_size = true, 407 + .bonus_ways = 0x7fffffff, 408 + .mru_uncap_en = true, 409 + .vict_prio = true, 410 + }, { 411 + .usecase_id = LLCC_DISP_WB, 412 + .slice_id = 23, 413 + .max_cap = 512, 414 + .priority = 4, 415 + .fixed_size = true, 416 + .bonus_ways = 0xffffffff, 417 + .mru_uncap_en = true, 418 + .vict_prio = true, 419 + }, { 420 + .usecase_id = LLCC_VIDVSP, 421 + .slice_id = 4, 422 + .max_cap = 256, 423 + .priority = 4, 424 + .fixed_size = true, 425 + .bonus_ways = 0xffffffff, 426 + .mru_uncap_en = true, 427 + .vict_prio = true, 428 + }, { 429 + .usecase_id = LLCC_VIDDEC, 430 + .slice_id = 5, 431 + .max_cap = 512, 432 + .priority = 4, 433 + .fixed_size = true, 434 + .bonus_ways = 0xffffffff, 435 + .cache_mode = 2, 436 + .mru_uncap_en = true, 437 + .ovcap_en = true, 438 + .vict_prio = true, 439 + .parent_slice_id = 33, 440 + }, { 441 + .usecase_id = LLCC_CAMOFE, 442 + .slice_id = 33, 443 + .max_cap = 6144, 444 + .priority = 4, 445 + .fixed_size = true, 446 + .bonus_ways = 0xffffffff, 447 + .stale_en = true, 448 + .mru_uncap_en = true, 449 + .ovcap_en = true, 450 + .vict_prio = true, 451 + .parent_slice_id = 33, 452 + }, { 453 + .usecase_id = LLCC_CAMRTIP, 454 + .slice_id = 13, 455 + .max_cap = 6144, 456 + .priority = 4, 
457 + .fixed_size = true, 458 + .bonus_ways = 0xffffffff, 459 + .stale_en = true, 460 + .mru_uncap_en = true, 461 + .ovcap_en = true, 462 + .vict_prio = true, 463 + .parent_slice_id = 33, 464 + }, { 465 + .usecase_id = LLCC_CAMRTRF, 466 + .slice_id = 10, 467 + .max_cap = 3584, 468 + .priority = 3, 469 + .fixed_size = true, 470 + .bonus_ways = 0xffffffff, 471 + .stale_en = true, 472 + .mru_uncap_en = true, 473 + .ovcap_en = true, 474 + .vict_prio = true, 475 + .parent_slice_id = 33, 476 + }, { 477 + .usecase_id = LLCC_CAMSRTRF, 478 + .slice_id = 21, 479 + .max_cap = 6144, 480 + .priority = 1, 481 + .fixed_size = true, 482 + .bonus_ways = 0xffffffff, 483 + .stale_en = true, 484 + .mru_uncap_en = true, 485 + .ovcap_en = true, 486 + .vict_prio = true, 487 + .parent_slice_id = 33, 488 + }, { 489 + .usecase_id = LLCC_VIDEO_APV, 490 + .slice_id = 6, 491 + .max_cap = 768, 492 + .priority = 4, 493 + .fixed_size = true, 494 + .bonus_ways = 0xffffffff, 495 + .mru_uncap_en = true, 496 + .vict_prio = true, 497 + }, { 498 + .usecase_id = LLCC_COMPUTE1, 499 + .slice_id = 22, 500 + .max_cap = 4096, 501 + .priority = 1, 502 + .fixed_size = true, 503 + .bonus_ways = 0xffffffff, 504 + .mru_uncap_en = true, 505 + .vict_prio = true, 506 + }, { 507 + .usecase_id = LLCC_CPUSS_OPP, 508 + .slice_id = 32, 509 + .max_cap = 0, 510 + .priority = 0, 511 + .fixed_size = true, 512 + .bonus_ways = 0, 513 + .activate_on_init = true, 514 + .write_scid_en = true, 515 + .mru_uncap_en = true, 516 + .vict_prio = true, 517 + }, { 518 + .usecase_id = LLCC_CPUSSMPAM, 519 + .slice_id = 17, 520 + .max_cap = 2048, 521 + .priority = 1, 522 + .fixed_size = true, 523 + .bonus_ways = 0xffffffff, 524 + .activate_on_init = true, 525 + .write_scid_en = true, 526 + .stale_en = true, 527 + .mru_uncap_en = true, 528 + .vict_prio = true, 529 + }, { 530 + .usecase_id = LLCC_CAM_IPE_STROV, 531 + .slice_id = 14, 532 + .max_cap = 400, 533 + .priority = 5, 534 + .fixed_size = true, 535 + .bonus_ways = 0xffffffff, 536 + 
.mru_uncap_en = true, 537 + .ovcap_en = true, 538 + .vict_prio = true, 539 + .parent_slice_id = 33, 540 + }, { 541 + .usecase_id = LLCC_CAM_OFE_STROV, 542 + .slice_id = 20, 543 + .max_cap = 400, 544 + .priority = 5, 545 + .fixed_size = true, 546 + .bonus_ways = 0xffffffff, 547 + .mru_uncap_en = true, 548 + .ovcap_en = true, 549 + .vict_prio = true, 550 + .parent_slice_id = 33, 551 + }, { 552 + .usecase_id = LLCC_CPUSS_HEU, 553 + .slice_id = 28, 554 + .max_cap = 0, 555 + .priority = 0, 556 + .fixed_size = true, 557 + .bonus_ways = 0, 558 + .mru_uncap_en = true, 559 + .ovcap_en = true, 560 + .vict_prio = true, 561 + }, { 562 + .usecase_id = LLCC_MDM_PNG_FIXED, 563 + .slice_id = 26, 564 + .max_cap = 256, 565 + .priority = 5, 566 + .fixed_size = true, 567 + .bonus_ways = 0xff000000, 568 + .activate_on_init = true, 569 + .write_scid_en = true, 570 + .mru_uncap_en = true, 571 + .vict_prio = true, 572 + }, 573 + }; 574 + 217 575 static const struct llcc_slice_config sa8775p_data[] = { 218 576 { 219 577 .usecase_id = LLCC_CPUSS, ··· 3863 3505 [LLCC_TRP_WRS_CACHEABLE_EN] = 0x00042088, 3864 3506 }; 3865 3507 3508 + static const struct qcom_llcc_config kaanapali_cfg[] = { 3509 + { 3510 + .sct_data = kaanapali_data, 3511 + .size = ARRAY_SIZE(kaanapali_data), 3512 + .reg_offset = llcc_v6_reg_offset, 3513 + .edac_reg_offset = &llcc_v6_edac_reg_offset, 3514 + }, 3515 + }; 3516 + 3866 3517 static const struct qcom_llcc_config qcs615_cfg[] = { 3867 3518 { 3868 3519 .sct_data = qcs615_data, ··· 4096 3729 .edac_reg_offset = &llcc_v2_1_edac_reg_offset, 4097 3730 .irq_configured = true, 4098 3731 }, 3732 + }; 3733 + 3734 + static const struct qcom_sct_config kaanapali_cfgs = { 3735 + .llcc_config = kaanapali_cfg, 3736 + .num_config = ARRAY_SIZE(kaanapali_cfg), 4099 3737 }; 4100 3738 4101 3739 static const struct qcom_sct_config qcs615_cfgs = { ··· 4942 4570 4943 4571 static const struct of_device_id qcom_llcc_of_match[] = { 4944 4572 { .compatible = "qcom,ipq5424-llcc", .data = 
&ipq5424_cfgs}, 4573 + { .compatible = "qcom,kaanapali-llcc", .data = &kaanapali_cfgs}, 4945 4574 { .compatible = "qcom,qcs615-llcc", .data = &qcs615_cfgs}, 4946 4575 { .compatible = "qcom,qcs8300-llcc", .data = &qcs8300_cfgs}, 4947 4576 { .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
+22 -30
drivers/soc/qcom/mdt_loader.c
··· 332 332 return false; 333 333 } 334 334 335 - static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, 336 - const char *fw_name, void *mem_region, 337 - phys_addr_t mem_phys, size_t mem_size, 338 - phys_addr_t *reloc_base) 335 + /** 336 + * qcom_mdt_load_no_init() - load the firmware which header is loaded as fw 337 + * @dev: device handle to associate resources with 338 + * @fw: firmware object for the mdt file 339 + * @fw_name: name of the firmware, for construction of segment file names 340 + * @mem_region: allocated memory region to load firmware into 341 + * @mem_phys: physical address of allocated memory region 342 + * @mem_size: size of the allocated memory region 343 + * @reloc_base: adjusted physical address after relocation 344 + * 345 + * Returns 0 on success, negative errno otherwise. 346 + */ 347 + int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, 348 + const char *fw_name, void *mem_region, 349 + phys_addr_t mem_phys, size_t mem_size, 350 + phys_addr_t *reloc_base) 339 351 { 340 352 const struct elf32_phdr *phdrs; 341 353 const struct elf32_phdr *phdr; ··· 447 435 448 436 return ret; 449 437 } 438 + EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init); 450 439 451 440 /** 452 441 * qcom_mdt_load() - load the firmware which header is loaded as fw 453 442 * @dev: device handle to associate resources with 454 443 * @fw: firmware object for the mdt file 455 - * @firmware: name of the firmware, for construction of segment file names 444 + * @fw_name: name of the firmware, for construction of segment file names 456 445 * @pas_id: PAS identifier 457 446 * @mem_region: allocated memory region to load firmware into 458 447 * @mem_phys: physical address of allocated memory region ··· 463 450 * Returns 0 on success, negative errno otherwise. 
464 451 */ 465 452 int qcom_mdt_load(struct device *dev, const struct firmware *fw, 466 - const char *firmware, int pas_id, void *mem_region, 453 + const char *fw_name, int pas_id, void *mem_region, 467 454 phys_addr_t mem_phys, size_t mem_size, 468 455 phys_addr_t *reloc_base) 469 456 { 470 457 int ret; 471 458 472 - ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL); 459 + ret = qcom_mdt_pas_init(dev, fw, fw_name, pas_id, mem_phys, NULL); 473 460 if (ret) 474 461 return ret; 475 462 476 - return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys, 477 - mem_size, reloc_base); 463 + return qcom_mdt_load_no_init(dev, fw, fw_name, mem_region, mem_phys, 464 + mem_size, reloc_base); 478 465 } 479 466 EXPORT_SYMBOL_GPL(qcom_mdt_load); 480 - 481 - /** 482 - * qcom_mdt_load_no_init() - load the firmware which header is loaded as fw 483 - * @dev: device handle to associate resources with 484 - * @fw: firmware object for the mdt file 485 - * @firmware: name of the firmware, for construction of segment file names 486 - * @mem_region: allocated memory region to load firmware into 487 - * @mem_phys: physical address of allocated memory region 488 - * @mem_size: size of the allocated memory region 489 - * @reloc_base: adjusted physical address after relocation 490 - * 491 - * Returns 0 on success, negative errno otherwise. 492 - */ 493 - int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, 494 - const char *firmware, void *mem_region, phys_addr_t mem_phys, 495 - size_t mem_size, phys_addr_t *reloc_base) 496 - { 497 - return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys, 498 - mem_size, reloc_base); 499 - } 500 - EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init); 501 467 502 468 MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format"); 503 469 MODULE_LICENSE("GPL v2");
+1 -1
drivers/soc/qcom/ocmem.c
··· 202 202 } 203 203 204 204 ocmem = platform_get_drvdata(pdev); 205 + put_device(&pdev->dev); 205 206 if (!ocmem) { 206 207 dev_err(dev, "Cannot get ocmem\n"); 207 - put_device(&pdev->dev); 208 208 return ERR_PTR(-ENODEV); 209 209 } 210 210 return ocmem;
+8 -1
drivers/soc/qcom/pmic_glink.c
··· 39 39 struct mutex state_lock; 40 40 unsigned int client_state; 41 41 unsigned int pdr_state; 42 + bool pdr_available; 42 43 43 44 /* serializing clients list updates */ 44 45 spinlock_t client_lock; ··· 247 246 return dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n"); 248 247 249 248 dev_set_drvdata(&rpdev->dev, pg); 249 + pg->pdr_available = rpdev->id.driver_data; 250 250 251 251 guard(mutex)(&pg->state_lock); 252 252 pg->ept = rpdev->ept; 253 + if (!pg->pdr_available) 254 + pg->pdr_state = SERVREG_SERVICE_STATE_UP; 253 255 pmic_glink_state_notify_clients(pg); 254 256 255 257 return 0; ··· 269 265 270 266 guard(mutex)(&pg->state_lock); 271 267 pg->ept = NULL; 268 + if (!pg->pdr_available) 269 + pg->pdr_state = SERVREG_SERVICE_STATE_DOWN; 272 270 pmic_glink_state_notify_clients(pg); 273 271 } 274 272 275 273 static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = { 276 - { "PMIC_RTR_ADSP_APPS" }, 274 + {.name = "PMIC_RTR_ADSP_APPS", .driver_data = true }, 275 + {.name = "PMIC_RTR_SOCCP_APPS", .driver_data = false }, 277 276 {} 278 277 }; 279 278
+2
drivers/soc/qcom/qcom-pbs.c
··· 173 173 return ERR_PTR(-EINVAL); 174 174 } 175 175 176 + platform_device_put(pdev); 177 + 176 178 return pbs; 177 179 } 178 180 EXPORT_SYMBOL_GPL(get_pbs_client_device);
-8
drivers/soc/qcom/qcom_gsbi.c
··· 212 212 return of_platform_populate(node, NULL, NULL, &pdev->dev); 213 213 } 214 214 215 - static void gsbi_remove(struct platform_device *pdev) 216 - { 217 - struct gsbi_info *gsbi = platform_get_drvdata(pdev); 218 - 219 - clk_disable_unprepare(gsbi->hclk); 220 - } 221 - 222 215 static const struct of_device_id gsbi_dt_match[] = { 223 216 { .compatible = "qcom,gsbi-v1.0.0", }, 224 217 { }, ··· 225 232 .of_match_table = gsbi_dt_match, 226 233 }, 227 234 .probe = gsbi_probe, 228 - .remove = gsbi_remove, 229 235 }; 230 236 231 237 module_platform_driver(gsbi_driver);
+10
drivers/soc/qcom/qcom_pd_mapper.c
··· 360 360 }, 361 361 }; 362 362 363 + static const struct qcom_pdm_domain_data *kaanapali_domains[] = { 364 + &adsp_audio_pd, 365 + &adsp_root_pd, 366 + &adsp_sensor_pd, 367 + &cdsp_root_pd, 368 + &mpss_root_pd_gps, 369 + NULL, 370 + }; 371 + 363 372 static const struct qcom_pdm_domain_data *msm8996_domains[] = { 364 373 &msm8996_adsp_audio_pd, 365 374 &msm8996_adsp_root_pd, ··· 561 552 { .compatible = "qcom,apq8074", .data = NULL, }, 562 553 { .compatible = "qcom,apq8084", .data = NULL, }, 563 554 { .compatible = "qcom,apq8096", .data = msm8996_domains, }, 555 + { .compatible = "qcom,kaanapali", .data = kaanapali_domains, }, 564 556 { .compatible = "qcom,msm8226", .data = NULL, }, 565 557 { .compatible = "qcom,msm8909", .data = NULL, }, 566 558 { .compatible = "qcom,msm8916", .data = NULL, },
+18 -15
drivers/soc/qcom/smem.c
··· 353 353 return p - le32_to_cpu(e->size); 354 354 } 355 355 356 - /* Pointer to the one and only smem handle */ 357 - static struct qcom_smem *__smem; 356 + /* 357 + * Pointer to the one and only smem handle. 358 + * Init to -EPROBE_DEFER to signal SMEM still has to be probed. 359 + * Can be set to -ENODEV if SMEM is not initialized by SBL. 360 + */ 361 + static struct qcom_smem *__smem = INIT_ERR_PTR(-EPROBE_DEFER); 358 362 359 363 /* Timeout (ms) for the trylock of remote spinlocks */ 360 364 #define HWSPINLOCK_TIMEOUT 1000 ··· 512 508 unsigned long flags; 513 509 int ret; 514 510 515 - if (!__smem) 516 - return -EPROBE_DEFER; 511 + if (IS_ERR(__smem)) 512 + return PTR_ERR(__smem); 517 513 518 514 if (item < SMEM_ITEM_LAST_FIXED) { 519 515 dev_err(__smem->dev, ··· 521 517 return -EINVAL; 522 518 } 523 519 524 - if (WARN_ON(item >= __smem->item_count)) 520 + if (item >= __smem->item_count) 525 521 return -EINVAL; 526 522 527 523 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, ··· 689 685 void *qcom_smem_get(unsigned host, unsigned item, size_t *size) 690 686 { 691 687 struct smem_partition *part; 692 - void *ptr = ERR_PTR(-EPROBE_DEFER); 688 + void *ptr; 693 689 694 - if (!__smem) 695 - return ptr; 690 + if (IS_ERR(__smem)) 691 + return __smem; 696 692 697 - if (WARN_ON(item >= __smem->item_count)) 693 + if (item >= __smem->item_count) 698 694 return ERR_PTR(-EINVAL); 699 695 700 696 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { ··· 727 723 struct smem_header *header; 728 724 unsigned ret; 729 725 730 - if (!__smem) 731 - return -EPROBE_DEFER; 726 + if (IS_ERR(__smem)) 727 + return PTR_ERR(__smem); 732 728 733 729 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { 734 730 part = &__smem->partitions[host]; ··· 1185 1181 header = smem->regions[0].virt_base; 1186 1182 if (le32_to_cpu(header->initialized) != 1 || 1187 1183 le32_to_cpu(header->reserved)) { 1188 - dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); 1189 - 
return -EINVAL; 1184 + __smem = ERR_PTR(-ENODEV); 1185 + return dev_err_probe(&pdev->dev, PTR_ERR(__smem), "SMEM is not initialized by SBL\n"); 1190 1186 } 1191 1187 1192 1188 hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); ··· 1194 1190 return dev_err_probe(&pdev->dev, hwlock_id, 1195 1191 "failed to retrieve hwlock\n"); 1196 1192 1197 - smem->hwlock = hwspin_lock_request_specific(hwlock_id); 1193 + smem->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, hwlock_id); 1198 1194 if (!smem->hwlock) 1199 1195 return -ENXIO; 1200 1196 ··· 1247 1243 { 1248 1244 platform_device_unregister(__smem->socinfo); 1249 1245 1250 - hwspin_lock_free(__smem->hwlock); 1251 1246 __smem = NULL; 1252 1247 } 1253 1248
+90 -16
drivers/soc/qcom/socinfo.c
··· 37 37 */ 38 38 #define SMEM_IMAGE_TABLE_BOOT_INDEX 0 39 39 #define SMEM_IMAGE_TABLE_TZ_INDEX 1 40 + #define SMEM_IMAGE_TABLE_TZSECAPP_INDEX 2 40 41 #define SMEM_IMAGE_TABLE_RPM_INDEX 3 42 + #define SMEM_IMAGE_TABLE_SDI_INDEX 4 43 + #define SMEM_IMAGE_TABLE_HYP_INDEX 5 44 + #define SMEM_IMAGE_TABLE_ADSP1_INDEX 6 45 + #define SMEM_IMAGE_TABLE_ADSP2_INDEX 7 46 + #define SMEM_IMAGE_TABLE_CDSP2_INDEX 8 41 47 #define SMEM_IMAGE_TABLE_APPSBL_INDEX 9 42 48 #define SMEM_IMAGE_TABLE_APPS_INDEX 10 43 49 #define SMEM_IMAGE_TABLE_MPSS_INDEX 11 ··· 52 46 #define SMEM_IMAGE_TABLE_VIDEO_INDEX 14 53 47 #define SMEM_IMAGE_TABLE_DSPS_INDEX 15 54 48 #define SMEM_IMAGE_TABLE_CDSP_INDEX 16 49 + #define SMEM_IMAGE_TABLE_NPU_INDEX 17 50 + #define SMEM_IMAGE_TABLE_WPSS_INDEX 18 55 51 #define SMEM_IMAGE_TABLE_CDSP1_INDEX 19 56 52 #define SMEM_IMAGE_TABLE_GPDSP_INDEX 20 57 53 #define SMEM_IMAGE_TABLE_GPDSP1_INDEX 21 54 + #define SMEM_IMAGE_TABLE_SENSORPD_INDEX 22 55 + #define SMEM_IMAGE_TABLE_AUDIOPD_INDEX 23 56 + #define SMEM_IMAGE_TABLE_OEMPD_INDEX 24 57 + #define SMEM_IMAGE_TABLE_CHARGERPD_INDEX 25 58 + #define SMEM_IMAGE_TABLE_OISPD_INDEX 26 59 + #define SMEM_IMAGE_TABLE_SOCCP_INDEX 27 58 60 #define SMEM_IMAGE_TABLE_TME_INDEX 28 61 + #define SMEM_IMAGE_TABLE_GEARVM_INDEX 29 62 + #define SMEM_IMAGE_TABLE_UEFI_INDEX 30 63 + #define SMEM_IMAGE_TABLE_CDSP3_INDEX 31 64 + #define SMEM_IMAGE_TABLE_AUDIOPD_ADSP1_INDEX 32 65 + #define SMEM_IMAGE_TABLE_AUDIOPD_ADSP2_INDEX 33 66 + #define SMEM_IMAGE_TABLE_DCP_INDEX 34 67 + #define SMEM_IMAGE_TABLE_OOBS_INDEX 35 68 + #define SMEM_IMAGE_TABLE_OOBNS_INDEX 36 69 + #define SMEM_IMAGE_TABLE_DEVCFG_INDEX 37 70 + #define SMEM_IMAGE_TABLE_BTPD_INDEX 38 71 + #define SMEM_IMAGE_TABLE_QECP_INDEX 39 72 + 59 73 #define SMEM_IMAGE_VERSION_TABLE 469 74 + #define SMEM_IMAGE_VERSION_TABLE_2 667 60 75 61 76 /* 62 77 * SMEM Image table names 63 78 */ 64 79 static const char *const socinfo_image_names[] = { 80 + [SMEM_IMAGE_TABLE_ADSP1_INDEX] = "adsp1", 81 + 
[SMEM_IMAGE_TABLE_ADSP2_INDEX] = "adsp2", 65 82 [SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp", 66 83 [SMEM_IMAGE_TABLE_APPSBL_INDEX] = "appsbl", 67 84 [SMEM_IMAGE_TABLE_APPS_INDEX] = "apps", 85 + [SMEM_IMAGE_TABLE_AUDIOPD_INDEX] = "audiopd", 86 + [SMEM_IMAGE_TABLE_AUDIOPD_ADSP1_INDEX] = "audiopd_adsp1", 87 + [SMEM_IMAGE_TABLE_AUDIOPD_ADSP2_INDEX] = "audiopd_adsp2", 68 88 [SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot", 69 - [SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss", 70 - [SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss", 71 - [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm", 72 - [SMEM_IMAGE_TABLE_TZ_INDEX] = "tz", 73 - [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video", 74 - [SMEM_IMAGE_TABLE_DSPS_INDEX] = "dsps", 75 - [SMEM_IMAGE_TABLE_CDSP_INDEX] = "cdsp", 89 + [SMEM_IMAGE_TABLE_BTPD_INDEX] = "btpd", 76 90 [SMEM_IMAGE_TABLE_CDSP1_INDEX] = "cdsp1", 77 - [SMEM_IMAGE_TABLE_GPDSP_INDEX] = "gpdsp", 91 + [SMEM_IMAGE_TABLE_CDSP2_INDEX] = "cdsp2", 92 + [SMEM_IMAGE_TABLE_CDSP3_INDEX] = "cdsp3", 93 + [SMEM_IMAGE_TABLE_CDSP_INDEX] = "cdsp", 94 + [SMEM_IMAGE_TABLE_CHARGERPD_INDEX] = "chargerpd", 95 + [SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss", 96 + [SMEM_IMAGE_TABLE_DCP_INDEX] = "dcp", 97 + [SMEM_IMAGE_TABLE_DEVCFG_INDEX] = "devcfg", 98 + [SMEM_IMAGE_TABLE_DSPS_INDEX] = "dsps", 99 + [SMEM_IMAGE_TABLE_GEARVM_INDEX] = "gearvm", 78 100 [SMEM_IMAGE_TABLE_GPDSP1_INDEX] = "gpdsp1", 101 + [SMEM_IMAGE_TABLE_GPDSP_INDEX] = "gpdsp", 102 + [SMEM_IMAGE_TABLE_HYP_INDEX] = "hyp", 103 + [SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss", 104 + [SMEM_IMAGE_TABLE_NPU_INDEX] = "npu", 105 + [SMEM_IMAGE_TABLE_OEMPD_INDEX] = "oempd", 106 + [SMEM_IMAGE_TABLE_OISPD_INDEX] = "oispd", 107 + [SMEM_IMAGE_TABLE_OOBNS_INDEX] = "oobns", 108 + [SMEM_IMAGE_TABLE_OOBS_INDEX] = "oobs", 109 + [SMEM_IMAGE_TABLE_QECP_INDEX] = "qecp", 110 + [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm", 111 + [SMEM_IMAGE_TABLE_SDI_INDEX] = "sdi", 112 + [SMEM_IMAGE_TABLE_SENSORPD_INDEX] = "sensorpd", 113 + [SMEM_IMAGE_TABLE_SOCCP_INDEX] = "soccp", 79 114 [SMEM_IMAGE_TABLE_TME_INDEX] = "tme", 115 + 
[SMEM_IMAGE_TABLE_TZ_INDEX] = "tz", 116 + [SMEM_IMAGE_TABLE_TZSECAPP_INDEX] = "tzsecapp", 117 + [SMEM_IMAGE_TABLE_UEFI_INDEX] = "uefi", 118 + [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video", 119 + [SMEM_IMAGE_TABLE_WPSS_INDEX] = "wpss", 80 120 }; 81 121 82 122 static const char *const pmic_models[] = { ··· 213 161 u32 num_func_clusters; 214 162 u32 boot_cluster; 215 163 u32 boot_core; 164 + u32 raw_package_type; 216 165 }; 217 166 218 167 struct smem_image_version { ··· 468 415 { qcom_board_id(SC7280) }, 469 416 { qcom_board_id(SC7180P) }, 470 417 { qcom_board_id(QCM6490) }, 418 + { qcom_board_id(QCS6490) }, 471 419 { qcom_board_id(SM7325P) }, 472 420 { qcom_board_id(IPQ5000) }, 473 421 { qcom_board_id(IPQ0509) }, ··· 515 461 { qcom_board_id(IPQ5424) }, 516 462 { qcom_board_id(QCM6690) }, 517 463 { qcom_board_id(QCS6690) }, 464 + { qcom_board_id(SM8850) }, 518 465 { qcom_board_id(IPQ5404) }, 519 466 { qcom_board_id(QCS9100) }, 520 467 { qcom_board_id(QCS8300) }, ··· 664 609 struct smem_image_version *versions; 665 610 struct dentry *dentry; 666 611 size_t size; 667 - int i; 612 + int i, j; 668 613 unsigned int num_pmics; 669 614 unsigned int pmic_array_offset; 670 615 ··· 676 621 &qcom_socinfo->info.fmt); 677 622 678 623 switch (qcom_socinfo->info.fmt) { 624 + case SOCINFO_VERSION(0, 23): 625 + case SOCINFO_VERSION(0, 22): 626 + case SOCINFO_VERSION(0, 21): 627 + case SOCINFO_VERSION(0, 20): 628 + qcom_socinfo->info.raw_package_type = __le32_to_cpu(info->raw_package_type); 629 + debugfs_create_u32("raw_package_type", 0444, qcom_socinfo->dbg_root, 630 + &qcom_socinfo->info.raw_package_type); 631 + fallthrough; 679 632 case SOCINFO_VERSION(0, 19): 680 633 qcom_socinfo->info.num_func_clusters = __le32_to_cpu(info->num_func_clusters); 681 634 qcom_socinfo->info.boot_cluster = __le32_to_cpu(info->boot_cluster); ··· 816 753 break; 817 754 } 818 755 819 - versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE, 820 - &size); 821 - 822 - for (i = 0; i < 
ARRAY_SIZE(socinfo_image_names); i++) { 756 + for (i = 0, j = 0; i < ARRAY_SIZE(socinfo_image_names); i++, j++) { 823 757 if (!socinfo_image_names[i]) 824 758 continue; 825 759 760 + if (i == 0) { 761 + versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, 762 + SMEM_IMAGE_VERSION_TABLE, 763 + &size); 764 + } else if (i == 32) { 765 + versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, 766 + SMEM_IMAGE_VERSION_TABLE_2, 767 + &size); 768 + if (IS_ERR(versions)) 769 + break; 770 + 771 + j = 0; 772 + } 773 + 826 774 dentry = debugfs_create_dir(socinfo_image_names[i], 827 775 qcom_socinfo->dbg_root); 828 - debugfs_create_file("name", 0444, dentry, &versions[i], 776 + debugfs_create_file("name", 0444, dentry, &versions[j], 829 777 &qcom_image_name_ops); 830 - debugfs_create_file("variant", 0444, dentry, &versions[i], 778 + debugfs_create_file("variant", 0444, dentry, &versions[j], 831 779 &qcom_image_variant_ops); 832 - debugfs_create_file("oem", 0444, dentry, &versions[i], 780 + debugfs_create_file("oem", 0444, dentry, &versions[j], 833 781 &qcom_image_oem_ops); 834 782 } 835 783 }
+24
drivers/soc/qcom/ubwc_config.c
··· 16 16 /* no UBWC, no HBB */ 17 17 }; 18 18 19 + static const struct qcom_ubwc_cfg_data kaanapali_data = { 20 + .ubwc_enc_version = UBWC_6_0, 21 + .ubwc_dec_version = UBWC_6_0, 22 + .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 | 23 + UBWC_SWIZZLE_ENABLE_LVL3, 24 + .ubwc_bank_spread = true, 25 + .highest_bank_bit = 16, 26 + .macrotile_mode = true, 27 + }; 28 + 19 29 static const struct qcom_ubwc_cfg_data msm8937_data = { 20 30 .ubwc_enc_version = UBWC_1_0, 21 31 .ubwc_dec_version = UBWC_1_0, ··· 228 218 .macrotile_mode = true, 229 219 }; 230 220 221 + static const struct qcom_ubwc_cfg_data glymur_data = { 222 + .ubwc_enc_version = UBWC_5_0, 223 + .ubwc_dec_version = UBWC_5_0, 224 + .ubwc_swizzle = UBWC_SWIZZLE_ENABLE_LVL2 | 225 + UBWC_SWIZZLE_ENABLE_LVL3, 226 + .ubwc_bank_spread = true, 227 + /* TODO: highest_bank_bit = 15 for LP_DDR4 */ 228 + .highest_bank_bit = 16, 229 + .macrotile_mode = true, 230 + }; 231 + 231 232 static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = { 232 233 { .compatible = "qcom,apq8016", .data = &no_ubwc_data }, 233 234 { .compatible = "qcom,apq8026", .data = &no_ubwc_data }, 234 235 { .compatible = "qcom,apq8074", .data = &no_ubwc_data }, 235 236 { .compatible = "qcom,apq8096", .data = &msm8998_data }, 237 + { .compatible = "qcom,kaanapali", .data = &kaanapali_data, }, 238 + { .compatible = "qcom,glymur", .data = &glymur_data}, 236 239 { .compatible = "qcom,msm8226", .data = &no_ubwc_data }, 237 240 { .compatible = "qcom,msm8916", .data = &no_ubwc_data }, 238 241 { .compatible = "qcom,msm8917", .data = &no_ubwc_data }, ··· 260 237 { .compatible = "qcom,msm8998", .data = &msm8998_data }, 261 238 { .compatible = "qcom,qcm2290", .data = &qcm2290_data, }, 262 239 { .compatible = "qcom,qcm6490", .data = &sc7280_data, }, 240 + { .compatible = "qcom,qcs8300", .data = &sc8280xp_data, }, 263 241 { .compatible = "qcom,sa8155p", .data = &sm8150_data, }, 264 242 { .compatible = "qcom,sa8540p", .data = &sc8280xp_data, }, 265 243 { 
.compatible = "qcom,sa8775p", .data = &sa8775p_data, },
+69
drivers/soc/renesas/r9a08g045-sysc.c
··· 6 6 */ 7 7 8 8 #include <linux/bits.h> 9 + #include <linux/device.h> 9 10 #include <linux/init.h> 10 11 11 12 #include "rz-sysc.h" 13 + 14 + #define SYS_XSPI_MAP_STAADD_CS0 0x348 15 + #define SYS_XSPI_MAP_ENDADD_CS0 0x34c 16 + #define SYS_XSPI_MAP_STAADD_CS1 0x350 17 + #define SYS_XSPI_MAP_ENDADD_CS1 0x354 18 + #define SYS_GETH0_CFG 0x380 19 + #define SYS_GETH1_CFG 0x390 20 + #define SYS_PCIE_CFG 0x3a0 21 + #define SYS_PCIE_MON 0x3a4 22 + #define SYS_PCIE_ERR_MON 0x3ac 23 + #define SYS_PCIE_PHY 0x3b4 24 + #define SYS_I2C0_CFG 0x400 25 + #define SYS_I2C1_CFG 0x410 26 + #define SYS_I2C2_CFG 0x420 27 + #define SYS_I2C3_CFG 0x430 28 + #define SYS_I3C_CFG 0x440 29 + #define SYS_USB_PWRRDY 0xd70 30 + #define SYS_PCIE_RST_RSM_B 0xd74 12 31 13 32 static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initconst = { 14 33 .family = "RZ/G3S", ··· 37 18 .specific_id_mask = GENMASK(27, 0), 38 19 }; 39 20 21 + static bool rzg3s_regmap_readable_reg(struct device *dev, unsigned int reg) 22 + { 23 + switch (reg) { 24 + case SYS_XSPI_MAP_STAADD_CS0: 25 + case SYS_XSPI_MAP_ENDADD_CS0: 26 + case SYS_XSPI_MAP_STAADD_CS1: 27 + case SYS_XSPI_MAP_ENDADD_CS1: 28 + case SYS_GETH0_CFG: 29 + case SYS_GETH1_CFG: 30 + case SYS_PCIE_CFG: 31 + case SYS_PCIE_MON: 32 + case SYS_PCIE_ERR_MON: 33 + case SYS_PCIE_PHY: 34 + case SYS_I2C0_CFG: 35 + case SYS_I2C1_CFG: 36 + case SYS_I2C2_CFG: 37 + case SYS_I2C3_CFG: 38 + case SYS_I3C_CFG: 39 + case SYS_USB_PWRRDY: 40 + case SYS_PCIE_RST_RSM_B: 41 + return true; 42 + default: 43 + return false; 44 + } 45 + } 46 + 47 + static bool rzg3s_regmap_writeable_reg(struct device *dev, unsigned int reg) 48 + { 49 + switch (reg) { 50 + case SYS_XSPI_MAP_STAADD_CS0: 51 + case SYS_XSPI_MAP_ENDADD_CS0: 52 + case SYS_XSPI_MAP_STAADD_CS1: 53 + case SYS_XSPI_MAP_ENDADD_CS1: 54 + case SYS_PCIE_CFG: 55 + case SYS_PCIE_PHY: 56 + case SYS_I2C0_CFG: 57 + case SYS_I2C1_CFG: 58 + case SYS_I2C2_CFG: 59 + case SYS_I2C3_CFG: 60 + case SYS_I3C_CFG: 61 + case 
SYS_USB_PWRRDY: 62 + case SYS_PCIE_RST_RSM_B: 63 + return true; 64 + default: 65 + return false; 66 + } 67 + } 68 + 40 69 const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = { 41 70 .soc_id_init_data = &rzg3s_sysc_soc_id_init_data, 71 + .readable_reg = rzg3s_regmap_readable_reg, 72 + .writeable_reg = rzg3s_regmap_writeable_reg, 42 73 .max_register = 0xe20, 43 74 };
+79
drivers/soc/renesas/r9a09g047-sys.c
··· 29 29 #define SYS_LSI_PRR_CA55_DIS BIT(8) 30 30 #define SYS_LSI_PRR_NPU_DIS BIT(1) 31 31 32 + #define SYS_LSI_OTPTSU1TRMVAL0 0x330 33 + #define SYS_LSI_OTPTSU1TRMVAL1 0x334 34 + #define SYS_SPI_STAADDCS0 0x900 35 + #define SYS_SPI_ENDADDCS0 0x904 36 + #define SYS_SPI_STAADDCS1 0x908 37 + #define SYS_SPI_ENDADDCS1 0x90c 38 + #define SYS_VSP_CLK 0xe00 39 + #define SYS_GBETH0_CFG 0xf00 40 + #define SYS_GBETH1_CFG 0xf04 41 + #define SYS_PCIE_INTX_CH0 0x1000 42 + #define SYS_PCIE_MSI1_CH0 0x1004 43 + #define SYS_PCIE_MSI2_CH0 0x1008 44 + #define SYS_PCIE_MSI3_CH0 0x100c 45 + #define SYS_PCIE_MSI4_CH0 0x1010 46 + #define SYS_PCIE_MSI5_CH0 0x1014 47 + #define SYS_PCIE_PME_CH0 0x1018 48 + #define SYS_PCIE_ACK_CH0 0x101c 49 + #define SYS_PCIE_MISC_CH0 0x1020 50 + #define SYS_PCIE_MODE_CH0 0x1024 51 + #define SYS_ADC_CFG 0x1600 52 + 32 53 static void rzg3e_sys_print_id(struct device *dev, 33 54 void __iomem *sysc_base, 34 55 struct soc_device_attribute *soc_dev_attr) ··· 83 62 .print_id = rzg3e_sys_print_id, 84 63 }; 85 64 65 + static bool rzg3e_regmap_readable_reg(struct device *dev, unsigned int reg) 66 + { 67 + switch (reg) { 68 + case SYS_LSI_OTPTSU1TRMVAL0: 69 + case SYS_LSI_OTPTSU1TRMVAL1: 70 + case SYS_SPI_STAADDCS0: 71 + case SYS_SPI_ENDADDCS0: 72 + case SYS_SPI_STAADDCS1: 73 + case SYS_SPI_ENDADDCS1: 74 + case SYS_VSP_CLK: 75 + case SYS_GBETH0_CFG: 76 + case SYS_GBETH1_CFG: 77 + case SYS_PCIE_INTX_CH0: 78 + case SYS_PCIE_MSI1_CH0: 79 + case SYS_PCIE_MSI2_CH0: 80 + case SYS_PCIE_MSI3_CH0: 81 + case SYS_PCIE_MSI4_CH0: 82 + case SYS_PCIE_MSI5_CH0: 83 + case SYS_PCIE_PME_CH0: 84 + case SYS_PCIE_ACK_CH0: 85 + case SYS_PCIE_MISC_CH0: 86 + case SYS_PCIE_MODE_CH0: 87 + case SYS_ADC_CFG: 88 + return true; 89 + default: 90 + return false; 91 + } 92 + } 93 + 94 + static bool rzg3e_regmap_writeable_reg(struct device *dev, unsigned int reg) 95 + { 96 + switch (reg) { 97 + case SYS_SPI_STAADDCS0: 98 + case SYS_SPI_ENDADDCS0: 99 + case SYS_SPI_STAADDCS1: 100 + case 
SYS_SPI_ENDADDCS1: 101 + case SYS_VSP_CLK: 102 + case SYS_GBETH0_CFG: 103 + case SYS_GBETH1_CFG: 104 + case SYS_PCIE_INTX_CH0: 105 + case SYS_PCIE_MSI1_CH0: 106 + case SYS_PCIE_MSI2_CH0: 107 + case SYS_PCIE_MSI3_CH0: 108 + case SYS_PCIE_MSI4_CH0: 109 + case SYS_PCIE_MSI5_CH0: 110 + case SYS_PCIE_PME_CH0: 111 + case SYS_PCIE_ACK_CH0: 112 + case SYS_PCIE_MISC_CH0: 113 + case SYS_PCIE_MODE_CH0: 114 + case SYS_ADC_CFG: 115 + return true; 116 + default: 117 + return false; 118 + } 119 + } 120 + 86 121 const struct rz_sysc_init_data rzg3e_sys_init_data = { 87 122 .soc_id_init_data = &rzg3e_sys_soc_id_init_data, 123 + .readable_reg = rzg3e_regmap_readable_reg, 124 + .writeable_reg = rzg3e_regmap_writeable_reg, 88 125 .max_register = 0x170c, 89 126 };
+69
drivers/soc/renesas/r9a09g056-sys.c
··· 34 34 #define SYS_RZV2N_FEATURE_C55 BIT(1) 35 35 #define SYS_RZV2N_FEATURE_SEC BIT(2) 36 36 37 + #define SYS_LSI_OTPTSU0TRMVAL0 0x320 38 + #define SYS_LSI_OTPTSU0TRMVAL1 0x324 39 + #define SYS_LSI_OTPTSU1TRMVAL0 0x330 40 + #define SYS_LSI_OTPTSU1TRMVAL1 0x334 41 + #define SYS_GBETH0_CFG 0xf00 42 + #define SYS_GBETH1_CFG 0xf04 43 + #define SYS_PCIE_INTX_CH0 0x1000 44 + #define SYS_PCIE_MSI1_CH0 0x1004 45 + #define SYS_PCIE_MSI2_CH0 0x1008 46 + #define SYS_PCIE_MSI3_CH0 0x100c 47 + #define SYS_PCIE_MSI4_CH0 0x1010 48 + #define SYS_PCIE_MSI5_CH0 0x1014 49 + #define SYS_PCIE_PME_CH0 0x1018 50 + #define SYS_PCIE_ACK_CH0 0x101c 51 + #define SYS_PCIE_MISC_CH0 0x1020 52 + #define SYS_PCIE_MODE_CH0 0x1024 53 + #define SYS_ADC_CFG 0x1600 54 + 37 55 static void rzv2n_sys_print_id(struct device *dev, 38 56 void __iomem *sysc_base, 39 57 struct soc_device_attribute *soc_dev_attr) ··· 88 70 .print_id = rzv2n_sys_print_id, 89 71 }; 90 72 73 + static bool rzv2n_regmap_readable_reg(struct device *dev, unsigned int reg) 74 + { 75 + switch (reg) { 76 + case SYS_LSI_OTPTSU0TRMVAL0: 77 + case SYS_LSI_OTPTSU0TRMVAL1: 78 + case SYS_LSI_OTPTSU1TRMVAL0: 79 + case SYS_LSI_OTPTSU1TRMVAL1: 80 + case SYS_GBETH0_CFG: 81 + case SYS_GBETH1_CFG: 82 + case SYS_PCIE_INTX_CH0: 83 + case SYS_PCIE_MSI1_CH0: 84 + case SYS_PCIE_MSI2_CH0: 85 + case SYS_PCIE_MSI3_CH0: 86 + case SYS_PCIE_MSI4_CH0: 87 + case SYS_PCIE_MSI5_CH0: 88 + case SYS_PCIE_PME_CH0: 89 + case SYS_PCIE_ACK_CH0: 90 + case SYS_PCIE_MISC_CH0: 91 + case SYS_PCIE_MODE_CH0: 92 + case SYS_ADC_CFG: 93 + return true; 94 + default: 95 + return false; 96 + } 97 + } 98 + 99 + static bool rzv2n_regmap_writeable_reg(struct device *dev, unsigned int reg) 100 + { 101 + switch (reg) { 102 + case SYS_GBETH0_CFG: 103 + case SYS_GBETH1_CFG: 104 + case SYS_PCIE_INTX_CH0: 105 + case SYS_PCIE_MSI1_CH0: 106 + case SYS_PCIE_MSI2_CH0: 107 + case SYS_PCIE_MSI3_CH0: 108 + case SYS_PCIE_MSI4_CH0: 109 + case SYS_PCIE_MSI5_CH0: 110 + case SYS_PCIE_PME_CH0: 111 + 
case SYS_PCIE_ACK_CH0: 112 + case SYS_PCIE_MISC_CH0: 113 + case SYS_PCIE_MODE_CH0: 114 + case SYS_ADC_CFG: 115 + return true; 116 + default: 117 + return false; 118 + } 119 + } 120 + 91 121 const struct rz_sysc_init_data rzv2n_sys_init_data = { 92 122 .soc_id_init_data = &rzv2n_sys_soc_id_init_data, 123 + .readable_reg = rzv2n_regmap_readable_reg, 124 + .writeable_reg = rzv2n_regmap_writeable_reg, 125 + .max_register = 0x170c, 93 126 };
+101
drivers/soc/renesas/r9a09g057-sys.c
··· 29 29 #define SYS_LSI_PRR_GPU_DIS BIT(0) 30 30 #define SYS_LSI_PRR_ISP_DIS BIT(4) 31 31 32 + #define SYS_LSI_OTPTSU0TRMVAL0 0x320 33 + #define SYS_LSI_OTPTSU0TRMVAL1 0x324 34 + #define SYS_LSI_OTPTSU1TRMVAL0 0x330 35 + #define SYS_LSI_OTPTSU1TRMVAL1 0x334 36 + #define SYS_GBETH0_CFG 0xf00 37 + #define SYS_GBETH1_CFG 0xf04 38 + #define SYS_PCIE_INTX_CH0 0x1000 39 + #define SYS_PCIE_MSI1_CH0 0x1004 40 + #define SYS_PCIE_MSI2_CH0 0x1008 41 + #define SYS_PCIE_MSI3_CH0 0x100c 42 + #define SYS_PCIE_MSI4_CH0 0x1010 43 + #define SYS_PCIE_MSI5_CH0 0x1014 44 + #define SYS_PCIE_PME_CH0 0x1018 45 + #define SYS_PCIE_ACK_CH0 0x101c 46 + #define SYS_PCIE_MISC_CH0 0x1020 47 + #define SYS_PCIE_MODE_CH0 0x1024 48 + #define SYS_PCIE_INTX_CH1 0x1030 49 + #define SYS_PCIE_MSI1_CH1 0x1034 50 + #define SYS_PCIE_MSI2_CH1 0x1038 51 + #define SYS_PCIE_MSI3_CH1 0x103c 52 + #define SYS_PCIE_MSI4_CH1 0x1040 53 + #define SYS_PCIE_MSI5_CH1 0x1044 54 + #define SYS_PCIE_PME_CH1 0x1048 55 + #define SYS_PCIE_ACK_CH1 0x104c 56 + #define SYS_PCIE_MISC_CH1 0x1050 57 + #define SYS_PCIE_MODE_CH1 0x1054 58 + #define SYS_PCIE_MODE 0x1060 59 + #define SYS_ADC_CFG 0x1600 60 + 32 61 static void rzv2h_sys_print_id(struct device *dev, 33 62 void __iomem *sysc_base, 34 63 struct soc_device_attribute *soc_dev_attr) ··· 91 62 .print_id = rzv2h_sys_print_id, 92 63 }; 93 64 65 + static bool rzv2h_regmap_readable_reg(struct device *dev, unsigned int reg) 66 + { 67 + switch (reg) { 68 + case SYS_LSI_OTPTSU0TRMVAL0: 69 + case SYS_LSI_OTPTSU0TRMVAL1: 70 + case SYS_LSI_OTPTSU1TRMVAL0: 71 + case SYS_LSI_OTPTSU1TRMVAL1: 72 + case SYS_GBETH0_CFG: 73 + case SYS_GBETH1_CFG: 74 + case SYS_PCIE_INTX_CH0: 75 + case SYS_PCIE_MSI1_CH0: 76 + case SYS_PCIE_MSI2_CH0: 77 + case SYS_PCIE_MSI3_CH0: 78 + case SYS_PCIE_MSI4_CH0: 79 + case SYS_PCIE_MSI5_CH0: 80 + case SYS_PCIE_PME_CH0: 81 + case SYS_PCIE_ACK_CH0: 82 + case SYS_PCIE_MISC_CH0: 83 + case SYS_PCIE_MODE_CH0: 84 + case SYS_PCIE_INTX_CH1: 85 + case SYS_PCIE_MSI1_CH1: 86 + 
case SYS_PCIE_MSI2_CH1: 87 + case SYS_PCIE_MSI3_CH1: 88 + case SYS_PCIE_MSI4_CH1: 89 + case SYS_PCIE_MSI5_CH1: 90 + case SYS_PCIE_PME_CH1: 91 + case SYS_PCIE_ACK_CH1: 92 + case SYS_PCIE_MISC_CH1: 93 + case SYS_PCIE_MODE_CH1: 94 + case SYS_PCIE_MODE: 95 + case SYS_ADC_CFG: 96 + return true; 97 + default: 98 + return false; 99 + } 100 + } 101 + 102 + static bool rzv2h_regmap_writeable_reg(struct device *dev, unsigned int reg) 103 + { 104 + switch (reg) { 105 + case SYS_GBETH0_CFG: 106 + case SYS_GBETH1_CFG: 107 + case SYS_PCIE_INTX_CH0: 108 + case SYS_PCIE_MSI1_CH0: 109 + case SYS_PCIE_MSI2_CH0: 110 + case SYS_PCIE_MSI3_CH0: 111 + case SYS_PCIE_MSI4_CH0: 112 + case SYS_PCIE_MSI5_CH0: 113 + case SYS_PCIE_PME_CH0: 114 + case SYS_PCIE_ACK_CH0: 115 + case SYS_PCIE_MISC_CH0: 116 + case SYS_PCIE_MODE_CH0: 117 + case SYS_PCIE_INTX_CH1: 118 + case SYS_PCIE_MSI1_CH1: 119 + case SYS_PCIE_MSI2_CH1: 120 + case SYS_PCIE_MSI3_CH1: 121 + case SYS_PCIE_MSI4_CH1: 122 + case SYS_PCIE_MSI5_CH1: 123 + case SYS_PCIE_PME_CH1: 124 + case SYS_PCIE_ACK_CH1: 125 + case SYS_PCIE_MISC_CH1: 126 + case SYS_PCIE_MODE_CH1: 127 + case SYS_PCIE_MODE: 128 + case SYS_ADC_CFG: 129 + return true; 130 + default: 131 + return false; 132 + } 133 + } 134 + 94 135 const struct rz_sysc_init_data rzv2h_sys_init_data = { 95 136 .soc_id_init_data = &rzv2h_sys_soc_id_init_data, 137 + .readable_reg = rzv2h_regmap_readable_reg, 138 + .writeable_reg = rzv2h_regmap_writeable_reg, 96 139 .max_register = 0x170c, 97 140 };
+2 -1
drivers/soc/renesas/rcar-rst.c
··· 12 12 13 13 #define WDTRSTCR_RESET 0xA55A0002 14 14 #define WDTRSTCR 0x0054 15 + #define GEN4_WDTRSTCR_RESET 0xA55A8002 15 16 #define GEN4_WDTRSTCR 0x0010 16 17 17 18 #define CR7BAR 0x0070 ··· 31 30 32 31 static int rcar_rst_v3u_enable_wdt_reset(void __iomem *base) 33 32 { 34 - iowrite32(WDTRSTCR_RESET, base + GEN4_WDTRSTCR); 33 + iowrite32(GEN4_WDTRSTCR_RESET, base + GEN4_WDTRSTCR); 35 34 return 0; 36 35 } 37 36
+2
drivers/soc/renesas/rz-sysc.c
··· 140 140 regmap_cfg->val_bits = 32; 141 141 regmap_cfg->fast_io = true; 142 142 regmap_cfg->max_register = data->max_register; 143 + regmap_cfg->readable_reg = data->readable_reg; 144 + regmap_cfg->writeable_reg = data->writeable_reg; 143 145 144 146 regmap = devm_regmap_init_mmio(dev, sysc->base, regmap_cfg); 145 147 if (IS_ERR(regmap))
+4
drivers/soc/renesas/rz-sysc.h
··· 34 34 /** 35 35 * struct rz_sysc_init_data - RZ SYSC initialization data 36 36 * @soc_id_init_data: RZ SYSC SoC ID initialization data 37 + * @writeable_reg: Regmap writeable register check function 38 + * @readable_reg: Regmap readable register check function 37 39 * @max_register: Maximum SYSC register offset to be used by the regmap config 38 40 */ 39 41 struct rz_sysc_init_data { 40 42 const struct rz_sysc_soc_id_init_data *soc_id_init_data; 43 + bool (*writeable_reg)(struct device *dev, unsigned int reg); 44 + bool (*readable_reg)(struct device *dev, unsigned int reg); 41 45 u32 max_register; 42 46 }; 43 47
+15
drivers/soc/rockchip/grf.c
··· 91 91 92 92 static const struct rockchip_grf_value rk3368_defaults[] __initconst = { 93 93 { "jtag switching", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(13), 0) }, 94 + { "pwm select", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(12), 1) }, 94 95 }; 95 96 96 97 static const struct rockchip_grf_info rk3368_grf __initconst = { 97 98 .values = rk3368_defaults, 98 99 .num_values = ARRAY_SIZE(rk3368_defaults), 100 + }; 101 + 102 + #define RK3368_PMUGRF_SOC_CON0 0x100 103 + 104 + static const struct rockchip_grf_value rk3368_pmugrf_defaults[] __initconst = { 105 + { "pwm2 select", RK3368_PMUGRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(7), 0) }, 106 + }; 107 + 108 + static const struct rockchip_grf_info rk3368_pmugrf __initconst = { 109 + .values = rk3368_pmugrf_defaults, 110 + .num_values = ARRAY_SIZE(rk3368_pmugrf_defaults), 99 111 }; 100 112 101 113 #define RK3399_GRF_SOC_CON7 0xe21c ··· 187 175 }, { 188 176 .compatible = "rockchip,rk3368-grf", 189 177 .data = (void *)&rk3368_grf, 178 + }, { 179 + .compatible = "rockchip,rk3368-pmugrf", 180 + .data = (void *)&rk3368_pmugrf, 190 181 }, { 191 182 .compatible = "rockchip,rk3399-grf", 192 183 .data = (void *)&rk3399_grf,
+2 -1
drivers/soc/samsung/Makefile
··· 6 6 7 7 obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o 8 8 9 - obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o 9 + obj-$(CONFIG_EXYNOS_PMU) += exynos_pmu.o 10 + exynos_pmu-y += exynos-pmu.o gs101-pmu.o 10 11 11 12 obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \ 12 13 exynos5250-pmu.o exynos5420-pmu.o
+10 -8
drivers/soc/samsung/exynos-chipid.c
··· 57 57 { "EXYNOS5800", 0xE5422000 }, 58 58 { "EXYNOS7420", 0xE7420000 }, 59 59 { "EXYNOS7870", 0xE7870000 }, 60 + { "EXYNOS8890", 0xE8890000 }, 60 61 /* Compatible with: samsung,exynos850-chipid */ 61 62 { "EXYNOS2200", 0xE9925000 }, 62 63 { "EXYNOS7885", 0xE7885000 }, 63 64 { "EXYNOS850", 0xE3830000 }, 64 65 { "EXYNOS8895", 0xE8895000 }, 66 + { "EXYNOS9610", 0xE9610000 }, 65 67 { "EXYNOS9810", 0xE9810000 }, 66 68 { "EXYNOS990", 0xE9830000 }, 67 69 { "EXYNOSAUTOV9", 0xAAA80000 }, ··· 109 107 const struct exynos_chipid_variant *drv_data; 110 108 struct exynos_chipid_info soc_info; 111 109 struct soc_device_attribute *soc_dev_attr; 110 + struct device *dev = &pdev->dev; 112 111 struct soc_device *soc_dev; 113 112 struct device_node *root; 114 113 struct regmap *regmap; 115 114 int ret; 116 115 117 - drv_data = of_device_get_match_data(&pdev->dev); 116 + drv_data = of_device_get_match_data(dev); 118 117 if (!drv_data) 119 118 return -EINVAL; 120 119 121 - regmap = device_node_to_regmap(pdev->dev.of_node); 120 + regmap = device_node_to_regmap(dev->of_node); 122 121 if (IS_ERR(regmap)) 123 122 return PTR_ERR(regmap); 124 123 ··· 127 124 if (ret < 0) 128 125 return ret; 129 126 130 - soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), 131 - GFP_KERNEL); 127 + soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL); 132 128 if (!soc_dev_attr) 133 129 return -ENOMEM; 134 130 ··· 137 135 of_property_read_string(root, "model", &soc_dev_attr->machine); 138 136 of_node_put(root); 139 137 140 - soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, 141 - "%x", soc_info.revision); 138 + soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%x", 139 + soc_info.revision); 142 140 if (!soc_dev_attr->revision) 143 141 return -ENOMEM; 144 142 soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id); ··· 152 150 if (IS_ERR(soc_dev)) 153 151 return PTR_ERR(soc_dev); 154 152 155 - ret = exynos_asv_init(&pdev->dev, regmap); 153 + ret = 
exynos_asv_init(dev, regmap); 156 154 if (ret) 157 155 goto err; 158 156 159 157 platform_set_drvdata(pdev, soc_dev); 160 158 161 - dev_info(&pdev->dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n", 159 + dev_info(dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n", 162 160 soc_dev_attr->soc_id, soc_info.product_id, soc_info.revision); 163 161 164 162 return 0;
+5 -133
drivers/soc/samsung/exynos-pmu.c
··· 6 6 // Exynos - CPU PMU(Power Management Unit) support 7 7 8 8 #include <linux/array_size.h> 9 - #include <linux/arm-smccc.h> 10 9 #include <linux/bitmap.h> 11 10 #include <linux/cpuhotplug.h> 12 11 #include <linux/cpu_pm.h> ··· 23 24 #include <linux/soc/samsung/exynos-pmu.h> 24 25 25 26 #include "exynos-pmu.h" 26 - 27 - #define PMUALIVE_MASK GENMASK(13, 0) 28 - #define TENSOR_SET_BITS (BIT(15) | BIT(14)) 29 - #define TENSOR_CLR_BITS BIT(15) 30 - #define TENSOR_SMC_PMU_SEC_REG 0x82000504 31 - #define TENSOR_PMUREG_READ 0 32 - #define TENSOR_PMUREG_WRITE 1 33 - #define TENSOR_PMUREG_RMW 2 34 27 35 28 struct exynos_pmu_context { 36 29 struct device *dev; ··· 44 53 static struct exynos_pmu_context *pmu_context; 45 54 /* forward declaration */ 46 55 static struct platform_driver exynos_pmu_driver; 47 - 48 - /* 49 - * Tensor SoCs are configured so that PMU_ALIVE registers can only be written 50 - * from EL3, but are still read accessible. As Linux needs to write some of 51 - * these registers, the following functions are provided and exposed via 52 - * regmap. 53 - * 54 - * Note: This SMC interface is known to be implemented on gs101 and derivative 55 - * SoCs. 56 - */ 57 - 58 - /* Write to a protected PMU register. */ 59 - static int tensor_sec_reg_write(void *context, unsigned int reg, 60 - unsigned int val) 61 - { 62 - struct arm_smccc_res res; 63 - unsigned long pmu_base = (unsigned long)context; 64 - 65 - arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, 66 - TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res); 67 - 68 - /* returns -EINVAL if access isn't allowed or 0 */ 69 - if (res.a0) 70 - pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); 71 - 72 - return (int)res.a0; 73 - } 74 - 75 - /* Read/Modify/Write a protected PMU register. 
*/ 76 - static int tensor_sec_reg_rmw(void *context, unsigned int reg, 77 - unsigned int mask, unsigned int val) 78 - { 79 - struct arm_smccc_res res; 80 - unsigned long pmu_base = (unsigned long)context; 81 - 82 - arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, 83 - TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res); 84 - 85 - /* returns -EINVAL if access isn't allowed or 0 */ 86 - if (res.a0) 87 - pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); 88 - 89 - return (int)res.a0; 90 - } 91 - 92 - /* 93 - * Read a protected PMU register. All PMU registers can be read by Linux. 94 - * Note: The SMC read register is not used, as only registers that can be 95 - * written are readable via SMC. 96 - */ 97 - static int tensor_sec_reg_read(void *context, unsigned int reg, 98 - unsigned int *val) 99 - { 100 - *val = pmu_raw_readl(reg); 101 - return 0; 102 - } 103 - 104 - /* 105 - * For SoCs that have set/clear bit hardware this function can be used when 106 - * the PMU register will be accessed by multiple masters. 
107 - * 108 - * For example, to set bits 13:8 in PMU reg offset 0x3e80 109 - * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00); 110 - * 111 - * Set bit 8, and clear bits 13:9 PMU reg offset 0x3e80 112 - * tensor_set_bits_atomic(0x3e80, 0x100, 0x3f00); 113 - */ 114 - static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val, 115 - u32 mask) 116 - { 117 - int ret; 118 - unsigned int i; 119 - 120 - for (i = 0; i < 32; i++) { 121 - if (!(mask & BIT(i))) 122 - continue; 123 - 124 - offset &= ~TENSOR_SET_BITS; 125 - 126 - if (val & BIT(i)) 127 - offset |= TENSOR_SET_BITS; 128 - else 129 - offset |= TENSOR_CLR_BITS; 130 - 131 - ret = tensor_sec_reg_write(ctx, offset, i); 132 - if (ret) 133 - return ret; 134 - } 135 - return 0; 136 - } 137 - 138 - static bool tensor_is_atomic(unsigned int reg) 139 - { 140 - /* 141 - * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF) 142 - * as the target registers can be accessed by multiple masters. SFRs 143 - * that don't support atomic are added to the switch statement below. 144 - */ 145 - if (reg > PMUALIVE_MASK) 146 - return false; 147 - 148 - switch (reg) { 149 - case GS101_SYSIP_DAT0: 150 - case GS101_SYSTEM_CONFIGURATION: 151 - return false; 152 - default: 153 - return true; 154 - } 155 - } 156 - 157 - static int tensor_sec_update_bits(void *ctx, unsigned int reg, 158 - unsigned int mask, unsigned int val) 159 - { 160 - 161 - if (!tensor_is_atomic(reg)) 162 - return tensor_sec_reg_rmw(ctx, reg, mask, val); 163 - 164 - return tensor_set_bits_atomic(ctx, reg, val, mask); 165 - } 166 56 167 57 void pmu_raw_writel(u32 val, u32 offset) 168 58 { ··· 114 242 .reg_stride = 4, 115 243 .val_bits = 32, 116 244 .use_raw_spinlock = true, 117 - }; 118 - 119 - static const struct exynos_pmu_data gs101_pmu_data = { 120 - .pmu_secure = true, 121 - .pmu_cpuhp = true, 122 245 }; 123 246 124 247 /* ··· 231 364 * disabled and cpupm_lock held. 
232 365 */ 233 366 static int __gs101_cpu_pmu_online(unsigned int cpu) 367 + __must_hold(&pmu_context->cpupm_lock) 234 368 { 235 369 unsigned int cpuhint = smp_processor_id(); 236 370 u32 reg, mask; ··· 292 424 293 425 /* Common function shared by both CPU hot plug and CPUIdle */ 294 426 static int __gs101_cpu_pmu_offline(unsigned int cpu) 427 + __must_hold(&pmu_context->cpupm_lock) 295 428 { 296 429 unsigned int cpuhint = smp_processor_id(); 297 430 u32 reg, mask; ··· 504 635 pmu_regmcfg = regmap_smccfg; 505 636 pmu_regmcfg.max_register = resource_size(res) - 506 637 pmu_regmcfg.reg_stride; 638 + pmu_regmcfg.wr_table = pmu_context->pmu_data->wr_table; 639 + pmu_regmcfg.rd_table = pmu_context->pmu_data->rd_table; 640 + 507 641 /* Need physical address for SMC call */ 508 642 regmap = devm_regmap_init(dev, NULL, 509 643 (void *)(uintptr_t)res->start,
+37
drivers/soc/samsung/exynos-pmu.h
··· 13 13 14 14 #define PMU_TABLE_END (-1U) 15 15 16 + struct regmap_access_table; 17 + 16 18 struct exynos_pmu_conf { 17 19 unsigned int offset; 18 20 u8 val[NUM_SYS_POWERDOWN]; 19 21 }; 20 22 23 + /** 24 + * struct exynos_pmu_data - of_device_id (match) data 25 + * 26 + * @pmu_config: Optional table detailing register writes for target system 27 + * states: SYS_AFTR, SYS_LPA, SYS_SLEEP. 28 + * @pmu_config_extra: Optional secondary table detailing additional register 29 + * writes for target system states: SYS_AFTR, SYS_LPA, 30 + * SYS_SLEEP. 31 + * @pmu_secure: Whether or not PMU register writes need to be done via SMC call. 32 + * @pmu_cpuhp: Whether or not extra handling is required for CPU hotplug and 33 + * CPUidle outside of standard PSCI calls, due to non-compliant 34 + * firmware. 35 + * @pmu_init: Optional init function. 36 + * @powerdown_conf: Optional callback before entering target system states: 37 + * SYS_AFTR, SYS_LPA, SYS_SLEEP. This will be invoked before 38 + * the registers from @pmu_config are written. 39 + * @powerdown_conf_extra: Optional secondary callback before entering 40 + * target system states: SYS_AFTR, SYS_LPA, SYS_SLEEP. 41 + * This will be invoked after @pmu_config registers have 42 + * been written. 43 + * @rd_table: A table of readable register ranges in case a custom regmap is 44 + * used (i.e. when @pmu_secure is @true). 45 + * @wr_table: A table of writable register ranges in case a custom regmap is 46 + * used (i.e. when @pmu_secure is @true). 
47 + */ 21 48 struct exynos_pmu_data { 22 49 const struct exynos_pmu_conf *pmu_config; 23 50 const struct exynos_pmu_conf *pmu_config_extra; ··· 54 27 void (*pmu_init)(void); 55 28 void (*powerdown_conf)(enum sys_powerdown); 56 29 void (*powerdown_conf_extra)(enum sys_powerdown); 30 + 31 + const struct regmap_access_table *rd_table; 32 + const struct regmap_access_table *wr_table; 57 33 }; 58 34 59 35 extern void __iomem *pmu_base_addr; ··· 70 40 extern const struct exynos_pmu_data exynos5250_pmu_data; 71 41 extern const struct exynos_pmu_data exynos5420_pmu_data; 72 42 #endif 43 + extern const struct exynos_pmu_data gs101_pmu_data; 73 44 74 45 extern void pmu_raw_writel(u32 val, u32 offset); 75 46 extern u32 pmu_raw_readl(u32 offset); 47 + 48 + int tensor_sec_reg_write(void *context, unsigned int reg, unsigned int val); 49 + int tensor_sec_reg_read(void *context, unsigned int reg, unsigned int *val); 50 + int tensor_sec_update_bits(void *context, unsigned int reg, unsigned int mask, 51 + unsigned int val); 52 + 76 53 #endif /* __EXYNOS_PMU_H */
+446
drivers/soc/samsung/gs101-pmu.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright 2025 Linaro Ltd. 4 + * 5 + * GS101 PMU (Power Management Unit) support 6 + */ 7 + 8 + #include <linux/arm-smccc.h> 9 + #include <linux/array_size.h> 10 + #include <linux/soc/samsung/exynos-pmu.h> 11 + #include <linux/soc/samsung/exynos-regs-pmu.h> 12 + #include <linux/regmap.h> 13 + 14 + #include "exynos-pmu.h" 15 + 16 + #define PMUALIVE_MASK GENMASK(13, 0) 17 + #define TENSOR_SET_BITS (BIT(15) | BIT(14)) 18 + #define TENSOR_CLR_BITS BIT(15) 19 + #define TENSOR_SMC_PMU_SEC_REG 0x82000504 20 + #define TENSOR_PMUREG_READ 0 21 + #define TENSOR_PMUREG_WRITE 1 22 + #define TENSOR_PMUREG_RMW 2 23 + 24 + static const struct regmap_range gs101_pmu_registers[] = { 25 + regmap_reg_range(GS101_OM_STAT, GS101_SYSTEM_INFO), 26 + regmap_reg_range(GS101_IDLE_IP(0), GS101_IDLE_IP_MASK(3)), 27 + regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(0), 28 + GS101_PPMPURAM_INFORM_SCL_CH(3)), 29 + regmap_reg_range(GS101_INFORM0, GS101_SYSIP_DAT(0)), 30 + /* skip SYSIP_DAT1 SYSIP_DAT2 */ 31 + regmap_reg_range(GS101_SYSIP_DAT(3), GS101_PWR_HOLD_SW_TRIP), 32 + regmap_reg_range(GS101_GSA_INFORM(0), GS101_GSA_INFORM(1)), 33 + regmap_reg_range(GS101_INFORM4, GS101_IROM_INFORM), 34 + regmap_reg_range(GS101_IROM_CPU_INFORM(0), GS101_IROM_CPU_INFORM(7)), 35 + regmap_reg_range(GS101_PMU_SPARE(0), GS101_PMU_SPARE(3)), 36 + /* skip most IROM_xxx registers */ 37 + regmap_reg_range(GS101_DREX_CALIBRATION(0), GS101_DREX_CALIBRATION(7)), 38 + 39 + #define CLUSTER_CPU_RANGE(cl, cpu) \ 40 + regmap_reg_range(GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu), \ 41 + GS101_CLUSTER_CPU_OPTION(cl, cpu)), \ 42 + regmap_reg_range(GS101_CLUSTER_CPU_OUT(cl, cpu), \ 43 + GS101_CLUSTER_CPU_IN(cl, cpu)), \ 44 + regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \ 45 + GS101_CLUSTER_CPU_INT_DIR(cl, cpu)) 46 + 47 + /* cluster 0..2 and cpu 0..4 or 0..1 */ 48 + CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 0), 49 + CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 1), 50 + 
CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 2), 51 + CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 3), 52 + CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 0), 53 + CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 1), 54 + CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 0), 55 + CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 1), 56 + #undef CLUSTER_CPU_RANGE 57 + 58 + #define CLUSTER_NONCPU_RANGE(cl) \ 59 + regmap_reg_range(GS101_CLUSTER_NONCPU_CONFIGURATION(cl), \ 60 + GS101_CLUSTER_NONCPU_OPTION(cl)), \ 61 + regmap_reg_range(GS101_CLUSTER_NONCPU_OUT(cl), \ 62 + GS101_CLUSTER_NONCPU_IN(cl)), \ 63 + regmap_reg_range(GS101_CLUSTER_NONCPU_INT_IN(cl), \ 64 + GS101_CLUSTER_NONCPU_INT_DIR(cl)), \ 65 + regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl), \ 66 + GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl)), \ 67 + regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl), \ 68 + GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl)) 69 + 70 + CLUSTER_NONCPU_RANGE(0), 71 + regmap_reg_range(GS101_CLUSTER0_NONCPU_DSU_PCH, 72 + GS101_CLUSTER0_NONCPU_DSU_PCH), 73 + CLUSTER_NONCPU_RANGE(1), 74 + CLUSTER_NONCPU_RANGE(2), 75 + #undef CLUSTER_NONCPU_RANGE 76 + 77 + #define SUBBLK_RANGE(blk) \ 78 + regmap_reg_range(GS101_SUBBLK_CONFIGURATION(blk), \ 79 + GS101_SUBBLK_CTRL(blk)), \ 80 + regmap_reg_range(GS101_SUBBLK_OUT(blk), GS101_SUBBLK_IN(blk)), \ 81 + regmap_reg_range(GS101_SUBBLK_INT_IN(blk), \ 82 + GS101_SUBBLK_INT_DIR(blk)), \ 83 + regmap_reg_range(GS101_SUBBLK_MEMORY_OUT(blk), \ 84 + GS101_SUBBLK_MEMORY_IN(blk)) 85 + 86 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ALIVE), 87 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_AOC), 88 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_APM), 89 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CMU), 90 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS0), 91 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS1), 92 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS2), 93 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CORE), 94 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EH), 95 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL0), 96 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL1), 
97 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL2), 98 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3D), 99 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0), 100 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_G3D), 101 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI0), 102 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI1), 103 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI2), 104 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DPU), 105 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DISP), 106 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G2D), 107 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MFC), 108 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CSIS), 109 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PDP), 110 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DNS), 111 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3AA), 112 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_IPP), 113 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ITP), 114 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MCSC), 115 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_GDC), 116 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TNR), 117 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BO), 118 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TPU), 119 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF0), 120 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF1), 121 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF2), 122 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF3), 123 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MISC), 124 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC0), 125 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC1), 126 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_S2D), 127 + #undef SUBBLK_RANGE 128 + 129 + #define SUBBLK_CPU_RANGE(blk) \ 130 + regmap_reg_range(GS101_SUBBLK_CPU_CONFIGURATION(blk), \ 131 + GS101_SUBBLK_CPU_OPTION(blk)), \ 132 + regmap_reg_range(GS101_SUBBLK_CPU_OUT(blk), \ 133 + GS101_SUBBLK_CPU_IN(blk)), \ 134 + regmap_reg_range(GS101_SUBBLK_CPU_INT_IN(blk), \ 135 + GS101_SUBBLK_CPU_INT_DIR(blk)) 136 + 137 + SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_APM), 138 + SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_DBGCORE), 139 + SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_SSS), 140 + #undef SUBBLK_CPU_RANGE 141 + 142 + 
regmap_reg_range(GS101_MIF_CONFIGURATION, GS101_MIF_CTRL), 143 + regmap_reg_range(GS101_MIF_OUT, GS101_MIF_IN), 144 + regmap_reg_range(GS101_MIF_INT_IN, GS101_MIF_INT_DIR), 145 + regmap_reg_range(GS101_TOP_CONFIGURATION, GS101_TOP_OPTION), 146 + regmap_reg_range(GS101_TOP_OUT, GS101_TOP_IN), 147 + regmap_reg_range(GS101_TOP_INT_IN, GS101_WAKEUP2_STAT), 148 + regmap_reg_range(GS101_WAKEUP2_INT_IN, GS101_WAKEUP2_INT_DIR), 149 + regmap_reg_range(GS101_SYSTEM_CONFIGURATION, GS101_USER_DEFINED_OUT), 150 + regmap_reg_range(GS101_SYSTEM_OUT, GS101_SYSTEM_IN), 151 + regmap_reg_range(GS101_SYSTEM_INT_IN, GS101_EINT_WAKEUP_MASK3), 152 + regmap_reg_range(GS101_USER_DEFINED_INT_IN, GS101_SCAN2DRAM_INT_DIR), 153 + /* skip HCU_START */ 154 + regmap_reg_range(GS101_CUSTOM_OUT, GS101_CUSTOM_IN), 155 + regmap_reg_range(GS101_CUSTOM_INT_IN, GS101_CUSTOM_INT_DIR), 156 + regmap_reg_range(GS101_ACK_LAST_CPU, GS101_HCU_R(3)), 157 + regmap_reg_range(GS101_HCU_SP, GS101_HCU_PC), 158 + /* skip PMU_RAM_CTRL */ 159 + regmap_reg_range(GS101_APM_HCU_CTRL, GS101_APM_HCU_CTRL), 160 + regmap_reg_range(GS101_APM_NMI_ENABLE, GS101_RST_STAT_PMU), 161 + regmap_reg_range(GS101_HPM_INT_IN, GS101_BOOT_STAT), 162 + regmap_reg_range(GS101_PMLINK_OUT, GS101_PMLINK_AOC_CTRL), 163 + regmap_reg_range(GS101_TCXO_BUF_CTRL, GS101_ADD_CTRL), 164 + regmap_reg_range(GS101_HCU_TIMEOUT_RESET, GS101_HCU_TIMEOUT_SCAN2DRAM), 165 + regmap_reg_range(GS101_TIMER(0), GS101_TIMER(3)), 166 + regmap_reg_range(GS101_PPC_MIF(0), GS101_PPC_EH), 167 + /* PPC_OFFSET, skip PPC_CPUCL1_0 PPC_CPUCL1_1 */ 168 + regmap_reg_range(GS101_EXT_REGULATOR_MIF_DURATION, GS101_TCXO_DURATION), 169 + regmap_reg_range(GS101_BURNIN_CTRL, GS101_TMU_SUB_TRIP), 170 + regmap_reg_range(GS101_MEMORY_CEN, GS101_MEMORY_SMX_FEEDBACK), 171 + regmap_reg_range(GS101_SLC_PCH_CHANNEL, GS101_SLC_PCH_CB), 172 + regmap_reg_range(GS101_FORCE_NOMC, GS101_FORCE_NOMC), 173 + regmap_reg_range(GS101_FORCE_BOOST, GS101_PMLINK_SLC_BUSY), 174 + 
regmap_reg_range(GS101_BOOTSYNC_OUT, GS101_CTRL_SECJTAG_ALIVE), 175 + regmap_reg_range(GS101_CTRL_DIV_PLL_ALV_DIVLOW, GS101_CTRL_CLKDIV__CLKRTC), 176 + regmap_reg_range(GS101_CTRL_SOC32K, GS101_CTRL_SBU_SW_EN), 177 + regmap_reg_range(GS101_PAD_CTRL_CLKOUT0, GS101_PAD_CTRL_WRESETO_n), 178 + regmap_reg_range(GS101_PHY_CTRL_USB20, GS101_PHY_CTRL_UFS), 179 + }; 180 + 181 + static const struct regmap_range gs101_pmu_ro_registers[] = { 182 + regmap_reg_range(GS101_OM_STAT, GS101_VERSION), 183 + regmap_reg_range(GS101_OTP_STATUS, GS101_OTP_STATUS), 184 + 185 + regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(0), 186 + GS101_PPMPURAM_STATE_SLC_CH(0)), 187 + regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(1), 188 + GS101_PPMPURAM_STATE_SLC_CH(1)), 189 + regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(2), 190 + GS101_PPMPURAM_STATE_SLC_CH(2)), 191 + regmap_reg_range(GS101_DATARAM_STATE_SLC_CH(3), 192 + GS101_PPMPURAM_STATE_SLC_CH(3)), 193 + 194 + #define CLUSTER_CPU_RANGE(cl, cpu) \ 195 + regmap_reg_range(GS101_CLUSTER_CPU_IN(cl, cpu), \ 196 + GS101_CLUSTER_CPU_IN(cl, cpu)), \ 197 + regmap_reg_range(GS101_CLUSTER_CPU_INT_IN(cl, cpu), \ 198 + GS101_CLUSTER_CPU_INT_IN(cl, cpu)) 199 + 200 + CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 0), 201 + CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 1), 202 + CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 2), 203 + CLUSTER_CPU_RANGE(GS101_CLUSTER0_OFFSET, 3), 204 + CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 0), 205 + CLUSTER_CPU_RANGE(GS101_CLUSTER1_OFFSET, 1), 206 + CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 0), 207 + CLUSTER_CPU_RANGE(GS101_CLUSTER2_OFFSET, 1), 208 + #undef CLUSTER_CPU_RANGE 209 + 210 + #define CLUSTER_NONCPU_RANGE(cl) \ 211 + regmap_reg_range(GS101_CLUSTER_NONCPU_IN(cl), \ 212 + GS101_CLUSTER_NONCPU_IN(cl)), \ 213 + regmap_reg_range(GS101_CLUSTER_NONCPU_INT_IN(cl), \ 214 + GS101_CLUSTER_NONCPU_INT_IN(cl)), \ 215 + regmap_reg_range(GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl), \ 216 + GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl)) 217 + 218 + 
CLUSTER_NONCPU_RANGE(0), 219 + CLUSTER_NONCPU_RANGE(1), 220 + CLUSTER_NONCPU_RANGE(2), 221 + regmap_reg_range(GS101_CLUSTER_NONCPU_INT_EN(2), 222 + GS101_CLUSTER_NONCPU_INT_DIR(2)), 223 + #undef CLUSTER_NONCPU_RANGE 224 + 225 + #define SUBBLK_RANGE(blk) \ 226 + regmap_reg_range(GS101_SUBBLK_IN(blk), GS101_SUBBLK_IN(blk)), \ 227 + regmap_reg_range(GS101_SUBBLK_INT_IN(blk), \ 228 + GS101_SUBBLK_INT_IN(blk)), \ 229 + regmap_reg_range(GS101_SUBBLK_MEMORY_IN(blk), \ 230 + GS101_SUBBLK_MEMORY_IN(blk)) 231 + 232 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ALIVE), 233 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_AOC), 234 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_APM), 235 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CMU), 236 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS0), 237 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS1), 238 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BUS2), 239 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CORE), 240 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EH), 241 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL0), 242 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL1), 243 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CPUCL2), 244 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3D), 245 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0), 246 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_EMBEDDED_G3D), 247 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI0), 248 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI1), 249 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_HSI2), 250 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DPU), 251 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DISP), 252 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G2D), 253 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MFC), 254 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_CSIS), 255 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PDP), 256 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_DNS), 257 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_G3AA), 258 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_IPP), 259 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_ITP), 260 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MCSC), 261 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_GDC), 262 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TNR), 263 + 
SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_BO), 264 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_TPU), 265 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF0), 266 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF1), 267 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF2), 268 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MIF3), 269 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_MISC), 270 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC0), 271 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_PERIC1), 272 + SUBBLK_RANGE(GS101_SUBBBLK_OFFSET_S2D), 273 + #undef SUBBLK_RANGE 274 + 275 + #define SUBBLK_CPU_RANGE(blk) \ 276 + regmap_reg_range(GS101_SUBBLK_CPU_IN(blk), \ 277 + GS101_SUBBLK_CPU_IN(blk)), \ 278 + regmap_reg_range(GS101_SUBBLK_CPU_INT_IN(blk), \ 279 + GS101_SUBBLK_CPU_INT_IN(blk)) 280 + 281 + SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_APM), 282 + SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_DBGCORE), 283 + SUBBLK_CPU_RANGE(GS101_SUBBBLK_CPU_OFFSET_SSS), 284 + #undef SUBBLK_CPU_RANGE 285 + 286 + regmap_reg_range(GS101_MIF_CONFIGURATION, GS101_MIF_CONFIGURATION), 287 + regmap_reg_range(GS101_MIF_IN, GS101_MIF_IN), 288 + regmap_reg_range(GS101_MIF_INT_IN, GS101_MIF_INT_IN), 289 + regmap_reg_range(GS101_TOP_IN, GS101_TOP_IN), 290 + regmap_reg_range(GS101_TOP_INT_IN, GS101_TOP_INT_IN), 291 + regmap_reg_range(GS101_WAKEUP2_INT_IN, GS101_WAKEUP2_INT_IN), 292 + regmap_reg_range(GS101_SYSTEM_IN, GS101_SYSTEM_IN), 293 + regmap_reg_range(GS101_SYSTEM_INT_IN, GS101_SYSTEM_INT_IN), 294 + regmap_reg_range(GS101_EINT_INT_IN, GS101_EINT_INT_IN), 295 + regmap_reg_range(GS101_EINT2_INT_IN, GS101_EINT2_INT_IN), 296 + regmap_reg_range(GS101_EINT3_INT_IN, GS101_EINT3_INT_IN), 297 + regmap_reg_range(GS101_USER_DEFINED_INT_IN, GS101_USER_DEFINED_INT_IN), 298 + regmap_reg_range(GS101_SCAN2DRAM_INT_IN, GS101_SCAN2DRAM_INT_IN), 299 + regmap_reg_range(GS101_CUSTOM_IN, GS101_CUSTOM_IN), 300 + regmap_reg_range(GS101_CUSTOM_INT_IN, GS101_CUSTOM_INT_IN), 301 + regmap_reg_range(GS101_HCU_R(0), GS101_HCU_R(3)), 302 + regmap_reg_range(GS101_HCU_SP, GS101_HCU_PC), 303 + 
regmap_reg_range(GS101_NMI_SRC_IN, GS101_NMI_SRC_IN), 304 + regmap_reg_range(GS101_HPM_INT_IN, GS101_HPM_INT_IN), 305 + regmap_reg_range(GS101_MEMORY_PGEN_FEEDBACK, GS101_MEMORY_PGEN_FEEDBACK), 306 + regmap_reg_range(GS101_MEMORY_SMX_FEEDBACK, GS101_MEMORY_SMX_FEEDBACK), 307 + regmap_reg_range(GS101_PMLINK_SLC_ACK, GS101_PMLINK_SLC_BUSY), 308 + regmap_reg_range(GS101_BOOTSYNC_IN, GS101_BOOTSYNC_IN), 309 + regmap_reg_range(GS101_SCAN_READY_IN, GS101_SCAN_READY_IN), 310 + regmap_reg_range(GS101_CTRL_PLL_ALV_LOCK, GS101_CTRL_PLL_ALV_LOCK), 311 + }; 312 + 313 + static const struct regmap_access_table gs101_pmu_rd_table = { 314 + .yes_ranges = gs101_pmu_registers, 315 + .n_yes_ranges = ARRAY_SIZE(gs101_pmu_registers), 316 + }; 317 + 318 + static const struct regmap_access_table gs101_pmu_wr_table = { 319 + .yes_ranges = gs101_pmu_registers, 320 + .n_yes_ranges = ARRAY_SIZE(gs101_pmu_registers), 321 + .no_ranges = gs101_pmu_ro_registers, 322 + .n_no_ranges = ARRAY_SIZE(gs101_pmu_ro_registers), 323 + }; 324 + 325 + const struct exynos_pmu_data gs101_pmu_data = { 326 + .pmu_secure = true, 327 + .pmu_cpuhp = true, 328 + .rd_table = &gs101_pmu_rd_table, 329 + .wr_table = &gs101_pmu_wr_table, 330 + }; 331 + 332 + /* 333 + * Tensor SoCs are configured so that PMU_ALIVE registers can only be written 334 + * from EL3, but are still read accessible. As Linux needs to write some of 335 + * these registers, the following functions are provided and exposed via 336 + * regmap. 337 + * 338 + * Note: This SMC interface is known to be implemented on gs101 and derivative 339 + * SoCs. 340 + */ 341 + 342 + /* Write to a protected PMU register. 
*/ 343 + int tensor_sec_reg_write(void *context, unsigned int reg, unsigned int val) 344 + { 345 + struct arm_smccc_res res; 346 + unsigned long pmu_base = (unsigned long)context; 347 + 348 + arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, 349 + TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res); 350 + 351 + /* returns -EINVAL if access isn't allowed or 0 */ 352 + if (res.a0) 353 + pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); 354 + 355 + return (int)res.a0; 356 + } 357 + 358 + /* Read/Modify/Write a protected PMU register. */ 359 + static int tensor_sec_reg_rmw(void *context, unsigned int reg, 360 + unsigned int mask, unsigned int val) 361 + { 362 + struct arm_smccc_res res; 363 + unsigned long pmu_base = (unsigned long)context; 364 + 365 + arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, 366 + TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res); 367 + 368 + /* returns -EINVAL if access isn't allowed or 0 */ 369 + if (res.a0) 370 + pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); 371 + 372 + return (int)res.a0; 373 + } 374 + 375 + /* 376 + * Read a protected PMU register. All PMU registers can be read by Linux. 377 + * Note: The SMC read register is not used, as only registers that can be 378 + * written are readable via SMC. 379 + */ 380 + int tensor_sec_reg_read(void *context, unsigned int reg, unsigned int *val) 381 + { 382 + *val = pmu_raw_readl(reg); 383 + return 0; 384 + } 385 + 386 + /* 387 + * For SoCs that have set/clear bit hardware this function can be used when 388 + * the PMU register will be accessed by multiple masters. 
389 + * 390 + * For example, to set bits 13:8 in PMU reg offset 0x3e80 391 + * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00); 392 + * 393 + * Set bit 8, and clear bits 13:9 PMU reg offset 0x3e80 394 + * tensor_set_bits_atomic(0x3e80, 0x100, 0x3f00); 395 + */ 396 + static int tensor_set_bits_atomic(void *context, unsigned int offset, u32 val, 397 + u32 mask) 398 + { 399 + int ret; 400 + unsigned int i; 401 + 402 + for (i = 0; i < 32; i++) { 403 + if (!(mask & BIT(i))) 404 + continue; 405 + 406 + offset &= ~TENSOR_SET_BITS; 407 + 408 + if (val & BIT(i)) 409 + offset |= TENSOR_SET_BITS; 410 + else 411 + offset |= TENSOR_CLR_BITS; 412 + 413 + ret = tensor_sec_reg_write(context, offset, i); 414 + if (ret) 415 + return ret; 416 + } 417 + return 0; 418 + } 419 + 420 + static bool tensor_is_atomic(unsigned int reg) 421 + { 422 + /* 423 + * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF) 424 + * as the target registers can be accessed by multiple masters. SFRs 425 + * that don't support atomic are added to the switch statement below. 426 + */ 427 + if (reg > PMUALIVE_MASK) 428 + return false; 429 + 430 + switch (reg) { 431 + case GS101_SYSIP_DAT(0): 432 + case GS101_SYSTEM_CONFIGURATION: 433 + return false; 434 + default: 435 + return true; 436 + } 437 + } 438 + 439 + int tensor_sec_update_bits(void *context, unsigned int reg, unsigned int mask, 440 + unsigned int val) 441 + { 442 + if (!tensor_is_atomic(reg)) 443 + return tensor_sec_reg_rmw(context, reg, mask, val); 444 + 445 + return tensor_set_bits_atomic(context, reg, val, mask); 446 + }
+1 -1
drivers/soc/tegra/cbb/tegra194-cbb.c
··· 1836 1836 } 1837 1837 1838 1838 /* 1839 - * Print transcation type, error code and description from ErrLog0 for all 1839 + * Print transaction type, error code and description from ErrLog0 for all 1840 1840 * errors. For NOC target errors, all relevant error info is printed using 1841 1841 * ErrLog0 only. But additional information is printed for errors from 1842 1842 * APB targets because for them:
-2
drivers/soc/tegra/fuse/fuse-tegra.c
··· 182 182 } 183 183 184 184 fuse->soc->init(fuse); 185 - tegra_fuse_print_sku_info(&tegra_sku_info); 186 - tegra_soc_device_register(); 187 185 188 186 err = tegra_fuse_add_lookups(fuse); 189 187 if (err)
+42 -17
drivers/soc/tegra/fuse/speedo-tegra210.c
··· 65 65 sku_info->gpu_speedo_id = 0; 66 66 *threshold = THRESHOLD_INDEX_0; 67 67 68 - switch (sku) { 69 - case 0x00: /* Engineering SKU */ 70 - case 0x01: /* Engineering SKU */ 71 - case 0x07: 72 - case 0x17: 73 - case 0x27: 74 - if (speedo_rev >= 2) 68 + if (sku_info->revision >= TEGRA_REVISION_A02) { 69 + switch (sku) { 70 + case 0x00: /* Engineering SKU */ 71 + case 0x01: /* Engineering SKU */ 72 + case 0x13: 73 + sku_info->cpu_speedo_id = 5; 74 + sku_info->gpu_speedo_id = 2; 75 + break; 76 + 77 + case 0x07: 78 + case 0x17: 79 + case 0x1F: 80 + sku_info->cpu_speedo_id = 7; 81 + sku_info->gpu_speedo_id = 2; 82 + break; 83 + 84 + case 0x27: 85 + sku_info->cpu_speedo_id = 1; 86 + sku_info->gpu_speedo_id = 2; 87 + break; 88 + 89 + case 0x83: 90 + sku_info->cpu_speedo_id = 3; 91 + sku_info->gpu_speedo_id = 3; 92 + break; 93 + 94 + case 0x87: 95 + sku_info->cpu_speedo_id = 2; 75 96 sku_info->gpu_speedo_id = 1; 76 - break; 97 + break; 77 98 78 - case 0x13: 79 - if (speedo_rev >= 2) 80 - sku_info->gpu_speedo_id = 1; 99 + case 0x8F: 100 + sku_info->soc_speedo_id = 2; 101 + sku_info->cpu_speedo_id = 9; 102 + sku_info->gpu_speedo_id = 2; 103 + break; 81 104 82 - sku_info->cpu_speedo_id = 1; 83 - break; 84 - 85 - default: 105 + default: 106 + pr_err("Tegra210: unknown revision 2 or newer SKU %#04x\n", sku); 107 + /* Using the default for the error case */ 108 + break; 109 + } 110 + } else if (sku == 0x00 || sku == 0x01 || sku == 0x07 || sku == 0x13 || sku == 0x17) { 111 + sku_info->gpu_speedo_id = 1; 112 + } else { 86 113 pr_err("Tegra210: unknown SKU %#04x\n", sku); 87 - /* Using the default for the error case */ 88 - break; 89 114 } 90 115 } 91 116
+28 -10
drivers/soc/tegra/pmc.c
··· 423 423 * @wake_sw_status_map: Bitmap to hold raw status of wakes without mask 424 424 * @wake_cntrl_level_map: Bitmap to hold wake levels to be programmed in 425 425 * cntrl register associated with each wake during system suspend. 426 + * @syscore: syscore suspend/resume callbacks 426 427 */ 427 428 struct tegra_pmc { 428 429 struct device *dev; ··· 467 466 unsigned long *wake_type_dual_edge_map; 468 467 unsigned long *wake_sw_status_map; 469 468 unsigned long *wake_cntrl_level_map; 470 - struct syscore_ops syscore; 469 + struct syscore syscore; 471 470 }; 472 471 473 472 static struct tegra_pmc *pmc = &(struct tegra_pmc) { ··· 2898 2897 if (IS_ERR(pmc->wake)) 2899 2898 return PTR_ERR(pmc->wake); 2900 2899 2901 - pmc->aotag = devm_platform_ioremap_resource_byname(pdev, "aotag"); 2902 - if (IS_ERR(pmc->aotag)) 2903 - return PTR_ERR(pmc->aotag); 2900 + /* "aotag" is an optional aperture */ 2901 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2902 + "aotag"); 2903 + if (res) { 2904 + pmc->aotag = devm_ioremap_resource(&pdev->dev, res); 2905 + if (IS_ERR(pmc->aotag)) 2906 + return PTR_ERR(pmc->aotag); 2907 + } else { 2908 + pmc->aotag = NULL; 2909 + } 2904 2910 2905 2911 /* "scratch" is an optional aperture */ 2906 2912 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ··· 3155 3147 } 3156 3148 } 3157 3149 3158 - static void tegra186_pmc_wake_syscore_resume(void) 3150 + static void tegra186_pmc_wake_syscore_resume(void *data) 3159 3151 { 3160 3152 u32 status, mask; 3161 3153 unsigned int i; ··· 3168 3160 } 3169 3161 } 3170 3162 3171 - static int tegra186_pmc_wake_syscore_suspend(void) 3163 + static int tegra186_pmc_wake_syscore_suspend(void *data) 3172 3164 { 3173 3165 wke_read_sw_wake_status(pmc); 3174 3166 ··· 3186 3178 3187 3179 return 0; 3188 3180 } 3181 + 3182 + static const struct syscore_ops tegra186_pmc_wake_syscore_ops = { 3183 + .suspend = tegra186_pmc_wake_syscore_suspend, 3184 + .resume = tegra186_pmc_wake_syscore_resume, 3185 + }; 
3189 3186 3190 3187 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) 3191 3188 static int tegra_pmc_suspend(struct device *dev) ··· 3842 3829 3843 3830 static void tegra186_pmc_init(struct tegra_pmc *pmc) 3844 3831 { 3845 - pmc->syscore.suspend = tegra186_pmc_wake_syscore_suspend; 3846 - pmc->syscore.resume = tegra186_pmc_wake_syscore_resume; 3847 - 3848 - register_syscore_ops(&pmc->syscore); 3832 + pmc->syscore.ops = &tegra186_pmc_wake_syscore_ops; 3833 + register_syscore(&pmc->syscore); 3849 3834 } 3850 3835 3851 3836 static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, ··· 4225 4214 TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)), 4226 4215 TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)), 4227 4216 TEGRA_WAKE_IRQ("rtc", 73, 10), 4217 + TEGRA_WAKE_IRQ("usb3-port-0", 76, 167), 4218 + TEGRA_WAKE_IRQ("usb3-port-1", 77, 167), 4219 + TEGRA_WAKE_IRQ("usb3-port-2-3", 78, 167), 4220 + TEGRA_WAKE_IRQ("usb2-port-0", 79, 167), 4221 + TEGRA_WAKE_IRQ("usb2-port-1", 80, 167), 4222 + TEGRA_WAKE_IRQ("usb2-port-2", 81, 167), 4223 + TEGRA_WAKE_IRQ("usb2-port-3", 82, 167), 4228 4224 TEGRA_WAKE_IRQ("sw-wake", SW_WAKE_ID, 179), 4229 4225 }; 4230 4226
+4 -4
drivers/soc/xilinx/xlnx_event_manager.c
··· 77 77 78 78 static bool xlnx_is_error_event(const u32 node_id) 79 79 { 80 - u32 pm_family_code, pm_sub_family_code; 80 + u32 pm_family_code; 81 81 82 - zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); 82 + zynqmp_pm_get_family_info(&pm_family_code); 83 83 84 - if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) { 84 + if (pm_family_code == PM_VERSAL_FAMILY_CODE) { 85 85 if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 || 86 86 node_id == VERSAL_EVENT_ERROR_PMC_ERR2 || 87 87 node_id == VERSAL_EVENT_ERROR_PSM_ERR1 || 88 88 node_id == VERSAL_EVENT_ERROR_PSM_ERR2) 89 89 return true; 90 - } else { 90 + } else if (pm_family_code == PM_VERSAL_NET_FAMILY_CODE) { 91 91 if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 || 92 92 node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 || 93 93 node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 ||
+6 -4
drivers/soc/xilinx/zynqmp_power.c
··· 285 285 static int zynqmp_pm_probe(struct platform_device *pdev) 286 286 { 287 287 int ret, irq; 288 - u32 pm_api_version, pm_family_code, pm_sub_family_code, node_id; 288 + u32 pm_api_version, pm_family_code, node_id; 289 289 struct mbox_client *client; 290 290 291 291 ret = zynqmp_pm_get_api_version(&pm_api_version); ··· 315 315 INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work, 316 316 zynqmp_pm_init_suspend_work_fn); 317 317 318 - ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); 318 + ret = zynqmp_pm_get_family_info(&pm_family_code); 319 319 if (ret < 0) 320 320 return ret; 321 321 322 - if (pm_sub_family_code == VERSALNET_SUB_FAMILY_CODE) 322 + if (pm_family_code == PM_VERSAL_NET_FAMILY_CODE) 323 323 node_id = PM_DEV_ACPU_0_0; 324 - else 324 + else if (pm_family_code == PM_VERSAL_FAMILY_CODE) 325 325 node_id = PM_DEV_ACPU_0; 326 + else 327 + return -ENODEV; 326 328 327 329 ret = register_event(&pdev->dev, PM_NOTIFY_CB, node_id, EVENT_SUBSYSTEM_RESTART, 328 330 false, subsystem_restart_event_callback);
+8 -4
drivers/thermal/intel/intel_hfi.c
··· 592 592 hfi_disable(); 593 593 } 594 594 595 - static void hfi_syscore_resume(void) 595 + static void hfi_syscore_resume(void *data) 596 596 { 597 597 /* This code runs only on the boot CPU. */ 598 598 struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0); ··· 603 603 hfi_enable_instance(hfi_instance); 604 604 } 605 605 606 - static int hfi_syscore_suspend(void) 606 + static int hfi_syscore_suspend(void *data) 607 607 { 608 608 /* No locking needed. There is no concurrency with CPU offline. */ 609 609 hfi_disable(); ··· 611 611 return 0; 612 612 } 613 613 614 - static struct syscore_ops hfi_pm_ops = { 614 + static const struct syscore_ops hfi_pm_ops = { 615 615 .resume = hfi_syscore_resume, 616 616 .suspend = hfi_syscore_suspend, 617 + }; 618 + 619 + static struct syscore hfi_pm = { 620 + .ops = &hfi_pm_ops, 617 621 }; 618 622 619 623 static int hfi_thermal_notify(struct notifier_block *nb, unsigned long state, ··· 714 710 if (thermal_genl_register_notifier(&hfi_thermal_nb)) 715 711 goto err_nl_notif; 716 712 717 - register_syscore_ops(&hfi_pm_ops); 713 + register_syscore(&hfi_pm); 718 714 719 715 return; 720 716
+8 -4
drivers/xen/xen-acpi-processor.c
··· 495 495 pr_info("ACPI data upload failed, error = %d\n", rc); 496 496 } 497 497 498 - static void xen_acpi_processor_resume(void) 498 + static void xen_acpi_processor_resume(void *data) 499 499 { 500 500 static DECLARE_WORK(wq, xen_acpi_processor_resume_worker); 501 501 ··· 509 509 schedule_work(&wq); 510 510 } 511 511 512 - static struct syscore_ops xap_syscore_ops = { 512 + static const struct syscore_ops xap_syscore_ops = { 513 513 .resume = xen_acpi_processor_resume, 514 + }; 515 + 516 + static struct syscore xap_syscore = { 517 + .ops = &xap_syscore_ops, 514 518 }; 515 519 516 520 static int __init xen_acpi_processor_init(void) ··· 567 563 if (rc) 568 564 goto err_unregister; 569 565 570 - register_syscore_ops(&xap_syscore_ops); 566 + register_syscore(&xap_syscore); 571 567 572 568 return 0; 573 569 err_unregister: ··· 584 580 { 585 581 int i; 586 582 587 - unregister_syscore_ops(&xap_syscore_ops); 583 + unregister_syscore(&xap_syscore); 588 584 bitmap_free(acpi_ids_done); 589 585 bitmap_free(acpi_id_present); 590 586 bitmap_free(acpi_id_cst_present);
+2
include/dt-bindings/arm/qcom,ids.h
··· 240 240 #define QCOM_ID_SC7280 487 241 241 #define QCOM_ID_SC7180P 495 242 242 #define QCOM_ID_QCM6490 497 243 + #define QCOM_ID_QCS6490 498 243 244 #define QCOM_ID_SM7325P 499 244 245 #define QCOM_ID_IPQ5000 503 245 246 #define QCOM_ID_IPQ0509 504 ··· 287 286 #define QCOM_ID_IPQ5424 651 288 287 #define QCOM_ID_QCM6690 657 289 288 #define QCOM_ID_QCS6690 658 289 + #define QCOM_ID_SM8850 660 290 290 #define QCOM_ID_IPQ5404 671 291 291 #define QCOM_ID_QCS9100 667 292 292 #define QCOM_ID_QCS8300 674
+298
include/dt-bindings/reset/eswin,eic7700-reset.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 + /* 3 + * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.. 4 + * All rights reserved. 5 + * 6 + * Device Tree binding constants for EIC7700 reset controller. 7 + * 8 + * Authors: 9 + * Yifeng Huang <huangyifeng@eswincomputing.com> 10 + * Xuyang Dong <dongxuyang@eswincomputing.com> 11 + */ 12 + 13 + #ifndef __DT_ESWIN_EIC7700_RESET_H__ 14 + #define __DT_ESWIN_EIC7700_RESET_H__ 15 + 16 + #define EIC7700_RESET_NOC_NSP 0 17 + #define EIC7700_RESET_NOC_CFG 1 18 + #define EIC7700_RESET_RNOC_NSP 2 19 + #define EIC7700_RESET_SNOC_TCU 3 20 + #define EIC7700_RESET_SNOC_U84 4 21 + #define EIC7700_RESET_SNOC_PCIE_XSR 5 22 + #define EIC7700_RESET_SNOC_PCIE_XMR 6 23 + #define EIC7700_RESET_SNOC_PCIE_PR 7 24 + #define EIC7700_RESET_SNOC_NPU 8 25 + #define EIC7700_RESET_SNOC_JTAG 9 26 + #define EIC7700_RESET_SNOC_DSP 10 27 + #define EIC7700_RESET_SNOC_DDRC1_P2 11 28 + #define EIC7700_RESET_SNOC_DDRC1_P1 12 29 + #define EIC7700_RESET_SNOC_DDRC0_P2 13 30 + #define EIC7700_RESET_SNOC_DDRC0_P1 14 31 + #define EIC7700_RESET_SNOC_D2D 15 32 + #define EIC7700_RESET_SNOC_AON 16 33 + #define EIC7700_RESET_GPU_AXI 17 34 + #define EIC7700_RESET_GPU_CFG 18 35 + #define EIC7700_RESET_GPU_GRAY 19 36 + #define EIC7700_RESET_GPU_JONES 20 37 + #define EIC7700_RESET_GPU_SPU 21 38 + #define EIC7700_RESET_DSP_AXI 22 39 + #define EIC7700_RESET_DSP_CFG 23 40 + #define EIC7700_RESET_DSP_DIV4 24 41 + #define EIC7700_RESET_DSP_DIV0 25 42 + #define EIC7700_RESET_DSP_DIV1 26 43 + #define EIC7700_RESET_DSP_DIV2 27 44 + #define EIC7700_RESET_DSP_DIV3 28 45 + #define EIC7700_RESET_D2D_AXI 29 46 + #define EIC7700_RESET_D2D_CFG 30 47 + #define EIC7700_RESET_D2D_PRST 31 48 + #define EIC7700_RESET_D2D_RAW_PCS 32 49 + #define EIC7700_RESET_D2D_RX 33 50 + #define EIC7700_RESET_D2D_TX 34 51 + #define EIC7700_RESET_D2D_CORE 35 52 + #define EIC7700_RESET_DDR1_ARST 36 53 + #define EIC7700_RESET_DDR1_TRACE 37 54 + #define 
EIC7700_RESET_DDR0_ARST 38 55 + #define EIC7700_RESET_DDR_CFG 39 56 + #define EIC7700_RESET_DDR0_TRACE 40 57 + #define EIC7700_RESET_DDR_CORE 41 58 + #define EIC7700_RESET_DDR_PRST 42 59 + #define EIC7700_RESET_TCU_AXI 43 60 + #define EIC7700_RESET_TCU_CFG 44 61 + #define EIC7700_RESET_TCU_TBU0 45 62 + #define EIC7700_RESET_TCU_TBU1 46 63 + #define EIC7700_RESET_TCU_TBU2 47 64 + #define EIC7700_RESET_TCU_TBU3 48 65 + #define EIC7700_RESET_TCU_TBU4 49 66 + #define EIC7700_RESET_TCU_TBU5 50 67 + #define EIC7700_RESET_TCU_TBU6 51 68 + #define EIC7700_RESET_TCU_TBU7 52 69 + #define EIC7700_RESET_TCU_TBU8 53 70 + #define EIC7700_RESET_TCU_TBU9 54 71 + #define EIC7700_RESET_TCU_TBU10 55 72 + #define EIC7700_RESET_TCU_TBU11 56 73 + #define EIC7700_RESET_TCU_TBU12 57 74 + #define EIC7700_RESET_TCU_TBU13 58 75 + #define EIC7700_RESET_TCU_TBU14 59 76 + #define EIC7700_RESET_TCU_TBU15 60 77 + #define EIC7700_RESET_TCU_TBU16 61 78 + #define EIC7700_RESET_NPU_AXI 62 79 + #define EIC7700_RESET_NPU_CFG 63 80 + #define EIC7700_RESET_NPU_CORE 64 81 + #define EIC7700_RESET_NPU_E31CORE 65 82 + #define EIC7700_RESET_NPU_E31BUS 66 83 + #define EIC7700_RESET_NPU_E31DBG 67 84 + #define EIC7700_RESET_NPU_LLC 68 85 + #define EIC7700_RESET_HSP_AXI 69 86 + #define EIC7700_RESET_HSP_CFG 70 87 + #define EIC7700_RESET_HSP_POR 71 88 + #define EIC7700_RESET_MSHC0_PHY 72 89 + #define EIC7700_RESET_MSHC1_PHY 73 90 + #define EIC7700_RESET_MSHC2_PHY 74 91 + #define EIC7700_RESET_MSHC0_TXRX 75 92 + #define EIC7700_RESET_MSHC1_TXRX 76 93 + #define EIC7700_RESET_MSHC2_TXRX 77 94 + #define EIC7700_RESET_SATA_ASIC0 78 95 + #define EIC7700_RESET_SATA_OOB 79 96 + #define EIC7700_RESET_SATA_PMALIVE 80 97 + #define EIC7700_RESET_SATA_RBC 81 98 + #define EIC7700_RESET_DMA0 82 99 + #define EIC7700_RESET_HSP_DMA 83 100 + #define EIC7700_RESET_USB0_VAUX 84 101 + #define EIC7700_RESET_USB1_VAUX 85 102 + #define EIC7700_RESET_HSP_SD1_PRST 86 103 + #define EIC7700_RESET_HSP_SD0_PRST 87 104 + #define 
EIC7700_RESET_HSP_EMMC_PRST 88 105 + #define EIC7700_RESET_HSP_DMA_PRST 89 106 + #define EIC7700_RESET_HSP_SD1_ARST 90 107 + #define EIC7700_RESET_HSP_SD0_ARST 91 108 + #define EIC7700_RESET_HSP_EMMC_ARST 92 109 + #define EIC7700_RESET_HSP_DMA_ARST 93 110 + #define EIC7700_RESET_HSP_ETH1_ARST 94 111 + #define EIC7700_RESET_HSP_ETH0_ARST 95 112 + #define EIC7700_RESET_SATA_ARST 96 113 + #define EIC7700_RESET_PCIE_CFG 97 114 + #define EIC7700_RESET_PCIE_POWEUP 98 115 + #define EIC7700_RESET_PCIE_PERST 99 116 + #define EIC7700_RESET_I2C0 100 117 + #define EIC7700_RESET_I2C1 101 118 + #define EIC7700_RESET_I2C2 102 119 + #define EIC7700_RESET_I2C3 103 120 + #define EIC7700_RESET_I2C4 104 121 + #define EIC7700_RESET_I2C5 105 122 + #define EIC7700_RESET_I2C6 106 123 + #define EIC7700_RESET_I2C7 107 124 + #define EIC7700_RESET_I2C8 108 125 + #define EIC7700_RESET_I2C9 109 126 + #define EIC7700_RESET_FAN 110 127 + #define EIC7700_RESET_PVT0 111 128 + #define EIC7700_RESET_PVT1 112 129 + #define EIC7700_RESET_MBOX0 113 130 + #define EIC7700_RESET_MBOX1 114 131 + #define EIC7700_RESET_MBOX2 115 132 + #define EIC7700_RESET_MBOX3 116 133 + #define EIC7700_RESET_MBOX4 117 134 + #define EIC7700_RESET_MBOX5 118 135 + #define EIC7700_RESET_MBOX6 119 136 + #define EIC7700_RESET_MBOX7 120 137 + #define EIC7700_RESET_MBOX8 121 138 + #define EIC7700_RESET_MBOX9 122 139 + #define EIC7700_RESET_MBOX10 123 140 + #define EIC7700_RESET_MBOX11 124 141 + #define EIC7700_RESET_MBOX12 125 142 + #define EIC7700_RESET_MBOX13 126 143 + #define EIC7700_RESET_MBOX14 127 144 + #define EIC7700_RESET_MBOX15 128 145 + #define EIC7700_RESET_UART0 129 146 + #define EIC7700_RESET_UART1 130 147 + #define EIC7700_RESET_UART2 131 148 + #define EIC7700_RESET_UART3 132 149 + #define EIC7700_RESET_UART4 133 150 + #define EIC7700_RESET_GPIO0 134 151 + #define EIC7700_RESET_GPIO1 135 152 + #define EIC7700_RESET_TIMER 136 153 + #define EIC7700_RESET_SSI0 137 154 + #define EIC7700_RESET_SSI1 138 155 + #define 
EIC7700_RESET_WDT0 139 156 + #define EIC7700_RESET_WDT1 140 157 + #define EIC7700_RESET_WDT2 141 158 + #define EIC7700_RESET_WDT3 142 159 + #define EIC7700_RESET_LSP_CFG 143 160 + #define EIC7700_RESET_U84_CORE0 144 161 + #define EIC7700_RESET_U84_CORE1 145 162 + #define EIC7700_RESET_U84_CORE2 146 163 + #define EIC7700_RESET_U84_CORE3 147 164 + #define EIC7700_RESET_U84_BUS 148 165 + #define EIC7700_RESET_U84_DBG 149 166 + #define EIC7700_RESET_U84_TRACECOM 150 167 + #define EIC7700_RESET_U84_TRACE0 151 168 + #define EIC7700_RESET_U84_TRACE1 152 169 + #define EIC7700_RESET_U84_TRACE2 153 170 + #define EIC7700_RESET_U84_TRACE3 154 171 + #define EIC7700_RESET_SCPU_CORE 155 172 + #define EIC7700_RESET_SCPU_BUS 156 173 + #define EIC7700_RESET_SCPU_DBG 157 174 + #define EIC7700_RESET_LPCPU_CORE 158 175 + #define EIC7700_RESET_LPCPU_BUS 159 176 + #define EIC7700_RESET_LPCPU_DBG 160 177 + #define EIC7700_RESET_VC_CFG 161 178 + #define EIC7700_RESET_VC_AXI 162 179 + #define EIC7700_RESET_VC_MONCFG 163 180 + #define EIC7700_RESET_JD_CFG 164 181 + #define EIC7700_RESET_JD_AXI 165 182 + #define EIC7700_RESET_JE_CFG 166 183 + #define EIC7700_RESET_JE_AXI 167 184 + #define EIC7700_RESET_VD_CFG 168 185 + #define EIC7700_RESET_VD_AXI 169 186 + #define EIC7700_RESET_VE_AXI 170 187 + #define EIC7700_RESET_VE_CFG 171 188 + #define EIC7700_RESET_G2D_CORE 172 189 + #define EIC7700_RESET_G2D_CFG 173 190 + #define EIC7700_RESET_G2D_AXI 174 191 + #define EIC7700_RESET_VI_AXI 175 192 + #define EIC7700_RESET_VI_CFG 176 193 + #define EIC7700_RESET_VI_DWE 177 194 + #define EIC7700_RESET_DVP 178 195 + #define EIC7700_RESET_ISP0 179 196 + #define EIC7700_RESET_ISP1 180 197 + #define EIC7700_RESET_SHUTTR0 181 198 + #define EIC7700_RESET_SHUTTR1 182 199 + #define EIC7700_RESET_SHUTTR2 183 200 + #define EIC7700_RESET_SHUTTR3 184 201 + #define EIC7700_RESET_SHUTTR4 185 202 + #define EIC7700_RESET_SHUTTR5 186 203 + #define EIC7700_RESET_VO_MIPI 187 204 + #define EIC7700_RESET_VO_PRST 188 205 + 
#define EIC7700_RESET_VO_HDMI_PRST 189 206 + #define EIC7700_RESET_VO_HDMI_PHY 190 207 + #define EIC7700_RESET_VO_HDMI 191 208 + #define EIC7700_RESET_VO_I2S 192 209 + #define EIC7700_RESET_VO_I2S_PRST 193 210 + #define EIC7700_RESET_VO_AXI 194 211 + #define EIC7700_RESET_VO_CFG 195 212 + #define EIC7700_RESET_VO_DC 196 213 + #define EIC7700_RESET_VO_DC_PRST 197 214 + #define EIC7700_RESET_BOOTSPI_HRST 198 215 + #define EIC7700_RESET_BOOTSPI 199 216 + #define EIC7700_RESET_ANO1 200 217 + #define EIC7700_RESET_ANO0 201 218 + #define EIC7700_RESET_DMA1_ARST 202 219 + #define EIC7700_RESET_DMA1_HRST 203 220 + #define EIC7700_RESET_FPRT 204 221 + #define EIC7700_RESET_HBLOCK 205 222 + #define EIC7700_RESET_SECSR 206 223 + #define EIC7700_RESET_OTP 207 224 + #define EIC7700_RESET_PKA 208 225 + #define EIC7700_RESET_SPACC 209 226 + #define EIC7700_RESET_TRNG 210 227 + #define EIC7700_RESET_TIMER0_0 211 228 + #define EIC7700_RESET_TIMER0_1 212 229 + #define EIC7700_RESET_TIMER0_2 213 230 + #define EIC7700_RESET_TIMER0_3 214 231 + #define EIC7700_RESET_TIMER0_4 215 232 + #define EIC7700_RESET_TIMER0_5 216 233 + #define EIC7700_RESET_TIMER0_6 217 234 + #define EIC7700_RESET_TIMER0_7 218 235 + #define EIC7700_RESET_TIMER0_N 219 236 + #define EIC7700_RESET_TIMER1_0 220 237 + #define EIC7700_RESET_TIMER1_1 221 238 + #define EIC7700_RESET_TIMER1_2 222 239 + #define EIC7700_RESET_TIMER1_3 223 240 + #define EIC7700_RESET_TIMER1_4 224 241 + #define EIC7700_RESET_TIMER1_5 225 242 + #define EIC7700_RESET_TIMER1_6 226 243 + #define EIC7700_RESET_TIMER1_7 227 244 + #define EIC7700_RESET_TIMER1_N 228 245 + #define EIC7700_RESET_TIMER2_0 229 246 + #define EIC7700_RESET_TIMER2_1 230 247 + #define EIC7700_RESET_TIMER2_2 231 248 + #define EIC7700_RESET_TIMER2_3 232 249 + #define EIC7700_RESET_TIMER2_4 233 250 + #define EIC7700_RESET_TIMER2_5 234 251 + #define EIC7700_RESET_TIMER2_6 235 252 + #define EIC7700_RESET_TIMER2_7 236 253 + #define EIC7700_RESET_TIMER2_N 237 254 + #define 
EIC7700_RESET_TIMER3_0 238 255 + #define EIC7700_RESET_TIMER3_1 239 256 + #define EIC7700_RESET_TIMER3_2 240 257 + #define EIC7700_RESET_TIMER3_3 241 258 + #define EIC7700_RESET_TIMER3_4 242 259 + #define EIC7700_RESET_TIMER3_5 243 260 + #define EIC7700_RESET_TIMER3_6 244 261 + #define EIC7700_RESET_TIMER3_7 245 262 + #define EIC7700_RESET_TIMER3_N 246 263 + #define EIC7700_RESET_RTC 247 264 + #define EIC7700_RESET_MNOC_SNOC_NSP 248 265 + #define EIC7700_RESET_MNOC_VC 249 266 + #define EIC7700_RESET_MNOC_CFG 250 267 + #define EIC7700_RESET_MNOC_HSP 251 268 + #define EIC7700_RESET_MNOC_GPU 252 269 + #define EIC7700_RESET_MNOC_DDRC1_P3 253 270 + #define EIC7700_RESET_MNOC_DDRC0_P3 254 271 + #define EIC7700_RESET_RNOC_VO 255 272 + #define EIC7700_RESET_RNOC_VI 256 273 + #define EIC7700_RESET_RNOC_SNOC_NSP 257 274 + #define EIC7700_RESET_RNOC_CFG 258 275 + #define EIC7700_RESET_MNOC_DDRC1_P4 259 276 + #define EIC7700_RESET_MNOC_DDRC0_P4 260 277 + #define EIC7700_RESET_CNOC_VO_CFG 261 278 + #define EIC7700_RESET_CNOC_VI_CFG 262 279 + #define EIC7700_RESET_CNOC_VC_CFG 263 280 + #define EIC7700_RESET_CNOC_TCU_CFG 264 281 + #define EIC7700_RESET_CNOC_PCIE_CFG 265 282 + #define EIC7700_RESET_CNOC_NPU_CFG 266 283 + #define EIC7700_RESET_CNOC_LSP_CFG 267 284 + #define EIC7700_RESET_CNOC_HSP_CFG 268 285 + #define EIC7700_RESET_CNOC_GPU_CFG 269 286 + #define EIC7700_RESET_CNOC_DSPT_CFG 270 287 + #define EIC7700_RESET_CNOC_DDRT1_CFG 271 288 + #define EIC7700_RESET_CNOC_DDRT0_CFG 272 289 + #define EIC7700_RESET_CNOC_D2D_CFG 273 290 + #define EIC7700_RESET_CNOC_CFG 274 291 + #define EIC7700_RESET_CNOC_CLMM_CFG 275 292 + #define EIC7700_RESET_CNOC_AON_CFG 276 293 + #define EIC7700_RESET_LNOC_CFG 277 294 + #define EIC7700_RESET_LNOC_NPU_LLC 278 295 + #define EIC7700_RESET_LNOC_DDRC1_P0 279 296 + #define EIC7700_RESET_LNOC_DDRC0_P0 280 297 + 298 + #endif /* __DT_ESWIN_EIC7700_RESET_H__ */
+216 -3
include/dt-bindings/reset/thead,th1520-reset.h
··· 7 7 #ifndef _DT_BINDINGS_TH1520_RESET_H 8 8 #define _DT_BINDINGS_TH1520_RESET_H 9 9 10 + /* AO Subsystem */ 11 + #define TH1520_RESET_ID_SYSTEM 0 12 + #define TH1520_RESET_ID_RTC_APB 1 13 + #define TH1520_RESET_ID_RTC_REF 2 14 + #define TH1520_RESET_ID_AOGPIO_DB 3 15 + #define TH1520_RESET_ID_AOGPIO_APB 4 16 + #define TH1520_RESET_ID_AOI2C_APB 5 17 + #define TH1520_RESET_ID_PVT_APB 6 18 + #define TH1520_RESET_ID_E902_CORE 7 19 + #define TH1520_RESET_ID_E902_HAD 8 20 + #define TH1520_RESET_ID_AOTIMER_APB 9 21 + #define TH1520_RESET_ID_AOTIMER_CORE 10 22 + #define TH1520_RESET_ID_AOWDT_APB 11 23 + #define TH1520_RESET_ID_APSYS 12 24 + #define TH1520_RESET_ID_NPUSYS 13 25 + #define TH1520_RESET_ID_DDRSYS 14 26 + #define TH1520_RESET_ID_AXI_AP2CP 15 27 + #define TH1520_RESET_ID_AXI_CP2AP 16 28 + #define TH1520_RESET_ID_AXI_CP2SRAM 17 29 + #define TH1520_RESET_ID_AUDSYS_CORE 18 30 + #define TH1520_RESET_ID_AUDSYS_IOPMP 19 31 + #define TH1520_RESET_ID_AUDSYS 20 32 + #define TH1520_RESET_ID_DSP0 21 33 + #define TH1520_RESET_ID_DSP1 22 34 + #define TH1520_RESET_ID_GPU_MODULE 23 35 + #define TH1520_RESET_ID_VDEC 24 36 + #define TH1520_RESET_ID_VENC 25 37 + #define TH1520_RESET_ID_ADC_APB 26 38 + #define TH1520_RESET_ID_AUDGPIO_DB 27 39 + #define TH1520_RESET_ID_AUDGPIO_APB 28 40 + #define TH1520_RESET_ID_AOUART_IF 29 41 + #define TH1520_RESET_ID_AOUART_APB 30 42 + #define TH1520_RESET_ID_SRAM_AXI_P0 31 43 + #define TH1520_RESET_ID_SRAM_AXI_P1 32 44 + #define TH1520_RESET_ID_SRAM_AXI_P2 33 45 + #define TH1520_RESET_ID_SRAM_AXI_P3 34 46 + #define TH1520_RESET_ID_SRAM_AXI_P4 35 47 + #define TH1520_RESET_ID_SRAM_AXI_CORE 36 48 + #define TH1520_RESET_ID_SE 37 49 + 50 + /* AP Subsystem */ 51 + #define TH1520_RESET_ID_BROM 0 52 + #define TH1520_RESET_ID_C910_TOP 1 53 + #define TH1520_RESET_ID_NPU 2 54 + #define TH1520_RESET_ID_WDT0 3 55 + #define TH1520_RESET_ID_WDT1 4 56 + #define TH1520_RESET_ID_C910_C0 5 57 + #define TH1520_RESET_ID_C910_C1 6 58 + #define 
TH1520_RESET_ID_C910_C2 7 59 + #define TH1520_RESET_ID_C910_C3 8 60 + #define TH1520_RESET_ID_CHIP_DBG_CORE 9 61 + #define TH1520_RESET_ID_CHIP_DBG_AXI 10 62 + #define TH1520_RESET_ID_AXI4_CPUSYS2_AXI 11 63 + #define TH1520_RESET_ID_AXI4_CPUSYS2_APB 12 64 + #define TH1520_RESET_ID_X2H_CPUSYS 13 65 + #define TH1520_RESET_ID_AHB2_CPUSYS 14 66 + #define TH1520_RESET_ID_APB3_CPUSYS 15 67 + #define TH1520_RESET_ID_MBOX0_APB 16 68 + #define TH1520_RESET_ID_MBOX1_APB 17 69 + #define TH1520_RESET_ID_MBOX2_APB 18 70 + #define TH1520_RESET_ID_MBOX3_APB 19 71 + #define TH1520_RESET_ID_TIMER0_APB 20 72 + #define TH1520_RESET_ID_TIMER0_CORE 21 73 + #define TH1520_RESET_ID_TIMER1_APB 22 74 + #define TH1520_RESET_ID_TIMER1_CORE 23 75 + #define TH1520_RESET_ID_PERISYS_AHB 24 76 + #define TH1520_RESET_ID_PERISYS_APB1 25 77 + #define TH1520_RESET_ID_PERISYS_APB2 26 78 + #define TH1520_RESET_ID_GMAC0_APB 27 79 + #define TH1520_RESET_ID_GMAC0_AHB 28 80 + #define TH1520_RESET_ID_GMAC0_CLKGEN 29 81 + #define TH1520_RESET_ID_GMAC0_AXI 30 82 + #define TH1520_RESET_ID_UART0_APB 31 83 + #define TH1520_RESET_ID_UART0_IF 32 84 + #define TH1520_RESET_ID_UART1_APB 33 85 + #define TH1520_RESET_ID_UART1_IF 34 86 + #define TH1520_RESET_ID_UART2_APB 35 87 + #define TH1520_RESET_ID_UART2_IF 36 88 + #define TH1520_RESET_ID_UART3_APB 37 89 + #define TH1520_RESET_ID_UART3_IF 38 90 + #define TH1520_RESET_ID_UART4_APB 39 91 + #define TH1520_RESET_ID_UART4_IF 40 92 + #define TH1520_RESET_ID_UART5_APB 41 93 + #define TH1520_RESET_ID_UART5_IF 42 94 + #define TH1520_RESET_ID_QSPI0_IF 43 95 + #define TH1520_RESET_ID_QSPI0_APB 44 96 + #define TH1520_RESET_ID_QSPI1_IF 45 97 + #define TH1520_RESET_ID_QSPI1_APB 46 98 + #define TH1520_RESET_ID_SPI_IF 47 99 + #define TH1520_RESET_ID_SPI_APB 48 100 + #define TH1520_RESET_ID_I2C0_APB 49 101 + #define TH1520_RESET_ID_I2C0_CORE 50 102 + #define TH1520_RESET_ID_I2C1_APB 51 103 + #define TH1520_RESET_ID_I2C1_CORE 52 104 + #define TH1520_RESET_ID_I2C2_APB 53 105 + #define 
TH1520_RESET_ID_I2C2_CORE 54 106 + #define TH1520_RESET_ID_I2C3_APB 55 107 + #define TH1520_RESET_ID_I2C3_CORE 56 108 + #define TH1520_RESET_ID_I2C4_APB 57 109 + #define TH1520_RESET_ID_I2C4_CORE 58 110 + #define TH1520_RESET_ID_I2C5_APB 59 111 + #define TH1520_RESET_ID_I2C5_CORE 60 112 + #define TH1520_RESET_ID_GPIO0_DB 61 113 + #define TH1520_RESET_ID_GPIO0_APB 62 114 + #define TH1520_RESET_ID_GPIO1_DB 63 115 + #define TH1520_RESET_ID_GPIO1_APB 64 116 + #define TH1520_RESET_ID_GPIO2_DB 65 117 + #define TH1520_RESET_ID_GPIO2_APB 66 118 + #define TH1520_RESET_ID_PWM_COUNTER 67 119 + #define TH1520_RESET_ID_PWM_APB 68 120 + #define TH1520_RESET_ID_PADCTRL0_APB 69 121 + #define TH1520_RESET_ID_CPU2PERI_X2H 70 122 + #define TH1520_RESET_ID_CPU2AON_X2H 71 123 + #define TH1520_RESET_ID_AON2CPU_A2X 72 124 + #define TH1520_RESET_ID_NPUSYS_AXI 73 125 + #define TH1520_RESET_ID_NPUSYS_AXI_APB 74 126 + #define TH1520_RESET_ID_CPU2VP_X2P 75 127 + #define TH1520_RESET_ID_CPU2VI_X2H 76 128 + #define TH1520_RESET_ID_BMU_AXI 77 129 + #define TH1520_RESET_ID_BMU_APB 78 130 + #define TH1520_RESET_ID_DMAC_CPUSYS_AXI 79 131 + #define TH1520_RESET_ID_DMAC_CPUSYS_AHB 80 132 + #define TH1520_RESET_ID_SPINLOCK 81 133 + #define TH1520_RESET_ID_CFG2TEE 82 134 + #define TH1520_RESET_ID_DSMART 83 135 + #define TH1520_RESET_ID_GPIO3_DB 84 136 + #define TH1520_RESET_ID_GPIO3_APB 85 137 + #define TH1520_RESET_ID_PERI_I2S 86 138 + #define TH1520_RESET_ID_PERI_APB3 87 139 + #define TH1520_RESET_ID_PERI2PERI1_APB 88 140 + #define TH1520_RESET_ID_VPSYS_APB 89 141 + #define TH1520_RESET_ID_PERISYS_APB4 90 142 + #define TH1520_RESET_ID_GMAC1_APB 91 143 + #define TH1520_RESET_ID_GMAC1_AHB 92 144 + #define TH1520_RESET_ID_GMAC1_CLKGEN 93 145 + #define TH1520_RESET_ID_GMAC1_AXI 94 146 + #define TH1520_RESET_ID_GMAC_AXI 95 147 + #define TH1520_RESET_ID_GMAC_AXI_APB 96 148 + #define TH1520_RESET_ID_PADCTRL1_APB 97 149 + #define TH1520_RESET_ID_VOSYS_AXI 98 150 + #define TH1520_RESET_ID_VOSYS_AXI_APB 99 151 
+ #define TH1520_RESET_ID_VOSYS_AXI_X2X 100 152 + #define TH1520_RESET_ID_MISC2VP_X2X 101 153 + #define TH1520_RESET_ID_DSPSYS 102 154 + #define TH1520_RESET_ID_VISYS 103 155 + #define TH1520_RESET_ID_VOSYS 104 156 + #define TH1520_RESET_ID_VPSYS 105 157 + 158 + /* DSP Subsystem */ 159 + #define TH1520_RESET_ID_X2X_DSP1 0 160 + #define TH1520_RESET_ID_X2X_DSP0 1 161 + #define TH1520_RESET_ID_X2X_SLAVE_DSP1 2 162 + #define TH1520_RESET_ID_X2X_SLAVE_DSP0 3 163 + #define TH1520_RESET_ID_DSP0_CORE 4 164 + #define TH1520_RESET_ID_DSP0_DEBUG 5 165 + #define TH1520_RESET_ID_DSP0_APB 6 166 + #define TH1520_RESET_ID_DSP1_CORE 7 167 + #define TH1520_RESET_ID_DSP1_DEBUG 8 168 + #define TH1520_RESET_ID_DSP1_APB 9 169 + #define TH1520_RESET_ID_DSPSYS_APB 10 170 + #define TH1520_RESET_ID_AXI4_DSPSYS_SLV 11 171 + #define TH1520_RESET_ID_AXI4_DSPSYS 12 172 + #define TH1520_RESET_ID_AXI4_DSP_RS 13 173 + 174 + /* MISC Subsystem */ 175 + #define TH1520_RESET_ID_EMMC_SDIO_CLKGEN 0 176 + #define TH1520_RESET_ID_EMMC 1 177 + #define TH1520_RESET_ID_MISCSYS_AXI 2 178 + #define TH1520_RESET_ID_MISCSYS_AXI_APB 3 179 + #define TH1520_RESET_ID_SDIO0 4 180 + #define TH1520_RESET_ID_SDIO1 5 181 + #define TH1520_RESET_ID_USB3_APB 6 182 + #define TH1520_RESET_ID_USB3_PHY 7 183 + #define TH1520_RESET_ID_USB3_VCC 8 184 + 185 + /* VI Subsystem */ 186 + #define TH1520_RESET_ID_ISP0 0 187 + #define TH1520_RESET_ID_ISP1 1 188 + #define TH1520_RESET_ID_CSI0_APB 2 189 + #define TH1520_RESET_ID_CSI1_APB 3 190 + #define TH1520_RESET_ID_CSI2_APB 4 191 + #define TH1520_RESET_ID_MIPI_FIFO 5 192 + #define TH1520_RESET_ID_ISP_VENC_APB 6 193 + #define TH1520_RESET_ID_VIPRE_APB 7 194 + #define TH1520_RESET_ID_VIPRE_AXI 8 195 + #define TH1520_RESET_ID_DW200_APB 9 196 + #define TH1520_RESET_ID_VISYS3_AXI 10 197 + #define TH1520_RESET_ID_VISYS2_AXI 11 198 + #define TH1520_RESET_ID_VISYS1_AXI 12 199 + #define TH1520_RESET_ID_VISYS_AXI 13 200 + #define TH1520_RESET_ID_VISYS_APB 14 201 + #define 
TH1520_RESET_ID_ISP_VENC_AXI 15 202 + 203 + /* VO Subsystem */ 10 204 #define TH1520_RESET_ID_GPU 0 11 205 #define TH1520_RESET_ID_GPU_CLKGEN 1 12 - #define TH1520_RESET_ID_NPU 2 13 - #define TH1520_RESET_ID_WDT0 3 14 - #define TH1520_RESET_ID_WDT1 4 15 206 #define TH1520_RESET_ID_DPU_AHB 5 16 207 #define TH1520_RESET_ID_DPU_AXI 6 17 208 #define TH1520_RESET_ID_DPU_CORE 7 ··· 210 19 #define TH1520_RESET_ID_DSI1_APB 9 211 20 #define TH1520_RESET_ID_HDMI 10 212 21 #define TH1520_RESET_ID_HDMI_APB 11 22 + #define TH1520_RESET_ID_VOAXI 12 23 + #define TH1520_RESET_ID_VOAXI_APB 13 24 + #define TH1520_RESET_ID_X2H_DPU_AXI 14 25 + #define TH1520_RESET_ID_X2H_DPU_AHB 15 26 + #define TH1520_RESET_ID_X2H_DPU1_AXI 16 27 + #define TH1520_RESET_ID_X2H_DPU1_AHB 17 28 + 29 + /* VP Subsystem */ 30 + #define TH1520_RESET_ID_VPSYS_AXI_APB 0 31 + #define TH1520_RESET_ID_VPSYS_AXI 1 32 + #define TH1520_RESET_ID_FCE_APB 2 33 + #define TH1520_RESET_ID_FCE_CORE 3 34 + #define TH1520_RESET_ID_FCE_X2X_MASTER 4 35 + #define TH1520_RESET_ID_FCE_X2X_SLAVE 5 36 + #define TH1520_RESET_ID_G2D_APB 6 37 + #define TH1520_RESET_ID_G2D_ACLK 7 38 + #define TH1520_RESET_ID_G2D_CORE 8 39 + #define TH1520_RESET_ID_VDEC_APB 9 40 + #define TH1520_RESET_ID_VDEC_ACLK 10 41 + #define TH1520_RESET_ID_VDEC_CORE 11 42 + #define TH1520_RESET_ID_VENC_APB 12 43 + #define TH1520_RESET_ID_VENC_CORE 13 213 44 214 45 #endif /* _DT_BINDINGS_TH1520_RESET_H */
+8
include/linux/err.h
··· 41 41 return (void *) error; 42 42 } 43 43 44 + /** 45 + * INIT_ERR_PTR - Init a const error pointer. 46 + * @error: A negative error code. 47 + * 48 + * Like ERR_PTR(), but usable to initialize static variables. 49 + */ 50 + #define INIT_ERR_PTR(error) ((void *)(error)) 51 + 44 52 /* Return the pointer in the percpu address space. */ 45 53 #define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error)) 46 54
+12 -3
include/linux/firmware/qcom/qcom_tzmem.h
··· 17 17 * enum qcom_tzmem_policy - Policy for pool growth. 18 18 */ 19 19 enum qcom_tzmem_policy { 20 - /**< Static pool, never grow above initial size. */ 20 + /** 21 + * @QCOM_TZMEM_POLICY_STATIC: Static pool, 22 + * never grow above initial size. 23 + */ 21 24 QCOM_TZMEM_POLICY_STATIC = 1, 22 - /**< When out of memory, add increment * current size of memory. */ 25 + /** 26 + * @QCOM_TZMEM_POLICY_MULTIPLIER: When out of memory, 27 + * add increment * current size of memory. 28 + */ 23 29 QCOM_TZMEM_POLICY_MULTIPLIER, 24 - /**< When out of memory add as much as is needed until max_size. */ 30 + /** 31 + * @QCOM_TZMEM_POLICY_ON_DEMAND: When out of memory 32 + * add as much as is needed until max_size. 33 + */ 25 34 QCOM_TZMEM_POLICY_ON_DEMAND, 26 35 }; 27 36
+17 -13
include/linux/firmware/xlnx-zynqmp.h
··· 3 3 * Xilinx Zynq MPSoC Firmware layer 4 4 * 5 5 * Copyright (C) 2014-2021 Xilinx 6 - * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. 6 + * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc. 7 7 * 8 8 * Michal Simek <michal.simek@amd.com> 9 9 * Davorin Mista <davorin.mista@aggios.com> ··· 51 51 52 52 #define PM_PINCTRL_PARAM_SET_VERSION 2 53 53 54 - #define ZYNQMP_FAMILY_CODE 0x23 55 - #define VERSAL_FAMILY_CODE 0x26 56 - 57 - /* When all subfamily of platform need to support */ 58 - #define ALL_SUB_FAMILY_CODE 0x00 59 - #define VERSAL_SUB_FAMILY_CODE 0x01 60 - #define VERSALNET_SUB_FAMILY_CODE 0x03 61 - 62 - #define FAMILY_CODE_MASK GENMASK(27, 21) 63 - #define SUB_FAMILY_CODE_MASK GENMASK(20, 19) 54 + /* Family codes */ 55 + #define PM_ZYNQMP_FAMILY_CODE 0x1 /* ZynqMP family code */ 56 + #define PM_VERSAL_FAMILY_CODE 0x2 /* Versal family code */ 57 + #define PM_VERSAL_NET_FAMILY_CODE 0x3 /* Versal NET family code */ 64 58 65 59 #define API_ID_MASK GENMASK(7, 0) 66 60 #define MODULE_ID_MASK GENMASK(11, 8) ··· 158 164 enum pm_api_id { 159 165 PM_API_FEATURES = 0, 160 166 PM_GET_API_VERSION = 1, 167 + PM_GET_NODE_STATUS = 3, 161 168 PM_REGISTER_NOTIFIER = 5, 162 169 PM_FORCE_POWERDOWN = 8, 163 170 PM_REQUEST_WAKEUP = 10, ··· 559 564 #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) 560 565 int zynqmp_pm_get_api_version(u32 *version); 561 566 int zynqmp_pm_get_chipid(u32 *idcode, u32 *version); 562 - int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily); 567 + int zynqmp_pm_get_family_info(u32 *family); 563 568 int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out); 564 569 int zynqmp_pm_clock_enable(u32 clock_id); 565 570 int zynqmp_pm_clock_disable(u32 clock_id); ··· 624 629 int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode); 625 630 int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode); 626 631 int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode); 632 + int 
zynqmp_pm_get_node_status(const u32 node, u32 *const status, 633 + u32 *const requirements, u32 *const usage); 627 634 int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value); 628 635 int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config, 629 636 u32 value); ··· 640 643 return -ENODEV; 641 644 } 642 645 643 - static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) 646 + static inline int zynqmp_pm_get_family_info(u32 *family) 644 647 { 645 648 return -ENODEV; 646 649 } ··· 924 927 } 925 928 926 929 static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode) 930 + { 931 + return -ENODEV; 932 + } 933 + 934 + static inline int zynqmp_pm_get_node_status(const u32 node, u32 *const status, 935 + u32 *const requirements, 936 + u32 *const usage) 927 937 { 928 938 return -ENODEV; 929 939 }
-33
include/linux/reset-controller.h
··· 27 27 struct of_phandle_args; 28 28 29 29 /** 30 - * struct reset_control_lookup - represents a single lookup entry 31 - * 32 - * @list: internal list of all reset lookup entries 33 - * @provider: name of the reset controller device controlling this reset line 34 - * @index: ID of the reset controller in the reset controller device 35 - * @dev_id: name of the device associated with this reset line 36 - * @con_id: name of the reset line (can be NULL) 37 - */ 38 - struct reset_control_lookup { 39 - struct list_head list; 40 - const char *provider; 41 - unsigned int index; 42 - const char *dev_id; 43 - const char *con_id; 44 - }; 45 - 46 - #define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ 47 - { \ 48 - .provider = _provider, \ 49 - .index = _index, \ 50 - .dev_id = _dev_id, \ 51 - .con_id = _con_id, \ 52 - } 53 - 54 - /** 55 30 * struct reset_controller_dev - reset controller entity that might 56 31 * provide multiple reset controls 57 32 * @ops: a pointer to device specific struct reset_control_ops ··· 65 90 struct device; 66 91 int devm_reset_controller_register(struct device *dev, 67 92 struct reset_controller_dev *rcdev); 68 - 69 - void reset_controller_add_lookup(struct reset_control_lookup *lookup, 70 - unsigned int num_entries); 71 93 #else 72 94 static inline int reset_controller_register(struct reset_controller_dev *rcdev) 73 95 { ··· 79 107 struct reset_controller_dev *rcdev) 80 108 { 81 109 return 0; 82 - } 83 - 84 - static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup, 85 - unsigned int num_entries) 86 - { 87 110 } 88 111 #endif 89 112
+1
include/linux/reset.h
··· 2 2 #ifndef _LINUX_RESET_H_ 3 3 #define _LINUX_RESET_H_ 4 4 5 + #include <linux/bits.h> 5 6 #include <linux/err.h> 6 7 #include <linux/errno.h> 7 8 #include <linux/types.h>
+7
include/linux/soc/qcom/llcc-qcom.h
··· 74 74 #define LLCC_CAMSRTIP 73 75 75 #define LLCC_CAMRTRF 74 76 76 #define LLCC_CAMSRTRF 75 77 + #define LLCC_VIDEO_APV 83 78 + #define LLCC_COMPUTE1 87 79 + #define LLCC_CPUSS_OPP 88 77 80 #define LLCC_CPUSSMPAM 89 81 + #define LLCC_CAM_IPE_STROV 92 82 + #define LLCC_CAM_OFE_STROV 93 83 + #define LLCC_CPUSS_HEU 94 84 + #define LLCC_MDM_PNG_FIXED 100 78 85 79 86 /** 80 87 * struct llcc_slice_desc - Cache slice descriptor
+4
include/linux/soc/qcom/socinfo.h
··· 82 82 __le32 num_func_clusters; 83 83 __le32 boot_cluster; 84 84 __le32 boot_core; 85 + /* Version 20 */ 86 + __le32 raw_package_type; 87 + /* Version 21, 22, 23 */ 88 + __le32 reserve1[4]; 85 89 }; 86 90 87 91 /* Internal feature codes */
+1
include/linux/soc/qcom/ubwc.h
··· 52 52 #define UBWC_4_0 0x40000000 53 53 #define UBWC_4_3 0x40030000 54 54 #define UBWC_5_0 0x50000000 55 + #define UBWC_6_0 0x60000000 55 56 56 57 #if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG) 57 58 const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void);
+335 -8
include/linux/soc/samsung/exynos-regs-pmu.h
··· 672 672 673 673 /* For Tensor GS101 */ 674 674 /* PMU ALIVE */ 675 - #define GS101_SYSIP_DAT0 (0x810) 676 - #define GS101_CPU0_INFORM (0x860) 677 - #define GS101_CPU_INFORM(cpu) \ 678 - (GS101_CPU0_INFORM + (cpu*4)) 679 - #define GS101_SYSTEM_CONFIGURATION (0x3A00) 680 - #define GS101_EINT_WAKEUP_MASK (0x3A80) 681 - #define GS101_PHY_CTRL_USB20 (0x3EB0) 682 - #define GS101_PHY_CTRL_USBDP (0x3EB4) 675 + #define GS101_OM_STAT 0x0000 676 + #define GS101_VERSION 0x0004 677 + #define GS101_PORESET_CHECK 0x0008 678 + #define GS101_OTP_STATUS 0x000c 679 + #define GS101_SYSTEM_INFO 0x0010 680 + #define GS101_IDLE_IP(n) (0x03e0 + ((n) & 3) * 4) 681 + #define GS101_IDLE_IP_MASK(n) (0x03f0 + ((n) & 3) * 4) 682 + #define GS101_SLC_CH_OFFSET(ch) (0x0400 + ((ch) & 3) * 0x10) 683 + #define GS101_DATARAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x00) 684 + #define GS101_TAGRAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x04) 685 + #define GS101_LRURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x08) 686 + #define GS101_PPMPURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x0c) 687 + #define GS101_DATARAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x40) 688 + #define GS101_TAGRAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x44) 689 + #define GS101_LRURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x48) 690 + #define GS101_PPMPURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x4c) 691 + #define GS101_INFORM0 0x0800 692 + #define GS101_INFORM1 0x0804 693 + #define GS101_INFORM2 0x0808 694 + #define GS101_INFORM3 0x080c 695 + #define GS101_SYSIP_DAT(n) (0x0810 + ((n) & 3) * 4) 696 + #define GS101_PWR_HOLD_HW_TRIP 0x0820 697 + #define GS101_PWR_HOLD_SW_TRIP 0x0824 698 + #define GS101_GSA_INFORM(n) (0x0830 + ((n) & 1) * 4) 699 + #define GS101_INFORM4 0x0840 700 + #define GS101_INFORM5 0x0844 701 + #define GS101_INFORM6 0x0848 702 + #define GS101_INFORM7 0x084c 703 + #define GS101_INFORM8 0x0850 704 + #define GS101_INFORM9 0x0854 705 + #define GS101_INFORM10 0x0858 706 + 
#define GS101_INFORM11 0x085c 707 + #define GS101_CPU_INFORM(cpu) (0x0860 + ((cpu) & 7) * 4) 708 + #define GS101_IROM_INFORM 0x0880 709 + #define GS101_IROM_CPU_INFORM(cpu) (0x0890 + ((cpu) & 7) * 4) 710 + #define GS101_PMU_SPARE(n) (0x0900 + ((n) & 3) * 4) 711 + #define GS101_IROM_DATA_REG(n) (0x0980 + ((n) & 3) * 4) 712 + #define GS101_IROM_PWRMODE 0x0990 713 + #define GS101_DREX_CALIBRATION(n) (0x09a0 + ((n) & 7) * 4) 714 + 715 + #define GS101_CLUSTER0_OFFSET 0x1000 716 + #define GS101_CLUSTER1_OFFSET 0x1300 717 + #define GS101_CLUSTER2_OFFSET 0x1500 718 + #define GS101_CLUSTER_CPU_OFFSET(cl, cpu) ((cl) + ((cpu) * 0x80)) 719 + #define GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu) \ 720 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x00) 721 + #define GS101_CLUSTER_CPU_STATUS(cl, cpu) \ 722 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x04) 723 + #define GS101_CLUSTER_CPU_STATES(cl, cpu) \ 724 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x08) 725 + #define GS101_CLUSTER_CPU_OPTION(cl, cpu) \ 726 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x0c) 727 + #define GS101_CLUSTER_CPU_OUT(cl, cpu) \ 728 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x20) 729 + #define GS101_CLUSTER_CPU_IN(cl, cpu) \ 730 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x24) 731 + #define GS101_CLUSTER_CPU_INT_IN(cl, cpu) \ 732 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x40) 733 + #define GS101_CLUSTER_CPU_INT_EN(cl, cpu) \ 734 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x44) 735 + #define GS101_CLUSTER_CPU_INT_TYPE(cl, cpu) \ 736 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x48) 737 + #define GS101_CLUSTER_CPU_INT_DIR(cl, cpu) \ 738 + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x4c) 739 + 740 + #define GS101_CLUSTER_NONCPU_OFFSET(cl) (0x1200 + ((cl) * 0x200)) 741 + #define GS101_CLUSTER_NONCPU_CONFIGURATION(cl) \ 742 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x00) 743 + #define GS101_CLUSTER_NONCPU_STATUS(cl) \ 744 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x04) 745 + #define GS101_CLUSTER_NONCPU_STATES(cl) \ 746 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x08) 747 
+ #define GS101_CLUSTER_NONCPU_OPTION(cl) \ 748 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x0c) 749 + #define GS101_CLUSTER_NONCPU_OUT(cl) \ 750 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x20) 751 + #define GS101_CLUSTER_NONCPU_IN(cl) \ 752 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x24) 753 + #define GS101_CLUSTER_NONCPU_INT_IN(cl) \ 754 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x40) 755 + #define GS101_CLUSTER_NONCPU_INT_EN(cl) \ 756 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x44) 757 + #define GS101_CLUSTER_NONCPU_INT_TYPE(cl) \ 758 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x48) 759 + #define GS101_CLUSTER_NONCPU_INT_DIR(cl) \ 760 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x4c) 761 + #define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl) \ 762 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x60) 763 + #define GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl) \ 764 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x64) 765 + #define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl) \ 766 + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x6c) 767 + #define GS101_CLUSTER0_NONCPU_DSU_PCH \ 768 + (GS101_CLUSTER_NONCPU_OFFSET(0) + 0x80) 769 + 770 + #define GS101_SUBBBLK_OFFSET_ALIVE 0x1800 771 + #define GS101_SUBBBLK_OFFSET_AOC 0x1880 772 + #define GS101_SUBBBLK_OFFSET_APM 0x1900 773 + #define GS101_SUBBBLK_OFFSET_CMU 0x1980 774 + #define GS101_SUBBBLK_OFFSET_BUS0 0x1a00 775 + #define GS101_SUBBBLK_OFFSET_BUS1 0x1a80 776 + #define GS101_SUBBBLK_OFFSET_BUS2 0x1b00 777 + #define GS101_SUBBBLK_OFFSET_CORE 0x1b80 778 + #define GS101_SUBBBLK_OFFSET_EH 0x1c00 779 + #define GS101_SUBBBLK_OFFSET_CPUCL0 0x1c80 780 + #define GS101_SUBBBLK_OFFSET_CPUCL1 0x1d00 781 + #define GS101_SUBBBLK_OFFSET_CPUCL2 0x1d80 782 + #define GS101_SUBBBLK_OFFSET_G3D 0x1e00 783 + #define GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0 0x1e80 784 + #define GS101_SUBBBLK_OFFSET_EMBEDDED_G3D 0x2000 785 + #define GS101_SUBBBLK_OFFSET_HSI0 0x2080 786 + #define GS101_SUBBBLK_OFFSET_HSI1 0x2100 787 + #define GS101_SUBBBLK_OFFSET_HSI2 0x2180 788 + #define GS101_SUBBBLK_OFFSET_DPU 0x2200 789 + #define 
GS101_SUBBBLK_OFFSET_DISP 0x2280 790 + #define GS101_SUBBBLK_OFFSET_G2D 0x2300 791 + #define GS101_SUBBBLK_OFFSET_MFC 0x2380 792 + #define GS101_SUBBBLK_OFFSET_CSIS 0x2400 793 + #define GS101_SUBBBLK_OFFSET_PDP 0x2480 794 + #define GS101_SUBBBLK_OFFSET_DNS 0x2500 795 + #define GS101_SUBBBLK_OFFSET_G3AA 0x2580 796 + #define GS101_SUBBBLK_OFFSET_IPP 0x2600 797 + #define GS101_SUBBBLK_OFFSET_ITP 0x2680 798 + #define GS101_SUBBBLK_OFFSET_MCSC 0x2700 799 + #define GS101_SUBBBLK_OFFSET_GDC 0x2780 800 + #define GS101_SUBBBLK_OFFSET_TNR 0x2800 801 + #define GS101_SUBBBLK_OFFSET_BO 0x2880 802 + #define GS101_SUBBBLK_OFFSET_TPU 0x2900 803 + #define GS101_SUBBBLK_OFFSET_MIF0 0x2980 804 + #define GS101_SUBBBLK_OFFSET_MIF1 0x2a00 805 + #define GS101_SUBBBLK_OFFSET_MIF2 0x2a80 806 + #define GS101_SUBBBLK_OFFSET_MIF3 0x2b00 807 + #define GS101_SUBBBLK_OFFSET_MISC 0x2b80 808 + #define GS101_SUBBBLK_OFFSET_PERIC0 0x2c00 809 + #define GS101_SUBBBLK_OFFSET_PERIC1 0x2c80 810 + #define GS101_SUBBBLK_OFFSET_S2D 0x2d00 811 + #define GS101_SUBBLK_CONFIGURATION(blk) ((blk) + 0x00) 812 + #define GS101_SUBBLK_STATUS(blk) ((blk) + 0x04) 813 + #define GS101_SUBBLK_STATES(blk) ((blk) + 0x08) 814 + #define GS101_SUBBLK_OPTION(blk) ((blk) + 0x0c) 815 + #define GS101_SUBBLK_CTRL(blk) ((blk) + 0x10) 816 + #define GS101_SUBBLK_OUT(blk) ((blk) + 0x20) 817 + #define GS101_SUBBLK_IN(blk) ((blk) + 0x24) 818 + #define GS101_SUBBLK_INT_IN(blk) ((blk) + 0x40) 819 + #define GS101_SUBBLK_INT_EN(blk) ((blk) + 0x44) 820 + #define GS101_SUBBLK_INT_TYPE(blk) ((blk) + 0x48) 821 + #define GS101_SUBBLK_INT_DIR(blk) ((blk) + 0x4c) 822 + #define GS101_SUBBLK_MEMORY_OUT(blk) ((blk) + 0x60) 823 + #define GS101_SUBBLK_MEMORY_IN(blk) ((blk) + 0x64) 824 + 825 + #define GS101_SUBBBLK_CPU_OFFSET_APM 0x3000 826 + #define GS101_SUBBBLK_CPU_OFFSET_DBGCORE 0x3080 827 + #define GS101_SUBBBLK_CPU_OFFSET_SSS 0x3100 828 + #define GS101_SUBBLK_CPU_CONFIGURATION(blk) ((blk) + 0x00) 829 + #define GS101_SUBBLK_CPU_STATUS(blk) ((blk) + 
0x04) 830 + #define GS101_SUBBLK_CPU_STATES(blk) ((blk) + 0x08) 831 + #define GS101_SUBBLK_CPU_OPTION(blk) ((blk) + 0x0c) 832 + #define GS101_SUBBLK_CPU_OUT(blk) ((blk) + 0x20) 833 + #define GS101_SUBBLK_CPU_IN(blk) ((blk) + 0x24) 834 + #define GS101_SUBBLK_CPU_INT_IN(blk) ((blk) + 0x40) 835 + #define GS101_SUBBLK_CPU_INT_EN(blk) ((blk) + 0x44) 836 + #define GS101_SUBBLK_CPU_INT_TYPE(blk) ((blk) + 0x48) 837 + #define GS101_SUBBLK_CPU_INT_DIR(blk) ((blk) + 0x4c) 838 + 839 + #define GS101_MIF_CONFIGURATION 0x3800 840 + #define GS101_MIF_STATUS 0x3804 841 + #define GS101_MIF_STATES 0x3808 842 + #define GS101_MIF_OPTION 0x380c 843 + #define GS101_MIF_CTRL 0x3810 844 + #define GS101_MIF_OUT 0x3820 845 + #define GS101_MIF_IN 0x3824 846 + #define GS101_MIF_INT_IN 0x3840 847 + #define GS101_MIF_INT_EN 0x3844 848 + #define GS101_MIF_INT_TYPE 0x3848 849 + #define GS101_MIF_INT_DIR 0x384c 850 + #define GS101_TOP_CONFIGURATION 0x3900 851 + #define GS101_TOP_STATUS 0x3904 852 + #define GS101_TOP_STATES 0x3908 853 + #define GS101_TOP_OPTION 0x390c 854 + #define GS101_TOP_OUT 0x3920 855 + #define GS101_TOP_IN 0x3924 856 + #define GS101_TOP_INT_IN 0x3940 857 + #define GS101_TOP_INT_EN 0x3944 858 + #define GS101_TOP_INT_TYPE 0x3948 859 + #define GS101_TOP_INT_DIR 0x394c 860 + #define GS101_WAKEUP_STAT 0x3950 861 + #define GS101_WAKEUP2_STAT 0x3954 862 + #define GS101_WAKEUP2_INT_IN 0x3960 863 + #define GS101_WAKEUP2_INT_EN 0x3964 864 + #define GS101_WAKEUP2_INT_TYPE 0x3968 865 + #define GS101_WAKEUP2_INT_DIR 0x396c 866 + #define GS101_SYSTEM_CONFIGURATION 0x3a00 867 + #define GS101_SYSTEM_STATUS 0x3a04 868 + #define GS101_SYSTEM_STATES 0x3a08 869 + #define GS101_SYSTEM_OPTION 0x3a0c 870 + #define GS101_SYSTEM_CTRL 0x3a10 871 + #define GS101_SPARE_CTRL 0x3a14 872 + #define GS101_USER_DEFINED_OUT 0x3a18 873 + #define GS101_SYSTEM_OUT 0x3a20 874 + #define GS101_SYSTEM_IN 0x3a24 875 + #define GS101_SYSTEM_INT_IN 0x3a40 876 + #define GS101_SYSTEM_INT_EN 0x3a44 877 + #define 
GS101_SYSTEM_INT_TYPE 0x3a48 878 + #define GS101_SYSTEM_INT_DIR 0x3a4c 879 + #define GS101_EINT_INT_IN 0x3a50 880 + #define GS101_EINT_INT_EN 0x3a54 881 + #define GS101_EINT_INT_TYPE 0x3a58 882 + #define GS101_EINT_INT_DIR 0x3a5c 883 + #define GS101_EINT2_INT_IN 0x3a60 884 + #define GS101_EINT2_INT_EN 0x3a64 885 + #define GS101_EINT2_INT_TYPE 0x3a68 886 + #define GS101_EINT2_INT_DIR 0x3a6c 887 + #define GS101_EINT3_INT_IN 0x3a70 888 + #define GS101_EINT3_INT_EN 0x3a74 889 + #define GS101_EINT3_INT_TYPE 0x3a78 890 + #define GS101_EINT3_INT_DIR 0x3a7c 891 + #define GS101_EINT_WAKEUP_MASK 0x3a80 892 + #define GS101_EINT_WAKEUP_MASK2 0x3a84 893 + #define GS101_EINT_WAKEUP_MASK3 0x3a88 894 + #define GS101_USER_DEFINED_INT_IN 0x3a90 895 + #define GS101_USER_DEFINED_INT_EN 0x3a94 896 + #define GS101_USER_DEFINED_INT_TYPE 0x3a98 897 + #define GS101_USER_DEFINED_INT_DIR 0x3a9c 898 + #define GS101_SCAN2DRAM_INT_IN 0x3aa0 899 + #define GS101_SCAN2DRAM_INT_EN 0x3aa4 900 + #define GS101_SCAN2DRAM_INT_TYPE 0x3aa8 901 + #define GS101_SCAN2DRAM_INT_DIR 0x3aac 902 + #define GS101_HCU_START 0x3ab0 903 + #define GS101_CUSTOM_OUT 0x3ac0 904 + #define GS101_CUSTOM_IN 0x3ac4 905 + #define GS101_CUSTOM_INT_IN 0x3ad0 906 + #define GS101_CUSTOM_INT_EN 0x3ad4 907 + #define GS101_CUSTOM_INT_TYPE 0x3ad8 908 + #define GS101_CUSTOM_INT_DIR 0x3adc 909 + #define GS101_ACK_LAST_CPU 0x3afc 910 + #define GS101_HCU_R(n) (0x3b00 + ((n) & 3) * 4) 911 + #define GS101_HCU_SP 0x3b14 912 + #define GS101_HCU_PC 0x3b18 913 + #define GS101_PMU_RAM_CTRL 0x3b20 914 + #define GS101_APM_HCU_CTRL 0x3b24 915 + #define GS101_APM_NMI_ENABLE 0x3b30 916 + #define GS101_DBGCORE_NMI_ENABLE 0x3b34 917 + #define GS101_HCU_NMI_ENABLE 0x3b38 918 + #define GS101_PWR_HOLD_WDT_ENABLE 0x3b3c 919 + #define GS101_NMI_SRC_IN 0x3b40 920 + #define GS101_RST_STAT 0x3b44 921 + #define GS101_RST_STAT_PMU 0x3b48 922 + #define GS101_HPM_INT_IN 0x3b60 923 + #define GS101_HPM_INT_EN 0x3b64 924 + #define GS101_HPM_INT_TYPE 0x3b68 925 + 
#define GS101_HPM_INT_DIR 0x3b6c 926 + #define GS101_S2D_AUTH 0x3b70 927 + #define GS101_BOOT_STAT 0x3b74 928 + #define GS101_PMLINK_OUT 0x3c00 929 + #define GS101_PMLINK_AOC_OUT 0x3c04 930 + #define GS101_PMLINK_AOC_CTRL 0x3c08 931 + #define GS101_TCXO_BUF_CTRL 0x3c10 932 + #define GS101_ADD_CTRL 0x3c14 933 + #define GS101_HCU_TIMEOUT_RESET 0x3c20 934 + #define GS101_HCU_TIMEOUT_SCAN2DRAM 0x3c24 935 + #define GS101_TIMER(n) (0x3c80 + ((n) & 3) * 4) 936 + #define GS101_PPC_MIF(n) (0x3c90 + ((n) & 3) * 4) 937 + #define GS101_PPC_CORE 0x3ca0 938 + #define GS101_PPC_EH 0x3ca4 939 + #define GS101_PPC_CPUCL1_0 0x3ca8 940 + #define GS101_PPC_CPUCL1_1 0x3cac 941 + #define GS101_EXT_REGULATOR_MIF_DURATION 0x3cb0 942 + #define GS101_EXT_REGULATOR_TOP_DURATION 0x3cb4 943 + #define GS101_EXT_REGULATOR_CPUCL2_DURATION 0x3cb8 944 + #define GS101_EXT_REGULATOR_CPUCL1_DURATION 0x3cbc 945 + #define GS101_EXT_REGULATOR_G3D_DURATION 0x3cc0 946 + #define GS101_EXT_REGULATOR_TPU_DURATION 0x3cc4 947 + #define GS101_TCXO_DURATION 0x3cc8 948 + #define GS101_BURNIN_CTRL 0x3cd0 949 + #define GS101_JTAG_DBG_DET 0x3cd4 950 + #define GS101_MMC_CONWKUP_CTRL 0x3cd8 951 + #define GS101_USBDPPHY0_USBDP_WAKEUP 0x3cdc 952 + #define GS101_TMU_TOP_TRIP 0x3ce0 953 + #define GS101_TMU_SUB_TRIP 0x3ce4 954 + #define GS101_MEMORY_CEN 0x3d00 955 + #define GS101_MEMORY_PGEN 0x3d04 956 + #define GS101_MEMORY_RET 0x3d08 957 + #define GS101_MEMORY_PGEN_FEEDBACK 0x3d0c 958 + #define GS101_MEMORY_SMX 0x3d10 959 + #define GS101_MEMORY_SMX_FEEDBACK 0x3d14 960 + #define GS101_SLC_PCH_CHANNEL 0x3d20 961 + #define GS101_SLC_PCH_CB 0x3d24 962 + #define GS101_FORCE_NOMC 0x3d3c 963 + #define GS101_FORCE_BOOST 0x3d4c 964 + #define GS101_PMLINK_SLC_REQ 0x3d50 965 + #define GS101_PMLINK_SLC_ACK 0x3d54 966 + #define GS101_PMLINK_SLC_BUSY 0x3d58 967 + #define GS101_BOOTSYNC_OUT 0x3d80 968 + #define GS101_BOOTSYNC_IN 0x3d84 969 + #define GS101_SCAN_READY_OUT 0x3d88 970 + #define GS101_SCAN_READY_IN 0x3d8c 971 + #define 
GS101_GSA_RESTORE 0x3d90 972 + #define GS101_ALIVE_OTP_LATCH 0x3d94 973 + #define GS101_DEBUG_OVERRIDE 0x3d98 974 + #define GS101_WDT_OPTION 0x3d9c 975 + #define GS101_AOC_WDT_CFG 0x3da0 976 + #define GS101_CTRL_SECJTAG_ALIVE 0x3da4 977 + #define GS101_CTRL_DIV_PLL_ALV_DIVLOW 0x3e00 978 + #define GS101_CTRL_MUX_CLK_APM_REFSRC_AUTORESTORE 0x3e04 979 + #define GS101_CTRL_MUX_CLK_APM_REFSRC 0x3e08 980 + #define GS101_CTRL_MUX_CLK_APM_REF 0x3e0c 981 + #define GS101_CTRL_MUX_PLL_ALV_DIV4 0x3e10 982 + #define GS101_CTRL_PLL_ALV_DIV4 0x3e14 983 + #define GS101_CTRL_OSCCLK_APMGSA 0x3e18 984 + #define GS101_CTRL_BLK_AOC_CLKS 0x3e1c 985 + #define GS101_CTRL_PLL_ALV_LOCK 0x3e20 986 + #define GS101_CTRL_CLKDIV__CLKRTC 0x3e24 987 + #define GS101_CTRL_SOC32K 0x3e30 988 + #define GS101_CTRL_STM_PMU 0x3e34 989 + #define GS101_CTRL_PMU_DEBUG 0x3e38 990 + #define GS101_CTRL_DEBUG_UART 0x3e3c 991 + #define GS101_CTRL_TCK 0x3e40 992 + #define GS101_CTRL_SBU_SW_EN 0x3e44 993 + #define GS101_PAD_CTRL_CLKOUT0 0x3e80 994 + #define GS101_PAD_CTRL_CLKOUT1 0x3e84 995 + #define GS101_PAD_CTRL_APM_24MOUT_0 0x3e88 996 + #define GS101_PAD_CTRL_APM_24MOUT_1 0x3e8c 997 + #define GS101_PAD_CTRL_IO_FORCE_RETENTION 0x3e90 998 + #define GS101_PAD_CTRL_APACTIVE_n 0x3e94 999 + #define GS101_PAD_CTRL_TCXO_ON 0x3e98 1000 + #define GS101_PAD_CTRL_PWR_HOLD 0x3e9c 1001 + #define GS101_PAD_CTRL_RESETO_n 0x3ea0 1002 + #define GS101_PAD_CTRL_WRESETO_n 0x3ea4 1003 + #define GS101_PHY_CTRL_USB20 0x3eb0 1004 + #define GS101_PHY_CTRL_USBDP 0x3eb4 1005 + #define GS101_PHY_CTRL_MIPI_DCPHY_M4M4 0x3eb8 1006 + #define GS101_PHY_CTRL_MIPI_DCPHY_S4S4S4S4 0x3ebc 1007 + #define GS101_PHY_CTRL_PCIE_GEN4_0 0x3ec0 1008 + #define GS101_PHY_CTRL_PCIE_GEN4_1 0x3ec4 1009 + #define GS101_PHY_CTRL_UFS 0x3ec8 683 1010 684 1011 /* PMU INTR GEN */ 685 1012 #define GS101_GRP1_INTR_BID_UPEND (0x0108)
+11 -6
include/linux/syscore_ops.h
··· 11 11 #include <linux/list.h> 12 12 13 13 struct syscore_ops { 14 - struct list_head node; 15 - int (*suspend)(void); 16 - void (*resume)(void); 17 - void (*shutdown)(void); 14 + int (*suspend)(void *data); 15 + void (*resume)(void *data); 16 + void (*shutdown)(void *data); 18 17 }; 19 18 20 - extern void register_syscore_ops(struct syscore_ops *ops); 21 - extern void unregister_syscore_ops(struct syscore_ops *ops); 19 + struct syscore { 20 + struct list_head node; 21 + const struct syscore_ops *ops; 22 + void *data; 23 + }; 24 + 25 + extern void register_syscore(struct syscore *syscore); 26 + extern void unregister_syscore(struct syscore *syscore); 22 27 #ifdef CONFIG_PM_SLEEP 23 28 extern int syscore_suspend(void); 24 29 extern void syscore_resume(void);
+8 -4
kernel/cpu_pm.c
··· 173 173 EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit); 174 174 175 175 #ifdef CONFIG_PM 176 - static int cpu_pm_suspend(void) 176 + static int cpu_pm_suspend(void *data) 177 177 { 178 178 int ret; 179 179 ··· 185 185 return ret; 186 186 } 187 187 188 - static void cpu_pm_resume(void) 188 + static void cpu_pm_resume(void *data) 189 189 { 190 190 cpu_cluster_pm_exit(); 191 191 cpu_pm_exit(); 192 192 } 193 193 194 - static struct syscore_ops cpu_pm_syscore_ops = { 194 + static const struct syscore_ops cpu_pm_syscore_ops = { 195 195 .suspend = cpu_pm_suspend, 196 196 .resume = cpu_pm_resume, 197 197 }; 198 198 199 + static struct syscore cpu_pm_syscore = { 200 + .ops = &cpu_pm_syscore_ops, 201 + }; 202 + 199 203 static int cpu_pm_init(void) 200 204 { 201 - register_syscore_ops(&cpu_pm_syscore_ops); 205 + register_syscore(&cpu_pm_syscore); 202 206 return 0; 203 207 } 204 208 core_initcall(cpu_pm_init);
+9 -5
kernel/irq/generic-chip.c
··· 650 650 } 651 651 652 652 #ifdef CONFIG_PM 653 - static int irq_gc_suspend(void) 653 + static int irq_gc_suspend(void *data) 654 654 { 655 655 struct irq_chip_generic *gc; 656 656 ··· 670 670 return 0; 671 671 } 672 672 673 - static void irq_gc_resume(void) 673 + static void irq_gc_resume(void *data) 674 674 { 675 675 struct irq_chip_generic *gc; 676 676 ··· 693 693 #define irq_gc_resume NULL 694 694 #endif 695 695 696 - static void irq_gc_shutdown(void) 696 + static void irq_gc_shutdown(void *data) 697 697 { 698 698 struct irq_chip_generic *gc; 699 699 ··· 709 709 } 710 710 } 711 711 712 - static struct syscore_ops irq_gc_syscore_ops = { 712 + static const struct syscore_ops irq_gc_syscore_ops = { 713 713 .suspend = irq_gc_suspend, 714 714 .resume = irq_gc_resume, 715 715 .shutdown = irq_gc_shutdown, 716 716 }; 717 717 718 + static struct syscore irq_gc_syscore = { 719 + .ops = &irq_gc_syscore_ops, 720 + }; 721 + 718 722 static int __init irq_gc_init_ops(void) 719 723 { 720 - register_syscore_ops(&irq_gc_syscore_ops); 724 + register_syscore(&irq_gc_syscore); 721 725 return 0; 722 726 } 723 727 device_initcall(irq_gc_init_ops);
+8 -3
kernel/irq/pm.c
··· 211 211 212 212 /** 213 213 * irq_pm_syscore_resume - enable interrupt lines early 214 + * @data: syscore context 214 215 * 215 216 * Enable all interrupt lines with %IRQF_EARLY_RESUME set. 216 217 */ 217 - static void irq_pm_syscore_resume(void) 218 + static void irq_pm_syscore_resume(void *data) 218 219 { 219 220 resume_irqs(true); 220 221 } 221 222 222 - static struct syscore_ops irq_pm_syscore_ops = { 223 + static const struct syscore_ops irq_pm_syscore_ops = { 223 224 .resume = irq_pm_syscore_resume, 225 + }; 226 + 227 + static struct syscore irq_pm_syscore = { 228 + .ops = &irq_pm_syscore_ops, 224 229 }; 225 230 226 231 static int __init irq_pm_init_ops(void) 227 232 { 228 - register_syscore_ops(&irq_pm_syscore_ops); 233 + register_syscore(&irq_pm_syscore); 229 234 return 0; 230 235 } 231 236
+8 -3
kernel/printk/printk.c
··· 3705 3705 3706 3706 /** 3707 3707 * printk_kthreads_shutdown - shutdown all threaded printers 3708 + * @data: syscore context 3708 3709 * 3709 3710 * On system shutdown all threaded printers are stopped. This allows printk 3710 3711 * to transition back to atomic printing, thus providing a robust mechanism 3711 3712 * for the final shutdown/reboot messages to be output. 3712 3713 */ 3713 - static void printk_kthreads_shutdown(void) 3714 + static void printk_kthreads_shutdown(void *data) 3714 3715 { 3715 3716 struct console *con; 3716 3717 ··· 3733 3732 console_list_unlock(); 3734 3733 } 3735 3734 3736 - static struct syscore_ops printk_syscore_ops = { 3735 + static const struct syscore_ops printk_syscore_ops = { 3737 3736 .shutdown = printk_kthreads_shutdown, 3737 + }; 3738 + 3739 + static struct syscore printk_syscore = { 3740 + .ops = &printk_syscore_ops, 3738 3741 }; 3739 3742 3740 3743 /* ··· 3808 3803 3809 3804 static int __init printk_set_kthreads_ready(void) 3810 3805 { 3811 - register_syscore_ops(&printk_syscore_ops); 3806 + register_syscore(&printk_syscore); 3812 3807 3813 3808 console_list_lock(); 3814 3809 printk_kthreads_ready = true;
+18 -4
kernel/time/sched_clock.c
··· 296 296 return 0; 297 297 } 298 298 299 + static int sched_clock_syscore_suspend(void *data) 300 + { 301 + return sched_clock_suspend(); 302 + } 303 + 299 304 void sched_clock_resume(void) 300 305 { 301 306 struct clock_read_data *rd = &cd.read_data[0]; ··· 310 305 rd->read_sched_clock = cd.actual_read_sched_clock; 311 306 } 312 307 313 - static struct syscore_ops sched_clock_ops = { 314 - .suspend = sched_clock_suspend, 315 - .resume = sched_clock_resume, 308 + static void sched_clock_syscore_resume(void *data) 309 + { 310 + sched_clock_resume(); 311 + } 312 + 313 + static const struct syscore_ops sched_clock_syscore_ops = { 314 + .suspend = sched_clock_syscore_suspend, 315 + .resume = sched_clock_syscore_resume, 316 + }; 317 + 318 + static struct syscore sched_clock_syscore = { 319 + .ops = &sched_clock_syscore_ops, 316 320 }; 317 321 318 322 static int __init sched_clock_syscore_init(void) 319 323 { 320 - register_syscore_ops(&sched_clock_ops); 324 + register_syscore(&sched_clock_syscore); 321 325 322 326 return 0; 323 327 }
+18 -4
kernel/time/timekeeping.c
··· 1994 1994 timerfd_resume(); 1995 1995 } 1996 1996 1997 + static void timekeeping_syscore_resume(void *data) 1998 + { 1999 + timekeeping_resume(); 2000 + } 2001 + 1997 2002 int timekeeping_suspend(void) 1998 2003 { 1999 2004 struct timekeeper *tks = &tk_core.shadow_timekeeper; ··· 2066 2061 return 0; 2067 2062 } 2068 2063 2064 + static int timekeeping_syscore_suspend(void *data) 2065 + { 2066 + return timekeeping_suspend(); 2067 + } 2068 + 2069 2069 /* sysfs resume/suspend bits for timekeeping */ 2070 - static struct syscore_ops timekeeping_syscore_ops = { 2071 - .resume = timekeeping_resume, 2072 - .suspend = timekeeping_suspend, 2070 + static const struct syscore_ops timekeeping_syscore_ops = { 2071 + .resume = timekeeping_syscore_resume, 2072 + .suspend = timekeeping_syscore_suspend, 2073 + }; 2074 + 2075 + static struct syscore timekeeping_syscore = { 2076 + .ops = &timekeeping_syscore_ops, 2073 2077 }; 2074 2078 2075 2079 static int __init timekeeping_init_ops(void) 2076 2080 { 2077 - register_syscore_ops(&timekeeping_syscore_ops); 2081 + register_syscore(&timekeeping_syscore); 2078 2082 return 0; 2079 2083 } 2080 2084 device_initcall(timekeeping_init_ops);
+11 -7
virt/kvm/kvm_main.c
··· 5636 5636 return 0; 5637 5637 } 5638 5638 5639 - static void kvm_shutdown(void) 5639 + static void kvm_shutdown(void *data) 5640 5640 { 5641 5641 /* 5642 5642 * Disable hardware virtualization and set kvm_rebooting to indicate ··· 5654 5654 on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1); 5655 5655 } 5656 5656 5657 - static int kvm_suspend(void) 5657 + static int kvm_suspend(void *data) 5658 5658 { 5659 5659 /* 5660 5660 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume ··· 5671 5671 return 0; 5672 5672 } 5673 5673 5674 - static void kvm_resume(void) 5674 + static void kvm_resume(void *data) 5675 5675 { 5676 5676 lockdep_assert_not_held(&kvm_usage_lock); 5677 5677 lockdep_assert_irqs_disabled(); ··· 5679 5679 WARN_ON_ONCE(kvm_enable_virtualization_cpu()); 5680 5680 } 5681 5681 5682 - static struct syscore_ops kvm_syscore_ops = { 5682 + static const struct syscore_ops kvm_syscore_ops = { 5683 5683 .suspend = kvm_suspend, 5684 5684 .resume = kvm_resume, 5685 5685 .shutdown = kvm_shutdown, 5686 + }; 5687 + 5688 + static struct syscore kvm_syscore = { 5689 + .ops = &kvm_syscore_ops, 5686 5690 }; 5687 5691 5688 5692 int kvm_enable_virtualization(void) ··· 5705 5701 if (r) 5706 5702 goto err_cpuhp; 5707 5703 5708 - register_syscore_ops(&kvm_syscore_ops); 5704 + register_syscore(&kvm_syscore); 5709 5705 5710 5706 /* 5711 5707 * Undo virtualization enabling and bail if the system is going down. ··· 5727 5723 return 0; 5728 5724 5729 5725 err_rebooting: 5730 - unregister_syscore_ops(&kvm_syscore_ops); 5726 + unregister_syscore(&kvm_syscore); 5731 5727 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE); 5732 5728 err_cpuhp: 5733 5729 kvm_arch_disable_virtualization(); ··· 5743 5739 if (--kvm_usage_count) 5744 5740 return; 5745 5741 5746 - unregister_syscore_ops(&kvm_syscore_ops); 5742 + unregister_syscore(&kvm_syscore); 5747 5743 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE); 5748 5744 kvm_arch_disable_virtualization(); 5749 5745 }