Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git — topics: kernel, os, linux

Merge tag 'arm-drivers-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull ARM driver updates from Arnd Bergmann:
"These are the usual updates for SoC specific device drivers and
related subsystems that don't have their own top-level maintainers:

- ARM SCMI/SCPI updates to allow pluggable transport layers

- TEE subsystem cleanups

- A new driver for the Amlogic secure power domain controller

- Various driver updates for the NXP Layerscape DPAA2, NXP i.MX SCU
and TI OMAP2+ sysc drivers.

- Qualcomm SoC driver updates, including a new library module for
"protection domain" notifications

- Lots of smaller bugfixes and cleanups in other drivers"

* tag 'arm-drivers-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (70 commits)
soc: fsl: qe: fix sparse warnings for ucc_slow.c
soc: fsl: qe: ucc_slow: remove 0 assignment for kzalloc'ed structure
soc: fsl: qe: fix sparse warnings for ucc_fast.c
soc: fsl: qe: fix sparse warnings for qe_ic.c
soc: fsl: qe: fix sparse warnings for ucc.c
soc: fsl: qe: fix sparse warning for qe_common.c
soc: fsl: qe: fix sparse warnings for qe.c
soc: qcom: Fix QCOM_APR dependencies
soc: qcom: pdr: Avoid uninitialized use of found in pdr_indication_cb
soc: imx: drop COMPILE_TEST for IMX_SCU_SOC
firmware: imx: add COMPILE_TEST for IMX_SCU driver
soc: imx: gpc: fix power up sequencing
soc: imx: increase build coverage for imx8m soc driver
soc: qcom: apr: Add avs/audio tracking functionality
dt-bindings: soc: qcom: apr: Add protection domain bindings
soc: qcom: Introduce Protection Domain Restart helpers
devicetree: bindings: firmware: add ipq806x to qcom_scm
memory: tegra: Correct debugfs clk rate-range on Tegra124
memory: tegra: Correct debugfs clk rate-range on Tegra30
memory: tegra: Correct debugfs clk rate-range on Tegra20
... (remaining commits of the 70 omitted)

+3696 -637
+1
Documentation/devicetree/bindings/bus/ti-sysc.txt
··· 38 38 "ti,sysc-dra7-mcasp" 39 39 "ti,sysc-usb-host-fs" 40 40 "ti,sysc-dra7-mcan" 41 + "ti,sysc-pruss" 41 42 42 43 - reg shall have register areas implemented for the interconnect 43 44 target module in question such as revision, sysc and syss
+1
Documentation/devicetree/bindings/firmware/qcom,scm.txt
··· 10 10 * "qcom,scm-apq8064" 11 11 * "qcom,scm-apq8084" 12 12 * "qcom,scm-ipq4019" 13 + * "qcom,scm-ipq806x" 13 14 * "qcom,scm-msm8660" 14 15 * "qcom,scm-msm8916" 15 16 * "qcom,scm-msm8960"
+40
Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0+ OR MIT) 2 + # Copyright (c) 2019 Amlogic, Inc 3 + # Author: Jianxin Pan <jianxin.pan@amlogic.com> 4 + %YAML 1.2 5 + --- 6 + $id: "http://devicetree.org/schemas/power/amlogic,meson-sec-pwrc.yaml#" 7 + $schema: "http://devicetree.org/meta-schemas/core.yaml#" 8 + 9 + title: Amlogic Meson Secure Power Domains 10 + 11 + maintainers: 12 + - Jianxin Pan <jianxin.pan@amlogic.com> 13 + 14 + description: |+ 15 + Secure Power Domains used in Meson A1/C1 SoCs, and should be the child node 16 + of secure-monitor. 17 + 18 + properties: 19 + compatible: 20 + enum: 21 + - amlogic,meson-a1-pwrc 22 + 23 + "#power-domain-cells": 24 + const: 1 25 + 26 + required: 27 + - compatible 28 + - "#power-domain-cells" 29 + 30 + examples: 31 + - | 32 + secure-monitor { 33 + compatible = "amlogic,meson-gxbb-sm"; 34 + 35 + pwrc: power-controller { 36 + compatible = "amlogic,meson-a1-pwrc"; 37 + #power-domain-cells = <1>; 38 + }; 39 + }; 40 +
+50
Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
··· 45 45 12 - Ultrasound stream manager. 46 46 13 - Listen stream manager. 47 47 48 + - qcom,protection-domain 49 + Usage: optional 50 + Value type: <stringlist> 51 + Definition: Must list the protection domain service name and path 52 + that the particular apr service has a dependency on. 53 + Possible values are : 54 + "avs/audio", "msm/adsp/audio_pd". 55 + "kernel/elf_loader", "msm/modem/wlan_pd". 56 + "tms/servreg", "msm/adsp/audio_pd". 57 + "tms/servreg", "msm/modem/wlan_pd". 58 + "tms/servreg", "msm/slpi/sensor_pd". 59 + 48 60 = EXAMPLE 49 61 The following example represents a QDSP based sound card on a MSM8996 device 50 62 which uses apr as communication between Apps and QDSP. ··· 91 79 q6adm@8 { 92 80 compatible = "qcom,q6adm"; 93 81 reg = <APR_SVC_ADM>; 82 + ... 83 + }; 84 + }; 85 + 86 + = EXAMPLE 2 87 + The following example represents a QDSP based sound card with protection domain 88 + dependencies specified. Here some of the apr services are dependent on services 89 + running on protection domain hosted on ADSP/SLPI remote processors while others 90 + have no such dependency. 91 + 92 + apr { 93 + compatible = "qcom,apr-v2"; 94 + qcom,glink-channels = "apr_audio_svc"; 95 + qcom,apr-domain = <APR_DOMAIN_ADSP>; 96 + 97 + q6core { 98 + compatible = "qcom,q6core"; 99 + reg = <APR_SVC_ADSP_CORE>; 100 + }; 101 + 102 + q6afe: q6afe { 103 + compatible = "qcom,q6afe"; 104 + reg = <APR_SVC_AFE>; 105 + qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd"; 106 + ... 107 + }; 108 + 109 + q6asm: q6asm { 110 + compatible = "qcom,q6asm"; 111 + reg = <APR_SVC_ASM>; 112 + qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd"; 113 + ... 114 + }; 115 + 116 + q6adm: q6adm { 117 + compatible = "qcom,q6adm"; 118 + reg = <APR_SVC_ADM>; 119 + qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd"; 94 120 ... 95 121 }; 96 122 };
+6
arch/arm/mach-omap2/pdata-quirks.c
··· 397 397 return omap_hwmod_shutdown(cookie->data); 398 398 } 399 399 400 + static bool ti_sysc_soc_type_gp(void) 401 + { 402 + return omap_type() == OMAP2_DEVICE_TYPE_GP; 403 + } 404 + 400 405 static struct of_dev_auxdata omap_auxdata_lookup[]; 401 406 402 407 static struct ti_sysc_platform_data ti_sysc_pdata = { 403 408 .auxdata = omap_auxdata_lookup, 409 + .soc_type_gp = ti_sysc_soc_type_gp, 404 410 .init_clockdomain = ti_sysc_clkdm_init, 405 411 .clkdm_deny_idle = ti_sysc_clkdm_deny_idle, 406 412 .clkdm_allow_idle = ti_sysc_clkdm_allow_idle,
+25 -2
drivers/bus/hisi_lpc.c
··· 358 358 } 359 359 360 360 /* 361 + * Released firmware describes the IO port max address as 0x3fff, which is 362 + * the max host bus address. Fixup to a proper range. This will probably 363 + * never be fixed in firmware. 364 + */ 365 + static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, 366 + struct resource *r) 367 + { 368 + if (r->end != 0x3fff) 369 + return; 370 + 371 + if (r->start == 0xe4) 372 + r->end = 0xe4 + 0x04 - 1; 373 + else if (r->start == 0x2f8) 374 + r->end = 0x2f8 + 0x08 - 1; 375 + else 376 + dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n", 377 + r); 378 + } 379 + 380 + /* 361 381 * hisi_lpc_acpi_set_io_res - set the resources for a child 362 382 * @child: the device node to be updated the I/O resource 363 383 * @hostdev: the device node associated with host controller ··· 438 418 return -ENOMEM; 439 419 } 440 420 count = 0; 441 - list_for_each_entry(rentry, &resource_list, node) 442 - resources[count++] = *rentry->res; 421 + list_for_each_entry(rentry, &resource_list, node) { 422 + resources[count] = *rentry->res; 423 + hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]); 424 + count++; 425 + } 443 426 444 427 acpi_dev_free_resource_list(&resource_list); 445 428
+515 -89
drivers/bus/ti-sysc.c
··· 7 7 #include <linux/clk.h> 8 8 #include <linux/clkdev.h> 9 9 #include <linux/delay.h> 10 + #include <linux/list.h> 10 11 #include <linux/module.h> 11 12 #include <linux/platform_device.h> 12 13 #include <linux/pm_domain.h> ··· 16 15 #include <linux/of_address.h> 17 16 #include <linux/of_platform.h> 18 17 #include <linux/slab.h> 18 + #include <linux/sys_soc.h> 19 19 #include <linux/iopoll.h> 20 20 21 21 #include <linux/platform_data/ti-sysc.h> 22 22 23 23 #include <dt-bindings/bus/ti-sysc.h> 24 24 25 + #define DIS_ISP BIT(2) 26 + #define DIS_IVA BIT(1) 27 + #define DIS_SGX BIT(0) 28 + 29 + #define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), } 30 + 25 31 #define MAX_MODULE_SOFTRESET_WAIT 10000 26 32 27 - static const char * const reg_names[] = { "rev", "sysc", "syss", }; 33 + enum sysc_soc { 34 + SOC_UNKNOWN, 35 + SOC_2420, 36 + SOC_2430, 37 + SOC_3430, 38 + SOC_3630, 39 + SOC_4430, 40 + SOC_4460, 41 + SOC_4470, 42 + SOC_5430, 43 + SOC_AM3, 44 + SOC_AM4, 45 + SOC_DRA7, 46 + }; 47 + 48 + struct sysc_address { 49 + unsigned long base; 50 + struct list_head node; 51 + }; 52 + 53 + struct sysc_soc_info { 54 + unsigned long general_purpose:1; 55 + enum sysc_soc soc; 56 + struct mutex list_lock; /* disabled modules list lock */ 57 + struct list_head disabled_modules; 58 + }; 28 59 29 60 enum sysc_clocks { 30 61 SYSC_FCK, ··· 72 39 SYSC_MAX_CLOCKS, 73 40 }; 74 41 42 + static struct sysc_soc_info *sysc_soc; 43 + static const char * const reg_names[] = { "rev", "sysc", "syss", }; 75 44 static const char * const clock_names[SYSC_MAX_CLOCKS] = { 76 45 "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4", 77 46 "opt5", "opt6", "opt7", ··· 105 70 * @child_needs_resume: runtime resume needed for child on resume from suspend 106 71 * @disable_on_idle: status flag used for disabling modules with resets 107 72 * @idle_work: work structure used to perform delayed idle on a module 108 - * @clk_enable_quirk: module specific clock enable quirk 109 - * 
@clk_disable_quirk: module specific clock disable quirk 73 + * @pre_reset_quirk: module specific pre-reset quirk 74 + * @post_reset_quirk: module specific post-reset quirk 110 75 * @reset_done_quirk: module specific reset done quirk 111 76 * @module_enable_quirk: module specific enable quirk 112 77 * @module_disable_quirk: module specific disable quirk 78 + * @module_unlock_quirk: module specific sysconfig unlock quirk 79 + * @module_lock_quirk: module specific sysconfig lock quirk 113 80 */ 114 81 struct sysc { 115 82 struct device *dev; ··· 134 97 unsigned int needs_resume:1; 135 98 unsigned int child_needs_resume:1; 136 99 struct delayed_work idle_work; 137 - void (*clk_enable_quirk)(struct sysc *sysc); 138 - void (*clk_disable_quirk)(struct sysc *sysc); 100 + void (*pre_reset_quirk)(struct sysc *sysc); 101 + void (*post_reset_quirk)(struct sysc *sysc); 139 102 void (*reset_done_quirk)(struct sysc *sysc); 140 103 void (*module_enable_quirk)(struct sysc *sysc); 141 104 void (*module_disable_quirk)(struct sysc *sysc); 105 + void (*module_unlock_quirk)(struct sysc *sysc); 106 + void (*module_lock_quirk)(struct sysc *sysc); 142 107 }; 143 108 144 109 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, ··· 663 624 const char *name; 664 625 665 626 name = of_get_property(np, "ti,hwmods", NULL); 666 - if (name) 627 + if (name && !of_device_is_compatible(np, "ti,sysc")) 667 628 dev_warn(ddata->dev, "really a child ti,hwmods property?"); 668 629 669 630 sysc_check_quirk_stdout(ddata, np); ··· 900 861 buf); 901 862 } 902 863 864 + /** 865 + * sysc_write_sysconfig - handle sysconfig quirks for register write 866 + * @ddata: device driver data 867 + * @value: register value 868 + */ 869 + static void sysc_write_sysconfig(struct sysc *ddata, u32 value) 870 + { 871 + if (ddata->module_unlock_quirk) 872 + ddata->module_unlock_quirk(ddata); 873 + 874 + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value); 875 + 876 + if (ddata->module_lock_quirk) 
877 + ddata->module_lock_quirk(ddata); 878 + } 879 + 903 880 #define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) 904 881 #define SYSC_CLOCACT_ICK 2 905 882 ··· 962 907 963 908 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); 964 909 reg |= best_mode << regbits->sidle_shift; 965 - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 910 + sysc_write_sysconfig(ddata, reg); 966 911 967 912 set_midle: 968 913 /* Set MIDLE mode */ ··· 981 926 982 927 reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); 983 928 reg |= best_mode << regbits->midle_shift; 984 - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 929 + sysc_write_sysconfig(ddata, reg); 985 930 986 931 set_autoidle: 987 932 /* Autoidle bit must enabled separately if available */ 988 933 if (regbits->autoidle_shift >= 0 && 989 934 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) { 990 935 reg |= 1 << regbits->autoidle_shift; 991 - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 936 + sysc_write_sysconfig(ddata, reg); 992 937 } 993 938 994 939 if (ddata->module_enable_quirk) ··· 1046 991 1047 992 reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); 1048 993 reg |= best_mode << regbits->midle_shift; 1049 - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 994 + sysc_write_sysconfig(ddata, reg); 1050 995 1051 996 set_sidle: 1052 997 /* Set SIDLE mode */ ··· 1069 1014 if (regbits->autoidle_shift >= 0 && 1070 1015 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) 1071 1016 reg |= 1 << regbits->autoidle_shift; 1072 - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 1017 + sysc_write_sysconfig(ddata, reg); 1073 1018 1074 1019 return 0; 1075 1020 } ··· 1271 1216 SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET), 1272 1217 SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 1273 1218 SYSC_QUIRK_LEGACY_IDLE), 1274 - SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff, 1219 + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 1275 1220 
SYSC_QUIRK_LEGACY_IDLE), 1276 - SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, 1221 + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 1277 1222 SYSC_QUIRK_LEGACY_IDLE), 1278 1223 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 1279 1224 0), 1280 1225 /* Some timers on omap4 and later */ 1281 - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, 1226 + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 1282 1227 0), 1283 - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, 1228 + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 1284 1229 0), 1285 1230 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, 1286 1231 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), ··· 1293 1238 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), 1294 1239 1295 1240 /* Quirks that need to be set based on the module address */ 1296 - SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff, 1241 + SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, 1297 1242 SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | 1298 1243 SYSC_QUIRK_SWSUP_SIDLE), 1299 1244 1300 1245 /* Quirks that need to be set based on detected module */ 1301 - SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, 1246 + SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff, 1302 1247 SYSC_MODULE_QUIRK_AESS), 1303 - SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff, 1248 + SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 1304 1249 SYSC_QUIRK_CLKDM_NOAUTO), 1305 - SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff, 1250 + SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 1251 + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1252 + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, 1253 + SYSC_QUIRK_OPT_CLKS_IN_RESET | 
SYSC_MODULE_QUIRK_DSS_RESET), 1254 + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, 1255 + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), 1256 + SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 1306 1257 SYSC_QUIRK_CLKDM_NOAUTO), 1307 - SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff, 1258 + SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 1308 1259 SYSC_QUIRK_CLKDM_NOAUTO), 1260 + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff, 1261 + SYSC_QUIRK_OPT_CLKS_NEEDED), 1309 1262 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, 1310 1263 SYSC_MODULE_QUIRK_HDQ1W), 1311 1264 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, ··· 1326 1263 SYSC_MODULE_QUIRK_I2C), 1327 1264 SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, 1328 1265 SYSC_MODULE_QUIRK_I2C), 1329 - SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), 1330 - SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 1266 + SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), 1267 + SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 1331 1268 SYSC_MODULE_QUIRK_SGX), 1332 - SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 1269 + SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff, 1270 + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1271 + SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, 1272 + SYSC_MODULE_QUIRK_RTC_UNLOCK), 1273 + SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff, 1274 + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1275 + SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff, 1333 1276 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1334 1277 SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 1335 1278 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | 
SYSC_QUIRK_SWSUP_MSTANDBY), 1336 - SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff, 1279 + SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, 1337 1280 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 1338 1281 SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 1339 1282 SYSC_MODULE_QUIRK_WDT), 1283 + /* PRUSS on am3, am4 and am5 */ 1284 + SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000, 1285 + SYSC_MODULE_QUIRK_PRUSS), 1340 1286 /* Watchdog on am3 and am4 */ 1341 1287 SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 1342 1288 SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), 1343 1289 1344 1290 #ifdef DEBUG 1345 - SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0), 1346 - SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0), 1347 - SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0), 1348 - SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), 1291 + SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0), 1292 + SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0), 1293 + SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0), 1294 + SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), 1349 1295 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 1350 1296 0xffff00f0, 0), 1351 - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0), 1352 - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), 1353 - SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), 1354 - SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), 1297 + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0), 1298 + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0), 1299 + SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1300 + SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1301 + 
SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0), 1302 + SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0), 1303 + SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1304 + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), 1305 + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1306 + SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), 1307 + SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0), 1355 1308 SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1356 1309 SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1357 - SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), 1358 - SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0), 1359 - SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0), 1310 + SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0), 1311 + SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0), 1312 + SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0), 1313 + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0), 1360 1314 SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), 1361 - SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), 1362 - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0), 1363 - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0), 1364 - SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0), 1365 - SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 0xffff00ff, 0), 1315 + SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0), 1316 + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0), 1317 + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0), 1318 + SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0), 1319 + SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0), 1366 1320 
SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0), 1367 - SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0), 1368 - SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0), 1321 + SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0), 1322 + SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0), 1369 1323 SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0), 1370 - SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0), 1371 - SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0), 1372 - SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0), 1373 - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0), 1374 - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0), 1375 - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0), 1376 - SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), 1377 - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0), 1378 - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0), 1379 - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0), 1380 - SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0), 1381 - SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0), 1324 + SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0), 1325 + SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0), 1326 + SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0), 1327 + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0), 1328 + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0), 1329 + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0), 1330 + SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1331 + SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), 1332 + SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), 1333 + 
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0), 1334 + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0), 1335 + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0), 1336 + SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0), 1337 + SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0), 1382 1338 SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0), 1383 1339 SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0), 1384 - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0), 1385 - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0), 1386 - SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0), 1387 - SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0), 1388 - SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0), 1389 - SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0), 1340 + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0), 1341 + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), 1342 + SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), 1343 + SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), 1344 + SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), 1345 + SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0), 1390 1346 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), 1391 1347 SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), 1392 1348 SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), 1393 - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0), 1394 - SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0), 1349 + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0), 1350 + SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 
0), 1351 + SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0), 1395 1352 #endif 1396 1353 }; 1397 1354 ··· 1433 1350 if (q->base != ddata->module_pa) 1434 1351 continue; 1435 1352 1436 - if (q->rev_offset >= 0 && 1437 - q->rev_offset != ddata->offsets[SYSC_REVISION]) 1353 + if (q->rev_offset != ddata->offsets[SYSC_REVISION]) 1438 1354 continue; 1439 1355 1440 - if (q->sysc_offset >= 0 && 1441 - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1356 + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1442 1357 continue; 1443 1358 1444 - if (q->syss_offset >= 0 && 1445 - q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1359 + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1446 1360 continue; 1447 1361 1448 1362 ddata->name = q->name; ··· 1459 1379 if (q->base && q->base != ddata->module_pa) 1460 1380 continue; 1461 1381 1462 - if (q->rev_offset >= 0 && 1463 - q->rev_offset != ddata->offsets[SYSC_REVISION]) 1382 + if (q->rev_offset != ddata->offsets[SYSC_REVISION]) 1464 1383 continue; 1465 1384 1466 - if (q->sysc_offset >= 0 && 1467 - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1385 + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) 1468 1386 continue; 1469 1387 1470 - if (q->syss_offset >= 0 && 1471 - q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1388 + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) 1472 1389 continue; 1473 1390 1474 1391 if (q->revision == ddata->revision || ··· 1475 1398 ddata->cfg.quirks |= q->quirks; 1476 1399 } 1477 1400 } 1401 + } 1402 + 1403 + /* 1404 + * DSS needs dispc outputs disabled to reset modules. Returns mask of 1405 + * enabled DSS interrupts. Eventually we may be able to do this on 1406 + * dispc init rather than top-level DSS init. 
1407 + */ 1408 + static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, 1409 + bool disable) 1410 + { 1411 + bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; 1412 + const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1); 1413 + int manager_count; 1414 + bool framedonetv_irq; 1415 + u32 val, irq_mask = 0; 1416 + 1417 + switch (sysc_soc->soc) { 1418 + case SOC_2420 ... SOC_3630: 1419 + manager_count = 2; 1420 + framedonetv_irq = false; 1421 + break; 1422 + case SOC_4430 ... SOC_4470: 1423 + manager_count = 3; 1424 + break; 1425 + case SOC_5430: 1426 + case SOC_DRA7: 1427 + manager_count = 4; 1428 + break; 1429 + case SOC_AM4: 1430 + manager_count = 1; 1431 + break; 1432 + case SOC_UNKNOWN: 1433 + default: 1434 + return 0; 1435 + }; 1436 + 1437 + /* Remap the whole module range to be able to reset dispc outputs */ 1438 + devm_iounmap(ddata->dev, ddata->module_va); 1439 + ddata->module_va = devm_ioremap(ddata->dev, 1440 + ddata->module_pa, 1441 + ddata->module_size); 1442 + if (!ddata->module_va) 1443 + return -EIO; 1444 + 1445 + /* DISP_CONTROL */ 1446 + val = sysc_read(ddata, dispc_offset + 0x40); 1447 + lcd_en = val & lcd_en_mask; 1448 + digit_en = val & digit_en_mask; 1449 + if (lcd_en) 1450 + irq_mask |= BIT(0); /* FRAMEDONE */ 1451 + if (digit_en) { 1452 + if (framedonetv_irq) 1453 + irq_mask |= BIT(24); /* FRAMEDONETV */ 1454 + else 1455 + irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */ 1456 + } 1457 + if (disable & (lcd_en | digit_en)) 1458 + sysc_write(ddata, dispc_offset + 0x40, 1459 + val & ~(lcd_en_mask | digit_en_mask)); 1460 + 1461 + if (manager_count <= 2) 1462 + return irq_mask; 1463 + 1464 + /* DISPC_CONTROL2 */ 1465 + val = sysc_read(ddata, dispc_offset + 0x238); 1466 + lcd2_en = val & lcd_en_mask; 1467 + if (lcd2_en) 1468 + irq_mask |= BIT(22); /* FRAMEDONE2 */ 1469 + if (disable && lcd2_en) 1470 + sysc_write(ddata, dispc_offset + 0x238, 1471 + val & ~lcd_en_mask); 1472 + 1473 + if (manager_count <= 3) 1474 + return irq_mask; 1475 + 
1476 + /* DISPC_CONTROL3 */ 1477 + val = sysc_read(ddata, dispc_offset + 0x848); 1478 + lcd3_en = val & lcd_en_mask; 1479 + if (lcd3_en) 1480 + irq_mask |= BIT(30); /* FRAMEDONE3 */ 1481 + if (disable && lcd3_en) 1482 + sysc_write(ddata, dispc_offset + 0x848, 1483 + val & ~lcd_en_mask); 1484 + 1485 + return irq_mask; 1486 + } 1487 + 1488 + /* DSS needs child outputs disabled and SDI registers cleared for reset */ 1489 + static void sysc_pre_reset_quirk_dss(struct sysc *ddata) 1490 + { 1491 + const int dispc_offset = 0x1000; 1492 + int error; 1493 + u32 irq_mask, val; 1494 + 1495 + /* Get enabled outputs */ 1496 + irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false); 1497 + if (!irq_mask) 1498 + return; 1499 + 1500 + /* Clear IRQSTATUS */ 1501 + sysc_write(ddata, dispc_offset + 0x18, irq_mask); 1502 + 1503 + /* Disable outputs */ 1504 + val = sysc_quirk_dispc(ddata, dispc_offset, true); 1505 + 1506 + /* Poll IRQSTATUS */ 1507 + error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18, 1508 + val, val != irq_mask, 100, 50); 1509 + if (error) 1510 + dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", 1511 + __func__, val, irq_mask); 1512 + 1513 + if (sysc_soc->soc == SOC_3430) { 1514 + /* Clear DSS_SDI_CONTROL */ 1515 + sysc_write(ddata, 0x44, 0); 1516 + 1517 + /* Clear DSS_PLL_CONTROL */ 1518 + sysc_write(ddata, 0x48, 0); 1519 + } 1520 + 1521 + /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */ 1522 + sysc_write(ddata, 0x40, 0); 1478 1523 } 1479 1524 1480 1525 /* 1-wire needs module's internal clocks enabled for reset */ ··· 1618 1419 sysc_write(ddata, offset, 1); 1619 1420 } 1620 1421 1621 - /* I2C needs extra enable bit toggling for reset */ 1422 + /* I2C needs to be disabled for reset */ 1622 1423 static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) 1623 1424 { 1624 1425 int offset; ··· 1639 1440 sysc_write(ddata, offset, val); 1640 1441 } 1641 1442 1642 - static void sysc_clk_enable_quirk_i2c(struct sysc *ddata) 1443 + 
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata) 1444 + { 1445 + sysc_clk_quirk_i2c(ddata, false); 1446 + } 1447 + 1448 + static void sysc_post_reset_quirk_i2c(struct sysc *ddata) 1643 1449 { 1644 1450 sysc_clk_quirk_i2c(ddata, true); 1645 1451 } 1646 1452 1647 - static void sysc_clk_disable_quirk_i2c(struct sysc *ddata) 1453 + /* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */ 1454 + static void sysc_quirk_rtc(struct sysc *ddata, bool lock) 1648 1455 { 1649 - sysc_clk_quirk_i2c(ddata, false); 1456 + u32 val, kick0_val = 0, kick1_val = 0; 1457 + unsigned long flags; 1458 + int error; 1459 + 1460 + if (!lock) { 1461 + kick0_val = 0x83e70b13; 1462 + kick1_val = 0x95a4f1e0; 1463 + } 1464 + 1465 + local_irq_save(flags); 1466 + /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ 1467 + error = readl_poll_timeout(ddata->module_va + 0x44, val, 1468 + !(val & BIT(0)), 100, 50); 1469 + if (error) 1470 + dev_warn(ddata->dev, "rtc busy timeout\n"); 1471 + /* Now we have ~15 microseconds to read/write various registers */ 1472 + sysc_write(ddata, 0x6c, kick0_val); 1473 + sysc_write(ddata, 0x70, kick1_val); 1474 + local_irq_restore(flags); 1475 + } 1476 + 1477 + static void sysc_module_unlock_quirk_rtc(struct sysc *ddata) 1478 + { 1479 + sysc_quirk_rtc(ddata, false); 1480 + } 1481 + 1482 + static void sysc_module_lock_quirk_rtc(struct sysc *ddata) 1483 + { 1484 + sysc_quirk_rtc(ddata, true); 1650 1485 } 1651 1486 1652 1487 /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ ··· 1716 1483 dev_warn(ddata->dev, "wdt disable step2 failed\n"); 1717 1484 } 1718 1485 1486 + /* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */ 1487 + static void sysc_module_disable_quirk_pruss(struct sysc *ddata) 1488 + { 1489 + u32 reg; 1490 + 1491 + reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 1492 + reg |= SYSC_PRUSS_STANDBY_INIT; 1493 + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 1494 + } 1495 + 1719 
1496 static void sysc_init_module_quirks(struct sysc *ddata) 1720 1497 { 1721 1498 if (ddata->legacy_mode || !ddata->name) 1722 1499 return; 1723 1500 1724 1501 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { 1725 - ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w; 1502 + ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w; 1726 1503 1727 1504 return; 1728 1505 } 1729 1506 1730 1507 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) { 1731 - ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c; 1732 - ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c; 1508 + ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c; 1509 + ddata->post_reset_quirk = sysc_post_reset_quirk_i2c; 1733 1510 1734 1511 return; 1735 1512 } 1736 1513 1737 1514 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) 1738 1515 ddata->module_enable_quirk = sysc_module_enable_quirk_aess; 1516 + 1517 + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET) 1518 + ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss; 1519 + 1520 + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { 1521 + ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; 1522 + ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; 1523 + 1524 + return; 1525 + } 1739 1526 1740 1527 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) 1741 1528 ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; ··· 1764 1511 ddata->reset_done_quirk = sysc_reset_done_quirk_wdt; 1765 1512 ddata->module_disable_quirk = sysc_reset_done_quirk_wdt; 1766 1513 } 1514 + 1515 + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) 1516 + ddata->module_disable_quirk = sysc_module_disable_quirk_pruss; 1767 1517 } 1768 1518 1769 1519 static int sysc_clockdomain_init(struct sysc *ddata) ··· 1828 1572 sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; 1829 1573 syss_offset = ddata->offsets[SYSC_SYSSTATUS]; 1830 1574 1831 - if (ddata->legacy_mode || sysc_offset < 0 || 1575 + if (ddata->legacy_mode || 1832 1576 ddata->cap->regbits->srst_shift < 0 || 1833 1577 
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) 1834 1578 return 0; ··· 1840 1584 else 1841 1585 syss_done = ddata->cfg.syss_mask; 1842 1586 1843 - if (ddata->clk_disable_quirk) 1844 - ddata->clk_disable_quirk(ddata); 1587 + if (ddata->pre_reset_quirk) 1588 + ddata->pre_reset_quirk(ddata); 1845 1589 1846 - sysc_val = sysc_read_sysconfig(ddata); 1847 - sysc_val |= sysc_mask; 1848 - sysc_write(ddata, sysc_offset, sysc_val); 1590 + if (sysc_offset >= 0) { 1591 + sysc_val = sysc_read_sysconfig(ddata); 1592 + sysc_val |= sysc_mask; 1593 + sysc_write(ddata, sysc_offset, sysc_val); 1594 + } 1849 1595 1850 1596 if (ddata->cfg.srst_udelay) 1851 1597 usleep_range(ddata->cfg.srst_udelay, 1852 1598 ddata->cfg.srst_udelay * 2); 1853 1599 1854 - if (ddata->clk_enable_quirk) 1855 - ddata->clk_enable_quirk(ddata); 1600 + if (ddata->post_reset_quirk) 1601 + ddata->post_reset_quirk(ddata); 1856 1602 1857 1603 /* Poll on reset status */ 1858 1604 if (syss_offset >= 0) { ··· 2572 2314 .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED, 2573 2315 }; 2574 2316 2317 + /* 2318 + * PRUSS found on some AM33xx, AM437x and AM57xx SoCs 2319 + */ 2320 + static const struct sysc_capabilities sysc_pruss = { 2321 + .type = TI_SYSC_PRUSS, 2322 + .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT, 2323 + .regbits = &sysc_regbits_omap4_simple, 2324 + .mod_quirks = SYSC_MODULE_QUIRK_PRUSS, 2325 + }; 2326 + 2575 2327 static int sysc_init_pdata(struct sysc *ddata) 2576 2328 { 2577 2329 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); ··· 2655 2387 pm_runtime_put_sync(ddata->dev); 2656 2388 } 2657 2389 2390 + /* 2391 + * SoC model and features detection. Only needed for SoCs that need 2392 + * special handling for quirks, no need to list others. 
2393 + */ 2394 + static const struct soc_device_attribute sysc_soc_match[] = { 2395 + SOC_FLAG("OMAP242*", SOC_2420), 2396 + SOC_FLAG("OMAP243*", SOC_2430), 2397 + SOC_FLAG("OMAP3[45]*", SOC_3430), 2398 + SOC_FLAG("OMAP3[67]*", SOC_3630), 2399 + SOC_FLAG("OMAP443*", SOC_4430), 2400 + SOC_FLAG("OMAP446*", SOC_4460), 2401 + SOC_FLAG("OMAP447*", SOC_4470), 2402 + SOC_FLAG("OMAP54*", SOC_5430), 2403 + SOC_FLAG("AM433", SOC_AM3), 2404 + SOC_FLAG("AM43*", SOC_AM4), 2405 + SOC_FLAG("DRA7*", SOC_DRA7), 2406 + 2407 + { /* sentinel */ }, 2408 + }; 2409 + 2410 + /* 2411 + * List of SoCs variants with disabled features. By default we assume all 2412 + * devices in the device tree are available so no need to list those SoCs. 2413 + */ 2414 + static const struct soc_device_attribute sysc_soc_feat_match[] = { 2415 + /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */ 2416 + SOC_FLAG("AM3505", DIS_SGX), 2417 + SOC_FLAG("OMAP3525", DIS_SGX), 2418 + SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX), 2419 + SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX), 2420 + 2421 + /* OMAP3630/DM3730 variants with some accelerators disabled */ 2422 + SOC_FLAG("AM3703", DIS_IVA | DIS_SGX), 2423 + SOC_FLAG("DM3725", DIS_SGX), 2424 + SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX), 2425 + SOC_FLAG("OMAP3615/AM3715", DIS_IVA), 2426 + SOC_FLAG("OMAP3621", DIS_ISP), 2427 + 2428 + { /* sentinel */ }, 2429 + }; 2430 + 2431 + static int sysc_add_disabled(unsigned long base) 2432 + { 2433 + struct sysc_address *disabled_module; 2434 + 2435 + disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL); 2436 + if (!disabled_module) 2437 + return -ENOMEM; 2438 + 2439 + disabled_module->base = base; 2440 + 2441 + mutex_lock(&sysc_soc->list_lock); 2442 + list_add(&disabled_module->node, &sysc_soc->disabled_modules); 2443 + mutex_unlock(&sysc_soc->list_lock); 2444 + 2445 + return 0; 2446 + } 2447 + 2448 + /* 2449 + * One time init to detect the booted SoC and disable unavailable features. 
2450 + * Note that we initialize static data shared across all ti-sysc instances 2451 + * so ddata is only used for SoC type. This can be called from module_init 2452 + * once we no longer need to rely on platform data. 2453 + */ 2454 + static int sysc_init_soc(struct sysc *ddata) 2455 + { 2456 + const struct soc_device_attribute *match; 2457 + struct ti_sysc_platform_data *pdata; 2458 + unsigned long features = 0; 2459 + 2460 + if (sysc_soc) 2461 + return 0; 2462 + 2463 + sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL); 2464 + if (!sysc_soc) 2465 + return -ENOMEM; 2466 + 2467 + mutex_init(&sysc_soc->list_lock); 2468 + INIT_LIST_HEAD(&sysc_soc->disabled_modules); 2469 + sysc_soc->general_purpose = true; 2470 + 2471 + pdata = dev_get_platdata(ddata->dev); 2472 + if (pdata && pdata->soc_type_gp) 2473 + sysc_soc->general_purpose = pdata->soc_type_gp(); 2474 + 2475 + match = soc_device_match(sysc_soc_match); 2476 + if (match && match->data) 2477 + sysc_soc->soc = (int)match->data; 2478 + 2479 + match = soc_device_match(sysc_soc_feat_match); 2480 + if (!match) 2481 + return 0; 2482 + 2483 + if (match->data) 2484 + features = (unsigned long)match->data; 2485 + 2486 + /* 2487 + * Add disabled devices to the list based on the module base. 2488 + * Note that this must be done before we attempt to access the 2489 + * device and have module revision checks working. 
2490 + */ 2491 + if (features & DIS_ISP) 2492 + sysc_add_disabled(0x480bd400); 2493 + if (features & DIS_IVA) 2494 + sysc_add_disabled(0x5d000000); 2495 + if (features & DIS_SGX) 2496 + sysc_add_disabled(0x50000000); 2497 + 2498 + return 0; 2499 + } 2500 + 2501 + static void sysc_cleanup_soc(void) 2502 + { 2503 + struct sysc_address *disabled_module; 2504 + struct list_head *pos, *tmp; 2505 + 2506 + if (!sysc_soc) 2507 + return; 2508 + 2509 + mutex_lock(&sysc_soc->list_lock); 2510 + list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) { 2511 + disabled_module = list_entry(pos, struct sysc_address, node); 2512 + list_del(pos); 2513 + kfree(disabled_module); 2514 + } 2515 + mutex_unlock(&sysc_soc->list_lock); 2516 + } 2517 + 2518 + static int sysc_check_disabled_devices(struct sysc *ddata) 2519 + { 2520 + struct sysc_address *disabled_module; 2521 + struct list_head *pos; 2522 + int error = 0; 2523 + 2524 + mutex_lock(&sysc_soc->list_lock); 2525 + list_for_each(pos, &sysc_soc->disabled_modules) { 2526 + disabled_module = list_entry(pos, struct sysc_address, node); 2527 + if (ddata->module_pa == disabled_module->base) { 2528 + dev_dbg(ddata->dev, "module disabled for this SoC\n"); 2529 + error = -ENODEV; 2530 + break; 2531 + } 2532 + } 2533 + mutex_unlock(&sysc_soc->list_lock); 2534 + 2535 + return error; 2536 + } 2537 + 2658 2538 static const struct of_device_id sysc_match_table[] = { 2659 2539 { .compatible = "simple-bus", }, 2660 2540 { /* sentinel */ }, ··· 2820 2404 2821 2405 ddata->dev = &pdev->dev; 2822 2406 platform_set_drvdata(pdev, ddata); 2407 + 2408 + error = sysc_init_soc(ddata); 2409 + if (error) 2410 + return error; 2823 2411 2824 2412 error = sysc_init_match(ddata); 2825 2413 if (error) ··· 2854 2434 return error; 2855 2435 2856 2436 sysc_init_early_quirks(ddata); 2437 + 2438 + error = sysc_check_disabled_devices(ddata); 2439 + if (error) 2440 + return error; 2857 2441 2858 2442 error = sysc_get_clocks(ddata); 2859 2443 if (error) ··· 2963 2539 { 
.compatible = "ti,sysc-usb-host-fs", 2964 2540 .data = &sysc_omap4_usb_host_fs, }, 2965 2541 { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, }, 2542 + { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, }, 2966 2543 { }, 2967 2544 }; 2968 2545 MODULE_DEVICE_TABLE(of, sysc_match); ··· 2990 2565 { 2991 2566 bus_unregister_notifier(&platform_bus_type, &sysc_nb); 2992 2567 platform_driver_unregister(&sysc_driver); 2568 + sysc_cleanup_soc(); 2993 2569 } 2994 2570 module_exit(sysc_exit); 2995 2571
+2 -1
drivers/firmware/arm_scmi/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o 2 + obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o 3 3 scmi-bus-y = bus.o 4 4 scmi-driver-y = driver.o 5 + scmi-transport-y = mailbox.o shmem.o 5 6 scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o 6 7 obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+113 -2
drivers/firmware/arm_scmi/common.h
··· 33 33 /** 34 34 * struct scmi_msg_resp_prot_version - Response for a message 35 35 * 36 - * @major_version: Major version of the ABI that firmware supports 37 36 * @minor_version: Minor version of the ABI that firmware supports 37 + * @major_version: Major version of the ABI that firmware supports 38 38 * 39 39 * In general, ABI version changes follow the rule that minor version increments 40 40 * are backward compatible. Major revision changes in ABI may not be ··· 46 46 __le16 minor_version; 47 47 __le16 major_version; 48 48 }; 49 + 50 + #define MSG_ID_MASK GENMASK(7, 0) 51 + #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) 52 + #define MSG_TYPE_MASK GENMASK(9, 8) 53 + #define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr)) 54 + #define MSG_TYPE_COMMAND 0 55 + #define MSG_TYPE_DELAYED_RESP 2 56 + #define MSG_TYPE_NOTIFICATION 3 57 + #define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) 58 + #define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr)) 59 + #define MSG_TOKEN_ID_MASK GENMASK(27, 18) 60 + #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) 61 + #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) 49 62 50 63 /** 51 64 * struct scmi_msg_hdr - Message(Tx/Rx) header ··· 81 68 }; 82 69 83 70 /** 71 + * pack_scmi_header() - packs and returns 32-bit header 72 + * 73 + * @hdr: pointer to header containing all the information on message id, 74 + * protocol id and sequence id. 75 + * 76 + * Return: 32-bit packed message header to be sent to the platform. 77 + */ 78 + static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) 79 + { 80 + return FIELD_PREP(MSG_ID_MASK, hdr->id) | 81 + FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) | 82 + FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); 83 + } 84 + 85 + /** 86 + * unpack_scmi_header() - unpacks and records message and protocol id 87 + * 88 + * @msg_hdr: 32-bit packed message header sent from the platform 89 + * @hdr: pointer to header to fetch message and protocol id. 
90 + */ 91 + static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) 92 + { 93 + hdr->id = MSG_XTRACT_ID(msg_hdr); 94 + hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr); 95 + } 96 + 97 + /** 84 98 * struct scmi_msg - Message(Tx/Rx) structure 85 99 * 86 100 * @buf: Buffer pointer ··· 128 88 * message. If request-ACK protocol is used, we can reuse the same 129 89 * buffer for the rx path as we use for the tx path. 130 90 * @done: command message transmit completion event 131 - * @async: pointer to delayed response message received event completion 91 + * @async_done: pointer to delayed response message received event completion 132 92 */ 133 93 struct scmi_xfer { 134 94 int transfer_id; ··· 153 113 u8 *prot_imp); 154 114 155 115 int scmi_base_protocol_init(struct scmi_handle *h); 116 + 117 + /* SCMI Transport */ 118 + /** 119 + * struct scmi_chan_info - Structure representing a SCMI channel information 120 + * 121 + * @dev: Reference to device in the SCMI hierarchy corresponding to this 122 + * channel 123 + * @handle: Pointer to SCMI entity handle 124 + * @transport_info: Transport layer related information 125 + */ 126 + struct scmi_chan_info { 127 + struct device *dev; 128 + struct scmi_handle *handle; 129 + void *transport_info; 130 + }; 131 + 132 + /** 133 + * struct scmi_transport_ops - Structure representing a SCMI transport ops 134 + * 135 + * @chan_available: Callback to check if channel is available or not 136 + * @chan_setup: Callback to allocate and setup a channel 137 + * @chan_free: Callback to free a channel 138 + * @send_message: Callback to send a message 139 + * @mark_txdone: Callback to mark tx as done 140 + * @fetch_response: Callback to fetch response 141 + * @poll_done: Callback to poll transfer status 142 + */ 143 + struct scmi_transport_ops { 144 + bool (*chan_available)(struct device *dev, int idx); 145 + int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev, 146 + bool tx); 147 + int (*chan_free)(int id, void 
*p, void *data); 148 + int (*send_message)(struct scmi_chan_info *cinfo, 149 + struct scmi_xfer *xfer); 150 + void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret); 151 + void (*fetch_response)(struct scmi_chan_info *cinfo, 152 + struct scmi_xfer *xfer); 153 + bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); 154 + }; 155 + 156 + /** 157 + * struct scmi_desc - Description of SoC integration 158 + * 159 + * @ops: Pointer to the transport specific ops structure 160 + * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) 161 + * @max_msg: Maximum number of messages that can be pending 162 + * simultaneously in the system 163 + * @max_msg_size: Maximum size of data per message that can be handled. 164 + */ 165 + struct scmi_desc { 166 + struct scmi_transport_ops *ops; 167 + int max_rx_timeout_ms; 168 + int max_msg; 169 + int max_msg_size; 170 + }; 171 + 172 + extern const struct scmi_desc scmi_mailbox_desc; 173 + 174 + void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr); 175 + void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); 176 + 177 + /* shmem related declarations */ 178 + struct scmi_shared_mem; 179 + 180 + void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, 181 + struct scmi_xfer *xfer); 182 + u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); 183 + void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, 184 + struct scmi_xfer *xfer); 185 + bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, 186 + struct scmi_xfer *xfer);
+34 -259
drivers/firmware/arm_scmi/driver.c
··· 19 19 #include <linux/io.h> 20 20 #include <linux/kernel.h> 21 21 #include <linux/ktime.h> 22 - #include <linux/mailbox_client.h> 23 22 #include <linux/module.h> 24 23 #include <linux/of_address.h> 25 24 #include <linux/of_device.h> 26 25 #include <linux/processor.h> 27 - #include <linux/semaphore.h> 28 26 #include <linux/slab.h> 29 27 30 28 #include "common.h" 31 29 32 30 #define CREATE_TRACE_POINTS 33 31 #include <trace/events/scmi.h> 34 - 35 - #define MSG_ID_MASK GENMASK(7, 0) 36 - #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) 37 - #define MSG_TYPE_MASK GENMASK(9, 8) 38 - #define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr)) 39 - #define MSG_TYPE_COMMAND 0 40 - #define MSG_TYPE_DELAYED_RESP 2 41 - #define MSG_TYPE_NOTIFICATION 3 42 - #define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) 43 - #define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr)) 44 - #define MSG_TOKEN_ID_MASK GENMASK(27, 18) 45 - #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) 46 - #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) 47 32 48 33 enum scmi_error_codes { 49 34 SCMI_SUCCESS = 0, /* Success */ ··· 68 83 }; 69 84 70 85 /** 71 - * struct scmi_desc - Description of SoC integration 72 - * 73 - * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) 74 - * @max_msg: Maximum number of messages that can be pending 75 - * simultaneously in the system 76 - * @max_msg_size: Maximum size of data per message that can be handled. 
77 - */ 78 - struct scmi_desc { 79 - int max_rx_timeout_ms; 80 - int max_msg; 81 - int max_msg_size; 82 - }; 83 - 84 - /** 85 - * struct scmi_chan_info - Structure representing a SCMI channel information 86 - * 87 - * @cl: Mailbox Client 88 - * @chan: Transmit/Receive mailbox channel 89 - * @payload: Transmit/Receive mailbox channel payload area 90 - * @dev: Reference to device in the SCMI hierarchy corresponding to this 91 - * channel 92 - * @handle: Pointer to SCMI entity handle 93 - */ 94 - struct scmi_chan_info { 95 - struct mbox_client cl; 96 - struct mbox_chan *chan; 97 - void __iomem *payload; 98 - struct device *dev; 99 - struct scmi_handle *handle; 100 - }; 101 - 102 - /** 103 86 * struct scmi_info - Structure representing a SCMI instance 104 87 * 105 88 * @dev: Device pointer 106 89 * @desc: SoC description for this instance 107 - * @handle: Instance of SCMI handle to send to clients 108 90 * @version: SCMI revision information containing protocol version, 109 91 * implementation version and (sub-)vendor identification. 92 + * @handle: Instance of SCMI handle to send to clients 110 93 * @tx_minfo: Universal Transmit Message management info 111 94 * @tx_idr: IDR object to map protocol id to Tx channel info pointer 112 95 * @rx_idr: IDR object to map protocol id to Rx channel info pointer ··· 96 143 int users; 97 144 }; 98 145 99 - #define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl) 100 146 #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) 101 - 102 - /* 103 - * SCMI specification requires all parameters, message headers, return 104 - * arguments or any protocol data to be expressed in little endian 105 - * format only. 
106 - */ 107 - struct scmi_shared_mem { 108 - __le32 reserved; 109 - __le32 channel_status; 110 - #define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1) 111 - #define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0) 112 - __le32 reserved1[2]; 113 - __le32 flags; 114 - #define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) 115 - __le32 length; 116 - __le32 msg_header; 117 - u8 msg_payload[0]; 118 - }; 119 147 120 148 static const int scmi_linux_errmap[] = { 121 149 /* better than switch case as long as return value is continuous */ ··· 131 197 { 132 198 dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n", 133 199 hdr->id, hdr->seq, hdr->protocol_id); 134 - } 135 - 136 - static void scmi_fetch_response(struct scmi_xfer *xfer, 137 - struct scmi_shared_mem __iomem *mem) 138 - { 139 - xfer->hdr.status = ioread32(mem->msg_payload); 140 - /* Skip the length of header and status in payload area i.e 8 bytes */ 141 - xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8); 142 - 143 - /* Take a copy to the rx buffer.. */ 144 - memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len); 145 - } 146 - 147 - /** 148 - * pack_scmi_header() - packs and returns 32-bit header 149 - * 150 - * @hdr: pointer to header containing all the information on message id, 151 - * protocol id and sequence id. 152 - * 153 - * Return: 32-bit packed message header to be sent to the platform. 154 - */ 155 - static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) 156 - { 157 - return FIELD_PREP(MSG_ID_MASK, hdr->id) | 158 - FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) | 159 - FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); 160 - } 161 - 162 - /** 163 - * unpack_scmi_header() - unpacks and records message and protocol id 164 - * 165 - * @msg_hdr: 32-bit packed message header sent from the platform 166 - * @hdr: pointer to header to fetch message and protocol id. 
167 - */ 168 - static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) 169 - { 170 - hdr->id = MSG_XTRACT_ID(msg_hdr); 171 - hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr); 172 - } 173 - 174 - /** 175 - * scmi_tx_prepare() - mailbox client callback to prepare for the transfer 176 - * 177 - * @cl: client pointer 178 - * @m: mailbox message 179 - * 180 - * This function prepares the shared memory which contains the header and the 181 - * payload. 182 - */ 183 - static void scmi_tx_prepare(struct mbox_client *cl, void *m) 184 - { 185 - struct scmi_xfer *t = m; 186 - struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); 187 - struct scmi_shared_mem __iomem *mem = cinfo->payload; 188 - 189 - /* 190 - * Ideally channel must be free by now unless OS timeout last 191 - * request and platform continued to process the same, wait 192 - * until it releases the shared memory, otherwise we may endup 193 - * overwriting its response with new message payload or vice-versa 194 - */ 195 - spin_until_cond(ioread32(&mem->channel_status) & 196 - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); 197 - /* Mark channel busy + clear error */ 198 - iowrite32(0x0, &mem->channel_status); 199 - iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, 200 - &mem->flags); 201 - iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length); 202 - iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header); 203 - if (t->tx.buf) 204 - memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len); 205 200 } 206 201 207 202 /** ··· 201 338 } 202 339 203 340 /** 204 - * scmi_rx_callback() - mailbox client callback for receive messages 341 + * scmi_rx_callback() - callback for receiving messages 205 342 * 206 - * @cl: client pointer 207 - * @m: mailbox message 343 + * @cinfo: SCMI channel info 344 + * @msg_hdr: Message header 208 345 * 209 346 * Processes one received message to appropriate transfer information and 210 347 * signals completion of the transfer. 
··· 212 349 * NOTE: This function will be invoked in IRQ context, hence should be 213 350 * as optimal as possible. 214 351 */ 215 - static void scmi_rx_callback(struct mbox_client *cl, void *m) 352 + void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr) 216 353 { 217 - u8 msg_type; 218 - u32 msg_hdr; 219 - u16 xfer_id; 220 - struct scmi_xfer *xfer; 221 - struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); 222 - struct device *dev = cinfo->dev; 223 354 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); 224 355 struct scmi_xfers_info *minfo = &info->tx_minfo; 225 - struct scmi_shared_mem __iomem *mem = cinfo->payload; 226 - 227 - msg_hdr = ioread32(&mem->msg_header); 228 - msg_type = MSG_XTRACT_TYPE(msg_hdr); 229 - xfer_id = MSG_XTRACT_TOKEN(msg_hdr); 356 + u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); 357 + u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); 358 + struct device *dev = cinfo->dev; 359 + struct scmi_xfer *xfer; 230 360 231 361 if (msg_type == MSG_TYPE_NOTIFICATION) 232 362 return; /* Notifications not yet supported */ ··· 234 378 235 379 scmi_dump_header_dbg(dev, &xfer->hdr); 236 380 237 - scmi_fetch_response(xfer, mem); 381 + info->desc->ops->fetch_response(cinfo, xfer); 238 382 239 383 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, 240 384 xfer->hdr.protocol_id, xfer->hdr.seq, ··· 259 403 __scmi_xfer_put(&info->tx_minfo, xfer); 260 404 } 261 405 262 - static bool 263 - scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) 264 - { 265 - struct scmi_shared_mem __iomem *mem = cinfo->payload; 266 - u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header)); 267 - 268 - if (xfer->hdr.seq != xfer_id) 269 - return false; 270 - 271 - return ioread32(&mem->channel_status) & 272 - (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | 273 - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); 274 - } 275 - 276 406 #define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) 277 407 278 - static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info 
*cinfo, 408 + static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, 279 409 struct scmi_xfer *xfer, ktime_t stop) 280 410 { 281 - ktime_t __cur = ktime_get(); 411 + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); 282 412 283 - return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop); 413 + return info->desc->ops->poll_done(cinfo, xfer) || 414 + ktime_after(ktime_get(), stop); 284 415 } 285 416 286 417 /** ··· 296 453 xfer->hdr.protocol_id, xfer->hdr.seq, 297 454 xfer->hdr.poll_completion); 298 455 299 - ret = mbox_send_message(cinfo->chan, xfer); 456 + ret = info->desc->ops->send_message(cinfo, xfer); 300 457 if (ret < 0) { 301 - dev_dbg(dev, "mbox send fail %d\n", ret); 458 + dev_dbg(dev, "Failed to send message %d\n", ret); 302 459 return ret; 303 460 } 304 - 305 - /* mbox_send_message returns non-negative value on success, so reset */ 306 - ret = 0; 307 461 308 462 if (xfer->hdr.poll_completion) { 309 463 ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); ··· 308 468 spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); 309 469 310 470 if (ktime_before(ktime_get(), stop)) 311 - scmi_fetch_response(xfer, cinfo->payload); 471 + info->desc->ops->fetch_response(cinfo, xfer); 312 472 else 313 473 ret = -ETIMEDOUT; 314 474 } else { 315 475 /* And we wait for the response. */ 316 476 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 317 477 if (!wait_for_completion_timeout(&xfer->done, timeout)) { 318 - dev_err(dev, "mbox timed out in resp(caller: %pS)\n", 478 + dev_err(dev, "timed out in resp(caller: %pS)\n", 319 479 (void *)_RET_IP_); 320 480 ret = -ETIMEDOUT; 321 481 } ··· 324 484 if (!ret && xfer->hdr.status) 325 485 ret = scmi_to_linux_errno(xfer->hdr.status); 326 486 327 - /* 328 - * NOTE: we might prefer not to need the mailbox ticker to manage the 329 - * transfer queueing since the protocol layer queues things by itself. 
330 - * Unfortunately, we have to kick the mailbox framework after we have 331 - * received our message. 332 - */ 333 - mbox_client_txdone(cinfo->chan, ret); 487 + if (info->desc->ops->mark_txdone) 488 + info->desc->ops->mark_txdone(cinfo, ret); 334 489 335 490 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, 336 491 xfer->hdr.protocol_id, xfer->hdr.seq, ··· 566 731 return 0; 567 732 } 568 733 569 - static int scmi_mailbox_check(struct device_node *np, int idx) 570 - { 571 - return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 572 - idx, NULL); 573 - } 574 - 575 - static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, 576 - int prot_id, bool tx) 734 + static int scmi_chan_setup(struct scmi_info *info, struct device *dev, 735 + int prot_id, bool tx) 577 736 { 578 737 int ret, idx; 579 - struct resource res; 580 - resource_size_t size; 581 - struct device_node *shmem, *np = dev->of_node; 582 738 struct scmi_chan_info *cinfo; 583 - struct mbox_client *cl; 584 739 struct idr *idr; 585 - const char *desc = tx ? "Tx" : "Rx"; 586 740 587 741 /* Transmit channel is first entry i.e. index 0 */ 588 742 idx = tx ? 0 : 1; ··· 582 758 if (cinfo) 583 759 return 0; 584 760 585 - if (scmi_mailbox_check(np, idx)) { 761 + if (!info->desc->ops->chan_available(dev, idx)) { 586 762 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE); 587 763 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ 588 764 return -EINVAL; ··· 595 771 596 772 cinfo->dev = dev; 597 773 598 - cl = &cinfo->cl; 599 - cl->dev = dev; 600 - cl->rx_callback = scmi_rx_callback; 601 - cl->tx_prepare = tx ? 
scmi_tx_prepare : NULL; 602 - cl->tx_block = false; 603 - cl->knows_txdone = tx; 604 - 605 - shmem = of_parse_phandle(np, "shmem", idx); 606 - ret = of_address_to_resource(shmem, 0, &res); 607 - of_node_put(shmem); 608 - if (ret) { 609 - dev_err(dev, "failed to get SCMI %s payload memory\n", desc); 774 + ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); 775 + if (ret) 610 776 return ret; 611 - } 612 - 613 - size = resource_size(&res); 614 - cinfo->payload = devm_ioremap(info->dev, res.start, size); 615 - if (!cinfo->payload) { 616 - dev_err(dev, "failed to ioremap SCMI %s payload\n", desc); 617 - return -EADDRNOTAVAIL; 618 - } 619 - 620 - cinfo->chan = mbox_request_channel(cl, idx); 621 - if (IS_ERR(cinfo->chan)) { 622 - ret = PTR_ERR(cinfo->chan); 623 - if (ret != -EPROBE_DEFER) 624 - dev_err(dev, "failed to request SCMI %s mailbox\n", 625 - desc); 626 - return ret; 627 - } 628 777 629 778 idr_alloc: 630 779 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); ··· 611 814 } 612 815 613 816 static inline int 614 - scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) 817 + scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) 615 818 { 616 - int ret = scmi_mbox_chan_setup(info, dev, prot_id, true); 819 + int ret = scmi_chan_setup(info, dev, prot_id, true); 617 820 618 821 if (!ret) /* Rx is optional, hence no error check */ 619 - scmi_mbox_chan_setup(info, dev, prot_id, false); 822 + scmi_chan_setup(info, dev, prot_id, false); 620 823 621 824 return ret; 622 825 } ··· 634 837 return; 635 838 } 636 839 637 - if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) { 840 + if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { 638 841 dev_err(&sdev->dev, "failed to setup transport\n"); 639 842 scmi_device_destroy(sdev); 640 843 return; ··· 687 890 struct device *dev = &pdev->dev; 688 891 struct device_node *child, *np = dev->of_node; 689 892 690 - /* Only mailbox method supported, check for the presence of one */ 691 
- if (scmi_mailbox_check(np, 0)) { 692 - dev_err(dev, "no mailbox found in %pOF\n", np); 693 - return -EINVAL; 694 - } 695 - 696 893 desc = of_device_get_match_data(dev); 697 894 if (!desc) 698 895 return -EINVAL; ··· 711 920 handle->dev = info->dev; 712 921 handle->version = &info->version; 713 922 714 - ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); 923 + ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); 715 924 if (ret) 716 925 return ret; 717 926 ··· 746 955 return 0; 747 956 } 748 957 749 - static int scmi_mbox_free_channel(int id, void *p, void *data) 958 + void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) 750 959 { 751 - struct scmi_chan_info *cinfo = p; 752 - struct idr *idr = data; 753 - 754 - if (!IS_ERR_OR_NULL(cinfo->chan)) { 755 - mbox_free_channel(cinfo->chan); 756 - cinfo->chan = NULL; 757 - } 758 - 759 960 idr_remove(idr, id); 760 - 761 - return 0; 762 961 } 763 962 764 963 static int scmi_remove(struct platform_device *pdev) ··· 768 987 return ret; 769 988 770 989 /* Safe to free channels since no more users */ 771 - ret = idr_for_each(idr, scmi_mbox_free_channel, idr); 990 + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); 772 991 idr_destroy(&info->tx_idr); 773 992 774 993 idr = &info->rx_idr; 775 - ret = idr_for_each(idr, scmi_mbox_free_channel, idr); 994 + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); 776 995 idr_destroy(&info->rx_idr); 777 996 778 997 return ret; ··· 824 1043 }; 825 1044 ATTRIBUTE_GROUPS(versions); 826 1045 827 - static const struct scmi_desc scmi_generic_desc = { 828 - .max_rx_timeout_ms = 30, /* We may increase this if required */ 829 - .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ 830 - .max_msg_size = 128, 831 - }; 832 - 833 1046 /* Each compatible listed below must have descriptor associated with it */ 834 1047 static const struct of_device_id scmi_of_match[] = { 835 - { .compatible = "arm,scmi", .data = &scmi_generic_desc }, 1048 + { .compatible = 
"arm,scmi", .data = &scmi_mailbox_desc }, 836 1049 { /* Sentinel */ }, 837 1050 }; 838 1051
+184
drivers/firmware/arm_scmi/mailbox.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * System Control and Management Interface (SCMI) Message Mailbox Transport 4 + * driver. 5 + * 6 + * Copyright (C) 2019 ARM Ltd. 7 + */ 8 + 9 + #include <linux/err.h> 10 + #include <linux/device.h> 11 + #include <linux/mailbox_client.h> 12 + #include <linux/of.h> 13 + #include <linux/of_address.h> 14 + #include <linux/slab.h> 15 + 16 + #include "common.h" 17 + 18 + /** 19 + * struct scmi_mailbox - Structure representing a SCMI mailbox transport 20 + * 21 + * @cl: Mailbox Client 22 + * @chan: Transmit/Receive mailbox channel 23 + * @cinfo: SCMI channel info 24 + * @shmem: Transmit/Receive shared memory area 25 + */ 26 + struct scmi_mailbox { 27 + struct mbox_client cl; 28 + struct mbox_chan *chan; 29 + struct scmi_chan_info *cinfo; 30 + struct scmi_shared_mem __iomem *shmem; 31 + }; 32 + 33 + #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) 34 + 35 + static void tx_prepare(struct mbox_client *cl, void *m) 36 + { 37 + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); 38 + 39 + shmem_tx_prepare(smbox->shmem, m); 40 + } 41 + 42 + static void rx_callback(struct mbox_client *cl, void *m) 43 + { 44 + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); 45 + 46 + scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem)); 47 + } 48 + 49 + static bool mailbox_chan_available(struct device *dev, int idx) 50 + { 51 + return !of_parse_phandle_with_args(dev->of_node, "mboxes", 52 + "#mbox-cells", idx, NULL); 53 + } 54 + 55 + static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, 56 + bool tx) 57 + { 58 + const char *desc = tx ? "Tx" : "Rx"; 59 + struct device *cdev = cinfo->dev; 60 + struct scmi_mailbox *smbox; 61 + struct device_node *shmem; 62 + int ret, idx = tx ? 
0 : 1; 63 + struct mbox_client *cl; 64 + resource_size_t size; 65 + struct resource res; 66 + 67 + smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL); 68 + if (!smbox) 69 + return -ENOMEM; 70 + 71 + shmem = of_parse_phandle(cdev->of_node, "shmem", idx); 72 + ret = of_address_to_resource(shmem, 0, &res); 73 + of_node_put(shmem); 74 + if (ret) { 75 + dev_err(cdev, "failed to get SCMI %s shared memory\n", desc); 76 + return ret; 77 + } 78 + 79 + size = resource_size(&res); 80 + smbox->shmem = devm_ioremap(dev, res.start, size); 81 + if (!smbox->shmem) { 82 + dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); 83 + return -EADDRNOTAVAIL; 84 + } 85 + 86 + cl = &smbox->cl; 87 + cl->dev = cdev; 88 + cl->tx_prepare = tx ? tx_prepare : NULL; 89 + cl->rx_callback = rx_callback; 90 + cl->tx_block = false; 91 + cl->knows_txdone = tx; 92 + 93 + smbox->chan = mbox_request_channel(cl, tx ? 0 : 1); 94 + if (IS_ERR(smbox->chan)) { 95 + ret = PTR_ERR(smbox->chan); 96 + if (ret != -EPROBE_DEFER) 97 + dev_err(cdev, "failed to request SCMI %s mailbox\n", 98 + tx ? 
"Tx" : "Rx"); 99 + return ret; 100 + } 101 + 102 + cinfo->transport_info = smbox; 103 + smbox->cinfo = cinfo; 104 + 105 + return 0; 106 + } 107 + 108 + static int mailbox_chan_free(int id, void *p, void *data) 109 + { 110 + struct scmi_chan_info *cinfo = p; 111 + struct scmi_mailbox *smbox = cinfo->transport_info; 112 + 113 + if (!IS_ERR(smbox->chan)) { 114 + mbox_free_channel(smbox->chan); 115 + cinfo->transport_info = NULL; 116 + smbox->chan = NULL; 117 + smbox->cinfo = NULL; 118 + } 119 + 120 + scmi_free_channel(cinfo, data, id); 121 + 122 + return 0; 123 + } 124 + 125 + static int mailbox_send_message(struct scmi_chan_info *cinfo, 126 + struct scmi_xfer *xfer) 127 + { 128 + struct scmi_mailbox *smbox = cinfo->transport_info; 129 + int ret; 130 + 131 + ret = mbox_send_message(smbox->chan, xfer); 132 + 133 + /* mbox_send_message returns non-negative value on success, so reset */ 134 + if (ret > 0) 135 + ret = 0; 136 + 137 + return ret; 138 + } 139 + 140 + static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret) 141 + { 142 + struct scmi_mailbox *smbox = cinfo->transport_info; 143 + 144 + /* 145 + * NOTE: we might prefer not to need the mailbox ticker to manage the 146 + * transfer queueing since the protocol layer queues things by itself. 147 + * Unfortunately, we have to kick the mailbox framework after we have 148 + * received our message. 
149 + */ 150 + mbox_client_txdone(smbox->chan, ret); 151 + } 152 + 153 + static void mailbox_fetch_response(struct scmi_chan_info *cinfo, 154 + struct scmi_xfer *xfer) 155 + { 156 + struct scmi_mailbox *smbox = cinfo->transport_info; 157 + 158 + shmem_fetch_response(smbox->shmem, xfer); 159 + } 160 + 161 + static bool 162 + mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) 163 + { 164 + struct scmi_mailbox *smbox = cinfo->transport_info; 165 + 166 + return shmem_poll_done(smbox->shmem, xfer); 167 + } 168 + 169 + static struct scmi_transport_ops scmi_mailbox_ops = { 170 + .chan_available = mailbox_chan_available, 171 + .chan_setup = mailbox_chan_setup, 172 + .chan_free = mailbox_chan_free, 173 + .send_message = mailbox_send_message, 174 + .mark_txdone = mailbox_mark_txdone, 175 + .fetch_response = mailbox_fetch_response, 176 + .poll_done = mailbox_poll_done, 177 + }; 178 + 179 + const struct scmi_desc scmi_mailbox_desc = { 180 + .ops = &scmi_mailbox_ops, 181 + .max_rx_timeout_ms = 30, /* We may increase this if required */ 182 + .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ 183 + .max_msg_size = 128, 184 + };
+1 -1
drivers/firmware/arm_scmi/perf.c
··· 89 89 __le32 power; 90 90 __le16 transition_latency_us; 91 91 __le16 reserved; 92 - } opp[0]; 92 + } opp[]; 93 93 }; 94 94 95 95 struct scmi_perf_get_fc_info {
+83
drivers/firmware/arm_scmi/shmem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * For transport using shared mem structure. 4 + * 5 + * Copyright (C) 2019 ARM Ltd. 6 + */ 7 + 8 + #include <linux/io.h> 9 + #include <linux/processor.h> 10 + #include <linux/types.h> 11 + 12 + #include "common.h" 13 + 14 + /* 15 + * SCMI specification requires all parameters, message headers, return 16 + * arguments or any protocol data to be expressed in little endian 17 + * format only. 18 + */ 19 + struct scmi_shared_mem { 20 + __le32 reserved; 21 + __le32 channel_status; 22 + #define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1) 23 + #define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0) 24 + __le32 reserved1[2]; 25 + __le32 flags; 26 + #define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) 27 + __le32 length; 28 + __le32 msg_header; 29 + u8 msg_payload[]; 30 + }; 31 + 32 + void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, 33 + struct scmi_xfer *xfer) 34 + { 35 + /* 36 + * Ideally channel must be free by now unless OS timeout last 37 + * request and platform continued to process the same, wait 38 + * until it releases the shared memory, otherwise we may endup 39 + * overwriting its response with new message payload or vice-versa 40 + */ 41 + spin_until_cond(ioread32(&shmem->channel_status) & 42 + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); 43 + /* Mark channel busy + clear error */ 44 + iowrite32(0x0, &shmem->channel_status); 45 + iowrite32(xfer->hdr.poll_completion ? 
0 : SCMI_SHMEM_FLAG_INTR_ENABLED, 46 + &shmem->flags); 47 + iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); 48 + iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); 49 + if (xfer->tx.buf) 50 + memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); 51 + } 52 + 53 + u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) 54 + { 55 + return ioread32(&shmem->msg_header); 56 + } 57 + 58 + void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, 59 + struct scmi_xfer *xfer) 60 + { 61 + xfer->hdr.status = ioread32(shmem->msg_payload); 62 + /* Skip the length of header and status in shmem area i.e 8 bytes */ 63 + xfer->rx.len = min_t(size_t, xfer->rx.len, 64 + ioread32(&shmem->length) - 8); 65 + 66 + /* Take a copy to the rx buffer.. */ 67 + memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); 68 + } 69 + 70 + bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, 71 + struct scmi_xfer *xfer) 72 + { 73 + u16 xfer_id; 74 + 75 + xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header)); 76 + 77 + if (xfer->hdr.seq != xfer_id) 78 + return false; 79 + 80 + return ioread32(&shmem->channel_status) & 81 + (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | 82 + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); 83 + }
+2 -2
drivers/firmware/arm_scpi.c
··· 262 262 struct scpi_shared_mem { 263 263 __le32 command; 264 264 __le32 status; 265 - u8 payload[0]; 265 + u8 payload[]; 266 266 } __packed; 267 267 268 268 struct legacy_scpi_shared_mem { 269 269 __le32 status; 270 - u8 payload[0]; 270 + u8 payload[]; 271 271 } __packed; 272 272 273 273 struct scp_capabilities {
+12 -1
drivers/firmware/imx/scu-pd.c
··· 93 93 { "kpp", IMX_SC_R_KPP, 1, false, 0 }, 94 94 { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 }, 95 95 { "mu_a", IMX_SC_R_MU_0A, 14, true, 0 }, 96 - { "mu_b", IMX_SC_R_MU_13B, 1, true, 13 }, 96 + { "mu_b", IMX_SC_R_MU_5B, 9, true, 5 }, 97 97 98 98 /* CONN SS */ 99 99 { "usb", IMX_SC_R_USB_0, 2, true, 0 }, ··· 109 109 { "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 }, 110 110 { "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 }, 111 111 { "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 }, 112 + { "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 }, 112 113 { "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 }, 113 114 { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 }, 114 115 { "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 }, ··· 117 116 { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 }, 118 117 { "esai0", IMX_SC_R_ESAI_0, 1, false, 0 }, 119 118 { "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 }, 119 + { "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 }, 120 120 { "sai", IMX_SC_R_SAI_0, 3, true, 0 }, 121 + { "sai3", IMX_SC_R_SAI_3, 1, false, 0 }, 122 + { "sai4", IMX_SC_R_SAI_4, 1, false, 0 }, 123 + { "sai5", IMX_SC_R_SAI_5, 1, false, 0 }, 124 + { "sai6", IMX_SC_R_SAI_6, 1, false, 0 }, 125 + { "sai7", IMX_SC_R_SAI_7, 1, false, 0 }, 121 126 { "amix", IMX_SC_R_AMIX, 1, false, 0 }, 122 127 { "mqs0", IMX_SC_R_MQS_0, 1, false, 0 }, 123 128 { "dsp", IMX_SC_R_DSP, 1, false, 0 }, ··· 165 158 /* DC SS */ 166 159 { "dc0", IMX_SC_R_DC_0, 1, false, 0 }, 167 160 { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 }, 161 + 162 + /* CM40 SS */ 163 + { "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 }, 164 + { "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 }, 168 165 }; 169 166 170 167 static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
+2
drivers/firmware/meson/meson_sm.c
··· 44 44 CMD(SM_EFUSE_WRITE, 0x82000031), 45 45 CMD(SM_EFUSE_USER_MAX, 0x82000033), 46 46 CMD(SM_GET_CHIP_ID, 0x82000044), 47 + CMD(SM_A1_PWRC_SET, 0x82000093), 48 + CMD(SM_A1_PWRC_GET, 0x82000095), 47 49 { /* sentinel */ }, 48 50 }, 49 51 };
+1 -1
drivers/firmware/tegra/Kconfig
··· 7 7 help 8 8 IVC (Inter-VM Communication) protocol is part of the IPC 9 9 (Inter Processor Communication) framework on Tegra. It maintains the 10 - data and the different commuication channels in SysRAM or RAM and 10 + data and the different communication channels in SysRAM or RAM and 11 11 keeps the content is synchronization between host CPU and remote 12 12 processors. 13 13
+22 -3
drivers/gpu/drm/omapdrm/dss/dss.c
··· 1348 1348 return dev == child; 1349 1349 } 1350 1350 1351 + struct dss_component_match_data { 1352 + struct device *dev; 1353 + struct component_match **match; 1354 + }; 1355 + 1351 1356 static int dss_add_child_component(struct device *dev, void *data) 1352 1357 { 1353 - struct component_match **match = data; 1358 + struct dss_component_match_data *cmatch = data; 1359 + struct component_match **match = cmatch->match; 1354 1360 1355 1361 /* 1356 1362 * HACK ··· 1367 1361 if (strstr(dev_name(dev), "rfbi")) 1368 1362 return 0; 1369 1363 1370 - component_match_add(dev->parent, match, dss_component_compare, dev); 1364 + /* 1365 + * Handle possible interconnect target modules defined within the DSS. 1366 + * The DSS components can be children of an interconnect target module 1367 + * after the device tree has been updated for the module data. 1368 + * See also omapdss_boot_init() for compatible fixup. 1369 + */ 1370 + if (strstr(dev_name(dev), "target-module")) 1371 + return device_for_each_child(dev, cmatch, 1372 + dss_add_child_component); 1373 + 1374 + component_match_add(cmatch->dev, match, dss_component_compare, dev); 1371 1375 1372 1376 return 0; 1373 1377 } ··· 1420 1404 static int dss_probe(struct platform_device *pdev) 1421 1405 { 1422 1406 const struct soc_device_attribute *soc; 1407 + struct dss_component_match_data cmatch; 1423 1408 struct component_match *match = NULL; 1424 1409 struct resource *dss_mem; 1425 1410 struct dss_device *dss; ··· 1498 1481 1499 1482 omapdss_gather_components(&pdev->dev); 1500 1483 1501 - device_for_each_child(&pdev->dev, &match, dss_add_child_component); 1484 + cmatch.dev = &pdev->dev; 1485 + cmatch.match = &match; 1486 + device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component); 1502 1487 1503 1488 r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); 1504 1489 if (r)
+17 -8
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
··· 178 178 {}, 179 179 }; 180 180 181 + static void __init omapdss_find_children(struct device_node *np) 182 + { 183 + struct device_node *child; 184 + 185 + for_each_available_child_of_node(np, child) { 186 + if (!of_find_property(child, "compatible", NULL)) 187 + continue; 188 + 189 + omapdss_walk_device(child, true); 190 + 191 + if (of_device_is_compatible(child, "ti,sysc")) 192 + omapdss_find_children(child); 193 + } 194 + } 195 + 181 196 static int __init omapdss_boot_init(void) 182 197 { 183 - struct device_node *dss, *child; 198 + struct device_node *dss; 184 199 185 200 INIT_LIST_HEAD(&dss_conv_list); 186 201 ··· 205 190 goto put_node; 206 191 207 192 omapdss_walk_device(dss, true); 208 - 209 - for_each_available_child_of_node(dss, child) { 210 - if (!of_find_property(child, "compatible", NULL)) 211 - continue; 212 - 213 - omapdss_walk_device(child, true); 214 - } 193 + omapdss_find_children(dss); 215 194 216 195 while (!list_empty(&dss_conv_list)) { 217 196 struct dss_conv_node *n;
+5
drivers/memory/tegra/tegra124-emc.c
··· 1158 1158 emc->debugfs.max_rate = emc->timings[i].rate; 1159 1159 } 1160 1160 1161 + if (!emc->num_timings) { 1162 + emc->debugfs.min_rate = clk_get_rate(emc->clk); 1163 + emc->debugfs.max_rate = emc->debugfs.min_rate; 1164 + } 1165 + 1161 1166 err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, 1162 1167 emc->debugfs.max_rate); 1163 1168 if (err < 0) {
+5
drivers/memory/tegra/tegra20-emc.c
··· 628 628 emc->debugfs.max_rate = emc->timings[i].rate; 629 629 } 630 630 631 + if (!emc->num_timings) { 632 + emc->debugfs.min_rate = clk_get_rate(emc->clk); 633 + emc->debugfs.max_rate = emc->debugfs.min_rate; 634 + } 635 + 631 636 err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, 632 637 emc->debugfs.max_rate); 633 638 if (err < 0) {
+5
drivers/memory/tegra/tegra30-emc.c
··· 1256 1256 emc->debugfs.max_rate = emc->timings[i].rate; 1257 1257 } 1258 1258 1259 + if (!emc->num_timings) { 1260 + emc->debugfs.min_rate = clk_get_rate(emc->clk); 1261 + emc->debugfs.max_rate = emc->debugfs.min_rate; 1262 + } 1263 + 1259 1264 err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, 1260 1265 emc->debugfs.max_rate); 1261 1266 if (err < 0) {
+1 -1
drivers/soc/Makefile
··· 11 11 obj-$(CONFIG_MACH_DOVE) += dove/ 12 12 obj-y += fsl/ 13 13 obj-$(CONFIG_ARCH_GEMINI) += gemini/ 14 - obj-$(CONFIG_ARCH_MXC) += imx/ 14 + obj-y += imx/ 15 15 obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/ 16 16 obj-$(CONFIG_SOC_XWAY) += lantiq/ 17 17 obj-y += mediatek/
+13
drivers/soc/amlogic/Kconfig
··· 48 48 Say yes to expose Amlogic Meson Everything-Else Power Domains as 49 49 Generic Power Domains. 50 50 51 + config MESON_SECURE_PM_DOMAINS 52 + bool "Amlogic Meson Secure Power Domains driver" 53 + depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM 54 + depends on PM && OF 55 + depends on HAVE_ARM_SMCCC 56 + default ARCH_MESON 57 + select PM_GENERIC_DOMAINS 58 + select PM_GENERIC_DOMAINS_OF 59 + help 60 + Support for the power controller on Amlogic A1/C1 series. 61 + Say yes to expose Amlogic Meson Secure Power Domains as Generic 62 + Power Domains. 63 + 51 64 config MESON_MX_SOCINFO 52 65 bool "Amlogic Meson MX SoC Information driver" 53 66 depends on ARCH_MESON || COMPILE_TEST
+1
drivers/soc/amlogic/Makefile
··· 5 5 obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o 6 6 obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o 7 7 obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o 8 + obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o
+204
drivers/soc/amlogic/meson-secure-pwrc.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0+ OR MIT) 2 + /* 3 + * Copyright (c) 2019 Amlogic, Inc. 4 + * Author: Jianxin Pan <jianxin.pan@amlogic.com> 5 + */ 6 + 7 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 + 9 + #include <linux/io.h> 10 + #include <linux/of_device.h> 11 + #include <linux/platform_device.h> 12 + #include <linux/pm_domain.h> 13 + #include <dt-bindings/power/meson-a1-power.h> 14 + #include <linux/arm-smccc.h> 15 + #include <linux/firmware/meson/meson_sm.h> 16 + 17 + #define PWRC_ON 1 18 + #define PWRC_OFF 0 19 + 20 + struct meson_secure_pwrc_domain { 21 + struct generic_pm_domain base; 22 + unsigned int index; 23 + struct meson_secure_pwrc *pwrc; 24 + }; 25 + 26 + struct meson_secure_pwrc { 27 + struct meson_secure_pwrc_domain *domains; 28 + struct genpd_onecell_data xlate; 29 + struct meson_sm_firmware *fw; 30 + }; 31 + 32 + struct meson_secure_pwrc_domain_desc { 33 + unsigned int index; 34 + unsigned int flags; 35 + char *name; 36 + bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain); 37 + }; 38 + 39 + struct meson_secure_pwrc_domain_data { 40 + unsigned int count; 41 + struct meson_secure_pwrc_domain_desc *domains; 42 + }; 43 + 44 + static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain) 45 + { 46 + int is_off = 1; 47 + 48 + if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off, 49 + pwrc_domain->index, 0, 0, 0, 0) < 0) 50 + pr_err("failed to get power domain status\n"); 51 + 52 + return is_off; 53 + } 54 + 55 + static int meson_secure_pwrc_off(struct generic_pm_domain *domain) 56 + { 57 + int ret = 0; 58 + struct meson_secure_pwrc_domain *pwrc_domain = 59 + container_of(domain, struct meson_secure_pwrc_domain, base); 60 + 61 + if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL, 62 + pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) { 63 + pr_err("failed to set power domain off\n"); 64 + ret = -EINVAL; 65 + } 66 + 67 + return ret; 68 + } 69 + 70 + static int meson_secure_pwrc_on(struct 
generic_pm_domain *domain) 71 + { 72 + int ret = 0; 73 + struct meson_secure_pwrc_domain *pwrc_domain = 74 + container_of(domain, struct meson_secure_pwrc_domain, base); 75 + 76 + if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL, 77 + pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) { 78 + pr_err("failed to set power domain on\n"); 79 + ret = -EINVAL; 80 + } 81 + 82 + return ret; 83 + } 84 + 85 + #define SEC_PD(__name, __flag) \ 86 + [PWRC_##__name##_ID] = \ 87 + { \ 88 + .name = #__name, \ 89 + .index = PWRC_##__name##_ID, \ 90 + .is_off = pwrc_secure_is_off, \ 91 + .flags = __flag, \ 92 + } 93 + 94 + static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = { 95 + SEC_PD(DSPA, 0), 96 + SEC_PD(DSPB, 0), 97 + /* UART should keep working in ATF after suspend and before resume */ 98 + SEC_PD(UART, GENPD_FLAG_ALWAYS_ON), 99 + /* DMC is for DDR PHY ana/dig and DMC, and should be always on */ 100 + SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON), 101 + SEC_PD(I2C, 0), 102 + SEC_PD(PSRAM, 0), 103 + SEC_PD(ACODEC, 0), 104 + SEC_PD(AUDIO, 0), 105 + SEC_PD(OTP, 0), 106 + SEC_PD(DMA, 0), 107 + SEC_PD(SD_EMMC, 0), 108 + SEC_PD(RAMA, 0), 109 + /* SRAMB is used as ATF runtime memory, and should be always on */ 110 + SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON), 111 + SEC_PD(IR, 0), 112 + SEC_PD(SPICC, 0), 113 + SEC_PD(SPIFC, 0), 114 + SEC_PD(USB, 0), 115 + /* NIC is for the Arm NIC-400 interconnect, and should be always on */ 116 + SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON), 117 + SEC_PD(PDMIN, 0), 118 + SEC_PD(RSA, 0), 119 + }; 120 + 121 + static int meson_secure_pwrc_probe(struct platform_device *pdev) 122 + { 123 + int i; 124 + struct device_node *sm_np; 125 + struct meson_secure_pwrc *pwrc; 126 + const struct meson_secure_pwrc_domain_data *match; 127 + 128 + match = of_device_get_match_data(&pdev->dev); 129 + if (!match) { 130 + dev_err(&pdev->dev, "failed to get match data\n"); 131 + return -ENODEV; 132 + } 133 + 134 + sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm"); 135 
+ if (!sm_np) { 136 + dev_err(&pdev->dev, "no secure-monitor node\n"); 137 + return -ENODEV; 138 + } 139 + 140 + pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL); 141 + if (!pwrc) 142 + return -ENOMEM; 143 + 144 + pwrc->fw = meson_sm_get(sm_np); 145 + of_node_put(sm_np); 146 + if (!pwrc->fw) 147 + return -EPROBE_DEFER; 148 + 149 + pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count, 150 + sizeof(*pwrc->xlate.domains), 151 + GFP_KERNEL); 152 + if (!pwrc->xlate.domains) 153 + return -ENOMEM; 154 + 155 + pwrc->domains = devm_kcalloc(&pdev->dev, match->count, 156 + sizeof(*pwrc->domains), GFP_KERNEL); 157 + if (!pwrc->domains) 158 + return -ENOMEM; 159 + 160 + pwrc->xlate.num_domains = match->count; 161 + platform_set_drvdata(pdev, pwrc); 162 + 163 + for (i = 0 ; i < match->count ; ++i) { 164 + struct meson_secure_pwrc_domain *dom = &pwrc->domains[i]; 165 + 166 + if (!match->domains[i].index) 167 + continue; 168 + 169 + dom->pwrc = pwrc; 170 + dom->index = match->domains[i].index; 171 + dom->base.name = match->domains[i].name; 172 + dom->base.flags = match->domains[i].flags; 173 + dom->base.power_on = meson_secure_pwrc_on; 174 + dom->base.power_off = meson_secure_pwrc_off; 175 + 176 + pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom)); 177 + 178 + pwrc->xlate.domains[i] = &dom->base; 179 + } 180 + 181 + return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); 182 + } 183 + 184 + static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = { 185 + .domains = a1_pwrc_domains, 186 + .count = ARRAY_SIZE(a1_pwrc_domains), 187 + }; 188 + 189 + static const struct of_device_id meson_secure_pwrc_match_table[] = { 190 + { 191 + .compatible = "amlogic,meson-a1-pwrc", 192 + .data = &meson_secure_a1_pwrc_data, 193 + }, 194 + { /* sentinel */ } 195 + }; 196 + 197 + static struct platform_driver meson_secure_pwrc_driver = { 198 + .probe = meson_secure_pwrc_probe, 199 + .driver = { 200 + .name = "meson_secure_pwrc", 201 + 
.of_match_table = meson_secure_pwrc_match_table, 202 + }, 203 + }; 204 + builtin_platform_driver(meson_secure_pwrc_driver);
+66 -3
drivers/soc/fsl/dpio/dpio-service.c
··· 1 1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 2 /* 3 3 * Copyright 2014-2016 Freescale Semiconductor Inc. 4 - * Copyright 2016 NXP 4 + * Copyright 2016-2019 NXP 5 5 * 6 6 */ 7 7 #include <linux/types.h> ··· 433 433 EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); 434 434 435 435 /** 436 + * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames 437 + * to a frame queue using one fqid. 438 + * @d: the given DPIO service. 439 + * @fqid: the given frame queue id. 440 + * @fd: the frame descriptor which is enqueued. 441 + * @nb: number of frames to be enqueud 442 + * 443 + * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready, 444 + * or -ENODEV if there is no dpio service. 445 + */ 446 + int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, 447 + u32 fqid, 448 + const struct dpaa2_fd *fd, 449 + int nb) 450 + { 451 + struct qbman_eq_desc ed; 452 + 453 + d = service_select(d); 454 + if (!d) 455 + return -ENODEV; 456 + 457 + qbman_eq_desc_clear(&ed); 458 + qbman_eq_desc_set_no_orp(&ed, 0); 459 + qbman_eq_desc_set_fq(&ed, fqid); 460 + 461 + return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb); 462 + } 463 + EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq); 464 + 465 + /** 466 + * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames 467 + * to different frame queue using a list of fqids. 468 + * @d: the given DPIO service. 469 + * @fqid: the given list of frame queue ids. 470 + * @fd: the frame descriptor which is enqueued. 471 + * @nb: number of frames to be enqueud 472 + * 473 + * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready, 474 + * or -ENODEV if there is no dpio service. 
475 + */ 476 + int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, 477 + u32 *fqid, 478 + const struct dpaa2_fd *fd, 479 + int nb) 480 + { 481 + int i; 482 + struct qbman_eq_desc ed[32]; 483 + 484 + d = service_select(d); 485 + if (!d) 486 + return -ENODEV; 487 + 488 + for (i = 0; i < nb; i++) { 489 + qbman_eq_desc_clear(&ed[i]); 490 + qbman_eq_desc_set_no_orp(&ed[i], 0); 491 + qbman_eq_desc_set_fq(&ed[i], fqid[i]); 492 + } 493 + 494 + return qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb); 495 + } 496 + EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq); 497 + 498 + /** 436 499 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. 437 500 * @d: the given DPIO service. 438 501 * @qdid: the given queuing destination id. ··· 589 526 590 527 /** 591 528 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result. 592 - * @max_frames: the maximum number of dequeued result for frames, must be <= 16. 529 + * @max_frames: the maximum number of dequeued result for frames, must be <= 32. 593 530 * @dev: the device to allow mapping/unmapping the DMAable region. 594 531 * 595 532 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)". ··· 604 541 struct dpaa2_io_store *ret; 605 542 size_t size; 606 543 607 - if (!max_frames || (max_frames > 16)) 544 + if (!max_frames || (max_frames > 32)) 608 545 return NULL; 609 546 610 547 ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+687 -82
drivers/soc/fsl/dpio/qbman-portal.c
··· 1 1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 2 /* 3 3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. 4 - * Copyright 2016 NXP 4 + * Copyright 2016-2019 NXP 5 5 * 6 6 */ 7 7 8 8 #include <asm/cacheflush.h> 9 9 #include <linux/io.h> 10 10 #include <linux/slab.h> 11 + #include <linux/spinlock.h> 11 12 #include <soc/fsl/dpaa2-global.h> 12 13 13 14 #include "qbman-portal.h" 14 - 15 - #define QMAN_REV_4000 0x04000000 16 - #define QMAN_REV_4100 0x04010000 17 - #define QMAN_REV_4101 0x04010001 18 - #define QMAN_REV_5000 0x05000000 19 - 20 - #define QMAN_REV_MASK 0xffff0000 21 15 22 16 /* All QBMan command and result structures use this "valid bit" encoding */ 23 17 #define QB_VALID_BIT ((u32)0x80) ··· 22 28 23 29 /* CINH register offsets */ 24 30 #define QBMAN_CINH_SWP_EQCR_PI 0x800 31 + #define QBMAN_CINH_SWP_EQCR_CI 0x840 25 32 #define QBMAN_CINH_SWP_EQAR 0x8c0 26 33 #define QBMAN_CINH_SWP_CR_RT 0x900 27 34 #define QBMAN_CINH_SWP_VDQCR_RT 0x940 ··· 46 51 #define QBMAN_CENA_SWP_CR 0x600 47 52 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1)) 48 53 #define QBMAN_CENA_SWP_VDQCR 0x780 54 + #define QBMAN_CENA_SWP_EQCR_CI 0x840 55 + #define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840 49 56 50 57 /* CENA register offsets in memory-backed mode */ 51 58 #define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6)) ··· 75 78 /* opaque token for static dequeues */ 76 79 #define QMAN_SDQCR_TOKEN 0xbb 77 80 81 + #define QBMAN_EQCR_DCA_IDXMASK 0x0f 82 + #define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31) 83 + 84 + #define EQ_DESC_SIZE_WITHOUT_FD 29 85 + #define EQ_DESC_SIZE_FD_START 32 86 + 78 87 enum qbman_sdqcr_dct { 79 88 qbman_sdqcr_dct_null = 0, 80 89 qbman_sdqcr_dct_prio_ics, ··· 92 89 qbman_sdqcr_fc_one = 0, 93 90 qbman_sdqcr_fc_up_to_3 = 1 94 91 }; 92 + 93 + /* Internal Function declaration */ 94 + static int qbman_swp_enqueue_direct(struct qbman_swp *s, 95 + const struct qbman_eq_desc *d, 96 + const struct dpaa2_fd *fd); 97 + static int 
qbman_swp_enqueue_mem_back(struct qbman_swp *s, 98 + const struct qbman_eq_desc *d, 99 + const struct dpaa2_fd *fd); 100 + static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, 101 + const struct qbman_eq_desc *d, 102 + const struct dpaa2_fd *fd, 103 + uint32_t *flags, 104 + int num_frames); 105 + static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, 106 + const struct qbman_eq_desc *d, 107 + const struct dpaa2_fd *fd, 108 + uint32_t *flags, 109 + int num_frames); 110 + static int 111 + qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, 112 + const struct qbman_eq_desc *d, 113 + const struct dpaa2_fd *fd, 114 + int num_frames); 115 + static 116 + int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, 117 + const struct qbman_eq_desc *d, 118 + const struct dpaa2_fd *fd, 119 + int num_frames); 120 + static int qbman_swp_pull_direct(struct qbman_swp *s, 121 + struct qbman_pull_desc *d); 122 + static int qbman_swp_pull_mem_back(struct qbman_swp *s, 123 + struct qbman_pull_desc *d); 124 + 125 + const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s); 126 + const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s); 127 + 128 + static int qbman_swp_release_direct(struct qbman_swp *s, 129 + const struct qbman_release_desc *d, 130 + const u64 *buffers, 131 + unsigned int num_buffers); 132 + static int qbman_swp_release_mem_back(struct qbman_swp *s, 133 + const struct qbman_release_desc *d, 134 + const u64 *buffers, 135 + unsigned int num_buffers); 136 + 137 + /* Function pointers */ 138 + int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s, 139 + const struct qbman_eq_desc *d, 140 + const struct dpaa2_fd *fd) 141 + = qbman_swp_enqueue_direct; 142 + 143 + int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s, 144 + const struct qbman_eq_desc *d, 145 + const struct dpaa2_fd *fd, 146 + uint32_t *flags, 147 + int num_frames) 148 + = qbman_swp_enqueue_multiple_direct; 149 + 150 + int 151 + 
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s, 152 + const struct qbman_eq_desc *d, 153 + const struct dpaa2_fd *fd, 154 + int num_frames) 155 + = qbman_swp_enqueue_multiple_desc_direct; 156 + 157 + int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d) 158 + = qbman_swp_pull_direct; 159 + 160 + const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s) 161 + = qbman_swp_dqrr_next_direct; 162 + 163 + int (*qbman_swp_release_ptr)(struct qbman_swp *s, 164 + const struct qbman_release_desc *d, 165 + const u64 *buffers, 166 + unsigned int num_buffers) 167 + = qbman_swp_release_direct; 95 168 96 169 /* Portal Access */ 97 170 ··· 225 146 226 147 #define QMAN_RT_MODE 0x00000100 227 148 149 + static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) 150 + { 151 + /* 'first' is included, 'last' is excluded */ 152 + if (first <= last) 153 + return last - first; 154 + else 155 + return (2 * ringsize) - (first - last); 156 + } 157 + 228 158 /** 229 159 * qbman_swp_init() - Create a functional object representing the given 230 160 * QBMan portal descriptor. 
··· 244 156 */ 245 157 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) 246 158 { 247 - struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); 159 + struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL); 248 160 u32 reg; 161 + u32 mask_size; 162 + u32 eqcr_pi; 249 163 250 164 if (!p) 251 165 return NULL; 166 + 167 + spin_lock_init(&p->access_spinlock); 168 + 252 169 p->desc = d; 253 170 p->mc.valid_bit = QB_VALID_BIT; 254 171 p->sdq = 0; ··· 279 186 p->addr_cena = d->cena_bar; 280 187 p->addr_cinh = d->cinh_bar; 281 188 282 - if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) 283 - memset(p->addr_cena, 0, 64 * 1024); 189 + if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 284 190 285 - reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, 286 - 1, /* Writes Non-cacheable */ 287 - 0, /* EQCR_CI stashing threshold */ 288 - 3, /* RPM: Valid bit mode, RCR in array mode */ 289 - 2, /* DCM: Discrete consumption ack mode */ 290 - 3, /* EPM: Valid bit mode, EQCR in array mode */ 291 - 1, /* mem stashing drop enable == TRUE */ 292 - 1, /* mem stashing priority == TRUE */ 293 - 1, /* mem stashing enable == TRUE */ 294 - 1, /* dequeue stashing priority == TRUE */ 295 - 0, /* dequeue stashing enable == FALSE */ 296 - 0); /* EQCR_CI stashing priority == FALSE */ 297 - if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) 191 + reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, 192 + 1, /* Writes Non-cacheable */ 193 + 0, /* EQCR_CI stashing threshold */ 194 + 3, /* RPM: RCR in array mode */ 195 + 2, /* DCM: Discrete consumption ack */ 196 + 2, /* EPM: EQCR in ring mode */ 197 + 1, /* mem stashing drop enable enable */ 198 + 1, /* mem stashing priority enable */ 199 + 1, /* mem stashing enable */ 200 + 1, /* dequeue stashing priority enable */ 201 + 0, /* dequeue stashing enable enable */ 202 + 0); /* EQCR_CI stashing priority enable */ 203 + } else { 204 + memset(p->addr_cena, 0, 64 * 1024); 205 + reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, 206 + 1, 
/* Writes Non-cacheable */ 207 + 1, /* EQCR_CI stashing threshold */ 208 + 3, /* RPM: RCR in array mode */ 209 + 2, /* DCM: Discrete consumption ack */ 210 + 0, /* EPM: EQCR in ring mode */ 211 + 1, /* mem stashing drop enable */ 212 + 1, /* mem stashing priority enable */ 213 + 1, /* mem stashing enable */ 214 + 1, /* dequeue stashing priority enable */ 215 + 0, /* dequeue stashing enable */ 216 + 0); /* EQCR_CI stashing priority enable */ 298 217 reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */ 299 218 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */ 300 219 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */ 220 + } 301 221 302 222 qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg); 303 223 reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); ··· 331 225 * applied when dequeues from a specific channel are enabled. 332 226 */ 333 227 qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0); 228 + 229 + p->eqcr.pi_ring_size = 8; 230 + if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) { 231 + p->eqcr.pi_ring_size = 32; 232 + qbman_swp_enqueue_ptr = 233 + qbman_swp_enqueue_mem_back; 234 + qbman_swp_enqueue_multiple_ptr = 235 + qbman_swp_enqueue_multiple_mem_back; 236 + qbman_swp_enqueue_multiple_desc_ptr = 237 + qbman_swp_enqueue_multiple_desc_mem_back; 238 + qbman_swp_pull_ptr = qbman_swp_pull_mem_back; 239 + qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back; 240 + qbman_swp_release_ptr = qbman_swp_release_mem_back; 241 + } 242 + 243 + for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1) 244 + p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1; 245 + eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI); 246 + p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask; 247 + p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT; 248 + p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI) 249 + & p->eqcr.pi_ci_mask; 250 + p->eqcr.available = p->eqcr.pi_ring_size; 251 + 334 252 return p; 335 253 } 336 254 ··· 508 378 #define 
QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2 509 379 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3 510 380 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4 381 + #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7 511 382 512 383 /** 513 384 * qbman_eq_desc_clear() - Clear the contents of a descriptor to ··· 584 453 QMAN_RT_MODE); 585 454 } 586 455 456 + #define QB_RT_BIT ((u32)0x100) 587 457 /** 588 - * qbman_swp_enqueue() - Issue an enqueue command 458 + * qbman_swp_enqueue_direct() - Issue an enqueue command 589 459 * @s: the software portal used for enqueue 590 460 * @d: the enqueue descriptor 591 461 * @fd: the frame descriptor to be enqueued ··· 596 464 * 597 465 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. 598 466 */ 599 - int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, 600 - const struct dpaa2_fd *fd) 467 + static 468 + int qbman_swp_enqueue_direct(struct qbman_swp *s, 469 + const struct qbman_eq_desc *d, 470 + const struct dpaa2_fd *fd) 601 471 { 602 - struct qbman_eq_desc *p; 603 - u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR); 472 + int flags = 0; 473 + int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1); 604 474 605 - if (!EQAR_SUCCESS(eqar)) 606 - return -EBUSY; 475 + if (ret >= 0) 476 + ret = 0; 477 + else 478 + ret = -EBUSY; 479 + return ret; 480 + } 607 481 608 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); 609 - memcpy(&p->dca, &d->dca, 31); 610 - memcpy(&p->fd, fd, sizeof(*fd)); 482 + /** 483 + * qbman_swp_enqueue_mem_back() - Issue an enqueue command 484 + * @s: the software portal used for enqueue 485 + * @d: the enqueue descriptor 486 + * @fd: the frame descriptor to be enqueued 487 + * 488 + * Please note that 'fd' should only be NULL if the "action" of the 489 + * descriptor is "orp_hole" or "orp_nesn". 490 + * 491 + * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. 
492 + */ 493 + static 494 + int qbman_swp_enqueue_mem_back(struct qbman_swp *s, 495 + const struct qbman_eq_desc *d, 496 + const struct dpaa2_fd *fd) 497 + { 498 + int flags = 0; 499 + int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1); 611 500 612 - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 613 - /* Set the verb byte, have to substitute in the valid-bit */ 614 - dma_wmb(); 615 - p->verb = d->verb | EQAR_VB(eqar); 616 - } else { 617 - p->verb = d->verb | EQAR_VB(eqar); 618 - dma_wmb(); 619 - qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar)); 501 + if (ret >= 0) 502 + ret = 0; 503 + else 504 + ret = -EBUSY; 505 + return ret; 506 + } 507 + 508 + /** 509 + * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command 510 + * using one enqueue descriptor 511 + * @s: the software portal used for enqueue 512 + * @d: the enqueue descriptor 513 + * @fd: table pointer of frame descriptor table to be enqueued 514 + * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL 515 + * @num_frames: number of fd to be enqueued 516 + * 517 + * Return the number of fd enqueued, or a negative error number. 
518 + */ 519 + static 520 + int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, 521 + const struct qbman_eq_desc *d, 522 + const struct dpaa2_fd *fd, 523 + uint32_t *flags, 524 + int num_frames) 525 + { 526 + uint32_t *p = NULL; 527 + const uint32_t *cl = (uint32_t *)d; 528 + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; 529 + int i, num_enqueued = 0; 530 + uint64_t addr_cena; 531 + 532 + spin_lock(&s->access_spinlock); 533 + half_mask = (s->eqcr.pi_ci_mask>>1); 534 + full_mask = s->eqcr.pi_ci_mask; 535 + 536 + if (!s->eqcr.available) { 537 + eqcr_ci = s->eqcr.ci; 538 + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI; 539 + s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI); 540 + 541 + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, 542 + eqcr_ci, s->eqcr.ci); 543 + if (!s->eqcr.available) { 544 + spin_unlock(&s->access_spinlock); 545 + return 0; 546 + } 620 547 } 621 548 622 - return 0; 549 + eqcr_pi = s->eqcr.pi; 550 + num_enqueued = (s->eqcr.available < num_frames) ? 
551 + s->eqcr.available : num_frames; 552 + s->eqcr.available -= num_enqueued; 553 + /* Fill in the EQCR ring */ 554 + for (i = 0; i < num_enqueued; i++) { 555 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 556 + /* Skip copying the verb */ 557 + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); 558 + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], 559 + &fd[i], sizeof(*fd)); 560 + eqcr_pi++; 561 + } 562 + 563 + dma_wmb(); 564 + 565 + /* Set the verb byte, have to substitute in the valid-bit */ 566 + eqcr_pi = s->eqcr.pi; 567 + for (i = 0; i < num_enqueued; i++) { 568 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 569 + p[0] = cl[0] | s->eqcr.pi_vb; 570 + if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { 571 + struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; 572 + 573 + d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | 574 + ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); 575 + } 576 + eqcr_pi++; 577 + if (!(eqcr_pi & half_mask)) 578 + s->eqcr.pi_vb ^= QB_VALID_BIT; 579 + } 580 + 581 + /* Flush all the cacheline without load/store in between */ 582 + eqcr_pi = s->eqcr.pi; 583 + addr_cena = (size_t)s->addr_cena; 584 + for (i = 0; i < num_enqueued; i++) 585 + eqcr_pi++; 586 + s->eqcr.pi = eqcr_pi & full_mask; 587 + spin_unlock(&s->access_spinlock); 588 + 589 + return num_enqueued; 590 + } 591 + 592 + /** 593 + * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command 594 + * using one enqueue descriptor 595 + * @s: the software portal used for enqueue 596 + * @d: the enqueue descriptor 597 + * @fd: table pointer of frame descriptor table to be enqueued 598 + * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL 599 + * @num_frames: number of fd to be enqueued 600 + * 601 + * Return the number of fd enqueued, or a negative error number. 
602 + */ 603 + static 604 + int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s, 605 + const struct qbman_eq_desc *d, 606 + const struct dpaa2_fd *fd, 607 + uint32_t *flags, 608 + int num_frames) 609 + { 610 + uint32_t *p = NULL; 611 + const uint32_t *cl = (uint32_t *)(d); 612 + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; 613 + int i, num_enqueued = 0; 614 + unsigned long irq_flags; 615 + 616 + spin_lock(&s->access_spinlock); 617 + local_irq_save(irq_flags); 618 + 619 + half_mask = (s->eqcr.pi_ci_mask>>1); 620 + full_mask = s->eqcr.pi_ci_mask; 621 + if (!s->eqcr.available) { 622 + eqcr_ci = s->eqcr.ci; 623 + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK; 624 + s->eqcr.ci = __raw_readl(p) & full_mask; 625 + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, 626 + eqcr_ci, s->eqcr.ci); 627 + if (!s->eqcr.available) { 628 + local_irq_restore(irq_flags); 629 + spin_unlock(&s->access_spinlock); 630 + return 0; 631 + } 632 + } 633 + 634 + eqcr_pi = s->eqcr.pi; 635 + num_enqueued = (s->eqcr.available < num_frames) ? 
636 + s->eqcr.available : num_frames; 637 + s->eqcr.available -= num_enqueued; 638 + /* Fill in the EQCR ring */ 639 + for (i = 0; i < num_enqueued; i++) { 640 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 641 + /* Skip copying the verb */ 642 + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); 643 + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], 644 + &fd[i], sizeof(*fd)); 645 + eqcr_pi++; 646 + } 647 + 648 + /* Set the verb byte, have to substitute in the valid-bit */ 649 + eqcr_pi = s->eqcr.pi; 650 + for (i = 0; i < num_enqueued; i++) { 651 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 652 + p[0] = cl[0] | s->eqcr.pi_vb; 653 + if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { 654 + struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; 655 + 656 + d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | 657 + ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); 658 + } 659 + eqcr_pi++; 660 + if (!(eqcr_pi & half_mask)) 661 + s->eqcr.pi_vb ^= QB_VALID_BIT; 662 + } 663 + s->eqcr.pi = eqcr_pi & full_mask; 664 + 665 + dma_wmb(); 666 + qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI, 667 + (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb); 668 + local_irq_restore(irq_flags); 669 + spin_unlock(&s->access_spinlock); 670 + 671 + return num_enqueued; 672 + } 673 + 674 + /** 675 + * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command 676 + * using multiple enqueue descriptor 677 + * @s: the software portal used for enqueue 678 + * @d: table of minimal enqueue descriptor 679 + * @fd: table pointer of frame descriptor table to be enqueued 680 + * @num_frames: number of fd to be enqueued 681 + * 682 + * Return the number of fd enqueued, or a negative error number. 
683 + */ 684 + static 685 + int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, 686 + const struct qbman_eq_desc *d, 687 + const struct dpaa2_fd *fd, 688 + int num_frames) 689 + { 690 + uint32_t *p; 691 + const uint32_t *cl; 692 + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; 693 + int i, num_enqueued = 0; 694 + uint64_t addr_cena; 695 + 696 + half_mask = (s->eqcr.pi_ci_mask>>1); 697 + full_mask = s->eqcr.pi_ci_mask; 698 + if (!s->eqcr.available) { 699 + eqcr_ci = s->eqcr.ci; 700 + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI; 701 + s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI); 702 + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, 703 + eqcr_ci, s->eqcr.ci); 704 + if (!s->eqcr.available) 705 + return 0; 706 + } 707 + 708 + eqcr_pi = s->eqcr.pi; 709 + num_enqueued = (s->eqcr.available < num_frames) ? 710 + s->eqcr.available : num_frames; 711 + s->eqcr.available -= num_enqueued; 712 + /* Fill in the EQCR ring */ 713 + for (i = 0; i < num_enqueued; i++) { 714 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 715 + cl = (uint32_t *)(&d[i]); 716 + /* Skip copying the verb */ 717 + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); 718 + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], 719 + &fd[i], sizeof(*fd)); 720 + eqcr_pi++; 721 + } 722 + 723 + dma_wmb(); 724 + 725 + /* Set the verb byte, have to substitute in the valid-bit */ 726 + eqcr_pi = s->eqcr.pi; 727 + for (i = 0; i < num_enqueued; i++) { 728 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 729 + cl = (uint32_t *)(&d[i]); 730 + p[0] = cl[0] | s->eqcr.pi_vb; 731 + eqcr_pi++; 732 + if (!(eqcr_pi & half_mask)) 733 + s->eqcr.pi_vb ^= QB_VALID_BIT; 734 + } 735 + 736 + /* Flush all the cacheline without load/store in between */ 737 + eqcr_pi = s->eqcr.pi; 738 + addr_cena = (uint64_t)s->addr_cena; 739 + for (i = 0; i < num_enqueued; i++) 740 + eqcr_pi++; 741 + s->eqcr.pi = eqcr_pi & full_mask; 742 + 743 + return num_enqueued; 744 + } 745 + 746 + /** 
747 + * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command 748 + * using multiple enqueue descriptor 749 + * @s: the software portal used for enqueue 750 + * @d: table of minimal enqueue descriptor 751 + * @fd: table pointer of frame descriptor table to be enqueued 752 + * @num_frames: number of fd to be enqueued 753 + * 754 + * Return the number of fd enqueued, or a negative error number. 755 + */ 756 + static 757 + int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, 758 + const struct qbman_eq_desc *d, 759 + const struct dpaa2_fd *fd, 760 + int num_frames) 761 + { 762 + uint32_t *p; 763 + const uint32_t *cl; 764 + uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; 765 + int i, num_enqueued = 0; 766 + 767 + half_mask = (s->eqcr.pi_ci_mask>>1); 768 + full_mask = s->eqcr.pi_ci_mask; 769 + if (!s->eqcr.available) { 770 + eqcr_ci = s->eqcr.ci; 771 + p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK; 772 + s->eqcr.ci = __raw_readl(p) & full_mask; 773 + s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, 774 + eqcr_ci, s->eqcr.ci); 775 + if (!s->eqcr.available) 776 + return 0; 777 + } 778 + 779 + eqcr_pi = s->eqcr.pi; 780 + num_enqueued = (s->eqcr.available < num_frames) ? 
781 + s->eqcr.available : num_frames; 782 + s->eqcr.available -= num_enqueued; 783 + /* Fill in the EQCR ring */ 784 + for (i = 0; i < num_enqueued; i++) { 785 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 786 + cl = (uint32_t *)(&d[i]); 787 + /* Skip copying the verb */ 788 + memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); 789 + memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], 790 + &fd[i], sizeof(*fd)); 791 + eqcr_pi++; 792 + } 793 + 794 + /* Set the verb byte, have to substitute in the valid-bit */ 795 + eqcr_pi = s->eqcr.pi; 796 + for (i = 0; i < num_enqueued; i++) { 797 + p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); 798 + cl = (uint32_t *)(&d[i]); 799 + p[0] = cl[0] | s->eqcr.pi_vb; 800 + eqcr_pi++; 801 + if (!(eqcr_pi & half_mask)) 802 + s->eqcr.pi_vb ^= QB_VALID_BIT; 803 + } 804 + 805 + s->eqcr.pi = eqcr_pi & full_mask; 806 + 807 + dma_wmb(); 808 + qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI, 809 + (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb); 810 + 811 + return num_enqueued; 623 812 } 624 813 625 814 /* Static (push) dequeue */ ··· 1098 645 } 1099 646 1100 647 /** 1101 - * qbman_swp_pull() - Issue the pull dequeue command 648 + * qbman_swp_pull_direct() - Issue the pull dequeue command 1102 649 * @s: the software portal object 1103 650 * @d: the software portal descriptor which has been configured with 1104 651 * the set of qbman_pull_desc_set_*() calls ··· 1106 653 * Return 0 for success, and -EBUSY if the software portal is not ready 1107 654 * to do pull dequeue. 
1108 655 */ 1109 - int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) 656 + static 657 + int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d) 658 + { 659 + struct qbman_pull_desc *p; 660 + 661 + if (!atomic_dec_and_test(&s->vdq.available)) { 662 + atomic_inc(&s->vdq.available); 663 + return -EBUSY; 664 + } 665 + s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; 666 + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) 667 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR); 668 + else 669 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM); 670 + p->numf = d->numf; 671 + p->tok = QMAN_DQ_TOKEN_VALID; 672 + p->dq_src = d->dq_src; 673 + p->rsp_addr = d->rsp_addr; 674 + p->rsp_addr_virt = d->rsp_addr_virt; 675 + dma_wmb(); 676 + /* Set the verb byte, have to substitute in the valid-bit */ 677 + p->verb = d->verb | s->vdq.valid_bit; 678 + s->vdq.valid_bit ^= QB_VALID_BIT; 679 + 680 + return 0; 681 + } 682 + 683 + /** 684 + * qbman_swp_pull_mem_back() - Issue the pull dequeue command 685 + * @s: the software portal object 686 + * @d: the software portal descriptor which has been configured with 687 + * the set of qbman_pull_desc_set_*() calls 688 + * 689 + * Return 0 for success, and -EBUSY if the software portal is not ready 690 + * to do pull dequeue. 
691 + */ 692 + static 693 + int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d) 1110 694 { 1111 695 struct qbman_pull_desc *p; 1112 696 ··· 1162 672 p->rsp_addr = d->rsp_addr; 1163 673 p->rsp_addr_virt = d->rsp_addr_virt; 1164 674 1165 - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 1166 - dma_wmb(); 1167 - /* Set the verb byte, have to substitute in the valid-bit */ 1168 - p->verb = d->verb | s->vdq.valid_bit; 1169 - s->vdq.valid_bit ^= QB_VALID_BIT; 1170 - } else { 1171 - p->verb = d->verb | s->vdq.valid_bit; 1172 - s->vdq.valid_bit ^= QB_VALID_BIT; 1173 - dma_wmb(); 1174 - qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE); 1175 - } 675 + /* Set the verb byte, have to substitute in the valid-bit */ 676 + p->verb = d->verb | s->vdq.valid_bit; 677 + s->vdq.valid_bit ^= QB_VALID_BIT; 678 + dma_wmb(); 679 + qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE); 1176 680 1177 681 return 0; 1178 682 } ··· 1174 690 #define QMAN_DQRR_PI_MASK 0xf 1175 691 1176 692 /** 1177 - * qbman_swp_dqrr_next() - Get an valid DQRR entry 693 + * qbman_swp_dqrr_next_direct() - Get an valid DQRR entry 1178 694 * @s: the software portal object 1179 695 * 1180 696 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry 1181 697 * only once, so repeated calls can return a sequence of DQRR entries, without 1182 698 * requiring they be consumed immediately or in any particular order. 
1183 699 */ 1184 - const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) 700 + const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s) 1185 701 { 1186 702 u32 verb; 1187 703 u32 response_verb; ··· 1224 740 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); 1225 741 } 1226 742 1227 - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) 1228 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); 1229 - else 1230 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)); 743 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); 744 + verb = p->dq.verb; 745 + 746 + /* 747 + * If the valid-bit isn't of the expected polarity, nothing there. Note, 748 + * in the DQRR reset bug workaround, we shouldn't need to skip these 749 + * check, because we've already determined that a new entry is available 750 + * and we've invalidated the cacheline before reading it, so the 751 + * valid-bit behaviour is repaired and should tell us what we already 752 + * knew from reading PI. 753 + */ 754 + if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { 755 + prefetch(qbman_get_cmd(s, 756 + QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); 757 + return NULL; 758 + } 759 + /* 760 + * There's something there. Move "next_idx" attention to the next ring 761 + * entry (and prefetch it) before returning what we found. 
762 + */ 763 + s->dqrr.next_idx++; 764 + s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ 765 + if (!s->dqrr.next_idx) 766 + s->dqrr.valid_bit ^= QB_VALID_BIT; 767 + 768 + /* 769 + * If this is the final response to a volatile dequeue command 770 + * indicate that the vdq is available 771 + */ 772 + flags = p->dq.stat; 773 + response_verb = verb & QBMAN_RESULT_MASK; 774 + if ((response_verb == QBMAN_RESULT_DQ) && 775 + (flags & DPAA2_DQ_STAT_VOLATILE) && 776 + (flags & DPAA2_DQ_STAT_EXPIRED)) 777 + atomic_inc(&s->vdq.available); 778 + 779 + prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); 780 + 781 + return p; 782 + } 783 + 784 + /** 785 + * qbman_swp_dqrr_next_mem_back() - Get an valid DQRR entry 786 + * @s: the software portal object 787 + * 788 + * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry 789 + * only once, so repeated calls can return a sequence of DQRR entries, without 790 + * requiring they be consumed immediately or in any particular order. 791 + */ 792 + const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s) 793 + { 794 + u32 verb; 795 + u32 response_verb; 796 + u32 flags; 797 + struct dpaa2_dq *p; 798 + 799 + /* Before using valid-bit to detect if something is there, we have to 800 + * handle the case of the DQRR reset bug... 801 + */ 802 + if (unlikely(s->dqrr.reset_bug)) { 803 + /* 804 + * We pick up new entries by cache-inhibited producer index, 805 + * which means that a non-coherent mapping would require us to 806 + * invalidate and read *only* once that PI has indicated that 807 + * there's an entry here. The first trip around the DQRR ring 808 + * will be much less efficient than all subsequent trips around 809 + * it... 
810 + */ 811 + u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) & 812 + QMAN_DQRR_PI_MASK; 813 + 814 + /* there are new entries if pi != next_idx */ 815 + if (pi == s->dqrr.next_idx) 816 + return NULL; 817 + 818 + /* 819 + * if next_idx is/was the last ring index, and 'pi' is 820 + * different, we can disable the workaround as all the ring 821 + * entries have now been DMA'd to so valid-bit checking is 822 + * repaired. Note: this logic needs to be based on next_idx 823 + * (which increments one at a time), rather than on pi (which 824 + * can burst and wrap-around between our snapshots of it). 825 + */ 826 + if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { 827 + pr_debug("next_idx=%d, pi=%d, clear reset bug\n", 828 + s->dqrr.next_idx, pi); 829 + s->dqrr.reset_bug = 0; 830 + } 831 + prefetch(qbman_get_cmd(s, 832 + QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); 833 + } 834 + 835 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)); 1231 836 verb = p->dq.verb; 1232 837 1233 838 /* ··· 1445 872 #define RAR_SUCCESS(rar) ((rar) & 0x100) 1446 873 1447 874 /** 1448 - * qbman_swp_release() - Issue a buffer release command 875 + * qbman_swp_release_direct() - Issue a buffer release command 1449 876 * @s: the software portal object 1450 877 * @d: the release descriptor 1451 878 * @buffers: a pointer pointing to the buffer address to be released ··· 1453 880 * 1454 881 * Return 0 for success, -EBUSY if the release command ring is not ready. 
1455 882 */ 1456 - int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, 1457 - const u64 *buffers, unsigned int num_buffers) 883 + int qbman_swp_release_direct(struct qbman_swp *s, 884 + const struct qbman_release_desc *d, 885 + const u64 *buffers, unsigned int num_buffers) 1458 886 { 1459 887 int i; 1460 888 struct qbman_release_desc *p; ··· 1469 895 return -EBUSY; 1470 896 1471 897 /* Start the release command */ 1472 - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) 1473 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); 1474 - else 1475 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar))); 898 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); 899 + 1476 900 /* Copy the caller's buffer pointers to the command */ 1477 901 for (i = 0; i < num_buffers; i++) 1478 902 p->buf[i] = cpu_to_le64(buffers[i]); 1479 903 p->bpid = d->bpid; 1480 904 1481 - if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 1482 - /* 1483 - * Set the verb byte, have to substitute in the valid-bit 1484 - * and the number of buffers. 1485 - */ 1486 - dma_wmb(); 1487 - p->verb = d->verb | RAR_VB(rar) | num_buffers; 1488 - } else { 1489 - p->verb = d->verb | RAR_VB(rar) | num_buffers; 1490 - dma_wmb(); 1491 - qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT + 1492 - RAR_IDX(rar) * 4, QMAN_RT_MODE); 1493 - } 905 + /* 906 + * Set the verb byte, have to substitute in the valid-bit 907 + * and the number of buffers. 908 + */ 909 + dma_wmb(); 910 + p->verb = d->verb | RAR_VB(rar) | num_buffers; 911 + 912 + return 0; 913 + } 914 + 915 + /** 916 + * qbman_swp_release_mem_back() - Issue a buffer release command 917 + * @s: the software portal object 918 + * @d: the release descriptor 919 + * @buffers: a pointer pointing to the buffer address to be released 920 + * @num_buffers: number of buffers to be released, must be less than 8 921 + * 922 + * Return 0 for success, -EBUSY if the release command ring is not ready. 
923 + */ 924 + int qbman_swp_release_mem_back(struct qbman_swp *s, 925 + const struct qbman_release_desc *d, 926 + const u64 *buffers, unsigned int num_buffers) 927 + { 928 + int i; 929 + struct qbman_release_desc *p; 930 + u32 rar; 931 + 932 + if (!num_buffers || (num_buffers > 7)) 933 + return -EINVAL; 934 + 935 + rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR); 936 + if (!RAR_SUCCESS(rar)) 937 + return -EBUSY; 938 + 939 + /* Start the release command */ 940 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar))); 941 + 942 + /* Copy the caller's buffer pointers to the command */ 943 + for (i = 0; i < num_buffers; i++) 944 + p->buf[i] = cpu_to_le64(buffers[i]); 945 + p->bpid = d->bpid; 946 + 947 + p->verb = d->verb | RAR_VB(rar) | num_buffers; 948 + dma_wmb(); 949 + qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT + 950 + RAR_IDX(rar) * 4, QMAN_RT_MODE); 1494 951 1495 952 return 0; 1496 953 }
+151 -7
drivers/soc/fsl/dpio/qbman-portal.h
··· 9 9 10 10 #include <soc/fsl/dpaa2-fd.h> 11 11 12 + #define QMAN_REV_4000 0x04000000 13 + #define QMAN_REV_4100 0x04010000 14 + #define QMAN_REV_4101 0x04010001 15 + #define QMAN_REV_5000 0x05000000 16 + 17 + #define QMAN_REV_MASK 0xffff0000 18 + 12 19 struct dpaa2_dq; 13 20 struct qbman_swp; 14 21 ··· 88 81 u8 wae; 89 82 u8 rspid; 90 83 __le64 rsp_addr; 84 + }; 85 + 86 + struct qbman_eq_desc_with_fd { 87 + struct qbman_eq_desc desc; 91 88 u8 fd[32]; 92 89 }; 93 90 ··· 143 132 u8 dqrr_size; 144 133 int reset_bug; /* indicates dqrr reset workaround is needed */ 145 134 } dqrr; 135 + 136 + struct { 137 + u32 pi; 138 + u32 pi_vb; 139 + u32 pi_ring_size; 140 + u32 pi_ci_mask; 141 + u32 ci; 142 + int available; 143 + u32 pend; 144 + u32 no_pfdr; 145 + } eqcr; 146 + 147 + spinlock_t access_spinlock; 146 148 }; 147 149 150 + /* Function pointers */ 151 + extern 152 + int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s, 153 + const struct qbman_eq_desc *d, 154 + const struct dpaa2_fd *fd); 155 + extern 156 + int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s, 157 + const struct qbman_eq_desc *d, 158 + const struct dpaa2_fd *fd, 159 + uint32_t *flags, 160 + int num_frames); 161 + extern 162 + int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s, 163 + const struct qbman_eq_desc *d, 164 + const struct dpaa2_fd *fd, 165 + int num_frames); 166 + extern 167 + int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d); 168 + extern 169 + const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s); 170 + extern 171 + int (*qbman_swp_release_ptr)(struct qbman_swp *s, 172 + const struct qbman_release_desc *d, 173 + const u64 *buffers, 174 + unsigned int num_buffers); 175 + 176 + /* Functions */ 148 177 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); 149 178 void qbman_swp_finish(struct qbman_swp *p); 150 179 u32 qbman_swp_interrupt_read_status(struct qbman_swp *p); ··· 209 158 void qbman_pull_desc_set_channel(struct 
qbman_pull_desc *d, u32 chid, 210 159 enum qbman_pull_type_e dct); 211 160 212 - int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d); 213 - 214 - const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s); 215 161 void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); 216 162 217 163 int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq); ··· 220 172 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid, 221 173 u32 qd_bin, u32 qd_prio); 222 174 223 - int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d, 224 - const struct dpaa2_fd *fd); 225 175 226 176 void qbman_release_desc_clear(struct qbman_release_desc *d); 227 177 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid); 228 178 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable); 229 179 230 - int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, 231 - const u64 *buffers, unsigned int num_buffers); 232 180 int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers, 233 181 unsigned int num_buffers); 234 182 int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid, ··· 236 192 void *qbman_swp_mc_start(struct qbman_swp *p); 237 193 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb); 238 194 void *qbman_swp_mc_result(struct qbman_swp *p); 195 + 196 + /** 197 + * qbman_swp_enqueue() - Issue an enqueue command 198 + * @s: the software portal used for enqueue 199 + * @d: the enqueue descriptor 200 + * @fd: the frame descriptor to be enqueued 201 + * 202 + * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. 
203 + */ 204 + static inline int 205 + qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, 206 + const struct dpaa2_fd *fd) 207 + { 208 + return qbman_swp_enqueue_ptr(s, d, fd); 209 + } 210 + 211 + /** 212 + * qbman_swp_enqueue_multiple() - Issue a multi enqueue command 213 + * using one enqueue descriptor 214 + * @s: the software portal used for enqueue 215 + * @d: the enqueue descriptor 216 + * @fd: table pointer of frame descriptor table to be enqueued 217 + * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL 218 + * @num_frames: number of fd to be enqueued 219 + * 220 + * Return the number of fd enqueued, or a negative error number. 221 + */ 222 + static inline int 223 + qbman_swp_enqueue_multiple(struct qbman_swp *s, 224 + const struct qbman_eq_desc *d, 225 + const struct dpaa2_fd *fd, 226 + uint32_t *flags, 227 + int num_frames) 228 + { 229 + return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames); 230 + } 231 + 232 + /** 233 + * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command 234 + * using multiple enqueue descriptor 235 + * @s: the software portal used for enqueue 236 + * @d: table of minimal enqueue descriptor 237 + * @fd: table pointer of frame descriptor table to be enqueued 238 + * @num_frames: number of fd to be enqueued 239 + * 240 + * Return the number of fd enqueued, or a negative error number. 
241 + */ 242 + static inline int 243 + qbman_swp_enqueue_multiple_desc(struct qbman_swp *s, 244 + const struct qbman_eq_desc *d, 245 + const struct dpaa2_fd *fd, 246 + int num_frames) 247 + { 248 + return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames); 249 + } 239 250 240 251 /** 241 252 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response ··· 602 503 struct qbman_bp_query_rslt *r); 603 504 604 505 u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a); 506 + 507 + /** 508 + * qbman_swp_release() - Issue a buffer release command 509 + * @s: the software portal object 510 + * @d: the release descriptor 511 + * @buffers: a pointer pointing to the buffer address to be released 512 + * @num_buffers: number of buffers to be released, must be less than 8 513 + * 514 + * Return 0 for success, -EBUSY if the release command ring is not ready. 515 + */ 516 + static inline int qbman_swp_release(struct qbman_swp *s, 517 + const struct qbman_release_desc *d, 518 + const u64 *buffers, 519 + unsigned int num_buffers) 520 + { 521 + return qbman_swp_release_ptr(s, d, buffers, num_buffers); 522 + } 523 + 524 + /** 525 + * qbman_swp_pull() - Issue the pull dequeue command 526 + * @s: the software portal object 527 + * @d: the software portal descriptor which has been configured with 528 + * the set of qbman_pull_desc_set_*() calls 529 + * 530 + * Return 0 for success, and -EBUSY if the software portal is not ready 531 + * to do pull dequeue. 532 + */ 533 + static inline int qbman_swp_pull(struct qbman_swp *s, 534 + struct qbman_pull_desc *d) 535 + { 536 + return qbman_swp_pull_ptr(s, d); 537 + } 538 + 539 + /** 540 + * qbman_swp_dqrr_next() - Get an valid DQRR entry 541 + * @s: the software portal object 542 + * 543 + * Return NULL if there are no unconsumed DQRR entries. 
Return a DQRR entry 544 + * only once, so repeated calls can return a sequence of DQRR entries, without 545 + * requiring they be consumed immediately or in any particular order. 546 + */ 547 + static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) 548 + { 549 + return qbman_swp_dqrr_next_ptr(s); 550 + } 605 551 606 552 #endif /* __FSL_QBMAN_PORTAL_H */
+2 -2
drivers/soc/fsl/qe/qe.c
··· 423 423 qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); 424 424 425 425 /* Set I-RAM Ready Register */ 426 - qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready); 426 + qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready); 427 427 } 428 428 429 429 /* ··· 525 525 */ 526 526 memset(&qe_firmware_info, 0, sizeof(qe_firmware_info)); 527 527 strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id)); 528 - qe_firmware_info.extended_modes = firmware->extended_modes; 528 + qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes); 529 529 memcpy(qe_firmware_info.vtraps, firmware->vtraps, 530 530 sizeof(firmware->vtraps)); 531 531
+1 -1
drivers/soc/fsl/qe/qe_common.c
··· 46 46 { 47 47 struct device_node *np; 48 48 struct resource r; 49 - u32 zero[OF_MAX_ADDR_CELLS] = {}; 49 + __be32 zero[OF_MAX_ADDR_CELLS] = {}; 50 50 resource_size_t max = 0; 51 51 int i = 0; 52 52 int ret = 0;
+1 -1
drivers/soc/fsl/qe/qe_ic.c
··· 44 44 45 45 struct qe_ic { 46 46 /* Control registers offset */ 47 - u32 __iomem *regs; 47 + __be32 __iomem *regs; 48 48 49 49 /* The remapper for this QEIC */ 50 50 struct irq_domain *irqhost;
+1 -1
drivers/soc/fsl/qe/ucc.c
··· 632 632 { 633 633 int source; 634 634 u32 shift; 635 - struct qe_mux *qe_mux_reg; 635 + struct qe_mux __iomem *qe_mux_reg; 636 636 637 637 qe_mux_reg = &qe_immr->qmx; 638 638
+13 -20
drivers/soc/fsl/qe/ucc_slow.c
··· 72 72 73 73 void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) 74 74 { 75 - struct ucc_slow *us_regs; 75 + struct ucc_slow __iomem *us_regs; 76 76 u32 gumr_l; 77 77 78 78 us_regs = uccs->us_regs; ··· 93 93 94 94 void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) 95 95 { 96 - struct ucc_slow *us_regs; 96 + struct ucc_slow __iomem *us_regs; 97 97 u32 gumr_l; 98 98 99 99 us_regs = uccs->us_regs; ··· 122 122 u32 i; 123 123 struct ucc_slow __iomem *us_regs; 124 124 u32 gumr; 125 - struct qe_bd *bd; 125 + struct qe_bd __iomem *bd; 126 126 u32 id; 127 127 u32 command; 128 128 int ret = 0; ··· 168 168 return -ENOMEM; 169 169 } 170 170 171 - uccs->saved_uccm = 0; 172 - uccs->p_rx_frame = 0; 173 171 us_regs = uccs->us_regs; 174 - uccs->p_ucce = (u16 *) & (us_regs->ucce); 175 - uccs->p_uccm = (u16 *) & (us_regs->uccm); 176 - #ifdef STATISTICS 177 - uccs->rx_frames = 0; 178 - uccs->tx_frames = 0; 179 - uccs->rx_discarded = 0; 180 - #endif /* STATISTICS */ 172 + uccs->p_ucce = &us_regs->ucce; 173 + uccs->p_uccm = &us_regs->uccm; 181 174 182 175 /* Get PRAM base */ 183 176 uccs->us_pram_offset = ··· 224 231 /* clear bd buffer */ 225 232 qe_iowrite32be(0, &bd->buf); 226 233 /* set bd status and length */ 227 - qe_iowrite32be(0, (u32 *)bd); 234 + qe_iowrite32be(0, (u32 __iomem *)bd); 228 235 bd++; 229 236 } 230 237 /* for last BD set Wrap bit */ 231 238 qe_iowrite32be(0, &bd->buf); 232 - qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd); 239 + qe_iowrite32be(T_W, (u32 __iomem *)bd); 233 240 234 241 /* Init Rx bds */ 235 242 bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); 236 243 for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { 237 244 /* set bd status and length */ 238 - qe_iowrite32be(0, (u32 *)bd); 245 + qe_iowrite32be(0, (u32 __iomem *)bd); 239 246 /* clear bd buffer */ 240 247 qe_iowrite32be(0, &bd->buf); 241 248 bd++; 242 249 } 243 250 /* for last BD set Wrap bit */ 244 - qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd); 251 + 
qe_iowrite32be(R_W, (u32 __iomem *)bd); 245 252 qe_iowrite32be(0, &bd->buf); 246 253 247 254 /* Set GUMR (For more details see the hardware spec.). */ ··· 266 273 qe_iowrite32be(gumr, &us_regs->gumr_h); 267 274 268 275 /* gumr_l */ 269 - gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | 270 - us_info->diag | us_info->mode; 276 + gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc | 277 + (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode; 271 278 if (us_info->tci) 272 279 gumr |= UCC_SLOW_GUMR_L_TCI; 273 280 if (us_info->rinv) ··· 282 289 283 290 /* if the data is in cachable memory, the 'global' */ 284 291 /* in the function code should be set. */ 285 - uccs->us_pram->tbmr = UCC_BMR_BO_BE; 286 - uccs->us_pram->rbmr = UCC_BMR_BO_BE; 292 + qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr); 293 + qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr); 287 294 288 295 /* rbase, tbase are offsets from MURAM base */ 289 296 qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+10 -1
drivers/soc/imx/Kconfig
··· 10 10 11 11 config IMX_SCU_SOC 12 12 bool "i.MX System Controller Unit SoC info support" 13 - depends on IMX_SCU || COMPILE_TEST 13 + depends on IMX_SCU 14 14 select SOC_BUS 15 15 help 16 16 If you say yes here you get support for the NXP i.MX System 17 17 Controller Unit SoC info module, it will provide the SoC info 18 18 like SoC family, ID and revision etc. 19 + 20 + config SOC_IMX8M 21 + bool "i.MX8M SoC family support" 22 + depends on ARCH_MXC || COMPILE_TEST 23 + default ARCH_MXC && ARM64 24 + help 25 + If you say yes here you get support for the NXP i.MX8M family 26 + support, it will provide the SoC info like SoC family, 27 + ID and revision etc. 19 28 20 29 endmenu
+1 -1
drivers/soc/imx/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o 3 3 obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o 4 - obj-$(CONFIG_ARCH_MXC) += soc-imx8.o 4 + obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o 5 5 obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
+13 -11
drivers/soc/imx/gpc.c
··· 87 87 static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) 88 88 { 89 89 struct imx_pm_domain *pd = to_imx_pm_domain(genpd); 90 - int i, ret, sw, sw2iso; 91 - u32 val; 90 + int i, ret; 91 + u32 val, req; 92 92 93 93 if (pd->supply) { 94 94 ret = regulator_enable(pd->supply); ··· 107 107 regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, 108 108 0x1, 0x1); 109 109 110 - /* Read ISO and ISO2SW power up delays */ 111 - regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); 112 - sw = val & 0x3f; 113 - sw2iso = (val >> 8) & 0x3f; 114 - 115 110 /* Request GPC to power up domain */ 116 - val = BIT(pd->cntr_pdn_bit + 1); 117 - regmap_update_bits(pd->regmap, GPC_CNTR, val, val); 111 + req = BIT(pd->cntr_pdn_bit + 1); 112 + regmap_update_bits(pd->regmap, GPC_CNTR, req, req); 118 113 119 - /* Wait ISO + ISO2SW IPG clock cycles */ 120 - udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); 114 + /* Wait for the PGC to handle the request */ 115 + ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req), 116 + 1, 50); 117 + if (ret) 118 + pr_err("powerup request on domain %s timed out\n", genpd->name); 119 + 120 + /* Wait for reset to propagate through peripherals */ 121 + usleep_range(5, 10); 121 122 122 123 /* Disable reset clocks for all devices in the domain */ 123 124 for (i = 0; i < pd->num_clks; i++) ··· 344 343 .rd_table = &access_table, 345 344 .wr_table = &access_table, 346 345 .max_register = 0x2ac, 346 + .fast_io = true, 347 347 }; 348 348 349 349 static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
+1
drivers/soc/imx/gpcv2.c
··· 14 14 #include <linux/pm_domain.h> 15 15 #include <linux/regmap.h> 16 16 #include <linux/regulator/consumer.h> 17 + #include <linux/sizes.h> 17 18 #include <dt-bindings/power/imx7-power.h> 18 19 #include <dt-bindings/power/imx8mq-power.h> 19 20
drivers/soc/imx/soc-imx8.c drivers/soc/imx/soc-imx8m.c
+6 -1
drivers/soc/qcom/Kconfig
··· 76 76 requirements. This is typically used by the GPU, camera/video, and 77 77 audio components on some Snapdragon SoCs. 78 78 79 + config QCOM_PDR_HELPERS 80 + tristate 81 + select QCOM_QMI_HELPERS 82 + 79 83 config QCOM_PM 80 84 bool "Qualcomm Power Management" 81 85 depends on ARCH_QCOM && !ARM64 ··· 92 88 93 89 config QCOM_QMI_HELPERS 94 90 tristate 95 - depends on ARCH_QCOM || COMPILE_TEST 96 91 depends on NET 97 92 98 93 config QCOM_RMTFS_MEM ··· 200 197 tristate "Qualcomm APR Bus (Asynchronous Packet Router)" 201 198 depends on ARCH_QCOM || COMPILE_TEST 202 199 depends on RPMSG 200 + depends on NET 201 + select QCOM_PDR_HELPERS 203 202 help 204 203 Enable APR IPC protocol support between 205 204 application processor and QDSP6. APR is
+1
drivers/soc/qcom/Makefile
··· 7 7 obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o 8 8 obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o 9 9 obj-$(CONFIG_QCOM_OCMEM) += ocmem.o 10 + obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o 10 11 obj-$(CONFIG_QCOM_PM) += spm.o 11 12 obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o 12 13 qmi_helpers-y += qmi_encdec.o qmi_interface.o
+115 -10
drivers/soc/qcom/apr.c
··· 11 11 #include <linux/workqueue.h> 12 12 #include <linux/of_device.h> 13 13 #include <linux/soc/qcom/apr.h> 14 + #include <linux/soc/qcom/pdr.h> 14 15 #include <linux/rpmsg.h> 15 16 #include <linux/of.h> 16 17 ··· 22 21 spinlock_t rx_lock; 23 22 struct idr svcs_idr; 24 23 int dest_domain_id; 24 + struct pdr_handle *pdr; 25 25 struct workqueue_struct *rxwq; 26 26 struct work_struct rx_work; 27 27 struct list_head rx_list; ··· 291 289 id->svc_id + 1, GFP_ATOMIC); 292 290 spin_unlock(&apr->svcs_lock); 293 291 292 + of_property_read_string_index(np, "qcom,protection-domain", 293 + 1, &adev->service_path); 294 + 294 295 dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev)); 295 296 296 297 ret = device_register(&adev->dev); ··· 305 300 return ret; 306 301 } 307 302 308 - static void of_register_apr_devices(struct device *dev) 303 + static int of_apr_add_pd_lookups(struct device *dev) 304 + { 305 + const char *service_name, *service_path; 306 + struct apr *apr = dev_get_drvdata(dev); 307 + struct device_node *node; 308 + struct pdr_service *pds; 309 + int ret; 310 + 311 + for_each_child_of_node(dev->of_node, node) { 312 + ret = of_property_read_string_index(node, "qcom,protection-domain", 313 + 0, &service_name); 314 + if (ret < 0) 315 + continue; 316 + 317 + ret = of_property_read_string_index(node, "qcom,protection-domain", 318 + 1, &service_path); 319 + if (ret < 0) { 320 + dev_err(dev, "pdr service path missing: %d\n", ret); 321 + return ret; 322 + } 323 + 324 + pds = pdr_add_lookup(apr->pdr, service_name, service_path); 325 + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) { 326 + dev_err(dev, "pdr add lookup failed: %d\n", ret); 327 + return PTR_ERR(pds); 328 + } 329 + } 330 + 331 + return 0; 332 + } 333 + 334 + static void of_register_apr_devices(struct device *dev, const char *svc_path) 309 335 { 310 336 struct apr *apr = dev_get_drvdata(dev); 311 337 struct device_node *node; 338 + const char *service_path; 339 + int ret; 312 340 313 341 
for_each_child_of_node(dev->of_node, node) { 314 342 struct apr_device_id id = { {0} }; 343 + 344 + /* 345 + * This function is called with svc_path NULL during 346 + * apr_probe(), in which case we register any apr devices 347 + * without a qcom,protection-domain specified. 348 + * 349 + * Then as the protection domains becomes available 350 + * (if applicable) this function is again called, but with 351 + * svc_path representing the service becoming available. In 352 + * this case we register any apr devices with a matching 353 + * qcom,protection-domain. 354 + */ 355 + 356 + ret = of_property_read_string_index(node, "qcom,protection-domain", 357 + 1, &service_path); 358 + if (svc_path) { 359 + /* skip APR services that are PD independent */ 360 + if (ret) 361 + continue; 362 + 363 + /* skip APR services whose PD paths don't match */ 364 + if (strcmp(service_path, svc_path)) 365 + continue; 366 + } else { 367 + /* skip APR services whose PD lookups are registered */ 368 + if (ret == 0) 369 + continue; 370 + } 315 371 316 372 if (of_property_read_u32(node, "reg", &id.svc_id)) 317 373 continue; ··· 381 315 382 316 if (apr_add_device(dev, node, &id)) 383 317 dev_err(dev, "Failed to add apr %d svc\n", id.svc_id); 318 + } 319 + } 320 + 321 + static int apr_remove_device(struct device *dev, void *svc_path) 322 + { 323 + struct apr_device *adev = to_apr_device(dev); 324 + 325 + if (svc_path && adev->service_path) { 326 + if (!strcmp(adev->service_path, (char *)svc_path)) 327 + device_unregister(&adev->dev); 328 + } else { 329 + device_unregister(&adev->dev); 330 + } 331 + 332 + return 0; 333 + } 334 + 335 + static void apr_pd_status(int state, char *svc_path, void *priv) 336 + { 337 + struct apr *apr = (struct apr *)priv; 338 + 339 + switch (state) { 340 + case SERVREG_SERVICE_STATE_UP: 341 + of_register_apr_devices(apr->dev, svc_path); 342 + break; 343 + case SERVREG_SERVICE_STATE_DOWN: 344 + device_for_each_child(apr->dev, svc_path, apr_remove_device); 345 + break; 
384 346 } 385 347 } 386 348 ··· 437 343 return -ENOMEM; 438 344 } 439 345 INIT_WORK(&apr->rx_work, apr_rxwq); 346 + 347 + apr->pdr = pdr_handle_alloc(apr_pd_status, apr); 348 + if (IS_ERR(apr->pdr)) { 349 + dev_err(dev, "Failed to init PDR handle\n"); 350 + ret = PTR_ERR(apr->pdr); 351 + goto destroy_wq; 352 + } 353 + 440 354 INIT_LIST_HEAD(&apr->rx_list); 441 355 spin_lock_init(&apr->rx_lock); 442 356 spin_lock_init(&apr->svcs_lock); 443 357 idr_init(&apr->svcs_idr); 444 - of_register_apr_devices(dev); 358 + 359 + ret = of_apr_add_pd_lookups(dev); 360 + if (ret) 361 + goto handle_release; 362 + 363 + of_register_apr_devices(dev, NULL); 445 364 446 365 return 0; 447 - } 448 366 449 - static int apr_remove_device(struct device *dev, void *null) 450 - { 451 - struct apr_device *adev = to_apr_device(dev); 452 - 453 - device_unregister(&adev->dev); 454 - 455 - return 0; 367 + handle_release: 368 + pdr_handle_release(apr->pdr); 369 + destroy_wq: 370 + destroy_workqueue(apr->rxwq); 371 + return ret; 456 372 } 457 373 458 374 static void apr_remove(struct rpmsg_device *rpdev) 459 375 { 460 376 struct apr *apr = dev_get_drvdata(&rpdev->dev); 461 377 378 + pdr_handle_release(apr->pdr); 462 379 device_for_each_child(&rpdev->dev, NULL, apr_remove_device); 463 380 flush_workqueue(apr->rxwq); 464 381 destroy_workqueue(apr->rxwq);
+757
drivers/soc/qcom/pdr_interface.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2020 The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/kernel.h> 7 + #include <linux/module.h> 8 + #include <linux/string.h> 9 + #include <linux/workqueue.h> 10 + 11 + #include "pdr_internal.h" 12 + 13 + struct pdr_service { 14 + char service_name[SERVREG_NAME_LENGTH + 1]; 15 + char service_path[SERVREG_NAME_LENGTH + 1]; 16 + 17 + struct sockaddr_qrtr addr; 18 + 19 + unsigned int instance; 20 + unsigned int service; 21 + u8 service_data_valid; 22 + u32 service_data; 23 + int state; 24 + 25 + bool need_notifier_register; 26 + bool need_notifier_remove; 27 + bool need_locator_lookup; 28 + bool service_connected; 29 + 30 + struct list_head node; 31 + }; 32 + 33 + struct pdr_handle { 34 + struct qmi_handle locator_hdl; 35 + struct qmi_handle notifier_hdl; 36 + 37 + struct sockaddr_qrtr locator_addr; 38 + 39 + struct list_head lookups; 40 + struct list_head indack_list; 41 + 42 + /* control access to pdr lookup/indack lists */ 43 + struct mutex list_lock; 44 + 45 + /* serialize pd status invocation */ 46 + struct mutex status_lock; 47 + 48 + /* control access to the locator state */ 49 + struct mutex lock; 50 + 51 + bool locator_init_complete; 52 + 53 + struct work_struct locator_work; 54 + struct work_struct notifier_work; 55 + struct work_struct indack_work; 56 + 57 + struct workqueue_struct *notifier_wq; 58 + struct workqueue_struct *indack_wq; 59 + 60 + void (*status)(int state, char *service_path, void *priv); 61 + void *priv; 62 + }; 63 + 64 + struct pdr_list_node { 65 + enum servreg_service_state curr_state; 66 + u16 transaction_id; 67 + struct pdr_service *pds; 68 + struct list_head node; 69 + }; 70 + 71 + static int pdr_locator_new_server(struct qmi_handle *qmi, 72 + struct qmi_service *svc) 73 + { 74 + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, 75 + locator_hdl); 76 + struct pdr_service *pds; 77 + 78 + /* Create a local client port for QMI 
communication */ 79 + pdr->locator_addr.sq_family = AF_QIPCRTR; 80 + pdr->locator_addr.sq_node = svc->node; 81 + pdr->locator_addr.sq_port = svc->port; 82 + 83 + mutex_lock(&pdr->lock); 84 + pdr->locator_init_complete = true; 85 + mutex_unlock(&pdr->lock); 86 + 87 + /* Service pending lookup requests */ 88 + mutex_lock(&pdr->list_lock); 89 + list_for_each_entry(pds, &pdr->lookups, node) { 90 + if (pds->need_locator_lookup) 91 + schedule_work(&pdr->locator_work); 92 + } 93 + mutex_unlock(&pdr->list_lock); 94 + 95 + return 0; 96 + } 97 + 98 + static void pdr_locator_del_server(struct qmi_handle *qmi, 99 + struct qmi_service *svc) 100 + { 101 + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, 102 + locator_hdl); 103 + 104 + mutex_lock(&pdr->lock); 105 + pdr->locator_init_complete = false; 106 + mutex_unlock(&pdr->lock); 107 + 108 + pdr->locator_addr.sq_node = 0; 109 + pdr->locator_addr.sq_port = 0; 110 + } 111 + 112 + static struct qmi_ops pdr_locator_ops = { 113 + .new_server = pdr_locator_new_server, 114 + .del_server = pdr_locator_del_server, 115 + }; 116 + 117 + static int pdr_register_listener(struct pdr_handle *pdr, 118 + struct pdr_service *pds, 119 + bool enable) 120 + { 121 + struct servreg_register_listener_resp resp; 122 + struct servreg_register_listener_req req; 123 + struct qmi_txn txn; 124 + int ret; 125 + 126 + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, 127 + servreg_register_listener_resp_ei, 128 + &resp); 129 + if (ret < 0) 130 + return ret; 131 + 132 + req.enable = enable; 133 + strcpy(req.service_path, pds->service_path); 134 + 135 + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, 136 + &txn, SERVREG_REGISTER_LISTENER_REQ, 137 + SERVREG_REGISTER_LISTENER_REQ_LEN, 138 + servreg_register_listener_req_ei, 139 + &req); 140 + if (ret < 0) { 141 + qmi_txn_cancel(&txn); 142 + return ret; 143 + } 144 + 145 + ret = qmi_txn_wait(&txn, 5 * HZ); 146 + if (ret < 0) { 147 + pr_err("PDR: %s register listener txn wait failed: %d\n", 148 + 
pds->service_path, ret); 149 + return ret; 150 + } 151 + 152 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 153 + pr_err("PDR: %s register listener failed: 0x%x\n", 154 + pds->service_path, resp.resp.error); 155 + return ret; 156 + } 157 + 158 + if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX) 159 + pr_err("PDR: %s notification state invalid: 0x%x\n", 160 + pds->service_path, resp.curr_state); 161 + 162 + pds->state = resp.curr_state; 163 + 164 + return 0; 165 + } 166 + 167 + static void pdr_notifier_work(struct work_struct *work) 168 + { 169 + struct pdr_handle *pdr = container_of(work, struct pdr_handle, 170 + notifier_work); 171 + struct pdr_service *pds; 172 + int ret; 173 + 174 + mutex_lock(&pdr->list_lock); 175 + list_for_each_entry(pds, &pdr->lookups, node) { 176 + if (pds->service_connected) { 177 + if (!pds->need_notifier_register) 178 + continue; 179 + 180 + pds->need_notifier_register = false; 181 + ret = pdr_register_listener(pdr, pds, true); 182 + if (ret < 0) 183 + pds->state = SERVREG_SERVICE_STATE_DOWN; 184 + } else { 185 + if (!pds->need_notifier_remove) 186 + continue; 187 + 188 + pds->need_notifier_remove = false; 189 + pds->state = SERVREG_SERVICE_STATE_DOWN; 190 + } 191 + 192 + mutex_lock(&pdr->status_lock); 193 + pdr->status(pds->state, pds->service_path, pdr->priv); 194 + mutex_unlock(&pdr->status_lock); 195 + } 196 + mutex_unlock(&pdr->list_lock); 197 + } 198 + 199 + static int pdr_notifier_new_server(struct qmi_handle *qmi, 200 + struct qmi_service *svc) 201 + { 202 + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, 203 + notifier_hdl); 204 + struct pdr_service *pds; 205 + 206 + mutex_lock(&pdr->list_lock); 207 + list_for_each_entry(pds, &pdr->lookups, node) { 208 + if (pds->service == svc->service && 209 + pds->instance == svc->instance) { 210 + pds->service_connected = true; 211 + pds->need_notifier_register = true; 212 + pds->addr.sq_family = AF_QIPCRTR; 213 + pds->addr.sq_node = svc->node; 214 + 
pds->addr.sq_port = svc->port; 215 + queue_work(pdr->notifier_wq, &pdr->notifier_work); 216 + } 217 + } 218 + mutex_unlock(&pdr->list_lock); 219 + 220 + return 0; 221 + } 222 + 223 + static void pdr_notifier_del_server(struct qmi_handle *qmi, 224 + struct qmi_service *svc) 225 + { 226 + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, 227 + notifier_hdl); 228 + struct pdr_service *pds; 229 + 230 + mutex_lock(&pdr->list_lock); 231 + list_for_each_entry(pds, &pdr->lookups, node) { 232 + if (pds->service == svc->service && 233 + pds->instance == svc->instance) { 234 + pds->service_connected = false; 235 + pds->need_notifier_remove = true; 236 + pds->addr.sq_node = 0; 237 + pds->addr.sq_port = 0; 238 + queue_work(pdr->notifier_wq, &pdr->notifier_work); 239 + } 240 + } 241 + mutex_unlock(&pdr->list_lock); 242 + } 243 + 244 + static struct qmi_ops pdr_notifier_ops = { 245 + .new_server = pdr_notifier_new_server, 246 + .del_server = pdr_notifier_del_server, 247 + }; 248 + 249 + static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds, 250 + u16 tid) 251 + { 252 + struct servreg_set_ack_resp resp; 253 + struct servreg_set_ack_req req; 254 + struct qmi_txn txn; 255 + int ret; 256 + 257 + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei, 258 + &resp); 259 + if (ret < 0) 260 + return ret; 261 + 262 + req.transaction_id = tid; 263 + strcpy(req.service_path, pds->service_path); 264 + 265 + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, 266 + &txn, SERVREG_SET_ACK_REQ, 267 + SERVREG_SET_ACK_REQ_LEN, 268 + servreg_set_ack_req_ei, 269 + &req); 270 + 271 + /* Skip waiting for response */ 272 + qmi_txn_cancel(&txn); 273 + return ret; 274 + } 275 + 276 + static void pdr_indack_work(struct work_struct *work) 277 + { 278 + struct pdr_handle *pdr = container_of(work, struct pdr_handle, 279 + indack_work); 280 + struct pdr_list_node *ind, *tmp; 281 + struct pdr_service *pds; 282 + 283 + list_for_each_entry_safe(ind, tmp, 
&pdr->indack_list, node) { 284 + pds = ind->pds; 285 + pdr_send_indack_msg(pdr, pds, ind->transaction_id); 286 + 287 + mutex_lock(&pdr->status_lock); 288 + pds->state = ind->curr_state; 289 + pdr->status(pds->state, pds->service_path, pdr->priv); 290 + mutex_unlock(&pdr->status_lock); 291 + 292 + mutex_lock(&pdr->list_lock); 293 + list_del(&ind->node); 294 + mutex_unlock(&pdr->list_lock); 295 + 296 + kfree(ind); 297 + } 298 + } 299 + 300 + static void pdr_indication_cb(struct qmi_handle *qmi, 301 + struct sockaddr_qrtr *sq, 302 + struct qmi_txn *txn, const void *data) 303 + { 304 + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, 305 + notifier_hdl); 306 + const struct servreg_state_updated_ind *ind_msg = data; 307 + struct pdr_list_node *ind; 308 + struct pdr_service *pds; 309 + bool found = false; 310 + 311 + if (!ind_msg || !ind_msg->service_path[0] || 312 + strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH) 313 + return; 314 + 315 + mutex_lock(&pdr->list_lock); 316 + list_for_each_entry(pds, &pdr->lookups, node) { 317 + if (strcmp(pds->service_path, ind_msg->service_path)) 318 + continue; 319 + 320 + found = true; 321 + break; 322 + } 323 + mutex_unlock(&pdr->list_lock); 324 + 325 + if (!found) 326 + return; 327 + 328 + pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n", 329 + ind_msg->service_path, ind_msg->curr_state, 330 + ind_msg->transaction_id); 331 + 332 + ind = kzalloc(sizeof(*ind), GFP_KERNEL); 333 + if (!ind) 334 + return; 335 + 336 + ind->transaction_id = ind_msg->transaction_id; 337 + ind->curr_state = ind_msg->curr_state; 338 + ind->pds = pds; 339 + 340 + mutex_lock(&pdr->list_lock); 341 + list_add_tail(&ind->node, &pdr->indack_list); 342 + mutex_unlock(&pdr->list_lock); 343 + 344 + queue_work(pdr->indack_wq, &pdr->indack_work); 345 + } 346 + 347 + static struct qmi_msg_handler qmi_indication_handler[] = { 348 + { 349 + .type = QMI_INDICATION, 350 + .msg_id = SERVREG_STATE_UPDATED_IND_ID, 351 + .ei = 
servreg_state_updated_ind_ei, 352 + .decoded_size = sizeof(struct servreg_state_updated_ind), 353 + .fn = pdr_indication_cb, 354 + }, 355 + {} 356 + }; 357 + 358 + static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, 359 + struct servreg_get_domain_list_resp *resp, 360 + struct pdr_handle *pdr) 361 + { 362 + struct qmi_txn txn; 363 + int ret; 364 + 365 + ret = qmi_txn_init(&pdr->locator_hdl, &txn, 366 + servreg_get_domain_list_resp_ei, resp); 367 + if (ret < 0) 368 + return ret; 369 + 370 + ret = qmi_send_request(&pdr->locator_hdl, 371 + &pdr->locator_addr, 372 + &txn, SERVREG_GET_DOMAIN_LIST_REQ, 373 + SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, 374 + servreg_get_domain_list_req_ei, 375 + req); 376 + if (ret < 0) { 377 + qmi_txn_cancel(&txn); 378 + return ret; 379 + } 380 + 381 + ret = qmi_txn_wait(&txn, 5 * HZ); 382 + if (ret < 0) { 383 + pr_err("PDR: %s get domain list txn wait failed: %d\n", 384 + req->service_name, ret); 385 + return ret; 386 + } 387 + 388 + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 389 + pr_err("PDR: %s get domain list failed: 0x%x\n", 390 + req->service_name, resp->resp.error); 391 + return -EREMOTEIO; 392 + } 393 + 394 + return 0; 395 + } 396 + 397 + static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) 398 + { 399 + struct servreg_get_domain_list_resp *resp; 400 + struct servreg_get_domain_list_req req; 401 + struct servreg_location_entry *entry; 402 + int domains_read = 0; 403 + int ret, i; 404 + 405 + resp = kzalloc(sizeof(*resp), GFP_KERNEL); 406 + if (!resp) 407 + return -ENOMEM; 408 + 409 + /* Prepare req message */ 410 + strcpy(req.service_name, pds->service_name); 411 + req.domain_offset_valid = true; 412 + req.domain_offset = 0; 413 + 414 + do { 415 + req.domain_offset = domains_read; 416 + ret = pdr_get_domain_list(&req, resp, pdr); 417 + if (ret < 0) 418 + goto out; 419 + 420 + for (i = domains_read; i < resp->domain_list_len; i++) { 421 + entry = &resp->domain_list[i]; 422 + 423 + if 
(strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name)) 424 + continue; 425 + 426 + if (!strcmp(entry->name, pds->service_path)) { 427 + pds->service_data_valid = entry->service_data_valid; 428 + pds->service_data = entry->service_data; 429 + pds->instance = entry->instance; 430 + goto out; 431 + } 432 + } 433 + 434 + /* Update ret to indicate that the service is not yet found */ 435 + ret = -ENXIO; 436 + 437 + /* Always read total_domains from the response msg */ 438 + if (resp->domain_list_len > resp->total_domains) 439 + resp->domain_list_len = resp->total_domains; 440 + 441 + domains_read += resp->domain_list_len; 442 + } while (domains_read < resp->total_domains); 443 + out: 444 + kfree(resp); 445 + return ret; 446 + } 447 + 448 + static void pdr_notify_lookup_failure(struct pdr_handle *pdr, 449 + struct pdr_service *pds, 450 + int err) 451 + { 452 + pr_err("PDR: service lookup for %s failed: %d\n", 453 + pds->service_name, err); 454 + 455 + if (err == -ENXIO) 456 + return; 457 + 458 + list_del(&pds->node); 459 + pds->state = SERVREG_LOCATOR_ERR; 460 + mutex_lock(&pdr->status_lock); 461 + pdr->status(pds->state, pds->service_path, pdr->priv); 462 + mutex_unlock(&pdr->status_lock); 463 + kfree(pds); 464 + } 465 + 466 + static void pdr_locator_work(struct work_struct *work) 467 + { 468 + struct pdr_handle *pdr = container_of(work, struct pdr_handle, 469 + locator_work); 470 + struct pdr_service *pds, *tmp; 471 + int ret = 0; 472 + 473 + /* Bail out early if the SERVREG LOCATOR QMI service is not up */ 474 + mutex_lock(&pdr->lock); 475 + if (!pdr->locator_init_complete) { 476 + mutex_unlock(&pdr->lock); 477 + pr_debug("PDR: SERVICE LOCATOR service not available\n"); 478 + return; 479 + } 480 + mutex_unlock(&pdr->lock); 481 + 482 + mutex_lock(&pdr->list_lock); 483 + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { 484 + if (!pds->need_locator_lookup) 485 + continue; 486 + 487 + ret = pdr_locate_service(pdr, pds); 488 + if (ret < 0) { 489 + 
pdr_notify_lookup_failure(pdr, pds, ret); 490 + continue; 491 + } 492 + 493 + ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1, 494 + pds->instance); 495 + if (ret < 0) { 496 + pdr_notify_lookup_failure(pdr, pds, ret); 497 + continue; 498 + } 499 + 500 + pds->need_locator_lookup = false; 501 + } 502 + mutex_unlock(&pdr->list_lock); 503 + } 504 + 505 + /** 506 + * pdr_add_lookup() - register a tracking request for a PD 507 + * @pdr: PDR client handle 508 + * @service_name: service name of the tracking request 509 + * @service_path: service path of the tracking request 510 + * 511 + * Registering a pdr lookup allows for tracking the life cycle of the PD. 512 + * 513 + * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is 514 + * returned if a lookup is already in progress for the given service path. 515 + */ 516 + struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, 517 + const char *service_name, 518 + const char *service_path) 519 + { 520 + struct pdr_service *pds, *tmp; 521 + int ret; 522 + 523 + if (IS_ERR_OR_NULL(pdr)) 524 + return ERR_PTR(-EINVAL); 525 + 526 + if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH || 527 + !service_path || strlen(service_path) > SERVREG_NAME_LENGTH) 528 + return ERR_PTR(-EINVAL); 529 + 530 + pds = kzalloc(sizeof(*pds), GFP_KERNEL); 531 + if (!pds) 532 + return ERR_PTR(-ENOMEM); 533 + 534 + pds->service = SERVREG_NOTIFIER_SERVICE; 535 + strcpy(pds->service_name, service_name); 536 + strcpy(pds->service_path, service_path); 537 + pds->need_locator_lookup = true; 538 + 539 + mutex_lock(&pdr->list_lock); 540 + list_for_each_entry(tmp, &pdr->lookups, node) { 541 + if (strcmp(tmp->service_path, service_path)) 542 + continue; 543 + 544 + mutex_unlock(&pdr->list_lock); 545 + ret = -EALREADY; 546 + goto err; 547 + } 548 + 549 + list_add(&pds->node, &pdr->lookups); 550 + mutex_unlock(&pdr->list_lock); 551 + 552 + schedule_work(&pdr->locator_work); 553 + 554 + return pds; 555 + err: 556 + 
kfree(pds); 557 + return ERR_PTR(ret); 558 + } 559 + EXPORT_SYMBOL(pdr_add_lookup); 560 + 561 + /** 562 + * pdr_restart_pd() - restart PD 563 + * @pdr: PDR client handle 564 + * @pds: PD service handle 565 + * 566 + * Restarts the PD tracked by the PDR client handle for a given service path. 567 + * 568 + * Return: 0 on success, negative errno on failure. 569 + */ 570 + int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds) 571 + { 572 + struct servreg_restart_pd_resp resp; 573 + struct servreg_restart_pd_req req; 574 + struct sockaddr_qrtr addr; 575 + struct pdr_service *tmp; 576 + struct qmi_txn txn; 577 + int ret; 578 + 579 + if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds)) 580 + return -EINVAL; 581 + 582 + mutex_lock(&pdr->list_lock); 583 + list_for_each_entry(tmp, &pdr->lookups, node) { 584 + if (tmp != pds) 585 + continue; 586 + 587 + if (!pds->service_connected) 588 + break; 589 + 590 + /* Prepare req message */ 591 + strcpy(req.service_path, pds->service_path); 592 + addr = pds->addr; 593 + break; 594 + } 595 + mutex_unlock(&pdr->list_lock); 596 + 597 + if (!req.service_path[0]) 598 + return -EINVAL; 599 + 600 + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, 601 + servreg_restart_pd_resp_ei, 602 + &resp); 603 + if (ret < 0) 604 + return ret; 605 + 606 + ret = qmi_send_request(&pdr->notifier_hdl, &addr, 607 + &txn, SERVREG_RESTART_PD_REQ, 608 + SERVREG_RESTART_PD_REQ_MAX_LEN, 609 + servreg_restart_pd_req_ei, &req); 610 + if (ret < 0) { 611 + qmi_txn_cancel(&txn); 612 + return ret; 613 + } 614 + 615 + ret = qmi_txn_wait(&txn, 5 * HZ); 616 + if (ret < 0) { 617 + pr_err("PDR: %s PD restart txn wait failed: %d\n", 618 + req.service_path, ret); 619 + return ret; 620 + } 621 + 622 + /* Check response if PDR is disabled */ 623 + if (resp.resp.result == QMI_RESULT_FAILURE_V01 && 624 + resp.resp.error == QMI_ERR_DISABLED_V01) { 625 + pr_err("PDR: %s PD restart is disabled: 0x%x\n", 626 + req.service_path, resp.resp.error); 627 + return -EOPNOTSUPP; 628 + } 
629 + 630 + /* Check the response for other error case*/ 631 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 632 + pr_err("PDR: %s request for PD restart failed: 0x%x\n", 633 + req.service_path, resp.resp.error); 634 + return -EREMOTEIO; 635 + } 636 + 637 + return 0; 638 + } 639 + EXPORT_SYMBOL(pdr_restart_pd); 640 + 641 + /** 642 + * pdr_handle_alloc() - initialize the PDR client handle 643 + * @status: function to be called on PD state change 644 + * @priv: handle for client's use 645 + * 646 + * Initializes the PDR client handle to allow for tracking/restart of PDs. 647 + * 648 + * Return: pdr_handle object on success, ERR_PTR on failure. 649 + */ 650 + struct pdr_handle *pdr_handle_alloc(void (*status)(int state, 651 + char *service_path, 652 + void *priv), void *priv) 653 + { 654 + struct pdr_handle *pdr; 655 + int ret; 656 + 657 + if (!status) 658 + return ERR_PTR(-EINVAL); 659 + 660 + pdr = kzalloc(sizeof(*pdr), GFP_KERNEL); 661 + if (!pdr) 662 + return ERR_PTR(-ENOMEM); 663 + 664 + pdr->status = status; 665 + pdr->priv = priv; 666 + 667 + mutex_init(&pdr->status_lock); 668 + mutex_init(&pdr->list_lock); 669 + mutex_init(&pdr->lock); 670 + 671 + INIT_LIST_HEAD(&pdr->lookups); 672 + INIT_LIST_HEAD(&pdr->indack_list); 673 + 674 + INIT_WORK(&pdr->locator_work, pdr_locator_work); 675 + INIT_WORK(&pdr->notifier_work, pdr_notifier_work); 676 + INIT_WORK(&pdr->indack_work, pdr_indack_work); 677 + 678 + pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq"); 679 + if (!pdr->notifier_wq) { 680 + ret = -ENOMEM; 681 + goto free_pdr_handle; 682 + } 683 + 684 + pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI); 685 + if (!pdr->indack_wq) { 686 + ret = -ENOMEM; 687 + goto destroy_notifier; 688 + } 689 + 690 + ret = qmi_handle_init(&pdr->locator_hdl, 691 + SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN, 692 + &pdr_locator_ops, NULL); 693 + if (ret < 0) 694 + goto destroy_indack; 695 + 696 + ret = qmi_add_lookup(&pdr->locator_hdl, 
SERVREG_LOCATOR_SERVICE, 1, 1); 697 + if (ret < 0) 698 + goto release_qmi_handle; 699 + 700 + ret = qmi_handle_init(&pdr->notifier_hdl, 701 + SERVREG_STATE_UPDATED_IND_MAX_LEN, 702 + &pdr_notifier_ops, 703 + qmi_indication_handler); 704 + if (ret < 0) 705 + goto release_qmi_handle; 706 + 707 + return pdr; 708 + 709 + release_qmi_handle: 710 + qmi_handle_release(&pdr->locator_hdl); 711 + destroy_indack: 712 + destroy_workqueue(pdr->indack_wq); 713 + destroy_notifier: 714 + destroy_workqueue(pdr->notifier_wq); 715 + free_pdr_handle: 716 + kfree(pdr); 717 + 718 + return ERR_PTR(ret); 719 + } 720 + EXPORT_SYMBOL(pdr_handle_alloc); 721 + 722 + /** 723 + * pdr_handle_release() - release the PDR client handle 724 + * @pdr: PDR client handle 725 + * 726 + * Cleans up pending tracking requests and releases the underlying qmi handles. 727 + */ 728 + void pdr_handle_release(struct pdr_handle *pdr) 729 + { 730 + struct pdr_service *pds, *tmp; 731 + 732 + if (IS_ERR_OR_NULL(pdr)) 733 + return; 734 + 735 + mutex_lock(&pdr->list_lock); 736 + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { 737 + list_del(&pds->node); 738 + kfree(pds); 739 + } 740 + mutex_unlock(&pdr->list_lock); 741 + 742 + cancel_work_sync(&pdr->locator_work); 743 + cancel_work_sync(&pdr->notifier_work); 744 + cancel_work_sync(&pdr->indack_work); 745 + 746 + destroy_workqueue(pdr->notifier_wq); 747 + destroy_workqueue(pdr->indack_wq); 748 + 749 + qmi_handle_release(&pdr->locator_hdl); 750 + qmi_handle_release(&pdr->notifier_hdl); 751 + 752 + kfree(pdr); 753 + } 754 + EXPORT_SYMBOL(pdr_handle_release); 755 + 756 + MODULE_LICENSE("GPL v2"); 757 + MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
+379
drivers/soc/qcom/pdr_internal.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __QCOM_PDR_HELPER_INTERNAL__ 3 + #define __QCOM_PDR_HELPER_INTERNAL__ 4 + 5 + #include <linux/soc/qcom/pdr.h> 6 + 7 + #define SERVREG_LOCATOR_SERVICE 0x40 8 + #define SERVREG_NOTIFIER_SERVICE 0x42 9 + 10 + #define SERVREG_REGISTER_LISTENER_REQ 0x20 11 + #define SERVREG_GET_DOMAIN_LIST_REQ 0x21 12 + #define SERVREG_STATE_UPDATED_IND_ID 0x22 13 + #define SERVREG_SET_ACK_REQ 0x23 14 + #define SERVREG_RESTART_PD_REQ 0x24 15 + 16 + #define SERVREG_DOMAIN_LIST_LENGTH 32 17 + #define SERVREG_RESTART_PD_REQ_MAX_LEN 67 18 + #define SERVREG_REGISTER_LISTENER_REQ_LEN 71 19 + #define SERVREG_SET_ACK_REQ_LEN 72 20 + #define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74 21 + #define SERVREG_STATE_UPDATED_IND_MAX_LEN 79 22 + #define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389 23 + 24 + struct servreg_location_entry { 25 + char name[SERVREG_NAME_LENGTH + 1]; 26 + u8 service_data_valid; 27 + u32 service_data; 28 + u32 instance; 29 + }; 30 + 31 + struct qmi_elem_info servreg_location_entry_ei[] = { 32 + { 33 + .data_type = QMI_STRING, 34 + .elem_len = SERVREG_NAME_LENGTH + 1, 35 + .elem_size = sizeof(char), 36 + .array_type = NO_ARRAY, 37 + .tlv_type = 0, 38 + .offset = offsetof(struct servreg_location_entry, 39 + name), 40 + }, 41 + { 42 + .data_type = QMI_UNSIGNED_4_BYTE, 43 + .elem_len = 1, 44 + .elem_size = sizeof(u32), 45 + .array_type = NO_ARRAY, 46 + .tlv_type = 0, 47 + .offset = offsetof(struct servreg_location_entry, 48 + instance), 49 + }, 50 + { 51 + .data_type = QMI_UNSIGNED_1_BYTE, 52 + .elem_len = 1, 53 + .elem_size = sizeof(u8), 54 + .array_type = NO_ARRAY, 55 + .tlv_type = 0, 56 + .offset = offsetof(struct servreg_location_entry, 57 + service_data_valid), 58 + }, 59 + { 60 + .data_type = QMI_UNSIGNED_4_BYTE, 61 + .elem_len = 1, 62 + .elem_size = sizeof(u32), 63 + .array_type = NO_ARRAY, 64 + .tlv_type = 0, 65 + .offset = offsetof(struct servreg_location_entry, 66 + service_data), 67 + }, 68 + {} 69 + }; 70 + 71 + 
struct servreg_get_domain_list_req { 72 + char service_name[SERVREG_NAME_LENGTH + 1]; 73 + u8 domain_offset_valid; 74 + u32 domain_offset; 75 + }; 76 + 77 + struct qmi_elem_info servreg_get_domain_list_req_ei[] = { 78 + { 79 + .data_type = QMI_STRING, 80 + .elem_len = SERVREG_NAME_LENGTH + 1, 81 + .elem_size = sizeof(char), 82 + .array_type = NO_ARRAY, 83 + .tlv_type = 0x01, 84 + .offset = offsetof(struct servreg_get_domain_list_req, 85 + service_name), 86 + }, 87 + { 88 + .data_type = QMI_OPT_FLAG, 89 + .elem_len = 1, 90 + .elem_size = sizeof(u8), 91 + .array_type = NO_ARRAY, 92 + .tlv_type = 0x10, 93 + .offset = offsetof(struct servreg_get_domain_list_req, 94 + domain_offset_valid), 95 + }, 96 + { 97 + .data_type = QMI_UNSIGNED_4_BYTE, 98 + .elem_len = 1, 99 + .elem_size = sizeof(u32), 100 + .array_type = NO_ARRAY, 101 + .tlv_type = 0x10, 102 + .offset = offsetof(struct servreg_get_domain_list_req, 103 + domain_offset), 104 + }, 105 + {} 106 + }; 107 + 108 + struct servreg_get_domain_list_resp { 109 + struct qmi_response_type_v01 resp; 110 + u8 total_domains_valid; 111 + u16 total_domains; 112 + u8 db_rev_count_valid; 113 + u16 db_rev_count; 114 + u8 domain_list_valid; 115 + u32 domain_list_len; 116 + struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH]; 117 + }; 118 + 119 + struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { 120 + { 121 + .data_type = QMI_STRUCT, 122 + .elem_len = 1, 123 + .elem_size = sizeof(struct qmi_response_type_v01), 124 + .array_type = NO_ARRAY, 125 + .tlv_type = 0x02, 126 + .offset = offsetof(struct servreg_get_domain_list_resp, 127 + resp), 128 + .ei_array = qmi_response_type_v01_ei, 129 + }, 130 + { 131 + .data_type = QMI_OPT_FLAG, 132 + .elem_len = 1, 133 + .elem_size = sizeof(u8), 134 + .array_type = NO_ARRAY, 135 + .tlv_type = 0x10, 136 + .offset = offsetof(struct servreg_get_domain_list_resp, 137 + total_domains_valid), 138 + }, 139 + { 140 + .data_type = QMI_UNSIGNED_2_BYTE, 141 + .elem_len = 1, 142 + 
.elem_size = sizeof(u16), 143 + .array_type = NO_ARRAY, 144 + .tlv_type = 0x10, 145 + .offset = offsetof(struct servreg_get_domain_list_resp, 146 + total_domains), 147 + }, 148 + { 149 + .data_type = QMI_OPT_FLAG, 150 + .elem_len = 1, 151 + .elem_size = sizeof(u8), 152 + .array_type = NO_ARRAY, 153 + .tlv_type = 0x11, 154 + .offset = offsetof(struct servreg_get_domain_list_resp, 155 + db_rev_count_valid), 156 + }, 157 + { 158 + .data_type = QMI_UNSIGNED_2_BYTE, 159 + .elem_len = 1, 160 + .elem_size = sizeof(u16), 161 + .array_type = NO_ARRAY, 162 + .tlv_type = 0x11, 163 + .offset = offsetof(struct servreg_get_domain_list_resp, 164 + db_rev_count), 165 + }, 166 + { 167 + .data_type = QMI_OPT_FLAG, 168 + .elem_len = 1, 169 + .elem_size = sizeof(u8), 170 + .array_type = NO_ARRAY, 171 + .tlv_type = 0x12, 172 + .offset = offsetof(struct servreg_get_domain_list_resp, 173 + domain_list_valid), 174 + }, 175 + { 176 + .data_type = QMI_DATA_LEN, 177 + .elem_len = 1, 178 + .elem_size = sizeof(u8), 179 + .array_type = NO_ARRAY, 180 + .tlv_type = 0x12, 181 + .offset = offsetof(struct servreg_get_domain_list_resp, 182 + domain_list_len), 183 + }, 184 + { 185 + .data_type = QMI_STRUCT, 186 + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, 187 + .elem_size = sizeof(struct servreg_location_entry), 188 + .array_type = NO_ARRAY, 189 + .tlv_type = 0x12, 190 + .offset = offsetof(struct servreg_get_domain_list_resp, 191 + domain_list), 192 + .ei_array = servreg_location_entry_ei, 193 + }, 194 + {} 195 + }; 196 + 197 + struct servreg_register_listener_req { 198 + u8 enable; 199 + char service_path[SERVREG_NAME_LENGTH + 1]; 200 + }; 201 + 202 + struct qmi_elem_info servreg_register_listener_req_ei[] = { 203 + { 204 + .data_type = QMI_UNSIGNED_1_BYTE, 205 + .elem_len = 1, 206 + .elem_size = sizeof(u8), 207 + .array_type = NO_ARRAY, 208 + .tlv_type = 0x01, 209 + .offset = offsetof(struct servreg_register_listener_req, 210 + enable), 211 + }, 212 + { 213 + .data_type = QMI_STRING, 214 + .elem_len = 
SERVREG_NAME_LENGTH + 1, 215 + .elem_size = sizeof(char), 216 + .array_type = NO_ARRAY, 217 + .tlv_type = 0x02, 218 + .offset = offsetof(struct servreg_register_listener_req, 219 + service_path), 220 + }, 221 + {} 222 + }; 223 + 224 + struct servreg_register_listener_resp { 225 + struct qmi_response_type_v01 resp; 226 + u8 curr_state_valid; 227 + enum servreg_service_state curr_state; 228 + }; 229 + 230 + struct qmi_elem_info servreg_register_listener_resp_ei[] = { 231 + { 232 + .data_type = QMI_STRUCT, 233 + .elem_len = 1, 234 + .elem_size = sizeof(struct qmi_response_type_v01), 235 + .array_type = NO_ARRAY, 236 + .tlv_type = 0x02, 237 + .offset = offsetof(struct servreg_register_listener_resp, 238 + resp), 239 + .ei_array = qmi_response_type_v01_ei, 240 + }, 241 + { 242 + .data_type = QMI_OPT_FLAG, 243 + .elem_len = 1, 244 + .elem_size = sizeof(u8), 245 + .array_type = NO_ARRAY, 246 + .tlv_type = 0x10, 247 + .offset = offsetof(struct servreg_register_listener_resp, 248 + curr_state_valid), 249 + }, 250 + { 251 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 252 + .elem_len = 1, 253 + .elem_size = sizeof(enum servreg_service_state), 254 + .array_type = NO_ARRAY, 255 + .tlv_type = 0x10, 256 + .offset = offsetof(struct servreg_register_listener_resp, 257 + curr_state), 258 + }, 259 + {} 260 + }; 261 + 262 + struct servreg_restart_pd_req { 263 + char service_path[SERVREG_NAME_LENGTH + 1]; 264 + }; 265 + 266 + struct qmi_elem_info servreg_restart_pd_req_ei[] = { 267 + { 268 + .data_type = QMI_STRING, 269 + .elem_len = SERVREG_NAME_LENGTH + 1, 270 + .elem_size = sizeof(char), 271 + .array_type = NO_ARRAY, 272 + .tlv_type = 0x01, 273 + .offset = offsetof(struct servreg_restart_pd_req, 274 + service_path), 275 + }, 276 + {} 277 + }; 278 + 279 + struct servreg_restart_pd_resp { 280 + struct qmi_response_type_v01 resp; 281 + }; 282 + 283 + struct qmi_elem_info servreg_restart_pd_resp_ei[] = { 284 + { 285 + .data_type = QMI_STRUCT, 286 + .elem_len = 1, 287 + .elem_size = 
sizeof(struct qmi_response_type_v01), 288 + .array_type = NO_ARRAY, 289 + .tlv_type = 0x02, 290 + .offset = offsetof(struct servreg_restart_pd_resp, 291 + resp), 292 + .ei_array = qmi_response_type_v01_ei, 293 + }, 294 + {} 295 + }; 296 + 297 + struct servreg_state_updated_ind { 298 + enum servreg_service_state curr_state; 299 + char service_path[SERVREG_NAME_LENGTH + 1]; 300 + u16 transaction_id; 301 + }; 302 + 303 + struct qmi_elem_info servreg_state_updated_ind_ei[] = { 304 + { 305 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 306 + .elem_len = 1, 307 + .elem_size = sizeof(u32), 308 + .array_type = NO_ARRAY, 309 + .tlv_type = 0x01, 310 + .offset = offsetof(struct servreg_state_updated_ind, 311 + curr_state), 312 + }, 313 + { 314 + .data_type = QMI_STRING, 315 + .elem_len = SERVREG_NAME_LENGTH + 1, 316 + .elem_size = sizeof(char), 317 + .array_type = NO_ARRAY, 318 + .tlv_type = 0x02, 319 + .offset = offsetof(struct servreg_state_updated_ind, 320 + service_path), 321 + }, 322 + { 323 + .data_type = QMI_UNSIGNED_2_BYTE, 324 + .elem_len = 1, 325 + .elem_size = sizeof(u16), 326 + .array_type = NO_ARRAY, 327 + .tlv_type = 0x03, 328 + .offset = offsetof(struct servreg_state_updated_ind, 329 + transaction_id), 330 + }, 331 + {} 332 + }; 333 + 334 + struct servreg_set_ack_req { 335 + char service_path[SERVREG_NAME_LENGTH + 1]; 336 + u16 transaction_id; 337 + }; 338 + 339 + struct qmi_elem_info servreg_set_ack_req_ei[] = { 340 + { 341 + .data_type = QMI_STRING, 342 + .elem_len = SERVREG_NAME_LENGTH + 1, 343 + .elem_size = sizeof(char), 344 + .array_type = NO_ARRAY, 345 + .tlv_type = 0x01, 346 + .offset = offsetof(struct servreg_set_ack_req, 347 + service_path), 348 + }, 349 + { 350 + .data_type = QMI_UNSIGNED_2_BYTE, 351 + .elem_len = 1, 352 + .elem_size = sizeof(u16), 353 + .array_type = NO_ARRAY, 354 + .tlv_type = 0x02, 355 + .offset = offsetof(struct servreg_set_ack_req, 356 + transaction_id), 357 + }, 358 + {} 359 + }; 360 + 361 + struct servreg_set_ack_resp { 362 + struct 
qmi_response_type_v01 resp; 363 + }; 364 + 365 + struct qmi_elem_info servreg_set_ack_resp_ei[] = { 366 + { 367 + .data_type = QMI_STRUCT, 368 + .elem_len = 1, 369 + .elem_size = sizeof(struct qmi_response_type_v01), 370 + .array_type = NO_ARRAY, 371 + .tlv_type = 0x02, 372 + .offset = offsetof(struct servreg_set_ack_resp, 373 + resp), 374 + .ei_array = qmi_response_type_v01_ei, 375 + }, 376 + {} 377 + }; 378 + 379 + #endif
+5 -1
drivers/soc/qcom/qcom_aoss.c
··· 200 200 { 201 201 struct qmp *qmp = data; 202 202 203 - wake_up_interruptible_all(&qmp->event); 203 + wake_up_all(&qmp->event); 204 204 205 205 return IRQ_HANDLED; 206 206 } ··· 225 225 static int qmp_send(struct qmp *qmp, const void *data, size_t len) 226 226 { 227 227 long time_left; 228 + size_t tlen; 228 229 int ret; 229 230 230 231 if (WARN_ON(len + sizeof(u32) > qmp->size)) ··· 240 239 __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32), 241 240 data, len / sizeof(u32)); 242 241 writel(len, qmp->msgram + qmp->offset); 242 + 243 + /* Read back len to confirm data written in message RAM */ 244 + tlen = readl(qmp->msgram + qmp->offset); 243 245 qmp_kick(qmp); 244 246 245 247 time_left = wait_event_interruptible_timeout(qmp->event,
+1
drivers/soc/qcom/rpmh-internal.h
··· 110 110 int rpmh_rsc_invalidate(struct rsc_drv *drv); 111 111 112 112 void rpmh_tx_done(const struct tcs_request *msg, int r); 113 + int rpmh_flush(struct rpmh_ctrlr *ctrlr); 113 114 114 115 #endif /* __RPM_INTERNAL_H__ */
+1 -1
drivers/soc/qcom/rpmh-rsc.c
··· 277 277 write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid); 278 278 write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr); 279 279 write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data); 280 - trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd); 280 + trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd); 281 281 } 282 282 283 283 write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
+10 -12
drivers/soc/qcom/rpmh.c
··· 23 23 24 24 #define RPMH_TIMEOUT_MS msecs_to_jiffies(10000) 25 25 26 - #define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \ 26 + #define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \ 27 27 struct rpmh_request name = { \ 28 28 .msg = { \ 29 29 .state = s, \ ··· 33 33 }, \ 34 34 .cmd = { { 0 } }, \ 35 35 .completion = q, \ 36 - .dev = dev, \ 36 + .dev = device, \ 37 37 .needs_free = false, \ 38 38 } 39 39 ··· 427 427 req->sleep_val != req->wake_val); 428 428 } 429 429 430 - static int send_single(const struct device *dev, enum rpmh_state state, 430 + static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state, 431 431 u32 addr, u32 data) 432 432 { 433 - DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg); 434 - struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); 433 + DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg); 435 434 436 435 /* Wake sets are always complete and sleep sets are not */ 437 436 rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE); ··· 444 445 /** 445 446 * rpmh_flush: Flushes the buffered active and sleep sets to TCS 446 447 * 447 - * @dev: The device making the request 448 + * @ctrlr: controller making request to flush cached data 448 449 * 449 450 * Return: -EBUSY if the controller is busy, probably waiting on a response 450 451 * to a RPMH request sent earlier. ··· 453 454 * that is powering down the entire system. Since no other RPMH API would be 454 455 * executing at this time, it is safe to run lockless. 
455 456 */ 456 - int rpmh_flush(const struct device *dev) 457 + int rpmh_flush(struct rpmh_ctrlr *ctrlr) 457 458 { 458 459 struct cache_req *p; 459 - struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); 460 460 int ret; 461 461 462 462 if (!ctrlr->dirty) { ··· 478 480 __func__, p->addr, p->sleep_val, p->wake_val); 479 481 continue; 480 482 } 481 - ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val); 483 + ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, 484 + p->sleep_val); 482 485 if (ret) 483 486 return ret; 484 - ret = send_single(dev, RPMH_WAKE_ONLY_STATE, 485 - p->addr, p->wake_val); 487 + ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr, 488 + p->wake_val); 486 489 if (ret) 487 490 return ret; 488 491 } ··· 492 493 493 494 return 0; 494 495 } 495 - EXPORT_SYMBOL(rpmh_flush); 496 496 497 497 /** 498 498 * rpmh_invalidate: Invalidate all sleep and active sets
+1 -1
drivers/soc/qcom/socinfo.c
··· 277 277 { \ 278 278 struct smem_image_version *image_version = seq->private; \ 279 279 seq_puts(seq, image_version->type); \ 280 - seq_puts(seq, "\n"); \ 280 + seq_putc(seq, '\n'); \ 281 281 return 0; \ 282 282 } \ 283 283 static int open_image_##type(struct inode *inode, struct file *file) \
+10 -10
drivers/soc/renesas/Kconfig
··· 195 195 This enables support for the Renesas RZ/G2E SoC. 196 196 197 197 config ARCH_R8A77950 198 - bool 199 - 200 - config ARCH_R8A77951 201 - bool 202 - 203 - config ARCH_R8A7795 204 - bool "Renesas R-Car H3 SoC Platform" 205 - select ARCH_R8A77950 206 - select ARCH_R8A77951 198 + bool "Renesas R-Car H3 ES1.x SoC Platform" 207 199 select ARCH_RCAR_GEN3 208 200 select SYSC_R8A7795 209 201 help 210 - This enables support for the Renesas R-Car H3 SoC. 202 + This enables support for the Renesas R-Car H3 SoC (revision 1.x). 203 + 204 + config ARCH_R8A77951 205 + bool "Renesas R-Car H3 ES2.0+ SoC Platform" 206 + select ARCH_RCAR_GEN3 207 + select SYSC_R8A7795 208 + help 209 + This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and 210 + later). 211 211 212 212 config ARCH_R8A77960 213 213 bool "Renesas R-Car M3-W SoC Platform"
+2 -2
drivers/soc/renesas/rcar-sysc.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 2 - * 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 3 * Renesas R-Car System Controller 4 4 * 5 5 * Copyright (C) 2016 Glider bvba
+1 -1
drivers/soc/renesas/renesas-soc.c
··· 259 259 #ifdef CONFIG_ARCH_R8A7794 260 260 { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 }, 261 261 #endif 262 - #ifdef CONFIG_ARCH_R8A7795 262 + #if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951) 263 263 { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 }, 264 264 #endif 265 265 #ifdef CONFIG_ARCH_R8A77960
-1
drivers/tee/tee_core.c
··· 44 44 45 45 kref_init(&ctx->refcount); 46 46 ctx->teedev = teedev; 47 - INIT_LIST_HEAD(&ctx->list_shm); 48 47 rc = teedev->desc->ops->open(ctx); 49 48 if (rc) 50 49 goto err;
+2 -1
drivers/tee/tee_private.h
··· 37 37 * @num_users: number of active users of this device 38 38 * @c_no_user: completion used when unregistering the device 39 39 * @mutex: mutex protecting @num_users and @idr 40 - * @idr: register of shared memory object allocated on this device 40 + * @idr: register of user space shared memory objects allocated or 41 + * registered on this device 41 42 * @pool: shared memory pool 42 43 */ 43 44 struct tee_device {
+24 -61
drivers/tee/tee_shm.c
··· 13 13 14 14 static void tee_shm_release(struct tee_shm *shm) 15 15 { 16 - struct tee_device *teedev = shm->teedev; 16 + struct tee_device *teedev = shm->ctx->teedev; 17 17 18 - mutex_lock(&teedev->mutex); 19 - idr_remove(&teedev->idr, shm->id); 20 - if (shm->ctx) 21 - list_del(&shm->link); 22 - mutex_unlock(&teedev->mutex); 18 + if (shm->flags & TEE_SHM_DMA_BUF) { 19 + mutex_lock(&teedev->mutex); 20 + idr_remove(&teedev->idr, shm->id); 21 + mutex_unlock(&teedev->mutex); 22 + } 23 23 24 24 if (shm->flags & TEE_SHM_POOL) { 25 25 struct tee_shm_pool_mgr *poolm; ··· 44 44 kfree(shm->pages); 45 45 } 46 46 47 - if (shm->ctx) 48 - teedev_ctx_put(shm->ctx); 47 + teedev_ctx_put(shm->ctx); 49 48 50 49 kfree(shm); 51 50 ··· 76 77 size_t size = vma->vm_end - vma->vm_start; 77 78 78 79 /* Refuse sharing shared memory provided by application */ 79 - if (shm->flags & TEE_SHM_REGISTER) 80 + if (shm->flags & TEE_SHM_USER_MAPPED) 80 81 return -EINVAL; 81 82 82 83 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, ··· 90 91 .mmap = tee_shm_op_mmap, 91 92 }; 92 93 93 - static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, 94 - struct tee_device *teedev, 95 - size_t size, u32 flags) 94 + struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) 96 95 { 96 + struct tee_device *teedev = ctx->teedev; 97 97 struct tee_shm_pool_mgr *poolm = NULL; 98 98 struct tee_shm *shm; 99 99 void *ret; 100 100 int rc; 101 - 102 - if (ctx && ctx->teedev != teedev) { 103 - dev_err(teedev->dev.parent, "ctx and teedev mismatch\n"); 104 - return ERR_PTR(-EINVAL); 105 - } 106 101 107 102 if (!(flags & TEE_SHM_MAPPED)) { 108 103 dev_err(teedev->dev.parent, ··· 125 132 } 126 133 127 134 shm->flags = flags | TEE_SHM_POOL; 128 - shm->teedev = teedev; 129 135 shm->ctx = ctx; 130 136 if (flags & TEE_SHM_DMA_BUF) 131 137 poolm = teedev->pool->dma_buf_mgr; ··· 137 145 goto err_kfree; 138 146 } 139 147 140 - mutex_lock(&teedev->mutex); 141 - shm->id = 
idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); 142 - mutex_unlock(&teedev->mutex); 143 - if (shm->id < 0) { 144 - ret = ERR_PTR(shm->id); 145 - goto err_pool_free; 146 - } 147 148 148 149 if (flags & TEE_SHM_DMA_BUF) { 149 150 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 151 + 152 + mutex_lock(&teedev->mutex); 153 + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); 154 + mutex_unlock(&teedev->mutex); 155 + if (shm->id < 0) { 156 + ret = ERR_PTR(shm->id); 157 + goto err_pool_free; 158 + } 150 159 151 160 exp_info.ops = &tee_shm_dma_buf_ops; 152 161 exp_info.size = shm->size; ··· 161 168 } 162 169 } 163 170 164 - if (ctx) { 171 + if (ctx) 165 172 teedev_ctx_get(ctx); 166 - mutex_lock(&teedev->mutex); 167 - list_add_tail(&shm->link, &ctx->list_shm); 168 - mutex_unlock(&teedev->mutex); 169 - } 170 173 171 174 return shm; 172 175 err_rem: 173 - mutex_lock(&teedev->mutex); 174 - idr_remove(&teedev->idr, shm->id); 175 - mutex_unlock(&teedev->mutex); 176 + if (flags & TEE_SHM_DMA_BUF) { 177 + mutex_lock(&teedev->mutex); 178 + idr_remove(&teedev->idr, shm->id); 179 + mutex_unlock(&teedev->mutex); 180 + } 176 181 err_pool_free: 177 182 poolm->ops->free(poolm, shm); 178 183 err_kfree: ··· 179 188 tee_device_put(teedev); 180 189 return ret; 181 190 } 182 - 183 - /** 184 - * tee_shm_alloc() - Allocate shared memory 185 - * @ctx: Context that allocates the shared memory 186 - * @size: Requested size of shared memory 187 - * @flags: Flags setting properties for the requested shared memory. 188 - * 189 - * Memory allocated as global shared memory is automatically freed when the 190 - * TEE file pointer is closed. The @flags field uses the bits defined by 191 - * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be 192 - * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and 193 - * associated with a dma-buf handle, else driver private memory. 
194 - */ 195 - struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) 196 - { 197 - return __tee_shm_alloc(ctx, ctx->teedev, size, flags); 198 - } 199 191 EXPORT_SYMBOL_GPL(tee_shm_alloc); 200 - 201 - struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size) 202 - { 203 - return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED); 204 - } 205 - EXPORT_SYMBOL_GPL(tee_shm_priv_alloc); 206 192 207 193 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, 208 194 size_t length, u32 flags) ··· 213 245 } 214 246 215 247 shm->flags = flags | TEE_SHM_REGISTER; 216 - shm->teedev = teedev; 217 248 shm->ctx = ctx; 218 249 shm->id = -1; 219 250 addr = untagged_addr(addr); ··· 267 300 goto err; 268 301 } 269 302 } 270 - 271 - mutex_lock(&teedev->mutex); 272 - list_add_tail(&shm->link, &ctx->list_shm); 273 - mutex_unlock(&teedev->mutex); 274 303 275 304 return shm; 276 305 err:
+4
include/dt-bindings/bus/ti-sysc.h
··· 18 18 19 19 #define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4) 20 20 21 + /* PRUSS sysc found on AM33xx/AM43xx/AM57xx */ 22 + #define SYSC_PRUSS_SUB_MWAIT (1 << 5) 23 + #define SYSC_PRUSS_STANDBY_INIT (1 << 4) 24 + 21 25 /* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ 22 26 #define SYSC_IDLE_FORCE 0 23 27 #define SYSC_IDLE_NO 1
+32
include/dt-bindings/power/meson-a1-power.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ 2 + /* 3 + * Copyright (c) 2019 Amlogic, Inc. 4 + * Author: Jianxin Pan <jianxin.pan@amlogic.com> 5 + */ 6 + 7 + #ifndef _DT_BINDINGS_MESON_A1_POWER_H 8 + #define _DT_BINDINGS_MESON_A1_POWER_H 9 + 10 + #define PWRC_DSPA_ID 8 11 + #define PWRC_DSPB_ID 9 12 + #define PWRC_UART_ID 10 13 + #define PWRC_DMC_ID 11 14 + #define PWRC_I2C_ID 12 15 + #define PWRC_PSRAM_ID 13 16 + #define PWRC_ACODEC_ID 14 17 + #define PWRC_AUDIO_ID 15 18 + #define PWRC_OTP_ID 16 19 + #define PWRC_DMA_ID 17 20 + #define PWRC_SD_EMMC_ID 18 21 + #define PWRC_RAMA_ID 19 22 + #define PWRC_RAMB_ID 20 23 + #define PWRC_IR_ID 21 24 + #define PWRC_SPICC_ID 22 25 + #define PWRC_SPIFC_ID 23 26 + #define PWRC_USB_ID 24 27 + #define PWRC_NIC_ID 25 28 + #define PWRC_PDMIN_ID 26 29 + #define PWRC_RSA_ID 27 30 + #define PWRC_MAX_ID 28 31 + 32 + #endif
-1
include/linux/firmware/imx/ipc.h
··· 25 25 IMX_SC_RPC_SVC_PAD = 6, 26 26 IMX_SC_RPC_SVC_MISC = 7, 27 27 IMX_SC_RPC_SVC_IRQ = 8, 28 - IMX_SC_RPC_SVC_ABORT = 9 29 28 }; 30 29 31 30 struct imx_sc_rpc_msg {
+2
include/linux/firmware/meson/meson_sm.h
··· 12 12 SM_EFUSE_WRITE, 13 13 SM_EFUSE_USER_MAX, 14 14 SM_GET_CHIP_ID, 15 + SM_A1_PWRC_SET, 16 + SM_A1_PWRC_GET, 15 17 }; 16 18 17 19 struct meson_sm_firmware;
+5
include/linux/platform_data/ti-sysc.h
··· 17 17 TI_SYSC_OMAP4_MCASP, 18 18 TI_SYSC_OMAP4_USB_HOST_FS, 19 19 TI_SYSC_DRA7_MCAN, 20 + TI_SYSC_PRUSS, 20 21 }; 21 22 22 23 struct ti_sysc_cookie { ··· 50 49 s8 emufree_shift; 51 50 }; 52 51 52 + #define SYSC_MODULE_QUIRK_PRUSS BIT(24) 53 + #define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) 54 + #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) 53 55 #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) 54 56 #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) 55 57 #define SYSC_MODULE_QUIRK_AESS BIT(19) ··· 145 141 146 142 struct ti_sysc_platform_data { 147 143 struct of_dev_auxdata *auxdata; 144 + bool (*soc_type_gp)(void); 148 145 int (*init_clockdomain)(struct device *dev, struct clk *fck, 149 146 struct clk *ick, struct ti_sysc_cookie *cookie); 150 147 void (*clkdm_deny_idle)(struct device *dev,
+1
include/linux/soc/qcom/apr.h
··· 85 85 uint16_t domain_id; 86 86 uint32_t version; 87 87 char name[APR_NAME_SIZE]; 88 + const char *service_path; 88 89 spinlock_t lock; 89 90 struct list_head node; 90 91 };
+29
include/linux/soc/qcom/pdr.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __QCOM_PDR_HELPER__ 3 + #define __QCOM_PDR_HELPER__ 4 + 5 + #include <linux/soc/qcom/qmi.h> 6 + 7 + #define SERVREG_NAME_LENGTH 64 8 + 9 + struct pdr_service; 10 + struct pdr_handle; 11 + 12 + enum servreg_service_state { 13 + SERVREG_LOCATOR_ERR = 0x1, 14 + SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF, 15 + SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF, 16 + SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF, 17 + SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF, 18 + }; 19 + 20 + struct pdr_handle *pdr_handle_alloc(void (*status)(int state, 21 + char *service_path, 22 + void *priv), void *priv); 23 + struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, 24 + const char *service_name, 25 + const char *service_path); 26 + int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds); 27 + void pdr_handle_release(struct pdr_handle *pdr); 28 + 29 + #endif
+1
include/linux/soc/qcom/qmi.h
··· 88 88 #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 89 89 #define QMI_ERR_INVALID_ID_V01 41 90 90 #define QMI_ERR_ENCODING_V01 58 91 + #define QMI_ERR_DISABLED_V01 69 91 92 #define QMI_ERR_INCOMPATIBLE_STATE_V01 90 92 93 #define QMI_ERR_NOT_SUPPORTED_V01 94 93 94
+1 -18
include/linux/tee_drv.h
··· 49 49 */ 50 50 struct tee_context { 51 51 struct tee_device *teedev; 52 - struct list_head list_shm; 53 52 void *data; 54 53 struct kref refcount; 55 54 bool releasing; ··· 167 168 168 169 /** 169 170 * struct tee_shm - shared memory object 170 - * @teedev: device used to allocate the object 171 - * @ctx: context using the object, if NULL the context is gone 172 - * @link link element 171 + * @ctx: context using the object 173 172 * @paddr: physical address of the shared memory 174 173 * @kaddr: virtual address of the shared memory 175 174 * @size: size of shared memory ··· 182 185 * subsystem and from drivers that implements their own shm pool manager. 183 186 */ 184 187 struct tee_shm { 185 - struct tee_device *teedev; 186 188 struct tee_context *ctx; 187 - struct list_head link; 188 189 phys_addr_t paddr; 189 190 void *kaddr; 190 191 size_t size; ··· 312 317 * @returns a pointer to 'struct tee_shm' 313 318 */ 314 319 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); 315 - 316 - /** 317 - * tee_shm_priv_alloc() - Allocate shared memory privately 318 - * @dev: Device that allocates the shared memory 319 - * @size: Requested size of shared memory 320 - * 321 - * Allocates shared memory buffer that is not associated with any client 322 - * context. Such buffers are owned by TEE driver and used for internal calls. 323 - * 324 - * @returns a pointer to 'struct tee_shm' 325 - */ 326 - struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); 327 320 328 321 /** 329 322 * tee_shm_register() - Register shared memory buffer
+5 -1
include/soc/fsl/dpaa2-io.h
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ 2 2 /* 3 3 * Copyright 2014-2016 Freescale Semiconductor Inc. 4 - * Copyright NXP 4 + * Copyright 2017-2019 NXP 5 5 * 6 6 */ 7 7 #ifndef __FSL_DPAA2_IO_H ··· 109 109 110 110 int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid, 111 111 const struct dpaa2_fd *fd); 112 + int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, u32 fqid, 113 + const struct dpaa2_fd *fd, int number_of_frame); 114 + int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, u32 *fqid, 115 + const struct dpaa2_fd *fd, int number_of_frame); 112 116 int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio, 113 117 u16 qdbin, const struct dpaa2_fd *fd); 114 118 int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,
+3 -3
include/soc/fsl/qe/ucc_fast.h
··· 178 178 struct ucc_fast_private { 179 179 struct ucc_fast_info *uf_info; 180 180 struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */ 181 - u32 __iomem *p_ucce; /* a pointer to the event register in memory. */ 182 - u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */ 181 + __be32 __iomem *p_ucce; /* a pointer to the event register in memory. */ 182 + __be32 __iomem *p_uccm; /* a pointer to the mask register in memory. */ 183 183 #ifdef CONFIG_UGETH_TX_ON_DEMAND 184 - u16 __iomem *p_utodr; /* pointer to the transmit on demand register */ 184 + __be16 __iomem *p_utodr;/* pointer to the transmit on demand register */ 185 185 #endif 186 186 int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ 187 187 int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
+6 -7
include/soc/fsl/qe/ucc_slow.h
··· 184 184 struct ucc_slow_private { 185 185 struct ucc_slow_info *us_info; 186 186 struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */ 187 - struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */ 187 + struct ucc_slow_pram __iomem *us_pram; /* a pointer to the parameter RAM */ 188 188 s32 us_pram_offset; 189 189 int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ 190 190 int enabled_rx; /* Whether channel is enabled for Rx (ENR) */ ··· 196 196 and length for first BD in a frame */ 197 197 s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */ 198 198 s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */ 199 - struct qe_bd *confBd; /* next BD for confirm after Tx */ 200 - struct qe_bd *tx_bd; /* next BD for new Tx request */ 201 - struct qe_bd *rx_bd; /* next BD to collect after Rx */ 199 + struct qe_bd __iomem *confBd; /* next BD for confirm after Tx */ 200 + struct qe_bd __iomem *tx_bd; /* next BD for new Tx request */ 201 + struct qe_bd __iomem *rx_bd; /* next BD to collect after Rx */ 202 202 void *p_rx_frame; /* accumulating receive frame */ 203 - u16 *p_ucce; /* a pointer to the event register in memory. 204 - */ 205 - u16 *p_uccm; /* a pointer to the mask register in memory */ 203 + __be16 __iomem *p_ucce; /* a pointer to the event register in memory */ 204 + __be16 __iomem *p_uccm; /* a pointer to the mask register in memory */ 206 205 u16 saved_uccm; /* a saved mask for the RX Interrupt bits */ 207 206 #ifdef STATISTICS 208 207 u32 tx_frames; /* Transmitted frames counters */
-5
include/soc/qcom/rpmh.h
··· 20 20 int rpmh_write_batch(const struct device *dev, enum rpmh_state state, 21 21 const struct tcs_cmd *cmd, u32 *n); 22 22 23 - int rpmh_flush(const struct device *dev); 24 - 25 23 int rpmh_invalidate(const struct device *dev); 26 24 27 25 #else ··· 36 38 static inline int rpmh_write_batch(const struct device *dev, 37 39 enum rpmh_state state, 38 40 const struct tcs_cmd *cmd, u32 *n) 39 - { return -ENODEV; } 40 - 41 - static inline int rpmh_flush(const struct device *dev) 42 41 { return -ENODEV; } 43 42 44 43 static inline int rpmh_invalidate(const struct device *dev)