Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'qcom-drivers-for-5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux into arm/drivers

Qualcomm driver updates for v5.8

This contains a large set of cleanups, bug fixes, general improvements
and documentation fixes for the RPMH driver. It adds a debugfs mechanism
for inspecting Command DB. Socinfo got the "soc_id" attribute defines
and definitions for various variants of MSM8939.

RPMH, RPMPD and RPMHPD were made possible to build as modules, but RPMH
had to be reverted due to a compilation issue when tracing is enabled.

RPMHPD gained power-domains for the SM8250 voltage corners.

The SCM driver gained fixes for two build warnings and the SMP2P had an
unnecessary error print removed.

* tag 'qcom-drivers-for-5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux: (42 commits)
Revert "soc: qcom: rpmh: Allow RPMH driver to be loaded as a module"
soc: qcom: rpmh-rsc: Remove the pm_lock
soc: qcom: rpmh-rsc: Simplify locking by eliminating the per-TCS lock
kernel/cpu_pm: Fix uninitted local in cpu_pm
soc: qcom: rpmh-rsc: We aren't notified of our own failure w/ NOTIFY_BAD
soc: qcom: rpmh-rsc: Correctly ignore CPU_CLUSTER_PM notifications
firmware: qcom_scm-legacy: Replace zero-length array with flexible-array
soc: qcom: rpmh-rsc: Timeout after 1 second in write_tcs_reg_sync()
soc: qcom: rpmh-rsc: Factor "tcs_reg_addr" and "tcs_cmd_addr" calculation
soc: qcom: socinfo: add msm8936/39 and apq8036/39 soc ids
soc: qcom: aoss: Add SM8250 compatible
soc: qcom: pdr: Remove impossible error condition
soc: qcom: rpmh: Dirt can only make you dirtier, not cleaner
soc: qcom: rpmhpd: Add SM8250 power domains
firmware: qcom_scm: fix bogous abuse of dma-direct internals
dt-bindings: soc: qcom: apr: Use generic node names for APR services
firmware: qcom_scm: Remove unneeded conversion to bool
soc: qcom: cmd-db: Properly endian swap the slv_id for debugfs
soc: qcom: cmd-db: Use 5 digits for printing address
soc: qcom: cmd-db: Cast sizeof() to int to silence field width warning
...

Link: https://lore.kernel.org/r/20200519052533.1250024-1-bjorn.andersson@linaro.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+778 -310
+1
Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
··· 23 23 - qcom,sc7180-rpmhpd 24 24 - qcom,sdm845-rpmhpd 25 25 - qcom,sm8150-rpmhpd 26 + - qcom,sm8250-rpmhpd 26 27 27 28 '#power-domain-cells': 28 29 const: 1
+1
Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
··· 19 19 "qcom,sc7180-aoss-qmp" 20 20 "qcom,sdm845-aoss-qmp" 21 21 "qcom,sm8150-aoss-qmp" 22 + "qcom,sm8250-aoss-qmp" 22 23 23 24 - reg: 24 25 Usage: required
+10 -10
Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
··· 65 65 compatible = "qcom,apr-v2"; 66 66 qcom,apr-domain = <APR_DOMAIN_ADSP>; 67 67 68 - q6core@3 { 68 + apr-service@3 { 69 69 compatible = "qcom,q6core"; 70 70 reg = <APR_SVC_ADSP_CORE>; 71 71 }; 72 72 73 - q6afe@4 { 73 + apr-service@4 { 74 74 compatible = "qcom,q6afe"; 75 75 reg = <APR_SVC_AFE>; 76 76 77 77 dais { 78 78 #sound-dai-cells = <1>; 79 - hdmi@1 { 80 - reg = <1>; 79 + dai@1 { 80 + reg = <HDMI_RX>; 81 81 }; 82 82 }; 83 83 }; 84 84 85 - q6asm@7 { 85 + apr-service@7 { 86 86 compatible = "qcom,q6asm"; 87 87 reg = <APR_SVC_ASM>; 88 88 ... 89 89 }; 90 90 91 - q6adm@8 { 91 + apr-service@8 { 92 92 compatible = "qcom,q6adm"; 93 93 reg = <APR_SVC_ADM>; 94 94 ... ··· 106 106 qcom,glink-channels = "apr_audio_svc"; 107 107 qcom,apr-domain = <APR_DOMAIN_ADSP>; 108 108 109 - q6core { 109 + apr-service@3 { 110 110 compatible = "qcom,q6core"; 111 111 reg = <APR_SVC_ADSP_CORE>; 112 112 }; 113 113 114 - q6afe: q6afe { 114 + q6afe: apr-service@4 { 115 115 compatible = "qcom,q6afe"; 116 116 reg = <APR_SVC_AFE>; 117 117 qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd"; 118 118 ... 119 119 }; 120 120 121 - q6asm: q6asm { 121 + q6asm: apr-service@7 { 122 122 compatible = "qcom,q6asm"; 123 123 reg = <APR_SVC_ASM>; 124 124 qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd"; 125 125 ... 126 126 }; 127 127 128 - q6adm: q6adm { 128 + q6adm: apr-service@8 { 129 129 compatible = "qcom,q6adm"; 130 130 reg = <APR_SVC_ADM>; 131 131 qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+1 -1
drivers/firmware/qcom_scm-legacy.c
··· 56 56 __le32 buf_offset; 57 57 __le32 resp_hdr_offset; 58 58 __le32 id; 59 - __le32 buf[0]; 59 + __le32 buf[]; 60 60 }; 61 61 62 62 /**
+4 -7
drivers/firmware/qcom_scm.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/cpumask.h> 8 8 #include <linux/export.h> 9 - #include <linux/dma-direct.h> 10 9 #include <linux/dma-mapping.h> 11 10 #include <linux/module.h> 12 11 #include <linux/types.h> ··· 805 806 struct qcom_scm_mem_map_info *mem_to_map; 806 807 phys_addr_t mem_to_map_phys; 807 808 phys_addr_t dest_phys; 808 - phys_addr_t ptr_phys; 809 - dma_addr_t ptr_dma; 809 + dma_addr_t ptr_phys; 810 810 size_t mem_to_map_sz; 811 811 size_t dest_sz; 812 812 size_t src_sz; ··· 822 824 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + 823 825 ALIGN(dest_sz, SZ_64); 824 826 825 - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); 827 + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); 826 828 if (!ptr) 827 829 return -ENOMEM; 828 - ptr_phys = dma_to_phys(__scm->dev, ptr_dma); 829 830 830 831 /* Fill source vmid detail */ 831 832 src = ptr; ··· 852 855 853 856 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, 854 857 ptr_phys, src_sz, dest_phys, dest_sz); 855 - dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); 858 + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); 856 859 if (ret) { 857 860 dev_err(__scm->dev, 858 861 "Assign memory protection call failed %d\n", ret); ··· 940 943 941 944 qcom_scm_clk_disable(); 942 945 943 - return ret > 0 ? true : false; 946 + return ret > 0; 944 947 } 945 948 EXPORT_SYMBOL(qcom_scm_hdcp_available); 946 949
+3 -3
drivers/soc/qcom/Kconfig
··· 117 117 help apply the aggregated state on the resource. 118 118 119 119 config QCOM_RPMHPD 120 - bool "Qualcomm RPMh Power domain driver" 120 + tristate "Qualcomm RPMh Power domain driver" 121 121 depends on QCOM_RPMH && QCOM_COMMAND_DB 122 122 help 123 123 QCOM RPMh Power domain driver to support power-domains with ··· 126 126 for the voltage rail. 127 127 128 128 config QCOM_RPMPD 129 - bool "Qualcomm RPM Power domain driver" 130 - depends on QCOM_SMD_RPM=y 129 + tristate "Qualcomm RPM Power domain driver" 130 + depends on QCOM_SMD_RPM 131 131 help 132 132 QCOM RPM Power domain driver to support power-domains with 133 133 performance states. The driver communicates a performance state
+76 -2
drivers/soc/qcom/cmd-db.c
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */ 3 3 4 + #include <linux/debugfs.h> 4 5 #include <linux/kernel.h> 5 6 #include <linux/of.h> 6 7 #include <linux/of_address.h> 7 - #include <linux/of_platform.h> 8 8 #include <linux/of_reserved_mem.h> 9 9 #include <linux/platform_device.h> 10 + #include <linux/seq_file.h> 10 11 #include <linux/types.h> 11 12 12 13 #include <soc/qcom/cmd-db.h> ··· 237 236 } 238 237 EXPORT_SYMBOL(cmd_db_read_slave_id); 239 238 239 + #ifdef CONFIG_DEBUG_FS 240 + static int cmd_db_debugfs_dump(struct seq_file *seq, void *p) 241 + { 242 + int i, j; 243 + const struct rsc_hdr *rsc; 244 + const struct entry_header *ent; 245 + const char *name; 246 + u16 len, version; 247 + u8 major, minor; 248 + 249 + seq_puts(seq, "Command DB DUMP\n"); 250 + 251 + for (i = 0; i < MAX_SLV_ID; i++) { 252 + rsc = &cmd_db_header->header[i]; 253 + if (!rsc->slv_id) 254 + break; 255 + 256 + switch (le16_to_cpu(rsc->slv_id)) { 257 + case CMD_DB_HW_ARC: 258 + name = "ARC"; 259 + break; 260 + case CMD_DB_HW_VRM: 261 + name = "VRM"; 262 + break; 263 + case CMD_DB_HW_BCM: 264 + name = "BCM"; 265 + break; 266 + default: 267 + name = "Unknown"; 268 + break; 269 + } 270 + 271 + version = le16_to_cpu(rsc->version); 272 + major = version >> 8; 273 + minor = version; 274 + 275 + seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor); 276 + seq_puts(seq, "-------------------------\n"); 277 + 278 + ent = rsc_to_entry_header(rsc); 279 + for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) { 280 + seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr), 281 + (int)sizeof(ent->id), ent->id); 282 + 283 + len = le16_to_cpu(ent->len); 284 + if (len) { 285 + seq_printf(seq, " [%*ph]", 286 + len, rsc_offset(rsc, ent)); 287 + } 288 + seq_putc(seq, '\n'); 289 + } 290 + } 291 + 292 + return 0; 293 + } 294 + 295 + static int open_cmd_db_debugfs(struct inode *inode, struct file *file) 296 + { 297 + return 
single_open(file, cmd_db_debugfs_dump, inode->i_private); 298 + } 299 + #endif 300 + 301 + static const struct file_operations cmd_db_debugfs_ops = { 302 + #ifdef CONFIG_DEBUG_FS 303 + .open = open_cmd_db_debugfs, 304 + #endif 305 + .read = seq_read, 306 + .llseek = seq_lseek, 307 + .release = single_release, 308 + }; 309 + 240 310 static int cmd_db_dev_probe(struct platform_device *pdev) 241 311 { 242 312 struct reserved_mem *rmem; ··· 331 259 return -EINVAL; 332 260 } 333 261 262 + debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops); 263 + 334 264 return 0; 335 265 } 336 266 337 267 static const struct of_device_id cmd_db_match_table[] = { 338 268 { .compatible = "qcom,cmd-db" }, 339 - { }, 269 + { } 340 270 }; 341 271 342 272 static struct platform_driver cmd_db_dev_driver = {
-4
drivers/soc/qcom/pdr_interface.c
··· 155 155 return ret; 156 156 } 157 157 158 - if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX) 159 - pr_err("PDR: %s notification state invalid: 0x%x\n", 160 - pds->service_path, resp.curr_state); 161 - 162 158 pds->state = resp.curr_state; 163 159 164 160 return 0;
+1
drivers/soc/qcom/qcom_aoss.c
··· 599 599 { .compatible = "qcom,sc7180-aoss-qmp", }, 600 600 { .compatible = "qcom,sdm845-aoss-qmp", }, 601 601 { .compatible = "qcom,sm8150-aoss-qmp", }, 602 + { .compatible = "qcom,sm8250-aoss-qmp", }, 602 603 {} 603 604 }; 604 605 MODULE_DEVICE_TABLE(of, qmp_dt_match);
+38 -21
drivers/soc/qcom/rpmh-internal.h
··· 22 22 * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests 23 23 * to the controller 24 24 * 25 - * @drv: the controller 26 - * @type: type of the TCS in this group - active, sleep, wake 27 - * @mask: mask of the TCSes relative to all the TCSes in the RSC 28 - * @offset: start of the TCS group relative to the TCSes in the RSC 29 - * @num_tcs: number of TCSes in this type 30 - * @ncpt: number of commands in each TCS 31 - * @lock: lock for synchronizing this TCS writes 32 - * @req: requests that are sent from the TCS 33 - * @cmd_cache: flattened cache of cmds in sleep/wake TCS 34 - * @slots: indicates which of @cmd_addr are occupied 25 + * @drv: The controller. 26 + * @type: Type of the TCS in this group - active, sleep, wake. 27 + * @mask: Mask of the TCSes relative to all the TCSes in the RSC. 28 + * @offset: Start of the TCS group relative to the TCSes in the RSC. 29 + * @num_tcs: Number of TCSes in this type. 30 + * @ncpt: Number of commands in each TCS. 31 + * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY 32 + * transfers (could be on a wake/sleep TCS if we are borrowing for 33 + * an ACTIVE_ONLY transfer). 34 + * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock, 35 + * trigger 36 + * End: get irq, access req, 37 + * grab drv->lock, clear tcs_in_use, drop drv->lock 38 + * @slots: Indicates which of @cmd_addr are occupied; only used for 39 + * SLEEP / WAKE TCSs. Things are tightly packed in the 40 + * case that (ncpt < MAX_CMDS_PER_TCS). That is if ncpt = 2 and 41 + * MAX_CMDS_PER_TCS = 16 then bit[2] = the first bit in 2nd TCS. 
35 42 */ 36 43 struct tcs_group { 37 44 struct rsc_drv *drv; ··· 47 40 u32 offset; 48 41 int num_tcs; 49 42 int ncpt; 50 - spinlock_t lock; 51 43 const struct tcs_request *req[MAX_TCS_PER_TYPE]; 52 - u32 *cmd_cache; 53 44 DECLARE_BITMAP(slots, MAX_TCS_SLOTS); 54 45 }; 55 46 ··· 89 84 * struct rsc_drv: the Direct Resource Voter (DRV) of the 90 85 * Resource State Coordinator controller (RSC) 91 86 * 92 - * @name: controller identifier 93 - * @tcs_base: start address of the TCS registers in this controller 94 - * @id: instance id in the controller (Direct Resource Voter) 95 - * @num_tcs: number of TCSes in this DRV 96 - * @tcs: TCS groups 97 - * @tcs_in_use: s/w state of the TCS 98 - * @lock: synchronize state of the controller 99 - * @client: handle to the DRV's client. 87 + * @name: Controller identifier. 88 + * @tcs_base: Start address of the TCS registers in this controller. 89 + * @id: Instance id in the controller (Direct Resource Voter). 90 + * @num_tcs: Number of TCSes in this DRV. 91 + * @rsc_pm: CPU PM notifier for controller. 92 + * Used when solver mode is not present. 93 + * @cpus_in_pm: Number of CPUs not in idle power collapse. 94 + * Used when solver mode is not present. 95 + * @tcs: TCS groups. 96 + * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY 97 + * transfers, but might show a sleep/wake TCS in use if 98 + * it was borrowed for an active_only transfer. You 99 + * must hold the lock in this struct (AKA drv->lock) in 100 + * order to update this. 101 + * @lock: Synchronize state of the controller. If RPMH's cache 102 + * lock will also be held, the order is: drv->lock then 103 + * cache_lock. 104 + * @client: Handle to the DRV's client. 
100 105 */ 101 106 struct rsc_drv { 102 107 const char *name; 103 108 void __iomem *tcs_base; 104 109 int id; 105 110 int num_tcs; 111 + struct notifier_block rsc_pm; 112 + atomic_t cpus_in_pm; 106 113 struct tcs_group tcs[TCS_TYPE_NR]; 107 114 DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR); 108 115 spinlock_t lock; ··· 124 107 int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg); 125 108 int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, 126 109 const struct tcs_request *msg); 127 - int rpmh_rsc_invalidate(struct rsc_drv *drv); 110 + void rpmh_rsc_invalidate(struct rsc_drv *drv); 128 111 129 112 void rpmh_tx_done(const struct tcs_request *msg, int r); 130 113 int rpmh_flush(struct rpmh_ctrlr *ctrlr);
+545 -207
drivers/soc/qcom/rpmh-rsc.c
··· 6 6 #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME 7 7 8 8 #include <linux/atomic.h> 9 + #include <linux/cpu_pm.h> 9 10 #include <linux/delay.h> 10 11 #include <linux/interrupt.h> 11 12 #include <linux/io.h> 13 + #include <linux/iopoll.h> 12 14 #include <linux/kernel.h> 13 15 #include <linux/list.h> 14 16 #include <linux/of.h> ··· 32 30 #define RSC_DRV_TCS_OFFSET 672 33 31 #define RSC_DRV_CMD_OFFSET 20 34 32 35 - /* DRV Configuration Information Register */ 33 + /* DRV HW Solver Configuration Information Register */ 34 + #define DRV_SOLVER_CONFIG 0x04 35 + #define DRV_HW_SOLVER_MASK 1 36 + #define DRV_HW_SOLVER_SHIFT 24 37 + 38 + /* DRV TCS Configuration Information Register */ 36 39 #define DRV_PRNT_CHLD_CONFIG 0x0C 37 40 #define DRV_NUM_TCS_MASK 0x3F 38 41 #define DRV_NUM_TCS_SHIFT 6 39 42 #define DRV_NCPT_MASK 0x1F 40 43 #define DRV_NCPT_SHIFT 27 41 44 42 - /* Register offsets */ 45 + /* Offsets for common TCS Registers, one bit per TCS */ 43 46 #define RSC_DRV_IRQ_ENABLE 0x00 44 47 #define RSC_DRV_IRQ_STATUS 0x04 45 - #define RSC_DRV_IRQ_CLEAR 0x08 46 - #define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 48 + #define RSC_DRV_IRQ_CLEAR 0x08 /* w/o; write 1 to clear */ 49 + 50 + /* 51 + * Offsets for per TCS Registers. 52 + * 53 + * TCSes start at 0x10 from tcs_base and are stored one after another. 54 + * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one 55 + * of the below to find a register. 56 + */ 57 + #define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 /* 1 bit per command */ 47 58 #define RSC_DRV_CONTROL 0x14 48 - #define RSC_DRV_STATUS 0x18 49 - #define RSC_DRV_CMD_ENABLE 0x1C 59 + #define RSC_DRV_STATUS 0x18 /* zero if tcs is busy */ 60 + #define RSC_DRV_CMD_ENABLE 0x1C /* 1 bit per command */ 61 + 62 + /* 63 + * Offsets for per command in a TCS. 64 + * 65 + * Commands (up to 16) start at 0x30 in a TCS; multiply command index 66 + * by RSC_DRV_CMD_OFFSET and add one of the below to find a register. 
67 + */ 50 68 #define RSC_DRV_CMD_MSGID 0x30 51 69 #define RSC_DRV_CMD_ADDR 0x34 52 70 #define RSC_DRV_CMD_DATA 0x38 ··· 83 61 #define CMD_STATUS_ISSUED BIT(8) 84 62 #define CMD_STATUS_COMPL BIT(16) 85 63 86 - static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id) 64 + /* 65 + * Here's a high level overview of how all the registers in RPMH work 66 + * together: 67 + * 68 + * - The main rpmh-rsc address is the base of a register space that can 69 + * be used to find overall configuration of the hardware 70 + * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register 71 + * space are all the TCS blocks. The offset of the TCS blocks is 72 + * specified in the device tree by "qcom,tcs-offset" and used to 73 + * compute tcs_base. 74 + * - TCS blocks come one after another. Type, count, and order are 75 + * specified by the device tree as "qcom,tcs-config". 76 + * - Each TCS block has some registers, then space for up to 16 commands. 77 + * Note that though address space is reserved for 16 commands, fewer 78 + * might be present. See ncpt (num cmds per TCS). 
79 + * 80 + * Here's a picture: 81 + * 82 + * +---------------------------------------------------+ 83 + * |RSC | 84 + * | ctrl | 85 + * | | 86 + * | Drvs: | 87 + * | +-----------------------------------------------+ | 88 + * | |DRV0 | | 89 + * | | ctrl/config | | 90 + * | | IRQ | | 91 + * | | | | 92 + * | | TCSes: | | 93 + * | | +------------------------------------------+ | | 94 + * | | |TCS0 | | | | | | | | | | | | | | | 95 + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | 96 + * | | | | | | | | | | | | | | | | | | 97 + * | | +------------------------------------------+ | | 98 + * | | +------------------------------------------+ | | 99 + * | | |TCS1 | | | | | | | | | | | | | | | 100 + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | 101 + * | | | | | | | | | | | | | | | | | | 102 + * | | +------------------------------------------+ | | 103 + * | | +------------------------------------------+ | | 104 + * | | |TCS2 | | | | | | | | | | | | | | | 105 + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | 106 + * | | | | | | | | | | | | | | | | | | 107 + * | | +------------------------------------------+ | | 108 + * | | ...... | | 109 + * | +-----------------------------------------------+ | 110 + * | +-----------------------------------------------+ | 111 + * | |DRV1 | | 112 + * | | (same as DRV0) | | 113 + * | +-----------------------------------------------+ | 114 + * | ...... 
| 115 + * +---------------------------------------------------+ 116 + */ 117 + 118 + static inline void __iomem * 119 + tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id) 87 120 { 88 - return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id + 89 - RSC_DRV_CMD_OFFSET * cmd_id); 121 + return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg; 90 122 } 91 123 92 - static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id, 124 + static inline void __iomem * 125 + tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id) 126 + { 127 + return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id; 128 + } 129 + 130 + static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id, 131 + int cmd_id) 132 + { 133 + return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id)); 134 + } 135 + 136 + static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id) 137 + { 138 + return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id)); 139 + } 140 + 141 + static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id, 142 + int cmd_id, u32 data) 143 + { 144 + writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id)); 145 + } 146 + 147 + static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id, 93 148 u32 data) 94 149 { 95 - writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id + 96 - RSC_DRV_CMD_OFFSET * cmd_id); 150 + writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id)); 97 151 } 98 152 99 - static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data) 100 - { 101 - writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id); 102 - } 103 - 104 - static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id, 153 + static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id, 105 154 u32 data) 106 155 { 107 - writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id); 108 - for (;;) { 109 - if (data == 
readl(drv->tcs_base + reg + 110 - RSC_DRV_TCS_OFFSET * tcs_id)) 111 - break; 112 - udelay(1); 113 - } 156 + u32 new_data; 157 + 158 + writel(data, tcs_reg_addr(drv, reg, tcs_id)); 159 + if (readl_poll_timeout_atomic(tcs_reg_addr(drv, reg, tcs_id), new_data, 160 + new_data == data, 1, USEC_PER_SEC)) 161 + pr_err("%s: error writing %#x to %d:%#x\n", drv->name, 162 + data, tcs_id, reg); 114 163 } 115 164 165 + /** 166 + * tcs_is_free() - Return if a TCS is totally free. 167 + * @drv: The RSC controller. 168 + * @tcs_id: The global ID of this TCS. 169 + * 170 + * Returns true if nobody has claimed this TCS (by setting tcs_in_use). 171 + * 172 + * Context: Must be called with the drv->lock held. 173 + * 174 + * Return: true if the given TCS is free. 175 + */ 116 176 static bool tcs_is_free(struct rsc_drv *drv, int tcs_id) 117 177 { 118 - return !test_bit(tcs_id, drv->tcs_in_use) && 119 - read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0); 178 + return !test_bit(tcs_id, drv->tcs_in_use); 120 179 } 121 180 122 - static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type) 123 - { 124 - return &drv->tcs[type]; 125 - } 126 - 127 - static int tcs_invalidate(struct rsc_drv *drv, int type) 181 + /** 182 + * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake). 183 + * @drv: The RSC controller. 184 + * @type: SLEEP_TCS or WAKE_TCS 185 + * 186 + * This will clear the "slots" variable of the given tcs_group and also 187 + * tell the hardware to forget about all entries. 188 + * 189 + * The caller must ensure that no other RPMH actions are happening when this 190 + * function is called, since otherwise the device may immediately become 191 + * used again even before this function exits. 
192 + */ 193 + static void tcs_invalidate(struct rsc_drv *drv, int type) 128 194 { 129 195 int m; 130 - struct tcs_group *tcs; 196 + struct tcs_group *tcs = &drv->tcs[type]; 131 197 132 - tcs = get_tcs_of_type(drv, type); 133 - 134 - spin_lock(&tcs->lock); 135 - if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) { 136 - spin_unlock(&tcs->lock); 137 - return 0; 138 - } 198 + /* Caller ensures nobody else is running so no lock */ 199 + if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) 200 + return; 139 201 140 202 for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) { 141 - if (!tcs_is_free(drv, m)) { 142 - spin_unlock(&tcs->lock); 143 - return -EAGAIN; 144 - } 145 203 write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0); 146 204 write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0); 147 205 } 148 206 bitmap_zero(tcs->slots, MAX_TCS_SLOTS); 149 - spin_unlock(&tcs->lock); 150 - 151 - return 0; 152 207 } 153 208 154 209 /** 155 - * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes 210 + * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes. 211 + * @drv: The RSC controller. 156 212 * 157 - * @drv: the RSC controller 213 + * The caller must ensure that no other RPMH actions are happening when this 214 + * function is called, since otherwise the device may immediately become 215 + * used again even before this function exits. 158 216 */ 159 - int rpmh_rsc_invalidate(struct rsc_drv *drv) 217 + void rpmh_rsc_invalidate(struct rsc_drv *drv) 160 218 { 161 - int ret; 162 - 163 - ret = tcs_invalidate(drv, SLEEP_TCS); 164 - if (!ret) 165 - ret = tcs_invalidate(drv, WAKE_TCS); 166 - 167 - return ret; 219 + tcs_invalidate(drv, SLEEP_TCS); 220 + tcs_invalidate(drv, WAKE_TCS); 168 221 } 169 222 223 + /** 224 + * get_tcs_for_msg() - Get the tcs_group used to send the given message. 225 + * @drv: The RSC controller. 226 + * @msg: The message we want to send. 
227 + * 228 + * This is normally pretty straightforward except if we are trying to send 229 + * an ACTIVE_ONLY message but don't have any active_only TCSes. 230 + * 231 + * Return: A pointer to a tcs_group or an ERR_PTR. 232 + */ 170 233 static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, 171 234 const struct tcs_request *msg) 172 235 { 173 - int type, ret; 236 + int type; 174 237 struct tcs_group *tcs; 175 238 176 239 switch (msg->state) { ··· 275 168 /* 276 169 * If we are making an active request on a RSC that does not have a 277 170 * dedicated TCS for active state use, then re-purpose a wake TCS to 278 - * send active votes. 279 - * NOTE: The driver must be aware that this RSC does not have a 280 - * dedicated AMC, and therefore would invalidate the sleep and wake 281 - * TCSes before making an active state request. 171 + * send active votes. This is safe because we ensure any active-only 172 + * transfers have finished before we use it (maybe by running from 173 + * the last CPU in PM code). 282 174 */ 283 - tcs = get_tcs_of_type(drv, type); 284 - if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) { 285 - tcs = get_tcs_of_type(drv, WAKE_TCS); 286 - if (tcs->num_tcs) { 287 - ret = rpmh_rsc_invalidate(drv); 288 - if (ret) 289 - return ERR_PTR(ret); 290 - } 291 - } 175 + tcs = &drv->tcs[type]; 176 + if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) 177 + tcs = &drv->tcs[WAKE_TCS]; 292 178 293 179 return tcs; 294 180 } 295 181 182 + /** 183 + * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS. 184 + * @drv: The RSC controller. 185 + * @tcs_id: The global ID of this TCS. 186 + * 187 + * For ACTIVE_ONLY transfers we want to call back into the client when the 188 + * transfer finishes. To do this we need the "request" that the client 189 + * originally provided us. This function grabs the request that we stashed 190 + * when we started the transfer. 
191 + * 192 + * This only makes sense for ACTIVE_ONLY transfers since those are the only 193 + * ones we track sending (the only ones we enable interrupts for and the only 194 + * ones we call back to the client for). 195 + * 196 + * Return: The stashed request. 197 + */ 296 198 static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv, 297 199 int tcs_id) 298 200 { ··· 318 202 } 319 203 320 204 /** 321 - * tcs_tx_done: TX Done interrupt handler 205 + * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS 206 + * @drv: The controller. 207 + * @tcs_id: The global ID of this TCS. 208 + * @trigger: If true then untrigger/retrigger. If false then just untrigger. 209 + * 210 + * In the normal case we only ever call with "trigger=true" to start a 211 + * transfer. That will un-trigger/disable the TCS from the last transfer 212 + * then trigger/enable for this transfer. 213 + * 214 + * If we borrowed a wake TCS for an active-only transfer we'll also call 215 + * this function with "trigger=false" to just do the un-trigger/disable 216 + * before using the TCS for wake purposes again. 217 + * 218 + * Note that the AP is only in charge of triggering active-only transfers. 219 + * The AP never triggers sleep/wake values using this function. 220 + */ 221 + static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger) 222 + { 223 + u32 enable; 224 + 225 + /* 226 + * HW req: Clear the DRV_CONTROL and enable TCS again 227 + * While clearing ensure that the AMC mode trigger is cleared 228 + * and then the mode enable is cleared. 
229 + */ 230 + enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id); 231 + enable &= ~TCS_AMC_MODE_TRIGGER; 232 + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 233 + enable &= ~TCS_AMC_MODE_ENABLE; 234 + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 235 + 236 + if (trigger) { 237 + /* Enable the AMC mode on the TCS and then trigger the TCS */ 238 + enable = TCS_AMC_MODE_ENABLE; 239 + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 240 + enable |= TCS_AMC_MODE_TRIGGER; 241 + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 242 + } 243 + } 244 + 245 + /** 246 + * enable_tcs_irq() - Enable or disable interrupts on the given TCS. 247 + * @drv: The controller. 248 + * @tcs_id: The global ID of this TCS. 249 + * @enable: If true then enable; if false then disable 250 + * 251 + * We only ever call this when we borrow a wake TCS for an active-only 252 + * transfer. For active-only TCSes interrupts are always left enabled. 253 + */ 254 + static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable) 255 + { 256 + u32 data; 257 + 258 + data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE); 259 + if (enable) 260 + data |= BIT(tcs_id); 261 + else 262 + data &= ~BIT(tcs_id); 263 + writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE); 264 + } 265 + 266 + /** 267 + * tcs_tx_done() - TX Done interrupt handler. 268 + * @irq: The IRQ number (ignored). 269 + * @p: Pointer to "struct rsc_drv". 270 + * 271 + * Called for ACTIVE_ONLY transfers (those are the only ones we enable the 272 + * IRQ for) when a transfer is done. 
273 + * 274 + * Return: IRQ_HANDLED 322 275 */ 323 276 static irqreturn_t tcs_tx_done(int irq, void *p) 324 277 { ··· 397 212 const struct tcs_request *req; 398 213 struct tcs_cmd *cmd; 399 214 400 - irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0); 215 + irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS); 401 216 402 217 for_each_set_bit(i, &irq_status, BITS_PER_LONG) { 403 218 req = get_req_from_tcs(drv, i); ··· 411 226 u32 sts; 412 227 413 228 cmd = &req->cmds[j]; 414 - sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j); 229 + sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j); 415 230 if (!(sts & CMD_STATUS_ISSUED) || 416 231 ((req->wait_for_compl || cmd->wait) && 417 232 !(sts & CMD_STATUS_COMPL))) { ··· 422 237 } 423 238 424 239 trace_rpmh_tx_done(drv, i, req, err); 240 + 241 + /* 242 + * If wake tcs was re-purposed for sending active 243 + * votes, clear AMC trigger & enable modes and 244 + * disable interrupt for this TCS 245 + */ 246 + if (!drv->tcs[ACTIVE_TCS].num_tcs) 247 + __tcs_set_trigger(drv, i, false); 425 248 skip: 426 249 /* Reclaim the TCS */ 427 250 write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0); 428 251 write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0); 429 - write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i)); 252 + writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR); 430 253 spin_lock(&drv->lock); 431 254 clear_bit(i, drv->tcs_in_use); 255 + /* 256 + * Disable interrupt for WAKE TCS to avoid being 257 + * spammed with interrupts coming when the solver 258 + * sends its wake votes. 259 + */ 260 + if (!drv->tcs[ACTIVE_TCS].num_tcs) 261 + enable_tcs_irq(drv, i, false); 432 262 spin_unlock(&drv->lock); 433 263 if (req) 434 264 rpmh_tx_done(req, err); ··· 452 252 return IRQ_HANDLED; 453 253 } 454 254 255 + /** 256 + * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger. 257 + * @drv: The controller. 258 + * @tcs_id: The global ID of this TCS. 259 + * @cmd_id: The index within the TCS to start writing. 
260 + * @msg: The message we want to send, which will contain several addr/data 261 + * pairs to program (but few enough that they all fit in one TCS). 262 + * 263 + * This is used for all types of transfers (active, sleep, and wake). 264 + */ 455 265 static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, 456 266 const struct tcs_request *msg) 457 267 { ··· 475 265 cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0; 476 266 cmd_msgid |= CMD_MSGID_WRITE; 477 267 478 - cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0); 268 + cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id); 479 269 480 270 for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) { 481 271 cmd = &msg->cmds[i]; ··· 491 281 } 492 282 493 283 write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete); 494 - cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); 284 + cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id); 495 285 write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable); 496 286 } 497 287 498 - static void __tcs_trigger(struct rsc_drv *drv, int tcs_id) 499 - { 500 - u32 enable; 501 - 502 - /* 503 - * HW req: Clear the DRV_CONTROL and enable TCS again 504 - * While clearing ensure that the AMC mode trigger is cleared 505 - * and then the mode enable is cleared. 
506 - */ 507 - enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0); 508 - enable &= ~TCS_AMC_MODE_TRIGGER; 509 - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 510 - enable &= ~TCS_AMC_MODE_ENABLE; 511 - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 512 - 513 - /* Enable the AMC mode on the TCS and then trigger the TCS */ 514 - enable = TCS_AMC_MODE_ENABLE; 515 - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 516 - enable |= TCS_AMC_MODE_TRIGGER; 517 - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); 518 - } 519 - 288 + /** 289 + * check_for_req_inflight() - Look to see if conflicting cmds are in flight. 290 + * @drv: The controller. 291 + * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers. 292 + * @msg: The message we want to send, which will contain several addr/data 293 + * pairs to program (but few enough that they all fit in one TCS). 294 + * 295 + * This will walk through the TCSes in the group and check if any of them 296 + * appear to be sending to addresses referenced in the message. If it finds 297 + * one it'll return -EBUSY. 298 + * 299 + * Only for use for active-only transfers. 300 + * 301 + * Must be called with the drv->lock held since that protects tcs_in_use. 302 + * 303 + * Return: 0 if nothing in flight or -EBUSY if we should try again later. 304 + * The caller must re-enable interrupts between tries since that's 305 + * the only way tcs_is_free() will ever return true and the only way 306 + * RSC_DRV_CMD_ENABLE will ever be cleared. 
307 + */ 520 308 static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, 521 309 const struct tcs_request *msg) 522 310 { ··· 527 319 if (tcs_is_free(drv, tcs_id)) 528 320 continue; 529 321 530 - curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); 322 + curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id); 531 323 532 324 for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) { 533 - addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j); 325 + addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j); 534 326 for (k = 0; k < msg->num_cmds; k++) { 535 327 if (addr == msg->cmds[k].addr) 536 328 return -EBUSY; ··· 541 333 return 0; 542 334 } 543 335 336 + /** 337 + * find_free_tcs() - Find free tcs in the given tcs_group; only for active. 338 + * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if 339 + * we borrowed it because there are zero active-only ones). 340 + * 341 + * Must be called with the drv->lock held since that protects tcs_in_use. 342 + * 343 + * Return: The first tcs that's free. 344 + */ 544 345 static int find_free_tcs(struct tcs_group *tcs) 545 346 { 546 347 int i; ··· 562 345 return -EBUSY; 563 346 } 564 347 348 + /** 349 + * tcs_write() - Store messages into a TCS right now, or return -EBUSY. 350 + * @drv: The controller. 351 + * @msg: The data to be sent. 352 + * 353 + * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it. 354 + * 355 + * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for 356 + * the same address is already transferring returns -EBUSY which means the 357 + * client should retry shortly. 358 + * 359 + * Return: 0 on success, -EBUSY if client should retry, or an error. 360 + * Client should have interrupts enabled for a bit before retrying. 
361 + */ 565 362 static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg) 566 363 { 567 364 struct tcs_group *tcs; ··· 587 356 if (IS_ERR(tcs)) 588 357 return PTR_ERR(tcs); 589 358 590 - spin_lock_irqsave(&tcs->lock, flags); 591 - spin_lock(&drv->lock); 359 + spin_lock_irqsave(&drv->lock, flags); 592 360 /* 593 361 * The h/w does not like if we send a request to the same address, 594 362 * when one is already in-flight or being processed. 595 363 */ 596 364 ret = check_for_req_inflight(drv, tcs, msg); 597 - if (ret) { 598 - spin_unlock(&drv->lock); 599 - goto done_write; 600 - } 365 + if (ret) 366 + goto unlock; 601 367 602 - tcs_id = find_free_tcs(tcs); 603 - if (tcs_id < 0) { 604 - ret = tcs_id; 605 - spin_unlock(&drv->lock); 606 - goto done_write; 607 - } 368 + ret = find_free_tcs(tcs); 369 + if (ret < 0) 370 + goto unlock; 371 + tcs_id = ret; 608 372 609 373 tcs->req[tcs_id - tcs->offset] = msg; 610 374 set_bit(tcs_id, drv->tcs_in_use); 611 - spin_unlock(&drv->lock); 375 + if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) { 376 + /* 377 + * Clear previously programmed WAKE commands in selected 378 + * repurposed TCS to avoid triggering them. tcs->slots will be 379 + * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate() 380 + */ 381 + write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); 382 + write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0); 383 + enable_tcs_irq(drv, tcs_id, true); 384 + } 385 + spin_unlock_irqrestore(&drv->lock, flags); 612 386 387 + /* 388 + * These two can be done after the lock is released because: 389 + * - We marked "tcs_in_use" under lock. 390 + * - Once "tcs_in_use" has been marked nobody else could be writing 391 + * to these registers until the interrupt goes off. 392 + * - The interrupt can't go off until we trigger w/ the last line 393 + * of __tcs_set_trigger() below. 
394 + */ 613 395 __tcs_buffer_write(drv, tcs_id, 0, msg); 614 - __tcs_trigger(drv, tcs_id); 396 + __tcs_set_trigger(drv, tcs_id, true); 615 397 616 - done_write: 617 - spin_unlock_irqrestore(&tcs->lock, flags); 398 + return 0; 399 + unlock: 400 + spin_unlock_irqrestore(&drv->lock, flags); 618 401 return ret; 619 402 } 620 403 621 404 /** 622 - * rpmh_rsc_send_data: Validate the incoming message and write to the 623 - * appropriate TCS block. 405 + * rpmh_rsc_send_data() - Write / trigger active-only message. 406 + * @drv: The controller. 407 + * @msg: The data to be sent. 624 408 * 625 - * @drv: the controller 626 - * @msg: the data to be sent 409 + * NOTES: 410 + * - This is only used for "ACTIVE_ONLY" since the limitations of this 411 + * function don't make sense for sleep/wake cases. 412 + * - To do the transfer, we will grab a whole TCS for ourselves--we don't 413 + * try to share. If there are none available we'll wait indefinitely 414 + * for a free one. 415 + * - This function will not wait for the commands to be finished, only for 416 + * data to be programmed into the RPMh. See rpmh_tx_done() which will 417 + * be called when the transfer is fully complete. 418 + * - This function must be called with interrupts enabled. If the hardware 419 + * is busy doing someone else's transfer we need that transfer to fully 420 + * finish so that we can have the hardware, and to fully finish it needs 421 + * the interrupt handler to run. If the interrupts is set to run on the 422 + * active CPU this can never happen if interrupts are disabled. 627 423 * 628 424 * Return: 0 on success, -EINVAL on error. 629 - * Note: This call blocks until a valid data is written to the TCS. 
630 425 */ 631 426 int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) 632 427 { 633 428 int ret; 634 - 635 - if (!msg || !msg->cmds || !msg->num_cmds || 636 - msg->num_cmds > MAX_RPMH_PAYLOAD) { 637 - WARN_ON(1); 638 - return -EINVAL; 639 - } 640 429 641 430 do { 642 431 ret = tcs_write(drv, msg); ··· 670 419 return ret; 671 420 } 672 421 673 - static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd, 674 - int len) 675 - { 676 - int i, j; 677 - 678 - /* Check for already cached commands */ 679 - for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) { 680 - if (tcs->cmd_cache[i] != cmd[0].addr) 681 - continue; 682 - if (i + len >= tcs->num_tcs * tcs->ncpt) 683 - goto seq_err; 684 - for (j = 0; j < len; j++) { 685 - if (tcs->cmd_cache[i + j] != cmd[j].addr) 686 - goto seq_err; 687 - } 688 - return i; 689 - } 690 - 691 - return -ENODATA; 692 - 693 - seq_err: 694 - WARN(1, "Message does not match previous sequence.\n"); 695 - return -EINVAL; 696 - } 697 - 422 + /** 423 + * find_slots() - Find a place to write the given message. 424 + * @tcs: The tcs group to search. 425 + * @msg: The message we want to find room for. 426 + * @tcs_id: If we return 0 from the function, we return the global ID of the 427 + * TCS to write to here. 428 + * @cmd_id: If we return 0 from the function, we return the index of 429 + * the command array of the returned TCS where the client should 430 + * start writing the message. 431 + * 432 + * Only for use on sleep/wake TCSes since those are the only ones we maintain 433 + * tcs->slots for. 434 + * 435 + * Return: -ENOMEM if there was no room, else 0. 
436 + */ 698 437 static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg, 699 438 int *tcs_id, int *cmd_id) 700 439 { 701 440 int slot, offset; 702 441 int i = 0; 703 442 704 - /* Find if we already have the msg in our TCS */ 705 - slot = find_match(tcs, msg->cmds, msg->num_cmds); 706 - if (slot >= 0) 707 - goto copy_data; 708 - 709 - /* Do over, until we can fit the full payload in a TCS */ 443 + /* Do over, until we can fit the full payload in a single TCS */ 710 444 do { 711 445 slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS, 712 446 i, msg->num_cmds, 0); ··· 700 464 i += tcs->ncpt; 701 465 } while (slot + msg->num_cmds - 1 >= i); 702 466 703 - copy_data: 704 467 bitmap_set(tcs->slots, slot, msg->num_cmds); 705 - /* Copy the addresses of the resources over to the slots */ 706 - for (i = 0; i < msg->num_cmds; i++) 707 - tcs->cmd_cache[slot + i] = msg->cmds[i].addr; 708 468 709 469 offset = slot / tcs->ncpt; 710 470 *tcs_id = offset + tcs->offset; ··· 709 477 return 0; 710 478 } 711 479 712 - static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg) 480 + /** 481 + * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger. 482 + * @drv: The controller. 483 + * @msg: The data to be written to the controller. 484 + * 485 + * This should only be called for for sleep/wake state, never active-only 486 + * state. 487 + * 488 + * The caller must ensure that no other RPMH actions are happening and the 489 + * controller is idle when this function is called since it runs lockless. 490 + * 491 + * Return: 0 if no error; else -error. 
492 + */ 493 + int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg) 713 494 { 714 495 struct tcs_group *tcs; 715 496 int tcs_id = 0, cmd_id = 0; 716 - unsigned long flags; 717 497 int ret; 718 498 719 499 tcs = get_tcs_for_msg(drv, msg); 720 500 if (IS_ERR(tcs)) 721 501 return PTR_ERR(tcs); 722 502 723 - spin_lock_irqsave(&tcs->lock, flags); 724 503 /* find the TCS id and the command in the TCS to write to */ 725 504 ret = find_slots(tcs, msg, &tcs_id, &cmd_id); 726 505 if (!ret) 727 506 __tcs_buffer_write(drv, tcs_id, cmd_id, msg); 728 - spin_unlock_irqrestore(&tcs->lock, flags); 729 507 730 508 return ret; 731 509 } 732 510 733 511 /** 734 - * rpmh_rsc_write_ctrl_data: Write request to the controller 512 + * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy. 513 + * @drv: The controller 735 514 * 736 - * @drv: the controller 737 - * @msg: the data to be written to the controller 515 + * Checks if any of the AMCs are busy in handling ACTIVE sets. 516 + * This is called from the last cpu powering down before flushing 517 + * SLEEP and WAKE sets. If AMCs are busy, controller can not enter 518 + * power collapse, so deny from the last cpu's pm notification. 738 519 * 739 - * There is no response returned for writing the request to the controller. 520 + * Context: Must be called with the drv->lock held. 
521 + * 522 + * Return: 523 + * * False - AMCs are idle 524 + * * True - AMCs are busy 740 525 */ 741 - int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg) 526 + static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv) 742 527 { 743 - if (!msg || !msg->cmds || !msg->num_cmds || 744 - msg->num_cmds > MAX_RPMH_PAYLOAD) { 745 - pr_err("Payload error\n"); 746 - return -EINVAL; 528 + int m; 529 + struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS]; 530 + 531 + /* 532 + * If we made an active request on a RSC that does not have a 533 + * dedicated TCS for active state use, then re-purposed wake TCSes 534 + * should be checked for not busy, because we used wake TCSes for 535 + * active requests in this case. 536 + */ 537 + if (!tcs->num_tcs) 538 + tcs = &drv->tcs[WAKE_TCS]; 539 + 540 + for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) { 541 + if (!tcs_is_free(drv, m)) 542 + return true; 747 543 } 748 544 749 - /* Data sent to this API will not be sent immediately */ 750 - if (msg->state == RPMH_ACTIVE_ONLY_STATE) 751 - return -EINVAL; 545 + return false; 546 + } 752 547 753 - return tcs_ctrl_write(drv, msg); 548 + /** 549 + * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy. 550 + * @nfb: Pointer to the notifier block in struct rsc_drv. 551 + * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT. 552 + * @v: Unused 553 + * 554 + * This function is given to cpu_pm_register_notifier so we can be informed 555 + * about when CPUs go down. When all CPUs go down we know no more active 556 + * transfers will be started so we write sleep/wake sets. This function gets 557 + * called from cpuidle code paths and also at system suspend time. 558 + * 559 + * If its last CPU going down and AMCs are not busy then writes cached sleep 560 + * and wake messages to TCSes. The firmware then takes care of triggering 561 + * them when entering deepest low power modes. 
562 + * 563 + * Return: See cpu_pm_register_notifier() 564 + */ 565 + static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb, 566 + unsigned long action, void *v) 567 + { 568 + struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm); 569 + int ret = NOTIFY_OK; 570 + int cpus_in_pm; 571 + 572 + switch (action) { 573 + case CPU_PM_ENTER: 574 + cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm); 575 + /* 576 + * NOTE: comments for num_online_cpus() point out that it's 577 + * only a snapshot so we need to be careful. It should be OK 578 + * for us to use, though. It's important for us not to miss 579 + * if we're the last CPU going down so it would only be a 580 + * problem if a CPU went offline right after we did the check 581 + * AND that CPU was not idle AND that CPU was the last non-idle 582 + * CPU. That can't happen. CPUs would have to come out of idle 583 + * before the CPU could go offline. 584 + */ 585 + if (cpus_in_pm < num_online_cpus()) 586 + return NOTIFY_OK; 587 + break; 588 + case CPU_PM_ENTER_FAILED: 589 + case CPU_PM_EXIT: 590 + atomic_dec(&drv->cpus_in_pm); 591 + return NOTIFY_OK; 592 + default: 593 + return NOTIFY_DONE; 594 + } 595 + 596 + /* 597 + * It's likely we're on the last CPU. Grab the drv->lock and write 598 + * out the sleep/wake commands to RPMH hardware. Grabbing the lock 599 + * means that if we race with another CPU coming up we are still 600 + * guaranteed to be safe. If another CPU came up just after we checked 601 + * and has grabbed the lock or started an active transfer then we'll 602 + * notice we're busy and abort. If another CPU comes up after we start 603 + * flushing it will be blocked from starting an active transfer until 604 + * we're done flushing. If another CPU starts an active transfer after 605 + * we release the lock we're still OK because we're no longer the last 606 + * CPU. 
607 + */ 608 + if (spin_trylock(&drv->lock)) { 609 + if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)) 610 + ret = NOTIFY_BAD; 611 + spin_unlock(&drv->lock); 612 + } else { 613 + /* Another CPU must be up */ 614 + return NOTIFY_OK; 615 + } 616 + 617 + if (ret == NOTIFY_BAD) { 618 + /* Double-check if we're here because someone else is up */ 619 + if (cpus_in_pm < num_online_cpus()) 620 + ret = NOTIFY_OK; 621 + else 622 + /* We won't be called w/ CPU_PM_ENTER_FAILED */ 623 + atomic_dec(&drv->cpus_in_pm); 624 + } 625 + 626 + return ret; 754 627 } 755 628 756 629 static int rpmh_probe_tcs_config(struct platform_device *pdev, 757 - struct rsc_drv *drv) 630 + struct rsc_drv *drv, void __iomem *base) 758 631 { 759 632 struct tcs_type_config { 760 633 u32 type; ··· 869 532 u32 config, max_tcs, ncpt, offset; 870 533 int i, ret, n, st = 0; 871 534 struct tcs_group *tcs; 872 - struct resource *res; 873 - void __iomem *base; 874 - char drv_id[10] = {0}; 875 - 876 - snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id); 877 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id); 878 - base = devm_ioremap_resource(&pdev->dev, res); 879 - if (IS_ERR(base)) 880 - return PTR_ERR(base); 881 535 882 536 ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset); 883 537 if (ret) ··· 912 584 tcs->type = tcs_cfg[i].type; 913 585 tcs->num_tcs = tcs_cfg[i].n; 914 586 tcs->ncpt = ncpt; 915 - spin_lock_init(&tcs->lock); 916 587 917 588 if (!tcs->num_tcs || tcs->type == CONTROL_TCS) 918 589 continue; ··· 923 596 tcs->mask = ((1 << tcs->num_tcs) - 1) << st; 924 597 tcs->offset = st; 925 598 st += tcs->num_tcs; 926 - 927 - /* 928 - * Allocate memory to cache sleep and wake requests to 929 - * avoid reading TCS register memory. 
930 - */ 931 - if (tcs->type == ACTIVE_TCS) 932 - continue; 933 - 934 - tcs->cmd_cache = devm_kcalloc(&pdev->dev, 935 - tcs->num_tcs * ncpt, sizeof(u32), 936 - GFP_KERNEL); 937 - if (!tcs->cmd_cache) 938 - return -ENOMEM; 939 599 } 940 600 941 601 drv->num_tcs = st; ··· 934 620 { 935 621 struct device_node *dn = pdev->dev.of_node; 936 622 struct rsc_drv *drv; 623 + struct resource *res; 624 + char drv_id[10] = {0}; 937 625 int ret, irq; 626 + u32 solver_config; 627 + void __iomem *base; 938 628 939 629 /* 940 630 * Even though RPMh doesn't directly use cmd-db, all of its children ··· 964 646 if (!drv->name) 965 647 drv->name = dev_name(&pdev->dev); 966 648 967 - ret = rpmh_probe_tcs_config(pdev, drv); 649 + snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id); 650 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id); 651 + base = devm_ioremap_resource(&pdev->dev, res); 652 + if (IS_ERR(base)) 653 + return PTR_ERR(base); 654 + 655 + ret = rpmh_probe_tcs_config(pdev, drv, base); 968 656 if (ret) 969 657 return ret; 970 658 ··· 987 663 if (ret) 988 664 return ret; 989 665 666 + /* 667 + * CPU PM notification are not required for controllers that support 668 + * 'HW solver' mode where they can be in autonomous mode executing low 669 + * power mode to power down. 670 + */ 671 + solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG); 672 + solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT; 673 + solver_config = solver_config >> DRV_HW_SOLVER_SHIFT; 674 + if (!solver_config) { 675 + drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback; 676 + cpu_pm_register_notifier(&drv->rsc_pm); 677 + } 678 + 990 679 /* Enable the active TCS to send requests immediately */ 991 - write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask); 680 + writel_relaxed(drv->tcs[ACTIVE_TCS].mask, 681 + drv->tcs_base + RSC_DRV_IRQ_ENABLE); 992 682 993 683 spin_lock_init(&drv->client.cache_lock); 994 684 INIT_LIST_HEAD(&drv->client.cache);
+47 -50
drivers/soc/qcom/rpmh.c
··· 9 9 #include <linux/jiffies.h> 10 10 #include <linux/kernel.h> 11 11 #include <linux/list.h> 12 + #include <linux/lockdep.h> 12 13 #include <linux/module.h> 13 14 #include <linux/of.h> 14 15 #include <linux/platform_device.h> ··· 120 119 { 121 120 struct cache_req *req; 122 121 unsigned long flags; 122 + u32 old_sleep_val, old_wake_val; 123 123 124 124 spin_lock_irqsave(&ctrlr->cache_lock, flags); 125 125 req = __find_req(ctrlr, cmd->addr); ··· 135 133 136 134 req->addr = cmd->addr; 137 135 req->sleep_val = req->wake_val = UINT_MAX; 138 - INIT_LIST_HEAD(&req->list); 139 136 list_add_tail(&req->list, &ctrlr->cache); 140 137 141 138 existing: 139 + old_sleep_val = req->sleep_val; 140 + old_wake_val = req->wake_val; 141 + 142 142 switch (state) { 143 143 case RPMH_ACTIVE_ONLY_STATE: 144 - if (req->sleep_val != UINT_MAX) 145 - req->wake_val = cmd->data; 146 - break; 147 144 case RPMH_WAKE_ONLY_STATE: 148 145 req->wake_val = cmd->data; 149 146 break; 150 147 case RPMH_SLEEP_STATE: 151 148 req->sleep_val = cmd->data; 152 149 break; 153 - default: 154 - break; 155 150 } 156 151 157 - ctrlr->dirty = true; 152 + ctrlr->dirty |= (req->sleep_val != old_sleep_val || 153 + req->wake_val != old_wake_val) && 154 + req->sleep_val != UINT_MAX && 155 + req->wake_val != UINT_MAX; 156 + 158 157 unlock: 159 158 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); 160 159 ··· 290 287 291 288 spin_lock_irqsave(&ctrlr->cache_lock, flags); 292 289 list_add_tail(&req->list, &ctrlr->batch_cache); 290 + ctrlr->dirty = true; 293 291 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); 294 292 } 295 293 ··· 298 294 { 299 295 struct batch_cache_req *req; 300 296 const struct rpmh_request *rpm_msg; 301 - unsigned long flags; 302 297 int ret = 0; 303 298 int i; 304 299 305 300 /* Send Sleep/Wake requests to the controller, expect no response */ 306 - spin_lock_irqsave(&ctrlr->cache_lock, flags); 307 301 list_for_each_entry(req, &ctrlr->batch_cache, list) { 308 302 for (i = 0; i < req->count; i++) 
{ 309 303 rpm_msg = req->rpm_msgs + i; ··· 311 309 break; 312 310 } 313 311 } 314 - spin_unlock_irqrestore(&ctrlr->cache_lock, flags); 315 312 316 313 return ret; 317 - } 318 - 319 - static void invalidate_batch(struct rpmh_ctrlr *ctrlr) 320 - { 321 - struct batch_cache_req *req, *tmp; 322 - unsigned long flags; 323 - 324 - spin_lock_irqsave(&ctrlr->cache_lock, flags); 325 - list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) 326 - kfree(req); 327 - INIT_LIST_HEAD(&ctrlr->batch_cache); 328 - spin_unlock_irqrestore(&ctrlr->cache_lock, flags); 329 314 } 330 315 331 316 /** ··· 431 442 } 432 443 433 444 /** 434 - * rpmh_flush: Flushes the buffered active and sleep sets to TCS 445 + * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes 435 446 * 436 - * @ctrlr: controller making request to flush cached data 447 + * @ctrlr: Controller making request to flush cached data 437 448 * 438 - * Return: -EBUSY if the controller is busy, probably waiting on a response 439 - * to a RPMH request sent earlier. 440 - * 441 - * This function is always called from the sleep code from the last CPU 442 - * that is powering down the entire system. Since no other RPMH API would be 443 - * executing at this time, it is safe to run lockless. 449 + * Return: 450 + * * 0 - Success 451 + * * Error code - Otherwise 444 452 */ 445 453 int rpmh_flush(struct rpmh_ctrlr *ctrlr) 446 454 { 447 455 struct cache_req *p; 448 - int ret; 456 + int ret = 0; 457 + 458 + lockdep_assert_irqs_disabled(); 459 + 460 + /* 461 + * Currently rpmh_flush() is only called when we think we're running 462 + * on the last processor. If the lock is busy it means another 463 + * processor is up and it's better to abort than spin. 
464 + */ 465 + if (!spin_trylock(&ctrlr->cache_lock)) 466 + return -EBUSY; 449 467 450 468 if (!ctrlr->dirty) { 451 469 pr_debug("Skipping flush, TCS has latest data.\n"); 452 - return 0; 470 + goto exit; 453 471 } 472 + 473 + /* Invalidate the TCSes first to avoid stale data */ 474 + rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); 454 475 455 476 /* First flush the cached batch requests */ 456 477 ret = flush_batch(ctrlr); 457 478 if (ret) 458 - return ret; 479 + goto exit; 459 480 460 - /* 461 - * Nobody else should be calling this function other than system PM, 462 - * hence we can run without locks. 463 - */ 464 481 list_for_each_entry(p, &ctrlr->cache, list) { 465 482 if (!is_req_valid(p)) { 466 483 pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x", ··· 476 481 ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, 477 482 p->sleep_val); 478 483 if (ret) 479 - return ret; 484 + goto exit; 480 485 ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr, 481 486 p->wake_val); 482 487 if (ret) 483 - return ret; 488 + goto exit; 484 489 } 485 490 486 491 ctrlr->dirty = false; 487 492 488 - return 0; 493 + exit: 494 + spin_unlock(&ctrlr->cache_lock); 495 + return ret; 489 496 } 490 497 491 498 /** 492 - * rpmh_invalidate: Invalidate all sleep and active sets 493 - * sets. 499 + * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache 494 500 * 495 501 * @dev: The device making the request 496 502 * 497 - * Invalidate the sleep and active values in the TCS blocks. 503 + * Invalidate the sleep and wake values in batch_cache. 
498 504 */ 499 505 int rpmh_invalidate(const struct device *dev) 500 506 { 501 507 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); 502 - int ret; 508 + struct batch_cache_req *req, *tmp; 509 + unsigned long flags; 503 510 504 - invalidate_batch(ctrlr); 511 + spin_lock_irqsave(&ctrlr->cache_lock, flags); 512 + list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) 513 + kfree(req); 514 + INIT_LIST_HEAD(&ctrlr->batch_cache); 505 515 ctrlr->dirty = true; 516 + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); 506 517 507 - do { 508 - ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); 509 - } while (ret == -EAGAIN); 510 - 511 - return ret; 518 + return 0; 512 519 } 513 520 EXPORT_SYMBOL(rpmh_invalidate);
+24
drivers/soc/qcom/rpmhpd.c
··· 4 4 #include <linux/err.h> 5 5 #include <linux/init.h> 6 6 #include <linux/kernel.h> 7 + #include <linux/module.h> 7 8 #include <linux/mutex.h> 8 9 #include <linux/pm_domain.h> 9 10 #include <linux/slab.h> ··· 167 166 .num_pds = ARRAY_SIZE(sm8150_rpmhpds), 168 167 }; 169 168 169 + static struct rpmhpd *sm8250_rpmhpds[] = { 170 + [SM8250_CX] = &sdm845_cx, 171 + [SM8250_CX_AO] = &sdm845_cx_ao, 172 + [SM8250_EBI] = &sdm845_ebi, 173 + [SM8250_GFX] = &sdm845_gfx, 174 + [SM8250_LCX] = &sdm845_lcx, 175 + [SM8250_LMX] = &sdm845_lmx, 176 + [SM8250_MMCX] = &sm8150_mmcx, 177 + [SM8250_MMCX_AO] = &sm8150_mmcx_ao, 178 + [SM8250_MX] = &sdm845_mx, 179 + [SM8250_MX_AO] = &sdm845_mx_ao, 180 + }; 181 + 182 + static const struct rpmhpd_desc sm8250_desc = { 183 + .rpmhpds = sm8250_rpmhpds, 184 + .num_pds = ARRAY_SIZE(sm8250_rpmhpds), 185 + }; 186 + 170 187 /* SC7180 RPMH powerdomains */ 171 188 static struct rpmhpd *sc7180_rpmhpds[] = { 172 189 [SC7180_CX] = &sdm845_cx, ··· 206 187 { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc }, 207 188 { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc }, 208 189 { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, 190 + { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc }, 209 191 { } 210 192 }; 193 + MODULE_DEVICE_TABLE(of, rpmhpd_match_table); 211 194 212 195 static int rpmhpd_send_corner(struct rpmhpd *pd, int state, 213 196 unsigned int corner, bool sync) ··· 481 460 return platform_driver_register(&rpmhpd_driver); 482 461 } 483 462 core_initcall(rpmhpd_init); 463 + 464 + MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver"); 465 + MODULE_LICENSE("GPL v2");
+5
drivers/soc/qcom/rpmpd.c
··· 4 4 #include <linux/err.h> 5 5 #include <linux/init.h> 6 6 #include <linux/kernel.h> 7 + #include <linux/module.h> 7 8 #include <linux/mutex.h> 8 9 #include <linux/pm_domain.h> 9 10 #include <linux/of.h> ··· 227 226 { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc }, 228 227 { } 229 228 }; 229 + MODULE_DEVICE_TABLE(of, rpmpd_match_table); 230 230 231 231 static int rpmpd_send_enable(struct rpmpd *pd, bool enable) 232 232 { ··· 424 422 return platform_driver_register(&rpmpd_driver); 425 423 } 426 424 core_initcall(rpmpd_init); 425 + 426 + MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPM Power Domain Driver"); 427 + MODULE_LICENSE("GPL v2");
+1 -3
drivers/soc/qcom/smp2p.c
··· 474 474 goto report_read_failure; 475 475 476 476 irq = platform_get_irq(pdev, 0); 477 - if (irq < 0) { 478 - dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n"); 477 + if (irq < 0) 479 478 return irq; 480 - } 481 479 482 480 smp2p->mbox_client.dev = &pdev->dev; 483 481 smp2p->mbox_client.knows_txdone = true;
+6
drivers/soc/qcom/socinfo.c
··· 188 188 { 216, "MSM8674PRO" }, 189 189 { 217, "MSM8974-AA" }, 190 190 { 218, "MSM8974-AB" }, 191 + { 233, "MSM8936" }, 192 + { 239, "MSM8939" }, 193 + { 240, "APQ8036" }, 194 + { 241, "APQ8039" }, 191 195 { 246, "MSM8996" }, 192 196 { 247, "APQ8016" }, 193 197 { 248, "MSM8216" }, ··· 434 430 qs->attr.family = "Snapdragon"; 435 431 qs->attr.machine = socinfo_machine(&pdev->dev, 436 432 le32_to_cpu(info->id)); 433 + qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", 434 + le32_to_cpu(info->id)); 437 435 qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", 438 436 SOCINFO_MAJOR(le32_to_cpu(info->ver)), 439 437 SOCINFO_MINOR(le32_to_cpu(info->ver)));
+12
include/dt-bindings/power/qcom-rpmpd.h
··· 28 28 #define SM8150_MMCX 9 29 29 #define SM8150_MMCX_AO 10 30 30 31 + /* SM8250 Power Domain Indexes */ 32 + #define SM8250_CX 0 33 + #define SM8250_CX_AO 1 34 + #define SM8250_EBI 2 35 + #define SM8250_GFX 3 36 + #define SM8250_LCX 4 37 + #define SM8250_LMX 5 38 + #define SM8250_MMCX 6 39 + #define SM8250_MMCX_AO 7 40 + #define SM8250_MX 8 41 + #define SM8250_MX_AO 9 42 + 31 43 /* SC7180 Power Domain Indexes */ 32 44 #define SC7180_CX 0 33 45 #define SC7180_CX_AO 1
+1
include/soc/qcom/cmd-db.h
··· 4 4 #ifndef __QCOM_COMMAND_DB_H__ 5 5 #define __QCOM_COMMAND_DB_H__ 6 6 7 + #include <linux/err.h> 7 8 8 9 enum cmd_db_hw_type { 9 10 CMD_DB_HW_INVALID = 0,
+2 -2
kernel/cpu_pm.c
··· 80 80 */ 81 81 int cpu_pm_enter(void) 82 82 { 83 - int nr_calls; 83 + int nr_calls = 0; 84 84 int ret = 0; 85 85 86 86 ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); ··· 131 131 */ 132 132 int cpu_cluster_pm_enter(void) 133 133 { 134 - int nr_calls; 134 + int nr_calls = 0; 135 135 int ret = 0; 136 136 137 137 ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);