Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers: qcom: rpmh: cache sleep/wake state requests

Active state requests are sent immediately to the RSC controller, while
sleep and wake state requests are cached in this driver to avoid taxing
the RSC controller repeatedly. The cached values will be sent to the
controller when rpmh_flush() is called.

Generally, flushing is a system PM activity and may be called from the
system PM drivers when the system is entering suspend or deeper sleep
modes during cpuidle.

Also allow invalidating the cached requests, so they may be
re-populated.

Signed-off-by: Lina Iyer <ilina@codeaurora.org>
[rplsssn: remove unneeded semicolon, address line over 80chars error]
Signed-off-by: Raju P.L.S.S.S.N <rplsssn@codeaurora.org>
Reviewed-by: Evan Green <evgreen@chromium.org>
Reviewed-by: Matthias Kaehlcke <mka@chromium.org>
Signed-off-by: Andy Gross <andy.gross@linaro.org>

Authored by Lina Iyer and committed by Andy Gross
(commit 600513df, parent 9a3afcfb)

+216 -7
+6 -2
drivers/soc/qcom/rpmh-internal.h
··· 66 66 /** 67 67 * struct rpmh_ctrlr: our representation of the controller 68 68 * 69 - * @drv: the controller instance 69 + * @cache: the list of cached requests 70 + * @cache_lock: synchronize access to the cache data 71 + * @dirty: was the cache updated since flush 70 72 */ 71 73 struct rpmh_ctrlr { 72 - struct rsc_drv *drv; 74 + struct list_head cache; 75 + spinlock_t cache_lock; 76 + bool dirty; 73 77 }; 74 78 75 79 /**
+3
drivers/soc/qcom/rpmh-rsc.c
··· 634 634 /* Enable the active TCS to send requests immediately */ 635 635 write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask); 636 636 637 + spin_lock_init(&drv->client.cache_lock); 638 + INIT_LIST_HEAD(&drv->client.cache); 639 + 637 640 dev_set_drvdata(&pdev->dev, drv); 638 641 639 642 return devm_of_platform_populate(&pdev->dev);
+196 -5
drivers/soc/qcom/rpmh.c
··· 8 8 #include <linux/interrupt.h> 9 9 #include <linux/jiffies.h> 10 10 #include <linux/kernel.h> 11 + #include <linux/list.h> 11 12 #include <linux/module.h> 12 13 #include <linux/of.h> 13 14 #include <linux/platform_device.h> 14 15 #include <linux/slab.h> 16 + #include <linux/spinlock.h> 15 17 #include <linux/types.h> 16 18 #include <linux/wait.h> 17 19 ··· 37 35 } 38 36 39 37 #define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client) 38 + 39 + /** 40 + * struct cache_req: the request object for caching 41 + * 42 + * @addr: the address of the resource 43 + * @sleep_val: the sleep vote 44 + * @wake_val: the wake vote 45 + * @list: linked list obj 46 + */ 47 + struct cache_req { 48 + u32 addr; 49 + u32 sleep_val; 50 + u32 wake_val; 51 + struct list_head list; 52 + }; 40 53 41 54 static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev) 42 55 { ··· 77 60 complete(compl); 78 61 } 79 62 63 + static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr) 64 + { 65 + struct cache_req *p, *req = NULL; 66 + 67 + list_for_each_entry(p, &ctrlr->cache, list) { 68 + if (p->addr == addr) { 69 + req = p; 70 + break; 71 + } 72 + } 73 + 74 + return req; 75 + } 76 + 77 + static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, 78 + enum rpmh_state state, 79 + struct tcs_cmd *cmd) 80 + { 81 + struct cache_req *req; 82 + unsigned long flags; 83 + 84 + spin_lock_irqsave(&ctrlr->cache_lock, flags); 85 + req = __find_req(ctrlr, cmd->addr); 86 + if (req) 87 + goto existing; 88 + 89 + req = kzalloc(sizeof(*req), GFP_ATOMIC); 90 + if (!req) { 91 + req = ERR_PTR(-ENOMEM); 92 + goto unlock; 93 + } 94 + 95 + req->addr = cmd->addr; 96 + req->sleep_val = req->wake_val = UINT_MAX; 97 + INIT_LIST_HEAD(&req->list); 98 + list_add_tail(&req->list, &ctrlr->cache); 99 + 100 + existing: 101 + switch (state) { 102 + case RPMH_ACTIVE_ONLY_STATE: 103 + if (req->sleep_val != UINT_MAX) 104 + req->wake_val = cmd->data; 105 + break; 106 + case 
RPMH_WAKE_ONLY_STATE: 107 + req->wake_val = cmd->data; 108 + break; 109 + case RPMH_SLEEP_STATE: 110 + req->sleep_val = cmd->data; 111 + break; 112 + default: 113 + break; 114 + } 115 + 116 + ctrlr->dirty = true; 117 + unlock: 118 + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); 119 + 120 + return req; 121 + } 122 + 80 123 /** 81 - * __rpmh_write: send the RPMH request 124 + * __rpmh_write: Cache and send the RPMH request 82 125 * 83 126 * @dev: The device making the request 84 127 * @state: Active/Sleep request type 85 128 * @rpm_msg: The data that needs to be sent (cmds). 129 + * 130 + * Cache the RPMH request and send if the state is ACTIVE_ONLY. 131 + * SLEEP/WAKE_ONLY requests are not sent to the controller at 132 + * this time. Use rpmh_flush() to send them to the controller. 86 133 */ 87 134 static int __rpmh_write(const struct device *dev, enum rpmh_state state, 88 135 struct rpmh_request *rpm_msg) 89 136 { 90 137 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); 138 + int ret = -EINVAL; 139 + struct cache_req *req; 140 + int i; 91 141 92 142 rpm_msg->msg.state = state; 93 143 94 - if (state != RPMH_ACTIVE_ONLY_STATE) 95 - return -EINVAL; 144 + /* Cache the request in our store and link the payload */ 145 + for (i = 0; i < rpm_msg->msg.num_cmds; i++) { 146 + req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]); 147 + if (IS_ERR(req)) 148 + return PTR_ERR(req); 149 + } 96 150 97 - WARN_ON(irqs_disabled()); 151 + rpm_msg->msg.state = state; 98 152 99 - return rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); 153 + if (state == RPMH_ACTIVE_ONLY_STATE) { 154 + WARN_ON(irqs_disabled()); 155 + ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); 156 + } else { 157 + ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), 158 + &rpm_msg->msg); 159 + /* Clean up our call by spoofing tx_done */ 160 + rpmh_tx_done(&rpm_msg->msg, ret); 161 + } 162 + 163 + return ret; 100 164 } 101 165 102 166 /** ··· 212 114 return (ret > 0) ? 
0 : -ETIMEDOUT; 213 115 } 214 116 EXPORT_SYMBOL(rpmh_write); 117 + 118 + static int is_req_valid(struct cache_req *req) 119 + { 120 + return (req->sleep_val != UINT_MAX && 121 + req->wake_val != UINT_MAX && 122 + req->sleep_val != req->wake_val); 123 + } 124 + 125 + static int send_single(const struct device *dev, enum rpmh_state state, 126 + u32 addr, u32 data) 127 + { 128 + DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg); 129 + struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); 130 + 131 + /* Wake sets are always complete and sleep sets are not */ 132 + rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE); 133 + rpm_msg.cmd[0].addr = addr; 134 + rpm_msg.cmd[0].data = data; 135 + rpm_msg.msg.num_cmds = 1; 136 + 137 + return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg); 138 + } 139 + 140 + /** 141 + * rpmh_flush: Flushes the buffered active and sleep sets to TCS 142 + * 143 + * @dev: The device making the request 144 + * 145 + * Return: -EBUSY if the controller is busy, probably waiting on a response 146 + * to a RPMH request sent earlier. 147 + * 148 + * This function is always called from the sleep code from the last CPU 149 + * that is powering down the entire system. Since no other RPMH API would be 150 + * executing at this time, it is safe to run lockless. 151 + */ 152 + int rpmh_flush(const struct device *dev) 153 + { 154 + struct cache_req *p; 155 + struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); 156 + int ret; 157 + 158 + if (!ctrlr->dirty) { 159 + pr_debug("Skipping flush, TCS has latest data.\n"); 160 + return 0; 161 + } 162 + 163 + /* 164 + * Nobody else should be calling this function other than system PM, 165 + * hence we can run without locks. 
166 + */ 167 + list_for_each_entry(p, &ctrlr->cache, list) { 168 + if (!is_req_valid(p)) { 169 + pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x", 170 + __func__, p->addr, p->sleep_val, p->wake_val); 171 + continue; 172 + } 173 + ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val); 174 + if (ret) 175 + return ret; 176 + ret = send_single(dev, RPMH_WAKE_ONLY_STATE, 177 + p->addr, p->wake_val); 178 + if (ret) 179 + return ret; 180 + } 181 + 182 + ctrlr->dirty = false; 183 + 184 + return 0; 185 + } 186 + EXPORT_SYMBOL(rpmh_flush); 187 + 188 + /** 189 + * rpmh_invalidate: Invalidate all sleep and active sets 190 + * sets. 191 + * 192 + * @dev: The device making the request 193 + * 194 + * Invalidate the sleep and active values in the TCS blocks. 195 + */ 196 + int rpmh_invalidate(const struct device *dev) 197 + { 198 + struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); 199 + int ret; 200 + 201 + ctrlr->dirty = true; 202 + 203 + do { 204 + ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); 205 + } while (ret == -EAGAIN); 206 + 207 + return ret; 208 + } 209 + EXPORT_SYMBOL(rpmh_invalidate);
+11
include/soc/qcom/rpmh.h
··· 14 14 int rpmh_write(const struct device *dev, enum rpmh_state state, 15 15 const struct tcs_cmd *cmd, u32 n); 16 16 17 + int rpmh_flush(const struct device *dev); 18 + 19 + int rpmh_invalidate(const struct device *dev); 20 + 17 21 #else 18 22 19 23 static inline int rpmh_write(const struct device *dev, enum rpmh_state state, 20 24 const struct tcs_cmd *cmd, u32 n) 25 + { return -ENODEV; } 26 + 27 + 28 + static inline int rpmh_flush(const struct device *dev) 29 + { return -ENODEV; } 30 + 31 + static inline int rpmh_invalidate(const struct device *dev) 21 32 { return -ENODEV; } 22 33 23 34 #endif /* CONFIG_QCOM_RPMH */