Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers: qcom: rpmh: allow requests to be sent asynchronously

Platform drivers that want to send a request but do not want to block
until the RPMH request completes now have a new API -
rpmh_write_async().

The API allocates memory, sends the request, and returns control
back to the platform driver. The tx_done callback from the controller is
handled in the context of the controller's thread and frees the
allocated memory. This API allows RPMH requests from atomic contexts as
well.

Signed-off-by: Lina Iyer <ilina@codeaurora.org>
Signed-off-by: Raju P.L.S.S.S.N <rplsssn@codeaurora.org>
Signed-off-by: Andy Gross <andy.gross@linaro.org>

authored by

Lina Iyer and committed by
Andy Gross
564b5e24 600513df

+60
+2
drivers/soc/qcom/rpmh-internal.h
··· 54 54 * @completion: triggered when request is done 55 55 * @dev: the device making the request 56 56 * @err: err return from the controller 57 + * @needs_free: check to free dynamically allocated request object 57 58 */ 58 59 struct rpmh_request { 59 60 struct tcs_request msg; ··· 62 61 struct completion *completion; 63 62 const struct device *dev; 64 63 int err; 64 + bool needs_free; 65 65 }; 66 66 67 67 /**
+51
drivers/soc/qcom/rpmh.c
··· 34 34 .cmd = { { 0 } }, \ 35 35 .completion = q, \ 36 36 .dev = dev, \ 37 + .needs_free = false, \ 37 38 } 38 39 39 40 #define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client) ··· 76 75 /* Signal the blocking thread we are done */ 77 76 if (compl) 78 77 complete(compl); 78 + 79 + if (rpm_msg->needs_free) 80 + kfree(rpm_msg); 79 81 } 80 82 81 83 static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr) ··· 183 179 184 180 return ret; 185 181 } 182 + 183 + static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state, 184 + const struct tcs_cmd *cmd, u32 n) 185 + { 186 + if (!cmd || !n || n > MAX_RPMH_PAYLOAD) 187 + return -EINVAL; 188 + 189 + memcpy(req->cmd, cmd, n * sizeof(*cmd)); 190 + 191 + req->msg.state = state; 192 + req->msg.cmds = req->cmd; 193 + req->msg.num_cmds = n; 194 + 195 + return 0; 196 + } 197 + 198 + /** 199 + * rpmh_write_async: Write a set of RPMH commands 200 + * 201 + * @dev: The device making the request 202 + * @state: Active/sleep set 203 + * @cmd: The payload data 204 + * @n: The number of elements in payload 205 + * 206 + * Write a set of RPMH commands, the order of commands is maintained 207 + * and will be sent as a single shot. 208 + */ 209 + int rpmh_write_async(const struct device *dev, enum rpmh_state state, 210 + const struct tcs_cmd *cmd, u32 n) 211 + { 212 + struct rpmh_request *rpm_msg; 213 + int ret; 214 + 215 + rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC); 216 + if (!rpm_msg) 217 + return -ENOMEM; 218 + rpm_msg->needs_free = true; 219 + 220 + ret = __fill_rpmh_msg(rpm_msg, state, cmd, n); 221 + if (ret) { 222 + kfree(rpm_msg); 223 + return ret; 224 + } 225 + 226 + return __rpmh_write(dev, state, rpm_msg); 227 + } 228 + EXPORT_SYMBOL(rpmh_write_async); 186 229 187 230 /** 188 231 * rpmh_write: Write a set of RPMH commands and block until response
+7
include/soc/qcom/rpmh.h
··· 14 14 int rpmh_write(const struct device *dev, enum rpmh_state state, 15 15 const struct tcs_cmd *cmd, u32 n); 16 16 17 + int rpmh_write_async(const struct device *dev, enum rpmh_state state, 18 + const struct tcs_cmd *cmd, u32 n); 19 + 17 20 int rpmh_flush(const struct device *dev); 18 21 19 22 int rpmh_invalidate(const struct device *dev); ··· 27 24 const struct tcs_cmd *cmd, u32 n) 28 25 { return -ENODEV; } 29 26 27 + static inline int rpmh_write_async(const struct device *dev, 28 + enum rpmh_state state, 29 + const struct tcs_cmd *cmd, u32 n) 30 + { return -ENODEV; } 30 31 31 32 static inline int rpmh_flush(const struct device *dev) 32 33 { return -ENODEV; }