Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

firmware: qcom: scm: Add wait-queue handling logic

When the firmware (FW) supports multiple requests per VM, multiple requests
from the same/different VM can reach the firmware at the same time. Since
the firmware currently being used has limited resources, it guards them
with a resource lock and puts requests on a wait-queue internally and
signals to HLOS that it is doing so. It does this by returning a new return
value in addition to success or error: SCM_WAITQ_SLEEP. A sleeping SCM call
can be woken up by an interrupt that the FW raises.

1) SCM_WAITQ_SLEEP:

When an SCM call receives this return value instead of success
or error, FW has placed this call on a wait-queue and has signalled
HLOS to put it to non-interruptible sleep.

Along with this return value, FW also passes to HLOS `wq_ctx` -
a unique number (UID) identifying the wait-queue that it has put
the call on, internally. This is to help HLOS with its own
bookkeeping to wake this sleeping call later.

Additionally, FW also passes to HLOS `smc_call_ctx` - a UID
identifying the SCM call thus being put to sleep. This is also
for HLOS' bookkeeping to wake this call up later.

These two additional values are passed via the a1 and a2
registers.

N.B.: The "ctx" in the above UID names = "context".

The handshake mechanism that HLOS uses to talk to FW about wait-queue
operations involves two new SMC calls.

1) get_wq_ctx():

Arguments: None
Returns: wq_ctx, flags, more_pending

Get the wait-queue context, and wake up either one or all of the
sleeping SCM calls associated with that wait-queue.

Additionally, repeat this if there are more wait-queues that are
ready to have their requests woken up (`more_pending`).

2) wq_resume(smc_call_ctx):

Arguments: smc_call_ctx

HLOS needs to issue this in response to receiving an
IRQ, passing back to FW the same smc_call_ctx that FW
earlier passed to HLOS along with the SCM_WAITQ_SLEEP
return value.

(The mechanism to wake a SMC call back up is described in detail below)

VM_1 VM_2 Firmware
│ │ │
│ │ │
│ │ │
│ │ │
│ REQUEST_1 │ │
├────────────────────────┼─────────────────────────────────┤
│ │ │
│ │ ┌──┼──┐
│ │ │ │ │
│ │ REQUEST_2 │ │ │
│ ├──────────────────────────────┼──┤ │
│ │ │ │ │Resource
│ │ │ │ │is busy
│ │ {WQ_SLEEP} │ │ │
│ │◄─────────────────────────────┼──┤ │
│ │ wq_ctx, smc_call_ctx │ │ │
│ │ └──┼──┘
│ REQUEST_1 COMPLETE │ │
│◄───────────────────────┼─────────────────────────────────┤
│ │ │
│ │ IRQ │
│ │◄─-------------------------------│
│ │ │
│ │ get_wq_ctx() │
│ ├────────────────────────────────►│
│ │ │
│ │ │
│ │◄────────────────────────────────┤
│ │ wq_ctx, flags, and │
│ │ more_pending │
│ │ │
│ │ │
│ │ wq_resume(smc_call_ctx) │
│ ├────────────────────────────────►│
│ │ │
│ │ │
│ │ REQUEST_2 COMPLETE │
│ │◄────────────────────────────────┤
│ │ │
│ │ │

Unlike get_wq_ctx(), the wq_resume() SMC call can itself return
WQ_SLEEP (these nested rounds of WQ_SLEEP are not shown in the
above diagram for the sake of simplicity). Therefore, introduce a new
do-while loop to handle multiple WQ_SLEEP return values for the same
parent SCM call.

Request Completion in the above diagram refers to either a success
return value (zero) or an error (and not SCM_WAITQ_SLEEP).

Also add the interrupt handler that wakes up a sleeping SCM call.

Signed-off-by: Guru Das Srinagesh <quic_gurus@quicinc.com>
Co-developed-by: Sibi Sankar <quic_sibis@quicinc.com>
Signed-off-by: Sibi Sankar <quic_sibis@quicinc.com>
Reviewed-by: Guru Das Srinagesh <quic_gurus@quicinc.com>
Signed-off-by: Bjorn Andersson <andersson@kernel.org>
Link: https://lore.kernel.org/r/20230113161114.22607-3-quic_sibis@quicinc.com

authored by

Guru Das Srinagesh and committed by
Bjorn Andersson
6bf32599 afb37e25

+176 -8
+79 -7
drivers/firmware/qcom_scm-smc.c
··· 52 52 } while (res->a0 == QCOM_SCM_INTERRUPTED); 53 53 } 54 54 55 - static void __scm_smc_do(const struct arm_smccc_args *smc, 56 - struct arm_smccc_res *res, bool atomic) 55 + static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx) 57 56 { 58 - int retry_count = 0; 57 + memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args)); 58 + 59 + resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, 60 + ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP, 61 + SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME)); 62 + 63 + resume->args[1] = QCOM_SCM_ARGS(1); 64 + 65 + resume->args[2] = smc_call_ctx; 66 + } 67 + 68 + int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending) 69 + { 70 + int ret; 71 + struct arm_smccc_res get_wq_res; 72 + struct arm_smccc_args get_wq_ctx = {0}; 73 + 74 + get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, 75 + ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP, 76 + SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX)); 77 + 78 + /* Guaranteed to return only success or error, no WAITQ_* */ 79 + __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res); 80 + ret = get_wq_res.a0; 81 + if (ret) 82 + return ret; 83 + 84 + *wq_ctx = get_wq_res.a1; 85 + *flags = get_wq_res.a2; 86 + *more_pending = get_wq_res.a3; 87 + 88 + return 0; 89 + } 90 + 91 + static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq, 92 + struct arm_smccc_res *res) 93 + { 94 + int ret; 95 + u32 wq_ctx, smc_call_ctx; 96 + struct arm_smccc_args resume; 97 + struct arm_smccc_args *smc = waitq; 98 + 99 + do { 100 + __scm_smc_do_quirk(smc, res); 101 + 102 + if (res->a0 == QCOM_SCM_WAITQ_SLEEP) { 103 + wq_ctx = res->a1; 104 + smc_call_ctx = res->a2; 105 + 106 + ret = qcom_scm_wait_for_wq_completion(wq_ctx); 107 + if (ret) 108 + return ret; 109 + 110 + fill_wq_resume_args(&resume, smc_call_ctx); 111 + smc = &resume; 112 + } 113 + } while (res->a0 == QCOM_SCM_WAITQ_SLEEP); 114 + 115 + return 0; 116 + } 117 + 118 + 
static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc, 119 + struct arm_smccc_res *res, bool atomic) 120 + { 121 + int ret, retry_count = 0; 59 122 60 123 if (atomic) { 61 124 __scm_smc_do_quirk(smc, res); 62 - return; 125 + return 0; 63 126 } 64 127 65 128 do { 66 129 mutex_lock(&qcom_scm_lock); 67 130 68 - __scm_smc_do_quirk(smc, res); 131 + ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res); 69 132 70 133 mutex_unlock(&qcom_scm_lock); 134 + 135 + if (ret) 136 + return ret; 71 137 72 138 if (res->a0 == QCOM_SCM_V2_EBUSY) { 73 139 if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) ··· 141 75 msleep(QCOM_SCM_EBUSY_WAIT_MS); 142 76 } 143 77 } while (res->a0 == QCOM_SCM_V2_EBUSY); 78 + 79 + return 0; 144 80 } 145 81 146 82 ··· 151 83 struct qcom_scm_res *res, bool atomic) 152 84 { 153 85 int arglen = desc->arginfo & 0xf; 154 - int i; 86 + int i, ret; 155 87 dma_addr_t args_phys = 0; 156 88 void *args_virt = NULL; 157 89 size_t alloc_len; ··· 203 135 smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; 204 136 } 205 137 206 - __scm_smc_do(&smc, &smc_res, atomic); 138 + /* ret error check follows after args_virt cleanup*/ 139 + ret = __scm_smc_do(dev, &smc, &smc_res, atomic); 207 140 208 141 if (args_virt) { 209 142 dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); 210 143 kfree(args_virt); 211 144 } 145 + 146 + if (ret) 147 + return ret; 212 148 213 149 if (res) { 214 150 res->result[0] = smc_res.a1;
+89 -1
drivers/firmware/qcom_scm.c
··· 4 4 */ 5 5 #include <linux/platform_device.h> 6 6 #include <linux/init.h> 7 + #include <linux/interrupt.h> 8 + #include <linux/completion.h> 7 9 #include <linux/cpumask.h> 8 10 #include <linux/export.h> 9 11 #include <linux/dma-mapping.h> ··· 15 13 #include <linux/qcom_scm.h> 16 14 #include <linux/of.h> 17 15 #include <linux/of_address.h> 16 + #include <linux/of_irq.h> 18 17 #include <linux/of_platform.h> 19 18 #include <linux/clk.h> 20 19 #include <linux/reset-controller.h> ··· 36 33 struct clk *iface_clk; 37 34 struct clk *bus_clk; 38 35 struct icc_path *path; 36 + struct completion waitq_comp; 39 37 struct reset_controller_dev reset; 40 38 41 39 /* control access to the interconnect path */ ··· 66 62 static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { 67 63 BIT(2), BIT(1), BIT(4), BIT(6) 68 64 }; 65 + 66 + #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) 67 + #define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) 69 68 70 69 static const char * const qcom_scm_convention_names[] = { 71 70 [SMC_CONVENTION_UNKNOWN] = "unknown", ··· 1332 1325 } 1333 1326 EXPORT_SYMBOL(qcom_scm_is_available); 1334 1327 1328 + static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx) 1329 + { 1330 + /* FW currently only supports a single wq_ctx (zero). 1331 + * TODO: Update this logic to include dynamic allocation and lookup of 1332 + * completion structs when FW supports more wq_ctx values. 
1333 + */ 1334 + if (wq_ctx != 0) { 1335 + dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n"); 1336 + return -EINVAL; 1337 + } 1338 + 1339 + return 0; 1340 + } 1341 + 1342 + int qcom_scm_wait_for_wq_completion(u32 wq_ctx) 1343 + { 1344 + int ret; 1345 + 1346 + ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); 1347 + if (ret) 1348 + return ret; 1349 + 1350 + wait_for_completion(&__scm->waitq_comp); 1351 + 1352 + return 0; 1353 + } 1354 + 1355 + static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx) 1356 + { 1357 + int ret; 1358 + 1359 + ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); 1360 + if (ret) 1361 + return ret; 1362 + 1363 + complete(&__scm->waitq_comp); 1364 + 1365 + return 0; 1366 + } 1367 + 1368 + static irqreturn_t qcom_scm_irq_handler(int irq, void *data) 1369 + { 1370 + int ret; 1371 + struct qcom_scm *scm = data; 1372 + u32 wq_ctx, flags, more_pending = 0; 1373 + 1374 + do { 1375 + ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending); 1376 + if (ret) { 1377 + dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret); 1378 + goto out; 1379 + } 1380 + 1381 + if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE && 1382 + flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) { 1383 + dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags); 1384 + goto out; 1385 + } 1386 + 1387 + ret = qcom_scm_waitq_wakeup(scm, wq_ctx); 1388 + if (ret) 1389 + goto out; 1390 + } while (more_pending); 1391 + 1392 + out: 1393 + return IRQ_HANDLED; 1394 + } 1395 + 1335 1396 static int qcom_scm_probe(struct platform_device *pdev) 1336 1397 { 1337 1398 struct qcom_scm *scm; 1338 1399 unsigned long clks; 1339 - int ret; 1400 + int irq, ret; 1340 1401 1341 1402 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); 1342 1403 if (!scm) ··· 1476 1401 1477 1402 __scm = scm; 1478 1403 __scm->dev = &pdev->dev; 1404 + 1405 + init_completion(&__scm->waitq_comp); 1406 + 1407 + irq = platform_get_irq(pdev, 0); 1408 + if (irq < 0) { 1409 + if (irq != -ENXIO) 1410 + return 
irq; 1411 + } else { 1412 + ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler, 1413 + IRQF_ONESHOT, "qcom-scm", __scm); 1414 + if (ret < 0) 1415 + return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n"); 1416 + } 1479 1417 1480 1418 __get_convention(); 1481 1419
+8
drivers/firmware/qcom_scm.h
··· 60 60 u64 result[MAX_QCOM_SCM_RETS]; 61 61 }; 62 62 63 + int qcom_scm_wait_for_wq_completion(u32 wq_ctx); 64 + int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending); 65 + 63 66 #define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF)) 64 67 extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, 65 68 enum qcom_scm_convention qcom_convention, ··· 132 129 #define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03 133 130 #define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02 134 131 132 + #define QCOM_SCM_SVC_WAITQ 0x24 133 + #define QCOM_SCM_WAITQ_RESUME 0x02 134 + #define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03 135 + 135 136 /* common error codes */ 136 137 #define QCOM_SCM_V2_EBUSY -12 137 138 #define QCOM_SCM_ENOMEM -5 ··· 144 137 #define QCOM_SCM_EINVAL_ARG -2 145 138 #define QCOM_SCM_ERROR -1 146 139 #define QCOM_SCM_INTERRUPTED 1 140 + #define QCOM_SCM_WAITQ_SLEEP 2 147 141 148 142 static inline int qcom_scm_remap_error(int err) 149 143 {