Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'qcom-drivers-for-6.11' of https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux into soc/drivers

Qualcomm driver updates for v6.11

Support for Shared Memory (shm) Bridge is added, which provides a
stricter interface for handling of buffers passed to TrustZone.

The X1Elite platform is added to the uefisecapp allow list, to instantiate
the efivars implementation.

A new in-kernel implementation of the pd-mapper (or servreg) service is
introduced, to replace the userspace dependency for USB Type-C and
battery management.

Support for sharing interrupts across multiple bwmon instances is added,
and a refcount imbalance issue is corrected.

The LLCC support for recent platforms is corrected, and SA8775P support
is added.

A new interface is added to SMEM, to expose "feature codes". One example
of the usecase for this is to indicate to the GPU driver which
frequencies are available on the given device.

The interrupt consumer and provider side of SMP2P is updated to provide
more useful names in interrupt stats.

Support for using the mailbox binding and driver for outgoing IPC
interrupt in the SMSM driver is introduced.

The socinfo driver learns about SDM670 and IPQ5321, as well as gets some
updates to the X1E PMICs.

pmic_glink is bumped to now support managing 3 USB Type-C ports.

* tag 'qcom-drivers-for-6.11' of https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux: (48 commits)
soc: qcom: smp2p: Use devname for interrupt descriptions
soc: qcom: smsm: Add missing mailbox dependency to Kconfig
soc: qcom: add missing pd-mapper dependencies
soc: qcom: icc-bwmon: Allow for interrupts to be shared across instances
dt-bindings: interconnect: qcom,msm8998-bwmon: Add X1E80100 BWMON instances
dt-bindings: interconnect: qcom,msm8998-bwmon: Remove opp-table from the required list
firmware: qcom: tzmem: export devm_qcom_tzmem_pool_new()
soc: qcom: add pd-mapper implementation
soc: qcom: pdr: extract PDR message marshalling data
soc: qcom: pdr: fix parsing of domains lists
soc: qcom: pdr: protect locator_addr with the main mutex
firmware: qcom: scm: clarify the comment in qcom_scm_pas_init_image()
firmware: qcom: scm: add support for SHM bridge memory carveout
firmware: qcom: tzmem: enable SHM Bridge support
firmware: qcom: scm: add support for SHM bridge operations
firmware: qcom: qseecom: convert to using the TZ allocator
firmware: qcom: scm: make qcom_scm_qseecom_app_get_id() use the TZ allocator
firmware: qcom: scm: make qcom_scm_lmh_dcvsh() use the TZ allocator
firmware: qcom: scm: make qcom_scm_ice_set_key() use the TZ allocator
firmware: qcom: scm: make qcom_scm_assign_mem() use the TZ allocator
...

Link: https://lore.kernel.org/r/20240705034410.13968-1-andersson@kernel.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+2267 -572
+55 -2
Documentation/devicetree/bindings/cache/qcom,llcc.yaml
··· 21 21 compatible: 22 22 enum: 23 23 - qcom,qdu1000-llcc 24 + - qcom,sa8775p-llcc 24 25 - qcom,sc7180-llcc 25 26 - qcom,sc7280-llcc 26 27 - qcom,sc8180x-llcc ··· 86 85 compatible: 87 86 contains: 88 87 enum: 88 + - qcom,sa8775p-llcc 89 + then: 90 + properties: 91 + reg: 92 + items: 93 + - description: LLCC0 base register region 94 + - description: LLCC1 base register region 95 + - description: LLCC2 base register region 96 + - description: LLCC3 base register region 97 + - description: LLCC4 base register region 98 + - description: LLCC5 base register region 99 + - description: LLCC broadcast base register region 100 + reg-names: 101 + items: 102 + - const: llcc0_base 103 + - const: llcc1_base 104 + - const: llcc2_base 105 + - const: llcc3_base 106 + - const: llcc4_base 107 + - const: llcc5_base 108 + - const: llcc_broadcast_base 109 + 110 + - if: 111 + properties: 112 + compatible: 113 + contains: 114 + enum: 89 115 - qcom,sc7280-llcc 90 116 then: 91 117 properties: ··· 169 141 - qcom,sm8150-llcc 170 142 - qcom,sm8250-llcc 171 143 - qcom,sm8350-llcc 172 - - qcom,sm8450-llcc 173 - - qcom,sm8550-llcc 174 144 then: 175 145 properties: 176 146 reg: ··· 185 159 - const: llcc2_base 186 160 - const: llcc3_base 187 161 - const: llcc_broadcast_base 162 + 163 + - if: 164 + properties: 165 + compatible: 166 + contains: 167 + enum: 168 + - qcom,sm8450-llcc 169 + - qcom,sm8550-llcc 170 + - qcom,sm8650-llcc 171 + then: 172 + properties: 173 + reg: 174 + items: 175 + - description: LLCC0 base register region 176 + - description: LLCC1 base register region 177 + - description: LLCC2 base register region 178 + - description: LLCC3 base register region 179 + - description: LLCC broadcast OR register region 180 + - description: LLCC broadcast AND register region 181 + reg-names: 182 + items: 183 + - const: llcc0_base 184 + - const: llcc1_base 185 + - const: llcc2_base 186 + - const: llcc3_base 187 + - const: llcc_broadcast_base 188 + - const: llcc_broadcast_and_base 188 189 189 
190 additionalProperties: false 190 191
+15
Documentation/devicetree/bindings/firmware/qcom,scm.yaml
··· 93 93 protocol to handle sleeping SCM calls. 94 94 maxItems: 1 95 95 96 + memory-region: 97 + description: 98 + Phandle to the memory region reserved for the shared memory bridge to TZ. 99 + maxItems: 1 100 + 96 101 qcom,sdi-enabled: 97 102 description: 98 103 Indicates that the SDI (Secure Debug Image) has been enabled by TZ ··· 198 193 then: 199 194 properties: 200 195 interrupts: false 196 + - if: 197 + not: 198 + properties: 199 + compatible: 200 + contains: 201 + enum: 202 + - qcom,scm-sa8775p 203 + then: 204 + properties: 205 + memory-region: false 201 206 202 207 required: 203 208 - compatible
+2 -1
Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
··· 35 35 - qcom,sm8250-cpu-bwmon 36 36 - qcom,sm8550-cpu-bwmon 37 37 - qcom,sm8650-cpu-bwmon 38 + - qcom,x1e80100-cpu-bwmon 38 39 - const: qcom,sdm845-bwmon # BWMON v4, unified register space 39 40 - items: 40 41 - enum: ··· 45 44 - qcom,sm8250-llcc-bwmon 46 45 - qcom,sm8550-llcc-bwmon 47 46 - qcom,sm8650-llcc-bwmon 47 + - qcom,x1e80100-llcc-bwmon 48 48 - const: qcom,sc7280-llcc-bwmon 49 49 - const: qcom,sc7280-llcc-bwmon # BWMON v5 50 50 - const: qcom,sdm845-llcc-bwmon # BWMON v5 ··· 74 72 - interconnects 75 73 - interrupts 76 74 - operating-points-v2 77 - - opp-table 78 75 - reg 79 76 80 77 additionalProperties: false
+1
Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
··· 31 31 - qcom,sc7280-aoss-qmp 32 32 - qcom,sc8180x-aoss-qmp 33 33 - qcom,sc8280xp-aoss-qmp 34 + - qcom,sdx75-aoss-qmp 34 35 - qcom,sdm845-aoss-qmp 35 36 - qcom,sm6350-aoss-qmp 36 37 - qcom,sm8150-aoss-qmp
+2 -1
Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.yaml
··· 41 41 description: 42 42 Three entries specifying the outgoing ipc bit used for signaling the 43 43 remote end of the smp2p edge. 44 + deprecated: true 44 45 45 46 qcom,local-pid: 46 47 $ref: /schemas/types.yaml#/definitions/uint32 ··· 129 128 compatible = "qcom,smp2p"; 130 129 qcom,smem = <431>, <451>; 131 130 interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>; 132 - qcom,ipc = <&apcs 8 18>; 131 + mboxes = <&apcs 18>; 133 132 qcom,local-pid = <0>; 134 133 qcom,remote-pid = <4>; 135 134
+21 -9
Documentation/devicetree/bindings/soc/qcom/qcom,smsm.yaml
··· 33 33 specifier of the column in the subscription matrix representing the local 34 34 processor. 35 35 36 + mboxes: 37 + minItems: 1 38 + maxItems: 5 39 + description: 40 + Reference to the mailbox representing the outgoing doorbell in APCS for 41 + this client. Each entry represents the N:th remote processor by index 42 + (0-indexed). 43 + 36 44 '#size-cells': 37 45 const: 0 38 46 ··· 55 47 description: 56 48 Three entries specifying the outgoing ipc bit used for signaling the N:th 57 49 remote processor. 50 + deprecated: true 58 51 59 52 "@[0-9a-f]$": 60 53 type: object ··· 107 98 - '#address-cells' 108 99 - '#size-cells' 109 100 110 - anyOf: 101 + oneOf: 111 102 - required: 112 - - qcom,ipc-1 113 - - required: 114 - - qcom,ipc-2 115 - - required: 116 - - qcom,ipc-3 117 - - required: 118 - - qcom,ipc-4 103 + - mboxes 104 + - anyOf: 105 + - required: 106 + - qcom,ipc-1 107 + - required: 108 + - qcom,ipc-2 109 + - required: 110 + - qcom,ipc-3 111 + - required: 112 + - qcom,ipc-4 119 113 120 114 additionalProperties: false 121 115 ··· 134 122 compatible = "qcom,smsm"; 135 123 #address-cells = <1>; 136 124 #size-cells = <0>; 137 - qcom,ipc-3 = <&apcs 8 19>; 125 + mboxes = <0>, <0>, <0>, <&apcs 19>; 138 126 139 127 apps_smsm: apps@0 { 140 128 reg = <0>;
+8
MAINTAINERS
··· 18607 18607 F: drivers/net/ethernet/qualcomm/rmnet/ 18608 18608 F: include/linux/if_rmnet.h 18609 18609 18610 + QUALCOMM TRUST ZONE MEMORY ALLOCATOR 18611 + M: Bartosz Golaszewski <bartosz.golaszewski@linaro.org> 18612 + L: linux-arm-msm@vger.kernel.org 18613 + S: Maintained 18614 + F: drivers/firmware/qcom/qcom_tzmem.c 18615 + F: drivers/firmware/qcom/qcom_tzmem.h 18616 + F: include/linux/firmware/qcom/qcom_tzmem.h 18617 + 18610 18618 QUALCOMM TSENS THERMAL DRIVER 18611 18619 M: Amit Kucheria <amitk@kernel.org> 18612 18620 M: Thara Gopinath <thara.gopinath@gmail.com>
+1
drivers/cpufreq/qcom-cpufreq-nvmem.c
··· 191 191 case QCOM_ID_IPQ5312: 192 192 case QCOM_ID_IPQ5302: 193 193 case QCOM_ID_IPQ5300: 194 + case QCOM_ID_IPQ5321: 194 195 case QCOM_ID_IPQ9514: 195 196 case QCOM_ID_IPQ9550: 196 197 case QCOM_ID_IPQ9554:
+31
drivers/firmware/qcom/Kconfig
··· 7 7 menu "Qualcomm firmware drivers" 8 8 9 9 config QCOM_SCM 10 + select QCOM_TZMEM 10 11 tristate 12 + 13 + config QCOM_TZMEM 14 + tristate 15 + select GENERIC_ALLOCATOR 16 + 17 + choice 18 + prompt "TrustZone interface memory allocator mode" 19 + default QCOM_TZMEM_MODE_GENERIC 20 + help 21 + Selects the mode of the memory allocator providing memory buffers of 22 + suitable format for sharing with the TrustZone. If in doubt, select 23 + 'Generic'. 24 + 25 + config QCOM_TZMEM_MODE_GENERIC 26 + bool "Generic" 27 + help 28 + Use the generic allocator mode. The memory is page-aligned, non-cachable 29 + and physically contiguous. 30 + 31 + config QCOM_TZMEM_MODE_SHMBRIDGE 32 + bool "SHM Bridge" 33 + help 34 + Use Qualcomm Shared Memory Bridge. The memory has the same alignment as 35 + in the 'Generic' allocator but is also explicitly marked as an SHM Bridge 36 + buffer. 37 + 38 + With this selected, all buffers passed to the TrustZone must be allocated 39 + using the TZMem allocator or else the TrustZone will refuse to use them. 40 + 41 + endchoice 11 42 12 43 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT 13 44 bool "Qualcomm download mode enabled by default"
+1
drivers/firmware/qcom/Makefile
··· 5 5 6 6 obj-$(CONFIG_QCOM_SCM) += qcom-scm.o 7 7 qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o 8 + obj-$(CONFIG_QCOM_TZMEM) += qcom_tzmem.o 8 9 obj-$(CONFIG_QCOM_QSEECOM) += qcom_qseecom.o 9 10 obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP) += qcom_qseecom_uefisecapp.o
+98 -158
drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
··· 13 13 #include <linux/mutex.h> 14 14 #include <linux/of.h> 15 15 #include <linux/platform_device.h> 16 + #include <linux/sizes.h> 16 17 #include <linux/slab.h> 17 18 #include <linux/types.h> 18 19 #include <linux/ucs2_string.h> 19 20 20 21 #include <linux/firmware/qcom/qcom_qseecom.h> 22 + #include <linux/firmware/qcom/qcom_scm.h> 23 + #include <linux/firmware/qcom/qcom_tzmem.h> 21 24 22 25 /* -- Qualcomm "uefisecapp" interface definitions. -------------------------- */ 23 26 ··· 275 272 struct qcuefi_client { 276 273 struct qseecom_client *client; 277 274 struct efivars efivars; 275 + struct qcom_tzmem_pool *mempool; 278 276 }; 279 277 280 278 static struct device *qcuefi_dev(struct qcuefi_client *qcuefi) ··· 297 293 { 298 294 struct qsee_req_uefi_get_variable *req_data; 299 295 struct qsee_rsp_uefi_get_variable *rsp_data; 296 + void *cmd_buf __free(qcom_tzmem) = NULL; 300 297 unsigned long buffer_size = *data_size; 301 - efi_status_t efi_status = EFI_SUCCESS; 302 298 unsigned long name_length; 303 - dma_addr_t cmd_buf_dma; 299 + efi_status_t efi_status; 304 300 size_t cmd_buf_size; 305 - void *cmd_buf; 306 301 size_t guid_offs; 307 302 size_t name_offs; 308 303 size_t req_size; ··· 336 333 __reqdata_offs(rsp_size, &rsp_offs) 337 334 ); 338 335 339 - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); 340 - if (!cmd_buf) { 341 - efi_status = EFI_OUT_OF_RESOURCES; 342 - goto out; 343 - } 336 + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); 337 + if (!cmd_buf) 338 + return EFI_OUT_OF_RESOURCES; 344 339 345 340 req_data = cmd_buf + req_offs; 346 341 rsp_data = cmd_buf + rsp_offs; ··· 352 351 req_data->length = req_size; 353 352 354 353 status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); 355 - if (status < 0) { 356 - efi_status = EFI_INVALID_PARAMETER; 357 - goto out_free; 358 - } 354 + if (status < 0) 355 + return EFI_INVALID_PARAMETER; 359 356 360 357 memcpy(((void 
*)req_data) + req_data->guid_offset, guid, req_data->guid_size); 361 358 362 359 status = qcom_qseecom_app_send(qcuefi->client, 363 - cmd_buf_dma + req_offs, req_size, 364 - cmd_buf_dma + rsp_offs, rsp_size); 365 - if (status) { 366 - efi_status = EFI_DEVICE_ERROR; 367 - goto out_free; 368 - } 360 + cmd_buf + req_offs, req_size, 361 + cmd_buf + rsp_offs, rsp_size); 362 + if (status) 363 + return EFI_DEVICE_ERROR; 369 364 370 - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) { 371 - efi_status = EFI_DEVICE_ERROR; 372 - goto out_free; 373 - } 365 + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) 366 + return EFI_DEVICE_ERROR; 374 367 375 - if (rsp_data->length < sizeof(*rsp_data)) { 376 - efi_status = EFI_DEVICE_ERROR; 377 - goto out_free; 378 - } 368 + if (rsp_data->length < sizeof(*rsp_data)) 369 + return EFI_DEVICE_ERROR; 379 370 380 371 if (rsp_data->status) { 381 372 dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", ··· 381 388 *attributes = rsp_data->attributes; 382 389 } 383 390 384 - goto out_free; 391 + return qsee_uefi_status_to_efi(rsp_data->status); 385 392 } 386 393 387 - if (rsp_data->length > rsp_size) { 388 - efi_status = EFI_DEVICE_ERROR; 389 - goto out_free; 390 - } 394 + if (rsp_data->length > rsp_size) 395 + return EFI_DEVICE_ERROR; 391 396 392 - if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) { 393 - efi_status = EFI_DEVICE_ERROR; 394 - goto out_free; 395 - } 397 + if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) 398 + return EFI_DEVICE_ERROR; 396 399 397 400 /* 398 401 * Note: We need to set attributes and data size even if the buffer is ··· 411 422 if (attributes) 412 423 *attributes = rsp_data->attributes; 413 424 414 - if (buffer_size == 0 && !data) { 415 - efi_status = EFI_SUCCESS; 416 - goto out_free; 417 - } 425 + if (buffer_size == 0 && !data) 426 + return EFI_SUCCESS; 418 427 419 - if (buffer_size < rsp_data->data_size) { 420 - efi_status = EFI_BUFFER_TOO_SMALL; 421 - goto 
out_free; 422 - } 428 + if (buffer_size < rsp_data->data_size) 429 + return EFI_BUFFER_TOO_SMALL; 423 430 424 431 memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size); 425 432 426 - out_free: 427 - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); 428 - out: 429 - return efi_status; 433 + return EFI_SUCCESS; 430 434 } 431 435 432 436 static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name, ··· 428 446 { 429 447 struct qsee_req_uefi_set_variable *req_data; 430 448 struct qsee_rsp_uefi_set_variable *rsp_data; 431 - efi_status_t efi_status = EFI_SUCCESS; 449 + void *cmd_buf __free(qcom_tzmem) = NULL; 432 450 unsigned long name_length; 433 - dma_addr_t cmd_buf_dma; 434 451 size_t cmd_buf_size; 435 - void *cmd_buf; 436 452 size_t name_offs; 437 453 size_t guid_offs; 438 454 size_t data_offs; ··· 466 486 __reqdata_offs(sizeof(*rsp_data), &rsp_offs) 467 487 ); 468 488 469 - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); 470 - if (!cmd_buf) { 471 - efi_status = EFI_OUT_OF_RESOURCES; 472 - goto out; 473 - } 489 + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); 490 + if (!cmd_buf) 491 + return EFI_OUT_OF_RESOURCES; 474 492 475 493 req_data = cmd_buf + req_offs; 476 494 rsp_data = cmd_buf + rsp_offs; ··· 484 506 req_data->length = req_size; 485 507 486 508 status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); 487 - if (status < 0) { 488 - efi_status = EFI_INVALID_PARAMETER; 489 - goto out_free; 490 - } 509 + if (status < 0) 510 + return EFI_INVALID_PARAMETER; 491 511 492 512 memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); 493 513 ··· 493 517 memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size); 494 518 495 519 status = qcom_qseecom_app_send(qcuefi->client, 496 - cmd_buf_dma + req_offs, req_size, 497 - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); 
498 - if (status) { 499 - efi_status = EFI_DEVICE_ERROR; 500 - goto out_free; 501 - } 520 + cmd_buf + req_offs, req_size, 521 + cmd_buf + rsp_offs, sizeof(*rsp_data)); 522 + if (status) 523 + return EFI_DEVICE_ERROR; 502 524 503 - if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) { 504 - efi_status = EFI_DEVICE_ERROR; 505 - goto out_free; 506 - } 525 + if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) 526 + return EFI_DEVICE_ERROR; 507 527 508 - if (rsp_data->length != sizeof(*rsp_data)) { 509 - efi_status = EFI_DEVICE_ERROR; 510 - goto out_free; 511 - } 528 + if (rsp_data->length != sizeof(*rsp_data)) 529 + return EFI_DEVICE_ERROR; 512 530 513 531 if (rsp_data->status) { 514 532 dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", 515 533 __func__, rsp_data->status); 516 - efi_status = qsee_uefi_status_to_efi(rsp_data->status); 534 + return qsee_uefi_status_to_efi(rsp_data->status); 517 535 } 518 536 519 - out_free: 520 - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); 521 - out: 522 - return efi_status; 537 + return EFI_SUCCESS; 523 538 } 524 539 525 540 static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, ··· 519 552 { 520 553 struct qsee_req_uefi_get_next_variable *req_data; 521 554 struct qsee_rsp_uefi_get_next_variable *rsp_data; 522 - efi_status_t efi_status = EFI_SUCCESS; 523 - dma_addr_t cmd_buf_dma; 555 + void *cmd_buf __free(qcom_tzmem) = NULL; 556 + efi_status_t efi_status; 524 557 size_t cmd_buf_size; 525 - void *cmd_buf; 526 558 size_t guid_offs; 527 559 size_t name_offs; 528 560 size_t req_size; ··· 553 587 __reqdata_offs(rsp_size, &rsp_offs) 554 588 ); 555 589 556 - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); 557 - if (!cmd_buf) { 558 - efi_status = EFI_OUT_OF_RESOURCES; 559 - goto out; 560 - } 590 + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); 591 + if (!cmd_buf) 592 + return EFI_OUT_OF_RESOURCES; 561 593 562 594 
req_data = cmd_buf + req_offs; 563 595 rsp_data = cmd_buf + rsp_offs; ··· 570 606 memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); 571 607 status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, 572 608 *name_size / sizeof(*name)); 573 - if (status < 0) { 574 - efi_status = EFI_INVALID_PARAMETER; 575 - goto out_free; 576 - } 609 + if (status < 0) 610 + return EFI_INVALID_PARAMETER; 577 611 578 612 status = qcom_qseecom_app_send(qcuefi->client, 579 - cmd_buf_dma + req_offs, req_size, 580 - cmd_buf_dma + rsp_offs, rsp_size); 581 - if (status) { 582 - efi_status = EFI_DEVICE_ERROR; 583 - goto out_free; 584 - } 613 + cmd_buf + req_offs, req_size, 614 + cmd_buf + rsp_offs, rsp_size); 615 + if (status) 616 + return EFI_DEVICE_ERROR; 585 617 586 - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) { 587 - efi_status = EFI_DEVICE_ERROR; 588 - goto out_free; 589 - } 618 + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) 619 + return EFI_DEVICE_ERROR; 590 620 591 - if (rsp_data->length < sizeof(*rsp_data)) { 592 - efi_status = EFI_DEVICE_ERROR; 593 - goto out_free; 594 - } 621 + if (rsp_data->length < sizeof(*rsp_data)) 622 + return EFI_DEVICE_ERROR; 595 623 596 624 if (rsp_data->status) { 597 625 dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", ··· 598 642 if (efi_status == EFI_BUFFER_TOO_SMALL) 599 643 *name_size = rsp_data->name_size; 600 644 601 - goto out_free; 645 + return efi_status; 602 646 } 603 647 604 - if (rsp_data->length > rsp_size) { 605 - efi_status = EFI_DEVICE_ERROR; 606 - goto out_free; 607 - } 648 + if (rsp_data->length > rsp_size) 649 + return EFI_DEVICE_ERROR; 608 650 609 - if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) { 610 - efi_status = EFI_DEVICE_ERROR; 611 - goto out_free; 612 - } 651 + if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) 652 + return EFI_DEVICE_ERROR; 613 653 614 - if (rsp_data->guid_offset + rsp_data->guid_size > 
rsp_data->length) { 615 - efi_status = EFI_DEVICE_ERROR; 616 - goto out_free; 617 - } 654 + if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) 655 + return EFI_DEVICE_ERROR; 618 656 619 657 if (rsp_data->name_size > *name_size) { 620 658 *name_size = rsp_data->name_size; 621 - efi_status = EFI_BUFFER_TOO_SMALL; 622 - goto out_free; 659 + return EFI_BUFFER_TOO_SMALL; 623 660 } 624 661 625 - if (rsp_data->guid_size != sizeof(*guid)) { 626 - efi_status = EFI_DEVICE_ERROR; 627 - goto out_free; 628 - } 662 + if (rsp_data->guid_size != sizeof(*guid)) 663 + return EFI_DEVICE_ERROR; 629 664 630 665 memcpy(guid, ((void *)rsp_data) + rsp_data->guid_offset, rsp_data->guid_size); 631 666 status = ucs2_strscpy(name, ((void *)rsp_data) + rsp_data->name_offset, 632 667 rsp_data->name_size / sizeof(*name)); 633 668 *name_size = rsp_data->name_size; 634 669 635 - if (status < 0) { 670 + if (status < 0) 636 671 /* 637 672 * Return EFI_DEVICE_ERROR here because the buffer size should 638 673 * have already been validated above, causing this function to 639 674 * bail with EFI_BUFFER_TOO_SMALL. 
640 675 */ 641 - efi_status = EFI_DEVICE_ERROR; 642 - } 676 + return EFI_DEVICE_ERROR; 643 677 644 - out_free: 645 - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); 646 - out: 647 - return efi_status; 678 + return EFI_SUCCESS; 648 679 } 649 680 650 681 static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, u32 attr, ··· 640 697 { 641 698 struct qsee_req_uefi_query_variable_info *req_data; 642 699 struct qsee_rsp_uefi_query_variable_info *rsp_data; 643 - efi_status_t efi_status = EFI_SUCCESS; 644 - dma_addr_t cmd_buf_dma; 700 + void *cmd_buf __free(qcom_tzmem) = NULL; 645 701 size_t cmd_buf_size; 646 - void *cmd_buf; 647 702 size_t req_offs; 648 703 size_t rsp_offs; 649 704 int status; ··· 651 710 __reqdata_offs(sizeof(*rsp_data), &rsp_offs) 652 711 ); 653 712 654 - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); 655 - if (!cmd_buf) { 656 - efi_status = EFI_OUT_OF_RESOURCES; 657 - goto out; 658 - } 713 + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); 714 + if (!cmd_buf) 715 + return EFI_OUT_OF_RESOURCES; 659 716 660 717 req_data = cmd_buf + req_offs; 661 718 rsp_data = cmd_buf + rsp_offs; ··· 663 724 req_data->length = sizeof(*req_data); 664 725 665 726 status = qcom_qseecom_app_send(qcuefi->client, 666 - cmd_buf_dma + req_offs, sizeof(*req_data), 667 - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); 668 - if (status) { 669 - efi_status = EFI_DEVICE_ERROR; 670 - goto out_free; 671 - } 727 + cmd_buf + req_offs, sizeof(*req_data), 728 + cmd_buf + rsp_offs, sizeof(*rsp_data)); 729 + if (status) 730 + return EFI_DEVICE_ERROR; 672 731 673 - if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) { 674 - efi_status = EFI_DEVICE_ERROR; 675 - goto out_free; 676 - } 732 + if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) 733 + return EFI_DEVICE_ERROR; 677 734 678 - if (rsp_data->length != sizeof(*rsp_data)) { 679 - efi_status = EFI_DEVICE_ERROR; 680 - 
goto out_free; 681 - } 735 + if (rsp_data->length != sizeof(*rsp_data)) 736 + return EFI_DEVICE_ERROR; 682 737 683 738 if (rsp_data->status) { 684 739 dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", 685 740 __func__, rsp_data->status); 686 - efi_status = qsee_uefi_status_to_efi(rsp_data->status); 687 - goto out_free; 741 + return qsee_uefi_status_to_efi(rsp_data->status); 688 742 } 689 743 690 744 if (storage_space) ··· 689 757 if (max_variable_size) 690 758 *max_variable_size = rsp_data->max_variable_size; 691 759 692 - out_free: 693 - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); 694 - out: 695 - return efi_status; 760 + return EFI_SUCCESS; 696 761 } 697 762 698 763 /* -- Global efivar interface. ---------------------------------------------- */ ··· 800 871 static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, 801 872 const struct auxiliary_device_id *aux_dev_id) 802 873 { 874 + struct qcom_tzmem_pool_config pool_config; 803 875 struct qcuefi_client *qcuefi; 804 876 int status; 805 877 ··· 818 888 status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops); 819 889 if (status) 820 890 qcuefi_set_reference(NULL); 891 + 892 + memset(&pool_config, 0, sizeof(pool_config)); 893 + pool_config.initial_size = SZ_4K; 894 + pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER; 895 + pool_config.increment = 2; 896 + pool_config.max_size = SZ_256K; 897 + 898 + qcuefi->mempool = devm_qcom_tzmem_pool_new(&aux_dev->dev, &pool_config); 899 + if (IS_ERR(qcuefi->mempool)) 900 + return PTR_ERR(qcuefi->mempool); 821 901 822 902 return status; 823 903 }
+8 -22
drivers/firmware/qcom/qcom_scm-smc.c
··· 2 2 /* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved. 3 3 */ 4 4 5 + #include <linux/cleanup.h> 5 6 #include <linux/io.h> 6 7 #include <linux/errno.h> 7 8 #include <linux/delay.h> ··· 10 9 #include <linux/slab.h> 11 10 #include <linux/types.h> 12 11 #include <linux/firmware/qcom/qcom_scm.h> 12 + #include <linux/firmware/qcom/qcom_tzmem.h> 13 13 #include <linux/arm-smccc.h> 14 14 #include <linux/dma-mapping.h> 15 15 ··· 152 150 enum qcom_scm_convention qcom_convention, 153 151 struct qcom_scm_res *res, bool atomic) 154 152 { 153 + struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool(); 155 154 int arglen = desc->arginfo & 0xf; 156 155 int i, ret; 157 - dma_addr_t args_phys = 0; 158 - void *args_virt = NULL; 159 - size_t alloc_len; 156 + void *args_virt __free(qcom_tzmem) = NULL; 160 157 gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; 161 158 u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; 162 159 u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ? 
··· 173 172 smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; 174 173 175 174 if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { 176 - alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); 177 - args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); 178 - 175 + args_virt = qcom_tzmem_alloc(mempool, 176 + SCM_SMC_N_EXT_ARGS * sizeof(u64), 177 + flag); 179 178 if (!args_virt) 180 179 return -ENOMEM; 181 180 ··· 193 192 SCM_SMC_FIRST_EXT_IDX]); 194 193 } 195 194 196 - args_phys = dma_map_single(dev, args_virt, alloc_len, 197 - DMA_TO_DEVICE); 198 - 199 - if (dma_mapping_error(dev, args_phys)) { 200 - kfree(args_virt); 201 - return -ENOMEM; 202 - } 203 - 204 - smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; 195 + smc.args[SCM_SMC_LAST_REG_IDX] = qcom_tzmem_to_phys(args_virt); 205 196 } 206 197 207 - /* ret error check follows after args_virt cleanup*/ 208 198 ret = __scm_smc_do(dev, &smc, &smc_res, atomic); 209 - 210 - if (args_virt) { 211 - dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); 212 - kfree(args_virt); 213 - } 214 - 215 199 if (ret) 216 200 return ret; 217 201
+148 -49
drivers/firmware/qcom/qcom_scm.c
··· 6 6 #include <linux/arm-smccc.h> 7 7 #include <linux/bitfield.h> 8 8 #include <linux/bits.h> 9 + #include <linux/cleanup.h> 9 10 #include <linux/clk.h> 10 11 #include <linux/completion.h> 11 12 #include <linux/cpumask.h> 12 13 #include <linux/dma-mapping.h> 14 + #include <linux/err.h> 13 15 #include <linux/export.h> 14 16 #include <linux/firmware/qcom/qcom_scm.h> 17 + #include <linux/firmware/qcom/qcom_tzmem.h> 15 18 #include <linux/init.h> 16 19 #include <linux/interconnect.h> 17 20 #include <linux/interrupt.h> ··· 23 20 #include <linux/of_address.h> 24 21 #include <linux/of_irq.h> 25 22 #include <linux/of_platform.h> 23 + #include <linux/of_reserved_mem.h> 26 24 #include <linux/platform_device.h> 27 25 #include <linux/reset-controller.h> 26 + #include <linux/sizes.h> 28 27 #include <linux/types.h> 29 28 30 29 #include "qcom_scm.h" 30 + #include "qcom_tzmem.h" 31 31 32 32 static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); 33 33 module_param(download_mode, bool, 0); ··· 49 43 int scm_vote_count; 50 44 51 45 u64 dload_mode_addr; 46 + 47 + struct qcom_tzmem_pool *mempool; 52 48 }; 53 49 54 50 struct qcom_scm_current_perm_info { ··· 122 114 }; 123 115 124 116 #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) 125 - #define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) 126 117 127 118 #define QCOM_DLOAD_MASK GENMASK(5, 4) 128 119 #define QCOM_DLOAD_NODUMP 0 ··· 204 197 205 198 enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; 206 199 static DEFINE_SPINLOCK(scm_query_lock); 200 + 201 + struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void) 202 + { 203 + return __scm->mempool; 204 + } 207 205 208 206 static enum qcom_scm_convention __get_convention(void) 209 207 { ··· 582 570 * During the scm call memory protection will be enabled for the meta 583 571 * data blob, so make sure it's physically contiguous, 4K aligned and 584 572 * non-cachable to avoid XPU violations. 
573 + * 574 + * For PIL calls the hypervisor creates SHM Bridges for the blob 575 + * buffers on behalf of Linux so we must not do it ourselves hence 576 + * not using the TZMem allocator here. 577 + * 578 + * If we pass a buffer that is already part of an SHM Bridge to this 579 + * call, it will fail. 585 580 */ 586 581 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, 587 582 GFP_KERNEL); ··· 1027 1008 struct qcom_scm_mem_map_info *mem_to_map; 1028 1009 phys_addr_t mem_to_map_phys; 1029 1010 phys_addr_t dest_phys; 1030 - dma_addr_t ptr_phys; 1011 + phys_addr_t ptr_phys; 1031 1012 size_t mem_to_map_sz; 1032 1013 size_t dest_sz; 1033 1014 size_t src_sz; 1034 1015 size_t ptr_sz; 1035 1016 int next_vm; 1036 1017 __le32 *src; 1037 - void *ptr; 1038 1018 int ret, i, b; 1039 1019 u64 srcvm_bits = *srcvm; 1040 1020 ··· 1043 1025 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + 1044 1026 ALIGN(dest_sz, SZ_64); 1045 1027 1046 - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); 1028 + void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1029 + ptr_sz, GFP_KERNEL); 1047 1030 if (!ptr) 1048 1031 return -ENOMEM; 1032 + 1033 + ptr_phys = qcom_tzmem_to_phys(ptr); 1049 1034 1050 1035 /* Fill source vmid detail */ 1051 1036 src = ptr; ··· 1078 1057 1079 1058 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, 1080 1059 ptr_phys, src_sz, dest_phys, dest_sz); 1081 - dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); 1082 1060 if (ret) { 1083 1061 dev_err(__scm->dev, 1084 1062 "Assign memory protection call failed %d\n", ret); ··· 1225 1205 .args[4] = data_unit_size, 1226 1206 .owner = ARM_SMCCC_OWNER_SIP, 1227 1207 }; 1228 - void *keybuf; 1229 - dma_addr_t key_phys; 1208 + 1230 1209 int ret; 1231 1210 1232 - /* 1233 - * 'key' may point to vmalloc()'ed memory, but we need to pass a 1234 - * physical address that's been properly flushed. The sanctioned way to 1235 - * do this is by using the DMA API. 
But as is best practice for crypto 1236 - * keys, we also must wipe the key after use. This makes kmemdup() + 1237 - * dma_map_single() not clearly correct, since the DMA API can use 1238 - * bounce buffers. Instead, just use dma_alloc_coherent(). Programming 1239 - * keys is normally rare and thus not performance-critical. 1240 - */ 1241 - 1242 - keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, 1243 - GFP_KERNEL); 1211 + void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1212 + key_size, 1213 + GFP_KERNEL); 1244 1214 if (!keybuf) 1245 1215 return -ENOMEM; 1246 1216 memcpy(keybuf, key, key_size); 1247 - desc.args[1] = key_phys; 1217 + desc.args[1] = qcom_tzmem_to_phys(keybuf); 1248 1218 1249 1219 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1250 1220 1251 1221 memzero_explicit(keybuf, key_size); 1252 1222 1253 - dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); 1254 1223 return ret; 1255 1224 } 1256 1225 EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); ··· 1351 1342 } 1352 1343 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); 1353 1344 1345 + int qcom_scm_shm_bridge_enable(void) 1346 + { 1347 + struct qcom_scm_desc desc = { 1348 + .svc = QCOM_SCM_SVC_MP, 1349 + .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE, 1350 + .owner = ARM_SMCCC_OWNER_SIP 1351 + }; 1352 + 1353 + struct qcom_scm_res res; 1354 + 1355 + if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, 1356 + QCOM_SCM_MP_SHM_BRIDGE_ENABLE)) 1357 + return -EOPNOTSUPP; 1358 + 1359 + return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0]; 1360 + } 1361 + EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable); 1362 + 1363 + int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags, 1364 + u64 ipfn_and_s_perm_flags, u64 size_and_flags, 1365 + u64 ns_vmids, u64 *handle) 1366 + { 1367 + struct qcom_scm_desc desc = { 1368 + .svc = QCOM_SCM_SVC_MP, 1369 + .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE, 1370 + .owner = ARM_SMCCC_OWNER_SIP, 1371 + .args[0] = pfn_and_ns_perm_flags, 
1372 + .args[1] = ipfn_and_s_perm_flags, 1373 + .args[2] = size_and_flags, 1374 + .args[3] = ns_vmids, 1375 + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, 1376 + QCOM_SCM_VAL, QCOM_SCM_VAL), 1377 + }; 1378 + 1379 + struct qcom_scm_res res; 1380 + int ret; 1381 + 1382 + ret = qcom_scm_call(__scm->dev, &desc, &res); 1383 + 1384 + if (handle && !ret) 1385 + *handle = res.result[1]; 1386 + 1387 + return ret ?: res.result[0]; 1388 + } 1389 + EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create); 1390 + 1391 + int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle) 1392 + { 1393 + struct qcom_scm_desc desc = { 1394 + .svc = QCOM_SCM_SVC_MP, 1395 + .cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE, 1396 + .owner = ARM_SMCCC_OWNER_SIP, 1397 + .args[0] = handle, 1398 + .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), 1399 + }; 1400 + 1401 + return qcom_scm_call(__scm->dev, &desc, NULL); 1402 + } 1403 + EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete); 1404 + 1354 1405 int qcom_scm_lmh_profile_change(u32 profile_id) 1355 1406 { 1356 1407 struct qcom_scm_desc desc = { ··· 1428 1359 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, 1429 1360 u64 limit_node, u32 node_id, u64 version) 1430 1361 { 1431 - dma_addr_t payload_phys; 1432 - u32 *payload_buf; 1433 1362 int ret, payload_size = 5 * sizeof(u32); 1434 1363 1435 1364 struct qcom_scm_desc desc = { ··· 1442 1375 .owner = ARM_SMCCC_OWNER_SIP, 1443 1376 }; 1444 1377 1445 - payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); 1378 + u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1379 + payload_size, 1380 + GFP_KERNEL); 1446 1381 if (!payload_buf) 1447 1382 return -ENOMEM; 1448 1383 ··· 1454 1385 payload_buf[3] = 1; 1455 1386 payload_buf[4] = payload_val; 1456 1387 1457 - desc.args[0] = payload_phys; 1388 + desc.args[0] = qcom_tzmem_to_phys(payload_buf); 1458 1389 1459 1390 ret = qcom_scm_call(__scm->dev, &desc, NULL); 1460 1391 1461 - 
dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); 1462 1392 return ret; 1463 1393 } 1464 1394 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); 1395 + 1396 + int qcom_scm_gpu_init_regs(u32 gpu_req) 1397 + { 1398 + struct qcom_scm_desc desc = { 1399 + .svc = QCOM_SCM_SVC_GPU, 1400 + .cmd = QCOM_SCM_SVC_GPU_INIT_REGS, 1401 + .arginfo = QCOM_SCM_ARGS(1), 1402 + .args[0] = gpu_req, 1403 + .owner = ARM_SMCCC_OWNER_SIP, 1404 + }; 1405 + 1406 + return qcom_scm_call(__scm->dev, &desc, NULL); 1407 + } 1408 + EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs); 1465 1409 1466 1410 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) 1467 1411 { ··· 1627 1545 unsigned long app_name_len = strlen(app_name); 1628 1546 struct qcom_scm_desc desc = {}; 1629 1547 struct qcom_scm_qseecom_resp res = {}; 1630 - dma_addr_t name_buf_phys; 1631 - char *name_buf; 1632 1548 int status; 1633 1549 1634 1550 if (app_name_len >= name_buf_size) 1635 1551 return -EINVAL; 1636 1552 1637 - name_buf = kzalloc(name_buf_size, GFP_KERNEL); 1553 + char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, 1554 + name_buf_size, 1555 + GFP_KERNEL); 1638 1556 if (!name_buf) 1639 1557 return -ENOMEM; 1640 1558 1641 1559 memcpy(name_buf, app_name, app_name_len); 1642 1560 1643 - name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE); 1644 - status = dma_mapping_error(__scm->dev, name_buf_phys); 1645 - if (status) { 1646 - kfree(name_buf); 1647 - dev_err(__scm->dev, "qseecom: failed to map dma address\n"); 1648 - return status; 1649 - } 1650 - 1651 1561 desc.owner = QSEECOM_TZ_OWNER_QSEE_OS; 1652 1562 desc.svc = QSEECOM_TZ_SVC_APP_MGR; 1653 1563 desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP; 1654 1564 desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL); 1655 - desc.args[0] = name_buf_phys; 1565 + desc.args[0] = qcom_tzmem_to_phys(name_buf); 1656 1566 desc.args[1] = app_name_len; 1657 1567 1658 1568 status = qcom_scm_qseecom_call(&desc, &res); 1659 - 
dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE); 1660 - kfree(name_buf); 1661 1569 1662 1570 if (status) 1663 1571 return status; ··· 1669 1597 /** 1670 1598 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. 1671 1599 * @app_id: The ID of the target app. 1672 - * @req: DMA address of the request buffer sent to the app. 1600 + * @req: Request buffer sent to the app (must be TZ memory) 1673 1601 * @req_size: Size of the request buffer. 1674 - * @rsp: DMA address of the response buffer, written to by the app. 1602 + * @rsp: Response buffer, written to by the app (must be TZ memory) 1675 1603 * @rsp_size: Size of the response buffer. 1676 1604 * 1677 1605 * Sends a request to the QSEE app associated with the given ID and read back ··· 1682 1610 * 1683 1611 * Return: Zero on success, nonzero on failure. 1684 1612 */ 1685 - int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, 1686 - dma_addr_t rsp, size_t rsp_size) 1613 + int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, 1614 + void *rsp, size_t rsp_size) 1687 1615 { 1688 1616 struct qcom_scm_qseecom_resp res = {}; 1689 1617 struct qcom_scm_desc desc = {}; 1618 + phys_addr_t req_phys; 1619 + phys_addr_t rsp_phys; 1690 1620 int status; 1621 + 1622 + req_phys = qcom_tzmem_to_phys(req); 1623 + rsp_phys = qcom_tzmem_to_phys(rsp); 1691 1624 1692 1625 desc.owner = QSEECOM_TZ_OWNER_TZ_APPS; 1693 1626 desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER; ··· 1701 1624 QCOM_SCM_RW, QCOM_SCM_VAL, 1702 1625 QCOM_SCM_RW, QCOM_SCM_VAL); 1703 1626 desc.args[0] = app_id; 1704 - desc.args[1] = req; 1627 + desc.args[1] = req_phys; 1705 1628 desc.args[2] = req_size; 1706 - desc.args[3] = rsp; 1629 + desc.args[3] = rsp_phys; 1707 1630 desc.args[4] = rsp_size; 1708 1631 1709 1632 status = qcom_scm_qseecom_call(&desc, &res); ··· 1726 1649 { .compatible = "lenovo,flex-5g" }, 1727 1650 { .compatible = "lenovo,thinkpad-x13s", }, 1728 1651 { .compatible = 
"qcom,sc8180x-primus" }, 1652 + { .compatible = "qcom,x1e80100-crd" }, 1653 + { .compatible = "qcom,x1e80100-qcp" }, 1729 1654 { } 1730 1655 }; 1731 1656 ··· 1872 1793 goto out; 1873 1794 } 1874 1795 1875 - if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE && 1876 - flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) { 1877 - dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags); 1796 + if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) { 1797 + dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags); 1878 1798 goto out; 1879 1799 } 1880 1800 ··· 1888 1810 1889 1811 static int qcom_scm_probe(struct platform_device *pdev) 1890 1812 { 1813 + struct qcom_tzmem_pool_config pool_config; 1891 1814 struct qcom_scm *scm; 1892 1815 int irq, ret; 1893 1816 ··· 1963 1884 */ 1964 1885 if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) 1965 1886 qcom_scm_disable_sdi(); 1887 + 1888 + ret = of_reserved_mem_device_init(__scm->dev); 1889 + if (ret && ret != -ENODEV) 1890 + return dev_err_probe(__scm->dev, ret, 1891 + "Failed to setup the reserved memory region for TZ mem\n"); 1892 + 1893 + ret = qcom_tzmem_enable(__scm->dev); 1894 + if (ret) 1895 + return dev_err_probe(__scm->dev, ret, 1896 + "Failed to enable the TrustZone memory allocator\n"); 1897 + 1898 + memset(&pool_config, 0, sizeof(pool_config)); 1899 + pool_config.initial_size = 0; 1900 + pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND; 1901 + pool_config.max_size = SZ_256K; 1902 + 1903 + __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config); 1904 + if (IS_ERR(__scm->mempool)) 1905 + return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), 1906 + "Failed to create the SCM memory pool\n"); 1966 1907 1967 1908 /* 1968 1909 * Initialize the QSEECOM interface.
+9
drivers/firmware/qcom/qcom_scm.h
··· 5 5 #define __QCOM_SCM_INT_H 6 6 7 7 struct device; 8 + struct qcom_tzmem_pool; 8 9 9 10 enum qcom_scm_convention { 10 11 SMC_CONVENTION_UNKNOWN, ··· 79 78 int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, 80 79 struct qcom_scm_res *res); 81 80 81 + struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void); 82 + 82 83 #define QCOM_SCM_SVC_BOOT 0x01 83 84 #define QCOM_SCM_BOOT_SET_ADDR 0x01 84 85 #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 ··· 116 113 #define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 117 114 #define QCOM_SCM_MP_VIDEO_VAR 0x08 118 115 #define QCOM_SCM_MP_ASSIGN 0x16 116 + #define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c 117 + #define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d 118 + #define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e 119 119 120 120 #define QCOM_SCM_SVC_OCMEM 0x0f 121 121 #define QCOM_SCM_OCMEM_LOCK_CMD 0x01 ··· 143 137 #define QCOM_SCM_SVC_WAITQ 0x24 144 138 #define QCOM_SCM_WAITQ_RESUME 0x02 145 139 #define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03 140 + 141 + #define QCOM_SCM_SVC_GPU 0x28 142 + #define QCOM_SCM_SVC_GPU_INIT_REGS 0x01 146 143 147 144 /* common error codes */ 148 145 #define QCOM_SCM_V2_EBUSY -12
+467
drivers/firmware/qcom/qcom_tzmem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Memory allocator for buffers shared with the TrustZone. 4 + * 5 + * Copyright (C) 2023-2024 Linaro Ltd. 6 + */ 7 + 8 + #include <linux/bug.h> 9 + #include <linux/cleanup.h> 10 + #include <linux/dma-mapping.h> 11 + #include <linux/err.h> 12 + #include <linux/firmware/qcom/qcom_tzmem.h> 13 + #include <linux/genalloc.h> 14 + #include <linux/gfp.h> 15 + #include <linux/kernel.h> 16 + #include <linux/list.h> 17 + #include <linux/mm.h> 18 + #include <linux/radix-tree.h> 19 + #include <linux/slab.h> 20 + #include <linux/spinlock.h> 21 + #include <linux/types.h> 22 + 23 + #include "qcom_tzmem.h" 24 + 25 + struct qcom_tzmem_area { 26 + struct list_head list; 27 + void *vaddr; 28 + dma_addr_t paddr; 29 + size_t size; 30 + void *priv; 31 + }; 32 + 33 + struct qcom_tzmem_pool { 34 + struct gen_pool *genpool; 35 + struct list_head areas; 36 + enum qcom_tzmem_policy policy; 37 + size_t increment; 38 + size_t max_size; 39 + spinlock_t lock; 40 + }; 41 + 42 + struct qcom_tzmem_chunk { 43 + phys_addr_t paddr; 44 + size_t size; 45 + struct qcom_tzmem_pool *owner; 46 + }; 47 + 48 + static struct device *qcom_tzmem_dev; 49 + static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC); 50 + static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock); 51 + 52 + #if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC) 53 + 54 + static int qcom_tzmem_init(void) 55 + { 56 + return 0; 57 + } 58 + 59 + static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) 60 + { 61 + return 0; 62 + } 63 + 64 + static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) 65 + { 66 + 67 + } 68 + 69 + #elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE) 70 + 71 + #include <linux/firmware/qcom/qcom_scm.h> 72 + #include <linux/of.h> 73 + 74 + #define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9 75 + 76 + static bool qcom_tzmem_using_shm_bridge; 77 + 78 + /* List of machines that are known to not support SHM bridge correctly. 
*/ 79 + static const char *const qcom_tzmem_blacklist[] = { 80 + "qcom,sc8180x", 81 + NULL 82 + }; 83 + 84 + static int qcom_tzmem_init(void) 85 + { 86 + const char *const *platform; 87 + int ret; 88 + 89 + for (platform = qcom_tzmem_blacklist; *platform; platform++) { 90 + if (of_machine_is_compatible(*platform)) 91 + goto notsupp; 92 + } 93 + 94 + ret = qcom_scm_shm_bridge_enable(); 95 + if (ret == -EOPNOTSUPP) 96 + goto notsupp; 97 + 98 + if (!ret) 99 + qcom_tzmem_using_shm_bridge = true; 100 + 101 + return ret; 102 + 103 + notsupp: 104 + dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n"); 105 + return 0; 106 + } 107 + 108 + static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) 109 + { 110 + u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags; 111 + int ret; 112 + 113 + if (!qcom_tzmem_using_shm_bridge) 114 + return 0; 115 + 116 + pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; 117 + ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; 118 + size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT); 119 + 120 + u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL); 121 + if (!handle) 122 + return -ENOMEM; 123 + 124 + ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm, 125 + ipfn_and_s_perm, size_and_flags, 126 + QCOM_SCM_VMID_HLOS, handle); 127 + if (ret) 128 + return ret; 129 + 130 + area->priv = no_free_ptr(handle); 131 + 132 + return 0; 133 + } 134 + 135 + static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) 136 + { 137 + u64 *handle = area->priv; 138 + 139 + if (!qcom_tzmem_using_shm_bridge) 140 + return; 141 + 142 + qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle); 143 + kfree(handle); 144 + } 145 + 146 + #endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */ 147 + 148 + static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool, 149 + size_t size, gfp_t gfp) 150 + { 151 + int ret; 152 + 153 + struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area), 154 + gfp); 155 + if (!area) 
156 + return -ENOMEM; 157 + 158 + area->size = PAGE_ALIGN(size); 159 + 160 + area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size, 161 + &area->paddr, gfp); 162 + if (!area->vaddr) 163 + return -ENOMEM; 164 + 165 + ret = qcom_tzmem_init_area(area); 166 + if (ret) { 167 + dma_free_coherent(qcom_tzmem_dev, area->size, 168 + area->vaddr, area->paddr); 169 + return ret; 170 + } 171 + 172 + ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr, 173 + (phys_addr_t)area->paddr, size, -1); 174 + if (ret) { 175 + dma_free_coherent(qcom_tzmem_dev, area->size, 176 + area->vaddr, area->paddr); 177 + return ret; 178 + } 179 + 180 + scoped_guard(spinlock_irqsave, &pool->lock) 181 + list_add_tail(&area->list, &pool->areas); 182 + 183 + area = NULL; 184 + return 0; 185 + } 186 + 187 + /** 188 + * qcom_tzmem_pool_new() - Create a new TZ memory pool. 189 + * @config: Pool configuration. 190 + * 191 + * Create a new pool of memory suitable for sharing with the TrustZone. 192 + * 193 + * Must not be used in atomic context. 194 + * 195 + * Return: New memory pool address or ERR_PTR() on error. 
196 + */ 197 + struct qcom_tzmem_pool * 198 + qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config) 199 + { 200 + int ret = -ENOMEM; 201 + 202 + might_sleep(); 203 + 204 + switch (config->policy) { 205 + case QCOM_TZMEM_POLICY_STATIC: 206 + if (!config->initial_size) 207 + return ERR_PTR(-EINVAL); 208 + break; 209 + case QCOM_TZMEM_POLICY_MULTIPLIER: 210 + if (!config->increment) 211 + return ERR_PTR(-EINVAL); 212 + break; 213 + case QCOM_TZMEM_POLICY_ON_DEMAND: 214 + break; 215 + default: 216 + return ERR_PTR(-EINVAL); 217 + } 218 + 219 + struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool), 220 + GFP_KERNEL); 221 + if (!pool) 222 + return ERR_PTR(-ENOMEM); 223 + 224 + pool->genpool = gen_pool_create(PAGE_SHIFT, -1); 225 + if (!pool->genpool) 226 + return ERR_PTR(-ENOMEM); 227 + 228 + gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL); 229 + 230 + pool->policy = config->policy; 231 + pool->increment = config->increment; 232 + pool->max_size = config->max_size; 233 + INIT_LIST_HEAD(&pool->areas); 234 + spin_lock_init(&pool->lock); 235 + 236 + if (config->initial_size) { 237 + ret = qcom_tzmem_pool_add_memory(pool, config->initial_size, 238 + GFP_KERNEL); 239 + if (ret) { 240 + gen_pool_destroy(pool->genpool); 241 + return ERR_PTR(ret); 242 + } 243 + } 244 + 245 + return no_free_ptr(pool); 246 + } 247 + EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new); 248 + 249 + /** 250 + * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources. 251 + * @pool: Memory pool to free. 252 + * 253 + * Must not be called if any of the allocated chunks has not been freed. 254 + * Must not be used in atomic context. 
255 + */ 256 + void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool) 257 + { 258 + struct qcom_tzmem_area *area, *next; 259 + struct qcom_tzmem_chunk *chunk; 260 + struct radix_tree_iter iter; 261 + bool non_empty = false; 262 + void __rcu **slot; 263 + 264 + might_sleep(); 265 + 266 + if (!pool) 267 + return; 268 + 269 + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { 270 + radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) { 271 + chunk = radix_tree_deref_slot_protected(slot, 272 + &qcom_tzmem_chunks_lock); 273 + 274 + if (chunk->owner == pool) 275 + non_empty = true; 276 + } 277 + } 278 + 279 + WARN(non_empty, "Freeing TZ memory pool with memory still allocated"); 280 + 281 + list_for_each_entry_safe(area, next, &pool->areas, list) { 282 + list_del(&area->list); 283 + qcom_tzmem_cleanup_area(area); 284 + dma_free_coherent(qcom_tzmem_dev, area->size, 285 + area->vaddr, area->paddr); 286 + kfree(area); 287 + } 288 + 289 + gen_pool_destroy(pool->genpool); 290 + kfree(pool); 291 + } 292 + EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free); 293 + 294 + static void devm_qcom_tzmem_pool_free(void *data) 295 + { 296 + struct qcom_tzmem_pool *pool = data; 297 + 298 + qcom_tzmem_pool_free(pool); 299 + } 300 + 301 + /** 302 + * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new(). 303 + * @dev: Device managing this resource. 304 + * @config: Pool configuration. 305 + * 306 + * Must not be used in atomic context. 307 + * 308 + * Return: Address of the managed pool or ERR_PTR() on failure. 
309 + */ 310 + struct qcom_tzmem_pool * 311 + devm_qcom_tzmem_pool_new(struct device *dev, 312 + const struct qcom_tzmem_pool_config *config) 313 + { 314 + struct qcom_tzmem_pool *pool; 315 + int ret; 316 + 317 + pool = qcom_tzmem_pool_new(config); 318 + if (IS_ERR(pool)) 319 + return pool; 320 + 321 + ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool); 322 + if (ret) 323 + return ERR_PTR(ret); 324 + 325 + return pool; 326 + } 327 + EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new); 328 + 329 + static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool, 330 + size_t requested, gfp_t gfp) 331 + { 332 + size_t current_size = gen_pool_size(pool->genpool); 333 + 334 + if (pool->max_size && (current_size + requested) > pool->max_size) 335 + return false; 336 + 337 + switch (pool->policy) { 338 + case QCOM_TZMEM_POLICY_STATIC: 339 + return false; 340 + case QCOM_TZMEM_POLICY_MULTIPLIER: 341 + requested = current_size * pool->increment; 342 + break; 343 + case QCOM_TZMEM_POLICY_ON_DEMAND: 344 + break; 345 + } 346 + 347 + return !qcom_tzmem_pool_add_memory(pool, requested, gfp); 348 + } 349 + 350 + /** 351 + * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ. 352 + * @pool: TZ memory pool from which to allocate memory. 353 + * @size: Number of bytes to allocate. 354 + * @gfp: GFP flags. 355 + * 356 + * Can be used in any context. 357 + * 358 + * Return: 359 + * Address of the allocated buffer or NULL if no more memory can be allocated. 360 + * The buffer must be released using qcom_tzmem_free(). 
361 + */ 362 + void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp) 363 + { 364 + unsigned long vaddr; 365 + int ret; 366 + 367 + if (!size) 368 + return NULL; 369 + 370 + size = PAGE_ALIGN(size); 371 + 372 + struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk), 373 + gfp); 374 + if (!chunk) 375 + return NULL; 376 + 377 + again: 378 + vaddr = gen_pool_alloc(pool->genpool, size); 379 + if (!vaddr) { 380 + if (qcom_tzmem_try_grow_pool(pool, size, gfp)) 381 + goto again; 382 + 383 + return NULL; 384 + } 385 + 386 + chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr); 387 + chunk->size = size; 388 + chunk->owner = pool; 389 + 390 + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { 391 + ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk); 392 + if (ret) { 393 + gen_pool_free(pool->genpool, vaddr, size); 394 + return NULL; 395 + } 396 + 397 + chunk = NULL; 398 + } 399 + 400 + return (void *)vaddr; 401 + } 402 + EXPORT_SYMBOL_GPL(qcom_tzmem_alloc); 403 + 404 + /** 405 + * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool. 406 + * @vaddr: Virtual address of the buffer. 407 + * 408 + * Can be used in any context. 409 + */ 410 + void qcom_tzmem_free(void *vaddr) 411 + { 412 + struct qcom_tzmem_chunk *chunk; 413 + 414 + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) 415 + chunk = radix_tree_delete_item(&qcom_tzmem_chunks, 416 + (unsigned long)vaddr, NULL); 417 + 418 + if (!chunk) { 419 + WARN(1, "Virtual address %p not owned by TZ memory allocator", 420 + vaddr); 421 + return; 422 + } 423 + 424 + scoped_guard(spinlock_irqsave, &chunk->owner->lock) 425 + gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr, 426 + chunk->size); 427 + kfree(chunk); 428 + } 429 + EXPORT_SYMBOL_GPL(qcom_tzmem_free); 430 + 431 + /** 432 + * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical. 433 + * @vaddr: Virtual address of the buffer allocated from a TZ memory pool. 
434 + * 435 + * Can be used in any context. The address must have been returned by a call 436 + * to qcom_tzmem_alloc(). 437 + * 438 + * Returns: Physical address of the buffer. 439 + */ 440 + phys_addr_t qcom_tzmem_to_phys(void *vaddr) 441 + { 442 + struct qcom_tzmem_chunk *chunk; 443 + 444 + guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock); 445 + 446 + chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr); 447 + if (!chunk) 448 + return 0; 449 + 450 + return chunk->paddr; 451 + } 452 + EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys); 453 + 454 + int qcom_tzmem_enable(struct device *dev) 455 + { 456 + if (qcom_tzmem_dev) 457 + return -EBUSY; 458 + 459 + qcom_tzmem_dev = dev; 460 + 461 + return qcom_tzmem_init(); 462 + } 463 + EXPORT_SYMBOL_GPL(qcom_tzmem_enable); 464 + 465 + MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers"); 466 + MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); 467 + MODULE_LICENSE("GPL");
+13
drivers/firmware/qcom/qcom_tzmem.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2023-2024 Linaro Ltd. 4 + */ 5 + 6 + #ifndef __QCOM_TZMEM_PRIV_H 7 + #define __QCOM_TZMEM_PRIV_H 8 + 9 + struct device; 10 + 11 + int qcom_tzmem_enable(struct device *dev); 12 + 13 + #endif /* __QCOM_TZMEM_PRIV_H */
+18
drivers/soc/qcom/Kconfig
··· 72 72 requirements. This is typically used by the GPU, camera/video, and 73 73 audio components on some Snapdragon SoCs. 74 74 75 + config QCOM_PD_MAPPER 76 + tristate "Qualcomm Protection Domain Mapper" 77 + select QCOM_QMI_HELPERS 78 + select QCOM_PDR_MSG 79 + select AUXILIARY_BUS 80 + depends on NET && QRTR 81 + default QCOM_RPROC_COMMON 82 + help 83 + The Protection Domain Mapper maps registered services to the domains 84 + and instances handled by the remote DSPs. This is a kernel-space 85 + implementation of the service. It is a simpler alternative to the 86 + userspace daemon. 87 + 75 88 config QCOM_PDR_HELPERS 76 89 tristate 77 90 select QCOM_QMI_HELPERS 91 + select QCOM_PDR_MSG 78 92 depends on NET 93 + 94 + config QCOM_PDR_MSG 95 + tristate 79 96 80 97 config QCOM_PMIC_PDCHARGER_ULOG 81 98 tristate "Qualcomm PMIC PDCharger ULOG driver" ··· 211 194 212 195 config QCOM_SMSM 213 196 tristate "Qualcomm Shared Memory State Machine" 197 + depends on MAILBOX 214 198 depends on QCOM_SMEM 215 199 select QCOM_SMEM_STATE 216 200 select IRQ_DOMAIN
+2
drivers/soc/qcom/Makefile
··· 7 7 obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o 8 8 obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o 9 9 obj-$(CONFIG_QCOM_OCMEM) += ocmem.o 10 + obj-$(CONFIG_QCOM_PD_MAPPER) += qcom_pd_mapper.o 10 11 obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o 12 + obj-$(CONFIG_QCOM_PDR_MSG) += qcom_pdr_msg.o 11 13 obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o 12 14 obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o 13 15 obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o
+12 -4
drivers/soc/qcom/icc-bwmon.c
··· 565 565 int window; 566 566 567 567 /* No need to check for errors, as this must have succeeded before. */ 568 - dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0); 568 + dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0)); 569 569 570 570 bwmon_clear_counters(bwmon, true); 571 571 ··· 772 772 opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); 773 773 if (IS_ERR(opp)) 774 774 return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); 775 + dev_pm_opp_put(opp); 775 776 776 777 bwmon->min_bw_kbps = 0; 777 778 opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); 778 779 if (IS_ERR(opp)) 779 780 return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); 781 + dev_pm_opp_put(opp); 780 782 781 783 bwmon->dev = dev; 782 784 783 785 bwmon_disable(bwmon); 784 - ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, 785 - bwmon_intr_thread, 786 - IRQF_ONESHOT, dev_name(dev), bwmon); 786 + 787 + /* 788 + * SoCs with multiple cpu-bwmon instances can end up using a shared interrupt 789 + * line. Using the devm_ variant might result in the IRQ handler being executed 790 + * after bwmon_disable in bwmon_remove() 791 + */ 792 + ret = request_threaded_irq(bwmon->irq, bwmon_intr, bwmon_intr_thread, 793 + IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), bwmon); 787 794 if (ret) 788 795 return dev_err_probe(dev, ret, "failed to request IRQ\n"); 789 796 ··· 805 798 struct icc_bwmon *bwmon = platform_get_drvdata(pdev); 806 799 807 800 bwmon_disable(bwmon); 801 + free_irq(bwmon->irq, bwmon); 808 802 } 809 803 810 804 static const struct icc_bwmon_data msm8998_bwmon_data = {
+50 -1
drivers/soc/qcom/llcc-qcom.c
··· 150 150 LLCC_COMMON_STATUS0, 151 151 }; 152 152 153 + static const struct llcc_slice_config sa8775p_data[] = { 154 + {LLCC_CPUSS, 1, 2048, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 1, 0, 0}, 155 + {LLCC_VIDSC0, 2, 512, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 156 + {LLCC_CPUSS1, 3, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 157 + {LLCC_CPUHWT, 5, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 158 + {LLCC_AUDIO, 6, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0}, 159 + {LLCC_CMPT, 10, 4096, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 160 + {LLCC_GPUHTW, 11, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 161 + {LLCC_GPU, 12, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 1, 0}, 162 + {LLCC_MMUHWT, 13, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0}, 163 + {LLCC_CMPTDMA, 15, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 164 + {LLCC_DISP, 16, 4096, 2, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 165 + {LLCC_VIDFW, 17, 3072, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 166 + {LLCC_AUDHW, 22, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0}, 167 + {LLCC_CVP, 28, 256, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, 168 + {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0}, 169 + {LLCC_WRCACHE, 31, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0}, 170 + }; 171 + 153 172 static const struct llcc_slice_config sc7180_data[] = { 154 173 { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 }, 155 174 { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 }, ··· 571 552 }, 572 553 }; 573 554 555 + static const struct qcom_llcc_config sa8775p_cfg[] = { 556 + { 557 + .sct_data = sa8775p_data, 558 + .size = ARRAY_SIZE(sa8775p_data), 559 + .need_llcc_cfg = true, 560 + .reg_offset = llcc_v2_1_reg_offset, 561 + .edac_reg_offset = &llcc_v2_1_edac_reg_offset, 562 + }, 563 + }; 564 + 574 565 static const struct qcom_llcc_config sc7180_cfg[] = { 575 566 { 576 567 .sct_data = sc7180_data, ··· 727 698 .num_config = ARRAY_SIZE(qdu1000_cfg), 728 699 }; 729 700 701 + static const struct qcom_sct_config sa8775p_cfgs = { 702 + 
.llcc_config = sa8775p_cfg, 703 + .num_config = ARRAY_SIZE(sa8775p_cfg), 704 + }; 705 + 730 706 static const struct qcom_sct_config sc7180_cfgs = { 731 707 .llcc_config = sc7180_cfg, 732 708 .num_config = ARRAY_SIZE(sc7180_cfg), ··· 855 821 static int llcc_update_act_ctrl(u32 sid, 856 822 u32 act_ctrl_reg_val, u32 status) 857 823 { 824 + struct regmap *regmap; 858 825 u32 act_ctrl_reg; 859 826 u32 act_clear_reg; 860 827 u32 status_reg; ··· 884 849 return ret; 885 850 886 851 if (drv_data->version >= LLCC_VERSION_4_1_0_0) { 887 - ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg, 852 + regmap = drv_data->bcast_and_regmap ?: drv_data->bcast_regmap; 853 + ret = regmap_read_poll_timeout(regmap, status_reg, 888 854 slice_status, (slice_status & ACT_COMPLETE), 889 855 0, LLCC_STATUS_READ_DELAY); 890 856 if (ret) ··· 1320 1284 1321 1285 drv_data->version = version; 1322 1286 1287 + /* Applicable only when drv_data->version >= 4.1 */ 1288 + if (drv_data->version >= LLCC_VERSION_4_1_0_0) { 1289 + drv_data->bcast_and_regmap = qcom_llcc_init_mmio(pdev, i + 1, "llcc_broadcast_and_base"); 1290 + if (IS_ERR(drv_data->bcast_and_regmap)) { 1291 + ret = PTR_ERR(drv_data->bcast_and_regmap); 1292 + if (ret == -EINVAL) 1293 + drv_data->bcast_and_regmap = NULL; 1294 + else 1295 + goto err; 1296 + } 1297 + } 1298 + 1323 1299 llcc_cfg = cfg->sct_data; 1324 1300 sz = cfg->size; 1325 1301 ··· 1380 1332 1381 1333 static const struct of_device_id qcom_llcc_of_match[] = { 1382 1334 { .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs}, 1335 + { .compatible = "qcom,sa8775p-llcc", .data = &sa8775p_cfgs }, 1383 1336 { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfgs }, 1384 1337 { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfgs }, 1385 1338 { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfgs },
+5 -3
drivers/soc/qcom/pdr_interface.c
··· 76 76 locator_hdl); 77 77 struct pdr_service *pds; 78 78 79 + mutex_lock(&pdr->lock); 79 80 /* Create a local client port for QMI communication */ 80 81 pdr->locator_addr.sq_family = AF_QIPCRTR; 81 82 pdr->locator_addr.sq_node = svc->node; 82 83 pdr->locator_addr.sq_port = svc->port; 83 84 84 - mutex_lock(&pdr->lock); 85 85 pdr->locator_init_complete = true; 86 86 mutex_unlock(&pdr->lock); 87 87 ··· 104 104 105 105 mutex_lock(&pdr->lock); 106 106 pdr->locator_init_complete = false; 107 - mutex_unlock(&pdr->lock); 108 107 109 108 pdr->locator_addr.sq_node = 0; 110 109 pdr->locator_addr.sq_port = 0; 110 + mutex_unlock(&pdr->lock); 111 111 } 112 112 113 113 static const struct qmi_ops pdr_locator_ops = { ··· 365 365 if (ret < 0) 366 366 return ret; 367 367 368 + mutex_lock(&pdr->lock); 368 369 ret = qmi_send_request(&pdr->locator_hdl, 369 370 &pdr->locator_addr, 370 371 &txn, SERVREG_GET_DOMAIN_LIST_REQ, 371 372 SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, 372 373 servreg_get_domain_list_req_ei, 373 374 req); 375 + mutex_unlock(&pdr->lock); 374 376 if (ret < 0) { 375 377 qmi_txn_cancel(&txn); 376 378 return ret; ··· 417 415 if (ret < 0) 418 416 goto out; 419 417 420 - for (i = domains_read; i < resp->domain_list_len; i++) { 418 + for (i = 0; i < resp->domain_list_len; i++) { 421 419 entry = &resp->domain_list[i]; 422 420 423 421 if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+23 -295
drivers/soc/qcom/pdr_internal.h
··· 13 13 #define SERVREG_SET_ACK_REQ 0x23 14 14 #define SERVREG_RESTART_PD_REQ 0x24 15 15 16 + #define SERVREG_LOC_PFR_REQ 0x24 17 + 16 18 #define SERVREG_DOMAIN_LIST_LENGTH 32 17 19 #define SERVREG_RESTART_PD_REQ_MAX_LEN 67 18 20 #define SERVREG_REGISTER_LISTENER_REQ_LEN 71 ··· 22 20 #define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74 23 21 #define SERVREG_STATE_UPDATED_IND_MAX_LEN 79 24 22 #define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389 23 + #define SERVREG_LOC_PFR_RESP_MAX_LEN 10 25 24 26 25 struct servreg_location_entry { 27 26 char name[SERVREG_NAME_LENGTH + 1]; ··· 31 28 u32 instance; 32 29 }; 33 30 34 - static const struct qmi_elem_info servreg_location_entry_ei[] = { 35 - { 36 - .data_type = QMI_STRING, 37 - .elem_len = SERVREG_NAME_LENGTH + 1, 38 - .elem_size = sizeof(char), 39 - .array_type = NO_ARRAY, 40 - .tlv_type = 0, 41 - .offset = offsetof(struct servreg_location_entry, 42 - name), 43 - }, 44 - { 45 - .data_type = QMI_UNSIGNED_4_BYTE, 46 - .elem_len = 1, 47 - .elem_size = sizeof(u32), 48 - .array_type = NO_ARRAY, 49 - .tlv_type = 0, 50 - .offset = offsetof(struct servreg_location_entry, 51 - instance), 52 - }, 53 - { 54 - .data_type = QMI_UNSIGNED_1_BYTE, 55 - .elem_len = 1, 56 - .elem_size = sizeof(u8), 57 - .array_type = NO_ARRAY, 58 - .tlv_type = 0, 59 - .offset = offsetof(struct servreg_location_entry, 60 - service_data_valid), 61 - }, 62 - { 63 - .data_type = QMI_UNSIGNED_4_BYTE, 64 - .elem_len = 1, 65 - .elem_size = sizeof(u32), 66 - .array_type = NO_ARRAY, 67 - .tlv_type = 0, 68 - .offset = offsetof(struct servreg_location_entry, 69 - service_data), 70 - }, 71 - {} 72 - }; 73 - 74 31 struct servreg_get_domain_list_req { 75 32 char service_name[SERVREG_NAME_LENGTH + 1]; 76 33 u8 domain_offset_valid; 77 34 u32 domain_offset; 78 - }; 79 - 80 - static const struct qmi_elem_info servreg_get_domain_list_req_ei[] = { 81 - { 82 - .data_type = QMI_STRING, 83 - .elem_len = SERVREG_NAME_LENGTH + 1, 84 - .elem_size = sizeof(char), 85 - .array_type = 
NO_ARRAY, 86 - .tlv_type = 0x01, 87 - .offset = offsetof(struct servreg_get_domain_list_req, 88 - service_name), 89 - }, 90 - { 91 - .data_type = QMI_OPT_FLAG, 92 - .elem_len = 1, 93 - .elem_size = sizeof(u8), 94 - .array_type = NO_ARRAY, 95 - .tlv_type = 0x10, 96 - .offset = offsetof(struct servreg_get_domain_list_req, 97 - domain_offset_valid), 98 - }, 99 - { 100 - .data_type = QMI_UNSIGNED_4_BYTE, 101 - .elem_len = 1, 102 - .elem_size = sizeof(u32), 103 - .array_type = NO_ARRAY, 104 - .tlv_type = 0x10, 105 - .offset = offsetof(struct servreg_get_domain_list_req, 106 - domain_offset), 107 - }, 108 - {} 109 35 }; 110 36 111 37 struct servreg_get_domain_list_resp { ··· 48 116 struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH]; 49 117 }; 50 118 51 - static const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { 52 - { 53 - .data_type = QMI_STRUCT, 54 - .elem_len = 1, 55 - .elem_size = sizeof(struct qmi_response_type_v01), 56 - .array_type = NO_ARRAY, 57 - .tlv_type = 0x02, 58 - .offset = offsetof(struct servreg_get_domain_list_resp, 59 - resp), 60 - .ei_array = qmi_response_type_v01_ei, 61 - }, 62 - { 63 - .data_type = QMI_OPT_FLAG, 64 - .elem_len = 1, 65 - .elem_size = sizeof(u8), 66 - .array_type = NO_ARRAY, 67 - .tlv_type = 0x10, 68 - .offset = offsetof(struct servreg_get_domain_list_resp, 69 - total_domains_valid), 70 - }, 71 - { 72 - .data_type = QMI_UNSIGNED_2_BYTE, 73 - .elem_len = 1, 74 - .elem_size = sizeof(u16), 75 - .array_type = NO_ARRAY, 76 - .tlv_type = 0x10, 77 - .offset = offsetof(struct servreg_get_domain_list_resp, 78 - total_domains), 79 - }, 80 - { 81 - .data_type = QMI_OPT_FLAG, 82 - .elem_len = 1, 83 - .elem_size = sizeof(u8), 84 - .array_type = NO_ARRAY, 85 - .tlv_type = 0x11, 86 - .offset = offsetof(struct servreg_get_domain_list_resp, 87 - db_rev_count_valid), 88 - }, 89 - { 90 - .data_type = QMI_UNSIGNED_2_BYTE, 91 - .elem_len = 1, 92 - .elem_size = sizeof(u16), 93 - .array_type = NO_ARRAY, 94 - .tlv_type = 0x11, 
95 - .offset = offsetof(struct servreg_get_domain_list_resp, 96 - db_rev_count), 97 - }, 98 - { 99 - .data_type = QMI_OPT_FLAG, 100 - .elem_len = 1, 101 - .elem_size = sizeof(u8), 102 - .array_type = NO_ARRAY, 103 - .tlv_type = 0x12, 104 - .offset = offsetof(struct servreg_get_domain_list_resp, 105 - domain_list_valid), 106 - }, 107 - { 108 - .data_type = QMI_DATA_LEN, 109 - .elem_len = 1, 110 - .elem_size = sizeof(u8), 111 - .array_type = NO_ARRAY, 112 - .tlv_type = 0x12, 113 - .offset = offsetof(struct servreg_get_domain_list_resp, 114 - domain_list_len), 115 - }, 116 - { 117 - .data_type = QMI_STRUCT, 118 - .elem_len = SERVREG_DOMAIN_LIST_LENGTH, 119 - .elem_size = sizeof(struct servreg_location_entry), 120 - .array_type = VAR_LEN_ARRAY, 121 - .tlv_type = 0x12, 122 - .offset = offsetof(struct servreg_get_domain_list_resp, 123 - domain_list), 124 - .ei_array = servreg_location_entry_ei, 125 - }, 126 - {} 127 - }; 128 - 129 119 struct servreg_register_listener_req { 130 120 u8 enable; 131 121 char service_path[SERVREG_NAME_LENGTH + 1]; 132 - }; 133 - 134 - static const struct qmi_elem_info servreg_register_listener_req_ei[] = { 135 - { 136 - .data_type = QMI_UNSIGNED_1_BYTE, 137 - .elem_len = 1, 138 - .elem_size = sizeof(u8), 139 - .array_type = NO_ARRAY, 140 - .tlv_type = 0x01, 141 - .offset = offsetof(struct servreg_register_listener_req, 142 - enable), 143 - }, 144 - { 145 - .data_type = QMI_STRING, 146 - .elem_len = SERVREG_NAME_LENGTH + 1, 147 - .elem_size = sizeof(char), 148 - .array_type = NO_ARRAY, 149 - .tlv_type = 0x02, 150 - .offset = offsetof(struct servreg_register_listener_req, 151 - service_path), 152 - }, 153 - {} 154 122 }; 155 123 156 124 struct servreg_register_listener_resp { ··· 59 227 enum servreg_service_state curr_state; 60 228 }; 61 229 62 - static const struct qmi_elem_info servreg_register_listener_resp_ei[] = { 63 - { 64 - .data_type = QMI_STRUCT, 65 - .elem_len = 1, 66 - .elem_size = sizeof(struct qmi_response_type_v01), 67 - 
.array_type = NO_ARRAY, 68 - .tlv_type = 0x02, 69 - .offset = offsetof(struct servreg_register_listener_resp, 70 - resp), 71 - .ei_array = qmi_response_type_v01_ei, 72 - }, 73 - { 74 - .data_type = QMI_OPT_FLAG, 75 - .elem_len = 1, 76 - .elem_size = sizeof(u8), 77 - .array_type = NO_ARRAY, 78 - .tlv_type = 0x10, 79 - .offset = offsetof(struct servreg_register_listener_resp, 80 - curr_state_valid), 81 - }, 82 - { 83 - .data_type = QMI_SIGNED_4_BYTE_ENUM, 84 - .elem_len = 1, 85 - .elem_size = sizeof(enum servreg_service_state), 86 - .array_type = NO_ARRAY, 87 - .tlv_type = 0x10, 88 - .offset = offsetof(struct servreg_register_listener_resp, 89 - curr_state), 90 - }, 91 - {} 92 - }; 93 - 94 230 struct servreg_restart_pd_req { 95 231 char service_path[SERVREG_NAME_LENGTH + 1]; 96 232 }; 97 233 98 - static const struct qmi_elem_info servreg_restart_pd_req_ei[] = { 99 - { 100 - .data_type = QMI_STRING, 101 - .elem_len = SERVREG_NAME_LENGTH + 1, 102 - .elem_size = sizeof(char), 103 - .array_type = NO_ARRAY, 104 - .tlv_type = 0x01, 105 - .offset = offsetof(struct servreg_restart_pd_req, 106 - service_path), 107 - }, 108 - {} 109 - }; 110 - 111 234 struct servreg_restart_pd_resp { 112 235 struct qmi_response_type_v01 resp; 113 - }; 114 - 115 - static const struct qmi_elem_info servreg_restart_pd_resp_ei[] = { 116 - { 117 - .data_type = QMI_STRUCT, 118 - .elem_len = 1, 119 - .elem_size = sizeof(struct qmi_response_type_v01), 120 - .array_type = NO_ARRAY, 121 - .tlv_type = 0x02, 122 - .offset = offsetof(struct servreg_restart_pd_resp, 123 - resp), 124 - .ei_array = qmi_response_type_v01_ei, 125 - }, 126 - {} 127 236 }; 128 237 129 238 struct servreg_state_updated_ind { ··· 73 300 u16 transaction_id; 74 301 }; 75 302 76 - static const struct qmi_elem_info servreg_state_updated_ind_ei[] = { 77 - { 78 - .data_type = QMI_SIGNED_4_BYTE_ENUM, 79 - .elem_len = 1, 80 - .elem_size = sizeof(u32), 81 - .array_type = NO_ARRAY, 82 - .tlv_type = 0x01, 83 - .offset = offsetof(struct 
servreg_state_updated_ind, 84 - curr_state), 85 - }, 86 - { 87 - .data_type = QMI_STRING, 88 - .elem_len = SERVREG_NAME_LENGTH + 1, 89 - .elem_size = sizeof(char), 90 - .array_type = NO_ARRAY, 91 - .tlv_type = 0x02, 92 - .offset = offsetof(struct servreg_state_updated_ind, 93 - service_path), 94 - }, 95 - { 96 - .data_type = QMI_UNSIGNED_2_BYTE, 97 - .elem_len = 1, 98 - .elem_size = sizeof(u16), 99 - .array_type = NO_ARRAY, 100 - .tlv_type = 0x03, 101 - .offset = offsetof(struct servreg_state_updated_ind, 102 - transaction_id), 103 - }, 104 - {} 105 - }; 106 - 107 303 struct servreg_set_ack_req { 108 304 char service_path[SERVREG_NAME_LENGTH + 1]; 109 305 u16 transaction_id; 110 - }; 111 - 112 - static const struct qmi_elem_info servreg_set_ack_req_ei[] = { 113 - { 114 - .data_type = QMI_STRING, 115 - .elem_len = SERVREG_NAME_LENGTH + 1, 116 - .elem_size = sizeof(char), 117 - .array_type = NO_ARRAY, 118 - .tlv_type = 0x01, 119 - .offset = offsetof(struct servreg_set_ack_req, 120 - service_path), 121 - }, 122 - { 123 - .data_type = QMI_UNSIGNED_2_BYTE, 124 - .elem_len = 1, 125 - .elem_size = sizeof(u16), 126 - .array_type = NO_ARRAY, 127 - .tlv_type = 0x02, 128 - .offset = offsetof(struct servreg_set_ack_req, 129 - transaction_id), 130 - }, 131 - {} 132 306 }; 133 307 134 308 struct servreg_set_ack_resp { 135 309 struct qmi_response_type_v01 resp; 136 310 }; 137 311 138 - static const struct qmi_elem_info servreg_set_ack_resp_ei[] = { 139 - { 140 - .data_type = QMI_STRUCT, 141 - .elem_len = 1, 142 - .elem_size = sizeof(struct qmi_response_type_v01), 143 - .array_type = NO_ARRAY, 144 - .tlv_type = 0x02, 145 - .offset = offsetof(struct servreg_set_ack_resp, 146 - resp), 147 - .ei_array = qmi_response_type_v01_ei, 148 - }, 149 - {} 312 + struct servreg_loc_pfr_req { 313 + char service[SERVREG_NAME_LENGTH + 1]; 314 + char reason[257]; 150 315 }; 316 + 317 + struct servreg_loc_pfr_resp { 318 + struct qmi_response_type_v01 rsp; 319 + }; 320 + 321 + extern const struct 
qmi_elem_info servreg_location_entry_ei[]; 322 + extern const struct qmi_elem_info servreg_get_domain_list_req_ei[]; 323 + extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[]; 324 + extern const struct qmi_elem_info servreg_register_listener_req_ei[]; 325 + extern const struct qmi_elem_info servreg_register_listener_resp_ei[]; 326 + extern const struct qmi_elem_info servreg_restart_pd_req_ei[]; 327 + extern const struct qmi_elem_info servreg_restart_pd_resp_ei[]; 328 + extern const struct qmi_elem_info servreg_state_updated_ind_ei[]; 329 + extern const struct qmi_elem_info servreg_set_ack_req_ei[]; 330 + extern const struct qmi_elem_info servreg_set_ack_resp_ei[]; 331 + extern const struct qmi_elem_info servreg_loc_pfr_req_ei[]; 332 + extern const struct qmi_elem_info servreg_loc_pfr_resp_ei[]; 151 333 152 334 #endif
+11 -2
drivers/soc/qcom/pmic_glink.c
··· 369 369 370 370 static int pmic_glink_init(void) 371 371 { 372 - platform_driver_register(&pmic_glink_driver); 373 - register_rpmsg_driver(&pmic_glink_rpmsg_driver); 372 + int ret; 373 + 374 + ret = platform_driver_register(&pmic_glink_driver); 375 + if (ret < 0) 376 + return ret; 377 + 378 + ret = register_rpmsg_driver(&pmic_glink_rpmsg_driver); 379 + if (ret < 0) { 380 + platform_driver_unregister(&pmic_glink_driver); 381 + return ret; 382 + } 374 383 375 384 return 0; 376 385 }
+677
drivers/soc/qcom/qcom_pd_mapper.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Qualcomm Protection Domain mapper 4 + * 5 + * Copyright (c) 2023 Linaro Ltd. 6 + */ 7 + 8 + #include <linux/auxiliary_bus.h> 9 + #include <linux/kernel.h> 10 + #include <linux/mod_devicetable.h> 11 + #include <linux/module.h> 12 + #include <linux/of.h> 13 + #include <linux/refcount.h> 14 + #include <linux/slab.h> 15 + #include <linux/soc/qcom/qmi.h> 16 + 17 + #include "pdr_internal.h" 18 + 19 + #define SERVREG_QMI_VERSION 0x101 20 + #define SERVREG_QMI_INSTANCE 0 21 + 22 + #define TMS_SERVREG_SERVICE "tms/servreg" 23 + 24 + struct qcom_pdm_domain_data { 25 + const char *domain; 26 + u32 instance_id; 27 + /* NULL-terminated array */ 28 + const char * services[]; 29 + }; 30 + 31 + struct qcom_pdm_domain { 32 + struct list_head list; 33 + const char *name; 34 + u32 instance_id; 35 + }; 36 + 37 + struct qcom_pdm_service { 38 + struct list_head list; 39 + struct list_head domains; 40 + const char *name; 41 + }; 42 + 43 + struct qcom_pdm_data { 44 + refcount_t refcnt; 45 + struct qmi_handle handle; 46 + struct list_head services; 47 + }; 48 + 49 + static DEFINE_MUTEX(qcom_pdm_mutex); /* protects __qcom_pdm_data */ 50 + static struct qcom_pdm_data *__qcom_pdm_data; 51 + 52 + static struct qcom_pdm_service *qcom_pdm_find(struct qcom_pdm_data *data, 53 + const char *name) 54 + { 55 + struct qcom_pdm_service *service; 56 + 57 + list_for_each_entry(service, &data->services, list) { 58 + if (!strcmp(service->name, name)) 59 + return service; 60 + } 61 + 62 + return NULL; 63 + } 64 + 65 + static int qcom_pdm_add_service_domain(struct qcom_pdm_data *data, 66 + const char *service_name, 67 + const char *domain_name, 68 + u32 instance_id) 69 + { 70 + struct qcom_pdm_service *service; 71 + struct qcom_pdm_domain *domain; 72 + 73 + service = qcom_pdm_find(data, service_name); 74 + if (service) { 75 + list_for_each_entry(domain, &service->domains, list) { 76 + if (!strcmp(domain->name, domain_name)) 77 + return -EBUSY; 78 
+ } 79 + } else { 80 + service = kzalloc(sizeof(*service), GFP_KERNEL); 81 + if (!service) 82 + return -ENOMEM; 83 + 84 + INIT_LIST_HEAD(&service->domains); 85 + service->name = service_name; 86 + 87 + list_add_tail(&service->list, &data->services); 88 + } 89 + 90 + domain = kzalloc(sizeof(*domain), GFP_KERNEL); 91 + if (!domain) { 92 + if (list_empty(&service->domains)) { 93 + list_del(&service->list); 94 + kfree(service); 95 + } 96 + 97 + return -ENOMEM; 98 + } 99 + 100 + domain->name = domain_name; 101 + domain->instance_id = instance_id; 102 + list_add_tail(&domain->list, &service->domains); 103 + 104 + return 0; 105 + } 106 + 107 + static int qcom_pdm_add_domain(struct qcom_pdm_data *data, 108 + const struct qcom_pdm_domain_data *domain) 109 + { 110 + int ret; 111 + int i; 112 + 113 + ret = qcom_pdm_add_service_domain(data, 114 + TMS_SERVREG_SERVICE, 115 + domain->domain, 116 + domain->instance_id); 117 + if (ret) 118 + return ret; 119 + 120 + for (i = 0; domain->services[i]; i++) { 121 + ret = qcom_pdm_add_service_domain(data, 122 + domain->services[i], 123 + domain->domain, 124 + domain->instance_id); 125 + if (ret) 126 + return ret; 127 + } 128 + 129 + return 0; 130 + 131 + } 132 + 133 + static void qcom_pdm_free_domains(struct qcom_pdm_data *data) 134 + { 135 + struct qcom_pdm_service *service, *tservice; 136 + struct qcom_pdm_domain *domain, *tdomain; 137 + 138 + list_for_each_entry_safe(service, tservice, &data->services, list) { 139 + list_for_each_entry_safe(domain, tdomain, &service->domains, list) { 140 + list_del(&domain->list); 141 + kfree(domain); 142 + } 143 + 144 + list_del(&service->list); 145 + kfree(service); 146 + } 147 + } 148 + 149 + static void qcom_pdm_get_domain_list(struct qmi_handle *qmi, 150 + struct sockaddr_qrtr *sq, 151 + struct qmi_txn *txn, 152 + const void *decoded) 153 + { 154 + struct qcom_pdm_data *data = container_of(qmi, struct qcom_pdm_data, handle); 155 + const struct servreg_get_domain_list_req *req = decoded; 156 + 
struct servreg_get_domain_list_resp *rsp; 157 + struct qcom_pdm_service *service; 158 + u32 offset; 159 + int ret; 160 + 161 + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); 162 + if (!rsp) 163 + return; 164 + 165 + offset = req->domain_offset_valid ? req->domain_offset : 0; 166 + 167 + rsp->resp.result = QMI_RESULT_SUCCESS_V01; 168 + rsp->resp.error = QMI_ERR_NONE_V01; 169 + 170 + rsp->db_rev_count_valid = true; 171 + rsp->db_rev_count = 1; 172 + 173 + rsp->total_domains_valid = true; 174 + rsp->total_domains = 0; 175 + 176 + mutex_lock(&qcom_pdm_mutex); 177 + 178 + service = qcom_pdm_find(data, req->service_name); 179 + if (service) { 180 + struct qcom_pdm_domain *domain; 181 + 182 + rsp->domain_list_valid = true; 183 + rsp->domain_list_len = 0; 184 + 185 + list_for_each_entry(domain, &service->domains, list) { 186 + u32 i = rsp->total_domains++; 187 + 188 + if (i >= offset && i < SERVREG_DOMAIN_LIST_LENGTH) { 189 + u32 j = rsp->domain_list_len++; 190 + 191 + strscpy(rsp->domain_list[j].name, domain->name, 192 + sizeof(rsp->domain_list[i].name)); 193 + rsp->domain_list[j].instance = domain->instance_id; 194 + 195 + pr_debug("PDM: found %s / %d\n", domain->name, 196 + domain->instance_id); 197 + } 198 + } 199 + } 200 + 201 + pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->service_name, 202 + req->domain_offset_valid ? 
req->domain_offset : -1, rsp->domain_list_len, rsp->total_domains); 203 + 204 + ret = qmi_send_response(qmi, sq, txn, SERVREG_GET_DOMAIN_LIST_REQ, 205 + SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN, 206 + servreg_get_domain_list_resp_ei, rsp); 207 + if (ret) 208 + pr_err("Error sending servreg response: %d\n", ret); 209 + 210 + mutex_unlock(&qcom_pdm_mutex); 211 + 212 + kfree(rsp); 213 + } 214 + 215 + static void qcom_pdm_pfr(struct qmi_handle *qmi, 216 + struct sockaddr_qrtr *sq, 217 + struct qmi_txn *txn, 218 + const void *decoded) 219 + { 220 + const struct servreg_loc_pfr_req *req = decoded; 221 + struct servreg_loc_pfr_resp rsp = {}; 222 + int ret; 223 + 224 + pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason); 225 + 226 + rsp.rsp.result = QMI_RESULT_SUCCESS_V01; 227 + rsp.rsp.error = QMI_ERR_NONE_V01; 228 + 229 + ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR_REQ, 230 + SERVREG_LOC_PFR_RESP_MAX_LEN, 231 + servreg_loc_pfr_resp_ei, &rsp); 232 + if (ret) 233 + pr_err("Error sending servreg response: %d\n", ret); 234 + } 235 + 236 + static const struct qmi_msg_handler qcom_pdm_msg_handlers[] = { 237 + { 238 + .type = QMI_REQUEST, 239 + .msg_id = SERVREG_GET_DOMAIN_LIST_REQ, 240 + .ei = servreg_get_domain_list_req_ei, 241 + .decoded_size = sizeof(struct servreg_get_domain_list_req), 242 + .fn = qcom_pdm_get_domain_list, 243 + }, 244 + { 245 + .type = QMI_REQUEST, 246 + .msg_id = SERVREG_LOC_PFR_REQ, 247 + .ei = servreg_loc_pfr_req_ei, 248 + .decoded_size = sizeof(struct servreg_loc_pfr_req), 249 + .fn = qcom_pdm_pfr, 250 + }, 251 + { }, 252 + }; 253 + 254 + static const struct qcom_pdm_domain_data adsp_audio_pd = { 255 + .domain = "msm/adsp/audio_pd", 256 + .instance_id = 74, 257 + .services = { 258 + "avs/audio", 259 + NULL, 260 + }, 261 + }; 262 + 263 + static const struct qcom_pdm_domain_data adsp_charger_pd = { 264 + .domain = "msm/adsp/charger_pd", 265 + .instance_id = 74, 266 + .services = { NULL }, 267 + }; 268 + 269 + static 
const struct qcom_pdm_domain_data adsp_root_pd = { 270 + .domain = "msm/adsp/root_pd", 271 + .instance_id = 74, 272 + .services = { NULL }, 273 + }; 274 + 275 + static const struct qcom_pdm_domain_data adsp_root_pd_pdr = { 276 + .domain = "msm/adsp/root_pd", 277 + .instance_id = 74, 278 + .services = { 279 + "tms/pdr_enabled", 280 + NULL, 281 + }, 282 + }; 283 + 284 + static const struct qcom_pdm_domain_data adsp_sensor_pd = { 285 + .domain = "msm/adsp/sensor_pd", 286 + .instance_id = 74, 287 + .services = { NULL }, 288 + }; 289 + 290 + static const struct qcom_pdm_domain_data msm8996_adsp_audio_pd = { 291 + .domain = "msm/adsp/audio_pd", 292 + .instance_id = 4, 293 + .services = { NULL }, 294 + }; 295 + 296 + static const struct qcom_pdm_domain_data msm8996_adsp_root_pd = { 297 + .domain = "msm/adsp/root_pd", 298 + .instance_id = 4, 299 + .services = { NULL }, 300 + }; 301 + 302 + static const struct qcom_pdm_domain_data cdsp_root_pd = { 303 + .domain = "msm/cdsp/root_pd", 304 + .instance_id = 76, 305 + .services = { NULL }, 306 + }; 307 + 308 + static const struct qcom_pdm_domain_data slpi_root_pd = { 309 + .domain = "msm/slpi/root_pd", 310 + .instance_id = 90, 311 + .services = { NULL }, 312 + }; 313 + 314 + static const struct qcom_pdm_domain_data slpi_sensor_pd = { 315 + .domain = "msm/slpi/sensor_pd", 316 + .instance_id = 90, 317 + .services = { NULL }, 318 + }; 319 + 320 + static const struct qcom_pdm_domain_data mpss_root_pd = { 321 + .domain = "msm/modem/root_pd", 322 + .instance_id = 180, 323 + .services = { 324 + NULL, 325 + }, 326 + }; 327 + 328 + static const struct qcom_pdm_domain_data mpss_root_pd_gps = { 329 + .domain = "msm/modem/root_pd", 330 + .instance_id = 180, 331 + .services = { 332 + "gps/gps_service", 333 + NULL, 334 + }, 335 + }; 336 + 337 + static const struct qcom_pdm_domain_data mpss_root_pd_gps_pdr = { 338 + .domain = "msm/modem/root_pd", 339 + .instance_id = 180, 340 + .services = { 341 + "gps/gps_service", 342 + "tms/pdr_enabled", 
343 + NULL, 344 + }, 345 + }; 346 + 347 + static const struct qcom_pdm_domain_data msm8996_mpss_root_pd = { 348 + .domain = "msm/modem/root_pd", 349 + .instance_id = 100, 350 + .services = { NULL }, 351 + }; 352 + 353 + static const struct qcom_pdm_domain_data mpss_wlan_pd = { 354 + .domain = "msm/modem/wlan_pd", 355 + .instance_id = 180, 356 + .services = { 357 + "kernel/elf_loader", 358 + "wlan/fw", 359 + NULL, 360 + }, 361 + }; 362 + 363 + static const struct qcom_pdm_domain_data *msm8996_domains[] = { 364 + &msm8996_adsp_audio_pd, 365 + &msm8996_adsp_root_pd, 366 + &msm8996_mpss_root_pd, 367 + NULL, 368 + }; 369 + 370 + static const struct qcom_pdm_domain_data *msm8998_domains[] = { 371 + &mpss_root_pd, 372 + &mpss_wlan_pd, 373 + NULL, 374 + }; 375 + 376 + static const struct qcom_pdm_domain_data *qcm2290_domains[] = { 377 + &adsp_audio_pd, 378 + &adsp_root_pd, 379 + &adsp_sensor_pd, 380 + &mpss_root_pd_gps, 381 + &mpss_wlan_pd, 382 + NULL, 383 + }; 384 + 385 + static const struct qcom_pdm_domain_data *qcs404_domains[] = { 386 + &adsp_audio_pd, 387 + &adsp_root_pd, 388 + &adsp_sensor_pd, 389 + &cdsp_root_pd, 390 + &mpss_root_pd, 391 + &mpss_wlan_pd, 392 + NULL, 393 + }; 394 + 395 + static const struct qcom_pdm_domain_data *sc7180_domains[] = { 396 + &adsp_audio_pd, 397 + &adsp_root_pd_pdr, 398 + &adsp_sensor_pd, 399 + &mpss_root_pd_gps_pdr, 400 + &mpss_wlan_pd, 401 + NULL, 402 + }; 403 + 404 + static const struct qcom_pdm_domain_data *sc7280_domains[] = { 405 + &adsp_audio_pd, 406 + &adsp_root_pd_pdr, 407 + &adsp_charger_pd, 408 + &adsp_sensor_pd, 409 + &cdsp_root_pd, 410 + &mpss_root_pd_gps_pdr, 411 + NULL, 412 + }; 413 + 414 + static const struct qcom_pdm_domain_data *sc8180x_domains[] = { 415 + &adsp_audio_pd, 416 + &adsp_root_pd, 417 + &adsp_charger_pd, 418 + &cdsp_root_pd, 419 + &mpss_root_pd_gps, 420 + &mpss_wlan_pd, 421 + NULL, 422 + }; 423 + 424 + static const struct qcom_pdm_domain_data *sc8280xp_domains[] = { 425 + &adsp_audio_pd, 426 + 
&adsp_root_pd_pdr, 427 + &adsp_charger_pd, 428 + &cdsp_root_pd, 429 + NULL, 430 + }; 431 + 432 + static const struct qcom_pdm_domain_data *sdm660_domains[] = { 433 + &adsp_audio_pd, 434 + &adsp_root_pd, 435 + &adsp_sensor_pd, 436 + &cdsp_root_pd, 437 + &mpss_root_pd, 438 + &mpss_wlan_pd, 439 + NULL, 440 + }; 441 + 442 + static const struct qcom_pdm_domain_data *sdm670_domains[] = { 443 + &adsp_audio_pd, 444 + &adsp_root_pd, 445 + &cdsp_root_pd, 446 + &mpss_root_pd, 447 + &mpss_wlan_pd, 448 + NULL, 449 + }; 450 + 451 + static const struct qcom_pdm_domain_data *sdm845_domains[] = { 452 + &adsp_audio_pd, 453 + &adsp_root_pd, 454 + &cdsp_root_pd, 455 + &mpss_root_pd, 456 + &mpss_wlan_pd, 457 + &slpi_root_pd, 458 + &slpi_sensor_pd, 459 + NULL, 460 + }; 461 + 462 + static const struct qcom_pdm_domain_data *sm6115_domains[] = { 463 + &adsp_audio_pd, 464 + &adsp_root_pd, 465 + &adsp_sensor_pd, 466 + &cdsp_root_pd, 467 + &mpss_root_pd_gps, 468 + &mpss_wlan_pd, 469 + NULL, 470 + }; 471 + 472 + static const struct qcom_pdm_domain_data *sm6350_domains[] = { 473 + &adsp_audio_pd, 474 + &adsp_root_pd, 475 + &adsp_sensor_pd, 476 + &cdsp_root_pd, 477 + &mpss_wlan_pd, 478 + NULL, 479 + }; 480 + 481 + static const struct qcom_pdm_domain_data *sm8150_domains[] = { 482 + &adsp_audio_pd, 483 + &adsp_root_pd, 484 + &cdsp_root_pd, 485 + &mpss_root_pd_gps, 486 + &mpss_wlan_pd, 487 + NULL, 488 + }; 489 + 490 + static const struct qcom_pdm_domain_data *sm8250_domains[] = { 491 + &adsp_audio_pd, 492 + &adsp_root_pd, 493 + &cdsp_root_pd, 494 + &slpi_root_pd, 495 + &slpi_sensor_pd, 496 + NULL, 497 + }; 498 + 499 + static const struct qcom_pdm_domain_data *sm8350_domains[] = { 500 + &adsp_audio_pd, 501 + &adsp_root_pd_pdr, 502 + &adsp_charger_pd, 503 + &cdsp_root_pd, 504 + &mpss_root_pd_gps, 505 + &slpi_root_pd, 506 + &slpi_sensor_pd, 507 + NULL, 508 + }; 509 + 510 + static const struct qcom_pdm_domain_data *sm8550_domains[] = { 511 + &adsp_audio_pd, 512 + &adsp_root_pd, 513 + &adsp_charger_pd, 
514 + &adsp_sensor_pd, 515 + &cdsp_root_pd, 516 + &mpss_root_pd_gps, 517 + NULL, 518 + }; 519 + 520 + static const struct of_device_id qcom_pdm_domains[] = { 521 + { .compatible = "qcom,apq8064", .data = NULL, }, 522 + { .compatible = "qcom,apq8074", .data = NULL, }, 523 + { .compatible = "qcom,apq8084", .data = NULL, }, 524 + { .compatible = "qcom,apq8096", .data = msm8996_domains, }, 525 + { .compatible = "qcom,msm8226", .data = NULL, }, 526 + { .compatible = "qcom,msm8974", .data = NULL, }, 527 + { .compatible = "qcom,msm8996", .data = msm8996_domains, }, 528 + { .compatible = "qcom,msm8998", .data = msm8998_domains, }, 529 + { .compatible = "qcom,qcm2290", .data = qcm2290_domains, }, 530 + { .compatible = "qcom,qcs404", .data = qcs404_domains, }, 531 + { .compatible = "qcom,sc7180", .data = sc7180_domains, }, 532 + { .compatible = "qcom,sc7280", .data = sc7280_domains, }, 533 + { .compatible = "qcom,sc8180x", .data = sc8180x_domains, }, 534 + { .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, }, 535 + { .compatible = "qcom,sda660", .data = sdm660_domains, }, 536 + { .compatible = "qcom,sdm660", .data = sdm660_domains, }, 537 + { .compatible = "qcom,sdm670", .data = sdm670_domains, }, 538 + { .compatible = "qcom,sdm845", .data = sdm845_domains, }, 539 + { .compatible = "qcom,sm4250", .data = sm6115_domains, }, 540 + { .compatible = "qcom,sm6115", .data = sm6115_domains, }, 541 + { .compatible = "qcom,sm6350", .data = sm6350_domains, }, 542 + { .compatible = "qcom,sm8150", .data = sm8150_domains, }, 543 + { .compatible = "qcom,sm8250", .data = sm8250_domains, }, 544 + { .compatible = "qcom,sm8350", .data = sm8350_domains, }, 545 + { .compatible = "qcom,sm8450", .data = sm8350_domains, }, 546 + { .compatible = "qcom,sm8550", .data = sm8550_domains, }, 547 + { .compatible = "qcom,sm8650", .data = sm8550_domains, }, 548 + {}, 549 + }; 550 + 551 + static void qcom_pdm_stop(struct qcom_pdm_data *data) 552 + { 553 + qcom_pdm_free_domains(data); 554 + 555 + /* 
The server is removed automatically */ 556 + qmi_handle_release(&data->handle); 557 + 558 + kfree(data); 559 + } 560 + 561 + static struct qcom_pdm_data *qcom_pdm_start(void) 562 + { 563 + const struct qcom_pdm_domain_data * const *domains; 564 + const struct of_device_id *match; 565 + struct qcom_pdm_data *data; 566 + struct device_node *root; 567 + int ret, i; 568 + 569 + root = of_find_node_by_path("/"); 570 + if (!root) 571 + return ERR_PTR(-ENODEV); 572 + 573 + match = of_match_node(qcom_pdm_domains, root); 574 + of_node_put(root); 575 + if (!match) { 576 + pr_notice("PDM: no support for the platform, userspace daemon might be required.\n"); 577 + return ERR_PTR(-ENODEV); 578 + } 579 + 580 + domains = match->data; 581 + if (!domains) { 582 + pr_debug("PDM: no domains\n"); 583 + return ERR_PTR(-ENODEV); 584 + } 585 + 586 + data = kzalloc(sizeof(*data), GFP_KERNEL); 587 + if (!data) 588 + return ERR_PTR(-ENOMEM); 589 + 590 + INIT_LIST_HEAD(&data->services); 591 + 592 + ret = qmi_handle_init(&data->handle, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, 593 + NULL, qcom_pdm_msg_handlers); 594 + if (ret) { 595 + kfree(data); 596 + return ERR_PTR(ret); 597 + } 598 + 599 + refcount_set(&data->refcnt, 1); 600 + 601 + for (i = 0; domains[i]; i++) { 602 + ret = qcom_pdm_add_domain(data, domains[i]); 603 + if (ret) 604 + goto err_stop; 605 + } 606 + 607 + ret = qmi_add_server(&data->handle, SERVREG_LOCATOR_SERVICE, 608 + SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE); 609 + if (ret) { 610 + pr_err("PDM: error adding server %d\n", ret); 611 + goto err_stop; 612 + } 613 + 614 + return data; 615 + 616 + err_stop: 617 + qcom_pdm_stop(data); 618 + 619 + return ERR_PTR(ret); 620 + } 621 + 622 + static int qcom_pdm_probe(struct auxiliary_device *auxdev, 623 + const struct auxiliary_device_id *id) 624 + 625 + { 626 + struct qcom_pdm_data *data; 627 + int ret = 0; 628 + 629 + mutex_lock(&qcom_pdm_mutex); 630 + 631 + if (!__qcom_pdm_data) { 632 + data = qcom_pdm_start(); 633 + 634 + if 
(IS_ERR(data)) 635 + ret = PTR_ERR(data); 636 + else 637 + __qcom_pdm_data = data; 638 + } 639 + 640 + auxiliary_set_drvdata(auxdev, __qcom_pdm_data); 641 + 642 + mutex_unlock(&qcom_pdm_mutex); 643 + 644 + return ret; 645 + } 646 + 647 + static void qcom_pdm_remove(struct auxiliary_device *auxdev) 648 + { 649 + struct qcom_pdm_data *data; 650 + 651 + data = auxiliary_get_drvdata(auxdev); 652 + if (!data) 653 + return; 654 + 655 + if (refcount_dec_and_mutex_lock(&data->refcnt, &qcom_pdm_mutex)) { 656 + __qcom_pdm_data = NULL; 657 + qcom_pdm_stop(data); 658 + mutex_unlock(&qcom_pdm_mutex); 659 + } 660 + } 661 + 662 + static const struct auxiliary_device_id qcom_pdm_table[] = { 663 + { .name = "qcom_common.pd-mapper" }, 664 + {}, 665 + }; 666 + MODULE_DEVICE_TABLE(auxiliary, qcom_pdm_table); 667 + 668 + static struct auxiliary_driver qcom_pdm_drv = { 669 + .name = "qcom-pdm-mapper", 670 + .id_table = qcom_pdm_table, 671 + .probe = qcom_pdm_probe, 672 + .remove = qcom_pdm_remove, 673 + }; 674 + module_auxiliary_driver(qcom_pdm_drv); 675 + 676 + MODULE_DESCRIPTION("Qualcomm Protection Domain Mapper"); 677 + MODULE_LICENSE("GPL");
+353
drivers/soc/qcom/qcom_pdr_msg.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2020 The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include <linux/module.h> 7 + #include <linux/soc/qcom/qmi.h> 8 + 9 + #include "pdr_internal.h" 10 + 11 + const struct qmi_elem_info servreg_location_entry_ei[] = { 12 + { 13 + .data_type = QMI_STRING, 14 + .elem_len = SERVREG_NAME_LENGTH + 1, 15 + .elem_size = sizeof(char), 16 + .array_type = NO_ARRAY, 17 + .tlv_type = 0, 18 + .offset = offsetof(struct servreg_location_entry, 19 + name), 20 + }, 21 + { 22 + .data_type = QMI_UNSIGNED_4_BYTE, 23 + .elem_len = 1, 24 + .elem_size = sizeof(u32), 25 + .array_type = NO_ARRAY, 26 + .tlv_type = 0, 27 + .offset = offsetof(struct servreg_location_entry, 28 + instance), 29 + }, 30 + { 31 + .data_type = QMI_UNSIGNED_1_BYTE, 32 + .elem_len = 1, 33 + .elem_size = sizeof(u8), 34 + .array_type = NO_ARRAY, 35 + .tlv_type = 0, 36 + .offset = offsetof(struct servreg_location_entry, 37 + service_data_valid), 38 + }, 39 + { 40 + .data_type = QMI_UNSIGNED_4_BYTE, 41 + .elem_len = 1, 42 + .elem_size = sizeof(u32), 43 + .array_type = NO_ARRAY, 44 + .tlv_type = 0, 45 + .offset = offsetof(struct servreg_location_entry, 46 + service_data), 47 + }, 48 + {} 49 + }; 50 + EXPORT_SYMBOL_GPL(servreg_location_entry_ei); 51 + 52 + const struct qmi_elem_info servreg_get_domain_list_req_ei[] = { 53 + { 54 + .data_type = QMI_STRING, 55 + .elem_len = SERVREG_NAME_LENGTH + 1, 56 + .elem_size = sizeof(char), 57 + .array_type = NO_ARRAY, 58 + .tlv_type = 0x01, 59 + .offset = offsetof(struct servreg_get_domain_list_req, 60 + service_name), 61 + }, 62 + { 63 + .data_type = QMI_OPT_FLAG, 64 + .elem_len = 1, 65 + .elem_size = sizeof(u8), 66 + .array_type = NO_ARRAY, 67 + .tlv_type = 0x10, 68 + .offset = offsetof(struct servreg_get_domain_list_req, 69 + domain_offset_valid), 70 + }, 71 + { 72 + .data_type = QMI_UNSIGNED_4_BYTE, 73 + .elem_len = 1, 74 + .elem_size = sizeof(u32), 75 + .array_type = NO_ARRAY, 76 + .tlv_type = 0x10, 
77 + .offset = offsetof(struct servreg_get_domain_list_req, 78 + domain_offset), 79 + }, 80 + {} 81 + }; 82 + EXPORT_SYMBOL_GPL(servreg_get_domain_list_req_ei); 83 + 84 + const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { 85 + { 86 + .data_type = QMI_STRUCT, 87 + .elem_len = 1, 88 + .elem_size = sizeof(struct qmi_response_type_v01), 89 + .array_type = NO_ARRAY, 90 + .tlv_type = 0x02, 91 + .offset = offsetof(struct servreg_get_domain_list_resp, 92 + resp), 93 + .ei_array = qmi_response_type_v01_ei, 94 + }, 95 + { 96 + .data_type = QMI_OPT_FLAG, 97 + .elem_len = 1, 98 + .elem_size = sizeof(u8), 99 + .array_type = NO_ARRAY, 100 + .tlv_type = 0x10, 101 + .offset = offsetof(struct servreg_get_domain_list_resp, 102 + total_domains_valid), 103 + }, 104 + { 105 + .data_type = QMI_UNSIGNED_2_BYTE, 106 + .elem_len = 1, 107 + .elem_size = sizeof(u16), 108 + .array_type = NO_ARRAY, 109 + .tlv_type = 0x10, 110 + .offset = offsetof(struct servreg_get_domain_list_resp, 111 + total_domains), 112 + }, 113 + { 114 + .data_type = QMI_OPT_FLAG, 115 + .elem_len = 1, 116 + .elem_size = sizeof(u8), 117 + .array_type = NO_ARRAY, 118 + .tlv_type = 0x11, 119 + .offset = offsetof(struct servreg_get_domain_list_resp, 120 + db_rev_count_valid), 121 + }, 122 + { 123 + .data_type = QMI_UNSIGNED_2_BYTE, 124 + .elem_len = 1, 125 + .elem_size = sizeof(u16), 126 + .array_type = NO_ARRAY, 127 + .tlv_type = 0x11, 128 + .offset = offsetof(struct servreg_get_domain_list_resp, 129 + db_rev_count), 130 + }, 131 + { 132 + .data_type = QMI_OPT_FLAG, 133 + .elem_len = 1, 134 + .elem_size = sizeof(u8), 135 + .array_type = NO_ARRAY, 136 + .tlv_type = 0x12, 137 + .offset = offsetof(struct servreg_get_domain_list_resp, 138 + domain_list_valid), 139 + }, 140 + { 141 + .data_type = QMI_DATA_LEN, 142 + .elem_len = 1, 143 + .elem_size = sizeof(u8), 144 + .array_type = NO_ARRAY, 145 + .tlv_type = 0x12, 146 + .offset = offsetof(struct servreg_get_domain_list_resp, 147 + domain_list_len), 148 + }, 149 + { 
150 + .data_type = QMI_STRUCT, 151 + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, 152 + .elem_size = sizeof(struct servreg_location_entry), 153 + .array_type = VAR_LEN_ARRAY, 154 + .tlv_type = 0x12, 155 + .offset = offsetof(struct servreg_get_domain_list_resp, 156 + domain_list), 157 + .ei_array = servreg_location_entry_ei, 158 + }, 159 + {} 160 + }; 161 + EXPORT_SYMBOL_GPL(servreg_get_domain_list_resp_ei); 162 + 163 + const struct qmi_elem_info servreg_register_listener_req_ei[] = { 164 + { 165 + .data_type = QMI_UNSIGNED_1_BYTE, 166 + .elem_len = 1, 167 + .elem_size = sizeof(u8), 168 + .array_type = NO_ARRAY, 169 + .tlv_type = 0x01, 170 + .offset = offsetof(struct servreg_register_listener_req, 171 + enable), 172 + }, 173 + { 174 + .data_type = QMI_STRING, 175 + .elem_len = SERVREG_NAME_LENGTH + 1, 176 + .elem_size = sizeof(char), 177 + .array_type = NO_ARRAY, 178 + .tlv_type = 0x02, 179 + .offset = offsetof(struct servreg_register_listener_req, 180 + service_path), 181 + }, 182 + {} 183 + }; 184 + EXPORT_SYMBOL_GPL(servreg_register_listener_req_ei); 185 + 186 + const struct qmi_elem_info servreg_register_listener_resp_ei[] = { 187 + { 188 + .data_type = QMI_STRUCT, 189 + .elem_len = 1, 190 + .elem_size = sizeof(struct qmi_response_type_v01), 191 + .array_type = NO_ARRAY, 192 + .tlv_type = 0x02, 193 + .offset = offsetof(struct servreg_register_listener_resp, 194 + resp), 195 + .ei_array = qmi_response_type_v01_ei, 196 + }, 197 + { 198 + .data_type = QMI_OPT_FLAG, 199 + .elem_len = 1, 200 + .elem_size = sizeof(u8), 201 + .array_type = NO_ARRAY, 202 + .tlv_type = 0x10, 203 + .offset = offsetof(struct servreg_register_listener_resp, 204 + curr_state_valid), 205 + }, 206 + { 207 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 208 + .elem_len = 1, 209 + .elem_size = sizeof(enum servreg_service_state), 210 + .array_type = NO_ARRAY, 211 + .tlv_type = 0x10, 212 + .offset = offsetof(struct servreg_register_listener_resp, 213 + curr_state), 214 + }, 215 + {} 216 + }; 217 + 
EXPORT_SYMBOL_GPL(servreg_register_listener_resp_ei); 218 + 219 + const struct qmi_elem_info servreg_restart_pd_req_ei[] = { 220 + { 221 + .data_type = QMI_STRING, 222 + .elem_len = SERVREG_NAME_LENGTH + 1, 223 + .elem_size = sizeof(char), 224 + .array_type = NO_ARRAY, 225 + .tlv_type = 0x01, 226 + .offset = offsetof(struct servreg_restart_pd_req, 227 + service_path), 228 + }, 229 + {} 230 + }; 231 + EXPORT_SYMBOL_GPL(servreg_restart_pd_req_ei); 232 + 233 + const struct qmi_elem_info servreg_restart_pd_resp_ei[] = { 234 + { 235 + .data_type = QMI_STRUCT, 236 + .elem_len = 1, 237 + .elem_size = sizeof(struct qmi_response_type_v01), 238 + .array_type = NO_ARRAY, 239 + .tlv_type = 0x02, 240 + .offset = offsetof(struct servreg_restart_pd_resp, 241 + resp), 242 + .ei_array = qmi_response_type_v01_ei, 243 + }, 244 + {} 245 + }; 246 + EXPORT_SYMBOL_GPL(servreg_restart_pd_resp_ei); 247 + 248 + const struct qmi_elem_info servreg_state_updated_ind_ei[] = { 249 + { 250 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 251 + .elem_len = 1, 252 + .elem_size = sizeof(u32), 253 + .array_type = NO_ARRAY, 254 + .tlv_type = 0x01, 255 + .offset = offsetof(struct servreg_state_updated_ind, 256 + curr_state), 257 + }, 258 + { 259 + .data_type = QMI_STRING, 260 + .elem_len = SERVREG_NAME_LENGTH + 1, 261 + .elem_size = sizeof(char), 262 + .array_type = NO_ARRAY, 263 + .tlv_type = 0x02, 264 + .offset = offsetof(struct servreg_state_updated_ind, 265 + service_path), 266 + }, 267 + { 268 + .data_type = QMI_UNSIGNED_2_BYTE, 269 + .elem_len = 1, 270 + .elem_size = sizeof(u16), 271 + .array_type = NO_ARRAY, 272 + .tlv_type = 0x03, 273 + .offset = offsetof(struct servreg_state_updated_ind, 274 + transaction_id), 275 + }, 276 + {} 277 + }; 278 + EXPORT_SYMBOL_GPL(servreg_state_updated_ind_ei); 279 + 280 + const struct qmi_elem_info servreg_set_ack_req_ei[] = { 281 + { 282 + .data_type = QMI_STRING, 283 + .elem_len = SERVREG_NAME_LENGTH + 1, 284 + .elem_size = sizeof(char), 285 + .array_type = NO_ARRAY, 286 
+ .tlv_type = 0x01, 287 + .offset = offsetof(struct servreg_set_ack_req, 288 + service_path), 289 + }, 290 + { 291 + .data_type = QMI_UNSIGNED_2_BYTE, 292 + .elem_len = 1, 293 + .elem_size = sizeof(u16), 294 + .array_type = NO_ARRAY, 295 + .tlv_type = 0x02, 296 + .offset = offsetof(struct servreg_set_ack_req, 297 + transaction_id), 298 + }, 299 + {} 300 + }; 301 + EXPORT_SYMBOL_GPL(servreg_set_ack_req_ei); 302 + 303 + const struct qmi_elem_info servreg_set_ack_resp_ei[] = { 304 + { 305 + .data_type = QMI_STRUCT, 306 + .elem_len = 1, 307 + .elem_size = sizeof(struct qmi_response_type_v01), 308 + .array_type = NO_ARRAY, 309 + .tlv_type = 0x02, 310 + .offset = offsetof(struct servreg_set_ack_resp, 311 + resp), 312 + .ei_array = qmi_response_type_v01_ei, 313 + }, 314 + {} 315 + }; 316 + EXPORT_SYMBOL_GPL(servreg_set_ack_resp_ei); 317 + 318 + const struct qmi_elem_info servreg_loc_pfr_req_ei[] = { 319 + { 320 + .data_type = QMI_STRING, 321 + .elem_len = SERVREG_NAME_LENGTH + 1, 322 + .elem_size = sizeof(char), 323 + .array_type = VAR_LEN_ARRAY, 324 + .tlv_type = 0x01, 325 + .offset = offsetof(struct servreg_loc_pfr_req, service) 326 + }, 327 + { 328 + .data_type = QMI_STRING, 329 + .elem_len = SERVREG_NAME_LENGTH + 1, 330 + .elem_size = sizeof(char), 331 + .array_type = VAR_LEN_ARRAY, 332 + .tlv_type = 0x02, 333 + .offset = offsetof(struct servreg_loc_pfr_req, reason) 334 + }, 335 + {} 336 + }; 337 + EXPORT_SYMBOL_GPL(servreg_loc_pfr_req_ei); 338 + 339 + const struct qmi_elem_info servreg_loc_pfr_resp_ei[] = { 340 + { 341 + .data_type = QMI_STRUCT, 342 + .elem_len = 1, 343 + .elem_size = sizeof_field(struct servreg_loc_pfr_resp, rsp), 344 + .tlv_type = 0x02, 345 + .offset = offsetof(struct servreg_loc_pfr_resp, rsp), 346 + .ei_array = qmi_response_type_v01_ei, 347 + }, 348 + {} 349 + }; 350 + EXPORT_SYMBOL_GPL(servreg_loc_pfr_resp_ei); 351 + 352 + MODULE_LICENSE("GPL"); 353 + MODULE_DESCRIPTION("Qualcomm Protection Domain messages data");
+4 -3
drivers/soc/qcom/rpmh-rsc.c
··· 646 646 { 647 647 struct tcs_group *tcs; 648 648 int tcs_id; 649 - unsigned long flags; 649 + 650 + might_sleep(); 650 651 651 652 tcs = get_tcs_for_msg(drv, msg); 652 653 if (IS_ERR(tcs)) 653 654 return PTR_ERR(tcs); 654 655 655 - spin_lock_irqsave(&drv->lock, flags); 656 + spin_lock_irq(&drv->lock); 656 657 657 658 /* Wait forever for a free tcs. It better be there eventually! */ 658 659 wait_event_lock_irq(drv->tcs_wait, ··· 671 670 write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0); 672 671 enable_tcs_irq(drv, tcs_id, true); 673 672 } 674 - spin_unlock_irqrestore(&drv->lock, flags); 673 + spin_unlock_irq(&drv->lock); 675 674 676 675 /* 677 676 * These two can be done after the lock is released because:
-1
drivers/soc/qcom/rpmh.c
··· 183 183 } 184 184 185 185 if (state == RPMH_ACTIVE_ONLY_STATE) { 186 - WARN_ON(irqs_disabled()); 187 186 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); 188 187 } else { 189 188 /* Clean up our call by spoofing tx_done */
+33
drivers/soc/qcom/smem.c
··· 795 795 } 796 796 EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id); 797 797 798 + /** 799 + * qcom_smem_get_feature_code() - return the feature code 800 + * @code: On success, return the feature code here. 801 + * 802 + * Look up the feature code identifier from SMEM and return it. 803 + * 804 + * Return: 0 on success, negative errno on failure. 805 + */ 806 + int qcom_smem_get_feature_code(u32 *code) 807 + { 808 + struct socinfo *info; 809 + u32 raw_code; 810 + 811 + info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL); 812 + if (IS_ERR(info)) 813 + return PTR_ERR(info); 814 + 815 + /* This only makes sense for socinfo >= 16 */ 816 + if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16)) 817 + return -EOPNOTSUPP; 818 + 819 + raw_code = __le32_to_cpu(info->feature_code); 820 + 821 + /* Ensure the value makes sense */ 822 + if (raw_code > SOCINFO_FC_INT_MAX) 823 + raw_code = SOCINFO_FC_UNKNOWN; 824 + 825 + *code = raw_code; 826 + 827 + return 0; 828 + } 829 + EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code); 830 + 798 831 static int qcom_smem_get_sbl_version(struct qcom_smem *smem) 799 832 { 800 833 struct smem_header *header;
+10 -1
drivers/soc/qcom/smp2p.c
··· 16 16 #include <linux/platform_device.h> 17 17 #include <linux/pm_wakeirq.h> 18 18 #include <linux/regmap.h> 19 + #include <linux/seq_file.h> 19 20 #include <linux/soc/qcom/smem.h> 20 21 #include <linux/soc/qcom/smem_state.h> 21 22 #include <linux/spinlock.h> ··· 354 353 return 0; 355 354 } 356 355 356 + static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p) 357 + { 358 + struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd); 359 + 360 + seq_printf(p, " %8s", dev_name(entry->smp2p->dev)); 361 + } 362 + 357 363 static struct irq_chip smp2p_irq_chip = { 358 364 .name = "smp2p", 359 365 .irq_mask = smp2p_mask_irq, 360 366 .irq_unmask = smp2p_unmask_irq, 361 367 .irq_set_type = smp2p_set_irq_type, 368 + .irq_print_chip = smp2p_irq_print_chip, 362 369 }; 363 370 364 371 static int smp2p_irq_map(struct irq_domain *d, ··· 626 617 ret = devm_request_threaded_irq(&pdev->dev, irq, 627 618 NULL, qcom_smp2p_intr, 628 619 IRQF_ONESHOT, 629 - "smp2p", (void *)smp2p); 620 + NULL, (void *)smp2p); 630 621 if (ret) { 631 622 dev_err(&pdev->dev, "failed to request interrupt\n"); 632 623 goto unwind_interfaces;
+50 -1
drivers/soc/qcom/smsm.c
··· 5 5 */ 6 6 7 7 #include <linux/interrupt.h> 8 + #include <linux/mailbox_client.h> 8 9 #include <linux/mfd/syscon.h> 9 10 #include <linux/module.h> 10 11 #include <linux/of_irq.h> ··· 72 71 * @lock: spinlock for read-modify-write of the outgoing state 73 72 * @entries: context for each of the entries 74 73 * @hosts: context for each of the hosts 74 + * @mbox_client: mailbox client handle 75 75 */ 76 76 struct qcom_smsm { 77 77 struct device *dev; ··· 90 88 91 89 struct smsm_entry *entries; 92 90 struct smsm_host *hosts; 91 + 92 + struct mbox_client mbox_client; 93 93 }; 94 94 95 95 /** ··· 124 120 * @ipc_regmap: regmap for outgoing interrupt 125 121 * @ipc_offset: offset in @ipc_regmap for outgoing interrupt 126 122 * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt 123 + * @mbox_chan: apcs ipc mailbox channel handle 127 124 */ 128 125 struct smsm_host { 129 126 struct regmap *ipc_regmap; 130 127 int ipc_offset; 131 128 int ipc_bit; 129 + 130 + struct mbox_chan *mbox_chan; 132 131 }; 133 132 134 133 /** ··· 179 172 hostp = &smsm->hosts[host]; 180 173 181 174 val = readl(smsm->subscription + host); 182 - if (val & changes && hostp->ipc_regmap) { 175 + if (!(val & changes)) 176 + continue; 177 + 178 + if (hostp->mbox_chan) { 179 + mbox_send_message(hostp->mbox_chan, NULL); 180 + mbox_client_txdone(hostp->mbox_chan, 0); 181 + } else if (hostp->ipc_regmap) { 183 182 regmap_write(hostp->ipc_regmap, 184 183 hostp->ipc_offset, 185 184 BIT(hostp->ipc_bit)); ··· 366 353 }; 367 354 368 355 /** 356 + * smsm_parse_mbox() - requests an mbox channel 357 + * @smsm: smsm driver context 358 + * @host_id: index of the remote host to be resolved 359 + * 360 + * Requests the desired channel using the mbox interface which is needed for 361 + * sending the outgoing interrupts to a remove hosts - identified by @host_id. 
362 + */ 363 + static int smsm_parse_mbox(struct qcom_smsm *smsm, unsigned int host_id) 364 + { 365 + struct smsm_host *host = &smsm->hosts[host_id]; 366 + int ret = 0; 367 + 368 + host->mbox_chan = mbox_request_channel(&smsm->mbox_client, host_id); 369 + if (IS_ERR(host->mbox_chan)) { 370 + ret = PTR_ERR(host->mbox_chan); 371 + host->mbox_chan = NULL; 372 + } 373 + 374 + return ret; 375 + } 376 + 377 + /** 369 378 * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property 370 379 * @smsm: smsm driver context 371 380 * @host_id: index of the remote host to be resolved ··· 556 521 "qcom,local-host", 557 522 &smsm->local_host); 558 523 524 + smsm->mbox_client.dev = &pdev->dev; 525 + smsm->mbox_client.knows_txdone = true; 526 + 559 527 /* Parse the host properties */ 560 528 for (id = 0; id < smsm->num_hosts; id++) { 529 + /* Try using mbox interface first, otherwise fall back to syscon */ 530 + ret = smsm_parse_mbox(smsm, id); 531 + if (!ret) 532 + continue; 533 + 561 534 ret = smsm_parse_ipc(smsm, id); 562 535 if (ret < 0) 563 536 goto out_put; ··· 652 609 653 610 qcom_smem_state_unregister(smsm->state); 654 611 out_put: 612 + for (id = 0; id < smsm->num_hosts; id++) 613 + mbox_free_channel(smsm->hosts[id].mbox_chan); 614 + 655 615 of_node_put(local_node); 656 616 return ret; 657 617 } ··· 667 621 for (id = 0; id < smsm->num_entries; id++) 668 622 if (smsm->entries[id].domain) 669 623 irq_domain_remove(smsm->entries[id].domain); 624 + 625 + for (id = 0; id < smsm->num_hosts; id++) 626 + mbox_free_channel(smsm->hosts[id].mbox_chan); 670 627 671 628 qcom_smem_state_unregister(smsm->state); 672 629 }
+4 -9
drivers/soc/qcom/socinfo.c
··· 21 21 22 22 #include <dt-bindings/arm/qcom,ids.h> 23 23 24 - /* 25 - * SoC version type with major number in the upper 16 bits and minor 26 - * number in the lower 16 bits. 27 - */ 28 - #define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff) 29 - #define SOCINFO_MINOR(ver) ((ver) & 0xffff) 30 - #define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff)) 31 - 32 24 /* Helper macros to create soc_id table */ 33 25 #define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id) 34 26 #define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name) ··· 125 133 [72] = "PMR735D", 126 134 [73] = "PM8550", 127 135 [74] = "PMK8550", 128 - [82] = "SMB2360", 136 + [82] = "PMC8380", 137 + [83] = "SMB2360", 129 138 }; 130 139 131 140 struct socinfo_params { ··· 341 348 { qcom_board_id(SDA630) }, 342 349 { qcom_board_id(MSM8905) }, 343 350 { qcom_board_id(SDX202) }, 351 + { qcom_board_id(SDM670) }, 344 352 { qcom_board_id(SDM450) }, 345 353 { qcom_board_id(SM8150) }, 346 354 { qcom_board_id(SDA845) }, ··· 439 445 { qcom_board_id(QCS8550) }, 440 446 { qcom_board_id(QCM8550) }, 441 447 { qcom_board_id(IPQ5300) }, 448 + { qcom_board_id(IPQ5321) }, 442 449 }; 443 450 444 451 static const char *socinfo_machine(struct device *dev, unsigned int id)
+1
drivers/soc/qcom/spm.c
··· 572 572 } 573 573 arch_initcall(qcom_spm_init); 574 574 575 + MODULE_DESCRIPTION("Qualcomm Subsystem Power Manager (SPM)"); 575 576 MODULE_LICENSE("GPL v2");
+2
include/dt-bindings/arm/qcom,ids.h
··· 175 175 #define QCOM_ID_SDA630 327 176 176 #define QCOM_ID_MSM8905 331 177 177 #define QCOM_ID_SDX202 333 178 + #define QCOM_ID_SDM670 336 178 179 #define QCOM_ID_SDM450 338 179 180 #define QCOM_ID_SM8150 339 180 181 #define QCOM_ID_SDA845 341 ··· 273 272 #define QCOM_ID_QCS8550 603 274 273 #define QCOM_ID_QCM8550 604 275 274 #define QCOM_ID_IPQ5300 624 275 + #define QCOM_ID_IPQ5321 650 276 276 277 277 /* 278 278 * The board type and revision information, used by Qualcomm bootloaders and
+4 -4
include/linux/firmware/qcom/qcom_qseecom.h
··· 73 73 /** 74 74 * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app. 75 75 * @client: The QSEECOM client associated with the target app. 76 - * @req: DMA address of the request buffer sent to the app. 76 + * @req: Request buffer sent to the app (must be TZ memory). 77 77 * @req_size: Size of the request buffer. 78 - * @rsp: DMA address of the response buffer, written to by the app. 78 + * @rsp: Response buffer, written to by the app (must be TZ memory). 79 79 * @rsp_size: Size of the response buffer. 80 80 * 81 81 * Sends a request to the QSEE app associated with the given client and read ··· 90 90 * Return: Zero on success, nonzero on failure. 91 91 */ 92 92 static inline int qcom_qseecom_app_send(struct qseecom_client *client, 93 - dma_addr_t req, size_t req_size, 94 - dma_addr_t rsp, size_t rsp_size) 93 + void *req, size_t req_size, 94 + void *rsp, size_t rsp_size) 95 95 { 96 96 return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size); 97 97 }
+33 -4
include/linux/firmware/qcom/qcom_scm.h
··· 115 115 int qcom_scm_lmh_profile_change(u32 profile_id); 116 116 bool qcom_scm_lmh_dcvsh_available(void); 117 117 118 + /* 119 + * Request TZ to program set of access controlled registers necessary 120 + * irrespective of any features 121 + */ 122 + #define QCOM_SCM_GPU_ALWAYS_EN_REQ BIT(0) 123 + /* 124 + * Request TZ to program BCL id to access controlled register when BCL is 125 + * enabled 126 + */ 127 + #define QCOM_SCM_GPU_BCL_EN_REQ BIT(1) 128 + /* 129 + * Request TZ to program set of access controlled register for CLX feature 130 + * when enabled 131 + */ 132 + #define QCOM_SCM_GPU_CLX_EN_REQ BIT(2) 133 + /* 134 + * Request TZ to program tsense ids to access controlled registers for reading 135 + * gpu temperature sensors 136 + */ 137 + #define QCOM_SCM_GPU_TSENSE_EN_REQ BIT(3) 138 + 139 + int qcom_scm_gpu_init_regs(u32 gpu_req); 140 + 141 + int qcom_scm_shm_bridge_enable(void); 142 + int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags, 143 + u64 ipfn_and_s_perm_flags, u64 size_and_flags, 144 + u64 ns_vmids, u64 *handle); 145 + int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle); 146 + 118 147 #ifdef CONFIG_QCOM_QSEECOM 119 148 120 149 int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id); 121 - int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, 122 - dma_addr_t rsp, size_t rsp_size); 150 + int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, 151 + void *rsp, size_t rsp_size); 123 152 124 153 #else /* CONFIG_QCOM_QSEECOM */ 125 154 ··· 158 129 } 159 130 160 131 static inline int qcom_scm_qseecom_app_send(u32 app_id, 161 - dma_addr_t req, size_t req_size, 162 - dma_addr_t rsp, size_t rsp_size) 132 + void *req, size_t req_size, 133 + void *rsp, size_t rsp_size) 163 134 { 164 135 return -EINVAL; 165 136 }
+56
include/linux/firmware/qcom/qcom_tzmem.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2023-2024 Linaro Ltd. 4 + */ 5 + 6 + #ifndef __QCOM_TZMEM_H 7 + #define __QCOM_TZMEM_H 8 + 9 + #include <linux/cleanup.h> 10 + #include <linux/gfp.h> 11 + #include <linux/types.h> 12 + 13 + struct device; 14 + struct qcom_tzmem_pool; 15 + 16 + /** 17 + * enum qcom_tzmem_policy - Policy for pool growth. 18 + */ 19 + enum qcom_tzmem_policy { 20 + /**< Static pool, never grow above initial size. */ 21 + QCOM_TZMEM_POLICY_STATIC = 1, 22 + /**< When out of memory, add increment * current size of memory. */ 23 + QCOM_TZMEM_POLICY_MULTIPLIER, 24 + /**< When out of memory add as much as is needed until max_size. */ 25 + QCOM_TZMEM_POLICY_ON_DEMAND, 26 + }; 27 + 28 + /** 29 + * struct qcom_tzmem_pool_config - TZ memory pool configuration. 30 + * @initial_size: Number of bytes to allocate for the pool during its creation. 31 + * @policy: Pool size growth policy. 32 + * @increment: Used with policies that allow pool growth. 33 + * @max_size: Size above which the pool will never grow. 34 + */ 35 + struct qcom_tzmem_pool_config { 36 + size_t initial_size; 37 + enum qcom_tzmem_policy policy; 38 + size_t increment; 39 + size_t max_size; 40 + }; 41 + 42 + struct qcom_tzmem_pool * 43 + qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config); 44 + void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool); 45 + struct qcom_tzmem_pool * 46 + devm_qcom_tzmem_pool_new(struct device *dev, 47 + const struct qcom_tzmem_pool_config *config); 48 + 49 + void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp); 50 + void qcom_tzmem_free(void *ptr); 51 + 52 + DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T)) 53 + 54 + phys_addr_t qcom_tzmem_to_phys(void *ptr); 55 + 56 + #endif /* __QCOM_TZMEM_H */
+3 -1
include/linux/soc/qcom/llcc-qcom.h
··· 115 115 /** 116 116 * struct llcc_drv_data - Data associated with the llcc driver 117 117 * @regmaps: regmaps associated with the llcc device 118 - * @bcast_regmap: regmap associated with llcc broadcast offset 118 + * @bcast_regmap: regmap associated with llcc broadcast OR offset 119 + * @bcast_and_regmap: regmap associated with llcc broadcast AND offset 119 120 * @cfg: pointer to the data structure for slice configuration 120 121 * @edac_reg_offset: Offset of the LLCC EDAC registers 121 122 * @lock: mutex associated with each slice ··· 130 129 struct llcc_drv_data { 131 130 struct regmap **regmaps; 132 131 struct regmap *bcast_regmap; 132 + struct regmap *bcast_and_regmap; 133 133 const struct llcc_slice_config *cfg; 134 134 const struct llcc_edac_reg_offset *edac_reg_offset; 135 135 struct mutex lock;
+1
include/linux/soc/qcom/smem.h
··· 13 13 phys_addr_t qcom_smem_virt_to_phys(void *p); 14 14 15 15 int qcom_smem_get_soc_id(u32 *id); 16 + int qcom_smem_get_feature_code(u32 *code); 16 17 17 18 #endif
+34
include/linux/soc/qcom/socinfo.h
··· 3 3 #ifndef __QCOM_SOCINFO_H__ 4 4 #define __QCOM_SOCINFO_H__ 5 5 6 + #include <linux/types.h> 7 + 6 8 /* 7 9 * SMEM item id, used to acquire handles to respective 8 10 * SMEM region. ··· 13 11 14 12 #define SMEM_SOCINFO_BUILD_ID_LENGTH 32 15 13 #define SMEM_SOCINFO_CHIP_ID_LENGTH 32 14 + 15 + /* 16 + * SoC version type with major number in the upper 16 bits and minor 17 + * number in the lower 16 bits. 18 + */ 19 + #define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff) 20 + #define SOCINFO_MINOR(ver) ((ver) & 0xffff) 21 + #define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff)) 16 22 17 23 /* Socinfo SMEM item structure */ 18 24 struct socinfo { ··· 83 73 __le32 boot_cluster; 84 74 __le32 boot_core; 85 75 }; 76 + 77 + /* Feature codes */ 78 + enum qcom_socinfo_feature_code { 79 + /* External feature codes */ 80 + SOCINFO_FC_UNKNOWN = 0x0, 81 + SOCINFO_FC_AA, 82 + SOCINFO_FC_AB, 83 + SOCINFO_FC_AC, 84 + SOCINFO_FC_AD, 85 + SOCINFO_FC_AE, 86 + SOCINFO_FC_AF, 87 + SOCINFO_FC_AG, 88 + SOCINFO_FC_AH, 89 + }; 90 + 91 + /* Internal feature codes */ 92 + /* Valid values: 0 <= n <= 0xf */ 93 + #define SOCINFO_FC_Yn(n) (0xf1 + (n)) 94 + #define SOCINFO_FC_INT_MAX SOCINFO_FC_Yn(0xf) 95 + 96 + /* Product codes */ 97 + #define SOCINFO_PC_UNKNOWN 0 98 + #define SOCINFO_PCn(n) ((n) + 1) 99 + #define SOCINFO_PC_RESERVE (BIT(31) - 1) 86 100 87 101 #endif