Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

firmware: arm_scmi: Support 'reg-io-width' property for shared memory

Some shared memory areas might only support a certain access width,
such as 32-bit. memcpy_{from,to}_io() does not adhere to such a
constraint, at least on ARM64, where it can make both 8-bit and 64-bit
accesses to such memory.

Update the shmem layer to support reading from and writing to such
shared memory area using the specified I/O width in the Device Tree. The
various transport layers making use of the shmem.c code are updated
accordingly to pass the I/O accessors that they store.

Signed-off-by: Florian Fainelli <florian.fainelli@broadcom.com>
Message-Id: <20240827182450.3608307-3-florian.fainelli@broadcom.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>

authored by

Florian Fainelli and committed by
Sudeep Holla
2cd7f3db 14b2157a

+124 -21
+28 -4
drivers/firmware/arm_scmi/common.h
··· 311 311 MSG_MBOX_SPURIOUS = -5, 312 312 }; 313 313 314 + /* Used for compactness and signature validation of the function pointers being 315 + * passed. 316 + */ 317 + typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from, 318 + size_t count); 319 + typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from, 320 + size_t count); 321 + 322 + /** 323 + * struct scmi_shmem_io_ops - I/O operations to read from/write to 324 + * Shared Memory 325 + * 326 + * @toio: Copy data to the shared memory area 327 + * @fromio: Copy data from the shared memory area 328 + */ 329 + struct scmi_shmem_io_ops { 330 + shmem_copy_fromio_t fromio; 331 + shmem_copy_toio_t toio; 332 + }; 333 + 314 334 /* shmem related declarations */ 315 335 struct scmi_shared_mem; 316 336 ··· 351 331 struct scmi_shared_mem_operations { 352 332 void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem, 353 333 struct scmi_xfer *xfer, 354 - struct scmi_chan_info *cinfo); 334 + struct scmi_chan_info *cinfo, 335 + shmem_copy_toio_t toio); 355 336 u32 (*read_header)(struct scmi_shared_mem __iomem *shmem); 356 337 357 338 void (*fetch_response)(struct scmi_shared_mem __iomem *shmem, 358 - struct scmi_xfer *xfer); 339 + struct scmi_xfer *xfer, 340 + shmem_copy_fromio_t fromio); 359 341 void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem, 360 - size_t max_len, struct scmi_xfer *xfer); 342 + size_t max_len, struct scmi_xfer *xfer, 343 + shmem_copy_fromio_t fromio); 361 344 void (*clear_channel)(struct scmi_shared_mem __iomem *shmem); 362 345 bool (*poll_done)(struct scmi_shared_mem __iomem *shmem, 363 346 struct scmi_xfer *xfer); ··· 368 345 bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem); 369 346 void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo, 370 347 struct device *dev, 371 - bool tx, struct resource *res); 348 + bool tx, struct resource *res, 349 + struct scmi_shmem_io_ops **ops); 372 350 }; 373 351 374 352 const struct 
scmi_shared_mem_operations *scmi_shared_mem_operations_get(void);
+71 -7
drivers/firmware/arm_scmi/shmem.c
··· 34 34 u8 msg_payload[]; 35 35 }; 36 36 37 + static inline void shmem_memcpy_fromio32(void *to, 38 + const void __iomem *from, 39 + size_t count) 40 + { 41 + WARN_ON(!IS_ALIGNED((unsigned long)from, 4) || 42 + !IS_ALIGNED((unsigned long)to, 4) || 43 + count % 4); 44 + 45 + __ioread32_copy(to, from, count / 4); 46 + } 47 + 48 + static inline void shmem_memcpy_toio32(void __iomem *to, 49 + const void *from, 50 + size_t count) 51 + { 52 + WARN_ON(!IS_ALIGNED((unsigned long)to, 4) || 53 + !IS_ALIGNED((unsigned long)from, 4) || 54 + count % 4); 55 + 56 + __iowrite32_copy(to, from, count / 4); 57 + } 58 + 59 + static struct scmi_shmem_io_ops shmem_io_ops32 = { 60 + .fromio = shmem_memcpy_fromio32, 61 + .toio = shmem_memcpy_toio32, 62 + }; 63 + 64 + /* Wrappers are needed for proper memcpy_{from,to}_io expansion by the 65 + * pre-processor. 66 + */ 67 + static inline void shmem_memcpy_fromio(void *to, 68 + const void __iomem *from, 69 + size_t count) 70 + { 71 + memcpy_fromio(to, from, count); 72 + } 73 + 74 + static inline void shmem_memcpy_toio(void __iomem *to, 75 + const void *from, 76 + size_t count) 77 + { 78 + memcpy_toio(to, from, count); 79 + } 80 + 81 + static struct scmi_shmem_io_ops shmem_io_ops_default = { 82 + .fromio = shmem_memcpy_fromio, 83 + .toio = shmem_memcpy_toio, 84 + }; 85 + 37 86 static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, 38 87 struct scmi_xfer *xfer, 39 - struct scmi_chan_info *cinfo) 88 + struct scmi_chan_info *cinfo, 89 + shmem_copy_toio_t copy_toio) 40 90 { 41 91 ktime_t stop; 42 92 ··· 123 73 iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); 124 74 iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); 125 75 if (xfer->tx.buf) 126 - memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); 76 + copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); 127 77 } 128 78 129 79 static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) ··· 132 82 } 133 83 134 84 static void 
shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, 135 - struct scmi_xfer *xfer) 85 + struct scmi_xfer *xfer, 86 + shmem_copy_fromio_t copy_fromio) 136 87 { 137 88 size_t len = ioread32(&shmem->length); 138 89 ··· 142 91 xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); 143 92 144 93 /* Take a copy to the rx buffer.. */ 145 - memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); 94 + copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); 146 95 } 147 96 148 97 static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, 149 - size_t max_len, struct scmi_xfer *xfer) 98 + size_t max_len, struct scmi_xfer *xfer, 99 + shmem_copy_fromio_t copy_fromio) 150 100 { 151 101 size_t len = ioread32(&shmem->length); 152 102 ··· 155 103 xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); 156 104 157 105 /* Take a copy to the rx buffer.. */ 158 - memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); 106 + copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); 159 107 } 160 108 161 109 static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem) ··· 191 139 192 140 static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo, 193 141 struct device *dev, bool tx, 194 - struct resource *res) 142 + struct resource *res, 143 + struct scmi_shmem_io_ops **ops) 195 144 { 196 145 struct device_node *shmem __free(device_node); 197 146 const char *desc = tx ? 
"Tx" : "Rx"; ··· 201 148 struct resource lres = {}; 202 149 resource_size_t size; 203 150 void __iomem *addr; 151 + u32 reg_io_width; 204 152 205 153 shmem = of_parse_phandle(cdev->of_node, "shmem", idx); 206 154 if (!shmem) ··· 225 171 if (!addr) { 226 172 dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); 227 173 return IOMEM_ERR_PTR(-EADDRNOTAVAIL); 174 + } 175 + 176 + of_property_read_u32(shmem, "reg-io-width", &reg_io_width); 177 + switch (reg_io_width) { 178 + case 4: 179 + *ops = &shmem_io_ops32; 180 + break; 181 + default: 182 + *ops = &shmem_io_ops_default; 183 + break; 228 184 } 229 185 230 186 return addr;
+9 -4
drivers/firmware/arm_scmi/transports/mailbox.c
··· 26 26 * @cinfo: SCMI channel info 27 27 * @shmem: Transmit/Receive shared memory area 28 28 * @chan_lock: Lock that prevents multiple xfers from being queued 29 + * @io_ops: Transport specific I/O operations 29 30 */ 30 31 struct scmi_mailbox { 31 32 struct mbox_client cl; ··· 36 35 struct scmi_chan_info *cinfo; 37 36 struct scmi_shared_mem __iomem *shmem; 38 37 struct mutex chan_lock; 38 + struct scmi_shmem_io_ops *io_ops; 39 39 }; 40 40 41 41 #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) ··· 47 45 { 48 46 struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); 49 47 50 - core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo); 48 + core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo, 49 + smbox->io_ops->toio); 51 50 } 52 51 53 52 static void rx_callback(struct mbox_client *cl, void *m) ··· 200 197 if (!smbox) 201 198 return -ENOMEM; 202 199 203 - smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL); 200 + smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL, 201 + &smbox->io_ops); 204 202 if (IS_ERR(smbox->shmem)) 205 203 return PTR_ERR(smbox->shmem); 206 204 ··· 309 305 { 310 306 struct scmi_mailbox *smbox = cinfo->transport_info; 311 307 312 - core->shmem->fetch_response(smbox->shmem, xfer); 308 + core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio); 313 309 } 314 310 315 311 static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, ··· 317 313 { 318 314 struct scmi_mailbox *smbox = cinfo->transport_info; 319 315 320 - core->shmem->fetch_notification(smbox->shmem, max_len, xfer); 316 + core->shmem->fetch_notification(smbox->shmem, max_len, xfer, 317 + smbox->io_ops->fromio); 321 318 } 322 319 323 320 static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
+8 -3
drivers/firmware/arm_scmi/transports/optee.c
··· 114 114 * @req.shmem: Virtual base address of the shared memory 115 115 * @req.msg: Shared memory protocol handle for SCMI request and 116 116 * synchronous response 117 + * @io_ops: Transport specific I/O operations 117 118 * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem 118 119 * @link: Reference in agent's channel list 119 120 */ ··· 129 128 struct scmi_shared_mem __iomem *shmem; 130 129 struct scmi_msg_payld *msg; 131 130 } req; 131 + struct scmi_shmem_io_ops *io_ops; 132 132 struct tee_shm *tee_shm; 133 133 struct list_head link; 134 134 }; ··· 352 350 static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, 353 351 struct scmi_optee_channel *channel) 354 352 { 355 - channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL); 353 + channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL, 354 + &channel->io_ops); 356 355 if (IS_ERR(channel->req.shmem)) 357 356 return PTR_ERR(channel->req.shmem); 358 357 ··· 468 465 ret = invoke_process_msg_channel(channel, 469 466 core->msg->command_size(xfer)); 470 467 } else { 471 - core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo); 468 + core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo, 469 + channel->io_ops->toio); 472 470 ret = invoke_process_smt_channel(channel); 473 471 } 474 472 ··· 488 484 core->msg->fetch_response(channel->req.msg, 489 485 channel->rx_len, xfer); 490 486 else 491 - core->shmem->fetch_response(channel->req.shmem, xfer); 487 + core->shmem->fetch_response(channel->req.shmem, xfer, 488 + channel->io_ops->fromio); 492 489 } 493 490 494 491 static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+8 -3
drivers/firmware/arm_scmi/transports/smc.c
··· 45 45 * @irq: An optional IRQ for completion 46 46 * @cinfo: SCMI channel info 47 47 * @shmem: Transmit/Receive shared memory area 48 + * @io_ops: Transport specific I/O operations 48 49 * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. 49 50 * Used when NOT operating in atomic mode. 50 51 * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. ··· 61 60 int irq; 62 61 struct scmi_chan_info *cinfo; 63 62 struct scmi_shared_mem __iomem *shmem; 63 + struct scmi_shmem_io_ops *io_ops; 64 64 /* Protect access to shmem area */ 65 65 struct mutex shmem_lock; 66 66 #define INFLIGHT_NONE MSG_TOKEN_MAX ··· 146 144 if (!scmi_info) 147 145 return -ENOMEM; 148 146 149 - scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res); 147 + scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res, 148 + &scmi_info->io_ops); 150 149 if (IS_ERR(scmi_info->shmem)) 151 150 return PTR_ERR(scmi_info->shmem); 152 151 ··· 232 229 */ 233 230 smc_channel_lock_acquire(scmi_info, xfer); 234 231 235 - core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo); 232 + core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo, 233 + scmi_info->io_ops->toio); 236 234 237 235 if (scmi_info->cap_id != ULONG_MAX) 238 236 arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0, ··· 257 253 { 258 254 struct scmi_smc *scmi_info = cinfo->transport_info; 259 255 260 - core->shmem->fetch_response(scmi_info->shmem, xfer); 256 + core->shmem->fetch_response(scmi_info->shmem, xfer, 257 + scmi_info->io_ops->fromio); 261 258 } 262 259 263 260 static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,