Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'qcom-soc-for-4.4' of git://codeaurora.org/quic/kernel/agross-msm into next/drivers

Pull "Qualcomm ARM Based SoC Updates for 4.4" from Andy Gross:

* Implement id_table driver matching in SMD
* Avoid NULL pointer exception on remove of SMEM
* Reorder SMEM/SMD configs
* Make qcom_smem_get() return a pointer
* Handle big endian CPUs correctly in SMEM
* Represent SMD channel layout in structures
* Use __iowrite32_copy() in SMD
* Remove use of VLAIs in SMD
* Handle big endian CPUs correctly in SMD/RPM
* Handle big endian CPUs correctly in SMD
* Reject sending SMD packets that are too large
* Fix endianness issue in SCM __qcom_scm_is_call_available
* Add missing prototype for qcom_scm_is_available()
* Correct SMEM items for upper channels
* Use architecture level to build SCM correctly
* Delete unneeded of_node_put in SMD
* Correct active/sleep state flagging in SMD/RPM
* Move RPM message ram out of SMEM DT node

* tag 'qcom-soc-for-4.4' of git://codeaurora.org/quic/kernel/agross-msm:
soc: qcom: smem: Move RPM message ram out of smem DT node
soc: qcom: smd-rpm: Correct the active vs sleep state flagging
soc: qcom: smd: delete unneeded of_node_put
firmware: qcom-scm: build for correct architecture level
soc: qcom: smd: Correct SMEM items for upper channels
qcom-scm: add missing prototype for qcom_scm_is_available()
qcom-scm: fix endianess issue in __qcom_scm_is_call_available
soc: qcom: smd: Reject send of too big packets
soc: qcom: smd: Handle big endian CPUs
soc: qcom: smd_rpm: Handle big endian CPUs
soc: qcom: smd: Remove use of VLAIS
soc: qcom: smd: Use __iowrite32_copy() instead of open-coding it
soc: qcom: smd: Represent channel layout in structures
soc: qcom: smem: Handle big endian CPUs
soc: qcom: Make qcom_smem_get() return a pointer
soc: qcom: Reorder SMEM/SMD configs
soc: qcom: smem: Avoid NULL pointer exception on remove
soc: qcom: smd: Implement id_table driver matching

+450 -334
+11 -6
arch/arm/boot/dts/qcom-msm8974.dtsi
··· 100 100 clock-frequency = <19200000>; 101 101 }; 102 102 103 + smem { 104 + compatible = "qcom,smem"; 105 + 106 + memory-region = <&smem_region>; 107 + qcom,rpm-msg-ram = <&rpm_msg_ram>; 108 + 109 + hwlocks = <&tcsr_mutex 3>; 110 + }; 111 + 103 112 soc: soc { 104 113 #address-cells = <1>; 105 114 #size-cells = <1>; ··· 259 250 #hwlock-cells = <1>; 260 251 }; 261 252 262 - smem@fa00000 { 263 - compatible = "qcom,smem"; 264 - 265 - memory-region = <&smem_region>; 253 + rpm_msg_ram: memory@fc428000 { 254 + compatible = "qcom,rpm-msg-ram"; 266 255 reg = <0xfc428000 0x4000>; 267 - 268 - hwlocks = <&tcsr_mutex 3>; 269 256 }; 270 257 271 258 blsp1_uart2: serial@f991e000 {
+1 -1
drivers/firmware/Makefile
··· 16 16 obj-$(CONFIG_QCOM_SCM) += qcom_scm.o 17 17 obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o 18 18 obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o 19 - CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) 19 + CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a 20 20 21 21 obj-y += broadcom/ 22 22 obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
+3 -3
drivers/firmware/qcom_scm-32.c
··· 480 480 int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) 481 481 { 482 482 int ret; 483 - u32 svc_cmd = (svc_id << 10) | cmd_id; 484 - u32 ret_val = 0; 483 + __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id); 484 + __le32 ret_val = 0; 485 485 486 486 ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd, 487 487 sizeof(svc_cmd), &ret_val, sizeof(ret_val)); 488 488 if (ret) 489 489 return ret; 490 490 491 - return ret_val; 491 + return le32_to_cpu(ret_val); 492 492 } 493 493 494 494 int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+8 -8
drivers/soc/qcom/Kconfig
··· 19 19 modes. It interface with various system drivers to put the cores in 20 20 low power modes. 21 21 22 + config QCOM_SMEM 23 + tristate "Qualcomm Shared Memory Manager (SMEM)" 24 + depends on ARCH_QCOM 25 + help 26 + Say y here to enable support for the Qualcomm Shared Memory Manager. 27 + The driver provides an interface to items in a heap shared among all 28 + processors in a Qualcomm platform. 29 + 22 30 config QCOM_SMD 23 31 tristate "Qualcomm Shared Memory Driver (SMD)" 24 32 depends on QCOM_SMEM ··· 48 40 49 41 Say M here if you want to include support for the Qualcomm RPM as a 50 42 module. This will build a module called "qcom-smd-rpm". 51 - 52 - config QCOM_SMEM 53 - tristate "Qualcomm Shared Memory Manager (SMEM)" 54 - depends on ARCH_QCOM 55 - help 56 - Say y here to enable support for the Qualcomm Shared Memory Manager. 57 - The driver provides an interface to items in a heap shared among all 58 - processors in a Qualcomm platform.
+38 -30
drivers/soc/qcom/smd-rpm.c
··· 17 17 #include <linux/of_platform.h> 18 18 #include <linux/io.h> 19 19 #include <linux/interrupt.h> 20 + #include <linux/slab.h> 20 21 21 22 #include <linux/soc/qcom/smd.h> 22 23 #include <linux/soc/qcom/smd-rpm.h> ··· 45 44 * @length: length of the payload 46 45 */ 47 46 struct qcom_rpm_header { 48 - u32 service_type; 49 - u32 length; 47 + __le32 service_type; 48 + __le32 length; 50 49 }; 51 50 52 51 /** ··· 58 57 * @data_len: length of the payload following this header 59 58 */ 60 59 struct qcom_rpm_request { 61 - u32 msg_id; 62 - u32 flags; 63 - u32 type; 64 - u32 id; 65 - u32 data_len; 60 + __le32 msg_id; 61 + __le32 flags; 62 + __le32 type; 63 + __le32 id; 64 + __le32 data_len; 66 65 }; 67 66 68 67 /** ··· 75 74 * Multiple of these messages can be stacked in an rpm message. 76 75 */ 77 76 struct qcom_rpm_message { 78 - u32 msg_type; 79 - u32 length; 77 + __le32 msg_type; 78 + __le32 length; 80 79 union { 81 - u32 msg_id; 80 + __le32 msg_id; 82 81 u8 message[0]; 83 82 }; 84 83 }; ··· 105 104 static unsigned msg_id = 1; 106 105 int left; 107 106 int ret; 108 - 109 107 struct { 110 108 struct qcom_rpm_header hdr; 111 109 struct qcom_rpm_request req; 112 - u8 payload[count]; 113 - } pkt; 110 + u8 payload[]; 111 + } *pkt; 112 + size_t size = sizeof(*pkt) + count; 114 113 115 114 /* SMD packets to the RPM may not exceed 256 bytes */ 116 - if (WARN_ON(sizeof(pkt) >= 256)) 115 + if (WARN_ON(size >= 256)) 117 116 return -EINVAL; 117 + 118 + pkt = kmalloc(size, GFP_KERNEL); 119 + if (!pkt) 120 + return -ENOMEM; 118 121 119 122 mutex_lock(&rpm->lock); 120 123 121 - pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST; 122 - pkt.hdr.length = sizeof(struct qcom_rpm_request) + count; 124 + pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST); 125 + pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count); 123 126 124 - pkt.req.msg_id = msg_id++; 125 - pkt.req.flags = BIT(state); 126 - pkt.req.type = type; 127 - pkt.req.id = id; 128 - pkt.req.data_len = 
count; 129 - memcpy(pkt.payload, buf, count); 127 + pkt->req.msg_id = cpu_to_le32(msg_id++); 128 + pkt->req.flags = cpu_to_le32(state); 129 + pkt->req.type = cpu_to_le32(type); 130 + pkt->req.id = cpu_to_le32(id); 131 + pkt->req.data_len = cpu_to_le32(count); 132 + memcpy(pkt->payload, buf, count); 130 133 131 - ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt)); 134 + ret = qcom_smd_send(rpm->rpm_channel, pkt, sizeof(*pkt)); 132 135 if (ret) 133 136 goto out; 134 137 ··· 143 138 ret = rpm->ack_status; 144 139 145 140 out: 141 + kfree(pkt); 146 142 mutex_unlock(&rpm->lock); 147 143 return ret; 148 144 } ··· 154 148 size_t count) 155 149 { 156 150 const struct qcom_rpm_header *hdr = data; 151 + size_t hdr_length = le32_to_cpu(hdr->length); 157 152 const struct qcom_rpm_message *msg; 158 153 struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); 159 154 const u8 *buf = data + sizeof(struct qcom_rpm_header); 160 - const u8 *end = buf + hdr->length; 155 + const u8 *end = buf + hdr_length; 161 156 char msgbuf[32]; 162 157 int status = 0; 163 - u32 len; 158 + u32 len, msg_length; 164 159 165 - if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST || 166 - hdr->length < sizeof(struct qcom_rpm_message)) { 160 + if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST || 161 + hdr_length < sizeof(struct qcom_rpm_message)) { 167 162 dev_err(&qsdev->dev, "invalid request\n"); 168 163 return 0; 169 164 } 170 165 171 166 while (buf < end) { 172 167 msg = (struct qcom_rpm_message *)buf; 173 - switch (msg->msg_type) { 168 + msg_length = le32_to_cpu(msg->length); 169 + switch (le32_to_cpu(msg->msg_type)) { 174 170 case RPM_MSG_TYPE_MSG_ID: 175 171 break; 176 172 case RPM_MSG_TYPE_ERR: 177 - len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf)); 173 + len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf)); 178 174 memcpy_fromio(msgbuf, msg->message, len); 179 175 msgbuf[len - 1] = 0; 180 176 ··· 187 179 break; 188 180 } 189 181 190 - buf = PTR_ALIGN(buf + 2 * 
sizeof(u32) + msg->length, 4); 182 + buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4); 191 183 } 192 184 193 185 rpm->ack_status = status;
+178 -114
drivers/soc/qcom/smd.c
··· 65 65 */ 66 66 67 67 struct smd_channel_info; 68 + struct smd_channel_info_pair; 68 69 struct smd_channel_info_word; 70 + struct smd_channel_info_word_pair; 69 71 70 72 #define SMD_ALLOC_TBL_COUNT 2 71 73 #define SMD_ALLOC_TBL_SIZE 64 ··· 87 85 .fifo_base_id = 338 88 86 }, 89 87 { 90 - .alloc_tbl_id = 14, 91 - .info_base_id = 266, 88 + .alloc_tbl_id = 266, 89 + .info_base_id = 138, 92 90 .fifo_base_id = 202, 93 91 }, 94 92 }; ··· 153 151 * @name: name of the channel 154 152 * @state: local state of the channel 155 153 * @remote_state: remote state of the channel 156 - * @tx_info: byte aligned outgoing channel info 157 - * @rx_info: byte aligned incoming channel info 158 - * @tx_info_word: word aligned outgoing channel info 159 - * @rx_info_word: word aligned incoming channel info 154 + * @info: byte aligned outgoing/incoming channel info 155 + * @info_word: word aligned outgoing/incoming channel info 160 156 * @tx_lock: lock to make writes to the channel mutually exclusive 161 157 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR 162 158 * @tx_fifo: pointer to the outgoing ring buffer ··· 175 175 enum smd_channel_state state; 176 176 enum smd_channel_state remote_state; 177 177 178 - struct smd_channel_info *tx_info; 179 - struct smd_channel_info *rx_info; 180 - 181 - struct smd_channel_info_word *tx_info_word; 182 - struct smd_channel_info_word *rx_info_word; 178 + struct smd_channel_info_pair *info; 179 + struct smd_channel_info_word_pair *info_word; 183 180 184 181 struct mutex tx_lock; 185 182 wait_queue_head_t fblockread_event; ··· 212 215 * Format of the smd_info smem items, for byte aligned channels. 
213 216 */ 214 217 struct smd_channel_info { 215 - u32 state; 218 + __le32 state; 216 219 u8 fDSR; 217 220 u8 fCTS; 218 221 u8 fCD; ··· 221 224 u8 fTAIL; 222 225 u8 fSTATE; 223 226 u8 fBLOCKREADINTR; 224 - u32 tail; 225 - u32 head; 227 + __le32 tail; 228 + __le32 head; 229 + }; 230 + 231 + struct smd_channel_info_pair { 232 + struct smd_channel_info tx; 233 + struct smd_channel_info rx; 226 234 }; 227 235 228 236 /* 229 237 * Format of the smd_info smem items, for word aligned channels. 230 238 */ 231 239 struct smd_channel_info_word { 232 - u32 state; 233 - u32 fDSR; 234 - u32 fCTS; 235 - u32 fCD; 236 - u32 fRI; 237 - u32 fHEAD; 238 - u32 fTAIL; 239 - u32 fSTATE; 240 - u32 fBLOCKREADINTR; 241 - u32 tail; 242 - u32 head; 240 + __le32 state; 241 + __le32 fDSR; 242 + __le32 fCTS; 243 + __le32 fCD; 244 + __le32 fRI; 245 + __le32 fHEAD; 246 + __le32 fTAIL; 247 + __le32 fSTATE; 248 + __le32 fBLOCKREADINTR; 249 + __le32 tail; 250 + __le32 head; 243 251 }; 244 252 245 - #define GET_RX_CHANNEL_INFO(channel, param) \ 246 - (channel->rx_info_word ? \ 247 - channel->rx_info_word->param : \ 248 - channel->rx_info->param) 253 + struct smd_channel_info_word_pair { 254 + struct smd_channel_info_word tx; 255 + struct smd_channel_info_word rx; 256 + }; 249 257 250 - #define SET_RX_CHANNEL_INFO(channel, param, value) \ 251 - (channel->rx_info_word ? \ 252 - (channel->rx_info_word->param = value) : \ 253 - (channel->rx_info->param = value)) 258 + #define GET_RX_CHANNEL_FLAG(channel, param) \ 259 + ({ \ 260 + BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ 261 + channel->info_word ? \ 262 + le32_to_cpu(channel->info_word->rx.param) : \ 263 + channel->info->rx.param; \ 264 + }) 254 265 255 - #define GET_TX_CHANNEL_INFO(channel, param) \ 256 - (channel->tx_info_word ? 
\ 257 - channel->tx_info_word->param : \ 258 - channel->tx_info->param) 266 + #define GET_RX_CHANNEL_INFO(channel, param) \ 267 + ({ \ 268 + BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ 269 + le32_to_cpu(channel->info_word ? \ 270 + channel->info_word->rx.param : \ 271 + channel->info->rx.param); \ 272 + }) 259 273 260 - #define SET_TX_CHANNEL_INFO(channel, param, value) \ 261 - (channel->tx_info_word ? \ 262 - (channel->tx_info_word->param = value) : \ 263 - (channel->tx_info->param = value)) 274 + #define SET_RX_CHANNEL_FLAG(channel, param, value) \ 275 + ({ \ 276 + BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ 277 + if (channel->info_word) \ 278 + channel->info_word->rx.param = cpu_to_le32(value); \ 279 + else \ 280 + channel->info->rx.param = value; \ 281 + }) 282 + 283 + #define SET_RX_CHANNEL_INFO(channel, param, value) \ 284 + ({ \ 285 + BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ 286 + if (channel->info_word) \ 287 + channel->info_word->rx.param = cpu_to_le32(value); \ 288 + else \ 289 + channel->info->rx.param = cpu_to_le32(value); \ 290 + }) 291 + 292 + #define GET_TX_CHANNEL_FLAG(channel, param) \ 293 + ({ \ 294 + BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ 295 + channel->info_word ? \ 296 + le32_to_cpu(channel->info_word->tx.param) : \ 297 + channel->info->tx.param; \ 298 + }) 299 + 300 + #define GET_TX_CHANNEL_INFO(channel, param) \ 301 + ({ \ 302 + BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ 303 + le32_to_cpu(channel->info_word ? 
\ 304 + channel->info_word->tx.param : \ 305 + channel->info->tx.param); \ 306 + }) 307 + 308 + #define SET_TX_CHANNEL_FLAG(channel, param, value) \ 309 + ({ \ 310 + BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ 311 + if (channel->info_word) \ 312 + channel->info_word->tx.param = cpu_to_le32(value); \ 313 + else \ 314 + channel->info->tx.param = value; \ 315 + }) 316 + 317 + #define SET_TX_CHANNEL_INFO(channel, param, value) \ 318 + ({ \ 319 + BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ 320 + if (channel->info_word) \ 321 + channel->info_word->tx.param = cpu_to_le32(value); \ 322 + else \ 323 + channel->info->tx.param = cpu_to_le32(value); \ 324 + }) 264 325 265 326 /** 266 327 * struct qcom_smd_alloc_entry - channel allocation entry ··· 329 274 */ 330 275 struct qcom_smd_alloc_entry { 331 276 u8 name[20]; 332 - u32 cid; 333 - u32 flags; 334 - u32 ref_count; 277 + __le32 cid; 278 + __le32 flags; 279 + __le32 ref_count; 335 280 } __packed; 336 281 337 282 #define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff ··· 360 305 static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) 361 306 { 362 307 SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); 363 - SET_TX_CHANNEL_INFO(channel, fDSR, 0); 364 - SET_TX_CHANNEL_INFO(channel, fCTS, 0); 365 - SET_TX_CHANNEL_INFO(channel, fCD, 0); 366 - SET_TX_CHANNEL_INFO(channel, fRI, 0); 367 - SET_TX_CHANNEL_INFO(channel, fHEAD, 0); 368 - SET_TX_CHANNEL_INFO(channel, fTAIL, 0); 369 - SET_TX_CHANNEL_INFO(channel, fSTATE, 1); 370 - SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); 308 + SET_TX_CHANNEL_FLAG(channel, fDSR, 0); 309 + SET_TX_CHANNEL_FLAG(channel, fCTS, 0); 310 + SET_TX_CHANNEL_FLAG(channel, fCD, 0); 311 + SET_TX_CHANNEL_FLAG(channel, fRI, 0); 312 + SET_TX_CHANNEL_FLAG(channel, fHEAD, 0); 313 + SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); 314 + SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); 315 + SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); 371 316 SET_TX_CHANNEL_INFO(channel, head, 0); 372 
317 SET_TX_CHANNEL_INFO(channel, tail, 0); 373 318 ··· 405 350 406 351 dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state); 407 352 408 - SET_TX_CHANNEL_INFO(channel, fDSR, is_open); 409 - SET_TX_CHANNEL_INFO(channel, fCTS, is_open); 410 - SET_TX_CHANNEL_INFO(channel, fCD, is_open); 353 + SET_TX_CHANNEL_FLAG(channel, fDSR, is_open); 354 + SET_TX_CHANNEL_FLAG(channel, fCTS, is_open); 355 + SET_TX_CHANNEL_FLAG(channel, fCD, is_open); 411 356 412 357 SET_TX_CHANNEL_INFO(channel, state, state); 413 - SET_TX_CHANNEL_INFO(channel, fSTATE, 1); 358 + SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); 414 359 415 360 channel->state = state; 416 361 qcom_smd_signal_channel(channel); ··· 419 364 /* 420 365 * Copy count bytes of data using 32bit accesses, if that's required. 421 366 */ 422 - static void smd_copy_to_fifo(void __iomem *_dst, 423 - const void *_src, 367 + static void smd_copy_to_fifo(void __iomem *dst, 368 + const void *src, 424 369 size_t count, 425 370 bool word_aligned) 426 371 { 427 - u32 *dst = (u32 *)_dst; 428 - u32 *src = (u32 *)_src; 429 - 430 372 if (word_aligned) { 431 - count /= sizeof(u32); 432 - while (count--) 433 - writel_relaxed(*src++, dst++); 373 + __iowrite32_copy(dst, src, count / sizeof(u32)); 434 374 } else { 435 - memcpy_toio(_dst, _src, count); 375 + memcpy_toio(dst, src, count); 436 376 } 437 377 } 438 378 ··· 445 395 if (word_aligned) { 446 396 count /= sizeof(u32); 447 397 while (count--) 448 - *dst++ = readl_relaxed(src++); 398 + *dst++ = __raw_readl(src++); 449 399 } else { 450 400 memcpy_fromio(_dst, _src, count); 451 401 } ··· 462 412 unsigned tail; 463 413 size_t len; 464 414 465 - word_aligned = channel->rx_info_word != NULL; 415 + word_aligned = channel->info_word; 466 416 tail = GET_RX_CHANNEL_INFO(channel, tail); 467 417 468 418 len = min_t(size_t, count, channel->fifo_size - tail); ··· 541 491 { 542 492 bool need_state_scan = false; 543 493 int remote_state; 544 - u32 pktlen; 494 + __le32 pktlen; 545 495 int avail; 546 
496 int ret; 547 497 ··· 552 502 need_state_scan = true; 553 503 } 554 504 /* Indicate that we have seen any state change */ 555 - SET_RX_CHANNEL_INFO(channel, fSTATE, 0); 505 + SET_RX_CHANNEL_FLAG(channel, fSTATE, 0); 556 506 557 507 /* Signal waiting qcom_smd_send() about the interrupt */ 558 - if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR)) 508 + if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) 559 509 wake_up_interruptible(&channel->fblockread_event); 560 510 561 511 /* Don't consume any data until we've opened the channel */ ··· 563 513 goto out; 564 514 565 515 /* Indicate that we've seen the new data */ 566 - SET_RX_CHANNEL_INFO(channel, fHEAD, 0); 516 + SET_RX_CHANNEL_FLAG(channel, fHEAD, 0); 567 517 568 518 /* Consume data */ 569 519 for (;;) { ··· 572 522 if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { 573 523 qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); 574 524 qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); 575 - channel->pkt_size = pktlen; 525 + channel->pkt_size = le32_to_cpu(pktlen); 576 526 } else if (channel->pkt_size && avail >= channel->pkt_size) { 577 527 ret = qcom_smd_channel_recv_single(channel); 578 528 if (ret) ··· 583 533 } 584 534 585 535 /* Indicate that we have seen and updated tail */ 586 - SET_RX_CHANNEL_INFO(channel, fTAIL, 1); 536 + SET_RX_CHANNEL_FLAG(channel, fTAIL, 1); 587 537 588 538 /* Signal the remote that we've consumed the data (if requested) */ 589 - if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) { 539 + if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) { 590 540 /* Ensure ordering of channel info updates */ 591 541 wmb(); 592 542 ··· 677 627 unsigned head; 678 628 size_t len; 679 629 680 - word_aligned = channel->tx_info_word != NULL; 630 + word_aligned = channel->info_word; 681 631 head = GET_TX_CHANNEL_INFO(channel, head); 682 632 683 633 len = min_t(size_t, count, channel->fifo_size - head); ··· 715 665 */ 716 666 int qcom_smd_send(struct qcom_smd_channel *channel, const void 
*data, int len) 717 667 { 718 - u32 hdr[5] = {len,}; 668 + __le32 hdr[5] = { cpu_to_le32(len), }; 719 669 int tlen = sizeof(hdr) + len; 720 670 int ret; 721 671 722 672 /* Word aligned channels only accept word size aligned data */ 723 - if (channel->rx_info_word != NULL && len % 4) 673 + if (channel->info_word && len % 4) 674 + return -EINVAL; 675 + 676 + /* Reject packets that are too big */ 677 + if (tlen >= channel->fifo_size) 724 678 return -EINVAL; 725 679 726 680 ret = mutex_lock_interruptible(&channel->tx_lock); ··· 737 683 goto out; 738 684 } 739 685 740 - SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0); 686 + SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0); 741 687 742 688 ret = wait_event_interruptible(channel->fblockread_event, 743 689 qcom_smd_get_tx_avail(channel) >= tlen || ··· 745 691 if (ret) 746 692 goto out; 747 693 748 - SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); 694 + SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); 749 695 } 750 696 751 - SET_TX_CHANNEL_INFO(channel, fTAIL, 0); 697 + SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); 752 698 753 699 qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); 754 700 qcom_smd_write_fifo(channel, data, len); 755 701 756 - SET_TX_CHANNEL_INFO(channel, fHEAD, 1); 702 + SET_TX_CHANNEL_FLAG(channel, fHEAD, 1); 757 703 758 704 /* Ensure ordering of channel info updates */ 759 705 wmb(); ··· 781 727 782 728 static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) 783 729 { 730 + struct qcom_smd_device *qsdev = to_smd_device(dev); 731 + struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver); 732 + const struct qcom_smd_id *match = qsdrv->smd_match_table; 733 + const char *name = qsdev->channel->name; 734 + 735 + if (match) { 736 + while (match->name[0]) { 737 + if (!strcmp(match->name, name)) 738 + return 1; 739 + match++; 740 + } 741 + } 742 + 784 743 return of_driver_match_device(dev, drv); 785 744 } 786 745 ··· 921 854 for_each_available_child_of_node(edge_node, child) 
{ 922 855 key = "qcom,smd-channels"; 923 856 ret = of_property_read_string(child, key, &name); 924 - if (ret) { 925 - of_node_put(child); 857 + if (ret) 926 858 continue; 927 - } 928 859 929 860 if (strcmp(name, channel) == 0) 930 861 return child; ··· 945 880 if (channel->qsdev) 946 881 return -EEXIST; 947 882 948 - node = qcom_smd_match_channel(edge->of_node, channel->name); 949 - if (!node) { 950 - dev_dbg(smd->dev, "no match for '%s'\n", channel->name); 951 - return -ENXIO; 952 - } 953 - 954 883 dev_dbg(smd->dev, "registering '%s'\n", channel->name); 955 884 956 885 qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); 957 886 if (!qsdev) 958 887 return -ENOMEM; 959 888 960 - dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name); 889 + node = qcom_smd_match_channel(edge->of_node, channel->name); 890 + dev_set_name(&qsdev->dev, "%s.%s", 891 + edge->of_node->name, 892 + node ? node->name : channel->name); 893 + 961 894 qsdev->dev.parent = smd->dev; 962 895 qsdev->dev.bus = &qcom_smd_bus; 963 896 qsdev->dev.release = qcom_smd_release_device; ··· 1041 978 spin_lock_init(&channel->recv_lock); 1042 979 init_waitqueue_head(&channel->fblockread_event); 1043 980 1044 - ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info, 1045 - &info_size); 1046 - if (ret) 981 + info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); 982 + if (IS_ERR(info)) { 983 + ret = PTR_ERR(info); 1047 984 goto free_name_and_channel; 985 + } 1048 986 1049 987 /* 1050 988 * Use the size of the item to figure out which channel info struct to 1051 989 * use. 
1052 990 */ 1053 991 if (info_size == 2 * sizeof(struct smd_channel_info_word)) { 1054 - channel->tx_info_word = info; 1055 - channel->rx_info_word = info + sizeof(struct smd_channel_info_word); 992 + channel->info_word = info; 1056 993 } else if (info_size == 2 * sizeof(struct smd_channel_info)) { 1057 - channel->tx_info = info; 1058 - channel->rx_info = info + sizeof(struct smd_channel_info); 994 + channel->info = info; 1059 995 } else { 1060 996 dev_err(smd->dev, 1061 997 "channel info of size %zu not supported\n", info_size); ··· 1062 1000 goto free_name_and_channel; 1063 1001 } 1064 1002 1065 - ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base, 1066 - &fifo_size); 1067 - if (ret) 1003 + fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); 1004 + if (IS_ERR(fifo_base)) { 1005 + ret = PTR_ERR(fifo_base); 1068 1006 goto free_name_and_channel; 1007 + } 1069 1008 1070 1009 /* The channel consist of a rx and tx fifo of equal size */ 1071 1010 fifo_size /= 2; ··· 1103 1040 unsigned long flags; 1104 1041 unsigned fifo_id; 1105 1042 unsigned info_id; 1106 - int ret; 1107 1043 int tbl; 1108 1044 int i; 1045 + u32 eflags, cid; 1109 1046 1110 1047 for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { 1111 - ret = qcom_smem_get(edge->remote_pid, 1112 - smem_items[tbl].alloc_tbl_id, 1113 - (void **)&alloc_tbl, 1114 - NULL); 1115 - if (ret < 0) 1048 + alloc_tbl = qcom_smem_get(edge->remote_pid, 1049 + smem_items[tbl].alloc_tbl_id, NULL); 1050 + if (IS_ERR(alloc_tbl)) 1116 1051 continue; 1117 1052 1118 1053 for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { 1119 1054 entry = &alloc_tbl[i]; 1055 + eflags = le32_to_cpu(entry->flags); 1120 1056 if (test_bit(i, edge->allocated[tbl])) 1121 1057 continue; 1122 1058 ··· 1125 1063 if (!entry->name[0]) 1126 1064 continue; 1127 1065 1128 - if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET)) 1066 + if (!(eflags & SMD_CHANNEL_FLAGS_PACKET)) 1129 1067 continue; 1130 1068 1131 - if ((entry->flags & 
SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) 1069 + if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) 1132 1070 continue; 1133 1071 1134 - info_id = smem_items[tbl].info_base_id + entry->cid; 1135 - fifo_id = smem_items[tbl].fifo_base_id + entry->cid; 1072 + cid = le32_to_cpu(entry->cid); 1073 + info_id = smem_items[tbl].info_base_id + cid; 1074 + fifo_id = smem_items[tbl].fifo_base_id + cid; 1136 1075 1137 1076 channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); 1138 1077 if (IS_ERR(channel)) ··· 1290 1227 int num_edges; 1291 1228 int ret; 1292 1229 int i = 0; 1230 + void *p; 1293 1231 1294 1232 /* Wait for smem */ 1295 - ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL); 1296 - if (ret == -EPROBE_DEFER) 1297 - return ret; 1233 + p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL); 1234 + if (PTR_ERR(p) == -EPROBE_DEFER) 1235 + return PTR_ERR(p); 1298 1236 1299 1237 num_edges = of_get_available_child_count(pdev->dev.of_node); 1300 1238 array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
+197 -171
drivers/soc/qcom/smem.c
··· 92 92 * @params: parameters to the command 93 93 */ 94 94 struct smem_proc_comm { 95 - u32 command; 96 - u32 status; 97 - u32 params[2]; 95 + __le32 command; 96 + __le32 status; 97 + __le32 params[2]; 98 98 }; 99 99 100 100 /** ··· 106 106 * the default region. bits 0,1 are reserved 107 107 */ 108 108 struct smem_global_entry { 109 - u32 allocated; 110 - u32 offset; 111 - u32 size; 112 - u32 aux_base; /* bits 1:0 reserved */ 109 + __le32 allocated; 110 + __le32 offset; 111 + __le32 size; 112 + __le32 aux_base; /* bits 1:0 reserved */ 113 113 }; 114 114 #define AUX_BASE_MASK 0xfffffffc 115 115 ··· 125 125 */ 126 126 struct smem_header { 127 127 struct smem_proc_comm proc_comm[4]; 128 - u32 version[32]; 129 - u32 initialized; 130 - u32 free_offset; 131 - u32 available; 132 - u32 reserved; 128 + __le32 version[32]; 129 + __le32 initialized; 130 + __le32 free_offset; 131 + __le32 available; 132 + __le32 reserved; 133 133 struct smem_global_entry toc[SMEM_ITEM_COUNT]; 134 134 }; 135 135 ··· 143 143 * @reserved: reserved entries for later use 144 144 */ 145 145 struct smem_ptable_entry { 146 - u32 offset; 147 - u32 size; 148 - u32 flags; 149 - u16 host0; 150 - u16 host1; 151 - u32 reserved[8]; 146 + __le32 offset; 147 + __le32 size; 148 + __le32 flags; 149 + __le16 host0; 150 + __le16 host1; 151 + __le32 reserved[8]; 152 152 }; 153 153 154 154 /** ··· 160 160 * @entry: list of @smem_ptable_entry for the @num_entries partitions 161 161 */ 162 162 struct smem_ptable { 163 - u32 magic; 164 - u32 version; 165 - u32 num_entries; 166 - u32 reserved[5]; 163 + u8 magic[4]; 164 + __le32 version; 165 + __le32 num_entries; 166 + __le32 reserved[5]; 167 167 struct smem_ptable_entry entry[]; 168 168 }; 169 - #define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */ 169 + 170 + static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */ 170 171 171 172 /** 172 173 * struct smem_partition_header - header of the partitions ··· 182 181 * @reserved: for now reserved entries 
183 182 */ 184 183 struct smem_partition_header { 185 - u32 magic; 186 - u16 host0; 187 - u16 host1; 188 - u32 size; 189 - u32 offset_free_uncached; 190 - u32 offset_free_cached; 191 - u32 reserved[3]; 184 + u8 magic[4]; 185 + __le16 host0; 186 + __le16 host1; 187 + __le32 size; 188 + __le32 offset_free_uncached; 189 + __le32 offset_free_cached; 190 + __le32 reserved[3]; 192 191 }; 193 - #define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */ 192 + 193 + static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; 194 194 195 195 /** 196 196 * struct smem_private_entry - header of each item in the private partition ··· 203 201 * @reserved: for now reserved entry 204 202 */ 205 203 struct smem_private_entry { 206 - u16 canary; 207 - u16 item; 208 - u32 size; /* includes padding bytes */ 209 - u16 padding_data; 210 - u16 padding_hdr; 211 - u32 reserved; 204 + u16 canary; /* bytes are the same so no swapping needed */ 205 + __le16 item; 206 + __le32 size; /* includes padding bytes */ 207 + __le16 padding_data; 208 + __le16 padding_hdr; 209 + __le32 reserved; 212 210 }; 213 211 #define SMEM_PRIVATE_CANARY 0xa5a5 214 212 ··· 244 242 struct smem_region regions[0]; 245 243 }; 246 244 245 + static struct smem_private_entry * 246 + phdr_to_last_private_entry(struct smem_partition_header *phdr) 247 + { 248 + void *p = phdr; 249 + 250 + return p + le32_to_cpu(phdr->offset_free_uncached); 251 + } 252 + 253 + static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr) 254 + { 255 + void *p = phdr; 256 + 257 + return p + le32_to_cpu(phdr->offset_free_cached); 258 + } 259 + 260 + static struct smem_private_entry * 261 + phdr_to_first_private_entry(struct smem_partition_header *phdr) 262 + { 263 + void *p = phdr; 264 + 265 + return p + sizeof(*phdr); 266 + } 267 + 268 + static struct smem_private_entry * 269 + private_entry_next(struct smem_private_entry *e) 270 + { 271 + void *p = e; 272 + 273 + return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) + 274 + 
le32_to_cpu(e->size); 275 + } 276 + 277 + static void *entry_to_item(struct smem_private_entry *e) 278 + { 279 + void *p = e; 280 + 281 + return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); 282 + } 283 + 247 284 /* Pointer to the one and only smem handle */ 248 285 static struct qcom_smem *__smem; 249 286 ··· 295 254 size_t size) 296 255 { 297 256 struct smem_partition_header *phdr; 298 - struct smem_private_entry *hdr; 257 + struct smem_private_entry *hdr, *end; 299 258 size_t alloc_size; 300 - void *p; 259 + void *cached; 301 260 302 261 phdr = smem->partitions[host]; 262 + hdr = phdr_to_first_private_entry(phdr); 263 + end = phdr_to_last_private_entry(phdr); 264 + cached = phdr_to_first_cached_entry(phdr); 303 265 304 - p = (void *)phdr + sizeof(*phdr); 305 - while (p < (void *)phdr + phdr->offset_free_uncached) { 306 - hdr = p; 307 - 266 + while (hdr < end) { 308 267 if (hdr->canary != SMEM_PRIVATE_CANARY) { 309 268 dev_err(smem->dev, 310 269 "Found invalid canary in host %d partition\n", ··· 312 271 return -EINVAL; 313 272 } 314 273 315 - if (hdr->item == item) 274 + if (le16_to_cpu(hdr->item) == item) 316 275 return -EEXIST; 317 276 318 - p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 277 + hdr = private_entry_next(hdr); 319 278 } 320 279 321 280 /* Check that we don't grow into the cached region */ 322 281 alloc_size = sizeof(*hdr) + ALIGN(size, 8); 323 - if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) { 282 + if ((void *)hdr + alloc_size >= cached) { 324 283 dev_err(smem->dev, "Out of memory\n"); 325 284 return -ENOSPC; 326 285 } 327 286 328 - hdr = p; 329 287 hdr->canary = SMEM_PRIVATE_CANARY; 330 - hdr->item = item; 331 - hdr->size = ALIGN(size, 8); 332 - hdr->padding_data = hdr->size - size; 288 + hdr->item = cpu_to_le16(item); 289 + hdr->size = cpu_to_le32(ALIGN(size, 8)); 290 + hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size); 333 291 hdr->padding_hdr = 0; 334 292 335 293 /* ··· 337 297 * gets a consistent view of the 
linked list. 338 298 */ 339 299 wmb(); 340 - phdr->offset_free_uncached += alloc_size; 300 + le32_add_cpu(&phdr->offset_free_uncached, alloc_size); 341 301 342 302 return 0; 343 303 } ··· 358 318 return -EEXIST; 359 319 360 320 size = ALIGN(size, 8); 361 - if (WARN_ON(size > header->available)) 321 + if (WARN_ON(size > le32_to_cpu(header->available))) 362 322 return -ENOMEM; 363 323 364 324 entry->offset = header->free_offset; 365 - entry->size = size; 325 + entry->size = cpu_to_le32(size); 366 326 367 327 /* 368 328 * Ensure the header is consistent before we mark the item allocated, ··· 370 330 * even though they do not take the spinlock on read. 371 331 */ 372 332 wmb(); 373 - entry->allocated = 1; 333 + entry->allocated = cpu_to_le32(1); 374 334 375 - header->free_offset += size; 376 - header->available -= size; 335 + le32_add_cpu(&header->free_offset, size); 336 + le32_add_cpu(&header->available, -size); 377 337 378 338 return 0; 379 339 } ··· 418 378 } 419 379 EXPORT_SYMBOL(qcom_smem_alloc); 420 380 421 - static int qcom_smem_get_global(struct qcom_smem *smem, 422 - unsigned item, 423 - void **ptr, 424 - size_t *size) 381 + static void *qcom_smem_get_global(struct qcom_smem *smem, 382 + unsigned item, 383 + size_t *size) 425 384 { 426 385 struct smem_header *header; 427 386 struct smem_region *area; ··· 429 390 unsigned i; 430 391 431 392 if (WARN_ON(item >= SMEM_ITEM_COUNT)) 432 - return -EINVAL; 393 + return ERR_PTR(-EINVAL); 433 394 434 395 header = smem->regions[0].virt_base; 435 396 entry = &header->toc[item]; 436 397 if (!entry->allocated) 437 - return -ENXIO; 398 + return ERR_PTR(-ENXIO); 438 399 439 - if (ptr != NULL) { 440 - aux_base = entry->aux_base & AUX_BASE_MASK; 400 + aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK; 441 401 442 - for (i = 0; i < smem->num_regions; i++) { 443 - area = &smem->regions[i]; 402 + for (i = 0; i < smem->num_regions; i++) { 403 + area = &smem->regions[i]; 444 404 445 - if (area->aux_base == aux_base || 
!aux_base) { 446 - *ptr = area->virt_base + entry->offset; 447 - break; 448 - } 405 + if (area->aux_base == aux_base || !aux_base) { 406 + if (size != NULL) 407 + *size = le32_to_cpu(entry->size); 408 + return area->virt_base + le32_to_cpu(entry->offset); 449 409 } 450 410 } 451 - if (size != NULL) 452 - *size = entry->size; 453 411 454 - return 0; 412 + return ERR_PTR(-ENOENT); 455 413 } 456 414 457 - static int qcom_smem_get_private(struct qcom_smem *smem, 458 - unsigned host, 459 - unsigned item, 460 - void **ptr, 461 - size_t *size) 415 + static void *qcom_smem_get_private(struct qcom_smem *smem, 416 + unsigned host, 417 + unsigned item, 418 + size_t *size) 462 419 { 463 420 struct smem_partition_header *phdr; 464 - struct smem_private_entry *hdr; 465 - void *p; 421 + struct smem_private_entry *e, *end; 466 422 467 423 phdr = smem->partitions[host]; 424 + e = phdr_to_first_private_entry(phdr); 425 + end = phdr_to_last_private_entry(phdr); 468 426 469 - p = (void *)phdr + sizeof(*phdr); 470 - while (p < (void *)phdr + phdr->offset_free_uncached) { 471 - hdr = p; 472 - 473 - if (hdr->canary != SMEM_PRIVATE_CANARY) { 427 + while (e < end) { 428 + if (e->canary != SMEM_PRIVATE_CANARY) { 474 429 dev_err(smem->dev, 475 430 "Found invalid canary in host %d partition\n", 476 431 host); 477 - return -EINVAL; 432 + return ERR_PTR(-EINVAL); 478 433 } 479 434 480 - if (hdr->item == item) { 481 - if (ptr != NULL) 482 - *ptr = p + sizeof(*hdr) + hdr->padding_hdr; 483 - 435 + if (le16_to_cpu(e->item) == item) { 484 436 if (size != NULL) 485 - *size = hdr->size - hdr->padding_data; 437 + *size = le32_to_cpu(e->size) - 438 + le16_to_cpu(e->padding_data); 486 439 487 - return 0; 440 + return entry_to_item(e); 488 441 } 489 442 490 - p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 443 + e = private_entry_next(e); 491 444 } 492 445 493 - return -ENOENT; 446 + return ERR_PTR(-ENOENT); 494 447 } 495 448 496 449 /** 497 450 * qcom_smem_get() - resolve ptr of size of a smem item 498 
451 * @host: the remote processor, or -1 499 452 * @item: smem item handle 500 - * @ptr: pointer to be filled out with address of the item 501 453 * @size: pointer to be filled out with size of the item 502 454 * 503 - * Looks up pointer and size of a smem item. 455 + * Looks up smem item and returns pointer to it. Size of smem 456 + * item is returned in @size. 504 457 */ 505 - int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size) 458 + void *qcom_smem_get(unsigned host, unsigned item, size_t *size) 506 459 { 507 460 unsigned long flags; 508 461 int ret; 462 + void *ptr = ERR_PTR(-EPROBE_DEFER); 509 463 510 464 if (!__smem) 511 - return -EPROBE_DEFER; 465 + return ptr; 512 466 513 467 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, 514 468 HWSPINLOCK_TIMEOUT, 515 469 &flags); 516 470 if (ret) 517 - return ret; 471 + return ERR_PTR(ret); 518 472 519 473 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) 520 - ret = qcom_smem_get_private(__smem, host, item, ptr, size); 474 + ptr = qcom_smem_get_private(__smem, host, item, size); 521 475 else 522 - ret = qcom_smem_get_global(__smem, item, ptr, size); 476 + ptr = qcom_smem_get_global(__smem, item, size); 523 477 524 478 hwspin_unlock_irqrestore(__smem->hwlock, &flags); 525 - return ret; 479 + 480 + return ptr; 526 481 527 482 } 528 483 EXPORT_SYMBOL(qcom_smem_get); ··· 539 506 540 507 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { 541 508 phdr = __smem->partitions[host]; 542 - ret = phdr->offset_free_cached - phdr->offset_free_uncached; 509 + ret = le32_to_cpu(phdr->offset_free_cached) - 510 + le32_to_cpu(phdr->offset_free_uncached); 543 511 } else { 544 512 header = __smem->regions[0].virt_base; 545 - ret = header->available; 513 + ret = le32_to_cpu(header->available); 546 514 } 547 515 548 516 return ret; ··· 552 518 553 519 static int qcom_smem_get_sbl_version(struct qcom_smem *smem) 554 520 { 555 - unsigned *versions; 521 + __le32 *versions; 556 522 size_t size; 557 - int ret; 
558 523 559 - ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, 560 - (void **)&versions, &size); 561 - if (ret < 0) { 524 + versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size); 525 + if (IS_ERR(versions)) { 562 526 dev_err(smem->dev, "Unable to read the version item\n"); 563 527 return -ENOENT; 564 528 } ··· 566 534 return -EINVAL; 567 535 } 568 536 569 - return versions[SMEM_MASTER_SBL_VERSION_INDEX]; 537 + return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]); 570 538 } 571 539 572 540 static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, ··· 576 544 struct smem_ptable_entry *entry; 577 545 struct smem_ptable *ptable; 578 546 unsigned remote_host; 547 + u32 version, host0, host1; 579 548 int i; 580 549 581 550 ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; 582 - if (ptable->magic != SMEM_PTABLE_MAGIC) 551 + if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) 583 552 return 0; 584 553 585 - if (ptable->version != 1) { 554 + version = le32_to_cpu(ptable->version); 555 + if (version != 1) { 586 556 dev_err(smem->dev, 587 - "Unsupported partition header version %d\n", 588 - ptable->version); 557 + "Unsupported partition header version %d\n", version); 589 558 return -EINVAL; 590 559 } 591 560 592 - for (i = 0; i < ptable->num_entries; i++) { 561 + for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { 593 562 entry = &ptable->entry[i]; 563 + host0 = le16_to_cpu(entry->host0); 564 + host1 = le16_to_cpu(entry->host1); 594 565 595 - if (entry->host0 != local_host && entry->host1 != local_host) 566 + if (host0 != local_host && host1 != local_host) 596 567 continue; 597 568 598 - if (!entry->offset) 569 + if (!le32_to_cpu(entry->offset)) 599 570 continue; 600 571 601 - if (!entry->size) 572 + if (!le32_to_cpu(entry->size)) 602 573 continue; 603 574 604 - if (entry->host0 == local_host) 605 - remote_host = entry->host1; 575 + if (host0 == local_host) 576 + remote_host = host1; 606 577 else 607 - 
remote_host = entry->host0; 578 + remote_host = host0; 608 579 609 580 if (remote_host >= SMEM_HOST_COUNT) { 610 581 dev_err(smem->dev, ··· 623 588 return -EINVAL; 624 589 } 625 590 626 - header = smem->regions[0].virt_base + entry->offset; 591 + header = smem->regions[0].virt_base + le32_to_cpu(entry->offset); 592 + host0 = le16_to_cpu(header->host0); 593 + host1 = le16_to_cpu(header->host1); 627 594 628 - if (header->magic != SMEM_PART_MAGIC) { 595 + if (memcmp(header->magic, SMEM_PART_MAGIC, 596 + sizeof(header->magic))) { 629 597 dev_err(smem->dev, 630 598 "Partition %d has invalid magic\n", i); 631 599 return -EINVAL; 632 600 } 633 601 634 - if (header->host0 != local_host && header->host1 != local_host) { 602 + if (host0 != local_host && host1 != local_host) { 635 603 dev_err(smem->dev, 636 604 "Partition %d hosts are invalid\n", i); 637 605 return -EINVAL; 638 606 } 639 607 640 - if (header->host0 != remote_host && header->host1 != remote_host) { 608 + if (host0 != remote_host && host1 != remote_host) { 641 609 dev_err(smem->dev, 642 610 "Partition %d hosts are invalid\n", i); 643 611 return -EINVAL; ··· 652 614 return -EINVAL; 653 615 } 654 616 655 - if (header->offset_free_uncached > header->size) { 617 + if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) { 656 618 dev_err(smem->dev, 657 619 "Partition %d has invalid free pointer\n", i); 658 620 return -EINVAL; ··· 664 626 return 0; 665 627 } 666 628 667 - static int qcom_smem_count_mem_regions(struct platform_device *pdev) 629 + static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev, 630 + const char *name, int i) 668 631 { 669 - struct resource *res; 670 - int num_regions = 0; 671 - int i; 632 + struct device_node *np; 633 + struct resource r; 634 + int ret; 672 635 673 - for (i = 0; i < pdev->num_resources; i++) { 674 - res = &pdev->resource[i]; 675 - 676 - if (resource_type(res) == IORESOURCE_MEM) 677 - num_regions++; 636 + np = of_parse_phandle(dev->of_node, 
name, 0); 637 + if (!np) { 638 + dev_err(dev, "No %s specified\n", name); 639 + return -EINVAL; 678 640 } 679 641 680 - return num_regions; 642 + ret = of_address_to_resource(np, 0, &r); 643 + of_node_put(np); 644 + if (ret) 645 + return ret; 646 + 647 + smem->regions[i].aux_base = (u32)r.start; 648 + smem->regions[i].size = resource_size(&r); 649 + smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start, 650 + resource_size(&r)); 651 + if (!smem->regions[i].virt_base) 652 + return -ENOMEM; 653 + 654 + return 0; 681 655 } 682 656 683 657 static int qcom_smem_probe(struct platform_device *pdev) 684 658 { 685 659 struct smem_header *header; 686 - struct device_node *np; 687 660 struct qcom_smem *smem; 688 - struct resource *res; 689 - struct resource r; 690 661 size_t array_size; 691 - int num_regions = 0; 662 + int num_regions; 692 663 int hwlock_id; 693 664 u32 version; 694 665 int ret; 695 - int i; 696 666 697 - num_regions = qcom_smem_count_mem_regions(pdev) + 1; 667 + num_regions = 1; 668 + if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL)) 669 + num_regions++; 698 670 699 671 array_size = num_regions * sizeof(struct smem_region); 700 672 smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL); ··· 714 666 smem->dev = &pdev->dev; 715 667 smem->num_regions = num_regions; 716 668 717 - np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); 718 - if (!np) { 719 - dev_err(&pdev->dev, "No memory-region specified\n"); 720 - return -EINVAL; 721 - } 722 - 723 - ret = of_address_to_resource(np, 0, &r); 724 - of_node_put(np); 669 + ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0); 725 670 if (ret) 726 671 return ret; 727 672 728 - smem->regions[0].aux_base = (u32)r.start; 729 - smem->regions[0].size = resource_size(&r); 730 - smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev, 731 - r.start, 732 - resource_size(&r)); 733 - if (!smem->regions[0].virt_base) 734 - return -ENOMEM; 735 - 736 - for (i = 1; 
i < num_regions; i++) { 737 - res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1); 738 - 739 - smem->regions[i].aux_base = (u32)res->start; 740 - smem->regions[i].size = resource_size(res); 741 - smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev, 742 - res->start, 743 - resource_size(res)); 744 - if (!smem->regions[i].virt_base) 745 - return -ENOMEM; 746 - } 673 + if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev, 674 + "qcom,rpm-msg-ram", 1))) 675 + return ret; 747 676 748 677 header = smem->regions[0].virt_base; 749 - if (header->initialized != 1 || header->reserved) { 678 + if (le32_to_cpu(header->initialized) != 1 || 679 + le32_to_cpu(header->reserved)) { 750 680 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); 751 681 return -EINVAL; 752 682 } ··· 756 730 757 731 static int qcom_smem_remove(struct platform_device *pdev) 758 732 { 759 - __smem = NULL; 760 733 hwspin_lock_free(__smem->hwlock); 734 + __smem = NULL; 761 735 762 736 return 0; 763 737 }
+2
include/linux/qcom_scm.h
··· 23 23 u32 val; 24 24 }; 25 25 26 + extern bool qcom_scm_is_available(void); 27 + 26 28 extern bool qcom_scm_hdcp_available(void); 27 29 extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, 28 30 u32 *resp);
+11
include/linux/soc/qcom/smd.h
··· 9 9 struct qcom_smd_lookup; 10 10 11 11 /** 12 + * struct qcom_smd_id - struct used for matching a smd device 13 + * @name: name of the channel 14 + */ 15 + struct qcom_smd_id { 16 + char name[20]; 17 + }; 18 + 19 + /** 12 20 * struct qcom_smd_device - smd device struct 13 21 * @dev: the device struct 14 22 * @channel: handle to the smd channel for this device ··· 29 21 /** 30 22 * struct qcom_smd_driver - smd driver struct 31 23 * @driver: underlying device driver 24 + * @smd_match_table: static channel match table 32 25 * @probe: invoked when the smd channel is found 33 26 * @remove: invoked when the smd channel is closed 34 27 * @callback: invoked when an inbound message is received on the channel, ··· 38 29 */ 39 30 struct qcom_smd_driver { 40 31 struct device_driver driver; 32 + const struct qcom_smd_id *smd_match_table; 33 + 41 34 int (*probe)(struct qcom_smd_device *dev); 42 35 void (*remove)(struct qcom_smd_device *dev); 43 36 int (*callback)(struct qcom_smd_device *, const void *, size_t);
+1 -1
include/linux/soc/qcom/smem.h
··· 4 4 #define QCOM_SMEM_HOST_ANY -1 5 5 6 6 int qcom_smem_alloc(unsigned host, unsigned item, size_t size); 7 - int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size); 7 + void *qcom_smem_get(unsigned host, unsigned item, size_t *size); 8 8 9 9 int qcom_smem_get_free_space(unsigned host); 10 10