Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

slimbus: ngd: Add qcom SLIMBus NGD driver

This patch adds support for the Qualcomm SLIMBus Non-Generic Device (NGD)
controller driver.
This is a light-weight SLIMbus controller driver responsible for
communicating with slave HW directly over the bus using the messaging
interface, and for communicating with the master component residing on the
ADSP for bandwidth and data-channel management.

Based on initial work from
Karthikeyan Ramasubramanian <kramasub@codeaurora.org> and
Sagar Dharia <sdharia@codeaurora.org>

Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Tested-by: Craig Tatlor <ctatlor97@gmail.com>
Reviewed-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Srinivas Kandagatla and committed by Greg Kroah-Hartman
(commit 917809e2, parent 992d3615).

+1403
+11
drivers/slimbus/Kconfig
··· 20 20 Select driver if Qualcomm's SLIMbus Manager Component is 21 21 programmed using Linux kernel. 22 22 23 + config SLIM_QCOM_NGD_CTRL 24 + tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component" 25 + depends on QCOM_QMI_HELPERS 26 + depends on HAS_IOMEM && DMA_ENGINE 27 + help 28 + Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device 29 + Component is programmed using Linux kernel. 30 + This is light-weight slimbus controller driver responsible for 31 + communicating with slave HW directly over the bus using messaging 32 + interface, and communicating with master component residing on ADSP 33 + for bandwidth and data-channel management. 23 34 endif
+3
drivers/slimbus/Makefile
··· 8 8 #Controllers 9 9 obj-$(CONFIG_SLIM_QCOM_CTRL) += slim-qcom-ctrl.o 10 10 slim-qcom-ctrl-y := qcom-ctrl.o 11 + 12 + obj-$(CONFIG_SLIM_QCOM_NGD_CTRL) += slim-qcom-ngd-ctrl.o 13 + slim-qcom-ngd-ctrl-y := qcom-ngd-ctrl.o
+1381
drivers/slimbus/qcom-ngd-ctrl.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. 3 + // Copyright (c) 2018, Linaro Limited 4 + 5 + #include <linux/irq.h> 6 + #include <linux/kernel.h> 7 + #include <linux/init.h> 8 + #include <linux/slab.h> 9 + #include <linux/interrupt.h> 10 + #include <linux/platform_device.h> 11 + #include <linux/dma-mapping.h> 12 + #include <linux/dmaengine.h> 13 + #include <linux/slimbus.h> 14 + #include <linux/delay.h> 15 + #include <linux/pm_runtime.h> 16 + #include <linux/of.h> 17 + #include <linux/io.h> 18 + #include <linux/soc/qcom/qmi.h> 19 + #include <net/sock.h> 20 + #include "slimbus.h" 21 + 22 + /* NGD (Non-ported Generic Device) registers */ 23 + #define NGD_CFG 0x0 24 + #define NGD_CFG_ENABLE BIT(0) 25 + #define NGD_CFG_RX_MSGQ_EN BIT(1) 26 + #define NGD_CFG_TX_MSGQ_EN BIT(2) 27 + #define NGD_STATUS 0x4 28 + #define NGD_LADDR BIT(1) 29 + #define NGD_RX_MSGQ_CFG 0x8 30 + #define NGD_INT_EN 0x10 31 + #define NGD_INT_RECFG_DONE BIT(24) 32 + #define NGD_INT_TX_NACKED_2 BIT(25) 33 + #define NGD_INT_MSG_BUF_CONTE BIT(26) 34 + #define NGD_INT_MSG_TX_INVAL BIT(27) 35 + #define NGD_INT_IE_VE_CHG BIT(28) 36 + #define NGD_INT_DEV_ERR BIT(29) 37 + #define NGD_INT_RX_MSG_RCVD BIT(30) 38 + #define NGD_INT_TX_MSG_SENT BIT(31) 39 + #define NGD_INT_STAT 0x14 40 + #define NGD_INT_CLR 0x18 41 + #define DEF_NGD_INT_MASK (NGD_INT_TX_NACKED_2 | NGD_INT_MSG_BUF_CONTE | \ 42 + NGD_INT_MSG_TX_INVAL | NGD_INT_IE_VE_CHG | \ 43 + NGD_INT_DEV_ERR | NGD_INT_TX_MSG_SENT | \ 44 + NGD_INT_RX_MSG_RCVD) 45 + 46 + /* Slimbus QMI service */ 47 + #define SLIMBUS_QMI_SVC_ID 0x0301 48 + #define SLIMBUS_QMI_SVC_V1 1 49 + #define SLIMBUS_QMI_INS_ID 0 50 + #define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020 51 + #define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020 52 + #define SLIMBUS_QMI_POWER_REQ_V01 0x0021 53 + #define SLIMBUS_QMI_POWER_RESP_V01 0x0021 54 + #define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022 55 + #define 
SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022 56 + #define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14 57 + #define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7 58 + #define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14 59 + #define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7 60 + #define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7 61 + /* QMI response timeout of 500ms */ 62 + #define SLIMBUS_QMI_RESP_TOUT 1000 63 + 64 + /* User defined commands */ 65 + #define SLIM_USR_MC_GENERIC_ACK 0x25 66 + #define SLIM_USR_MC_MASTER_CAPABILITY 0x0 67 + #define SLIM_USR_MC_REPORT_SATELLITE 0x1 68 + #define SLIM_USR_MC_ADDR_QUERY 0xD 69 + #define SLIM_USR_MC_ADDR_REPLY 0xE 70 + #define SLIM_USR_MC_DEFINE_CHAN 0x20 71 + #define SLIM_USR_MC_DEF_ACT_CHAN 0x21 72 + #define SLIM_USR_MC_CHAN_CTRL 0x23 73 + #define SLIM_USR_MC_RECONFIG_NOW 0x24 74 + #define SLIM_USR_MC_REQ_BW 0x28 75 + #define SLIM_USR_MC_CONNECT_SRC 0x2C 76 + #define SLIM_USR_MC_CONNECT_SINK 0x2D 77 + #define SLIM_USR_MC_DISCONNECT_PORT 0x2E 78 + #define SLIM_USR_MC_REPEAT_CHANGE_VALUE 0x0 79 + 80 + #define QCOM_SLIM_NGD_AUTOSUSPEND MSEC_PER_SEC 81 + #define SLIM_RX_MSGQ_TIMEOUT_VAL 0x10000 82 + 83 + #define SLIM_LA_MGR 0xFF 84 + #define SLIM_ROOT_FREQ 24576000 85 + #define LADDR_RETRY 5 86 + 87 + /* Per spec.max 40 bytes per received message */ 88 + #define SLIM_MSGQ_BUF_LEN 40 89 + #define QCOM_SLIM_NGD_DESC_NUM 32 90 + 91 + #define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \ 92 + ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16)) 93 + 94 + #define INIT_MX_RETRIES 10 95 + #define DEF_RETRY_MS 10 96 + #define SAT_MAGIC_LSB 0xD9 97 + #define SAT_MAGIC_MSB 0xC5 98 + #define SAT_MSG_VER 0x1 99 + #define SAT_MSG_PROT 0x1 100 + #define to_ngd(d) container_of(d, struct qcom_slim_ngd, dev) 101 + 102 + struct ngd_reg_offset_data { 103 + u32 offset, size; 104 + }; 105 + 106 + static const struct ngd_reg_offset_data ngd_v1_5_offset_info = { 107 + .offset = 0x1000, 108 + .size = 0x1000, 109 + }; 110 + 111 + enum 
qcom_slim_ngd_state { 112 + QCOM_SLIM_NGD_CTRL_AWAKE, 113 + QCOM_SLIM_NGD_CTRL_IDLE, 114 + QCOM_SLIM_NGD_CTRL_ASLEEP, 115 + QCOM_SLIM_NGD_CTRL_DOWN, 116 + }; 117 + 118 + struct qcom_slim_ngd_qmi { 119 + struct qmi_handle qmi; 120 + struct sockaddr_qrtr svc_info; 121 + struct qmi_handle svc_event_hdl; 122 + struct qmi_response_type_v01 resp; 123 + struct qmi_handle *handle; 124 + struct completion qmi_comp; 125 + }; 126 + 127 + struct qcom_slim_ngd_ctrl; 128 + struct qcom_slim_ngd; 129 + 130 + struct qcom_slim_ngd_dma_desc { 131 + struct dma_async_tx_descriptor *desc; 132 + struct qcom_slim_ngd_ctrl *ctrl; 133 + struct completion *comp; 134 + dma_cookie_t cookie; 135 + dma_addr_t phys; 136 + void *base; 137 + }; 138 + 139 + struct qcom_slim_ngd { 140 + struct platform_device *pdev; 141 + void __iomem *base; 142 + int id; 143 + }; 144 + 145 + struct qcom_slim_ngd_ctrl { 146 + struct slim_framer framer; 147 + struct slim_controller ctrl; 148 + struct qcom_slim_ngd_qmi qmi; 149 + struct qcom_slim_ngd *ngd; 150 + struct device *dev; 151 + void __iomem *base; 152 + struct dma_chan *dma_rx_channel; 153 + struct dma_chan *dma_tx_channel; 154 + struct qcom_slim_ngd_dma_desc rx_desc[QCOM_SLIM_NGD_DESC_NUM]; 155 + struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM]; 156 + struct completion reconf; 157 + struct work_struct m_work; 158 + struct workqueue_struct *mwq; 159 + spinlock_t tx_buf_lock; 160 + enum qcom_slim_ngd_state state; 161 + dma_addr_t rx_phys_base; 162 + dma_addr_t tx_phys_base; 163 + void *rx_base; 164 + void *tx_base; 165 + int tx_tail; 166 + int tx_head; 167 + u32 ver; 168 + }; 169 + 170 + enum slimbus_mode_enum_type_v01 { 171 + /* To force a 32 bit signed enum. 
Do not change or use*/ 172 + SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN, 173 + SLIMBUS_MODE_SATELLITE_V01 = 1, 174 + SLIMBUS_MODE_MASTER_V01 = 2, 175 + SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX, 176 + }; 177 + 178 + enum slimbus_pm_enum_type_v01 { 179 + /* To force a 32 bit signed enum. Do not change or use*/ 180 + SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN, 181 + SLIMBUS_PM_INACTIVE_V01 = 1, 182 + SLIMBUS_PM_ACTIVE_V01 = 2, 183 + SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX, 184 + }; 185 + 186 + enum slimbus_resp_enum_type_v01 { 187 + SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN, 188 + SLIMBUS_RESP_SYNCHRONOUS_V01 = 1, 189 + SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX, 190 + }; 191 + 192 + struct slimbus_select_inst_req_msg_v01 { 193 + uint32_t instance; 194 + uint8_t mode_valid; 195 + enum slimbus_mode_enum_type_v01 mode; 196 + }; 197 + 198 + struct slimbus_select_inst_resp_msg_v01 { 199 + struct qmi_response_type_v01 resp; 200 + }; 201 + 202 + struct slimbus_power_req_msg_v01 { 203 + enum slimbus_pm_enum_type_v01 pm_req; 204 + uint8_t resp_type_valid; 205 + enum slimbus_resp_enum_type_v01 resp_type; 206 + }; 207 + 208 + struct slimbus_power_resp_msg_v01 { 209 + struct qmi_response_type_v01 resp; 210 + }; 211 + 212 + static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = { 213 + { 214 + .data_type = QMI_UNSIGNED_4_BYTE, 215 + .elem_len = 1, 216 + .elem_size = sizeof(uint32_t), 217 + .array_type = NO_ARRAY, 218 + .tlv_type = 0x01, 219 + .offset = offsetof(struct slimbus_select_inst_req_msg_v01, 220 + instance), 221 + .ei_array = NULL, 222 + }, 223 + { 224 + .data_type = QMI_OPT_FLAG, 225 + .elem_len = 1, 226 + .elem_size = sizeof(uint8_t), 227 + .array_type = NO_ARRAY, 228 + .tlv_type = 0x10, 229 + .offset = offsetof(struct slimbus_select_inst_req_msg_v01, 230 + mode_valid), 231 + .ei_array = NULL, 232 + }, 233 + { 234 + .data_type = QMI_UNSIGNED_4_BYTE, 235 + .elem_len = 1, 236 + .elem_size = sizeof(enum 
slimbus_mode_enum_type_v01), 237 + .array_type = NO_ARRAY, 238 + .tlv_type = 0x10, 239 + .offset = offsetof(struct slimbus_select_inst_req_msg_v01, 240 + mode), 241 + .ei_array = NULL, 242 + }, 243 + { 244 + .data_type = QMI_EOTI, 245 + .elem_len = 0, 246 + .elem_size = 0, 247 + .array_type = NO_ARRAY, 248 + .tlv_type = 0x00, 249 + .offset = 0, 250 + .ei_array = NULL, 251 + }, 252 + }; 253 + 254 + static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = { 255 + { 256 + .data_type = QMI_STRUCT, 257 + .elem_len = 1, 258 + .elem_size = sizeof(struct qmi_response_type_v01), 259 + .array_type = NO_ARRAY, 260 + .tlv_type = 0x02, 261 + .offset = offsetof(struct slimbus_select_inst_resp_msg_v01, 262 + resp), 263 + .ei_array = qmi_response_type_v01_ei, 264 + }, 265 + { 266 + .data_type = QMI_EOTI, 267 + .elem_len = 0, 268 + .elem_size = 0, 269 + .array_type = NO_ARRAY, 270 + .tlv_type = 0x00, 271 + .offset = 0, 272 + .ei_array = NULL, 273 + }, 274 + }; 275 + 276 + static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = { 277 + { 278 + .data_type = QMI_UNSIGNED_4_BYTE, 279 + .elem_len = 1, 280 + .elem_size = sizeof(enum slimbus_pm_enum_type_v01), 281 + .array_type = NO_ARRAY, 282 + .tlv_type = 0x01, 283 + .offset = offsetof(struct slimbus_power_req_msg_v01, 284 + pm_req), 285 + .ei_array = NULL, 286 + }, 287 + { 288 + .data_type = QMI_OPT_FLAG, 289 + .elem_len = 1, 290 + .elem_size = sizeof(uint8_t), 291 + .array_type = NO_ARRAY, 292 + .tlv_type = 0x10, 293 + .offset = offsetof(struct slimbus_power_req_msg_v01, 294 + resp_type_valid), 295 + }, 296 + { 297 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 298 + .elem_len = 1, 299 + .elem_size = sizeof(enum slimbus_resp_enum_type_v01), 300 + .array_type = NO_ARRAY, 301 + .tlv_type = 0x10, 302 + .offset = offsetof(struct slimbus_power_req_msg_v01, 303 + resp_type), 304 + }, 305 + { 306 + .data_type = QMI_EOTI, 307 + .elem_len = 0, 308 + .elem_size = 0, 309 + .array_type = NO_ARRAY, 310 + .tlv_type = 0x00, 311 + .offset = 
0, 312 + .ei_array = NULL, 313 + }, 314 + }; 315 + 316 + static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = { 317 + { 318 + .data_type = QMI_STRUCT, 319 + .elem_len = 1, 320 + .elem_size = sizeof(struct qmi_response_type_v01), 321 + .array_type = NO_ARRAY, 322 + .tlv_type = 0x02, 323 + .offset = offsetof(struct slimbus_power_resp_msg_v01, resp), 324 + .ei_array = qmi_response_type_v01_ei, 325 + }, 326 + { 327 + .data_type = QMI_EOTI, 328 + .elem_len = 0, 329 + .elem_size = 0, 330 + .array_type = NO_ARRAY, 331 + .tlv_type = 0x00, 332 + .offset = 0, 333 + .ei_array = NULL, 334 + }, 335 + }; 336 + 337 + static int qcom_slim_qmi_send_select_inst_req(struct qcom_slim_ngd_ctrl *ctrl, 338 + struct slimbus_select_inst_req_msg_v01 *req) 339 + { 340 + struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } }; 341 + struct qmi_txn txn; 342 + int rc; 343 + 344 + rc = qmi_txn_init(ctrl->qmi.handle, &txn, 345 + slimbus_select_inst_resp_msg_v01_ei, &resp); 346 + if (rc < 0) { 347 + dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc); 348 + return rc; 349 + } 350 + 351 + rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn, 352 + SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01, 353 + SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN, 354 + slimbus_select_inst_req_msg_v01_ei, req); 355 + if (rc < 0) { 356 + dev_err(ctrl->dev, "QMI send req fail %d\n", rc); 357 + qmi_txn_cancel(&txn); 358 + return rc; 359 + } 360 + 361 + rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT); 362 + if (rc < 0) { 363 + dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc); 364 + return rc; 365 + } 366 + /* Check the response */ 367 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 368 + dev_err(ctrl->dev, "QMI request failed 0x%x\n", 369 + resp.resp.result); 370 + return -EREMOTEIO; 371 + } 372 + 373 + return 0; 374 + } 375 + 376 + static void qcom_slim_qmi_power_resp_cb(struct qmi_handle *handle, 377 + struct sockaddr_qrtr *sq, 378 + struct qmi_txn *txn, const void *data) 379 + { 380 + struct 
slimbus_power_resp_msg_v01 *resp; 381 + 382 + resp = (struct slimbus_power_resp_msg_v01 *)data; 383 + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) 384 + pr_err("QMI power request failed 0x%x\n", 385 + resp->resp.result); 386 + 387 + complete(&txn->completion); 388 + } 389 + 390 + static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl, 391 + struct slimbus_power_req_msg_v01 *req) 392 + { 393 + struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } }; 394 + struct qmi_txn txn; 395 + int rc; 396 + 397 + rc = qmi_txn_init(ctrl->qmi.handle, &txn, 398 + slimbus_power_resp_msg_v01_ei, &resp); 399 + 400 + rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn, 401 + SLIMBUS_QMI_POWER_REQ_V01, 402 + SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN, 403 + slimbus_power_req_msg_v01_ei, req); 404 + if (rc < 0) { 405 + dev_err(ctrl->dev, "QMI send req fail %d\n", rc); 406 + qmi_txn_cancel(&txn); 407 + return rc; 408 + } 409 + 410 + rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT); 411 + if (rc < 0) { 412 + dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc); 413 + return rc; 414 + } 415 + 416 + /* Check the response */ 417 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 418 + dev_err(ctrl->dev, "QMI request failed 0x%x\n", 419 + resp.resp.result); 420 + return -EREMOTEIO; 421 + } 422 + 423 + return 0; 424 + } 425 + 426 + static struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = { 427 + { 428 + .type = QMI_RESPONSE, 429 + .msg_id = SLIMBUS_QMI_POWER_RESP_V01, 430 + .ei = slimbus_power_resp_msg_v01_ei, 431 + .decoded_size = sizeof(struct slimbus_power_resp_msg_v01), 432 + .fn = qcom_slim_qmi_power_resp_cb, 433 + }, 434 + {} 435 + }; 436 + 437 + static int qcom_slim_qmi_init(struct qcom_slim_ngd_ctrl *ctrl, 438 + bool apps_is_master) 439 + { 440 + struct slimbus_select_inst_req_msg_v01 req; 441 + struct qmi_handle *handle; 442 + int rc; 443 + 444 + handle = devm_kzalloc(ctrl->dev, sizeof(*handle), GFP_KERNEL); 445 + if (!handle) 446 + return -ENOMEM; 447 + 448 + rc = 
qmi_handle_init(handle, SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN, 449 + NULL, qcom_slim_qmi_msg_handlers); 450 + if (rc < 0) { 451 + dev_err(ctrl->dev, "QMI client init failed: %d\n", rc); 452 + goto qmi_handle_init_failed; 453 + } 454 + 455 + rc = kernel_connect(handle->sock, 456 + (struct sockaddr *)&ctrl->qmi.svc_info, 457 + sizeof(ctrl->qmi.svc_info), 0); 458 + if (rc < 0) { 459 + dev_err(ctrl->dev, "Remote Service connect failed: %d\n", rc); 460 + goto qmi_connect_to_service_failed; 461 + } 462 + 463 + /* Instance is 0 based */ 464 + req.instance = (ctrl->ngd->id >> 1); 465 + req.mode_valid = 1; 466 + 467 + /* Mode indicates the role of the ADSP */ 468 + if (apps_is_master) 469 + req.mode = SLIMBUS_MODE_SATELLITE_V01; 470 + else 471 + req.mode = SLIMBUS_MODE_MASTER_V01; 472 + 473 + ctrl->qmi.handle = handle; 474 + 475 + rc = qcom_slim_qmi_send_select_inst_req(ctrl, &req); 476 + if (rc) { 477 + dev_err(ctrl->dev, "failed to select h/w instance\n"); 478 + goto qmi_select_instance_failed; 479 + } 480 + 481 + return 0; 482 + 483 + qmi_select_instance_failed: 484 + ctrl->qmi.handle = NULL; 485 + qmi_connect_to_service_failed: 486 + qmi_handle_release(handle); 487 + qmi_handle_init_failed: 488 + devm_kfree(ctrl->dev, handle); 489 + return rc; 490 + } 491 + 492 + static void qcom_slim_qmi_exit(struct qcom_slim_ngd_ctrl *ctrl) 493 + { 494 + if (!ctrl->qmi.handle) 495 + return; 496 + 497 + qmi_handle_release(ctrl->qmi.handle); 498 + devm_kfree(ctrl->dev, ctrl->qmi.handle); 499 + ctrl->qmi.handle = NULL; 500 + } 501 + 502 + static int qcom_slim_qmi_power_request(struct qcom_slim_ngd_ctrl *ctrl, 503 + bool active) 504 + { 505 + struct slimbus_power_req_msg_v01 req; 506 + 507 + if (active) 508 + req.pm_req = SLIMBUS_PM_ACTIVE_V01; 509 + else 510 + req.pm_req = SLIMBUS_PM_INACTIVE_V01; 511 + 512 + req.resp_type_valid = 0; 513 + 514 + return qcom_slim_qmi_send_power_request(ctrl, &req); 515 + } 516 + 517 + static u32 *qcom_slim_ngd_tx_msg_get(struct qcom_slim_ngd_ctrl *ctrl, int 
len, 518 + struct completion *comp) 519 + { 520 + struct qcom_slim_ngd_dma_desc *desc; 521 + unsigned long flags; 522 + 523 + spin_lock_irqsave(&ctrl->tx_buf_lock, flags); 524 + 525 + if ((ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM == ctrl->tx_head) { 526 + spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags); 527 + return NULL; 528 + } 529 + desc = &ctrl->txdesc[ctrl->tx_tail]; 530 + desc->base = ctrl->tx_base + ctrl->tx_tail * SLIM_MSGQ_BUF_LEN; 531 + desc->comp = comp; 532 + ctrl->tx_tail = (ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM; 533 + 534 + spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags); 535 + 536 + return desc->base; 537 + } 538 + 539 + static void qcom_slim_ngd_tx_msg_dma_cb(void *args) 540 + { 541 + struct qcom_slim_ngd_dma_desc *desc = args; 542 + struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl; 543 + unsigned long flags; 544 + 545 + spin_lock_irqsave(&ctrl->tx_buf_lock, flags); 546 + 547 + if (desc->comp) { 548 + complete(desc->comp); 549 + desc->comp = NULL; 550 + } 551 + 552 + ctrl->tx_head = (ctrl->tx_head + 1) % QCOM_SLIM_NGD_DESC_NUM; 553 + spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags); 554 + } 555 + 556 + static int qcom_slim_ngd_tx_msg_post(struct qcom_slim_ngd_ctrl *ctrl, 557 + void *buf, int len) 558 + { 559 + struct qcom_slim_ngd_dma_desc *desc; 560 + unsigned long flags; 561 + int index, offset; 562 + 563 + spin_lock_irqsave(&ctrl->tx_buf_lock, flags); 564 + offset = buf - ctrl->tx_base; 565 + index = offset/SLIM_MSGQ_BUF_LEN; 566 + 567 + desc = &ctrl->txdesc[index]; 568 + desc->phys = ctrl->tx_phys_base + offset; 569 + desc->base = ctrl->tx_base + offset; 570 + desc->ctrl = ctrl; 571 + len = (len + 3) & 0xfc; 572 + 573 + desc->desc = dmaengine_prep_slave_single(ctrl->dma_tx_channel, 574 + desc->phys, len, 575 + DMA_MEM_TO_DEV, 576 + DMA_PREP_INTERRUPT); 577 + if (!desc->desc) { 578 + dev_err(ctrl->dev, "unable to prepare channel\n"); 579 + spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags); 580 + return -EINVAL; 581 + } 582 + 583 + 
desc->desc->callback = qcom_slim_ngd_tx_msg_dma_cb; 584 + desc->desc->callback_param = desc; 585 + desc->desc->cookie = dmaengine_submit(desc->desc); 586 + dma_async_issue_pending(ctrl->dma_tx_channel); 587 + spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags); 588 + 589 + return 0; 590 + } 591 + 592 + static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf) 593 + { 594 + u8 mc, mt, len; 595 + 596 + mt = SLIM_HEADER_GET_MT(buf[0]); 597 + len = SLIM_HEADER_GET_RL(buf[0]); 598 + mc = SLIM_HEADER_GET_MC(buf[1]); 599 + 600 + if (mc == SLIM_USR_MC_MASTER_CAPABILITY && 601 + mt == SLIM_MSG_MT_SRC_REFERRED_USER) 602 + queue_work(ctrl->mwq, &ctrl->m_work); 603 + 604 + if (mc == SLIM_MSG_MC_REPLY_INFORMATION || 605 + mc == SLIM_MSG_MC_REPLY_VALUE || (mc == SLIM_USR_MC_ADDR_REPLY && 606 + mt == SLIM_MSG_MT_SRC_REFERRED_USER)) { 607 + slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4); 608 + pm_runtime_mark_last_busy(ctrl->dev); 609 + } 610 + } 611 + 612 + static void qcom_slim_ngd_rx_msgq_cb(void *args) 613 + { 614 + struct qcom_slim_ngd_dma_desc *desc = args; 615 + struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl; 616 + 617 + qcom_slim_ngd_rx(ctrl, (u8 *)desc->base); 618 + /* Add descriptor back to the queue */ 619 + desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel, 620 + desc->phys, SLIM_MSGQ_BUF_LEN, 621 + DMA_DEV_TO_MEM, 622 + DMA_PREP_INTERRUPT); 623 + if (!desc->desc) { 624 + dev_err(ctrl->dev, "Unable to prepare rx channel\n"); 625 + return; 626 + } 627 + 628 + desc->desc->callback = qcom_slim_ngd_rx_msgq_cb; 629 + desc->desc->callback_param = desc; 630 + desc->desc->cookie = dmaengine_submit(desc->desc); 631 + dma_async_issue_pending(ctrl->dma_rx_channel); 632 + } 633 + 634 + static int qcom_slim_ngd_post_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl) 635 + { 636 + struct qcom_slim_ngd_dma_desc *desc; 637 + int i; 638 + 639 + for (i = 0; i < QCOM_SLIM_NGD_DESC_NUM; i++) { 640 + desc = &ctrl->rx_desc[i]; 641 + desc->phys = ctrl->rx_phys_base + i * 
SLIM_MSGQ_BUF_LEN; 642 + desc->ctrl = ctrl; 643 + desc->base = ctrl->rx_base + i * SLIM_MSGQ_BUF_LEN; 644 + desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel, 645 + desc->phys, SLIM_MSGQ_BUF_LEN, 646 + DMA_DEV_TO_MEM, 647 + DMA_PREP_INTERRUPT); 648 + if (!desc->desc) { 649 + dev_err(ctrl->dev, "Unable to prepare rx channel\n"); 650 + return -EINVAL; 651 + } 652 + 653 + desc->desc->callback = qcom_slim_ngd_rx_msgq_cb; 654 + desc->desc->callback_param = desc; 655 + desc->desc->cookie = dmaengine_submit(desc->desc); 656 + } 657 + dma_async_issue_pending(ctrl->dma_rx_channel); 658 + 659 + return 0; 660 + } 661 + 662 + static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl) 663 + { 664 + struct device *dev = ctrl->dev; 665 + int ret, size; 666 + 667 + ctrl->dma_rx_channel = dma_request_slave_channel(dev, "rx"); 668 + if (!ctrl->dma_rx_channel) { 669 + dev_err(dev, "Failed to request dma channels"); 670 + return -EINVAL; 671 + } 672 + 673 + size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN; 674 + ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base, 675 + GFP_KERNEL); 676 + if (!ctrl->rx_base) { 677 + dev_err(dev, "dma_alloc_coherent failed\n"); 678 + ret = -ENOMEM; 679 + goto rel_rx; 680 + } 681 + 682 + ret = qcom_slim_ngd_post_rx_msgq(ctrl); 683 + if (ret) { 684 + dev_err(dev, "post_rx_msgq() failed 0x%x\n", ret); 685 + goto rx_post_err; 686 + } 687 + 688 + return 0; 689 + 690 + rx_post_err: 691 + dma_free_coherent(dev, size, ctrl->rx_base, ctrl->rx_phys_base); 692 + rel_rx: 693 + dma_release_channel(ctrl->dma_rx_channel); 694 + return ret; 695 + } 696 + 697 + static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl) 698 + { 699 + struct device *dev = ctrl->dev; 700 + unsigned long flags; 701 + int ret = 0; 702 + int size; 703 + 704 + ctrl->dma_tx_channel = dma_request_slave_channel(dev, "tx"); 705 + if (!ctrl->dma_tx_channel) { 706 + dev_err(dev, "Failed to request dma channels"); 707 + return -EINVAL; 708 + } 709 + 
710 + size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN); 711 + ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base, 712 + GFP_KERNEL); 713 + if (!ctrl->tx_base) { 714 + dev_err(dev, "dma_alloc_coherent failed\n"); 715 + ret = -EINVAL; 716 + goto rel_tx; 717 + } 718 + 719 + spin_lock_irqsave(&ctrl->tx_buf_lock, flags); 720 + ctrl->tx_tail = 0; 721 + ctrl->tx_head = 0; 722 + spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags); 723 + 724 + return 0; 725 + rel_tx: 726 + dma_release_channel(ctrl->dma_tx_channel); 727 + return ret; 728 + } 729 + 730 + static int qcom_slim_ngd_init_dma(struct qcom_slim_ngd_ctrl *ctrl) 731 + { 732 + int ret = 0; 733 + 734 + ret = qcom_slim_ngd_init_rx_msgq(ctrl); 735 + if (ret) { 736 + dev_err(ctrl->dev, "rx dma init failed\n"); 737 + return ret; 738 + } 739 + 740 + ret = qcom_slim_ngd_init_tx_msgq(ctrl); 741 + if (ret) 742 + dev_err(ctrl->dev, "tx dma init failed\n"); 743 + 744 + return ret; 745 + } 746 + 747 + static irqreturn_t qcom_slim_ngd_interrupt(int irq, void *d) 748 + { 749 + struct qcom_slim_ngd_ctrl *ctrl = d; 750 + void __iomem *base = ctrl->ngd->base; 751 + u32 stat = readl(base + NGD_INT_STAT); 752 + 753 + if ((stat & NGD_INT_MSG_BUF_CONTE) || 754 + (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) || 755 + (stat & NGD_INT_TX_NACKED_2)) { 756 + dev_err(ctrl->dev, "Error Interrupt received 0x%x\n", stat); 757 + } 758 + 759 + writel(stat, base + NGD_INT_CLR); 760 + 761 + return IRQ_HANDLED; 762 + } 763 + 764 + static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl, 765 + struct slim_msg_txn *txn) 766 + { 767 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev); 768 + DECLARE_COMPLETION_ONSTACK(tx_sent); 769 + int ret, timeout; 770 + u32 *pbuf; 771 + u8 *puc; 772 + u8 la = txn->la; 773 + 774 + if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG) 775 + return -EPROTONOSUPPORT; 776 + 777 + if (txn->mt == SLIM_MSG_MT_CORE && 778 + (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION && 779 + txn->mc 
<= SLIM_MSG_MC_RECONFIGURE_NOW)) 780 + return 0; 781 + 782 + if (txn->dt == SLIM_MSG_DEST_ENUMADDR) 783 + return -EPROTONOSUPPORT; 784 + 785 + if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN || 786 + txn->rl > SLIM_MSGQ_BUF_LEN) { 787 + dev_err(ctrl->dev, "msg exeeds HW limit\n"); 788 + return -EINVAL; 789 + } 790 + 791 + pbuf = qcom_slim_ngd_tx_msg_get(ctrl, txn->rl, &tx_sent); 792 + if (!pbuf) { 793 + dev_err(ctrl->dev, "Message buffer unavailable\n"); 794 + return -ENOMEM; 795 + } 796 + 797 + /* HW expects length field to be excluded */ 798 + txn->rl--; 799 + puc = (u8 *)pbuf; 800 + *pbuf = 0; 801 + if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) { 802 + *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0, 803 + la); 804 + puc += 3; 805 + } else { 806 + *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1, 807 + la); 808 + puc += 2; 809 + } 810 + 811 + if (slim_tid_txn(txn->mt, txn->mc)) 812 + *(puc++) = txn->tid; 813 + 814 + if (slim_ec_txn(txn->mt, txn->mc)) { 815 + *(puc++) = (txn->ec & 0xFF); 816 + *(puc++) = (txn->ec >> 8) & 0xFF; 817 + } 818 + 819 + if (txn->msg && txn->msg->wbuf) 820 + memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes); 821 + 822 + ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl); 823 + if (ret) 824 + return ret; 825 + 826 + timeout = wait_for_completion_timeout(&tx_sent, HZ); 827 + if (!timeout) { 828 + dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, 829 + txn->mt); 830 + return -ETIMEDOUT; 831 + } 832 + 833 + return 0; 834 + } 835 + 836 + static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl, 837 + struct slim_msg_txn *txn) 838 + { 839 + DECLARE_COMPLETION_ONSTACK(done); 840 + int ret, timeout; 841 + 842 + pm_runtime_get_sync(ctrl->dev); 843 + 844 + txn->comp = &done; 845 + 846 + ret = qcom_slim_ngd_xfer_msg(ctrl, txn); 847 + if (ret) 848 + return ret; 849 + 850 + timeout = wait_for_completion_timeout(&done, HZ); 851 + if (!timeout) { 852 + dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, 
853 + txn->mt); 854 + return -ETIMEDOUT; 855 + } 856 + return 0; 857 + } 858 + 859 + static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl, 860 + struct slim_eaddr *ea, u8 *laddr) 861 + { 862 + struct slim_val_inf msg = {0}; 863 + struct slim_msg_txn txn; 864 + u8 wbuf[10] = {0}; 865 + u8 rbuf[10] = {0}; 866 + int ret; 867 + 868 + txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER; 869 + txn.dt = SLIM_MSG_DEST_LOGICALADDR; 870 + txn.la = SLIM_LA_MGR; 871 + txn.ec = 0; 872 + 873 + txn.mc = SLIM_USR_MC_ADDR_QUERY; 874 + txn.rl = 11; 875 + txn.msg = &msg; 876 + txn.msg->num_bytes = 7; 877 + txn.msg->wbuf = wbuf; 878 + txn.msg->rbuf = rbuf; 879 + 880 + ret = slim_alloc_txn_tid(ctrl, &txn); 881 + if (ret < 0) 882 + return ret; 883 + 884 + wbuf[0] = (u8)txn.tid; 885 + memcpy(&wbuf[1], ea, sizeof(*ea)); 886 + 887 + ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn); 888 + if (ret) { 889 + slim_free_txn_tid(ctrl, &txn); 890 + return ret; 891 + } 892 + 893 + *laddr = rbuf[6]; 894 + 895 + return ret; 896 + } 897 + 898 + static int qcom_slim_ngd_exit_dma(struct qcom_slim_ngd_ctrl *ctrl) 899 + { 900 + if (ctrl->dma_rx_channel) { 901 + dmaengine_terminate_sync(ctrl->dma_rx_channel); 902 + dma_release_channel(ctrl->dma_rx_channel); 903 + } 904 + 905 + if (ctrl->dma_tx_channel) { 906 + dmaengine_terminate_sync(ctrl->dma_tx_channel); 907 + dma_release_channel(ctrl->dma_tx_channel); 908 + } 909 + 910 + ctrl->dma_tx_channel = ctrl->dma_rx_channel = NULL; 911 + 912 + return 0; 913 + } 914 + 915 + static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl) 916 + { 917 + u32 cfg = readl_relaxed(ctrl->ngd->base); 918 + 919 + if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) 920 + qcom_slim_ngd_init_dma(ctrl); 921 + 922 + /* By default enable message queues */ 923 + cfg |= NGD_CFG_RX_MSGQ_EN; 924 + cfg |= NGD_CFG_TX_MSGQ_EN; 925 + 926 + /* Enable NGD if it's not already enabled*/ 927 + if (!(cfg & NGD_CFG_ENABLE)) 928 + cfg |= NGD_CFG_ENABLE; 929 + 930 + writel_relaxed(cfg, ctrl->ngd->base); 
931 + } 932 + 933 + static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl) 934 + { 935 + enum qcom_slim_ngd_state cur_state = ctrl->state; 936 + struct qcom_slim_ngd *ngd = ctrl->ngd; 937 + u32 laddr, rx_msgq; 938 + int timeout, ret = 0; 939 + 940 + if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) { 941 + timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ); 942 + if (!timeout) 943 + return -EREMOTEIO; 944 + } 945 + 946 + if (ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP || 947 + ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) { 948 + ret = qcom_slim_qmi_power_request(ctrl, true); 949 + if (ret) { 950 + dev_err(ctrl->dev, "SLIM QMI power request failed:%d\n", 951 + ret); 952 + return ret; 953 + } 954 + } 955 + 956 + ctrl->ver = readl_relaxed(ctrl->base); 957 + /* Version info in 16 MSbits */ 958 + ctrl->ver >>= 16; 959 + 960 + laddr = readl_relaxed(ngd->base + NGD_STATUS); 961 + if (laddr & NGD_LADDR) { 962 + /* 963 + * external MDM restart case where ADSP itself was active framer 964 + * For example, modem restarted when playback was active 965 + */ 966 + if (cur_state == QCOM_SLIM_NGD_CTRL_AWAKE) { 967 + dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n"); 968 + return 0; 969 + } 970 + return 0; 971 + } 972 + 973 + writel_relaxed(DEF_NGD_INT_MASK, ngd->base + NGD_INT_EN); 974 + rx_msgq = readl_relaxed(ngd->base + NGD_RX_MSGQ_CFG); 975 + 976 + writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL, 977 + ngd->base + NGD_RX_MSGQ_CFG); 978 + qcom_slim_ngd_setup(ctrl); 979 + 980 + timeout = wait_for_completion_timeout(&ctrl->reconf, HZ); 981 + if (!timeout) { 982 + dev_err(ctrl->dev, "capability exchange timed-out\n"); 983 + return -ETIMEDOUT; 984 + } 985 + 986 + return 0; 987 + } 988 + 989 + static void qcom_slim_ngd_notify_slaves(struct qcom_slim_ngd_ctrl *ctrl) 990 + { 991 + struct slim_device *sbdev; 992 + struct device_node *node; 993 + 994 + for_each_child_of_node(ctrl->ngd->pdev->dev.of_node, node) { 995 + sbdev = of_slim_get_device(&ctrl->ctrl, node); 
996 + if (!sbdev) 997 + continue; 998 + 999 + if (slim_get_logical_addr(sbdev)) 1000 + dev_err(ctrl->dev, "Failed to get logical address\n"); 1001 + } 1002 + } 1003 + 1004 + static void qcom_slim_ngd_master_worker(struct work_struct *work) 1005 + { 1006 + struct qcom_slim_ngd_ctrl *ctrl; 1007 + struct slim_msg_txn txn; 1008 + struct slim_val_inf msg = {0}; 1009 + int retries = 0; 1010 + u8 wbuf[8]; 1011 + int ret = 0; 1012 + 1013 + ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work); 1014 + txn.dt = SLIM_MSG_DEST_LOGICALADDR; 1015 + txn.ec = 0; 1016 + txn.mc = SLIM_USR_MC_REPORT_SATELLITE; 1017 + txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER; 1018 + txn.la = SLIM_LA_MGR; 1019 + wbuf[0] = SAT_MAGIC_LSB; 1020 + wbuf[1] = SAT_MAGIC_MSB; 1021 + wbuf[2] = SAT_MSG_VER; 1022 + wbuf[3] = SAT_MSG_PROT; 1023 + txn.msg = &msg; 1024 + txn.msg->wbuf = wbuf; 1025 + txn.msg->num_bytes = 4; 1026 + txn.rl = 8; 1027 + 1028 + dev_info(ctrl->dev, "SLIM SAT: Rcvd master capability\n"); 1029 + 1030 + capability_retry: 1031 + ret = qcom_slim_ngd_xfer_msg(&ctrl->ctrl, &txn); 1032 + if (!ret) { 1033 + if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP) 1034 + complete(&ctrl->reconf); 1035 + else 1036 + dev_err(ctrl->dev, "unexpected state:%d\n", 1037 + ctrl->state); 1038 + 1039 + if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) 1040 + qcom_slim_ngd_notify_slaves(ctrl); 1041 + 1042 + } else if (ret == -EIO) { 1043 + dev_err(ctrl->dev, "capability message NACKed, retrying\n"); 1044 + if (retries < INIT_MX_RETRIES) { 1045 + msleep(DEF_RETRY_MS); 1046 + retries++; 1047 + goto capability_retry; 1048 + } 1049 + } else { 1050 + dev_err(ctrl->dev, "SLIM: capability TX failed:%d\n", ret); 1051 + } 1052 + } 1053 + 1054 + static int qcom_slim_ngd_runtime_resume(struct device *dev) 1055 + { 1056 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); 1057 + int ret = 0; 1058 + 1059 + if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP) 1060 + ret = qcom_slim_ngd_power_up(ctrl); 1061 + if (ret) { 1062 + /* Did SSR 
cause this power up failure */ 1063 + if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) 1064 + ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP; 1065 + else 1066 + dev_err(ctrl->dev, "HW wakeup attempt during SSR\n"); 1067 + } else { 1068 + ctrl->state = QCOM_SLIM_NGD_CTRL_AWAKE; 1069 + } 1070 + 1071 + return 0; 1072 + } 1073 + 1074 + static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable) 1075 + { 1076 + if (enable) { 1077 + int ret = qcom_slim_qmi_init(ctrl, false); 1078 + 1079 + if (ret) { 1080 + dev_err(ctrl->dev, "qmi init fail, ret:%d, state:%d\n", 1081 + ret, ctrl->state); 1082 + return ret; 1083 + } 1084 + /* controller state should be in sync with framework state */ 1085 + complete(&ctrl->qmi.qmi_comp); 1086 + if (!pm_runtime_enabled(ctrl->dev) || 1087 + !pm_runtime_suspended(ctrl->dev)) 1088 + qcom_slim_ngd_runtime_resume(ctrl->dev); 1089 + else 1090 + pm_runtime_resume(ctrl->dev); 1091 + pm_runtime_mark_last_busy(ctrl->dev); 1092 + pm_runtime_put(ctrl->dev); 1093 + } else { 1094 + qcom_slim_qmi_exit(ctrl); 1095 + } 1096 + 1097 + return 0; 1098 + } 1099 + 1100 + static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl, 1101 + struct qmi_service *service) 1102 + { 1103 + struct qcom_slim_ngd_qmi *qmi = 1104 + container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl); 1105 + struct qcom_slim_ngd_ctrl *ctrl = 1106 + container_of(qmi, struct qcom_slim_ngd_ctrl, qmi); 1107 + 1108 + qmi->svc_info.sq_family = AF_QIPCRTR; 1109 + qmi->svc_info.sq_node = service->node; 1110 + qmi->svc_info.sq_port = service->port; 1111 + 1112 + qcom_slim_ngd_enable(ctrl, true); 1113 + 1114 + return 0; 1115 + } 1116 + 1117 + static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl, 1118 + struct qmi_service *service) 1119 + { 1120 + struct qcom_slim_ngd_qmi *qmi = 1121 + container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl); 1122 + 1123 + qmi->svc_info.sq_node = 0; 1124 + qmi->svc_info.sq_port = 0; 1125 + } 1126 + 1127 + static struct qmi_ops 
qcom_slim_ngd_qmi_svc_event_ops = { 1128 + .new_server = qcom_slim_ngd_qmi_new_server, 1129 + .del_server = qcom_slim_ngd_qmi_del_server, 1130 + }; 1131 + 1132 + static int qcom_slim_ngd_qmi_svc_event_init(struct qcom_slim_ngd_ctrl *ctrl) 1133 + { 1134 + struct qcom_slim_ngd_qmi *qmi = &ctrl->qmi; 1135 + int ret; 1136 + 1137 + ret = qmi_handle_init(&qmi->svc_event_hdl, 0, 1138 + &qcom_slim_ngd_qmi_svc_event_ops, NULL); 1139 + if (ret < 0) { 1140 + dev_err(ctrl->dev, "qmi_handle_init failed: %d\n", ret); 1141 + return ret; 1142 + } 1143 + 1144 + ret = qmi_add_lookup(&qmi->svc_event_hdl, SLIMBUS_QMI_SVC_ID, 1145 + SLIMBUS_QMI_SVC_V1, SLIMBUS_QMI_INS_ID); 1146 + if (ret < 0) { 1147 + dev_err(ctrl->dev, "qmi_add_lookup failed: %d\n", ret); 1148 + qmi_handle_release(&qmi->svc_event_hdl); 1149 + } 1150 + return ret; 1151 + } 1152 + 1153 + static void qcom_slim_ngd_qmi_svc_event_deinit(struct qcom_slim_ngd_qmi *qmi) 1154 + { 1155 + qmi_handle_release(&qmi->svc_event_hdl); 1156 + } 1157 + 1158 + static struct platform_driver qcom_slim_ngd_driver; 1159 + #define QCOM_SLIM_NGD_DRV_NAME "qcom,slim-ngd" 1160 + 1161 + static const struct of_device_id qcom_slim_ngd_dt_match[] = { 1162 + { 1163 + .compatible = "qcom,slim-ngd-v1.5.0", 1164 + .data = &ngd_v1_5_offset_info, 1165 + }, 1166 + {} 1167 + }; 1168 + 1169 + MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match); 1170 + 1171 + static int of_qcom_slim_ngd_register(struct device *parent, 1172 + struct qcom_slim_ngd_ctrl *ctrl) 1173 + { 1174 + const struct ngd_reg_offset_data *data; 1175 + struct qcom_slim_ngd *ngd; 1176 + struct device_node *node; 1177 + u32 id; 1178 + 1179 + data = of_match_node(qcom_slim_ngd_dt_match, parent->of_node)->data; 1180 + 1181 + for_each_available_child_of_node(parent->of_node, node) { 1182 + if (of_property_read_u32(node, "reg", &id)) 1183 + continue; 1184 + 1185 + ngd = kzalloc(sizeof(*ngd), GFP_KERNEL); 1186 + if (!ngd) 1187 + return -ENOMEM; 1188 + 1189 + ngd->pdev = 
platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id); 1190 + ngd->id = id; 1191 + ngd->pdev->dev.parent = parent; 1192 + ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME; 1193 + ngd->pdev->dev.of_node = node; 1194 + ctrl->ngd = ngd; 1195 + platform_set_drvdata(ngd->pdev, ctrl); 1196 + 1197 + platform_device_add(ngd->pdev); 1198 + ngd->base = ctrl->base + ngd->id * data->offset + 1199 + (ngd->id - 1) * data->size; 1200 + ctrl->ngd = ngd; 1201 + platform_driver_register(&qcom_slim_ngd_driver); 1202 + 1203 + return 0; 1204 + } 1205 + 1206 + return -ENODEV; 1207 + } 1208 + 1209 + static int qcom_slim_ngd_probe(struct platform_device *pdev) 1210 + { 1211 + struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); 1212 + struct device *dev = &pdev->dev; 1213 + int ret; 1214 + 1215 + ctrl->ctrl.dev = dev; 1216 + ret = slim_register_controller(&ctrl->ctrl); 1217 + if (ret) { 1218 + dev_err(dev, "error adding slim controller\n"); 1219 + return ret; 1220 + } 1221 + 1222 + pm_runtime_use_autosuspend(dev); 1223 + pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND); 1224 + pm_runtime_set_suspended(dev); 1225 + pm_runtime_enable(dev); 1226 + pm_runtime_get_noresume(dev); 1227 + ret = qcom_slim_ngd_qmi_svc_event_init(ctrl); 1228 + if (ret) { 1229 + dev_err(&pdev->dev, "QMI service registration failed:%d", ret); 1230 + goto err; 1231 + } 1232 + 1233 + INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker); 1234 + ctrl->mwq = create_singlethread_workqueue("ngd_master"); 1235 + if (!ctrl->mwq) { 1236 + dev_err(&pdev->dev, "Failed to start master worker\n"); 1237 + ret = -ENOMEM; 1238 + goto wq_err; 1239 + } 1240 + 1241 + return 0; 1242 + err: 1243 + slim_unregister_controller(&ctrl->ctrl); 1244 + wq_err: 1245 + qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi); 1246 + if (ctrl->mwq) 1247 + destroy_workqueue(ctrl->mwq); 1248 + 1249 + return 0; 1250 + } 1251 + 1252 + static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) 1253 + { 1254 + struct device *dev = 
&pdev->dev; 1255 + struct qcom_slim_ngd_ctrl *ctrl; 1256 + struct resource *res; 1257 + int ret; 1258 + 1259 + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); 1260 + if (!ctrl) 1261 + return -ENOMEM; 1262 + 1263 + dev_set_drvdata(dev, ctrl); 1264 + 1265 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1266 + ctrl->base = devm_ioremap_resource(dev, res); 1267 + if (IS_ERR(ctrl->base)) 1268 + return PTR_ERR(ctrl->base); 1269 + 1270 + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1271 + if (!res) { 1272 + dev_err(&pdev->dev, "no slimbus IRQ resource\n"); 1273 + return -ENODEV; 1274 + } 1275 + 1276 + ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt, 1277 + IRQF_TRIGGER_HIGH, "slim-ngd", ctrl); 1278 + if (ret) { 1279 + dev_err(&pdev->dev, "request IRQ failed\n"); 1280 + return ret; 1281 + } 1282 + 1283 + ctrl->dev = dev; 1284 + ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3; 1285 + ctrl->framer.superfreq = 1286 + ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8; 1287 + 1288 + ctrl->ctrl.a_framer = &ctrl->framer; 1289 + ctrl->ctrl.clkgear = SLIM_MAX_CLK_GEAR; 1290 + ctrl->ctrl.get_laddr = qcom_slim_ngd_get_laddr; 1291 + ctrl->ctrl.xfer_msg = qcom_slim_ngd_xfer_msg; 1292 + ctrl->ctrl.wakeup = NULL; 1293 + ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN; 1294 + 1295 + spin_lock_init(&ctrl->tx_buf_lock); 1296 + init_completion(&ctrl->reconf); 1297 + init_completion(&ctrl->qmi.qmi_comp); 1298 + 1299 + return of_qcom_slim_ngd_register(dev, ctrl); 1300 + } 1301 + 1302 + static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev) 1303 + { 1304 + platform_driver_unregister(&qcom_slim_ngd_driver); 1305 + 1306 + return 0; 1307 + } 1308 + 1309 + static int qcom_slim_ngd_remove(struct platform_device *pdev) 1310 + { 1311 + struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); 1312 + 1313 + pm_runtime_disable(&pdev->dev); 1314 + slim_unregister_controller(&ctrl->ctrl); 1315 + qcom_slim_ngd_exit_dma(ctrl); 1316 + 
qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi); 1317 + if (ctrl->mwq) 1318 + destroy_workqueue(ctrl->mwq); 1319 + 1320 + kfree(ctrl->ngd); 1321 + ctrl->ngd = NULL; 1322 + return 0; 1323 + } 1324 + 1325 + static int qcom_slim_ngd_runtime_idle(struct device *dev) 1326 + { 1327 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); 1328 + 1329 + if (ctrl->state == QCOM_SLIM_NGD_CTRL_AWAKE) 1330 + ctrl->state = QCOM_SLIM_NGD_CTRL_IDLE; 1331 + pm_request_autosuspend(dev); 1332 + return -EAGAIN; 1333 + } 1334 + 1335 + #ifdef CONFIG_PM 1336 + static int qcom_slim_ngd_runtime_suspend(struct device *dev) 1337 + { 1338 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); 1339 + int ret = 0; 1340 + 1341 + ret = qcom_slim_qmi_power_request(ctrl, false); 1342 + if (ret && ret != -EBUSY) 1343 + dev_info(ctrl->dev, "slim resource not idle:%d\n", ret); 1344 + if (!ret || ret == -ETIMEDOUT) 1345 + ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP; 1346 + 1347 + return ret; 1348 + } 1349 + #endif 1350 + 1351 + static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = { 1352 + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1353 + pm_runtime_force_resume) 1354 + SET_RUNTIME_PM_OPS( 1355 + qcom_slim_ngd_runtime_suspend, 1356 + qcom_slim_ngd_runtime_resume, 1357 + qcom_slim_ngd_runtime_idle 1358 + ) 1359 + }; 1360 + 1361 + static struct platform_driver qcom_slim_ngd_ctrl_driver = { 1362 + .probe = qcom_slim_ngd_ctrl_probe, 1363 + .remove = qcom_slim_ngd_ctrl_remove, 1364 + .driver = { 1365 + .name = "qcom,slim-ngd-ctrl", 1366 + .of_match_table = qcom_slim_ngd_dt_match, 1367 + }, 1368 + }; 1369 + 1370 + static struct platform_driver qcom_slim_ngd_driver = { 1371 + .probe = qcom_slim_ngd_probe, 1372 + .remove = qcom_slim_ngd_remove, 1373 + .driver = { 1374 + .name = QCOM_SLIM_NGD_DRV_NAME, 1375 + .pm = &qcom_slim_ngd_dev_pm_ops, 1376 + }, 1377 + }; 1378 + 1379 + module_platform_driver(qcom_slim_ngd_ctrl_driver); 1380 + MODULE_LICENSE("GPL v2"); 1381 + MODULE_DESCRIPTION("Qualcomm SLIMBus 
NGD controller");
+8
drivers/slimbus/slimbus.h
··· 17 17 18 18 /* SLIMbus message types. Related to interpretation of message code. */ 19 19 #define SLIM_MSG_MT_CORE 0x0 20 + #define SLIM_MSG_MT_DEST_REFERRED_USER 0x2 21 + #define SLIM_MSG_MT_SRC_REFERRED_USER 0x6 20 22 21 23 /* 22 24 * SLIM Broadcast header format ··· 49 47 #define SLIM_MSG_MC_BEGIN_RECONFIGURATION 0x40 50 48 #define SLIM_MSG_MC_NEXT_PAUSE_CLOCK 0x4A 51 49 #define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F 50 + 51 + /* 52 + * Clock pause flag to indicate that the reconfig message 53 + * corresponds to clock pause sequence 54 + */ 55 + #define SLIM_MSG_CLK_PAUSE_SEQ_FLG (1U << 8) 52 56 53 57 /* Clock pause values per SLIMbus spec */ 54 58 #define SLIM_CLK_FAST 0