Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mailbox: ti-msgmgr: Add support for Secure Proxy

Secure Proxy is another communication scheme in Texas Instruments'
devices intended to provide a unique communication path from various
processors in the System on Chip (SoC) to a central System Controller.

Secure proxy is, in effect, an evolution of current generation Message
Manager hardware block found in K2G devices. However the following
changes have taken place:

A Secure Proxy instance exposes "threads" or "proxies", which are the
primary representation of a communication channel. Each thread is
preconfigured by the System Controller configuration based on SoC usage
requirements. Secure Proxy by itself represents a single "queue" of
communication but allows the proxies to be independently operated.

Each Secure Proxy thread can have its own error and threshold
interrupts, allowing for finer-grained control of IRQ handling.

Provide the driver support for Secure Proxy and thread instances.

NOTE: Secure proxy configuration is only done by System Controller,
hence these are assumed to be pre-configured instances.

See AM65x Technical Reference Manual (SPRUID7, April 2018)
for further details: http://www.ti.com/lit/pdf/spruid7

Signed-off-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>

authored by

Nishanth Menon and committed by
Jassi Brar
a2b79838 0f23a179

+204 -27
+204 -27
drivers/mailbox/ti-msgmgr.c
··· 25 25 #define Q_STATE_OFFSET(queue) ((queue) * 0x4) 26 26 #define Q_STATE_ENTRY_COUNT_MASK (0xFFF000) 27 27 28 + #define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid)) 29 + #define SPROXY_THREAD_DATA_OFFSET(tid, reg) \ 30 + (SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4) 31 + 32 + #define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid)) 33 + 34 + #define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF) 35 + 36 + #define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid)) 37 + #define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31) 38 + 28 39 /** 29 40 * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor 30 41 * @queue_id: Queue Number for this path ··· 56 45 * @data_first_reg: First data register for proxy data region 57 46 * @data_last_reg: Last data register for proxy data region 58 47 * @status_cnt_mask: Mask for getting the status value 48 + * @status_err_mask: Mask for getting the error value, if applicable 59 49 * @tx_polled: Do I need to use polled mechanism for tx 60 50 * @tx_poll_timeout_ms: Timeout in ms if polled 61 51 * @valid_queues: List of Valid queues that the processor can access 62 52 * @data_region_name: Name of the proxy data region 63 53 * @status_region_name: Name of the proxy status region 54 + * @ctrl_region_name: Name of the proxy control region 64 55 * @num_valid_queues: Number of valid queues 56 + * @is_sproxy: Is this an Secure Proxy instance? 65 57 * 66 58 * This structure is used in of match data to describe how integration 67 59 * for a specific compatible SoC is done. 
··· 76 62 u8 data_first_reg; 77 63 u8 data_last_reg; 78 64 u32 status_cnt_mask; 65 + u32 status_err_mask; 79 66 bool tx_polled; 80 67 int tx_poll_timeout_ms; 81 68 const struct ti_msgmgr_valid_queue_desc *valid_queues; 82 69 const char *data_region_name; 83 70 const char *status_region_name; 71 + const char *ctrl_region_name; 84 72 int num_valid_queues; 73 + bool is_sproxy; 85 74 }; 86 75 87 76 /** ··· 97 80 * @queue_buff_start: First register of Data Buffer 98 81 * @queue_buff_end: Last (or confirmation) register of Data buffer 99 82 * @queue_state: Queue status register 83 + * @queue_ctrl: Queue Control register 100 84 * @chan: Mailbox channel 101 85 * @rx_buff: Receive buffer pointer allocated at probe, max_message_size 102 86 */ ··· 110 92 void __iomem *queue_buff_start; 111 93 void __iomem *queue_buff_end; 112 94 void __iomem *queue_state; 95 + void __iomem *queue_ctrl; 113 96 struct mbox_chan *chan; 114 97 u32 *rx_buff; 115 98 }; ··· 121 102 * @desc: Description of the SoC integration 122 103 * @queue_proxy_region: Queue proxy region where queue buffers are located 123 104 * @queue_state_debug_region: Queue status register regions 105 + * @queue_ctrl_region: Queue Control register regions 124 106 * @num_valid_queues: Number of valid queues defined for the processor 125 107 * Note: other queues are probably reserved for other processors 126 108 * in the SoC. 
··· 134 114 const struct ti_msgmgr_desc *desc; 135 115 void __iomem *queue_proxy_region; 136 116 void __iomem *queue_state_debug_region; 117 + void __iomem *queue_ctrl_region; 137 118 u8 num_valid_queues; 138 119 struct ti_queue_inst *qinsts; 139 120 struct mbox_controller mbox; ··· 163 142 val >>= __ffs(status_cnt_mask); 164 143 165 144 return val; 145 + } 146 + 147 + /** 148 + * ti_msgmgr_queue_is_error() - Check to see if there is queue error 149 + * @d: Description of message manager 150 + * @qinst: Queue instance for which we check the number of pending messages 151 + * 152 + * Return: true if error, else false 153 + */ 154 + static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d, 155 + struct ti_queue_inst *qinst) 156 + { 157 + u32 val; 158 + 159 + /* Msgmgr has no error detection */ 160 + if (!d->is_sproxy) 161 + return false; 162 + 163 + /* 164 + * We cannot use relaxed operation here - update may happen 165 + * real-time. 166 + */ 167 + val = readl(qinst->queue_state) & d->status_err_mask; 168 + 169 + return val ? true : false; 166 170 } 167 171 168 172 /** ··· 224 178 } 225 179 226 180 desc = inst->desc; 181 + if (ti_msgmgr_queue_is_error(desc, qinst)) { 182 + dev_err(dev, "Error on Rx channel %s\n", qinst->name); 183 + return IRQ_NONE; 184 + } 185 + 227 186 /* Do I actually have messages to read? 
*/ 228 187 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst); 229 188 if (!msg_count) { ··· 287 236 struct ti_queue_inst *qinst = chan->con_priv; 288 237 struct device *dev = chan->mbox->dev; 289 238 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 239 + const struct ti_msgmgr_desc *desc = inst->desc; 290 240 int msg_count; 291 241 292 242 if (qinst->is_tx) 293 243 return false; 294 244 295 - msg_count = ti_msgmgr_queue_get_num_messages(inst->desc, qinst); 245 + if (ti_msgmgr_queue_is_error(desc, qinst)) { 246 + dev_err(dev, "Error on channel %s\n", qinst->name); 247 + return false; 248 + } 249 + 250 + msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst); 296 251 297 252 return msg_count ? true : false; 298 253 } ··· 314 257 struct ti_queue_inst *qinst = chan->con_priv; 315 258 struct device *dev = chan->mbox->dev; 316 259 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 260 + const struct ti_msgmgr_desc *desc = inst->desc; 317 261 int msg_count; 318 262 319 263 if (!qinst->is_tx) 320 264 return false; 321 265 322 - msg_count = ti_msgmgr_queue_get_num_messages(inst->desc, qinst); 266 + if (ti_msgmgr_queue_is_error(desc, qinst)) { 267 + dev_err(dev, "Error on channel %s\n", qinst->name); 268 + return false; 269 + } 270 + 271 + msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst); 272 + 273 + if (desc->is_sproxy) { 274 + /* In secure proxy, msg_count indicates how many we can send */ 275 + return msg_count ? true : false; 276 + } 323 277 324 278 /* if we have any messages pending.. */ 325 279 return msg_count ? 
false : true; ··· 359 291 return -EINVAL; 360 292 } 361 293 desc = inst->desc; 294 + 295 + if (ti_msgmgr_queue_is_error(desc, qinst)) { 296 + dev_err(dev, "Error on channel %s\n", qinst->name); 297 + return false; 298 + } 362 299 363 300 if (desc->max_message_size < message->len) { 364 301 dev_err(dev, "Queue %s message length %zu > max %d\n", ··· 400 327 /** 401 328 * ti_msgmgr_queue_rx_irq_req() - RX IRQ request 402 329 * @dev: device pointer 330 + * @d: descriptor for ti_msgmgr 403 331 * @qinst: Queue instance 404 332 * @chan: Channel pointer 405 333 */ 406 334 static int ti_msgmgr_queue_rx_irq_req(struct device *dev, 335 + const struct ti_msgmgr_desc *d, 407 336 struct ti_queue_inst *qinst, 408 337 struct mbox_chan *chan) 409 338 { ··· 414 339 struct device_node *np; 415 340 416 341 snprintf(of_rx_irq_name, sizeof(of_rx_irq_name), 417 - "rx_%03d", qinst->queue_id); 342 + "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id); 418 343 419 344 /* Get the IRQ if not found */ 420 345 if (qinst->irq < 0) { ··· 457 382 struct ti_queue_inst *qinst = chan->con_priv; 458 383 const struct ti_msgmgr_desc *d = inst->desc; 459 384 int ret; 385 + int msg_count; 386 + 387 + /* 388 + * If sproxy is starting and can send messages, we are a Tx thread, 389 + * else Rx 390 + */ 391 + if (d->is_sproxy) { 392 + qinst->is_tx = (readl(qinst->queue_ctrl) & 393 + SPROXY_THREAD_CTRL_DIR_MASK) ? 
false : true; 394 + 395 + msg_count = ti_msgmgr_queue_get_num_messages(d, qinst); 396 + 397 + if (!msg_count && qinst->is_tx) { 398 + dev_err(dev, "%s: Cannot transmit with 0 credits!\n", 399 + qinst->name); 400 + return -EINVAL; 401 + } 402 + } 460 403 461 404 if (!qinst->is_tx) { 462 405 /* Allocate usage buffer for rx */ ··· 482 389 if (!qinst->rx_buff) 483 390 return -ENOMEM; 484 391 /* Request IRQ */ 485 - ret = ti_msgmgr_queue_rx_irq_req(dev, qinst, chan); 392 + ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan); 486 393 if (ret) { 487 394 kfree(qinst->rx_buff); 488 395 return ret; ··· 520 427 struct ti_msgmgr_inst *inst; 521 428 int req_qid, req_pid; 522 429 struct ti_queue_inst *qinst; 523 - int i; 430 + const struct ti_msgmgr_desc *d; 431 + int i, ncells; 524 432 525 433 inst = container_of(mbox, struct ti_msgmgr_inst, mbox); 526 434 if (WARN_ON(!inst)) 527 435 return ERR_PTR(-EINVAL); 528 436 529 - /* #mbox-cells is 2 */ 530 - if (p->args_count != 2) { 531 - dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n", 532 - p->args_count); 437 + d = inst->desc; 438 + 439 + if (d->is_sproxy) 440 + ncells = 1; 441 + else 442 + ncells = 2; 443 + if (p->args_count != ncells) { 444 + dev_err(inst->dev, "Invalid arguments in dt[%d]. 
Must be %d\n", 445 + p->args_count, ncells); 533 446 return ERR_PTR(-EINVAL); 534 447 } 535 - req_qid = p->args[0]; 536 - req_pid = p->args[1]; 448 + if (ncells == 1) { 449 + req_qid = 0; 450 + req_pid = p->args[0]; 451 + } else { 452 + req_qid = p->args[0]; 453 + req_pid = p->args[1]; 454 + } 455 + 456 + if (d->is_sproxy) { 457 + if (req_pid > d->num_valid_queues) 458 + goto err; 459 + qinst = &inst->qinsts[req_pid]; 460 + return qinst->chan; 461 + } 537 462 538 463 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; 539 464 i++, qinst++) { ··· 559 448 return qinst->chan; 560 449 } 561 450 451 + err: 562 452 dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n", 563 453 req_qid, req_pid, p->np->name); 564 454 return ERR_PTR(-ENOENT); ··· 586 474 struct ti_queue_inst *qinst, 587 475 struct mbox_chan *chan) 588 476 { 477 + char *dir; 478 + 589 479 qinst->proxy_id = qd->proxy_id; 590 480 qinst->queue_id = qd->queue_id; 591 481 ··· 597 483 return -ERANGE; 598 484 } 599 485 600 - qinst->is_tx = qd->is_tx; 601 - snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d", 602 - dev_name(dev), qinst->is_tx ? 
"tx" : "rx", qinst->queue_id, 603 - qinst->proxy_id); 486 + if (d->is_sproxy) { 487 + qinst->queue_buff_start = inst->queue_proxy_region + 488 + SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id, 489 + d->data_first_reg); 490 + qinst->queue_buff_end = inst->queue_proxy_region + 491 + SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id, 492 + d->data_last_reg); 493 + qinst->queue_state = inst->queue_state_debug_region + 494 + SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id); 495 + qinst->queue_ctrl = inst->queue_ctrl_region + 496 + SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id); 604 497 605 - qinst->queue_buff_start = inst->queue_proxy_region + 606 - Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg); 607 - qinst->queue_buff_end = inst->queue_proxy_region + 608 - Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg); 609 - qinst->queue_state = inst->queue_state_debug_region + 610 - Q_STATE_OFFSET(qinst->queue_id); 498 + /* XXX: DONOT read registers here!.. Some may be unusable */ 499 + dir = "thr"; 500 + snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d", 501 + dev_name(dev), dir, qinst->proxy_id); 502 + } else { 503 + qinst->queue_buff_start = inst->queue_proxy_region + 504 + Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, 505 + d->data_first_reg); 506 + qinst->queue_buff_end = inst->queue_proxy_region + 507 + Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, 508 + d->data_last_reg); 509 + qinst->queue_state = 510 + inst->queue_state_debug_region + 511 + Q_STATE_OFFSET(qinst->queue_id); 512 + qinst->is_tx = qd->is_tx; 513 + dir = qinst->is_tx ? 
"tx" : "rx"; 514 + snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d", 515 + dev_name(dev), dir, qinst->queue_id, qinst->proxy_id); 516 + } 517 + 611 518 qinst->chan = chan; 612 519 613 520 /* Setup an error value for IRQ - Lazy allocation */ ··· 678 543 .tx_polled = false, 679 544 .valid_queues = k2g_valid_queues, 680 545 .num_valid_queues = ARRAY_SIZE(k2g_valid_queues), 546 + .is_sproxy = false, 547 + }; 548 + 549 + static const struct ti_msgmgr_desc am654_desc = { 550 + .queue_count = 190, 551 + .num_valid_queues = 190, 552 + .max_message_size = 60, 553 + .data_region_name = "target_data", 554 + .status_region_name = "rt", 555 + .ctrl_region_name = "scfg", 556 + .data_first_reg = 0, 557 + .data_last_reg = 14, 558 + .status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK, 559 + .tx_polled = false, 560 + .is_sproxy = true, 681 561 }; 682 562 683 563 static const struct of_device_id ti_msgmgr_of_match[] = { 684 564 {.compatible = "ti,k2g-message-manager", .data = &k2g_desc}, 565 + {.compatible = "ti,am654-secure-proxy", .data = &am654_desc}, 685 566 { /* Sentinel */ } 686 567 }; 568 + 687 569 MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match); 688 570 689 571 static int ti_msgmgr_probe(struct platform_device *pdev) ··· 751 599 if (IS_ERR(inst->queue_state_debug_region)) 752 600 return PTR_ERR(inst->queue_state_debug_region); 753 601 602 + if (desc->is_sproxy) { 603 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 604 + desc->ctrl_region_name); 605 + inst->queue_ctrl_region = devm_ioremap_resource(dev, res); 606 + if (IS_ERR(inst->queue_ctrl_region)) 607 + return PTR_ERR(inst->queue_ctrl_region); 608 + } 609 + 754 610 dev_dbg(dev, "proxy region=%p, queue_state=%p\n", 755 611 inst->queue_proxy_region, inst->queue_state_debug_region); 756 612 ··· 780 620 return -ENOMEM; 781 621 inst->chans = chans; 782 622 783 - for (i = 0, queue_desc = desc->valid_queues; 784 - i < queue_count; i++, qinst++, chans++, queue_desc++) { 785 - ret = ti_msgmgr_queue_setup(i, dev, 
np, inst, 786 - desc, queue_desc, qinst, chans); 787 - if (ret) 788 - return ret; 623 + if (desc->is_sproxy) { 624 + struct ti_msgmgr_valid_queue_desc sproxy_desc; 625 + 626 + /* All proxies may be valid in Secure Proxy instance */ 627 + for (i = 0; i < queue_count; i++, qinst++, chans++) { 628 + sproxy_desc.queue_id = 0; 629 + sproxy_desc.proxy_id = i; 630 + ret = ti_msgmgr_queue_setup(i, dev, np, inst, 631 + desc, &sproxy_desc, qinst, 632 + chans); 633 + if (ret) 634 + return ret; 635 + } 636 + } else { 637 + /* Only Some proxies are valid in Message Manager */ 638 + for (i = 0, queue_desc = desc->valid_queues; 639 + i < queue_count; i++, qinst++, chans++, queue_desc++) { 640 + ret = ti_msgmgr_queue_setup(i, dev, np, inst, 641 + desc, queue_desc, qinst, 642 + chans); 643 + if (ret) 644 + return ret; 645 + } 789 646 } 790 647 791 648 mbox = &inst->mbox;