// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>

#include "mailbox.h"

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	spin_unlock_irqrestore(&chan->lock, flags);

	return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		/* kick start the timer immediately to avoid delays */
		spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
		spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
	}
}

static void tx_tick(struct mbox_chan *chan, int r)
{
	unsigned long flags;
	void *mssg;

	spin_lock_irqsave(&chan->lock, flags);
	mssg = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Submit next message */
	msg_submit(chan);

	if (!mssg)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;
	unsigned long flags;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
		if (!hrtimer_is_queued(hrtimer))
			hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

/**
 * mbox_chan_received_data - A way for controller driver to push data
 *				received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* No buffering the received data */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);

/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);
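
/*
 * Illustrative sketch (not part of this file): how a controller driver with
 * RX and TX-done interrupts might feed the two calls above. The device, the
 * register names and the foo_mbox structure are hypothetical; only the
 * mbox_chan_received_data() and mbox_chan_txdone() calls are the real API,
 * and mbox_chan_txdone() is valid only when the controller sets txdone_irq.
 *
 *	static irqreturn_t foo_mbox_irq(int irq, void *data)
 *	{
 *		struct mbox_chan *chan = data;
 *		struct foo_mbox *fm = chan->con_priv;
 *		u32 status = readl(fm->regs + FOO_MBOX_STATUS);
 *
 *		if (status & FOO_MBOX_RX_PENDING) {
 *			u32 msg = readl(fm->regs + FOO_MBOX_RX_DATA);
 *
 *			// push the payload up; the client's rx_callback runs here
 *			mbox_chan_received_data(chan, &msg);
 *			writel(FOO_MBOX_RX_PENDING, fm->regs + FOO_MBOX_ACK);
 *		}
 *
 *		// tick the TX state machine with success status
 *		if (status & FOO_MBOX_TX_DONE)
 *			mbox_chan_txdone(chan, 0);
 *
 *		return IRQ_HANDLED;
 *	}
 */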

/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);
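
/*
 * Illustrative sketch (not part of this file): a client driving the TX state
 * machine from its own protocol-level ACK, for controllers that cannot sense
 * TX completion themselves. foo_client, foo_msg and foo_handle_payload are
 * hypothetical; the client is expected to have set cl->knows_txdone and to
 * call mbox_client_txdone() once the remote confirms the last transmission.
 *
 *	struct foo_client {
 *		struct mbox_client cl;
 *		struct mbox_chan *chan;
 *	};
 *
 *	static void foo_rx_callback(struct mbox_client *cl, void *mssg)
 *	{
 *		struct foo_client *fc = container_of(cl, struct foo_client, cl);
 *		struct foo_msg *msg = mssg;
 *
 *		if (msg->type == FOO_MSG_ACK) {
 *			// remote confirmed the previous TX; run the ticker
 *			mbox_client_txdone(fc->chan, 0);
 *			return;
 *		}
 *
 *		foo_handle_payload(fc, msg);
 *	}
 */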

/**
 * mbox_client_peek_data - A way for client driver to pull data
 *			received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to controller driver for any received data.
 * The data is actually passed onto client via the
 * mbox_chan_received_data().
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if controller has, and is going to push after this,
 *         some data.
 *         False, if controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);

/**
 * mbox_send_message - For client to submit a message to be
 *			sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to message should be preserved until it is sent
 * over the chan, i.e., until tx_done() is invoked.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
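
/*
 * Illustrative sketch (not part of this file): a client submitting a request
 * in blocking mode. foo_dev and foo_request are hypothetical; the client
 * would have set cl.tx_block = true and a tx_tout (in ms) before binding to
 * the channel, and the message must stay valid until tx_done() is invoked.
 *
 *	struct foo_dev {
 *		struct mbox_client cl;
 *		struct mbox_chan *chan;
 *	};
 *
 *	static int foo_send(struct foo_dev *fdev, struct foo_request *req)
 *	{
 *		int ret;
 *
 *		// non-negative token on success, negative errno on failure;
 *		// with tx_block set, returns only after TX-done or timeout
 *		ret = mbox_send_message(fdev->chan, req);
 *		return ret < 0 ? ret : 0;
 *	}
 */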

/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);
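
/*
 * Illustrative sketch (not part of this file): a client that must complete a
 * transmission while atomic, e.g. from a genpd or cpufreq callback. The names
 * are hypothetical; the pattern is mbox_send_message() (which only queues)
 * followed by mbox_flush(), and it depends on the controller implementing
 * the ->flush() callback.
 *
 *	static int foo_send_atomic(struct foo_dev *fdev, void *msg)
 *	{
 *		int ret;
 *
 *		ret = mbox_send_message(fdev->chan, msg);
 *		if (ret < 0)
 *			return ret;
 *
 *		// let the controller busy-wait for up to 100 ms
 *		return mbox_flush(fdev->chan, 100);
 *	}
 */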

static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	struct device *dev = cl->dev;
	unsigned long flags;
	int ret;

	if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			return ret;
		}
	}

	return 0;
}

/**
 * mbox_bind_client - Request a mailbox channel.
 * @chan: The mailbox channel to bind the client to.
 * @cl: Identity of the client requesting the channel.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: 0 if the channel was assigned to the client successfully.
 *	<0 for request failure.
 */
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	int ret;

	mutex_lock(&con_mutex);
	ret = __mbox_bind_client(chan, cl);
	mutex_unlock(&con_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_bind_client);

/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *	ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);

	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	ret = __mbox_bind_client(chan, cl);
	if (ret)
		chan = ERR_PTR(ret);

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);

struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	struct device_node *np = cl->dev->of_node;
	struct property *prop;
	const char *mbox_name;
	int index = 0;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!of_get_property(np, "mbox-names", NULL)) {
		dev_err(cl->dev,
			"%s() requires an \"mbox-names\" property\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
		if (!strncmp(name, mbox_name, strlen(name)))
			return mbox_request_channel(cl, index);
		index++;
	}

	dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
		__func__, name);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
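
/*
 * Illustrative sketch (not part of this file): the devicetree wiring and the
 * client probe() code that the two request helpers above expect. The node
 * names, the foo_* identifiers and the two-channel layout are hypothetical;
 * the "mboxes"/"mbox-names" properties and the "#mbox-cells" specifier are
 * what the framework actually parses.
 *
 *	client@0 {
 *		...
 *		mboxes = <&mailbox 0>, <&mailbox 1>;
 *		mbox-names = "tx", "rx";
 *	};
 *
 * In the client driver's probe():
 *
 *	fdev->cl.dev = &pdev->dev;
 *	fdev->cl.rx_callback = foo_rx_callback;
 *	fdev->cl.tx_block = true;
 *	fdev->cl.tx_tout = 500;
 *
 *	fdev->chan = mbox_request_channel(&fdev->cl, 0);
 *	// or: fdev->chan = mbox_request_channel_byname(&fdev->cl, "tx");
 *	if (IS_ERR(fdev->chan))
 *		return PTR_ERR(fdev->chan);	// may be -EPROBE_DEFER
 */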

/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);

static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}

/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox: Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {

		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mbox->poll_hrt.function = txdone_hrtimer;
		spin_lock_init(&mbox->poll_hrt_lock);
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
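
/*
 * Illustrative sketch (not part of this file): the controller-driver side of
 * registration. foo_mbox_ops, foo_mbox and FOO_NUM_CHANS are hypothetical;
 * mbox_controller_register() only needs dev, ops, chans, num_chans and one
 * TX-done discipline (txdone_irq, txdone_poll + txpoll_period, or neither
 * for client-ACK mode) filled in before the call.
 *
 *	static const struct mbox_chan_ops foo_mbox_ops = {
 *		.send_data	= foo_mbox_send_data,
 *		.startup	= foo_mbox_startup,
 *		.shutdown	= foo_mbox_shutdown,
 *		.last_tx_done	= foo_mbox_last_tx_done,
 *	};
 *
 * In the controller driver's probe():
 *
 *	fm->mbox.dev = &pdev->dev;
 *	fm->mbox.ops = &foo_mbox_ops;
 *	fm->mbox.chans = fm->chans;
 *	fm->mbox.num_chans = FOO_NUM_CHANS;
 *	fm->mbox.txdone_poll = true;	// poll ->last_tx_done()
 *	fm->mbox.txpoll_period = 5;	// ms
 *
 *	ret = mbox_controller_register(&fm->mbox);
 */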

/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox: Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	mutex_lock(&con_mutex);

	list_del(&mbox->node);

	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	if (mbox->txdone_poll)
		hrtimer_cancel(&mbox->poll_hrt);

	mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}

static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **mbox = res;

	if (WARN_ON(!mbox || !*mbox))
		return 0;

	return *mbox == data;
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
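
/*
 * Illustrative sketch (not part of this file): the devm variant used from a
 * controller's probe(). foo_mbox_probe is hypothetical; with the managed
 * registration, no remove() counterpart or error-path unregister is needed.
 *
 *	static int foo_mbox_probe(struct platform_device *pdev)
 *	{
 *		struct foo_mbox *fm;
 *
 *		fm = devm_kzalloc(&pdev->dev, sizeof(*fm), GFP_KERNEL);
 *		if (!fm)
 *			return -ENOMEM;
 *
 *		// ... fill in fm->mbox as in the sketch above ...
 *
 *		return devm_mbox_controller_register(&pdev->dev, &fm->mbox);
 *	}
 */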

/**
 * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
 * @dev: device owning the mailbox controller being unregistered
 * @mbox: mailbox controller being unregistered
 *
 * This function unregisters the mailbox controller and removes the device-
 * managed resource that was set up to automatically unregister the mailbox
 * controller on driver probe failure or driver removal. It's typically not
 * necessary to call this function.
 */
void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
{
	WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
			       devm_mbox_controller_match, mbox));
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);