Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mailbox: Use guard/scoped_guard for spinlock

Use guard and scoped_guard for chan->lock and mbox->poll_hrt_lock to
simplify code.

Signed-off-by: Peng Fan <peng.fan@nxp.com>
Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>

Authored by Peng Fan and committed by Jassi Brar
2149ec83 16da9a65

+45 -60
drivers/mailbox/mailbox.c
··· 25 25 static int add_to_rbuf(struct mbox_chan *chan, void *mssg) 26 26 { 27 27 int idx; 28 - unsigned long flags; 29 28 30 - spin_lock_irqsave(&chan->lock, flags); 29 + guard(spinlock_irqsave)(&chan->lock); 31 30 32 31 /* See if there is any space left */ 33 - if (chan->msg_count == MBOX_TX_QUEUE_LEN) { 34 - spin_unlock_irqrestore(&chan->lock, flags); 32 + if (chan->msg_count == MBOX_TX_QUEUE_LEN) 35 33 return -ENOBUFS; 36 - } 37 34 38 35 idx = chan->msg_free; 39 36 chan->msg_data[idx] = mssg; ··· 41 44 else 42 45 chan->msg_free++; 43 46 44 - spin_unlock_irqrestore(&chan->lock, flags); 45 - 46 47 return idx; 47 48 } 48 49 49 50 static void msg_submit(struct mbox_chan *chan) 50 51 { 51 52 unsigned count, idx; 52 - unsigned long flags; 53 53 void *data; 54 54 int err = -EBUSY; 55 55 56 - spin_lock_irqsave(&chan->lock, flags); 56 + scoped_guard(spinlock_irqsave, &chan->lock) { 57 + if (!chan->msg_count || chan->active_req) 58 + break; 57 59 58 - if (!chan->msg_count || chan->active_req) 59 - goto exit; 60 + count = chan->msg_count; 61 + idx = chan->msg_free; 62 + if (idx >= count) 63 + idx -= count; 64 + else 65 + idx += MBOX_TX_QUEUE_LEN - count; 60 66 61 - count = chan->msg_count; 62 - idx = chan->msg_free; 63 - if (idx >= count) 64 - idx -= count; 65 - else 66 - idx += MBOX_TX_QUEUE_LEN - count; 67 + data = chan->msg_data[idx]; 67 68 68 - data = chan->msg_data[idx]; 69 - 70 - if (chan->cl->tx_prepare) 71 - chan->cl->tx_prepare(chan->cl, data); 72 - /* Try to submit a message to the MBOX controller */ 73 - err = chan->mbox->ops->send_data(chan, data); 74 - if (!err) { 75 - chan->active_req = data; 76 - chan->msg_count--; 69 + if (chan->cl->tx_prepare) 70 + chan->cl->tx_prepare(chan->cl, data); 71 + /* Try to submit a message to the MBOX controller */ 72 + err = chan->mbox->ops->send_data(chan, data); 73 + if (!err) { 74 + chan->active_req = data; 75 + chan->msg_count--; 76 + } 77 77 } 78 - exit: 79 - spin_unlock_irqrestore(&chan->lock, flags); 80 78 81 79 if 
(!err && (chan->txdone_method & TXDONE_BY_POLL)) { 82 80 /* kick start the timer immediately to avoid delays */ 83 - spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags); 84 - hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); 85 - spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags); 81 + scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock) 82 + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); 86 83 } 87 84 } 88 85 89 86 static void tx_tick(struct mbox_chan *chan, int r) 90 87 { 91 - unsigned long flags; 92 88 void *mssg; 93 89 94 - spin_lock_irqsave(&chan->lock, flags); 95 - mssg = chan->active_req; 96 - chan->active_req = NULL; 97 - spin_unlock_irqrestore(&chan->lock, flags); 90 + scoped_guard(spinlock_irqsave, &chan->lock) { 91 + mssg = chan->active_req; 92 + chan->active_req = NULL; 93 + } 98 94 99 95 /* Submit next message */ 100 96 msg_submit(chan); ··· 109 119 container_of(hrtimer, struct mbox_controller, poll_hrt); 110 120 bool txdone, resched = false; 111 121 int i; 112 - unsigned long flags; 113 122 114 123 for (i = 0; i < mbox->num_chans; i++) { 115 124 struct mbox_chan *chan = &mbox->chans[i]; ··· 123 134 } 124 135 125 136 if (resched) { 126 - spin_lock_irqsave(&mbox->poll_hrt_lock, flags); 127 - if (!hrtimer_is_queued(hrtimer)) 128 - hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); 129 - spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags); 137 + scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) { 138 + if (!hrtimer_is_queued(hrtimer)) 139 + hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); 140 + } 130 141 131 142 return HRTIMER_RESTART; 132 143 } ··· 308 319 static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) 309 320 { 310 321 struct device *dev = cl->dev; 311 - unsigned long flags; 312 322 int ret; 313 323 314 324 if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) { ··· 315 327 return -EBUSY; 316 328 } 317 329 318 - spin_lock_irqsave(&chan->lock, 
flags); 319 - chan->msg_free = 0; 320 - chan->msg_count = 0; 321 - chan->active_req = NULL; 322 - chan->cl = cl; 323 - init_completion(&chan->tx_complete); 330 + scoped_guard(spinlock_irqsave, &chan->lock) { 331 + chan->msg_free = 0; 332 + chan->msg_count = 0; 333 + chan->active_req = NULL; 334 + chan->cl = cl; 335 + init_completion(&chan->tx_complete); 324 336 325 - if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) 326 - chan->txdone_method = TXDONE_BY_ACK; 327 - 328 - spin_unlock_irqrestore(&chan->lock, flags); 337 + if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) 338 + chan->txdone_method = TXDONE_BY_ACK; 339 + } 329 340 330 341 if (chan->mbox->ops->startup) { 331 342 ret = chan->mbox->ops->startup(chan); ··· 452 465 */ 453 466 void mbox_free_channel(struct mbox_chan *chan) 454 467 { 455 - unsigned long flags; 456 - 457 468 if (!chan || !chan->cl) 458 469 return; 459 470 ··· 459 474 chan->mbox->ops->shutdown(chan); 460 475 461 476 /* The queued TX requests are simply aborted, no callbacks are made */ 462 - spin_lock_irqsave(&chan->lock, flags); 463 - chan->cl = NULL; 464 - chan->active_req = NULL; 465 - if (chan->txdone_method == TXDONE_BY_ACK) 466 - chan->txdone_method = TXDONE_BY_POLL; 477 + scoped_guard(spinlock_irqsave, &chan->lock) { 478 + chan->cl = NULL; 479 + chan->active_req = NULL; 480 + if (chan->txdone_method == TXDONE_BY_ACK) 481 + chan->txdone_method = TXDONE_BY_POLL; 482 + } 467 483 468 - spin_unlock_irqrestore(&chan->lock, flags); 469 484 module_put(chan->mbox->dev->driver->owner); 470 485 } 471 486 EXPORT_SYMBOL_GPL(mbox_free_channel);