Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

serial: sh-sci: Use port lock wrappers

When a serial port is used for kernel console output, then all
modifications to the UART registers which are done from other contexts,
e.g. getty, termios, are interference points for the kernel console.

So far this has been ignored and the printk output is based on the
principle of hope. The rework of the console infrastructure, which aims to
support threaded and atomic consoles, requires marking sections which
modify the UART registers as unsafe. This allows the atomic write function
to make informed decisions and eventually to restore operational state. It
also allows the regular UART code to be prevented from modifying UART
registers while printk output is in progress.

All modifications of UART registers are guarded by the UART port lock,
which provides an obvious synchronization point with the console
infrastructure.

To avoid adding this functionality to all UART drivers, wrap the
spin_[un]lock*() invocations for uart_port::lock into helper functions
which just contain the spin_[un]lock*() invocations for now. In a
subsequent step these helpers will gain the console synchronization
mechanisms.

Converted with coccinelle. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Link: https://lore.kernel.org/r/20230914183831.587273-61-john.ogness@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Thomas Gleixner and committed by Greg Kroah-Hartman
94c53770 b04cfd7d

+34 -34
+34 -34
drivers/tty/serial/sh-sci.c
··· 1205 1205 1206 1206 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1207 1207 1208 - spin_lock_irqsave(&port->lock, flags); 1208 + uart_port_lock_irqsave(port, &flags); 1209 1209 1210 1210 uart_xmit_advance(port, s->tx_dma_len); 1211 1211 ··· 1229 1229 } 1230 1230 } 1231 1231 1232 - spin_unlock_irqrestore(&port->lock, flags); 1232 + uart_port_unlock_irqrestore(port, flags); 1233 1233 } 1234 1234 1235 1235 /* Locking: called with port lock held */ ··· 1320 1320 dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line, 1321 1321 s->active_rx); 1322 1322 1323 - spin_lock_irqsave(&port->lock, flags); 1323 + uart_port_lock_irqsave(port, &flags); 1324 1324 1325 1325 active = sci_dma_rx_find_active(s); 1326 1326 if (active >= 0) ··· 1347 1347 1348 1348 dma_async_issue_pending(chan); 1349 1349 1350 - spin_unlock_irqrestore(&port->lock, flags); 1350 + uart_port_unlock_irqrestore(port, flags); 1351 1351 dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n", 1352 1352 __func__, s->cookie_rx[active], active, s->active_rx); 1353 1353 return; 1354 1354 1355 1355 fail: 1356 - spin_unlock_irqrestore(&port->lock, flags); 1356 + uart_port_unlock_irqrestore(port, flags); 1357 1357 dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); 1358 1358 /* Switch to PIO */ 1359 - spin_lock_irqsave(&port->lock, flags); 1359 + uart_port_lock_irqsave(port, &flags); 1360 1360 dmaengine_terminate_async(chan); 1361 1361 sci_dma_rx_chan_invalidate(s); 1362 1362 sci_dma_rx_reenable_irq(s); 1363 - spin_unlock_irqrestore(&port->lock, flags); 1363 + uart_port_unlock_irqrestore(port, flags); 1364 1364 } 1365 1365 1366 1366 static void sci_dma_tx_release(struct sci_port *s) ··· 1409 1409 fail: 1410 1410 /* Switch to PIO */ 1411 1411 if (!port_lock_held) 1412 - spin_lock_irqsave(&port->lock, flags); 1412 + uart_port_lock_irqsave(port, &flags); 1413 1413 if (i) 1414 1414 dmaengine_terminate_async(chan); 1415 1415 sci_dma_rx_chan_invalidate(s); 1416 1416 
sci_start_rx(port); 1417 1417 if (!port_lock_held) 1418 - spin_unlock_irqrestore(&port->lock, flags); 1418 + uart_port_unlock_irqrestore(port, flags); 1419 1419 return -EAGAIN; 1420 1420 } 1421 1421 ··· 1437 1437 * transmit till the end, and then the rest. Take the port lock to get a 1438 1438 * consistent xmit buffer state. 1439 1439 */ 1440 - spin_lock_irq(&port->lock); 1440 + uart_port_lock_irq(port); 1441 1441 head = xmit->head; 1442 1442 tail = xmit->tail; 1443 1443 buf = s->tx_dma_addr + tail; 1444 1444 s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE); 1445 1445 if (!s->tx_dma_len) { 1446 1446 /* Transmit buffer has been flushed */ 1447 - spin_unlock_irq(&port->lock); 1447 + uart_port_unlock_irq(port); 1448 1448 return; 1449 1449 } 1450 1450 ··· 1452 1452 DMA_MEM_TO_DEV, 1453 1453 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1454 1454 if (!desc) { 1455 - spin_unlock_irq(&port->lock); 1455 + uart_port_unlock_irq(port); 1456 1456 dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n"); 1457 1457 goto switch_to_pio; 1458 1458 } ··· 1464 1464 desc->callback_param = s; 1465 1465 s->cookie_tx = dmaengine_submit(desc); 1466 1466 if (dma_submit_error(s->cookie_tx)) { 1467 - spin_unlock_irq(&port->lock); 1467 + uart_port_unlock_irq(port); 1468 1468 dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); 1469 1469 goto switch_to_pio; 1470 1470 } 1471 1471 1472 - spin_unlock_irq(&port->lock); 1472 + uart_port_unlock_irq(port); 1473 1473 dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", 1474 1474 __func__, xmit->buf, tail, head, s->cookie_tx); 1475 1475 ··· 1477 1477 return; 1478 1478 1479 1479 switch_to_pio: 1480 - spin_lock_irqsave(&port->lock, flags); 1480 + uart_port_lock_irqsave(port, &flags); 1481 1481 s->chan_tx = NULL; 1482 1482 sci_start_tx(port); 1483 - spin_unlock_irqrestore(&port->lock, flags); 1483 + uart_port_unlock_irqrestore(port, flags); 1484 1484 return; 1485 1485 } 1486 1486 ··· 1497 1497 1498 1498 dev_dbg(port->dev, "DMA Rx timed out\n"); 
1499 1499 1500 - spin_lock_irqsave(&port->lock, flags); 1500 + uart_port_lock_irqsave(port, &flags); 1501 1501 1502 1502 active = sci_dma_rx_find_active(s); 1503 1503 if (active < 0) { 1504 - spin_unlock_irqrestore(&port->lock, flags); 1504 + uart_port_unlock_irqrestore(port, flags); 1505 1505 return HRTIMER_NORESTART; 1506 1506 } 1507 1507 1508 1508 status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state); 1509 1509 if (status == DMA_COMPLETE) { 1510 - spin_unlock_irqrestore(&port->lock, flags); 1510 + uart_port_unlock_irqrestore(port, flags); 1511 1511 dev_dbg(port->dev, "Cookie %d #%d has already completed\n", 1512 1512 s->active_rx, active); 1513 1513 ··· 1525 1525 */ 1526 1526 status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state); 1527 1527 if (status == DMA_COMPLETE) { 1528 - spin_unlock_irqrestore(&port->lock, flags); 1528 + uart_port_unlock_irqrestore(port, flags); 1529 1529 dev_dbg(port->dev, "Transaction complete after DMA engine was stopped"); 1530 1530 return HRTIMER_NORESTART; 1531 1531 } ··· 1546 1546 1547 1547 sci_dma_rx_reenable_irq(s); 1548 1548 1549 - spin_unlock_irqrestore(&port->lock, flags); 1549 + uart_port_unlock_irqrestore(port, flags); 1550 1550 1551 1551 return HRTIMER_NORESTART; 1552 1552 } ··· 1770 1770 struct uart_port *port = ptr; 1771 1771 unsigned long flags; 1772 1772 1773 - spin_lock_irqsave(&port->lock, flags); 1773 + uart_port_lock_irqsave(port, &flags); 1774 1774 sci_transmit_chars(port); 1775 - spin_unlock_irqrestore(&port->lock, flags); 1775 + uart_port_unlock_irqrestore(port, flags); 1776 1776 1777 1777 return IRQ_HANDLED; 1778 1778 } ··· 1786 1786 if (port->type != PORT_SCI) 1787 1787 return sci_tx_interrupt(irq, ptr); 1788 1788 1789 - spin_lock_irqsave(&port->lock, flags); 1789 + uart_port_lock_irqsave(port, &flags); 1790 1790 ctrl = serial_port_in(port, SCSCR); 1791 1791 ctrl &= ~(SCSCR_TE | SCSCR_TEIE); 1792 1792 serial_port_out(port, SCSCR, ctrl); 1793 - spin_unlock_irqrestore(&port->lock, flags); 1793 + 
uart_port_unlock_irqrestore(port, flags); 1794 1794 1795 1795 return IRQ_HANDLED; 1796 1796 } ··· 2187 2187 return; 2188 2188 } 2189 2189 2190 - spin_lock_irqsave(&port->lock, flags); 2190 + uart_port_lock_irqsave(port, &flags); 2191 2191 scsptr = serial_port_in(port, SCSPTR); 2192 2192 scscr = serial_port_in(port, SCSCR); 2193 2193 ··· 2201 2201 2202 2202 serial_port_out(port, SCSPTR, scsptr); 2203 2203 serial_port_out(port, SCSCR, scscr); 2204 - spin_unlock_irqrestore(&port->lock, flags); 2204 + uart_port_unlock_irqrestore(port, flags); 2205 2205 } 2206 2206 2207 2207 static int sci_startup(struct uart_port *port) ··· 2233 2233 s->autorts = false; 2234 2234 mctrl_gpio_disable_ms(to_sci_port(port)->gpios); 2235 2235 2236 - spin_lock_irqsave(&port->lock, flags); 2236 + uart_port_lock_irqsave(port, &flags); 2237 2237 sci_stop_rx(port); 2238 2238 sci_stop_tx(port); 2239 2239 /* ··· 2243 2243 scr = serial_port_in(port, SCSCR); 2244 2244 serial_port_out(port, SCSCR, scr & 2245 2245 (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot)); 2246 - spin_unlock_irqrestore(&port->lock, flags); 2246 + uart_port_unlock_irqrestore(port, flags); 2247 2247 2248 2248 #ifdef CONFIG_SERIAL_SH_SCI_DMA 2249 2249 if (s->chan_rx_saved) { ··· 2545 2545 serial_port_out(port, SCCKS, sccks); 2546 2546 } 2547 2547 2548 - spin_lock_irqsave(&port->lock, flags); 2548 + uart_port_lock_irqsave(port, &flags); 2549 2549 2550 2550 sci_reset(port); 2551 2551 ··· 2667 2667 if ((termios->c_cflag & CREAD) != 0) 2668 2668 sci_start_rx(port); 2669 2669 2670 - spin_unlock_irqrestore(&port->lock, flags); 2670 + uart_port_unlock_irqrestore(port, flags); 2671 2671 2672 2672 sci_port_disable(s); 2673 2673 ··· 3052 3052 if (port->sysrq) 3053 3053 locked = 0; 3054 3054 else if (oops_in_progress) 3055 - locked = spin_trylock_irqsave(&port->lock, flags); 3055 + locked = uart_port_trylock_irqsave(port, &flags); 3056 3056 else 3057 - spin_lock_irqsave(&port->lock, flags); 3057 + uart_port_lock_irqsave(port, &flags); 3058 3058 
3059 3059 /* first save SCSCR then disable interrupts, keep clock source */ 3060 3060 ctrl = serial_port_in(port, SCSCR); ··· 3074 3074 serial_port_out(port, SCSCR, ctrl); 3075 3075 3076 3076 if (locked) 3077 - spin_unlock_irqrestore(&port->lock, flags); 3077 + uart_port_unlock_irqrestore(port, flags); 3078 3078 } 3079 3079 3080 3080 static int serial_console_setup(struct console *co, char *options)