// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports. They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs. If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */


#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

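/*
 * Upper bound on the number of passes the interrupt handler loop in
 * pl011_int() will make before bailing out, so a stuck interrupt
 * source cannot livelock the CPU.
 */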
#define AMBA_ISR_PASS_LIMIT	256

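/*
 * Error bits in the data register, plus a dummy bit (bit 16) that is
 * OR'ed into every received character in pl011_fifo_to_tty().  Putting
 * UART_DUMMY_DR_RX into ignore_status_mask (see
 * pl011_setup_status_masks()) therefore discards every character,
 * which is how !CREAD is implemented.
 */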
#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)

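/*
 * Register offsets are looked up through per-vendor tables like this
 * one, so variants that move or split registers (ST, ZTE) can share a
 * single driver.  Indices are the symbolic REG_* names from
 * amba-pl011.h.
 */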
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};

static const struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};

#ifdef CONFIG_ACPI_SPCR_TABLE
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#endif

static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};

static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = ZX_UART011_DR,
	[REG_FR] = ZX_UART011_FR,
	[REG_LCRH_RX] = ZX_UART011_LCRH,
	[REG_LCRH_TX] = ZX_UART011_LCRH,
	[REG_IBRD] = ZX_UART011_IBRD,
	[REG_FBRD] = ZX_UART011_FBRD,
	[REG_CR] = ZX_UART011_CR,
	[REG_IFLS] = ZX_UART011_IFLS,
	[REG_IMSC] = ZX_UART011_IMSC,
	[REG_RIS] = ZX_UART011_RIS,
	[REG_MIS] = ZX_UART011_MIS,
	[REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};

static unsigned int get_fifosize_zte(struct amba_device *dev)
{
	return 16;
}

static struct vendor_data vendor_zte = {
	.reg_offset		= pl011_zte_offsets,
	.access_32b		= true,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= ZX_UART01x_FR_BUSY,
	.fr_dsr			= ZX_UART01x_FR_DSR,
	.fr_cts			= ZX_UART01x_FR_CTS,
	.fr_ri			= ZX_UART011_FR_RI,
	.get_fifosize		= get_fifosize_zte,
};

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

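/*
 * RX DMA uses two buffers, A and B: while one is being filled by the
 * DMA engine, the other is drained into the TTY layer; use_buf_b
 * selects the buffer for the job in flight.  The timer and the
 * last_residue/last_jiffies fields implement the optional polling
 * mode, where the buffer is drained periodically instead of waiting
 * for a DMA completion or a FIFO timeout interrupt.
 */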
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}

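/*
 * All register accesses go through the vendor's reg_offset table, and
 * the access width follows the port's iotype: UPIO_MEM32 gives 32-bit
 * accesses (used by the variants that set access_32b, such as SBSA and
 * ZTE), everything else uses 16-bit accesses.
 */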
static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, fifotaken;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

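/*
 * Each scatterlist buffer is one page of coherent DMA memory, so the
 * CPU can read completed data without explicit cache synchronisation;
 * the single sg entry below simply wraps that coherent page.
 */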
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing,
		 * otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * The poll rate defaults to 100 ms if
				 * not specified.  It will be adjusted
				 * to the baud rate in set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* poll_timeout defaults to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
					dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *	1 if we queued up a TX DMA buffer.
 *	0 if we didn't want to handle this by DMA
 *	<0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

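	/*
	 * The circular xmit buffer may wrap: when tail is behind head the
	 * pending data is contiguous, otherwise it is copied in two runs,
	 * tail..end-of-buffer and then start-of-buffer onwards.  E.g. with
	 * UART_XMIT_SIZE 4096, tail 4000 and count 200: first = 96,
	 * second = 104.
	 */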
	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *	false if we want to use PIO to transmit
 *	true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *	false if we want the TX IRQ to be enabled
 *	true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send. Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

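	/*
	 * The DMA engine now services the RX FIFO, so mask the RX FIFO
	 * interrupt; the receive-timeout interrupt is left alone so a
	 * partially filled buffer still reaches pl011_dma_rx_irq().
	 */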
	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY.  It also updates last_residue for the next poll.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev,
				"could not trigger initial RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies +
				  msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);
}

static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job, fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies +
					  msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}

static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}

/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce a 26 ns (1 UART clk) delay before W1C;
	 * a single APB access incurs a 2 pclk (133.12 MHz) delay,
	 * so add 2 dummy reads.
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}

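/*
 * Main interrupt handler.  Loops while any unmasked raw status bit is
 * set, dispatching RX work (DMA or PIO), modem-status changes and TX
 * work, and gives up after AMBA_ISR_PASS_LIMIT passes so a misbehaving
 * UART cannot hang the CPU in interrupt context.
 */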
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}

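/*
 * The vendor's inv_fr mask flips flag-register bits that some silicon
 * implements with inverted polarity; the QDF2400 erratum-44 variant
 * above, for instance, uses an inverted TXFE bit as its "busy" flag.
 */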
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in
	 * case the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}

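/*
 * On the ST variant LCRH is split into separate RX and TX registers
 * (see pl011_st_offsets); on a standard PL011 both names map to the
 * same offset and this returns false.
 */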
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}

static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write a read-only register 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}

static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
}

/*
 * Enable interrupts; only the receive timeout is used when DMA is
 * running.  If the initial RX DMA job failed, start in interrupt mode
 * as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	unsigned int i;

	spin_lock_irq(&uap->port.lock);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);

	/*
	 * RXIS is asserted only when the RX FIFO transitions from below
	 * to above the trigger threshold.  If the RX FIFO is already
	 * full to the threshold this can't happen and RXIS will now be
	 * stuck off.  Drain the RX FIFO explicitly to fix this:
	 */
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
	}

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irq(&uap->port.lock);
}

static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	spin_lock_irq(&uap->port.lock);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
					unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}

/*
 * Disable the port. It should not disable RTS and DTR.
 * Also, the RTS and DTR state should be preserved so it can be
 * restored during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}

static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

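/*
 * Derive the two status masks from termios: read_status_mask selects
 * which error bits are reported to the TTY layer, ignore_status_mask
 * selects received characters that are dropped entirely.  Because
 * every RX character has UART_DUMMY_DR_RX set (see
 * pl011_fifo_to_tty()), putting that bit into ignore_status_mask
 * discards everything when CREAD is clear.
 */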
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}

static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

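	/*
	 * quot is the baud-rate divisor in fixed point with a 6-bit
	 * fraction: 64 * uartclk / (16 * baud) = uartclk * 4 / baud.
	 * For baud rates above uartclk/16 the ST oversampling mode
	 * samples at 8 clocks per bit instead of 16, so the scaled
	 * divisor becomes uartclk * 8 / baud.  The integer part goes to
	 * IBRD and the 6-bit fraction to FBRD further down.
	 */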
1969 if (baud > port->uartclk/16)
1970 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
1971 else
1972 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
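
	/*
	 * Worked example (illustrative figures only): with uartclk = 24 MHz,
	 * standard 16x oversampling and a requested 115200 baud, the divisor
	 * in 1/64 steps is quot = uartclk * 4 / baud =
	 * DIV_ROUND_CLOSEST(96000000, 115200) = 833. Below, REG_IBRD is
	 * written with 833 >> 6 = 13 and REG_FBRD with 833 & 0x3f = 1, an
	 * effective divisor of 13 + 1/64, so the actual rate is
	 * 24000000 / (16 * 13.015625) ~= 115246 baud, within 0.05% of the
	 * request.
	 */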

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = pl011_read(uap, REG_CR);
	pl011_write(0, uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}

static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}

static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}

static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty and restore the
	 * TCR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}

static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
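
/*
 * Illustrative numbers, continuing the hypothetical 24 MHz example from
 * pl011_set_termios(): reading back IBRD = 13 and FBRD = 1 gives
 * *baud = 24000000 * 4 / (64 * 13 + 1) = 96000000 / 833 ~= 115246,
 * which the termios code then encodes as the nearest standard rate,
 * 115200 (standard rates within a couple of per cent are accepted).
 */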

static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

/**
 * pl011_console_match - non-standard console matching
 * @co: registering console
 * @name: name from console command line
 * @idx: index from console command line
 * @options: ptr to option string from console command line
 *
 * Only attempts to match console command lines of the form:
 *	console=pl011,mmio|mmio32,<addr>[,<options>]
 *	console=pl011,0x<addr>[,<options>]
 * This form is used to register an initial earlycon boot console and
 * replace it with the amba_console at pl011 driver init.
 *
 * Performs console setup for a match (as required by interface)
 * If no <options> are specified, then assume the h/w is already setup.
 *
 * Returns 0 if console matches; otherwise non-zero to use default matching
 */
static int __init pl011_console_match(struct console *co, char *name, int idx,
				      char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}
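
/*
 * Example hand-over (the address is illustrative only): booting with
 *
 *	earlycon=pl011,0x9000000 console=pl011,mmio,0x9000000,115200n8
 *
 * starts an early boot console on the PL011 at 0x9000000; once this
 * driver probes that port, the match routine above transfers the console
 * to the regular ttyAMA device with the same options.
 */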

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}

static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
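
/*
 * Illustrative device-tree usage (node and alias names are examples
 * only): on DT systems a bare "earlycon" parameter resolves through the
 * chosen node, e.g.
 *
 *	chosen {
 *		stdout-path = "serial0:115200n8";
 *	};
 *
 * and the OF_EARLYCON_DECLARE() entries above let that path land on an
 * "arm,pl011" or "arm,sbsa-uart" compatible node.
 */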

/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};

static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
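
/*
 * Illustrative aliases node (labels are examples only): with
 *
 *	aliases {
 *		serial0 = &uart0;
 *		serial1 = &uart1;
 *	};
 *
 * in the device tree, of_alias_get_id() above pins uart0 to ttyAMA0 and
 * uart1 to ttyAMA1, independent of probe order.
 */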

/* Also unregisters the driver if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	amba_ports[index] = uap;

	return 0;
}

static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}
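
	/*
	 * Illustrative DT node for this probe path (all values are
	 * examples only):
	 *
	 *	uart@9000000 {
	 *		compatible = "arm,sbsa-uart";
	 *		reg = <0x9000000 0x1000>;
	 *		interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
	 *		current-speed = <115200>;
	 *	};
	 */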

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}

static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};

static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
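
/*
 * For reference: the AMBA peripheral ID encodes the part number in bits
 * [11:0], the JEP106 designer code in bits [19:12] and the revision in
 * bits [23:20]. The first entry above, id 0x00041011 with mask
 * 0x000fffff, therefore matches part 0x011 from designer 0x41 (ARM) at
 * any revision, while the ST entry's wider mask also pins the revision.
 */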

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built in it is most likely the console,
 * so keep module_exit() but move module_init() to an earlier point.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");