Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.2-rc2 1470 lines 47 kB view raw
/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>

#include "sirfsoc_uart.h"

static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;

static void sirfsoc_uart_tx_dma_complete_callback(void *param);

/*
 * Precomputed divisor-register values for common baud rates, used by
 * sirfsoc_uart_set_termios() when the UART I/O clock is exactly 150 MHz.
 * Rates not in this table fall back to sirfsoc_uart_calc_sample_div().
 */
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
	{4000000, 2359296},
	{3500000, 1310721},
	{3000000, 1572865},
	{2500000, 1245186},
	{2000000, 1572866},
	{1500000, 1245188},
	{1152000, 1638404},
	{1000000, 1572869},
	{921600, 1114120},
	{576000, 1245196},
	{500000, 1245198},
	{460800, 1572876},
	{230400, 1310750},
	{115200, 1310781},
	{57600, 1310843},
	{38400, 1114328},
	{19200, 1114545},
	{9600, 1114979},
};

/* One slot per possible port; filled in at probe time (not visible here). */
static struct sirfsoc_uart_port *sirf_ports[SIRFSOC_UART_NR];

/* Map the generic uart_port back to its enclosing sirfsoc_uart_port. */
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
	return container_of(port, struct sirfsoc_uart_port, port);
}

/*
 * uart_ops.tx_empty: report TIOCSER_TEMT when the TX FIFO's "empty" status
 * bit is set, 0 otherwise.
 */
static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
	unsigned long reg;
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
	return (reg & ufifo_st->ff_empty(port)) ? TIOCSER_TEMT : 0;
}

/*
 * uart_ops.get_mctrl: CAR and DSR are always reported asserted (not wired).
 * CTS is read from the AFC control register on a real UART, or from a GPIO
 * on a USP-based port; with flow control off or modem status disabled,
 * CTS is reported asserted unconditionally.
 */
static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
		goto cts_asserted;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
						SIRFUART_AFC_CTS_STATUS))
			goto cts_asserted;
		else
			goto cts_deasserted;
	} else {
		/* GPIO CTS is active-low: 0 on the pin means asserted */
		if (!gpio_get_value(sirfport->cts_gpio))
			goto cts_asserted;
		else
			goto cts_deasserted;
	}
cts_deasserted:
	return TIOCM_CAR | TIOCM_DSR;
cts_asserted:
	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}

/*
 * uart_ops.set_mctrl: program loopback mode and RTS.
 * RTS on a real UART is driven by writing the RX threshold into the low
 * byte of the AFC control register (0 deasserts); on a USP port it is a
 * GPIO, driven low to assert (active-low).
 */
static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	unsigned int assert = mctrl & TIOCM_RTS;
	unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
	unsigned int current_val;

	if (mctrl & TIOCM_LOOP) {
		if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
			wr_regl(port, ureg->sirfsoc_line_ctrl,
				rd_regl(port, ureg->sirfsoc_line_ctrl) |
				SIRFUART_LOOP_BACK);
		else
			wr_regl(port, ureg->sirfsoc_mode1,
				rd_regl(port, ureg->sirfsoc_mode1) |
				SIRFSOC_USP_LOOP_BACK_CTRL);
	} else {
		if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
			wr_regl(port, ureg->sirfsoc_line_ctrl,
				rd_regl(port, ureg->sirfsoc_line_ctrl) &
				~SIRFUART_LOOP_BACK);
		else
			wr_regl(port, ureg->sirfsoc_mode1,
				rd_regl(port, ureg->sirfsoc_mode1) &
				~SIRFSOC_USP_LOOP_BACK_CTRL);
	}

	if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
		return;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		/* preserve the upper AFC bits, replace only the low byte */
		current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
		val |= current_val;
		wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
	} else {
		if (!val)
			gpio_set_value(sirfport->rts_gpio, 1);
		else
			gpio_set_value(sirfport->rts_gpio, 0);
	}
}

/*
 * uart_ops.stop_tx: with DMA, pause a running transfer (so it can be
 * resumed later) or mask the TX-FIFO-empty interrupt; in PIO mode, also
 * drop the USP TX enable bit first.  Atlas7 has a dedicated
 * interrupt-enable-clear register instead of read-modify-write.
 */
static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (sirfport->tx_dma_chan) {
		if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
			dmaengine_pause(sirfport->tx_dma_chan);
			sirfport->tx_dma_state = TX_DMA_PAUSE;
		} else {
			if (!sirfport->is_atlas7)
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg) &
					~uint_en->sirfsoc_txfifo_empty_en);
			else
				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_txfifo_empty_en);
		}
	} else {
		if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
			wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
				ureg->sirfsoc_tx_rx_en) & ~SIRFUART_TX_EN);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	}
}

/*
 * Kick (or resume) a TX DMA transfer from the circular xmit buffer.
 * Called with port->lock held (from start_tx / ISR / DMA callback paths
 * visible in this file).
 */
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
		!tran_size)
		return;
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	/* mask TXFIFO-empty while we decide between PIO and DMA */
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires buffer address and buffer length are both aligned with
	 * 4 bytes, so we use PIO for
	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
	 * bytes, and move to DMA for the left part aligned with 4bytes
	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
	 * part first, move to PIO for the left 1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		/* truncate to a 4-byte-aligned length; PIO picks up the rest */
		tran_size &= ~(0x3);

		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		sirfport->transfer_size = tran_size;

		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}

/*
 * uart_ops.start_tx: delegate to the DMA path when a TX DMA channel
 * exists; otherwise prime the TX FIFO by PIO and unmask the
 * TXFIFO-empty interrupt so the ISR refills it.
 */
static void sirfsoc_uart_start_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	if (sirfport->tx_dma_chan)
		sirfsoc_uart_tx_with_dma(sirfport);
	else {
		if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
			wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
				ureg->sirfsoc_tx_rx_en) | SIRFUART_TX_EN);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	}
}

/*
 * uart_ops.stop_rx: stop the RX FIFO, mask the RX interrupts for the
 * active mode (DMA or PIO), and terminate any in-flight RX DMA.
 */
static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	if (sirfport->rx_dma_chan) {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type) |
				uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					SIRFUART_RX_DMA_INT_EN(uint_en,
					sirfport->uart_reg->uart_type)|
					uint_en->sirfsoc_rx_done_en);
		dmaengine_terminate_all(sirfport->rx_dma_chan);
	} else {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(SIRFUART_RX_IO_INT_EN(uint_en,
				sirfport->uart_reg->uart_type)));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					SIRFUART_RX_IO_INT_EN(uint_en,
					sirfport->uart_reg->uart_type));
	}
}

/*
 * Disable modem-status (CTS) reporting: clear the AFC bits and mask the
 * CTS interrupt on a real UART, or disable the CTS GPIO interrupt on USP.
 */
static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = false;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
				rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)&
					~uint_en->sirfsoc_cts_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_cts_en);
	} else
		disable_irq(gpio_to_irq(sirfport->cts_gpio));
}

/*
 * GPIO interrupt handler for CTS changes on USP-based ports; forwards the
 * (active-low) pin state to the serial core.
 */
static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	spin_lock(&port->lock);
	if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
		uart_handle_cts_change(port,
				!gpio_get_value(sirfport->cts_gpio));
	spin_unlock(&port->lock);
	return IRQ_HANDLED;
}

/*
 * uart_ops.enable_ms: turn on hardware flow control reporting — AFC + CTS
 * interrupt on a real UART, CTS GPIO interrupt on USP.
 */
static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = true;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
				rd_regl(port, ureg->sirfsoc_afc_ctrl) |
				SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN |
				SIRFUART_AFC_CTRL_RX_THD);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| uint_en->sirfsoc_cts_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_cts_en);
	} else
		enable_irq(gpio_to_irq(sirfport->cts_gpio));
}

/*
 * uart_ops.break_ctl: drive the break bit in the line-control register.
 * Only real UARTs support break; USP ports silently ignore the request.
 */
static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
		if (break_state)
			ulcon |= SIRFUART_SET_BREAK;
		else
			ulcon &= ~SIRFUART_SET_BREAK;
		wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
	}
}

/*
 * Drain up to max_rx_count characters from the RX FIFO into the tty layer
 * by PIO; returns the number of characters consumed.
 *
 * NOTE(review): the function returns unsigned int yet hands back -ENODEV
 * when no tty is attached; callers in this file ignore the return value,
 * but the sign mismatch deserves a cleanup (return 0 or make it void).
 */
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	unsigned int ch, rx_count = 0;
	struct tty_struct *tty;
	tty = tty_port_tty_get(&port->state->port);
	if (!tty)
		return -ENODEV;
	while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
					ufifo_st->ff_empty(port))) {
		ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
			SIRFUART_DUMMY_READ;
		if (unlikely(uart_handle_sysrq_char(port, ch)))
			continue;
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
		rx_count++;
		if (rx_count >= max_rx_count)
			break;
	}

	sirfport->rx_io_count += rx_count;
	port->icount.rx += rx_count;

	return rx_count;
}

/*
 * Push up to count characters from the circular xmit buffer into the TX
 * FIFO by PIO; stops early when the buffer empties or the FIFO fills.
 * Returns the number of characters written.
 */
static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int num_tx = 0;
	while (!uart_circ_empty(xmit) &&
		!(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
			ufifo_st->ff_full(port)) &&
		count--) {
		wr_regl(port, ureg->sirfsoc_tx_fifo_data,
				xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		num_tx++;
	}
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	return num_tx;
}

/*
 * TX DMA completion callback: advance the circular buffer past the chunk
 * just sent, unmap it, and immediately try to queue the next transfer.
 */
static void sirfsoc_uart_tx_dma_complete_callback(void *param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	xmit->tail = (xmit->tail + sirfport->transfer_size) &
				(UART_XMIT_SIZE - 1);
	port->icount.tx += sirfport->transfer_size;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (sirfport->tx_dma_addr)
		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
				sirfport->transfer_size, DMA_TO_DEVICE);
	sirfport->tx_dma_state = TX_DMA_IDLE;
	sirfsoc_uart_tx_with_dma(sirfport);
	spin_unlock_irqrestore(&port->lock, flags);
}

/*
 * Main UART interrupt handler: services line errors (break/overrun/
 * frame/parity with an RX FIFO reset), CTS changes, PIO receive (with
 * RX_DONE/RX_TIMEOUT interrupt ping-ponging, see comment below) and
 * TX FIFO refill.
 */
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
	unsigned long intr_status;
	unsigned long cts_status;
	unsigned long flag = TTY_NORMAL;
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct uart_state *state = port->state;
	struct circ_buf *xmit = &port->state->xmit;
	spin_lock(&port->lock);
	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);	/* ack all */
	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st,
				sirfport->uart_reg->uart_type)))) {
		if (intr_status & uint_st->sirfsoc_rxd_brk) {
			port->icount.brk++;
			if (uart_handle_break(port))
				goto recv_char;
		}
		if (intr_status & uint_st->sirfsoc_rx_oflow) {
			port->icount.overrun++;
			flag = TTY_OVERRUN;
		}
		if (intr_status & uint_st->sirfsoc_frm_err) {
			port->icount.frame++;
			flag = TTY_FRAME;
		}
		if (intr_status & uint_st->sirfsoc_parity_err) {
			port->icount.parity++;
			flag = TTY_PARITY;
		}
		/* recover from the error by resetting/restarting the RX FIFO */
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
		intr_status &= port->read_status_mask;
		uart_insert_char(port, intr_status,
					uint_en->sirfsoc_rx_oflow_en, 0, flag);
	}
recv_char:
	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
			!sirfport->tx_dma_state) {
		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
					SIRFUART_AFC_CTS_STATUS;
		if (cts_status != 0)
			cts_status = 0;
		else
			cts_status = 1;
		uart_handle_cts_change(port, cts_status);
		wake_up_interruptible(&state->port.delta_msr_wait);
	}
	if (!sirfport->rx_dma_chan &&
		(intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) {
		/*
		 * chip will trigger continuous RX_TIMEOUT interrupt
		 * in RXFIFO empty and not trigger if RXFIFO recevice
		 * data in limit time, original method use RX_TIMEOUT
		 * will trigger lots of useless interrupt in RXFIFO
		 * empty.RXFIFO received one byte will trigger RX_DONE
		 * interrupt.use RX_DONE to wait for data received
		 * into RXFIFO, use RX_THD/RX_FULL for lots data receive
		 * and use RX_TIMEOUT for the last left data.
		 */
		if (intr_status & uint_st->sirfsoc_rx_done) {
			if (!sirfport->is_atlas7) {
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_done_en));
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| (uint_en->sirfsoc_rx_timeout_en));
			} else {
				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_rx_done_en);
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_timeout_en);
			}
		} else {
			if (intr_status & uint_st->sirfsoc_rx_timeout) {
				if (!sirfport->is_atlas7) {
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_timeout_en));
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| (uint_en->sirfsoc_rx_done_en));
				} else {
					wr_regl(port,
						ureg->sirfsoc_int_en_clr_reg,
						uint_en->sirfsoc_rx_timeout_en);
					wr_regl(port, ureg->sirfsoc_int_en_reg,
						uint_en->sirfsoc_rx_done_en);
				}
			}
			sirfsoc_uart_pio_rx_chars(port, port->fifosize);
		}
	}
	/* push outside the lock; tty_flip_buffer_push may sleep-free but
	 * must not be called with port->lock held here */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&state->port);
	spin_lock(&port->lock);
	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
		if (sirfport->tx_dma_chan)
			sirfsoc_uart_tx_with_dma(sirfport);
		else {
			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
				spin_unlock(&port->lock);
				return IRQ_HANDLED;
			} else {
				sirfsoc_uart_pio_tx_chars(sirfport,
						port->fifosize);
				if ((uart_circ_empty(xmit)) &&
				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
				ufifo_st->ff_empty(port)))
					sirfsoc_uart_stop_tx(port);
			}
		}
	}
	spin_unlock(&port->lock);

	return IRQ_HANDLED;
}

/* Cyclic RX DMA needs no per-period work here; data is pulled elsewhere. */
static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
}

/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	sirfport->rx_io_count = 0;
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	sirfport->rx_dma_items.xmit.tail =
		sirfport->rx_dma_items.xmit.head = 0;
	/* cyclic transfer: two halves of the RX bounce buffer */
	sirfport->rx_dma_items.desc =
		dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
		sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		SIRFSOC_RX_DMA_BUF_SIZE / 2,
		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items.desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items.desc->callback_param = sirfport;
	sirfport->rx_dma_items.cookie =
		dmaengine_submit(sirfport->rx_dma_items.desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(uint_en,
			sirfport->uart_reg->uart_type));
}

/*
 * Reset and restart the RX FIFO, then arm receive in either DMA or PIO
 * interrupt mode depending on whether an RX DMA channel was obtained.
 */
static void sirfsoc_uart_start_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	sirfport->rx_io_count = 0;
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
	if (sirfport->rx_dma_chan)
		sirfsoc_uart_start_next_rx_dma(port);
	else {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				SIRFUART_RX_IO_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				SIRFUART_RX_IO_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	}
}

/*
 * Find the sample-divisor / ioclk-divisor pair that best approximates
 * set_rate for a USP port.  Writes the chosen sample divisor through
 * *sample_reg and returns the matching ioclk divisor.
 *
 * NOTE(review): temp_delta is unsigned long, so "(temp_delta > 0) ?
 * temp_delta : -temp_delta" never takes the negation branch and the
 * earlier subtraction can wrap; huge wrapped values simply lose the
 * min_delta comparison, but the idiom is worth tidying upstream.
 */
static unsigned int
sirfsoc_usp_calc_sample_div(unsigned long set_rate,
		unsigned long ioclk_rate, unsigned long *sample_reg)
{
	unsigned long min_delta = ~0UL;
	unsigned short sample_div;
	unsigned long ioclk_div = 0;
	unsigned long temp_delta;

	for (sample_div = SIRF_USP_MIN_SAMPLE_DIV;
			sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
		temp_delta = ioclk_rate -
		(ioclk_rate + (set_rate * sample_div) / 2)
		/ (set_rate * sample_div) * set_rate * sample_div;

		temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
		if (temp_delta < min_delta) {
			ioclk_div = (2 * ioclk_rate /
				(set_rate * sample_div) + 1) / 2 - 1;
			if (ioclk_div > SIRF_IOCLK_DIV_MAX)
				continue;
			min_delta = temp_delta;
			*sample_reg = sample_div;
			if (!temp_delta)
				break;
		}
	}
	return ioclk_div;
}

/*
 * Search all sample divisors for the divisor-register value whose
 * resulting baud rate is closest to baud_rate on a real UART; the
 * achieved rate is returned through *set_baud.
 */
static unsigned int
sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
			unsigned long ioclk_rate, unsigned long *set_baud)
{
	unsigned long min_delta = ~0UL;
	unsigned short sample_div;
	unsigned int regv = 0;
	unsigned long ioclk_div;
	unsigned long baud_tmp;
	int temp_delta;

	for (sample_div = SIRF_MIN_SAMPLE_DIV;
			sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
		ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
		if (ioclk_div > SIRF_IOCLK_DIV_MAX)
			continue;
		baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
		temp_delta = baud_tmp - baud_rate;
		temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
		if (temp_delta < min_delta) {
			regv = regv & (~SIRF_IOCLK_DIV_MASK);
			regv = regv | ioclk_div;
			regv = regv & (~SIRF_SAMPLE_DIV_MASK);
			regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
			min_delta = temp_delta;
			*set_baud = baud_tmp;
		}
	}
	return regv;
}

/*
 * uart_ops.set_termios: program word length, stop bits, parity, status
 * masks, flow control, baud divisors, receive timeout and FIFO
 * thresholds, then restart RX.  Held under port->lock for the whole
 * register sequence.
 */
static void sirfsoc_uart_set_termios(struct uart_port *port,
				       struct ktermios *termios,
				       struct ktermios *old)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long	config_reg = 0;
	unsigned long	baud_rate;
	unsigned long	set_baud;
	unsigned long	flags;
	unsigned long	ic;
	unsigned int	clk_div_reg = 0;
	unsigned long	txfifo_op_reg, ioclk_rate;
	unsigned long	rx_time_out;
	int		threshold_div;
	u32		data_bit_len, stop_bit_len, len_val;
	unsigned long	sample_div_reg = 0xf;
	ioclk_rate	= port->uartclk;

	switch (termios->c_cflag & CSIZE) {
	default:
	case CS8:
		data_bit_len = 8;
		config_reg |= SIRFUART_DATA_BIT_LEN_8;
		break;
	case CS7:
		data_bit_len = 7;
		config_reg |= SIRFUART_DATA_BIT_LEN_7;
		break;
	case CS6:
		data_bit_len = 6;
		config_reg |= SIRFUART_DATA_BIT_LEN_6;
		break;
	case CS5:
		data_bit_len = 5;
		config_reg |= SIRFUART_DATA_BIT_LEN_5;
		break;
	}
	if (termios->c_cflag & CSTOPB) {
		config_reg |= SIRFUART_STOP_BIT_LEN_2;
		stop_bit_len = 2;
	} else
		stop_bit_len = 1;

	spin_lock_irqsave(&port->lock, flags);
	port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
	port->ignore_status_mask = 0;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (termios->c_iflag & INPCK)
			port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
				uint_en->sirfsoc_parity_err_en;
	} else {
		if (termios->c_iflag & INPCK)
			port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
	}
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_frm_err_en |
				uint_en->sirfsoc_parity_err_en;
		if (termios->c_cflag & PARENB) {
			if (termios->c_cflag & CMSPAR) {
				if (termios->c_cflag & PARODD)
					config_reg |= SIRFUART_STICK_BIT_MARK;
				else
					config_reg |= SIRFUART_STICK_BIT_SPACE;
			} else {
				if (termios->c_cflag & PARODD)
					config_reg |= SIRFUART_STICK_BIT_ODD;
				else
					config_reg |= SIRFUART_STICK_BIT_EVEN;
			}
		}
	} else {
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_frm_err_en;
		if (termios->c_cflag & PARENB)
			dev_warn(port->dev,
					"USP-UART not support parity err\n");
	}
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |=
			uint_en->sirfsoc_rxd_brk_en;
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_rx_oflow_en;
	}
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= SIRFUART_DUMMY_READ;
	/* Hardware Flow Control Settings */
	if (UART_ENABLE_MS(port, termios->c_cflag)) {
		if (!sirfport->ms_enabled)
			sirfsoc_uart_enable_ms(port);
	} else {
		if (sirfport->ms_enabled)
			sirfsoc_uart_disable_ms(port);
	}
	baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
	if (ioclk_rate == 150000000) {
		/* exact-match table lookup; 0 means "not found" below */
		for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
			if (baud_rate == baudrate_to_regv[ic].baud_rate)
				clk_div_reg = baudrate_to_regv[ic].reg_val;
	}
	set_baud = baud_rate;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (unlikely(clk_div_reg == 0))
			clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
					ioclk_rate, &set_baud);
		wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
	} else {
		clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
				ioclk_rate, &sample_div_reg);
		sample_div_reg--;
		set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
				(sample_div_reg + 1));
		/* setting usp mode 2 */
		len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
				(1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
		len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
				<< SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_mode2, len_val);
	}
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, set_baud, set_baud);
	/* set receive timeout && data bits len */
	rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
	rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
	txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
	wr_regl(port, ureg->sirfsoc_tx_fifo_op,
			(txfifo_op_reg & ~SIRFUART_FIFO_START));
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		config_reg |= SIRFUART_UART_RECV_TIMEOUT(rx_time_out);
		wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
	} else {
		/*tx frame ctrl*/
		len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
				SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
		len_val |= ((data_bit_len - 1) <<
				SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
		len_val |= (((clk_div_reg & 0xc00) >> 10) <<
				SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
		/*rx frame ctrl*/
		len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
				SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
		len_val |= (data_bit_len - 1) <<
				SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
		len_val |= (((clk_div_reg & 0xf000) >> 12) <<
				SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
		/*async param*/
		wr_regl(port, ureg->sirfsoc_async_param_reg,
			(SIRFUART_USP_RECV_TIMEOUT(rx_time_out)) |
			(sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
			SIRFSOC_USP_ASYNC_DIV2_OFFSET);
	}
	if (sirfport->tx_dma_chan)
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
	else
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
	if (sirfport->rx_dma_chan)
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
	else
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
	sirfport->rx_period_time = 20000000;
	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
	if (set_baud < 1000000)
		threshold_div = 1;
	else
		threshold_div = 2;
	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
				SIRFUART_FIFO_THD(port) / threshold_div);
	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
				SIRFUART_FIFO_THD(port) / threshold_div);
	txfifo_op_reg |= SIRFUART_FIFO_START;
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
	uart_update_timeout(port, termios->c_cflag, set_baud);
	sirfsoc_uart_start_rx(port);
	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
	spin_unlock_irqrestore(&port->lock, flags);
}

/*
 * uart_ops.pm: gate the port clock; serial core passes state==0 for
 * "powered on".
 */
static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
			      unsigned int oldstate)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	if (!state)
		clk_prepare_enable(sirfport->clk);
	else
		clk_disable_unprepare(sirfport->clk);
}

/*
 * uart_ops.startup: request the port IRQ, program initial FIFO/DMA/
 * interrupt state, and (for USP ports with HW flow control) install the
 * CTS GPIO interrupt handler.
 * (Definition continues beyond this chunk of the file.)
 */
static int sirfsoc_uart_startup(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport	= to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	unsigned int index			= port->line;
	int ret;
	irq_modify_status(port->irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
	ret = request_irq(port->irq,
				sirfsoc_uart_isr,
				0,
				SIRFUART_PORT_NAME,
				sirfport);
	if (ret != 0) {
		dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
							index, port->irq);
		goto irq_err;
	}
	/* initial hardware settings */
	wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_RX_DMA_FLUSH);
	wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(port, ureg->sirfsoc_mode1,
			SIRFSOC_USP_ENDIAN_CTRL_LSBF |
			SIRFSOC_USP_EN);
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
	if (sirfport->rx_dma_chan)
		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
			SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
	if (sirfport->tx_dma_chan) {
		sirfport->tx_dma_state = TX_DMA_IDLE;
		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
				SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
				SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
				SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
	}
	sirfport->ms_enabled = false;
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
		sirfport->hw_flow_ctrl) {
		irq_modify_status(gpio_to_irq(sirfport->cts_gpio),
			IRQ_NOREQUEST, IRQ_NOAUTOEN);
		ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
			sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
			IRQF_TRIGGER_RISING, "usp_cts_irq",
sirfport); 979 if (ret != 0) { 980 dev_err(port->dev, "UART-USP:request gpio irq fail\n"); 981 goto init_rx_err; 982 } 983 } 984 enable_irq(port->irq); 985 if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) { 986 sirfport->is_hrt_enabled = true; 987 sirfport->rx_period_time = 20000000; 988 sirfport->rx_dma_items.xmit.tail = 989 sirfport->rx_dma_items.xmit.head = 0; 990 hrtimer_start(&sirfport->hrt, 991 ns_to_ktime(sirfport->rx_period_time), 992 HRTIMER_MODE_REL); 993 } 994 995 return 0; 996init_rx_err: 997 free_irq(port->irq, sirfport); 998irq_err: 999 return ret; 1000} 1001 1002static void sirfsoc_uart_shutdown(struct uart_port *port) 1003{ 1004 struct sirfsoc_uart_port *sirfport = to_sirfport(port); 1005 struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; 1006 if (!sirfport->is_atlas7) 1007 wr_regl(port, ureg->sirfsoc_int_en_reg, 0); 1008 else 1009 wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL); 1010 1011 free_irq(port->irq, sirfport); 1012 if (sirfport->ms_enabled) 1013 sirfsoc_uart_disable_ms(port); 1014 if (sirfport->uart_reg->uart_type == SIRF_USP_UART && 1015 sirfport->hw_flow_ctrl) { 1016 gpio_set_value(sirfport->rts_gpio, 1); 1017 free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport); 1018 } 1019 if (sirfport->tx_dma_chan) 1020 sirfport->tx_dma_state = TX_DMA_IDLE; 1021 if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) { 1022 while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) & 1023 SIRFUART_RX_FIFO_MASK) > 0) 1024 ; 1025 sirfport->is_hrt_enabled = false; 1026 hrtimer_cancel(&sirfport->hrt); 1027 } 1028} 1029 1030static const char *sirfsoc_uart_type(struct uart_port *port) 1031{ 1032 return port->type == SIRFSOC_PORT_TYPE ? 
SIRFUART_PORT_NAME : NULL; 1033} 1034 1035static int sirfsoc_uart_request_port(struct uart_port *port) 1036{ 1037 struct sirfsoc_uart_port *sirfport = to_sirfport(port); 1038 struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param; 1039 void *ret; 1040 ret = request_mem_region(port->mapbase, 1041 SIRFUART_MAP_SIZE, uart_param->port_name); 1042 return ret ? 0 : -EBUSY; 1043} 1044 1045static void sirfsoc_uart_release_port(struct uart_port *port) 1046{ 1047 release_mem_region(port->mapbase, SIRFUART_MAP_SIZE); 1048} 1049 1050static void sirfsoc_uart_config_port(struct uart_port *port, int flags) 1051{ 1052 if (flags & UART_CONFIG_TYPE) { 1053 port->type = SIRFSOC_PORT_TYPE; 1054 sirfsoc_uart_request_port(port); 1055 } 1056} 1057 1058static struct uart_ops sirfsoc_uart_ops = { 1059 .tx_empty = sirfsoc_uart_tx_empty, 1060 .get_mctrl = sirfsoc_uart_get_mctrl, 1061 .set_mctrl = sirfsoc_uart_set_mctrl, 1062 .stop_tx = sirfsoc_uart_stop_tx, 1063 .start_tx = sirfsoc_uart_start_tx, 1064 .stop_rx = sirfsoc_uart_stop_rx, 1065 .enable_ms = sirfsoc_uart_enable_ms, 1066 .break_ctl = sirfsoc_uart_break_ctl, 1067 .startup = sirfsoc_uart_startup, 1068 .shutdown = sirfsoc_uart_shutdown, 1069 .set_termios = sirfsoc_uart_set_termios, 1070 .pm = sirfsoc_uart_pm, 1071 .type = sirfsoc_uart_type, 1072 .release_port = sirfsoc_uart_release_port, 1073 .request_port = sirfsoc_uart_request_port, 1074 .config_port = sirfsoc_uart_config_port, 1075}; 1076 1077#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE 1078static int __init 1079sirfsoc_uart_console_setup(struct console *co, char *options) 1080{ 1081 unsigned int baud = 115200; 1082 unsigned int bits = 8; 1083 unsigned int parity = 'n'; 1084 unsigned int flow = 'n'; 1085 struct sirfsoc_uart_port *sirfport; 1086 struct sirfsoc_register *ureg; 1087 if (co->index < 0 || co->index >= SIRFSOC_UART_NR) 1088 co->index = 1; 1089 sirfport = sirf_ports[co->index]; 1090 if (!sirfport) 1091 return -ENODEV; 1092 ureg = &sirfport->uart_reg->uart_reg; 
1093 if (!sirfport->port.mapbase) 1094 return -ENODEV; 1095 1096 /* enable usp in mode1 register */ 1097 if (sirfport->uart_reg->uart_type == SIRF_USP_UART) 1098 wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN | 1099 SIRFSOC_USP_ENDIAN_CTRL_LSBF); 1100 if (options) 1101 uart_parse_options(options, &baud, &parity, &bits, &flow); 1102 sirfport->port.cons = co; 1103 1104 /* default console tx/rx transfer using io mode */ 1105 sirfport->rx_dma_chan = NULL; 1106 sirfport->tx_dma_chan = NULL; 1107 return uart_set_options(&sirfport->port, co, baud, parity, bits, flow); 1108} 1109 1110static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch) 1111{ 1112 struct sirfsoc_uart_port *sirfport = to_sirfport(port); 1113 struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; 1114 struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; 1115 while (rd_regl(port, ureg->sirfsoc_tx_fifo_status) & 1116 ufifo_st->ff_full(port)) 1117 cpu_relax(); 1118 wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch); 1119} 1120 1121static void sirfsoc_uart_console_write(struct console *co, const char *s, 1122 unsigned int count) 1123{ 1124 struct sirfsoc_uart_port *sirfport = sirf_ports[co->index]; 1125 1126 uart_console_write(&sirfport->port, s, count, 1127 sirfsoc_uart_console_putchar); 1128} 1129 1130static struct console sirfsoc_uart_console = { 1131 .name = SIRFSOC_UART_NAME, 1132 .device = uart_console_device, 1133 .flags = CON_PRINTBUFFER, 1134 .index = -1, 1135 .write = sirfsoc_uart_console_write, 1136 .setup = sirfsoc_uart_console_setup, 1137 .data = &sirfsoc_uart_drv, 1138}; 1139 1140static int __init sirfsoc_uart_console_init(void) 1141{ 1142 register_console(&sirfsoc_uart_console); 1143 return 0; 1144} 1145console_initcall(sirfsoc_uart_console_init); 1146#endif 1147 1148static struct uart_driver sirfsoc_uart_drv = { 1149 .owner = THIS_MODULE, 1150 .driver_name = SIRFUART_PORT_NAME, 1151 .nr = SIRFSOC_UART_NR, 1152 .dev_name = SIRFSOC_UART_NAME, 
1153 .major = SIRFSOC_UART_MAJOR, 1154 .minor = SIRFSOC_UART_MINOR, 1155#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE 1156 .cons = &sirfsoc_uart_console, 1157#else 1158 .cons = NULL, 1159#endif 1160}; 1161 1162static enum hrtimer_restart 1163 sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt) 1164{ 1165 struct sirfsoc_uart_port *sirfport; 1166 struct uart_port *port; 1167 int count, inserted; 1168 struct dma_tx_state tx_state; 1169 struct tty_struct *tty; 1170 struct sirfsoc_register *ureg; 1171 struct circ_buf *xmit; 1172 1173 sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt); 1174 port = &sirfport->port; 1175 inserted = 0; 1176 tty = port->state->port.tty; 1177 ureg = &sirfport->uart_reg->uart_reg; 1178 xmit = &sirfport->rx_dma_items.xmit; 1179 dmaengine_tx_status(sirfport->rx_dma_chan, 1180 sirfport->rx_dma_items.cookie, &tx_state); 1181 xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue; 1182 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, 1183 SIRFSOC_RX_DMA_BUF_SIZE); 1184 while (count > 0) { 1185 inserted = tty_insert_flip_string(tty->port, 1186 (const unsigned char *)&xmit->buf[xmit->tail], count); 1187 if (!inserted) 1188 goto next_hrt; 1189 port->icount.rx += inserted; 1190 xmit->tail = (xmit->tail + inserted) & 1191 (SIRFSOC_RX_DMA_BUF_SIZE - 1); 1192 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, 1193 SIRFSOC_RX_DMA_BUF_SIZE); 1194 tty_flip_buffer_push(tty->port); 1195 } 1196 /* 1197 * if RX DMA buffer data have all push into tty buffer, and there is 1198 * only little data(less than a dma transfer unit) left in rxfifo, 1199 * fetch it out in pio mode and switch back to dma immediately 1200 */ 1201 if (!inserted && !count && 1202 ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) & 1203 SIRFUART_RX_FIFO_MASK) > 0)) { 1204 /* switch to pio mode */ 1205 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, 1206 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | 1207 SIRFUART_IO_MODE); 1208 while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) & 1209 
SIRFUART_RX_FIFO_MASK) > 0) { 1210 if (sirfsoc_uart_pio_rx_chars(port, 16) > 0) 1211 tty_flip_buffer_push(tty->port); 1212 } 1213 wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET); 1214 wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); 1215 wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START); 1216 /* switch back to dma mode */ 1217 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, 1218 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) & 1219 ~SIRFUART_IO_MODE); 1220 } 1221next_hrt: 1222 hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time)); 1223 return HRTIMER_RESTART; 1224} 1225 1226static struct of_device_id sirfsoc_uart_ids[] = { 1227 { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,}, 1228 { .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart}, 1229 { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp}, 1230 { .compatible = "sirf,atlas7-usp-uart", .data = &sirfsoc_usp}, 1231 {} 1232}; 1233MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids); 1234 1235static int sirfsoc_uart_probe(struct platform_device *pdev) 1236{ 1237 struct sirfsoc_uart_port *sirfport; 1238 struct uart_port *port; 1239 struct resource *res; 1240 int ret; 1241 struct dma_slave_config slv_cfg = { 1242 .src_maxburst = 2, 1243 }; 1244 struct dma_slave_config tx_slv_cfg = { 1245 .dst_maxburst = 2, 1246 }; 1247 const struct of_device_id *match; 1248 1249 match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node); 1250 sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL); 1251 if (!sirfport) { 1252 ret = -ENOMEM; 1253 goto err; 1254 } 1255 sirfport->port.line = of_alias_get_id(pdev->dev.of_node, "serial"); 1256 sirf_ports[sirfport->port.line] = sirfport; 1257 sirfport->port.iotype = UPIO_MEM; 1258 sirfport->port.flags = UPF_BOOT_AUTOCONF; 1259 port = &sirfport->port; 1260 port->dev = &pdev->dev; 1261 port->private_data = sirfport; 1262 sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data; 1263 1264 sirfport->hw_flow_ctrl = 
of_property_read_bool(pdev->dev.of_node, 1265 "sirf,uart-has-rtscts"); 1266 if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart") || 1267 of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart")) 1268 sirfport->uart_reg->uart_type = SIRF_REAL_UART; 1269 if (of_device_is_compatible(pdev->dev.of_node, 1270 "sirf,prima2-usp-uart") || of_device_is_compatible( 1271 pdev->dev.of_node, "sirf,atlas7-usp-uart")) { 1272 sirfport->uart_reg->uart_type = SIRF_USP_UART; 1273 if (!sirfport->hw_flow_ctrl) 1274 goto usp_no_flow_control; 1275 if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL)) 1276 sirfport->cts_gpio = of_get_named_gpio( 1277 pdev->dev.of_node, "cts-gpios", 0); 1278 else 1279 sirfport->cts_gpio = -1; 1280 if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL)) 1281 sirfport->rts_gpio = of_get_named_gpio( 1282 pdev->dev.of_node, "rts-gpios", 0); 1283 else 1284 sirfport->rts_gpio = -1; 1285 1286 if ((!gpio_is_valid(sirfport->cts_gpio) || 1287 !gpio_is_valid(sirfport->rts_gpio))) { 1288 ret = -EINVAL; 1289 dev_err(&pdev->dev, 1290 "Usp flow control must have cts and rts gpio"); 1291 goto err; 1292 } 1293 ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio, 1294 "usp-cts-gpio"); 1295 if (ret) { 1296 dev_err(&pdev->dev, "Unable request cts gpio"); 1297 goto err; 1298 } 1299 gpio_direction_input(sirfport->cts_gpio); 1300 ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio, 1301 "usp-rts-gpio"); 1302 if (ret) { 1303 dev_err(&pdev->dev, "Unable request rts gpio"); 1304 goto err; 1305 } 1306 gpio_direction_output(sirfport->rts_gpio, 1); 1307 } 1308usp_no_flow_control: 1309 if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart") || 1310 of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-usp-uart")) 1311 sirfport->is_atlas7 = true; 1312 1313 if (of_property_read_u32(pdev->dev.of_node, 1314 "fifosize", 1315 &port->fifosize)) { 1316 dev_err(&pdev->dev, 1317 "Unable to find fifosize in uart node.\n"); 1318 ret = -EFAULT; 1319 
goto err; 1320 } 1321 1322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1323 if (res == NULL) { 1324 dev_err(&pdev->dev, "Insufficient resources.\n"); 1325 ret = -EFAULT; 1326 goto err; 1327 } 1328 port->mapbase = res->start; 1329 port->membase = devm_ioremap(&pdev->dev, 1330 res->start, resource_size(res)); 1331 if (!port->membase) { 1332 dev_err(&pdev->dev, "Cannot remap resource.\n"); 1333 ret = -ENOMEM; 1334 goto err; 1335 } 1336 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1337 if (res == NULL) { 1338 dev_err(&pdev->dev, "Insufficient resources.\n"); 1339 ret = -EFAULT; 1340 goto err; 1341 } 1342 port->irq = res->start; 1343 1344 sirfport->clk = devm_clk_get(&pdev->dev, NULL); 1345 if (IS_ERR(sirfport->clk)) { 1346 ret = PTR_ERR(sirfport->clk); 1347 goto err; 1348 } 1349 port->uartclk = clk_get_rate(sirfport->clk); 1350 1351 port->ops = &sirfsoc_uart_ops; 1352 spin_lock_init(&port->lock); 1353 1354 platform_set_drvdata(pdev, sirfport); 1355 ret = uart_add_one_port(&sirfsoc_uart_drv, port); 1356 if (ret != 0) { 1357 dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id); 1358 goto err; 1359 } 1360 1361 sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx"); 1362 sirfport->rx_dma_items.xmit.buf = 1363 dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, 1364 &sirfport->rx_dma_items.dma_addr, GFP_KERNEL); 1365 if (!sirfport->rx_dma_items.xmit.buf) { 1366 dev_err(port->dev, "Uart alloc bufa failed\n"); 1367 ret = -ENOMEM; 1368 goto alloc_coherent_err; 1369 } 1370 sirfport->rx_dma_items.xmit.head = 1371 sirfport->rx_dma_items.xmit.tail = 0; 1372 if (sirfport->rx_dma_chan) 1373 dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg); 1374 sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx"); 1375 if (sirfport->tx_dma_chan) 1376 dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg); 1377 if (sirfport->rx_dma_chan) { 1378 hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1379 
sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback; 1380 sirfport->is_hrt_enabled = false; 1381 } 1382 1383 return 0; 1384alloc_coherent_err: 1385 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, 1386 sirfport->rx_dma_items.xmit.buf, 1387 sirfport->rx_dma_items.dma_addr); 1388 dma_release_channel(sirfport->rx_dma_chan); 1389err: 1390 return ret; 1391} 1392 1393static int sirfsoc_uart_remove(struct platform_device *pdev) 1394{ 1395 struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev); 1396 struct uart_port *port = &sirfport->port; 1397 uart_remove_one_port(&sirfsoc_uart_drv, port); 1398 if (sirfport->rx_dma_chan) { 1399 dmaengine_terminate_all(sirfport->rx_dma_chan); 1400 dma_release_channel(sirfport->rx_dma_chan); 1401 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, 1402 sirfport->rx_dma_items.xmit.buf, 1403 sirfport->rx_dma_items.dma_addr); 1404 } 1405 if (sirfport->tx_dma_chan) { 1406 dmaengine_terminate_all(sirfport->tx_dma_chan); 1407 dma_release_channel(sirfport->tx_dma_chan); 1408 } 1409 return 0; 1410} 1411 1412#ifdef CONFIG_PM_SLEEP 1413static int 1414sirfsoc_uart_suspend(struct device *pdev) 1415{ 1416 struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev); 1417 struct uart_port *port = &sirfport->port; 1418 uart_suspend_port(&sirfsoc_uart_drv, port); 1419 return 0; 1420} 1421 1422static int sirfsoc_uart_resume(struct device *pdev) 1423{ 1424 struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev); 1425 struct uart_port *port = &sirfport->port; 1426 uart_resume_port(&sirfsoc_uart_drv, port); 1427 return 0; 1428} 1429#endif 1430 1431static const struct dev_pm_ops sirfsoc_uart_pm_ops = { 1432 SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume) 1433}; 1434 1435static struct platform_driver sirfsoc_uart_driver = { 1436 .probe = sirfsoc_uart_probe, 1437 .remove = sirfsoc_uart_remove, 1438 .driver = { 1439 .name = SIRFUART_PORT_NAME, 1440 .of_match_table = sirfsoc_uart_ids, 1441 .pm = &sirfsoc_uart_pm_ops, 
1442 }, 1443}; 1444 1445static int __init sirfsoc_uart_init(void) 1446{ 1447 int ret = 0; 1448 1449 ret = uart_register_driver(&sirfsoc_uart_drv); 1450 if (ret) 1451 goto out; 1452 1453 ret = platform_driver_register(&sirfsoc_uart_driver); 1454 if (ret) 1455 uart_unregister_driver(&sirfsoc_uart_drv); 1456out: 1457 return ret; 1458} 1459module_init(sirfsoc_uart_init); 1460 1461static void __exit sirfsoc_uart_exit(void) 1462{ 1463 platform_driver_unregister(&sirfsoc_uart_driver); 1464 uart_unregister_driver(&sirfsoc_uart_drv); 1465} 1466module_exit(sirfsoc_uart_exit); 1467 1468MODULE_LICENSE("GPL v2"); 1469MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>"); 1470MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");