/*
 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
 * GT64260, MV64340, MV64360, GT96100, ... ).
 *
 * Author: Mark A. Greer <mgreer@mvista.com>
 *
 * Based on an old MPSC driver that was in the linuxppc tree.  It appears to
 * have been created by Chris Zankel (formerly of MontaVista) but there
 * is no proper Copyright so I'm not sure.  Apparently, parts were also
 * taken from PPCBoot (now U-Boot).  Also based on drivers/serial/8250.c
 * by Russell King.
 *
 * 2004 (c) MontaVista, Software, Inc.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
/*
 * The MPSC interface is much like a typical network controller's interface.
 * That is, you set up separate rings of descriptors for transmitting and
 * receiving data.  There is also a pool of buffers (one buffer per
 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
 * out of.
 *
 * The MPSC requires two other controllers to be able to work.  The Baud Rate
 * Generator (BRG) provides a clock at programmable frequencies which
 * determines the baud rate.  The Serial DMA Controller (SDMA) takes incoming
 * data from the MPSC and DMA's it into memory or DMA's outgoing data and
 * passes it to the MPSC.  It is actually the SDMA interrupt that the driver
 * uses to keep the transmit and receive "engines" going (i.e., indicate data
 * has been transmitted or received).
 *
 * NOTES:
 *
 * 1) Some chips have an erratum where several regs cannot be
 * read.  To work around that, we keep a local copy of those regs in
 * 'mpsc_port_info'.
 *
 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
 * accesses system mem with coherency enabled.  For that reason, the driver
 * assumes that coherency for that ctlr has been disabled.  This means
 * that when in a cache coherent system, the driver has to manually manage
 * the data cache on the areas that it touches because the dma_* macros are
 * basically no-ops.
 *
 * 3) There is an erratum (on PPC) where you can't use the instruction to do
 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
 *
 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
 */


#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mv643xx.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/irq.h>

#define MPSC_NUM_CTLRS		2

/*
 * Descriptors and buffers must be cache line aligned.
 * Buffer lengths must be multiples of the cache line size.
 * Number of Tx & Rx descriptors must be powers of 2.
 */
#define MPSC_RXR_ENTRIES	32
#define MPSC_RXRE_SIZE		dma_get_cache_alignment()
#define MPSC_RXR_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
#define MPSC_RXBE_SIZE		dma_get_cache_alignment()
#define MPSC_RXB_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)

#define MPSC_TXR_ENTRIES	32
#define MPSC_TXRE_SIZE		dma_get_cache_alignment()
#define MPSC_TXR_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
#define MPSC_TXBE_SIZE		dma_get_cache_alignment()
#define MPSC_TXB_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)

#define MPSC_DMA_ALLOC_SIZE	(MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
		+ MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)

/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
struct mpsc_rx_desc {
	u16 bufsize;
	u16 bytecnt;
	u32 cmdstat;
	u32 link;
	u32 buf_ptr;
} __attribute((packed));

struct mpsc_tx_desc {
	u16 bytecnt;
	u16 shadow;
	u32 cmdstat;
	u32 link;
	u32 buf_ptr;
} __attribute((packed));

/*
 * Some regs that have the erratum that you can't read them are shared
 * between the two MPSC controllers.  This struct contains those shared regs.
 */
struct mpsc_shared_regs {
	phys_addr_t mpsc_routing_base_p;
	phys_addr_t sdma_intr_base_p;

	void __iomem *mpsc_routing_base;
	void __iomem *sdma_intr_base;

	u32 MPSC_MRR_m;
	u32 MPSC_RCRR_m;
	u32 MPSC_TCRR_m;
	u32 SDMA_INTR_CAUSE_m;
	u32 SDMA_INTR_MASK_m;
};

/* The main driver data structure */
struct mpsc_port_info {
	struct uart_port port;	/* Overlay uart_port structure */

	/* Internal driver state for this ctlr */
	u8 ready;
	u8 rcv_data;
	tcflag_t c_iflag;	/* save termios->c_iflag */
	tcflag_t c_cflag;	/* save termios->c_cflag */

	/* Info passed in from platform */
	u8 mirror_regs;		/* Need to mirror regs? */
	u8 cache_mgmt;		/* Need manual cache mgmt? */
	u8 brg_can_tune;	/* BRG has baud tuning? */
	u32 brg_clk_src;
	u16 mpsc_max_idle;
	int default_baud;
	int default_bits;
	int default_parity;
	int default_flow;

	/* Physical addresses of various blocks of registers (from platform) */
	phys_addr_t mpsc_base_p;
	phys_addr_t sdma_base_p;
	phys_addr_t brg_base_p;

	/* Virtual addresses of various blocks of registers (from platform) */
	void __iomem *mpsc_base;
	void __iomem *sdma_base;
	void __iomem *brg_base;

	/* Descriptor ring and buffer allocations */
	void *dma_region;
	dma_addr_t dma_region_p;

	dma_addr_t rxr;		/* Rx descriptor ring */
	dma_addr_t rxr_p;	/* Phys addr of rxr */
	u8 *rxb;		/* Rx Ring I/O buf */
	u8 *rxb_p;		/* Phys addr of rxb */
	u32 rxr_posn;		/* First desc w/ Rx data */

	dma_addr_t txr;		/* Tx descriptor ring */
	dma_addr_t txr_p;	/* Phys addr of txr */
	u8 *txb;		/* Tx Ring I/O buf */
	u8 *txb_p;		/* Phys addr of txb */
	int txr_head;		/* Where new data goes */
	int txr_tail;		/* Where sent data comes off */
	spinlock_t tx_lock;	/* transmit lock */

	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
	u32 MPSC_MPCR_m;
	u32 MPSC_CHR_1_m;
	u32 MPSC_CHR_2_m;
	u32 MPSC_CHR_10_m;
	u32 BRG_BCR_m;
	struct mpsc_shared_regs *shared_regs;
};

/* Hooks to platform-specific code */
int mpsc_platform_register_driver(void);
void mpsc_platform_unregister_driver(void);

/* Hooks back in to mpsc common to be called by platform-specific code */
struct mpsc_port_info *mpsc_device_probe(int index);
struct mpsc_port_info *mpsc_device_remove(int index);

/* Main MPSC Configuration Register Offsets */
#define MPSC_MMCRL		0x0000
#define MPSC_MMCRH		0x0004
#define MPSC_MPCR		0x0008
#define MPSC_CHR_1		0x000c
#define MPSC_CHR_2		0x0010
#define MPSC_CHR_3		0x0014
#define MPSC_CHR_4		0x0018
#define MPSC_CHR_5		0x001c
#define MPSC_CHR_6		0x0020
#define MPSC_CHR_7		0x0024
#define MPSC_CHR_8		0x0028
#define MPSC_CHR_9		0x002c
#define MPSC_CHR_10		0x0030
#define MPSC_CHR_11		0x0034

#define MPSC_MPCR_FRZ		(1 << 9)
#define MPSC_MPCR_CL_5		0
#define MPSC_MPCR_CL_6		1
#define MPSC_MPCR_CL_7		2
#define MPSC_MPCR_CL_8		3
#define MPSC_MPCR_SBL_1		0
#define MPSC_MPCR_SBL_2		1

#define MPSC_CHR_2_TEV		(1<<1)
#define MPSC_CHR_2_TA		(1<<7)
#define MPSC_CHR_2_TTCS		(1<<9)
#define MPSC_CHR_2_REV		(1<<17)
#define MPSC_CHR_2_RA		(1<<23)
#define MPSC_CHR_2_CRD		(1<<25)
#define MPSC_CHR_2_EH		(1<<31)
#define MPSC_CHR_2_PAR_ODD	0
#define MPSC_CHR_2_PAR_SPACE	1
#define MPSC_CHR_2_PAR_EVEN	2
#define MPSC_CHR_2_PAR_MARK	3

/* MPSC Signal Routing */
#define MPSC_MRR		0x0000
#define MPSC_RCRR		0x0004
#define MPSC_TCRR		0x0008

/* Serial DMA Controller Interface Registers */
#define SDMA_SDC		0x0000
#define SDMA_SDCM		0x0008
#define SDMA_RX_DESC		0x0800
#define SDMA_RX_BUF_PTR		0x0808
#define SDMA_SCRDP		0x0810
#define SDMA_TX_DESC		0x0c00
#define SDMA_SCTDP		0x0c10
#define SDMA_SFTDP		0x0c14

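/*
 * SDMA descriptor command/status bits.  The ones this driver relies on are:
 * O = descriptor owned by the SDMA, EI = interrupt on completion, F/L =
 * first/last fragment of a frame, and PE/FR/OR/BR = parity, framing,
 * overrun, and break errors reported in the Rx cmdstat (see the chip
 * manual for the rest).
 */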
#define SDMA_DESC_CMDSTAT_PE	(1<<0)
#define SDMA_DESC_CMDSTAT_CDL	(1<<1)
#define SDMA_DESC_CMDSTAT_FR	(1<<3)
#define SDMA_DESC_CMDSTAT_OR	(1<<6)
#define SDMA_DESC_CMDSTAT_BR	(1<<9)
#define SDMA_DESC_CMDSTAT_MI	(1<<10)
#define SDMA_DESC_CMDSTAT_A	(1<<11)
#define SDMA_DESC_CMDSTAT_AM	(1<<12)
#define SDMA_DESC_CMDSTAT_CT	(1<<13)
#define SDMA_DESC_CMDSTAT_C	(1<<14)
#define SDMA_DESC_CMDSTAT_ES	(1<<15)
#define SDMA_DESC_CMDSTAT_L	(1<<16)
#define SDMA_DESC_CMDSTAT_F	(1<<17)
#define SDMA_DESC_CMDSTAT_P	(1<<18)
#define SDMA_DESC_CMDSTAT_EI	(1<<23)
#define SDMA_DESC_CMDSTAT_O	(1<<31)

#define SDMA_DESC_DFLT		(SDMA_DESC_CMDSTAT_O \
		| SDMA_DESC_CMDSTAT_EI)

#define SDMA_SDC_RFT		(1<<0)
#define SDMA_SDC_SFM		(1<<1)
#define SDMA_SDC_BLMR		(1<<6)
#define SDMA_SDC_BLMT		(1<<7)
#define SDMA_SDC_POVR		(1<<8)
#define SDMA_SDC_RIFB		(1<<9)

#define SDMA_SDCM_ERD		(1<<7)
#define SDMA_SDCM_AR		(1<<15)
#define SDMA_SDCM_STD		(1<<16)
#define SDMA_SDCM_TXD		(1<<23)
#define SDMA_SDCM_AT		(1<<31)

#define SDMA_0_CAUSE_RXBUF	(1<<0)
#define SDMA_0_CAUSE_RXERR	(1<<1)
#define SDMA_0_CAUSE_TXBUF	(1<<2)
#define SDMA_0_CAUSE_TXEND	(1<<3)
#define SDMA_1_CAUSE_RXBUF	(1<<8)
#define SDMA_1_CAUSE_RXERR	(1<<9)
#define SDMA_1_CAUSE_TXBUF	(1<<10)
#define SDMA_1_CAUSE_TXEND	(1<<11)

#define SDMA_CAUSE_RX_MASK	(SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
		| SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
#define SDMA_CAUSE_TX_MASK	(SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
		| SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)

/* SDMA Interrupt registers */
#define SDMA_INTR_CAUSE		0x0000
#define SDMA_INTR_MASK		0x0080

/* Baud Rate Generator Interface Registers */
#define BRG_BCR			0x0000
#define BRG_BTR			0x0004

/*
 * Define how this driver is known to the outside (we've been assigned a
 * range on the "Low-density serial ports" major).
 */
#define MPSC_MAJOR		204
#define MPSC_MINOR_START	44
#define MPSC_DRIVER_NAME	"MPSC"
#define MPSC_DEV_NAME		"ttyMM"
#define MPSC_VERSION		"1.00"

static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
static struct mpsc_shared_regs mpsc_shared_regs;
static struct uart_driver mpsc_reg;

static void mpsc_start_rx(struct mpsc_port_info *pi);
static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
static void mpsc_release_port(struct uart_port *port);
/*
 ******************************************************************************
 *
 * Baud Rate Generator Routines (BRG)
 *
 ******************************************************************************
 */
static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);

	if (pi->brg_can_tune)
		v &= ~(1 << 25);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);

	writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
		pi->brg_base + BRG_BTR);
}

static void mpsc_brg_enable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v |= (1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}

static void mpsc_brg_disable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v &= ~(1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}

/*
 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
 * However, the input clock is divided by 16 in the MPSC b/c of how
 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
 * calculation by 16 to account for that.  So the real calculation
 * that accounts for the way the mpsc is set up is:
 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
 */
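/*
 * For example (illustrative numbers only): with a 25 MHz BRG input clock and
 * a target of 9600 baud, CDV = 25000000 / (9600 << 5) - 1, i.e. roughly 80.
 */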
static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
{
	u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
	u32 v;

	mpsc_brg_disable(pi);
	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & 0xffff0000) | (cdv & 0xffff);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
	mpsc_brg_enable(pi);
}

/*
 ******************************************************************************
 *
 * Serial DMA Routines (SDMA)
 *
 ******************************************************************************
 */

static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
{
	u32 v;

	pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
		pi->port.line, burst_size);

	burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */

	if (burst_size < 2)
		v = 0x0;	/* 1 64-bit word */
	else if (burst_size < 4)
		v = 0x1;	/* 2 64-bit words */
	else if (burst_size < 8)
		v = 0x2;	/* 4 64-bit words */
	else
		v = 0x3;	/* 8 64-bit words */

	writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
		pi->sdma_base + SDMA_SDC);
}

static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
{
	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
		burst_size);

	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
		pi->sdma_base + SDMA_SDC);
	mpsc_sdma_burstsize(pi, burst_size);
}

static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
{
	u32 old, v;

	pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);

	old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
		readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v &= ~mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	if (pi->port.line)
		old >>= 8;
	return old & 0xf;
}

static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
{
	u32 v;

	pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line, mask);

	v = (pi->mirror_regs) ?
		pi->shared_regs->SDMA_INTR_MASK_m
		: readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v |= mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
}

static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
			+ pi->port.line);
}

static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
		struct mpsc_rx_desc *rxre_p)
{
	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
		pi->port.line, (u32)rxre_p);

	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
}

static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
		struct mpsc_tx_desc *txre_p)
{
	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
}

static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
{
	u32 v;

	v = readl(pi->sdma_base + SDMA_SDCM);
	if (val)
		v |= val;
	else
		v = 0;
	wmb();
	writel(v, pi->sdma_base + SDMA_SDCM);
	wmb();
}

static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
{
	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
}

static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}

static void mpsc_sdma_stop(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);

	/* Abort any SDMA transfers */
	mpsc_sdma_cmd(pi, 0);
	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);

	/* Clear the SDMA current and first TX and RX pointers */
	mpsc_sdma_set_tx_ring(pi, NULL);
	mpsc_sdma_set_rx_ring(pi, NULL);

	/* Disable interrupts */
	mpsc_sdma_intr_mask(pi, 0xf);
	mpsc_sdma_intr_ack(pi);
}

/*
 ******************************************************************************
 *
 * Multi-Protocol Serial Controller Routines (MPSC)
 *
 ******************************************************************************
 */

static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);

	/* No preamble, 16x divider, low-latency */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
	mpsc_set_baudrate(pi, pi->default_baud);

	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}

static void mpsc_enter_hunt(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
			udelay(10);
	}
}

static void mpsc_freeze(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v |= MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_unfreeze(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v &= ~MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);

	pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
}

static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line, len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
		pi->port.line, len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);

	v = (v & ~(1 << 14)) | ((len & 0x1) << 14);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

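/*
 * CHR_2 carries two 2-bit parity-mode fields (bits 2-3 and 18-19); both are
 * programmed with the same MPSC_CHR_2_PAR_* value below.
 */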
static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
{
	u32 v;

	pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);

	v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
		readl(pi->mpsc_base + MPSC_CHR_2);

	p &= 0x3;
	v = (v & ~0xc000c) | (p << 18) | (p << 2);

	if (pi->mirror_regs)
		pi->MPSC_CHR_2_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_2);
}

/*
 ******************************************************************************
 *
 * Driver Init Routines
 *
 ******************************************************************************
 */

static void mpsc_init_hw(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);

	mpsc_brg_init(pi, pi->brg_clk_src);
	mpsc_brg_enable(pi);
	mpsc_sdma_init(pi, dma_get_cache_alignment());	/* burst a cacheline */
	mpsc_sdma_stop(pi);
	mpsc_hw_init(pi);
}

static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
{
	int rc = 0;

	pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
		pi->port.line);

	if (!pi->dma_region) {
		if (!dma_supported(pi->port.dev, 0xffffffff)) {
			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
			rc = -ENXIO;
		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
						MPSC_DMA_ALLOC_SIZE,
						&pi->dma_region_p, GFP_KERNEL))
				== NULL) {
			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);

	if (pi->dma_region) {
		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
				pi->dma_region, pi->dma_region_p);
		pi->dma_region = NULL;
		pi->dma_region_p = (dma_addr_t)NULL;
	}
}

static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	dma_addr_t dp, dp_p;
	u8 *bp, *bp_p;
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
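	 * The resulting layout is [rx descriptors][rx buffers][tx descriptors]
	 * [tx buffers], with each area cacheline aligned.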
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p);	/* Wrap last back to first */

	/* Init tx ring descriptors */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */

	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)pi->dma_region,
				(ulong)pi->dma_region
				+ MPSC_DMA_ALLOC_SIZE);
#endif

	return;
}

static void mpsc_uninit_rings(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	pi->rxr = 0;
	pi->rxr_p = 0;
	pi->rxb = NULL;
	pi->rxb_p = NULL;
	pi->rxr_posn = 0;

	pi->txr = 0;
	pi->txr_p = 0;
	pi->txb = NULL;
	pi->txb_p = NULL;
	pi->txr_head = 0;
	pi->txr_tail = 0;
}

static int mpsc_make_ready(struct mpsc_port_info *pi)
{
	int rc;

	pr_debug("mpsc_make_ready[%d]: Making ctlr ready\n", pi->port.line);

	if (!pi->ready) {
		mpsc_init_hw(pi);
		if ((rc = mpsc_alloc_ring_mem(pi)))
			return rc;
		mpsc_init_rings(pi);
		pi->ready = 1;
	}

	return 0;
}

#ifdef CONFIG_CONSOLE_POLL
static int serial_polled;
#endif

/*
 ******************************************************************************
 *
 * Interrupt Handling Routines
 *
 ******************************************************************************
 */

static int mpsc_rx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct tty_struct *tty = pi->port.info->port.tty;
	u32 cmdstat, bytes_in, i;
	int rc = 0;
	u8 *bp;
	char flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));

	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed.
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
				& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);
#ifdef CONFIG_CONSOLE_POLL
		if (unlikely(serial_polled)) {
			serial_polled = 0;
			return 0;
		}
#endif
		/* Following use of tty struct directly is deprecated */
		if (unlikely(tty_buffer_request_room(tty, bytes_in)
					< bytes_in)) {
			if (tty->low_latency)
				tty_flip_buffer_push(tty);
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors.  For parity error, it is the last byte in
		 * the buffer that had the error.  As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			bp++;
			bytes_in--;
#ifdef CONFIG_CONSOLE_POLL
			if (unlikely(serial_polled)) {
				serial_polled = 0;
				return 0;
			}
#endif
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			tty_insert_flip_char(tty, *bp, flag);
		} else {
			for (i = 0; i < bytes_in; i++)
				tty_insert_flip_char(tty, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if it's stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	tty_flip_buffer_push(tty);
	return rc;
}

static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb();			/* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}

static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.info->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that it's safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
						UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}

static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}

/*
 * This is the driver's interrupt handler.  To avoid a race, we first clear
 * the interrupt, then handle any completed Rx/Tx descriptors.  When done
 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
 */
static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
{
	struct mpsc_port_info *pi = dev_id;
	ulong iflags;
	int rc = IRQ_NONE;

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n", pi->port.line);

	spin_lock_irqsave(&pi->port.lock, iflags);
	mpsc_sdma_intr_ack(pi);
	if (mpsc_rx_intr(pi))
		rc = IRQ_HANDLED;
	if (mpsc_tx_intr(pi))
		rc = IRQ_HANDLED;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
	return rc;
}

/*
 ******************************************************************************
 *
 * serial_core.c Interface routines
 *
 ******************************************************************************
 */
static uint mpsc_tx_empty(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	ulong iflags;
	uint rc;

	spin_lock_irqsave(&pi->port.lock, iflags);
	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	return rc;
}

static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}

static uint mpsc_get_mctrl(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 mflags, status;

	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
		: readl(pi->mpsc_base + MPSC_CHR_10);

	mflags = 0;
	if (status & 0x1)
		mflags |= TIOCM_CTS;
	if (status & 0x2)
		mflags |= TIOCM_CAR;

	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
}

static void mpsc_stop_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_tx[%d]\n", port->line);

	mpsc_freeze(pi);
}

static void mpsc_start_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	mpsc_unfreeze(pi);
	mpsc_copy_tx_data(pi);
	mpsc_sdma_start_tx(pi);

	spin_unlock_irqrestore(&pi->tx_lock, iflags);

	pr_debug("mpsc_start_tx[%d]\n", port->line);
}

static void mpsc_start_rx(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);

	if (pi->rcv_data) {
		mpsc_enter_hunt(pi);
		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
	}
}

static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}

static void mpsc_enable_ms(struct uart_port *port)
{
}

static void mpsc_break_ctl(struct uart_port *port, int ctl)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	ulong flags;
	u32 v;

	v = ctl ?
		0x00ff0000 : 0;

	spin_lock_irqsave(&pi->port.lock, flags);
	if (pi->mirror_regs)
		pi->MPSC_CHR_1_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_1);
	spin_unlock_irqrestore(&pi->port.lock, flags);
}

static int mpsc_startup(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 flag = 0;
	int rc;

	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
		port->line, pi->port.irq);

	if ((rc = mpsc_make_ready(pi)) == 0) {
		/* Setup IRQ handler */
		mpsc_sdma_intr_ack(pi);

		/* If irq's are shared, need to set flag */
		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
			flag = IRQF_SHARED;

		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
				"mpsc-sdma", pi))
			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
					pi->port.irq);

		mpsc_sdma_intr_unmask(pi, 0xf);
		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
					+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
	}

	return rc;
}

static void mpsc_shutdown(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);

	mpsc_sdma_stop(pi);
	free_irq(pi->port.irq, pi);
}

static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 baud;
	ulong flags;
	u32 chr_bits, stop_bits, par;

	pi->c_iflag = termios->c_iflag;
	pi->c_cflag = termios->c_cflag;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits = MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;
#ifdef CMSPAR
	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}
#endif

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	/* Start or stop the receiver so that it tracks CREAD */
	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}

static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line, MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}

static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}

static void mpsc_release_port(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	if (pi->ready) {
		mpsc_uninit_rings(pi);
		mpsc_free_ring_mem(pi);
		pi->ready = 0;
	}
}

static void mpsc_config_port(struct uart_port *port, int flags)
{
}

static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	int rc = 0;

	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
		rc = -EINVAL;
	else if (pi->port.irq != ser->irq)
		rc = -EINVAL;
	else if (ser->io_type != SERIAL_IO_MEM)
		rc = -EINVAL;
	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
		rc = -EINVAL;
	else if ((void *)pi->port.mapbase != ser->iomem_base)
		rc = -EINVAL;
	else if (pi->port.iobase != ser->port)
		rc = -EINVAL;
	else if (ser->hub6 != 0)
		rc = -EINVAL;

	return rc;
}

#ifdef CONFIG_CONSOLE_POLL
/*
 * Serial polling routines for writing and reading from the uart while
 * in an interrupt or debug context.
 */

static char poll_buf[2048];
static int poll_ptr;
static int poll_cnt;
static void mpsc_put_poll_char(struct uart_port *port, unsigned char c);

static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	struct mpsc_rx_desc *rxre;
	u32 cmdstat, bytes_in, i;
	u8 *bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_get_poll_char[%d]: Polling for Rx data\n", pi->port.line);

	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
				(pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
				MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
				!((cmdstat = be32_to_cpu(rxre->cmdstat)) &
					SDMA_DESC_CMDSTAT_O)) {
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *)bp,
					MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
						(ulong)bp + MPSC_RXBE_SIZE);
#endif
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
					SDMA_DESC_CMDSTAT_FR |
					SDMA_DESC_CMDSTAT_OR))) &&
					!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
					SDMA_DESC_CMDSTAT_EI |
					SDMA_DESC_CMDSTAT_F |
					SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
					(pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if it's stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}


static void mpsc_put_poll_char(struct uart_port *port, unsigned char c)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 data;

	data = readl(pi->mpsc_base + MPSC_MPCR);
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS)
		;
}
#endif

static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.enable_ms	= mpsc_enable_ms,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= mpsc_get_poll_char,
	.poll_put_char	= mpsc_put_poll_char,
#endif
};

/*
 ******************************************************************************
 *
 * Console Interface Routines
 *
 ******************************************************************************
 */

#ifdef CONFIG_SERIAL_MPSC_CONSOLE
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 0);
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}

static int __init mpsc_console_setup(struct console *co, char *options)
{
	struct mpsc_port_info *pi;
	int baud, bits, parity, flow;

	pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);

	if (co->index >= MPSC_NUM_CTLRS)
		co->index = 0;

	pi = &mpsc_ports[co->index];

	baud = pi->default_baud;
	bits = pi->default_bits;
	parity = pi->default_parity;
	flow = pi->default_flow;

	if (!pi->port.ops)
		return -ENODEV;

	spin_lock_init(&pi->port.lock);	/* Temporary fix--copied from 8250.c */

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&pi->port, co, baud, parity, bits, flow);
}

static struct console mpsc_console = {
	.name	= MPSC_DEV_NAME,
	.write	= mpsc_console_write,
	.device	= uart_console_device,
	.setup	= mpsc_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mpsc_reg,
};

static int __init mpsc_late_console_init(void)
{
	pr_debug("mpsc_late_console_init: Enter\n");

	if (!(mpsc_console.flags & CON_ENABLED))
		register_console(&mpsc_console);
	return 0;
}

late_initcall(mpsc_late_console_init);

#define MPSC_CONSOLE	&mpsc_console
#else
#define MPSC_CONSOLE	NULL
#endif
/*
 ******************************************************************************
 *
 * Dummy Platform Driver to extract & map shared register regions
 *
 ******************************************************************************
 */
static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}

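/*
 * Map the register blocks shared by both controllers (MPSC routing and SDMA
 * interrupt cause/mask); the resource indices (MPSC_ROUTING_BASE_ORDER,
 * MPSC_SDMA_INTR_BASE_ORDER) are defined by the platform setup code.
 */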
static int mpsc_shared_map_regs(struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_ROUTING_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE,
				"mpsc_routing_regs")) {
		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_shared_regs.mpsc_routing_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC routing base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_INTR_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE,
				"sdma_intr_regs")) {
		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
		mpsc_shared_regs.sdma_intr_base_p = r->start;
	} else {
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_resource_err("SDMA intr base");
		return -ENOMEM;
	}

	return 0;
}

static void mpsc_shared_unmap_regs(void)
{
	if (mpsc_shared_regs.mpsc_routing_base) {
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
	}
	if (mpsc_shared_regs.sdma_intr_base) {
		iounmap(mpsc_shared_regs.sdma_intr_base);
		release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
	}

	mpsc_shared_regs.mpsc_routing_base = NULL;
	mpsc_shared_regs.sdma_intr_base = NULL;

	mpsc_shared_regs.mpsc_routing_base_p = 0;
	mpsc_shared_regs.sdma_intr_base_p = 0;
}

static int mpsc_shared_drv_probe(struct platform_device *dev)
{
	struct mpsc_shared_pdata *pdata;
	int rc = -ENODEV;

	if (dev->id == 0) {
		if (!(rc = mpsc_shared_map_regs(dev))) {
			pdata = (struct mpsc_shared_pdata *)
				dev->dev.platform_data;

			mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
			mpsc_shared_regs.MPSC_RCRR_m = pdata->rcrr_val;
			mpsc_shared_regs.MPSC_TCRR_m = pdata->tcrr_val;
			mpsc_shared_regs.SDMA_INTR_CAUSE_m =
				pdata->intr_cause_val;
			mpsc_shared_regs.SDMA_INTR_MASK_m =
				pdata->intr_mask_val;

			rc = 0;
		}
	}

	return rc;
}

static int mpsc_shared_drv_remove(struct platform_device *dev)
{
	int rc = -ENODEV;

	if (dev->id == 0) {
		mpsc_shared_unmap_regs();
		mpsc_shared_regs.MPSC_MRR_m = 0;
		mpsc_shared_regs.MPSC_RCRR_m = 0;
		mpsc_shared_regs.MPSC_TCRR_m = 0;
		mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
		mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
		rc = 0;
	}

	return rc;
}

static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name = MPSC_SHARED_NAME,
	},
};

/*
 ******************************************************************************
 *
 * Driver Interface Routines
 *
 ******************************************************************************
 */
static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,
};

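/*
 * Map this port's private register blocks (MPSC, SDMA, and BRG); each block
 * comes from its own platform resource.
 */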
static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
		struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
			&& request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
			"mpsc_regs")) {
		pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
		pi->mpsc_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC base");
		goto err;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
		pi->sdma_base = ioremap(r->start, MPSC_SDMA_REG_BLOCK_SIZE);
		pi->sdma_base_p = r->start;
	} else {
		mpsc_resource_err("SDMA base");
		if (pi->mpsc_base) {
			iounmap(pi->mpsc_base);
			pi->mpsc_base = NULL;
		}
		goto err;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BRG_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
		pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
		pi->brg_base_p = r->start;
	} else {
		mpsc_resource_err("BRG base");
		if (pi->mpsc_base) {
			iounmap(pi->mpsc_base);
			pi->mpsc_base = NULL;
		}
		if (pi->sdma_base) {
			iounmap(pi->sdma_base);
			pi->sdma_base = NULL;
		}
		goto err;
	}
	return 0;

err:
	return -ENOMEM;
}

static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
{
	if (pi->mpsc_base) {
		iounmap(pi->mpsc_base);
		release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
	}
	if (pi->sdma_base) {
		iounmap(pi->sdma_base);
		release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
	}
	if (pi->brg_base) {
		iounmap(pi->brg_base);
		release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
	}

	pi->mpsc_base = NULL;
	pi->sdma_base = NULL;
	pi->brg_base = NULL;

	pi->mpsc_base_p = 0;
	pi->sdma_base_p = 0;
	pi->brg_base_p = 0;
}

static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
		struct platform_device *pd, int num)
{
	struct mpsc_pdata *pdata;

	pdata = (struct mpsc_pdata *)pd->dev.platform_data;

	pi->port.uartclk = pdata->brg_clk_freq;
	pi->port.iotype = UPIO_MEM;
	pi->port.line = num;
	pi->port.type = PORT_MPSC;
	pi->port.fifosize = MPSC_TXBE_SIZE;
	pi->port.membase = pi->mpsc_base;
	pi->port.mapbase = (ulong)pi->mpsc_base;
	pi->port.ops = &mpsc_pops;

	pi->mirror_regs = pdata->mirror_regs;
	pi->cache_mgmt = pdata->cache_mgmt;
	pi->brg_can_tune = pdata->brg_can_tune;
	pi->brg_clk_src = pdata->brg_clk_src;
	pi->mpsc_max_idle = pdata->max_idle;
	pi->default_baud = pdata->default_baud;
	pi->default_bits = pdata->default_bits;
	pi->default_parity = pdata->default_parity;
	pi->default_flow = pdata->default_flow;

	/* Initial values of mirrored regs */
	pi->MPSC_CHR_1_m = pdata->chr_1_val;
	pi->MPSC_CHR_2_m = pdata->chr_2_val;
	pi->MPSC_CHR_10_m = pdata->chr_10_val;
	pi->MPSC_MPCR_m = pdata->mpcr_val;
	pi->BRG_BCR_m = pdata->bcr_val;

	pi->shared_regs = &mpsc_shared_regs;

	pi->port.irq = platform_get_irq(pd, 0);
}

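/*
 * Per-port probe: map the register blocks, pull the configuration from the
 * platform data, get the controller and rings ready, and register the port
 * with serial_core.
 */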
static int mpsc_drv_probe(struct platform_device *dev)
{
	struct mpsc_port_info *pi;
	int rc = -ENODEV;

	pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);

	if (dev->id < MPSC_NUM_CTLRS) {
		pi = &mpsc_ports[dev->id];

		if (!(rc = mpsc_drv_map_regs(pi, dev))) {
			mpsc_drv_get_platform_data(pi, dev, dev->id);

			if (!(rc = mpsc_make_ready(pi))) {
				spin_lock_init(&pi->tx_lock);
				if (!(rc = uart_add_one_port(&mpsc_reg,
								&pi->port))) {
					rc = 0;
				} else {
					mpsc_release_port(
							(struct uart_port *)pi);
					mpsc_drv_unmap_regs(pi);
				}
			} else {
				mpsc_drv_unmap_regs(pi);
			}
		}
	}

	return rc;
}

static int mpsc_drv_remove(struct platform_device *dev)
{
	pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);

	if (dev->id < MPSC_NUM_CTLRS) {
		uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
		mpsc_release_port((struct uart_port *)
				&mpsc_ports[dev->id].port);
		mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
		return 0;
	} else {
		return -ENODEV;
	}
}

static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.remove	= mpsc_drv_remove,
	.driver	= {
		.name	= MPSC_CTLR_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mpsc_drv_init(void)
{
	int rc;

	printk(KERN_INFO "Serial: MPSC driver\n");

	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));

	if (!(rc = uart_register_driver(&mpsc_reg))) {
		if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
			if ((rc = platform_driver_register(&mpsc_driver))) {
				platform_driver_unregister(&mpsc_shared_driver);
				uart_unregister_driver(&mpsc_reg);
			}
		} else {
			uart_unregister_driver(&mpsc_reg);
		}
	}

	return rc;
}

static void __exit mpsc_drv_exit(void)
{
	platform_driver_unregister(&mpsc_driver);
	platform_driver_unregister(&mpsc_shared_driver);
	uart_unregister_driver(&mpsc_reg);
	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
}

module_init(mpsc_drv_init);
module_exit(mpsc_drv_exit);

MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
MODULE_VERSION(MPSC_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
MODULE_ALIAS("platform:" MPSC_CTLR_NAME);