/*
 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
 * GT64260, MV64340, MV64360, GT96100, ... ).
 *
 * Author: Mark A. Greer <mgreer@mvista.com>
 *
 * Based on an old MPSC driver that was in the linuxppc tree.  It appears to
 * have been created by Chris Zankel (formerly of MontaVista) but there
 * is no proper Copyright so I'm not sure.  Apparently, parts were also
 * taken from PPCBoot (now U-Boot).  Also based on drivers/serial/8250.c
 * by Russell King.
 *
 * 2004 (c) MontaVista, Software, Inc.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
/*
 * The MPSC interface is much like a typical network controller's interface.
 * That is, you set up separate rings of descriptors for transmitting and
 * receiving data.  There is also a pool of buffers (one buffer per
 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
 * out of.
 *
 * The MPSC requires two other controllers to be able to work.  The Baud Rate
 * Generator (BRG) provides a clock at programmable frequencies which determines
 * the baud rate.  The Serial DMA Controller (SDMA) takes incoming data from the
 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
 * MPSC.  It is actually the SDMA interrupt that the driver uses to keep the
 * transmit and receive "engines" going (i.e., indicate data has been
 * transmitted or received).
 *
 * NOTES:
 *
 * 1) Some chips have an erratum where several regs cannot be
 * read.  To work around that, we keep a local copy of those regs in
 * 'mpsc_port_info'.
 *
 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
 * accesses system mem with coherency enabled.  For that reason, the driver
 * assumes that coherency for that ctlr has been disabled.  This means
 * that when in a cache coherent system, the driver has to manually manage
 * the data cache on the areas that it touches because the dma_* macros are
 * basically no-ops.
 *
 * 3) There is an erratum (on PPC) where you can't use the instruction to do
 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
 *
 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
 */


#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mv643xx.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/irq.h>

#define MPSC_NUM_CTLRS 2

/*
 * Descriptors and buffers must be cache line aligned.
 * Buffer lengths must be multiples of the cache line size.
 * Number of Tx & Rx descriptors must be powers of 2.
 */
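/*
 * Illustrative sizing only (not from the Marvell manual): if
 * dma_get_cache_alignment() is 32, each ring or buffer pool below is
 * 32 entries * 32 bytes = 1 kB, and MPSC_DMA_ALLOC_SIZE works out to
 * 4 kB plus 32 bytes of alignment slack.  Actual numbers depend on the
 * platform's cache line size.
 */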
#define MPSC_RXR_ENTRIES	32
#define MPSC_RXRE_SIZE		dma_get_cache_alignment()
#define MPSC_RXR_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
#define MPSC_RXBE_SIZE		dma_get_cache_alignment()
#define MPSC_RXB_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)

#define MPSC_TXR_ENTRIES	32
#define MPSC_TXRE_SIZE		dma_get_cache_alignment()
#define MPSC_TXR_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
#define MPSC_TXBE_SIZE		dma_get_cache_alignment()
#define MPSC_TXB_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)

#define MPSC_DMA_ALLOC_SIZE	(MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
		+ MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)

/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
struct mpsc_rx_desc {
	u16 bufsize;
	u16 bytecnt;
	u32 cmdstat;
	u32 link;
	u32 buf_ptr;
} __attribute((packed));

struct mpsc_tx_desc {
	u16 bytecnt;
	u16 shadow;
	u32 cmdstat;
	u32 link;
	u32 buf_ptr;
} __attribute((packed));

/*
 * Some regs that have the erratum that you can't read them are shared
 * between the two MPSC controllers.  This struct contains those shared regs.
 */
struct mpsc_shared_regs {
	phys_addr_t mpsc_routing_base_p;
	phys_addr_t sdma_intr_base_p;

	void __iomem *mpsc_routing_base;
	void __iomem *sdma_intr_base;

	u32 MPSC_MRR_m;
	u32 MPSC_RCRR_m;
	u32 MPSC_TCRR_m;
	u32 SDMA_INTR_CAUSE_m;
	u32 SDMA_INTR_MASK_m;
};

/* The main driver data structure */
struct mpsc_port_info {
	struct uart_port port;		/* Overlay uart_port structure */

	/* Internal driver state for this ctlr */
	u8 ready;
	u8 rcv_data;
	tcflag_t c_iflag;		/* save termios->c_iflag */
	tcflag_t c_cflag;		/* save termios->c_cflag */

	/* Info passed in from platform */
	u8 mirror_regs;			/* Need to mirror regs? */
	u8 cache_mgmt;			/* Need manual cache mgmt? */
	u8 brg_can_tune;		/* BRG has baud tuning? */
	u32 brg_clk_src;
	u16 mpsc_max_idle;
	int default_baud;
	int default_bits;
	int default_parity;
	int default_flow;

	/* Physical addresses of various blocks of registers (from platform) */
	phys_addr_t mpsc_base_p;
	phys_addr_t sdma_base_p;
	phys_addr_t brg_base_p;

	/* Virtual addresses of various blocks of registers (from platform) */
	void __iomem *mpsc_base;
	void __iomem *sdma_base;
	void __iomem *brg_base;

	/* Descriptor ring and buffer allocations */
	void *dma_region;
	dma_addr_t dma_region_p;

	dma_addr_t rxr;		/* Rx descriptor ring */
	dma_addr_t rxr_p;	/* Phys addr of rxr */
	u8 *rxb;		/* Rx Ring I/O buf */
	u8 *rxb_p;		/* Phys addr of rxb */
	u32 rxr_posn;		/* First desc w/ Rx data */

	dma_addr_t txr;		/* Tx descriptor ring */
	dma_addr_t txr_p;	/* Phys addr of txr */
	u8 *txb;		/* Tx Ring I/O buf */
	u8 *txb_p;		/* Phys addr of txb */
	int txr_head;		/* Where new data goes */
	int txr_tail;		/* Where sent data comes off */
	spinlock_t tx_lock;	/* transmit lock */

	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
	u32 MPSC_MPCR_m;
	u32 MPSC_CHR_1_m;
	u32 MPSC_CHR_2_m;
	u32 MPSC_CHR_10_m;
	u32 BRG_BCR_m;
	struct mpsc_shared_regs *shared_regs;
};

/* Hooks to platform-specific code */
int mpsc_platform_register_driver(void);
void mpsc_platform_unregister_driver(void);

/* Hooks back in to mpsc common to be called by platform-specific code */
struct mpsc_port_info *mpsc_device_probe(int index);
struct mpsc_port_info *mpsc_device_remove(int index);

/* Main MPSC Configuration Register Offsets */
#define MPSC_MMCRL		0x0000
#define MPSC_MMCRH		0x0004
#define MPSC_MPCR		0x0008
#define MPSC_CHR_1		0x000c
#define MPSC_CHR_2		0x0010
#define MPSC_CHR_3		0x0014
#define MPSC_CHR_4		0x0018
#define MPSC_CHR_5		0x001c
#define MPSC_CHR_6		0x0020
#define MPSC_CHR_7		0x0024
#define MPSC_CHR_8		0x0028
#define MPSC_CHR_9		0x002c
#define MPSC_CHR_10		0x0030
#define MPSC_CHR_11		0x0034

#define MPSC_MPCR_FRZ		(1 << 9)
#define MPSC_MPCR_CL_5		0
#define MPSC_MPCR_CL_6		1
#define MPSC_MPCR_CL_7		2
#define MPSC_MPCR_CL_8		3
#define MPSC_MPCR_SBL_1		0
#define MPSC_MPCR_SBL_2		1

#define MPSC_CHR_2_TEV		(1<<1)
#define MPSC_CHR_2_TA		(1<<7)
#define MPSC_CHR_2_TTCS		(1<<9)
#define MPSC_CHR_2_REV		(1<<17)
#define MPSC_CHR_2_RA		(1<<23)
#define MPSC_CHR_2_CRD		(1<<25)
#define MPSC_CHR_2_EH		(1<<31)
#define MPSC_CHR_2_PAR_ODD	0
#define MPSC_CHR_2_PAR_SPACE	1
#define MPSC_CHR_2_PAR_EVEN	2
#define MPSC_CHR_2_PAR_MARK	3

/* MPSC Signal Routing */
#define MPSC_MRR		0x0000
#define MPSC_RCRR		0x0004
#define MPSC_TCRR		0x0008

/* Serial DMA Controller Interface Registers */
#define SDMA_SDC		0x0000
#define SDMA_SDCM		0x0008
#define SDMA_RX_DESC		0x0800
#define SDMA_RX_BUF_PTR		0x0808
#define SDMA_SCRDP		0x0810
#define SDMA_TX_DESC		0x0c00
#define SDMA_SCTDP		0x0c10
#define SDMA_SFTDP		0x0c14

#define SDMA_DESC_CMDSTAT_PE	(1<<0)
#define SDMA_DESC_CMDSTAT_CDL	(1<<1)
#define SDMA_DESC_CMDSTAT_FR	(1<<3)
#define SDMA_DESC_CMDSTAT_OR	(1<<6)
#define SDMA_DESC_CMDSTAT_BR	(1<<9)
#define SDMA_DESC_CMDSTAT_MI	(1<<10)
#define SDMA_DESC_CMDSTAT_A	(1<<11)
#define SDMA_DESC_CMDSTAT_AM	(1<<12)
#define SDMA_DESC_CMDSTAT_CT	(1<<13)
#define SDMA_DESC_CMDSTAT_C	(1<<14)
#define SDMA_DESC_CMDSTAT_ES	(1<<15)
#define SDMA_DESC_CMDSTAT_L	(1<<16)
#define SDMA_DESC_CMDSTAT_F	(1<<17)
#define SDMA_DESC_CMDSTAT_P	(1<<18)
#define SDMA_DESC_CMDSTAT_EI	(1<<23)
#define SDMA_DESC_CMDSTAT_O	(1<<31)

#define SDMA_DESC_DFLT		(SDMA_DESC_CMDSTAT_O \
		| SDMA_DESC_CMDSTAT_EI)

#define SDMA_SDC_RFT		(1<<0)
#define SDMA_SDC_SFM		(1<<1)
#define SDMA_SDC_BLMR		(1<<6)
#define SDMA_SDC_BLMT		(1<<7)
#define SDMA_SDC_POVR		(1<<8)
#define SDMA_SDC_RIFB		(1<<9)

#define SDMA_SDCM_ERD		(1<<7)
#define SDMA_SDCM_AR		(1<<15)
#define SDMA_SDCM_STD		(1<<16)
#define SDMA_SDCM_TXD		(1<<23)
#define SDMA_SDCM_AT		(1<<31)

#define SDMA_0_CAUSE_RXBUF	(1<<0)
#define SDMA_0_CAUSE_RXERR	(1<<1)
#define SDMA_0_CAUSE_TXBUF	(1<<2)
#define SDMA_0_CAUSE_TXEND	(1<<3)
#define SDMA_1_CAUSE_RXBUF	(1<<8)
#define SDMA_1_CAUSE_RXERR	(1<<9)
#define SDMA_1_CAUSE_TXBUF	(1<<10)
#define SDMA_1_CAUSE_TXEND	(1<<11)

#define SDMA_CAUSE_RX_MASK	(SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
		| SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
#define SDMA_CAUSE_TX_MASK	(SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
		| SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)

/* SDMA Interrupt registers */
#define SDMA_INTR_CAUSE		0x0000
#define SDMA_INTR_MASK		0x0080

/* Baud Rate Generator Interface Registers */
#define BRG_BCR			0x0000
#define BRG_BTR			0x0004

/*
 * Define how this driver is known to the outside (we've been assigned a
 * range on the "Low-density serial ports" major).
 */
#define MPSC_MAJOR		204
#define MPSC_MINOR_START	44
#define MPSC_DRIVER_NAME	"MPSC"
#define MPSC_DEV_NAME		"ttyMM"
#define MPSC_VERSION		"1.00"

static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
static struct mpsc_shared_regs mpsc_shared_regs;
static struct uart_driver mpsc_reg;

static void mpsc_start_rx(struct mpsc_port_info *pi);
static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
static void mpsc_release_port(struct uart_port *port);
/*
 ******************************************************************************
 *
 * Baud Rate Generator Routines (BRG)
 *
 ******************************************************************************
 */
static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);

	if (pi->brg_can_tune)
		v &= ~(1 << 25);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);

	writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
		pi->brg_base + BRG_BTR);
}

static void mpsc_brg_enable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v |= (1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}

static void mpsc_brg_disable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v &= ~(1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}

/*
 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
 * However, the input clock is divided by 16 in the MPSC b/c of how
 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
 * calculation by 16 to account for that.  So the real calculation
 * that accounts for the way the mpsc is set up is:
 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
 */
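/*
 * Worked example with illustrative numbers (not taken from any particular
 * board): for a 100 MHz BRG input clock and a requested rate of 9600 baud,
 * CDV = (100000000 / (9600 << 5)) - 1 = 324.
 */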
static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
{
	u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
	u32 v;

	mpsc_brg_disable(pi);
	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & 0xffff0000) | (cdv & 0xffff);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
	mpsc_brg_enable(pi);
}

/*
 ******************************************************************************
 *
 * Serial DMA Routines (SDMA)
 *
 ******************************************************************************
 */

static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
{
	u32 v;

	pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
		pi->port.line, burst_size);

	burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */

	if (burst_size < 2)
		v = 0x0;	/* 1 64-bit word */
	else if (burst_size < 4)
		v = 0x1;	/* 2 64-bit words */
	else if (burst_size < 8)
		v = 0x2;	/* 4 64-bit words */
	else
		v = 0x3;	/* 8 64-bit words */

	writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
		pi->sdma_base + SDMA_SDC);
}

static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
{
	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
		burst_size);

	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
		pi->sdma_base + SDMA_SDC);
	mpsc_sdma_burstsize(pi, burst_size);
}

static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
{
	u32 old, v;

	pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);

	old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
		readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v &= ~mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	if (pi->port.line)
		old >>= 8;
	return old & 0xf;
}

static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
{
	u32 v;

	pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line, mask);

	v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m
		: readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v |= mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
}

static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
		+ pi->port.line);
}

static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
		struct mpsc_rx_desc *rxre_p)
{
	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
		pi->port.line, (u32)rxre_p);

	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
}

static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
		struct mpsc_tx_desc *txre_p)
{
	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
}

static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
{
	u32 v;

	v = readl(pi->sdma_base + SDMA_SDCM);
	if (val)
		v |= val;
	else
		v = 0;
	wmb();
	writel(v, pi->sdma_base + SDMA_SDCM);
	wmb();
}

static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
{
	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
}

static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_tail * MPSC_TXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif

		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}

static void mpsc_sdma_stop(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);

	/* Abort any SDMA transfers */
	mpsc_sdma_cmd(pi, 0);
	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);

	/* Clear the SDMA current and first TX and RX pointers */
	mpsc_sdma_set_tx_ring(pi, NULL);
	mpsc_sdma_set_rx_ring(pi, NULL);

	/* Disable interrupts */
	mpsc_sdma_intr_mask(pi, 0xf);
	mpsc_sdma_intr_ack(pi);
}

/*
 ******************************************************************************
 *
 * Multi-Protocol Serial Controller Routines (MPSC)
 *
 ******************************************************************************
 */

static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);

	/* No preamble, 16x divider, low-latency */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);

	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}

static void mpsc_enter_hunt(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
			udelay(10);
	}
}

static void mpsc_freeze(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v |= MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_unfreeze(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v &= ~MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);

	pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
}

static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line, len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
		pi->port.line, len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);

	v = (v & ~(1 << 14)) | ((len & 0x1) << 14);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
{
	u32 v;

	pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);

	v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
		readl(pi->mpsc_base + MPSC_CHR_2);

	p &= 0x3;
	v = (v & ~0xc000c) | (p << 18) | (p << 2);

	if (pi->mirror_regs)
		pi->MPSC_CHR_2_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_2);
}

/*
 ******************************************************************************
 *
 * Driver Init Routines
 *
 ******************************************************************************
 */

static void mpsc_init_hw(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);

	mpsc_brg_init(pi, pi->brg_clk_src);
	mpsc_brg_enable(pi);
	mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */
	mpsc_sdma_stop(pi);
	mpsc_hw_init(pi);
}

static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
{
	int rc = 0;

	pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
		pi->port.line);

	if (!pi->dma_region) {
		if (!dma_supported(pi->port.dev, 0xffffffff)) {
			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
			rc = -ENXIO;
		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
						MPSC_DMA_ALLOC_SIZE,
						&pi->dma_region_p, GFP_KERNEL))
				== NULL) {
			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);

	if (pi->dma_region) {
		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
			pi->dma_region, pi->dma_region_p);
		pi->dma_region = NULL;
		pi->dma_region_p = (dma_addr_t)NULL;
	}
}

static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	dma_addr_t dp, dp_p;
	u8 *bp, *bp_p;
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p);	/* Wrap last back to first */

	/* Init tx ring descriptors */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */

	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
		MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)pi->dma_region,
			(ulong)pi->dma_region
			+ MPSC_DMA_ALLOC_SIZE);
#endif

	return;
}

static void mpsc_uninit_rings(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	pi->rxr = 0;
	pi->rxr_p = 0;
	pi->rxb = NULL;
	pi->rxb_p = NULL;
	pi->rxr_posn = 0;

	pi->txr = 0;
	pi->txr_p = 0;
	pi->txb = NULL;
	pi->txb_p = NULL;
	pi->txr_head = 0;
	pi->txr_tail = 0;
}

static int mpsc_make_ready(struct mpsc_port_info *pi)
{
	int rc;

	pr_debug("mpsc_make_ready[%d]: Making ctlr ready\n", pi->port.line);

	if (!pi->ready) {
		mpsc_init_hw(pi);
		if ((rc = mpsc_alloc_ring_mem(pi)))
			return rc;
		mpsc_init_rings(pi);
		pi->ready = 1;
	}

	return 0;
}

/*
 ******************************************************************************
 *
 * Interrupt Handling Routines
 *
 ******************************************************************************
 */

static int mpsc_rx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct tty_struct *tty = pi->port.info->tty;
	u32 cmdstat, bytes_in, i;
	int rc = 0;
	u8 *bp;
	char flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));

	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
		DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
			(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed.
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
			& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);

		/* Following use of tty struct directly is deprecated */
		if (unlikely(tty_buffer_request_room(tty, bytes_in)
				< bytes_in)) {
			if (tty->low_latency)
				tty_flip_buffer_push(tty);
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
				(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors.  For parity error, it is the last byte in
		 * the buffer that had the error.  As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
				| SDMA_DESC_CMDSTAT_FR
				| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			bp++;
			bytes_in--;
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
				| SDMA_DESC_CMDSTAT_FR
				| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			tty_insert_flip_char(tty, *bp, flag);
		} else {
			for (i = 0; i < bytes_in; i++)
				tty_insert_flip_char(tty, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if it's stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	tty_flip_buffer_push(tty);
	return rc;
}

static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb();			/* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
		DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
			(ulong)txre + MPSC_TXRE_SIZE);
#endif
}

static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.info->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that it's safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
				UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
				(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}

static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif

		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
				MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}

/*
 * This is the driver's interrupt handler.  To avoid a race, we first clear
 * the interrupt, then handle any completed Rx/Tx descriptors.  When done
 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
 */
static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
{
	struct mpsc_port_info *pi = dev_id;
	ulong iflags;
	int rc = IRQ_NONE;

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n", pi->port.line);

	spin_lock_irqsave(&pi->port.lock, iflags);
	mpsc_sdma_intr_ack(pi);
	if (mpsc_rx_intr(pi))
		rc = IRQ_HANDLED;
	if (mpsc_tx_intr(pi))
		rc = IRQ_HANDLED;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
	return rc;
}

/*
 ******************************************************************************
 *
 * serial_core.c Interface routines
 *
 ******************************************************************************
 */
static uint mpsc_tx_empty(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	ulong iflags;
	uint rc;

	spin_lock_irqsave(&pi->port.lock, iflags);
	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	return rc;
}

static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}

static uint mpsc_get_mctrl(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 mflags, status;

	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
		: readl(pi->mpsc_base + MPSC_CHR_10);

	mflags = 0;
	if (status & 0x1)
		mflags |= TIOCM_CTS;
	if (status & 0x2)
		mflags |= TIOCM_CAR;

	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
}

static void mpsc_stop_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_tx[%d]\n", port->line);

	mpsc_freeze(pi);
}

static void mpsc_start_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	mpsc_unfreeze(pi);
	mpsc_copy_tx_data(pi);
	mpsc_sdma_start_tx(pi);

	spin_unlock_irqrestore(&pi->tx_lock, iflags);

	pr_debug("mpsc_start_tx[%d]\n", port->line);
}

static void mpsc_start_rx(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);

	if (pi->rcv_data) {
		mpsc_enter_hunt(pi);
		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
	}
}

static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}

static void mpsc_enable_ms(struct uart_port *port)
{
}

static void mpsc_break_ctl(struct uart_port *port, int ctl)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	ulong flags;
	u32 v;

	v = ctl ? 0x00ff0000 : 0;

	spin_lock_irqsave(&pi->port.lock, flags);
	if (pi->mirror_regs)
		pi->MPSC_CHR_1_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_1);
	spin_unlock_irqrestore(&pi->port.lock, flags);
}

static int mpsc_startup(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 flag = 0;
	int rc;

	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
		port->line, pi->port.irq);

	if ((rc = mpsc_make_ready(pi)) == 0) {
		/* Setup IRQ handler */
		mpsc_sdma_intr_ack(pi);

		/* If irq's are shared, need to set flag */
		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
			flag = IRQF_SHARED;

		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
				"mpsc-sdma", pi))
			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
				pi->port.irq);

		mpsc_sdma_intr_unmask(pi, 0xf);
		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
				+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
	}

	return rc;
}

static void mpsc_shutdown(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);

	mpsc_sdma_stop(pi);
	free_irq(pi->port.irq, pi);
}

static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 baud;
	ulong flags;
	u32 chr_bits, stop_bits, par;

	pi->c_iflag = termios->c_iflag;
	pi->c_cflag = termios->c_cflag;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits = MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;
#ifdef CMSPAR
	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}
#endif

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}

static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line, MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}

static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}

static void mpsc_release_port(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	if (pi->ready) {
		mpsc_uninit_rings(pi);
		mpsc_free_ring_mem(pi);
		pi->ready = 0;
	}
}

static void mpsc_config_port(struct uart_port *port, int flags)
{
}

static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	int rc = 0;

	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
		rc = -EINVAL;
	else if (pi->port.irq != ser->irq)
		rc = -EINVAL;
	else if (ser->io_type != SERIAL_IO_MEM)
		rc = -EINVAL;
	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
		rc = -EINVAL;
	else if ((void *)pi->port.mapbase != ser->iomem_base)
		rc = -EINVAL;
	else if (pi->port.iobase != ser->port)
		rc = -EINVAL;
	else if (ser->hub6 != 0)
		rc = -EINVAL;

	return rc;
}

static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.enable_ms	= mpsc_enable_ms,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
};

/*
 ******************************************************************************
 *
 * Console Interface Routines
 *
 ******************************************************************************
 */

#ifdef CONFIG_SERIAL_MPSC_CONSOLE
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
				(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 0);
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}

static int __init mpsc_console_setup(struct console *co, char *options)
{
	struct mpsc_port_info *pi;
	int baud, bits, parity, flow;

	pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);

	if (co->index >= MPSC_NUM_CTLRS)
		co->index = 0;

	pi = &mpsc_ports[co->index];

	baud = pi->default_baud;
	bits = pi->default_bits;
	parity = pi->default_parity;
	flow = pi->default_flow;

	if (!pi->port.ops)
		return -ENODEV;

	spin_lock_init(&pi->port.lock);	/* Temporary fix--copied from 8250.c */

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&pi->port, co, baud, parity, bits, flow);
}

static struct console mpsc_console = {
	.name		= MPSC_DEV_NAME,
	.write		= mpsc_console_write,
	.device		= uart_console_device,
	.setup		= mpsc_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &mpsc_reg,
};

static int __init mpsc_late_console_init(void)
{
	pr_debug("mpsc_late_console_init: Enter\n");

	if (!(mpsc_console.flags & CON_ENABLED))
		register_console(&mpsc_console);
	return 0;
}

late_initcall(mpsc_late_console_init);

#define MPSC_CONSOLE	&mpsc_console
#else
#define MPSC_CONSOLE	NULL
#endif
/*
 ******************************************************************************
 *
 * Dummy Platform Driver to extract & map shared register regions
 *
 ******************************************************************************
 */
static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}

static int mpsc_shared_map_regs(struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_ROUTING_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE,
				"mpsc_routing_regs")) {
		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_shared_regs.mpsc_routing_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC routing base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_INTR_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE,
				"sdma_intr_regs")) {
		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
		mpsc_shared_regs.sdma_intr_base_p = r->start;
	} else {
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_resource_err("SDMA intr base");
		return -ENOMEM;
	}

	return 0;
}

static void mpsc_shared_unmap_regs(void)
{
	if (mpsc_shared_regs.mpsc_routing_base) {
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
	}
	if (mpsc_shared_regs.sdma_intr_base) {
		iounmap(mpsc_shared_regs.sdma_intr_base);
		release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
	}

	mpsc_shared_regs.mpsc_routing_base = NULL;
	mpsc_shared_regs.sdma_intr_base = NULL;

	mpsc_shared_regs.mpsc_routing_base_p = 0;
	mpsc_shared_regs.sdma_intr_base_p = 0;
}

static int mpsc_shared_drv_probe(struct platform_device *dev)
{
	struct mpsc_shared_pdata *pdata;
	int rc = -ENODEV;

	if (dev->id == 0) {
		if (!(rc = mpsc_shared_map_regs(dev))) {
			pdata = (struct mpsc_shared_pdata *)
				dev->dev.platform_data;

			mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
			mpsc_shared_regs.MPSC_RCRR_m = pdata->rcrr_val;
			mpsc_shared_regs.MPSC_TCRR_m = pdata->tcrr_val;
			mpsc_shared_regs.SDMA_INTR_CAUSE_m =
				pdata->intr_cause_val;
			mpsc_shared_regs.SDMA_INTR_MASK_m =
				pdata->intr_mask_val;

			rc = 0;
		}
	}

	return rc;
}

static int mpsc_shared_drv_remove(struct platform_device *dev)
{
	int rc = -ENODEV;

	if (dev->id == 0) {
		mpsc_shared_unmap_regs();
		mpsc_shared_regs.MPSC_MRR_m = 0;
		mpsc_shared_regs.MPSC_RCRR_m = 0;
		mpsc_shared_regs.MPSC_TCRR_m = 0;
		mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
		mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
		rc = 0;
	}

	return rc;
}

static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name	= MPSC_SHARED_NAME,
	},
};

/*
 ******************************************************************************
 *
 * Driver Interface Routines
 *
 ******************************************************************************
 */
static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,
};

static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
		struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
			&& request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
			"mpsc_regs")) {
		pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
		pi->mpsc_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC base");
		goto err;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
		pi->sdma_base = ioremap(r->start, MPSC_SDMA_REG_BLOCK_SIZE);
		pi->sdma_base_p = r->start;
	} else {
		mpsc_resource_err("SDMA base");
		if (pi->mpsc_base) {
			iounmap(pi->mpsc_base);
			pi->mpsc_base = NULL;
		}
		goto err;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BRG_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
		pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
		pi->brg_base_p = r->start;
	} else {
		mpsc_resource_err("BRG base");
		if (pi->mpsc_base) {
			iounmap(pi->mpsc_base);
			pi->mpsc_base = NULL;
		}
		if (pi->sdma_base) {
			iounmap(pi->sdma_base);
			pi->sdma_base = NULL;
		}
		goto err;
	}
	return 0;

err:
	return -ENOMEM;
}

static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
{
	if (pi->mpsc_base) {
		iounmap(pi->mpsc_base);
		release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
	}
	if (pi->sdma_base) {
		iounmap(pi->sdma_base);
		release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
	}
	if (pi->brg_base) {
		iounmap(pi->brg_base);
		release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
	}

	pi->mpsc_base = NULL;
	pi->sdma_base = NULL;
	pi->brg_base = NULL;

	pi->mpsc_base_p = 0;
	pi->sdma_base_p = 0;
	pi->brg_base_p = 0;
}

static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
		struct platform_device *pd, int num)
{
	struct mpsc_pdata *pdata;

	pdata = (struct mpsc_pdata *)pd->dev.platform_data;

	pi->port.uartclk = pdata->brg_clk_freq;
	pi->port.iotype = UPIO_MEM;
	pi->port.line = num;
	pi->port.type = PORT_MPSC;
	pi->port.fifosize = MPSC_TXBE_SIZE;
	pi->port.membase = pi->mpsc_base;
	pi->port.mapbase = (ulong)pi->mpsc_base;
	pi->port.ops = &mpsc_pops;

	pi->mirror_regs = pdata->mirror_regs;
	pi->cache_mgmt = pdata->cache_mgmt;
	pi->brg_can_tune = pdata->brg_can_tune;
	pi->brg_clk_src = pdata->brg_clk_src;
	pi->mpsc_max_idle = pdata->max_idle;
	pi->default_baud = pdata->default_baud;
	pi->default_bits = pdata->default_bits;
	pi->default_parity = pdata->default_parity;
	pi->default_flow = pdata->default_flow;

	/* Initial values of mirrored regs */
	pi->MPSC_CHR_1_m = pdata->chr_1_val;
	pi->MPSC_CHR_2_m = pdata->chr_2_val;
	pi->MPSC_CHR_10_m = pdata->chr_10_val;
	pi->MPSC_MPCR_m = pdata->mpcr_val;
	pi->BRG_BCR_m = pdata->bcr_val;

	pi->shared_regs = &mpsc_shared_regs;

	pi->port.irq = platform_get_irq(pd, 0);
}

static int mpsc_drv_probe(struct platform_device *dev)
{
	struct mpsc_port_info *pi;
	int rc = -ENODEV;

	pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);

	if (dev->id < MPSC_NUM_CTLRS) {
		pi = &mpsc_ports[dev->id];

		if (!(rc = mpsc_drv_map_regs(pi, dev))) {
			mpsc_drv_get_platform_data(pi, dev, dev->id);

			if (!(rc = mpsc_make_ready(pi))) {
				spin_lock_init(&pi->tx_lock);
				if (!(rc = uart_add_one_port(&mpsc_reg,
						&pi->port))) {
					rc = 0;
				} else {
					mpsc_release_port((struct uart_port *)
						pi);
					mpsc_drv_unmap_regs(pi);
				}
			} else {
				mpsc_drv_unmap_regs(pi);
			}
		}
	}

	return rc;
}

static int mpsc_drv_remove(struct platform_device *dev)
{
	pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);

	if (dev->id < MPSC_NUM_CTLRS) {
		uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
		mpsc_release_port((struct uart_port *)
				&mpsc_ports[dev->id].port);
		mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
		return 0;
	} else {
		return -ENODEV;
	}
}

static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.remove	= mpsc_drv_remove,
	.driver	= {
		.name	= MPSC_CTLR_NAME,
	},
};

static int __init mpsc_drv_init(void)
{
	int rc;

	printk(KERN_INFO "Serial: MPSC driver $Revision: 1.00 $\n");

	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));

	if (!(rc = uart_register_driver(&mpsc_reg))) {
		if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
			if ((rc = platform_driver_register(&mpsc_driver))) {
				platform_driver_unregister(&mpsc_shared_driver);
				uart_unregister_driver(&mpsc_reg);
			}
		} else {
			uart_unregister_driver(&mpsc_reg);
		}
	}

	return rc;
}

static void __exit mpsc_drv_exit(void)
{
	platform_driver_unregister(&mpsc_driver);
	platform_driver_unregister(&mpsc_shared_driver);
	uart_unregister_driver(&mpsc_reg);
	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
}

module_init(mpsc_drv_init);
module_exit(mpsc_drv_exit);

MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver $Revision: 1.00 $");
MODULE_VERSION(MPSC_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);