Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at 989a7241df87526bfef0396567e71ebe53a84ae4 8171 lines 237 kB view raw
1/* 2 * linux/drivers/char/synclink.c 3 * 4 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $ 5 * 6 * Device driver for Microgate SyncLink ISA and PCI 7 * high speed multiprotocol serial adapters. 8 * 9 * written by Paul Fulghum for Microgate Corporation 10 * paulkf@microgate.com 11 * 12 * Microgate and SyncLink are trademarks of Microgate Corporation 13 * 14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds 15 * 16 * Original release 01/11/99 17 * 18 * This code is released under the GNU General Public License (GPL) 19 * 20 * This driver is primarily intended for use in synchronous 21 * HDLC mode. Asynchronous mode is also provided. 22 * 23 * When operating in synchronous mode, each call to mgsl_write() 24 * contains exactly one complete HDLC frame. Calling mgsl_put_char 25 * will start assembling an HDLC frame that will not be sent until 26 * mgsl_flush_chars or mgsl_write is called. 27 * 28 * Synchronous receive data is reported as complete frames. To accomplish 29 * this, the TTY flip buffer is bypassed (too small to hold largest 30 * frame and may fragment frames) and the line discipline 31 * receive entry point is called directly. 32 * 33 * This driver has been tested with a slightly modified ppp.c driver 34 * for synchronous PPP. 35 * 36 * 2000/02/16 37 * Added interface for syncppp.c driver (an alternate synchronous PPP 38 * implementation that also supports Cisco HDLC). Each device instance 39 * registers as a tty device AND a network device (if dosyncppp option 40 * is set for the device). The functionality is determined by which 41 * device interface is opened. 42 * 43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 46 * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 53 * OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56#if defined(__i386__) 57# define BREAKPOINT() asm(" int $3"); 58#else 59# define BREAKPOINT() { } 60#endif 61 62#define MAX_ISA_DEVICES 10 63#define MAX_PCI_DEVICES 10 64#define MAX_TOTAL_DEVICES 20 65 66#include <linux/module.h> 67#include <linux/errno.h> 68#include <linux/signal.h> 69#include <linux/sched.h> 70#include <linux/timer.h> 71#include <linux/interrupt.h> 72#include <linux/pci.h> 73#include <linux/tty.h> 74#include <linux/tty_flip.h> 75#include <linux/serial.h> 76#include <linux/major.h> 77#include <linux/string.h> 78#include <linux/fcntl.h> 79#include <linux/ptrace.h> 80#include <linux/ioport.h> 81#include <linux/mm.h> 82#include <linux/slab.h> 83#include <linux/delay.h> 84#include <linux/netdevice.h> 85#include <linux/vmalloc.h> 86#include <linux/init.h> 87#include <linux/ioctl.h> 88#include <linux/synclink.h> 89 90#include <asm/system.h> 91#include <asm/io.h> 92#include <asm/irq.h> 93#include <asm/dma.h> 94#include <linux/bitops.h> 95#include <asm/types.h> 96#include <linux/termios.h> 97#include <linux/workqueue.h> 98#include <linux/hdlc.h> 99#include <linux/dma-mapping.h> 100 101#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) 102#define SYNCLINK_GENERIC_HDLC 1 103#else 104#define SYNCLINK_GENERIC_HDLC 0 105#endif 106 107#define GET_USER(error,value,addr) error = get_user(value,addr) 108#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? 
-EFAULT : 0 109#define PUT_USER(error,value,addr) error = put_user(value,addr) 110#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0 111 112#include <asm/uaccess.h> 113 114#define RCLRVALUE 0xffff 115 116static MGSL_PARAMS default_params = { 117 MGSL_MODE_HDLC, /* unsigned long mode */ 118 0, /* unsigned char loopback; */ 119 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ 120 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 121 0, /* unsigned long clock_speed; */ 122 0xff, /* unsigned char addr_filter; */ 123 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ 124 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ 125 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 126 9600, /* unsigned long data_rate; */ 127 8, /* unsigned char data_bits; */ 128 1, /* unsigned char stop_bits; */ 129 ASYNC_PARITY_NONE /* unsigned char parity; */ 130}; 131 132#define SHARED_MEM_ADDRESS_SIZE 0x40000 133#define BUFFERLISTSIZE 4096 134#define DMABUFFERSIZE 4096 135#define MAXRXFRAMES 7 136 137typedef struct _DMABUFFERENTRY 138{ 139 u32 phys_addr; /* 32-bit flat physical address of data buffer */ 140 volatile u16 count; /* buffer size/data count */ 141 volatile u16 status; /* Control/status field */ 142 volatile u16 rcc; /* character count field */ 143 u16 reserved; /* padding required by 16C32 */ 144 u32 link; /* 32-bit flat link to next buffer entry */ 145 char *virt_addr; /* virtual address of data buffer */ 146 u32 phys_entry; /* physical address of this buffer entry */ 147 dma_addr_t dma_addr; 148} DMABUFFERENTRY, *DMAPBUFFERENTRY; 149 150/* The queue of BH actions to be performed */ 151 152#define BH_RECEIVE 1 153#define BH_TRANSMIT 2 154#define BH_STATUS 4 155 156#define IO_PIN_SHUTDOWN_LIMIT 100 157 158struct _input_signal_events { 159 int ri_up; 160 int ri_down; 161 int dsr_up; 162 int dsr_down; 163 int dcd_up; 164 int dcd_down; 165 int cts_up; 166 int cts_down; 167}; 168 169/* transmit holding 
buffer definitions*/ 170#define MAX_TX_HOLDING_BUFFERS 5 171struct tx_holding_buffer { 172 int buffer_size; 173 unsigned char * buffer; 174}; 175 176 177/* 178 * Device instance data structure 179 */ 180 181struct mgsl_struct { 182 int magic; 183 int flags; 184 int count; /* count of opens */ 185 int line; 186 int hw_version; 187 unsigned short close_delay; 188 unsigned short closing_wait; /* time to wait before closing */ 189 190 struct mgsl_icount icount; 191 192 struct tty_struct *tty; 193 int timeout; 194 int x_char; /* xon/xoff character */ 195 int blocked_open; /* # of blocked opens */ 196 u16 read_status_mask; 197 u16 ignore_status_mask; 198 unsigned char *xmit_buf; 199 int xmit_head; 200 int xmit_tail; 201 int xmit_cnt; 202 203 wait_queue_head_t open_wait; 204 wait_queue_head_t close_wait; 205 206 wait_queue_head_t status_event_wait_q; 207 wait_queue_head_t event_wait_q; 208 struct timer_list tx_timer; /* HDLC transmit timeout timer */ 209 struct mgsl_struct *next_device; /* device list link */ 210 211 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */ 212 struct work_struct task; /* task structure for scheduling bh */ 213 214 u32 EventMask; /* event trigger mask */ 215 u32 RecordedEvents; /* pending events */ 216 217 u32 max_frame_size; /* as set by device config */ 218 219 u32 pending_bh; 220 221 int bh_running; /* Protection from multiple */ 222 int isr_overflow; 223 int bh_requested; 224 225 int dcd_chkcount; /* check counts to prevent */ 226 int cts_chkcount; /* too many IRQs if a signal */ 227 int dsr_chkcount; /* is floating */ 228 int ri_chkcount; 229 230 char *buffer_list; /* virtual address of Rx & Tx buffer lists */ 231 u32 buffer_list_phys; 232 dma_addr_t buffer_list_dma_addr; 233 234 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */ 235 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */ 236 unsigned int current_rx_buffer; 237 238 int num_tx_dma_buffers; /* number of tx dma frames required 
*/ 239 int tx_dma_buffers_used; 240 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */ 241 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */ 242 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */ 243 int current_tx_buffer; /* next tx dma buffer to be loaded */ 244 245 unsigned char *intermediate_rxbuffer; 246 247 int num_tx_holding_buffers; /* number of tx holding buffer allocated */ 248 int get_tx_holding_index; /* next tx holding buffer for adapter to load */ 249 int put_tx_holding_index; /* next tx holding buffer to store user request */ 250 int tx_holding_count; /* number of tx holding buffers waiting */ 251 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS]; 252 253 int rx_enabled; 254 int rx_overflow; 255 int rx_rcc_underrun; 256 257 int tx_enabled; 258 int tx_active; 259 u32 idle_mode; 260 261 u16 cmr_value; 262 u16 tcsr_value; 263 264 char device_name[25]; /* device instance name */ 265 266 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ 267 unsigned char bus; /* expansion bus number (zero based) */ 268 unsigned char function; /* PCI device number */ 269 270 unsigned int io_base; /* base I/O address of adapter */ 271 unsigned int io_addr_size; /* size of the I/O address range */ 272 int io_addr_requested; /* nonzero if I/O address requested */ 273 274 unsigned int irq_level; /* interrupt level */ 275 unsigned long irq_flags; 276 int irq_requested; /* nonzero if IRQ requested */ 277 278 unsigned int dma_level; /* DMA channel */ 279 int dma_requested; /* nonzero if dma channel requested */ 280 281 u16 mbre_bit; 282 u16 loopback_bits; 283 u16 usc_idle_mode; 284 285 MGSL_PARAMS params; /* communications parameters */ 286 287 unsigned char serial_signals; /* current serial signal states */ 288 289 int irq_occurred; /* for diagnostics use */ 290 unsigned int init_error; /* Initialization startup error (DIAGS) */ 291 int fDiagnosticsmode; /* Driver in Diagnostic mode? 
(DIAGS) */ 292 293 u32 last_mem_alloc; 294 unsigned char* memory_base; /* shared memory address (PCI only) */ 295 u32 phys_memory_base; 296 int shared_mem_requested; 297 298 unsigned char* lcr_base; /* local config registers (PCI only) */ 299 u32 phys_lcr_base; 300 u32 lcr_offset; 301 int lcr_mem_requested; 302 303 u32 misc_ctrl_value; 304 char flag_buf[MAX_ASYNC_BUFFER_SIZE]; 305 char char_buf[MAX_ASYNC_BUFFER_SIZE]; 306 BOOLEAN drop_rts_on_tx_done; 307 308 BOOLEAN loopmode_insert_requested; 309 BOOLEAN loopmode_send_done_requested; 310 311 struct _input_signal_events input_signal_events; 312 313 /* generic HDLC device parts */ 314 int netcount; 315 int dosyncppp; 316 spinlock_t netlock; 317 318#if SYNCLINK_GENERIC_HDLC 319 struct net_device *netdev; 320#endif 321}; 322 323#define MGSL_MAGIC 0x5401 324 325/* 326 * The size of the serial xmit buffer is 1 page, or 4096 bytes 327 */ 328#ifndef SERIAL_XMIT_SIZE 329#define SERIAL_XMIT_SIZE 4096 330#endif 331 332/* 333 * These macros define the offsets used in calculating the 334 * I/O address of the specified USC registers. 335 */ 336 337 338#define DCPIN 2 /* Bit 1 of I/O address */ 339#define SDPIN 4 /* Bit 2 of I/O address */ 340 341#define DCAR 0 /* DMA command/address register */ 342#define CCAR SDPIN /* channel command/address register */ 343#define DATAREG DCPIN + SDPIN /* serial data register */ 344#define MSBONLY 0x41 345#define LSBONLY 0x40 346 347/* 348 * These macros define the register address (ordinal number) 349 * used for writing address/value pairs to the USC. 
350 */ 351 352#define CMR 0x02 /* Channel mode Register */ 353#define CCSR 0x04 /* Channel Command/status Register */ 354#define CCR 0x06 /* Channel Control Register */ 355#define PSR 0x08 /* Port status Register */ 356#define PCR 0x0a /* Port Control Register */ 357#define TMDR 0x0c /* Test mode Data Register */ 358#define TMCR 0x0e /* Test mode Control Register */ 359#define CMCR 0x10 /* Clock mode Control Register */ 360#define HCR 0x12 /* Hardware Configuration Register */ 361#define IVR 0x14 /* Interrupt Vector Register */ 362#define IOCR 0x16 /* Input/Output Control Register */ 363#define ICR 0x18 /* Interrupt Control Register */ 364#define DCCR 0x1a /* Daisy Chain Control Register */ 365#define MISR 0x1c /* Misc Interrupt status Register */ 366#define SICR 0x1e /* status Interrupt Control Register */ 367#define RDR 0x20 /* Receive Data Register */ 368#define RMR 0x22 /* Receive mode Register */ 369#define RCSR 0x24 /* Receive Command/status Register */ 370#define RICR 0x26 /* Receive Interrupt Control Register */ 371#define RSR 0x28 /* Receive Sync Register */ 372#define RCLR 0x2a /* Receive count Limit Register */ 373#define RCCR 0x2c /* Receive Character count Register */ 374#define TC0R 0x2e /* Time Constant 0 Register */ 375#define TDR 0x30 /* Transmit Data Register */ 376#define TMR 0x32 /* Transmit mode Register */ 377#define TCSR 0x34 /* Transmit Command/status Register */ 378#define TICR 0x36 /* Transmit Interrupt Control Register */ 379#define TSR 0x38 /* Transmit Sync Register */ 380#define TCLR 0x3a /* Transmit count Limit Register */ 381#define TCCR 0x3c /* Transmit Character count Register */ 382#define TC1R 0x3e /* Time Constant 1 Register */ 383 384 385/* 386 * MACRO DEFINITIONS FOR DMA REGISTERS 387 */ 388 389#define DCR 0x06 /* DMA Control Register (shared) */ 390#define DACR 0x08 /* DMA Array count Register (shared) */ 391#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */ 392#define DIVR 0x14 /* DMA Interrupt Vector Register 
(shared) */ 393#define DICR 0x18 /* DMA Interrupt Control Register (shared) */ 394#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */ 395#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */ 396 397#define TDMR 0x02 /* Transmit DMA mode Register */ 398#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */ 399#define TBCR 0x2a /* Transmit Byte count Register */ 400#define TARL 0x2c /* Transmit Address Register (low) */ 401#define TARU 0x2e /* Transmit Address Register (high) */ 402#define NTBCR 0x3a /* Next Transmit Byte count Register */ 403#define NTARL 0x3c /* Next Transmit Address Register (low) */ 404#define NTARU 0x3e /* Next Transmit Address Register (high) */ 405 406#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */ 407#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */ 408#define RBCR 0xaa /* Receive Byte count Register */ 409#define RARL 0xac /* Receive Address Register (low) */ 410#define RARU 0xae /* Receive Address Register (high) */ 411#define NRBCR 0xba /* Next Receive Byte count Register */ 412#define NRARL 0xbc /* Next Receive Address Register (low) */ 413#define NRARU 0xbe /* Next Receive Address Register (high) */ 414 415 416/* 417 * MACRO DEFINITIONS FOR MODEM STATUS BITS 418 */ 419 420#define MODEMSTATUS_DTR 0x80 421#define MODEMSTATUS_DSR 0x40 422#define MODEMSTATUS_RTS 0x20 423#define MODEMSTATUS_CTS 0x10 424#define MODEMSTATUS_RI 0x04 425#define MODEMSTATUS_DCD 0x01 426 427 428/* 429 * Channel Command/Address Register (CCAR) Command Codes 430 */ 431 432#define RTCmd_Null 0x0000 433#define RTCmd_ResetHighestIus 0x1000 434#define RTCmd_TriggerChannelLoadDma 0x2000 435#define RTCmd_TriggerRxDma 0x2800 436#define RTCmd_TriggerTxDma 0x3000 437#define RTCmd_TriggerRxAndTxDma 0x3800 438#define RTCmd_PurgeRxFifo 0x4800 439#define RTCmd_PurgeTxFifo 0x5000 440#define RTCmd_PurgeRxAndTxFifo 0x5800 441#define RTCmd_LoadRcc 0x6800 442#define RTCmd_LoadTcc 0x7000 443#define RTCmd_LoadRccAndTcc 0x7800 444#define 
RTCmd_LoadTC0 0x8800 445#define RTCmd_LoadTC1 0x9000 446#define RTCmd_LoadTC0AndTC1 0x9800 447#define RTCmd_SerialDataLSBFirst 0xa000 448#define RTCmd_SerialDataMSBFirst 0xa800 449#define RTCmd_SelectBigEndian 0xb000 450#define RTCmd_SelectLittleEndian 0xb800 451 452 453/* 454 * DMA Command/Address Register (DCAR) Command Codes 455 */ 456 457#define DmaCmd_Null 0x0000 458#define DmaCmd_ResetTxChannel 0x1000 459#define DmaCmd_ResetRxChannel 0x1200 460#define DmaCmd_StartTxChannel 0x2000 461#define DmaCmd_StartRxChannel 0x2200 462#define DmaCmd_ContinueTxChannel 0x3000 463#define DmaCmd_ContinueRxChannel 0x3200 464#define DmaCmd_PauseTxChannel 0x4000 465#define DmaCmd_PauseRxChannel 0x4200 466#define DmaCmd_AbortTxChannel 0x5000 467#define DmaCmd_AbortRxChannel 0x5200 468#define DmaCmd_InitTxChannel 0x7000 469#define DmaCmd_InitRxChannel 0x7200 470#define DmaCmd_ResetHighestDmaIus 0x8000 471#define DmaCmd_ResetAllChannels 0x9000 472#define DmaCmd_StartAllChannels 0xa000 473#define DmaCmd_ContinueAllChannels 0xb000 474#define DmaCmd_PauseAllChannels 0xc000 475#define DmaCmd_AbortAllChannels 0xd000 476#define DmaCmd_InitAllChannels 0xf000 477 478#define TCmd_Null 0x0000 479#define TCmd_ClearTxCRC 0x2000 480#define TCmd_SelectTicrTtsaData 0x4000 481#define TCmd_SelectTicrTxFifostatus 0x5000 482#define TCmd_SelectTicrIntLevel 0x6000 483#define TCmd_SelectTicrdma_level 0x7000 484#define TCmd_SendFrame 0x8000 485#define TCmd_SendAbort 0x9000 486#define TCmd_EnableDleInsertion 0xc000 487#define TCmd_DisableDleInsertion 0xd000 488#define TCmd_ClearEofEom 0xe000 489#define TCmd_SetEofEom 0xf000 490 491#define RCmd_Null 0x0000 492#define RCmd_ClearRxCRC 0x2000 493#define RCmd_EnterHuntmode 0x3000 494#define RCmd_SelectRicrRtsaData 0x4000 495#define RCmd_SelectRicrRxFifostatus 0x5000 496#define RCmd_SelectRicrIntLevel 0x6000 497#define RCmd_SelectRicrdma_level 0x7000 498 499/* 500 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR) 501 */ 502 503#define 
RECEIVE_STATUS BIT5 504#define RECEIVE_DATA BIT4 505#define TRANSMIT_STATUS BIT3 506#define TRANSMIT_DATA BIT2 507#define IO_PIN BIT1 508#define MISC BIT0 509 510 511/* 512 * Receive status Bits in Receive Command/status Register RCSR 513 */ 514 515#define RXSTATUS_SHORT_FRAME BIT8 516#define RXSTATUS_CODE_VIOLATION BIT8 517#define RXSTATUS_EXITED_HUNT BIT7 518#define RXSTATUS_IDLE_RECEIVED BIT6 519#define RXSTATUS_BREAK_RECEIVED BIT5 520#define RXSTATUS_ABORT_RECEIVED BIT5 521#define RXSTATUS_RXBOUND BIT4 522#define RXSTATUS_CRC_ERROR BIT3 523#define RXSTATUS_FRAMING_ERROR BIT3 524#define RXSTATUS_ABORT BIT2 525#define RXSTATUS_PARITY_ERROR BIT2 526#define RXSTATUS_OVERRUN BIT1 527#define RXSTATUS_DATA_AVAILABLE BIT0 528#define RXSTATUS_ALL 0x01f6 529#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) ) 530 531/* 532 * Values for setting transmit idle mode in 533 * Transmit Control/status Register (TCSR) 534 */ 535#define IDLEMODE_FLAGS 0x0000 536#define IDLEMODE_ALT_ONE_ZERO 0x0100 537#define IDLEMODE_ZERO 0x0200 538#define IDLEMODE_ONE 0x0300 539#define IDLEMODE_ALT_MARK_SPACE 0x0500 540#define IDLEMODE_SPACE 0x0600 541#define IDLEMODE_MARK 0x0700 542#define IDLEMODE_MASK 0x0700 543 544/* 545 * IUSC revision identifiers 546 */ 547#define IUSC_SL1660 0x4d44 548#define IUSC_PRE_SL1660 0x4553 549 550/* 551 * Transmit status Bits in Transmit Command/status Register (TCSR) 552 */ 553 554#define TCSR_PRESERVE 0x0F00 555 556#define TCSR_UNDERWAIT BIT11 557#define TXSTATUS_PREAMBLE_SENT BIT7 558#define TXSTATUS_IDLE_SENT BIT6 559#define TXSTATUS_ABORT_SENT BIT5 560#define TXSTATUS_EOF_SENT BIT4 561#define TXSTATUS_EOM_SENT BIT4 562#define TXSTATUS_CRC_SENT BIT3 563#define TXSTATUS_ALL_SENT BIT2 564#define TXSTATUS_UNDERRUN BIT1 565#define TXSTATUS_FIFO_EMPTY BIT0 566#define TXSTATUS_ALL 0x00fa 567#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) ) 568 569 570#define 
MISCSTATUS_RXC_LATCHED BIT15 571#define MISCSTATUS_RXC BIT14 572#define MISCSTATUS_TXC_LATCHED BIT13 573#define MISCSTATUS_TXC BIT12 574#define MISCSTATUS_RI_LATCHED BIT11 575#define MISCSTATUS_RI BIT10 576#define MISCSTATUS_DSR_LATCHED BIT9 577#define MISCSTATUS_DSR BIT8 578#define MISCSTATUS_DCD_LATCHED BIT7 579#define MISCSTATUS_DCD BIT6 580#define MISCSTATUS_CTS_LATCHED BIT5 581#define MISCSTATUS_CTS BIT4 582#define MISCSTATUS_RCC_UNDERRUN BIT3 583#define MISCSTATUS_DPLL_NO_SYNC BIT2 584#define MISCSTATUS_BRG1_ZERO BIT1 585#define MISCSTATUS_BRG0_ZERO BIT0 586 587#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0)) 588#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f)) 589 590#define SICR_RXC_ACTIVE BIT15 591#define SICR_RXC_INACTIVE BIT14 592#define SICR_RXC (BIT15+BIT14) 593#define SICR_TXC_ACTIVE BIT13 594#define SICR_TXC_INACTIVE BIT12 595#define SICR_TXC (BIT13+BIT12) 596#define SICR_RI_ACTIVE BIT11 597#define SICR_RI_INACTIVE BIT10 598#define SICR_RI (BIT11+BIT10) 599#define SICR_DSR_ACTIVE BIT9 600#define SICR_DSR_INACTIVE BIT8 601#define SICR_DSR (BIT9+BIT8) 602#define SICR_DCD_ACTIVE BIT7 603#define SICR_DCD_INACTIVE BIT6 604#define SICR_DCD (BIT7+BIT6) 605#define SICR_CTS_ACTIVE BIT5 606#define SICR_CTS_INACTIVE BIT4 607#define SICR_CTS (BIT5+BIT4) 608#define SICR_RCC_UNDERFLOW BIT3 609#define SICR_DPLL_NO_SYNC BIT2 610#define SICR_BRG1_ZERO BIT1 611#define SICR_BRG0_ZERO BIT0 612 613void usc_DisableMasterIrqBit( struct mgsl_struct *info ); 614void usc_EnableMasterIrqBit( struct mgsl_struct *info ); 615void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 616void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 617void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask ); 618 619#define usc_EnableInterrupts( a, b ) \ 620 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) ) 621 622#define usc_DisableInterrupts( a, b ) \ 623 usc_OutReg( 
(a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) ) 624 625#define usc_EnableMasterIrqBit(a) \ 626 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) ) 627 628#define usc_DisableMasterIrqBit(a) \ 629 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) ) 630 631#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) ) 632 633/* 634 * Transmit status Bits in Transmit Control status Register (TCSR) 635 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) 636 */ 637 638#define TXSTATUS_PREAMBLE_SENT BIT7 639#define TXSTATUS_IDLE_SENT BIT6 640#define TXSTATUS_ABORT_SENT BIT5 641#define TXSTATUS_EOF BIT4 642#define TXSTATUS_CRC_SENT BIT3 643#define TXSTATUS_ALL_SENT BIT2 644#define TXSTATUS_UNDERRUN BIT1 645#define TXSTATUS_FIFO_EMPTY BIT0 646 647#define DICR_MASTER BIT15 648#define DICR_TRANSMIT BIT0 649#define DICR_RECEIVE BIT1 650 651#define usc_EnableDmaInterrupts(a,b) \ 652 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) ) 653 654#define usc_DisableDmaInterrupts(a,b) \ 655 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) ) 656 657#define usc_EnableStatusIrqs(a,b) \ 658 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) ) 659 660#define usc_DisablestatusIrqs(a,b) \ 661 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) ) 662 663/* Transmit status Bits in Transmit Control status Register (TCSR) */ 664/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ 665 666 667#define DISABLE_UNCONDITIONAL 0 668#define DISABLE_END_OF_FRAME 1 669#define ENABLE_UNCONDITIONAL 2 670#define ENABLE_AUTO_CTS 3 671#define ENABLE_AUTO_DCD 3 672#define usc_EnableTransmitter(a,b) \ 673 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) ) 674#define usc_EnableReceiver(a,b) \ 675 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) ) 676 677static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port ); 678static void usc_OutDmaReg( struct 
mgsl_struct *info, u16 Port, u16 Value ); 679static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ); 680 681static u16 usc_InReg( struct mgsl_struct *info, u16 Port ); 682static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value ); 683static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ); 684void usc_RCmd( struct mgsl_struct *info, u16 Cmd ); 685void usc_TCmd( struct mgsl_struct *info, u16 Cmd ); 686 687#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b))) 688#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b)) 689 690#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1)) 691 692static void usc_process_rxoverrun_sync( struct mgsl_struct *info ); 693static void usc_start_receiver( struct mgsl_struct *info ); 694static void usc_stop_receiver( struct mgsl_struct *info ); 695 696static void usc_start_transmitter( struct mgsl_struct *info ); 697static void usc_stop_transmitter( struct mgsl_struct *info ); 698static void usc_set_txidle( struct mgsl_struct *info ); 699static void usc_load_txfifo( struct mgsl_struct *info ); 700 701static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate ); 702static void usc_enable_loopback( struct mgsl_struct *info, int enable ); 703 704static void usc_get_serial_signals( struct mgsl_struct *info ); 705static void usc_set_serial_signals( struct mgsl_struct *info ); 706 707static void usc_reset( struct mgsl_struct *info ); 708 709static void usc_set_sync_mode( struct mgsl_struct *info ); 710static void usc_set_sdlc_mode( struct mgsl_struct *info ); 711static void usc_set_async_mode( struct mgsl_struct *info ); 712static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate ); 713 714static void usc_loopback_frame( struct mgsl_struct *info ); 715 716static void mgsl_tx_timeout(unsigned long context); 717 718 719static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ); 720static void usc_loopmode_insert_request( struct 
mgsl_struct * info ); 721static int usc_loopmode_active( struct mgsl_struct * info); 722static void usc_loopmode_send_done( struct mgsl_struct * info ); 723 724static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); 725 726#if SYNCLINK_GENERIC_HDLC 727#define dev_to_port(D) (dev_to_hdlc(D)->priv) 728static void hdlcdev_tx_done(struct mgsl_struct *info); 729static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); 730static int hdlcdev_init(struct mgsl_struct *info); 731static void hdlcdev_exit(struct mgsl_struct *info); 732#endif 733 734/* 735 * Defines a BUS descriptor value for the PCI adapter 736 * local bus address ranges. 737 */ 738 739#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \ 740(0x00400020 + \ 741((WrHold) << 30) + \ 742((WrDly) << 28) + \ 743((RdDly) << 26) + \ 744((Nwdd) << 20) + \ 745((Nwad) << 15) + \ 746((Nxda) << 13) + \ 747((Nrdd) << 11) + \ 748((Nrad) << 6) ) 749 750static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit); 751 752/* 753 * Adapter diagnostic routines 754 */ 755static BOOLEAN mgsl_register_test( struct mgsl_struct *info ); 756static BOOLEAN mgsl_irq_test( struct mgsl_struct *info ); 757static BOOLEAN mgsl_dma_test( struct mgsl_struct *info ); 758static BOOLEAN mgsl_memory_test( struct mgsl_struct *info ); 759static int mgsl_adapter_test( struct mgsl_struct *info ); 760 761/* 762 * device and resource management routines 763 */ 764static int mgsl_claim_resources(struct mgsl_struct *info); 765static void mgsl_release_resources(struct mgsl_struct *info); 766static void mgsl_add_device(struct mgsl_struct *info); 767static struct mgsl_struct* mgsl_allocate_device(void); 768 769/* 770 * DMA buffer manupulation functions. 
771 */ 772static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ); 773static int mgsl_get_rx_frame( struct mgsl_struct *info ); 774static int mgsl_get_raw_rx_frame( struct mgsl_struct *info ); 775static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ); 776static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ); 777static int num_free_tx_dma_buffers(struct mgsl_struct *info); 778static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize); 779static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count); 780 781/* 782 * DMA and Shared Memory buffer allocation and formatting 783 */ 784static int mgsl_allocate_dma_buffers(struct mgsl_struct *info); 785static void mgsl_free_dma_buffers(struct mgsl_struct *info); 786static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 787static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 788static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info); 789static void mgsl_free_buffer_list_memory(struct mgsl_struct *info); 790static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info); 791static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info); 792static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info); 793static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info); 794static int load_next_tx_holding_buffer(struct mgsl_struct *info); 795static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize); 796 797/* 798 * Bottom half interrupt handlers 799 */ 800static void mgsl_bh_handler(struct work_struct *work); 801static void mgsl_bh_receive(struct mgsl_struct *info); 802static void mgsl_bh_transmit(struct mgsl_struct *info); 803static void 
mgsl_bh_status(struct mgsl_struct *info); 804 805/* 806 * Interrupt handler routines and dispatch table. 807 */ 808static void mgsl_isr_null( struct mgsl_struct *info ); 809static void mgsl_isr_transmit_data( struct mgsl_struct *info ); 810static void mgsl_isr_receive_data( struct mgsl_struct *info ); 811static void mgsl_isr_receive_status( struct mgsl_struct *info ); 812static void mgsl_isr_transmit_status( struct mgsl_struct *info ); 813static void mgsl_isr_io_pin( struct mgsl_struct *info ); 814static void mgsl_isr_misc( struct mgsl_struct *info ); 815static void mgsl_isr_receive_dma( struct mgsl_struct *info ); 816static void mgsl_isr_transmit_dma( struct mgsl_struct *info ); 817 818typedef void (*isr_dispatch_func)(struct mgsl_struct *); 819 820static isr_dispatch_func UscIsrTable[7] = 821{ 822 mgsl_isr_null, 823 mgsl_isr_misc, 824 mgsl_isr_io_pin, 825 mgsl_isr_transmit_data, 826 mgsl_isr_transmit_status, 827 mgsl_isr_receive_data, 828 mgsl_isr_receive_status 829}; 830 831/* 832 * ioctl call handlers 833 */ 834static int tiocmget(struct tty_struct *tty, struct file *file); 835static int tiocmset(struct tty_struct *tty, struct file *file, 836 unsigned int set, unsigned int clear); 837static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount 838 __user *user_icount); 839static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params); 840static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params); 841static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode); 842static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode); 843static int mgsl_txenable(struct mgsl_struct * info, int enable); 844static int mgsl_txabort(struct mgsl_struct * info); 845static int mgsl_rxenable(struct mgsl_struct * info, int enable); 846static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask); 847static int mgsl_loopmode_send_done( struct mgsl_struct * info ); 848 849/* set non-zero 
on successful registration with PCI subsystem */ 850static int pci_registered; 851 852/* 853 * Global linked list of SyncLink devices 854 */ 855static struct mgsl_struct *mgsl_device_list; 856static int mgsl_device_count; 857 858/* 859 * Set this param to non-zero to load eax with the 860 * .text section address and breakpoint on module load. 861 * This is useful for use with gdb and add-symbol-file command. 862 */ 863static int break_on_load; 864 865/* 866 * Driver major number, defaults to zero to get auto 867 * assigned major number. May be forced as module parameter. 868 */ 869static int ttymajor; 870 871/* 872 * Array of user specified options for ISA adapters. 873 */ 874static int io[MAX_ISA_DEVICES]; 875static int irq[MAX_ISA_DEVICES]; 876static int dma[MAX_ISA_DEVICES]; 877static int debug_level; 878static int maxframe[MAX_TOTAL_DEVICES]; 879static int dosyncppp[MAX_TOTAL_DEVICES]; 880static int txdmabufs[MAX_TOTAL_DEVICES]; 881static int txholdbufs[MAX_TOTAL_DEVICES]; 882 883module_param(break_on_load, bool, 0); 884module_param(ttymajor, int, 0); 885module_param_array(io, int, NULL, 0); 886module_param_array(irq, int, NULL, 0); 887module_param_array(dma, int, NULL, 0); 888module_param(debug_level, int, 0); 889module_param_array(maxframe, int, NULL, 0); 890module_param_array(dosyncppp, int, NULL, 0); 891module_param_array(txdmabufs, int, NULL, 0); 892module_param_array(txholdbufs, int, NULL, 0); 893 894static char *driver_name = "SyncLink serial driver"; 895static char *driver_version = "$Revision: 4.38 $"; 896 897static int synclink_init_one (struct pci_dev *dev, 898 const struct pci_device_id *ent); 899static void synclink_remove_one (struct pci_dev *dev); 900 901static struct pci_device_id synclink_pci_tbl[] = { 902 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, }, 903 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, }, 904 { 0, }, /* terminate list */ 905}; 906MODULE_DEVICE_TABLE(pci, synclink_pci_tbl); 907 
908MODULE_LICENSE("GPL"); 909 910static struct pci_driver synclink_pci_driver = { 911 .name = "synclink", 912 .id_table = synclink_pci_tbl, 913 .probe = synclink_init_one, 914 .remove = __devexit_p(synclink_remove_one), 915}; 916 917static struct tty_driver *serial_driver; 918 919/* number of characters left in xmit buffer before we ask for more */ 920#define WAKEUP_CHARS 256 921 922 923static void mgsl_change_params(struct mgsl_struct *info); 924static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout); 925 926/* 927 * 1st function defined in .text section. Calling this function in 928 * init_module() followed by a breakpoint allows a remote debugger 929 * (gdb) to get the .text address for the add-symbol-file command. 930 * This allows remote debugging of dynamically loadable modules. 931 */ 932static void* mgsl_get_text_ptr(void) 933{ 934 return mgsl_get_text_ptr; 935} 936 937static inline int mgsl_paranoia_check(struct mgsl_struct *info, 938 char *name, const char *routine) 939{ 940#ifdef MGSL_PARANOIA_CHECK 941 static const char *badmagic = 942 "Warning: bad magic number for mgsl struct (%s) in %s\n"; 943 static const char *badinfo = 944 "Warning: null mgsl_struct for (%s) in %s\n"; 945 946 if (!info) { 947 printk(badinfo, name, routine); 948 return 1; 949 } 950 if (info->magic != MGSL_MAGIC) { 951 printk(badmagic, name, routine); 952 return 1; 953 } 954#else 955 if (!info) 956 return 1; 957#endif 958 return 0; 959} 960 961/** 962 * line discipline callback wrappers 963 * 964 * The wrappers maintain line discipline references 965 * while calling into the line discipline. 
966 * 967 * ldisc_receive_buf - pass receive data to line discipline 968 */ 969 970static void ldisc_receive_buf(struct tty_struct *tty, 971 const __u8 *data, char *flags, int count) 972{ 973 struct tty_ldisc *ld; 974 if (!tty) 975 return; 976 ld = tty_ldisc_ref(tty); 977 if (ld) { 978 if (ld->receive_buf) 979 ld->receive_buf(tty, data, flags, count); 980 tty_ldisc_deref(ld); 981 } 982} 983 984/* mgsl_stop() throttle (stop) transmitter 985 * 986 * Arguments: tty pointer to tty info structure 987 * Return Value: None 988 */ 989static void mgsl_stop(struct tty_struct *tty) 990{ 991 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 992 unsigned long flags; 993 994 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop")) 995 return; 996 997 if ( debug_level >= DEBUG_LEVEL_INFO ) 998 printk("mgsl_stop(%s)\n",info->device_name); 999 1000 spin_lock_irqsave(&info->irq_spinlock,flags); 1001 if (info->tx_enabled) 1002 usc_stop_transmitter(info); 1003 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1004 1005} /* end of mgsl_stop() */ 1006 1007/* mgsl_start() release (start) transmitter 1008 * 1009 * Arguments: tty pointer to tty info structure 1010 * Return Value: None 1011 */ 1012static void mgsl_start(struct tty_struct *tty) 1013{ 1014 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 1015 unsigned long flags; 1016 1017 if (mgsl_paranoia_check(info, tty->name, "mgsl_start")) 1018 return; 1019 1020 if ( debug_level >= DEBUG_LEVEL_INFO ) 1021 printk("mgsl_start(%s)\n",info->device_name); 1022 1023 spin_lock_irqsave(&info->irq_spinlock,flags); 1024 if (!info->tx_enabled) 1025 usc_start_transmitter(info); 1026 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1027 1028} /* end of mgsl_start() */ 1029 1030/* 1031 * Bottom half work queue access functions 1032 */ 1033 1034/* mgsl_bh_action() Return next bottom half action to perform. 1035 * Return Value: BH action code or 0 if nothing to do. 
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* take the highest-priority pending work item and clear its
	 * request bit; priority order is receive, transmit, status */
	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete */
		info->bh_running = 0;
		info->bh_requested = 0;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 * Perform bottom half processing of work items queued by ISR.
 */
static void mgsl_bh_handler(struct work_struct *work)
{
	struct mgsl_struct *info =
		container_of(work, struct mgsl_struct, task);
	int action;

	if (!info)
		return;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = 1;

	/* drain all pending work items; mgsl_bh_action() clears
	 * bh_running/bh_requested when the queue is empty */
	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

/* Bottom half receive processing: drain completed frames using the
 * HDLC or raw frame reader depending on the configured mode.  If the
 * ISR flagged an RCC underrun, restart the receiver instead. */
static void mgsl_bh_receive(struct mgsl_struct *info)
{
	int (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do
	{
		if (info->rx_rcc_underrun) {
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

/* Bottom half transmit processing: wake up writers blocked on the
 * tty, then resume loopmode echo if the transmitter went idle while
 * a loopmode "send done" was requested. */
static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Bottom half status processing: reset the modem-status interrupt
 * rate-limit counters used by mgsl_isr_io_pin() so a previously
 * storming signal may generate interrupts again. */
static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	/* received HDLC abort while a loopmode insert was pending:
	 * begin echoing RxD to TxD and stop watching for aborts */
	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = FALSE;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	/* wake tasks blocked in mgsl_wait_event() on hunt/idle events */
	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	/* acknowledge the interrupt and clear the latched status bits */
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

} /* end of mgsl_isr_receive_status() */

/* mgsl_isr_transmit_status()
 *
 * 	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 *	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave	*/
		/* the TxFifo with data from the aborted frame	*/
		/* so purge the TxFifo. Also shutdown the DMA	*/
		/* channel in case there is data remaining in	*/
		/* the DMA buffer				*/
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	/* update transmit statistics.
	 * NOTE(review): the final else arm counts any other status as a
	 * transmit underrun — confirm that is the intended accounting */
	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	/* transmission finished: mark idle, flush the circular xmit
	 * buffer and cancel the transmit watchdog timer */
	info->tx_active = 0;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	/* drop RTS after transmission completes, if requested */
	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = 0;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		/* NOTE(review): info->tty is dereferenced without a NULL
		 * check on this path — confirm tty cannot be NULL here */
		if (info->tty->stopped || info->tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}

} /* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 * 	Service an Input/Output pin interrupt.
The type of
 * interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters.  Each signal's status IRQ is
		 * disabled once it has changed IO_PIN_SHUTDOWN_LIMIT times,
		 * so a bouncing line cannot storm interrupts; the counters
		 * are reset by the BH_STATUS bottom-half work item. */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			/* reflect carrier state to the generic HDLC netdev */
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		/* carrier-detect handling: wake blocked opens when carrier
		 * comes up, hang up the tty when carrier is lost */
		if ( (info->flags & ASYNC_CHECK_CD) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->tty)
					tty_hangup(info->tty);
			}
		}

		/* CTS hardware flow control: start/stop the transmitter as
		 * CTS is asserted/dropped.
		 * NOTE(review): info->tty->hw_stopped is read before the
		 * NULL checks on info->tty below — confirm tty cannot be
		 * NULL while ASYNC_CTS_FLOW is set */
		if ( (info->flags & ASYNC_CTS_FLOW) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					if (info->tty)
						info->tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->tty)
						info->tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = 1;
	}

} /* end of mgsl_isr_io_pin() */

/* mgsl_isr_transmit_data()
 *
 * 	Service a transmit data interrupt (async mode only).
1414 * 1415 * Arguments: info pointer to device instance data 1416 * Return Value: None 1417 */ 1418static void mgsl_isr_transmit_data( struct mgsl_struct *info ) 1419{ 1420 if ( debug_level >= DEBUG_LEVEL_ISR ) 1421 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n", 1422 __FILE__,__LINE__,info->xmit_cnt); 1423 1424 usc_ClearIrqPendingBits( info, TRANSMIT_DATA ); 1425 1426 if (info->tty->stopped || info->tty->hw_stopped) { 1427 usc_stop_transmitter(info); 1428 return; 1429 } 1430 1431 if ( info->xmit_cnt ) 1432 usc_load_txfifo( info ); 1433 else 1434 info->tx_active = 0; 1435 1436 if (info->xmit_cnt < WAKEUP_CHARS) 1437 info->pending_bh |= BH_TRANSMIT; 1438 1439} /* end of mgsl_isr_transmit_data() */ 1440 1441/* mgsl_isr_receive_data() 1442 * 1443 * Service a receive data interrupt. This occurs 1444 * when operating in asynchronous interrupt transfer mode. 1445 * The receive data FIFO is flushed to the receive data buffers. 1446 * 1447 * Arguments: info pointer to device instance data 1448 * Return Value: None 1449 */ 1450static void mgsl_isr_receive_data( struct mgsl_struct *info ) 1451{ 1452 int Fifocount; 1453 u16 status; 1454 int work = 0; 1455 unsigned char DataByte; 1456 struct tty_struct *tty = info->tty; 1457 struct mgsl_icount *icount = &info->icount; 1458 1459 if ( debug_level >= DEBUG_LEVEL_ISR ) 1460 printk("%s(%d):mgsl_isr_receive_data\n", 1461 __FILE__,__LINE__); 1462 1463 usc_ClearIrqPendingBits( info, RECEIVE_DATA ); 1464 1465 /* select FIFO status for RICR readback */ 1466 usc_RCmd( info, RCmd_SelectRicrRxFifostatus ); 1467 1468 /* clear the Wordstatus bit so that status readback */ 1469 /* only reflects the status of this byte */ 1470 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 )); 1471 1472 /* flush the receive FIFO */ 1473 1474 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) { 1475 int flag; 1476 1477 /* read one byte from RxFIFO */ 1478 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY), 1479 
info->io_base + CCAR ); 1480 DataByte = inb( info->io_base + CCAR ); 1481 1482 /* get the status of the received byte */ 1483 status = usc_InReg(info, RCSR); 1484 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1485 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) 1486 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); 1487 1488 icount->rx++; 1489 1490 flag = 0; 1491 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1492 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) { 1493 printk("rxerr=%04X\n",status); 1494 /* update error statistics */ 1495 if ( status & RXSTATUS_BREAK_RECEIVED ) { 1496 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR); 1497 icount->brk++; 1498 } else if (status & RXSTATUS_PARITY_ERROR) 1499 icount->parity++; 1500 else if (status & RXSTATUS_FRAMING_ERROR) 1501 icount->frame++; 1502 else if (status & RXSTATUS_OVERRUN) { 1503 /* must issue purge fifo cmd before */ 1504 /* 16C32 accepts more receive chars */ 1505 usc_RTCmd(info,RTCmd_PurgeRxFifo); 1506 icount->overrun++; 1507 } 1508 1509 /* discard char if tty control flags say so */ 1510 if (status & info->ignore_status_mask) 1511 continue; 1512 1513 status &= info->read_status_mask; 1514 1515 if (status & RXSTATUS_BREAK_RECEIVED) { 1516 flag = TTY_BREAK; 1517 if (info->flags & ASYNC_SAK) 1518 do_SAK(tty); 1519 } else if (status & RXSTATUS_PARITY_ERROR) 1520 flag = TTY_PARITY; 1521 else if (status & RXSTATUS_FRAMING_ERROR) 1522 flag = TTY_FRAME; 1523 } /* end of if (error) */ 1524 tty_insert_flip_char(tty, DataByte, flag); 1525 if (status & RXSTATUS_OVERRUN) { 1526 /* Overrun is special, since it's 1527 * reported immediately, and doesn't 1528 * affect the current character 1529 */ 1530 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1531 } 1532 } 1533 1534 if ( debug_level >= DEBUG_LEVEL_ISR ) { 1535 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", 1536 __FILE__,__LINE__,icount->rx,icount->brk, 1537 icount->parity,icount->frame,icount->overrun); 1538 } 
1539 1540 if(work) 1541 tty_flip_buffer_push(tty); 1542} 1543 1544/* mgsl_isr_misc() 1545 * 1546 * Service a miscellaneous interrupt source. 1547 * 1548 * Arguments: info pointer to device extension (instance data) 1549 * Return Value: None 1550 */ 1551static void mgsl_isr_misc( struct mgsl_struct *info ) 1552{ 1553 u16 status = usc_InReg( info, MISR ); 1554 1555 if ( debug_level >= DEBUG_LEVEL_ISR ) 1556 printk("%s(%d):mgsl_isr_misc status=%04X\n", 1557 __FILE__,__LINE__,status); 1558 1559 if ((status & MISCSTATUS_RCC_UNDERRUN) && 1560 (info->params.mode == MGSL_MODE_HDLC)) { 1561 1562 /* turn off receiver and rx DMA */ 1563 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 1564 usc_DmaCmd(info, DmaCmd_ResetRxChannel); 1565 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); 1566 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); 1567 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS); 1568 1569 /* schedule BH handler to restart receiver */ 1570 info->pending_bh |= BH_RECEIVE; 1571 info->rx_rcc_underrun = 1; 1572 } 1573 1574 usc_ClearIrqPendingBits( info, MISC ); 1575 usc_UnlatchMiscstatusBits( info, status ); 1576 1577} /* end of mgsl_isr_misc() */ 1578 1579/* mgsl_isr_null() 1580 * 1581 * Services undefined interrupt vectors from the 1582 * USC. (hence this function SHOULD never be called) 1583 * 1584 * Arguments: info pointer to device extension (instance data) 1585 * Return Value: None 1586 */ 1587static void mgsl_isr_null( struct mgsl_struct *info ) 1588{ 1589 1590} /* end of mgsl_isr_null() */ 1591 1592/* mgsl_isr_receive_dma() 1593 * 1594 * Service a receive DMA channel interrupt. 1595 * For this driver there are two sources of receive DMA interrupts 1596 * as identified in the Receive DMA mode Register (RDMR): 1597 * 1598 * BIT3 EOA/EOL End of List, all receive buffers in receive 1599 * buffer list have been filled (no more free buffers 1600 * available). The DMA controller has shut down. 1601 * 1602 * BIT2 EOB End of Buffer. 
This interrupt occurs when a receive
 * 				DMA buffer is terminated in response to
 * 				completion of a good frame or a frame with
 * 				errors. The status of the frame is stored in
 * 				the buffer entry in the list of receive
 * 				buffer entries.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* frame processing itself is deferred to the bottom half */
	info->pending_bh |= BH_RECEIVE;

	/* BIT3 = end of buffer list: no free receive buffers remain */
	if ( status & BIT3 ) {
		info->rx_overflow = 1;
		info->icount.buf_overrun++;
	}

} /* end of mgsl_isr_receive_dma() */

/* mgsl_isr_transmit_dma()
 *
 * 	This function services a transmit DMA channel interrupt.
 *
 * 	For this driver there is one source of transmit DMA interrupts
 * 	as identified in the Transmit DMA Mode Register (TDMR):
 *
 * 	BIT2  EOB	End of Buffer. This interrupt occurs when a
 * 			transmit DMA buffer has been emptied.
 *
 * 	The driver maintains enough transmit DMA buffers to hold at least
 * 	one max frame size transmit frame. When operating in a buffered
 * 	transmit mode, there may be enough transmit DMA buffers to hold at
 * 	least two or more max frame size frames. On an EOB condition,
 * 	determine if there are any queued transmit buffers and copy into
 * 	transmit DMA buffers if we have room.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* BIT2 = EOB: a transmit DMA buffer has been emptied */
	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 *  try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {
			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}

} /* end of mgsl_isr_transmit_dma() */

/* mgsl_interrupt()
 *
 * 	Interrupt service routine entry point.
 *
 * Arguments:
 *
 * 	irq		interrupt number that caused interrupt
 * 	dev_id		device ID supplied during interrupt registration
 *
 * Return Value: None
 */
static irqreturn_t mgsl_interrupt(int irq, void *dev_id)
{
	struct mgsl_struct * info;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__,__LINE__,irq);

	info = (struct mgsl_struct *)dev_id;
	if (!info)
		return IRQ_NONE;

	spin_lock(&info->irq_spinlock);

	/* service all pending serial (USC) and DMA interrupt sources
	 * until both vector registers read back as zero */
	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector: serial interrupts go through
		 * UscIsrTable; otherwise decode the DMA vector */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		/* safeguard: if a handler flagged an interrupt storm,
		 * mask everything and bail out */
		if ( info->isr_overflow ) {
			printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
				__FILE__,__LINE__,info->device_name, irq);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = 1;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__,__LINE__,irq);
	return IRQ_HANDLED;
} /* end of mgsl_interrupt() */

/* startup()
 *
 * 	Initialize and start device.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int startup(struct mgsl_struct * info)
{
	int retval = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);

	if (info->flags & ASYNC_INITIALIZED)
		return 0;

	if (!info->xmit_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	/* arm the transmit watchdog used to detect stuck transmissions */
	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);

	/* Allocate and claim adapter resources */
	retval = mgsl_claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = mgsl_adapter_test(info);

	if ( retval ) {
		if (capable(CAP_SYS_ADMIN) && info->tty)
			set_bit(TTY_IO_ERROR, &info->tty->flags);
		mgsl_release_resources(info);
		return retval;
	}

	/* program hardware for current parameters */
	mgsl_change_params(info);

	if (info->tty)
		clear_bit(TTY_IO_ERROR, &info->tty->flags);

	info->flags |= ASYNC_INITIALIZED;

	return 0;

} /* end of startup() */

/* shutdown()
 *
 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void shutdown(struct mgsl_struct * info)
{
	unsigned long flags;

	if (!(info->flags & ASYNC_INITIALIZED))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	if (info->xmit_buf) {
		free_page((unsigned long) info->xmit_buf);
		info->xmit_buf = NULL;
	}

	/* quiesce the hardware and mask all interrupt sources */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_DisableMasterIrqBit(info);
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
		TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);

	/* Disable DMAEN (Port 7, Bit 14) */
	/* This disconnects the DMA request signal from the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));

	/* Disable INTEN (Port 6, Bit12) */
	/* This disconnects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));

	/* drop DTR/RTS on close unless HUPCL is clear */
	if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
		usc_set_serial_signals(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_release_resources(info);

	if (info->tty)
		set_bit(TTY_IO_ERROR, &info->tty->flags);

	info->flags &= ~ASYNC_INITIALIZED;

} /* end of shutdown() */

/* Program the hardware for the current operating mode and serial
 * parameters, then restart the receiver if reception is enabled. */
static void mgsl_program_hw(struct mgsl_struct *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ||
	    info->netcount)
		usc_set_sync_mode(info);
	else
		usc_set_async_mode(info);

	usc_set_serial_signals(info);

	/* reset the modem-status IRQ rate-limit counters */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
	usc_EnableInterrupts(info, IO_PIN);
	usc_get_serial_signals(info);

	/* NOTE(review): info->tty is dereferenced without a NULL check
	 * when netcount is zero — confirm tty is always set here */
	if (info->netcount || info->tty->termios->c_cflag & CREAD)
		usc_start_receiver(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Reconfigure adapter based on new parameters
 */
static void mgsl_change_params(struct mgsl_struct *info)
{
	unsigned cflag;
	int bits_per_char;

	if (!info->tty || !info->tty->termios)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_change_params(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = info->tty->termios->c_cflag;

	/* if B0 rate (hangup) specified then negate DTR and RTS */
	/* otherwise assert DTR and RTS */
	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);

	/* byte size and parity */

	switch (cflag & CSIZE) {
	      case CS5: info->params.data_bits = 5; break;
	      case CS6: info->params.data_bits = 6; break;
	      case CS7: info->params.data_bits = 7; break;
	      case CS8: info->params.data_bits = 8; break;
	      /* Never happens, but GCC is too dumb to figure it out */
	      default:  info->params.data_bits = 7; break;
	      }

	if (cflag & CSTOPB)
		info->params.stop_bits = 2;
	else
		info->params.stop_bits = 1;

	info->params.parity = ASYNC_PARITY_NONE;
	if (cflag & PARENB) {
		if (cflag & PARODD)
			info->params.parity = ASYNC_PARITY_ODD;
		else
			info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
		if (cflag & CMSPAR)
			info->params.parity = ASYNC_PARITY_SPACE;
#endif
	}

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	/* if port data rate is set to 460800 or less then
	 * allow tty settings to override, otherwise keep the
	 * current data rate.
	 */
	if (info->params.data_rate <= 460800)
		info->params.data_rate = tty_get_baud_rate(info->tty);

	if ( info->params.data_rate ) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	if (cflag & CRTSCTS)
		info->flags |= ASYNC_CTS_FLOW;
	else
		info->flags &= ~ASYNC_CTS_FLOW;

	if (cflag & CLOCAL)
		info->flags &= ~ASYNC_CHECK_CD;
	else
		info->flags |= ASYNC_CHECK_CD;

	/* process tty input control flags */

	info->read_status_mask = RXSTATUS_OVERRUN;
	if (I_INPCK(info->tty))
		info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
	if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
		info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;

	/* NOTE(review): ignore_status_mask is only OR-ed here, never reset,
	 * so bits from a previous termios setting persist across calls —
	 * confirm whether it should be cleared before being rebuilt */
	if (I_IGNPAR(info->tty))
		info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
	if (I_IGNBRK(info->tty)) {
		info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
		/* If ignoring parity and break indicators, ignore
		 * overruns too. (For real raw support).
		 */
		if (I_IGNPAR(info->tty))
			info->ignore_status_mask |= RXSTATUS_OVERRUN;
	}

	mgsl_program_hw(info);

} /* end of mgsl_change_params() */

/* mgsl_put_char()
 *
 * 	Add a character to the transmit buffer.
 *
 * Arguments:		tty	pointer to tty information structure
 *			ch	character to add to transmit buffer
 *
 * Return Value:	None
 */
static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO ) {
		printk( "%s(%d):mgsl_put_char(%d) on %s\n",
			__FILE__,__LINE__,ch,info->device_name);
	}

	if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
		return;

	if (!tty || !info->xmit_buf)
		return;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* only queue when in async mode or no sync frame is actively
	 * transmitting; otherwise the character is silently dropped,
	 * as is a character that would overfill the ring buffer */
	if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {

		if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
			info->xmit_buf[info->xmit_head++] = ch;
			info->xmit_head &= SERIAL_XMIT_SIZE-1;
			info->xmit_cnt++;
		}
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_put_char() */

/* mgsl_flush_chars()
 *
 *	Enable transmitter so remaining characters in the
 *	transmit buffer are sent.
 *
 * Arguments:		tty	pointer to tty information structure
 * Return Value:	None
 */
static void mgsl_flush_chars(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
			__FILE__,__LINE__,info->device_name,info->xmit_cnt);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
		return;

	if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
	    !info->xmit_buf)
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
			__FILE__,__LINE__,info->device_name );

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (!info->tx_active) {
		if ( (info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
			/* operating in synchronous (frame oriented) mode */
			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				info->xmit_buf,info->xmit_cnt);
		}
		usc_start_transmitter(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_flush_chars() */

/* mgsl_write()
 *
 *	Send a block of data.  In synchronous (HDLC/RAW) mode each call
 *	is one complete frame; in async mode data is copied into the
 *	circular transmit buffer in as many pieces as fit.
 *
 * Arguments:
 *
 *	tty		pointer to tty information structure
 *	buf		pointer to buffer containing send data
 *	count		size of send data in bytes
 *
 * Return Value:	number of characters written (0 when a sync frame
 *			cannot currently be accepted)
 */
static int mgsl_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	int	c, ret = 0;
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) count=%d\n",
			__FILE__,__LINE__,info->device_name,count);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
		goto cleanup;

	if (!tty || !info->xmit_buf)
		goto cleanup;

	if ( info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if (info->tx_active) {

			/* HDLC mode: one frame at a time, caller must retry */
			if ( info->params.mode == MGSL_MODE_HDLC ) {
				ret = 0;
				goto cleanup;
			}
			/* transmitter is actively sending data -
			 * if we have multiple transmit dma and
			 * holding buffers, attempt to queue this
			 * frame for transmission at a later time.
			 */
			if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
				/* no tx holding buffers available */
				ret = 0;
				goto cleanup;
			}

			/* queue transmit frame request */
			ret = count;
			save_tx_buffer_request(info,buf,count);

			/* if we have sufficient tx dma buffers,
			 * load the next buffered tx request
			 */
			spin_lock_irqsave(&info->irq_spinlock,flags);
			load_next_tx_holding_buffer(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			goto cleanup;
		}

		/* if operating in HDLC LoopMode and the adapter  */
		/* has yet to be inserted into the loop, we can't */
		/* transmit					  */

		if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
			!usc_loopmode_active(info) )
		{
			ret = 0;
			goto cleanup;
		}

		if ( info->xmit_cnt ) {
			/* Send accumulated from send_char() calls */
			/* as frame and wait before accepting more data. */
			ret = 0;

			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				info->xmit_buf,info->xmit_cnt);
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
					__FILE__,__LINE__,info->device_name);
		} else {
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
					__FILE__,__LINE__,info->device_name);
			ret = count;
			info->xmit_cnt = count;
			mgsl_load_tx_dma_buffer(info,buf,count);
		}
	} else {
		/* async mode: copy into circular buffer, possibly in
		 * two pieces when the copy wraps the buffer end */
		while (1) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			c = min_t(int, count,
				min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
				    SERIAL_XMIT_SIZE - info->xmit_head));
			if (c <= 0) {
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
				break;
			}
			memcpy(info->xmit_buf + info->xmit_head, buf, c);
			info->xmit_head = ((info->xmit_head + c) &
					   (SERIAL_XMIT_SIZE-1));
			info->xmit_cnt += c;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			buf += c;
			count -= c;
			ret += c;
		}
	}

	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_active)
			usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
cleanup:
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);

	return ret;

}	/* end of mgsl_write() */

/* mgsl_write_room()
 *
 *	Return the count of free bytes in transmit buffer
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	free byte count (sync mode: 0 while transmitting,
 *			otherwise HDLC_MAX_FRAME_SIZE)
 */
static int mgsl_write_room(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	int	ret;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
		return 0;
	ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
	if (ret
	    < 0)
		ret = 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_write_room(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name,ret );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if ( info->tx_active )
			return 0;
		else
			return HDLC_MAX_FRAME_SIZE;
	}

	return ret;

}	/* end of mgsl_write_room() */

/* mgsl_chars_in_buffer()
 *
 *	Return the count of bytes in transmit buffer
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	buffered byte count (sync mode: max_frame_size
 *			while a frame is transmitting, otherwise 0)
 */
static int mgsl_chars_in_buffer(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
		return 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name,info->xmit_cnt );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if ( info->tx_active )
			return info->max_frame_size;
		else
			return 0;
	}

	return info->xmit_cnt;
}	/* end of mgsl_chars_in_buffer() */

/* mgsl_flush_buffer()
 *
 *	Discard all data in the send buffer
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_flush_buffer(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
		return;

	/* reset the transmit ring and cancel the pending tx timeout */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	tty_wakeup(tty);
}

/* mgsl_send_xchar()
 *
 *	Send a high-priority XON/XOFF character
 *
 * Arguments:		tty	pointer to tty info structure
 *			ch	character to send
 * Return Value:	None
 */
static void mgsl_send_xchar(struct tty_struct *tty, char ch)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, ch );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
		return;

	info->x_char = ch;
	if (ch) {
		/* Make sure transmit interrupts are on */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_enabled)
			usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
}	/* end of mgsl_send_xchar() */

/* mgsl_throttle()
 *
 *	Signal remote device to throttle send data (our receive data)
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_throttle(struct tty_struct * tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_throttle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
		return;

	if (I_IXOFF(tty))
		mgsl_send_xchar(tty, STOP_CHAR(tty));

	/* hardware flow control: drop RTS to throttle the sender */
	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		info->serial_signals &= ~SerialSignal_RTS;
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
}	/* end of mgsl_throttle() */

/* mgsl_unthrottle()
 *
 *	Signal remote device to stop throttling send data (our receive data)
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_unthrottle(struct tty_struct * tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_unthrottle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
		return;

	/* cancel a pending XOFF rather than sending a matching XON */
	if (I_IXOFF(tty)) {
		if (info->x_char)
			info->x_char = 0;
		else
			mgsl_send_xchar(tty, START_CHAR(tty));
	}

	/* hardware flow control: reassert RTS */
	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		info->serial_signals |= SerialSignal_RTS;
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

}	/* end of mgsl_unthrottle() */

/* mgsl_get_stats()
 *
 *	get the current serial statistics counters; passing a NULL
 *	user buffer clears the counters instead
 *
 * Arguments:		info		pointer to device instance data
 *			user_icount	pointer to buffer to hold returned stats,
 *					or NULL to reset the counters
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		/* NOTE(review): debug message says "mgsl_get_params" but this
		 * is mgsl_get_stats; message text left unchanged here */
		printk("%s(%d):mgsl_get_params(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	if (!user_icount) {
		memset(&info->icount, 0, sizeof(info->icount));
	} else {
		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
		if (err)
			return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_stats() */

/* mgsl_get_params()
 *
 *	get the current serial parameters information
 *
 * Arguments:		info		pointer to device instance data
 *			user_params	pointer to buffer to hold returned params
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
{
	int err;
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_params(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_params() */

/* mgsl_set_params()
 *
 *	set the serial parameters and reprogram the adapter
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	new_params	user buffer containing new serial params
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
{
	unsigned long flags;
	MGSL_PARAMS tmp_params;
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
			info->device_name );
	/* copy to a local first so a user-space fault cannot leave
	 * info->params half-written */
	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_change_params(info);

	return 0;

}	/* end of mgsl_set_params()
 */

/* mgsl_get_txidle()
 *
 *	get the current transmit idle mode
 *
 * Arguments:		info		pointer to device instance data
 *			idle_mode	pointer to buffer to hold returned idle mode
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name, info->idle_mode);

	COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_txidle() */

/* mgsl_set_txidle()	service ioctl to set transmit idle mode
 *
 * Arguments:		info		pointer to device instance data
 *			idle_mode	new idle mode
 *
 * Return Value:	0 (always succeeds)
 */
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, idle_mode );

	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->idle_mode = idle_mode;
	usc_set_txidle( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_set_txidle() */

/* mgsl_txenable()
 *
 *	enable or disable the transmitter
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	enable		1 = enable, 0 = disable
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_txenable(struct mgsl_struct * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, enable);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( enable ) {
		if ( !info->tx_enabled ) {

			usc_start_transmitter(info);
			/*--------------------------------------------------
			 * if HDLC/SDLC Loop mode, attempt to insert the
			 * station in the 'loop' by setting CMR:13. Upon
			 * receipt of the next GoAhead (RxAbort) sequence,
			 * the OnLoop indicator (CCSR:7) should go active
			 * to indicate that we are on the loop
			 *--------------------------------------------------*/
			if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
				usc_loopmode_insert_request( info );
		}
	} else {
		if ( info->tx_enabled )
			usc_stop_transmitter(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_txenable() */

/* mgsl_txabort()	abort send HDLC frame
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_txabort(struct mgsl_struct * info)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
			info->device_name);

	/* abort only applies to an active HDLC-mode transmission */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
	{
		if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
			usc_loopmode_cancel_transmit( info );
		else
			usc_TCmd(info,TCmd_SendAbort);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_txabort() */

/* mgsl_rxenable()	enable or disable the receiver
 *
 * Arguments:		info	pointer to device instance data
 *			enable	1 = enable, 0 = disable
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_rxenable(struct mgsl_struct * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, enable);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( enable ) {
		if ( !info->rx_enabled )
			usc_start_receiver(info);
	} else {
		if ( info->rx_enabled )
			usc_stop_receiver(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_rxenable() */

/* mgsl_wait_event()	wait for specified event to occur
 *
 * Arguments:		info	pointer to device instance data
 *			mask	pointer to bitmask of events to wait for
 * Return Value:	0 if successful and bit mask updated with
 *			of events triggered,
 *			otherwise error code
 */
static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
{
	unsigned long flags;
	int s;
	int rc=0;
	struct mgsl_icount cprev, cnow;
	int events;
	int mask;
	struct	_input_signal_events oldsigs, newsigs;
	DECLARE_WAITQUEUE(wait, current);

	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
	if (rc) {
		return  -EFAULT;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, mask);

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* return immediately if state matches requested events */
	usc_get_serial_signals(info);
	s = info->serial_signals;
	events = mask &
		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
	if (events) {
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
		goto exit;
	}

	/* save current irq counts */
	cprev = info->icount;
	oldsigs = info->input_signal_events;

	/* enable hunt and idle irqs if needed */
	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		u16 oldreg = usc_InReg(info,RICR);
		/* NOTE(review): bits are combined with '+' rather than '|';
		 * this corrupts RICR if a requested bit is already set —
		 * confirm against the register usage elsewhere in the file */
		u16 newreg = oldreg +
			 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
			 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
		if (oldreg != newreg)
			usc_OutReg(info, RICR, newreg);
	}

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&info->event_wait_q, &wait);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get current irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		newsigs = info->input_signal_events;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason */
		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
		    newsigs.dsr_down == oldsigs.dsr_down &&
		    newsigs.dcd_up   == oldsigs.dcd_up   &&
		    newsigs.dcd_down == oldsigs.dcd_down &&
		    newsigs.cts_up   == oldsigs.cts_up   &&
		    newsigs.cts_down == oldsigs.cts_down &&
		    newsigs.ri_up    == oldsigs.ri_up    &&
		    newsigs.ri_down  == oldsigs.ri_down  &&
		    cnow.exithunt    == cprev.exithunt   &&
		    cnow.rxidle      == cprev.rxidle) {
			rc = -EIO;
			break;
		}

		events = mask &
			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
		if (events)
			break;

		cprev = cnow;
		oldsigs = newsigs;
	}

	remove_wait_queue(&info->event_wait_q, &wait);
	set_current_state(TASK_RUNNING);

	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!waitqueue_active(&info->event_wait_q)) {
			/* disable enable exit hunt mode/idle rcvd IRQs */
			usc_OutReg(info, RICR, usc_InReg(info,RICR) &
				~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
		}
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
exit:
	if ( rc == 0 )
		PUT_USER(rc, events, mask_ptr);

	return rc;

}	/* end of mgsl_wait_event() */

/* modem_input_wait()
 *
 *	block until one of the modem inputs named in arg
 *	(TIOCM_RNG/DSR/CD/CTS) changes state
 */
static int modem_input_wait(struct mgsl_struct *info,int arg)
{
	unsigned long flags;
	int rc;
	struct mgsl_icount cprev, cnow;
	DECLARE_WAITQUEUE(wait, current);

	/* save current irq counts */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	cprev = info->icount;
	add_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get new irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason
		 */
		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
			rc = -EIO;
			break;
		}

		/* check for change in caller specified modem input */
		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
			rc = 0;
			break;
		}

		cprev = cnow;
	}
	remove_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_RUNNING);
	return rc;
}

/* return the state of the serial control and status signals
 */
static int tiocmget(struct tty_struct *tty, struct file *file)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned int result;
	unsigned long flags;

	/* refresh serial_signals from the hardware before reporting */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
		((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
		((info->serial_signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
		((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
		((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmget() value=%08X\n",
			 __FILE__,__LINE__, info->device_name, result );
	return result;
}

/* set modem control signals (DTR/RTS)
 */
static int tiocmset(struct tty_struct *tty, struct file *file,
		    unsigned int set, unsigned int clear)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmset(%x,%x)\n",
			__FILE__,__LINE__,info->device_name, set, clear);

	/* NOTE(review): serial_signals is read-modify-written here before
	 * the spinlock is taken; confirm no IRQ-context writer races this */
	if (set & TIOCM_RTS)
		info->serial_signals |= SerialSignal_RTS;
	if (set & TIOCM_DTR)
		info->serial_signals |= SerialSignal_DTR;
	if (clear & TIOCM_RTS)
		info->serial_signals &= ~SerialSignal_RTS;
	if (clear & TIOCM_DTR)
		info->serial_signals &= ~SerialSignal_DTR;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_set_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/* mgsl_break()		Set or clear transmit break condition
 *
 * Arguments:		tty		pointer to tty instance data
 *			break_state	-1=set break condition, 0=clear
 * Return Value:	None
 */
static void mgsl_break(struct tty_struct *tty, int break_state)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_break(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, break_state);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
		return;

	/* IOCR bit 7 controls the transmit break condition */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (break_state == -1)
		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
	else
		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_break() */

/* mgsl_ioctl()	Service an IOCTL request
 *
 * Arguments:
 *
 *	tty	pointer to tty instance data
 *	file	pointer to associated file object for device
 *	cmd	IOCTL command code
 *	arg	command argument/context
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
		    unsigned int cmd, unsigned long arg)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
			info->device_name, cmd );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
		return -ENODEV;

	/* commands other than these four are rejected while the port
	 * is in the I/O-error state */
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
		    return -EIO;
	}

	return mgsl_ioctl_common(info, cmd, arg);
}

/* mgsl_ioctl_common()
 *
 *	dispatch an ioctl command that has passed the tty-level checks;
 *	shared by the tty ioctl entry point
 */
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
{
	int error;
	struct mgsl_icount cnow;	/* kernel counter temps */
	void __user *argp = (void __user *)arg;
	struct serial_icounter_struct __user *p_cuser;	/* user space */
	unsigned long flags;

	switch (cmd) {
		case MGSL_IOCGPARAMS:
			return mgsl_get_params(info, argp);
		case MGSL_IOCSPARAMS:
			return mgsl_set_params(info, argp);
		case MGSL_IOCGTXIDLE:
			return mgsl_get_txidle(info, argp);
		case MGSL_IOCSTXIDLE:
			return mgsl_set_txidle(info,(int)arg);
		case MGSL_IOCTXENABLE:
			return mgsl_txenable(info,(int)arg);
		case MGSL_IOCRXENABLE:
			return mgsl_rxenable(info,(int)arg);
		case MGSL_IOCTXABORT:
			return mgsl_txabort(info);
		case MGSL_IOCGSTATS:
			return mgsl_get_stats(info, argp);
		case MGSL_IOCWAITEVENT:
			return mgsl_wait_event(info, argp);
		case MGSL_IOCLOOPTXDONE:
			return mgsl_loopmode_send_done(info);
		/* Wait for modem input (DCD,RI,DSR,CTS) change
		 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
		 */
		case TIOCMIWAIT:
			return modem_input_wait(info,(int)arg);

		/*
		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
		 * Return: write counters to the user passed counter struct
		 * NB: both 1->0 and 0->1 transitions are counted except for
		 *     RI where only 0->1 is counted.
		 */
		case TIOCGICOUNT:
			/* snapshot under the lock so the counters are coherent */
			spin_lock_irqsave(&info->irq_spinlock,flags);
			cnow = info->icount;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			p_cuser = argp;
			PUT_USER(error,cnow.cts, &p_cuser->cts);
			if (error) return error;
			PUT_USER(error,cnow.dsr, &p_cuser->dsr);
			if (error) return error;
			PUT_USER(error,cnow.rng, &p_cuser->rng);
			if (error) return error;
			PUT_USER(error,cnow.dcd, &p_cuser->dcd);
			if (error) return error;
			PUT_USER(error,cnow.rx, &p_cuser->rx);
			if (error) return error;
			PUT_USER(error,cnow.tx, &p_cuser->tx);
			if (error) return error;
			PUT_USER(error,cnow.frame, &p_cuser->frame);
			if (error) return error;
			PUT_USER(error,cnow.overrun, &p_cuser->overrun);
			if (error) return error;
			PUT_USER(error,cnow.parity, &p_cuser->parity);
			if (error) return error;
			PUT_USER(error,cnow.brk, &p_cuser->brk);
			if (error) return error;
			PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
			if (error) return error;
			return 0;
		default:
			return -ENOIOCTLCMD;
	}
	/* not reached: every case above returns */
	return 0;
}

/* mgsl_set_termios()
 *
 *	Set new termios settings
 *
 * Arguments:
 *
 *	tty		pointer to tty structure
 *	termios		pointer to buffer to hold returned old termios
 *
 * Return Value:	None
 */
static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct mgsl_struct *info = (struct mgsl_struct
*)tty->driver_data; 3054 unsigned long flags; 3055 3056 if (debug_level >= DEBUG_LEVEL_INFO) 3057 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__, 3058 tty->driver->name ); 3059 3060 mgsl_change_params(info); 3061 3062 /* Handle transition to B0 status */ 3063 if (old_termios->c_cflag & CBAUD && 3064 !(tty->termios->c_cflag & CBAUD)) { 3065 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); 3066 spin_lock_irqsave(&info->irq_spinlock,flags); 3067 usc_set_serial_signals(info); 3068 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3069 } 3070 3071 /* Handle transition away from B0 status */ 3072 if (!(old_termios->c_cflag & CBAUD) && 3073 tty->termios->c_cflag & CBAUD) { 3074 info->serial_signals |= SerialSignal_DTR; 3075 if (!(tty->termios->c_cflag & CRTSCTS) || 3076 !test_bit(TTY_THROTTLED, &tty->flags)) { 3077 info->serial_signals |= SerialSignal_RTS; 3078 } 3079 spin_lock_irqsave(&info->irq_spinlock,flags); 3080 usc_set_serial_signals(info); 3081 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3082 } 3083 3084 /* Handle turning off CRTSCTS */ 3085 if (old_termios->c_cflag & CRTSCTS && 3086 !(tty->termios->c_cflag & CRTSCTS)) { 3087 tty->hw_stopped = 0; 3088 mgsl_start(tty); 3089 } 3090 3091} /* end of mgsl_set_termios() */ 3092 3093/* mgsl_close() 3094 * 3095 * Called when port is closed. Wait for remaining data to be 3096 * sent. Disable port and free resources. 
 *
 * Arguments:
 *
 * 	tty	pointer to open tty structure
 * 	filp	pointer to open file object
 *
 * Return Value:	None
 */
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->count);

	if (!info->count)
		return;

	if (tty_hung_up_p(filp))
		goto cleanup;

	if ((tty->count == 1) && (info->count != 1)) {
		/*
		 * tty->count is 1 and the tty structure will be freed.
		 * info->count should be one in this case.
		 * if it's not, correct it so that the port is shutdown.
		 */
		printk("mgsl_close: bad refcount; tty->count is 1, "
		       "info->count is %d\n", info->count);
		info->count = 1;
	}

	info->count--;

	/* if at least one open remaining, leave hardware active */
	if (info->count)
		goto cleanup;

	info->flags |= ASYNC_CLOSING;

	/* set tty->closing to notify line discipline to
	 * only process XON/XOFF characters. Only the N_TTY
	 * discipline appears to use this (ppp does not).
	 */
	tty->closing = 1;

	/* wait for transmit data to clear all layers */

	if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
				 __FILE__,__LINE__, info->device_name );
		tty_wait_until_sent(tty, info->closing_wait);
	}

 	if (info->flags & ASYNC_INITIALIZED)
 		mgsl_wait_until_sent(tty, info->timeout);

	if (tty->driver->flush_buffer)
		tty->driver->flush_buffer(tty);

	tty_ldisc_flush(tty);

	shutdown(info);

	tty->closing = 0;
	info->tty = NULL;

	/* give any openers blocked in block_til_ready() a chance to run,
	 * optionally after the configured close delay */
	if (info->blocked_open) {
		if (info->close_delay) {
			msleep_interruptible(jiffies_to_msecs(info->close_delay));
		}
		wake_up_interruptible(&info->open_wait);
	}

	info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);

	wake_up_interruptible(&info->close_wait);

cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->count);

}	/* end of mgsl_close() */

/* mgsl_wait_until_sent()
 *
 *	Wait until the transmitter is empty.
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	timeout		time to wait for send completion
 *
 * Return Value:	None
 */
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
		return;

	if (!(info->flags & ASYNC_INITIALIZED))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if ( info->params.data_rate ) {
	       	char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	/* In HDLC/RAW mode wait for the frame DMA to go idle; in async
	 * mode poll the TCSR "all sent" hardware status bit instead. */
	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
			info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
			 __FILE__,__LINE__, info->device_name );

}	/* end of
mgsl_wait_until_sent() */

/* mgsl_hangup()
 *
 *	Called by tty_hangup() when a hangup is signaled.
 *	This is the same as to closing all open files for the port.
 *
 * Arguments:		tty	pointer to associated tty object
 * Return Value:	None
 */
static void mgsl_hangup(struct tty_struct *tty)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_hangup(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
		return;

	mgsl_flush_buffer(tty);
	shutdown(info);

	/* drop all opens at once and let blocked openers retry */
	info->count = 0;
	info->flags &= ~ASYNC_NORMAL_ACTIVE;
	info->tty = NULL;

	wake_up_interruptible(&info->open_wait);

}	/* end of mgsl_hangup() */

/* block_til_ready()
 *
 * 	Block the current process until the specified port
 * 	is ready to be opened.
 *
 * Arguments:
 *
 * 	tty		pointer to tty info structure
 * 	filp		pointer to open file object
 * 	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise error code
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mgsl_struct *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	int		do_clocal = 0, extra_count = 0;
	unsigned long	flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready on %s\n",
			 __FILE__,__LINE__, tty->driver->name );

	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
		/* nonblock mode is set or port is not enabled */
		info->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	/* CLOCAL means open succeeds without waiting for carrier (DCD) */
	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = 1;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, info->count is dropped by one, so that
	 * mgsl_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */

	retval = 0;
	add_wait_queue(&info->open_wait, &wait);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready before block on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, info->count );

	spin_lock_irqsave(&info->irq_spinlock, flags);
	if (!tty_hung_up_p(filp)) {
		extra_count = 1;
		info->count--;
	}
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	info->blocked_open++;

	while (1) {
		/* assert RTS/DTR while waiting, unless baud is B0 */
		if (tty->termios->c_cflag & CBAUD) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
			usc_set_serial_signals(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}

		set_current_state(TASK_INTERRUPTIBLE);

		if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
			retval = (info->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		usc_get_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

 		if (!(info->flags & ASYNC_CLOSING) &&
 		    (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
 			break;
 		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
				 __FILE__,__LINE__, tty->driver->name, info->count );

		schedule();
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&info->open_wait, &wait);

	/* restore the open count we dropped before waiting */
	if (extra_count)
		info->count++;
	info->blocked_open--;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, info->count );

	if (!retval)
		info->flags |= ASYNC_NORMAL_ACTIVE;

	return retval;

}	/* end of block_til_ready() */

/* mgsl_open()
 *
 *	Called when a port is opened.  Init and enable port.
 *	Perform serial-specific initialization for the tty structure.
 *
 * Arguments:		tty	pointer to tty info structure
 *			filp	associated file pointer
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_open(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct	*info;
	int 			retval, line;
	unsigned long flags;

	/* verify range of specified line number */
	line = tty->index;
	if ((line < 0) || (line >= mgsl_device_count)) {
		printk("%s(%d):mgsl_open with invalid line #%d.\n",
			__FILE__,__LINE__,line);
		return -ENODEV;
	}

	/* find the info structure for the specified line */
	info = mgsl_device_list;
	while(info && info->line != line)
		info = info->next_device;
	if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
		return -ENODEV;

	tty->driver_data = info;
	info->tty = tty;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
			 __FILE__,__LINE__,tty->driver->name, info->count);

	/* If port is closing, signal caller to try again */
	/* NOTE(review): interruptible_sleep_on() can miss a wakeup that
	 * occurs before the caller sleeps; later kernels replaced this
	 * with wait_event_interruptible() — confirm against target tree. */
	if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
		if (info->flags & ASYNC_CLOSING)
			interruptible_sleep_on(&info->close_wait);
		retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
			-EAGAIN : -ERESTARTSYS);
		goto cleanup;
	}

	info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	/* a port claimed by the network (syncppp) interface cannot be
	 * opened as a tty at the same time */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->netcount) {
		retval = -EBUSY;
		spin_unlock_irqrestore(&info->netlock, flags);
		goto cleanup;
	}
	info->count++;
	spin_unlock_irqrestore(&info->netlock, flags);

	if (info->count == 1) {
		/* 1st open on this device, init hardware */
		retval = startup(info);
		if (retval < 0)
			goto cleanup;
	}

	retval = block_til_ready(tty, filp, info);
	if (retval) {
		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready(%s) returned %d\n",
				 __FILE__,__LINE__, info->device_name, retval);
		goto cleanup;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_open(%s) success\n",
			 __FILE__,__LINE__, info->device_name);
	retval = 0;

cleanup:
	if (retval) {
		if (tty->count == 1)
			info->tty = NULL; /* tty layer will release tty struct */
		if(info->count)
			info->count--;
	}

	return retval;

}	/* end of mgsl_open() */

/*
 * /proc fs routines....
 */

/* Format one device's status line(s) into buf; returns chars written.
 * Called only from mgsl_read_proc() below.
 */
static inline int line_info(char *buf, struct mgsl_struct *info)
{
	char	stat_buf[30];
	int	ret;
	unsigned long flags;

	if (info->bus_type == MGSL_BUS_TYPE_PCI) {
		ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
			info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base);
	} else {
		ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
			info->device_name, info->io_base,
			info->irq_level, info->dma_level);
	}

	/* output current serial signal states */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* build "|RTS|CTS|..." in stat_buf; printed later with the
	 * leading '|' skipped (stat_buf+1) */
	stat_buf[0] = 0;
	stat_buf[1] = 0;
	if (info->serial_signals & SerialSignal_RTS)
		strcat(stat_buf, "|RTS");
	if (info->serial_signals & SerialSignal_CTS)
		strcat(stat_buf, "|CTS");
	if (info->serial_signals & SerialSignal_DTR)
		strcat(stat_buf, "|DTR");
	if (info->serial_signals & SerialSignal_DSR)
		strcat(stat_buf, "|DSR");
	if (info->serial_signals & SerialSignal_DCD)
		strcat(stat_buf, "|CD");
	if (info->serial_signals & SerialSignal_RI)
		strcat(stat_buf, "|RI");

	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ) {
		ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
			      info->icount.txok, info->icount.rxok);
		if (info->icount.txunder)
			ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
		if (info->icount.txabort)
			ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
		if (info->icount.rxshort)
			ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
		if (info->icount.rxlong)
			ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
		if (info->icount.rxover)
			ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
		if (info->icount.rxcrc)
			ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
	} else {
		ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
			      info->icount.tx, info->icount.rx);
		if (info->icount.frame)
			ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
		if (info->icount.parity)
			ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
		if (info->icount.brk)
			ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
		if (info->icount.overrun)
			ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
	}

	/* Append serial signal status to end */
	ret += sprintf(buf+ret, " %s\n", stat_buf+1);

	ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
	 info->tx_active,info->bh_requested,info->bh_running,
	 info->pending_bh);

	/* dump raw USC register state under the IRQ lock */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	{
	u16 Tcsr = usc_InReg( info, TCSR );
	u16 Tdmr = usc_InDmaReg( info, TDMR );
	u16 Ticr = usc_InReg( info, TICR );
	u16 Rscr = usc_InReg( info, RCSR );
	u16 Rdmr = usc_InDmaReg( info, RDMR );
	u16 Ricr = usc_InReg( info, RICR );
	u16 Icr = usc_InReg( info, ICR );
	u16 Dccr = usc_InReg( info, DCCR );
	u16 Tmr = usc_InReg( info, TMR );
	u16 Tccr = usc_InReg( info, TCCR );
	u16 Ccar = inw( info->io_base + CCAR );
	ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
                        "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
	 		Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return ret;

}	/* end of line_info() */

/* mgsl_read_proc()
 *
 * Called to print information about devices
 * (classic procfs read_proc protocol: fill 'page', manage the
 * off/count window via *start/*eof)
 *
 * Arguments:
 * 	page	page of memory to hold returned info
 * 	start
 * 	off
 * 	count
 * 	eof
 * 	data
 *
 * Return Value:
 */
static int mgsl_read_proc(char *page, char **start, off_t off, int count,
		 int *eof, void *data)
{
	int len = 0, l;
	off_t	begin = 0;
	struct mgsl_struct *info;

	len += sprintf(page, "synclink driver:%s\n", driver_version);

	info = mgsl_device_list;
	while( info ) {
		l = line_info(page + len, info);
		len += l;
		if (len+begin > off+count)
			goto done;
		if (len+begin < off) {
			begin += len;
			len = 0;
		}
		info = info->next_device;
	}

	*eof = 1;
done:
	if (off >= len+begin)
		return 0;
	*start = page + (off-begin);
	return ((count < begin+len-off) ? count : begin+len-off);

}	/* end of mgsl_read_proc() */

/* mgsl_allocate_dma_buffers()
 *
 * 	Allocate and format DMA buffers (ISA adapter)
 * 	or format shared memory buffers (PCI adapter).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
{
	unsigned short BuffersPerFrame;

	info->last_mem_alloc = 0;

	/* Calculate the number of DMA buffers necessary to hold the */
	/* largest allowable frame size. Note: If the max frame size is */
	/* not an even multiple of the DMA buffer size then we need to */
	/* round the buffer count per frame up one. */

	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
	if ( info->max_frame_size % DMABUFFERSIZE )
		BuffersPerFrame++;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/*
		 * The PCI adapter has 256KBytes of shared memory to use.
		 * This is 64 PAGE_SIZE buffers.
		 *
		 * The first page is used for padding at this time so the
		 * buffer list does not begin at offset 0 of the PCI
		 * adapter's shared memory.
		 *
		 * The 2nd page is used for the buffer list. A 4K buffer
		 * list can hold 128 DMA_BUFFER structures at 32 bytes
		 * each.
		 *
		 * This leaves 62 4K pages.
		 *
		 * The next N pages are used for transmit frame(s). We
		 * reserve enough 4K page blocks to hold the required
		 * number of transmit dma buffers (num_tx_dma_buffers),
		 * each of MaxFrameSize size.
		 *
		 * Of the remaining pages (62-N), determine how many can
		 * be used to receive full MaxFrameSize inbound frames
		 */
		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = 62 - info->tx_buffer_count;
	} else {
		/* Calculate the number of PAGE_SIZE buffers needed for */
		/* receive and transmit DMA buffers. */


		/* Calculate the number of DMA buffers necessary to */
		/* hold 7 max size receive frames and one max size transmit frame. */
		/* The receive buffer count is bumped by one so we avoid an */
		/* End of List condition if all receive buffers are used when */
		/* using linked list DMA buffers. */

		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;

		/*
		 * limit total TxBuffers & RxBuffers to 62 4K total
		 * (ala PCI Allocation)
		 */

		if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
			info->rx_buffer_count = 62 - info->tx_buffer_count;

	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);

	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
	     mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
	     mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
	     mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
	     mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
		return -ENOMEM;
	}

	mgsl_reset_rx_dma_buffers( info );
	mgsl_reset_tx_dma_buffers( info );

	return 0;

}	/* end of mgsl_allocate_dma_buffers() */

/*
 * mgsl_alloc_buffer_list_memory()
 *
 * Allocate a common DMA buffer for use as the
 * receive and transmit buffer lists.
 *
 * A buffer list is a set of buffer entries where each entry contains
 * a pointer to an actual buffer and a pointer to the next buffer entry
 * (plus some other info about the buffer).
 *
 * The buffer entries for a list are built to form a circular list so
 * that when the entire list has been traversed you start back at the
 * beginning.
 *
 * This function allocates memory for just the buffer entries.
 * The links (pointer to next entry) are filled in with the physical
 * address of the next entry so the adapter can navigate the list
 * using bus master DMA.  The pointers to the actual buffers are filled
 * out later when the actual buffers are allocated.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
{
	unsigned int i;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI adapter uses shared memory. */
		info->buffer_list = info->memory_base + info->last_mem_alloc;
		info->buffer_list_phys = info->last_mem_alloc;
		info->last_mem_alloc += BUFFERLISTSIZE;
	} else {
		/* ISA adapter uses system memory. */
		/* The buffer lists are allocated as a common buffer that both */
		/* the processor and adapter can access. This allows the driver to */
		/* inspect portions of the buffer while other portions are being */
		/* updated by the adapter using Bus Master DMA. */

		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
		if (info->buffer_list == NULL)
			return -ENOMEM;
		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
	}

	/* We got the memory for the buffer entry lists. */
	/* Initialize the memory block to all zeros. */
	memset( info->buffer_list, 0, BUFFERLISTSIZE );

	/* Save virtual address pointers to the receive and */
	/* transmit buffer lists. (Receive 1st). These pointers will */
	/* be used by the processor to access the lists. */
	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list += info->rx_buffer_count;

	/*
	 * Build the links for the buffer entry lists such that
	 * two circular lists are built. (Transmit and Receive).
	 *
	 * Note: the links are physical addresses
	 * which are read by the adapter to determine the next
	 * buffer entry to use.
	 */

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->rx_buffer_list[i].phys_entry =
			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		/* last entry links back to the first (circular list) */
		info->rx_buffer_list[i].link = info->buffer_list_phys;

		if ( i < info->rx_buffer_count - 1 )
			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		info->tx_buffer_list[i].link = info->buffer_list_phys +
			info->rx_buffer_count * sizeof(DMABUFFERENTRY);

		if ( i < info->tx_buffer_count - 1 )
			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	return 0;

}	/* end of mgsl_alloc_buffer_list_memory() */

/* Free DMA buffers allocated for use as the
 * receive and transmit buffer lists.
 * Warning:
 *
 * 	The data transfer buffers associated with the buffer list
 * 	MUST be freed before freeing the buffer list itself because
 * 	the buffer list contains the information necessary to free
 * 	the individual buffers!
 */
static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
{
	/* PCI shared memory is not freed here; only the ISA coherent
	 * allocation is returned to the DMA pool */
	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);

	info->buffer_list = NULL;
	info->rx_buffer_list = NULL;
	info->tx_buffer_list = NULL;

}	/* end of mgsl_free_buffer_list_memory() */

/*
 * mgsl_alloc_frame_memory()
 *
 * 	Allocate the frame DMA buffers used by the specified buffer list.
 * 	Each DMA buffer will be one memory page in size. This is necessary
 * 	because memory can fragment enough that it may be impossible
 * 	contiguous pages.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 * 	BufferList	pointer to list of buffer entries
 * 	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
{
	int i;
	u32 phys_addr;

	/* Allocate page sized buffers for the receive buffer list */

	for ( i = 0; i < Buffercount; i++ ) {
		if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
			/* PCI adapter uses shared memory buffers. */
			BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
			phys_addr = info->last_mem_alloc;
			info->last_mem_alloc += DMABUFFERSIZE;
		} else {
			/* ISA adapter uses system memory. */
			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
			if (BufferList[i].virt_addr == NULL)
				return -ENOMEM;
			phys_addr = (u32)(BufferList[i].dma_addr);
		}
		BufferList[i].phys_addr = phys_addr;
	}

	return 0;

}	/* end of mgsl_alloc_frame_memory() */

/*
 * mgsl_free_frame_memory()
 *
 * 	Free the buffers associated with
 * 	each buffer entry of a buffer list.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 * 	BufferList	pointer to list of buffer entries
 * 	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	None
 */
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
{
	int i;

	if ( BufferList ) {
		for ( i = 0 ; i < Buffercount ; i++ ) {
			if ( BufferList[i].virt_addr ) {
				if ( info->bus_type != MGSL_BUS_TYPE_PCI )
					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
				BufferList[i].virt_addr = NULL;
			}
		}
	}

}	/* end of mgsl_free_frame_memory() */

/* mgsl_free_dma_buffers()
 *
 * 	Free DMA buffers
 * 	(frame buffers must be freed before the buffer list — see the
 * 	warning above mgsl_free_buffer_list_memory)
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
	mgsl_free_buffer_list_memory( info );

}	/* end of mgsl_free_dma_buffers() */


/*
 * mgsl_alloc_intermediate_rxbuffer_memory()
 *
 * 	Allocate a buffer large enough to hold max_frame_size. This buffer
 * 	is used to pass an assembled frame to the line discipline.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	/* NOTE(review): GFP_DMA presumably keeps the buffer in low memory
	 * for the ISA adapter path — confirm whether it is still needed */
	info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
	if ( info->intermediate_rxbuffer == NULL )
		return -ENOMEM;

	return 0;

}	/* end of mgsl_alloc_intermediate_rxbuffer_memory() */

/*
 * mgsl_free_intermediate_rxbuffer_memory()
 *
 * 	Release the intermediate receive buffer; kfree(NULL) is a no-op
 * 	so this is safe even if allocation never happened.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	kfree(info->intermediate_rxbuffer);
	info->intermediate_rxbuffer = NULL;

}	/* end of mgsl_free_intermediate_rxbuffer_memory() */

/*
 * mgsl_alloc_intermediate_txbuffer_memory()
 *
 * 	Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
 * 	This buffer is used to load transmit frames into the adapter's dma transfer
 * 	buffers when there is sufficient space.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s %s(%d)  allocating %d tx holding buffers\n",
			info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);

	memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));

	for ( i=0; i<info->num_tx_holding_buffers; ++i) {
		info->tx_holding_buffers[i].buffer =
			kmalloc(info->max_frame_size, GFP_KERNEL);
		if (info->tx_holding_buffers[i].buffer == NULL) {
			/* allocation failed: unwind the buffers obtained so far */
			for (--i; i >= 0; i--) {
				kfree(info->tx_holding_buffers[i].buffer);
				info->tx_holding_buffers[i].buffer = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;

}	/* end of mgsl_alloc_intermediate_txbuffer_memory() */

/*
 * mgsl_free_intermediate_txbuffer_memory()
 *
 * 	Free all tx holding buffers and reset the holding-queue indices.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
		kfree(info->tx_holding_buffers[i].buffer);
		info->tx_holding_buffers[i].buffer = NULL;
	}

	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_free_intermediate_txbuffer_memory() */


/*
 * load_next_tx_holding_buffer()
 *
 * attempts to load the next buffered tx request into the
 * tx dma buffers
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	1 if next buffered tx request loaded
 * 			into adapter's tx dma buffer,
 * 			0 otherwise
 */
static int load_next_tx_holding_buffer(struct mgsl_struct *info)
{
	int ret = 0;

	if ( info->tx_holding_count ) {
		/* determine if we have enough tx dma buffers
		 * to accommodate the next tx frame
		 */
		struct tx_holding_buffer *ptx =
			&info->tx_holding_buffers[info->get_tx_holding_index];
		int num_free = num_free_tx_dma_buffers(info);
		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
		if ( ptx->buffer_size % DMABUFFERSIZE )
			++num_needed;

		if (num_needed <= num_free) {
			info->xmit_cnt = ptx->buffer_size;
			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);

			/* consume this entry from the circular holding queue */
			--info->tx_holding_count;
			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
				info->get_tx_holding_index=0;

			/* restart transmit timer */
			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));

			ret = 1;
		}
	}

	return ret;
}

/*
 * save_tx_buffer_request()
 *
 * attempt to store transmit frame request for later transmission
 * (copies the caller's frame into the next free holding buffer)
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 * 	Buffer		pointer to buffer containing frame to load
 * 	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value: 1 if able to store, 0 otherwise
 */
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
{
	struct tx_holding_buffer *ptx;

	if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
		return 0;	        /* all buffers in use */
	}

	ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
	ptx->buffer_size = BufferSize;
	memcpy( ptx->buffer, Buffer, BufferSize);

	++info->tx_holding_count;
	if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
		info->put_tx_holding_index=0;

	return 1;
}

static int mgsl_claim_resources(struct mgsl_struct *info)
{
	if
(request_region(info->io_base,info->io_addr_size,"synclink") == NULL) { 4121 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n", 4122 __FILE__,__LINE__,info->device_name, info->io_base); 4123 return -ENODEV; 4124 } 4125 info->io_addr_requested = 1; 4126 4127 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags, 4128 info->device_name, info ) < 0 ) { 4129 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n", 4130 __FILE__,__LINE__,info->device_name, info->irq_level ); 4131 goto errout; 4132 } 4133 info->irq_requested = 1; 4134 4135 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 4136 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) { 4137 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n", 4138 __FILE__,__LINE__,info->device_name, info->phys_memory_base); 4139 goto errout; 4140 } 4141 info->shared_mem_requested = 1; 4142 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) { 4143 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n", 4144 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset); 4145 goto errout; 4146 } 4147 info->lcr_mem_requested = 1; 4148 4149 info->memory_base = ioremap(info->phys_memory_base,0x40000); 4150 if (!info->memory_base) { 4151 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n", 4152 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 4153 goto errout; 4154 } 4155 4156 if ( !mgsl_memory_test(info) ) { 4157 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n", 4158 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 4159 goto errout; 4160 } 4161 4162 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset; 4163 if (!info->lcr_base) { 4164 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n", 4165 __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); 4166 goto errout; 4167 } 4168 4169 } else { 4170 /* claim DMA channel */ 4171 4172 if 
(request_dma(info->dma_level,info->device_name) < 0){ 4173 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n", 4174 __FILE__,__LINE__,info->device_name, info->dma_level ); 4175 mgsl_release_resources( info ); 4176 return -ENODEV; 4177 } 4178 info->dma_requested = 1; 4179 4180 /* ISA adapter uses bus master DMA */ 4181 set_dma_mode(info->dma_level,DMA_MODE_CASCADE); 4182 enable_dma(info->dma_level); 4183 } 4184 4185 if ( mgsl_allocate_dma_buffers(info) < 0 ) { 4186 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n", 4187 __FILE__,__LINE__,info->device_name, info->dma_level ); 4188 goto errout; 4189 } 4190 4191 return 0; 4192errout: 4193 mgsl_release_resources(info); 4194 return -ENODEV; 4195 4196} /* end of mgsl_claim_resources() */ 4197 4198static void mgsl_release_resources(struct mgsl_struct *info) 4199{ 4200 if ( debug_level >= DEBUG_LEVEL_INFO ) 4201 printk( "%s(%d):mgsl_release_resources(%s) entry\n", 4202 __FILE__,__LINE__,info->device_name ); 4203 4204 if ( info->irq_requested ) { 4205 free_irq(info->irq_level, info); 4206 info->irq_requested = 0; 4207 } 4208 if ( info->dma_requested ) { 4209 disable_dma(info->dma_level); 4210 free_dma(info->dma_level); 4211 info->dma_requested = 0; 4212 } 4213 mgsl_free_dma_buffers(info); 4214 mgsl_free_intermediate_rxbuffer_memory(info); 4215 mgsl_free_intermediate_txbuffer_memory(info); 4216 4217 if ( info->io_addr_requested ) { 4218 release_region(info->io_base,info->io_addr_size); 4219 info->io_addr_requested = 0; 4220 } 4221 if ( info->shared_mem_requested ) { 4222 release_mem_region(info->phys_memory_base,0x40000); 4223 info->shared_mem_requested = 0; 4224 } 4225 if ( info->lcr_mem_requested ) { 4226 release_mem_region(info->phys_lcr_base + info->lcr_offset,128); 4227 info->lcr_mem_requested = 0; 4228 } 4229 if (info->memory_base){ 4230 iounmap(info->memory_base); 4231 info->memory_base = NULL; 4232 } 4233 if (info->lcr_base){ 4234 iounmap(info->lcr_base - info->lcr_offset); 4235 
info->lcr_base = NULL; 4236 } 4237 4238 if ( debug_level >= DEBUG_LEVEL_INFO ) 4239 printk( "%s(%d):mgsl_release_resources(%s) exit\n", 4240 __FILE__,__LINE__,info->device_name ); 4241 4242} /* end of mgsl_release_resources() */ 4243 4244/* mgsl_add_device() 4245 * 4246 * Add the specified device instance data structure to the 4247 * global linked list of devices and increment the device count. 4248 * 4249 * Arguments: info pointer to device instance data 4250 * Return Value: None 4251 */ 4252static void mgsl_add_device( struct mgsl_struct *info ) 4253{ 4254 info->next_device = NULL; 4255 info->line = mgsl_device_count; 4256 sprintf(info->device_name,"ttySL%d",info->line); 4257 4258 if (info->line < MAX_TOTAL_DEVICES) { 4259 if (maxframe[info->line]) 4260 info->max_frame_size = maxframe[info->line]; 4261 info->dosyncppp = dosyncppp[info->line]; 4262 4263 if (txdmabufs[info->line]) { 4264 info->num_tx_dma_buffers = txdmabufs[info->line]; 4265 if (info->num_tx_dma_buffers < 1) 4266 info->num_tx_dma_buffers = 1; 4267 } 4268 4269 if (txholdbufs[info->line]) { 4270 info->num_tx_holding_buffers = txholdbufs[info->line]; 4271 if (info->num_tx_holding_buffers < 1) 4272 info->num_tx_holding_buffers = 1; 4273 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS) 4274 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS; 4275 } 4276 } 4277 4278 mgsl_device_count++; 4279 4280 if ( !mgsl_device_list ) 4281 mgsl_device_list = info; 4282 else { 4283 struct mgsl_struct *current_dev = mgsl_device_list; 4284 while( current_dev->next_device ) 4285 current_dev = current_dev->next_device; 4286 current_dev->next_device = info; 4287 } 4288 4289 if ( info->max_frame_size < 4096 ) 4290 info->max_frame_size = 4096; 4291 else if ( info->max_frame_size > 65535 ) 4292 info->max_frame_size = 65535; 4293 4294 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 4295 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n", 4296 info->hw_version + 1, info->device_name, 
info->io_base, info->irq_level, 4297 info->phys_memory_base, info->phys_lcr_base, 4298 info->max_frame_size ); 4299 } else { 4300 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n", 4301 info->device_name, info->io_base, info->irq_level, info->dma_level, 4302 info->max_frame_size ); 4303 } 4304 4305#if SYNCLINK_GENERIC_HDLC 4306 hdlcdev_init(info); 4307#endif 4308 4309} /* end of mgsl_add_device() */ 4310 4311/* mgsl_allocate_device() 4312 * 4313 * Allocate and initialize a device instance structure 4314 * 4315 * Arguments: none 4316 * Return Value: pointer to mgsl_struct if success, otherwise NULL 4317 */ 4318static struct mgsl_struct* mgsl_allocate_device(void) 4319{ 4320 struct mgsl_struct *info; 4321 4322 info = kzalloc(sizeof(struct mgsl_struct), 4323 GFP_KERNEL); 4324 4325 if (!info) { 4326 printk("Error can't allocate device instance data\n"); 4327 } else { 4328 info->magic = MGSL_MAGIC; 4329 INIT_WORK(&info->task, mgsl_bh_handler); 4330 info->max_frame_size = 4096; 4331 info->close_delay = 5*HZ/10; 4332 info->closing_wait = 30*HZ; 4333 init_waitqueue_head(&info->open_wait); 4334 init_waitqueue_head(&info->close_wait); 4335 init_waitqueue_head(&info->status_event_wait_q); 4336 init_waitqueue_head(&info->event_wait_q); 4337 spin_lock_init(&info->irq_spinlock); 4338 spin_lock_init(&info->netlock); 4339 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); 4340 info->idle_mode = HDLC_TXIDLE_FLAGS; 4341 info->num_tx_dma_buffers = 1; 4342 info->num_tx_holding_buffers = 0; 4343 } 4344 4345 return info; 4346 4347} /* end of mgsl_allocate_device()*/ 4348 4349static const struct tty_operations mgsl_ops = { 4350 .open = mgsl_open, 4351 .close = mgsl_close, 4352 .write = mgsl_write, 4353 .put_char = mgsl_put_char, 4354 .flush_chars = mgsl_flush_chars, 4355 .write_room = mgsl_write_room, 4356 .chars_in_buffer = mgsl_chars_in_buffer, 4357 .flush_buffer = mgsl_flush_buffer, 4358 .ioctl = mgsl_ioctl, 4359 .throttle = mgsl_throttle, 4360 .unthrottle = 
mgsl_unthrottle, 4361 .send_xchar = mgsl_send_xchar, 4362 .break_ctl = mgsl_break, 4363 .wait_until_sent = mgsl_wait_until_sent, 4364 .read_proc = mgsl_read_proc, 4365 .set_termios = mgsl_set_termios, 4366 .stop = mgsl_stop, 4367 .start = mgsl_start, 4368 .hangup = mgsl_hangup, 4369 .tiocmget = tiocmget, 4370 .tiocmset = tiocmset, 4371}; 4372 4373/* 4374 * perform tty device initialization 4375 */ 4376static int mgsl_init_tty(void) 4377{ 4378 int rc; 4379 4380 serial_driver = alloc_tty_driver(128); 4381 if (!serial_driver) 4382 return -ENOMEM; 4383 4384 serial_driver->owner = THIS_MODULE; 4385 serial_driver->driver_name = "synclink"; 4386 serial_driver->name = "ttySL"; 4387 serial_driver->major = ttymajor; 4388 serial_driver->minor_start = 64; 4389 serial_driver->type = TTY_DRIVER_TYPE_SERIAL; 4390 serial_driver->subtype = SERIAL_TYPE_NORMAL; 4391 serial_driver->init_termios = tty_std_termios; 4392 serial_driver->init_termios.c_cflag = 4393 B9600 | CS8 | CREAD | HUPCL | CLOCAL; 4394 serial_driver->init_termios.c_ispeed = 9600; 4395 serial_driver->init_termios.c_ospeed = 9600; 4396 serial_driver->flags = TTY_DRIVER_REAL_RAW; 4397 tty_set_operations(serial_driver, &mgsl_ops); 4398 if ((rc = tty_register_driver(serial_driver)) < 0) { 4399 printk("%s(%d):Couldn't register serial driver\n", 4400 __FILE__,__LINE__); 4401 put_tty_driver(serial_driver); 4402 serial_driver = NULL; 4403 return rc; 4404 } 4405 4406 printk("%s %s, tty major#%d\n", 4407 driver_name, driver_version, 4408 serial_driver->major); 4409 return 0; 4410} 4411 4412/* enumerate user specified ISA adapters 4413 */ 4414static void mgsl_enum_isa_devices(void) 4415{ 4416 struct mgsl_struct *info; 4417 int i; 4418 4419 /* Check for user specified ISA devices */ 4420 4421 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){ 4422 if ( debug_level >= DEBUG_LEVEL_INFO ) 4423 printk("ISA device specified io=%04X,irq=%d,dma=%d\n", 4424 io[i], irq[i], dma[i] ); 4425 4426 info = mgsl_allocate_device(); 4427 if ( 
!info ) { 4428 /* error allocating device instance data */ 4429 if ( debug_level >= DEBUG_LEVEL_ERROR ) 4430 printk( "can't allocate device instance data.\n"); 4431 continue; 4432 } 4433 4434 /* Copy user configuration info to device instance data */ 4435 info->io_base = (unsigned int)io[i]; 4436 info->irq_level = (unsigned int)irq[i]; 4437 info->irq_level = irq_canonicalize(info->irq_level); 4438 info->dma_level = (unsigned int)dma[i]; 4439 info->bus_type = MGSL_BUS_TYPE_ISA; 4440 info->io_addr_size = 16; 4441 info->irq_flags = 0; 4442 4443 mgsl_add_device( info ); 4444 } 4445} 4446 4447static void synclink_cleanup(void) 4448{ 4449 int rc; 4450 struct mgsl_struct *info; 4451 struct mgsl_struct *tmp; 4452 4453 printk("Unloading %s: %s\n", driver_name, driver_version); 4454 4455 if (serial_driver) { 4456 if ((rc = tty_unregister_driver(serial_driver))) 4457 printk("%s(%d) failed to unregister tty driver err=%d\n", 4458 __FILE__,__LINE__,rc); 4459 put_tty_driver(serial_driver); 4460 } 4461 4462 info = mgsl_device_list; 4463 while(info) { 4464#if SYNCLINK_GENERIC_HDLC 4465 hdlcdev_exit(info); 4466#endif 4467 mgsl_release_resources(info); 4468 tmp = info; 4469 info = info->next_device; 4470 kfree(tmp); 4471 } 4472 4473 if (pci_registered) 4474 pci_unregister_driver(&synclink_pci_driver); 4475} 4476 4477static int __init synclink_init(void) 4478{ 4479 int rc; 4480 4481 if (break_on_load) { 4482 mgsl_get_text_ptr(); 4483 BREAKPOINT(); 4484 } 4485 4486 printk("%s %s\n", driver_name, driver_version); 4487 4488 mgsl_enum_isa_devices(); 4489 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0) 4490 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); 4491 else 4492 pci_registered = 1; 4493 4494 if ((rc = mgsl_init_tty()) < 0) 4495 goto error; 4496 4497 return 0; 4498 4499error: 4500 synclink_cleanup(); 4501 return rc; 4502} 4503 4504static void __exit synclink_exit(void) 4505{ 4506 synclink_cleanup(); 4507} 4508 4509module_init(synclink_init); 
4510module_exit(synclink_exit); 4511 4512/* 4513 * usc_RTCmd() 4514 * 4515 * Issue a USC Receive/Transmit command to the 4516 * Channel Command/Address Register (CCAR). 4517 * 4518 * Notes: 4519 * 4520 * The command is encoded in the most significant 5 bits <15..11> 4521 * of the CCAR value. Bits <10..7> of the CCAR must be preserved 4522 * and Bits <6..0> must be written as zeros. 4523 * 4524 * Arguments: 4525 * 4526 * info pointer to device information structure 4527 * Cmd command mask (use symbolic macros) 4528 * 4529 * Return Value: 4530 * 4531 * None 4532 */ 4533static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ) 4534{ 4535 /* output command to CCAR in bits <15..11> */ 4536 /* preserve bits <10..7>, bits <6..0> must be zero */ 4537 4538 outw( Cmd + info->loopback_bits, info->io_base + CCAR ); 4539 4540 /* Read to flush write to CCAR */ 4541 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4542 inw( info->io_base + CCAR ); 4543 4544} /* end of usc_RTCmd() */ 4545 4546/* 4547 * usc_DmaCmd() 4548 * 4549 * Issue a DMA command to the DMA Command/Address Register (DCAR). 
4550 * 4551 * Arguments: 4552 * 4553 * info pointer to device information structure 4554 * Cmd DMA command mask (usc_DmaCmd_XX Macros) 4555 * 4556 * Return Value: 4557 * 4558 * None 4559 */ 4560static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ) 4561{ 4562 /* write command mask to DCAR */ 4563 outw( Cmd + info->mbre_bit, info->io_base ); 4564 4565 /* Read to flush write to DCAR */ 4566 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4567 inw( info->io_base ); 4568 4569} /* end of usc_DmaCmd() */ 4570 4571/* 4572 * usc_OutDmaReg() 4573 * 4574 * Write a 16-bit value to a USC DMA register 4575 * 4576 * Arguments: 4577 * 4578 * info pointer to device info structure 4579 * RegAddr register address (number) for write 4580 * RegValue 16-bit value to write to register 4581 * 4582 * Return Value: 4583 * 4584 * None 4585 * 4586 */ 4587static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4588{ 4589 /* Note: The DCAR is located at the adapter base address */ 4590 /* Note: must preserve state of BIT8 in DCAR */ 4591 4592 outw( RegAddr + info->mbre_bit, info->io_base ); 4593 outw( RegValue, info->io_base ); 4594 4595 /* Read to flush write to DCAR */ 4596 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4597 inw( info->io_base ); 4598 4599} /* end of usc_OutDmaReg() */ 4600 4601/* 4602 * usc_InDmaReg() 4603 * 4604 * Read a 16-bit value from a DMA register 4605 * 4606 * Arguments: 4607 * 4608 * info pointer to device info structure 4609 * RegAddr register address (number) to read from 4610 * 4611 * Return Value: 4612 * 4613 * The 16-bit value read from register 4614 * 4615 */ 4616static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr ) 4617{ 4618 /* Note: The DCAR is located at the adapter base address */ 4619 /* Note: must preserve state of BIT8 in DCAR */ 4620 4621 outw( RegAddr + info->mbre_bit, info->io_base ); 4622 return inw( info->io_base ); 4623 4624} /* end of usc_InDmaReg() */ 4625 4626/* 4627 * 4628 * usc_OutReg() 4629 * 4630 * Write a 
16-bit value to a USC serial channel register 4631 * 4632 * Arguments: 4633 * 4634 * info pointer to device info structure 4635 * RegAddr register address (number) to write to 4636 * RegValue 16-bit value to write to register 4637 * 4638 * Return Value: 4639 * 4640 * None 4641 * 4642 */ 4643static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4644{ 4645 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4646 outw( RegValue, info->io_base + CCAR ); 4647 4648 /* Read to flush write to CCAR */ 4649 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4650 inw( info->io_base + CCAR ); 4651 4652} /* end of usc_OutReg() */ 4653 4654/* 4655 * usc_InReg() 4656 * 4657 * Reads a 16-bit value from a USC serial channel register 4658 * 4659 * Arguments: 4660 * 4661 * info pointer to device extension 4662 * RegAddr register address (number) to read from 4663 * 4664 * Return Value: 4665 * 4666 * 16-bit value read from register 4667 */ 4668static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) 4669{ 4670 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4671 return inw( info->io_base + CCAR ); 4672 4673} /* end of usc_InReg() */ 4674 4675/* usc_set_sdlc_mode() 4676 * 4677 * Set up the adapter for SDLC DMA communications. 4678 * 4679 * Arguments: info pointer to device instance data 4680 * Return Value: NONE 4681 */ 4682static void usc_set_sdlc_mode( struct mgsl_struct *info ) 4683{ 4684 u16 RegValue; 4685 int PreSL1660; 4686 4687 /* 4688 * determine if the IUSC on the adapter is pre-SL1660. If 4689 * not, take advantage of the UnderWait feature of more 4690 * modern chips. If an underrun occurs and this bit is set, 4691 * the transmitter will idle the programmed idle pattern 4692 * until the driver has time to service the underrun. Otherwise, 4693 * the dma controller may get the cycles previously requested 4694 * and begin transmitting queued tx data. 
4695 */ 4696 usc_OutReg(info,TMCR,0x1f); 4697 RegValue=usc_InReg(info,TMDR); 4698 if ( RegValue == IUSC_PRE_SL1660 ) 4699 PreSL1660 = 1; 4700 else 4701 PreSL1660 = 0; 4702 4703 4704 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 4705 { 4706 /* 4707 ** Channel Mode Register (CMR) 4708 ** 4709 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun 4710 ** <13> 0 0 = Transmit Disabled (initially) 4711 ** <12> 0 1 = Consecutive Idles share common 0 4712 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop 4713 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling 4714 ** <3..0> 0110 Receiver Mode = HDLC/SDLC 4715 ** 4716 ** 1000 1110 0000 0110 = 0x8e06 4717 */ 4718 RegValue = 0x8e06; 4719 4720 /*-------------------------------------------------- 4721 * ignore user options for UnderRun Actions and 4722 * preambles 4723 *--------------------------------------------------*/ 4724 } 4725 else 4726 { 4727 /* Channel mode Register (CMR) 4728 * 4729 * <15..14> 00 Tx Sub modes, Underrun Action 4730 * <13> 0 1 = Send Preamble before opening flag 4731 * <12> 0 1 = Consecutive Idles share common 0 4732 * <11..8> 0110 Transmitter mode = HDLC/SDLC 4733 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling 4734 * <3..0> 0110 Receiver mode = HDLC/SDLC 4735 * 4736 * 0000 0110 0000 0110 = 0x0606 4737 */ 4738 if (info->params.mode == MGSL_MODE_RAW) { 4739 RegValue = 0x0001; /* Set Receive mode = external sync */ 4740 4741 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ 4742 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); 4743 4744 /* 4745 * TxSubMode: 4746 * CMR <15> 0 Don't send CRC on Tx Underrun 4747 * CMR <14> x undefined 4748 * CMR <13> 0 Send preamble before openning sync 4749 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength 4750 * 4751 * TxMode: 4752 * CMR <11-8) 0100 MonoSync 4753 * 4754 * 0x00 0100 xxxx xxxx 04xx 4755 */ 4756 RegValue |= 0x0400; 4757 } 4758 else { 4759 4760 RegValue = 0x0606; 4761 4762 if ( info->params.flags & 
HDLC_FLAG_UNDERRUN_ABORT15 ) 4763 RegValue |= BIT14; 4764 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) 4765 RegValue |= BIT15; 4766 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) 4767 RegValue |= BIT15 + BIT14; 4768 } 4769 4770 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) 4771 RegValue |= BIT13; 4772 } 4773 4774 if ( info->params.mode == MGSL_MODE_HDLC && 4775 (info->params.flags & HDLC_FLAG_SHARE_ZERO) ) 4776 RegValue |= BIT12; 4777 4778 if ( info->params.addr_filter != 0xff ) 4779 { 4780 /* set up receive address filtering */ 4781 usc_OutReg( info, RSR, info->params.addr_filter ); 4782 RegValue |= BIT4; 4783 } 4784 4785 usc_OutReg( info, CMR, RegValue ); 4786 info->cmr_value = RegValue; 4787 4788 /* Receiver mode Register (RMR) 4789 * 4790 * <15..13> 000 encoding 4791 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4792 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) 4793 * <9> 0 1 = Include Receive chars in CRC 4794 * <8> 1 1 = Use Abort/PE bit as abort indicator 4795 * <7..6> 00 Even parity 4796 * <5> 0 parity disabled 4797 * <4..2> 000 Receive Char Length = 8 bits 4798 * <1..0> 00 Disable Receiver 4799 * 4800 * 0000 0101 0000 0000 = 0x0500 4801 */ 4802 4803 RegValue = 0x0500; 4804 4805 switch ( info->params.encoding ) { 4806 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4807 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4808 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4809 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4810 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4811 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4812 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4813 } 4814 4815 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4816 RegValue |= BIT9; 4817 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4818 RegValue |= ( BIT12 | BIT10 | 
BIT9 ); 4819 4820 usc_OutReg( info, RMR, RegValue ); 4821 4822 /* Set the Receive count Limit Register (RCLR) to 0xffff. */ 4823 /* When an opening flag of an SDLC frame is recognized the */ 4824 /* Receive Character count (RCC) is loaded with the value in */ 4825 /* RCLR. The RCC is decremented for each received byte. The */ 4826 /* value of RCC is stored after the closing flag of the frame */ 4827 /* allowing the frame size to be computed. */ 4828 4829 usc_OutReg( info, RCLR, RCLRVALUE ); 4830 4831 usc_RCmd( info, RCmd_SelectRicrdma_level ); 4832 4833 /* Receive Interrupt Control Register (RICR) 4834 * 4835 * <15..8> ? RxFIFO DMA Request Level 4836 * <7> 0 Exited Hunt IA (Interrupt Arm) 4837 * <6> 0 Idle Received IA 4838 * <5> 0 Break/Abort IA 4839 * <4> 0 Rx Bound IA 4840 * <3> 1 Queued status reflects oldest 2 bytes in FIFO 4841 * <2> 0 Abort/PE IA 4842 * <1> 1 Rx Overrun IA 4843 * <0> 0 Select TC0 value for readback 4844 * 4845 * 0000 0000 0000 1000 = 0x000a 4846 */ 4847 4848 /* Carry over the Exit Hunt and Idle Received bits */ 4849 /* in case they have been armed by usc_ArmEvents. 
*/ 4850 4851 RegValue = usc_InReg( info, RICR ) & 0xc0; 4852 4853 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4854 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); 4855 else 4856 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) ); 4857 4858 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ 4859 4860 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 4861 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 4862 4863 /* Transmit mode Register (TMR) 4864 * 4865 * <15..13> 000 encoding 4866 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4867 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) 4868 * <9> 0 1 = Tx CRC Enabled 4869 * <8> 0 1 = Append CRC to end of transmit frame 4870 * <7..6> 00 Transmit parity Even 4871 * <5> 0 Transmit parity Disabled 4872 * <4..2> 000 Tx Char Length = 8 bits 4873 * <1..0> 00 Disable Transmitter 4874 * 4875 * 0000 0100 0000 0000 = 0x0400 4876 */ 4877 4878 RegValue = 0x0400; 4879 4880 switch ( info->params.encoding ) { 4881 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4882 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4883 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4884 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4885 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4886 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4887 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4888 } 4889 4890 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4891 RegValue |= BIT9 + BIT8; 4892 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4893 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); 4894 4895 usc_OutReg( info, TMR, RegValue ); 4896 4897 usc_set_txidle( info ); 4898 4899 4900 usc_TCmd( info, TCmd_SelectTicrdma_level ); 4901 4902 /* Transmit Interrupt Control Register (TICR) 4903 * 4904 * <15..8> ? 
Transmit FIFO DMA Level 4905 * <7> 0 Present IA (Interrupt Arm) 4906 * <6> 0 Idle Sent IA 4907 * <5> 1 Abort Sent IA 4908 * <4> 1 EOF/EOM Sent IA 4909 * <3> 0 CRC Sent IA 4910 * <2> 1 1 = Wait for SW Trigger to Start Frame 4911 * <1> 1 Tx Underrun IA 4912 * <0> 0 TC0 constant on read back 4913 * 4914 * 0000 0000 0011 0110 = 0x0036 4915 */ 4916 4917 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4918 usc_OutReg( info, TICR, 0x0736 ); 4919 else 4920 usc_OutReg( info, TICR, 0x1436 ); 4921 4922 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 4923 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 4924 4925 /* 4926 ** Transmit Command/Status Register (TCSR) 4927 ** 4928 ** <15..12> 0000 TCmd 4929 ** <11> 0/1 UnderWait 4930 ** <10..08> 000 TxIdle 4931 ** <7> x PreSent 4932 ** <6> x IdleSent 4933 ** <5> x AbortSent 4934 ** <4> x EOF/EOM Sent 4935 ** <3> x CRC Sent 4936 ** <2> x All Sent 4937 ** <1> x TxUnder 4938 ** <0> x TxEmpty 4939 ** 4940 ** 0000 0000 0000 0000 = 0x0000 4941 */ 4942 info->tcsr_value = 0; 4943 4944 if ( !PreSL1660 ) 4945 info->tcsr_value |= TCSR_UNDERWAIT; 4946 4947 usc_OutReg( info, TCSR, info->tcsr_value ); 4948 4949 /* Clock mode Control Register (CMCR) 4950 * 4951 * <15..14> 00 counter 1 Source = Disabled 4952 * <13..12> 00 counter 0 Source = Disabled 4953 * <11..10> 11 BRG1 Input is TxC Pin 4954 * <9..8> 11 BRG0 Input is TxC Pin 4955 * <7..6> 01 DPLL Input is BRG1 Output 4956 * <5..3> XXX TxCLK comes from Port 0 4957 * <2..0> XXX RxCLK comes from Port 1 4958 * 4959 * 0000 1111 0111 0111 = 0x0f77 4960 */ 4961 4962 RegValue = 0x0f40; 4963 4964 if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) 4965 RegValue |= 0x0003; /* RxCLK from DPLL */ 4966 else if ( info->params.flags & HDLC_FLAG_RXC_BRG ) 4967 RegValue |= 0x0004; /* RxCLK from BRG0 */ 4968 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) 4969 RegValue |= 0x0006; /* RxCLK from TXC Input */ 4970 else 4971 RegValue |= 0x0007; /* RxCLK from Port1 */ 4972 4973 if ( info->params.flags & HDLC_FLAG_TXC_DPLL 
) 4974 RegValue |= 0x0018; /* TxCLK from DPLL */ 4975 else if ( info->params.flags & HDLC_FLAG_TXC_BRG ) 4976 RegValue |= 0x0020; /* TxCLK from BRG0 */ 4977 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN) 4978 RegValue |= 0x0038; /* RxCLK from TXC Input */ 4979 else 4980 RegValue |= 0x0030; /* TxCLK from Port0 */ 4981 4982 usc_OutReg( info, CMCR, RegValue ); 4983 4984 4985 /* Hardware Configuration Register (HCR) 4986 * 4987 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4 4988 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div 4989 * <12> 0 CVOK:0=report code violation in biphase 4990 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4 4991 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level 4992 * <7..6> 00 reserved 4993 * <5> 0 BRG1 mode:0=continuous,1=single cycle 4994 * <4> X BRG1 Enable 4995 * <3..2> 00 reserved 4996 * <1> 0 BRG0 mode:0=continuous,1=single cycle 4997 * <0> 0 BRG0 Enable 4998 */ 4999 5000 RegValue = 0x0000; 5001 5002 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) { 5003 u32 XtalSpeed; 5004 u32 DpllDivisor; 5005 u16 Tc; 5006 5007 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */ 5008 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */ 5009 5010 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 5011 XtalSpeed = 11059200; 5012 else 5013 XtalSpeed = 14745600; 5014 5015 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { 5016 DpllDivisor = 16; 5017 RegValue |= BIT10; 5018 } 5019 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { 5020 DpllDivisor = 8; 5021 RegValue |= BIT11; 5022 } 5023 else 5024 DpllDivisor = 32; 5025 5026 /* Tc = (Xtal/Speed) - 1 */ 5027 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 5028 /* then rounding up gives a more precise time constant. Instead */ 5029 /* of rounding up and then subtracting 1 we just don't subtract */ 5030 /* the one in this case. 
*/ 5031 5032 /*-------------------------------------------------- 5033 * ejz: for DPLL mode, application should use the 5034 * same clock speed as the partner system, even 5035 * though clocking is derived from the input RxData. 5036 * In case the user uses a 0 for the clock speed, 5037 * default to 0xffffffff and don't try to divide by 5038 * zero 5039 *--------------------------------------------------*/ 5040 if ( info->params.clock_speed ) 5041 { 5042 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); 5043 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) 5044 / info->params.clock_speed) ) 5045 Tc--; 5046 } 5047 else 5048 Tc = -1; 5049 5050 5051 /* Write 16-bit Time Constant for BRG1 */ 5052 usc_OutReg( info, TC1R, Tc ); 5053 5054 RegValue |= BIT4; /* enable BRG1 */ 5055 5056 switch ( info->params.encoding ) { 5057 case HDLC_ENCODING_NRZ: 5058 case HDLC_ENCODING_NRZB: 5059 case HDLC_ENCODING_NRZI_MARK: 5060 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; 5061 case HDLC_ENCODING_BIPHASE_MARK: 5062 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; 5063 case HDLC_ENCODING_BIPHASE_LEVEL: 5064 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break; 5065 } 5066 } 5067 5068 usc_OutReg( info, HCR, RegValue ); 5069 5070 5071 /* Channel Control/status Register (CCSR) 5072 * 5073 * <15> X RCC FIFO Overflow status (RO) 5074 * <14> X RCC FIFO Not Empty status (RO) 5075 * <13> 0 1 = Clear RCC FIFO (WO) 5076 * <12> X DPLL Sync (RW) 5077 * <11> X DPLL 2 Missed Clocks status (RO) 5078 * <10> X DPLL 1 Missed Clock status (RO) 5079 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 5080 * <7> X SDLC Loop On status (RO) 5081 * <6> X SDLC Loop Send status (RO) 5082 * <5> 1 Bypass counters for TxClk and RxClk (RW) 5083 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 5084 * <1..0> 00 reserved 5085 * 5086 * 0000 0000 0010 0000 = 0x0020 5087 */ 5088 5089 usc_OutReg( info, CCSR, 0x1020 ); 5090 5091 5092 if ( 
info->params.flags & HDLC_FLAG_AUTO_CTS ) { 5093 usc_OutReg( info, SICR, 5094 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); 5095 } 5096 5097 5098 /* enable Master Interrupt Enable bit (MIE) */ 5099 usc_EnableMasterIrqBit( info ); 5100 5101 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA + 5102 TRANSMIT_STATUS + TRANSMIT_DATA + MISC); 5103 5104 /* arm RCC underflow interrupt */ 5105 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); 5106 usc_EnableInterrupts(info, MISC); 5107 5108 info->mbre_bit = 0; 5109 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5110 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5111 info->mbre_bit = BIT8; 5112 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ 5113 5114 if (info->bus_type == MGSL_BUS_TYPE_ISA) { 5115 /* Enable DMAEN (Port 7, Bit 14) */ 5116 /* This connects the DMA request signal to the ISA bus */ 5117 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); 5118 } 5119 5120 /* DMA Control Register (DCR) 5121 * 5122 * <15..14> 10 Priority mode = Alternating Tx/Rx 5123 * 01 Rx has priority 5124 * 00 Tx has priority 5125 * 5126 * <13> 1 Enable Priority Preempt per DCR<15..14> 5127 * (WARNING DCR<11..10> must be 00 when this is 1) 5128 * 0 Choose activate channel per DCR<11..10> 5129 * 5130 * <12> 0 Little Endian for Array/List 5131 * <11..10> 00 Both Channels can use each bus grant 5132 * <9..6> 0000 reserved 5133 * <5> 0 7 CLK - Minimum Bus Re-request Interval 5134 * <4> 0 1 = drive D/C and S/D pins 5135 * <3> 1 1 = Add one wait state to all DMA cycles. 5136 * <2> 0 1 = Strobe /UAS on every transfer. 
5137 * <1..0> 11 Addr incrementing only affects LS24 bits 5138 * 5139 * 0110 0000 0000 1011 = 0x600b 5140 */ 5141 5142 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 5143 /* PCI adapter does not need DMA wait state */ 5144 usc_OutDmaReg( info, DCR, 0xa00b ); 5145 } 5146 else 5147 usc_OutDmaReg( info, DCR, 0x800b ); 5148 5149 5150 /* Receive DMA mode Register (RDMR) 5151 * 5152 * <15..14> 11 DMA mode = Linked List Buffer mode 5153 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry 5154 * <12> 1 Clear count of List Entry after fetching 5155 * <11..10> 00 Address mode = Increment 5156 * <9> 1 Terminate Buffer on RxBound 5157 * <8> 0 Bus Width = 16bits 5158 * <7..0> ? status Bits (write as 0s) 5159 * 5160 * 1111 0010 0000 0000 = 0xf200 5161 */ 5162 5163 usc_OutDmaReg( info, RDMR, 0xf200 ); 5164 5165 5166 /* Transmit DMA mode Register (TDMR) 5167 * 5168 * <15..14> 11 DMA mode = Linked List Buffer mode 5169 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry 5170 * <12> 1 Clear count of List Entry after fetching 5171 * <11..10> 00 Address mode = Increment 5172 * <9> 1 Terminate Buffer on end of frame 5173 * <8> 0 Bus Width = 16bits 5174 * <7..0> ? 
status Bits (Read Only so write as 0) 5175 * 5176 * 1111 0010 0000 0000 = 0xf200 5177 */ 5178 5179 usc_OutDmaReg( info, TDMR, 0xf200 ); 5180 5181 5182 /* DMA Interrupt Control Register (DICR) 5183 * 5184 * <15> 1 DMA Interrupt Enable 5185 * <14> 0 1 = Disable IEO from USC 5186 * <13> 0 1 = Don't provide vector during IntAck 5187 * <12> 1 1 = Include status in Vector 5188 * <10..2> 0 reserved, Must be 0s 5189 * <1> 0 1 = Rx DMA Interrupt Enabled 5190 * <0> 0 1 = Tx DMA Interrupt Enabled 5191 * 5192 * 1001 0000 0000 0000 = 0x9000 5193 */ 5194 5195 usc_OutDmaReg( info, DICR, 0x9000 ); 5196 5197 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ 5198 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ 5199 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ 5200 5201 /* Channel Control Register (CCR) 5202 * 5203 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) 5204 * <13> 0 Trigger Tx on SW Command Disabled 5205 * <12> 0 Flag Preamble Disabled 5206 * <11..10> 00 Preamble Length 5207 * <9..8> 00 Preamble Pattern 5208 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) 5209 * <5> 0 Trigger Rx on SW Command Disabled 5210 * <4..0> 0 reserved 5211 * 5212 * 1000 0000 1000 0000 = 0x8080 5213 */ 5214 5215 RegValue = 0x8080; 5216 5217 switch ( info->params.preamble_length ) { 5218 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; 5219 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; 5220 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break; 5221 } 5222 5223 switch ( info->params.preamble ) { 5224 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break; 5225 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; 5226 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; 5227 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break; 5228 } 5229 5230 usc_OutReg( info, CCR, RegValue ); 5231 5232 5233 /* 5234 * Burst/Dwell Control Register 5235 * 5236 * <15..8> 0x20 
Maximum number of transfers per bus grant
 * <7..0>	0x00	Maximum number of clock cycles per bus grant
 */

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* don't limit bus occupancy on PCI adapter */
		usc_OutDmaReg( info, BDCR, 0x0000 );
	}
	else
		usc_OutDmaReg( info, BDCR, 0x2000 );

	usc_stop_transmitter(info);
	usc_stop_receiver(info);

}	/* end of usc_set_sdlc_mode() */

/* usc_enable_loopback()
 *
 * Set the 16C32 for internal loopback mode.
 * The TxCLK and RxCLK signals are generated from the BRG0 and
 * the TxD is looped back to the RxD internally.
 *
 * Arguments:		info	pointer to device instance data
 *			enable	1 = enable loopback, 0 = disable
 * Return Value:	None
 */
static void usc_enable_loopback(struct mgsl_struct *info, int enable)
{
	if (enable) {
		/* blank external TXD output */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));

		/* Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 * 0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );

		/* Write 16-bit Time Constant for BRG0 */
		/* use clock speed if available, otherwise use 8 for diagnostics */
		/* (PCI cards use an 11.0592 MHz xtal, ISA cards 14.7456 MHz) */
		if (info->params.clock_speed) {
			if (info->bus_type == MGSL_BUS_TYPE_PCI)
				usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
			else
				usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
		} else
			usc_OutReg(info, TC0R, (u16)8);

		/* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
		   mode = Continuous Set Bit 0 to enable BRG0.  */

		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
		usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));

		/* set Internal Data loopback mode */
		info->loopback_bits = 0x300;
		outw( 0x0300, info->io_base + CCAR );
	} else {
		/* enable external TXD output */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));

		/* clear Internal Data loopback mode */
		info->loopback_bits = 0;
		outw( 0,info->io_base + CCAR );
	}

}	/* end of usc_enable_loopback() */

/* usc_enable_aux_clock()
 *
 * Enable the AUX clock output at the specified frequency.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *	data_rate	data rate of clock in bits per second
 *			A data rate of 0 disables the AUX clock.
 *
 * Return Value:	None
 */
static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
{
	u32 XtalSpeed;
	u16 Tc;

	if ( data_rate ) {
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			XtalSpeed = 11059200;
		else
			XtalSpeed = 14745600;

		/* Tc = (Xtal/Speed) - 1 */
		/* If twice the remainder of (Xtal/Speed) is greater than Speed */
		/* then rounding up gives a more precise time constant. Instead */
		/* of rounding up and then subtracting 1 we just don't subtract */
		/* the one in this case. */

		Tc = (u16)(XtalSpeed/data_rate);
		if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
			Tc--;

		/* Write 16-bit Time Constant for BRG0 */
		usc_OutReg( info, TC0R, Tc );

		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
		 */

		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
		usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

}	/* end of usc_enable_aux_clock() */

/*
 *
 * usc_process_rxoverrun_sync()
 *
 * This function processes a receive overrun by resetting the
 * receive DMA buffers and issuing a Purge Rx FIFO command
 * to allow the receiver to continue receiving.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *
 * Return Value: None
 */
static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
{
	int start_index;
	int end_index;
	int frame_start_index;
	int start_of_frame_found = FALSE;
	int end_of_frame_found = FALSE;
	int reprogram_dma = FALSE;

	DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
	u32 phys_addr;

	usc_DmaCmd( info, DmaCmd_PauseRxChannel );
	usc_RCmd( info, RCmd_EnterHuntmode );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* CurrentRxBuffer points to the 1st buffer of the next */
	/* possibly available receive frame. */

	frame_start_index = start_index = end_index = info->current_rx_buffer;

	/* Search for an unfinished string of buffers. This means */
	/* that a receive frame started (at least one buffer with */
	/* count set to zero) but there is no terminating buffer */
	/* (status set to non-zero). */

	while( !buffer_list[end_index].count )
	{
		/* Count field has been reset to zero by 16C32. */
		/* This buffer is currently in use.
		 */

		if ( !start_of_frame_found )
		{
			start_of_frame_found = TRUE;
			frame_start_index = end_index;
			end_of_frame_found = FALSE;
		}

		if ( buffer_list[end_index].status )
		{
			/* Status field has been set by 16C32. */
			/* This is the last buffer of a received frame. */

			/* We want to leave the buffers for this frame intact. */
			/* Move on to next possible frame. */

			start_of_frame_found = FALSE;
			end_of_frame_found = TRUE;
		}

		/* advance to next buffer entry in linked list */
		end_index++;
		if ( end_index == info->rx_buffer_count )
			end_index = 0;

		if ( start_index == end_index )
		{
			/* The entire list has been searched with all Counts == 0 and */
			/* all Status == 0. The receive buffers are */
			/* completely screwed, reset all receive buffers! */
			mgsl_reset_rx_dma_buffers( info );
			frame_start_index = 0;
			start_of_frame_found = FALSE;
			reprogram_dma = TRUE;
			break;
		}
	}

	if ( start_of_frame_found && !end_of_frame_found )
	{
		/* There is an unfinished string of receive DMA buffers */
		/* as a result of the receiver overrun. */

		/* Reset the buffers for the unfinished frame */
		/* and reprogram the receive DMA controller to start */
		/* at the 1st buffer of unfinished frame. */

		start_index = frame_start_index;

		do
		{
			/* NOTE(review): resets the entry's count via an unsigned long
			 * alias of the count field — presumably count is the low part
			 * of a 32-bit field pair in DMABUFFERENTRY; confirm the struct
			 * layout before changing this. */
			*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;

			/* Adjust index for wrap around. */
			if ( start_index == info->rx_buffer_count )
				start_index = 0;

		} while( start_index != end_index );

		reprogram_dma = TRUE;
	}

	if ( reprogram_dma )
	{
		usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
		usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);

		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}
	else
	{
		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
		usc_RTCmd( info, RTCmd_PurgeRxFifo );
	}

}	/* end of usc_process_rxoverrun_sync() */

/* usc_stop_receiver()
 *
 *	Disable USC receiver
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_receiver( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* Disable receive DMA channel.
	 */
	/* This also disables receive DMA channel interrupts */
	usc_DmaCmd( info, DmaCmd_ResetRxChannel );

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
	usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

	/* This empties the receive FIFO and loads the RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	info->rx_enabled = 0;
	info->rx_overflow = 0;
	info->rx_rcc_underrun = 0;

}	/* end of stop_receiver() */

/* usc_start_receiver()
 *
 *	Enable the USC receiver
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_receiver( struct mgsl_struct *info )
{
	u32 phys_addr;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	mgsl_reset_rx_dma_buffers( info );
	usc_stop_receiver( info );

	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* DMA mode Transfers */
		/* Program the DMA controller. */
		/* Enable the DMA controller end of buffer interrupt. */

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[0].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	} else {
		/* async mode: byte-at-a-time interrupt driven receive */
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
		usc_EnableInterrupts(info, RECEIVE_DATA);

		usc_RTCmd( info, RTCmd_PurgeRxFifo );
		usc_RCmd( info, RCmd_EnterHuntmode );

		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}

	usc_OutReg( info, CCSR, 0x1020 );

	info->rx_enabled = 1;

}	/* end of usc_start_receiver() */

/* usc_start_transmitter()
 *
 *	Enable the USC transmitter and send a transmit frame if
 *	one is loaded in the DMA buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_transmitter( struct mgsl_struct *info )
{
	u32 phys_addr;
	unsigned int FrameSize;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if ( info->xmit_cnt ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes.
		 */

		info->drop_rts_on_tx_done = 0;

		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
			usc_get_serial_signals( info );
			if ( !(info->serial_signals & SerialSignal_RTS) ) {
				info->serial_signals |= SerialSignal_RTS;
				usc_set_serial_signals( info );
				info->drop_rts_on_tx_done = 1;
			}
		}

		if ( info->params.mode == MGSL_MODE_ASYNC ) {
			if ( !info->tx_active ) {
				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
				usc_EnableInterrupts(info, TRANSMIT_DATA);
				usc_load_txfifo(info);
			}
		} else {
			/* Disable transmit DMA controller while programming. */
			usc_DmaCmd( info, DmaCmd_ResetTxChannel );

			/* Transmit DMA buffer is loaded, so program USC */
			/* to send the frame contained in the buffers.	*/

			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;

			/* if operating in Raw sync mode, reset the rcc component
			 * of the tx dma buffer entry, otherwise, the serial controller
			 * will send a closing sync char after this count.
			 */
			if ( info->params.mode == MGSL_MODE_RAW )
				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;

			/* Program the Transmit Character Length Register (TCLR) */
			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
			usc_OutReg( info, TCLR, (u16)FrameSize );

			usc_RTCmd( info, RTCmd_PurgeTxFifo );

			/* Program the address of the 1st DMA Buffer Entry in linked list */
			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );

			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
			usc_EnableInterrupts( info, TRANSMIT_STATUS );

			if ( info->params.mode == MGSL_MODE_RAW &&
					info->num_tx_dma_buffers > 1 ) {
			   /* When running external sync mode, attempt to 'stream' transmit  */
			   /* by filling tx dma buffers as they become available. To do this */
			   /* we need to enable Tx DMA EOB Status interrupts :		     */
			   /*								     */
			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */

			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
			}

			/* Initialize Transmit DMA Channel */
			usc_DmaCmd( info, DmaCmd_InitTxChannel );

			usc_TCmd( info, TCmd_SendFrame );

			/* 5 second watchdog in case the frame never completes */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		info->tx_active = 1;
	}

	if ( !info->tx_enabled ) {
		info->tx_enabled = 1;
		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
		else
			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
	}

}	/* end of usc_start_transmitter() */

/* usc_stop_transmitter()
 *
 *	Stops the transmitter and DMA
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_transmitter( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	del_timer(&info->tx_timer);

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );

	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	info->tx_enabled = 0;
	info->tx_active = 0;

}	/* end of usc_stop_transmitter() */

/* usc_load_txfifo()
 *
 *	Fill the transmit FIFO until the FIFO is full or
 *	there is no more data to load.
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void usc_load_txfifo( struct mgsl_struct *info )
{
	int Fifocount;
	u8 TwoBytes[2];

	if ( !info->xmit_cnt && !info->x_char )
		return;

	/* Select transmit FIFO status readback in TICR */
	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	/* load the Transmit FIFO until FIFOs full or all data sent */

	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
			/* write a 16-bit word from transmit buffer to 16C32 */

			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);

			/* NOTE(review): byte pair is pushed out as one 16-bit word;
			 * presumably the controller was set to little endian byte
			 * ordering earlier (RTCmd_SelectLittleEndian in usc_reset). */
			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);

			info->xmit_cnt -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
				info->io_base + CCAR );

			if (info->x_char) {
				/* transmit pending high priority char */
				outw( info->x_char,info->io_base + CCAR );
				info->x_char = 0;
			} else {
				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
				info->xmit_cnt--;
			}
			info->icount.tx++;
		}
	}

}	/* end of usc_load_txfifo() */

/* usc_reset()
 *
 *	Reset the adapter to a known state and prepare it for further use.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_reset( struct mgsl_struct *info )
{
	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		int i;
		u32 readval;

		/* Set BIT30 of Misc Control Register */
		/* (Local Control Register 0x50) to force reset of USC. */

		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

		info->misc_ctrl_value |= BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/*
		 * Force at least 170ns delay before clearing
		 * reset bit. Each read from LCR takes at least
		 * 30ns so 10 times for 300ns to be safe.
		 */
		for(i=0;i<10;i++)
			readval = *MiscCtrl;

		info->misc_ctrl_value &= ~BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/* program local bus read/write strobe timing */
		*LCR0BRDR = BUS_DESCRIPTOR(
			1,		// Write Strobe Hold (0-3)
			2,		// Write Strobe Delay (0-3)
			2,		// Read Strobe Delay  (0-3)
			0,		// NWDD (Write data-data) (0-3)
			4,		// NWAD (Write Addr-data) (0-31)
			0,		// NXDA (Read/Write Data-Addr) (0-3)
			0,		// NRDD (Read Data-Data) (0-3)
			5		// NRAD (Read Addr-Data) (0-31)
			);
	} else {
		/* do HW reset */
		outb( 0,info->io_base + 8 );
	}

	info->mbre_bit = 0;
	info->loopback_bits = 0;
	info->usc_idle_mode = 0;

	/*
	 * Program the Bus Configuration Register (BCR)
	 *
	 * <15>		0	Don't use separate address
	 * <14..6>	0	reserved
	 * <5..4>	00	IAckmode = Default, don't care
	 * <3>		1	Bus Request Totem Pole output
	 * <2>		1	Use 16 Bit data bus
	 * <1>		0	IRQ Totem Pole output
	 * <0>		0	Don't Shift Right Addr
	 *
	 * 0000 0000 0000 1100 = 0x000c
	 *
	 * By writing to io_base + SDPIN the Wait/Ack pin is
	 * programmed to work as a Wait pin.
	 */

	outw( 0x000c,info->io_base + SDPIN );


	outw( 0,info->io_base );
	outw( 0,info->io_base + CCAR );

	/* select little endian byte ordering */
	usc_RTCmd( info, RTCmd_SelectLittleEndian );


	/* Port Control Register (PCR)
	 *
	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
	 * <11..10>	00	Port 5 is Input (No Connect, Don't Care)
	 * <9..8>	00	Port 4 is Input (No Connect, Don't Care)
	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
	 *
	 * 1111 0000 1111 0101 = 0xf0f5
	 */

	usc_OutReg( info, PCR, 0xf0f5 );


	/*
	 * Input/Output Control Register
	 *
	 * <15..14>	00	CTS is active low input
	 * <13..12>	00	DCD is active low input
	 * <11..10>	00	TxREQ pin is input (DSR)
	 * <9..8>	00	RxREQ pin is input (RI)
	 * <7..6>	00	TxD is output (Transmit Data)
	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
	 * <2..0>	100	RxC is Output (drive with BRG0)
	 *
	 * 0000 0000 0000 0100 = 0x0004
	 */

	usc_OutReg( info, IOCR, 0x0004 );

}	/* end of usc_reset() */

/* usc_set_async_mode()
 *
 *	Program adapter for asynchronous communications.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_async_mode( struct mgsl_struct *info )
{
	u16 RegValue;

	/* disable interrupts while programming USC */
	usc_DisableMasterIrqBit( info );

	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */

	usc_loopback_frame( info );

	/* Channel mode Register (CMR)
	 *
	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
	 * <13..12>	00	00 = 16X Clock
	 * <11..8>	0000	Transmitter mode = Asynchronous
	 * <7..6>	00	reserved?
	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
	 * <3..0>	0000	Receiver mode = Asynchronous
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;
	if ( info->params.stop_bits != 1 )
		RegValue |= BIT14;
	usc_OutReg( info, CMR, RegValue );


	/* Receiver mode Register (RMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Even parity
	 * <5>		0	parity disabled
	 * <4..2>	000	Receive Char Length = 8 bits
	 * <1..0>	00	Disable Receiver
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4+BIT3+BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;
	}

	usc_OutReg( info, RMR, RegValue );


	/* Set IRQ trigger level */

	usc_RCmd( info, RCmd_SelectRicrIntLevel );


	/* Receive Interrupt Control Register (RICR)
	 *
	 * <15..8>	?	RxFIFO IRQ Request Level
	 *
	 * Note: For async mode the receive FIFO level must be set
	 * to 0 to avoid the situation where the FIFO contains fewer bytes
	 * than the trigger level and no more data is expected.
5921 * 5922 * Arguments: info pointer to device instance data 5923 * Return Value: None 5924 */ 5925static void usc_set_async_mode( struct mgsl_struct *info ) 5926{ 5927 u16 RegValue; 5928 5929 /* disable interrupts while programming USC */ 5930 usc_DisableMasterIrqBit( info ); 5931 5932 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5933 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5934 5935 usc_loopback_frame( info ); 5936 5937 /* Channel mode Register (CMR) 5938 * 5939 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit 5940 * <13..12> 00 00 = 16X Clock 5941 * <11..8> 0000 Transmitter mode = Asynchronous 5942 * <7..6> 00 reserved? 5943 * <5..4> 00 Rx Sub modes, 00 = 16X Clock 5944 * <3..0> 0000 Receiver mode = Asynchronous 5945 * 5946 * 0000 0000 0000 0000 = 0x0 5947 */ 5948 5949 RegValue = 0; 5950 if ( info->params.stop_bits != 1 ) 5951 RegValue |= BIT14; 5952 usc_OutReg( info, CMR, RegValue ); 5953 5954 5955 /* Receiver mode Register (RMR) 5956 * 5957 * <15..13> 000 encoding = None 5958 * <12..08> 00000 reserved (Sync Only) 5959 * <7..6> 00 Even parity 5960 * <5> 0 parity disabled 5961 * <4..2> 000 Receive Char Length = 8 bits 5962 * <1..0> 00 Disable Receiver 5963 * 5964 * 0000 0000 0000 0000 = 0x0 5965 */ 5966 5967 RegValue = 0; 5968 5969 if ( info->params.data_bits != 8 ) 5970 RegValue |= BIT4+BIT3+BIT2; 5971 5972 if ( info->params.parity != ASYNC_PARITY_NONE ) { 5973 RegValue |= BIT5; 5974 if ( info->params.parity != ASYNC_PARITY_ODD ) 5975 RegValue |= BIT6; 5976 } 5977 5978 usc_OutReg( info, RMR, RegValue ); 5979 5980 5981 /* Set IRQ trigger level */ 5982 5983 usc_RCmd( info, RCmd_SelectRicrIntLevel ); 5984 5985 5986 /* Receive Interrupt Control Register (RICR) 5987 * 5988 * <15..8> ? RxFIFO IRQ Request Level 5989 * 5990 * Note: For async mode the receive FIFO level must be set 5991 * to 0 to avoid the situation where the FIFO contains fewer bytes 5992 * than the trigger level and no more data is expected. 
5993 * 5994 * <7> 0 Exited Hunt IA (Interrupt Arm) 5995 * <6> 0 Idle Received IA 5996 * <5> 0 Break/Abort IA 5997 * <4> 0 Rx Bound IA 5998 * <3> 0 Queued status reflects oldest byte in FIFO 5999 * <2> 0 Abort/PE IA 6000 * <1> 0 Rx Overrun IA 6001 * <0> 0 Select TC0 value for readback 6002 * 6003 * 0000 0000 0100 0000 = 0x0000 + (FIFOLEVEL in MSB) 6004 */ 6005 6006 usc_OutReg( info, RICR, 0x0000 ); 6007 6008 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 6009 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 6010 6011 6012 /* Transmit mode Register (TMR) 6013 * 6014 * <15..13> 000 encoding = None 6015 * <12..08> 00000 reserved (Sync Only) 6016 * <7..6> 00 Transmit parity Even 6017 * <5> 0 Transmit parity Disabled 6018 * <4..2> 000 Tx Char Length = 8 bits 6019 * <1..0> 00 Disable Transmitter 6020 * 6021 * 0000 0000 0000 0000 = 0x0 6022 */ 6023 6024 RegValue = 0; 6025 6026 if ( info->params.data_bits != 8 ) 6027 RegValue |= BIT4+BIT3+BIT2; 6028 6029 if ( info->params.parity != ASYNC_PARITY_NONE ) { 6030 RegValue |= BIT5; 6031 if ( info->params.parity != ASYNC_PARITY_ODD ) 6032 RegValue |= BIT6; 6033 } 6034 6035 usc_OutReg( info, TMR, RegValue ); 6036 6037 usc_set_txidle( info ); 6038 6039 6040 /* Set IRQ trigger level */ 6041 6042 usc_TCmd( info, TCmd_SelectTicrIntLevel ); 6043 6044 6045 /* Transmit Interrupt Control Register (TICR) 6046 * 6047 * <15..8> ? 
Transmit FIFO IRQ Level 6048 * <7> 0 Present IA (Interrupt Arm) 6049 * <6> 1 Idle Sent IA 6050 * <5> 0 Abort Sent IA 6051 * <4> 0 EOF/EOM Sent IA 6052 * <3> 0 CRC Sent IA 6053 * <2> 0 1 = Wait for SW Trigger to Start Frame 6054 * <1> 0 Tx Underrun IA 6055 * <0> 0 TC0 constant on read back 6056 * 6057 * 0000 0000 0100 0000 = 0x0040 6058 */ 6059 6060 usc_OutReg( info, TICR, 0x1f40 ); 6061 6062 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 6063 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 6064 6065 usc_enable_async_clock( info, info->params.data_rate ); 6066 6067 6068 /* Channel Control/status Register (CCSR) 6069 * 6070 * <15> X RCC FIFO Overflow status (RO) 6071 * <14> X RCC FIFO Not Empty status (RO) 6072 * <13> 0 1 = Clear RCC FIFO (WO) 6073 * <12> X DPLL in Sync status (RO) 6074 * <11> X DPLL 2 Missed Clocks status (RO) 6075 * <10> X DPLL 1 Missed Clock status (RO) 6076 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 6077 * <7> X SDLC Loop On status (RO) 6078 * <6> X SDLC Loop Send status (RO) 6079 * <5> 1 Bypass counters for TxClk and RxClk (RW) 6080 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 6081 * <1..0> 00 reserved 6082 * 6083 * 0000 0000 0010 0000 = 0x0020 6084 */ 6085 6086 usc_OutReg( info, CCSR, 0x0020 ); 6087 6088 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA + 6089 RECEIVE_DATA + RECEIVE_STATUS ); 6090 6091 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA + 6092 RECEIVE_DATA + RECEIVE_STATUS ); 6093 6094 usc_EnableMasterIrqBit( info ); 6095 6096 if (info->bus_type == MGSL_BUS_TYPE_ISA) { 6097 /* Enable INTEN (Port 6, Bit12) */ 6098 /* This connects the IRQ request signal to the ISA bus */ 6099 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12)); 6100 } 6101 6102 if (info->params.loopback) { 6103 info->loopback_bits = 0x300; 6104 outw(0x0300, info->io_base + CCAR); 6105 } 6106 6107} /* end of usc_set_async_mode() */ 6108 6109/* usc_loopback_frame() 6110 * 6111 * Loop back a small (2 
byte) dummy SDLC frame.
 *	Interrupts and DMA are NOT used. The purpose of this is to
 *	clear any 'stale' status info left over from running in async mode.
 *
 *	The 16C32 shows the strange behaviour of marking the 1st
 *	received SDLC frame with a CRC error even when there is no
 *	CRC error. To get around this a small dummy frame of 2 bytes
 *	is looped back when switching from async to sync mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_loopback_frame( struct mgsl_struct *info )
{
	int i;
	unsigned long oldmode = info->params.mode;

	/* temporarily force HDLC mode for the dummy frame */
	info->params.mode = MGSL_MODE_HDLC;

	usc_DisableMasterIrqBit( info );

	usc_set_sdlc_mode( info );
	usc_enable_loopback( info, 1 );

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, 0 );

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length = 8-Bits
	 * <9..8>	01	Preamble Pattern = flags
	 * <7..6>	10	Don't use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 * 0000 0001 0000 0000 = 0x0100
	 */

	usc_OutReg( info, CCR, 0x0100 );

	/* SETUP RECEIVER */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );
	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

	/* SETUP TRANSMITTER */
	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
	usc_OutReg( info, TCLR, 2 );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* unlatch Tx status bits, and start transmit channel. */
	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
	outw(0,info->io_base + DATAREG);

	/* ENABLE TRANSMITTER */
	usc_TCmd( info, TCmd_SendFrame );
	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

	/* WAIT FOR RECEIVE COMPLETE (bounded poll, no IRQs in use) */
	for (i=0 ; i<1000 ; i++)
		if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
			break;

	/* clear Internal Data loopback mode */
	usc_enable_loopback(info, 0);

	usc_EnableMasterIrqBit(info);

	info->params.mode = oldmode;

}	/* end of usc_loopback_frame() */

/* usc_set_sync_mode()	Programs the USC for SDLC communications.
 *
 * Arguments:		info	pointer to adapter info structure
 * Return Value:	None
 */
static void usc_set_sync_mode( struct mgsl_struct *info )
{
	usc_loopback_frame( info );
	usc_set_sdlc_mode( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	usc_enable_aux_clock(info, info->params.clock_speed);

	if (info->params.loopback)
		usc_enable_loopback(info,1);

}	/* end of usc_set_sync_mode() */

/* usc_set_txidle()	Set the HDLC idle mode for the transmitter.
6210 * 6211 * Arguments: info pointer to device instance data 6212 * Return Value: None 6213 */ 6214static void usc_set_txidle( struct mgsl_struct *info ) 6215{ 6216 u16 usc_idle_mode = IDLEMODE_FLAGS; 6217 6218 /* Map API idle mode to USC register bits */ 6219 6220 switch( info->idle_mode ){ 6221 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break; 6222 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break; 6223 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break; 6224 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break; 6225 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break; 6226 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break; 6227 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break; 6228 } 6229 6230 info->usc_idle_mode = usc_idle_mode; 6231 //usc_OutReg(info, TCSR, usc_idle_mode); 6232 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */ 6233 info->tcsr_value += usc_idle_mode; 6234 usc_OutReg(info, TCSR, info->tcsr_value); 6235 6236 /* 6237 * if SyncLink WAN adapter is running in external sync mode, the 6238 * transmitter has been set to Monosync in order to try to mimic 6239 * a true raw outbound bit stream. Monosync still sends an open/close 6240 * sync char at the start/end of a frame. 
Try to match those sync 6241 * patterns to the idle mode set here 6242 */ 6243 if ( info->params.mode == MGSL_MODE_RAW ) { 6244 unsigned char syncpat = 0; 6245 switch( info->idle_mode ) { 6246 case HDLC_TXIDLE_FLAGS: 6247 syncpat = 0x7e; 6248 break; 6249 case HDLC_TXIDLE_ALT_ZEROS_ONES: 6250 syncpat = 0x55; 6251 break; 6252 case HDLC_TXIDLE_ZEROS: 6253 case HDLC_TXIDLE_SPACE: 6254 syncpat = 0x00; 6255 break; 6256 case HDLC_TXIDLE_ONES: 6257 case HDLC_TXIDLE_MARK: 6258 syncpat = 0xff; 6259 break; 6260 case HDLC_TXIDLE_ALT_MARK_SPACE: 6261 syncpat = 0xaa; 6262 break; 6263 } 6264 6265 usc_SetTransmitSyncChars(info,syncpat,syncpat); 6266 } 6267 6268} /* end of usc_set_txidle() */ 6269 6270/* usc_get_serial_signals() 6271 * 6272 * Query the adapter for the state of the V24 status (input) signals. 6273 * 6274 * Arguments: info pointer to device instance data 6275 * Return Value: None 6276 */ 6277static void usc_get_serial_signals( struct mgsl_struct *info ) 6278{ 6279 u16 status; 6280 6281 /* clear all serial signals except DTR and RTS */ 6282 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS; 6283 6284 /* Read the Misc Interrupt status Register (MISR) to get */ 6285 /* the V24 status signals. */ 6286 6287 status = usc_InReg( info, MISR ); 6288 6289 /* set serial signal bits to reflect MISR */ 6290 6291 if ( status & MISCSTATUS_CTS ) 6292 info->serial_signals |= SerialSignal_CTS; 6293 6294 if ( status & MISCSTATUS_DCD ) 6295 info->serial_signals |= SerialSignal_DCD; 6296 6297 if ( status & MISCSTATUS_RI ) 6298 info->serial_signals |= SerialSignal_RI; 6299 6300 if ( status & MISCSTATUS_DSR ) 6301 info->serial_signals |= SerialSignal_DSR; 6302 6303} /* end of usc_get_serial_signals() */ 6304 6305/* usc_set_serial_signals() 6306 * 6307 * Set the state of DTR and RTS based on contents of 6308 * serial_signals member of device extension. 
6309 * 6310 * Arguments: info pointer to device instance data 6311 * Return Value: None 6312 */ 6313static void usc_set_serial_signals( struct mgsl_struct *info ) 6314{ 6315 u16 Control; 6316 unsigned char V24Out = info->serial_signals; 6317 6318 /* get the current value of the Port Control Register (PCR) */ 6319 6320 Control = usc_InReg( info, PCR ); 6321 6322 if ( V24Out & SerialSignal_RTS ) 6323 Control &= ~(BIT6); 6324 else 6325 Control |= BIT6; 6326 6327 if ( V24Out & SerialSignal_DTR ) 6328 Control &= ~(BIT4); 6329 else 6330 Control |= BIT4; 6331 6332 usc_OutReg( info, PCR, Control ); 6333 6334} /* end of usc_set_serial_signals() */ 6335 6336/* usc_enable_async_clock() 6337 * 6338 * Enable the async clock at the specified frequency. 6339 * 6340 * Arguments: info pointer to device instance data 6341 * data_rate data rate of clock in bps 6342 * 0 disables the AUX clock. 6343 * Return Value: None 6344 */ 6345static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate ) 6346{ 6347 if ( data_rate ) { 6348 /* 6349 * Clock mode Control Register (CMCR) 6350 * 6351 * <15..14> 00 counter 1 Disabled 6352 * <13..12> 00 counter 0 Disabled 6353 * <11..10> 11 BRG1 Input is TxC Pin 6354 * <9..8> 11 BRG0 Input is TxC Pin 6355 * <7..6> 01 DPLL Input is BRG1 Output 6356 * <5..3> 100 TxCLK comes from BRG0 6357 * <2..0> 100 RxCLK comes from BRG0 6358 * 6359 * 0000 1111 0110 0100 = 0x0f64 6360 */ 6361 6362 usc_OutReg( info, CMCR, 0x0f64 ); 6363 6364 6365 /* 6366 * Write 16-bit Time Constant for BRG0 6367 * Time Constant = (ClkSpeed / data_rate) - 1 6368 * ClkSpeed = 921600 (ISA), 691200 (PCI) 6369 */ 6370 6371 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 6372 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) ); 6373 else 6374 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) ); 6375 6376 6377 /* 6378 * Hardware Configuration Register (HCR) 6379 * Clear Bit 1, BRG0 mode = Continuous 6380 * Set Bit 0 to enable BRG0. 
6381 */ 6382 6383 usc_OutReg( info, HCR, 6384 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 6385 6386 6387 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 6388 6389 usc_OutReg( info, IOCR, 6390 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); 6391 } else { 6392 /* data rate == 0 so turn off BRG0 */ 6393 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); 6394 } 6395 6396} /* end of usc_enable_async_clock() */ 6397 6398/* 6399 * Buffer Structures: 6400 * 6401 * Normal memory access uses virtual addresses that can make discontiguous 6402 * physical memory pages appear to be contiguous in the virtual address 6403 * space (the processors memory mapping handles the conversions). 6404 * 6405 * DMA transfers require physically contiguous memory. This is because 6406 * the DMA system controller and DMA bus masters deal with memory using 6407 * only physical addresses. 6408 * 6409 * This causes a problem under Windows NT when large DMA buffers are 6410 * needed. Fragmentation of the nonpaged pool prevents allocations of 6411 * physically contiguous buffers larger than the PAGE_SIZE. 6412 * 6413 * However the 16C32 supports Bus Master Scatter/Gather DMA which 6414 * allows DMA transfers to physically discontiguous buffers. Information 6415 * about each data transfer buffer is contained in a memory structure 6416 * called a 'buffer entry'. A list of buffer entries is maintained 6417 * to track and control the use of the data transfer buffers. 6418 * 6419 * To support this strategy we will allocate sufficient PAGE_SIZE 6420 * contiguous memory buffers to allow for the total required buffer 6421 * space. 6422 * 6423 * The 16C32 accesses the list of buffer entries using Bus Master 6424 * DMA. Control information is read from the buffer entries by the 6425 * 16C32 to control data transfers. status information is written to 6426 * the buffer entries by the 16C32 to indicate the status of completed 6427 * transfers. 
6428 * 6429 * The CPU writes control information to the buffer entries to control 6430 * the 16C32 and reads status information from the buffer entries to 6431 * determine information about received and transmitted frames. 6432 * 6433 * Because the CPU and 16C32 (adapter) both need simultaneous access 6434 * to the buffer entries, the buffer entry memory is allocated with 6435 * HalAllocateCommonBuffer(). This restricts the size of the buffer 6436 * entry list to PAGE_SIZE. 6437 * 6438 * The actual data buffers on the other hand will only be accessed 6439 * by the CPU or the adapter but not by both simultaneously. This allows 6440 * Scatter/Gather packet based DMA procedures for using physically 6441 * discontiguous pages. 6442 */ 6443 6444/* 6445 * mgsl_reset_tx_dma_buffers() 6446 * 6447 * Set the count for all transmit buffers to 0 to indicate the 6448 * buffer is available for use and set the current buffer to the 6449 * first buffer. This effectively makes all buffers free and 6450 * discards any data in buffers. 
/*
 * mgsl_reset_tx_dma_buffers()
 *
 *	Set the count for all transmit buffers to 0 to indicate the
 *	buffer is available for use and set the current buffer to the
 *	first buffer. This effectively makes all buffers free and
 *	discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* Wide store clears the adjacent count/status fields of the
		 * buffer entry in a single access (the entry is shared with
		 * the bus-master DMA engine).
		 * NOTE(review): assumes sizeof(unsigned long) spans exactly
		 * the fields intended -- verify on 64-bit builds.
		 */
		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
	}

	info->current_tx_buffer = 0;
	info->start_tx_dma_buffer = 0;
	info->tx_dma_buffers_used = 0;

	/* also reset the intermediate tx holding buffer indexes/count */
	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_reset_tx_dma_buffers() */

/*
 * num_free_tx_dma_buffers()
 *
 *	returns the number of free tx dma buffers available
 *	(total buffers minus the ones currently holding queued data)
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	number of free tx dma buffers
 */
static int num_free_tx_dma_buffers(struct mgsl_struct *info)
{
	return info->tx_buffer_count - info->tx_dma_buffers_used;
}

/*
 * mgsl_reset_rx_dma_buffers()
 *
 *	Set the count for all receive buffers to DMABUFFERSIZE
 *	and set the current buffer to the first buffer. This effectively
 *	makes all buffers free and discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* wide store sets count and clears status in one access;
		 * equivalent to the commented-out field assignments below
		 */
		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
//		info->rx_buffer_list[i].count = DMABUFFERSIZE;
//		info->rx_buffer_list[i].status = 0;
	}

	info->current_rx_buffer = 0;

}	/* end of mgsl_reset_rx_dma_buffers() */

/*
 * mgsl_free_rx_frame_buffers()
 *
 *	Free the receive buffers used by a received SDLC
 *	frame such that the buffers can be reused.
 *
 * Arguments:
 *
 *	info			pointer to device instance data
 *	StartIndex		index of 1st receive buffer of frame
 *	EndIndex		index of last receive buffer of frame
 *
 * Return Value:	None
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
{
	int Done = 0;
	DMABUFFERENTRY *pBufEntry;
	unsigned int Index;

	/* Starting with 1st buffer entry of the frame clear the status */
	/* field and set the count field to DMA Buffer Size. */

	Index = StartIndex;

	/* walk the circular list from StartIndex through EndIndex
	 * inclusive, resetting each entry for reuse
	 */
	while( !Done ) {
		pBufEntry = &(info->rx_buffer_list[Index]);

		if ( Index == EndIndex ) {
			/* This is the last buffer of the frame! */
			Done = 1;
		}

		/* reset current buffer for reuse: one wide store sets count
		 * and clears status (see commented-out equivalent)
		 */
//		pBufEntry->status = 0;
//		pBufEntry->count = DMABUFFERSIZE;
		*((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;

		/* advance to next buffer entry in linked list (wraps) */
		Index++;
		if ( Index == info->rx_buffer_count )
			Index = 0;
	}

	/* set current buffer to next buffer after last buffer of frame */
	info->current_rx_buffer = Index;

}	/* end of free_rx_frame_buffers() */
6563 * 6564 * Arguments: info pointer to device extension 6565 * Return Value: 1 if frame returned, otherwise 0 6566 */ 6567static int mgsl_get_rx_frame(struct mgsl_struct *info) 6568{ 6569 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ 6570 unsigned short status; 6571 DMABUFFERENTRY *pBufEntry; 6572 unsigned int framesize = 0; 6573 int ReturnCode = 0; 6574 unsigned long flags; 6575 struct tty_struct *tty = info->tty; 6576 int return_frame = 0; 6577 6578 /* 6579 * current_rx_buffer points to the 1st buffer of the next available 6580 * receive frame. To find the last buffer of the frame look for 6581 * a non-zero status field in the buffer entries. (The status 6582 * field is set by the 16C32 after completing a receive frame. 6583 */ 6584 6585 StartIndex = EndIndex = info->current_rx_buffer; 6586 6587 while( !info->rx_buffer_list[EndIndex].status ) { 6588 /* 6589 * If the count field of the buffer entry is non-zero then 6590 * this buffer has not been used. (The 16C32 clears the count 6591 * field when it starts using the buffer.) If an unused buffer 6592 * is encountered then there are no frames available. 6593 */ 6594 6595 if ( info->rx_buffer_list[EndIndex].count ) 6596 goto Cleanup; 6597 6598 /* advance to next buffer entry in linked list */ 6599 EndIndex++; 6600 if ( EndIndex == info->rx_buffer_count ) 6601 EndIndex = 0; 6602 6603 /* if entire list searched then no frame available */ 6604 if ( EndIndex == StartIndex ) { 6605 /* If this occurs then something bad happened, 6606 * all buffers have been 'used' but none mark 6607 * the end of a frame. Reset buffers and receiver. 
6608 */ 6609 6610 if ( info->rx_enabled ){ 6611 spin_lock_irqsave(&info->irq_spinlock,flags); 6612 usc_start_receiver(info); 6613 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6614 } 6615 goto Cleanup; 6616 } 6617 } 6618 6619 6620 /* check status of receive frame */ 6621 6622 status = info->rx_buffer_list[EndIndex].status; 6623 6624 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + 6625 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { 6626 if ( status & RXSTATUS_SHORT_FRAME ) 6627 info->icount.rxshort++; 6628 else if ( status & RXSTATUS_ABORT ) 6629 info->icount.rxabort++; 6630 else if ( status & RXSTATUS_OVERRUN ) 6631 info->icount.rxover++; 6632 else { 6633 info->icount.rxcrc++; 6634 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) 6635 return_frame = 1; 6636 } 6637 framesize = 0; 6638#if SYNCLINK_GENERIC_HDLC 6639 { 6640 struct net_device_stats *stats = hdlc_stats(info->netdev); 6641 stats->rx_errors++; 6642 stats->rx_frame_errors++; 6643 } 6644#endif 6645 } else 6646 return_frame = 1; 6647 6648 if ( return_frame ) { 6649 /* receive frame has no errors, get frame size. 6650 * The frame size is the starting value of the RCC (which was 6651 * set to 0xffff) minus the ending value of the RCC (decremented 6652 * once for each receive character) minus 2 for the 16-bit CRC. 
6653 */ 6654 6655 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc; 6656 6657 /* adjust frame size for CRC if any */ 6658 if ( info->params.crc_type == HDLC_CRC_16_CCITT ) 6659 framesize -= 2; 6660 else if ( info->params.crc_type == HDLC_CRC_32_CCITT ) 6661 framesize -= 4; 6662 } 6663 6664 if ( debug_level >= DEBUG_LEVEL_BH ) 6665 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n", 6666 __FILE__,__LINE__,info->device_name,status,framesize); 6667 6668 if ( debug_level >= DEBUG_LEVEL_DATA ) 6669 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr, 6670 min_t(int, framesize, DMABUFFERSIZE),0); 6671 6672 if (framesize) { 6673 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) && 6674 ((framesize+1) > info->max_frame_size) ) || 6675 (framesize > info->max_frame_size) ) 6676 info->icount.rxlong++; 6677 else { 6678 /* copy dma buffer(s) to contiguous intermediate buffer */ 6679 int copy_count = framesize; 6680 int index = StartIndex; 6681 unsigned char *ptmp = info->intermediate_rxbuffer; 6682 6683 if ( !(status & RXSTATUS_CRC_ERROR)) 6684 info->icount.rxok++; 6685 6686 while(copy_count) { 6687 int partial_count; 6688 if ( copy_count > DMABUFFERSIZE ) 6689 partial_count = DMABUFFERSIZE; 6690 else 6691 partial_count = copy_count; 6692 6693 pBufEntry = &(info->rx_buffer_list[index]); 6694 memcpy( ptmp, pBufEntry->virt_addr, partial_count ); 6695 ptmp += partial_count; 6696 copy_count -= partial_count; 6697 6698 if ( ++index == info->rx_buffer_count ) 6699 index = 0; 6700 } 6701 6702 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) { 6703 ++framesize; 6704 *ptmp = (status & RXSTATUS_CRC_ERROR ? 
6705 RX_CRC_ERROR : 6706 RX_OK); 6707 6708 if ( debug_level >= DEBUG_LEVEL_DATA ) 6709 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n", 6710 __FILE__,__LINE__,info->device_name, 6711 *ptmp); 6712 } 6713 6714#if SYNCLINK_GENERIC_HDLC 6715 if (info->netcount) 6716 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); 6717 else 6718#endif 6719 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6720 } 6721 } 6722 /* Free the buffers used by this frame. */ 6723 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex ); 6724 6725 ReturnCode = 1; 6726 6727Cleanup: 6728 6729 if ( info->rx_enabled && info->rx_overflow ) { 6730 /* The receiver needs to restarted because of 6731 * a receive overflow (buffer or FIFO). If the 6732 * receive buffers are now empty, then restart receiver. 6733 */ 6734 6735 if ( !info->rx_buffer_list[EndIndex].status && 6736 info->rx_buffer_list[EndIndex].count ) { 6737 spin_lock_irqsave(&info->irq_spinlock,flags); 6738 usc_start_receiver(info); 6739 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6740 } 6741 } 6742 6743 return ReturnCode; 6744 6745} /* end of mgsl_get_rx_frame() */ 6746 6747/* mgsl_get_raw_rx_frame() 6748 * 6749 * This function attempts to return a received frame from the 6750 * receive DMA buffers when running in external loop mode. In this mode, 6751 * we will return at most one DMABUFFERSIZE frame to the application. 6752 * The USC receiver is triggering off of DCD going active to start a new 6753 * frame, and DCD going inactive to terminate the frame (similar to 6754 * processing a closing flag character). 6755 * 6756 * In this routine, we will return DMABUFFERSIZE "chunks" at a time. 6757 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero 6758 * status field and the RCC field will indicate the length of the 6759 * entire received frame. 
We take this RCC field and get the modulus 6760 * of RCC and DMABUFFERSIZE to determine if number of bytes in the 6761 * last Rx DMA buffer and return that last portion of the frame. 6762 * 6763 * Arguments: info pointer to device extension 6764 * Return Value: 1 if frame returned, otherwise 0 6765 */ 6766static int mgsl_get_raw_rx_frame(struct mgsl_struct *info) 6767{ 6768 unsigned int CurrentIndex, NextIndex; 6769 unsigned short status; 6770 DMABUFFERENTRY *pBufEntry; 6771 unsigned int framesize = 0; 6772 int ReturnCode = 0; 6773 unsigned long flags; 6774 struct tty_struct *tty = info->tty; 6775 6776 /* 6777 * current_rx_buffer points to the 1st buffer of the next available 6778 * receive frame. The status field is set by the 16C32 after 6779 * completing a receive frame. If the status field of this buffer 6780 * is zero, either the USC is still filling this buffer or this 6781 * is one of a series of buffers making up a received frame. 6782 * 6783 * If the count field of this buffer is zero, the USC is either 6784 * using this buffer or has used this buffer. Look at the count 6785 * field of the next buffer. If that next buffer's count is 6786 * non-zero, the USC is still actively using the current buffer. 6787 * Otherwise, if the next buffer's count field is zero, the 6788 * current buffer is complete and the USC is using the next 6789 * buffer. 6790 */ 6791 CurrentIndex = NextIndex = info->current_rx_buffer; 6792 ++NextIndex; 6793 if ( NextIndex == info->rx_buffer_count ) 6794 NextIndex = 0; 6795 6796 if ( info->rx_buffer_list[CurrentIndex].status != 0 || 6797 (info->rx_buffer_list[CurrentIndex].count == 0 && 6798 info->rx_buffer_list[NextIndex].count == 0)) { 6799 /* 6800 * Either the status field of this dma buffer is non-zero 6801 * (indicating the last buffer of a receive frame) or the next 6802 * buffer is marked as in use -- implying this buffer is complete 6803 * and an intermediate buffer for this received frame. 
6804 */ 6805 6806 status = info->rx_buffer_list[CurrentIndex].status; 6807 6808 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + 6809 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { 6810 if ( status & RXSTATUS_SHORT_FRAME ) 6811 info->icount.rxshort++; 6812 else if ( status & RXSTATUS_ABORT ) 6813 info->icount.rxabort++; 6814 else if ( status & RXSTATUS_OVERRUN ) 6815 info->icount.rxover++; 6816 else 6817 info->icount.rxcrc++; 6818 framesize = 0; 6819 } else { 6820 /* 6821 * A receive frame is available, get frame size and status. 6822 * 6823 * The frame size is the starting value of the RCC (which was 6824 * set to 0xffff) minus the ending value of the RCC (decremented 6825 * once for each receive character) minus 2 or 4 for the 16-bit 6826 * or 32-bit CRC. 6827 * 6828 * If the status field is zero, this is an intermediate buffer. 6829 * It's size is 4K. 6830 * 6831 * If the DMA Buffer Entry's Status field is non-zero, the 6832 * receive operation completed normally (ie: DCD dropped). The 6833 * RCC field is valid and holds the received frame size. 6834 * It is possible that the RCC field will be zero on a DMA buffer 6835 * entry with a non-zero status. This can occur if the total 6836 * frame size (number of bytes between the time DCD goes active 6837 * to the time DCD goes inactive) exceeds 65535 bytes. In this 6838 * case the 16C32 has underrun on the RCC count and appears to 6839 * stop updating this counter to let us know the actual received 6840 * frame size. If this happens (non-zero status and zero RCC), 6841 * simply return the entire RxDMA Buffer 6842 */ 6843 if ( status ) { 6844 /* 6845 * In the event that the final RxDMA Buffer is 6846 * terminated with a non-zero status and the RCC 6847 * field is zero, we interpret this as the RCC 6848 * having underflowed (received frame > 65535 bytes). 
6849 * 6850 * Signal the event to the user by passing back 6851 * a status of RxStatus_CrcError returning the full 6852 * buffer and let the app figure out what data is 6853 * actually valid 6854 */ 6855 if ( info->rx_buffer_list[CurrentIndex].rcc ) 6856 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc; 6857 else 6858 framesize = DMABUFFERSIZE; 6859 } 6860 else 6861 framesize = DMABUFFERSIZE; 6862 } 6863 6864 if ( framesize > DMABUFFERSIZE ) { 6865 /* 6866 * if running in raw sync mode, ISR handler for 6867 * End Of Buffer events terminates all buffers at 4K. 6868 * If this frame size is said to be >4K, get the 6869 * actual number of bytes of the frame in this buffer. 6870 */ 6871 framesize = framesize % DMABUFFERSIZE; 6872 } 6873 6874 6875 if ( debug_level >= DEBUG_LEVEL_BH ) 6876 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n", 6877 __FILE__,__LINE__,info->device_name,status,framesize); 6878 6879 if ( debug_level >= DEBUG_LEVEL_DATA ) 6880 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr, 6881 min_t(int, framesize, DMABUFFERSIZE),0); 6882 6883 if (framesize) { 6884 /* copy dma buffer(s) to contiguous intermediate buffer */ 6885 /* NOTE: we never copy more than DMABUFFERSIZE bytes */ 6886 6887 pBufEntry = &(info->rx_buffer_list[CurrentIndex]); 6888 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize); 6889 info->icount.rxok++; 6890 6891 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6892 } 6893 6894 /* Free the buffers used by this frame. */ 6895 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex ); 6896 6897 ReturnCode = 1; 6898 } 6899 6900 6901 if ( info->rx_enabled && info->rx_overflow ) { 6902 /* The receiver needs to restarted because of 6903 * a receive overflow (buffer or FIFO). If the 6904 * receive buffers are now empty, then restart receiver. 
6905 */ 6906 6907 if ( !info->rx_buffer_list[CurrentIndex].status && 6908 info->rx_buffer_list[CurrentIndex].count ) { 6909 spin_lock_irqsave(&info->irq_spinlock,flags); 6910 usc_start_receiver(info); 6911 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6912 } 6913 } 6914 6915 return ReturnCode; 6916 6917} /* end of mgsl_get_raw_rx_frame() */ 6918 6919/* mgsl_load_tx_dma_buffer() 6920 * 6921 * Load the transmit DMA buffer with the specified data. 6922 * 6923 * Arguments: 6924 * 6925 * info pointer to device extension 6926 * Buffer pointer to buffer containing frame to load 6927 * BufferSize size in bytes of frame in Buffer 6928 * 6929 * Return Value: None 6930 */ 6931static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info, 6932 const char *Buffer, unsigned int BufferSize) 6933{ 6934 unsigned short Copycount; 6935 unsigned int i = 0; 6936 DMABUFFERENTRY *pBufEntry; 6937 6938 if ( debug_level >= DEBUG_LEVEL_DATA ) 6939 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1); 6940 6941 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { 6942 /* set CMR:13 to start transmit when 6943 * next GoAhead (abort) is received 6944 */ 6945 info->cmr_value |= BIT13; 6946 } 6947 6948 /* begin loading the frame in the next available tx dma 6949 * buffer, remember it's starting location for setting 6950 * up tx dma operation 6951 */ 6952 i = info->current_tx_buffer; 6953 info->start_tx_dma_buffer = i; 6954 6955 /* Setup the status and RCC (Frame Size) fields of the 1st */ 6956 /* buffer entry in the transmit DMA buffer list. */ 6957 6958 info->tx_buffer_list[i].status = info->cmr_value & 0xf000; 6959 info->tx_buffer_list[i].rcc = BufferSize; 6960 info->tx_buffer_list[i].count = BufferSize; 6961 6962 /* Copy frame data from 1st source buffer to the DMA buffers. */ 6963 /* The frame data may span multiple DMA buffers. */ 6964 6965 while( BufferSize ){ 6966 /* Get a pointer to next DMA buffer entry. 
*/ 6967 pBufEntry = &info->tx_buffer_list[i++]; 6968 6969 if ( i == info->tx_buffer_count ) 6970 i=0; 6971 6972 /* Calculate the number of bytes that can be copied from */ 6973 /* the source buffer to this DMA buffer. */ 6974 if ( BufferSize > DMABUFFERSIZE ) 6975 Copycount = DMABUFFERSIZE; 6976 else 6977 Copycount = BufferSize; 6978 6979 /* Actually copy data from source buffer to DMA buffer. */ 6980 /* Also set the data count for this individual DMA buffer. */ 6981 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 6982 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount); 6983 else 6984 memcpy(pBufEntry->virt_addr, Buffer, Copycount); 6985 6986 pBufEntry->count = Copycount; 6987 6988 /* Advance source pointer and reduce remaining data count. */ 6989 Buffer += Copycount; 6990 BufferSize -= Copycount; 6991 6992 ++info->tx_dma_buffers_used; 6993 } 6994 6995 /* remember next available tx dma buffer */ 6996 info->current_tx_buffer = i; 6997 6998} /* end of mgsl_load_tx_dma_buffer() */ 6999 7000/* 7001 * mgsl_register_test() 7002 * 7003 * Performs a register test of the 16C32. 7004 * 7005 * Arguments: info pointer to device instance data 7006 * Return Value: TRUE if test passed, otherwise FALSE 7007 */ 7008static BOOLEAN mgsl_register_test( struct mgsl_struct *info ) 7009{ 7010 static unsigned short BitPatterns[] = 7011 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f }; 7012 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns); 7013 unsigned int i; 7014 BOOLEAN rc = TRUE; 7015 unsigned long flags; 7016 7017 spin_lock_irqsave(&info->irq_spinlock,flags); 7018 usc_reset(info); 7019 7020 /* Verify the reset state of some registers. */ 7021 7022 if ( (usc_InReg( info, SICR ) != 0) || 7023 (usc_InReg( info, IVR ) != 0) || 7024 (usc_InDmaReg( info, DIVR ) != 0) ){ 7025 rc = FALSE; 7026 } 7027 7028 if ( rc == TRUE ){ 7029 /* Write bit patterns to various registers but do it out of */ 7030 /* sync, then read back and verify values. 
*/ 7031 7032 for ( i = 0 ; i < Patterncount ; i++ ) { 7033 usc_OutReg( info, TC0R, BitPatterns[i] ); 7034 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] ); 7035 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] ); 7036 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] ); 7037 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] ); 7038 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] ); 7039 7040 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) || 7041 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) || 7042 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) || 7043 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) || 7044 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) || 7045 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){ 7046 rc = FALSE; 7047 break; 7048 } 7049 } 7050 } 7051 7052 usc_reset(info); 7053 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7054 7055 return rc; 7056 7057} /* end of mgsl_register_test() */ 7058 7059/* mgsl_irq_test() Perform interrupt test of the 16C32. 7060 * 7061 * Arguments: info pointer to device instance data 7062 * Return Value: TRUE if test passed, otherwise FALSE 7063 */ 7064static BOOLEAN mgsl_irq_test( struct mgsl_struct *info ) 7065{ 7066 unsigned long EndTime; 7067 unsigned long flags; 7068 7069 spin_lock_irqsave(&info->irq_spinlock,flags); 7070 usc_reset(info); 7071 7072 /* 7073 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 7074 * The ISR sets irq_occurred to 1. 7075 */ 7076 7077 info->irq_occurred = FALSE; 7078 7079 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */ 7080 /* Enable INTEN (Port 6, Bit12) */ 7081 /* This connects the IRQ request signal to the ISA bus */ 7082 /* on the ISA adapter. 
This has no effect for the PCI adapter */ 7083 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) ); 7084 7085 usc_EnableMasterIrqBit(info); 7086 usc_EnableInterrupts(info, IO_PIN); 7087 usc_ClearIrqPendingBits(info, IO_PIN); 7088 7089 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED); 7090 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE); 7091 7092 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7093 7094 EndTime=100; 7095 while( EndTime-- && !info->irq_occurred ) { 7096 msleep_interruptible(10); 7097 } 7098 7099 spin_lock_irqsave(&info->irq_spinlock,flags); 7100 usc_reset(info); 7101 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7102 7103 if ( !info->irq_occurred ) 7104 return FALSE; 7105 else 7106 return TRUE; 7107 7108} /* end of mgsl_irq_test() */ 7109 7110/* mgsl_dma_test() 7111 * 7112 * Perform a DMA test of the 16C32. A small frame is 7113 * transmitted via DMA from a transmit buffer to a receive buffer 7114 * using single buffer DMA mode. 
7115 * 7116 * Arguments: info pointer to device instance data 7117 * Return Value: TRUE if test passed, otherwise FALSE 7118 */ 7119static BOOLEAN mgsl_dma_test( struct mgsl_struct *info ) 7120{ 7121 unsigned short FifoLevel; 7122 unsigned long phys_addr; 7123 unsigned int FrameSize; 7124 unsigned int i; 7125 char *TmpPtr; 7126 BOOLEAN rc = TRUE; 7127 unsigned short status=0; 7128 unsigned long EndTime; 7129 unsigned long flags; 7130 MGSL_PARAMS tmp_params; 7131 7132 /* save current port options */ 7133 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS)); 7134 /* load default port options */ 7135 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); 7136 7137#define TESTFRAMESIZE 40 7138 7139 spin_lock_irqsave(&info->irq_spinlock,flags); 7140 7141 /* setup 16C32 for SDLC DMA transfer mode */ 7142 7143 usc_reset(info); 7144 usc_set_sdlc_mode(info); 7145 usc_enable_loopback(info,1); 7146 7147 /* Reprogram the RDMR so that the 16C32 does NOT clear the count 7148 * field of the buffer entry after fetching buffer address. This 7149 * way we can detect a DMA failure for a DMA read (which should be 7150 * non-destructive to system memory) before we try and write to 7151 * memory (where a failure could corrupt system memory). 7152 */ 7153 7154 /* Receive DMA mode Register (RDMR) 7155 * 7156 * <15..14> 11 DMA mode = Linked List Buffer mode 7157 * <13> 1 RSBinA/L = store Rx status Block in List entry 7158 * <12> 0 1 = Clear count of List Entry after fetching 7159 * <11..10> 00 Address mode = Increment 7160 * <9> 1 Terminate Buffer on RxBound 7161 * <8> 0 Bus Width = 16bits 7162 * <7..0> ? 
status Bits (write as 0s) 7163 * 7164 * 1110 0010 0000 0000 = 0xe200 7165 */ 7166 7167 usc_OutDmaReg( info, RDMR, 0xe200 ); 7168 7169 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7170 7171 7172 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */ 7173 7174 FrameSize = TESTFRAMESIZE; 7175 7176 /* setup 1st transmit buffer entry: */ 7177 /* with frame size and transmit control word */ 7178 7179 info->tx_buffer_list[0].count = FrameSize; 7180 info->tx_buffer_list[0].rcc = FrameSize; 7181 info->tx_buffer_list[0].status = 0x4000; 7182 7183 /* build a transmit frame in 1st transmit DMA buffer */ 7184 7185 TmpPtr = info->tx_buffer_list[0].virt_addr; 7186 for (i = 0; i < FrameSize; i++ ) 7187 *TmpPtr++ = i; 7188 7189 /* setup 1st receive buffer entry: */ 7190 /* clear status, set max receive buffer size */ 7191 7192 info->rx_buffer_list[0].status = 0; 7193 info->rx_buffer_list[0].count = FrameSize + 4; 7194 7195 /* zero out the 1st receive buffer */ 7196 7197 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 ); 7198 7199 /* Set count field of next buffer entries to prevent */ 7200 /* 16C32 from using buffers after the 1st one. */ 7201 7202 info->tx_buffer_list[1].count = 0; 7203 info->rx_buffer_list[1].count = 0; 7204 7205 7206 /***************************/ 7207 /* Program 16C32 receiver. 
*/ 7208 /***************************/ 7209 7210 spin_lock_irqsave(&info->irq_spinlock,flags); 7211 7212 /* setup DMA transfers */ 7213 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 7214 7215 /* program 16C32 receiver with physical address of 1st DMA buffer entry */ 7216 phys_addr = info->rx_buffer_list[0].phys_entry; 7217 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr ); 7218 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) ); 7219 7220 /* Clear the Rx DMA status bits (read RDMR) and start channel */ 7221 usc_InDmaReg( info, RDMR ); 7222 usc_DmaCmd( info, DmaCmd_InitRxChannel ); 7223 7224 /* Enable Receiver (RMR <1..0> = 10) */ 7225 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) ); 7226 7227 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7228 7229 7230 /*************************************************************/ 7231 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */ 7232 /*************************************************************/ 7233 7234 /* Wait 100ms for interrupt. */ 7235 EndTime = jiffies + msecs_to_jiffies(100); 7236 7237 for(;;) { 7238 if (time_after(jiffies, EndTime)) { 7239 rc = FALSE; 7240 break; 7241 } 7242 7243 spin_lock_irqsave(&info->irq_spinlock,flags); 7244 status = usc_InDmaReg( info, RDMR ); 7245 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7246 7247 if ( !(status & BIT4) && (status & BIT5) ) { 7248 /* INITG (BIT 4) is inactive (no entry read in progress) AND */ 7249 /* BUSY (BIT 5) is active (channel still active). */ 7250 /* This means the buffer entry read has completed. */ 7251 break; 7252 } 7253 } 7254 7255 7256 /******************************/ 7257 /* Program 16C32 transmitter. 
*/ 7258 /******************************/ 7259 7260 spin_lock_irqsave(&info->irq_spinlock,flags); 7261 7262 /* Program the Transmit Character Length Register (TCLR) */ 7263 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ 7264 7265 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count ); 7266 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 7267 7268 /* Program the address of the 1st DMA Buffer Entry in linked list */ 7269 7270 phys_addr = info->tx_buffer_list[0].phys_entry; 7271 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr ); 7272 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) ); 7273 7274 /* unlatch Tx status bits, and start transmit channel. */ 7275 7276 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) ); 7277 usc_DmaCmd( info, DmaCmd_InitTxChannel ); 7278 7279 /* wait for DMA controller to fill transmit FIFO */ 7280 7281 usc_TCmd( info, TCmd_SelectTicrTxFifostatus ); 7282 7283 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7284 7285 7286 /**********************************/ 7287 /* WAIT FOR TRANSMIT FIFO TO FILL */ 7288 /**********************************/ 7289 7290 /* Wait 100ms */ 7291 EndTime = jiffies + msecs_to_jiffies(100); 7292 7293 for(;;) { 7294 if (time_after(jiffies, EndTime)) { 7295 rc = FALSE; 7296 break; 7297 } 7298 7299 spin_lock_irqsave(&info->irq_spinlock,flags); 7300 FifoLevel = usc_InReg(info, TICR) >> 8; 7301 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7302 7303 if ( FifoLevel < 16 ) 7304 break; 7305 else 7306 if ( FrameSize < 32 ) { 7307 /* This frame is smaller than the entire transmit FIFO */ 7308 /* so wait for the entire frame to be loaded. */ 7309 if ( FifoLevel <= (32 - FrameSize) ) 7310 break; 7311 } 7312 } 7313 7314 7315 if ( rc == TRUE ) 7316 { 7317 /* Enable 16C32 transmitter. 
*/ 7318 7319 spin_lock_irqsave(&info->irq_spinlock,flags); 7320 7321 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */ 7322 usc_TCmd( info, TCmd_SendFrame ); 7323 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) ); 7324 7325 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7326 7327 7328 /******************************/ 7329 /* WAIT FOR TRANSMIT COMPLETE */ 7330 /******************************/ 7331 7332 /* Wait 100ms */ 7333 EndTime = jiffies + msecs_to_jiffies(100); 7334 7335 /* While timer not expired wait for transmit complete */ 7336 7337 spin_lock_irqsave(&info->irq_spinlock,flags); 7338 status = usc_InReg( info, TCSR ); 7339 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7340 7341 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) { 7342 if (time_after(jiffies, EndTime)) { 7343 rc = FALSE; 7344 break; 7345 } 7346 7347 spin_lock_irqsave(&info->irq_spinlock,flags); 7348 status = usc_InReg( info, TCSR ); 7349 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7350 } 7351 } 7352 7353 7354 if ( rc == TRUE ){ 7355 /* CHECK FOR TRANSMIT ERRORS */ 7356 if ( status & (BIT5 + BIT1) ) 7357 rc = FALSE; 7358 } 7359 7360 if ( rc == TRUE ) { 7361 /* WAIT FOR RECEIVE COMPLETE */ 7362 7363 /* Wait 100ms */ 7364 EndTime = jiffies + msecs_to_jiffies(100); 7365 7366 /* Wait for 16C32 to write receive status to buffer entry. 
*/ 7367 status=info->rx_buffer_list[0].status; 7368 while ( status == 0 ) { 7369 if (time_after(jiffies, EndTime)) { 7370 rc = FALSE; 7371 break; 7372 } 7373 status=info->rx_buffer_list[0].status; 7374 } 7375 } 7376 7377 7378 if ( rc == TRUE ) { 7379 /* CHECK FOR RECEIVE ERRORS */ 7380 status = info->rx_buffer_list[0].status; 7381 7382 if ( status & (BIT8 + BIT3 + BIT1) ) { 7383 /* receive error has occurred */ 7384 rc = FALSE; 7385 } else { 7386 if ( memcmp( info->tx_buffer_list[0].virt_addr , 7387 info->rx_buffer_list[0].virt_addr, FrameSize ) ){ 7388 rc = FALSE; 7389 } 7390 } 7391 } 7392 7393 spin_lock_irqsave(&info->irq_spinlock,flags); 7394 usc_reset( info ); 7395 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7396 7397 /* restore current port options */ 7398 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); 7399 7400 return rc; 7401 7402} /* end of mgsl_dma_test() */ 7403 7404/* mgsl_adapter_test() 7405 * 7406 * Perform the register, IRQ, and DMA tests for the 16C32. 7407 * 7408 * Arguments: info pointer to device instance data 7409 * Return Value: 0 if success, otherwise -ENODEV 7410 */ 7411static int mgsl_adapter_test( struct mgsl_struct *info ) 7412{ 7413 if ( debug_level >= DEBUG_LEVEL_INFO ) 7414 printk( "%s(%d):Testing device %s\n", 7415 __FILE__,__LINE__,info->device_name ); 7416 7417 if ( !mgsl_register_test( info ) ) { 7418 info->init_error = DiagStatus_AddressFailure; 7419 printk( "%s(%d):Register test failure for device %s Addr=%04X\n", 7420 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) ); 7421 return -ENODEV; 7422 } 7423 7424 if ( !mgsl_irq_test( info ) ) { 7425 info->init_error = DiagStatus_IrqFailure; 7426 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", 7427 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); 7428 return -ENODEV; 7429 } 7430 7431 if ( !mgsl_dma_test( info ) ) { 7432 info->init_error = DiagStatus_DmaFailure; 7433 printk( "%s(%d):DMA test failure for device %s 
DMA=%d\n", 7434 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) ); 7435 return -ENODEV; 7436 } 7437 7438 if ( debug_level >= DEBUG_LEVEL_INFO ) 7439 printk( "%s(%d):device %s passed diagnostics\n", 7440 __FILE__,__LINE__,info->device_name ); 7441 7442 return 0; 7443 7444} /* end of mgsl_adapter_test() */ 7445 7446/* mgsl_memory_test() 7447 * 7448 * Test the shared memory on a PCI adapter. 7449 * 7450 * Arguments: info pointer to device instance data 7451 * Return Value: TRUE if test passed, otherwise FALSE 7452 */ 7453static BOOLEAN mgsl_memory_test( struct mgsl_struct *info ) 7454{ 7455 static unsigned long BitPatterns[] = 7456 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 }; 7457 unsigned long Patterncount = ARRAY_SIZE(BitPatterns); 7458 unsigned long i; 7459 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long); 7460 unsigned long * TestAddr; 7461 7462 if ( info->bus_type != MGSL_BUS_TYPE_PCI ) 7463 return TRUE; 7464 7465 TestAddr = (unsigned long *)info->memory_base; 7466 7467 /* Test data lines with test pattern at one location. */ 7468 7469 for ( i = 0 ; i < Patterncount ; i++ ) { 7470 *TestAddr = BitPatterns[i]; 7471 if ( *TestAddr != BitPatterns[i] ) 7472 return FALSE; 7473 } 7474 7475 /* Test address lines with incrementing pattern over */ 7476 /* entire address range. */ 7477 7478 for ( i = 0 ; i < TestLimit ; i++ ) { 7479 *TestAddr = i * 4; 7480 TestAddr++; 7481 } 7482 7483 TestAddr = (unsigned long *)info->memory_base; 7484 7485 for ( i = 0 ; i < TestLimit ; i++ ) { 7486 if ( *TestAddr != i * 4 ) 7487 return FALSE; 7488 TestAddr++; 7489 } 7490 7491 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE ); 7492 7493 return TRUE; 7494 7495} /* End Of mgsl_memory_test() */ 7496 7497 7498/* mgsl_load_pci_memory() 7499 * 7500 * Load a large block of data into the PCI shared memory. 7501 * Use this instead of memcpy() or memmove() to move data 7502 * into the PCI shared memory. 
7503 * 7504 * Notes: 7505 * 7506 * This function prevents the PCI9050 interface chip from hogging 7507 * the adapter local bus, which can starve the 16C32 by preventing 7508 * 16C32 bus master cycles. 7509 * 7510 * The PCI9050 documentation says that the 9050 will always release 7511 * control of the local bus after completing the current read 7512 * or write operation. 7513 * 7514 * It appears that as long as the PCI9050 write FIFO is full, the 7515 * PCI9050 treats all of the writes as a single burst transaction 7516 * and will not release the bus. This causes DMA latency problems 7517 * at high speeds when copying large data blocks to the shared 7518 * memory. 7519 * 7520 * This function in effect, breaks the a large shared memory write 7521 * into multiple transations by interleaving a shared memory read 7522 * which will flush the write FIFO and 'complete' the write 7523 * transation. This allows any pending DMA request to gain control 7524 * of the local bus in a timely fasion. 7525 * 7526 * Arguments: 7527 * 7528 * TargetPtr pointer to target address in PCI shared memory 7529 * SourcePtr pointer to source buffer for data 7530 * count count in bytes of data to copy 7531 * 7532 * Return Value: None 7533 */ 7534static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr, 7535 unsigned short count ) 7536{ 7537 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */ 7538#define PCI_LOAD_INTERVAL 64 7539 7540 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL; 7541 unsigned short Index; 7542 unsigned long Dummy; 7543 7544 for ( Index = 0 ; Index < Intervalcount ; Index++ ) 7545 { 7546 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL); 7547 Dummy = *((volatile unsigned long *)TargetPtr); 7548 TargetPtr += PCI_LOAD_INTERVAL; 7549 SourcePtr += PCI_LOAD_INTERVAL; 7550 } 7551 7552 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL ); 7553 7554} /* End Of mgsl_load_pci_memory() */ 7555 7556static void mgsl_trace_block(struct mgsl_struct 
*info,const char* data, int count, int xmit) 7557{ 7558 int i; 7559 int linecount; 7560 if (xmit) 7561 printk("%s tx data:\n",info->device_name); 7562 else 7563 printk("%s rx data:\n",info->device_name); 7564 7565 while(count) { 7566 if (count > 16) 7567 linecount = 16; 7568 else 7569 linecount = count; 7570 7571 for(i=0;i<linecount;i++) 7572 printk("%02X ",(unsigned char)data[i]); 7573 for(;i<17;i++) 7574 printk(" "); 7575 for(i=0;i<linecount;i++) { 7576 if (data[i]>=040 && data[i]<=0176) 7577 printk("%c",data[i]); 7578 else 7579 printk("."); 7580 } 7581 printk("\n"); 7582 7583 data += linecount; 7584 count -= linecount; 7585 } 7586} /* end of mgsl_trace_block() */ 7587 7588/* mgsl_tx_timeout() 7589 * 7590 * called when HDLC frame times out 7591 * update stats and do tx completion processing 7592 * 7593 * Arguments: context pointer to device instance data 7594 * Return Value: None 7595 */ 7596static void mgsl_tx_timeout(unsigned long context) 7597{ 7598 struct mgsl_struct *info = (struct mgsl_struct*)context; 7599 unsigned long flags; 7600 7601 if ( debug_level >= DEBUG_LEVEL_INFO ) 7602 printk( "%s(%d):mgsl_tx_timeout(%s)\n", 7603 __FILE__,__LINE__,info->device_name); 7604 if(info->tx_active && 7605 (info->params.mode == MGSL_MODE_HDLC || 7606 info->params.mode == MGSL_MODE_RAW) ) { 7607 info->icount.txtimeout++; 7608 } 7609 spin_lock_irqsave(&info->irq_spinlock,flags); 7610 info->tx_active = 0; 7611 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 7612 7613 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 7614 usc_loopmode_cancel_transmit( info ); 7615 7616 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7617 7618#if SYNCLINK_GENERIC_HDLC 7619 if (info->netcount) 7620 hdlcdev_tx_done(info); 7621 else 7622#endif 7623 mgsl_bh_transmit(info); 7624 7625} /* end of mgsl_tx_timeout() */ 7626 7627/* signal that there are no more frames to send, so that 7628 * line is 'released' by echoing RxD to TxD when current 7629 * transmission is complete (or 
immediately if no tx in progress). 7630 */ 7631static int mgsl_loopmode_send_done( struct mgsl_struct * info ) 7632{ 7633 unsigned long flags; 7634 7635 spin_lock_irqsave(&info->irq_spinlock,flags); 7636 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { 7637 if (info->tx_active) 7638 info->loopmode_send_done_requested = TRUE; 7639 else 7640 usc_loopmode_send_done(info); 7641 } 7642 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7643 7644 return 0; 7645} 7646 7647/* release the line by echoing RxD to TxD 7648 * upon completion of a transmit frame 7649 */ 7650static void usc_loopmode_send_done( struct mgsl_struct * info ) 7651{ 7652 info->loopmode_send_done_requested = FALSE; 7653 /* clear CMR:13 to 0 to start echoing RxData to TxData */ 7654 info->cmr_value &= ~BIT13; 7655 usc_OutReg(info, CMR, info->cmr_value); 7656} 7657 7658/* abort a transmit in progress while in HDLC LoopMode 7659 */ 7660static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ) 7661{ 7662 /* reset tx dma channel and purge TxFifo */ 7663 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 7664 usc_DmaCmd( info, DmaCmd_ResetTxChannel ); 7665 usc_loopmode_send_done( info ); 7666} 7667 7668/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled 7669 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort) 7670 * we must clear CMR:13 to begin repeating TxData to RxData 7671 */ 7672static void usc_loopmode_insert_request( struct mgsl_struct * info ) 7673{ 7674 info->loopmode_insert_requested = TRUE; 7675 7676 /* enable RxAbort irq. 
On next RxAbort, clear CMR:13 to 7677 * begin repeating TxData on RxData (complete insertion) 7678 */ 7679 usc_OutReg( info, RICR, 7680 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) ); 7681 7682 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */ 7683 info->cmr_value |= BIT13; 7684 usc_OutReg(info, CMR, info->cmr_value); 7685} 7686 7687/* return 1 if station is inserted into the loop, otherwise 0 7688 */ 7689static int usc_loopmode_active( struct mgsl_struct * info) 7690{ 7691 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ; 7692} 7693 7694#if SYNCLINK_GENERIC_HDLC 7695 7696/** 7697 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 7698 * set encoding and frame check sequence (FCS) options 7699 * 7700 * dev pointer to network device structure 7701 * encoding serial encoding setting 7702 * parity FCS setting 7703 * 7704 * returns 0 if success, otherwise error code 7705 */ 7706static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, 7707 unsigned short parity) 7708{ 7709 struct mgsl_struct *info = dev_to_port(dev); 7710 unsigned char new_encoding; 7711 unsigned short new_crctype; 7712 7713 /* return error if TTY interface open */ 7714 if (info->count) 7715 return -EBUSY; 7716 7717 switch (encoding) 7718 { 7719 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; 7720 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; 7721 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; 7722 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; 7723 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; 7724 default: return -EINVAL; 7725 } 7726 7727 switch (parity) 7728 { 7729 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; 7730 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; 7731 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; 7732 default: return -EINVAL; 7733 } 7734 7735 
info->params.encoding = new_encoding; 7736 info->params.crc_type = new_crctype; 7737 7738 /* if network interface up, reprogram hardware */ 7739 if (info->netcount) 7740 mgsl_program_hw(info); 7741 7742 return 0; 7743} 7744 7745/** 7746 * called by generic HDLC layer to send frame 7747 * 7748 * skb socket buffer containing HDLC frame 7749 * dev pointer to network device structure 7750 * 7751 * returns 0 if success, otherwise error code 7752 */ 7753static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) 7754{ 7755 struct mgsl_struct *info = dev_to_port(dev); 7756 struct net_device_stats *stats = hdlc_stats(dev); 7757 unsigned long flags; 7758 7759 if (debug_level >= DEBUG_LEVEL_INFO) 7760 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name); 7761 7762 /* stop sending until this frame completes */ 7763 netif_stop_queue(dev); 7764 7765 /* copy data to device buffers */ 7766 info->xmit_cnt = skb->len; 7767 mgsl_load_tx_dma_buffer(info, skb->data, skb->len); 7768 7769 /* update network statistics */ 7770 stats->tx_packets++; 7771 stats->tx_bytes += skb->len; 7772 7773 /* done with socket buffer, so free it */ 7774 dev_kfree_skb(skb); 7775 7776 /* save start time for transmit timeout detection */ 7777 dev->trans_start = jiffies; 7778 7779 /* start hardware transmitter if necessary */ 7780 spin_lock_irqsave(&info->irq_spinlock,flags); 7781 if (!info->tx_active) 7782 usc_start_transmitter(info); 7783 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7784 7785 return 0; 7786} 7787 7788/** 7789 * called by network layer when interface enabled 7790 * claim resources and initialize hardware 7791 * 7792 * dev pointer to network device structure 7793 * 7794 * returns 0 if success, otherwise error code 7795 */ 7796static int hdlcdev_open(struct net_device *dev) 7797{ 7798 struct mgsl_struct *info = dev_to_port(dev); 7799 int rc; 7800 unsigned long flags; 7801 7802 if (debug_level >= DEBUG_LEVEL_INFO) 7803 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name); 7804 
7805 /* generic HDLC layer open processing */ 7806 if ((rc = hdlc_open(dev))) 7807 return rc; 7808 7809 /* arbitrate between network and tty opens */ 7810 spin_lock_irqsave(&info->netlock, flags); 7811 if (info->count != 0 || info->netcount != 0) { 7812 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); 7813 spin_unlock_irqrestore(&info->netlock, flags); 7814 return -EBUSY; 7815 } 7816 info->netcount=1; 7817 spin_unlock_irqrestore(&info->netlock, flags); 7818 7819 /* claim resources and init adapter */ 7820 if ((rc = startup(info)) != 0) { 7821 spin_lock_irqsave(&info->netlock, flags); 7822 info->netcount=0; 7823 spin_unlock_irqrestore(&info->netlock, flags); 7824 return rc; 7825 } 7826 7827 /* assert DTR and RTS, apply hardware settings */ 7828 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 7829 mgsl_program_hw(info); 7830 7831 /* enable network layer transmit */ 7832 dev->trans_start = jiffies; 7833 netif_start_queue(dev); 7834 7835 /* inform generic HDLC layer of current DCD status */ 7836 spin_lock_irqsave(&info->irq_spinlock, flags); 7837 usc_get_serial_signals(info); 7838 spin_unlock_irqrestore(&info->irq_spinlock, flags); 7839 if (info->serial_signals & SerialSignal_DCD) 7840 netif_carrier_on(dev); 7841 else 7842 netif_carrier_off(dev); 7843 return 0; 7844} 7845 7846/** 7847 * called by network layer when interface is disabled 7848 * shutdown hardware and release resources 7849 * 7850 * dev pointer to network device structure 7851 * 7852 * returns 0 if success, otherwise error code 7853 */ 7854static int hdlcdev_close(struct net_device *dev) 7855{ 7856 struct mgsl_struct *info = dev_to_port(dev); 7857 unsigned long flags; 7858 7859 if (debug_level >= DEBUG_LEVEL_INFO) 7860 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name); 7861 7862 netif_stop_queue(dev); 7863 7864 /* shutdown adapter and release resources */ 7865 shutdown(info); 7866 7867 hdlc_close(dev); 7868 7869 spin_lock_irqsave(&info->netlock, flags); 7870 
info->netcount=0; 7871 spin_unlock_irqrestore(&info->netlock, flags); 7872 7873 return 0; 7874} 7875 7876/** 7877 * called by network layer to process IOCTL call to network device 7878 * 7879 * dev pointer to network device structure 7880 * ifr pointer to network interface request structure 7881 * cmd IOCTL command code 7882 * 7883 * returns 0 if success, otherwise error code 7884 */ 7885static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 7886{ 7887 const size_t size = sizeof(sync_serial_settings); 7888 sync_serial_settings new_line; 7889 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; 7890 struct mgsl_struct *info = dev_to_port(dev); 7891 unsigned int flags; 7892 7893 if (debug_level >= DEBUG_LEVEL_INFO) 7894 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); 7895 7896 /* return error if TTY interface open */ 7897 if (info->count) 7898 return -EBUSY; 7899 7900 if (cmd != SIOCWANDEV) 7901 return hdlc_ioctl(dev, ifr, cmd); 7902 7903 switch(ifr->ifr_settings.type) { 7904 case IF_GET_IFACE: /* return current sync_serial_settings */ 7905 7906 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; 7907 if (ifr->ifr_settings.size < size) { 7908 ifr->ifr_settings.size = size; /* data size wanted */ 7909 return -ENOBUFS; 7910 } 7911 7912 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | 7913 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | 7914 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | 7915 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); 7916 7917 switch (flags){ 7918 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; 7919 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; 7920 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; 7921 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; 7922 default: new_line.clock_type = CLOCK_DEFAULT; 7923 } 7924 7925 
new_line.clock_rate = info->params.clock_speed; 7926 new_line.loopback = info->params.loopback ? 1:0; 7927 7928 if (copy_to_user(line, &new_line, size)) 7929 return -EFAULT; 7930 return 0; 7931 7932 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ 7933 7934 if(!capable(CAP_NET_ADMIN)) 7935 return -EPERM; 7936 if (copy_from_user(&new_line, line, size)) 7937 return -EFAULT; 7938 7939 switch (new_line.clock_type) 7940 { 7941 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; 7942 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; 7943 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; 7944 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; 7945 case CLOCK_DEFAULT: flags = info->params.flags & 7946 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | 7947 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | 7948 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | 7949 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; 7950 default: return -EINVAL; 7951 } 7952 7953 if (new_line.loopback != 0 && new_line.loopback != 1) 7954 return -EINVAL; 7955 7956 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | 7957 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | 7958 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | 7959 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); 7960 info->params.flags |= flags; 7961 7962 info->params.loopback = new_line.loopback; 7963 7964 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) 7965 info->params.clock_speed = new_line.clock_rate; 7966 else 7967 info->params.clock_speed = 0; 7968 7969 /* if network interface up, reprogram hardware */ 7970 if (info->netcount) 7971 mgsl_program_hw(info); 7972 return 0; 7973 7974 default: 7975 return hdlc_ioctl(dev, ifr, cmd); 7976 } 7977} 7978 7979/** 7980 * called by network layer when transmit timeout is detected 7981 * 7982 * dev pointer to network device structure 7983 */ 7984static void hdlcdev_tx_timeout(struct net_device *dev) 
7985{ 7986 struct mgsl_struct *info = dev_to_port(dev); 7987 struct net_device_stats *stats = hdlc_stats(dev); 7988 unsigned long flags; 7989 7990 if (debug_level >= DEBUG_LEVEL_INFO) 7991 printk("hdlcdev_tx_timeout(%s)\n",dev->name); 7992 7993 stats->tx_errors++; 7994 stats->tx_aborted_errors++; 7995 7996 spin_lock_irqsave(&info->irq_spinlock,flags); 7997 usc_stop_transmitter(info); 7998 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7999 8000 netif_wake_queue(dev); 8001} 8002 8003/** 8004 * called by device driver when transmit completes 8005 * reenable network layer transmit if stopped 8006 * 8007 * info pointer to device instance information 8008 */ 8009static void hdlcdev_tx_done(struct mgsl_struct *info) 8010{ 8011 if (netif_queue_stopped(info->netdev)) 8012 netif_wake_queue(info->netdev); 8013} 8014 8015/** 8016 * called by device driver when frame received 8017 * pass frame to network layer 8018 * 8019 * info pointer to device instance information 8020 * buf pointer to buffer contianing frame data 8021 * size count of data bytes in buf 8022 */ 8023static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size) 8024{ 8025 struct sk_buff *skb = dev_alloc_skb(size); 8026 struct net_device *dev = info->netdev; 8027 struct net_device_stats *stats = hdlc_stats(dev); 8028 8029 if (debug_level >= DEBUG_LEVEL_INFO) 8030 printk("hdlcdev_rx(%s)\n",dev->name); 8031 8032 if (skb == NULL) { 8033 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); 8034 stats->rx_dropped++; 8035 return; 8036 } 8037 8038 memcpy(skb_put(skb, size),buf,size); 8039 8040 skb->protocol = hdlc_type_trans(skb, info->netdev); 8041 8042 stats->rx_packets++; 8043 stats->rx_bytes += size; 8044 8045 netif_rx(skb); 8046 8047 info->netdev->last_rx = jiffies; 8048} 8049 8050/** 8051 * called by device driver when adding device instance 8052 * do generic HDLC initialization 8053 * 8054 * info pointer to device instance information 8055 * 8056 * returns 0 if success, otherwise 
error code 8057 */ 8058static int hdlcdev_init(struct mgsl_struct *info) 8059{ 8060 int rc; 8061 struct net_device *dev; 8062 hdlc_device *hdlc; 8063 8064 /* allocate and initialize network and HDLC layer objects */ 8065 8066 if (!(dev = alloc_hdlcdev(info))) { 8067 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__); 8068 return -ENOMEM; 8069 } 8070 8071 /* for network layer reporting purposes only */ 8072 dev->base_addr = info->io_base; 8073 dev->irq = info->irq_level; 8074 dev->dma = info->dma_level; 8075 8076 /* network layer callbacks and settings */ 8077 dev->do_ioctl = hdlcdev_ioctl; 8078 dev->open = hdlcdev_open; 8079 dev->stop = hdlcdev_close; 8080 dev->tx_timeout = hdlcdev_tx_timeout; 8081 dev->watchdog_timeo = 10*HZ; 8082 dev->tx_queue_len = 50; 8083 8084 /* generic HDLC layer callbacks and settings */ 8085 hdlc = dev_to_hdlc(dev); 8086 hdlc->attach = hdlcdev_attach; 8087 hdlc->xmit = hdlcdev_xmit; 8088 8089 /* register objects with HDLC layer */ 8090 if ((rc = register_hdlc_device(dev))) { 8091 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); 8092 free_netdev(dev); 8093 return rc; 8094 } 8095 8096 info->netdev = dev; 8097 return 0; 8098} 8099 8100/** 8101 * called by device driver when removing device instance 8102 * do generic HDLC cleanup 8103 * 8104 * info pointer to device instance information 8105 */ 8106static void hdlcdev_exit(struct mgsl_struct *info) 8107{ 8108 unregister_hdlc_device(info->netdev); 8109 free_netdev(info->netdev); 8110 info->netdev = NULL; 8111} 8112 8113#endif /* CONFIG_HDLC */ 8114 8115 8116static int __devinit synclink_init_one (struct pci_dev *dev, 8117 const struct pci_device_id *ent) 8118{ 8119 struct mgsl_struct *info; 8120 8121 if (pci_enable_device(dev)) { 8122 printk("error enabling pci device %p\n", dev); 8123 return -EIO; 8124 } 8125 8126 if (!(info = mgsl_allocate_device())) { 8127 printk("can't allocate device instance data.\n"); 8128 return -EIO; 8129 } 8130 8131 /* Copy user 
configuration info to device instance data */ 8132 8133 info->io_base = pci_resource_start(dev, 2); 8134 info->irq_level = dev->irq; 8135 info->phys_memory_base = pci_resource_start(dev, 3); 8136 8137 /* Because veremap only works on page boundaries we must map 8138 * a larger area than is actually implemented for the LCR 8139 * memory range. We map a full page starting at the page boundary. 8140 */ 8141 info->phys_lcr_base = pci_resource_start(dev, 0); 8142 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1); 8143 info->phys_lcr_base &= ~(PAGE_SIZE-1); 8144 8145 info->bus_type = MGSL_BUS_TYPE_PCI; 8146 info->io_addr_size = 8; 8147 info->irq_flags = IRQF_SHARED; 8148 8149 if (dev->device == 0x0210) { 8150 /* Version 1 PCI9030 based universal PCI adapter */ 8151 info->misc_ctrl_value = 0x007c4080; 8152 info->hw_version = 1; 8153 } else { 8154 /* Version 0 PCI9050 based 5V PCI adapter 8155 * A PCI9050 bug prevents reading LCR registers if 8156 * LCR base address bit 7 is set. Maintain shadow 8157 * value so we can write to LCR misc control reg. 8158 */ 8159 info->misc_ctrl_value = 0x087e4546; 8160 info->hw_version = 0; 8161 } 8162 8163 mgsl_add_device(info); 8164 8165 return 0; 8166} 8167 8168static void __devexit synclink_remove_one (struct pci_dev *dev) 8169{ 8170} 8171