Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.29-rc7 8135 lines 236 kB view raw
1/* 2 * linux/drivers/char/synclink.c 3 * 4 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $ 5 * 6 * Device driver for Microgate SyncLink ISA and PCI 7 * high speed multiprotocol serial adapters. 8 * 9 * written by Paul Fulghum for Microgate Corporation 10 * paulkf@microgate.com 11 * 12 * Microgate and SyncLink are trademarks of Microgate Corporation 13 * 14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds 15 * 16 * Original release 01/11/99 17 * 18 * This code is released under the GNU General Public License (GPL) 19 * 20 * This driver is primarily intended for use in synchronous 21 * HDLC mode. Asynchronous mode is also provided. 22 * 23 * When operating in synchronous mode, each call to mgsl_write() 24 * contains exactly one complete HDLC frame. Calling mgsl_put_char 25 * will start assembling an HDLC frame that will not be sent until 26 * mgsl_flush_chars or mgsl_write is called. 27 * 28 * Synchronous receive data is reported as complete frames. To accomplish 29 * this, the TTY flip buffer is bypassed (too small to hold largest 30 * frame and may fragment frames) and the line discipline 31 * receive entry point is called directly. 32 * 33 * This driver has been tested with a slightly modified ppp.c driver 34 * for synchronous PPP. 35 * 36 * 2000/02/16 37 * Added interface for syncppp.c driver (an alternate synchronous PPP 38 * implementation that also supports Cisco HDLC). Each device instance 39 * registers as a tty device AND a network device (if dosyncppp option 40 * is set for the device). The functionality is determined by which 41 * device interface is opened. 42 * 43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 46 * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 53 * OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56#if defined(__i386__) 57# define BREAKPOINT() asm(" int $3"); 58#else 59# define BREAKPOINT() { } 60#endif 61 62#define MAX_ISA_DEVICES 10 63#define MAX_PCI_DEVICES 10 64#define MAX_TOTAL_DEVICES 20 65 66#include <linux/module.h> 67#include <linux/errno.h> 68#include <linux/signal.h> 69#include <linux/sched.h> 70#include <linux/timer.h> 71#include <linux/interrupt.h> 72#include <linux/pci.h> 73#include <linux/tty.h> 74#include <linux/tty_flip.h> 75#include <linux/serial.h> 76#include <linux/major.h> 77#include <linux/string.h> 78#include <linux/fcntl.h> 79#include <linux/ptrace.h> 80#include <linux/ioport.h> 81#include <linux/mm.h> 82#include <linux/slab.h> 83#include <linux/delay.h> 84#include <linux/netdevice.h> 85#include <linux/vmalloc.h> 86#include <linux/init.h> 87#include <linux/ioctl.h> 88#include <linux/synclink.h> 89 90#include <asm/system.h> 91#include <asm/io.h> 92#include <asm/irq.h> 93#include <asm/dma.h> 94#include <linux/bitops.h> 95#include <asm/types.h> 96#include <linux/termios.h> 97#include <linux/workqueue.h> 98#include <linux/hdlc.h> 99#include <linux/dma-mapping.h> 100 101#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) 102#define SYNCLINK_GENERIC_HDLC 1 103#else 104#define SYNCLINK_GENERIC_HDLC 0 105#endif 106 107#define GET_USER(error,value,addr) error = get_user(value,addr) 108#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? 
-EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0

#include <asm/uaccess.h>

/* written to the receive count limit register (RCLR); 0xffff disables the limit */
#define RCLRVALUE 0xffff

/* Default communication parameters for a device instance:
 * HDLC mode, NRZI-space encoding, CCITT CRC-16, 9600 bps, 8 data bits,
 * 1 stop bit, no parity. Field meanings are given inline.
 */
static MGSL_PARAMS default_params = {
	MGSL_MODE_HDLC,			/* unsigned long mode */
	0,				/* unsigned char loopback; */
	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
	0,				/* unsigned long clock_speed; */
	0xff,				/* unsigned char addr_filter; */
	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
	9600,				/* unsigned long data_rate; */
	8,				/* unsigned char data_bits; */
	1,				/* unsigned char stop_bits; */
	ASYNC_PARITY_NONE		/* unsigned char parity; */
};

#define SHARED_MEM_ADDRESS_SIZE 0x40000		/* PCI shared memory window size */
#define BUFFERLISTSIZE 4096			/* size of Rx/Tx buffer list area */
#define DMABUFFERSIZE 4096			/* size of each DMA data buffer */
#define MAXRXFRAMES 7				/* max receive frames buffered */

/* One entry of the DMA buffer list. The fields through 'link' are
 * presumably the layout consumed by the 16C32 DMA controller (note the
 * "padding required by 16C32" comment on 'reserved'); the remaining
 * fields are driver-side bookkeeping.
 */
typedef struct _DMABUFFERENTRY
{
	u32 phys_addr;		/* 32-bit flat physical address of data buffer */
	volatile u16 count;	/* buffer size/data count */
	volatile u16 status;	/* Control/status field */
	volatile u16 rcc;	/* character count field */
	u16 reserved;		/* padding required by 16C32 */
	u32 link;		/* 32-bit flat link to next buffer entry */
	char *virt_addr;	/* virtual address of data buffer */
	u32 phys_entry;		/* physical address of this buffer entry */
	dma_addr_t dma_addr;	/* DMA-API handle for the data buffer */
} DMABUFFERENTRY, *DMAPBUFFERENTRY;

/* The queue of BH actions to be performed (bit flags) */

#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4

#define IO_PIN_SHUTDOWN_LIMIT 100

/* Counters of serial input signal (modem control line) transitions. */
struct	_input_signal_events {
	int	ri_up;
	int	ri_down;
	int	dsr_up;
	int	dsr_down;
	int	dcd_up;
	int	dcd_down;
	int	cts_up;
	int	cts_down;
};

/* transmit holding buffer definitions */
#define MAX_TX_HOLDING_BUFFERS 5
struct tx_holding_buffer {
	int	buffer_size;		/* bytes of frame data stored */
	unsigned char *	buffer;		/* frame data awaiting transmit DMA */
};


/*
 * Device instance data structure
 *
 * One of these exists per adapter/port; instances are linked through
 * 'next_device' into a global device list.
 */

struct mgsl_struct {
	int			magic;		/* MGSL_MAGIC, validated by mgsl_paranoia_check() */
	struct tty_port		port;
	int			line;		/* tty line number */
	int			hw_version;

	struct mgsl_icount	icount;		/* statistics counters */

	int			timeout;
	int			x_char;		/* xon/xoff character */
	u16			read_status_mask;
	u16			ignore_status_mask;
	unsigned char		*xmit_buf;	/* async transmit ring buffer */
	int			xmit_head;
	int			xmit_tail;
	int			xmit_cnt;

	wait_queue_head_t	status_event_wait_q;
	wait_queue_head_t	event_wait_q;
	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
	struct mgsl_struct	*next_device;	/* device list link */

	spinlock_t irq_spinlock;		/* spinlock for synchronizing with ISR */
	struct work_struct task;		/* task structure for scheduling bh */

	u32 EventMask;			/* event trigger mask */
	u32 RecordedEvents;		/* pending events */

	u32 max_frame_size;		/* as set by device config */

	u32 pending_bh;			/* BH_RECEIVE/BH_TRANSMIT/BH_STATUS flags */

	bool bh_running;		/* Protection from multiple */
	int isr_overflow;
	bool bh_requested;

	int dcd_chkcount;		/* check counts to prevent */
	int cts_chkcount;		/* too many IRQs if a signal */
	int dsr_chkcount;		/* is floating */
	int ri_chkcount;

	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
	u32 buffer_list_phys;
	dma_addr_t buffer_list_dma_addr;

	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
	unsigned int current_rx_buffer;

	int num_tx_dma_buffers;		/* number of tx dma frames required */
	int tx_dma_buffers_used;
	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
	int current_tx_buffer;		/* next tx dma buffer to be loaded */

	unsigned char *intermediate_rxbuffer;	/* assembly area for received frames */

	int num_tx_holding_buffers;	/* number of tx holding buffer allocated */
	int get_tx_holding_index;	/* next tx holding buffer for adapter to load */
	int put_tx_holding_index;	/* next tx holding buffer to store user request */
	int tx_holding_count;		/* number of tx holding buffers waiting */
	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];

	bool rx_enabled;
	bool rx_overflow;
	bool rx_rcc_underrun;

	bool tx_enabled;
	bool tx_active;
	u32 idle_mode;			/* transmit idle pattern (IDLEMODE_*) */

	u16 cmr_value;			/* cached channel mode register value */
	u16 tcsr_value;			/* cached transmit command/status register value */

	char device_name[25];		/* device instance name */

	unsigned int bus_type;		/* expansion bus type (ISA,EISA,PCI) */
	unsigned char bus;		/* expansion bus number (zero based) */
	unsigned char function;		/* PCI device number */

	unsigned int io_base;		/* base I/O address of adapter */
	unsigned int io_addr_size;	/* size of the I/O address range */
	bool io_addr_requested;		/* true if I/O address requested */

	unsigned int irq_level;		/* interrupt level */
	unsigned long irq_flags;
	bool irq_requested;		/* true if IRQ requested */

	unsigned int dma_level;		/* DMA channel */
	bool dma_requested;		/* true if dma channel requested */

	u16 mbre_bit;
	u16 loopback_bits;
	u16 usc_idle_mode;

	MGSL_PARAMS params;		/* communications parameters */

	unsigned char serial_signals;	/* current serial signal states */

	bool irq_occurred;		/* for diagnostics use */
	unsigned int init_error;	/* Initialization startup error (DIAGS) */
	int fDiagnosticsmode;		/* Driver in Diagnostic mode? (DIAGS) */

	u32 last_mem_alloc;
	unsigned char* memory_base;	/* shared memory address (PCI only) */
	u32 phys_memory_base;
	bool shared_mem_requested;

	unsigned char* lcr_base;	/* local config registers (PCI only) */
	u32 phys_lcr_base;
	u32 lcr_offset;
	bool lcr_mem_requested;

	u32 misc_ctrl_value;
	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
	char char_buf[MAX_ASYNC_BUFFER_SIZE];
	bool drop_rts_on_tx_done;

	bool loopmode_insert_requested;
	bool loopmode_send_done_requested;

	struct	_input_signal_events	input_signal_events;

	/* generic HDLC device parts */
	int netcount;
	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif
};

#define MGSL_MAGIC 0x5401

/*
 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 */
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE 4096
#endif

/*
 * These macros define the offsets used in calculating the
 * I/O address of the specified USC registers.
 */


#define DCPIN 2		/* Bit 1 of I/O address */
#define SDPIN 4		/* Bit 2 of I/O address */

#define DCAR 0		/* DMA command/address register */
#define CCAR SDPIN	/* channel command/address register */
#define DATAREG DCPIN + SDPIN	/* serial data register (unparenthesized; only safe when added to a base) */
#define MSBONLY 0x41
#define LSBONLY 0x40

/*
 * These macros define the register address (ordinal number)
 * used for writing address/value pairs to the USC.
341 */ 342 343#define CMR 0x02 /* Channel mode Register */ 344#define CCSR 0x04 /* Channel Command/status Register */ 345#define CCR 0x06 /* Channel Control Register */ 346#define PSR 0x08 /* Port status Register */ 347#define PCR 0x0a /* Port Control Register */ 348#define TMDR 0x0c /* Test mode Data Register */ 349#define TMCR 0x0e /* Test mode Control Register */ 350#define CMCR 0x10 /* Clock mode Control Register */ 351#define HCR 0x12 /* Hardware Configuration Register */ 352#define IVR 0x14 /* Interrupt Vector Register */ 353#define IOCR 0x16 /* Input/Output Control Register */ 354#define ICR 0x18 /* Interrupt Control Register */ 355#define DCCR 0x1a /* Daisy Chain Control Register */ 356#define MISR 0x1c /* Misc Interrupt status Register */ 357#define SICR 0x1e /* status Interrupt Control Register */ 358#define RDR 0x20 /* Receive Data Register */ 359#define RMR 0x22 /* Receive mode Register */ 360#define RCSR 0x24 /* Receive Command/status Register */ 361#define RICR 0x26 /* Receive Interrupt Control Register */ 362#define RSR 0x28 /* Receive Sync Register */ 363#define RCLR 0x2a /* Receive count Limit Register */ 364#define RCCR 0x2c /* Receive Character count Register */ 365#define TC0R 0x2e /* Time Constant 0 Register */ 366#define TDR 0x30 /* Transmit Data Register */ 367#define TMR 0x32 /* Transmit mode Register */ 368#define TCSR 0x34 /* Transmit Command/status Register */ 369#define TICR 0x36 /* Transmit Interrupt Control Register */ 370#define TSR 0x38 /* Transmit Sync Register */ 371#define TCLR 0x3a /* Transmit count Limit Register */ 372#define TCCR 0x3c /* Transmit Character count Register */ 373#define TC1R 0x3e /* Time Constant 1 Register */ 374 375 376/* 377 * MACRO DEFINITIONS FOR DMA REGISTERS 378 */ 379 380#define DCR 0x06 /* DMA Control Register (shared) */ 381#define DACR 0x08 /* DMA Array count Register (shared) */ 382#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */ 383#define DIVR 0x14 /* DMA Interrupt Vector Register 
(shared) */ 384#define DICR 0x18 /* DMA Interrupt Control Register (shared) */ 385#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */ 386#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */ 387 388#define TDMR 0x02 /* Transmit DMA mode Register */ 389#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */ 390#define TBCR 0x2a /* Transmit Byte count Register */ 391#define TARL 0x2c /* Transmit Address Register (low) */ 392#define TARU 0x2e /* Transmit Address Register (high) */ 393#define NTBCR 0x3a /* Next Transmit Byte count Register */ 394#define NTARL 0x3c /* Next Transmit Address Register (low) */ 395#define NTARU 0x3e /* Next Transmit Address Register (high) */ 396 397#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */ 398#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */ 399#define RBCR 0xaa /* Receive Byte count Register */ 400#define RARL 0xac /* Receive Address Register (low) */ 401#define RARU 0xae /* Receive Address Register (high) */ 402#define NRBCR 0xba /* Next Receive Byte count Register */ 403#define NRARL 0xbc /* Next Receive Address Register (low) */ 404#define NRARU 0xbe /* Next Receive Address Register (high) */ 405 406 407/* 408 * MACRO DEFINITIONS FOR MODEM STATUS BITS 409 */ 410 411#define MODEMSTATUS_DTR 0x80 412#define MODEMSTATUS_DSR 0x40 413#define MODEMSTATUS_RTS 0x20 414#define MODEMSTATUS_CTS 0x10 415#define MODEMSTATUS_RI 0x04 416#define MODEMSTATUS_DCD 0x01 417 418 419/* 420 * Channel Command/Address Register (CCAR) Command Codes 421 */ 422 423#define RTCmd_Null 0x0000 424#define RTCmd_ResetHighestIus 0x1000 425#define RTCmd_TriggerChannelLoadDma 0x2000 426#define RTCmd_TriggerRxDma 0x2800 427#define RTCmd_TriggerTxDma 0x3000 428#define RTCmd_TriggerRxAndTxDma 0x3800 429#define RTCmd_PurgeRxFifo 0x4800 430#define RTCmd_PurgeTxFifo 0x5000 431#define RTCmd_PurgeRxAndTxFifo 0x5800 432#define RTCmd_LoadRcc 0x6800 433#define RTCmd_LoadTcc 0x7000 434#define RTCmd_LoadRccAndTcc 0x7800 435#define 
RTCmd_LoadTC0 0x8800 436#define RTCmd_LoadTC1 0x9000 437#define RTCmd_LoadTC0AndTC1 0x9800 438#define RTCmd_SerialDataLSBFirst 0xa000 439#define RTCmd_SerialDataMSBFirst 0xa800 440#define RTCmd_SelectBigEndian 0xb000 441#define RTCmd_SelectLittleEndian 0xb800 442 443 444/* 445 * DMA Command/Address Register (DCAR) Command Codes 446 */ 447 448#define DmaCmd_Null 0x0000 449#define DmaCmd_ResetTxChannel 0x1000 450#define DmaCmd_ResetRxChannel 0x1200 451#define DmaCmd_StartTxChannel 0x2000 452#define DmaCmd_StartRxChannel 0x2200 453#define DmaCmd_ContinueTxChannel 0x3000 454#define DmaCmd_ContinueRxChannel 0x3200 455#define DmaCmd_PauseTxChannel 0x4000 456#define DmaCmd_PauseRxChannel 0x4200 457#define DmaCmd_AbortTxChannel 0x5000 458#define DmaCmd_AbortRxChannel 0x5200 459#define DmaCmd_InitTxChannel 0x7000 460#define DmaCmd_InitRxChannel 0x7200 461#define DmaCmd_ResetHighestDmaIus 0x8000 462#define DmaCmd_ResetAllChannels 0x9000 463#define DmaCmd_StartAllChannels 0xa000 464#define DmaCmd_ContinueAllChannels 0xb000 465#define DmaCmd_PauseAllChannels 0xc000 466#define DmaCmd_AbortAllChannels 0xd000 467#define DmaCmd_InitAllChannels 0xf000 468 469#define TCmd_Null 0x0000 470#define TCmd_ClearTxCRC 0x2000 471#define TCmd_SelectTicrTtsaData 0x4000 472#define TCmd_SelectTicrTxFifostatus 0x5000 473#define TCmd_SelectTicrIntLevel 0x6000 474#define TCmd_SelectTicrdma_level 0x7000 475#define TCmd_SendFrame 0x8000 476#define TCmd_SendAbort 0x9000 477#define TCmd_EnableDleInsertion 0xc000 478#define TCmd_DisableDleInsertion 0xd000 479#define TCmd_ClearEofEom 0xe000 480#define TCmd_SetEofEom 0xf000 481 482#define RCmd_Null 0x0000 483#define RCmd_ClearRxCRC 0x2000 484#define RCmd_EnterHuntmode 0x3000 485#define RCmd_SelectRicrRtsaData 0x4000 486#define RCmd_SelectRicrRxFifostatus 0x5000 487#define RCmd_SelectRicrIntLevel 0x6000 488#define RCmd_SelectRicrdma_level 0x7000 489 490/* 491 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR) 492 */ 493 494#define 
RECEIVE_STATUS BIT5 495#define RECEIVE_DATA BIT4 496#define TRANSMIT_STATUS BIT3 497#define TRANSMIT_DATA BIT2 498#define IO_PIN BIT1 499#define MISC BIT0 500 501 502/* 503 * Receive status Bits in Receive Command/status Register RCSR 504 */ 505 506#define RXSTATUS_SHORT_FRAME BIT8 507#define RXSTATUS_CODE_VIOLATION BIT8 508#define RXSTATUS_EXITED_HUNT BIT7 509#define RXSTATUS_IDLE_RECEIVED BIT6 510#define RXSTATUS_BREAK_RECEIVED BIT5 511#define RXSTATUS_ABORT_RECEIVED BIT5 512#define RXSTATUS_RXBOUND BIT4 513#define RXSTATUS_CRC_ERROR BIT3 514#define RXSTATUS_FRAMING_ERROR BIT3 515#define RXSTATUS_ABORT BIT2 516#define RXSTATUS_PARITY_ERROR BIT2 517#define RXSTATUS_OVERRUN BIT1 518#define RXSTATUS_DATA_AVAILABLE BIT0 519#define RXSTATUS_ALL 0x01f6 520#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) ) 521 522/* 523 * Values for setting transmit idle mode in 524 * Transmit Control/status Register (TCSR) 525 */ 526#define IDLEMODE_FLAGS 0x0000 527#define IDLEMODE_ALT_ONE_ZERO 0x0100 528#define IDLEMODE_ZERO 0x0200 529#define IDLEMODE_ONE 0x0300 530#define IDLEMODE_ALT_MARK_SPACE 0x0500 531#define IDLEMODE_SPACE 0x0600 532#define IDLEMODE_MARK 0x0700 533#define IDLEMODE_MASK 0x0700 534 535/* 536 * IUSC revision identifiers 537 */ 538#define IUSC_SL1660 0x4d44 539#define IUSC_PRE_SL1660 0x4553 540 541/* 542 * Transmit status Bits in Transmit Command/status Register (TCSR) 543 */ 544 545#define TCSR_PRESERVE 0x0F00 546 547#define TCSR_UNDERWAIT BIT11 548#define TXSTATUS_PREAMBLE_SENT BIT7 549#define TXSTATUS_IDLE_SENT BIT6 550#define TXSTATUS_ABORT_SENT BIT5 551#define TXSTATUS_EOF_SENT BIT4 552#define TXSTATUS_EOM_SENT BIT4 553#define TXSTATUS_CRC_SENT BIT3 554#define TXSTATUS_ALL_SENT BIT2 555#define TXSTATUS_UNDERRUN BIT1 556#define TXSTATUS_FIFO_EMPTY BIT0 557#define TXSTATUS_ALL 0x00fa 558#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) ) 559 560 561#define 
MISCSTATUS_RXC_LATCHED BIT15 562#define MISCSTATUS_RXC BIT14 563#define MISCSTATUS_TXC_LATCHED BIT13 564#define MISCSTATUS_TXC BIT12 565#define MISCSTATUS_RI_LATCHED BIT11 566#define MISCSTATUS_RI BIT10 567#define MISCSTATUS_DSR_LATCHED BIT9 568#define MISCSTATUS_DSR BIT8 569#define MISCSTATUS_DCD_LATCHED BIT7 570#define MISCSTATUS_DCD BIT6 571#define MISCSTATUS_CTS_LATCHED BIT5 572#define MISCSTATUS_CTS BIT4 573#define MISCSTATUS_RCC_UNDERRUN BIT3 574#define MISCSTATUS_DPLL_NO_SYNC BIT2 575#define MISCSTATUS_BRG1_ZERO BIT1 576#define MISCSTATUS_BRG0_ZERO BIT0 577 578#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0)) 579#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f)) 580 581#define SICR_RXC_ACTIVE BIT15 582#define SICR_RXC_INACTIVE BIT14 583#define SICR_RXC (BIT15+BIT14) 584#define SICR_TXC_ACTIVE BIT13 585#define SICR_TXC_INACTIVE BIT12 586#define SICR_TXC (BIT13+BIT12) 587#define SICR_RI_ACTIVE BIT11 588#define SICR_RI_INACTIVE BIT10 589#define SICR_RI (BIT11+BIT10) 590#define SICR_DSR_ACTIVE BIT9 591#define SICR_DSR_INACTIVE BIT8 592#define SICR_DSR (BIT9+BIT8) 593#define SICR_DCD_ACTIVE BIT7 594#define SICR_DCD_INACTIVE BIT6 595#define SICR_DCD (BIT7+BIT6) 596#define SICR_CTS_ACTIVE BIT5 597#define SICR_CTS_INACTIVE BIT4 598#define SICR_CTS (BIT5+BIT4) 599#define SICR_RCC_UNDERFLOW BIT3 600#define SICR_DPLL_NO_SYNC BIT2 601#define SICR_BRG1_ZERO BIT1 602#define SICR_BRG0_ZERO BIT0 603 604void usc_DisableMasterIrqBit( struct mgsl_struct *info ); 605void usc_EnableMasterIrqBit( struct mgsl_struct *info ); 606void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 607void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 608void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask ); 609 610#define usc_EnableInterrupts( a, b ) \ 611 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) ) 612 613#define usc_DisableInterrupts( a, b ) \ 614 usc_OutReg( 
(a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) ) 615 616#define usc_EnableMasterIrqBit(a) \ 617 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) ) 618 619#define usc_DisableMasterIrqBit(a) \ 620 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) ) 621 622#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) ) 623 624/* 625 * Transmit status Bits in Transmit Control status Register (TCSR) 626 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) 627 */ 628 629#define TXSTATUS_PREAMBLE_SENT BIT7 630#define TXSTATUS_IDLE_SENT BIT6 631#define TXSTATUS_ABORT_SENT BIT5 632#define TXSTATUS_EOF BIT4 633#define TXSTATUS_CRC_SENT BIT3 634#define TXSTATUS_ALL_SENT BIT2 635#define TXSTATUS_UNDERRUN BIT1 636#define TXSTATUS_FIFO_EMPTY BIT0 637 638#define DICR_MASTER BIT15 639#define DICR_TRANSMIT BIT0 640#define DICR_RECEIVE BIT1 641 642#define usc_EnableDmaInterrupts(a,b) \ 643 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) ) 644 645#define usc_DisableDmaInterrupts(a,b) \ 646 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) ) 647 648#define usc_EnableStatusIrqs(a,b) \ 649 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) ) 650 651#define usc_DisablestatusIrqs(a,b) \ 652 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) ) 653 654/* Transmit status Bits in Transmit Control status Register (TCSR) */ 655/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ 656 657 658#define DISABLE_UNCONDITIONAL 0 659#define DISABLE_END_OF_FRAME 1 660#define ENABLE_UNCONDITIONAL 2 661#define ENABLE_AUTO_CTS 3 662#define ENABLE_AUTO_DCD 3 663#define usc_EnableTransmitter(a,b) \ 664 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) ) 665#define usc_EnableReceiver(a,b) \ 666 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) ) 667 668static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port ); 669static void usc_OutDmaReg( struct 
mgsl_struct *info, u16 Port, u16 Value ); 670static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ); 671 672static u16 usc_InReg( struct mgsl_struct *info, u16 Port ); 673static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value ); 674static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ); 675void usc_RCmd( struct mgsl_struct *info, u16 Cmd ); 676void usc_TCmd( struct mgsl_struct *info, u16 Cmd ); 677 678#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b))) 679#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b)) 680 681#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1)) 682 683static void usc_process_rxoverrun_sync( struct mgsl_struct *info ); 684static void usc_start_receiver( struct mgsl_struct *info ); 685static void usc_stop_receiver( struct mgsl_struct *info ); 686 687static void usc_start_transmitter( struct mgsl_struct *info ); 688static void usc_stop_transmitter( struct mgsl_struct *info ); 689static void usc_set_txidle( struct mgsl_struct *info ); 690static void usc_load_txfifo( struct mgsl_struct *info ); 691 692static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate ); 693static void usc_enable_loopback( struct mgsl_struct *info, int enable ); 694 695static void usc_get_serial_signals( struct mgsl_struct *info ); 696static void usc_set_serial_signals( struct mgsl_struct *info ); 697 698static void usc_reset( struct mgsl_struct *info ); 699 700static void usc_set_sync_mode( struct mgsl_struct *info ); 701static void usc_set_sdlc_mode( struct mgsl_struct *info ); 702static void usc_set_async_mode( struct mgsl_struct *info ); 703static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate ); 704 705static void usc_loopback_frame( struct mgsl_struct *info ); 706 707static void mgsl_tx_timeout(unsigned long context); 708 709 710static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ); 711static void usc_loopmode_insert_request( struct 
mgsl_struct * info ); 712static int usc_loopmode_active( struct mgsl_struct * info); 713static void usc_loopmode_send_done( struct mgsl_struct * info ); 714 715static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); 716 717#if SYNCLINK_GENERIC_HDLC 718#define dev_to_port(D) (dev_to_hdlc(D)->priv) 719static void hdlcdev_tx_done(struct mgsl_struct *info); 720static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); 721static int hdlcdev_init(struct mgsl_struct *info); 722static void hdlcdev_exit(struct mgsl_struct *info); 723#endif 724 725/* 726 * Defines a BUS descriptor value for the PCI adapter 727 * local bus address ranges. 728 */ 729 730#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \ 731(0x00400020 + \ 732((WrHold) << 30) + \ 733((WrDly) << 28) + \ 734((RdDly) << 26) + \ 735((Nwdd) << 20) + \ 736((Nwad) << 15) + \ 737((Nxda) << 13) + \ 738((Nrdd) << 11) + \ 739((Nrad) << 6) ) 740 741static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit); 742 743/* 744 * Adapter diagnostic routines 745 */ 746static bool mgsl_register_test( struct mgsl_struct *info ); 747static bool mgsl_irq_test( struct mgsl_struct *info ); 748static bool mgsl_dma_test( struct mgsl_struct *info ); 749static bool mgsl_memory_test( struct mgsl_struct *info ); 750static int mgsl_adapter_test( struct mgsl_struct *info ); 751 752/* 753 * device and resource management routines 754 */ 755static int mgsl_claim_resources(struct mgsl_struct *info); 756static void mgsl_release_resources(struct mgsl_struct *info); 757static void mgsl_add_device(struct mgsl_struct *info); 758static struct mgsl_struct* mgsl_allocate_device(void); 759 760/* 761 * DMA buffer manupulation functions. 
762 */ 763static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ); 764static bool mgsl_get_rx_frame( struct mgsl_struct *info ); 765static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info ); 766static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ); 767static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ); 768static int num_free_tx_dma_buffers(struct mgsl_struct *info); 769static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize); 770static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count); 771 772/* 773 * DMA and Shared Memory buffer allocation and formatting 774 */ 775static int mgsl_allocate_dma_buffers(struct mgsl_struct *info); 776static void mgsl_free_dma_buffers(struct mgsl_struct *info); 777static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 778static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 779static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info); 780static void mgsl_free_buffer_list_memory(struct mgsl_struct *info); 781static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info); 782static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info); 783static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info); 784static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info); 785static bool load_next_tx_holding_buffer(struct mgsl_struct *info); 786static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize); 787 788/* 789 * Bottom half interrupt handlers 790 */ 791static void mgsl_bh_handler(struct work_struct *work); 792static void mgsl_bh_receive(struct mgsl_struct *info); 793static void mgsl_bh_transmit(struct mgsl_struct *info); 794static void 
mgsl_bh_status(struct mgsl_struct *info); 795 796/* 797 * Interrupt handler routines and dispatch table. 798 */ 799static void mgsl_isr_null( struct mgsl_struct *info ); 800static void mgsl_isr_transmit_data( struct mgsl_struct *info ); 801static void mgsl_isr_receive_data( struct mgsl_struct *info ); 802static void mgsl_isr_receive_status( struct mgsl_struct *info ); 803static void mgsl_isr_transmit_status( struct mgsl_struct *info ); 804static void mgsl_isr_io_pin( struct mgsl_struct *info ); 805static void mgsl_isr_misc( struct mgsl_struct *info ); 806static void mgsl_isr_receive_dma( struct mgsl_struct *info ); 807static void mgsl_isr_transmit_dma( struct mgsl_struct *info ); 808 809typedef void (*isr_dispatch_func)(struct mgsl_struct *); 810 811static isr_dispatch_func UscIsrTable[7] = 812{ 813 mgsl_isr_null, 814 mgsl_isr_misc, 815 mgsl_isr_io_pin, 816 mgsl_isr_transmit_data, 817 mgsl_isr_transmit_status, 818 mgsl_isr_receive_data, 819 mgsl_isr_receive_status 820}; 821 822/* 823 * ioctl call handlers 824 */ 825static int tiocmget(struct tty_struct *tty, struct file *file); 826static int tiocmset(struct tty_struct *tty, struct file *file, 827 unsigned int set, unsigned int clear); 828static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount 829 __user *user_icount); 830static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params); 831static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params); 832static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode); 833static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode); 834static int mgsl_txenable(struct mgsl_struct * info, int enable); 835static int mgsl_txabort(struct mgsl_struct * info); 836static int mgsl_rxenable(struct mgsl_struct * info, int enable); 837static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask); 838static int mgsl_loopmode_send_done( struct mgsl_struct * info ); 839 840/* set non-zero 
on successful registration with PCI subsystem */
static bool pci_registered;

/*
 * Global linked list of SyncLink devices
 */
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static int break_on_load;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor;

/*
 * Array of user specified options for ISA adapters.
 */
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);

static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";

/* PCI probe/remove entry points, registered via synclink_pci_driver below */
static int synclink_init_one (struct pci_dev *dev,
				     const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);

/* PCI IDs claimed by this driver (Microgate USC adapters) */
static struct pci_device_id synclink_pci_tbl[] = {
	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);

MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name		= "synclink",
	.id_table	= synclink_pci_tbl,
	.probe		= synclink_init_one,
	.remove		= __devexit_p(synclink_remove_one),
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256


static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 * (Returns its own address; never called for its value otherwise.)
 */
static void* mgsl_get_text_ptr(void)
{
	return mgsl_get_text_ptr;
}

/* Sanity-check a device instance pointer before use.
 *
 * info    device instance to validate
 * name    tty name, used only in the warning message
 * routine caller's name, used only in the warning message
 *
 * Returns 1 if info is NULL or (when MGSL_PARANOIA_CHECK is defined)
 * its magic field does not match MGSL_MAGIC; 0 if the pointer looks valid.
 */
static inline int mgsl_paranoia_check(struct mgsl_struct *info,
					char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for mgsl struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null mgsl_struct for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 *
 * Takes a ldisc reference for the duration of the receive_buf call and
 * drops it afterwards; silently does nothing if tty is NULL, no ldisc
 * reference can be obtained, or the ldisc has no receive_buf op.
 */

static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* mgsl_stop()		throttle (stop) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 *
 * Takes irq_spinlock internally; safe to call without locks held.
 */
static void mgsl_stop(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_stop(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->tx_enabled)
		usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_stop() */

/* mgsl_start()		release (start) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 *
 * Counterpart of mgsl_stop(); restarts the transmitter only if it is
 * currently disabled. Takes irq_spinlock internally.
 */
static void mgsl_start(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_start(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_enabled)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_start() */

/*
 * Bottom half work queue access functions
 */

/* mgsl_bh_action()	Return next bottom half action to perform.
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* consume exactly one pending work item per call;
	 * priority order is RECEIVE, then TRANSMIT, then STATUS */
	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete so the ISR will
		 * schedule_work() again when new work arrives */
		info->bh_running = false;
		info->bh_requested = false;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 * Perform bottom half processing of work items queued by ISR.
 * Runs in process (workqueue) context; loops until mgsl_bh_action()
 * reports no more pending work.
 */
static void mgsl_bh_handler(struct work_struct *work)
{
	struct mgsl_struct *info =
		container_of(work, struct mgsl_struct, task);
	int action;

	if (!info)
		return;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = true;

	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

/*
 * Deliver all completed receive frames to the upper layer.
 * Frame extraction routine depends on mode: HDLC frames via
 * mgsl_get_rx_frame(), otherwise raw buffers via mgsl_get_raw_rx_frame().
 * On an RCC underrun the receiver is restarted (under the IRQ lock)
 * and processing stops for this pass.
 */
static void mgsl_bh_receive(struct mgsl_struct *info)
{
	bool (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do
	{
		if (info->rx_rcc_underrun) {
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

/*
 * Transmit-side bottom half: wake up writers blocked on the tty,
 * and resume RxD->TxD echoing if a loopmode send-done was requested
 * while the transmitter was busy.
 */
static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->port.tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/*
 * Status bottom half: reset the per-signal interrupt storm counters
 * (incremented in mgsl_isr_io_pin up to IO_PIN_SHUTDOWN_LIMIT).
 */
static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	/* HDLC loop mode: an abort sequence is the "insert" signal;
	 * once seen, begin echoing RxD to TxD and stop watching for it */
	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = false;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	/* hunt-exit / idle events only update counters and wake waiters
	 * blocked in the event ioctl path */
	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	/* acknowledge the interrupt and clear the latched status bits
	 * AFTER they have been acted upon */
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

}	/* end of mgsl_isr_receive_status() */

/* mgsl_isr_transmit_status()
 *
 * 	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 * 	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	       pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave	*/
		/* the TxFifo with data from the aborted frame	*/
		/* so purge the TxFifo. Also shutdown the DMA	*/
		/* channel in case there is data remaining in 	*/
		/* the DMA buffer				*/
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	/* classify the completion for the statistics counters;
	 * the final else (no recognized status bit) counts as underrun */
	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	/* frame (or async buffer) is done: drop any queued xmit data
	 * and cancel the transmit-timeout timer */
	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = false;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		if (info->port.tty->stopped || info->port.tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		/* defer tty_wakeup() to the bottom half */
		info->pending_bh |= BH_TRANSMIT;
	}

}	/* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 * 	Service an Input/Output pin interrupt.
The type of
 * 	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	       pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct	mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters; each signal's interrupt is
		 * disabled after IO_PIN_SHUTDOWN_LIMIT changes to protect
		 * against interrupt storms (counters are reset by the
		 * BH_STATUS bottom half) */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			/* mirror DCD onto the generic HDLC net device carrier */
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		/* carrier-detect handling: DCD up wakes blocked opens,
		 * DCD down hangs up the port (unless CLOCAL) */
		if ( (info->port.flags & ASYNC_CHECK_CD) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->port.open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->port.tty)
					tty_hangup(info->port.tty);
			}
		}

		/* hardware (CTS) flow control: start/stop the transmitter
		 * to follow the remote end's CTS signal */
		if ( (info->port.flags & ASYNC_CTS_FLOW) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->port.tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = true;
	}

}	/* end of mgsl_isr_io_pin() */

/* mgsl_isr_transmit_data()
 *
 * 	Service a transmit data interrupt (async mode only).
1403 * 1404 * Arguments: info pointer to device instance data 1405 * Return Value: None 1406 */ 1407static void mgsl_isr_transmit_data( struct mgsl_struct *info ) 1408{ 1409 if ( debug_level >= DEBUG_LEVEL_ISR ) 1410 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n", 1411 __FILE__,__LINE__,info->xmit_cnt); 1412 1413 usc_ClearIrqPendingBits( info, TRANSMIT_DATA ); 1414 1415 if (info->port.tty->stopped || info->port.tty->hw_stopped) { 1416 usc_stop_transmitter(info); 1417 return; 1418 } 1419 1420 if ( info->xmit_cnt ) 1421 usc_load_txfifo( info ); 1422 else 1423 info->tx_active = false; 1424 1425 if (info->xmit_cnt < WAKEUP_CHARS) 1426 info->pending_bh |= BH_TRANSMIT; 1427 1428} /* end of mgsl_isr_transmit_data() */ 1429 1430/* mgsl_isr_receive_data() 1431 * 1432 * Service a receive data interrupt. This occurs 1433 * when operating in asynchronous interrupt transfer mode. 1434 * The receive data FIFO is flushed to the receive data buffers. 1435 * 1436 * Arguments: info pointer to device instance data 1437 * Return Value: None 1438 */ 1439static void mgsl_isr_receive_data( struct mgsl_struct *info ) 1440{ 1441 int Fifocount; 1442 u16 status; 1443 int work = 0; 1444 unsigned char DataByte; 1445 struct tty_struct *tty = info->port.tty; 1446 struct mgsl_icount *icount = &info->icount; 1447 1448 if ( debug_level >= DEBUG_LEVEL_ISR ) 1449 printk("%s(%d):mgsl_isr_receive_data\n", 1450 __FILE__,__LINE__); 1451 1452 usc_ClearIrqPendingBits( info, RECEIVE_DATA ); 1453 1454 /* select FIFO status for RICR readback */ 1455 usc_RCmd( info, RCmd_SelectRicrRxFifostatus ); 1456 1457 /* clear the Wordstatus bit so that status readback */ 1458 /* only reflects the status of this byte */ 1459 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 )); 1460 1461 /* flush the receive FIFO */ 1462 1463 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) { 1464 int flag; 1465 1466 /* read one byte from RxFIFO */ 1467 outw( (inw(info->io_base + CCAR) & 0x0780) | 
(RDR+LSBONLY), 1468 info->io_base + CCAR ); 1469 DataByte = inb( info->io_base + CCAR ); 1470 1471 /* get the status of the received byte */ 1472 status = usc_InReg(info, RCSR); 1473 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1474 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) 1475 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); 1476 1477 icount->rx++; 1478 1479 flag = 0; 1480 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1481 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) { 1482 printk("rxerr=%04X\n",status); 1483 /* update error statistics */ 1484 if ( status & RXSTATUS_BREAK_RECEIVED ) { 1485 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR); 1486 icount->brk++; 1487 } else if (status & RXSTATUS_PARITY_ERROR) 1488 icount->parity++; 1489 else if (status & RXSTATUS_FRAMING_ERROR) 1490 icount->frame++; 1491 else if (status & RXSTATUS_OVERRUN) { 1492 /* must issue purge fifo cmd before */ 1493 /* 16C32 accepts more receive chars */ 1494 usc_RTCmd(info,RTCmd_PurgeRxFifo); 1495 icount->overrun++; 1496 } 1497 1498 /* discard char if tty control flags say so */ 1499 if (status & info->ignore_status_mask) 1500 continue; 1501 1502 status &= info->read_status_mask; 1503 1504 if (status & RXSTATUS_BREAK_RECEIVED) { 1505 flag = TTY_BREAK; 1506 if (info->port.flags & ASYNC_SAK) 1507 do_SAK(tty); 1508 } else if (status & RXSTATUS_PARITY_ERROR) 1509 flag = TTY_PARITY; 1510 else if (status & RXSTATUS_FRAMING_ERROR) 1511 flag = TTY_FRAME; 1512 } /* end of if (error) */ 1513 tty_insert_flip_char(tty, DataByte, flag); 1514 if (status & RXSTATUS_OVERRUN) { 1515 /* Overrun is special, since it's 1516 * reported immediately, and doesn't 1517 * affect the current character 1518 */ 1519 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1520 } 1521 } 1522 1523 if ( debug_level >= DEBUG_LEVEL_ISR ) { 1524 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", 1525 __FILE__,__LINE__,icount->rx,icount->brk, 1526 
icount->parity,icount->frame,icount->overrun); 1527 } 1528 1529 if(work) 1530 tty_flip_buffer_push(tty); 1531} 1532 1533/* mgsl_isr_misc() 1534 * 1535 * Service a miscellaneous interrupt source. 1536 * 1537 * Arguments: info pointer to device extension (instance data) 1538 * Return Value: None 1539 */ 1540static void mgsl_isr_misc( struct mgsl_struct *info ) 1541{ 1542 u16 status = usc_InReg( info, MISR ); 1543 1544 if ( debug_level >= DEBUG_LEVEL_ISR ) 1545 printk("%s(%d):mgsl_isr_misc status=%04X\n", 1546 __FILE__,__LINE__,status); 1547 1548 if ((status & MISCSTATUS_RCC_UNDERRUN) && 1549 (info->params.mode == MGSL_MODE_HDLC)) { 1550 1551 /* turn off receiver and rx DMA */ 1552 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 1553 usc_DmaCmd(info, DmaCmd_ResetRxChannel); 1554 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); 1555 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); 1556 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS); 1557 1558 /* schedule BH handler to restart receiver */ 1559 info->pending_bh |= BH_RECEIVE; 1560 info->rx_rcc_underrun = true; 1561 } 1562 1563 usc_ClearIrqPendingBits( info, MISC ); 1564 usc_UnlatchMiscstatusBits( info, status ); 1565 1566} /* end of mgsl_isr_misc() */ 1567 1568/* mgsl_isr_null() 1569 * 1570 * Services undefined interrupt vectors from the 1571 * USC. (hence this function SHOULD never be called) 1572 * 1573 * Arguments: info pointer to device extension (instance data) 1574 * Return Value: None 1575 */ 1576static void mgsl_isr_null( struct mgsl_struct *info ) 1577{ 1578 1579} /* end of mgsl_isr_null() */ 1580 1581/* mgsl_isr_receive_dma() 1582 * 1583 * Service a receive DMA channel interrupt. 1584 * For this driver there are two sources of receive DMA interrupts 1585 * as identified in the Receive DMA mode Register (RDMR): 1586 * 1587 * BIT3 EOA/EOL End of List, all receive buffers in receive 1588 * buffer list have been filled (no more free buffers 1589 * available). The DMA controller has shut down. 
 *
 * 	BIT2	EOB		End of Buffer. This interrupt occurs when a receive
 * 				DMA buffer is terminated in response to completion
 * 				of a good frame or a frame with errors. The status
 * 				of the frame is stored in the buffer entry in the
 * 				list of receive buffer entries.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* actual frame processing is deferred to the bottom half */
	info->pending_bh |= BH_RECEIVE;

	if ( status & BIT3 ) {
		/* EOA/EOL: receive buffer list exhausted, DMA stopped */
		info->rx_overflow = true;
		info->icount.buf_overrun++;
	}

}	/* end of mgsl_isr_receive_dma() */

/* mgsl_isr_transmit_dma()
 *
 *	This function services a transmit DMA channel interrupt.
 *
 *	For this driver there is one source of transmit DMA interrupts
 *	as identified in the Transmit DMA Mode Register (TDMR):
 *
 *     	BIT2  EOB       End of Buffer. This interrupt occurs when a
 *     			transmit DMA buffer has been emptied.
 *
 *     	The driver maintains enough transmit DMA buffers to hold at least
 *     	one max frame size transmit frame. When operating in a buffered
 *      transmit mode, there may be enough transmit DMA buffers to hold at
 *      least two or more max frame size frames. On an EOB condition,
 *      determine if there are any queued transmit buffers and copy into
 *      transmit DMA buffers if we have room.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 *  try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {
			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}

}	/* end of mgsl_isr_transmit_dma() */

/* mgsl_interrupt()
 *
 * 	Interrupt service routine entry point.
 *
 * Arguments:
 *
 * 	irq		interrupt number that caused interrupt
 * 	dev_id		device ID supplied during interrupt registration
 *
 * Return Value: None
 */
static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
{
	struct mgsl_struct *info = dev_id;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__, __LINE__, info->irq_level);

	spin_lock(&info->irq_spinlock);

	/* dispatch pending interrupt sources until both the serial
	 * controller and DMA vectors read back as idle */
	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector: serial interrupts go through
		 * UscIsrTable; DMA vector bits 10:9 select tx vs rx DMA */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		if ( info->isr_overflow ) {
			/* diagnostic runaway-IRQ guard: mask everything off */
			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
				__FILE__, __LINE__, info->device_name, info->irq_level);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = true;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__, __LINE__, info->irq_level);

	return IRQ_HANDLED;
}	/* end of mgsl_interrupt() */

/* startup()
 *
 * 	Initialize and start device.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int startup(struct mgsl_struct * info)
{
	int retval = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);

	/* already initialized: nothing to do */
	if (info->port.flags & ASYNC_INITIALIZED)
		return 0;

	if (!info->xmit_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);

	/* Allocate and claim adapter resources */
	retval = mgsl_claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = mgsl_adapter_test(info);

	if ( retval ) {
  		if (capable(CAP_SYS_ADMIN) && info->port.tty)
			set_bit(TTY_IO_ERROR, &info->port.tty->flags);
		mgsl_release_resources(info);
  		return retval;
  	}

	/* program hardware for current parameters */
	mgsl_change_params(info);

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags |= ASYNC_INITIALIZED;

	return 0;

}	/* end of startup() */

/* shutdown()
 *
 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void shutdown(struct mgsl_struct * info)
{
	unsigned long flags;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	if (info->xmit_buf) {
		free_page((unsigned long) info->xmit_buf);
		info->xmit_buf = NULL;
	}

	/* quiesce the hardware: master irq off, rx/tx stopped,
	 * all serial and DMA interrupt sources disabled */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_DisableMasterIrqBit(info);
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
		TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);

	/* Disable DMAEN (Port 7, Bit 14) */
	/* This disconnects the DMA request signal from the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));

	/* Disable INTEN (Port 6, Bit12) */
	/* This disconnects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));

	/* drop DTR/RTS on close unless HUPCL says to keep them */
	if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
 		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
		usc_set_serial_signals(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_release_resources(info);

	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags &= ~ASYNC_INITIALIZED;

}	/* end of shutdown() */

/*
 * Program the hardware for the currently selected mode (sync/async)
 * and serial signal state, then re-enable status interrupts and, if the
 * port is reading (or a net device is attached), the receiver.
 * Called with parameters already validated (see mgsl_change_params).
 */
static void mgsl_program_hw(struct mgsl_struct *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ||
	    info->netcount)
		usc_set_sync_mode(info);
	else
		usc_set_async_mode(info);

	usc_set_serial_signals(info);

	/* restart the interrupt-storm counters for the status signals */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
	usc_EnableInterrupts(info, IO_PIN);
	usc_get_serial_signals(info);

	if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
		usc_start_receiver(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Reconfigure adapter based on new parameters
 *
 * Translates the tty termios settings (baud, character size, parity,
 * stop bits, flow control, input error handling) into the device's
 * params/flags/masks and then reprograms the hardware.
 */
static void mgsl_change_params(struct mgsl_struct *info)
{
	unsigned cflag;
	int bits_per_char;

	if (!info->port.tty || !info->port.tty->termios)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_change_params(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = info->port.tty->termios->c_cflag;

	/* if B0 rate (hangup) specified then negate DTR and RTS */
	/* otherwise assert DTR and RTS */
	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);

	/* byte size and parity */

	switch (cflag & CSIZE) {
	case CS5: info->params.data_bits = 5; break;
	case CS6: info->params.data_bits = 6; break;
	case CS7: info->params.data_bits = 7; break;
	case CS8: info->params.data_bits = 8; break;
	/* Never happens, but GCC is too dumb to figure it out */
	default:  info->params.data_bits = 7; break;
	}

	if (cflag & CSTOPB)
		info->params.stop_bits = 2;
	else
		info->params.stop_bits = 1;

	info->params.parity = ASYNC_PARITY_NONE;
	if (cflag & PARENB) {
		if (cflag & PARODD)
			info->params.parity = ASYNC_PARITY_ODD;
		else
			info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
		if (cflag & CMSPAR)
			info->params.parity = ASYNC_PARITY_SPACE;
#endif
	}

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	/* if port data rate is set to 460800 or less then
	 * allow tty settings to override, otherwise keep the
	 * current data rate.
	 */
	if (info->params.data_rate <= 460800)
		info->params.data_rate = tty_get_baud_rate(info->port.tty);

	if ( info->params.data_rate ) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	if (cflag & CRTSCTS)
		info->port.flags |= ASYNC_CTS_FLOW;
	else
		info->port.flags &= ~ASYNC_CTS_FLOW;

	if (cflag & CLOCAL)
		info->port.flags &= ~ASYNC_CHECK_CD;
	else
		info->port.flags |= ASYNC_CHECK_CD;

	/* process tty input control flags */

	info->read_status_mask = RXSTATUS_OVERRUN;
	if (I_INPCK(info->port.tty))
		info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
 		info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;

	/* NOTE(review): ignore_status_mask is only ever OR'ed here, never
	 * reset — bits accumulate across successive termios changes.
	 * This matches long-standing behavior; confirm before changing. */
	if (I_IGNPAR(info->port.tty))
		info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
	if (I_IGNBRK(info->port.tty)) {
		info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
		/* If ignoring parity and break indicators, ignore
		 * overruns too.  (For real raw support).
		 */
		if (I_IGNPAR(info->port.tty))
			info->ignore_status_mask |= RXSTATUS_OVERRUN;
	}

	mgsl_program_hw(info);

}	/* end of mgsl_change_params() */

/* mgsl_put_char()
 *
 * 	Add a character to the transmit buffer.
 *
 * Arguments:		tty	pointer to tty information structure
 *			ch	character to add to transmit buffer
 *
 * Return Value:	1 if the character was queued, 0 otherwise
 */
static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;
	int ret = 0;

	if (debug_level >= DEBUG_LEVEL_INFO) {
		printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
			__FILE__, __LINE__, ch, info->device_name);
	}

	if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
		return 0;

	if (!tty || !info->xmit_buf)
		return 0;

	spin_lock_irqsave(&info->irq_spinlock, flags);

	/* In sync mode only accept data while the transmitter is idle;
	 * the accumulated bytes are sent as one frame by flush_chars/write.
	 */
	if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
		if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
			info->xmit_buf[info->xmit_head++] = ch;
			info->xmit_head &= SERIAL_XMIT_SIZE-1;
			info->xmit_cnt++;
			ret = 1;
		}
	}
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	return ret;

}	/* end of mgsl_put_char() */

/* mgsl_flush_chars()
 *
 *	Enable transmitter so remaining characters in the
 *	transmit buffer are sent.
 *
 * Arguments:		tty	pointer to tty information structure
 * Return Value:	None
 */
static void mgsl_flush_chars(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
			__FILE__,__LINE__,info->device_name,info->xmit_cnt);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
		return;

	if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
	    !info->xmit_buf)
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
			__FILE__,__LINE__,info->device_name );

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (!info->tx_active) {
		if ( (info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
			/* operating in synchronous (frame oriented) mode:
			 * copy data from circular xmit_buf to the
			 * transmit DMA buffer as one frame.
			 */
			mgsl_load_tx_dma_buffer(info,
				 info->xmit_buf,info->xmit_cnt);
		}
		usc_start_transmitter(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_flush_chars() */

/* mgsl_write()
 *
 *	Send a block of data.  In sync (HDLC/RAW) mode each call is one
 *	complete frame; in async mode data is fed through the circular
 *	xmit_buf.
 *
 * Arguments:
 *
 * 	tty		pointer to tty information structure
 * 	buf		pointer to buffer containing send data
 * 	count		size of send data in bytes
 *
 * Return Value:	number of characters written
 */
static int mgsl_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	int	c, ret = 0;
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) count=%d\n",
			__FILE__,__LINE__,info->device_name,count);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
		goto cleanup;

	if (!tty || !info->xmit_buf)
		goto cleanup;

	if ( info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if (info->tx_active) {

			if ( info->params.mode == MGSL_MODE_HDLC ) {
				ret = 0;
				goto cleanup;
			}
			/* transmitter is actively sending data -
			 * if we have multiple transmit dma and
			 * holding buffers, attempt to queue this
			 * frame for transmission at a later time.
			 */
			if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
				/* no tx holding buffers available */
				ret = 0;
				goto cleanup;
			}

			/* queue transmit frame request */
			ret = count;
			save_tx_buffer_request(info,buf,count);

			/* if we have sufficient tx dma buffers,
			 * load the next buffered tx request
			 */
			spin_lock_irqsave(&info->irq_spinlock,flags);
			load_next_tx_holding_buffer(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			goto cleanup;
		}

		/* if operating in HDLC LoopMode and the adapter  */
		/* has yet to be inserted into the loop, we can't */
		/* transmit					  */

		if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
			!usc_loopmode_active(info) )
		{
			ret = 0;
			goto cleanup;
		}

		if ( info->xmit_cnt ) {
			/* Send accumulated data from send_char() calls */
			/* as frame and wait before accepting more data. */
			ret = 0;

			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				info->xmit_buf,info->xmit_cnt);
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
					__FILE__,__LINE__,info->device_name);
		} else {
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
					__FILE__,__LINE__,info->device_name);
			ret = count;
			info->xmit_cnt = count;
			mgsl_load_tx_dma_buffer(info,buf,count);
		}
	} else {
		/* async mode: copy into the circular buffer, chunk by
		 * chunk, bounded by free space and by buffer wraparound.
		 */
		while (1) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			c = min_t(int, count,
				min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
				    SERIAL_XMIT_SIZE - info->xmit_head));
			if (c <= 0) {
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
				break;
			}
			memcpy(info->xmit_buf + info->xmit_head, buf, c);
			info->xmit_head = ((info->xmit_head + c) &
					   (SERIAL_XMIT_SIZE-1));
			info->xmit_cnt += c;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			buf += c;
			count -= c;
			ret += c;
		}
	}

	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_active)
			usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
cleanup:
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);

	return ret;

}	/* end of mgsl_write() */

/* mgsl_write_room()
 *
 *	Return the count of free bytes in transmit buffer
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	free space in bytes (frame size in sync mode)
 */
static int mgsl_write_room(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	int	ret;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
		return 0;
	ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
	if (ret < 0)
		ret = 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_write_room(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name,ret );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode:
		 * room is all-or-nothing, one frame at a time.
		 */
		if ( info->tx_active )
			return 0;
		else
			return HDLC_MAX_FRAME_SIZE;
	}

	return ret;

}	/* end of mgsl_write_room() */

/* mgsl_chars_in_buffer()
 *
 *	Return the count of bytes in transmit buffer
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	queued byte count (max_frame_size while a sync
 *			frame is in flight)
 */
static int mgsl_chars_in_buffer(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
		return 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name,info->xmit_cnt );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if ( info->tx_active )
			return info->max_frame_size;
		else
			return 0;
	}

	return info->xmit_cnt;
}	/* end of mgsl_chars_in_buffer() */

/* mgsl_flush_buffer()
 *
 *	Discard all data in the send buffer
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_flush_buffer(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
		return;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	tty_wakeup(tty);
}

/* mgsl_send_xchar()
 *
 *	Send a high-priority XON/XOFF character
 *
 * Arguments:		tty	pointer to tty info structure
 *			ch	character to send
 * Return Value:	None
 */
static void mgsl_send_xchar(struct tty_struct *tty, char ch)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, ch );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
		return;

	info->x_char = ch;
	if (ch) {
		/* Make sure transmit interrupts are on */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_enabled)
			usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
}	/* end of mgsl_send_xchar() */

/* mgsl_throttle()
 *
 * 	Signal remote device to throttle send data (our receive data)
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_throttle(struct tty_struct * tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_throttle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
		return;

	if (I_IXOFF(tty))
		mgsl_send_xchar(tty, STOP_CHAR(tty));

	/* with hardware flow control, drop RTS to stop the sender */
	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		info->serial_signals &= ~SerialSignal_RTS;
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
}	/* end of mgsl_throttle() */

/* mgsl_unthrottle()
 *
 * 	Signal remote device to stop throttling send data (our receive data)
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_unthrottle(struct tty_struct * tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_unthrottle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
		return;

	if (I_IXOFF(tty)) {
		/* cancel a pending XOFF rather than sending XON after it */
		if (info->x_char)
			info->x_char = 0;
		else
			mgsl_send_xchar(tty, START_CHAR(tty));
	}

	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		info->serial_signals |= SerialSignal_RTS;
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

}	/* end of mgsl_unthrottle() */

/* mgsl_get_stats()
 *
 * 	get the current irq/error counters; a NULL user pointer
 *	resets the counters instead
 *
 * Arguments:	info		pointer to device instance data
 *		user_icount	pointer to buffer to hold returned stats
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
{
	int err;

	/* NOTE(review): debug string says "mgsl_get_params" but this is
	 * mgsl_get_stats - message text left unchanged here.
	 */
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_params(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	if (!user_icount) {
		memset(&info->icount, 0, sizeof(info->icount));
	} else {
		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
		if (err)
			return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_stats() */

/* mgsl_get_params()
 *
 * 	get the current serial parameters information
 *
 * Arguments:	info		pointer to device instance data
 *		user_params	pointer to buffer to hold returned params
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
{
	int err;
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_params(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_params() */

/* mgsl_set_params()
 *
 * 	set the serial parameters and reprogram the hardware
 *
 * Arguments:
 *
 * 	info		pointer to device instance data
 * 	new_params	user buffer containing new serial params
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
{
	unsigned long flags;
	MGSL_PARAMS tmp_params;
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
			info->device_name );
	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	/* update params under the irq lock, then apply outside of it */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_change_params(info);

	return 0;

}	/* end of mgsl_set_params() */

/* mgsl_get_txidle()
 *
 * 	get the current transmit idle mode
 *
 * Arguments:	info		pointer to device instance data
 *		idle_mode	pointer to buffer to hold returned idle mode
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name, info->idle_mode);

	COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_txidle() */

/* mgsl_set_txidle()	service ioctl to set transmit idle mode
 *
 * Arguments:	 	info		pointer to device instance data
 * 			idle_mode	new idle mode
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, idle_mode );

	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->idle_mode = idle_mode;
	usc_set_txidle( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_set_txidle() */

/* mgsl_txenable()
 *
 * 	enable or disable the transmitter
 *
 * Arguments:
 *
 * 	info		pointer to device instance data
 * 	enable		1 = enable, 0 = disable
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_txenable(struct mgsl_struct * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, enable);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( enable ) {
		if ( !info->tx_enabled ) {

			usc_start_transmitter(info);
			/*--------------------------------------------------
			 * if HDLC/SDLC Loop mode, attempt to insert the
			 * station in the 'loop' by setting CMR:13. Upon
			 * receipt of the next GoAhead (RxAbort) sequence,
			 * the OnLoop indicator (CCSR:7) should go active
			 * to indicate that we are on the loop
			 *--------------------------------------------------*/
			if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
				usc_loopmode_insert_request( info );
		}
	} else {
		if ( info->tx_enabled )
			usc_stop_transmitter(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_txenable() */

/* mgsl_txabort()	abort send HDLC frame
 *
 * Arguments:	 	info		pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_txabort(struct mgsl_struct * info)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
			info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
	{
		/* in loop mode give back the token instead of aborting */
		if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
			usc_loopmode_cancel_transmit( info );
		else
			usc_TCmd(info,TCmd_SendAbort);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_txabort() */

/* mgsl_rxenable() 	enable or disable the receiver
 *
 * Arguments:	 	info		pointer to device instance data
 * 			enable		1 = enable, 0 = disable
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_rxenable(struct mgsl_struct * info, int enable)
{
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, enable);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( enable ) {
		if ( !info->rx_enabled )
			usc_start_receiver(info);
	} else {
		if ( info->rx_enabled )
			usc_stop_receiver(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_rxenable() */

/* mgsl_wait_event() 	wait for specified event to occur
 *
 * Arguments:	 	info	pointer to device instance data
 * 			mask	pointer to bitmask of events to wait for
 * Return Value:	0 	if successful and bit mask updated with
 *				the events triggered,
 * 			otherwise error code
 */
static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
{
	unsigned long flags;
	int s;
	int rc=0;
	struct mgsl_icount cprev, cnow;
	int events;
	int mask;
	struct	_input_signal_events oldsigs, newsigs;
	DECLARE_WAITQUEUE(wait, current);

	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
	if (rc) {
		return  -EFAULT;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, mask);

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* return immediately if state matches requested events */
	usc_get_serial_signals(info);
	s = info->serial_signals;
	events = mask &
		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
	if (events) {
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
		goto exit;
	}

	/* save current irq counts */
	cprev = info->icount;
	oldsigs = info->input_signal_events;

	/* enable hunt and idle irqs if needed */
	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		u16 oldreg = usc_InReg(info,RICR);
		u16 newreg = oldreg +
			 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
			 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
		if (oldreg != newreg)
			usc_OutReg(info, RICR, newreg);
	}

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&info->event_wait_q, &wait);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get current irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		newsigs = info->input_signal_events;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason */
		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
		    newsigs.dsr_down == oldsigs.dsr_down &&
		    newsigs.dcd_up   == oldsigs.dcd_up   &&
		    newsigs.dcd_down == oldsigs.dcd_down &&
		    newsigs.cts_up   == oldsigs.cts_up   &&
		    newsigs.cts_down == oldsigs.cts_down &&
		    newsigs.ri_up    == oldsigs.ri_up    &&
		    newsigs.ri_down  == oldsigs.ri_down  &&
		    cnow.exithunt    == cprev.exithunt   &&
		    cnow.rxidle      == cprev.rxidle) {
			rc = -EIO;
			break;
		}

		events = mask &
			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
		if (events)
			break;

		cprev = cnow;
		oldsigs = newsigs;
	}

	remove_wait_queue(&info->event_wait_q, &wait);
	set_current_state(TASK_RUNNING);

	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!waitqueue_active(&info->event_wait_q)) {
			/* disable exit hunt mode/idle rcvd IRQs */
			usc_OutReg(info, RICR, usc_InReg(info,RICR) &
				~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
		}
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
exit:
	if ( rc == 0 )
		PUT_USER(rc, events, mask_ptr);

	return rc;

}	/* end of mgsl_wait_event() */

/* modem_input_wait()
 *
 *	Block until one of the modem inputs selected by arg
 *	(TIOCM_RNG/DSR/CD/CTS) changes state.
 */
static int modem_input_wait(struct mgsl_struct *info,int arg)
{
	unsigned long flags;
	int rc;
	struct mgsl_icount cprev, cnow;
	DECLARE_WAITQUEUE(wait, current);

	/* save current irq counts */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	cprev = info->icount;
	add_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get new irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason */
		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
			rc = -EIO;
			break;
		}

		/* check for change in caller specified modem input */
		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
			rc = 0;
			break;
		}

		cprev = cnow;
	}
	remove_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_RUNNING);
	return rc;
}

/* return the state of the serial control and status signals
 */
static int tiocmget(struct tty_struct *tty, struct file *file)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned int result;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
		((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
		((info->serial_signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
		((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
		((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmget() value=%08X\n",
			 __FILE__,__LINE__, info->device_name, result );
	return result;
}

/* set modem control signals (DTR/RTS)
 */
static int tiocmset(struct tty_struct *tty, struct file *file,
		    unsigned int set, unsigned int clear)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmset(%x,%x)\n",
			__FILE__,__LINE__,info->device_name, set, clear);

	if (set & TIOCM_RTS)
		info->serial_signals |= SerialSignal_RTS;
	if (set & TIOCM_DTR)
		info->serial_signals |= SerialSignal_DTR;
	if (clear & TIOCM_RTS)
		info->serial_signals &= ~SerialSignal_RTS;
	if (clear & TIOCM_DTR)
		info->serial_signals &= ~SerialSignal_DTR;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_set_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/* mgsl_break()		Set or clear transmit break condition
 *
 * Arguments:		tty		pointer to tty instance data
 *			break_state	-1=set break condition, 0=clear
 * Return Value:	error code
 */
static int mgsl_break(struct tty_struct *tty, int break_state)
{
	struct mgsl_struct * info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_break(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, break_state);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
		return -EINVAL;

	/* IOCR bit 7 forces the TxD output to the break (space) state */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (break_state == -1)
		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
	else
		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_break() */

/*
 mgsl_ioctl()	Service an IOCTL request
 *
 * Arguments:
 *
 * 	tty	pointer to tty instance data
 * 	file	pointer to associated file object for device
 * 	cmd	IOCTL command code
 * 	arg	command argument/context
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
		    unsigned int cmd, unsigned long arg)
{
	struct mgsl_struct * info = tty->driver_data;
	int ret;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
			info->device_name, cmd );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
		return -ENODEV;

	/* status/wait ioctls remain usable even when the port is in the
	 * error state; everything else is rejected.
	 */
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
		    return -EIO;
	}

	lock_kernel();
	ret = mgsl_ioctl_common(info, cmd, arg);
	unlock_kernel();
	return ret;
}

/* mgsl_ioctl_common()
 *
 *	Dispatch an ioctl to the matching MGSL_IOC* / TIOC* handler.
 *	Called with the BKL held by mgsl_ioctl().
 */
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
{
	int error;
	struct mgsl_icount cnow;	/* kernel counter temps */
	void __user *argp = (void __user *)arg;
	struct serial_icounter_struct __user *p_cuser;	/* user space */
	unsigned long flags;

	switch (cmd) {
		case MGSL_IOCGPARAMS:
			return mgsl_get_params(info, argp);
		case MGSL_IOCSPARAMS:
			return mgsl_set_params(info, argp);
		case MGSL_IOCGTXIDLE:
			return mgsl_get_txidle(info, argp);
		case MGSL_IOCSTXIDLE:
			return mgsl_set_txidle(info,(int)arg);
		case MGSL_IOCTXENABLE:
			return mgsl_txenable(info,(int)arg);
		case MGSL_IOCRXENABLE:
			return mgsl_rxenable(info,(int)arg);
		case MGSL_IOCTXABORT:
			return mgsl_txabort(info);
		case MGSL_IOCGSTATS:
			return mgsl_get_stats(info, argp);
		case MGSL_IOCWAITEVENT:
			return mgsl_wait_event(info, argp);
		case MGSL_IOCLOOPTXDONE:
			return mgsl_loopmode_send_done(info);
		/* Wait for modem input (DCD,RI,DSR,CTS) change
		 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
		 */
		case TIOCMIWAIT:
			return modem_input_wait(info,(int)arg);

		/*
		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
		 * Return: write counters to the user passed counter struct
		 * NB: both 1->0 and 0->1 transitions are counted except for
		 *     RI where only 0->1 is counted.
		 */
		case TIOCGICOUNT:
			spin_lock_irqsave(&info->irq_spinlock,flags);
			cnow = info->icount;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			p_cuser = argp;
			PUT_USER(error,cnow.cts, &p_cuser->cts);
			if (error) return error;
			PUT_USER(error,cnow.dsr, &p_cuser->dsr);
			if (error) return error;
			PUT_USER(error,cnow.rng, &p_cuser->rng);
			if (error) return error;
			PUT_USER(error,cnow.dcd, &p_cuser->dcd);
			if (error) return error;
			PUT_USER(error,cnow.rx, &p_cuser->rx);
			if (error) return error;
			PUT_USER(error,cnow.tx, &p_cuser->tx);
			if (error) return error;
			PUT_USER(error,cnow.frame, &p_cuser->frame);
			if (error) return error;
			PUT_USER(error,cnow.overrun, &p_cuser->overrun);
			if (error) return error;
			PUT_USER(error,cnow.parity, &p_cuser->parity);
			if (error) return error;
			PUT_USER(error,cnow.brk, &p_cuser->brk);
			if (error) return error;
			PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
			if (error) return error;
			return 0;
		default:
			return -ENOIOCTLCMD;
	}
	return 0;
}

/* mgsl_set_termios()
 *
 * 	Set new termios settings
 *
 * Arguments:
 *
 * 	tty		pointer to tty structure
 * 	termios		pointer to buffer to hold returned old termios
 *
 * Return Value:	None
 */
static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
			tty->driver->name );

	mgsl_change_params(info);

	/* Handle transition to B0 status: drop DTR/RTS (hangup) */
	if (old_termios->c_cflag & CBAUD &&
	    !(tty->termios->c_cflag & CBAUD)) {
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
		spin_lock_irqsave(&info->irq_spinlock,flags);
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle transition away from B0 status: reassert DTR, and RTS
	 * unless hardware flow control currently has us throttled.
	 */
	if (!(old_termios->c_cflag & CBAUD) &&
	    tty->termios->c_cflag & CBAUD) {
		info->serial_signals |= SerialSignal_DTR;
		if (!(tty->termios->c_cflag & CRTSCTS) ||
		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			info->serial_signals |= SerialSignal_RTS;
		}
		spin_lock_irqsave(&info->irq_spinlock,flags);
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle turning off CRTSCTS */
	if (old_termios->c_cflag & CRTSCTS &&
	    !(tty->termios->c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		mgsl_start(tty);
	}

}	/* end of mgsl_set_termios() */

/* mgsl_close()
 *
 * 	Called when port is closed. Wait for remaining data to be
 * 	sent. Disable port and free resources.
 *
 * Arguments:
 *
 * 	tty	pointer to open tty structure
 * 	filp	pointer to open file object
 *
 * Return Value:	None
 */
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct * info = tty->driver_data;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->port.count);

	/* tty_port_close_start() returns 0 when this is not the final
	 * close of the port; in that case skip the shutdown work */
	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	/* drain pending transmit data before shutting down hardware */
	if (info->port.flags & ASYNC_INITIALIZED)
 		mgsl_wait_until_sent(tty, info->timeout);
	mgsl_flush_buffer(tty);
	tty_ldisc_flush(tty);
	shutdown(info);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->port.count);

}	/* end of mgsl_close() */

/* mgsl_wait_until_sent()
 *
 *	Wait until the transmitter is empty.
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	timeout		time to wait for send completion
 *
 * Return Value:	None
 */
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct mgsl_struct * info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
		return;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	lock_kernel();
	if ( info->params.data_rate ) {
	       	char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	/* HDLC/RAW modes: poll the driver's tx_active flag.
	 * Async mode: poll the USC "all sent" hardware status instead. */
	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
			info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}
	unlock_kernel();

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
			 __FILE__,__LINE__, info->device_name );

}	/* end of mgsl_wait_until_sent() */

/* mgsl_hangup()
 *
 *	Called by tty_hangup() when a hangup is signaled.
 *	This is the same as to closing all open files for the port.
3206 * 3207 * Arguments: tty pointer to associated tty object 3208 * Return Value: None 3209 */ 3210static void mgsl_hangup(struct tty_struct *tty) 3211{ 3212 struct mgsl_struct * info = tty->driver_data; 3213 3214 if (debug_level >= DEBUG_LEVEL_INFO) 3215 printk("%s(%d):mgsl_hangup(%s)\n", 3216 __FILE__,__LINE__, info->device_name ); 3217 3218 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup")) 3219 return; 3220 3221 mgsl_flush_buffer(tty); 3222 shutdown(info); 3223 3224 info->port.count = 0; 3225 info->port.flags &= ~ASYNC_NORMAL_ACTIVE; 3226 info->port.tty = NULL; 3227 3228 wake_up_interruptible(&info->port.open_wait); 3229 3230} /* end of mgsl_hangup() */ 3231 3232/* 3233 * carrier_raised() 3234 * 3235 * Return true if carrier is raised 3236 */ 3237 3238static int carrier_raised(struct tty_port *port) 3239{ 3240 unsigned long flags; 3241 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3242 3243 spin_lock_irqsave(&info->irq_spinlock, flags); 3244 usc_get_serial_signals(info); 3245 spin_unlock_irqrestore(&info->irq_spinlock, flags); 3246 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; 3247} 3248 3249static void raise_dtr_rts(struct tty_port *port) 3250{ 3251 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3252 unsigned long flags; 3253 3254 spin_lock_irqsave(&info->irq_spinlock,flags); 3255 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 3256 usc_set_serial_signals(info); 3257 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3258} 3259 3260 3261/* block_til_ready() 3262 * 3263 * Block the current process until the specified port 3264 * is ready to be opened. 
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	filp		pointer to open file object
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise error code
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mgsl_struct *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	bool		do_clocal = false;
	bool		extra_count = false;
	unsigned long	flags;
	int		dcd;
	struct tty_port *port = &info->port;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready on %s\n",
			 __FILE__,__LINE__, tty->driver->name );

	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
		/* nonblock mode is set or port is not enabled */
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	/* CLOCAL means open succeeds without waiting for carrier */
	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = true;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, port->count is dropped by one, so that
	 * mgsl_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */

	retval = 0;
	add_wait_queue(&port->open_wait, &wait);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready before block on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	spin_lock_irqsave(&info->irq_spinlock, flags);
	if (!tty_hung_up_p(filp)) {
		/* remember that we decremented so we can undo it on exit */
		extra_count = true;
		port->count--;
	}
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	port->blocked_open++;

	while (1) {
		if (tty->termios->c_cflag & CBAUD)
			tty_port_raise_dtr_rts(port);

		set_current_state(TASK_INTERRUPTIBLE);

		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		dcd = tty_port_carrier_raised(&info->port);

		/* port ready: not in a close, and carrier (or CLOCAL) */
 		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
 			break;

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
				 __FILE__,__LINE__, tty->driver->name, port->count );

		schedule();
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);

	/* FIXME: Racy on hangup during close wait */
	if (extra_count)
		port->count++;
	port->blocked_open--;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	if (!retval)
		port->flags |= ASYNC_NORMAL_ACTIVE;

	return retval;

}	/* end of block_til_ready() */

/* mgsl_open()
 *
 *	Called when a port is opened.  Init and enable port.
 *	Perform serial-specific initialization for the tty structure.
3372 * 3373 * Arguments: tty pointer to tty info structure 3374 * filp associated file pointer 3375 * 3376 * Return Value: 0 if success, otherwise error code 3377 */ 3378static int mgsl_open(struct tty_struct *tty, struct file * filp) 3379{ 3380 struct mgsl_struct *info; 3381 int retval, line; 3382 unsigned long flags; 3383 3384 /* verify range of specified line number */ 3385 line = tty->index; 3386 if ((line < 0) || (line >= mgsl_device_count)) { 3387 printk("%s(%d):mgsl_open with invalid line #%d.\n", 3388 __FILE__,__LINE__,line); 3389 return -ENODEV; 3390 } 3391 3392 /* find the info structure for the specified line */ 3393 info = mgsl_device_list; 3394 while(info && info->line != line) 3395 info = info->next_device; 3396 if (mgsl_paranoia_check(info, tty->name, "mgsl_open")) 3397 return -ENODEV; 3398 3399 tty->driver_data = info; 3400 info->port.tty = tty; 3401 3402 if (debug_level >= DEBUG_LEVEL_INFO) 3403 printk("%s(%d):mgsl_open(%s), old ref count = %d\n", 3404 __FILE__,__LINE__,tty->driver->name, info->port.count); 3405 3406 /* If port is closing, signal caller to try again */ 3407 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ 3408 if (info->port.flags & ASYNC_CLOSING) 3409 interruptible_sleep_on(&info->port.close_wait); 3410 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ? 3411 -EAGAIN : -ERESTARTSYS); 3412 goto cleanup; 3413 } 3414 3415 info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 
1 : 0; 3416 3417 spin_lock_irqsave(&info->netlock, flags); 3418 if (info->netcount) { 3419 retval = -EBUSY; 3420 spin_unlock_irqrestore(&info->netlock, flags); 3421 goto cleanup; 3422 } 3423 info->port.count++; 3424 spin_unlock_irqrestore(&info->netlock, flags); 3425 3426 if (info->port.count == 1) { 3427 /* 1st open on this device, init hardware */ 3428 retval = startup(info); 3429 if (retval < 0) 3430 goto cleanup; 3431 } 3432 3433 retval = block_til_ready(tty, filp, info); 3434 if (retval) { 3435 if (debug_level >= DEBUG_LEVEL_INFO) 3436 printk("%s(%d):block_til_ready(%s) returned %d\n", 3437 __FILE__,__LINE__, info->device_name, retval); 3438 goto cleanup; 3439 } 3440 3441 if (debug_level >= DEBUG_LEVEL_INFO) 3442 printk("%s(%d):mgsl_open(%s) success\n", 3443 __FILE__,__LINE__, info->device_name); 3444 retval = 0; 3445 3446cleanup: 3447 if (retval) { 3448 if (tty->count == 1) 3449 info->port.tty = NULL; /* tty layer will release tty struct */ 3450 if(info->port.count) 3451 info->port.count--; 3452 } 3453 3454 return retval; 3455 3456} /* end of mgsl_open() */ 3457 3458/* 3459 * /proc fs routines.... 
3460 */ 3461 3462static inline int line_info(char *buf, struct mgsl_struct *info) 3463{ 3464 char stat_buf[30]; 3465 int ret; 3466 unsigned long flags; 3467 3468 if (info->bus_type == MGSL_BUS_TYPE_PCI) { 3469 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X", 3470 info->device_name, info->io_base, info->irq_level, 3471 info->phys_memory_base, info->phys_lcr_base); 3472 } else { 3473 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d", 3474 info->device_name, info->io_base, 3475 info->irq_level, info->dma_level); 3476 } 3477 3478 /* output current serial signal states */ 3479 spin_lock_irqsave(&info->irq_spinlock,flags); 3480 usc_get_serial_signals(info); 3481 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3482 3483 stat_buf[0] = 0; 3484 stat_buf[1] = 0; 3485 if (info->serial_signals & SerialSignal_RTS) 3486 strcat(stat_buf, "|RTS"); 3487 if (info->serial_signals & SerialSignal_CTS) 3488 strcat(stat_buf, "|CTS"); 3489 if (info->serial_signals & SerialSignal_DTR) 3490 strcat(stat_buf, "|DTR"); 3491 if (info->serial_signals & SerialSignal_DSR) 3492 strcat(stat_buf, "|DSR"); 3493 if (info->serial_signals & SerialSignal_DCD) 3494 strcat(stat_buf, "|CD"); 3495 if (info->serial_signals & SerialSignal_RI) 3496 strcat(stat_buf, "|RI"); 3497 3498 if (info->params.mode == MGSL_MODE_HDLC || 3499 info->params.mode == MGSL_MODE_RAW ) { 3500 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d", 3501 info->icount.txok, info->icount.rxok); 3502 if (info->icount.txunder) 3503 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder); 3504 if (info->icount.txabort) 3505 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort); 3506 if (info->icount.rxshort) 3507 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort); 3508 if (info->icount.rxlong) 3509 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong); 3510 if (info->icount.rxover) 3511 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover); 3512 if (info->icount.rxcrc) 3513 ret += 
sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc); 3514 } else { 3515 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d", 3516 info->icount.tx, info->icount.rx); 3517 if (info->icount.frame) 3518 ret += sprintf(buf+ret, " fe:%d", info->icount.frame); 3519 if (info->icount.parity) 3520 ret += sprintf(buf+ret, " pe:%d", info->icount.parity); 3521 if (info->icount.brk) 3522 ret += sprintf(buf+ret, " brk:%d", info->icount.brk); 3523 if (info->icount.overrun) 3524 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun); 3525 } 3526 3527 /* Append serial signal status to end */ 3528 ret += sprintf(buf+ret, " %s\n", stat_buf+1); 3529 3530 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", 3531 info->tx_active,info->bh_requested,info->bh_running, 3532 info->pending_bh); 3533 3534 spin_lock_irqsave(&info->irq_spinlock,flags); 3535 { 3536 u16 Tcsr = usc_InReg( info, TCSR ); 3537 u16 Tdmr = usc_InDmaReg( info, TDMR ); 3538 u16 Ticr = usc_InReg( info, TICR ); 3539 u16 Rscr = usc_InReg( info, RCSR ); 3540 u16 Rdmr = usc_InDmaReg( info, RDMR ); 3541 u16 Ricr = usc_InReg( info, RICR ); 3542 u16 Icr = usc_InReg( info, ICR ); 3543 u16 Dccr = usc_InReg( info, DCCR ); 3544 u16 Tmr = usc_InReg( info, TMR ); 3545 u16 Tccr = usc_InReg( info, TCCR ); 3546 u16 Ccar = inw( info->io_base + CCAR ); 3547 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n" 3548 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n", 3549 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar ); 3550 } 3551 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3552 3553 return ret; 3554 3555} /* end of line_info() */ 3556 3557/* mgsl_read_proc() 3558 * 3559 * Called to print information about devices 3560 * 3561 * Arguments: 3562 * page page of memory to hold returned info 3563 * start 3564 * off 3565 * count 3566 * eof 3567 * data 3568 * 3569 * Return Value: 3570 */ 3571static int mgsl_read_proc(char *page, char **start, off_t off, int count, 3572 int *eof, void 
*data) 3573{ 3574 int len = 0, l; 3575 off_t begin = 0; 3576 struct mgsl_struct *info; 3577 3578 len += sprintf(page, "synclink driver:%s\n", driver_version); 3579 3580 info = mgsl_device_list; 3581 while( info ) { 3582 l = line_info(page + len, info); 3583 len += l; 3584 if (len+begin > off+count) 3585 goto done; 3586 if (len+begin < off) { 3587 begin += len; 3588 len = 0; 3589 } 3590 info = info->next_device; 3591 } 3592 3593 *eof = 1; 3594done: 3595 if (off >= len+begin) 3596 return 0; 3597 *start = page + (off-begin); 3598 return ((count < begin+len-off) ? count : begin+len-off); 3599 3600} /* end of mgsl_read_proc() */ 3601 3602/* mgsl_allocate_dma_buffers() 3603 * 3604 * Allocate and format DMA buffers (ISA adapter) 3605 * or format shared memory buffers (PCI adapter). 3606 * 3607 * Arguments: info pointer to device instance data 3608 * Return Value: 0 if success, otherwise error 3609 */ 3610static int mgsl_allocate_dma_buffers(struct mgsl_struct *info) 3611{ 3612 unsigned short BuffersPerFrame; 3613 3614 info->last_mem_alloc = 0; 3615 3616 /* Calculate the number of DMA buffers necessary to hold the */ 3617 /* largest allowable frame size. Note: If the max frame size is */ 3618 /* not an even multiple of the DMA buffer size then we need to */ 3619 /* round the buffer count per frame up one. */ 3620 3621 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE); 3622 if ( info->max_frame_size % DMABUFFERSIZE ) 3623 BuffersPerFrame++; 3624 3625 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 3626 /* 3627 * The PCI adapter has 256KBytes of shared memory to use. 3628 * This is 64 PAGE_SIZE buffers. 3629 * 3630 * The first page is used for padding at this time so the 3631 * buffer list does not begin at offset 0 of the PCI 3632 * adapter's shared memory. 3633 * 3634 * The 2nd page is used for the buffer list. A 4K buffer 3635 * list can hold 128 DMA_BUFFER structures at 32 bytes 3636 * each. 3637 * 3638 * This leaves 62 4K pages. 
3639 * 3640 * The next N pages are used for transmit frame(s). We 3641 * reserve enough 4K page blocks to hold the required 3642 * number of transmit dma buffers (num_tx_dma_buffers), 3643 * each of MaxFrameSize size. 3644 * 3645 * Of the remaining pages (62-N), determine how many can 3646 * be used to receive full MaxFrameSize inbound frames 3647 */ 3648 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame; 3649 info->rx_buffer_count = 62 - info->tx_buffer_count; 3650 } else { 3651 /* Calculate the number of PAGE_SIZE buffers needed for */ 3652 /* receive and transmit DMA buffers. */ 3653 3654 3655 /* Calculate the number of DMA buffers necessary to */ 3656 /* hold 7 max size receive frames and one max size transmit frame. */ 3657 /* The receive buffer count is bumped by one so we avoid an */ 3658 /* End of List condition if all receive buffers are used when */ 3659 /* using linked list DMA buffers. */ 3660 3661 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame; 3662 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6; 3663 3664 /* 3665 * limit total TxBuffers & RxBuffers to 62 4K total 3666 * (ala PCI Allocation) 3667 */ 3668 3669 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 ) 3670 info->rx_buffer_count = 62 - info->tx_buffer_count; 3671 3672 } 3673 3674 if ( debug_level >= DEBUG_LEVEL_INFO ) 3675 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n", 3676 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count); 3677 3678 if ( mgsl_alloc_buffer_list_memory( info ) < 0 || 3679 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 || 3680 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 || 3681 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 || 3682 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) { 3683 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__); 3684 return -ENOMEM; 3685 } 3686 3687 mgsl_reset_rx_dma_buffers( info 
); 3688 mgsl_reset_tx_dma_buffers( info ); 3689 3690 return 0; 3691 3692} /* end of mgsl_allocate_dma_buffers() */ 3693 3694/* 3695 * mgsl_alloc_buffer_list_memory() 3696 * 3697 * Allocate a common DMA buffer for use as the 3698 * receive and transmit buffer lists. 3699 * 3700 * A buffer list is a set of buffer entries where each entry contains 3701 * a pointer to an actual buffer and a pointer to the next buffer entry 3702 * (plus some other info about the buffer). 3703 * 3704 * The buffer entries for a list are built to form a circular list so 3705 * that when the entire list has been traversed you start back at the 3706 * beginning. 3707 * 3708 * This function allocates memory for just the buffer entries. 3709 * The links (pointer to next entry) are filled in with the physical 3710 * address of the next entry so the adapter can navigate the list 3711 * using bus master DMA. The pointers to the actual buffers are filled 3712 * out later when the actual buffers are allocated. 3713 * 3714 * Arguments: info pointer to device instance data 3715 * Return Value: 0 if success, otherwise error 3716 */ 3717static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info ) 3718{ 3719 unsigned int i; 3720 3721 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 3722 /* PCI adapter uses shared memory. */ 3723 info->buffer_list = info->memory_base + info->last_mem_alloc; 3724 info->buffer_list_phys = info->last_mem_alloc; 3725 info->last_mem_alloc += BUFFERLISTSIZE; 3726 } else { 3727 /* ISA adapter uses system memory. */ 3728 /* The buffer lists are allocated as a common buffer that both */ 3729 /* the processor and adapter can access. This allows the driver to */ 3730 /* inspect portions of the buffer while other portions are being */ 3731 /* updated by the adapter using Bus Master DMA. 
*/ 3732 3733 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL); 3734 if (info->buffer_list == NULL) 3735 return -ENOMEM; 3736 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr); 3737 } 3738 3739 /* We got the memory for the buffer entry lists. */ 3740 /* Initialize the memory block to all zeros. */ 3741 memset( info->buffer_list, 0, BUFFERLISTSIZE ); 3742 3743 /* Save virtual address pointers to the receive and */ 3744 /* transmit buffer lists. (Receive 1st). These pointers will */ 3745 /* be used by the processor to access the lists. */ 3746 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; 3747 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; 3748 info->tx_buffer_list += info->rx_buffer_count; 3749 3750 /* 3751 * Build the links for the buffer entry lists such that 3752 * two circular lists are built. (Transmit and Receive). 3753 * 3754 * Note: the links are physical addresses 3755 * which are read by the adapter to determine the next 3756 * buffer entry to use. 
3757 */ 3758 3759 for ( i = 0; i < info->rx_buffer_count; i++ ) { 3760 /* calculate and store physical address of this buffer entry */ 3761 info->rx_buffer_list[i].phys_entry = 3762 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY)); 3763 3764 /* calculate and store physical address of */ 3765 /* next entry in cirular list of entries */ 3766 3767 info->rx_buffer_list[i].link = info->buffer_list_phys; 3768 3769 if ( i < info->rx_buffer_count - 1 ) 3770 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); 3771 } 3772 3773 for ( i = 0; i < info->tx_buffer_count; i++ ) { 3774 /* calculate and store physical address of this buffer entry */ 3775 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys + 3776 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY)); 3777 3778 /* calculate and store physical address of */ 3779 /* next entry in cirular list of entries */ 3780 3781 info->tx_buffer_list[i].link = info->buffer_list_phys + 3782 info->rx_buffer_count * sizeof(DMABUFFERENTRY); 3783 3784 if ( i < info->tx_buffer_count - 1 ) 3785 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); 3786 } 3787 3788 return 0; 3789 3790} /* end of mgsl_alloc_buffer_list_memory() */ 3791 3792/* Free DMA buffers allocated for use as the 3793 * receive and transmit buffer lists. 3794 * Warning: 3795 * 3796 * The data transfer buffers associated with the buffer list 3797 * MUST be freed before freeing the buffer list itself because 3798 * the buffer list contains the information necessary to free 3799 * the individual buffers! 
 */
static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
{
	/* only the ISA path allocated with dma_alloc_coherent();
	 * PCI buffer lists live in the adapter's shared memory */
	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);

	info->buffer_list = NULL;
	info->rx_buffer_list = NULL;
	info->tx_buffer_list = NULL;

}	/* end of mgsl_free_buffer_list_memory() */

/*
 * mgsl_alloc_frame_memory()
 *
 * 	Allocate the frame DMA buffers used by the specified buffer list.
 * 	Each DMA buffer will be one memory page in size. This is necessary
 * 	because memory can fragment enough that it may be impossible
 * 	contiguous pages.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	BufferList	pointer to list of buffer entries
 *	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
{
	int i;
	u32 phys_addr;

	/* Allocate page sized buffers for the receive buffer list */

	for ( i = 0; i < Buffercount; i++ ) {
		if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
			/* PCI adapter uses shared memory buffers. */
			BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
			phys_addr = info->last_mem_alloc;
			info->last_mem_alloc += DMABUFFERSIZE;
		} else {
			/* ISA adapter uses system memory. */
			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
			if (BufferList[i].virt_addr == NULL)
				return -ENOMEM;
			phys_addr = (u32)(BufferList[i].dma_addr);
		}
		BufferList[i].phys_addr = phys_addr;
	}

	return 0;

}	/* end of mgsl_alloc_frame_memory() */

/*
 * mgsl_free_frame_memory()
 *
 * 	Free the buffers associated with
 * 	each buffer entry of a buffer list.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	BufferList	pointer to list of buffer entries
 *	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	None
 */
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
{
	int i;

	if ( BufferList ) {
		for ( i = 0 ; i < Buffercount ; i++ ) {
			if ( BufferList[i].virt_addr ) {
				/* PCI entries point into shared memory; only
				 * ISA entries own coherent allocations */
				if ( info->bus_type != MGSL_BUS_TYPE_PCI )
					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
				BufferList[i].virt_addr = NULL;
			}
		}
	}

}	/* end of mgsl_free_frame_memory() */

/* mgsl_free_dma_buffers()
 *
 *	Free DMA buffers.  Frame buffers must be released before the
 *	buffer list that describes them.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
	mgsl_free_buffer_list_memory( info );

}	/* end of mgsl_free_dma_buffers() */


/*
 * mgsl_alloc_intermediate_rxbuffer_memory()
 *
 * 	Allocate a buffer large enough to hold max_frame_size. This buffer
 *	is used to pass an assembled frame to the line discipline.
3906 * 3907 * Arguments: 3908 * 3909 * info pointer to device instance data 3910 * 3911 * Return Value: 0 if success, otherwise -ENOMEM 3912 */ 3913static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info) 3914{ 3915 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA); 3916 if ( info->intermediate_rxbuffer == NULL ) 3917 return -ENOMEM; 3918 3919 return 0; 3920 3921} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */ 3922 3923/* 3924 * mgsl_free_intermediate_rxbuffer_memory() 3925 * 3926 * 3927 * Arguments: 3928 * 3929 * info pointer to device instance data 3930 * 3931 * Return Value: None 3932 */ 3933static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info) 3934{ 3935 kfree(info->intermediate_rxbuffer); 3936 info->intermediate_rxbuffer = NULL; 3937 3938} /* end of mgsl_free_intermediate_rxbuffer_memory() */ 3939 3940/* 3941 * mgsl_alloc_intermediate_txbuffer_memory() 3942 * 3943 * Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size. 3944 * This buffer is used to load transmit frames into the adapter's dma transfer 3945 * buffers when there is sufficient space. 
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s %s(%d)  allocating %d tx holding buffers\n",
				info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);

	memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));

	for ( i=0; i<info->num_tx_holding_buffers; ++i) {
		info->tx_holding_buffers[i].buffer =
			kmalloc(info->max_frame_size, GFP_KERNEL);
		if (info->tx_holding_buffers[i].buffer == NULL) {
			/* allocation failed: unwind the buffers already
			 * allocated so we don't leak on the error path */
			for (--i; i >= 0; i--) {
				kfree(info->tx_holding_buffers[i].buffer);
				info->tx_holding_buffers[i].buffer = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;

}	/* end of mgsl_alloc_intermediate_txbuffer_memory() */

/*
 * mgsl_free_intermediate_txbuffer_memory()
 *
 *	Free all tx holding buffers and reset the holding-ring state.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
		kfree(info->tx_holding_buffers[i].buffer);
		info->tx_holding_buffers[i].buffer = NULL;
	}

	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_free_intermediate_txbuffer_memory() */


/*
 * load_next_tx_holding_buffer()
 *
 * attempts to load the next buffered tx request into the
 * tx dma buffers
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	true if next buffered tx request loaded
 * 			into adapter's tx dma buffer,
 * 			false otherwise
 */
static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
	bool ret = false;

	if ( info->tx_holding_count ) {
		/* determine if we have enough tx dma buffers
		 * to accommodate the next tx frame
		 */
		struct tx_holding_buffer *ptx =
			&info->tx_holding_buffers[info->get_tx_holding_index];
		int num_free = num_free_tx_dma_buffers(info);
		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
		if ( ptx->buffer_size % DMABUFFERSIZE )
			++num_needed;

		if (num_needed <= num_free) {
			info->xmit_cnt = ptx->buffer_size;
			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);

			/* advance the get index of the circular holding ring */
			--info->tx_holding_count;
			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
				info->get_tx_holding_index=0;

			/* restart transmit timer */
			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));

			ret = true;
		}
	}

	return ret;
}

/*
 * save_tx_buffer_request()
 *
 * attempt to store transmit frame request for later transmission
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	Buffer		pointer to buffer containing frame to load
 *	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value:	1 if able to store, 0 otherwise
 */
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
{
	struct tx_holding_buffer *ptx;

	if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
		return 0;	        /* all buffers in use */
	}

	/* copy the frame into the put slot and advance the ring index */
	ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
	ptx->buffer_size = BufferSize;
	memcpy( ptx->buffer, Buffer, BufferSize);

	++info->tx_holding_count;
	if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
		info->put_tx_holding_index=0;

	return 1;
}

/* mgsl_claim_resources()
 *
 *	Claim I/O region, IRQ, and (PCI) memory regions for the adapter.
 */
static int mgsl_claim_resources(struct mgsl_struct *info)
{
	if
(request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
		printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->io_base);
		return -ENODEV;
	}
	info->io_addr_requested = true;

	if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
		info->device_name, info ) < 0 ) {
		printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, info->irq_level );
		goto errout;
	}
	info->irq_requested = true;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI adapter: claim and map 256KB shared memory window
		 * and the 128-byte LCR (local config register) region
		 */
		if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
			printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base);
			goto errout;
		}
		info->shared_mem_requested = true;
		if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
			printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
			goto errout;
		}
		info->lcr_mem_requested = true;

		info->memory_base = ioremap_nocache(info->phys_memory_base,
								0x40000);
		if (!info->memory_base) {
			printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		if ( !mgsl_memory_test(info) ) {
			printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		info->lcr_base = ioremap_nocache(info->phys_lcr_base,
								PAGE_SIZE);
		if (!info->lcr_base) {
			printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
			goto errout;
		}
		/* lcr_base keeps the offset added in; release path must
		 * subtract lcr_offset again before iounmap()
		 */
		info->lcr_base += info->lcr_offset;

	} else {
		/* claim DMA channel */

		if (request_dma(info->dma_level,info->device_name) < 0){
			printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
				__FILE__,__LINE__,info->device_name, info->dma_level );
			mgsl_release_resources( info );
			return -ENODEV;
		}
		info->dma_requested = true;

		/* ISA adapter uses bus master DMA */
		set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
		enable_dma(info->dma_level);
	}

	if ( mgsl_allocate_dma_buffers(info) < 0 ) {
		printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, info->dma_level );
		goto errout;
	}

	return 0;
errout:
	/* release everything claimed before the failure point */
	mgsl_release_resources(info);
	return -ENODEV;

}	/* end of mgsl_claim_resources() */

/*
 * mgsl_release_resources()
 *
 * Release all hardware resources claimed by mgsl_claim_resources()
 * (IRQ, DMA channel, DMA/intermediate buffers, I/O region, memory
 * regions and mappings). Safe to call on a partially claimed
 * instance: each release is guarded by its *_requested flag or a
 * NULL check.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value: None
 */
static void mgsl_release_resources(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) entry\n",
			__FILE__,__LINE__,info->device_name );

	if ( info->irq_requested ) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}
	if ( info->dma_requested ) {
		disable_dma(info->dma_level);
		free_dma(info->dma_level);
		info->dma_requested = false;
	}
	mgsl_free_dma_buffers(info);
	mgsl_free_intermediate_rxbuffer_memory(info);
	mgsl_free_intermediate_txbuffer_memory(info);

	if ( info->io_addr_requested ) {
		release_region(info->io_base,info->io_addr_size);
		info->io_addr_requested = false;
	}
	if ( info->shared_mem_requested ) {
		release_mem_region(info->phys_memory_base,0x40000);
		info->shared_mem_requested = false;
	}
	if ( info->lcr_mem_requested ) {
		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
		info->lcr_mem_requested = false;
	}
	if (info->memory_base){
		iounmap(info->memory_base);
		info->memory_base = NULL;
	}
	if
(info->lcr_base){
		/* lcr_base was advanced by lcr_offset when mapped;
		 * back it out to unmap the original ioremap'd address
		 */
		iounmap(info->lcr_base - info->lcr_offset);
		info->lcr_base = NULL;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) exit\n",
			__FILE__,__LINE__,info->device_name );

}	/* end of mgsl_release_resources() */

/* mgsl_add_device()
 *
 * Add the specified device instance data structure to the
 * global linked list of devices and increment the device count.
 * Also applies the per-line module parameters (maxframe, txdmabufs,
 * txholdbufs), clamping them to sane ranges.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_add_device( struct mgsl_struct *info )
{
	info->next_device = NULL;
	info->line = mgsl_device_count;
	sprintf(info->device_name,"ttySL%d",info->line);

	if (info->line < MAX_TOTAL_DEVICES) {
		/* apply user-specified module parameters for this line */
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];

		if (txdmabufs[info->line]) {
			info->num_tx_dma_buffers = txdmabufs[info->line];
			if (info->num_tx_dma_buffers < 1)
				info->num_tx_dma_buffers = 1;
		}

		if (txholdbufs[info->line]) {
			info->num_tx_holding_buffers = txholdbufs[info->line];
			if (info->num_tx_holding_buffers < 1)
				info->num_tx_holding_buffers = 1;
			else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
				info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
		}
	}

	mgsl_device_count++;

	/* append to the singly linked global device list */
	if ( !mgsl_device_list )
		mgsl_device_list = info;
	else {
		struct mgsl_struct *current_dev = mgsl_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* clamp frame size to the supported 4096..65535 range */
	if ( info->max_frame_size < 4096 )
		info->max_frame_size = 4096;
	else if ( info->max_frame_size > 65535 )
		info->max_frame_size = 65535;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
			info->hw_version +
1, info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base,
		     	info->max_frame_size );
	} else {
		printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
			info->device_name, info->io_base, info->irq_level, info->dma_level,
		     	info->max_frame_size );
	}

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif

}	/* end of mgsl_add_device() */

static const struct tty_port_operations mgsl_port_ops = {
	.carrier_raised = carrier_raised,
	.raise_dtr_rts = raise_dtr_rts,
};


/* mgsl_allocate_device()
 *
 * Allocate and initialize a device instance structure
 * (zeroed, with tty port, wait queues, spinlocks, default
 * parameters and HDLC idle mode set up).
 *
 * Arguments:		none
 * Return Value:	pointer to mgsl_struct if success, otherwise NULL
 */
static struct mgsl_struct* mgsl_allocate_device(void)
{
	struct mgsl_struct *info;

	info = kzalloc(sizeof(struct mgsl_struct),
		 GFP_KERNEL);

	if (!info) {
		printk("Error can't allocate device instance data\n");
	} else {
		tty_port_init(&info->port);
		info->port.ops = &mgsl_port_ops;
		info->magic = MGSL_MAGIC;
		INIT_WORK(&info->task, mgsl_bh_handler);
		info->max_frame_size = 4096;
		info->port.close_delay = 5*HZ/10;	/* 0.5 second */
		info->port.closing_wait = 30*HZ;
		init_waitqueue_head(&info->status_event_wait_q);
		init_waitqueue_head(&info->event_wait_q);
		spin_lock_init(&info->irq_spinlock);
		spin_lock_init(&info->netlock);
		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
		info->idle_mode = HDLC_TXIDLE_FLAGS;
		info->num_tx_dma_buffers = 1;
		info->num_tx_holding_buffers = 0;
	}

	return info;

}	/* end of mgsl_allocate_device()*/

/* tty operations supported by this driver */
static const struct tty_operations mgsl_ops = {
	.open = mgsl_open,
	.close = mgsl_close,
	.write = mgsl_write,
	.put_char = mgsl_put_char,
	.flush_chars = mgsl_flush_chars,
	.write_room =
mgsl_write_room,
	.chars_in_buffer = mgsl_chars_in_buffer,
	.flush_buffer = mgsl_flush_buffer,
	.ioctl = mgsl_ioctl,
	.throttle = mgsl_throttle,
	.unthrottle = mgsl_unthrottle,
	.send_xchar = mgsl_send_xchar,
	.break_ctl = mgsl_break,
	.wait_until_sent = mgsl_wait_until_sent,
	.read_proc = mgsl_read_proc,
	.set_termios = mgsl_set_termios,
	.stop = mgsl_stop,
	.start = mgsl_start,
	.hangup = mgsl_hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
};

/*
 * perform tty device initialization
 *
 * Allocates the tty driver (up to 128 lines, names ttySL*),
 * sets default termios (9600 8N1) and registers it.
 * Returns 0 on success or a negative errno.
 */
static int mgsl_init_tty(void)
{
	int rc;

	serial_driver = alloc_tty_driver(128);
	if (!serial_driver)
		return -ENOMEM;

	serial_driver->owner = THIS_MODULE;
	serial_driver->driver_name = "synclink";
	serial_driver->name = "ttySL";
	serial_driver->major = ttymajor;
	serial_driver->minor_start = 64;
	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver->subtype = SERIAL_TYPE_NORMAL;
	serial_driver->init_termios = tty_std_termios;
	serial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	serial_driver->init_termios.c_ispeed = 9600;
	serial_driver->init_termios.c_ospeed = 9600;
	serial_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(serial_driver, &mgsl_ops);
	if ((rc = tty_register_driver(serial_driver)) < 0) {
		printk("%s(%d):Couldn't register serial driver\n",
			__FILE__,__LINE__);
		/* registration failed: free the driver and clear the
		 * global so cleanup doesn't try to unregister it
		 */
		put_tty_driver(serial_driver);
		serial_driver = NULL;
		return rc;
	}

 	printk("%s %s, tty major#%d\n",
		driver_name, driver_version,
		serial_driver->major);
	return 0;
}

/* enumerate user specified ISA adapters
 */
static void mgsl_enum_isa_devices(void)
{
	struct mgsl_struct *info;
	int i;

	/* Check for user specified ISA devices (io= and irq=
	 * module parameters; enumeration stops at the first
	 * unspecified entry)
	 */

	for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
				io[i], irq[i], dma[i] );

		info = mgsl_allocate_device();
		if ( !info ) {
			/* error allocating device instance data */
			if ( debug_level >= DEBUG_LEVEL_ERROR )
				printk( "can't allocate device instance data.\n");
			continue;
		}

		/* Copy user configuration info to device instance data */
		info->io_base = (unsigned int)io[i];
		info->irq_level = (unsigned int)irq[i];
		info->irq_level = irq_canonicalize(info->irq_level);
		info->dma_level = (unsigned int)dma[i];
		info->bus_type = MGSL_BUS_TYPE_ISA;
		info->io_addr_size = 16;
		info->irq_flags = 0;

		mgsl_add_device( info );
	}
}

/*
 * synclink_cleanup()
 *
 * Module unload / init-failure cleanup: unregister the tty driver,
 * release every device instance on the global list, and unregister
 * the PCI driver if it was registered.
 */
static void synclink_cleanup(void)
{
	int rc;
	struct mgsl_struct *info;
	struct mgsl_struct *tmp;

	printk("Unloading %s: %s\n", driver_name, driver_version);

	if (serial_driver) {
		if ((rc = tty_unregister_driver(serial_driver)))
			printk("%s(%d) failed to unregister tty driver err=%d\n",
			       __FILE__,__LINE__,rc);
		put_tty_driver(serial_driver);
	}

	/* walk and free the device list; grab next pointer before
	 * freeing the current node
	 */
	info = mgsl_device_list;
	while(info) {
#if SYNCLINK_GENERIC_HDLC
		hdlcdev_exit(info);
#endif
		mgsl_release_resources(info);
		tmp = info;
		info = info->next_device;
		kfree(tmp);
	}

	if (pci_registered)
		pci_unregister_driver(&synclink_pci_driver);
}

/*
 * synclink_init()
 *
 * Module entry point: enumerate ISA devices, register the PCI
 * driver (PCI registration failure is non-fatal), then register
 * the tty driver. Returns 0 on success or a negative errno.
 */
static int __init synclink_init(void)
{
	int rc;

	if (break_on_load) {
		/* debug aid: break into debugger at module load */
	 	mgsl_get_text_ptr();
  		BREAKPOINT();
	}

	printk("%s %s\n", driver_name, driver_version);

	mgsl_enum_isa_devices();
	if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
		printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
	else
		pci_registered = true;

	if ((rc = mgsl_init_tty()) < 0)
		goto error;

	return 0;
error:
	synclink_cleanup();
	return rc;
}

static void __exit synclink_exit(void)
{
	synclink_cleanup();
}

module_init(synclink_init);
module_exit(synclink_exit);

/*
 * usc_RTCmd()
 *
 * Issue a USC Receive/Transmit command to the
 * Channel Command/Address Register (CCAR).
 *
 * Notes:
 *
 *    The command is encoded in the most significant 5 bits <15..11>
 *    of the CCAR value. Bits <10..7> of the CCAR must be preserved
 *    and Bits <6..0> must be written as zeros.
 *
 * Arguments:
 *
 *    info   pointer to device information structure
 *    Cmd    command mask (use symbolic macros)
 *
 * Return Value:
 *
 *    None
 */
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* output command to CCAR in bits <15..11> */
	/* preserve bits <10..7>, bits <6..0> must be zero */

	outw( Cmd + info->loopback_bits, info->io_base + CCAR );

	/* Read to flush write to CCAR */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base + CCAR );

}	/* end of usc_RTCmd() */

/*
 * usc_DmaCmd()
 *
 *    Issue a DMA command to the DMA Command/Address Register (DCAR).
 *
 * Arguments:
 *
 *    info   pointer to device information structure
 *    Cmd    DMA command mask (usc_DmaCmd_XX Macros)
 *
 * Return Value:
 *
 *	None
 */
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* write command mask to DCAR; preserve master bus
	 * request enable state (mbre_bit, DCAR BIT8)
	 */
	outw( Cmd + info->mbre_bit, info->io_base );

	/* Read to flush write to DCAR */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base );

}	/* end of usc_DmaCmd() */

/*
 * usc_OutDmaReg()
 *
 *    Write a 16-bit value to a USC DMA register
 *
 * Arguments:
 *
 *    info      pointer to device info structure
 *    RegAddr   register address (number) for write
 *    RegValue  16-bit value to write to register
 *
 * Return Value:
 *
 *    None
 *
 */
static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* indirect access: select register via DCAR, then write value */
	outw( RegAddr + info->mbre_bit, info->io_base );
	outw( RegValue, info->io_base );

	/* Read to flush write to DCAR */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base );

}	/* end of usc_OutDmaReg() */

/*
 * usc_InDmaReg()
 *
 *    Read a 16-bit value from a DMA register
 *
 * Arguments:
 *
 *    info     pointer to device info structure
 *    RegAddr  register address (number) to read from
 *
 * Return Value:
 *
 *    The 16-bit value read from register
 *
 */
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* indirect access: select register via DCAR, then read value */
	outw( RegAddr + info->mbre_bit, info->io_base );
	return inw( info->io_base );

}	/* end of usc_InDmaReg() */

/*
 *
 * usc_OutReg()
 *
 *    Write a
16-bit value to a USC serial channel register 4605 * 4606 * Arguments: 4607 * 4608 * info pointer to device info structure 4609 * RegAddr register address (number) to write to 4610 * RegValue 16-bit value to write to register 4611 * 4612 * Return Value: 4613 * 4614 * None 4615 * 4616 */ 4617static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4618{ 4619 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4620 outw( RegValue, info->io_base + CCAR ); 4621 4622 /* Read to flush write to CCAR */ 4623 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4624 inw( info->io_base + CCAR ); 4625 4626} /* end of usc_OutReg() */ 4627 4628/* 4629 * usc_InReg() 4630 * 4631 * Reads a 16-bit value from a USC serial channel register 4632 * 4633 * Arguments: 4634 * 4635 * info pointer to device extension 4636 * RegAddr register address (number) to read from 4637 * 4638 * Return Value: 4639 * 4640 * 16-bit value read from register 4641 */ 4642static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) 4643{ 4644 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4645 return inw( info->io_base + CCAR ); 4646 4647} /* end of usc_InReg() */ 4648 4649/* usc_set_sdlc_mode() 4650 * 4651 * Set up the adapter for SDLC DMA communications. 4652 * 4653 * Arguments: info pointer to device instance data 4654 * Return Value: NONE 4655 */ 4656static void usc_set_sdlc_mode( struct mgsl_struct *info ) 4657{ 4658 u16 RegValue; 4659 bool PreSL1660; 4660 4661 /* 4662 * determine if the IUSC on the adapter is pre-SL1660. If 4663 * not, take advantage of the UnderWait feature of more 4664 * modern chips. If an underrun occurs and this bit is set, 4665 * the transmitter will idle the programmed idle pattern 4666 * until the driver has time to service the underrun. Otherwise, 4667 * the dma controller may get the cycles previously requested 4668 * and begin transmitting queued tx data. 
4669 */ 4670 usc_OutReg(info,TMCR,0x1f); 4671 RegValue=usc_InReg(info,TMDR); 4672 PreSL1660 = (RegValue == IUSC_PRE_SL1660); 4673 4674 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 4675 { 4676 /* 4677 ** Channel Mode Register (CMR) 4678 ** 4679 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun 4680 ** <13> 0 0 = Transmit Disabled (initially) 4681 ** <12> 0 1 = Consecutive Idles share common 0 4682 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop 4683 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling 4684 ** <3..0> 0110 Receiver Mode = HDLC/SDLC 4685 ** 4686 ** 1000 1110 0000 0110 = 0x8e06 4687 */ 4688 RegValue = 0x8e06; 4689 4690 /*-------------------------------------------------- 4691 * ignore user options for UnderRun Actions and 4692 * preambles 4693 *--------------------------------------------------*/ 4694 } 4695 else 4696 { 4697 /* Channel mode Register (CMR) 4698 * 4699 * <15..14> 00 Tx Sub modes, Underrun Action 4700 * <13> 0 1 = Send Preamble before opening flag 4701 * <12> 0 1 = Consecutive Idles share common 0 4702 * <11..8> 0110 Transmitter mode = HDLC/SDLC 4703 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling 4704 * <3..0> 0110 Receiver mode = HDLC/SDLC 4705 * 4706 * 0000 0110 0000 0110 = 0x0606 4707 */ 4708 if (info->params.mode == MGSL_MODE_RAW) { 4709 RegValue = 0x0001; /* Set Receive mode = external sync */ 4710 4711 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ 4712 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); 4713 4714 /* 4715 * TxSubMode: 4716 * CMR <15> 0 Don't send CRC on Tx Underrun 4717 * CMR <14> x undefined 4718 * CMR <13> 0 Send preamble before openning sync 4719 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength 4720 * 4721 * TxMode: 4722 * CMR <11-8) 0100 MonoSync 4723 * 4724 * 0x00 0100 xxxx xxxx 04xx 4725 */ 4726 RegValue |= 0x0400; 4727 } 4728 else { 4729 4730 RegValue = 0x0606; 4731 4732 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 ) 4733 RegValue |= BIT14; 
4734 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) 4735 RegValue |= BIT15; 4736 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) 4737 RegValue |= BIT15 + BIT14; 4738 } 4739 4740 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) 4741 RegValue |= BIT13; 4742 } 4743 4744 if ( info->params.mode == MGSL_MODE_HDLC && 4745 (info->params.flags & HDLC_FLAG_SHARE_ZERO) ) 4746 RegValue |= BIT12; 4747 4748 if ( info->params.addr_filter != 0xff ) 4749 { 4750 /* set up receive address filtering */ 4751 usc_OutReg( info, RSR, info->params.addr_filter ); 4752 RegValue |= BIT4; 4753 } 4754 4755 usc_OutReg( info, CMR, RegValue ); 4756 info->cmr_value = RegValue; 4757 4758 /* Receiver mode Register (RMR) 4759 * 4760 * <15..13> 000 encoding 4761 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4762 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) 4763 * <9> 0 1 = Include Receive chars in CRC 4764 * <8> 1 1 = Use Abort/PE bit as abort indicator 4765 * <7..6> 00 Even parity 4766 * <5> 0 parity disabled 4767 * <4..2> 000 Receive Char Length = 8 bits 4768 * <1..0> 00 Disable Receiver 4769 * 4770 * 0000 0101 0000 0000 = 0x0500 4771 */ 4772 4773 RegValue = 0x0500; 4774 4775 switch ( info->params.encoding ) { 4776 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4777 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4778 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4779 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4780 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4781 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4782 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4783 } 4784 4785 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4786 RegValue |= BIT9; 4787 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4788 RegValue |= ( BIT12 | BIT10 | BIT9 ); 4789 4790 usc_OutReg( info, RMR, RegValue ); 
4791 4792 /* Set the Receive count Limit Register (RCLR) to 0xffff. */ 4793 /* When an opening flag of an SDLC frame is recognized the */ 4794 /* Receive Character count (RCC) is loaded with the value in */ 4795 /* RCLR. The RCC is decremented for each received byte. The */ 4796 /* value of RCC is stored after the closing flag of the frame */ 4797 /* allowing the frame size to be computed. */ 4798 4799 usc_OutReg( info, RCLR, RCLRVALUE ); 4800 4801 usc_RCmd( info, RCmd_SelectRicrdma_level ); 4802 4803 /* Receive Interrupt Control Register (RICR) 4804 * 4805 * <15..8> ? RxFIFO DMA Request Level 4806 * <7> 0 Exited Hunt IA (Interrupt Arm) 4807 * <6> 0 Idle Received IA 4808 * <5> 0 Break/Abort IA 4809 * <4> 0 Rx Bound IA 4810 * <3> 1 Queued status reflects oldest 2 bytes in FIFO 4811 * <2> 0 Abort/PE IA 4812 * <1> 1 Rx Overrun IA 4813 * <0> 0 Select TC0 value for readback 4814 * 4815 * 0000 0000 0000 1000 = 0x000a 4816 */ 4817 4818 /* Carry over the Exit Hunt and Idle Received bits */ 4819 /* in case they have been armed by usc_ArmEvents. 
*/ 4820 4821 RegValue = usc_InReg( info, RICR ) & 0xc0; 4822 4823 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4824 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); 4825 else 4826 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) ); 4827 4828 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ 4829 4830 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 4831 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 4832 4833 /* Transmit mode Register (TMR) 4834 * 4835 * <15..13> 000 encoding 4836 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4837 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) 4838 * <9> 0 1 = Tx CRC Enabled 4839 * <8> 0 1 = Append CRC to end of transmit frame 4840 * <7..6> 00 Transmit parity Even 4841 * <5> 0 Transmit parity Disabled 4842 * <4..2> 000 Tx Char Length = 8 bits 4843 * <1..0> 00 Disable Transmitter 4844 * 4845 * 0000 0100 0000 0000 = 0x0400 4846 */ 4847 4848 RegValue = 0x0400; 4849 4850 switch ( info->params.encoding ) { 4851 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4852 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4853 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4854 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4855 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4856 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4857 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4858 } 4859 4860 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4861 RegValue |= BIT9 + BIT8; 4862 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4863 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); 4864 4865 usc_OutReg( info, TMR, RegValue ); 4866 4867 usc_set_txidle( info ); 4868 4869 4870 usc_TCmd( info, TCmd_SelectTicrdma_level ); 4871 4872 /* Transmit Interrupt Control Register (TICR) 4873 * 4874 * <15..8> ? 
Transmit FIFO DMA Level 4875 * <7> 0 Present IA (Interrupt Arm) 4876 * <6> 0 Idle Sent IA 4877 * <5> 1 Abort Sent IA 4878 * <4> 1 EOF/EOM Sent IA 4879 * <3> 0 CRC Sent IA 4880 * <2> 1 1 = Wait for SW Trigger to Start Frame 4881 * <1> 1 Tx Underrun IA 4882 * <0> 0 TC0 constant on read back 4883 * 4884 * 0000 0000 0011 0110 = 0x0036 4885 */ 4886 4887 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4888 usc_OutReg( info, TICR, 0x0736 ); 4889 else 4890 usc_OutReg( info, TICR, 0x1436 ); 4891 4892 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 4893 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 4894 4895 /* 4896 ** Transmit Command/Status Register (TCSR) 4897 ** 4898 ** <15..12> 0000 TCmd 4899 ** <11> 0/1 UnderWait 4900 ** <10..08> 000 TxIdle 4901 ** <7> x PreSent 4902 ** <6> x IdleSent 4903 ** <5> x AbortSent 4904 ** <4> x EOF/EOM Sent 4905 ** <3> x CRC Sent 4906 ** <2> x All Sent 4907 ** <1> x TxUnder 4908 ** <0> x TxEmpty 4909 ** 4910 ** 0000 0000 0000 0000 = 0x0000 4911 */ 4912 info->tcsr_value = 0; 4913 4914 if ( !PreSL1660 ) 4915 info->tcsr_value |= TCSR_UNDERWAIT; 4916 4917 usc_OutReg( info, TCSR, info->tcsr_value ); 4918 4919 /* Clock mode Control Register (CMCR) 4920 * 4921 * <15..14> 00 counter 1 Source = Disabled 4922 * <13..12> 00 counter 0 Source = Disabled 4923 * <11..10> 11 BRG1 Input is TxC Pin 4924 * <9..8> 11 BRG0 Input is TxC Pin 4925 * <7..6> 01 DPLL Input is BRG1 Output 4926 * <5..3> XXX TxCLK comes from Port 0 4927 * <2..0> XXX RxCLK comes from Port 1 4928 * 4929 * 0000 1111 0111 0111 = 0x0f77 4930 */ 4931 4932 RegValue = 0x0f40; 4933 4934 if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) 4935 RegValue |= 0x0003; /* RxCLK from DPLL */ 4936 else if ( info->params.flags & HDLC_FLAG_RXC_BRG ) 4937 RegValue |= 0x0004; /* RxCLK from BRG0 */ 4938 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) 4939 RegValue |= 0x0006; /* RxCLK from TXC Input */ 4940 else 4941 RegValue |= 0x0007; /* RxCLK from Port1 */ 4942 4943 if ( info->params.flags & HDLC_FLAG_TXC_DPLL 
) 4944 RegValue |= 0x0018; /* TxCLK from DPLL */ 4945 else if ( info->params.flags & HDLC_FLAG_TXC_BRG ) 4946 RegValue |= 0x0020; /* TxCLK from BRG0 */ 4947 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN) 4948 RegValue |= 0x0038; /* RxCLK from TXC Input */ 4949 else 4950 RegValue |= 0x0030; /* TxCLK from Port0 */ 4951 4952 usc_OutReg( info, CMCR, RegValue ); 4953 4954 4955 /* Hardware Configuration Register (HCR) 4956 * 4957 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4 4958 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div 4959 * <12> 0 CVOK:0=report code violation in biphase 4960 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4 4961 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level 4962 * <7..6> 00 reserved 4963 * <5> 0 BRG1 mode:0=continuous,1=single cycle 4964 * <4> X BRG1 Enable 4965 * <3..2> 00 reserved 4966 * <1> 0 BRG0 mode:0=continuous,1=single cycle 4967 * <0> 0 BRG0 Enable 4968 */ 4969 4970 RegValue = 0x0000; 4971 4972 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) { 4973 u32 XtalSpeed; 4974 u32 DpllDivisor; 4975 u16 Tc; 4976 4977 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */ 4978 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */ 4979 4980 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4981 XtalSpeed = 11059200; 4982 else 4983 XtalSpeed = 14745600; 4984 4985 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { 4986 DpllDivisor = 16; 4987 RegValue |= BIT10; 4988 } 4989 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { 4990 DpllDivisor = 8; 4991 RegValue |= BIT11; 4992 } 4993 else 4994 DpllDivisor = 32; 4995 4996 /* Tc = (Xtal/Speed) - 1 */ 4997 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 4998 /* then rounding up gives a more precise time constant. Instead */ 4999 /* of rounding up and then subtracting 1 we just don't subtract */ 5000 /* the one in this case. 
*/ 5001 5002 /*-------------------------------------------------- 5003 * ejz: for DPLL mode, application should use the 5004 * same clock speed as the partner system, even 5005 * though clocking is derived from the input RxData. 5006 * In case the user uses a 0 for the clock speed, 5007 * default to 0xffffffff and don't try to divide by 5008 * zero 5009 *--------------------------------------------------*/ 5010 if ( info->params.clock_speed ) 5011 { 5012 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); 5013 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) 5014 / info->params.clock_speed) ) 5015 Tc--; 5016 } 5017 else 5018 Tc = -1; 5019 5020 5021 /* Write 16-bit Time Constant for BRG1 */ 5022 usc_OutReg( info, TC1R, Tc ); 5023 5024 RegValue |= BIT4; /* enable BRG1 */ 5025 5026 switch ( info->params.encoding ) { 5027 case HDLC_ENCODING_NRZ: 5028 case HDLC_ENCODING_NRZB: 5029 case HDLC_ENCODING_NRZI_MARK: 5030 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; 5031 case HDLC_ENCODING_BIPHASE_MARK: 5032 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; 5033 case HDLC_ENCODING_BIPHASE_LEVEL: 5034 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break; 5035 } 5036 } 5037 5038 usc_OutReg( info, HCR, RegValue ); 5039 5040 5041 /* Channel Control/status Register (CCSR) 5042 * 5043 * <15> X RCC FIFO Overflow status (RO) 5044 * <14> X RCC FIFO Not Empty status (RO) 5045 * <13> 0 1 = Clear RCC FIFO (WO) 5046 * <12> X DPLL Sync (RW) 5047 * <11> X DPLL 2 Missed Clocks status (RO) 5048 * <10> X DPLL 1 Missed Clock status (RO) 5049 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 5050 * <7> X SDLC Loop On status (RO) 5051 * <6> X SDLC Loop Send status (RO) 5052 * <5> 1 Bypass counters for TxClk and RxClk (RW) 5053 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 5054 * <1..0> 00 reserved 5055 * 5056 * 0000 0000 0010 0000 = 0x0020 5057 */ 5058 5059 usc_OutReg( info, CCSR, 0x1020 ); 5060 5061 5062 if ( 
info->params.flags & HDLC_FLAG_AUTO_CTS ) { 5063 usc_OutReg( info, SICR, 5064 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); 5065 } 5066 5067 5068 /* enable Master Interrupt Enable bit (MIE) */ 5069 usc_EnableMasterIrqBit( info ); 5070 5071 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA + 5072 TRANSMIT_STATUS + TRANSMIT_DATA + MISC); 5073 5074 /* arm RCC underflow interrupt */ 5075 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); 5076 usc_EnableInterrupts(info, MISC); 5077 5078 info->mbre_bit = 0; 5079 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5080 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5081 info->mbre_bit = BIT8; 5082 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ 5083 5084 if (info->bus_type == MGSL_BUS_TYPE_ISA) { 5085 /* Enable DMAEN (Port 7, Bit 14) */ 5086 /* This connects the DMA request signal to the ISA bus */ 5087 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); 5088 } 5089 5090 /* DMA Control Register (DCR) 5091 * 5092 * <15..14> 10 Priority mode = Alternating Tx/Rx 5093 * 01 Rx has priority 5094 * 00 Tx has priority 5095 * 5096 * <13> 1 Enable Priority Preempt per DCR<15..14> 5097 * (WARNING DCR<11..10> must be 00 when this is 1) 5098 * 0 Choose activate channel per DCR<11..10> 5099 * 5100 * <12> 0 Little Endian for Array/List 5101 * <11..10> 00 Both Channels can use each bus grant 5102 * <9..6> 0000 reserved 5103 * <5> 0 7 CLK - Minimum Bus Re-request Interval 5104 * <4> 0 1 = drive D/C and S/D pins 5105 * <3> 1 1 = Add one wait state to all DMA cycles. 5106 * <2> 0 1 = Strobe /UAS on every transfer. 
5107 * <1..0> 11 Addr incrementing only affects LS24 bits 5108 * 5109 * 0110 0000 0000 1011 = 0x600b 5110 */ 5111 5112 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 5113 /* PCI adapter does not need DMA wait state */ 5114 usc_OutDmaReg( info, DCR, 0xa00b ); 5115 } 5116 else 5117 usc_OutDmaReg( info, DCR, 0x800b ); 5118 5119 5120 /* Receive DMA mode Register (RDMR) 5121 * 5122 * <15..14> 11 DMA mode = Linked List Buffer mode 5123 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry 5124 * <12> 1 Clear count of List Entry after fetching 5125 * <11..10> 00 Address mode = Increment 5126 * <9> 1 Terminate Buffer on RxBound 5127 * <8> 0 Bus Width = 16bits 5128 * <7..0> ? status Bits (write as 0s) 5129 * 5130 * 1111 0010 0000 0000 = 0xf200 5131 */ 5132 5133 usc_OutDmaReg( info, RDMR, 0xf200 ); 5134 5135 5136 /* Transmit DMA mode Register (TDMR) 5137 * 5138 * <15..14> 11 DMA mode = Linked List Buffer mode 5139 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry 5140 * <12> 1 Clear count of List Entry after fetching 5141 * <11..10> 00 Address mode = Increment 5142 * <9> 1 Terminate Buffer on end of frame 5143 * <8> 0 Bus Width = 16bits 5144 * <7..0> ? 
status Bits (Read Only so write as 0) 5145 * 5146 * 1111 0010 0000 0000 = 0xf200 5147 */ 5148 5149 usc_OutDmaReg( info, TDMR, 0xf200 ); 5150 5151 5152 /* DMA Interrupt Control Register (DICR) 5153 * 5154 * <15> 1 DMA Interrupt Enable 5155 * <14> 0 1 = Disable IEO from USC 5156 * <13> 0 1 = Don't provide vector during IntAck 5157 * <12> 1 1 = Include status in Vector 5158 * <10..2> 0 reserved, Must be 0s 5159 * <1> 0 1 = Rx DMA Interrupt Enabled 5160 * <0> 0 1 = Tx DMA Interrupt Enabled 5161 * 5162 * 1001 0000 0000 0000 = 0x9000 5163 */ 5164 5165 usc_OutDmaReg( info, DICR, 0x9000 ); 5166 5167 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ 5168 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ 5169 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ 5170 5171 /* Channel Control Register (CCR) 5172 * 5173 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) 5174 * <13> 0 Trigger Tx on SW Command Disabled 5175 * <12> 0 Flag Preamble Disabled 5176 * <11..10> 00 Preamble Length 5177 * <9..8> 00 Preamble Pattern 5178 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) 5179 * <5> 0 Trigger Rx on SW Command Disabled 5180 * <4..0> 0 reserved 5181 * 5182 * 1000 0000 1000 0000 = 0x8080 5183 */ 5184 5185 RegValue = 0x8080; 5186 5187 switch ( info->params.preamble_length ) { 5188 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; 5189 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; 5190 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break; 5191 } 5192 5193 switch ( info->params.preamble ) { 5194 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break; 5195 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; 5196 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; 5197 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break; 5198 } 5199 5200 usc_OutReg( info, CCR, RegValue ); 5201 5202 5203 /* 5204 * Burst/Dwell Control Register 5205 * 5206 * <15..8> 0x20 
 Maximum number of transfers per bus grant
 *	<7..0>	0x00	Maximum number of clock cycles per bus grant
 */

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* don't limit bus occupancy on PCI adapter */
		usc_OutDmaReg( info, BDCR, 0x0000 );
	}
	else
		/* ISA: cap each bus grant at 0x20 transfers */
		usc_OutDmaReg( info, BDCR, 0x2000 );

	usc_stop_transmitter(info);
	usc_stop_receiver(info);

}	/* end of usc_set_sdlc_mode() */

/* usc_enable_loopback()
 *
 * Set the 16C32 for internal loopback mode.
 * The TxCLK and RxCLK signals are generated from the BRG0 and
 * the TxD is looped back to the RxD internally.
 *
 * Arguments:		info	pointer to device instance data
 *			enable	1 = enable loopback, 0 = disable
 * Return Value:	None
 */
static void usc_enable_loopback(struct mgsl_struct *info, int enable)
{
	if (enable) {
		/* blank external TXD output (IOCR <7..6> = 11) */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));

		/* Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 * 0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );

		/* Write 16-bit Time Constant for BRG0 */
		/* use clock speed if available, otherwise use 8 for diagnostics */
		/* (divisor = xtal / clock_speed - 1; xtal differs per bus type) */
		if (info->params.clock_speed) {
			if (info->bus_type == MGSL_BUS_TYPE_PCI)
				usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
			else
				usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
		} else
			usc_OutReg(info, TC0R, (u16)8);

		/* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
		   mode = Continuous Set Bit 0 to enable BRG0.  */
		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
		usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));

		/* set Internal Data loopback mode (write to Channel Command/Address Reg) */
		info->loopback_bits = 0x300;
		outw( 0x0300, info->io_base + CCAR );
	} else {
		/* enable external TXD output (clear the blanking bits set above) */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));

		/* clear Internal Data loopback mode */
		info->loopback_bits = 0;
		outw( 0,info->io_base + CCAR );
	}

}	/* end of usc_enable_loopback() */

/* usc_enable_aux_clock()
 *
 * Enables the AUX clock output at the specified frequency.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *	data_rate	data rate of clock in bits per second
 *			A data rate of 0 disables the AUX clock.
 *
 * Return Value:	None
 */
static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
{
	u32 XtalSpeed;
	u16 Tc;

	if ( data_rate ) {
		/* crystal frequency depends on adapter type */
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			XtalSpeed = 11059200;
		else
			XtalSpeed = 14745600;

		/* Tc = (Xtal/Speed) - 1 */
		/* If twice the remainder of (Xtal/Speed) is greater than Speed */
		/* then rounding up gives a more precise time constant. Instead */
		/* of rounding up and then subtracting 1 we just don't subtract */
		/* the one in this case. */

		Tc = (u16)(XtalSpeed/data_rate);
		if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
			Tc--;

		/* Write 16-bit Time Constant for BRG0 */
		usc_OutReg( info, TC0R, Tc );

		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
		 */

		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
		usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

}	/* end of usc_enable_aux_clock() */

/*
 *
 * usc_process_rxoverrun_sync()
 *
 * This function processes a receive overrun by resetting the
 * receive DMA buffers and issuing a Purge Rx FIFO command
 * to allow the receiver to continue receiving.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *
 * Return Value: None
 */
static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
{
	int start_index;
	int end_index;
	int frame_start_index;
	bool start_of_frame_found = false;
	bool end_of_frame_found = false;
	bool reprogram_dma = false;

	DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
	u32 phys_addr;

	/* quiesce the receive side before inspecting the buffer ring */
	usc_DmaCmd( info, DmaCmd_PauseRxChannel );
	usc_RCmd( info, RCmd_EnterHuntmode );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* CurrentRxBuffer points to the 1st buffer of the next */
	/* possibly available receive frame. */

	frame_start_index = start_index = end_index = info->current_rx_buffer;

	/* Search for an unfinished string of buffers. This means */
	/* that a receive frame started (at least one buffer with */
	/* count set to zero) but there is no terminating buffer */
	/* (status set to non-zero). */

	while( !buffer_list[end_index].count )
	{
		/* Count field has been reset to zero by 16C32. */
		/* This buffer is currently in use. */

		if ( !start_of_frame_found )
		{
			start_of_frame_found = true;
			frame_start_index = end_index;
			end_of_frame_found = false;
		}

		if ( buffer_list[end_index].status )
		{
			/* Status field has been set by 16C32. */
			/* This is the last buffer of a received frame. */

			/* We want to leave the buffers for this frame intact. */
			/* Move on to next possible frame. */

			start_of_frame_found = false;
			end_of_frame_found = true;
		}

		/* advance to next buffer entry in linked list (ring wrap) */
		end_index++;
		if ( end_index == info->rx_buffer_count )
			end_index = 0;

		if ( start_index == end_index )
		{
			/* The entire list has been searched with all Counts == 0 and */
			/* all Status == 0. The receive buffers are */
			/* completely screwed, reset all receive buffers! */
			mgsl_reset_rx_dma_buffers( info );
			frame_start_index = 0;
			start_of_frame_found = false;
			reprogram_dma = true;
			break;
		}
	}

	if ( start_of_frame_found && !end_of_frame_found )
	{
		/* There is an unfinished string of receive DMA buffers */
		/* as a result of the receiver overrun. */

		/* Reset the buffers for the unfinished frame */
		/* and reprogram the receive DMA controller to start */
		/* at the 1st buffer of unfinished frame. */

		start_index = frame_start_index;

		do
		{
			/* NOTE(review): this single store is meant to reload the 16-bit
			 * count field with DMABUFFERSIZE and clear the adjacent status
			 * field in one write. On a 64-bit build an unsigned long store
			 * is 8 bytes and may clobber fields beyond status — verify
			 * against the DMABUFFERENTRY layout. */
			*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;

			/* Adjust index for wrap around. */
			if ( start_index == info->rx_buffer_count )
				start_index = 0;

		} while( start_index != end_index );

		reprogram_dma = true;
	}

	if ( reprogram_dma )
	{
		usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
		usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);

		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}
	else
	{
		/* no buffer reset needed: just flush the FIFO and resume */
		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
		usc_RTCmd( info, RTCmd_PurgeRxFifo );
	}

}	/* end of usc_process_rxoverrun_sync() */

/* usc_stop_receiver()
 *
 *	Disable USC receiver
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_receiver( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* Disable receive DMA channel.
	 */
	/* This also disables receive DMA channel interrupts */
	usc_DmaCmd( info, DmaCmd_ResetRxChannel );

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
	usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

	/* This empties the receive FIFO and loads the RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	info->rx_enabled = false;
	info->rx_overflow = false;
	info->rx_rcc_underrun = false;

}	/* end of stop_receiver() */

/* usc_start_receiver()
 *
 *	Enable the USC receiver
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_receiver( struct mgsl_struct *info )
{
	u32 phys_addr;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* start from a clean slate: reset ring, then stop hardware receiver */
	mgsl_reset_rx_dma_buffers( info );
	usc_stop_receiver( info );

	/* empty receive FIFO and load RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* DMA mode Transfers */
		/* Program the DMA controller. */
		/* Enable the DMA controller end of buffer interrupt. */

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[0].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	} else {
		/* async mode: per-character interrupts instead of DMA */
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
		usc_EnableInterrupts(info, RECEIVE_DATA);

		usc_RTCmd( info, RTCmd_PurgeRxFifo );
		usc_RCmd( info, RCmd_EnterHuntmode );

		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}

	usc_OutReg( info, CCSR, 0x1020 );

	info->rx_enabled = true;

}	/* end of usc_start_receiver() */

/* usc_start_transmitter()
 *
 *	Enable the USC transmitter and send a transmit frame if
 *	one is loaded in the DMA buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_transmitter( struct mgsl_struct *info )
{
	u32 phys_addr;
	unsigned int FrameSize;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if ( info->xmit_cnt ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes.
		 */

		info->drop_rts_on_tx_done = false;

		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
			usc_get_serial_signals( info );
			if ( !(info->serial_signals & SerialSignal_RTS) ) {
				info->serial_signals |= SerialSignal_RTS;
				usc_set_serial_signals( info );
				info->drop_rts_on_tx_done = true;
			}
		}


		if ( info->params.mode == MGSL_MODE_ASYNC ) {
			/* async: interrupt-driven FIFO loading, no DMA */
			if ( !info->tx_active ) {
				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
				usc_EnableInterrupts(info, TRANSMIT_DATA);
				usc_load_txfifo(info);
			}
		} else {
			/* Disable transmit DMA controller while programming. */
			usc_DmaCmd( info, DmaCmd_ResetTxChannel );

			/* Transmit DMA buffer is loaded, so program USC */
			/* to send the frame contained in the buffers.	*/

			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;

			/* if operating in Raw sync mode, reset the rcc component
			 * of the tx dma buffer entry, otherwise, the serial controller
			 * will send a closing sync char after this count.
			 */
			if ( info->params.mode == MGSL_MODE_RAW )
				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;

			/* Program the Transmit Character Length Register (TCLR) */
			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
			usc_OutReg( info, TCLR, (u16)FrameSize );

			usc_RTCmd( info, RTCmd_PurgeTxFifo );

			/* Program the address of the 1st DMA Buffer Entry in linked list */
			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );

			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
			usc_EnableInterrupts( info, TRANSMIT_STATUS );

			if ( info->params.mode == MGSL_MODE_RAW &&
					info->num_tx_dma_buffers > 1 ) {
			   /* When running external sync mode, attempt to 'stream' transmit  */
			   /* by filling tx dma buffers as they become available. To do this */
			   /* we need to enable Tx DMA EOB Status interrupts :		   */
			   /*								   */
			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR)		   */

			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
			}

			/* Initialize Transmit DMA Channel */
			usc_DmaCmd( info, DmaCmd_InitTxChannel );

			usc_TCmd( info, TCmd_SendFrame );

			/* watchdog: fail the transmit if not complete within 5 seconds */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		info->tx_active = true;
	}

	if ( !info->tx_enabled ) {
		info->tx_enabled = true;
		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
		else
			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
	}

}	/* end of usc_start_transmitter() */

/* usc_stop_transmitter()
 *
 *	Stops the transmitter and DMA
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_transmitter( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* cancel the transmit-timeout watchdog armed by usc_start_transmitter() */
	del_timer(&info->tx_timer);

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );

	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	info->tx_enabled = false;
	info->tx_active = false;

}	/* end of usc_stop_transmitter() */

/* usc_load_txfifo()
 *
 *	Fill the transmit FIFO until the FIFO is full or
 *	there is no more data to load.
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void usc_load_txfifo( struct mgsl_struct *info )
{
	int Fifocount;
	u8 TwoBytes[2];

	/* nothing to send: no buffered data and no pending high-priority char */
	if ( !info->xmit_cnt && !info->x_char )
		return;

	/* Select transmit FIFO status readback in TICR */
	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	/* load the Transmit FIFO until FIFOs full or all data sent */

	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
			/* write a 16-bit word from transmit buffer to 16C32 */
			/* (xmit_tail wraps with a power-of-two mask) */

			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);

			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);

			info->xmit_cnt -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			/* select single-byte (LSB only) access to the TDR */
			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
				info->io_base + CCAR );

			if (info->x_char) {
				/* transmit pending high priority char */
				outw( info->x_char,info->io_base + CCAR );
				info->x_char = 0;
			} else {
				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
				info->xmit_cnt--;
			}
			info->icount.tx++;
		}
	}

}	/* end of usc_load_txfifo() */

/* usc_reset()
 *
 *	Reset the adapter to a known state and prepare it for further use.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_reset( struct mgsl_struct *info )
{
	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		int i;
		u32 readval;

		/* Set BIT30 of Misc Control Register */
		/* (Local Control Register 0x50) to force reset of USC. */

		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

		info->misc_ctrl_value |= BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/*
		 * Force at least 170ns delay before clearing
		 * reset bit. Each read from LCR takes at least
		 * 30ns so 10 times for 300ns to be safe.
		 * (readval is intentionally unused; the reads are the delay.)
		 */
		for(i=0;i<10;i++)
			readval = *MiscCtrl;

		info->misc_ctrl_value &= ~BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/* program local bus read/write timing descriptor */
		*LCR0BRDR = BUS_DESCRIPTOR(
			1,		// Write Strobe Hold (0-3)
			2,		// Write Strobe Delay (0-3)
			2,		// Read Strobe Delay  (0-3)
			0,		// NWDD (Write data-data) (0-3)
			4,		// NWAD (Write Addr-data) (0-31)
			0,		// NXDA (Read/Write Data-Addr) (0-3)
			0,		// NRDD (Read Data-Data) (0-3)
			5		// NRAD (Read Addr-Data) (0-31)
			);
	} else {
		/* do HW reset */
		outb( 0,info->io_base + 8 );
	}

	info->mbre_bit = 0;
	info->loopback_bits = 0;
	info->usc_idle_mode = 0;

	/*
	 * Program the Bus Configuration Register (BCR)
	 *
	 * <15>		0	Don't use separate address
	 * <14..6>	0	reserved
	 * <5..4>	00	IAckmode = Default, don't care
	 * <3>		1	Bus Request Totem Pole output
	 * <2>		1	Use 16 Bit data bus
	 * <1>		0	IRQ Totem Pole output
	 * <0>		0	Don't Shift Right Addr
	 *
	 * 0000 0000 0000 1100 = 0x000c
	 *
	 * By writing to io_base + SDPIN the Wait/Ack pin is
	 * programmed to work as a Wait pin.
	 */

	outw( 0x000c,info->io_base + SDPIN );


	outw( 0,info->io_base );
	outw( 0,info->io_base + CCAR );

	/* select little endian byte ordering */
	usc_RTCmd( info, RTCmd_SelectLittleEndian );


	/* Port Control Register (PCR)
	 *
	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
	 * <11..10>	00	Port 5 is Input (No Connect, Don't Care)
	 * <9..8>	00	Port 4 is Input (No Connect, Don't Care)
	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
	 *
	 * 1111 0000 1111 0101 = 0xf0f5
	 */

	usc_OutReg( info, PCR, 0xf0f5 );


	/*
	 * Input/Output Control Register
	 *
	 * <15..14>	00	CTS is active low input
	 * <13..12>	00	DCD is active low input
	 * <11..10>	00	TxREQ pin is input (DSR)
	 * <9..8>	00	RxREQ pin is input (RI)
	 * <7..6>	00	TxD is output (Transmit Data)
	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
	 * <2..0>	100	RxC is Output (drive with BRG0)
	 *
	 * 0000 0000 0000 0100 = 0x0004
	 */

	usc_OutReg( info, IOCR, 0x0004 );

}	/* end of usc_reset() */

/* usc_set_async_mode()
 *
 *	Program adapter for asynchronous communications.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_async_mode( struct mgsl_struct *info )
{
	u16 RegValue;

	/* disable interrupts while programming USC */
	usc_DisableMasterIrqBit( info );

	outw( 0, info->io_base );			/* clear Master Bus Enable (DCAR) */
	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */

	/* flush stale sync-mode status (see usc_loopback_frame comment) */
	usc_loopback_frame( info );

	/* Channel mode Register (CMR)
	 *
	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
	 * <13..12>	00	00 = 16X Clock
	 * <11..8>	0000	Transmitter mode = Asynchronous
	 * <7..6>	00	reserved?
	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
	 * <3..0>	0000	Receiver mode = Asynchronous
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;
	if ( info->params.stop_bits != 1 )
		RegValue |= BIT14;
	usc_OutReg( info, CMR, RegValue );


	/* Receiver mode Register (RMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Even parity
	 * <5>		0	parity disabled
	 * <4..2>	000	Receive Char Length = 8 bits
	 * <1..0>	00	Disable Receiver
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4+BIT3+BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;
	}

	usc_OutReg( info, RMR, RegValue );


	/* Set IRQ trigger level */

	usc_RCmd( info, RCmd_SelectRicrIntLevel );


	/* Receive Interrupt Control Register (RICR)
	 *
	 * <15..8>	?	RxFIFO IRQ Request Level
	 *
	 * Note: For async mode the receive FIFO level must be set
	 * to 0 to avoid the situation where the FIFO contains fewer bytes
	 * than the trigger level and no more data is expected.
	 *
	 * <7>		0	Exited Hunt IA (Interrupt Arm)
	 * <6>		0	Idle Received IA
	 * <5>		0	Break/Abort IA
	 * <4>		0	Rx Bound IA
	 * <3>		0	Queued status reflects oldest byte in FIFO
	 * <2>		0	Abort/PE IA
	 * <1>		0	Rx Overrun IA
	 * <0>		0	Select TC0 value for readback
	 *
	 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
	 */

	usc_OutReg( info, RICR, 0x0000 );

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );


	/* Transmit mode Register (TMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Transmit parity Even
	 * <5>		0	Transmit parity Disabled
	 * <4..2>	000	Tx Char Length = 8 bits
	 * <1..0>	00	Disable Transmitter
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4+BIT3+BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;
	}

	usc_OutReg( info, TMR, RegValue );

	usc_set_txidle( info );


	/* Set IRQ trigger level */

	usc_TCmd( info, TCmd_SelectTicrIntLevel );


	/* Transmit Interrupt Control Register (TICR)
	 *
	 * <15..8>	?	Transmit FIFO IRQ Level
	 * <7>		0	Present IA (Interrupt Arm)
	 * <6>		1	Idle Sent IA
	 * <5>		0	Abort Sent IA
	 * <4>		0	EOF/EOM Sent IA
	 * <3>		0	CRC Sent IA
	 * <2>		0	1 = Wait for SW Trigger to Start Frame
	 * <1>		0	Tx Underrun IA
	 * <0>		0	TC0 constant on read back
	 *
	 * 0001 1111 0100 0000 = 0x1f40 (FIFO IRQ level 0x1f in MSB)
	 */

	usc_OutReg( info, TICR, 0x1f40 );

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );

	usc_enable_async_clock( info, info->params.data_rate );


	/* Channel Control/status Register (CCSR)
	 *
	 * <15>		X	RCC FIFO Overflow status (RO)
	 * <14>		X	RCC FIFO Not Empty status (RO)
	 * <13>		0	1 = Clear RCC FIFO (WO)
	 * <12>		X	DPLL in Sync status (RO)
	 * <11>		X	DPLL 2 Missed Clocks status (RO)
	 * <10>		X	DPLL 1 Missed Clock status (RO)
	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
	 * <7>		X	SDLC Loop On status (RO)
	 * <6>		X	SDLC Loop Send status (RO)
	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
	 * <4..2>	000	Last Char of SDLC frame has 8 bits (RW)
	 * <1..0>	00	reserved
	 *
	 * 0000 0000 0010 0000 = 0x0020
	 */

	usc_OutReg( info, CCSR, 0x0020 );

	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableMasterIrqBit( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	if (info->params.loopback) {
		/* internal data loopback requested by user configuration */
		info->loopback_bits = 0x300;
		outw(0x0300, info->io_base + CCAR);
	}

}	/* end of usc_set_async_mode() */

/* usc_loopback_frame()
 *
 *	Loop back a small (2
 byte) dummy SDLC frame.
 *	Interrupts and DMA are NOT used. The purpose of this is to
 *	clear any 'stale' status info left over from running in async mode.
 *
 *	The 16C32 shows the strange behaviour of marking the 1st
 *	received SDLC frame with a CRC error even when there is no
 *	CRC error. To get around this a small dummy frame of 2 bytes
 *	is looped back when switching from async to sync mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_loopback_frame( struct mgsl_struct *info )
{
	int i;
	/* remember caller's mode; temporarily forced to HDLC below */
	unsigned long oldmode = info->params.mode;

	info->params.mode = MGSL_MODE_HDLC;

	usc_DisableMasterIrqBit( info );

	usc_set_sdlc_mode( info );
	usc_enable_loopback( info, 1 );

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, 0 );

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length = 8-Bits
	 * <9..8>	01	Preamble Pattern = flags
	 * <7..6>	10	Don't use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 * 0000 0001 0000 0000 = 0x0100
	 */

	usc_OutReg( info, CCR, 0x0100 );

	/* SETUP RECEIVER */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );
	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

	/* SETUP TRANSMITTER */
	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
	usc_OutReg( info, TCLR, 2 );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* unlatch Tx status bits, and start transmit channel.
	 */
	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
	outw(0,info->io_base + DATAREG);

	/* ENABLE TRANSMITTER */
	usc_TCmd( info, TCmd_SendFrame );
	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

	/* WAIT FOR RECEIVE COMPLETE */
	/* (bounded polling loop — no IRQs are enabled here) */
	for (i=0 ; i<1000 ; i++)
		if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
			break;

	/* clear Internal Data loopback mode */
	usc_enable_loopback(info, 0);

	usc_EnableMasterIrqBit(info);

	info->params.mode = oldmode;

}	/* end of usc_loopback_frame() */

/* usc_set_sync_mode()	Programs the USC for SDLC communications.
 *
 * Arguments:		info	pointer to adapter info structure
 * Return Value:	None
 */
static void usc_set_sync_mode( struct mgsl_struct *info )
{
	/* loop back a dummy frame first to flush stale status (see above) */
	usc_loopback_frame( info );
	usc_set_sdlc_mode( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	usc_enable_aux_clock(info, info->params.clock_speed);

	if (info->params.loopback)
		usc_enable_loopback(info,1);

}	/* end of usc_set_sync_mode() */

/* usc_set_txidle()	Set the HDLC idle mode for the transmitter.
6180 * 6181 * Arguments: info pointer to device instance data 6182 * Return Value: None 6183 */ 6184static void usc_set_txidle( struct mgsl_struct *info ) 6185{ 6186 u16 usc_idle_mode = IDLEMODE_FLAGS; 6187 6188 /* Map API idle mode to USC register bits */ 6189 6190 switch( info->idle_mode ){ 6191 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break; 6192 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break; 6193 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break; 6194 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break; 6195 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break; 6196 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break; 6197 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break; 6198 } 6199 6200 info->usc_idle_mode = usc_idle_mode; 6201 //usc_OutReg(info, TCSR, usc_idle_mode); 6202 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */ 6203 info->tcsr_value += usc_idle_mode; 6204 usc_OutReg(info, TCSR, info->tcsr_value); 6205 6206 /* 6207 * if SyncLink WAN adapter is running in external sync mode, the 6208 * transmitter has been set to Monosync in order to try to mimic 6209 * a true raw outbound bit stream. Monosync still sends an open/close 6210 * sync char at the start/end of a frame. 
Try to match those sync 6211 * patterns to the idle mode set here 6212 */ 6213 if ( info->params.mode == MGSL_MODE_RAW ) { 6214 unsigned char syncpat = 0; 6215 switch( info->idle_mode ) { 6216 case HDLC_TXIDLE_FLAGS: 6217 syncpat = 0x7e; 6218 break; 6219 case HDLC_TXIDLE_ALT_ZEROS_ONES: 6220 syncpat = 0x55; 6221 break; 6222 case HDLC_TXIDLE_ZEROS: 6223 case HDLC_TXIDLE_SPACE: 6224 syncpat = 0x00; 6225 break; 6226 case HDLC_TXIDLE_ONES: 6227 case HDLC_TXIDLE_MARK: 6228 syncpat = 0xff; 6229 break; 6230 case HDLC_TXIDLE_ALT_MARK_SPACE: 6231 syncpat = 0xaa; 6232 break; 6233 } 6234 6235 usc_SetTransmitSyncChars(info,syncpat,syncpat); 6236 } 6237 6238} /* end of usc_set_txidle() */ 6239 6240/* usc_get_serial_signals() 6241 * 6242 * Query the adapter for the state of the V24 status (input) signals. 6243 * 6244 * Arguments: info pointer to device instance data 6245 * Return Value: None 6246 */ 6247static void usc_get_serial_signals( struct mgsl_struct *info ) 6248{ 6249 u16 status; 6250 6251 /* clear all serial signals except DTR and RTS */ 6252 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS; 6253 6254 /* Read the Misc Interrupt status Register (MISR) to get */ 6255 /* the V24 status signals. */ 6256 6257 status = usc_InReg( info, MISR ); 6258 6259 /* set serial signal bits to reflect MISR */ 6260 6261 if ( status & MISCSTATUS_CTS ) 6262 info->serial_signals |= SerialSignal_CTS; 6263 6264 if ( status & MISCSTATUS_DCD ) 6265 info->serial_signals |= SerialSignal_DCD; 6266 6267 if ( status & MISCSTATUS_RI ) 6268 info->serial_signals |= SerialSignal_RI; 6269 6270 if ( status & MISCSTATUS_DSR ) 6271 info->serial_signals |= SerialSignal_DSR; 6272 6273} /* end of usc_get_serial_signals() */ 6274 6275/* usc_set_serial_signals() 6276 * 6277 * Set the state of DTR and RTS based on contents of 6278 * serial_signals member of device extension. 
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_serial_signals( struct mgsl_struct *info )
{
	u16 Control;
	unsigned char V24Out = info->serial_signals;

	/* get the current value of the Port Control Register (PCR) */

	Control = usc_InReg( info, PCR );

	/* NOTE(review): clearing the PCR bit asserts the output and setting
	 * it negates (the outputs appear to be active-low) — confirm against
	 * the Z16C32 PCR description before relying on this.
	 */
	if ( V24Out & SerialSignal_RTS )
		Control &= ~(BIT6);
	else
		Control |= BIT6;

	if ( V24Out & SerialSignal_DTR )
		Control &= ~(BIT4);
	else
		Control |= BIT4;

	usc_OutReg( info, PCR, Control );

}	/* end of usc_set_serial_signals() */

/* usc_enable_async_clock()
 *
 *	Enable the async clock at the specified frequency.
 *
 * Arguments:		info		pointer to device instance data
 *			data_rate	data rate of clock in bps
 *					0 disables the AUX clock.
 * Return Value:	None
 */
static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
{
	if ( data_rate ) {
		/*
		 * Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 * 0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );


		/*
		 * Write 16-bit Time Constant for BRG0
		 * Time Constant = (ClkSpeed / data_rate) - 1
		 * ClkSpeed = 921600 (ISA), 691200 (PCI)
		 * (data_rate is known non-zero here, so the division is safe)
		 */

		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
		else
			usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );


		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
		 */

		usc_OutReg( info, HCR,
			    (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );


		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */

		usc_OutReg( info, IOCR,
			    (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

}	/* end of usc_enable_async_clock() */

/*
 * Buffer Structures:
 *
 * Normal memory access uses virtual addresses that can make discontiguous
 * physical memory pages appear to be contiguous in the virtual address
 * space (the processors memory mapping handles the conversions).
 *
 * DMA transfers require physically contiguous memory. This is because
 * the DMA system controller and DMA bus masters deal with memory using
 * only physical addresses.
 *
 * This causes a problem under Windows NT when large DMA buffers are
 * needed. Fragmentation of the nonpaged pool prevents allocations of
 * physically contiguous buffers larger than the PAGE_SIZE.
 *
 * However the 16C32 supports Bus Master Scatter/Gather DMA which
 * allows DMA transfers to physically discontiguous buffers. Information
 * about each data transfer buffer is contained in a memory structure
 * called a 'buffer entry'. A list of buffer entries is maintained
 * to track and control the use of the data transfer buffers.
 *
 * To support this strategy we will allocate sufficient PAGE_SIZE
 * contiguous memory buffers to allow for the total required buffer
 * space.
 *
 * The 16C32 accesses the list of buffer entries using Bus Master
 * DMA. Control information is read from the buffer entries by the
 * 16C32 to control data transfers. status information is written to
 * the buffer entries by the 16C32 to indicate the status of completed
 * transfers.
 *
 * The CPU writes control information to the buffer entries to control
 * the 16C32 and reads status information from the buffer entries to
 * determine information about received and transmitted frames.
 *
 * Because the CPU and 16C32 (adapter) both need simultaneous access
 * to the buffer entries, the buffer entry memory is allocated with
 * HalAllocateCommonBuffer(). This restricts the size of the buffer
 * entry list to PAGE_SIZE. (Comment retained from the original NT
 * driver this code derives from.)
 *
 * The actual data buffers on the other hand will only be accessed
 * by the CPU or the adapter but not by both simultaneously. This allows
 * Scatter/Gather packet based DMA procedures for using physically
 * discontiguous pages.
 */

/*
 * mgsl_reset_tx_dma_buffers()
 *
 * 	Set the count for all transmit buffers to 0 to indicate the
 * 	buffer is available for use and set the current buffer to the
 * 	first buffer. This effectively makes all buffers free and
 * 	discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* single 32-bit type-punned store — apparently clears the
		 * count field and its adjacent field in one write (compare
		 * the commented-out per-field version in the rx variant
		 * below); NOTE(review): assumes the two 16-bit fields are
		 * adjacent — confirm against DMABUFFERENTRY layout
		 */
		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
	}

	info->current_tx_buffer = 0;
	info->start_tx_dma_buffer = 0;
	info->tx_dma_buffers_used = 0;

	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_reset_tx_dma_buffers() */

/*
 * num_free_tx_dma_buffers()
 *
 * 	returns the number of free tx dma buffers available
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	number of free tx dma buffers
 */
static int num_free_tx_dma_buffers(struct mgsl_struct *info)
{
	return info->tx_buffer_count - info->tx_dma_buffers_used;
}

/*
 * mgsl_reset_rx_dma_buffers()
 *
 * 	Set the count for all receive buffers to DMABUFFERSIZE
 * 	and set the current buffer to the first buffer. This effectively
 * 	makes all buffers free and discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* single 32-bit store equivalent to the two field writes
		 * below (count = DMABUFFERSIZE, status = 0) */
		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
//		info->rx_buffer_list[i].count = DMABUFFERSIZE;
//		info->rx_buffer_list[i].status = 0;
	}

	info->current_rx_buffer = 0;

}	/* end of mgsl_reset_rx_dma_buffers() */

/*
 * mgsl_free_rx_frame_buffers()
 *
 * 	Free the receive buffers used by a received SDLC
 * 	frame such that the buffers can be reused.
6485 * 6486 * Arguments: 6487 * 6488 * info pointer to device instance data 6489 * StartIndex index of 1st receive buffer of frame 6490 * EndIndex index of last receive buffer of frame 6491 * 6492 * Return Value: None 6493 */ 6494static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ) 6495{ 6496 bool Done = false; 6497 DMABUFFERENTRY *pBufEntry; 6498 unsigned int Index; 6499 6500 /* Starting with 1st buffer entry of the frame clear the status */ 6501 /* field and set the count field to DMA Buffer Size. */ 6502 6503 Index = StartIndex; 6504 6505 while( !Done ) { 6506 pBufEntry = &(info->rx_buffer_list[Index]); 6507 6508 if ( Index == EndIndex ) { 6509 /* This is the last buffer of the frame! */ 6510 Done = true; 6511 } 6512 6513 /* reset current buffer for reuse */ 6514// pBufEntry->status = 0; 6515// pBufEntry->count = DMABUFFERSIZE; 6516 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE; 6517 6518 /* advance to next buffer entry in linked list */ 6519 Index++; 6520 if ( Index == info->rx_buffer_count ) 6521 Index = 0; 6522 } 6523 6524 /* set current buffer to next buffer after last buffer of frame */ 6525 info->current_rx_buffer = Index; 6526 6527} /* end of free_rx_frame_buffers() */ 6528 6529/* mgsl_get_rx_frame() 6530 * 6531 * This function attempts to return a received SDLC frame from the 6532 * receive DMA buffers. Only frames received without errors are returned. 
 *
 * Arguments:		info	pointer to device extension
 * Return Value:	true if frame returned, otherwise false
 */
static bool mgsl_get_rx_frame(struct mgsl_struct *info)
{
	unsigned int StartIndex, EndIndex;	/* index of 1st and last buffers of Rx frame */
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	bool ReturnCode = false;
	unsigned long flags;
	struct tty_struct *tty = info->port.tty;
	bool return_frame = false;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. To find the last buffer of the frame look for
	 * a non-zero status field in the buffer entries. (The status
	 * field is set by the 16C32 after completing a receive frame.)
	 */

	StartIndex = EndIndex = info->current_rx_buffer;

	while( !info->rx_buffer_list[EndIndex].status ) {
		/*
		 * If the count field of the buffer entry is non-zero then
		 * this buffer has not been used. (The 16C32 clears the count
		 * field when it starts using the buffer.) If an unused buffer
		 * is encountered then there are no frames available.
		 */

		if ( info->rx_buffer_list[EndIndex].count )
			goto Cleanup;

		/* advance to next buffer entry in linked list */
		EndIndex++;
		if ( EndIndex == info->rx_buffer_count )
			EndIndex = 0;

		/* if entire list searched then no frame available */
		if ( EndIndex == StartIndex ) {
			/* If this occurs then something bad happened,
			 * all buffers have been 'used' but none mark
			 * the end of a frame. Reset buffers and receiver.
			 */

			if ( info->rx_enabled ){
				spin_lock_irqsave(&info->irq_spinlock,flags);
				usc_start_receiver(info);
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
			}
			goto Cleanup;
		}
	}


	/* check status of receive frame */

	status = info->rx_buffer_list[EndIndex].status;

	if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
			RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
		/* count the specific error; only one counter is bumped even
		 * if several error bits are set (priority order below) */
		if ( status & RXSTATUS_SHORT_FRAME )
			info->icount.rxshort++;
		else if ( status & RXSTATUS_ABORT )
			info->icount.rxabort++;
		else if ( status & RXSTATUS_OVERRUN )
			info->icount.rxover++;
		else {
			info->icount.rxcrc++;
			/* in CRC_RETURN_EX mode even CRC-bad frames are
			 * handed to the application, tagged below */
			if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
				return_frame = true;
		}
		framesize = 0;
#if SYNCLINK_GENERIC_HDLC
		{
			info->netdev->stats.rx_errors++;
			info->netdev->stats.rx_frame_errors++;
		}
#endif
	} else
		return_frame = true;

	if ( return_frame ) {
		/* receive frame has no errors, get frame size.
		 * The frame size is the starting value of the RCC (which was
		 * set to 0xffff) minus the ending value of the RCC (decremented
		 * once for each receive character) minus 2 for the 16-bit CRC.
		 */

		framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;

		/* adjust frame size for CRC if any */
		if ( info->params.crc_type == HDLC_CRC_16_CCITT )
			framesize -= 2;
		else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
			framesize -= 4;
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
			__FILE__,__LINE__,info->device_name,status,framesize);

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
			min_t(int, framesize, DMABUFFERSIZE),0);

	if (framesize) {
		/* oversized frames (incl. the +1 status byte appended in
		 * CRC_RETURN_EX mode) are counted and dropped, not copied */
		if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
				((framesize+1) > info->max_frame_size) ) ||
			(framesize > info->max_frame_size) )
			info->icount.rxlong++;
		else {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			int copy_count = framesize;
			int index = StartIndex;
			unsigned char *ptmp = info->intermediate_rxbuffer;

			if ( !(status & RXSTATUS_CRC_ERROR))
				info->icount.rxok++;

			while(copy_count) {
				int partial_count;
				if ( copy_count > DMABUFFERSIZE )
					partial_count = DMABUFFERSIZE;
				else
					partial_count = copy_count;

				pBufEntry = &(info->rx_buffer_list[index]);
				memcpy( ptmp, pBufEntry->virt_addr, partial_count );
				ptmp += partial_count;
				copy_count -= partial_count;

				/* wrap around the circular buffer list */
				if ( ++index == info->rx_buffer_count )
					index = 0;
			}

			if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
				/* append a one-byte CRC verdict after the data */
				++framesize;
				*ptmp = (status & RXSTATUS_CRC_ERROR ?
						RX_CRC_ERROR :
						RX_OK);

				if ( debug_level >= DEBUG_LEVEL_DATA )
					printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
						__FILE__,__LINE__,info->device_name,
						*ptmp);
			}

#if SYNCLINK_GENERIC_HDLC
			if (info->netcount)
				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
			else
#endif
				ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}
	}
	/* Free the buffers used by this frame. */
	mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );

	ReturnCode = true;

Cleanup:

	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[EndIndex].status &&
			info->rx_buffer_list[EndIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

}	/* end of mgsl_get_rx_frame() */

/* mgsl_get_raw_rx_frame()
 *
 *	This function attempts to return a received frame from the
 *	receive DMA buffers when running in external loop mode. In this mode,
 *	we will return at most one DMABUFFERSIZE frame to the application.
 *	The USC receiver is triggering off of DCD going active to start a new
 *	frame, and DCD going inactive to terminate the frame (similar to
 *	processing a closing flag character).
 *
 *	In this routine, we will return DMABUFFERSIZE "chunks" at a time.
 *	If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
 *	status field and the RCC field will indicate the length of the
 *	entire received frame.
We take this RCC field and get the modulus
 *	of RCC and DMABUFFERSIZE to determine if number of bytes in the
 *	last Rx DMA buffer and return that last portion of the frame.
 *
 * Arguments:		info	pointer to device extension
 * Return Value:	true if frame returned, otherwise false
 */
static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
	unsigned int CurrentIndex, NextIndex;
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	bool ReturnCode = false;
	unsigned long flags;
	struct tty_struct *tty = info->port.tty;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. The status field is set by the 16C32 after
	 * completing a receive frame. If the status field of this buffer
	 * is zero, either the USC is still filling this buffer or this
	 * is one of a series of buffers making up a received frame.
	 *
	 * If the count field of this buffer is zero, the USC is either
	 * using this buffer or has used this buffer. Look at the count
	 * field of the next buffer. If that next buffer's count is
	 * non-zero, the USC is still actively using the current buffer.
	 * Otherwise, if the next buffer's count field is zero, the
	 * current buffer is complete and the USC is using the next
	 * buffer.
	 */
	CurrentIndex = NextIndex = info->current_rx_buffer;
	++NextIndex;
	if ( NextIndex == info->rx_buffer_count )
		NextIndex = 0;

	if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
		(info->rx_buffer_list[CurrentIndex].count == 0 &&
			info->rx_buffer_list[NextIndex].count == 0)) {
		/*
		 * Either the status field of this dma buffer is non-zero
		 * (indicating the last buffer of a receive frame) or the next
		 * buffer is marked as in use -- implying this buffer is complete
		 * and an intermediate buffer for this received frame.
		 */

		status = info->rx_buffer_list[CurrentIndex].status;

		if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
				RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
			/* bump exactly one error counter (priority order) */
			if ( status & RXSTATUS_SHORT_FRAME )
				info->icount.rxshort++;
			else if ( status & RXSTATUS_ABORT )
				info->icount.rxabort++;
			else if ( status & RXSTATUS_OVERRUN )
				info->icount.rxover++;
			else
				info->icount.rxcrc++;
			framesize = 0;
		} else {
			/*
			 * A receive frame is available, get frame size and status.
			 *
			 * The frame size is the starting value of the RCC (which was
			 * set to 0xffff) minus the ending value of the RCC (decremented
			 * once for each receive character) minus 2 or 4 for the 16-bit
			 * or 32-bit CRC.
			 *
			 * If the status field is zero, this is an intermediate buffer.
			 * It's size is 4K.
			 *
			 * If the DMA Buffer Entry's Status field is non-zero, the
			 * receive operation completed normally (ie: DCD dropped). The
			 * RCC field is valid and holds the received frame size.
			 * It is possible that the RCC field will be zero on a DMA buffer
			 * entry with a non-zero status. This can occur if the total
			 * frame size (number of bytes between the time DCD goes active
			 * to the time DCD goes inactive) exceeds 65535 bytes. In this
			 * case the 16C32 has underrun on the RCC count and appears to
			 * stop updating this counter to let us know the actual received
			 * frame size. If this happens (non-zero status and zero RCC),
			 * simply return the entire RxDMA Buffer
			 */
			if ( status ) {
				/*
				 * In the event that the final RxDMA Buffer is
				 * terminated with a non-zero status and the RCC
				 * field is zero, we interpret this as the RCC
				 * having underflowed (received frame > 65535 bytes).
				 *
				 * Signal the event to the user by passing back
				 * a status of RxStatus_CrcError returning the full
				 * buffer and let the app figure out what data is
				 * actually valid
				 */
				if ( info->rx_buffer_list[CurrentIndex].rcc )
					framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
				else
					framesize = DMABUFFERSIZE;
			}
			else
				framesize = DMABUFFERSIZE;
		}

		if ( framesize > DMABUFFERSIZE ) {
			/*
			 * if running in raw sync mode, ISR handler for
			 * End Of Buffer events terminates all buffers at 4K.
			 * If this frame size is said to be >4K, get the
			 * actual number of bytes of the frame in this buffer.
			 */
			framesize = framesize % DMABUFFERSIZE;
		}


		if ( debug_level >= DEBUG_LEVEL_BH )
			printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
				__FILE__,__LINE__,info->device_name,status,framesize);

		if ( debug_level >= DEBUG_LEVEL_DATA )
			mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
				min_t(int, framesize, DMABUFFERSIZE),0);

		if (framesize) {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			/* NOTE: we never copy more than DMABUFFERSIZE bytes */

			pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
			memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
			info->icount.rxok++;

			ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}

		/* Free the buffers used by this frame. */
		mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );

		ReturnCode = true;
	}


	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[CurrentIndex].status &&
			info->rx_buffer_list[CurrentIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

}	/* end of mgsl_get_raw_rx_frame() */

/* mgsl_load_tx_dma_buffer()
 *
 * 	Load the transmit DMA buffer with the specified data.
 *
 * Arguments:
 *
 * 	info		pointer to device extension
 * 	Buffer		pointer to buffer containing frame to load
 * 	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value:	None
 */
static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
		const char *Buffer, unsigned int BufferSize)
{
	unsigned short Copycount;
	unsigned int i = 0;
	DMABUFFERENTRY *pBufEntry;

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);

	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		/* set CMR:13 to start transmit when
		 * next GoAhead (abort) is received
		 */
		info->cmr_value |= BIT13;
	}

	/* begin loading the frame in the next available tx dma
	 * buffer, remember it's starting location for setting
	 * up tx dma operation
	 */
	i = info->current_tx_buffer;
	info->start_tx_dma_buffer = i;

	/* Setup the status and RCC (Frame Size) fields of the 1st */
	/* buffer entry in the transmit DMA buffer list. */

	info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
	info->tx_buffer_list[i].rcc    = BufferSize;
	info->tx_buffer_list[i].count  = BufferSize;

	/* Copy frame data from 1st source buffer to the DMA buffers. */
	/* The frame data may span multiple DMA buffers. */

	while( BufferSize ){
		/* Get a pointer to next DMA buffer entry.
 */
		pBufEntry = &info->tx_buffer_list[i++];

		/* wrap to the start of the circular buffer list */
		if ( i == info->tx_buffer_count )
			i=0;

		/* Calculate the number of bytes that can be copied from */
		/* the source buffer to this DMA buffer. */
		if ( BufferSize > DMABUFFERSIZE )
			Copycount = DMABUFFERSIZE;
		else
			Copycount = BufferSize;

		/* Actually copy data from source buffer to DMA buffer. */
		/* Also set the data count for this individual DMA buffer. */
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
		else
			memcpy(pBufEntry->virt_addr, Buffer, Copycount);

		pBufEntry->count = Copycount;

		/* Advance source pointer and reduce remaining data count. */
		Buffer += Copycount;
		BufferSize -= Copycount;

		++info->tx_dma_buffers_used;
	}

	/* remember next available tx dma buffer */
	info->current_tx_buffer = i;

}	/* end of mgsl_load_tx_dma_buffer() */

/*
 * mgsl_register_test()
 *
 * 	Performs a register test of the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_register_test( struct mgsl_struct *info )
{
	static unsigned short BitPatterns[] =
		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned int i;
	bool rc = true;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/* Verify the reset state of some registers. */

	if ( (usc_InReg( info, SICR ) != 0) ||
		  (usc_InReg( info, IVR  ) != 0) ||
		  (usc_InDmaReg( info, DIVR ) != 0) ){
		rc = false;
	}

	if ( rc ){
		/* Write bit patterns to various registers but do it out of */
		/* sync, then read back and verify values. */

		for ( i = 0 ; i < Patterncount ; i++ ) {
			usc_OutReg( info, TC0R, BitPatterns[i] );
			usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
			usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
			usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
			usc_OutReg( info, RSR,  BitPatterns[(i+4)%Patterncount] );
			usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );

			if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
				  (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
				  (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
				  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
				  (usc_InReg( info, RSR  ) != BitPatterns[(i+4)%Patterncount]) ||
				  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
				rc = false;
				break;
			}
		}
	}

	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;

}	/* end of mgsl_register_test() */

/* mgsl_irq_test()	Perform interrupt test of the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_irq_test( struct mgsl_struct *info )
{
	unsigned long EndTime;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/*
	 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
	 * The ISR sets irq_occurred to true.
	 */

	info->irq_occurred = false;

	/* Enable INTEN gate (Port 6, Bit12). */
	/* This connects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );

	usc_EnableMasterIrqBit(info);
	usc_EnableInterrupts(info, IO_PIN);
	usc_ClearIrqPendingBits(info, IO_PIN);

	usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
	usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* wait up to 100 * 10ms for the ISR to flag the interrupt */
	EndTime=100;
	while( EndTime-- && !info->irq_occurred ) {
		msleep_interruptible(10);
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return info->irq_occurred;

}	/* end of mgsl_irq_test() */

/* mgsl_dma_test()
 *
 * 	Perform a DMA test of the 16C32. A small frame is
 * 	transmitted via DMA from a transmit buffer to a receive buffer
 * 	using single buffer DMA mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_dma_test( struct mgsl_struct *info )
{
	unsigned short FifoLevel;
	unsigned long phys_addr;
	unsigned int FrameSize;
	unsigned int i;
	char *TmpPtr;
	bool rc = true;
	unsigned short status=0;
	unsigned long EndTime;
	unsigned long flags;
	MGSL_PARAMS tmp_params;

	/* save current port options */
	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
	/* load default port options */
	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));

#define TESTFRAMESIZE 40

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup 16C32 for SDLC DMA transfer mode */

	usc_reset(info);
	usc_set_sdlc_mode(info);
	usc_enable_loopback(info,1);

	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
	 * field of the buffer entry after fetching buffer
	 * address.  This way we can detect a DMA failure for a DMA read
	 * (which should be non-destructive to system memory) before we try
	 * and write to memory (where a failure could corrupt system memory).
	 */

	/* Receive DMA mode Register (RDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	RSBinA/L = store Rx status Block in List entry
	 * <12>		0	1 = Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on RxBound
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (write as 0s)
	 *
	 * 1110 0010 0000 0000 = 0xe200
	 */

	usc_OutDmaReg( info, RDMR, 0xe200 );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */

	FrameSize = TESTFRAMESIZE;

	/* setup 1st transmit buffer entry: */
	/* with frame size and transmit control word */

	info->tx_buffer_list[0].count = FrameSize;
	info->tx_buffer_list[0].rcc = FrameSize;
	info->tx_buffer_list[0].status = 0x4000;

	/* build a transmit frame in 1st transmit DMA buffer */

	TmpPtr = info->tx_buffer_list[0].virt_addr;
	for (i = 0; i < FrameSize; i++ )
		*TmpPtr++ = i;

	/* setup 1st receive buffer entry: */
	/* clear status, set max receive buffer size */

	info->rx_buffer_list[0].status = 0;
	info->rx_buffer_list[0].count = FrameSize + 4;

	/* zero out the 1st receive buffer */

	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );

	/* Set count field of next buffer entries to prevent */
	/* 16C32 from using buffers after the 1st one. */

	info->tx_buffer_list[1].count = 0;
	info->rx_buffer_list[1].count = 0;


	/***************************/
	/* Program 16C32 receiver. */
	/***************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup DMA transfers */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
	phys_addr = info->rx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );

	/* Clear the Rx DMA status bits (read RDMR) and start channel */
	usc_InDmaReg( info, RDMR );
	usc_DmaCmd( info, DmaCmd_InitRxChannel );

	/* Enable Receiver (RMR <1..0> = 10) */
	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/*************************************************************/
	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
	/*************************************************************/

	/* Wait 100ms for interrupt. */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InDmaReg( info, RDMR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( !(status & BIT4) && (status & BIT5) ) {
			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
			/* BUSY  (BIT 5) is active (channel still active). */
			/* This means the buffer entry read has completed. */
			break;
		}
	}


	/******************************/
	/* Program 16C32 transmitter. */
	/******************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */

	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* Program the address of the 1st DMA Buffer Entry in linked list */

	phys_addr = info->tx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );

	/* unlatch Tx status bits, and start transmit channel. */

	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
	usc_DmaCmd( info, DmaCmd_InitTxChannel );

	/* wait for DMA controller to fill transmit FIFO */

	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/**********************************/
	/* WAIT FOR TRANSMIT FIFO TO FILL */
	/**********************************/

	/* Wait 100ms */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		/* TICR upper byte holds the Tx FIFO fill level after
		 * TCmd_SelectTicrTxFifostatus above */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		FifoLevel = usc_InReg(info, TICR) >> 8;
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( FifoLevel < 16 )
			break;
		else
			if ( FrameSize < 32 ) {
				/* This frame is smaller than the entire transmit FIFO */
				/* so wait for the entire frame to be loaded. */
				if ( FifoLevel <= (32 - FrameSize) )
					break;
			}
	}


	if ( rc )
	{
		/* Enable 16C32 transmitter. */

		spin_lock_irqsave(&info->irq_spinlock,flags);

		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
		usc_TCmd( info, TCmd_SendFrame );
		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );

		spin_unlock_irqrestore(&info->irq_spinlock,flags);


		/******************************/
		/* WAIT FOR TRANSMIT COMPLETE */
		/******************************/

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* While timer not expired wait for transmit complete */

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InReg( info, TCSR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}

			spin_lock_irqsave(&info->irq_spinlock,flags);
			status = usc_InReg( info, TCSR );
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}


	if ( rc ){
		/* CHECK FOR TRANSMIT ERRORS */
		if ( status & (BIT5 + BIT1) )
			rc = false;
	}

	if ( rc ) {
		/* WAIT FOR RECEIVE COMPLETE */

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* Wait for 16C32 to write receive status to buffer entry.
		 * The entry's status field is DMA-written by the device,
		 * so poll it until it becomes non-zero. */
		status=info->rx_buffer_list[0].status;
		while ( status == 0 ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}
			status=info->rx_buffer_list[0].status;
		}
	}


	if ( rc ) {
		/* CHECK FOR RECEIVE ERRORS */
		status = info->rx_buffer_list[0].status;

		if ( status & (BIT8 + BIT3 + BIT1) ) {
			/* receive error has occurred */
			rc = false;
		} else {
			/* verify received data matches the transmitted frame */
			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
				rc = false;
			}
		}
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* restore current port options */
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));

	return rc;

}	/* end of mgsl_dma_test() */

/* mgsl_adapter_test()
 *
 * 	Perform the register, IRQ, and DMA tests for the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise -ENODEV
 */
static int mgsl_adapter_test( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):Testing device %s\n",
			__FILE__,__LINE__,info->device_name );

	if ( !mgsl_register_test( info ) ) {
		info->init_error = DiagStatus_AddressFailure;
		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
		return -ENODEV;
	}

	if ( !mgsl_irq_test( info ) ) {
		info->init_error = DiagStatus_IrqFailure;
		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
		return -ENODEV;
	}

	if ( !mgsl_dma_test( info ) ) {
		info->init_error = DiagStatus_DmaFailure;
		printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
		return -ENODEV;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):device %s passed diagnostics\n",
			__FILE__,__LINE__,info->device_name );

	return 0;

}	/* end of mgsl_adapter_test() */

/* mgsl_memory_test()
 *
 * 	Test the shared memory on a PCI adapter.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_memory_test( struct mgsl_struct *info )
{
	static unsigned long BitPatterns[] =
		{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
	unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned long i;
	unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
	unsigned long * TestAddr;

	/* shared memory exists only on PCI adapters; trivially pass otherwise */
	if ( info->bus_type != MGSL_BUS_TYPE_PCI )
		return true;

	TestAddr = (unsigned long *)info->memory_base;

	/* Test data lines with test pattern at one location. */

	for ( i = 0 ; i < Patterncount ; i++ ) {
		*TestAddr = BitPatterns[i];
		if ( *TestAddr != BitPatterns[i] )
			return false;
	}

	/* Test address lines with incrementing pattern over */
	/* entire address range. */

	for ( i = 0 ; i < TestLimit ; i++ ) {
		*TestAddr = i * 4;
		TestAddr++;
	}

	TestAddr = (unsigned long *)info->memory_base;

	for ( i = 0 ; i < TestLimit ; i++ ) {
		if ( *TestAddr != i * 4 )
			return false;
		TestAddr++;
	}

	/* leave the shared memory zeroed for normal operation */
	memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );

	return true;

}	/* End Of mgsl_memory_test() */


/* mgsl_load_pci_memory()
 *
 * 	Load a large block of data into the PCI shared memory.
 * 	Use this instead of memcpy() or memmove() to move data
 * 	into the PCI shared memory.
 *
 * Notes:
 *
 * 	This function prevents the PCI9050 interface chip from hogging
 * 	the adapter local bus, which can starve the 16C32 by preventing
 * 	16C32 bus master cycles.
 *
 * 	The PCI9050 documentation says that the 9050 will always release
 * 	control of the local bus after completing the current read
 * 	or write operation.
 *
 * 	It appears that as long as the PCI9050 write FIFO is full, the
 * 	PCI9050 treats all of the writes as a single burst transaction
 * 	and will not release the bus. This causes DMA latency problems
 * 	at high speeds when copying large data blocks to the shared
 * 	memory.
 *
 * 	This function, in effect, breaks a large shared memory write
 * 	into multiple transactions by interleaving a shared memory read
 * 	which will flush the write FIFO and 'complete' the write
 * 	transaction. This allows any pending DMA request to gain control
 * 	of the local bus in a timely fashion.
 *
 * Arguments:
 *
 * 	TargetPtr	pointer to target address in PCI shared memory
 * 	SourcePtr	pointer to source buffer for data
 * 	count		count in bytes of data to copy
 *
 * Return Value:	None
 */
static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
	unsigned short count )
{
	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
#define PCI_LOAD_INTERVAL 64

	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
	unsigned short Index;
	unsigned long Dummy;

	for ( Index = 0 ; Index < Intervalcount ; Index++ )
	{
		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
		/* dummy read flushes the PCI9050 write FIFO,
		 * ending the burst and releasing the local bus */
		Dummy = *((volatile unsigned long *)TargetPtr);
		TargetPtr += PCI_LOAD_INTERVAL;
		SourcePtr += PCI_LOAD_INTERVAL;
	}

	/* copy remainder (count need not be a multiple of the interval) */
	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );

}	/* End Of mgsl_load_pci_memory() */

/* mgsl_trace_block()
 *
 * 	Dump a data buffer to the kernel log, 16 bytes per line,
 * 	as a hex column followed by a printable-ASCII column.
 *
 * Arguments:	info	pointer to device instance data
 * 		data	buffer to dump
 * 		count	number of bytes to dump
 * 		xmit	non-zero for transmit data, zero for receive data
 */
static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
{
	int i;
	int linecount;
	if (xmit)
		printk("%s tx data:\n",info->device_name);
	else
		printk("%s rx data:\n",info->device_name);

	while(count) {
		if (count > 16)
			linecount = 16;
		else
			linecount = count;

		/* hex column */
		for(i=0;i<linecount;i++)
			printk("%02X ",(unsigned char)data[i]);
		/* pad short lines so the ASCII column stays aligned */
		for(;i<17;i++)
			printk("   ");
		/* ASCII column: octal 040..0176 are printable, others shown as '.' */
		for(i=0;i<linecount;i++) {
			if (data[i]>=040 && data[i]<=0176)
				printk("%c",data[i]);
			else
				printk(".");
		}
		printk("\n");

		data += linecount;
		count -= linecount;
	}
}	/* end of mgsl_trace_block() */

/* mgsl_tx_timeout()
 *
 * 	called when HDLC frame times out
 * 	update stats and do tx completion processing
 *
 * Arguments:	context		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_tx_timeout(unsigned long context)
{
	struct mgsl_struct *info = (struct mgsl_struct*)context;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	if(info->tx_active &&
	   (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW) ) {
		info->icount.txtimeout++;
	}
	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
		usc_loopmode_cancel_transmit( info );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
		mgsl_bh_transmit(info);

}	/* end of mgsl_tx_timeout() */

/* signal that there are no more frames to send, so that
 * line is 'released' by echoing RxD to TxD when current
 * transmission is complete (or
 * immediately if no tx in progress).
 */
static int mgsl_loopmode_send_done( struct mgsl_struct * info )
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		/* if a frame is still going out, defer the release until
		 * transmit completion; otherwise release immediately */
		if (info->tx_active)
			info->loopmode_send_done_requested = true;
		else
			usc_loopmode_send_done(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/* release the line by echoing RxD to TxD
 * upon completion of a transmit frame
 */
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
	info->loopmode_send_done_requested = false;
	/* clear CMR:13 to 0 to start echoing RxData to TxData */
	info->cmr_value &= ~BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* abort a transmit in progress while in HDLC LoopMode
 */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
	/* reset tx dma channel and purge TxFifo */
	usc_RTCmd( info, RTCmd_PurgeTxFifo );
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_loopmode_send_done( info );
}

/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
 * we must clear CMR:13 to begin repeating TxData to RxData
 */
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
	info->loopmode_insert_requested = true;

	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
	 * begin repeating TxData on RxData (complete insertion)
	 */
	usc_OutReg( info, RICR,
		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );

	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
	info->cmr_value |= BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* return 1 if station is inserted into the loop, otherwise 0
 */
static int usc_loopmode_active( struct mgsl_struct * info)
{
	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
}

#if SYNCLINK_GENERIC_HDLC

/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned char new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	/* map generic HDLC encoding constants to driver constants */
	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount)
		mgsl_program_hw(info);

	return 0;
}

/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->xmit_cnt = skb->len;
	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	dev->trans_start = jiffies;

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_active)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	if ((rc = hdlc_open(dev)))
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		/* startup failed; release the network-open claim */
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert DTR and RTS, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	mgsl_program_hw(info);

	/* enable network layer transmit */
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}

/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* derive the generic clock_type from the driver clock flags */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* map the generic clock_type back to driver clock flags;
		 * CLOCK_DEFAULT keeps the currently configured flags */
		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		/* clock_rate is meaningful only when the BRG supplies a clock */
		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			mgsl_program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n", dev->name);

	if (skb == NULL) {
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, size), buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);

	dev->last_rx = jiffies;
}

/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_init(struct mgsl_struct *info)
{
	int rc;
	struct net_device *dev;
	hdlc_device *hdlc;

	/* allocate and initialize network and HDLC layer objects */

	if (!(dev = alloc_hdlcdev(info))) {
		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
		return -ENOMEM;
	}

	/* for network layer reporting purposes only */
	dev->base_addr = info->io_base;
	dev->irq = info->irq_level;
	dev->dma = info->dma_level;

	/* network layer callbacks and settings */
	dev->do_ioctl = hdlcdev_ioctl;
	dev->open = hdlcdev_open;
	dev->stop = hdlcdev_close;
	dev->tx_timeout = hdlcdev_tx_timeout;
	dev->watchdog_timeo = 10*HZ;
	dev->tx_queue_len = 50;

	/* generic HDLC layer callbacks and settings */
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hdlcdev_attach;
	hdlc->xmit = hdlcdev_xmit;

	/* register objects with HDLC layer */
	if ((rc = register_hdlc_device(dev))) {
		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
		free_netdev(dev);
		return rc;
	}

	info->netdev = dev;
	return 0;
}

/**
 * called by device driver when removing device instance
 * do generic HDLC cleanup
 *
 * info  pointer to device instance information
 */
static void hdlcdev_exit(struct mgsl_struct *info)
{
	unregister_hdlc_device(info->netdev);
	free_netdev(info->netdev);
	info->netdev = NULL;
}

#endif /* CONFIG_HDLC */


static int __devinit synclink_init_one (struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct mgsl_struct *info;

	if (pci_enable_device(dev)) {
		printk("error enabling pci device %p\n", dev);
		return -EIO;
	}

	if (!(info = mgsl_allocate_device())) {
		printk("can't allocate device instance data.\n");
		return -EIO;
	}

	/* Copy user configuration info to device instance data */

	info->io_base = pci_resource_start(dev, 2);
	info->irq_level = dev->irq;
	info->phys_memory_base = pci_resource_start(dev, 3);

	/* Because ioremap only works on page boundaries we must map
	 * a larger area than is actually implemented for the LCR
	 * memory range. We map a full page starting at the page boundary.
	 */
	info->phys_lcr_base = pci_resource_start(dev, 0);
	info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
	info->phys_lcr_base &= ~(PAGE_SIZE-1);

	info->bus_type = MGSL_BUS_TYPE_PCI;
	info->io_addr_size = 8;
	info->irq_flags = IRQF_SHARED;

	if (dev->device == 0x0210) {
		/* Version 1 PCI9030 based universal PCI adapter */
		info->misc_ctrl_value = 0x007c4080;
		info->hw_version = 1;
	} else {
		/* Version 0 PCI9050 based 5V PCI adapter
		 * A PCI9050 bug prevents reading LCR registers if
		 * LCR base address bit 7 is set. Maintain shadow
		 * value so we can write to LCR misc control reg.
		 */
		info->misc_ctrl_value = 0x087e4546;
		info->hw_version = 0;
	}

	mgsl_add_device(info);

	return 0;
}

static void __devexit synclink_remove_one (struct pci_dev *dev)
{
}