Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.8-rc7 8123 lines 235 kB view raw
1/* 2 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $ 3 * 4 * Device driver for Microgate SyncLink ISA and PCI 5 * high speed multiprotocol serial adapters. 6 * 7 * written by Paul Fulghum for Microgate Corporation 8 * paulkf@microgate.com 9 * 10 * Microgate and SyncLink are trademarks of Microgate Corporation 11 * 12 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds 13 * 14 * Original release 01/11/99 15 * 16 * This code is released under the GNU General Public License (GPL) 17 * 18 * This driver is primarily intended for use in synchronous 19 * HDLC mode. Asynchronous mode is also provided. 20 * 21 * When operating in synchronous mode, each call to mgsl_write() 22 * contains exactly one complete HDLC frame. Calling mgsl_put_char 23 * will start assembling an HDLC frame that will not be sent until 24 * mgsl_flush_chars or mgsl_write is called. 25 * 26 * Synchronous receive data is reported as complete frames. To accomplish 27 * this, the TTY flip buffer is bypassed (too small to hold largest 28 * frame and may fragment frames) and the line discipline 29 * receive entry point is called directly. 30 * 31 * This driver has been tested with a slightly modified ppp.c driver 32 * for synchronous PPP. 33 * 34 * 2000/02/16 35 * Added interface for syncppp.c driver (an alternate synchronous PPP 36 * implementation that also supports Cisco HDLC). Each device instance 37 * registers as a tty device AND a network device (if dosyncppp option 38 * is set for the device). The functionality is determined by which 39 * device interface is opened. 40 * 41 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 42 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 44 * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 45 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 47 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 51 * OF THE POSSIBILITY OF SUCH DAMAGE. 52 */ 53 54#if defined(__i386__) 55# define BREAKPOINT() asm(" int $3"); 56#else 57# define BREAKPOINT() { } 58#endif 59 60#define MAX_ISA_DEVICES 10 61#define MAX_PCI_DEVICES 10 62#define MAX_TOTAL_DEVICES 20 63 64#include <linux/module.h> 65#include <linux/errno.h> 66#include <linux/signal.h> 67#include <linux/sched.h> 68#include <linux/timer.h> 69#include <linux/interrupt.h> 70#include <linux/pci.h> 71#include <linux/tty.h> 72#include <linux/tty_flip.h> 73#include <linux/serial.h> 74#include <linux/major.h> 75#include <linux/string.h> 76#include <linux/fcntl.h> 77#include <linux/ptrace.h> 78#include <linux/ioport.h> 79#include <linux/mm.h> 80#include <linux/seq_file.h> 81#include <linux/slab.h> 82#include <linux/delay.h> 83#include <linux/netdevice.h> 84#include <linux/vmalloc.h> 85#include <linux/init.h> 86#include <linux/ioctl.h> 87#include <linux/synclink.h> 88 89#include <asm/io.h> 90#include <asm/irq.h> 91#include <asm/dma.h> 92#include <linux/bitops.h> 93#include <asm/types.h> 94#include <linux/termios.h> 95#include <linux/workqueue.h> 96#include <linux/hdlc.h> 97#include <linux/dma-mapping.h> 98 99#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) 100#define SYNCLINK_GENERIC_HDLC 1 101#else 102#define SYNCLINK_GENERIC_HDLC 0 103#endif 104 105#define GET_USER(error,value,addr) error = get_user(value,addr) 106#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? 
-EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0

#include <asm/uaccess.h>

/* Reset value written to the receive count limit register (RCLR). */
#define RCLRVALUE 0xffff

/*
 * Default communication parameters applied to each device instance
 * until changed by an ioctl: synchronous HDLC, NRZI-space encoding,
 * 16-bit CCITT CRC, 9600 bps, 8 data bits / 1 stop bit / no parity.
 */
static MGSL_PARAMS default_params = {
	MGSL_MODE_HDLC,			/* unsigned long mode */
	0,				/* unsigned char loopback; */
	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
	0,				/* unsigned long clock_speed; */
	0xff,				/* unsigned char addr_filter; */
	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
	9600,				/* unsigned long data_rate; */
	8,				/* unsigned char data_bits; */
	1,				/* unsigned char stop_bits; */
	ASYNC_PARITY_NONE		/* unsigned char parity; */
};

/* Size of the PCI shared memory window and of the DMA buffer areas. */
#define SHARED_MEM_ADDRESS_SIZE 0x40000
#define BUFFERLISTSIZE 4096
#define DMABUFFERSIZE 4096
#define MAXRXFRAMES 7

/*
 * One entry of the DMA buffer list. The leading fields (through the
 * reserved padding word and link) form the layout required by the
 * 16C32 DMA controller; virt_addr, phys_entry and dma_addr are
 * driver-side bookkeeping appended after the hardware-defined part.
 */
typedef struct _DMABUFFERENTRY
{
	u32 phys_addr;		/* 32-bit flat physical address of data buffer */
	volatile u16 count;	/* buffer size/data count */
	volatile u16 status;	/* Control/status field */
	volatile u16 rcc;	/* character count field */
	u16 reserved;		/* padding required by 16C32 */
	u32 link;		/* 32-bit flat link to next buffer entry */
	char *virt_addr;	/* virtual address of data buffer */
	u32 phys_entry;		/* physical address of this buffer entry */
	dma_addr_t dma_addr;
} DMABUFFERENTRY, *DMAPBUFFERENTRY;

/* The queue of BH actions to be performed */

#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4

/* Limit on serial-signal interrupts before the signal is assumed to be
 * floating (see the *_chkcount fields of struct mgsl_struct). */
#define IO_PIN_SHUTDOWN_LIMIT 100

/* Counts of modem-control signal transitions (reported via ioctl). */
struct	_input_signal_events {
	int	ri_up;
	int	ri_down;
	int	dsr_up;
	int	dsr_down;
	int	dcd_up;
	int	dcd_down;
	int	cts_up;
	int	cts_down;
};

/* transmit holding
buffer definitions*/
#define MAX_TX_HOLDING_BUFFERS 5
struct tx_holding_buffer {
	int	buffer_size;		/* size in bytes of 'buffer' */
	unsigned char *	buffer;		/* queued transmit frame data */
};


/*
 * Device instance data structure
 *
 * One instance exists per adapter; instances are linked through
 * next_device into the global device list.
 */

struct mgsl_struct {
	int			magic;	/* MGSL_MAGIC; checked by mgsl_paranoia_check() */
	struct tty_port		port;
	int			line;	/* tty line (device) number */
	int                     hw_version;

	struct mgsl_icount	icount;

	int			timeout;
	int			x_char;		/* xon/xoff character */
	u16			read_status_mask;
	u16			ignore_status_mask;
	unsigned char 		*xmit_buf;	/* async transmit ring buffer */
	int			xmit_head;
	int			xmit_tail;
	int			xmit_cnt;

	wait_queue_head_t	status_event_wait_q;
	wait_queue_head_t	event_wait_q;
	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
	struct mgsl_struct	*next_device;	/* device list link */

	spinlock_t irq_spinlock;	/* spinlock for synchronizing with ISR */
	struct work_struct task;	/* task structure for scheduling bh */

	u32 EventMask;			/* event trigger mask */
	u32 RecordedEvents;		/* pending events */

	u32 max_frame_size;		/* as set by device config */

	u32 pending_bh;			/* presumably a mask of BH_* action codes - TODO confirm */

	bool bh_running;		/* Protection from multiple */
	int isr_overflow;
	bool bh_requested;

	int dcd_chkcount;		/* check counts to prevent */
	int cts_chkcount;		/* too many IRQs if a signal */
	int dsr_chkcount;		/* is floating */
	int ri_chkcount;

	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
	u32 buffer_list_phys;
	dma_addr_t buffer_list_dma_addr;

	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
	unsigned int current_rx_buffer;

	int num_tx_dma_buffers;		/* number of tx dma frames required */
	int tx_dma_buffers_used;
	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
	int current_tx_buffer;		/* next tx dma buffer to be loaded */

	unsigned char *intermediate_rxbuffer;

	int num_tx_holding_buffers;	/* number of tx holding buffer allocated */
	int get_tx_holding_index;	/* next tx holding buffer for adapter to load */
	int put_tx_holding_index;	/* next tx holding buffer to store user request */
	int tx_holding_count;		/* number of tx holding buffers waiting */
	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];

	bool rx_enabled;
	bool rx_overflow;
	bool rx_rcc_underrun;

	bool tx_enabled;
	bool tx_active;
	u32 idle_mode;

	u16 cmr_value;			/* cached Channel Mode Register value */
	u16 tcsr_value;			/* cached Transmit Command/status Register value */

	char device_name[25];		/* device instance name */

	unsigned int bus_type;		/* expansion bus type (ISA,EISA,PCI) */
	unsigned char bus;		/* expansion bus number (zero based) */
	unsigned char function;		/* PCI device number */

	unsigned int io_base;		/* base I/O address of adapter */
	unsigned int io_addr_size;	/* size of the I/O address range */
	bool io_addr_requested;		/* true if I/O address requested */

	unsigned int irq_level;		/* interrupt level */
	unsigned long irq_flags;
	bool irq_requested;		/* true if IRQ requested */

	unsigned int dma_level;		/* DMA channel */
	bool dma_requested;		/* true if dma channel requested */

	u16 mbre_bit;
	u16 loopback_bits;
	u16 usc_idle_mode;

	MGSL_PARAMS params;		/* communications parameters */

	unsigned char serial_signals;	/* current serial signal states */

	bool irq_occurred;		/* for diagnostics use */
	unsigned int init_error;	/* Initialization startup error (DIAGS) */
	int	fDiagnosticsmode;	/* Driver in Diagnostic mode? (DIAGS) */

	u32 last_mem_alloc;
	unsigned char* memory_base;	/* shared memory address (PCI only) */
	u32 phys_memory_base;
	bool shared_mem_requested;

	unsigned char* lcr_base;	/* local config registers (PCI only) */
	u32 phys_lcr_base;
	u32 lcr_offset;
	bool lcr_mem_requested;

	u32 misc_ctrl_value;
	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
	char char_buf[MAX_ASYNC_BUFFER_SIZE];
	bool drop_rts_on_tx_done;

	bool loopmode_insert_requested;
	bool loopmode_send_done_requested;

	struct	_input_signal_events	input_signal_events;

	/* generic HDLC device parts */
	int netcount;
	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif
};

/* signature stored in mgsl_struct.magic to validate driver_data pointers */
#define MGSL_MAGIC 0x5401

/*
 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 */
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE 4096
#endif

/*
 * These macros define the offsets used in calculating the
 * I/O address of the specified USC registers.
 */


#define DCPIN 2		/* Bit 1 of I/O address */
#define SDPIN 4		/* Bit 2 of I/O address */

#define DCAR 0		/* DMA command/address register */
#define CCAR SDPIN	/* channel command/address register */
#define DATAREG DCPIN + SDPIN	/* serial data register */
#define MSBONLY 0x41
#define LSBONLY 0x40

/*
 * These macros define the register address (ordinal number)
 * used for writing address/value pairs to the USC.
339 */ 340 341#define CMR 0x02 /* Channel mode Register */ 342#define CCSR 0x04 /* Channel Command/status Register */ 343#define CCR 0x06 /* Channel Control Register */ 344#define PSR 0x08 /* Port status Register */ 345#define PCR 0x0a /* Port Control Register */ 346#define TMDR 0x0c /* Test mode Data Register */ 347#define TMCR 0x0e /* Test mode Control Register */ 348#define CMCR 0x10 /* Clock mode Control Register */ 349#define HCR 0x12 /* Hardware Configuration Register */ 350#define IVR 0x14 /* Interrupt Vector Register */ 351#define IOCR 0x16 /* Input/Output Control Register */ 352#define ICR 0x18 /* Interrupt Control Register */ 353#define DCCR 0x1a /* Daisy Chain Control Register */ 354#define MISR 0x1c /* Misc Interrupt status Register */ 355#define SICR 0x1e /* status Interrupt Control Register */ 356#define RDR 0x20 /* Receive Data Register */ 357#define RMR 0x22 /* Receive mode Register */ 358#define RCSR 0x24 /* Receive Command/status Register */ 359#define RICR 0x26 /* Receive Interrupt Control Register */ 360#define RSR 0x28 /* Receive Sync Register */ 361#define RCLR 0x2a /* Receive count Limit Register */ 362#define RCCR 0x2c /* Receive Character count Register */ 363#define TC0R 0x2e /* Time Constant 0 Register */ 364#define TDR 0x30 /* Transmit Data Register */ 365#define TMR 0x32 /* Transmit mode Register */ 366#define TCSR 0x34 /* Transmit Command/status Register */ 367#define TICR 0x36 /* Transmit Interrupt Control Register */ 368#define TSR 0x38 /* Transmit Sync Register */ 369#define TCLR 0x3a /* Transmit count Limit Register */ 370#define TCCR 0x3c /* Transmit Character count Register */ 371#define TC1R 0x3e /* Time Constant 1 Register */ 372 373 374/* 375 * MACRO DEFINITIONS FOR DMA REGISTERS 376 */ 377 378#define DCR 0x06 /* DMA Control Register (shared) */ 379#define DACR 0x08 /* DMA Array count Register (shared) */ 380#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */ 381#define DIVR 0x14 /* DMA Interrupt Vector Register 
(shared) */ 382#define DICR 0x18 /* DMA Interrupt Control Register (shared) */ 383#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */ 384#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */ 385 386#define TDMR 0x02 /* Transmit DMA mode Register */ 387#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */ 388#define TBCR 0x2a /* Transmit Byte count Register */ 389#define TARL 0x2c /* Transmit Address Register (low) */ 390#define TARU 0x2e /* Transmit Address Register (high) */ 391#define NTBCR 0x3a /* Next Transmit Byte count Register */ 392#define NTARL 0x3c /* Next Transmit Address Register (low) */ 393#define NTARU 0x3e /* Next Transmit Address Register (high) */ 394 395#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */ 396#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */ 397#define RBCR 0xaa /* Receive Byte count Register */ 398#define RARL 0xac /* Receive Address Register (low) */ 399#define RARU 0xae /* Receive Address Register (high) */ 400#define NRBCR 0xba /* Next Receive Byte count Register */ 401#define NRARL 0xbc /* Next Receive Address Register (low) */ 402#define NRARU 0xbe /* Next Receive Address Register (high) */ 403 404 405/* 406 * MACRO DEFINITIONS FOR MODEM STATUS BITS 407 */ 408 409#define MODEMSTATUS_DTR 0x80 410#define MODEMSTATUS_DSR 0x40 411#define MODEMSTATUS_RTS 0x20 412#define MODEMSTATUS_CTS 0x10 413#define MODEMSTATUS_RI 0x04 414#define MODEMSTATUS_DCD 0x01 415 416 417/* 418 * Channel Command/Address Register (CCAR) Command Codes 419 */ 420 421#define RTCmd_Null 0x0000 422#define RTCmd_ResetHighestIus 0x1000 423#define RTCmd_TriggerChannelLoadDma 0x2000 424#define RTCmd_TriggerRxDma 0x2800 425#define RTCmd_TriggerTxDma 0x3000 426#define RTCmd_TriggerRxAndTxDma 0x3800 427#define RTCmd_PurgeRxFifo 0x4800 428#define RTCmd_PurgeTxFifo 0x5000 429#define RTCmd_PurgeRxAndTxFifo 0x5800 430#define RTCmd_LoadRcc 0x6800 431#define RTCmd_LoadTcc 0x7000 432#define RTCmd_LoadRccAndTcc 0x7800 433#define 
RTCmd_LoadTC0 0x8800 434#define RTCmd_LoadTC1 0x9000 435#define RTCmd_LoadTC0AndTC1 0x9800 436#define RTCmd_SerialDataLSBFirst 0xa000 437#define RTCmd_SerialDataMSBFirst 0xa800 438#define RTCmd_SelectBigEndian 0xb000 439#define RTCmd_SelectLittleEndian 0xb800 440 441 442/* 443 * DMA Command/Address Register (DCAR) Command Codes 444 */ 445 446#define DmaCmd_Null 0x0000 447#define DmaCmd_ResetTxChannel 0x1000 448#define DmaCmd_ResetRxChannel 0x1200 449#define DmaCmd_StartTxChannel 0x2000 450#define DmaCmd_StartRxChannel 0x2200 451#define DmaCmd_ContinueTxChannel 0x3000 452#define DmaCmd_ContinueRxChannel 0x3200 453#define DmaCmd_PauseTxChannel 0x4000 454#define DmaCmd_PauseRxChannel 0x4200 455#define DmaCmd_AbortTxChannel 0x5000 456#define DmaCmd_AbortRxChannel 0x5200 457#define DmaCmd_InitTxChannel 0x7000 458#define DmaCmd_InitRxChannel 0x7200 459#define DmaCmd_ResetHighestDmaIus 0x8000 460#define DmaCmd_ResetAllChannels 0x9000 461#define DmaCmd_StartAllChannels 0xa000 462#define DmaCmd_ContinueAllChannels 0xb000 463#define DmaCmd_PauseAllChannels 0xc000 464#define DmaCmd_AbortAllChannels 0xd000 465#define DmaCmd_InitAllChannels 0xf000 466 467#define TCmd_Null 0x0000 468#define TCmd_ClearTxCRC 0x2000 469#define TCmd_SelectTicrTtsaData 0x4000 470#define TCmd_SelectTicrTxFifostatus 0x5000 471#define TCmd_SelectTicrIntLevel 0x6000 472#define TCmd_SelectTicrdma_level 0x7000 473#define TCmd_SendFrame 0x8000 474#define TCmd_SendAbort 0x9000 475#define TCmd_EnableDleInsertion 0xc000 476#define TCmd_DisableDleInsertion 0xd000 477#define TCmd_ClearEofEom 0xe000 478#define TCmd_SetEofEom 0xf000 479 480#define RCmd_Null 0x0000 481#define RCmd_ClearRxCRC 0x2000 482#define RCmd_EnterHuntmode 0x3000 483#define RCmd_SelectRicrRtsaData 0x4000 484#define RCmd_SelectRicrRxFifostatus 0x5000 485#define RCmd_SelectRicrIntLevel 0x6000 486#define RCmd_SelectRicrdma_level 0x7000 487 488/* 489 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR) 490 */ 491 492#define 
RECEIVE_STATUS BIT5 493#define RECEIVE_DATA BIT4 494#define TRANSMIT_STATUS BIT3 495#define TRANSMIT_DATA BIT2 496#define IO_PIN BIT1 497#define MISC BIT0 498 499 500/* 501 * Receive status Bits in Receive Command/status Register RCSR 502 */ 503 504#define RXSTATUS_SHORT_FRAME BIT8 505#define RXSTATUS_CODE_VIOLATION BIT8 506#define RXSTATUS_EXITED_HUNT BIT7 507#define RXSTATUS_IDLE_RECEIVED BIT6 508#define RXSTATUS_BREAK_RECEIVED BIT5 509#define RXSTATUS_ABORT_RECEIVED BIT5 510#define RXSTATUS_RXBOUND BIT4 511#define RXSTATUS_CRC_ERROR BIT3 512#define RXSTATUS_FRAMING_ERROR BIT3 513#define RXSTATUS_ABORT BIT2 514#define RXSTATUS_PARITY_ERROR BIT2 515#define RXSTATUS_OVERRUN BIT1 516#define RXSTATUS_DATA_AVAILABLE BIT0 517#define RXSTATUS_ALL 0x01f6 518#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) ) 519 520/* 521 * Values for setting transmit idle mode in 522 * Transmit Control/status Register (TCSR) 523 */ 524#define IDLEMODE_FLAGS 0x0000 525#define IDLEMODE_ALT_ONE_ZERO 0x0100 526#define IDLEMODE_ZERO 0x0200 527#define IDLEMODE_ONE 0x0300 528#define IDLEMODE_ALT_MARK_SPACE 0x0500 529#define IDLEMODE_SPACE 0x0600 530#define IDLEMODE_MARK 0x0700 531#define IDLEMODE_MASK 0x0700 532 533/* 534 * IUSC revision identifiers 535 */ 536#define IUSC_SL1660 0x4d44 537#define IUSC_PRE_SL1660 0x4553 538 539/* 540 * Transmit status Bits in Transmit Command/status Register (TCSR) 541 */ 542 543#define TCSR_PRESERVE 0x0F00 544 545#define TCSR_UNDERWAIT BIT11 546#define TXSTATUS_PREAMBLE_SENT BIT7 547#define TXSTATUS_IDLE_SENT BIT6 548#define TXSTATUS_ABORT_SENT BIT5 549#define TXSTATUS_EOF_SENT BIT4 550#define TXSTATUS_EOM_SENT BIT4 551#define TXSTATUS_CRC_SENT BIT3 552#define TXSTATUS_ALL_SENT BIT2 553#define TXSTATUS_UNDERRUN BIT1 554#define TXSTATUS_FIFO_EMPTY BIT0 555#define TXSTATUS_ALL 0x00fa 556#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) ) 557 558 559#define 
MISCSTATUS_RXC_LATCHED BIT15 560#define MISCSTATUS_RXC BIT14 561#define MISCSTATUS_TXC_LATCHED BIT13 562#define MISCSTATUS_TXC BIT12 563#define MISCSTATUS_RI_LATCHED BIT11 564#define MISCSTATUS_RI BIT10 565#define MISCSTATUS_DSR_LATCHED BIT9 566#define MISCSTATUS_DSR BIT8 567#define MISCSTATUS_DCD_LATCHED BIT7 568#define MISCSTATUS_DCD BIT6 569#define MISCSTATUS_CTS_LATCHED BIT5 570#define MISCSTATUS_CTS BIT4 571#define MISCSTATUS_RCC_UNDERRUN BIT3 572#define MISCSTATUS_DPLL_NO_SYNC BIT2 573#define MISCSTATUS_BRG1_ZERO BIT1 574#define MISCSTATUS_BRG0_ZERO BIT0 575 576#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0)) 577#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f)) 578 579#define SICR_RXC_ACTIVE BIT15 580#define SICR_RXC_INACTIVE BIT14 581#define SICR_RXC (BIT15+BIT14) 582#define SICR_TXC_ACTIVE BIT13 583#define SICR_TXC_INACTIVE BIT12 584#define SICR_TXC (BIT13+BIT12) 585#define SICR_RI_ACTIVE BIT11 586#define SICR_RI_INACTIVE BIT10 587#define SICR_RI (BIT11+BIT10) 588#define SICR_DSR_ACTIVE BIT9 589#define SICR_DSR_INACTIVE BIT8 590#define SICR_DSR (BIT9+BIT8) 591#define SICR_DCD_ACTIVE BIT7 592#define SICR_DCD_INACTIVE BIT6 593#define SICR_DCD (BIT7+BIT6) 594#define SICR_CTS_ACTIVE BIT5 595#define SICR_CTS_INACTIVE BIT4 596#define SICR_CTS (BIT5+BIT4) 597#define SICR_RCC_UNDERFLOW BIT3 598#define SICR_DPLL_NO_SYNC BIT2 599#define SICR_BRG1_ZERO BIT1 600#define SICR_BRG0_ZERO BIT0 601 602void usc_DisableMasterIrqBit( struct mgsl_struct *info ); 603void usc_EnableMasterIrqBit( struct mgsl_struct *info ); 604void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 605void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 606void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask ); 607 608#define usc_EnableInterrupts( a, b ) \ 609 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) ) 610 611#define usc_DisableInterrupts( a, b ) \ 612 usc_OutReg( 
(a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) ) 613 614#define usc_EnableMasterIrqBit(a) \ 615 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) ) 616 617#define usc_DisableMasterIrqBit(a) \ 618 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) ) 619 620#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) ) 621 622/* 623 * Transmit status Bits in Transmit Control status Register (TCSR) 624 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) 625 */ 626 627#define TXSTATUS_PREAMBLE_SENT BIT7 628#define TXSTATUS_IDLE_SENT BIT6 629#define TXSTATUS_ABORT_SENT BIT5 630#define TXSTATUS_EOF BIT4 631#define TXSTATUS_CRC_SENT BIT3 632#define TXSTATUS_ALL_SENT BIT2 633#define TXSTATUS_UNDERRUN BIT1 634#define TXSTATUS_FIFO_EMPTY BIT0 635 636#define DICR_MASTER BIT15 637#define DICR_TRANSMIT BIT0 638#define DICR_RECEIVE BIT1 639 640#define usc_EnableDmaInterrupts(a,b) \ 641 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) ) 642 643#define usc_DisableDmaInterrupts(a,b) \ 644 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) ) 645 646#define usc_EnableStatusIrqs(a,b) \ 647 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) ) 648 649#define usc_DisablestatusIrqs(a,b) \ 650 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) ) 651 652/* Transmit status Bits in Transmit Control status Register (TCSR) */ 653/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ 654 655 656#define DISABLE_UNCONDITIONAL 0 657#define DISABLE_END_OF_FRAME 1 658#define ENABLE_UNCONDITIONAL 2 659#define ENABLE_AUTO_CTS 3 660#define ENABLE_AUTO_DCD 3 661#define usc_EnableTransmitter(a,b) \ 662 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) ) 663#define usc_EnableReceiver(a,b) \ 664 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) ) 665 666static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port ); 667static void usc_OutDmaReg( struct 
mgsl_struct *info, u16 Port, u16 Value ); 668static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ); 669 670static u16 usc_InReg( struct mgsl_struct *info, u16 Port ); 671static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value ); 672static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ); 673void usc_RCmd( struct mgsl_struct *info, u16 Cmd ); 674void usc_TCmd( struct mgsl_struct *info, u16 Cmd ); 675 676#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b))) 677#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b)) 678 679#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1)) 680 681static void usc_process_rxoverrun_sync( struct mgsl_struct *info ); 682static void usc_start_receiver( struct mgsl_struct *info ); 683static void usc_stop_receiver( struct mgsl_struct *info ); 684 685static void usc_start_transmitter( struct mgsl_struct *info ); 686static void usc_stop_transmitter( struct mgsl_struct *info ); 687static void usc_set_txidle( struct mgsl_struct *info ); 688static void usc_load_txfifo( struct mgsl_struct *info ); 689 690static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate ); 691static void usc_enable_loopback( struct mgsl_struct *info, int enable ); 692 693static void usc_get_serial_signals( struct mgsl_struct *info ); 694static void usc_set_serial_signals( struct mgsl_struct *info ); 695 696static void usc_reset( struct mgsl_struct *info ); 697 698static void usc_set_sync_mode( struct mgsl_struct *info ); 699static void usc_set_sdlc_mode( struct mgsl_struct *info ); 700static void usc_set_async_mode( struct mgsl_struct *info ); 701static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate ); 702 703static void usc_loopback_frame( struct mgsl_struct *info ); 704 705static void mgsl_tx_timeout(unsigned long context); 706 707 708static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ); 709static void usc_loopmode_insert_request( struct 
mgsl_struct * info );
static int usc_loopmode_active( struct mgsl_struct * info);
static void usc_loopmode_send_done( struct mgsl_struct * info );

static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);

#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
static int  hdlcdev_init(struct mgsl_struct *info);
static void hdlcdev_exit(struct mgsl_struct *info);
#endif

/*
 * Defines a BUS descriptor value for the PCI adapter
 * local bus address ranges.
 *
 * Packs the wait/delay/cycle-count parameters into the fixed bit
 * positions of a 32-bit descriptor word; 0x00400020 supplies the
 * base (always-set) bits. Field widths are hardware-defined, so
 * callers must pass values that fit their bit ranges.
 */

#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly)  << 28) + \
((RdDly)  << 26) + \
((Nwdd)   << 20) + \
((Nwad)   << 15) + \
((Nxda)   << 13) + \
((Nrdd)   << 11) + \
((Nrad)   <<  6) )

static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);

/*
 * Adapter diagnostic routines
 */
static bool mgsl_register_test( struct mgsl_struct *info );
static bool mgsl_irq_test( struct mgsl_struct *info );
static bool mgsl_dma_test( struct mgsl_struct *info );
static bool mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );

/*
 * device and resource management routines
 */
static int mgsl_claim_resources(struct mgsl_struct *info);
static void mgsl_release_resources(struct mgsl_struct *info);
static void mgsl_add_device(struct mgsl_struct *info);
static struct mgsl_struct* mgsl_allocate_device(void);

/*
 * DMA buffer manipulation functions.
760 */ 761static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ); 762static bool mgsl_get_rx_frame( struct mgsl_struct *info ); 763static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info ); 764static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ); 765static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ); 766static int num_free_tx_dma_buffers(struct mgsl_struct *info); 767static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize); 768static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count); 769 770/* 771 * DMA and Shared Memory buffer allocation and formatting 772 */ 773static int mgsl_allocate_dma_buffers(struct mgsl_struct *info); 774static void mgsl_free_dma_buffers(struct mgsl_struct *info); 775static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 776static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 777static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info); 778static void mgsl_free_buffer_list_memory(struct mgsl_struct *info); 779static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info); 780static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info); 781static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info); 782static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info); 783static bool load_next_tx_holding_buffer(struct mgsl_struct *info); 784static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize); 785 786/* 787 * Bottom half interrupt handlers 788 */ 789static void mgsl_bh_handler(struct work_struct *work); 790static void mgsl_bh_receive(struct mgsl_struct *info); 791static void mgsl_bh_transmit(struct mgsl_struct *info); 792static void 
mgsl_bh_status(struct mgsl_struct *info); 793 794/* 795 * Interrupt handler routines and dispatch table. 796 */ 797static void mgsl_isr_null( struct mgsl_struct *info ); 798static void mgsl_isr_transmit_data( struct mgsl_struct *info ); 799static void mgsl_isr_receive_data( struct mgsl_struct *info ); 800static void mgsl_isr_receive_status( struct mgsl_struct *info ); 801static void mgsl_isr_transmit_status( struct mgsl_struct *info ); 802static void mgsl_isr_io_pin( struct mgsl_struct *info ); 803static void mgsl_isr_misc( struct mgsl_struct *info ); 804static void mgsl_isr_receive_dma( struct mgsl_struct *info ); 805static void mgsl_isr_transmit_dma( struct mgsl_struct *info ); 806 807typedef void (*isr_dispatch_func)(struct mgsl_struct *); 808 809static isr_dispatch_func UscIsrTable[7] = 810{ 811 mgsl_isr_null, 812 mgsl_isr_misc, 813 mgsl_isr_io_pin, 814 mgsl_isr_transmit_data, 815 mgsl_isr_transmit_status, 816 mgsl_isr_receive_data, 817 mgsl_isr_receive_status 818}; 819 820/* 821 * ioctl call handlers 822 */ 823static int tiocmget(struct tty_struct *tty); 824static int tiocmset(struct tty_struct *tty, 825 unsigned int set, unsigned int clear); 826static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount 827 __user *user_icount); 828static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params); 829static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params); 830static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode); 831static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode); 832static int mgsl_txenable(struct mgsl_struct * info, int enable); 833static int mgsl_txabort(struct mgsl_struct * info); 834static int mgsl_rxenable(struct mgsl_struct * info, int enable); 835static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask); 836static int mgsl_loopmode_send_done( struct mgsl_struct * info ); 837 838/* set non-zero on successful registration with PCI 
subsystem */ 839static bool pci_registered; 840 841/* 842 * Global linked list of SyncLink devices 843 */ 844static struct mgsl_struct *mgsl_device_list; 845static int mgsl_device_count; 846 847/* 848 * Set this param to non-zero to load eax with the 849 * .text section address and breakpoint on module load. 850 * This is useful for use with gdb and add-symbol-file command. 851 */ 852static bool break_on_load; 853 854/* 855 * Driver major number, defaults to zero to get auto 856 * assigned major number. May be forced as module parameter. 857 */ 858static int ttymajor; 859 860/* 861 * Array of user specified options for ISA adapters. 862 */ 863static int io[MAX_ISA_DEVICES]; 864static int irq[MAX_ISA_DEVICES]; 865static int dma[MAX_ISA_DEVICES]; 866static int debug_level; 867static int maxframe[MAX_TOTAL_DEVICES]; 868static int txdmabufs[MAX_TOTAL_DEVICES]; 869static int txholdbufs[MAX_TOTAL_DEVICES]; 870 871module_param(break_on_load, bool, 0); 872module_param(ttymajor, int, 0); 873module_param_array(io, int, NULL, 0); 874module_param_array(irq, int, NULL, 0); 875module_param_array(dma, int, NULL, 0); 876module_param(debug_level, int, 0); 877module_param_array(maxframe, int, NULL, 0); 878module_param_array(txdmabufs, int, NULL, 0); 879module_param_array(txholdbufs, int, NULL, 0); 880 881static char *driver_name = "SyncLink serial driver"; 882static char *driver_version = "$Revision: 4.38 $"; 883 884static int synclink_init_one (struct pci_dev *dev, 885 const struct pci_device_id *ent); 886static void synclink_remove_one (struct pci_dev *dev); 887 888static struct pci_device_id synclink_pci_tbl[] = { 889 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, }, 890 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, }, 891 { 0, }, /* terminate list */ 892}; 893MODULE_DEVICE_TABLE(pci, synclink_pci_tbl); 894 895MODULE_LICENSE("GPL"); 896 897static struct pci_driver synclink_pci_driver = { 898 .name = "synclink", 899 .id_table = 
synclink_pci_tbl, 900 .probe = synclink_init_one, 901 .remove = synclink_remove_one, 902}; 903 904static struct tty_driver *serial_driver; 905 906/* number of characters left in xmit buffer before we ask for more */ 907#define WAKEUP_CHARS 256 908 909 910static void mgsl_change_params(struct mgsl_struct *info); 911static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout); 912 913/* 914 * 1st function defined in .text section. Calling this function in 915 * init_module() followed by a breakpoint allows a remote debugger 916 * (gdb) to get the .text address for the add-symbol-file command. 917 * This allows remote debugging of dynamically loadable modules. 918 */ 919static void* mgsl_get_text_ptr(void) 920{ 921 return mgsl_get_text_ptr; 922} 923 924static inline int mgsl_paranoia_check(struct mgsl_struct *info, 925 char *name, const char *routine) 926{ 927#ifdef MGSL_PARANOIA_CHECK 928 static const char *badmagic = 929 "Warning: bad magic number for mgsl struct (%s) in %s\n"; 930 static const char *badinfo = 931 "Warning: null mgsl_struct for (%s) in %s\n"; 932 933 if (!info) { 934 printk(badinfo, name, routine); 935 return 1; 936 } 937 if (info->magic != MGSL_MAGIC) { 938 printk(badmagic, name, routine); 939 return 1; 940 } 941#else 942 if (!info) 943 return 1; 944#endif 945 return 0; 946} 947 948/** 949 * line discipline callback wrappers 950 * 951 * The wrappers maintain line discipline references 952 * while calling into the line discipline. 
953 * 954 * ldisc_receive_buf - pass receive data to line discipline 955 */ 956 957static void ldisc_receive_buf(struct tty_struct *tty, 958 const __u8 *data, char *flags, int count) 959{ 960 struct tty_ldisc *ld; 961 if (!tty) 962 return; 963 ld = tty_ldisc_ref(tty); 964 if (ld) { 965 if (ld->ops->receive_buf) 966 ld->ops->receive_buf(tty, data, flags, count); 967 tty_ldisc_deref(ld); 968 } 969} 970 971/* mgsl_stop() throttle (stop) transmitter 972 * 973 * Arguments: tty pointer to tty info structure 974 * Return Value: None 975 */ 976static void mgsl_stop(struct tty_struct *tty) 977{ 978 struct mgsl_struct *info = tty->driver_data; 979 unsigned long flags; 980 981 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop")) 982 return; 983 984 if ( debug_level >= DEBUG_LEVEL_INFO ) 985 printk("mgsl_stop(%s)\n",info->device_name); 986 987 spin_lock_irqsave(&info->irq_spinlock,flags); 988 if (info->tx_enabled) 989 usc_stop_transmitter(info); 990 spin_unlock_irqrestore(&info->irq_spinlock,flags); 991 992} /* end of mgsl_stop() */ 993 994/* mgsl_start() release (start) transmitter 995 * 996 * Arguments: tty pointer to tty info structure 997 * Return Value: None 998 */ 999static void mgsl_start(struct tty_struct *tty) 1000{ 1001 struct mgsl_struct *info = tty->driver_data; 1002 unsigned long flags; 1003 1004 if (mgsl_paranoia_check(info, tty->name, "mgsl_start")) 1005 return; 1006 1007 if ( debug_level >= DEBUG_LEVEL_INFO ) 1008 printk("mgsl_start(%s)\n",info->device_name); 1009 1010 spin_lock_irqsave(&info->irq_spinlock,flags); 1011 if (!info->tx_enabled) 1012 usc_start_transmitter(info); 1013 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1014 1015} /* end of mgsl_start() */ 1016 1017/* 1018 * Bottom half work queue access functions 1019 */ 1020 1021/* mgsl_bh_action() Return next bottom half action to perform. 1022 * Return Value: BH action code or 0 if nothing to do. 
1023 */ 1024static int mgsl_bh_action(struct mgsl_struct *info) 1025{ 1026 unsigned long flags; 1027 int rc = 0; 1028 1029 spin_lock_irqsave(&info->irq_spinlock,flags); 1030 1031 if (info->pending_bh & BH_RECEIVE) { 1032 info->pending_bh &= ~BH_RECEIVE; 1033 rc = BH_RECEIVE; 1034 } else if (info->pending_bh & BH_TRANSMIT) { 1035 info->pending_bh &= ~BH_TRANSMIT; 1036 rc = BH_TRANSMIT; 1037 } else if (info->pending_bh & BH_STATUS) { 1038 info->pending_bh &= ~BH_STATUS; 1039 rc = BH_STATUS; 1040 } 1041 1042 if (!rc) { 1043 /* Mark BH routine as complete */ 1044 info->bh_running = false; 1045 info->bh_requested = false; 1046 } 1047 1048 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1049 1050 return rc; 1051} 1052 1053/* 1054 * Perform bottom half processing of work items queued by ISR. 1055 */ 1056static void mgsl_bh_handler(struct work_struct *work) 1057{ 1058 struct mgsl_struct *info = 1059 container_of(work, struct mgsl_struct, task); 1060 int action; 1061 1062 if (!info) 1063 return; 1064 1065 if ( debug_level >= DEBUG_LEVEL_BH ) 1066 printk( "%s(%d):mgsl_bh_handler(%s) entry\n", 1067 __FILE__,__LINE__,info->device_name); 1068 1069 info->bh_running = true; 1070 1071 while((action = mgsl_bh_action(info)) != 0) { 1072 1073 /* Process work item */ 1074 if ( debug_level >= DEBUG_LEVEL_BH ) 1075 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n", 1076 __FILE__,__LINE__,action); 1077 1078 switch (action) { 1079 1080 case BH_RECEIVE: 1081 mgsl_bh_receive(info); 1082 break; 1083 case BH_TRANSMIT: 1084 mgsl_bh_transmit(info); 1085 break; 1086 case BH_STATUS: 1087 mgsl_bh_status(info); 1088 break; 1089 default: 1090 /* unknown work item ID */ 1091 printk("Unknown work item ID=%08X!\n", action); 1092 break; 1093 } 1094 } 1095 1096 if ( debug_level >= DEBUG_LEVEL_BH ) 1097 printk( "%s(%d):mgsl_bh_handler(%s) exit\n", 1098 __FILE__,__LINE__,info->device_name); 1099} 1100 1101static void mgsl_bh_receive(struct mgsl_struct *info) 1102{ 1103 bool (*get_rx_frame)(struct 
mgsl_struct *info) = 1104 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame); 1105 1106 if ( debug_level >= DEBUG_LEVEL_BH ) 1107 printk( "%s(%d):mgsl_bh_receive(%s)\n", 1108 __FILE__,__LINE__,info->device_name); 1109 1110 do 1111 { 1112 if (info->rx_rcc_underrun) { 1113 unsigned long flags; 1114 spin_lock_irqsave(&info->irq_spinlock,flags); 1115 usc_start_receiver(info); 1116 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1117 return; 1118 } 1119 } while(get_rx_frame(info)); 1120} 1121 1122static void mgsl_bh_transmit(struct mgsl_struct *info) 1123{ 1124 struct tty_struct *tty = info->port.tty; 1125 unsigned long flags; 1126 1127 if ( debug_level >= DEBUG_LEVEL_BH ) 1128 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n", 1129 __FILE__,__LINE__,info->device_name); 1130 1131 if (tty) 1132 tty_wakeup(tty); 1133 1134 /* if transmitter idle and loopmode_send_done_requested 1135 * then start echoing RxD to TxD 1136 */ 1137 spin_lock_irqsave(&info->irq_spinlock,flags); 1138 if ( !info->tx_active && info->loopmode_send_done_requested ) 1139 usc_loopmode_send_done( info ); 1140 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1141} 1142 1143static void mgsl_bh_status(struct mgsl_struct *info) 1144{ 1145 if ( debug_level >= DEBUG_LEVEL_BH ) 1146 printk( "%s(%d):mgsl_bh_status() entry on %s\n", 1147 __FILE__,__LINE__,info->device_name); 1148 1149 info->ri_chkcount = 0; 1150 info->dsr_chkcount = 0; 1151 info->dcd_chkcount = 0; 1152 info->cts_chkcount = 0; 1153} 1154 1155/* mgsl_isr_receive_status() 1156 * 1157 * Service a receive status interrupt. The type of status 1158 * interrupt is indicated by the state of the RCSR. 1159 * This is only used for HDLC mode. 
1160 * 1161 * Arguments: info pointer to device instance data 1162 * Return Value: None 1163 */ 1164static void mgsl_isr_receive_status( struct mgsl_struct *info ) 1165{ 1166 u16 status = usc_InReg( info, RCSR ); 1167 1168 if ( debug_level >= DEBUG_LEVEL_ISR ) 1169 printk("%s(%d):mgsl_isr_receive_status status=%04X\n", 1170 __FILE__,__LINE__,status); 1171 1172 if ( (status & RXSTATUS_ABORT_RECEIVED) && 1173 info->loopmode_insert_requested && 1174 usc_loopmode_active(info) ) 1175 { 1176 ++info->icount.rxabort; 1177 info->loopmode_insert_requested = false; 1178 1179 /* clear CMR:13 to start echoing RxD to TxD */ 1180 info->cmr_value &= ~BIT13; 1181 usc_OutReg(info, CMR, info->cmr_value); 1182 1183 /* disable received abort irq (no longer required) */ 1184 usc_OutReg(info, RICR, 1185 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED)); 1186 } 1187 1188 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) { 1189 if (status & RXSTATUS_EXITED_HUNT) 1190 info->icount.exithunt++; 1191 if (status & RXSTATUS_IDLE_RECEIVED) 1192 info->icount.rxidle++; 1193 wake_up_interruptible(&info->event_wait_q); 1194 } 1195 1196 if (status & RXSTATUS_OVERRUN){ 1197 info->icount.rxover++; 1198 usc_process_rxoverrun_sync( info ); 1199 } 1200 1201 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 1202 usc_UnlatchRxstatusBits( info, status ); 1203 1204} /* end of mgsl_isr_receive_status() */ 1205 1206/* mgsl_isr_transmit_status() 1207 * 1208 * Service a transmit status interrupt 1209 * HDLC mode :end of transmit frame 1210 * Async mode:all data is sent 1211 * transmit status is indicated by bits in the TCSR. 
1212 * 1213 * Arguments: info pointer to device instance data 1214 * Return Value: None 1215 */ 1216static void mgsl_isr_transmit_status( struct mgsl_struct *info ) 1217{ 1218 u16 status = usc_InReg( info, TCSR ); 1219 1220 if ( debug_level >= DEBUG_LEVEL_ISR ) 1221 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n", 1222 __FILE__,__LINE__,status); 1223 1224 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 1225 usc_UnlatchTxstatusBits( info, status ); 1226 1227 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) ) 1228 { 1229 /* finished sending HDLC abort. This may leave */ 1230 /* the TxFifo with data from the aborted frame */ 1231 /* so purge the TxFifo. Also shutdown the DMA */ 1232 /* channel in case there is data remaining in */ 1233 /* the DMA buffer */ 1234 usc_DmaCmd( info, DmaCmd_ResetTxChannel ); 1235 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 1236 } 1237 1238 if ( status & TXSTATUS_EOF_SENT ) 1239 info->icount.txok++; 1240 else if ( status & TXSTATUS_UNDERRUN ) 1241 info->icount.txunder++; 1242 else if ( status & TXSTATUS_ABORT_SENT ) 1243 info->icount.txabort++; 1244 else 1245 info->icount.txunder++; 1246 1247 info->tx_active = false; 1248 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 1249 del_timer(&info->tx_timer); 1250 1251 if ( info->drop_rts_on_tx_done ) { 1252 usc_get_serial_signals( info ); 1253 if ( info->serial_signals & SerialSignal_RTS ) { 1254 info->serial_signals &= ~SerialSignal_RTS; 1255 usc_set_serial_signals( info ); 1256 } 1257 info->drop_rts_on_tx_done = false; 1258 } 1259 1260#if SYNCLINK_GENERIC_HDLC 1261 if (info->netcount) 1262 hdlcdev_tx_done(info); 1263 else 1264#endif 1265 { 1266 if (info->port.tty->stopped || info->port.tty->hw_stopped) { 1267 usc_stop_transmitter(info); 1268 return; 1269 } 1270 info->pending_bh |= BH_TRANSMIT; 1271 } 1272 1273} /* end of mgsl_isr_transmit_status() */ 1274 1275/* mgsl_isr_io_pin() 1276 * 1277 * Service an Input/Output pin interrupt. 
The type of 1278 * interrupt is indicated by bits in the MISR 1279 * 1280 * Arguments: info pointer to device instance data 1281 * Return Value: None 1282 */ 1283static void mgsl_isr_io_pin( struct mgsl_struct *info ) 1284{ 1285 struct mgsl_icount *icount; 1286 u16 status = usc_InReg( info, MISR ); 1287 1288 if ( debug_level >= DEBUG_LEVEL_ISR ) 1289 printk("%s(%d):mgsl_isr_io_pin status=%04X\n", 1290 __FILE__,__LINE__,status); 1291 1292 usc_ClearIrqPendingBits( info, IO_PIN ); 1293 usc_UnlatchIostatusBits( info, status ); 1294 1295 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED | 1296 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) { 1297 icount = &info->icount; 1298 /* update input line counters */ 1299 if (status & MISCSTATUS_RI_LATCHED) { 1300 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1301 usc_DisablestatusIrqs(info,SICR_RI); 1302 icount->rng++; 1303 if ( status & MISCSTATUS_RI ) 1304 info->input_signal_events.ri_up++; 1305 else 1306 info->input_signal_events.ri_down++; 1307 } 1308 if (status & MISCSTATUS_DSR_LATCHED) { 1309 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1310 usc_DisablestatusIrqs(info,SICR_DSR); 1311 icount->dsr++; 1312 if ( status & MISCSTATUS_DSR ) 1313 info->input_signal_events.dsr_up++; 1314 else 1315 info->input_signal_events.dsr_down++; 1316 } 1317 if (status & MISCSTATUS_DCD_LATCHED) { 1318 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1319 usc_DisablestatusIrqs(info,SICR_DCD); 1320 icount->dcd++; 1321 if (status & MISCSTATUS_DCD) { 1322 info->input_signal_events.dcd_up++; 1323 } else 1324 info->input_signal_events.dcd_down++; 1325#if SYNCLINK_GENERIC_HDLC 1326 if (info->netcount) { 1327 if (status & MISCSTATUS_DCD) 1328 netif_carrier_on(info->netdev); 1329 else 1330 netif_carrier_off(info->netdev); 1331 } 1332#endif 1333 } 1334 if (status & MISCSTATUS_CTS_LATCHED) 1335 { 1336 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1337 usc_DisablestatusIrqs(info,SICR_CTS); 1338 icount->cts++; 1339 
if ( status & MISCSTATUS_CTS ) 1340 info->input_signal_events.cts_up++; 1341 else 1342 info->input_signal_events.cts_down++; 1343 } 1344 wake_up_interruptible(&info->status_event_wait_q); 1345 wake_up_interruptible(&info->event_wait_q); 1346 1347 if ( (info->port.flags & ASYNC_CHECK_CD) && 1348 (status & MISCSTATUS_DCD_LATCHED) ) { 1349 if ( debug_level >= DEBUG_LEVEL_ISR ) 1350 printk("%s CD now %s...", info->device_name, 1351 (status & MISCSTATUS_DCD) ? "on" : "off"); 1352 if (status & MISCSTATUS_DCD) 1353 wake_up_interruptible(&info->port.open_wait); 1354 else { 1355 if ( debug_level >= DEBUG_LEVEL_ISR ) 1356 printk("doing serial hangup..."); 1357 if (info->port.tty) 1358 tty_hangup(info->port.tty); 1359 } 1360 } 1361 1362 if (tty_port_cts_enabled(&info->port) && 1363 (status & MISCSTATUS_CTS_LATCHED) ) { 1364 if (info->port.tty->hw_stopped) { 1365 if (status & MISCSTATUS_CTS) { 1366 if ( debug_level >= DEBUG_LEVEL_ISR ) 1367 printk("CTS tx start..."); 1368 if (info->port.tty) 1369 info->port.tty->hw_stopped = 0; 1370 usc_start_transmitter(info); 1371 info->pending_bh |= BH_TRANSMIT; 1372 return; 1373 } 1374 } else { 1375 if (!(status & MISCSTATUS_CTS)) { 1376 if ( debug_level >= DEBUG_LEVEL_ISR ) 1377 printk("CTS tx stop..."); 1378 if (info->port.tty) 1379 info->port.tty->hw_stopped = 1; 1380 usc_stop_transmitter(info); 1381 } 1382 } 1383 } 1384 } 1385 1386 info->pending_bh |= BH_STATUS; 1387 1388 /* for diagnostics set IRQ flag */ 1389 if ( status & MISCSTATUS_TXC_LATCHED ){ 1390 usc_OutReg( info, SICR, 1391 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) ); 1392 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED ); 1393 info->irq_occurred = true; 1394 } 1395 1396} /* end of mgsl_isr_io_pin() */ 1397 1398/* mgsl_isr_transmit_data() 1399 * 1400 * Service a transmit data interrupt (async mode only). 
1401 * 1402 * Arguments: info pointer to device instance data 1403 * Return Value: None 1404 */ 1405static void mgsl_isr_transmit_data( struct mgsl_struct *info ) 1406{ 1407 if ( debug_level >= DEBUG_LEVEL_ISR ) 1408 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n", 1409 __FILE__,__LINE__,info->xmit_cnt); 1410 1411 usc_ClearIrqPendingBits( info, TRANSMIT_DATA ); 1412 1413 if (info->port.tty->stopped || info->port.tty->hw_stopped) { 1414 usc_stop_transmitter(info); 1415 return; 1416 } 1417 1418 if ( info->xmit_cnt ) 1419 usc_load_txfifo( info ); 1420 else 1421 info->tx_active = false; 1422 1423 if (info->xmit_cnt < WAKEUP_CHARS) 1424 info->pending_bh |= BH_TRANSMIT; 1425 1426} /* end of mgsl_isr_transmit_data() */ 1427 1428/* mgsl_isr_receive_data() 1429 * 1430 * Service a receive data interrupt. This occurs 1431 * when operating in asynchronous interrupt transfer mode. 1432 * The receive data FIFO is flushed to the receive data buffers. 1433 * 1434 * Arguments: info pointer to device instance data 1435 * Return Value: None 1436 */ 1437static void mgsl_isr_receive_data( struct mgsl_struct *info ) 1438{ 1439 int Fifocount; 1440 u16 status; 1441 int work = 0; 1442 unsigned char DataByte; 1443 struct tty_struct *tty = info->port.tty; 1444 struct mgsl_icount *icount = &info->icount; 1445 1446 if ( debug_level >= DEBUG_LEVEL_ISR ) 1447 printk("%s(%d):mgsl_isr_receive_data\n", 1448 __FILE__,__LINE__); 1449 1450 usc_ClearIrqPendingBits( info, RECEIVE_DATA ); 1451 1452 /* select FIFO status for RICR readback */ 1453 usc_RCmd( info, RCmd_SelectRicrRxFifostatus ); 1454 1455 /* clear the Wordstatus bit so that status readback */ 1456 /* only reflects the status of this byte */ 1457 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 )); 1458 1459 /* flush the receive FIFO */ 1460 1461 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) { 1462 int flag; 1463 1464 /* read one byte from RxFIFO */ 1465 outw( (inw(info->io_base + CCAR) & 0x0780) | 
(RDR+LSBONLY), 1466 info->io_base + CCAR ); 1467 DataByte = inb( info->io_base + CCAR ); 1468 1469 /* get the status of the received byte */ 1470 status = usc_InReg(info, RCSR); 1471 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1472 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) 1473 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); 1474 1475 icount->rx++; 1476 1477 flag = 0; 1478 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1479 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) { 1480 printk("rxerr=%04X\n",status); 1481 /* update error statistics */ 1482 if ( status & RXSTATUS_BREAK_RECEIVED ) { 1483 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR); 1484 icount->brk++; 1485 } else if (status & RXSTATUS_PARITY_ERROR) 1486 icount->parity++; 1487 else if (status & RXSTATUS_FRAMING_ERROR) 1488 icount->frame++; 1489 else if (status & RXSTATUS_OVERRUN) { 1490 /* must issue purge fifo cmd before */ 1491 /* 16C32 accepts more receive chars */ 1492 usc_RTCmd(info,RTCmd_PurgeRxFifo); 1493 icount->overrun++; 1494 } 1495 1496 /* discard char if tty control flags say so */ 1497 if (status & info->ignore_status_mask) 1498 continue; 1499 1500 status &= info->read_status_mask; 1501 1502 if (status & RXSTATUS_BREAK_RECEIVED) { 1503 flag = TTY_BREAK; 1504 if (info->port.flags & ASYNC_SAK) 1505 do_SAK(tty); 1506 } else if (status & RXSTATUS_PARITY_ERROR) 1507 flag = TTY_PARITY; 1508 else if (status & RXSTATUS_FRAMING_ERROR) 1509 flag = TTY_FRAME; 1510 } /* end of if (error) */ 1511 tty_insert_flip_char(tty, DataByte, flag); 1512 if (status & RXSTATUS_OVERRUN) { 1513 /* Overrun is special, since it's 1514 * reported immediately, and doesn't 1515 * affect the current character 1516 */ 1517 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1518 } 1519 } 1520 1521 if ( debug_level >= DEBUG_LEVEL_ISR ) { 1522 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", 1523 __FILE__,__LINE__,icount->rx,icount->brk, 1524 
icount->parity,icount->frame,icount->overrun); 1525 } 1526 1527 if(work) 1528 tty_flip_buffer_push(tty); 1529} 1530 1531/* mgsl_isr_misc() 1532 * 1533 * Service a miscellaneous interrupt source. 1534 * 1535 * Arguments: info pointer to device extension (instance data) 1536 * Return Value: None 1537 */ 1538static void mgsl_isr_misc( struct mgsl_struct *info ) 1539{ 1540 u16 status = usc_InReg( info, MISR ); 1541 1542 if ( debug_level >= DEBUG_LEVEL_ISR ) 1543 printk("%s(%d):mgsl_isr_misc status=%04X\n", 1544 __FILE__,__LINE__,status); 1545 1546 if ((status & MISCSTATUS_RCC_UNDERRUN) && 1547 (info->params.mode == MGSL_MODE_HDLC)) { 1548 1549 /* turn off receiver and rx DMA */ 1550 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 1551 usc_DmaCmd(info, DmaCmd_ResetRxChannel); 1552 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); 1553 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); 1554 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS); 1555 1556 /* schedule BH handler to restart receiver */ 1557 info->pending_bh |= BH_RECEIVE; 1558 info->rx_rcc_underrun = true; 1559 } 1560 1561 usc_ClearIrqPendingBits( info, MISC ); 1562 usc_UnlatchMiscstatusBits( info, status ); 1563 1564} /* end of mgsl_isr_misc() */ 1565 1566/* mgsl_isr_null() 1567 * 1568 * Services undefined interrupt vectors from the 1569 * USC. (hence this function SHOULD never be called) 1570 * 1571 * Arguments: info pointer to device extension (instance data) 1572 * Return Value: None 1573 */ 1574static void mgsl_isr_null( struct mgsl_struct *info ) 1575{ 1576 1577} /* end of mgsl_isr_null() */ 1578 1579/* mgsl_isr_receive_dma() 1580 * 1581 * Service a receive DMA channel interrupt. 1582 * For this driver there are two sources of receive DMA interrupts 1583 * as identified in the Receive DMA mode Register (RDMR): 1584 * 1585 * BIT3 EOA/EOL End of List, all receive buffers in receive 1586 * buffer list have been filled (no more free buffers 1587 * available). The DMA controller has shut down. 
1588 * 1589 * BIT2 EOB End of Buffer. This interrupt occurs when a receive 1590 * DMA buffer is terminated in response to completion 1591 * of a good frame or a frame with errors. The status 1592 * of the frame is stored in the buffer entry in the 1593 * list of receive buffer entries. 1594 * 1595 * Arguments: info pointer to device instance data 1596 * Return Value: None 1597 */ 1598static void mgsl_isr_receive_dma( struct mgsl_struct *info ) 1599{ 1600 u16 status; 1601 1602 /* clear interrupt pending and IUS bit for Rx DMA IRQ */ 1603 usc_OutDmaReg( info, CDIR, BIT9+BIT1 ); 1604 1605 /* Read the receive DMA status to identify interrupt type. */ 1606 /* This also clears the status bits. */ 1607 status = usc_InDmaReg( info, RDMR ); 1608 1609 if ( debug_level >= DEBUG_LEVEL_ISR ) 1610 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n", 1611 __FILE__,__LINE__,info->device_name,status); 1612 1613 info->pending_bh |= BH_RECEIVE; 1614 1615 if ( status & BIT3 ) { 1616 info->rx_overflow = true; 1617 info->icount.buf_overrun++; 1618 } 1619 1620} /* end of mgsl_isr_receive_dma() */ 1621 1622/* mgsl_isr_transmit_dma() 1623 * 1624 * This function services a transmit DMA channel interrupt. 1625 * 1626 * For this driver there is one source of transmit DMA interrupts 1627 * as identified in the Transmit DMA Mode Register (TDMR): 1628 * 1629 * BIT2 EOB End of Buffer. This interrupt occurs when a 1630 * transmit DMA buffer has been emptied. 1631 * 1632 * The driver maintains enough transmit DMA buffers to hold at least 1633 * one max frame size transmit frame. When operating in a buffered 1634 * transmit mode, there may be enough transmit DMA buffers to hold at 1635 * least two or more max frame size frames. On an EOB condition, 1636 * determine if there are any queued transmit buffers and copy into 1637 * transmit DMA buffers if we have room. 
1638 * 1639 * Arguments: info pointer to device instance data 1640 * Return Value: None 1641 */ 1642static void mgsl_isr_transmit_dma( struct mgsl_struct *info ) 1643{ 1644 u16 status; 1645 1646 /* clear interrupt pending and IUS bit for Tx DMA IRQ */ 1647 usc_OutDmaReg(info, CDIR, BIT8+BIT0 ); 1648 1649 /* Read the transmit DMA status to identify interrupt type. */ 1650 /* This also clears the status bits. */ 1651 1652 status = usc_InDmaReg( info, TDMR ); 1653 1654 if ( debug_level >= DEBUG_LEVEL_ISR ) 1655 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n", 1656 __FILE__,__LINE__,info->device_name,status); 1657 1658 if ( status & BIT2 ) { 1659 --info->tx_dma_buffers_used; 1660 1661 /* if there are transmit frames queued, 1662 * try to load the next one 1663 */ 1664 if ( load_next_tx_holding_buffer(info) ) { 1665 /* if call returns non-zero value, we have 1666 * at least one free tx holding buffer 1667 */ 1668 info->pending_bh |= BH_TRANSMIT; 1669 } 1670 } 1671 1672} /* end of mgsl_isr_transmit_dma() */ 1673 1674/* mgsl_interrupt() 1675 * 1676 * Interrupt service routine entry point. 1677 * 1678 * Arguments: 1679 * 1680 * irq interrupt number that caused interrupt 1681 * dev_id device ID supplied during interrupt registration 1682 * 1683 * Return Value: None 1684 */ 1685static irqreturn_t mgsl_interrupt(int dummy, void *dev_id) 1686{ 1687 struct mgsl_struct *info = dev_id; 1688 u16 UscVector; 1689 u16 DmaVector; 1690 1691 if ( debug_level >= DEBUG_LEVEL_ISR ) 1692 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n", 1693 __FILE__, __LINE__, info->irq_level); 1694 1695 spin_lock(&info->irq_spinlock); 1696 1697 for(;;) { 1698 /* Read the interrupt vectors from hardware. 
*/ 1699 UscVector = usc_InReg(info, IVR) >> 9; 1700 DmaVector = usc_InDmaReg(info, DIVR); 1701 1702 if ( debug_level >= DEBUG_LEVEL_ISR ) 1703 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n", 1704 __FILE__,__LINE__,info->device_name,UscVector,DmaVector); 1705 1706 if ( !UscVector && !DmaVector ) 1707 break; 1708 1709 /* Dispatch interrupt vector */ 1710 if ( UscVector ) 1711 (*UscIsrTable[UscVector])(info); 1712 else if ( (DmaVector&(BIT10|BIT9)) == BIT10) 1713 mgsl_isr_transmit_dma(info); 1714 else 1715 mgsl_isr_receive_dma(info); 1716 1717 if ( info->isr_overflow ) { 1718 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n", 1719 __FILE__, __LINE__, info->device_name, info->irq_level); 1720 usc_DisableMasterIrqBit(info); 1721 usc_DisableDmaInterrupts(info,DICR_MASTER); 1722 break; 1723 } 1724 } 1725 1726 /* Request bottom half processing if there's something 1727 * for it to do and the bh is not already running 1728 */ 1729 1730 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) { 1731 if ( debug_level >= DEBUG_LEVEL_ISR ) 1732 printk("%s(%d):%s queueing bh task.\n", 1733 __FILE__,__LINE__,info->device_name); 1734 schedule_work(&info->task); 1735 info->bh_requested = true; 1736 } 1737 1738 spin_unlock(&info->irq_spinlock); 1739 1740 if ( debug_level >= DEBUG_LEVEL_ISR ) 1741 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n", 1742 __FILE__, __LINE__, info->irq_level); 1743 1744 return IRQ_HANDLED; 1745} /* end of mgsl_interrupt() */ 1746 1747/* startup() 1748 * 1749 * Initialize and start device. 
1750 * 1751 * Arguments: info pointer to device instance data 1752 * Return Value: 0 if success, otherwise error code 1753 */ 1754static int startup(struct mgsl_struct * info) 1755{ 1756 int retval = 0; 1757 1758 if ( debug_level >= DEBUG_LEVEL_INFO ) 1759 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name); 1760 1761 if (info->port.flags & ASYNC_INITIALIZED) 1762 return 0; 1763 1764 if (!info->xmit_buf) { 1765 /* allocate a page of memory for a transmit buffer */ 1766 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); 1767 if (!info->xmit_buf) { 1768 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", 1769 __FILE__,__LINE__,info->device_name); 1770 return -ENOMEM; 1771 } 1772 } 1773 1774 info->pending_bh = 0; 1775 1776 memset(&info->icount, 0, sizeof(info->icount)); 1777 1778 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info); 1779 1780 /* Allocate and claim adapter resources */ 1781 retval = mgsl_claim_resources(info); 1782 1783 /* perform existence check and diagnostics */ 1784 if ( !retval ) 1785 retval = mgsl_adapter_test(info); 1786 1787 if ( retval ) { 1788 if (capable(CAP_SYS_ADMIN) && info->port.tty) 1789 set_bit(TTY_IO_ERROR, &info->port.tty->flags); 1790 mgsl_release_resources(info); 1791 return retval; 1792 } 1793 1794 /* program hardware for current parameters */ 1795 mgsl_change_params(info); 1796 1797 if (info->port.tty) 1798 clear_bit(TTY_IO_ERROR, &info->port.tty->flags); 1799 1800 info->port.flags |= ASYNC_INITIALIZED; 1801 1802 return 0; 1803 1804} /* end of startup() */ 1805 1806/* shutdown() 1807 * 1808 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware 1809 * 1810 * Arguments: info pointer to device instance data 1811 * Return Value: None 1812 */ 1813static void shutdown(struct mgsl_struct * info) 1814{ 1815 unsigned long flags; 1816 1817 if (!(info->port.flags & ASYNC_INITIALIZED)) 1818 return; 1819 1820 if (debug_level >= DEBUG_LEVEL_INFO) 1821 
printk("%s(%d):mgsl_shutdown(%s)\n", 1822 __FILE__,__LINE__, info->device_name ); 1823 1824 /* clear status wait queue because status changes */ 1825 /* can't happen after shutting down the hardware */ 1826 wake_up_interruptible(&info->status_event_wait_q); 1827 wake_up_interruptible(&info->event_wait_q); 1828 1829 del_timer_sync(&info->tx_timer); 1830 1831 if (info->xmit_buf) { 1832 free_page((unsigned long) info->xmit_buf); 1833 info->xmit_buf = NULL; 1834 } 1835 1836 spin_lock_irqsave(&info->irq_spinlock,flags); 1837 usc_DisableMasterIrqBit(info); 1838 usc_stop_receiver(info); 1839 usc_stop_transmitter(info); 1840 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS + 1841 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC ); 1842 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE); 1843 1844 /* Disable DMAEN (Port 7, Bit 14) */ 1845 /* This disconnects the DMA request signal from the ISA bus */ 1846 /* on the ISA adapter. This has no effect for the PCI adapter */ 1847 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14)); 1848 1849 /* Disable INTEN (Port 6, Bit12) */ 1850 /* This disconnects the IRQ request signal to the ISA bus */ 1851 /* on the ISA adapter. 
This has no effect for the PCI adapter */ 1852 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12)); 1853 1854 if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) { 1855 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS); 1856 usc_set_serial_signals(info); 1857 } 1858 1859 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1860 1861 mgsl_release_resources(info); 1862 1863 if (info->port.tty) 1864 set_bit(TTY_IO_ERROR, &info->port.tty->flags); 1865 1866 info->port.flags &= ~ASYNC_INITIALIZED; 1867 1868} /* end of shutdown() */ 1869 1870static void mgsl_program_hw(struct mgsl_struct *info) 1871{ 1872 unsigned long flags; 1873 1874 spin_lock_irqsave(&info->irq_spinlock,flags); 1875 1876 usc_stop_receiver(info); 1877 usc_stop_transmitter(info); 1878 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 1879 1880 if (info->params.mode == MGSL_MODE_HDLC || 1881 info->params.mode == MGSL_MODE_RAW || 1882 info->netcount) 1883 usc_set_sync_mode(info); 1884 else 1885 usc_set_async_mode(info); 1886 1887 usc_set_serial_signals(info); 1888 1889 info->dcd_chkcount = 0; 1890 info->cts_chkcount = 0; 1891 info->ri_chkcount = 0; 1892 info->dsr_chkcount = 0; 1893 1894 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI); 1895 usc_EnableInterrupts(info, IO_PIN); 1896 usc_get_serial_signals(info); 1897 1898 if (info->netcount || info->port.tty->termios.c_cflag & CREAD) 1899 usc_start_receiver(info); 1900 1901 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1902} 1903 1904/* Reconfigure adapter based on new parameters 1905 */ 1906static void mgsl_change_params(struct mgsl_struct *info) 1907{ 1908 unsigned cflag; 1909 int bits_per_char; 1910 1911 if (!info->port.tty) 1912 return; 1913 1914 if (debug_level >= DEBUG_LEVEL_INFO) 1915 printk("%s(%d):mgsl_change_params(%s)\n", 1916 __FILE__,__LINE__, info->device_name ); 1917 1918 cflag = info->port.tty->termios.c_cflag; 1919 1920 /* if B0 rate (hangup) specified then negate DTR and RTS */ 
1921 /* otherwise assert DTR and RTS */ 1922 if (cflag & CBAUD) 1923 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 1924 else 1925 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); 1926 1927 /* byte size and parity */ 1928 1929 switch (cflag & CSIZE) { 1930 case CS5: info->params.data_bits = 5; break; 1931 case CS6: info->params.data_bits = 6; break; 1932 case CS7: info->params.data_bits = 7; break; 1933 case CS8: info->params.data_bits = 8; break; 1934 /* Never happens, but GCC is too dumb to figure it out */ 1935 default: info->params.data_bits = 7; break; 1936 } 1937 1938 if (cflag & CSTOPB) 1939 info->params.stop_bits = 2; 1940 else 1941 info->params.stop_bits = 1; 1942 1943 info->params.parity = ASYNC_PARITY_NONE; 1944 if (cflag & PARENB) { 1945 if (cflag & PARODD) 1946 info->params.parity = ASYNC_PARITY_ODD; 1947 else 1948 info->params.parity = ASYNC_PARITY_EVEN; 1949#ifdef CMSPAR 1950 if (cflag & CMSPAR) 1951 info->params.parity = ASYNC_PARITY_SPACE; 1952#endif 1953 } 1954 1955 /* calculate number of jiffies to transmit a full 1956 * FIFO (32 bytes) at specified data rate 1957 */ 1958 bits_per_char = info->params.data_bits + 1959 info->params.stop_bits + 1; 1960 1961 /* if port data rate is set to 460800 or less then 1962 * allow tty settings to override, otherwise keep the 1963 * current data rate. 
1964 */ 1965 if (info->params.data_rate <= 460800) 1966 info->params.data_rate = tty_get_baud_rate(info->port.tty); 1967 1968 if ( info->params.data_rate ) { 1969 info->timeout = (32*HZ*bits_per_char) / 1970 info->params.data_rate; 1971 } 1972 info->timeout += HZ/50; /* Add .02 seconds of slop */ 1973 1974 if (cflag & CRTSCTS) 1975 info->port.flags |= ASYNC_CTS_FLOW; 1976 else 1977 info->port.flags &= ~ASYNC_CTS_FLOW; 1978 1979 if (cflag & CLOCAL) 1980 info->port.flags &= ~ASYNC_CHECK_CD; 1981 else 1982 info->port.flags |= ASYNC_CHECK_CD; 1983 1984 /* process tty input control flags */ 1985 1986 info->read_status_mask = RXSTATUS_OVERRUN; 1987 if (I_INPCK(info->port.tty)) 1988 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; 1989 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) 1990 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED; 1991 1992 if (I_IGNPAR(info->port.tty)) 1993 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; 1994 if (I_IGNBRK(info->port.tty)) { 1995 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED; 1996 /* If ignoring parity and break indicators, ignore 1997 * overruns too. (For real raw support). 1998 */ 1999 if (I_IGNPAR(info->port.tty)) 2000 info->ignore_status_mask |= RXSTATUS_OVERRUN; 2001 } 2002 2003 mgsl_program_hw(info); 2004 2005} /* end of mgsl_change_params() */ 2006 2007/* mgsl_put_char() 2008 * 2009 * Add a character to the transmit buffer. 
2010 * 2011 * Arguments: tty pointer to tty information structure 2012 * ch character to add to transmit buffer 2013 * 2014 * Return Value: None 2015 */ 2016static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) 2017{ 2018 struct mgsl_struct *info = tty->driver_data; 2019 unsigned long flags; 2020 int ret = 0; 2021 2022 if (debug_level >= DEBUG_LEVEL_INFO) { 2023 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n", 2024 __FILE__, __LINE__, ch, info->device_name); 2025 } 2026 2027 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) 2028 return 0; 2029 2030 if (!info->xmit_buf) 2031 return 0; 2032 2033 spin_lock_irqsave(&info->irq_spinlock, flags); 2034 2035 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) { 2036 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { 2037 info->xmit_buf[info->xmit_head++] = ch; 2038 info->xmit_head &= SERIAL_XMIT_SIZE-1; 2039 info->xmit_cnt++; 2040 ret = 1; 2041 } 2042 } 2043 spin_unlock_irqrestore(&info->irq_spinlock, flags); 2044 return ret; 2045 2046} /* end of mgsl_put_char() */ 2047 2048/* mgsl_flush_chars() 2049 * 2050 * Enable transmitter so remaining characters in the 2051 * transmit buffer are sent. 
 *
 * Arguments:		tty	pointer to tty information structure
 * Return Value:	None
 */
static void mgsl_flush_chars(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
			__FILE__,__LINE__,info->device_name,info->xmit_cnt);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
		return;

	/* nothing buffered, flow controlled, or no buffer: nothing to do */
	if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
	    !info->xmit_buf)
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
			__FILE__,__LINE__,info->device_name );

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (!info->tx_active) {
		if ( (info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
			/* operating in synchronous (frame oriented) mode */
			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				info->xmit_buf,info->xmit_cnt);
		}
		usc_start_transmitter(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_flush_chars() */

/* mgsl_write()
 *
 * Send a block of data.  In sync (HDLC/raw) mode each call is one
 * complete frame; in async mode data is appended to the circular
 * xmit buffer.
 *
 * Arguments:
 *
 *	tty	pointer to tty information structure
 *	buf	pointer to buffer containing send data
 *	count	size of send data in bytes
 *
 * Return Value:	number of characters written
 */
static int mgsl_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	int	c, ret = 0;
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) count=%d\n",
			__FILE__,__LINE__,info->device_name,count);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
		goto cleanup;

	if (!info->xmit_buf)
		goto cleanup;

	if ( info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if (info->tx_active) {

			/* HDLC mode accepts only one frame at a time */
			if ( info->params.mode == MGSL_MODE_HDLC ) {
				ret = 0;
				goto cleanup;
			}
			/* transmitter is actively sending data -
			 * if we have multiple transmit dma and
			 * holding buffers, attempt to queue this
			 * frame for transmission at a later time.
			 */
			if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
				/* no tx holding buffers available */
				ret = 0;
				goto cleanup;
			}

			/* queue transmit frame request */
			ret = count;
			save_tx_buffer_request(info,buf,count);

			/* if we have sufficient tx dma buffers,
			 * load the next buffered tx request
			 */
			spin_lock_irqsave(&info->irq_spinlock,flags);
			load_next_tx_holding_buffer(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			goto cleanup;
		}

		/* if operating in HDLC LoopMode and the adapter  */
		/* has yet to be inserted into the loop, we can't */
		/* transmit                                       */

		if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
			!usc_loopmode_active(info) )
		{
			ret = 0;
			goto cleanup;
		}

		if ( info->xmit_cnt ) {
			/* Send accumulated data from send_char() calls */
			/* as frame and wait before accepting more data. */
			ret = 0;

			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				info->xmit_buf,info->xmit_cnt);
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
					__FILE__,__LINE__,info->device_name);
		} else {
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
					__FILE__,__LINE__,info->device_name);
			ret = count;
			info->xmit_cnt = count;
			mgsl_load_tx_dma_buffer(info,buf,count);
		}
	} else {
		/* async mode: copy into circular buffer under lock,
		 * limited by free space and wrap point per iteration */
		while (1) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			c = min_t(int, count,
				min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
				    SERIAL_XMIT_SIZE - info->xmit_head));
			if (c <= 0) {
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
				break;
			}
			memcpy(info->xmit_buf + info->xmit_head, buf, c);
			info->xmit_head = ((info->xmit_head + c) &
					   (SERIAL_XMIT_SIZE-1));
			info->xmit_cnt += c;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			buf += c;
			count -= c;
			ret += c;
		}
	}

	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_active)
			usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
cleanup:
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);

	return ret;

}	/* end of mgsl_write() */

/* mgsl_write_room()
 *
 * Return the count of free bytes in transmit buffer.
 * In sync mode reports either a full frame of room or none.
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	free byte count
 */
static int mgsl_write_room(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	int	ret;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
		return 0;
	ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
	if (ret < 0)
		ret = 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_write_room(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name,ret );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if ( info->tx_active )
			return 0;
		else
			return HDLC_MAX_FRAME_SIZE;
	}

	return ret;

}	/* end of mgsl_write_room() */

/* mgsl_chars_in_buffer()
 *
 * Return the count of bytes in transmit buffer.
 * In sync mode a busy transmitter is reported as a full frame.
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	byte count
 */
static int mgsl_chars_in_buffer(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
		return 0;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name,info->xmit_cnt );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if ( info->tx_active )
			return info->max_frame_size;
		else
			return 0;
	}

	return info->xmit_cnt;
}	/* end of mgsl_chars_in_buffer() */

/* mgsl_flush_buffer()
 *
 * Discard all data in the send buffer and cancel the tx timer,
 * then wake up writers blocked on buffer space.
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_flush_buffer(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
		return;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	tty_wakeup(tty);
}

/* mgsl_send_xchar()
 *
 * Send a high-priority XON/XOFF character
 *
 * Arguments:		tty	pointer to tty info structure
 *			ch	character to send
 * Return Value:	None
 */
static void mgsl_send_xchar(struct tty_struct *tty, char ch)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, ch );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
		return;

	info->x_char = ch;
	if (ch) {
		/* Make sure transmit interrupts are on */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_enabled)
			usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
}	/* end of mgsl_send_xchar() */

/* mgsl_throttle()
 *
 * Signal remote device to throttle send data (our receive data):
 * send XOFF if software flow control is on, drop RTS if hardware
 * flow control is on.
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_throttle(struct tty_struct * tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_throttle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
		return;

	if (I_IXOFF(tty))
		mgsl_send_xchar(tty, STOP_CHAR(tty));

	if (tty->termios.c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		info->serial_signals &= ~SerialSignal_RTS;
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
}	/* end of mgsl_throttle() */

/* mgsl_unthrottle()
 *
 * Signal remote device to stop throttling send data (our receive data):
 * cancel/resend XON, reassert RTS for hardware flow control.
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_unthrottle(struct tty_struct * tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_unthrottle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
		return;

	if (I_IXOFF(tty)) {
		/* a pending XOFF not yet sent is simply cancelled */
		if (info->x_char)
			info->x_char = 0;
		else
			mgsl_send_xchar(tty, START_CHAR(tty));
	}

	if (tty->termios.c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		info->serial_signals |= SerialSignal_RTS;
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

}	/* end of mgsl_unthrottle() */

/* mgsl_get_stats()
 *
 * Copy the current interrupt/error counters to user space, or
 * reset them if user_icount is NULL.
 *
 * Arguments:	info		pointer to device instance data
 *		user_icount	pointer to buffer to hold returned stats
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_params(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	if (!user_icount) {
		/* NULL pointer means "clear the counters" */
		memset(&info->icount, 0, sizeof(info->icount));
	} else {
		mutex_lock(&info->port.mutex);
		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
		mutex_unlock(&info->port.mutex);
		if (err)
			return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_stats() */

/*
 * mgsl_get_params()
 *
 * get the current serial parameters information
 *
 * Arguments:	info		pointer to device instance data
 *		user_params	pointer to buffer to hold returned params
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
{
	int err;
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_params(%s)\n",
			 __FILE__,__LINE__, info->device_name);

	/* port mutex serializes against a concurrent mgsl_set_params() */
	mutex_lock(&info->port.mutex);
	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
	mutex_unlock(&info->port.mutex);
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_params() */

/* mgsl_set_params()
 *
 * set the serial parameters and reprogram the adapter
 *
 * Arguments:
 *
 * 	info		pointer to device instance data
 * 	new_params	user buffer containing new serial params
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
{
 	unsigned long flags;
	MGSL_PARAMS tmp_params;
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
			info->device_name );
	/* copy to a temporary first; never hold a lock across user copy */
	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	mutex_lock(&info->port.mutex);
	spin_lock_irqsave(&info->irq_spinlock,flags);
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* apply the new parameters to the hardware */
 	mgsl_change_params(info);
	mutex_unlock(&info->port.mutex);

	return 0;

}	/* end of mgsl_set_params() */

/* mgsl_get_txidle()
 *
 * get the current transmit idle mode
 *
 * Arguments:	info		pointer to device instance data
 *		idle_mode	pointer to buffer to hold returned idle mode
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
{
	int err;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name, info->idle_mode);

	COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
	if (err) {
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
				__FILE__,__LINE__,info->device_name);
		return -EFAULT;
	}

	return 0;

}	/* end of mgsl_get_txidle() */

/* mgsl_set_txidle()	service ioctl to set transmit idle mode
 *
 * Arguments:	info		pointer to device instance data
 *		idle_mode	new idle mode
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
{
 	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, idle_mode );

	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->idle_mode = idle_mode;
	usc_set_txidle( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_set_txidle() */

/* mgsl_txenable()
 *
 * enable or disable the transmitter
 *
 * Arguments:
 *
 * 	info		pointer to device instance data
 * 	enable		1 = enable, 0 = disable
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_txenable(struct mgsl_struct * info, int enable)
{
 	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, enable);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( enable ) {
		if ( !info->tx_enabled ) {

			usc_start_transmitter(info);
			/*--------------------------------------------------
			 * if HDLC/SDLC Loop mode, attempt to insert the
			 * station in the 'loop' by setting CMR:13. Upon
			 * receipt of the next GoAhead (RxAbort) sequence,
			 * the OnLoop indicator (CCSR:7) should go active
			 * to indicate that we are on the loop
			 *--------------------------------------------------*/
			if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
				usc_loopmode_insert_request( info );
		}
	} else {
		if ( info->tx_enabled )
			usc_stop_transmitter(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_txenable() */

/* mgsl_txabort()	abort send HDLC frame
 *
 * Arguments:	 	info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_txabort(struct mgsl_struct * info)
{
 	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
			info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
	{
		/* in loop mode leave the loop gracefully instead of aborting */
		if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
			usc_loopmode_cancel_transmit( info );
		else
			usc_TCmd(info,TCmd_SendAbort);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
	return 0;

}	/* end of mgsl_txabort() */

/* mgsl_rxenable() 	enable or disable the receiver
 *
 * Arguments:		info	pointer to device instance data
 * 			enable	1 = enable, 0 = disable
2640 * Return Value: 0 if success, otherwise error code 2641 */ 2642static int mgsl_rxenable(struct mgsl_struct * info, int enable) 2643{ 2644 unsigned long flags; 2645 2646 if (debug_level >= DEBUG_LEVEL_INFO) 2647 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__, 2648 info->device_name, enable); 2649 2650 spin_lock_irqsave(&info->irq_spinlock,flags); 2651 if ( enable ) { 2652 if ( !info->rx_enabled ) 2653 usc_start_receiver(info); 2654 } else { 2655 if ( info->rx_enabled ) 2656 usc_stop_receiver(info); 2657 } 2658 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2659 return 0; 2660 2661} /* end of mgsl_rxenable() */ 2662 2663/* mgsl_wait_event() wait for specified event to occur 2664 * 2665 * Arguments: info pointer to device instance data 2666 * mask pointer to bitmask of events to wait for 2667 * Return Value: 0 if successful and bit mask updated with 2668 * of events triggerred, 2669 * otherwise error code 2670 */ 2671static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr) 2672{ 2673 unsigned long flags; 2674 int s; 2675 int rc=0; 2676 struct mgsl_icount cprev, cnow; 2677 int events; 2678 int mask; 2679 struct _input_signal_events oldsigs, newsigs; 2680 DECLARE_WAITQUEUE(wait, current); 2681 2682 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); 2683 if (rc) { 2684 return -EFAULT; 2685 } 2686 2687 if (debug_level >= DEBUG_LEVEL_INFO) 2688 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__, 2689 info->device_name, mask); 2690 2691 spin_lock_irqsave(&info->irq_spinlock,flags); 2692 2693 /* return immediately if state matches requested events */ 2694 usc_get_serial_signals(info); 2695 s = info->serial_signals; 2696 events = mask & 2697 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + 2698 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + 2699 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + 2700 ((s & SerialSignal_RI) ? 
MgslEvent_RiActive :MgslEvent_RiInactive) ); 2701 if (events) { 2702 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2703 goto exit; 2704 } 2705 2706 /* save current irq counts */ 2707 cprev = info->icount; 2708 oldsigs = info->input_signal_events; 2709 2710 /* enable hunt and idle irqs if needed */ 2711 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2712 u16 oldreg = usc_InReg(info,RICR); 2713 u16 newreg = oldreg + 2714 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) + 2715 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0); 2716 if (oldreg != newreg) 2717 usc_OutReg(info, RICR, newreg); 2718 } 2719 2720 set_current_state(TASK_INTERRUPTIBLE); 2721 add_wait_queue(&info->event_wait_q, &wait); 2722 2723 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2724 2725 2726 for(;;) { 2727 schedule(); 2728 if (signal_pending(current)) { 2729 rc = -ERESTARTSYS; 2730 break; 2731 } 2732 2733 /* get current irq counts */ 2734 spin_lock_irqsave(&info->irq_spinlock,flags); 2735 cnow = info->icount; 2736 newsigs = info->input_signal_events; 2737 set_current_state(TASK_INTERRUPTIBLE); 2738 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2739 2740 /* if no change, wait aborted for some reason */ 2741 if (newsigs.dsr_up == oldsigs.dsr_up && 2742 newsigs.dsr_down == oldsigs.dsr_down && 2743 newsigs.dcd_up == oldsigs.dcd_up && 2744 newsigs.dcd_down == oldsigs.dcd_down && 2745 newsigs.cts_up == oldsigs.cts_up && 2746 newsigs.cts_down == oldsigs.cts_down && 2747 newsigs.ri_up == oldsigs.ri_up && 2748 newsigs.ri_down == oldsigs.ri_down && 2749 cnow.exithunt == cprev.exithunt && 2750 cnow.rxidle == cprev.rxidle) { 2751 rc = -EIO; 2752 break; 2753 } 2754 2755 events = mask & 2756 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + 2757 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + 2758 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + 2759 (newsigs.dcd_down != oldsigs.dcd_down ? 
MgslEvent_DcdInactive:0) + 2760 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + 2761 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + 2762 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + 2763 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + 2764 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + 2765 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) ); 2766 if (events) 2767 break; 2768 2769 cprev = cnow; 2770 oldsigs = newsigs; 2771 } 2772 2773 remove_wait_queue(&info->event_wait_q, &wait); 2774 set_current_state(TASK_RUNNING); 2775 2776 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2777 spin_lock_irqsave(&info->irq_spinlock,flags); 2778 if (!waitqueue_active(&info->event_wait_q)) { 2779 /* disable enable exit hunt mode/idle rcvd IRQs */ 2780 usc_OutReg(info, RICR, usc_InReg(info,RICR) & 2781 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)); 2782 } 2783 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2784 } 2785exit: 2786 if ( rc == 0 ) 2787 PUT_USER(rc, events, mask_ptr); 2788 2789 return rc; 2790 2791} /* end of mgsl_wait_event() */ 2792 2793static int modem_input_wait(struct mgsl_struct *info,int arg) 2794{ 2795 unsigned long flags; 2796 int rc; 2797 struct mgsl_icount cprev, cnow; 2798 DECLARE_WAITQUEUE(wait, current); 2799 2800 /* save current irq counts */ 2801 spin_lock_irqsave(&info->irq_spinlock,flags); 2802 cprev = info->icount; 2803 add_wait_queue(&info->status_event_wait_q, &wait); 2804 set_current_state(TASK_INTERRUPTIBLE); 2805 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2806 2807 for(;;) { 2808 schedule(); 2809 if (signal_pending(current)) { 2810 rc = -ERESTARTSYS; 2811 break; 2812 } 2813 2814 /* get new irq counts */ 2815 spin_lock_irqsave(&info->irq_spinlock,flags); 2816 cnow = info->icount; 2817 set_current_state(TASK_INTERRUPTIBLE); 2818 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2819 2820 /* if no change, wait aborted for some reason 
*/ 2821 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 2822 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { 2823 rc = -EIO; 2824 break; 2825 } 2826 2827 /* check for change in caller specified modem input */ 2828 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || 2829 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || 2830 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || 2831 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { 2832 rc = 0; 2833 break; 2834 } 2835 2836 cprev = cnow; 2837 } 2838 remove_wait_queue(&info->status_event_wait_q, &wait); 2839 set_current_state(TASK_RUNNING); 2840 return rc; 2841} 2842 2843/* return the state of the serial control and status signals 2844 */ 2845static int tiocmget(struct tty_struct *tty) 2846{ 2847 struct mgsl_struct *info = tty->driver_data; 2848 unsigned int result; 2849 unsigned long flags; 2850 2851 spin_lock_irqsave(&info->irq_spinlock,flags); 2852 usc_get_serial_signals(info); 2853 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2854 2855 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + 2856 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + 2857 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + 2858 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + 2859 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + 2860 ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); 2861 2862 if (debug_level >= DEBUG_LEVEL_INFO) 2863 printk("%s(%d):%s tiocmget() value=%08X\n", 2864 __FILE__,__LINE__, info->device_name, result ); 2865 return result; 2866} 2867 2868/* set modem control signals (DTR/RTS) 2869 */ 2870static int tiocmset(struct tty_struct *tty, 2871 unsigned int set, unsigned int clear) 2872{ 2873 struct mgsl_struct *info = tty->driver_data; 2874 unsigned long flags; 2875 2876 if (debug_level >= DEBUG_LEVEL_INFO) 2877 printk("%s(%d):%s tiocmset(%x,%x)\n", 2878 __FILE__,__LINE__,info->device_name, set, clear); 2879 2880 if (set & TIOCM_RTS) 2881 info->serial_signals |= SerialSignal_RTS; 2882 if (set & TIOCM_DTR) 2883 info->serial_signals |= SerialSignal_DTR; 2884 if (clear & TIOCM_RTS) 2885 info->serial_signals &= ~SerialSignal_RTS; 2886 if (clear & TIOCM_DTR) 2887 info->serial_signals &= ~SerialSignal_DTR; 2888 2889 spin_lock_irqsave(&info->irq_spinlock,flags); 2890 usc_set_serial_signals(info); 2891 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2892 2893 return 0; 2894} 2895 2896/* mgsl_break() Set or clear transmit break condition 2897 * 2898 * Arguments: tty pointer to tty instance data 2899 * break_state -1=set break condition, 0=clear 2900 * Return Value: error code 2901 */ 2902static int mgsl_break(struct tty_struct *tty, int break_state) 2903{ 2904 struct mgsl_struct * info = tty->driver_data; 2905 unsigned long flags; 2906 2907 if (debug_level >= DEBUG_LEVEL_INFO) 2908 printk("%s(%d):mgsl_break(%s,%d)\n", 2909 __FILE__,__LINE__, info->device_name, break_state); 2910 2911 if (mgsl_paranoia_check(info, tty->name, "mgsl_break")) 2912 return -EINVAL; 2913 2914 spin_lock_irqsave(&info->irq_spinlock,flags); 2915 if (break_state == -1) 2916 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7)); 2917 else 2918 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7)); 2919 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2920 return 0; 2921 2922} /* end of mgsl_break() */ 2923 2924/* 2925 * Get counter of 
 * input serial line interrupts (DCD,RI,DSR,CTS)
 * Return: write counters to the user passed counter struct
 * NB: both 1->0 and 0->1 transitions are counted except for
 *     RI where only 0->1 is counted.
 */
/* NOTE(review): "msgl" is a long-standing typo for "mgsl"; the name is
 * referenced from the tty operations table, so it is kept as-is. */
static int msgl_get_icount(struct tty_struct *tty,
				struct serial_icounter_struct *icount)

{
	struct mgsl_struct * info = tty->driver_data;
	struct mgsl_icount cnow;	/* kernel counter temps */
	unsigned long flags;

	/* snapshot the counters under the irq lock for consistency */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	cnow = info->icount;
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	icount->cts = cnow.cts;
	icount->dsr = cnow.dsr;
	icount->rng = cnow.rng;
	icount->dcd = cnow.dcd;
	icount->rx = cnow.rx;
	icount->tx = cnow.tx;
	icount->frame = cnow.frame;
	icount->overrun = cnow.overrun;
	icount->parity = cnow.parity;
	icount->brk = cnow.brk;
	icount->buf_overrun = cnow.buf_overrun;
	return 0;
}

/* mgsl_ioctl()	Service an IOCTL request
 *
 * Arguments:
 *
 * 	tty	pointer to tty instance data
 * 	cmd	IOCTL command code
 * 	arg	command argument/context
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_ioctl(struct tty_struct *tty,
		    unsigned int cmd, unsigned long arg)
{
	struct mgsl_struct * info = tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
			info->device_name, cmd );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
		return -ENODEV;

	/* most commands are refused while the port is in an error state */
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
		    return -EIO;
	}

	return mgsl_ioctl_common(info, cmd, arg);
}

/* Dispatch an ioctl command shared by the tty and netdev paths. */
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
		case MGSL_IOCGPARAMS:
			return mgsl_get_params(info, argp);
		case MGSL_IOCSPARAMS:
			return mgsl_set_params(info, argp);
		case MGSL_IOCGTXIDLE:
			return mgsl_get_txidle(info, argp);
		case MGSL_IOCSTXIDLE:
			return mgsl_set_txidle(info,(int)arg);
		case MGSL_IOCTXENABLE:
			return mgsl_txenable(info,(int)arg);
		case MGSL_IOCRXENABLE:
			return mgsl_rxenable(info,(int)arg);
		case MGSL_IOCTXABORT:
			return mgsl_txabort(info);
		case MGSL_IOCGSTATS:
			return mgsl_get_stats(info, argp);
		case MGSL_IOCWAITEVENT:
			return mgsl_wait_event(info, argp);
		case MGSL_IOCLOOPTXDONE:
			return mgsl_loopmode_send_done(info);
		/* Wait for modem input (DCD,RI,DSR,CTS) change
		 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
		 */
		case TIOCMIWAIT:
			return modem_input_wait(info,(int)arg);

		default:
			return -ENOIOCTLCMD;
	}
	return 0;	/* not reached: every case above returns */
}

/* mgsl_set_termios()
 *
 * Set new termios settings
 *
 * Arguments:
 *
 * 	tty		pointer to tty structure
 * 	termios		pointer to buffer to hold returned old termios
 *
 * Return Value:	None
 */
static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
			tty->driver->name );

	mgsl_change_params(info);

	/* Handle transition to B0 status: hang up by dropping DTR/RTS */
	if (old_termios->c_cflag & CBAUD &&
	    !(tty->termios.c_cflag & CBAUD)) {
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
		spin_lock_irqsave(&info->irq_spinlock,flags);
	 	usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle transition away from B0 status: reassert DTR, and RTS
	 * unless hardware flow control currently holds it off */
	if (!(old_termios->c_cflag & CBAUD) &&
	    tty->termios.c_cflag & CBAUD) {
		info->serial_signals |= SerialSignal_DTR;
 		if (!(tty->termios.c_cflag & CRTSCTS) ||
 		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			info->serial_signals |= SerialSignal_RTS;
 		}
		spin_lock_irqsave(&info->irq_spinlock,flags);
	 	usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle turning off CRTSCTS: release a CTS-stopped transmitter */
	if (old_termios->c_cflag & CRTSCTS &&
	    !(tty->termios.c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		mgsl_start(tty);
	}

}	/* end of mgsl_set_termios() */

/* mgsl_close()
 *
 * Called when port is closed. Wait for remaining data to be
 * sent. Disable port and free resources.
 *
 * Arguments:
 *
 * 	tty	pointer to open tty structure
 * 	filp	pointer to open file object
 *
 * Return Value:	None
 */
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct * info = tty->driver_data;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->port.count);

	/* returns 0 if this was not the final close of the port */
	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	mutex_lock(&info->port.mutex);
 	if (info->port.flags & ASYNC_INITIALIZED)
 		mgsl_wait_until_sent(tty, info->timeout);
	mgsl_flush_buffer(tty);
	tty_ldisc_flush(tty);
	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->port.count);

}	/* end of mgsl_close() */

/* mgsl_wait_until_sent()
 *
 *	Wait until the transmitter is empty.
3123 * 3124 * Arguments: 3125 * 3126 * tty pointer to tty info structure 3127 * timeout time to wait for send completion 3128 * 3129 * Return Value: None 3130 */ 3131static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) 3132{ 3133 struct mgsl_struct * info = tty->driver_data; 3134 unsigned long orig_jiffies, char_time; 3135 3136 if (!info ) 3137 return; 3138 3139 if (debug_level >= DEBUG_LEVEL_INFO) 3140 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n", 3141 __FILE__,__LINE__, info->device_name ); 3142 3143 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent")) 3144 return; 3145 3146 if (!(info->port.flags & ASYNC_INITIALIZED)) 3147 goto exit; 3148 3149 orig_jiffies = jiffies; 3150 3151 /* Set check interval to 1/5 of estimated time to 3152 * send a character, and make it at least 1. The check 3153 * interval should also be less than the timeout. 3154 * Note: use tight timings here to satisfy the NIST-PCTS. 3155 */ 3156 3157 if ( info->params.data_rate ) { 3158 char_time = info->timeout/(32 * 5); 3159 if (!char_time) 3160 char_time++; 3161 } else 3162 char_time = 1; 3163 3164 if (timeout) 3165 char_time = min_t(unsigned long, char_time, timeout); 3166 3167 if ( info->params.mode == MGSL_MODE_HDLC || 3168 info->params.mode == MGSL_MODE_RAW ) { 3169 while (info->tx_active) { 3170 msleep_interruptible(jiffies_to_msecs(char_time)); 3171 if (signal_pending(current)) 3172 break; 3173 if (timeout && time_after(jiffies, orig_jiffies + timeout)) 3174 break; 3175 } 3176 } else { 3177 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) && 3178 info->tx_enabled) { 3179 msleep_interruptible(jiffies_to_msecs(char_time)); 3180 if (signal_pending(current)) 3181 break; 3182 if (timeout && time_after(jiffies, orig_jiffies + timeout)) 3183 break; 3184 } 3185 } 3186 3187exit: 3188 if (debug_level >= DEBUG_LEVEL_INFO) 3189 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n", 3190 __FILE__,__LINE__, info->device_name ); 3191 3192} /* end of mgsl_wait_until_sent() 
*/ 3193 3194/* mgsl_hangup() 3195 * 3196 * Called by tty_hangup() when a hangup is signaled. 3197 * This is the same as to closing all open files for the port. 3198 * 3199 * Arguments: tty pointer to associated tty object 3200 * Return Value: None 3201 */ 3202static void mgsl_hangup(struct tty_struct *tty) 3203{ 3204 struct mgsl_struct * info = tty->driver_data; 3205 3206 if (debug_level >= DEBUG_LEVEL_INFO) 3207 printk("%s(%d):mgsl_hangup(%s)\n", 3208 __FILE__,__LINE__, info->device_name ); 3209 3210 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup")) 3211 return; 3212 3213 mgsl_flush_buffer(tty); 3214 shutdown(info); 3215 3216 info->port.count = 0; 3217 info->port.flags &= ~ASYNC_NORMAL_ACTIVE; 3218 info->port.tty = NULL; 3219 3220 wake_up_interruptible(&info->port.open_wait); 3221 3222} /* end of mgsl_hangup() */ 3223 3224/* 3225 * carrier_raised() 3226 * 3227 * Return true if carrier is raised 3228 */ 3229 3230static int carrier_raised(struct tty_port *port) 3231{ 3232 unsigned long flags; 3233 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3234 3235 spin_lock_irqsave(&info->irq_spinlock, flags); 3236 usc_get_serial_signals(info); 3237 spin_unlock_irqrestore(&info->irq_spinlock, flags); 3238 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; 3239} 3240 3241static void dtr_rts(struct tty_port *port, int on) 3242{ 3243 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3244 unsigned long flags; 3245 3246 spin_lock_irqsave(&info->irq_spinlock,flags); 3247 if (on) 3248 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 3249 else 3250 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); 3251 usc_set_serial_signals(info); 3252 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3253} 3254 3255 3256/* block_til_ready() 3257 * 3258 * Block the current process until the specified port 3259 * is ready to be opened. 
3260 * 3261 * Arguments: 3262 * 3263 * tty pointer to tty info structure 3264 * filp pointer to open file object 3265 * info pointer to device instance data 3266 * 3267 * Return Value: 0 if success, otherwise error code 3268 */ 3269static int block_til_ready(struct tty_struct *tty, struct file * filp, 3270 struct mgsl_struct *info) 3271{ 3272 DECLARE_WAITQUEUE(wait, current); 3273 int retval; 3274 bool do_clocal = false; 3275 bool extra_count = false; 3276 unsigned long flags; 3277 int dcd; 3278 struct tty_port *port = &info->port; 3279 3280 if (debug_level >= DEBUG_LEVEL_INFO) 3281 printk("%s(%d):block_til_ready on %s\n", 3282 __FILE__,__LINE__, tty->driver->name ); 3283 3284 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ 3285 /* nonblock mode is set or port is not enabled */ 3286 port->flags |= ASYNC_NORMAL_ACTIVE; 3287 return 0; 3288 } 3289 3290 if (tty->termios.c_cflag & CLOCAL) 3291 do_clocal = true; 3292 3293 /* Wait for carrier detect and the line to become 3294 * free (i.e., not in use by the callout). While we are in 3295 * this loop, port->count is dropped by one, so that 3296 * mgsl_close() knows when to free things. We restore it upon 3297 * exit, either normal or abnormal. 3298 */ 3299 3300 retval = 0; 3301 add_wait_queue(&port->open_wait, &wait); 3302 3303 if (debug_level >= DEBUG_LEVEL_INFO) 3304 printk("%s(%d):block_til_ready before block on %s count=%d\n", 3305 __FILE__,__LINE__, tty->driver->name, port->count ); 3306 3307 spin_lock_irqsave(&info->irq_spinlock, flags); 3308 if (!tty_hung_up_p(filp)) { 3309 extra_count = true; 3310 port->count--; 3311 } 3312 spin_unlock_irqrestore(&info->irq_spinlock, flags); 3313 port->blocked_open++; 3314 3315 while (1) { 3316 if (tty->termios.c_cflag & CBAUD) 3317 tty_port_raise_dtr_rts(port); 3318 3319 set_current_state(TASK_INTERRUPTIBLE); 3320 3321 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ 3322 retval = (port->flags & ASYNC_HUP_NOTIFY) ? 
3323 -EAGAIN : -ERESTARTSYS; 3324 break; 3325 } 3326 3327 dcd = tty_port_carrier_raised(&info->port); 3328 3329 if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd)) 3330 break; 3331 3332 if (signal_pending(current)) { 3333 retval = -ERESTARTSYS; 3334 break; 3335 } 3336 3337 if (debug_level >= DEBUG_LEVEL_INFO) 3338 printk("%s(%d):block_til_ready blocking on %s count=%d\n", 3339 __FILE__,__LINE__, tty->driver->name, port->count ); 3340 3341 tty_unlock(tty); 3342 schedule(); 3343 tty_lock(tty); 3344 } 3345 3346 set_current_state(TASK_RUNNING); 3347 remove_wait_queue(&port->open_wait, &wait); 3348 3349 /* FIXME: Racy on hangup during close wait */ 3350 if (extra_count) 3351 port->count++; 3352 port->blocked_open--; 3353 3354 if (debug_level >= DEBUG_LEVEL_INFO) 3355 printk("%s(%d):block_til_ready after blocking on %s count=%d\n", 3356 __FILE__,__LINE__, tty->driver->name, port->count ); 3357 3358 if (!retval) 3359 port->flags |= ASYNC_NORMAL_ACTIVE; 3360 3361 return retval; 3362 3363} /* end of block_til_ready() */ 3364 3365static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty) 3366{ 3367 struct mgsl_struct *info; 3368 int line = tty->index; 3369 3370 /* verify range of specified line number */ 3371 if (line >= mgsl_device_count) { 3372 printk("%s(%d):mgsl_open with invalid line #%d.\n", 3373 __FILE__, __LINE__, line); 3374 return -ENODEV; 3375 } 3376 3377 /* find the info structure for the specified line */ 3378 info = mgsl_device_list; 3379 while (info && info->line != line) 3380 info = info->next_device; 3381 if (mgsl_paranoia_check(info, tty->name, "mgsl_open")) 3382 return -ENODEV; 3383 tty->driver_data = info; 3384 3385 return tty_port_install(&info->port, driver, tty); 3386} 3387 3388/* mgsl_open() 3389 * 3390 * Called when a port is opened. Init and enable port. 3391 * Perform serial-specific initialization for the tty structure. 
3392 * 3393 * Arguments: tty pointer to tty info structure 3394 * filp associated file pointer 3395 * 3396 * Return Value: 0 if success, otherwise error code 3397 */ 3398static int mgsl_open(struct tty_struct *tty, struct file * filp) 3399{ 3400 struct mgsl_struct *info = tty->driver_data; 3401 unsigned long flags; 3402 int retval; 3403 3404 info->port.tty = tty; 3405 3406 if (debug_level >= DEBUG_LEVEL_INFO) 3407 printk("%s(%d):mgsl_open(%s), old ref count = %d\n", 3408 __FILE__,__LINE__,tty->driver->name, info->port.count); 3409 3410 /* If port is closing, signal caller to try again */ 3411 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ 3412 if (info->port.flags & ASYNC_CLOSING) 3413 interruptible_sleep_on(&info->port.close_wait); 3414 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ? 3415 -EAGAIN : -ERESTARTSYS); 3416 goto cleanup; 3417 } 3418 3419 info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; 3420 3421 spin_lock_irqsave(&info->netlock, flags); 3422 if (info->netcount) { 3423 retval = -EBUSY; 3424 spin_unlock_irqrestore(&info->netlock, flags); 3425 goto cleanup; 3426 } 3427 info->port.count++; 3428 spin_unlock_irqrestore(&info->netlock, flags); 3429 3430 if (info->port.count == 1) { 3431 /* 1st open on this device, init hardware */ 3432 retval = startup(info); 3433 if (retval < 0) 3434 goto cleanup; 3435 } 3436 3437 retval = block_til_ready(tty, filp, info); 3438 if (retval) { 3439 if (debug_level >= DEBUG_LEVEL_INFO) 3440 printk("%s(%d):block_til_ready(%s) returned %d\n", 3441 __FILE__,__LINE__, info->device_name, retval); 3442 goto cleanup; 3443 } 3444 3445 if (debug_level >= DEBUG_LEVEL_INFO) 3446 printk("%s(%d):mgsl_open(%s) success\n", 3447 __FILE__,__LINE__, info->device_name); 3448 retval = 0; 3449 3450cleanup: 3451 if (retval) { 3452 if (tty->count == 1) 3453 info->port.tty = NULL; /* tty layer will release tty struct */ 3454 if(info->port.count) 3455 info->port.count--; 3456 } 3457 3458 return retval; 3459 
3460} /* end of mgsl_open() */ 3461 3462/* 3463 * /proc fs routines.... 3464 */ 3465 3466static inline void line_info(struct seq_file *m, struct mgsl_struct *info) 3467{ 3468 char stat_buf[30]; 3469 unsigned long flags; 3470 3471 if (info->bus_type == MGSL_BUS_TYPE_PCI) { 3472 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X", 3473 info->device_name, info->io_base, info->irq_level, 3474 info->phys_memory_base, info->phys_lcr_base); 3475 } else { 3476 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d", 3477 info->device_name, info->io_base, 3478 info->irq_level, info->dma_level); 3479 } 3480 3481 /* output current serial signal states */ 3482 spin_lock_irqsave(&info->irq_spinlock,flags); 3483 usc_get_serial_signals(info); 3484 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3485 3486 stat_buf[0] = 0; 3487 stat_buf[1] = 0; 3488 if (info->serial_signals & SerialSignal_RTS) 3489 strcat(stat_buf, "|RTS"); 3490 if (info->serial_signals & SerialSignal_CTS) 3491 strcat(stat_buf, "|CTS"); 3492 if (info->serial_signals & SerialSignal_DTR) 3493 strcat(stat_buf, "|DTR"); 3494 if (info->serial_signals & SerialSignal_DSR) 3495 strcat(stat_buf, "|DSR"); 3496 if (info->serial_signals & SerialSignal_DCD) 3497 strcat(stat_buf, "|CD"); 3498 if (info->serial_signals & SerialSignal_RI) 3499 strcat(stat_buf, "|RI"); 3500 3501 if (info->params.mode == MGSL_MODE_HDLC || 3502 info->params.mode == MGSL_MODE_RAW ) { 3503 seq_printf(m, " HDLC txok:%d rxok:%d", 3504 info->icount.txok, info->icount.rxok); 3505 if (info->icount.txunder) 3506 seq_printf(m, " txunder:%d", info->icount.txunder); 3507 if (info->icount.txabort) 3508 seq_printf(m, " txabort:%d", info->icount.txabort); 3509 if (info->icount.rxshort) 3510 seq_printf(m, " rxshort:%d", info->icount.rxshort); 3511 if (info->icount.rxlong) 3512 seq_printf(m, " rxlong:%d", info->icount.rxlong); 3513 if (info->icount.rxover) 3514 seq_printf(m, " rxover:%d", info->icount.rxover); 3515 if (info->icount.rxcrc) 3516 seq_printf(m, " 
rxcrc:%d", info->icount.rxcrc); 3517 } else { 3518 seq_printf(m, " ASYNC tx:%d rx:%d", 3519 info->icount.tx, info->icount.rx); 3520 if (info->icount.frame) 3521 seq_printf(m, " fe:%d", info->icount.frame); 3522 if (info->icount.parity) 3523 seq_printf(m, " pe:%d", info->icount.parity); 3524 if (info->icount.brk) 3525 seq_printf(m, " brk:%d", info->icount.brk); 3526 if (info->icount.overrun) 3527 seq_printf(m, " oe:%d", info->icount.overrun); 3528 } 3529 3530 /* Append serial signal status to end */ 3531 seq_printf(m, " %s\n", stat_buf+1); 3532 3533 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", 3534 info->tx_active,info->bh_requested,info->bh_running, 3535 info->pending_bh); 3536 3537 spin_lock_irqsave(&info->irq_spinlock,flags); 3538 { 3539 u16 Tcsr = usc_InReg( info, TCSR ); 3540 u16 Tdmr = usc_InDmaReg( info, TDMR ); 3541 u16 Ticr = usc_InReg( info, TICR ); 3542 u16 Rscr = usc_InReg( info, RCSR ); 3543 u16 Rdmr = usc_InDmaReg( info, RDMR ); 3544 u16 Ricr = usc_InReg( info, RICR ); 3545 u16 Icr = usc_InReg( info, ICR ); 3546 u16 Dccr = usc_InReg( info, DCCR ); 3547 u16 Tmr = usc_InReg( info, TMR ); 3548 u16 Tccr = usc_InReg( info, TCCR ); 3549 u16 Ccar = inw( info->io_base + CCAR ); 3550 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n" 3551 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n", 3552 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar ); 3553 } 3554 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3555} 3556 3557/* Called to print information about devices */ 3558static int mgsl_proc_show(struct seq_file *m, void *v) 3559{ 3560 struct mgsl_struct *info; 3561 3562 seq_printf(m, "synclink driver:%s\n", driver_version); 3563 3564 info = mgsl_device_list; 3565 while( info ) { 3566 line_info(m, info); 3567 info = info->next_device; 3568 } 3569 return 0; 3570} 3571 3572static int mgsl_proc_open(struct inode *inode, struct file *file) 3573{ 3574 return single_open(file, mgsl_proc_show, NULL); 3575} 3576 
static const struct file_operations mgsl_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mgsl_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* mgsl_allocate_dma_buffers()
 *
 * 	Allocate and format DMA buffers (ISA adapter)
 * 	or format shared memory buffers (PCI adapter).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
{
	unsigned short BuffersPerFrame;

	info->last_mem_alloc = 0;

	/* Calculate the number of DMA buffers necessary to hold the */
	/* largest allowable frame size. Note: If the max frame size is */
	/* not an even multiple of the DMA buffer size then we need to */
	/* round the buffer count per frame up one. */

	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
	if ( info->max_frame_size % DMABUFFERSIZE )
		BuffersPerFrame++;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/*
		 * The PCI adapter has 256KBytes of shared memory to use.
		 * This is 64 PAGE_SIZE buffers.
		 *
		 * The first page is used for padding at this time so the
		 * buffer list does not begin at offset 0 of the PCI
		 * adapter's shared memory.
		 *
		 * The 2nd page is used for the buffer list. A 4K buffer
		 * list can hold 128 DMA_BUFFER structures at 32 bytes
		 * each.
		 *
		 * This leaves 62 4K pages.
		 *
		 * The next N pages are used for transmit frame(s). We
		 * reserve enough 4K page blocks to hold the required
		 * number of transmit dma buffers (num_tx_dma_buffers),
		 * each of MaxFrameSize size.
		 *
		 * Of the remaining pages (62-N), determine how many can
		 * be used to receive full MaxFrameSize inbound frames
		 */
		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = 62 - info->tx_buffer_count;
	} else {
		/* Calculate the number of PAGE_SIZE buffers needed for */
		/* receive and transmit DMA buffers. */


		/* Calculate the number of DMA buffers necessary to */
		/* hold 7 max size receive frames and one max size transmit frame. */
		/* The receive buffer count is bumped by one so we avoid an */
		/* End of List condition if all receive buffers are used when */
		/* using linked list DMA buffers. */

		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;

		/*
		 * limit total TxBuffers & RxBuffers to 62 4K total
		 * (ala PCI Allocation)
		 */

		if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
			info->rx_buffer_count = 62 - info->tx_buffer_count;

	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);

	/* allocate buffer list first: frame memory allocation stores its
	 * pointers/addresses into the list entries allocated here
	 */
	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
		  mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
		  mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
		  mgsl_alloc_intermediate_rxbuffer_memory(info) < 0  ||
		  mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
		return -ENOMEM;
	}

	mgsl_reset_rx_dma_buffers( info );
  	mgsl_reset_tx_dma_buffers( info );

	return 0;

}	/* end of mgsl_allocate_dma_buffers() */

/*
 * mgsl_alloc_buffer_list_memory()
 *
 * Allocate a common DMA buffer for use as the
 * receive and transmit buffer lists.
 *
 * A buffer list is a set of buffer entries where each entry contains
 * a pointer to an actual buffer and a pointer to the next buffer entry
 * (plus some other info about the buffer).
 *
 * The buffer entries for a list are built to form a circular list so
 * that when the entire list has been traversed you start back at the
 * beginning.
 *
 * This function allocates memory for just the buffer entries.
 * The links (pointer to next entry) are filled in with the physical
 * address of the next entry so the adapter can navigate the list
 * using bus master DMA. The pointers to the actual buffers are filled
 * out later when the actual buffers are allocated.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
{
	unsigned int i;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI adapter uses shared memory; buffer_list_phys is an
		 * offset into the adapter's shared memory window
		 */
		info->buffer_list = info->memory_base + info->last_mem_alloc;
		info->buffer_list_phys = info->last_mem_alloc;
		info->last_mem_alloc += BUFFERLISTSIZE;
	} else {
		/* ISA adapter uses system memory. */
		/* The buffer lists are allocated as a common buffer that both */
		/* the processor and adapter can access. This allows the driver to */
		/* inspect portions of the buffer while other portions are being */
		/* updated by the adapter using Bus Master DMA. */

		/* NOTE(review): passes a NULL struct device — relies on legacy
		 * ISA DMA mapping semantics; verify against the DMA API in use.
		 */
		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
		if (info->buffer_list == NULL)
			return -ENOMEM;
		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
	}

	/* We got the memory for the buffer entry lists. */
	/* Initialize the memory block to all zeros. */
	memset( info->buffer_list, 0, BUFFERLISTSIZE );

	/* Save virtual address pointers to the receive and */
	/* transmit buffer lists. (Receive 1st). These pointers will */
	/* be used by the processor to access the lists. */
	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list += info->rx_buffer_count;

	/*
	 * Build the links for the buffer entry lists such that
	 * two circular lists are built. (Transmit and Receive).
	 *
	 * Note: the links are physical addresses
	 * which are read by the adapter to determine the next
	 * buffer entry to use.
	 */

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->rx_buffer_list[i].phys_entry =
			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		/* last entry's link stays pointed at entry 0, closing the ring */
		info->rx_buffer_list[i].link = info->buffer_list_phys;

		if ( i < info->rx_buffer_count - 1 )
			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		/* tx ring starts after the rx entries; last entry links back
		 * to the first tx entry
		 */
		info->tx_buffer_list[i].link = info->buffer_list_phys +
			info->rx_buffer_count * sizeof(DMABUFFERENTRY);

		if ( i < info->tx_buffer_count - 1 )
			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	return 0;

}	/* end of mgsl_alloc_buffer_list_memory() */

/* Free DMA buffers allocated for use as the
 * receive and transmit buffer lists.
 * Warning:
 *
 * 	The data transfer buffers associated with the buffer list
 * 	MUST be freed before freeing the buffer list itself because
 * 	the buffer list contains the information necessary to free
 * 	the individual buffers!
 */
static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
{
	/* PCI buffer list lives in the adapter's shared memory window and
	 * is not freed here; only the ISA coherent allocation is released
	 */
	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);

	info->buffer_list = NULL;
	info->rx_buffer_list = NULL;
	info->tx_buffer_list = NULL;

}	/* end of mgsl_free_buffer_list_memory() */

/*
 * mgsl_alloc_frame_memory()
 *
 * 	Allocate the frame DMA buffers used by the specified buffer list.
 * 	Each DMA buffer will be one memory page in size. This is necessary
 * 	because memory can fragment enough that it may be impossible
 * 	contiguous pages.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 * 	BufferList	pointer to list of buffer entries
 * 	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
{
	int i;
	u32 phys_addr;

	/* Allocate page sized buffers for the receive buffer list */

	for ( i = 0; i < Buffercount; i++ ) {
		if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
			/* PCI adapter uses shared memory buffers;
			 * phys_addr is an offset into the shared window
			 */
			BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
			phys_addr = info->last_mem_alloc;
			info->last_mem_alloc += DMABUFFERSIZE;
		} else {
			/* ISA adapter uses system memory. */
			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
			if (BufferList[i].virt_addr == NULL)
				return -ENOMEM;
			phys_addr = (u32)(BufferList[i].dma_addr);
		}
		BufferList[i].phys_addr = phys_addr;
	}

	return 0;

}	/* end of mgsl_alloc_frame_memory() */

/*
 * mgsl_free_frame_memory()
 *
 * 	Free the buffers associated with
 * 	each buffer entry of a buffer list.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 * 	BufferList	pointer to list of buffer entries
 * 	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	None
 */
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
{
	int i;

	if ( BufferList ) {
		for ( i = 0 ; i < Buffercount ; i++ ) {
			if ( BufferList[i].virt_addr ) {
				/* PCI buffers are in shared memory and are
				 * not individually freed
				 */
				if ( info->bus_type != MGSL_BUS_TYPE_PCI )
					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
				BufferList[i].virt_addr = NULL;
			}
		}
	}

}	/* end of mgsl_free_frame_memory() */

/* mgsl_free_dma_buffers()
 *
 * 	Free DMA buffers
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
	/* frame buffers must be freed before the buffer list: the list
	 * entries hold the addresses needed to free each frame buffer
	 */
	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
	mgsl_free_buffer_list_memory( info );

}	/* end of mgsl_free_dma_buffers() */


/*
 * mgsl_alloc_intermediate_rxbuffer_memory()
 *
 * 	Allocate a buffer large enough to hold max_frame_size. This buffer
 *	is used to pass an assembled frame to the line discipline.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
	if ( info->intermediate_rxbuffer == NULL )
		return -ENOMEM;

	return 0;

}	/* end of mgsl_alloc_intermediate_rxbuffer_memory() */

/*
 * mgsl_free_intermediate_rxbuffer_memory()
 *
 *	Release the intermediate receive buffer.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	kfree(info->intermediate_rxbuffer);
	info->intermediate_rxbuffer = NULL;

}	/* end of mgsl_free_intermediate_rxbuffer_memory() */

/*
 * mgsl_alloc_intermediate_txbuffer_memory()
 *
 *	Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size.
 * 	This buffer is used to load transmit frames into the adapter's dma transfer
 * 	buffers when there is sufficient space.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s %s(%d)  allocating %d tx holding buffers\n",
				info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);

	memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));

	for ( i=0; i<info->num_tx_holding_buffers; ++i) {
		info->tx_holding_buffers[i].buffer =
			kmalloc(info->max_frame_size, GFP_KERNEL);
		if (info->tx_holding_buffers[i].buffer == NULL) {
			/* unwind: free the buffers allocated so far */
			for (--i; i >= 0; i--) {
				kfree(info->tx_holding_buffers[i].buffer);
				info->tx_holding_buffers[i].buffer = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;

}	/* end of mgsl_alloc_intermediate_txbuffer_memory() */

/*
 * mgsl_free_intermediate_txbuffer_memory()
 *
 *	Release all tx holding buffers and reset the holding-queue state.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
		kfree(info->tx_holding_buffers[i].buffer);
		info->tx_holding_buffers[i].buffer = NULL;
	}

	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_free_intermediate_txbuffer_memory() */


/*
 * load_next_tx_holding_buffer()
 *
 * attempts to load the next buffered tx request into the
 * tx dma buffers
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	true if next buffered tx request loaded
 * 			into adapter's tx dma buffer,
 * 			false otherwise
 */
static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
	bool ret = false;

	if ( info->tx_holding_count ) {
		/* determine if we have enough tx dma buffers
		 * to accommodate the next tx frame
		 */
		struct tx_holding_buffer *ptx =
			&info->tx_holding_buffers[info->get_tx_holding_index];
		int num_free = num_free_tx_dma_buffers(info);
		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
		if ( ptx->buffer_size % DMABUFFERSIZE )
			++num_needed;

		if (num_needed <= num_free) {
			info->xmit_cnt = ptx->buffer_size;
			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);

			/* consume this holding-queue entry (circular index) */
			--info->tx_holding_count;
			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
				info->get_tx_holding_index=0;

			/* restart transmit timer */
			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));

			ret = true;
		}
	}

	return ret;
}

/*
 * save_tx_buffer_request()
 *
 * attempt to store transmit frame request for later transmission
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	Buffer		pointer to buffer containing frame to load
 *	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value: 	1 if able to store, 0 otherwise
 */
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
{
	struct tx_holding_buffer *ptx;

	if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
		return 0;	        /* all buffers in use */
	}

	ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
	ptx->buffer_size = BufferSize;
	memcpy( ptx->buffer, Buffer, BufferSize);

	/* advance circular put index */
	++info->tx_holding_count;
	if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
		info->put_tx_holding_index=0;

	return 1;
}

/* Claim the I/O region, IRQ, and (bus-dependent) shared memory or DMA
 * channel for an adapter, then allocate its DMA buffers.
 * On any failure all previously claimed resources are released.
 *
 * Return Value:	0 if success, otherwise -ENODEV
 */
static int mgsl_claim_resources(struct mgsl_struct *info)
{
	if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
		printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->io_base);
		return -ENODEV;
	}
	info->io_addr_requested = true;

	if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
		info->device_name, info ) < 0 ) {
		printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, info->irq_level );
		goto errout;
	}
	info->irq_requested = true;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
			printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base);
			goto errout;
		}
		info->shared_mem_requested = true;
		if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
			printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
			goto errout;
		}
		info->lcr_mem_requested = true;

		info->memory_base = ioremap_nocache(info->phys_memory_base,
								0x40000);
		if (!info->memory_base) {
			printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		if ( !mgsl_memory_test(info) ) {
			printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		info->lcr_base = ioremap_nocache(info->phys_lcr_base,
								PAGE_SIZE);
		if (!info->lcr_base) {
			printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
			goto errout;
		}
		/* LCR registers start at lcr_offset within the mapped page */
		info->lcr_base += info->lcr_offset;

	} else {
		/* claim DMA channel */

		if (request_dma(info->dma_level,info->device_name) < 0){
			printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
				__FILE__,__LINE__,info->device_name, info->dma_level );
			mgsl_release_resources( info );
			return -ENODEV;
		}
		info->dma_requested = true;

		/* ISA adapter uses bus master DMA */
		set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
		enable_dma(info->dma_level);
	}

	if ( mgsl_allocate_dma_buffers(info) < 0 ) {
		printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, info->dma_level );
		goto errout;
	}

	return 0;
errout:
	mgsl_release_resources(info);
	return -ENODEV;

}	/* end of mgsl_claim_resources() */

/* Release all resources claimed by mgsl_claim_resources(); each
 * resource is guarded by its "requested" flag so this is safe to call
 * on a partially claimed adapter.
 */
static void mgsl_release_resources(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) entry\n",
			__FILE__,__LINE__,info->device_name );

	if ( info->irq_requested ) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}
	if ( info->dma_requested ) {
		disable_dma(info->dma_level);
		free_dma(info->dma_level);
		info->dma_requested = false;
	}
	mgsl_free_dma_buffers(info);
	mgsl_free_intermediate_rxbuffer_memory(info);
	mgsl_free_intermediate_txbuffer_memory(info);

	if ( info->io_addr_requested ) {
		release_region(info->io_base,info->io_addr_size);
		info->io_addr_requested = false;
	}
	if ( info->shared_mem_requested ) {
		release_mem_region(info->phys_memory_base,0x40000);
		info->shared_mem_requested = false;
	}
	if ( info->lcr_mem_requested ) {
		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
		info->lcr_mem_requested = false;
	}
	if (info->memory_base){
		iounmap(info->memory_base);
		info->memory_base =
NULL; 4184 } 4185 if (info->lcr_base){ 4186 iounmap(info->lcr_base - info->lcr_offset); 4187 info->lcr_base = NULL; 4188 } 4189 4190 if ( debug_level >= DEBUG_LEVEL_INFO ) 4191 printk( "%s(%d):mgsl_release_resources(%s) exit\n", 4192 __FILE__,__LINE__,info->device_name ); 4193 4194} /* end of mgsl_release_resources() */ 4195 4196/* mgsl_add_device() 4197 * 4198 * Add the specified device instance data structure to the 4199 * global linked list of devices and increment the device count. 4200 * 4201 * Arguments: info pointer to device instance data 4202 * Return Value: None 4203 */ 4204static void mgsl_add_device( struct mgsl_struct *info ) 4205{ 4206 info->next_device = NULL; 4207 info->line = mgsl_device_count; 4208 sprintf(info->device_name,"ttySL%d",info->line); 4209 4210 if (info->line < MAX_TOTAL_DEVICES) { 4211 if (maxframe[info->line]) 4212 info->max_frame_size = maxframe[info->line]; 4213 4214 if (txdmabufs[info->line]) { 4215 info->num_tx_dma_buffers = txdmabufs[info->line]; 4216 if (info->num_tx_dma_buffers < 1) 4217 info->num_tx_dma_buffers = 1; 4218 } 4219 4220 if (txholdbufs[info->line]) { 4221 info->num_tx_holding_buffers = txholdbufs[info->line]; 4222 if (info->num_tx_holding_buffers < 1) 4223 info->num_tx_holding_buffers = 1; 4224 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS) 4225 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS; 4226 } 4227 } 4228 4229 mgsl_device_count++; 4230 4231 if ( !mgsl_device_list ) 4232 mgsl_device_list = info; 4233 else { 4234 struct mgsl_struct *current_dev = mgsl_device_list; 4235 while( current_dev->next_device ) 4236 current_dev = current_dev->next_device; 4237 current_dev->next_device = info; 4238 } 4239 4240 if ( info->max_frame_size < 4096 ) 4241 info->max_frame_size = 4096; 4242 else if ( info->max_frame_size > 65535 ) 4243 info->max_frame_size = 65535; 4244 4245 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 4246 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n", 
4247 info->hw_version + 1, info->device_name, info->io_base, info->irq_level, 4248 info->phys_memory_base, info->phys_lcr_base, 4249 info->max_frame_size ); 4250 } else { 4251 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n", 4252 info->device_name, info->io_base, info->irq_level, info->dma_level, 4253 info->max_frame_size ); 4254 } 4255 4256#if SYNCLINK_GENERIC_HDLC 4257 hdlcdev_init(info); 4258#endif 4259 4260} /* end of mgsl_add_device() */ 4261 4262static const struct tty_port_operations mgsl_port_ops = { 4263 .carrier_raised = carrier_raised, 4264 .dtr_rts = dtr_rts, 4265}; 4266 4267 4268/* mgsl_allocate_device() 4269 * 4270 * Allocate and initialize a device instance structure 4271 * 4272 * Arguments: none 4273 * Return Value: pointer to mgsl_struct if success, otherwise NULL 4274 */ 4275static struct mgsl_struct* mgsl_allocate_device(void) 4276{ 4277 struct mgsl_struct *info; 4278 4279 info = kzalloc(sizeof(struct mgsl_struct), 4280 GFP_KERNEL); 4281 4282 if (!info) { 4283 printk("Error can't allocate device instance data\n"); 4284 } else { 4285 tty_port_init(&info->port); 4286 info->port.ops = &mgsl_port_ops; 4287 info->magic = MGSL_MAGIC; 4288 INIT_WORK(&info->task, mgsl_bh_handler); 4289 info->max_frame_size = 4096; 4290 info->port.close_delay = 5*HZ/10; 4291 info->port.closing_wait = 30*HZ; 4292 init_waitqueue_head(&info->status_event_wait_q); 4293 init_waitqueue_head(&info->event_wait_q); 4294 spin_lock_init(&info->irq_spinlock); 4295 spin_lock_init(&info->netlock); 4296 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); 4297 info->idle_mode = HDLC_TXIDLE_FLAGS; 4298 info->num_tx_dma_buffers = 1; 4299 info->num_tx_holding_buffers = 0; 4300 } 4301 4302 return info; 4303 4304} /* end of mgsl_allocate_device()*/ 4305 4306static const struct tty_operations mgsl_ops = { 4307 .install = mgsl_install, 4308 .open = mgsl_open, 4309 .close = mgsl_close, 4310 .write = mgsl_write, 4311 .put_char = mgsl_put_char, 4312 .flush_chars = 
mgsl_flush_chars, 4313 .write_room = mgsl_write_room, 4314 .chars_in_buffer = mgsl_chars_in_buffer, 4315 .flush_buffer = mgsl_flush_buffer, 4316 .ioctl = mgsl_ioctl, 4317 .throttle = mgsl_throttle, 4318 .unthrottle = mgsl_unthrottle, 4319 .send_xchar = mgsl_send_xchar, 4320 .break_ctl = mgsl_break, 4321 .wait_until_sent = mgsl_wait_until_sent, 4322 .set_termios = mgsl_set_termios, 4323 .stop = mgsl_stop, 4324 .start = mgsl_start, 4325 .hangup = mgsl_hangup, 4326 .tiocmget = tiocmget, 4327 .tiocmset = tiocmset, 4328 .get_icount = msgl_get_icount, 4329 .proc_fops = &mgsl_proc_fops, 4330}; 4331 4332/* 4333 * perform tty device initialization 4334 */ 4335static int mgsl_init_tty(void) 4336{ 4337 int rc; 4338 4339 serial_driver = alloc_tty_driver(128); 4340 if (!serial_driver) 4341 return -ENOMEM; 4342 4343 serial_driver->driver_name = "synclink"; 4344 serial_driver->name = "ttySL"; 4345 serial_driver->major = ttymajor; 4346 serial_driver->minor_start = 64; 4347 serial_driver->type = TTY_DRIVER_TYPE_SERIAL; 4348 serial_driver->subtype = SERIAL_TYPE_NORMAL; 4349 serial_driver->init_termios = tty_std_termios; 4350 serial_driver->init_termios.c_cflag = 4351 B9600 | CS8 | CREAD | HUPCL | CLOCAL; 4352 serial_driver->init_termios.c_ispeed = 9600; 4353 serial_driver->init_termios.c_ospeed = 9600; 4354 serial_driver->flags = TTY_DRIVER_REAL_RAW; 4355 tty_set_operations(serial_driver, &mgsl_ops); 4356 if ((rc = tty_register_driver(serial_driver)) < 0) { 4357 printk("%s(%d):Couldn't register serial driver\n", 4358 __FILE__,__LINE__); 4359 put_tty_driver(serial_driver); 4360 serial_driver = NULL; 4361 return rc; 4362 } 4363 4364 printk("%s %s, tty major#%d\n", 4365 driver_name, driver_version, 4366 serial_driver->major); 4367 return 0; 4368} 4369 4370/* enumerate user specified ISA adapters 4371 */ 4372static void mgsl_enum_isa_devices(void) 4373{ 4374 struct mgsl_struct *info; 4375 int i; 4376 4377 /* Check for user specified ISA devices */ 4378 4379 for (i=0 ;(i < 
MAX_ISA_DEVICES) && io[i] && irq[i]; i++){ 4380 if ( debug_level >= DEBUG_LEVEL_INFO ) 4381 printk("ISA device specified io=%04X,irq=%d,dma=%d\n", 4382 io[i], irq[i], dma[i] ); 4383 4384 info = mgsl_allocate_device(); 4385 if ( !info ) { 4386 /* error allocating device instance data */ 4387 if ( debug_level >= DEBUG_LEVEL_ERROR ) 4388 printk( "can't allocate device instance data.\n"); 4389 continue; 4390 } 4391 4392 /* Copy user configuration info to device instance data */ 4393 info->io_base = (unsigned int)io[i]; 4394 info->irq_level = (unsigned int)irq[i]; 4395 info->irq_level = irq_canonicalize(info->irq_level); 4396 info->dma_level = (unsigned int)dma[i]; 4397 info->bus_type = MGSL_BUS_TYPE_ISA; 4398 info->io_addr_size = 16; 4399 info->irq_flags = 0; 4400 4401 mgsl_add_device( info ); 4402 } 4403} 4404 4405static void synclink_cleanup(void) 4406{ 4407 int rc; 4408 struct mgsl_struct *info; 4409 struct mgsl_struct *tmp; 4410 4411 printk("Unloading %s: %s\n", driver_name, driver_version); 4412 4413 if (serial_driver) { 4414 if ((rc = tty_unregister_driver(serial_driver))) 4415 printk("%s(%d) failed to unregister tty driver err=%d\n", 4416 __FILE__,__LINE__,rc); 4417 put_tty_driver(serial_driver); 4418 } 4419 4420 info = mgsl_device_list; 4421 while(info) { 4422#if SYNCLINK_GENERIC_HDLC 4423 hdlcdev_exit(info); 4424#endif 4425 mgsl_release_resources(info); 4426 tmp = info; 4427 info = info->next_device; 4428 tty_port_destroy(&tmp->port); 4429 kfree(tmp); 4430 } 4431 4432 if (pci_registered) 4433 pci_unregister_driver(&synclink_pci_driver); 4434} 4435 4436static int __init synclink_init(void) 4437{ 4438 int rc; 4439 4440 if (break_on_load) { 4441 mgsl_get_text_ptr(); 4442 BREAKPOINT(); 4443 } 4444 4445 printk("%s %s\n", driver_name, driver_version); 4446 4447 mgsl_enum_isa_devices(); 4448 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0) 4449 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); 4450 else 4451 pci_registered = true; 4452 4453 
if ((rc = mgsl_init_tty()) < 0) 4454 goto error; 4455 4456 return 0; 4457 4458error: 4459 synclink_cleanup(); 4460 return rc; 4461} 4462 4463static void __exit synclink_exit(void) 4464{ 4465 synclink_cleanup(); 4466} 4467 4468module_init(synclink_init); 4469module_exit(synclink_exit); 4470 4471/* 4472 * usc_RTCmd() 4473 * 4474 * Issue a USC Receive/Transmit command to the 4475 * Channel Command/Address Register (CCAR). 4476 * 4477 * Notes: 4478 * 4479 * The command is encoded in the most significant 5 bits <15..11> 4480 * of the CCAR value. Bits <10..7> of the CCAR must be preserved 4481 * and Bits <6..0> must be written as zeros. 4482 * 4483 * Arguments: 4484 * 4485 * info pointer to device information structure 4486 * Cmd command mask (use symbolic macros) 4487 * 4488 * Return Value: 4489 * 4490 * None 4491 */ 4492static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ) 4493{ 4494 /* output command to CCAR in bits <15..11> */ 4495 /* preserve bits <10..7>, bits <6..0> must be zero */ 4496 4497 outw( Cmd + info->loopback_bits, info->io_base + CCAR ); 4498 4499 /* Read to flush write to CCAR */ 4500 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4501 inw( info->io_base + CCAR ); 4502 4503} /* end of usc_RTCmd() */ 4504 4505/* 4506 * usc_DmaCmd() 4507 * 4508 * Issue a DMA command to the DMA Command/Address Register (DCAR). 
4509 * 4510 * Arguments: 4511 * 4512 * info pointer to device information structure 4513 * Cmd DMA command mask (usc_DmaCmd_XX Macros) 4514 * 4515 * Return Value: 4516 * 4517 * None 4518 */ 4519static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ) 4520{ 4521 /* write command mask to DCAR */ 4522 outw( Cmd + info->mbre_bit, info->io_base ); 4523 4524 /* Read to flush write to DCAR */ 4525 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4526 inw( info->io_base ); 4527 4528} /* end of usc_DmaCmd() */ 4529 4530/* 4531 * usc_OutDmaReg() 4532 * 4533 * Write a 16-bit value to a USC DMA register 4534 * 4535 * Arguments: 4536 * 4537 * info pointer to device info structure 4538 * RegAddr register address (number) for write 4539 * RegValue 16-bit value to write to register 4540 * 4541 * Return Value: 4542 * 4543 * None 4544 * 4545 */ 4546static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4547{ 4548 /* Note: The DCAR is located at the adapter base address */ 4549 /* Note: must preserve state of BIT8 in DCAR */ 4550 4551 outw( RegAddr + info->mbre_bit, info->io_base ); 4552 outw( RegValue, info->io_base ); 4553 4554 /* Read to flush write to DCAR */ 4555 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4556 inw( info->io_base ); 4557 4558} /* end of usc_OutDmaReg() */ 4559 4560/* 4561 * usc_InDmaReg() 4562 * 4563 * Read a 16-bit value from a DMA register 4564 * 4565 * Arguments: 4566 * 4567 * info pointer to device info structure 4568 * RegAddr register address (number) to read from 4569 * 4570 * Return Value: 4571 * 4572 * The 16-bit value read from register 4573 * 4574 */ 4575static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr ) 4576{ 4577 /* Note: The DCAR is located at the adapter base address */ 4578 /* Note: must preserve state of BIT8 in DCAR */ 4579 4580 outw( RegAddr + info->mbre_bit, info->io_base ); 4581 return inw( info->io_base ); 4582 4583} /* end of usc_InDmaReg() */ 4584 4585/* 4586 * 4587 * usc_OutReg() 4588 * 4589 * Write a 
16-bit value to a USC serial channel register 4590 * 4591 * Arguments: 4592 * 4593 * info pointer to device info structure 4594 * RegAddr register address (number) to write to 4595 * RegValue 16-bit value to write to register 4596 * 4597 * Return Value: 4598 * 4599 * None 4600 * 4601 */ 4602static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4603{ 4604 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4605 outw( RegValue, info->io_base + CCAR ); 4606 4607 /* Read to flush write to CCAR */ 4608 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4609 inw( info->io_base + CCAR ); 4610 4611} /* end of usc_OutReg() */ 4612 4613/* 4614 * usc_InReg() 4615 * 4616 * Reads a 16-bit value from a USC serial channel register 4617 * 4618 * Arguments: 4619 * 4620 * info pointer to device extension 4621 * RegAddr register address (number) to read from 4622 * 4623 * Return Value: 4624 * 4625 * 16-bit value read from register 4626 */ 4627static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) 4628{ 4629 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4630 return inw( info->io_base + CCAR ); 4631 4632} /* end of usc_InReg() */ 4633 4634/* usc_set_sdlc_mode() 4635 * 4636 * Set up the adapter for SDLC DMA communications. 4637 * 4638 * Arguments: info pointer to device instance data 4639 * Return Value: NONE 4640 */ 4641static void usc_set_sdlc_mode( struct mgsl_struct *info ) 4642{ 4643 u16 RegValue; 4644 bool PreSL1660; 4645 4646 /* 4647 * determine if the IUSC on the adapter is pre-SL1660. If 4648 * not, take advantage of the UnderWait feature of more 4649 * modern chips. If an underrun occurs and this bit is set, 4650 * the transmitter will idle the programmed idle pattern 4651 * until the driver has time to service the underrun. Otherwise, 4652 * the dma controller may get the cycles previously requested 4653 * and begin transmitting queued tx data. 
4654 */ 4655 usc_OutReg(info,TMCR,0x1f); 4656 RegValue=usc_InReg(info,TMDR); 4657 PreSL1660 = (RegValue == IUSC_PRE_SL1660); 4658 4659 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 4660 { 4661 /* 4662 ** Channel Mode Register (CMR) 4663 ** 4664 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun 4665 ** <13> 0 0 = Transmit Disabled (initially) 4666 ** <12> 0 1 = Consecutive Idles share common 0 4667 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop 4668 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling 4669 ** <3..0> 0110 Receiver Mode = HDLC/SDLC 4670 ** 4671 ** 1000 1110 0000 0110 = 0x8e06 4672 */ 4673 RegValue = 0x8e06; 4674 4675 /*-------------------------------------------------- 4676 * ignore user options for UnderRun Actions and 4677 * preambles 4678 *--------------------------------------------------*/ 4679 } 4680 else 4681 { 4682 /* Channel mode Register (CMR) 4683 * 4684 * <15..14> 00 Tx Sub modes, Underrun Action 4685 * <13> 0 1 = Send Preamble before opening flag 4686 * <12> 0 1 = Consecutive Idles share common 0 4687 * <11..8> 0110 Transmitter mode = HDLC/SDLC 4688 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling 4689 * <3..0> 0110 Receiver mode = HDLC/SDLC 4690 * 4691 * 0000 0110 0000 0110 = 0x0606 4692 */ 4693 if (info->params.mode == MGSL_MODE_RAW) { 4694 RegValue = 0x0001; /* Set Receive mode = external sync */ 4695 4696 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ 4697 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); 4698 4699 /* 4700 * TxSubMode: 4701 * CMR <15> 0 Don't send CRC on Tx Underrun 4702 * CMR <14> x undefined 4703 * CMR <13> 0 Send preamble before openning sync 4704 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength 4705 * 4706 * TxMode: 4707 * CMR <11-8) 0100 MonoSync 4708 * 4709 * 0x00 0100 xxxx xxxx 04xx 4710 */ 4711 RegValue |= 0x0400; 4712 } 4713 else { 4714 4715 RegValue = 0x0606; 4716 4717 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 ) 4718 RegValue |= BIT14; 
4719 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) 4720 RegValue |= BIT15; 4721 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) 4722 RegValue |= BIT15 + BIT14; 4723 } 4724 4725 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) 4726 RegValue |= BIT13; 4727 } 4728 4729 if ( info->params.mode == MGSL_MODE_HDLC && 4730 (info->params.flags & HDLC_FLAG_SHARE_ZERO) ) 4731 RegValue |= BIT12; 4732 4733 if ( info->params.addr_filter != 0xff ) 4734 { 4735 /* set up receive address filtering */ 4736 usc_OutReg( info, RSR, info->params.addr_filter ); 4737 RegValue |= BIT4; 4738 } 4739 4740 usc_OutReg( info, CMR, RegValue ); 4741 info->cmr_value = RegValue; 4742 4743 /* Receiver mode Register (RMR) 4744 * 4745 * <15..13> 000 encoding 4746 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4747 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) 4748 * <9> 0 1 = Include Receive chars in CRC 4749 * <8> 1 1 = Use Abort/PE bit as abort indicator 4750 * <7..6> 00 Even parity 4751 * <5> 0 parity disabled 4752 * <4..2> 000 Receive Char Length = 8 bits 4753 * <1..0> 00 Disable Receiver 4754 * 4755 * 0000 0101 0000 0000 = 0x0500 4756 */ 4757 4758 RegValue = 0x0500; 4759 4760 switch ( info->params.encoding ) { 4761 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4762 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4763 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4764 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4765 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4766 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4767 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4768 } 4769 4770 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4771 RegValue |= BIT9; 4772 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4773 RegValue |= ( BIT12 | BIT10 | BIT9 ); 4774 4775 usc_OutReg( info, RMR, RegValue ); 
4776 4777 /* Set the Receive count Limit Register (RCLR) to 0xffff. */ 4778 /* When an opening flag of an SDLC frame is recognized the */ 4779 /* Receive Character count (RCC) is loaded with the value in */ 4780 /* RCLR. The RCC is decremented for each received byte. The */ 4781 /* value of RCC is stored after the closing flag of the frame */ 4782 /* allowing the frame size to be computed. */ 4783 4784 usc_OutReg( info, RCLR, RCLRVALUE ); 4785 4786 usc_RCmd( info, RCmd_SelectRicrdma_level ); 4787 4788 /* Receive Interrupt Control Register (RICR) 4789 * 4790 * <15..8> ? RxFIFO DMA Request Level 4791 * <7> 0 Exited Hunt IA (Interrupt Arm) 4792 * <6> 0 Idle Received IA 4793 * <5> 0 Break/Abort IA 4794 * <4> 0 Rx Bound IA 4795 * <3> 1 Queued status reflects oldest 2 bytes in FIFO 4796 * <2> 0 Abort/PE IA 4797 * <1> 1 Rx Overrun IA 4798 * <0> 0 Select TC0 value for readback 4799 * 4800 * 0000 0000 0000 1000 = 0x000a 4801 */ 4802 4803 /* Carry over the Exit Hunt and Idle Received bits */ 4804 /* in case they have been armed by usc_ArmEvents. 
*/ 4805 4806 RegValue = usc_InReg( info, RICR ) & 0xc0; 4807 4808 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4809 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); 4810 else 4811 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) ); 4812 4813 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ 4814 4815 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 4816 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 4817 4818 /* Transmit mode Register (TMR) 4819 * 4820 * <15..13> 000 encoding 4821 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4822 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) 4823 * <9> 0 1 = Tx CRC Enabled 4824 * <8> 0 1 = Append CRC to end of transmit frame 4825 * <7..6> 00 Transmit parity Even 4826 * <5> 0 Transmit parity Disabled 4827 * <4..2> 000 Tx Char Length = 8 bits 4828 * <1..0> 00 Disable Transmitter 4829 * 4830 * 0000 0100 0000 0000 = 0x0400 4831 */ 4832 4833 RegValue = 0x0400; 4834 4835 switch ( info->params.encoding ) { 4836 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4837 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4838 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4839 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4840 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4841 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4842 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4843 } 4844 4845 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4846 RegValue |= BIT9 + BIT8; 4847 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4848 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); 4849 4850 usc_OutReg( info, TMR, RegValue ); 4851 4852 usc_set_txidle( info ); 4853 4854 4855 usc_TCmd( info, TCmd_SelectTicrdma_level ); 4856 4857 /* Transmit Interrupt Control Register (TICR) 4858 * 4859 * <15..8> ? 
Transmit FIFO DMA Level 4860 * <7> 0 Present IA (Interrupt Arm) 4861 * <6> 0 Idle Sent IA 4862 * <5> 1 Abort Sent IA 4863 * <4> 1 EOF/EOM Sent IA 4864 * <3> 0 CRC Sent IA 4865 * <2> 1 1 = Wait for SW Trigger to Start Frame 4866 * <1> 1 Tx Underrun IA 4867 * <0> 0 TC0 constant on read back 4868 * 4869 * 0000 0000 0011 0110 = 0x0036 4870 */ 4871 4872 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4873 usc_OutReg( info, TICR, 0x0736 ); 4874 else 4875 usc_OutReg( info, TICR, 0x1436 ); 4876 4877 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 4878 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 4879 4880 /* 4881 ** Transmit Command/Status Register (TCSR) 4882 ** 4883 ** <15..12> 0000 TCmd 4884 ** <11> 0/1 UnderWait 4885 ** <10..08> 000 TxIdle 4886 ** <7> x PreSent 4887 ** <6> x IdleSent 4888 ** <5> x AbortSent 4889 ** <4> x EOF/EOM Sent 4890 ** <3> x CRC Sent 4891 ** <2> x All Sent 4892 ** <1> x TxUnder 4893 ** <0> x TxEmpty 4894 ** 4895 ** 0000 0000 0000 0000 = 0x0000 4896 */ 4897 info->tcsr_value = 0; 4898 4899 if ( !PreSL1660 ) 4900 info->tcsr_value |= TCSR_UNDERWAIT; 4901 4902 usc_OutReg( info, TCSR, info->tcsr_value ); 4903 4904 /* Clock mode Control Register (CMCR) 4905 * 4906 * <15..14> 00 counter 1 Source = Disabled 4907 * <13..12> 00 counter 0 Source = Disabled 4908 * <11..10> 11 BRG1 Input is TxC Pin 4909 * <9..8> 11 BRG0 Input is TxC Pin 4910 * <7..6> 01 DPLL Input is BRG1 Output 4911 * <5..3> XXX TxCLK comes from Port 0 4912 * <2..0> XXX RxCLK comes from Port 1 4913 * 4914 * 0000 1111 0111 0111 = 0x0f77 4915 */ 4916 4917 RegValue = 0x0f40; 4918 4919 if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) 4920 RegValue |= 0x0003; /* RxCLK from DPLL */ 4921 else if ( info->params.flags & HDLC_FLAG_RXC_BRG ) 4922 RegValue |= 0x0004; /* RxCLK from BRG0 */ 4923 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) 4924 RegValue |= 0x0006; /* RxCLK from TXC Input */ 4925 else 4926 RegValue |= 0x0007; /* RxCLK from Port1 */ 4927 4928 if ( info->params.flags & HDLC_FLAG_TXC_DPLL 
) 4929 RegValue |= 0x0018; /* TxCLK from DPLL */ 4930 else if ( info->params.flags & HDLC_FLAG_TXC_BRG ) 4931 RegValue |= 0x0020; /* TxCLK from BRG0 */ 4932 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN) 4933 RegValue |= 0x0038; /* RxCLK from TXC Input */ 4934 else 4935 RegValue |= 0x0030; /* TxCLK from Port0 */ 4936 4937 usc_OutReg( info, CMCR, RegValue ); 4938 4939 4940 /* Hardware Configuration Register (HCR) 4941 * 4942 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4 4943 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div 4944 * <12> 0 CVOK:0=report code violation in biphase 4945 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4 4946 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level 4947 * <7..6> 00 reserved 4948 * <5> 0 BRG1 mode:0=continuous,1=single cycle 4949 * <4> X BRG1 Enable 4950 * <3..2> 00 reserved 4951 * <1> 0 BRG0 mode:0=continuous,1=single cycle 4952 * <0> 0 BRG0 Enable 4953 */ 4954 4955 RegValue = 0x0000; 4956 4957 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) { 4958 u32 XtalSpeed; 4959 u32 DpllDivisor; 4960 u16 Tc; 4961 4962 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */ 4963 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */ 4964 4965 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4966 XtalSpeed = 11059200; 4967 else 4968 XtalSpeed = 14745600; 4969 4970 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { 4971 DpllDivisor = 16; 4972 RegValue |= BIT10; 4973 } 4974 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { 4975 DpllDivisor = 8; 4976 RegValue |= BIT11; 4977 } 4978 else 4979 DpllDivisor = 32; 4980 4981 /* Tc = (Xtal/Speed) - 1 */ 4982 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 4983 /* then rounding up gives a more precise time constant. Instead */ 4984 /* of rounding up and then subtracting 1 we just don't subtract */ 4985 /* the one in this case. 
*/ 4986 4987 /*-------------------------------------------------- 4988 * ejz: for DPLL mode, application should use the 4989 * same clock speed as the partner system, even 4990 * though clocking is derived from the input RxData. 4991 * In case the user uses a 0 for the clock speed, 4992 * default to 0xffffffff and don't try to divide by 4993 * zero 4994 *--------------------------------------------------*/ 4995 if ( info->params.clock_speed ) 4996 { 4997 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); 4998 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) 4999 / info->params.clock_speed) ) 5000 Tc--; 5001 } 5002 else 5003 Tc = -1; 5004 5005 5006 /* Write 16-bit Time Constant for BRG1 */ 5007 usc_OutReg( info, TC1R, Tc ); 5008 5009 RegValue |= BIT4; /* enable BRG1 */ 5010 5011 switch ( info->params.encoding ) { 5012 case HDLC_ENCODING_NRZ: 5013 case HDLC_ENCODING_NRZB: 5014 case HDLC_ENCODING_NRZI_MARK: 5015 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; 5016 case HDLC_ENCODING_BIPHASE_MARK: 5017 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; 5018 case HDLC_ENCODING_BIPHASE_LEVEL: 5019 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break; 5020 } 5021 } 5022 5023 usc_OutReg( info, HCR, RegValue ); 5024 5025 5026 /* Channel Control/status Register (CCSR) 5027 * 5028 * <15> X RCC FIFO Overflow status (RO) 5029 * <14> X RCC FIFO Not Empty status (RO) 5030 * <13> 0 1 = Clear RCC FIFO (WO) 5031 * <12> X DPLL Sync (RW) 5032 * <11> X DPLL 2 Missed Clocks status (RO) 5033 * <10> X DPLL 1 Missed Clock status (RO) 5034 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 5035 * <7> X SDLC Loop On status (RO) 5036 * <6> X SDLC Loop Send status (RO) 5037 * <5> 1 Bypass counters for TxClk and RxClk (RW) 5038 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 5039 * <1..0> 00 reserved 5040 * 5041 * 0000 0000 0010 0000 = 0x0020 5042 */ 5043 5044 usc_OutReg( info, CCSR, 0x1020 ); 5045 5046 5047 if ( 
info->params.flags & HDLC_FLAG_AUTO_CTS ) { 5048 usc_OutReg( info, SICR, 5049 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); 5050 } 5051 5052 5053 /* enable Master Interrupt Enable bit (MIE) */ 5054 usc_EnableMasterIrqBit( info ); 5055 5056 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA + 5057 TRANSMIT_STATUS + TRANSMIT_DATA + MISC); 5058 5059 /* arm RCC underflow interrupt */ 5060 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); 5061 usc_EnableInterrupts(info, MISC); 5062 5063 info->mbre_bit = 0; 5064 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5065 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5066 info->mbre_bit = BIT8; 5067 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ 5068 5069 if (info->bus_type == MGSL_BUS_TYPE_ISA) { 5070 /* Enable DMAEN (Port 7, Bit 14) */ 5071 /* This connects the DMA request signal to the ISA bus */ 5072 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); 5073 } 5074 5075 /* DMA Control Register (DCR) 5076 * 5077 * <15..14> 10 Priority mode = Alternating Tx/Rx 5078 * 01 Rx has priority 5079 * 00 Tx has priority 5080 * 5081 * <13> 1 Enable Priority Preempt per DCR<15..14> 5082 * (WARNING DCR<11..10> must be 00 when this is 1) 5083 * 0 Choose activate channel per DCR<11..10> 5084 * 5085 * <12> 0 Little Endian for Array/List 5086 * <11..10> 00 Both Channels can use each bus grant 5087 * <9..6> 0000 reserved 5088 * <5> 0 7 CLK - Minimum Bus Re-request Interval 5089 * <4> 0 1 = drive D/C and S/D pins 5090 * <3> 1 1 = Add one wait state to all DMA cycles. 5091 * <2> 0 1 = Strobe /UAS on every transfer. 
5092 * <1..0> 11 Addr incrementing only affects LS24 bits 5093 * 5094 * 0110 0000 0000 1011 = 0x600b 5095 */ 5096 5097 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 5098 /* PCI adapter does not need DMA wait state */ 5099 usc_OutDmaReg( info, DCR, 0xa00b ); 5100 } 5101 else 5102 usc_OutDmaReg( info, DCR, 0x800b ); 5103 5104 5105 /* Receive DMA mode Register (RDMR) 5106 * 5107 * <15..14> 11 DMA mode = Linked List Buffer mode 5108 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry 5109 * <12> 1 Clear count of List Entry after fetching 5110 * <11..10> 00 Address mode = Increment 5111 * <9> 1 Terminate Buffer on RxBound 5112 * <8> 0 Bus Width = 16bits 5113 * <7..0> ? status Bits (write as 0s) 5114 * 5115 * 1111 0010 0000 0000 = 0xf200 5116 */ 5117 5118 usc_OutDmaReg( info, RDMR, 0xf200 ); 5119 5120 5121 /* Transmit DMA mode Register (TDMR) 5122 * 5123 * <15..14> 11 DMA mode = Linked List Buffer mode 5124 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry 5125 * <12> 1 Clear count of List Entry after fetching 5126 * <11..10> 00 Address mode = Increment 5127 * <9> 1 Terminate Buffer on end of frame 5128 * <8> 0 Bus Width = 16bits 5129 * <7..0> ? 
status Bits (Read Only so write as 0) 5130 * 5131 * 1111 0010 0000 0000 = 0xf200 5132 */ 5133 5134 usc_OutDmaReg( info, TDMR, 0xf200 ); 5135 5136 5137 /* DMA Interrupt Control Register (DICR) 5138 * 5139 * <15> 1 DMA Interrupt Enable 5140 * <14> 0 1 = Disable IEO from USC 5141 * <13> 0 1 = Don't provide vector during IntAck 5142 * <12> 1 1 = Include status in Vector 5143 * <10..2> 0 reserved, Must be 0s 5144 * <1> 0 1 = Rx DMA Interrupt Enabled 5145 * <0> 0 1 = Tx DMA Interrupt Enabled 5146 * 5147 * 1001 0000 0000 0000 = 0x9000 5148 */ 5149 5150 usc_OutDmaReg( info, DICR, 0x9000 ); 5151 5152 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ 5153 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ 5154 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ 5155 5156 /* Channel Control Register (CCR) 5157 * 5158 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) 5159 * <13> 0 Trigger Tx on SW Command Disabled 5160 * <12> 0 Flag Preamble Disabled 5161 * <11..10> 00 Preamble Length 5162 * <9..8> 00 Preamble Pattern 5163 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) 5164 * <5> 0 Trigger Rx on SW Command Disabled 5165 * <4..0> 0 reserved 5166 * 5167 * 1000 0000 1000 0000 = 0x8080 5168 */ 5169 5170 RegValue = 0x8080; 5171 5172 switch ( info->params.preamble_length ) { 5173 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; 5174 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; 5175 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break; 5176 } 5177 5178 switch ( info->params.preamble ) { 5179 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break; 5180 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; 5181 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; 5182 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break; 5183 } 5184 5185 usc_OutReg( info, CCR, RegValue ); 5186 5187 5188 /* 5189 * Burst/Dwell Control Register 5190 * 5191 * <15..8> 0x20 
Maximum number of transfers per bus grant 5192 * <7..0> 0x00 Maximum number of clock cycles per bus grant 5193 */ 5194 5195 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 5196 /* don't limit bus occupancy on PCI adapter */ 5197 usc_OutDmaReg( info, BDCR, 0x0000 ); 5198 } 5199 else 5200 usc_OutDmaReg( info, BDCR, 0x2000 ); 5201 5202 usc_stop_transmitter(info); 5203 usc_stop_receiver(info); 5204 5205} /* end of usc_set_sdlc_mode() */ 5206 5207/* usc_enable_loopback() 5208 * 5209 * Set the 16C32 for internal loopback mode. 5210 * The TxCLK and RxCLK signals are generated from the BRG0 and 5211 * the TxD is looped back to the RxD internally. 5212 * 5213 * Arguments: info pointer to device instance data 5214 * enable 1 = enable loopback, 0 = disable 5215 * Return Value: None 5216 */ 5217static void usc_enable_loopback(struct mgsl_struct *info, int enable) 5218{ 5219 if (enable) { 5220 /* blank external TXD output */ 5221 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6)); 5222 5223 /* Clock mode Control Register (CMCR) 5224 * 5225 * <15..14> 00 counter 1 Disabled 5226 * <13..12> 00 counter 0 Disabled 5227 * <11..10> 11 BRG1 Input is TxC Pin 5228 * <9..8> 11 BRG0 Input is TxC Pin 5229 * <7..6> 01 DPLL Input is BRG1 Output 5230 * <5..3> 100 TxCLK comes from BRG0 5231 * <2..0> 100 RxCLK comes from BRG0 5232 * 5233 * 0000 1111 0110 0100 = 0x0f64 5234 */ 5235 5236 usc_OutReg( info, CMCR, 0x0f64 ); 5237 5238 /* Write 16-bit Time Constant for BRG0 */ 5239 /* use clock speed if available, otherwise use 8 for diagnostics */ 5240 if (info->params.clock_speed) { 5241 if (info->bus_type == MGSL_BUS_TYPE_PCI) 5242 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1)); 5243 else 5244 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1)); 5245 } else 5246 usc_OutReg(info, TC0R, (u16)8); 5247 5248 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0 5249 mode = Continuous Set Bit 0 to enable BRG0. 
*/ 5250 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 5251 5252 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 5253 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004)); 5254 5255 /* set Internal Data loopback mode */ 5256 info->loopback_bits = 0x300; 5257 outw( 0x0300, info->io_base + CCAR ); 5258 } else { 5259 /* enable external TXD output */ 5260 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6)); 5261 5262 /* clear Internal Data loopback mode */ 5263 info->loopback_bits = 0; 5264 outw( 0,info->io_base + CCAR ); 5265 } 5266 5267} /* end of usc_enable_loopback() */ 5268 5269/* usc_enable_aux_clock() 5270 * 5271 * Enabled the AUX clock output at the specified frequency. 5272 * 5273 * Arguments: 5274 * 5275 * info pointer to device extension 5276 * data_rate data rate of clock in bits per second 5277 * A data rate of 0 disables the AUX clock. 5278 * 5279 * Return Value: None 5280 */ 5281static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate ) 5282{ 5283 u32 XtalSpeed; 5284 u16 Tc; 5285 5286 if ( data_rate ) { 5287 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 5288 XtalSpeed = 11059200; 5289 else 5290 XtalSpeed = 14745600; 5291 5292 5293 /* Tc = (Xtal/Speed) - 1 */ 5294 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 5295 /* then rounding up gives a more precise time constant. Instead */ 5296 /* of rounding up and then subtracting 1 we just don't subtract */ 5297 /* the one in this case. */ 5298 5299 5300 Tc = (u16)(XtalSpeed/data_rate); 5301 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) ) 5302 Tc--; 5303 5304 /* Write 16-bit Time Constant for BRG0 */ 5305 usc_OutReg( info, TC0R, Tc ); 5306 5307 /* 5308 * Hardware Configuration Register (HCR) 5309 * Clear Bit 1, BRG0 mode = Continuous 5310 * Set Bit 0 to enable BRG0. 
5311 */ 5312 5313 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 5314 5315 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 5316 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); 5317 } else { 5318 /* data rate == 0 so turn off BRG0 */ 5319 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); 5320 } 5321 5322} /* end of usc_enable_aux_clock() */ 5323 5324/* 5325 * 5326 * usc_process_rxoverrun_sync() 5327 * 5328 * This function processes a receive overrun by resetting the 5329 * receive DMA buffers and issuing a Purge Rx FIFO command 5330 * to allow the receiver to continue receiving. 5331 * 5332 * Arguments: 5333 * 5334 * info pointer to device extension 5335 * 5336 * Return Value: None 5337 */ 5338static void usc_process_rxoverrun_sync( struct mgsl_struct *info ) 5339{ 5340 int start_index; 5341 int end_index; 5342 int frame_start_index; 5343 bool start_of_frame_found = false; 5344 bool end_of_frame_found = false; 5345 bool reprogram_dma = false; 5346 5347 DMABUFFERENTRY *buffer_list = info->rx_buffer_list; 5348 u32 phys_addr; 5349 5350 usc_DmaCmd( info, DmaCmd_PauseRxChannel ); 5351 usc_RCmd( info, RCmd_EnterHuntmode ); 5352 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5353 5354 /* CurrentRxBuffer points to the 1st buffer of the next */ 5355 /* possibly available receive frame. */ 5356 5357 frame_start_index = start_index = end_index = info->current_rx_buffer; 5358 5359 /* Search for an unfinished string of buffers. This means */ 5360 /* that a receive frame started (at least one buffer with */ 5361 /* count set to zero) but there is no terminiting buffer */ 5362 /* (status set to non-zero). */ 5363 5364 while( !buffer_list[end_index].count ) 5365 { 5366 /* Count field has been reset to zero by 16C32. */ 5367 /* This buffer is currently in use. 
*/ 5368 5369 if ( !start_of_frame_found ) 5370 { 5371 start_of_frame_found = true; 5372 frame_start_index = end_index; 5373 end_of_frame_found = false; 5374 } 5375 5376 if ( buffer_list[end_index].status ) 5377 { 5378 /* Status field has been set by 16C32. */ 5379 /* This is the last buffer of a received frame. */ 5380 5381 /* We want to leave the buffers for this frame intact. */ 5382 /* Move on to next possible frame. */ 5383 5384 start_of_frame_found = false; 5385 end_of_frame_found = true; 5386 } 5387 5388 /* advance to next buffer entry in linked list */ 5389 end_index++; 5390 if ( end_index == info->rx_buffer_count ) 5391 end_index = 0; 5392 5393 if ( start_index == end_index ) 5394 { 5395 /* The entire list has been searched with all Counts == 0 and */ 5396 /* all Status == 0. The receive buffers are */ 5397 /* completely screwed, reset all receive buffers! */ 5398 mgsl_reset_rx_dma_buffers( info ); 5399 frame_start_index = 0; 5400 start_of_frame_found = false; 5401 reprogram_dma = true; 5402 break; 5403 } 5404 } 5405 5406 if ( start_of_frame_found && !end_of_frame_found ) 5407 { 5408 /* There is an unfinished string of receive DMA buffers */ 5409 /* as a result of the receiver overrun. */ 5410 5411 /* Reset the buffers for the unfinished frame */ 5412 /* and reprogram the receive DMA controller to start */ 5413 /* at the 1st buffer of unfinished frame. */ 5414 5415 start_index = frame_start_index; 5416 5417 do 5418 { 5419 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE; 5420 5421 /* Adjust index for wrap around. 
*/ 5422 if ( start_index == info->rx_buffer_count ) 5423 start_index = 0; 5424 5425 } while( start_index != end_index ); 5426 5427 reprogram_dma = true; 5428 } 5429 5430 if ( reprogram_dma ) 5431 { 5432 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); 5433 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS); 5434 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS); 5435 5436 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 5437 5438 /* This empties the receive FIFO and loads the RCC with RCLR */ 5439 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5440 5441 /* program 16C32 with physical address of 1st DMA buffer entry */ 5442 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry; 5443 usc_OutDmaReg( info, NRARL, (u16)phys_addr ); 5444 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); 5445 5446 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5447 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); 5448 usc_EnableInterrupts( info, RECEIVE_STATUS ); 5449 5450 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ 5451 /* 2. 
Enable Receive DMA Interrupts (BIT1 of DICR) */ 5452 5453 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 ); 5454 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); 5455 usc_DmaCmd( info, DmaCmd_InitRxChannel ); 5456 if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) 5457 usc_EnableReceiver(info,ENABLE_AUTO_DCD); 5458 else 5459 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); 5460 } 5461 else 5462 { 5463 /* This empties the receive FIFO and loads the RCC with RCLR */ 5464 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5465 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5466 } 5467 5468} /* end of usc_process_rxoverrun_sync() */ 5469 5470/* usc_stop_receiver() 5471 * 5472 * Disable USC receiver 5473 * 5474 * Arguments: info pointer to device instance data 5475 * Return Value: None 5476 */ 5477static void usc_stop_receiver( struct mgsl_struct *info ) 5478{ 5479 if (debug_level >= DEBUG_LEVEL_ISR) 5480 printk("%s(%d):usc_stop_receiver(%s)\n", 5481 __FILE__,__LINE__, info->device_name ); 5482 5483 /* Disable receive DMA channel. 
*/ 5484 /* This also disables receive DMA channel interrupts */ 5485 usc_DmaCmd( info, DmaCmd_ResetRxChannel ); 5486 5487 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5488 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); 5489 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS ); 5490 5491 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 5492 5493 /* This empties the receive FIFO and loads the RCC with RCLR */ 5494 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5495 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5496 5497 info->rx_enabled = false; 5498 info->rx_overflow = false; 5499 info->rx_rcc_underrun = false; 5500 5501} /* end of stop_receiver() */ 5502 5503/* usc_start_receiver() 5504 * 5505 * Enable the USC receiver 5506 * 5507 * Arguments: info pointer to device instance data 5508 * Return Value: None 5509 */ 5510static void usc_start_receiver( struct mgsl_struct *info ) 5511{ 5512 u32 phys_addr; 5513 5514 if (debug_level >= DEBUG_LEVEL_ISR) 5515 printk("%s(%d):usc_start_receiver(%s)\n", 5516 __FILE__,__LINE__, info->device_name ); 5517 5518 mgsl_reset_rx_dma_buffers( info ); 5519 usc_stop_receiver( info ); 5520 5521 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5522 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5523 5524 if ( info->params.mode == MGSL_MODE_HDLC || 5525 info->params.mode == MGSL_MODE_RAW ) { 5526 /* DMA mode Transfers */ 5527 /* Program the DMA controller. */ 5528 /* Enable the DMA controller end of buffer interrupt. */ 5529 5530 /* program 16C32 with physical address of 1st DMA buffer entry */ 5531 phys_addr = info->rx_buffer_list[0].phys_entry; 5532 usc_OutDmaReg( info, NRARL, (u16)phys_addr ); 5533 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); 5534 5535 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5536 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); 5537 usc_EnableInterrupts( info, RECEIVE_STATUS ); 5538 5539 /* 1. 
Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ 5540 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */ 5541 5542 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 ); 5543 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); 5544 usc_DmaCmd( info, DmaCmd_InitRxChannel ); 5545 if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) 5546 usc_EnableReceiver(info,ENABLE_AUTO_DCD); 5547 else 5548 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); 5549 } else { 5550 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); 5551 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); 5552 usc_EnableInterrupts(info, RECEIVE_DATA); 5553 5554 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5555 usc_RCmd( info, RCmd_EnterHuntmode ); 5556 5557 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); 5558 } 5559 5560 usc_OutReg( info, CCSR, 0x1020 ); 5561 5562 info->rx_enabled = true; 5563 5564} /* end of usc_start_receiver() */ 5565 5566/* usc_start_transmitter() 5567 * 5568 * Enable the USC transmitter and send a transmit frame if 5569 * one is loaded in the DMA buffers. 5570 * 5571 * Arguments: info pointer to device instance data 5572 * Return Value: None 5573 */ 5574static void usc_start_transmitter( struct mgsl_struct *info ) 5575{ 5576 u32 phys_addr; 5577 unsigned int FrameSize; 5578 5579 if (debug_level >= DEBUG_LEVEL_ISR) 5580 printk("%s(%d):usc_start_transmitter(%s)\n", 5581 __FILE__,__LINE__, info->device_name ); 5582 5583 if ( info->xmit_cnt ) { 5584 5585 /* If auto RTS enabled and RTS is inactive, then assert */ 5586 /* RTS and set a flag indicating that the driver should */ 5587 /* negate RTS when the transmission completes. 
*/ 5588 5589 info->drop_rts_on_tx_done = false; 5590 5591 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) { 5592 usc_get_serial_signals( info ); 5593 if ( !(info->serial_signals & SerialSignal_RTS) ) { 5594 info->serial_signals |= SerialSignal_RTS; 5595 usc_set_serial_signals( info ); 5596 info->drop_rts_on_tx_done = true; 5597 } 5598 } 5599 5600 5601 if ( info->params.mode == MGSL_MODE_ASYNC ) { 5602 if ( !info->tx_active ) { 5603 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL); 5604 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA); 5605 usc_EnableInterrupts(info, TRANSMIT_DATA); 5606 usc_load_txfifo(info); 5607 } 5608 } else { 5609 /* Disable transmit DMA controller while programming. */ 5610 usc_DmaCmd( info, DmaCmd_ResetTxChannel ); 5611 5612 /* Transmit DMA buffer is loaded, so program USC */ 5613 /* to send the frame contained in the buffers. */ 5614 5615 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc; 5616 5617 /* if operating in Raw sync mode, reset the rcc component 5618 * of the tx dma buffer entry, otherwise, the serial controller 5619 * will send a closing sync char after this count. 
5620 */ 5621 if ( info->params.mode == MGSL_MODE_RAW ) 5622 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0; 5623 5624 /* Program the Transmit Character Length Register (TCLR) */ 5625 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ 5626 usc_OutReg( info, TCLR, (u16)FrameSize ); 5627 5628 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 5629 5630 /* Program the address of the 1st DMA Buffer Entry in linked list */ 5631 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry; 5632 usc_OutDmaReg( info, NTARL, (u16)phys_addr ); 5633 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) ); 5634 5635 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 5636 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 5637 usc_EnableInterrupts( info, TRANSMIT_STATUS ); 5638 5639 if ( info->params.mode == MGSL_MODE_RAW && 5640 info->num_tx_dma_buffers > 1 ) { 5641 /* When running external sync mode, attempt to 'stream' transmit */ 5642 /* by filling tx dma buffers as they become available. To do this */ 5643 /* we need to enable Tx DMA EOB Status interrupts : */ 5644 /* */ 5645 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */ 5646 /* 2. 
Enable Transmit DMA Interrupts (BIT0 of DICR) */ 5647 5648 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 ); 5649 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) ); 5650 } 5651 5652 /* Initialize Transmit DMA Channel */ 5653 usc_DmaCmd( info, DmaCmd_InitTxChannel ); 5654 5655 usc_TCmd( info, TCmd_SendFrame ); 5656 5657 mod_timer(&info->tx_timer, jiffies + 5658 msecs_to_jiffies(5000)); 5659 } 5660 info->tx_active = true; 5661 } 5662 5663 if ( !info->tx_enabled ) { 5664 info->tx_enabled = true; 5665 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) 5666 usc_EnableTransmitter(info,ENABLE_AUTO_CTS); 5667 else 5668 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL); 5669 } 5670 5671} /* end of usc_start_transmitter() */ 5672 5673/* usc_stop_transmitter() 5674 * 5675 * Stops the transmitter and DMA 5676 * 5677 * Arguments: info pointer to device isntance data 5678 * Return Value: None 5679 */ 5680static void usc_stop_transmitter( struct mgsl_struct *info ) 5681{ 5682 if (debug_level >= DEBUG_LEVEL_ISR) 5683 printk("%s(%d):usc_stop_transmitter(%s)\n", 5684 __FILE__,__LINE__, info->device_name ); 5685 5686 del_timer(&info->tx_timer); 5687 5688 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 5689 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA ); 5690 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA ); 5691 5692 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL); 5693 usc_DmaCmd( info, DmaCmd_ResetTxChannel ); 5694 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 5695 5696 info->tx_enabled = false; 5697 info->tx_active = false; 5698 5699} /* end of usc_stop_transmitter() */ 5700 5701/* usc_load_txfifo() 5702 * 5703 * Fill the transmit FIFO until the FIFO is full or 5704 * there is no more data to load. 
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void usc_load_txfifo( struct mgsl_struct *info )
{
	int Fifocount;
	u8 TwoBytes[2];

	/* nothing to send: neither buffered data nor a pending high-priority char */
	if ( !info->xmit_cnt && !info->x_char )
		return;

	/* Select transmit FIFO status readback in TICR */
	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	/* load the Transmit FIFO until FIFOs full or all data sent */

	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
 			/* write a 16-bit word from transmit buffer to 16C32 */
			/* (x_char must go via the single-byte path below so it */
			/* keeps its priority ordering) */

			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);

			/* NOTE(review): relies on little-endian 16-bit access to a */
			/* byte pair — fine for this ISA/PCI x86-targeted driver */
			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);

			info->xmit_cnt -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			/* point CCAR at TDR with LSB-only byte access, preserving */
			/* the channel/register-window bits read back from CCAR */
			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
				info->io_base + CCAR );

			if (info->x_char) {
				/* transmit pending high priority char */
				outw( info->x_char,info->io_base + CCAR );
				info->x_char = 0;
			} else {
				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
				info->xmit_cnt--;
			}
			info->icount.tx++;
		}
	}

} /* end of usc_load_txfifo() */

/* usc_reset()
 *
 * Reset the adapter to a known state and prepare it for further use.
 *
 * Also programs the bus interface (BCR via the SDPIN write, PCR, IOCR)
 * to known values and clears the cached mbre/loopback/idle state.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_reset( struct mgsl_struct *info )
{
	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		int i;
		u32 readval;

		/* Set BIT30 of Misc Control Register */
		/* (Local Control Register 0x50) to force reset of USC. */

		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

		info->misc_ctrl_value |= BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/*
		 * Force at least 170ns delay before clearing
		 * reset bit. Each read from LCR takes at least
		 * 30ns so 10 times for 300ns to be safe.
		 */
		for(i=0;i<10;i++)
			readval = *MiscCtrl;	/* dummy timed reads; value intentionally unused */

		info->misc_ctrl_value &= ~BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/* program the PCI local-bus read/write strobe timing descriptor */
		*LCR0BRDR = BUS_DESCRIPTOR(
			1,	// Write Strobe Hold (0-3)
			2,	// Write Strobe Delay (0-3)
			2,	// Read Strobe Delay  (0-3)
			0,	// NWDD (Write data-data) (0-3)
			4,	// NWAD (Write Addr-data) (0-31)
			0,	// NXDA (Read/Write Data-Addr) (0-3)
			0,	// NRDD (Read Data-Data) (0-3)
			5	// NRAD (Read Addr-Data) (0-31)
			);
	} else {
		/* do HW reset */
		outb( 0,info->io_base + 8 );
	}

	/* forget cached controller state — hardware is back at power-on defaults */
	info->mbre_bit = 0;
	info->loopback_bits = 0;
	info->usc_idle_mode = 0;

	/*
	 * Program the Bus Configuration Register (BCR)
	 *
	 * <15>		0	Don't use separate address
	 * <14..6>	0	reserved
	 * <5..4>	00	IAckmode = Default, don't care
	 * <3>		1	Bus Request Totem Pole output
	 * <2>		1	Use 16 Bit data bus
	 * <1>		0	IRQ Totem Pole output
	 * <0>		0	Don't Shift Right Addr
	 *
	 * 0000 0000 0000 1100 = 0x000c
	 *
	 * By writing to io_base + SDPIN the Wait/Ack pin is
	 * programmed to work as a Wait pin.
	 */

	outw( 0x000c,info->io_base + SDPIN );


	outw( 0,info->io_base );
	outw( 0,info->io_base + CCAR );

	/* select little endian byte ordering */
	usc_RTCmd( info, RTCmd_SelectLittleEndian );


	/* Port Control Register (PCR)
	 *
	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
	 * <11..10>	00	Port 5 is Input (No Connect, Don't Care)
	 * <9..8>	00	Port 4 is Input (No Connect, Don't Care)
	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
	 *
	 * 1111 0000 1111 0101 = 0xf0f5
	 */

	usc_OutReg( info, PCR, 0xf0f5 );


	/*
	 * Input/Output Control Register
	 *
	 * <15..14>	00	CTS is active low input
	 * <13..12>	00	DCD is active low input
	 * <11..10>	00	TxREQ pin is input (DSR)
	 * <9..8>	00	RxREQ pin is input (RI)
	 * <7..6>	00	TxD is output (Transmit Data)
	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
	 * <2..0>	100	RxC is Output (drive with BRG0)
	 *
	 * 0000 0000 0000 0100 = 0x0004
	 */

	usc_OutReg( info, IOCR, 0x0004 );

} /* end of usc_reset() */

/* usc_set_async_mode()
 *
 * Program adapter for asynchronous communications.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_async_mode( struct mgsl_struct *info )
{
	u16 RegValue;

	/* disable interrupts while programming USC */
	usc_DisableMasterIrqBit( info );

	outw( 0, info->io_base );			/* clear Master Bus Enable (DCAR) */
	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */

	/* loop a dummy SDLC frame back to clear stale sync-mode status */
	usc_loopback_frame( info );

	/* Channel mode Register (CMR)
	 *
	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
	 * <13..12>	00	00 = 16X Clock
	 * <11..8>	0000	Transmitter mode = Asynchronous
	 * <7..6>	00	reserved?
	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
	 * <3..0>	0000	Receiver mode = Asynchronous
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;
	if ( info->params.stop_bits != 1 )
		RegValue |= BIT14;	/* anything other than 1 stop bit selects the alternate sub mode */
	usc_OutReg( info, CMR, RegValue );


	/* Receiver mode Register (RMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Even parity
	 * <5>		0	parity disabled
	 * <4..2>	000	Receive Char Length = 8 bits
	 * <1..0>	00	Disable Receiver
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	/* any data_bits value other than 8 selects the same alternate length code */
	if ( info->params.data_bits != 8 )
		RegValue |= BIT4+BIT3+BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;	/* enable parity */
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;	/* even parity (odd is the BIT6=0 case) */
	}

	usc_OutReg( info, RMR, RegValue );


	/* Set IRQ trigger level */

	usc_RCmd( info, RCmd_SelectRicrIntLevel );


	/* Receive Interrupt Control Register (RICR)
	 *
	 * <15..8>	?	RxFIFO IRQ Request Level
	 *
	 * Note: For async mode the receive FIFO level must be set
	 * to 0 to avoid the situation where the FIFO contains fewer bytes
	 * than the trigger level and no more data is expected.
	 *
	 * <7>		0	Exited Hunt IA (Interrupt Arm)
	 * <6>		0	Idle Received IA
	 * <5>		0	Break/Abort IA
	 * <4>		0	Rx Bound IA
	 * <3>		0	Queued status reflects oldest byte in FIFO
	 * <2>		0	Abort/PE IA
	 * <1>		0	Rx Overrun IA
	 * <0>		0	Select TC0 value for readback
	 *
	 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
	 */

	usc_OutReg( info, RICR, 0x0000 );

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );


	/* Transmit mode Register (TMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Transmit parity Even
	 * <5>		0	Transmit parity Disabled
	 * <4..2>	000	Tx Char Length = 8 bits
	 * <1..0>	00	Disable Transmitter
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4+BIT3+BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;
	}

	usc_OutReg( info, TMR, RegValue );

	usc_set_txidle( info );


	/* Set IRQ trigger level */

	usc_TCmd( info, TCmd_SelectTicrIntLevel );


	/* Transmit Interrupt Control Register (TICR)
	 *
	 * <15..8>	?	Transmit FIFO IRQ Level
	 * <7>		0	Present IA (Interrupt Arm)
	 * <6>		1	Idle Sent IA
	 * <5>		0	Abort Sent IA
	 * <4>		0	EOF/EOM Sent IA
	 * <3>		0	CRC Sent IA
	 * <2>		0	1 = Wait for SW Trigger to Start Frame
	 * <1>		0	Tx Underrun IA
	 * <0>		0	TC0 constant on read back
	 *
	 * 0000 0000 0100 0000 = 0x0040
	 *
	 * NOTE(review): the value actually written is 0x1f40 — 0x0040 as
	 * described above plus a FIFO IRQ level of 0x1f in <15..8>; confirm
	 * against the 16C32 data sheet that 0x1f is the intended level.
	 */

	usc_OutReg( info, TICR, 0x1f40 );

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );

	usc_enable_async_clock( info, info->params.data_rate );


	/* Channel Control/status Register (CCSR)
	 *
	 * <15>		X	RCC FIFO Overflow status (RO)
	 * <14>		X	RCC FIFO Not Empty status (RO)
	 * <13>		0	1 = Clear RCC FIFO (WO)
	 * <12>		X	DPLL in Sync status (RO)
	 * <11>		X	DPLL 2 Missed Clocks status (RO)
	 * <10>		X	DPLL 1 Missed Clock status (RO)
	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
	 * <7>		X	SDLC Loop On status (RO)
	 * <6>		X	SDLC Loop Send status (RO)
	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
	 * <4..2>	000	Last Char of SDLC frame has 8 bits (RW)
	 * <1..0>	00	reserved
	 *
	 * 0000 0000 0010 0000 = 0x0020
	 */

	usc_OutReg( info, CCSR, 0x0020 );

	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			    RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableMasterIrqBit( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	if (info->params.loopback) {
		/* select internal data loopback via CCAR channel command */
		info->loopback_bits = 0x300;
		outw(0x0300, info->io_base + CCAR);
	}

} /* end of usc_set_async_mode() */

/* usc_loopback_frame()
 *
 * Loop back a small (2
byte) dummy SDLC frame.
 * Interrupts and DMA are NOT used. The purpose of this is to
 * clear any 'stale' status info left over from running in async mode.
 *
 * The 16C32 shows the strange behaviour of marking the 1st
 * received SDLC frame with a CRC error even when there is no
 * CRC error. To get around this a small dummy frame of 2 bytes
 * is looped back when switching from async to sync mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_loopback_frame( struct mgsl_struct *info )
{
	int i;
	unsigned long oldmode = info->params.mode;

	/* temporarily force HDLC mode so usc_set_sdlc_mode() programs SDLC;
	 * the caller's mode is restored before returning */
	info->params.mode = MGSL_MODE_HDLC;

	usc_DisableMasterIrqBit( info );

	usc_set_sdlc_mode( info );
	usc_enable_loopback( info, 1 );

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, 0 );

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length = 8-Bits
	 * <9..8>	01	Preamble Pattern = flags
	 * <7..6>	10	Don't use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 * 0000 0001 0000 0000 = 0x0100
	 */

	usc_OutReg( info, CCR, 0x0100 );

	/* SETUP RECEIVER */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );
	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

	/* SETUP TRANSMITTER */
	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
	usc_OutReg( info, TCLR, 2 );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* unlatch Tx status bits, and start transmit channel. */
	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
	outw(0,info->io_base + DATAREG);	/* the 2-byte dummy payload (zeros) */

	/* ENABLE TRANSMITTER */
	usc_TCmd( info, TCmd_SendFrame );
	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

	/* WAIT FOR RECEIVE COMPLETE */
	/* bounded busy-poll on RCSR end-of-frame/error bits; no IRQs in use here */
	for (i=0 ; i<1000 ; i++)
		if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
			break;

	/* clear Internal Data loopback mode */
	usc_enable_loopback(info, 0);

	usc_EnableMasterIrqBit(info);

	info->params.mode = oldmode;

} /* end of usc_loopback_frame() */

/* usc_set_sync_mode()	Programs the USC for SDLC communications.
 *
 * Arguments:		info	pointer to adapter info structure
 * Return Value:	None
 */
static void usc_set_sync_mode( struct mgsl_struct *info )
{
	/* flush stale async-mode status with a looped-back dummy frame, */
	/* then program the controller for SDLC */
	usc_loopback_frame( info );
	usc_set_sdlc_mode( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	usc_enable_aux_clock(info, info->params.clock_speed);

	if (info->params.loopback)
		usc_enable_loopback(info,1);

} /* end of mgsl_set_sync_mode() */

/* usc_set_txidle()	Set the HDLC idle mode for the transmitter.
6165 * 6166 * Arguments: info pointer to device instance data 6167 * Return Value: None 6168 */ 6169static void usc_set_txidle( struct mgsl_struct *info ) 6170{ 6171 u16 usc_idle_mode = IDLEMODE_FLAGS; 6172 6173 /* Map API idle mode to USC register bits */ 6174 6175 switch( info->idle_mode ){ 6176 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break; 6177 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break; 6178 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break; 6179 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break; 6180 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break; 6181 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break; 6182 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break; 6183 } 6184 6185 info->usc_idle_mode = usc_idle_mode; 6186 //usc_OutReg(info, TCSR, usc_idle_mode); 6187 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */ 6188 info->tcsr_value += usc_idle_mode; 6189 usc_OutReg(info, TCSR, info->tcsr_value); 6190 6191 /* 6192 * if SyncLink WAN adapter is running in external sync mode, the 6193 * transmitter has been set to Monosync in order to try to mimic 6194 * a true raw outbound bit stream. Monosync still sends an open/close 6195 * sync char at the start/end of a frame. 
Try to match those sync 6196 * patterns to the idle mode set here 6197 */ 6198 if ( info->params.mode == MGSL_MODE_RAW ) { 6199 unsigned char syncpat = 0; 6200 switch( info->idle_mode ) { 6201 case HDLC_TXIDLE_FLAGS: 6202 syncpat = 0x7e; 6203 break; 6204 case HDLC_TXIDLE_ALT_ZEROS_ONES: 6205 syncpat = 0x55; 6206 break; 6207 case HDLC_TXIDLE_ZEROS: 6208 case HDLC_TXIDLE_SPACE: 6209 syncpat = 0x00; 6210 break; 6211 case HDLC_TXIDLE_ONES: 6212 case HDLC_TXIDLE_MARK: 6213 syncpat = 0xff; 6214 break; 6215 case HDLC_TXIDLE_ALT_MARK_SPACE: 6216 syncpat = 0xaa; 6217 break; 6218 } 6219 6220 usc_SetTransmitSyncChars(info,syncpat,syncpat); 6221 } 6222 6223} /* end of usc_set_txidle() */ 6224 6225/* usc_get_serial_signals() 6226 * 6227 * Query the adapter for the state of the V24 status (input) signals. 6228 * 6229 * Arguments: info pointer to device instance data 6230 * Return Value: None 6231 */ 6232static void usc_get_serial_signals( struct mgsl_struct *info ) 6233{ 6234 u16 status; 6235 6236 /* clear all serial signals except DTR and RTS */ 6237 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS; 6238 6239 /* Read the Misc Interrupt status Register (MISR) to get */ 6240 /* the V24 status signals. */ 6241 6242 status = usc_InReg( info, MISR ); 6243 6244 /* set serial signal bits to reflect MISR */ 6245 6246 if ( status & MISCSTATUS_CTS ) 6247 info->serial_signals |= SerialSignal_CTS; 6248 6249 if ( status & MISCSTATUS_DCD ) 6250 info->serial_signals |= SerialSignal_DCD; 6251 6252 if ( status & MISCSTATUS_RI ) 6253 info->serial_signals |= SerialSignal_RI; 6254 6255 if ( status & MISCSTATUS_DSR ) 6256 info->serial_signals |= SerialSignal_DSR; 6257 6258} /* end of usc_get_serial_signals() */ 6259 6260/* usc_set_serial_signals() 6261 * 6262 * Set the state of DTR and RTS based on contents of 6263 * serial_signals member of device extension. 
6264 * 6265 * Arguments: info pointer to device instance data 6266 * Return Value: None 6267 */ 6268static void usc_set_serial_signals( struct mgsl_struct *info ) 6269{ 6270 u16 Control; 6271 unsigned char V24Out = info->serial_signals; 6272 6273 /* get the current value of the Port Control Register (PCR) */ 6274 6275 Control = usc_InReg( info, PCR ); 6276 6277 if ( V24Out & SerialSignal_RTS ) 6278 Control &= ~(BIT6); 6279 else 6280 Control |= BIT6; 6281 6282 if ( V24Out & SerialSignal_DTR ) 6283 Control &= ~(BIT4); 6284 else 6285 Control |= BIT4; 6286 6287 usc_OutReg( info, PCR, Control ); 6288 6289} /* end of usc_set_serial_signals() */ 6290 6291/* usc_enable_async_clock() 6292 * 6293 * Enable the async clock at the specified frequency. 6294 * 6295 * Arguments: info pointer to device instance data 6296 * data_rate data rate of clock in bps 6297 * 0 disables the AUX clock. 6298 * Return Value: None 6299 */ 6300static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate ) 6301{ 6302 if ( data_rate ) { 6303 /* 6304 * Clock mode Control Register (CMCR) 6305 * 6306 * <15..14> 00 counter 1 Disabled 6307 * <13..12> 00 counter 0 Disabled 6308 * <11..10> 11 BRG1 Input is TxC Pin 6309 * <9..8> 11 BRG0 Input is TxC Pin 6310 * <7..6> 01 DPLL Input is BRG1 Output 6311 * <5..3> 100 TxCLK comes from BRG0 6312 * <2..0> 100 RxCLK comes from BRG0 6313 * 6314 * 0000 1111 0110 0100 = 0x0f64 6315 */ 6316 6317 usc_OutReg( info, CMCR, 0x0f64 ); 6318 6319 6320 /* 6321 * Write 16-bit Time Constant for BRG0 6322 * Time Constant = (ClkSpeed / data_rate) - 1 6323 * ClkSpeed = 921600 (ISA), 691200 (PCI) 6324 */ 6325 6326 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 6327 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) ); 6328 else 6329 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) ); 6330 6331 6332 /* 6333 * Hardware Configuration Register (HCR) 6334 * Clear Bit 1, BRG0 mode = Continuous 6335 * Set Bit 0 to enable BRG0. 
6336 */ 6337 6338 usc_OutReg( info, HCR, 6339 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 6340 6341 6342 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 6343 6344 usc_OutReg( info, IOCR, 6345 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); 6346 } else { 6347 /* data rate == 0 so turn off BRG0 */ 6348 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); 6349 } 6350 6351} /* end of usc_enable_async_clock() */ 6352 6353/* 6354 * Buffer Structures: 6355 * 6356 * Normal memory access uses virtual addresses that can make discontiguous 6357 * physical memory pages appear to be contiguous in the virtual address 6358 * space (the processors memory mapping handles the conversions). 6359 * 6360 * DMA transfers require physically contiguous memory. This is because 6361 * the DMA system controller and DMA bus masters deal with memory using 6362 * only physical addresses. 6363 * 6364 * This causes a problem under Windows NT when large DMA buffers are 6365 * needed. Fragmentation of the nonpaged pool prevents allocations of 6366 * physically contiguous buffers larger than the PAGE_SIZE. 6367 * 6368 * However the 16C32 supports Bus Master Scatter/Gather DMA which 6369 * allows DMA transfers to physically discontiguous buffers. Information 6370 * about each data transfer buffer is contained in a memory structure 6371 * called a 'buffer entry'. A list of buffer entries is maintained 6372 * to track and control the use of the data transfer buffers. 6373 * 6374 * To support this strategy we will allocate sufficient PAGE_SIZE 6375 * contiguous memory buffers to allow for the total required buffer 6376 * space. 6377 * 6378 * The 16C32 accesses the list of buffer entries using Bus Master 6379 * DMA. Control information is read from the buffer entries by the 6380 * 16C32 to control data transfers. status information is written to 6381 * the buffer entries by the 16C32 to indicate the status of completed 6382 * transfers. 
6383 * 6384 * The CPU writes control information to the buffer entries to control 6385 * the 16C32 and reads status information from the buffer entries to 6386 * determine information about received and transmitted frames. 6387 * 6388 * Because the CPU and 16C32 (adapter) both need simultaneous access 6389 * to the buffer entries, the buffer entry memory is allocated with 6390 * HalAllocateCommonBuffer(). This restricts the size of the buffer 6391 * entry list to PAGE_SIZE. 6392 * 6393 * The actual data buffers on the other hand will only be accessed 6394 * by the CPU or the adapter but not by both simultaneously. This allows 6395 * Scatter/Gather packet based DMA procedures for using physically 6396 * discontiguous pages. 6397 */ 6398 6399/* 6400 * mgsl_reset_tx_dma_buffers() 6401 * 6402 * Set the count for all transmit buffers to 0 to indicate the 6403 * buffer is available for use and set the current buffer to the 6404 * first buffer. This effectively makes all buffers free and 6405 * discards any data in buffers. 
6406 * 6407 * Arguments: info pointer to device instance data 6408 * Return Value: None 6409 */ 6410static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ) 6411{ 6412 unsigned int i; 6413 6414 for ( i = 0; i < info->tx_buffer_count; i++ ) { 6415 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0; 6416 } 6417 6418 info->current_tx_buffer = 0; 6419 info->start_tx_dma_buffer = 0; 6420 info->tx_dma_buffers_used = 0; 6421 6422 info->get_tx_holding_index = 0; 6423 info->put_tx_holding_index = 0; 6424 info->tx_holding_count = 0; 6425 6426} /* end of mgsl_reset_tx_dma_buffers() */ 6427 6428/* 6429 * num_free_tx_dma_buffers() 6430 * 6431 * returns the number of free tx dma buffers available 6432 * 6433 * Arguments: info pointer to device instance data 6434 * Return Value: number of free tx dma buffers 6435 */ 6436static int num_free_tx_dma_buffers(struct mgsl_struct *info) 6437{ 6438 return info->tx_buffer_count - info->tx_dma_buffers_used; 6439} 6440 6441/* 6442 * mgsl_reset_rx_dma_buffers() 6443 * 6444 * Set the count for all receive buffers to DMABUFFERSIZE 6445 * and set the current buffer to the first buffer. This effectively 6446 * makes all buffers free and discards any data in buffers. 6447 * 6448 * Arguments: info pointer to device instance data 6449 * Return Value: None 6450 */ 6451static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ) 6452{ 6453 unsigned int i; 6454 6455 for ( i = 0; i < info->rx_buffer_count; i++ ) { 6456 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE; 6457// info->rx_buffer_list[i].count = DMABUFFERSIZE; 6458// info->rx_buffer_list[i].status = 0; 6459 } 6460 6461 info->current_rx_buffer = 0; 6462 6463} /* end of mgsl_reset_rx_dma_buffers() */ 6464 6465/* 6466 * mgsl_free_rx_frame_buffers() 6467 * 6468 * Free the receive buffers used by a received SDLC 6469 * frame such that the buffers can be reused. 
6470 * 6471 * Arguments: 6472 * 6473 * info pointer to device instance data 6474 * StartIndex index of 1st receive buffer of frame 6475 * EndIndex index of last receive buffer of frame 6476 * 6477 * Return Value: None 6478 */ 6479static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ) 6480{ 6481 bool Done = false; 6482 DMABUFFERENTRY *pBufEntry; 6483 unsigned int Index; 6484 6485 /* Starting with 1st buffer entry of the frame clear the status */ 6486 /* field and set the count field to DMA Buffer Size. */ 6487 6488 Index = StartIndex; 6489 6490 while( !Done ) { 6491 pBufEntry = &(info->rx_buffer_list[Index]); 6492 6493 if ( Index == EndIndex ) { 6494 /* This is the last buffer of the frame! */ 6495 Done = true; 6496 } 6497 6498 /* reset current buffer for reuse */ 6499// pBufEntry->status = 0; 6500// pBufEntry->count = DMABUFFERSIZE; 6501 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE; 6502 6503 /* advance to next buffer entry in linked list */ 6504 Index++; 6505 if ( Index == info->rx_buffer_count ) 6506 Index = 0; 6507 } 6508 6509 /* set current buffer to next buffer after last buffer of frame */ 6510 info->current_rx_buffer = Index; 6511 6512} /* end of free_rx_frame_buffers() */ 6513 6514/* mgsl_get_rx_frame() 6515 * 6516 * This function attempts to return a received SDLC frame from the 6517 * receive DMA buffers. Only frames received without errors are returned. 
6518 * 6519 * Arguments: info pointer to device extension 6520 * Return Value: true if frame returned, otherwise false 6521 */ 6522static bool mgsl_get_rx_frame(struct mgsl_struct *info) 6523{ 6524 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ 6525 unsigned short status; 6526 DMABUFFERENTRY *pBufEntry; 6527 unsigned int framesize = 0; 6528 bool ReturnCode = false; 6529 unsigned long flags; 6530 struct tty_struct *tty = info->port.tty; 6531 bool return_frame = false; 6532 6533 /* 6534 * current_rx_buffer points to the 1st buffer of the next available 6535 * receive frame. To find the last buffer of the frame look for 6536 * a non-zero status field in the buffer entries. (The status 6537 * field is set by the 16C32 after completing a receive frame. 6538 */ 6539 6540 StartIndex = EndIndex = info->current_rx_buffer; 6541 6542 while( !info->rx_buffer_list[EndIndex].status ) { 6543 /* 6544 * If the count field of the buffer entry is non-zero then 6545 * this buffer has not been used. (The 16C32 clears the count 6546 * field when it starts using the buffer.) If an unused buffer 6547 * is encountered then there are no frames available. 6548 */ 6549 6550 if ( info->rx_buffer_list[EndIndex].count ) 6551 goto Cleanup; 6552 6553 /* advance to next buffer entry in linked list */ 6554 EndIndex++; 6555 if ( EndIndex == info->rx_buffer_count ) 6556 EndIndex = 0; 6557 6558 /* if entire list searched then no frame available */ 6559 if ( EndIndex == StartIndex ) { 6560 /* If this occurs then something bad happened, 6561 * all buffers have been 'used' but none mark 6562 * the end of a frame. Reset buffers and receiver. 
6563 */ 6564 6565 if ( info->rx_enabled ){ 6566 spin_lock_irqsave(&info->irq_spinlock,flags); 6567 usc_start_receiver(info); 6568 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6569 } 6570 goto Cleanup; 6571 } 6572 } 6573 6574 6575 /* check status of receive frame */ 6576 6577 status = info->rx_buffer_list[EndIndex].status; 6578 6579 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + 6580 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { 6581 if ( status & RXSTATUS_SHORT_FRAME ) 6582 info->icount.rxshort++; 6583 else if ( status & RXSTATUS_ABORT ) 6584 info->icount.rxabort++; 6585 else if ( status & RXSTATUS_OVERRUN ) 6586 info->icount.rxover++; 6587 else { 6588 info->icount.rxcrc++; 6589 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) 6590 return_frame = true; 6591 } 6592 framesize = 0; 6593#if SYNCLINK_GENERIC_HDLC 6594 { 6595 info->netdev->stats.rx_errors++; 6596 info->netdev->stats.rx_frame_errors++; 6597 } 6598#endif 6599 } else 6600 return_frame = true; 6601 6602 if ( return_frame ) { 6603 /* receive frame has no errors, get frame size. 6604 * The frame size is the starting value of the RCC (which was 6605 * set to 0xffff) minus the ending value of the RCC (decremented 6606 * once for each receive character) minus 2 for the 16-bit CRC. 
6607 */ 6608 6609 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc; 6610 6611 /* adjust frame size for CRC if any */ 6612 if ( info->params.crc_type == HDLC_CRC_16_CCITT ) 6613 framesize -= 2; 6614 else if ( info->params.crc_type == HDLC_CRC_32_CCITT ) 6615 framesize -= 4; 6616 } 6617 6618 if ( debug_level >= DEBUG_LEVEL_BH ) 6619 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n", 6620 __FILE__,__LINE__,info->device_name,status,framesize); 6621 6622 if ( debug_level >= DEBUG_LEVEL_DATA ) 6623 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr, 6624 min_t(int, framesize, DMABUFFERSIZE),0); 6625 6626 if (framesize) { 6627 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) && 6628 ((framesize+1) > info->max_frame_size) ) || 6629 (framesize > info->max_frame_size) ) 6630 info->icount.rxlong++; 6631 else { 6632 /* copy dma buffer(s) to contiguous intermediate buffer */ 6633 int copy_count = framesize; 6634 int index = StartIndex; 6635 unsigned char *ptmp = info->intermediate_rxbuffer; 6636 6637 if ( !(status & RXSTATUS_CRC_ERROR)) 6638 info->icount.rxok++; 6639 6640 while(copy_count) { 6641 int partial_count; 6642 if ( copy_count > DMABUFFERSIZE ) 6643 partial_count = DMABUFFERSIZE; 6644 else 6645 partial_count = copy_count; 6646 6647 pBufEntry = &(info->rx_buffer_list[index]); 6648 memcpy( ptmp, pBufEntry->virt_addr, partial_count ); 6649 ptmp += partial_count; 6650 copy_count -= partial_count; 6651 6652 if ( ++index == info->rx_buffer_count ) 6653 index = 0; 6654 } 6655 6656 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) { 6657 ++framesize; 6658 *ptmp = (status & RXSTATUS_CRC_ERROR ? 
6659 RX_CRC_ERROR : 6660 RX_OK); 6661 6662 if ( debug_level >= DEBUG_LEVEL_DATA ) 6663 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n", 6664 __FILE__,__LINE__,info->device_name, 6665 *ptmp); 6666 } 6667 6668#if SYNCLINK_GENERIC_HDLC 6669 if (info->netcount) 6670 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); 6671 else 6672#endif 6673 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6674 } 6675 } 6676 /* Free the buffers used by this frame. */ 6677 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex ); 6678 6679 ReturnCode = true; 6680 6681Cleanup: 6682 6683 if ( info->rx_enabled && info->rx_overflow ) { 6684 /* The receiver needs to restarted because of 6685 * a receive overflow (buffer or FIFO). If the 6686 * receive buffers are now empty, then restart receiver. 6687 */ 6688 6689 if ( !info->rx_buffer_list[EndIndex].status && 6690 info->rx_buffer_list[EndIndex].count ) { 6691 spin_lock_irqsave(&info->irq_spinlock,flags); 6692 usc_start_receiver(info); 6693 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6694 } 6695 } 6696 6697 return ReturnCode; 6698 6699} /* end of mgsl_get_rx_frame() */ 6700 6701/* mgsl_get_raw_rx_frame() 6702 * 6703 * This function attempts to return a received frame from the 6704 * receive DMA buffers when running in external loop mode. In this mode, 6705 * we will return at most one DMABUFFERSIZE frame to the application. 6706 * The USC receiver is triggering off of DCD going active to start a new 6707 * frame, and DCD going inactive to terminate the frame (similar to 6708 * processing a closing flag character). 6709 * 6710 * In this routine, we will return DMABUFFERSIZE "chunks" at a time. 6711 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero 6712 * status field and the RCC field will indicate the length of the 6713 * entire received frame. 
We take this RCC field and get the modulus 6714 * of RCC and DMABUFFERSIZE to determine if number of bytes in the 6715 * last Rx DMA buffer and return that last portion of the frame. 6716 * 6717 * Arguments: info pointer to device extension 6718 * Return Value: true if frame returned, otherwise false 6719 */ 6720static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info) 6721{ 6722 unsigned int CurrentIndex, NextIndex; 6723 unsigned short status; 6724 DMABUFFERENTRY *pBufEntry; 6725 unsigned int framesize = 0; 6726 bool ReturnCode = false; 6727 unsigned long flags; 6728 struct tty_struct *tty = info->port.tty; 6729 6730 /* 6731 * current_rx_buffer points to the 1st buffer of the next available 6732 * receive frame. The status field is set by the 16C32 after 6733 * completing a receive frame. If the status field of this buffer 6734 * is zero, either the USC is still filling this buffer or this 6735 * is one of a series of buffers making up a received frame. 6736 * 6737 * If the count field of this buffer is zero, the USC is either 6738 * using this buffer or has used this buffer. Look at the count 6739 * field of the next buffer. If that next buffer's count is 6740 * non-zero, the USC is still actively using the current buffer. 6741 * Otherwise, if the next buffer's count field is zero, the 6742 * current buffer is complete and the USC is using the next 6743 * buffer. 6744 */ 6745 CurrentIndex = NextIndex = info->current_rx_buffer; 6746 ++NextIndex; 6747 if ( NextIndex == info->rx_buffer_count ) 6748 NextIndex = 0; 6749 6750 if ( info->rx_buffer_list[CurrentIndex].status != 0 || 6751 (info->rx_buffer_list[CurrentIndex].count == 0 && 6752 info->rx_buffer_list[NextIndex].count == 0)) { 6753 /* 6754 * Either the status field of this dma buffer is non-zero 6755 * (indicating the last buffer of a receive frame) or the next 6756 * buffer is marked as in use -- implying this buffer is complete 6757 * and an intermediate buffer for this received frame. 
6758 */ 6759 6760 status = info->rx_buffer_list[CurrentIndex].status; 6761 6762 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + 6763 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { 6764 if ( status & RXSTATUS_SHORT_FRAME ) 6765 info->icount.rxshort++; 6766 else if ( status & RXSTATUS_ABORT ) 6767 info->icount.rxabort++; 6768 else if ( status & RXSTATUS_OVERRUN ) 6769 info->icount.rxover++; 6770 else 6771 info->icount.rxcrc++; 6772 framesize = 0; 6773 } else { 6774 /* 6775 * A receive frame is available, get frame size and status. 6776 * 6777 * The frame size is the starting value of the RCC (which was 6778 * set to 0xffff) minus the ending value of the RCC (decremented 6779 * once for each receive character) minus 2 or 4 for the 16-bit 6780 * or 32-bit CRC. 6781 * 6782 * If the status field is zero, this is an intermediate buffer. 6783 * It's size is 4K. 6784 * 6785 * If the DMA Buffer Entry's Status field is non-zero, the 6786 * receive operation completed normally (ie: DCD dropped). The 6787 * RCC field is valid and holds the received frame size. 6788 * It is possible that the RCC field will be zero on a DMA buffer 6789 * entry with a non-zero status. This can occur if the total 6790 * frame size (number of bytes between the time DCD goes active 6791 * to the time DCD goes inactive) exceeds 65535 bytes. In this 6792 * case the 16C32 has underrun on the RCC count and appears to 6793 * stop updating this counter to let us know the actual received 6794 * frame size. If this happens (non-zero status and zero RCC), 6795 * simply return the entire RxDMA Buffer 6796 */ 6797 if ( status ) { 6798 /* 6799 * In the event that the final RxDMA Buffer is 6800 * terminated with a non-zero status and the RCC 6801 * field is zero, we interpret this as the RCC 6802 * having underflowed (received frame > 65535 bytes). 
6803 * 6804 * Signal the event to the user by passing back 6805 * a status of RxStatus_CrcError returning the full 6806 * buffer and let the app figure out what data is 6807 * actually valid 6808 */ 6809 if ( info->rx_buffer_list[CurrentIndex].rcc ) 6810 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc; 6811 else 6812 framesize = DMABUFFERSIZE; 6813 } 6814 else 6815 framesize = DMABUFFERSIZE; 6816 } 6817 6818 if ( framesize > DMABUFFERSIZE ) { 6819 /* 6820 * if running in raw sync mode, ISR handler for 6821 * End Of Buffer events terminates all buffers at 4K. 6822 * If this frame size is said to be >4K, get the 6823 * actual number of bytes of the frame in this buffer. 6824 */ 6825 framesize = framesize % DMABUFFERSIZE; 6826 } 6827 6828 6829 if ( debug_level >= DEBUG_LEVEL_BH ) 6830 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n", 6831 __FILE__,__LINE__,info->device_name,status,framesize); 6832 6833 if ( debug_level >= DEBUG_LEVEL_DATA ) 6834 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr, 6835 min_t(int, framesize, DMABUFFERSIZE),0); 6836 6837 if (framesize) { 6838 /* copy dma buffer(s) to contiguous intermediate buffer */ 6839 /* NOTE: we never copy more than DMABUFFERSIZE bytes */ 6840 6841 pBufEntry = &(info->rx_buffer_list[CurrentIndex]); 6842 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize); 6843 info->icount.rxok++; 6844 6845 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6846 } 6847 6848 /* Free the buffers used by this frame. */ 6849 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex ); 6850 6851 ReturnCode = true; 6852 } 6853 6854 6855 if ( info->rx_enabled && info->rx_overflow ) { 6856 /* The receiver needs to restarted because of 6857 * a receive overflow (buffer or FIFO). If the 6858 * receive buffers are now empty, then restart receiver. 
6859 */ 6860 6861 if ( !info->rx_buffer_list[CurrentIndex].status && 6862 info->rx_buffer_list[CurrentIndex].count ) { 6863 spin_lock_irqsave(&info->irq_spinlock,flags); 6864 usc_start_receiver(info); 6865 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6866 } 6867 } 6868 6869 return ReturnCode; 6870 6871} /* end of mgsl_get_raw_rx_frame() */ 6872 6873/* mgsl_load_tx_dma_buffer() 6874 * 6875 * Load the transmit DMA buffer with the specified data. 6876 * 6877 * Arguments: 6878 * 6879 * info pointer to device extension 6880 * Buffer pointer to buffer containing frame to load 6881 * BufferSize size in bytes of frame in Buffer 6882 * 6883 * Return Value: None 6884 */ 6885static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info, 6886 const char *Buffer, unsigned int BufferSize) 6887{ 6888 unsigned short Copycount; 6889 unsigned int i = 0; 6890 DMABUFFERENTRY *pBufEntry; 6891 6892 if ( debug_level >= DEBUG_LEVEL_DATA ) 6893 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1); 6894 6895 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { 6896 /* set CMR:13 to start transmit when 6897 * next GoAhead (abort) is received 6898 */ 6899 info->cmr_value |= BIT13; 6900 } 6901 6902 /* begin loading the frame in the next available tx dma 6903 * buffer, remember it's starting location for setting 6904 * up tx dma operation 6905 */ 6906 i = info->current_tx_buffer; 6907 info->start_tx_dma_buffer = i; 6908 6909 /* Setup the status and RCC (Frame Size) fields of the 1st */ 6910 /* buffer entry in the transmit DMA buffer list. */ 6911 6912 info->tx_buffer_list[i].status = info->cmr_value & 0xf000; 6913 info->tx_buffer_list[i].rcc = BufferSize; 6914 info->tx_buffer_list[i].count = BufferSize; 6915 6916 /* Copy frame data from 1st source buffer to the DMA buffers. */ 6917 /* The frame data may span multiple DMA buffers. */ 6918 6919 while( BufferSize ){ 6920 /* Get a pointer to next DMA buffer entry. 
*/ 6921 pBufEntry = &info->tx_buffer_list[i++]; 6922 6923 if ( i == info->tx_buffer_count ) 6924 i=0; 6925 6926 /* Calculate the number of bytes that can be copied from */ 6927 /* the source buffer to this DMA buffer. */ 6928 if ( BufferSize > DMABUFFERSIZE ) 6929 Copycount = DMABUFFERSIZE; 6930 else 6931 Copycount = BufferSize; 6932 6933 /* Actually copy data from source buffer to DMA buffer. */ 6934 /* Also set the data count for this individual DMA buffer. */ 6935 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 6936 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount); 6937 else 6938 memcpy(pBufEntry->virt_addr, Buffer, Copycount); 6939 6940 pBufEntry->count = Copycount; 6941 6942 /* Advance source pointer and reduce remaining data count. */ 6943 Buffer += Copycount; 6944 BufferSize -= Copycount; 6945 6946 ++info->tx_dma_buffers_used; 6947 } 6948 6949 /* remember next available tx dma buffer */ 6950 info->current_tx_buffer = i; 6951 6952} /* end of mgsl_load_tx_dma_buffer() */ 6953 6954/* 6955 * mgsl_register_test() 6956 * 6957 * Performs a register test of the 16C32. 6958 * 6959 * Arguments: info pointer to device instance data 6960 * Return Value: true if test passed, otherwise false 6961 */ 6962static bool mgsl_register_test( struct mgsl_struct *info ) 6963{ 6964 static unsigned short BitPatterns[] = 6965 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f }; 6966 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns); 6967 unsigned int i; 6968 bool rc = true; 6969 unsigned long flags; 6970 6971 spin_lock_irqsave(&info->irq_spinlock,flags); 6972 usc_reset(info); 6973 6974 /* Verify the reset state of some registers. */ 6975 6976 if ( (usc_InReg( info, SICR ) != 0) || 6977 (usc_InReg( info, IVR ) != 0) || 6978 (usc_InDmaReg( info, DIVR ) != 0) ){ 6979 rc = false; 6980 } 6981 6982 if ( rc ){ 6983 /* Write bit patterns to various registers but do it out of */ 6984 /* sync, then read back and verify values. 
*/ 6985 6986 for ( i = 0 ; i < Patterncount ; i++ ) { 6987 usc_OutReg( info, TC0R, BitPatterns[i] ); 6988 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] ); 6989 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] ); 6990 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] ); 6991 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] ); 6992 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] ); 6993 6994 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) || 6995 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) || 6996 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) || 6997 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) || 6998 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) || 6999 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){ 7000 rc = false; 7001 break; 7002 } 7003 } 7004 } 7005 7006 usc_reset(info); 7007 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7008 7009 return rc; 7010 7011} /* end of mgsl_register_test() */ 7012 7013/* mgsl_irq_test() Perform interrupt test of the 16C32. 7014 * 7015 * Arguments: info pointer to device instance data 7016 * Return Value: true if test passed, otherwise false 7017 */ 7018static bool mgsl_irq_test( struct mgsl_struct *info ) 7019{ 7020 unsigned long EndTime; 7021 unsigned long flags; 7022 7023 spin_lock_irqsave(&info->irq_spinlock,flags); 7024 usc_reset(info); 7025 7026 /* 7027 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 7028 * The ISR sets irq_occurred to true. 7029 */ 7030 7031 info->irq_occurred = false; 7032 7033 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */ 7034 /* Enable INTEN (Port 6, Bit12) */ 7035 /* This connects the IRQ request signal to the ISA bus */ 7036 /* on the ISA adapter. 
   This has no effect for the PCI adapter */
	usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );

	usc_EnableMasterIrqBit(info);
	usc_EnableInterrupts(info, IO_PIN);
	usc_ClearIrqPendingBits(info, IO_PIN);

	usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
	usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* poll up to ~1s (100 x 10ms) for the test interrupt to arrive */
	EndTime=100;
	while( EndTime-- && !info->irq_occurred ) {
		msleep_interruptible(10);
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return info->irq_occurred;

} /* end of mgsl_irq_test() */

/* mgsl_dma_test()
 *
 * 	Perform a DMA test of the 16C32. A small frame is
 * 	transmitted via DMA from a transmit buffer to a receive buffer
 * 	using single buffer DMA mode. The transmitted data is looped
 * 	back internally (usc_enable_loopback) and compared byte for
 * 	byte against the received data.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_dma_test( struct mgsl_struct *info )
{
	unsigned short FifoLevel;
	unsigned long phys_addr;
	unsigned int FrameSize;
	unsigned int i;
	char *TmpPtr;
	bool rc = true;
	unsigned short status=0;
	unsigned long EndTime;
	unsigned long flags;
	MGSL_PARAMS tmp_params;

	/* save current port options */
	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
	/* load default port options */
	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));

#define TESTFRAMESIZE 40

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup 16C32 for SDLC DMA transfer mode */

	usc_reset(info);
	usc_set_sdlc_mode(info);
	usc_enable_loopback(info,1);

	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
	 * field of the buffer entry after fetching buffer address. This
	 * way we can detect a DMA failure for a DMA read (which should be
	 * non-destructive to system memory) before we try and write to
	 * memory (where a failure could corrupt system memory).
	 */

	/* Receive DMA mode Register (RDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	RSBinA/L = store Rx status Block in List entry
	 * <12>		0	1 = Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on RxBound
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (write as 0s)
	 *
	 * 1110 0010 0000 0000 = 0xe200
	 */

	usc_OutDmaReg( info, RDMR, 0xe200 );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */

	FrameSize = TESTFRAMESIZE;

	/* setup 1st transmit buffer entry: */
	/* with frame size and transmit control word */

	info->tx_buffer_list[0].count = FrameSize;
	info->tx_buffer_list[0].rcc = FrameSize;
	info->tx_buffer_list[0].status = 0x4000;

	/* build a transmit frame in 1st transmit DMA buffer */
	/* (incrementing byte pattern 0..FrameSize-1) */

	TmpPtr = info->tx_buffer_list[0].virt_addr;
	for (i = 0; i < FrameSize; i++ )
		*TmpPtr++ = i;

	/* setup 1st receive buffer entry: */
	/* clear status, set max receive buffer size */

	info->rx_buffer_list[0].status = 0;
	info->rx_buffer_list[0].count = FrameSize + 4;

	/* zero out the 1st receive buffer */

	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );

	/* Set count field of next buffer entries to prevent */
	/* 16C32 from using buffers after the 1st one. */

	info->tx_buffer_list[1].count = 0;
	info->rx_buffer_list[1].count = 0;


	/***************************/
	/* Program 16C32 receiver. */
	/***************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup DMA transfers */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
	phys_addr = info->rx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );

	/* Clear the Rx DMA status bits (read RDMR) and start channel */
	usc_InDmaReg( info, RDMR );
	usc_DmaCmd( info, DmaCmd_InitRxChannel );

	/* Enable Receiver (RMR <1..0> = 10) */
	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/*************************************************************/
	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
	/*************************************************************/

	/* Wait 100ms for interrupt. */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		/* sample RDMR status without holding the lock across the poll */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InDmaReg( info, RDMR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( !(status & BIT4) && (status & BIT5) ) {
			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
			/* BUSY  (BIT 5) is active (channel still active). */
			/* This means the buffer entry read has completed. */
			break;
		}
	}


	/******************************/
	/* Program 16C32 transmitter. */
	/******************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */

	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* Program the address of the 1st DMA Buffer Entry in linked list */

	phys_addr = info->tx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );

	/* unlatch Tx status bits, and start transmit channel. */

	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
	usc_DmaCmd( info, DmaCmd_InitTxChannel );

	/* wait for DMA controller to fill transmit FIFO */

	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/**********************************/
	/* WAIT FOR TRANSMIT FIFO TO FILL */
	/**********************************/

	/* Wait 100ms */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		/* TICR upper byte reports Tx FIFO free space */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		FifoLevel = usc_InReg(info, TICR) >> 8;
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( FifoLevel < 16 )
			break;
		else
			if ( FrameSize < 32 ) {
				/* This frame is smaller than the entire transmit FIFO */
				/* so wait for the entire frame to be loaded. */
				if ( FifoLevel <= (32 - FrameSize) )
					break;
			}
	}


	if ( rc )
	{
		/* Enable 16C32 transmitter. */

		spin_lock_irqsave(&info->irq_spinlock,flags);

		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
		usc_TCmd( info, TCmd_SendFrame );
		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );

		spin_unlock_irqrestore(&info->irq_spinlock,flags);


		/******************************/
		/* WAIT FOR TRANSMIT COMPLETE */
		/******************************/

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* While timer not expired wait for transmit complete */

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InReg( info, TCSR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}

			spin_lock_irqsave(&info->irq_spinlock,flags);
			status = usc_InReg( info, TCSR );
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}


	if ( rc ){
		/* CHECK FOR TRANSMIT ERRORS */
		if ( status & (BIT5 + BIT1) )
			rc = false;
	}

	if ( rc ) {
		/* WAIT FOR RECEIVE COMPLETE */

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* Wait for 16C32 to write receive status to buffer entry. */
		status=info->rx_buffer_list[0].status;
		while ( status == 0 ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}
			status=info->rx_buffer_list[0].status;
		}
	}


	if ( rc ) {
		/* CHECK FOR RECEIVE ERRORS */
		status = info->rx_buffer_list[0].status;

		if ( status & (BIT8 + BIT3 + BIT1) ) {
			/* receive error has occurred */
			rc = false;
		} else {
			/* no error: verify received data matches transmitted data */
			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
				rc = false;
			}
		}
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* restore current port options */
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));

	return rc;

} /* end of mgsl_dma_test() */

/* mgsl_adapter_test()
 *
 * 	Perform the register, IRQ, and DMA tests for the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise -ENODEV
 */
static int mgsl_adapter_test( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):Testing device %s\n",
			__FILE__,__LINE__,info->device_name );

	if ( !mgsl_register_test( info ) ) {
		info->init_error = DiagStatus_AddressFailure;
		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
		return -ENODEV;
	}

	if ( !mgsl_irq_test( info ) ) {
		info->init_error = DiagStatus_IrqFailure;
		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
		return -ENODEV;
	}

	if ( !mgsl_dma_test( info ) ) {
		info->init_error = DiagStatus_DmaFailure;
		printk( "%s(%d):DMA test failure for device %s
 DMA=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
		return -ENODEV;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):device %s passed diagnostics\n",
			__FILE__,__LINE__,info->device_name );

	return 0;

} /* end of mgsl_adapter_test() */

/* mgsl_memory_test()
 *
 * 	Test the shared memory on a PCI adapter.
 * 	ISA adapters have no shared memory, so they trivially pass.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_memory_test( struct mgsl_struct *info )
{
	static unsigned long BitPatterns[] =
		{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
	unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned long i;
	unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
	unsigned long * TestAddr;

	if ( info->bus_type != MGSL_BUS_TYPE_PCI )
		return true;

	TestAddr = (unsigned long *)info->memory_base;

	/* Test data lines with test pattern at one location. */

	for ( i = 0 ; i < Patterncount ; i++ ) {
		*TestAddr = BitPatterns[i];
		if ( *TestAddr != BitPatterns[i] )
			return false;
	}

	/* Test address lines with incrementing pattern over */
	/* entire address range. */

	for ( i = 0 ; i < TestLimit ; i++ ) {
		*TestAddr = i * 4;
		TestAddr++;
	}

	TestAddr = (unsigned long *)info->memory_base;

	for ( i = 0 ; i < TestLimit ; i++ ) {
		if ( *TestAddr != i * 4 )
			return false;
		TestAddr++;
	}

	/* leave shared memory zeroed for normal operation */
	memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );

	return true;

} /* End Of mgsl_memory_test() */


/* mgsl_load_pci_memory()
 *
 * 	Load a large block of data into the PCI shared memory.
 * 	Use this instead of memcpy() or memmove() to move data
 * 	into the PCI shared memory.
 *
 * Notes:
 *
 * 	This function prevents the PCI9050 interface chip from hogging
 * 	the adapter local bus, which can starve the 16C32 by preventing
 * 	16C32 bus master cycles.
 *
 * 	The PCI9050 documentation says that the 9050 will always release
 * 	control of the local bus after completing the current read
 * 	or write operation.
 *
 * 	It appears that as long as the PCI9050 write FIFO is full, the
 * 	PCI9050 treats all of the writes as a single burst transaction
 * 	and will not release the bus. This causes DMA latency problems
 * 	at high speeds when copying large data blocks to the shared
 * 	memory.
 *
 * 	This function, in effect, breaks a large shared memory write
 * 	into multiple transactions by interleaving a shared memory read
 * 	which will flush the write FIFO and 'complete' the write
 * 	transaction. This allows any pending DMA request to gain control
 * 	of the local bus in a timely fashion.
 *
 * Arguments:
 *
 * 	TargetPtr	pointer to target address in PCI shared memory
 * 	SourcePtr	pointer to source buffer for data
 * 	count		count in bytes of data to copy
 *
 * Return Value:	None
 */
static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
	unsigned short count )
{
	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
#define PCI_LOAD_INTERVAL 64

	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
	unsigned short Index;
	unsigned long Dummy;

	for ( Index = 0 ; Index < Intervalcount ; Index++ )
	{
		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
		/* dummy read flushes the PCI9050 write FIFO, ending the burst */
		Dummy = *((volatile unsigned long *)TargetPtr);
		TargetPtr += PCI_LOAD_INTERVAL;
		SourcePtr += PCI_LOAD_INTERVAL;
	}

	/* copy the remaining tail (< PCI_LOAD_INTERVAL bytes) */
	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );

} /* End Of mgsl_load_pci_memory() */

/* mgsl_trace_block()
 *
 * 	Dump a data block to the kernel log as hex bytes plus a
 * 	printable-ASCII rendering, 16 bytes per line.
 */
static void mgsl_trace_block(struct mgsl_struct
 *info,const char* data, int count, int xmit)
{
	int i;
	int linecount;

	if (xmit)
		printk("%s tx data:\n",info->device_name);
	else
		printk("%s rx data:\n",info->device_name);

	while(count) {
		/* at most 16 bytes per output line */
		if (count > 16)
			linecount = 16;
		else
			linecount = count;

		/* hex dump of this line's bytes */
		for(i=0;i<linecount;i++)
			printk("%02X ",(unsigned char)data[i]);
		/* pad short lines so the ASCII column lines up */
		for(;i<17;i++)
			printk(" ");
		/* printable-ASCII rendering (octal 040..0176 = ' '..'~') */
		for(i=0;i<linecount;i++) {
			if (data[i]>=040 && data[i]<=0176)
				printk("%c",data[i]);
			else
				printk(".");
		}
		printk("\n");

		data += linecount;
		count -= linecount;
	}
} /* end of mgsl_trace_block() */

/* mgsl_tx_timeout()
 *
 * 	called when HDLC frame times out
 * 	update stats and do tx completion processing
 *
 * Arguments:	context		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_tx_timeout(unsigned long context)
{
	struct mgsl_struct *info = (struct mgsl_struct*)context;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	if(info->tx_active &&
	   (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW) ) {
		info->icount.txtimeout++;
	}
	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
		usc_loopmode_cancel_transmit( info );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
		mgsl_bh_transmit(info);

} /* end of mgsl_tx_timeout() */

/* signal that there are no more frames to send, so that
 * line is 'released' by echoing RxD to TxD when current
 * transmission is complete (or immediately if no tx in progress).
 */
static int mgsl_loopmode_send_done( struct mgsl_struct * info )
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		if (info->tx_active)
			/* defer release until tx completion (ISR checks flag) */
			info->loopmode_send_done_requested = true;
		else
			usc_loopmode_send_done(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/* release the line by echoing RxD to TxD
 * upon completion of a transmit frame
 */
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
	info->loopmode_send_done_requested = false;
	/* clear CMR:13 to 0 to start echoing RxData to TxData */
	info->cmr_value &= ~BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* abort a transmit in progress while in HDLC LoopMode
 */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
	/* reset tx dma channel and purge TxFifo */
	usc_RTCmd( info, RTCmd_PurgeTxFifo );
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_loopmode_send_done( info );
}

/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
 * we must clear CMR:13 to begin repeating TxData to RxData
 */
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
	info->loopmode_insert_requested = true;

	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
	 * begin repeating TxData on RxData (complete insertion)
	 */
	usc_OutReg( info, RICR,
		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );

	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
	info->cmr_value |= BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* return 1 if station is inserted into the loop, otherwise 0
 */
static int usc_loopmode_active( struct mgsl_struct * info)
{
	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
}

#if SYNCLINK_GENERIC_HDLC

/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned char  new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	/* translate generic HDLC encoding constants to driver constants */
	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

info->params.encoding = new_encoding; 7687 info->params.crc_type = new_crctype; 7688 7689 /* if network interface up, reprogram hardware */ 7690 if (info->netcount) 7691 mgsl_program_hw(info); 7692 7693 return 0; 7694} 7695 7696/** 7697 * called by generic HDLC layer to send frame 7698 * 7699 * skb socket buffer containing HDLC frame 7700 * dev pointer to network device structure 7701 */ 7702static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, 7703 struct net_device *dev) 7704{ 7705 struct mgsl_struct *info = dev_to_port(dev); 7706 unsigned long flags; 7707 7708 if (debug_level >= DEBUG_LEVEL_INFO) 7709 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name); 7710 7711 /* stop sending until this frame completes */ 7712 netif_stop_queue(dev); 7713 7714 /* copy data to device buffers */ 7715 info->xmit_cnt = skb->len; 7716 mgsl_load_tx_dma_buffer(info, skb->data, skb->len); 7717 7718 /* update network statistics */ 7719 dev->stats.tx_packets++; 7720 dev->stats.tx_bytes += skb->len; 7721 7722 /* done with socket buffer, so free it */ 7723 dev_kfree_skb(skb); 7724 7725 /* save start time for transmit timeout detection */ 7726 dev->trans_start = jiffies; 7727 7728 /* start hardware transmitter if necessary */ 7729 spin_lock_irqsave(&info->irq_spinlock,flags); 7730 if (!info->tx_active) 7731 usc_start_transmitter(info); 7732 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7733 7734 return NETDEV_TX_OK; 7735} 7736 7737/** 7738 * called by network layer when interface enabled 7739 * claim resources and initialize hardware 7740 * 7741 * dev pointer to network device structure 7742 * 7743 * returns 0 if success, otherwise error code 7744 */ 7745static int hdlcdev_open(struct net_device *dev) 7746{ 7747 struct mgsl_struct *info = dev_to_port(dev); 7748 int rc; 7749 unsigned long flags; 7750 7751 if (debug_level >= DEBUG_LEVEL_INFO) 7752 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name); 7753 7754 /* generic HDLC layer open processing */ 7755 if ((rc = hdlc_open(dev))) 
7756 return rc; 7757 7758 /* arbitrate between network and tty opens */ 7759 spin_lock_irqsave(&info->netlock, flags); 7760 if (info->port.count != 0 || info->netcount != 0) { 7761 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); 7762 spin_unlock_irqrestore(&info->netlock, flags); 7763 return -EBUSY; 7764 } 7765 info->netcount=1; 7766 spin_unlock_irqrestore(&info->netlock, flags); 7767 7768 /* claim resources and init adapter */ 7769 if ((rc = startup(info)) != 0) { 7770 spin_lock_irqsave(&info->netlock, flags); 7771 info->netcount=0; 7772 spin_unlock_irqrestore(&info->netlock, flags); 7773 return rc; 7774 } 7775 7776 /* assert DTR and RTS, apply hardware settings */ 7777 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 7778 mgsl_program_hw(info); 7779 7780 /* enable network layer transmit */ 7781 dev->trans_start = jiffies; 7782 netif_start_queue(dev); 7783 7784 /* inform generic HDLC layer of current DCD status */ 7785 spin_lock_irqsave(&info->irq_spinlock, flags); 7786 usc_get_serial_signals(info); 7787 spin_unlock_irqrestore(&info->irq_spinlock, flags); 7788 if (info->serial_signals & SerialSignal_DCD) 7789 netif_carrier_on(dev); 7790 else 7791 netif_carrier_off(dev); 7792 return 0; 7793} 7794 7795/** 7796 * called by network layer when interface is disabled 7797 * shutdown hardware and release resources 7798 * 7799 * dev pointer to network device structure 7800 * 7801 * returns 0 if success, otherwise error code 7802 */ 7803static int hdlcdev_close(struct net_device *dev) 7804{ 7805 struct mgsl_struct *info = dev_to_port(dev); 7806 unsigned long flags; 7807 7808 if (debug_level >= DEBUG_LEVEL_INFO) 7809 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name); 7810 7811 netif_stop_queue(dev); 7812 7813 /* shutdown adapter and release resources */ 7814 shutdown(info); 7815 7816 hdlc_close(dev); 7817 7818 spin_lock_irqsave(&info->netlock, flags); 7819 info->netcount=0; 7820 spin_unlock_irqrestore(&info->netlock, flags); 7821 7822 return 
 0;
}

/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* isolate the clock-source flag bits */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		/* map driver clock flags to generic HDLC clock types */
		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG):       new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* map generic HDLC clock type to driver clock flags */
		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		/* replace clock flags, keep all other option flags */
		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		/* clock rate only meaningful when the on-board BRG drives a clock */
		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			mgsl_program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned
 long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n", dev->name);

	if (skb == NULL) {
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, size), buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}

static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};

/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_init(struct mgsl_struct *info)
{
	int rc;
	struct net_device *dev;
	hdlc_device *hdlc;

	/* allocate and initialize network and HDLC layer objects */

	if (!(dev = alloc_hdlcdev(info))) {
		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
		return -ENOMEM;
	}

	/* for network layer reporting purposes only */
	dev->base_addr = info->io_base;
	dev->irq = info->irq_level;
	dev->dma = info->dma_level;

	/* network layer callbacks and settings */
	dev->netdev_ops = &hdlcdev_ops;
	dev->watchdog_timeo = 10 * HZ;
	dev->tx_queue_len = 50;

	/* generic HDLC layer callbacks and settings */
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hdlcdev_attach;
	hdlc->xmit = hdlcdev_xmit;

	/* register objects with HDLC layer */
	if ((rc = register_hdlc_device(dev))) {
		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
		free_netdev(dev);
		return rc;
	}

	info->netdev = dev;
	return 0;
}

/**
 * called by device driver when removing device instance
 * do generic HDLC cleanup
 *
 * info  pointer to device instance information
 */
static void hdlcdev_exit(struct mgsl_struct *info)
{
	unregister_hdlc_device(info->netdev);
	free_netdev(info->netdev);
	info->netdev = NULL;
}

#endif /* CONFIG_HDLC */


static int synclink_init_one (struct pci_dev *dev,
			      const struct pci_device_id *ent)
{
	struct mgsl_struct *info;

	if (pci_enable_device(dev)) {
		printk("error enabling pci device %p\n", dev);
		return -EIO;
	}

	if (!(info = mgsl_allocate_device())) {
		printk("can't allocate device instance data.\n");
		return -EIO;
	}

	/* Copy user configuration info to device instance data */

	info->io_base = pci_resource_start(dev, 2);
	info->irq_level = dev->irq;
	info->phys_memory_base = pci_resource_start(dev, 3);

	/* Because ioremap only works on page boundaries we must map
	 * a larger area than is actually implemented for the LCR
	 * memory range. We map a full page starting at the page boundary.
	 */
	info->phys_lcr_base = pci_resource_start(dev, 0);
	info->lcr_offset    = info->phys_lcr_base & (PAGE_SIZE-1);
	info->phys_lcr_base &= ~(PAGE_SIZE-1);

	info->bus_type = MGSL_BUS_TYPE_PCI;
	info->io_addr_size = 8;
	info->irq_flags = IRQF_SHARED;

	if (dev->device == 0x0210) {
		/* Version 1 PCI9030 based universal PCI adapter */
		info->misc_ctrl_value = 0x007c4080;
		info->hw_version = 1;
	} else {
		/* Version 0 PCI9050 based 5V PCI adapter
		 * A PCI9050 bug prevents reading LCR registers if
		 * LCR base address bit 7 is set. Maintain shadow
		 * value so we can write to LCR misc control reg.
		 */
		info->misc_ctrl_value = 0x087e4546;
		info->hw_version = 0;
	}

	mgsl_add_device(info);

	return 0;
}

/* device removal is a no-op here; instance cleanup is handled by
 * the driver's module cleanup path rather than per-device remove */
static void synclink_remove_one (struct pci_dev *dev)
{
}