Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.36-rc2 8125 lines 236 kB view raw
1/* 2 * linux/drivers/char/synclink.c 3 * 4 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $ 5 * 6 * Device driver for Microgate SyncLink ISA and PCI 7 * high speed multiprotocol serial adapters. 8 * 9 * written by Paul Fulghum for Microgate Corporation 10 * paulkf@microgate.com 11 * 12 * Microgate and SyncLink are trademarks of Microgate Corporation 13 * 14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds 15 * 16 * Original release 01/11/99 17 * 18 * This code is released under the GNU General Public License (GPL) 19 * 20 * This driver is primarily intended for use in synchronous 21 * HDLC mode. Asynchronous mode is also provided. 22 * 23 * When operating in synchronous mode, each call to mgsl_write() 24 * contains exactly one complete HDLC frame. Calling mgsl_put_char 25 * will start assembling an HDLC frame that will not be sent until 26 * mgsl_flush_chars or mgsl_write is called. 27 * 28 * Synchronous receive data is reported as complete frames. To accomplish 29 * this, the TTY flip buffer is bypassed (too small to hold largest 30 * frame and may fragment frames) and the line discipline 31 * receive entry point is called directly. 32 * 33 * This driver has been tested with a slightly modified ppp.c driver 34 * for synchronous PPP. 35 * 36 * 2000/02/16 37 * Added interface for syncppp.c driver (an alternate synchronous PPP 38 * implementation that also supports Cisco HDLC). Each device instance 39 * registers as a tty device AND a network device (if dosyncppp option 40 * is set for the device). The functionality is determined by which 41 * device interface is opened. 42 * 43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 46 * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 53 * OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56#if defined(__i386__) 57# define BREAKPOINT() asm(" int $3"); 58#else 59# define BREAKPOINT() { } 60#endif 61 62#define MAX_ISA_DEVICES 10 63#define MAX_PCI_DEVICES 10 64#define MAX_TOTAL_DEVICES 20 65 66#include <linux/module.h> 67#include <linux/errno.h> 68#include <linux/signal.h> 69#include <linux/sched.h> 70#include <linux/timer.h> 71#include <linux/interrupt.h> 72#include <linux/pci.h> 73#include <linux/tty.h> 74#include <linux/tty_flip.h> 75#include <linux/serial.h> 76#include <linux/major.h> 77#include <linux/string.h> 78#include <linux/fcntl.h> 79#include <linux/ptrace.h> 80#include <linux/ioport.h> 81#include <linux/mm.h> 82#include <linux/seq_file.h> 83#include <linux/slab.h> 84#include <linux/delay.h> 85#include <linux/netdevice.h> 86#include <linux/vmalloc.h> 87#include <linux/init.h> 88#include <linux/ioctl.h> 89#include <linux/synclink.h> 90 91#include <asm/system.h> 92#include <asm/io.h> 93#include <asm/irq.h> 94#include <asm/dma.h> 95#include <linux/bitops.h> 96#include <asm/types.h> 97#include <linux/termios.h> 98#include <linux/workqueue.h> 99#include <linux/hdlc.h> 100#include <linux/dma-mapping.h> 101 102#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) 103#define SYNCLINK_GENERIC_HDLC 1 104#else 105#define SYNCLINK_GENERIC_HDLC 0 106#endif 107 108#define GET_USER(error,value,addr) error = get_user(value,addr) 109#define COPY_FROM_USER(error,dest,src,size) error = 
copy_from_user(dest,src,size) ? -EFAULT : 0 110#define PUT_USER(error,value,addr) error = put_user(value,addr) 111#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0 112 113#include <asm/uaccess.h> 114 115#define RCLRVALUE 0xffff 116 117static MGSL_PARAMS default_params = { 118 MGSL_MODE_HDLC, /* unsigned long mode */ 119 0, /* unsigned char loopback; */ 120 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ 121 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 122 0, /* unsigned long clock_speed; */ 123 0xff, /* unsigned char addr_filter; */ 124 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ 125 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ 126 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 127 9600, /* unsigned long data_rate; */ 128 8, /* unsigned char data_bits; */ 129 1, /* unsigned char stop_bits; */ 130 ASYNC_PARITY_NONE /* unsigned char parity; */ 131}; 132 133#define SHARED_MEM_ADDRESS_SIZE 0x40000 134#define BUFFERLISTSIZE 4096 135#define DMABUFFERSIZE 4096 136#define MAXRXFRAMES 7 137 138typedef struct _DMABUFFERENTRY 139{ 140 u32 phys_addr; /* 32-bit flat physical address of data buffer */ 141 volatile u16 count; /* buffer size/data count */ 142 volatile u16 status; /* Control/status field */ 143 volatile u16 rcc; /* character count field */ 144 u16 reserved; /* padding required by 16C32 */ 145 u32 link; /* 32-bit flat link to next buffer entry */ 146 char *virt_addr; /* virtual address of data buffer */ 147 u32 phys_entry; /* physical address of this buffer entry */ 148 dma_addr_t dma_addr; 149} DMABUFFERENTRY, *DMAPBUFFERENTRY; 150 151/* The queue of BH actions to be performed */ 152 153#define BH_RECEIVE 1 154#define BH_TRANSMIT 2 155#define BH_STATUS 4 156 157#define IO_PIN_SHUTDOWN_LIMIT 100 158 159struct _input_signal_events { 160 int ri_up; 161 int ri_down; 162 int dsr_up; 163 int dsr_down; 164 int dcd_up; 165 int dcd_down; 166 int cts_up; 167 int cts_down; 168}; 
169 170/* transmit holding buffer definitions*/ 171#define MAX_TX_HOLDING_BUFFERS 5 172struct tx_holding_buffer { 173 int buffer_size; 174 unsigned char * buffer; 175}; 176 177 178/* 179 * Device instance data structure 180 */ 181 182struct mgsl_struct { 183 int magic; 184 struct tty_port port; 185 int line; 186 int hw_version; 187 188 struct mgsl_icount icount; 189 190 int timeout; 191 int x_char; /* xon/xoff character */ 192 u16 read_status_mask; 193 u16 ignore_status_mask; 194 unsigned char *xmit_buf; 195 int xmit_head; 196 int xmit_tail; 197 int xmit_cnt; 198 199 wait_queue_head_t status_event_wait_q; 200 wait_queue_head_t event_wait_q; 201 struct timer_list tx_timer; /* HDLC transmit timeout timer */ 202 struct mgsl_struct *next_device; /* device list link */ 203 204 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */ 205 struct work_struct task; /* task structure for scheduling bh */ 206 207 u32 EventMask; /* event trigger mask */ 208 u32 RecordedEvents; /* pending events */ 209 210 u32 max_frame_size; /* as set by device config */ 211 212 u32 pending_bh; 213 214 bool bh_running; /* Protection from multiple */ 215 int isr_overflow; 216 bool bh_requested; 217 218 int dcd_chkcount; /* check counts to prevent */ 219 int cts_chkcount; /* too many IRQs if a signal */ 220 int dsr_chkcount; /* is floating */ 221 int ri_chkcount; 222 223 char *buffer_list; /* virtual address of Rx & Tx buffer lists */ 224 u32 buffer_list_phys; 225 dma_addr_t buffer_list_dma_addr; 226 227 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */ 228 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */ 229 unsigned int current_rx_buffer; 230 231 int num_tx_dma_buffers; /* number of tx dma frames required */ 232 int tx_dma_buffers_used; 233 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */ 234 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */ 235 int start_tx_dma_buffer; /* tx dma buffer to start tx 
dma operation */ 236 int current_tx_buffer; /* next tx dma buffer to be loaded */ 237 238 unsigned char *intermediate_rxbuffer; 239 240 int num_tx_holding_buffers; /* number of tx holding buffer allocated */ 241 int get_tx_holding_index; /* next tx holding buffer for adapter to load */ 242 int put_tx_holding_index; /* next tx holding buffer to store user request */ 243 int tx_holding_count; /* number of tx holding buffers waiting */ 244 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS]; 245 246 bool rx_enabled; 247 bool rx_overflow; 248 bool rx_rcc_underrun; 249 250 bool tx_enabled; 251 bool tx_active; 252 u32 idle_mode; 253 254 u16 cmr_value; 255 u16 tcsr_value; 256 257 char device_name[25]; /* device instance name */ 258 259 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ 260 unsigned char bus; /* expansion bus number (zero based) */ 261 unsigned char function; /* PCI device number */ 262 263 unsigned int io_base; /* base I/O address of adapter */ 264 unsigned int io_addr_size; /* size of the I/O address range */ 265 bool io_addr_requested; /* true if I/O address requested */ 266 267 unsigned int irq_level; /* interrupt level */ 268 unsigned long irq_flags; 269 bool irq_requested; /* true if IRQ requested */ 270 271 unsigned int dma_level; /* DMA channel */ 272 bool dma_requested; /* true if dma channel requested */ 273 274 u16 mbre_bit; 275 u16 loopback_bits; 276 u16 usc_idle_mode; 277 278 MGSL_PARAMS params; /* communications parameters */ 279 280 unsigned char serial_signals; /* current serial signal states */ 281 282 bool irq_occurred; /* for diagnostics use */ 283 unsigned int init_error; /* Initialization startup error (DIAGS) */ 284 int fDiagnosticsmode; /* Driver in Diagnostic mode? 
(DIAGS) */ 285 286 u32 last_mem_alloc; 287 unsigned char* memory_base; /* shared memory address (PCI only) */ 288 u32 phys_memory_base; 289 bool shared_mem_requested; 290 291 unsigned char* lcr_base; /* local config registers (PCI only) */ 292 u32 phys_lcr_base; 293 u32 lcr_offset; 294 bool lcr_mem_requested; 295 296 u32 misc_ctrl_value; 297 char flag_buf[MAX_ASYNC_BUFFER_SIZE]; 298 char char_buf[MAX_ASYNC_BUFFER_SIZE]; 299 bool drop_rts_on_tx_done; 300 301 bool loopmode_insert_requested; 302 bool loopmode_send_done_requested; 303 304 struct _input_signal_events input_signal_events; 305 306 /* generic HDLC device parts */ 307 int netcount; 308 spinlock_t netlock; 309 310#if SYNCLINK_GENERIC_HDLC 311 struct net_device *netdev; 312#endif 313}; 314 315#define MGSL_MAGIC 0x5401 316 317/* 318 * The size of the serial xmit buffer is 1 page, or 4096 bytes 319 */ 320#ifndef SERIAL_XMIT_SIZE 321#define SERIAL_XMIT_SIZE 4096 322#endif 323 324/* 325 * These macros define the offsets used in calculating the 326 * I/O address of the specified USC registers. 327 */ 328 329 330#define DCPIN 2 /* Bit 1 of I/O address */ 331#define SDPIN 4 /* Bit 2 of I/O address */ 332 333#define DCAR 0 /* DMA command/address register */ 334#define CCAR SDPIN /* channel command/address register */ 335#define DATAREG DCPIN + SDPIN /* serial data register */ 336#define MSBONLY 0x41 337#define LSBONLY 0x40 338 339/* 340 * These macros define the register address (ordinal number) 341 * used for writing address/value pairs to the USC. 
342 */ 343 344#define CMR 0x02 /* Channel mode Register */ 345#define CCSR 0x04 /* Channel Command/status Register */ 346#define CCR 0x06 /* Channel Control Register */ 347#define PSR 0x08 /* Port status Register */ 348#define PCR 0x0a /* Port Control Register */ 349#define TMDR 0x0c /* Test mode Data Register */ 350#define TMCR 0x0e /* Test mode Control Register */ 351#define CMCR 0x10 /* Clock mode Control Register */ 352#define HCR 0x12 /* Hardware Configuration Register */ 353#define IVR 0x14 /* Interrupt Vector Register */ 354#define IOCR 0x16 /* Input/Output Control Register */ 355#define ICR 0x18 /* Interrupt Control Register */ 356#define DCCR 0x1a /* Daisy Chain Control Register */ 357#define MISR 0x1c /* Misc Interrupt status Register */ 358#define SICR 0x1e /* status Interrupt Control Register */ 359#define RDR 0x20 /* Receive Data Register */ 360#define RMR 0x22 /* Receive mode Register */ 361#define RCSR 0x24 /* Receive Command/status Register */ 362#define RICR 0x26 /* Receive Interrupt Control Register */ 363#define RSR 0x28 /* Receive Sync Register */ 364#define RCLR 0x2a /* Receive count Limit Register */ 365#define RCCR 0x2c /* Receive Character count Register */ 366#define TC0R 0x2e /* Time Constant 0 Register */ 367#define TDR 0x30 /* Transmit Data Register */ 368#define TMR 0x32 /* Transmit mode Register */ 369#define TCSR 0x34 /* Transmit Command/status Register */ 370#define TICR 0x36 /* Transmit Interrupt Control Register */ 371#define TSR 0x38 /* Transmit Sync Register */ 372#define TCLR 0x3a /* Transmit count Limit Register */ 373#define TCCR 0x3c /* Transmit Character count Register */ 374#define TC1R 0x3e /* Time Constant 1 Register */ 375 376 377/* 378 * MACRO DEFINITIONS FOR DMA REGISTERS 379 */ 380 381#define DCR 0x06 /* DMA Control Register (shared) */ 382#define DACR 0x08 /* DMA Array count Register (shared) */ 383#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */ 384#define DIVR 0x14 /* DMA Interrupt Vector Register 
(shared) */ 385#define DICR 0x18 /* DMA Interrupt Control Register (shared) */ 386#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */ 387#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */ 388 389#define TDMR 0x02 /* Transmit DMA mode Register */ 390#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */ 391#define TBCR 0x2a /* Transmit Byte count Register */ 392#define TARL 0x2c /* Transmit Address Register (low) */ 393#define TARU 0x2e /* Transmit Address Register (high) */ 394#define NTBCR 0x3a /* Next Transmit Byte count Register */ 395#define NTARL 0x3c /* Next Transmit Address Register (low) */ 396#define NTARU 0x3e /* Next Transmit Address Register (high) */ 397 398#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */ 399#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */ 400#define RBCR 0xaa /* Receive Byte count Register */ 401#define RARL 0xac /* Receive Address Register (low) */ 402#define RARU 0xae /* Receive Address Register (high) */ 403#define NRBCR 0xba /* Next Receive Byte count Register */ 404#define NRARL 0xbc /* Next Receive Address Register (low) */ 405#define NRARU 0xbe /* Next Receive Address Register (high) */ 406 407 408/* 409 * MACRO DEFINITIONS FOR MODEM STATUS BITS 410 */ 411 412#define MODEMSTATUS_DTR 0x80 413#define MODEMSTATUS_DSR 0x40 414#define MODEMSTATUS_RTS 0x20 415#define MODEMSTATUS_CTS 0x10 416#define MODEMSTATUS_RI 0x04 417#define MODEMSTATUS_DCD 0x01 418 419 420/* 421 * Channel Command/Address Register (CCAR) Command Codes 422 */ 423 424#define RTCmd_Null 0x0000 425#define RTCmd_ResetHighestIus 0x1000 426#define RTCmd_TriggerChannelLoadDma 0x2000 427#define RTCmd_TriggerRxDma 0x2800 428#define RTCmd_TriggerTxDma 0x3000 429#define RTCmd_TriggerRxAndTxDma 0x3800 430#define RTCmd_PurgeRxFifo 0x4800 431#define RTCmd_PurgeTxFifo 0x5000 432#define RTCmd_PurgeRxAndTxFifo 0x5800 433#define RTCmd_LoadRcc 0x6800 434#define RTCmd_LoadTcc 0x7000 435#define RTCmd_LoadRccAndTcc 0x7800 436#define 
RTCmd_LoadTC0 0x8800 437#define RTCmd_LoadTC1 0x9000 438#define RTCmd_LoadTC0AndTC1 0x9800 439#define RTCmd_SerialDataLSBFirst 0xa000 440#define RTCmd_SerialDataMSBFirst 0xa800 441#define RTCmd_SelectBigEndian 0xb000 442#define RTCmd_SelectLittleEndian 0xb800 443 444 445/* 446 * DMA Command/Address Register (DCAR) Command Codes 447 */ 448 449#define DmaCmd_Null 0x0000 450#define DmaCmd_ResetTxChannel 0x1000 451#define DmaCmd_ResetRxChannel 0x1200 452#define DmaCmd_StartTxChannel 0x2000 453#define DmaCmd_StartRxChannel 0x2200 454#define DmaCmd_ContinueTxChannel 0x3000 455#define DmaCmd_ContinueRxChannel 0x3200 456#define DmaCmd_PauseTxChannel 0x4000 457#define DmaCmd_PauseRxChannel 0x4200 458#define DmaCmd_AbortTxChannel 0x5000 459#define DmaCmd_AbortRxChannel 0x5200 460#define DmaCmd_InitTxChannel 0x7000 461#define DmaCmd_InitRxChannel 0x7200 462#define DmaCmd_ResetHighestDmaIus 0x8000 463#define DmaCmd_ResetAllChannels 0x9000 464#define DmaCmd_StartAllChannels 0xa000 465#define DmaCmd_ContinueAllChannels 0xb000 466#define DmaCmd_PauseAllChannels 0xc000 467#define DmaCmd_AbortAllChannels 0xd000 468#define DmaCmd_InitAllChannels 0xf000 469 470#define TCmd_Null 0x0000 471#define TCmd_ClearTxCRC 0x2000 472#define TCmd_SelectTicrTtsaData 0x4000 473#define TCmd_SelectTicrTxFifostatus 0x5000 474#define TCmd_SelectTicrIntLevel 0x6000 475#define TCmd_SelectTicrdma_level 0x7000 476#define TCmd_SendFrame 0x8000 477#define TCmd_SendAbort 0x9000 478#define TCmd_EnableDleInsertion 0xc000 479#define TCmd_DisableDleInsertion 0xd000 480#define TCmd_ClearEofEom 0xe000 481#define TCmd_SetEofEom 0xf000 482 483#define RCmd_Null 0x0000 484#define RCmd_ClearRxCRC 0x2000 485#define RCmd_EnterHuntmode 0x3000 486#define RCmd_SelectRicrRtsaData 0x4000 487#define RCmd_SelectRicrRxFifostatus 0x5000 488#define RCmd_SelectRicrIntLevel 0x6000 489#define RCmd_SelectRicrdma_level 0x7000 490 491/* 492 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR) 493 */ 494 495#define 
RECEIVE_STATUS BIT5 496#define RECEIVE_DATA BIT4 497#define TRANSMIT_STATUS BIT3 498#define TRANSMIT_DATA BIT2 499#define IO_PIN BIT1 500#define MISC BIT0 501 502 503/* 504 * Receive status Bits in Receive Command/status Register RCSR 505 */ 506 507#define RXSTATUS_SHORT_FRAME BIT8 508#define RXSTATUS_CODE_VIOLATION BIT8 509#define RXSTATUS_EXITED_HUNT BIT7 510#define RXSTATUS_IDLE_RECEIVED BIT6 511#define RXSTATUS_BREAK_RECEIVED BIT5 512#define RXSTATUS_ABORT_RECEIVED BIT5 513#define RXSTATUS_RXBOUND BIT4 514#define RXSTATUS_CRC_ERROR BIT3 515#define RXSTATUS_FRAMING_ERROR BIT3 516#define RXSTATUS_ABORT BIT2 517#define RXSTATUS_PARITY_ERROR BIT2 518#define RXSTATUS_OVERRUN BIT1 519#define RXSTATUS_DATA_AVAILABLE BIT0 520#define RXSTATUS_ALL 0x01f6 521#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) ) 522 523/* 524 * Values for setting transmit idle mode in 525 * Transmit Control/status Register (TCSR) 526 */ 527#define IDLEMODE_FLAGS 0x0000 528#define IDLEMODE_ALT_ONE_ZERO 0x0100 529#define IDLEMODE_ZERO 0x0200 530#define IDLEMODE_ONE 0x0300 531#define IDLEMODE_ALT_MARK_SPACE 0x0500 532#define IDLEMODE_SPACE 0x0600 533#define IDLEMODE_MARK 0x0700 534#define IDLEMODE_MASK 0x0700 535 536/* 537 * IUSC revision identifiers 538 */ 539#define IUSC_SL1660 0x4d44 540#define IUSC_PRE_SL1660 0x4553 541 542/* 543 * Transmit status Bits in Transmit Command/status Register (TCSR) 544 */ 545 546#define TCSR_PRESERVE 0x0F00 547 548#define TCSR_UNDERWAIT BIT11 549#define TXSTATUS_PREAMBLE_SENT BIT7 550#define TXSTATUS_IDLE_SENT BIT6 551#define TXSTATUS_ABORT_SENT BIT5 552#define TXSTATUS_EOF_SENT BIT4 553#define TXSTATUS_EOM_SENT BIT4 554#define TXSTATUS_CRC_SENT BIT3 555#define TXSTATUS_ALL_SENT BIT2 556#define TXSTATUS_UNDERRUN BIT1 557#define TXSTATUS_FIFO_EMPTY BIT0 558#define TXSTATUS_ALL 0x00fa 559#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) ) 560 561 562#define 
MISCSTATUS_RXC_LATCHED BIT15 563#define MISCSTATUS_RXC BIT14 564#define MISCSTATUS_TXC_LATCHED BIT13 565#define MISCSTATUS_TXC BIT12 566#define MISCSTATUS_RI_LATCHED BIT11 567#define MISCSTATUS_RI BIT10 568#define MISCSTATUS_DSR_LATCHED BIT9 569#define MISCSTATUS_DSR BIT8 570#define MISCSTATUS_DCD_LATCHED BIT7 571#define MISCSTATUS_DCD BIT6 572#define MISCSTATUS_CTS_LATCHED BIT5 573#define MISCSTATUS_CTS BIT4 574#define MISCSTATUS_RCC_UNDERRUN BIT3 575#define MISCSTATUS_DPLL_NO_SYNC BIT2 576#define MISCSTATUS_BRG1_ZERO BIT1 577#define MISCSTATUS_BRG0_ZERO BIT0 578 579#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0)) 580#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f)) 581 582#define SICR_RXC_ACTIVE BIT15 583#define SICR_RXC_INACTIVE BIT14 584#define SICR_RXC (BIT15+BIT14) 585#define SICR_TXC_ACTIVE BIT13 586#define SICR_TXC_INACTIVE BIT12 587#define SICR_TXC (BIT13+BIT12) 588#define SICR_RI_ACTIVE BIT11 589#define SICR_RI_INACTIVE BIT10 590#define SICR_RI (BIT11+BIT10) 591#define SICR_DSR_ACTIVE BIT9 592#define SICR_DSR_INACTIVE BIT8 593#define SICR_DSR (BIT9+BIT8) 594#define SICR_DCD_ACTIVE BIT7 595#define SICR_DCD_INACTIVE BIT6 596#define SICR_DCD (BIT7+BIT6) 597#define SICR_CTS_ACTIVE BIT5 598#define SICR_CTS_INACTIVE BIT4 599#define SICR_CTS (BIT5+BIT4) 600#define SICR_RCC_UNDERFLOW BIT3 601#define SICR_DPLL_NO_SYNC BIT2 602#define SICR_BRG1_ZERO BIT1 603#define SICR_BRG0_ZERO BIT0 604 605void usc_DisableMasterIrqBit( struct mgsl_struct *info ); 606void usc_EnableMasterIrqBit( struct mgsl_struct *info ); 607void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 608void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 609void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask ); 610 611#define usc_EnableInterrupts( a, b ) \ 612 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) ) 613 614#define usc_DisableInterrupts( a, b ) \ 615 usc_OutReg( 
(a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) ) 616 617#define usc_EnableMasterIrqBit(a) \ 618 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) ) 619 620#define usc_DisableMasterIrqBit(a) \ 621 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) ) 622 623#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) ) 624 625/* 626 * Transmit status Bits in Transmit Control status Register (TCSR) 627 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) 628 */ 629 630#define TXSTATUS_PREAMBLE_SENT BIT7 631#define TXSTATUS_IDLE_SENT BIT6 632#define TXSTATUS_ABORT_SENT BIT5 633#define TXSTATUS_EOF BIT4 634#define TXSTATUS_CRC_SENT BIT3 635#define TXSTATUS_ALL_SENT BIT2 636#define TXSTATUS_UNDERRUN BIT1 637#define TXSTATUS_FIFO_EMPTY BIT0 638 639#define DICR_MASTER BIT15 640#define DICR_TRANSMIT BIT0 641#define DICR_RECEIVE BIT1 642 643#define usc_EnableDmaInterrupts(a,b) \ 644 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) ) 645 646#define usc_DisableDmaInterrupts(a,b) \ 647 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) ) 648 649#define usc_EnableStatusIrqs(a,b) \ 650 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) ) 651 652#define usc_DisablestatusIrqs(a,b) \ 653 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) ) 654 655/* Transmit status Bits in Transmit Control status Register (TCSR) */ 656/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ 657 658 659#define DISABLE_UNCONDITIONAL 0 660#define DISABLE_END_OF_FRAME 1 661#define ENABLE_UNCONDITIONAL 2 662#define ENABLE_AUTO_CTS 3 663#define ENABLE_AUTO_DCD 3 664#define usc_EnableTransmitter(a,b) \ 665 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) ) 666#define usc_EnableReceiver(a,b) \ 667 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) ) 668 669static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port ); 670static void usc_OutDmaReg( struct 
mgsl_struct *info, u16 Port, u16 Value ); 671static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ); 672 673static u16 usc_InReg( struct mgsl_struct *info, u16 Port ); 674static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value ); 675static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ); 676void usc_RCmd( struct mgsl_struct *info, u16 Cmd ); 677void usc_TCmd( struct mgsl_struct *info, u16 Cmd ); 678 679#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b))) 680#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b)) 681 682#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1)) 683 684static void usc_process_rxoverrun_sync( struct mgsl_struct *info ); 685static void usc_start_receiver( struct mgsl_struct *info ); 686static void usc_stop_receiver( struct mgsl_struct *info ); 687 688static void usc_start_transmitter( struct mgsl_struct *info ); 689static void usc_stop_transmitter( struct mgsl_struct *info ); 690static void usc_set_txidle( struct mgsl_struct *info ); 691static void usc_load_txfifo( struct mgsl_struct *info ); 692 693static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate ); 694static void usc_enable_loopback( struct mgsl_struct *info, int enable ); 695 696static void usc_get_serial_signals( struct mgsl_struct *info ); 697static void usc_set_serial_signals( struct mgsl_struct *info ); 698 699static void usc_reset( struct mgsl_struct *info ); 700 701static void usc_set_sync_mode( struct mgsl_struct *info ); 702static void usc_set_sdlc_mode( struct mgsl_struct *info ); 703static void usc_set_async_mode( struct mgsl_struct *info ); 704static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate ); 705 706static void usc_loopback_frame( struct mgsl_struct *info ); 707 708static void mgsl_tx_timeout(unsigned long context); 709 710 711static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ); 712static void usc_loopmode_insert_request( struct 
mgsl_struct * info ); 713static int usc_loopmode_active( struct mgsl_struct * info); 714static void usc_loopmode_send_done( struct mgsl_struct * info ); 715 716static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); 717 718#if SYNCLINK_GENERIC_HDLC 719#define dev_to_port(D) (dev_to_hdlc(D)->priv) 720static void hdlcdev_tx_done(struct mgsl_struct *info); 721static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); 722static int hdlcdev_init(struct mgsl_struct *info); 723static void hdlcdev_exit(struct mgsl_struct *info); 724#endif 725 726/* 727 * Defines a BUS descriptor value for the PCI adapter 728 * local bus address ranges. 729 */ 730 731#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \ 732(0x00400020 + \ 733((WrHold) << 30) + \ 734((WrDly) << 28) + \ 735((RdDly) << 26) + \ 736((Nwdd) << 20) + \ 737((Nwad) << 15) + \ 738((Nxda) << 13) + \ 739((Nrdd) << 11) + \ 740((Nrad) << 6) ) 741 742static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit); 743 744/* 745 * Adapter diagnostic routines 746 */ 747static bool mgsl_register_test( struct mgsl_struct *info ); 748static bool mgsl_irq_test( struct mgsl_struct *info ); 749static bool mgsl_dma_test( struct mgsl_struct *info ); 750static bool mgsl_memory_test( struct mgsl_struct *info ); 751static int mgsl_adapter_test( struct mgsl_struct *info ); 752 753/* 754 * device and resource management routines 755 */ 756static int mgsl_claim_resources(struct mgsl_struct *info); 757static void mgsl_release_resources(struct mgsl_struct *info); 758static void mgsl_add_device(struct mgsl_struct *info); 759static struct mgsl_struct* mgsl_allocate_device(void); 760 761/* 762 * DMA buffer manupulation functions. 
763 */ 764static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ); 765static bool mgsl_get_rx_frame( struct mgsl_struct *info ); 766static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info ); 767static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ); 768static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ); 769static int num_free_tx_dma_buffers(struct mgsl_struct *info); 770static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize); 771static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count); 772 773/* 774 * DMA and Shared Memory buffer allocation and formatting 775 */ 776static int mgsl_allocate_dma_buffers(struct mgsl_struct *info); 777static void mgsl_free_dma_buffers(struct mgsl_struct *info); 778static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 779static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 780static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info); 781static void mgsl_free_buffer_list_memory(struct mgsl_struct *info); 782static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info); 783static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info); 784static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info); 785static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info); 786static bool load_next_tx_holding_buffer(struct mgsl_struct *info); 787static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize); 788 789/* 790 * Bottom half interrupt handlers 791 */ 792static void mgsl_bh_handler(struct work_struct *work); 793static void mgsl_bh_receive(struct mgsl_struct *info); 794static void mgsl_bh_transmit(struct mgsl_struct *info); 795static void 
mgsl_bh_status(struct mgsl_struct *info); 796 797/* 798 * Interrupt handler routines and dispatch table. 799 */ 800static void mgsl_isr_null( struct mgsl_struct *info ); 801static void mgsl_isr_transmit_data( struct mgsl_struct *info ); 802static void mgsl_isr_receive_data( struct mgsl_struct *info ); 803static void mgsl_isr_receive_status( struct mgsl_struct *info ); 804static void mgsl_isr_transmit_status( struct mgsl_struct *info ); 805static void mgsl_isr_io_pin( struct mgsl_struct *info ); 806static void mgsl_isr_misc( struct mgsl_struct *info ); 807static void mgsl_isr_receive_dma( struct mgsl_struct *info ); 808static void mgsl_isr_transmit_dma( struct mgsl_struct *info ); 809 810typedef void (*isr_dispatch_func)(struct mgsl_struct *); 811 812static isr_dispatch_func UscIsrTable[7] = 813{ 814 mgsl_isr_null, 815 mgsl_isr_misc, 816 mgsl_isr_io_pin, 817 mgsl_isr_transmit_data, 818 mgsl_isr_transmit_status, 819 mgsl_isr_receive_data, 820 mgsl_isr_receive_status 821}; 822 823/* 824 * ioctl call handlers 825 */ 826static int tiocmget(struct tty_struct *tty, struct file *file); 827static int tiocmset(struct tty_struct *tty, struct file *file, 828 unsigned int set, unsigned int clear); 829static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount 830 __user *user_icount); 831static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params); 832static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params); 833static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode); 834static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode); 835static int mgsl_txenable(struct mgsl_struct * info, int enable); 836static int mgsl_txabort(struct mgsl_struct * info); 837static int mgsl_rxenable(struct mgsl_struct * info, int enable); 838static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask); 839static int mgsl_loopmode_send_done( struct mgsl_struct * info ); 840 841/* set non-zero 
on successful registration with PCI subsystem */ 842static bool pci_registered; 843 844/* 845 * Global linked list of SyncLink devices 846 */ 847static struct mgsl_struct *mgsl_device_list; 848static int mgsl_device_count; 849 850/* 851 * Set this param to non-zero to load eax with the 852 * .text section address and breakpoint on module load. 853 * This is useful for use with gdb and add-symbol-file command. 854 */ 855static int break_on_load; 856 857/* 858 * Driver major number, defaults to zero to get auto 859 * assigned major number. May be forced as module parameter. 860 */ 861static int ttymajor; 862 863/* 864 * Array of user specified options for ISA adapters. 865 */ 866static int io[MAX_ISA_DEVICES]; 867static int irq[MAX_ISA_DEVICES]; 868static int dma[MAX_ISA_DEVICES]; 869static int debug_level; 870static int maxframe[MAX_TOTAL_DEVICES]; 871static int txdmabufs[MAX_TOTAL_DEVICES]; 872static int txholdbufs[MAX_TOTAL_DEVICES]; 873 874module_param(break_on_load, bool, 0); 875module_param(ttymajor, int, 0); 876module_param_array(io, int, NULL, 0); 877module_param_array(irq, int, NULL, 0); 878module_param_array(dma, int, NULL, 0); 879module_param(debug_level, int, 0); 880module_param_array(maxframe, int, NULL, 0); 881module_param_array(txdmabufs, int, NULL, 0); 882module_param_array(txholdbufs, int, NULL, 0); 883 884static char *driver_name = "SyncLink serial driver"; 885static char *driver_version = "$Revision: 4.38 $"; 886 887static int synclink_init_one (struct pci_dev *dev, 888 const struct pci_device_id *ent); 889static void synclink_remove_one (struct pci_dev *dev); 890 891static struct pci_device_id synclink_pci_tbl[] = { 892 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, }, 893 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, }, 894 { 0, }, /* terminate list */ 895}; 896MODULE_DEVICE_TABLE(pci, synclink_pci_tbl); 897 898MODULE_LICENSE("GPL"); 899 900static struct pci_driver synclink_pci_driver = { 901 .name = 
"synclink", 902 .id_table = synclink_pci_tbl, 903 .probe = synclink_init_one, 904 .remove = __devexit_p(synclink_remove_one), 905}; 906 907static struct tty_driver *serial_driver; 908 909/* number of characters left in xmit buffer before we ask for more */ 910#define WAKEUP_CHARS 256 911 912 913static void mgsl_change_params(struct mgsl_struct *info); 914static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout); 915 916/* 917 * 1st function defined in .text section. Calling this function in 918 * init_module() followed by a breakpoint allows a remote debugger 919 * (gdb) to get the .text address for the add-symbol-file command. 920 * This allows remote debugging of dynamically loadable modules. 921 */ 922static void* mgsl_get_text_ptr(void) 923{ 924 return mgsl_get_text_ptr; 925} 926 927static inline int mgsl_paranoia_check(struct mgsl_struct *info, 928 char *name, const char *routine) 929{ 930#ifdef MGSL_PARANOIA_CHECK 931 static const char *badmagic = 932 "Warning: bad magic number for mgsl struct (%s) in %s\n"; 933 static const char *badinfo = 934 "Warning: null mgsl_struct for (%s) in %s\n"; 935 936 if (!info) { 937 printk(badinfo, name, routine); 938 return 1; 939 } 940 if (info->magic != MGSL_MAGIC) { 941 printk(badmagic, name, routine); 942 return 1; 943 } 944#else 945 if (!info) 946 return 1; 947#endif 948 return 0; 949} 950 951/** 952 * line discipline callback wrappers 953 * 954 * The wrappers maintain line discipline references 955 * while calling into the line discipline. 
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */
/* Take a temporary ldisc reference around the receive_buf call so the
 * discipline cannot be torn down while we are inside it. A NULL tty or
 * missing receive_buf op is silently ignored. */
static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* mgsl_stop()		throttle (stop) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_stop(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_stop(%s)\n",info->device_name);

	/* tx_enabled is checked/changed under irq_spinlock everywhere */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->tx_enabled)
	 	usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

} /* end of mgsl_stop() */

/* mgsl_start()		release (start) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_start(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_start(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_enabled)
	 	usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

} /* end of mgsl_start() */

/*
 * Bottom half work queue access functions
 */

/* mgsl_bh_action()	Return next bottom half action to perform.
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* Consume one pending work flag per call; receive has highest
	 * priority, then transmit, then status. */
	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete */
		info->bh_running = false;
		info->bh_requested = false;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 * Perform bottom half processing of work items queued by ISR.
 */
static void mgsl_bh_handler(struct work_struct *work)
{
	struct mgsl_struct *info =
		container_of(work, struct mgsl_struct, task);
	int action;

	if (!info)
		return;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = true;

	/* Drain work items until mgsl_bh_action() reports nothing left
	 * (at which point it also clears bh_running/bh_requested). */
	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

/* Deliver completed receive frames; frame extractor depends on HDLC
 * vs raw mode. On an RCC underrun the receiver is restarted instead. */
static void mgsl_bh_receive(struct mgsl_struct *info)
{
	bool (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do
	{
		if (info->rx_rcc_underrun) {
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

/* Wake tty writers; resume loopmode RxD->TxD echo if requested. */
static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->port.tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Reset the modem-signal interrupt rate-limit counters used by
 * mgsl_isr_io_pin() (IO_PIN_SHUTDOWN_LIMIT). */
static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	/* abort received while a loopmode insert is pending:
	 * take our place in the loop and start echoing RxD to TxD */
	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = false;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	/* hunt-exit / idle events wake waiters in mgsl_wait_event paths */
	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	/* ack and unlatch after handling so no events are lost */
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

} /* end of mgsl_isr_receive_status() */

/* mgsl_isr_transmit_status()
 *
 * 	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 *	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave	*/
		/* the TxFifo with data from the aborted frame	*/
		/* so purge the TxFifo. Also shutdown the DMA	*/
		/* channel in case there is data remaining in 	*/
		/* the DMA buffer				*/
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	/* classify completion; any unrecognized status is counted
	 * as an underrun (final else) */
	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	/* transmit finished: reset software state and stop the
	 * transmit-timeout timer */
	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = false;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		if (info->port.tty->stopped || info->port.tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}

} /* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 * 	Service an Input/Output pin interrupt.
 The type of
 * 	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct	mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters; each signal's IRQ is shut
		 * off after IO_PIN_SHUTDOWN_LIMIT events (storm guard;
		 * counters are reset by mgsl_bh_status()) */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		if ( (info->port.flags & ASYNC_CHECK_CD) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->port.open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->port.tty)
					tty_hangup(info->port.tty);
			}
		}

		/* NOTE(review): port.tty is dereferenced here (hw_stopped)
		 * before the NULL checks just below — appears to assume
		 * tty is non-NULL whenever CTS flow control is active;
		 * confirm against open/close paths. */
		if ( (info->port.flags & ASYNC_CTS_FLOW) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->port.tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = true;
	}

} /* end of mgsl_isr_io_pin() */

/* mgsl_isr_transmit_data()
 *
 * 	Service a transmit data interrupt (async mode only).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_data( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
			__FILE__,__LINE__,info->xmit_cnt);

	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );

	if (info->port.tty->stopped || info->port.tty->hw_stopped) {
		usc_stop_transmitter(info);
		return;
	}

	/* refill the TX FIFO from xmit_buf, or mark the transmitter
	 * idle when there is nothing left to send */
	if ( info->xmit_cnt )
		usc_load_txfifo( info );
	else
		info->tx_active = false;

	if (info->xmit_cnt < WAKEUP_CHARS)
		info->pending_bh |= BH_TRANSMIT;

} /* end of mgsl_isr_transmit_data() */

/* mgsl_isr_receive_data()
 *
 * 	Service a receive data interrupt. This occurs
 * 	when operating in asynchronous interrupt transfer mode.
 *	The receive data FIFO is flushed to the receive data buffers.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
	int Fifocount;
	u16 status;
	int work = 0;
	unsigned char DataByte;
	struct tty_struct *tty = info->port.tty;
	struct	mgsl_icount *icount = &info->icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_data\n",
			__FILE__,__LINE__);

	usc_ClearIrqPendingBits( info, RECEIVE_DATA );

	/* select FIFO status for RICR readback */
	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );

	/* clear the Wordstatus bit so that status readback */
	/* only reflects the status of this byte */
	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));

	/* flush the receive FIFO */

	/* upper byte of RICR holds the RX FIFO fill count */
	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
		int flag;

		/* read one byte from RxFIFO */
		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
		      info->io_base + CCAR );
		DataByte = inb( info->io_base + CCAR );

		/* get the status of the received byte */
		status = usc_InReg(info, RCSR);
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);

		icount->rx++;

		flag = 0;
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
			printk("rxerr=%04X\n",status);
			/* update error statistics */
			if ( status & RXSTATUS_BREAK_RECEIVED ) {
				/* break masks framing/parity for this char */
				status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
				icount->brk++;
			} else if (status & RXSTATUS_PARITY_ERROR)
				icount->parity++;
			else if (status & RXSTATUS_FRAMING_ERROR)
				icount->frame++;
			else if (status & RXSTATUS_OVERRUN) {
				/* must issue purge fifo cmd before */
				/* 16C32 accepts more receive chars */
				usc_RTCmd(info,RTCmd_PurgeRxFifo);
				icount->overrun++;
			}

			/* discard char if tty control flags say so */
			if (status & info->ignore_status_mask)
				continue;

			status &= info->read_status_mask;

			if (status & RXSTATUS_BREAK_RECEIVED) {
				flag = TTY_BREAK;
				if (info->port.flags & ASYNC_SAK)
					do_SAK(tty);
			} else if (status & RXSTATUS_PARITY_ERROR)
				flag = TTY_PARITY;
			else if (status & RXSTATUS_FRAMING_ERROR)
				flag = TTY_FRAME;
		}	/* end of if (error) */
		tty_insert_flip_char(tty, DataByte, flag);
		if (status & RXSTATUS_OVERRUN) {
			/* Overrun is special, since it's
			 * reported immediately, and doesn't
			 * affect the current character
			 */
			work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		}
	}

	if ( debug_level >= DEBUG_LEVEL_ISR ) {
		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
			__FILE__,__LINE__,icount->rx,icount->brk,
			icount->parity,icount->frame,icount->overrun);
	}

	/* NOTE(review): 'work' only counts overrun insertions, so the
	 * push here fires only after an overrun — presumably normal
	 * data is pushed elsewhere (flip-buffer timer); confirm. */
	if(work)
		tty_flip_buffer_push(tty);
}

/* mgsl_isr_misc()
 *
 * 	Service a miscellaneous interrupt source.
 *
 * Arguments:		info		pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_misc( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_misc status=%04X\n",
			__FILE__,__LINE__,status);

	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
	    (info->params.mode == MGSL_MODE_HDLC)) {

		/* turn off receiver and rx DMA */
		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
		usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);

		/* schedule BH handler to restart receiver */
		info->pending_bh |= BH_RECEIVE;
		info->rx_rcc_underrun = true;
	}

	usc_ClearIrqPendingBits( info, MISC );
	usc_UnlatchMiscstatusBits( info, status );

} /* end of mgsl_isr_misc() */

/* mgsl_isr_null()
 *
 * 	Services undefined interrupt vectors from the
 * 	USC. (hence this function SHOULD never be called)
 *
 * Arguments:		info		pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_null( struct mgsl_struct *info )
{

} /* end of mgsl_isr_null() */

/* mgsl_isr_receive_dma()
 *
 * 	Service a receive DMA channel interrupt.
 * 	For this driver there are two sources of receive DMA interrupts
 * 	as identified in the Receive DMA mode Register (RDMR):
 *
 * 	BIT3	EOA/EOL		End of List, all receive buffers in receive
 * 				buffer list have been filled (no more free buffers
 * 				available). The DMA controller has shut down.
 *
 * 	BIT2	EOB		End of Buffer. This interrupt occurs when a receive
 * 				DMA buffer is terminated in response to completion
 * 				of a good frame or a frame with errors. The status
 * 				of the frame is stored in the buffer entry in the
 * 				list of receive buffer entries.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* frame processing itself happens in the bottom half */
	info->pending_bh |= BH_RECEIVE;

	/* BIT3 = EOA/EOL: buffer list exhausted, receiver overflowed */
	if ( status & BIT3 ) {
		info->rx_overflow = true;
		info->icount.buf_overrun++;
	}

} /* end of mgsl_isr_receive_dma() */

/* mgsl_isr_transmit_dma()
 *
 *	This function services a transmit DMA channel interrupt.
 *
 *	For this driver there is one source of transmit DMA interrupts
 *	as identified in the Transmit DMA Mode Register (TDMR):
 *
 *     	BIT2  EOB	End of Buffer. This interrupt occurs when a
 *     			transmit DMA buffer has been emptied.
 *
 *     	The driver maintains enough transmit DMA buffers to hold at least
 *     	one max frame size transmit frame. When operating in a buffered
 *     	transmit mode, there may be enough transmit DMA buffers to hold at
 *     	least two or more max frame size frames. On an EOB condition,
 *     	determine if there are any queued transmit buffers and copy into
 *     	transmit DMA buffers if we have room.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* BIT2 = EOB: a transmit DMA buffer was emptied */
	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 *  try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {
			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}

} /* end of mgsl_isr_transmit_dma() */

/* mgsl_interrupt()
 *
 *	Interrupt service routine entry point.
 *
 * Arguments:
 *
 *	irq	interrupt number that caused interrupt
 *	dev_id	device ID supplied during interrupt registration
 *
 * Return Value: None
 */
static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
{
	struct mgsl_struct *info = dev_id;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__, __LINE__, info->irq_level);

	spin_lock(&info->irq_spinlock);

	/* loop until both serial and DMA vectors report nothing pending */
	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector; serial (USC) vectors are
		 * dispatched via UscIsrTable, DMA vectors directly */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		/* interrupt storm guard: shut all IRQs off */
		if ( info->isr_overflow ) {
			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
				__FILE__, __LINE__, info->device_name, info->irq_level);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = true;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__, __LINE__, info->irq_level);

	return IRQ_HANDLED;
} /* end of mgsl_interrupt() */

/* startup()
 *
 * 	Initialize and start device.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int startup(struct mgsl_struct * info)
{
	int retval = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);

	/* already initialized: nothing to do */
	if (info->port.flags & ASYNC_INITIALIZED)
		return 0;

	if (!info->xmit_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);

	/* Allocate and claim adapter resources */
	retval = mgsl_claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = mgsl_adapter_test(info);

	if ( retval ) {
		/* failed: mark the tty in error and release everything */
  		if (capable(CAP_SYS_ADMIN) && info->port.tty)
			set_bit(TTY_IO_ERROR, &info->port.tty->flags);
		mgsl_release_resources(info);
  		return retval;
  	}

	/* program hardware for current parameters */
	mgsl_change_params(info);

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags |= ASYNC_INITIALIZED;

	return 0;

} /* end of startup() */

/* shutdown()
 *
 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void shutdown(struct mgsl_struct * info)
{
	unsigned long flags;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	if (info->xmit_buf) {
		free_page((unsigned long) info->xmit_buf);
		info->xmit_buf = NULL;
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_DisableMasterIrqBit(info);
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
		TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);

	/* Disable DMAEN (Port 7, Bit 14) */
	/* This disconnects the DMA request signal from the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));

	/* Disable INTEN (Port 6, Bit12) */
	/* This disconnects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));

	/* drop modem control lines on hangup-on-close (HUPCL) */
	if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
 		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
		usc_set_serial_signals(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_release_resources(info);

	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags &= ~ASYNC_INITIALIZED;

} /* end of shutdown() */

/* Program the hardware for the current parameters: sync vs async mode,
 * serial signals, modem-status IRQs, and (optionally) the receiver. */
static void mgsl_program_hw(struct mgsl_struct *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ||
	    info->netcount)
		usc_set_sync_mode(info);
	else
		usc_set_async_mode(info);

	usc_set_serial_signals(info);

	/* restart the modem-signal IRQ rate-limit counters */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
	usc_EnableInterrupts(info, IO_PIN);
	usc_get_serial_signals(info);

	if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
		usc_start_receiver(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Reconfigure adapter based on new parameters
 */
static void mgsl_change_params(struct mgsl_struct *info)
{
	unsigned cflag;
	int bits_per_char;

	if (!info->port.tty || !info->port.tty->termios)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_change_params(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = info->port.tty->termios->c_cflag;

	/* if B0 rate (hangup) specified then negate DTR and RTS */
	/* otherwise assert DTR and RTS */
 	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);

	/* byte size and parity */

	switch (cflag & CSIZE) {
	      case CS5: info->params.data_bits = 5; break;
	      case CS6: info->params.data_bits = 6; break;
	      case CS7: info->params.data_bits = 7; break;
	      case CS8: info->params.data_bits = 8; break;
	      /* Never happens, but GCC is too dumb to figure it out */
	      default:  info->params.data_bits = 7; break;
	      }

	if (cflag & CSTOPB)
		info->params.stop_bits = 2;
	else
		info->params.stop_bits = 1;

	info->params.parity = ASYNC_PARITY_NONE;
	if (cflag & PARENB) {
		if (cflag & PARODD)
			info->params.parity = ASYNC_PARITY_ODD;
		else
			info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
		if (cflag & CMSPAR)
			info->params.parity = ASYNC_PARITY_SPACE;
#endif
	}

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	/* if port data rate is set to 460800 or less then
	 * allow tty settings to override, otherwise keep the
	 * current data rate.
	 */
	if (info->params.data_rate <= 460800)
		info->params.data_rate = tty_get_baud_rate(info->port.tty);

	if ( info->params.data_rate ) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	if (cflag & CRTSCTS)
		info->port.flags |= ASYNC_CTS_FLOW;
	else
		info->port.flags &= ~ASYNC_CTS_FLOW;

	if (cflag & CLOCAL)
		info->port.flags &= ~ASYNC_CHECK_CD;
	else
		info->port.flags |= ASYNC_CHECK_CD;

	/* process tty input control flags */

	info->read_status_mask = RXSTATUS_OVERRUN;
	if (I_INPCK(info->port.tty))
		info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
 		info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;

	if (I_IGNPAR(info->port.tty))
		info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
	if (I_IGNBRK(info->port.tty)) {
		info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
		/* If ignoring parity and break indicators, ignore
		 * overruns too.  (For real raw support).
		 */
		if (I_IGNPAR(info->port.tty))
			info->ignore_status_mask |= RXSTATUS_OVERRUN;
	}

	mgsl_program_hw(info);

} /* end of mgsl_change_params() */

/* mgsl_put_char()
 *
 * 	Add a character to the transmit buffer.
2013 * 2014 * Arguments: tty pointer to tty information structure 2015 * ch character to add to transmit buffer 2016 * 2017 * Return Value: None 2018 */ 2019static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) 2020{ 2021 struct mgsl_struct *info = tty->driver_data; 2022 unsigned long flags; 2023 int ret = 0; 2024 2025 if (debug_level >= DEBUG_LEVEL_INFO) { 2026 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n", 2027 __FILE__, __LINE__, ch, info->device_name); 2028 } 2029 2030 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) 2031 return 0; 2032 2033 if (!info->xmit_buf) 2034 return 0; 2035 2036 spin_lock_irqsave(&info->irq_spinlock, flags); 2037 2038 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) { 2039 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { 2040 info->xmit_buf[info->xmit_head++] = ch; 2041 info->xmit_head &= SERIAL_XMIT_SIZE-1; 2042 info->xmit_cnt++; 2043 ret = 1; 2044 } 2045 } 2046 spin_unlock_irqrestore(&info->irq_spinlock, flags); 2047 return ret; 2048 2049} /* end of mgsl_put_char() */ 2050 2051/* mgsl_flush_chars() 2052 * 2053 * Enable transmitter so remaining characters in the 2054 * transmit buffer are sent. 
2055 * 2056 * Arguments: tty pointer to tty information structure 2057 * Return Value: None 2058 */ 2059static void mgsl_flush_chars(struct tty_struct *tty) 2060{ 2061 struct mgsl_struct *info = tty->driver_data; 2062 unsigned long flags; 2063 2064 if ( debug_level >= DEBUG_LEVEL_INFO ) 2065 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n", 2066 __FILE__,__LINE__,info->device_name,info->xmit_cnt); 2067 2068 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars")) 2069 return; 2070 2071 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || 2072 !info->xmit_buf) 2073 return; 2074 2075 if ( debug_level >= DEBUG_LEVEL_INFO ) 2076 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n", 2077 __FILE__,__LINE__,info->device_name ); 2078 2079 spin_lock_irqsave(&info->irq_spinlock,flags); 2080 2081 if (!info->tx_active) { 2082 if ( (info->params.mode == MGSL_MODE_HDLC || 2083 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) { 2084 /* operating in synchronous (frame oriented) mode */ 2085 /* copy data from circular xmit_buf to */ 2086 /* transmit DMA buffer. 
*/ 2087 mgsl_load_tx_dma_buffer(info, 2088 info->xmit_buf,info->xmit_cnt); 2089 } 2090 usc_start_transmitter(info); 2091 } 2092 2093 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2094 2095} /* end of mgsl_flush_chars() */ 2096 2097/* mgsl_write() 2098 * 2099 * Send a block of data 2100 * 2101 * Arguments: 2102 * 2103 * tty pointer to tty information structure 2104 * buf pointer to buffer containing send data 2105 * count size of send data in bytes 2106 * 2107 * Return Value: number of characters written 2108 */ 2109static int mgsl_write(struct tty_struct * tty, 2110 const unsigned char *buf, int count) 2111{ 2112 int c, ret = 0; 2113 struct mgsl_struct *info = tty->driver_data; 2114 unsigned long flags; 2115 2116 if ( debug_level >= DEBUG_LEVEL_INFO ) 2117 printk( "%s(%d):mgsl_write(%s) count=%d\n", 2118 __FILE__,__LINE__,info->device_name,count); 2119 2120 if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) 2121 goto cleanup; 2122 2123 if (!info->xmit_buf) 2124 goto cleanup; 2125 2126 if ( info->params.mode == MGSL_MODE_HDLC || 2127 info->params.mode == MGSL_MODE_RAW ) { 2128 /* operating in synchronous (frame oriented) mode */ 2129 /* operating in synchronous (frame oriented) mode */ 2130 if (info->tx_active) { 2131 2132 if ( info->params.mode == MGSL_MODE_HDLC ) { 2133 ret = 0; 2134 goto cleanup; 2135 } 2136 /* transmitter is actively sending data - 2137 * if we have multiple transmit dma and 2138 * holding buffers, attempt to queue this 2139 * frame for transmission at a later time. 
2140 */ 2141 if (info->tx_holding_count >= info->num_tx_holding_buffers ) { 2142 /* no tx holding buffers available */ 2143 ret = 0; 2144 goto cleanup; 2145 } 2146 2147 /* queue transmit frame request */ 2148 ret = count; 2149 save_tx_buffer_request(info,buf,count); 2150 2151 /* if we have sufficient tx dma buffers, 2152 * load the next buffered tx request 2153 */ 2154 spin_lock_irqsave(&info->irq_spinlock,flags); 2155 load_next_tx_holding_buffer(info); 2156 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2157 goto cleanup; 2158 } 2159 2160 /* if operating in HDLC LoopMode and the adapter */ 2161 /* has yet to be inserted into the loop, we can't */ 2162 /* transmit */ 2163 2164 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) && 2165 !usc_loopmode_active(info) ) 2166 { 2167 ret = 0; 2168 goto cleanup; 2169 } 2170 2171 if ( info->xmit_cnt ) { 2172 /* Send accumulated from send_char() calls */ 2173 /* as frame and wait before accepting more data. */ 2174 ret = 0; 2175 2176 /* copy data from circular xmit_buf to */ 2177 /* transmit DMA buffer. 
*/ 2178 mgsl_load_tx_dma_buffer(info, 2179 info->xmit_buf,info->xmit_cnt); 2180 if ( debug_level >= DEBUG_LEVEL_INFO ) 2181 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n", 2182 __FILE__,__LINE__,info->device_name); 2183 } else { 2184 if ( debug_level >= DEBUG_LEVEL_INFO ) 2185 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n", 2186 __FILE__,__LINE__,info->device_name); 2187 ret = count; 2188 info->xmit_cnt = count; 2189 mgsl_load_tx_dma_buffer(info,buf,count); 2190 } 2191 } else { 2192 while (1) { 2193 spin_lock_irqsave(&info->irq_spinlock,flags); 2194 c = min_t(int, count, 2195 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, 2196 SERIAL_XMIT_SIZE - info->xmit_head)); 2197 if (c <= 0) { 2198 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2199 break; 2200 } 2201 memcpy(info->xmit_buf + info->xmit_head, buf, c); 2202 info->xmit_head = ((info->xmit_head + c) & 2203 (SERIAL_XMIT_SIZE-1)); 2204 info->xmit_cnt += c; 2205 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2206 buf += c; 2207 count -= c; 2208 ret += c; 2209 } 2210 } 2211 2212 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) { 2213 spin_lock_irqsave(&info->irq_spinlock,flags); 2214 if (!info->tx_active) 2215 usc_start_transmitter(info); 2216 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2217 } 2218cleanup: 2219 if ( debug_level >= DEBUG_LEVEL_INFO ) 2220 printk( "%s(%d):mgsl_write(%s) returning=%d\n", 2221 __FILE__,__LINE__,info->device_name,ret); 2222 2223 return ret; 2224 2225} /* end of mgsl_write() */ 2226 2227/* mgsl_write_room() 2228 * 2229 * Return the count of free bytes in transmit buffer 2230 * 2231 * Arguments: tty pointer to tty info structure 2232 * Return Value: None 2233 */ 2234static int mgsl_write_room(struct tty_struct *tty) 2235{ 2236 struct mgsl_struct *info = tty->driver_data; 2237 int ret; 2238 2239 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room")) 2240 return 0; 2241 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; 2242 if (ret < 0) 2243 ret = 0; 
2244 2245 if (debug_level >= DEBUG_LEVEL_INFO) 2246 printk("%s(%d):mgsl_write_room(%s)=%d\n", 2247 __FILE__,__LINE__, info->device_name,ret ); 2248 2249 if ( info->params.mode == MGSL_MODE_HDLC || 2250 info->params.mode == MGSL_MODE_RAW ) { 2251 /* operating in synchronous (frame oriented) mode */ 2252 if ( info->tx_active ) 2253 return 0; 2254 else 2255 return HDLC_MAX_FRAME_SIZE; 2256 } 2257 2258 return ret; 2259 2260} /* end of mgsl_write_room() */ 2261 2262/* mgsl_chars_in_buffer() 2263 * 2264 * Return the count of bytes in transmit buffer 2265 * 2266 * Arguments: tty pointer to tty info structure 2267 * Return Value: None 2268 */ 2269static int mgsl_chars_in_buffer(struct tty_struct *tty) 2270{ 2271 struct mgsl_struct *info = tty->driver_data; 2272 2273 if (debug_level >= DEBUG_LEVEL_INFO) 2274 printk("%s(%d):mgsl_chars_in_buffer(%s)\n", 2275 __FILE__,__LINE__, info->device_name ); 2276 2277 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer")) 2278 return 0; 2279 2280 if (debug_level >= DEBUG_LEVEL_INFO) 2281 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n", 2282 __FILE__,__LINE__, info->device_name,info->xmit_cnt ); 2283 2284 if ( info->params.mode == MGSL_MODE_HDLC || 2285 info->params.mode == MGSL_MODE_RAW ) { 2286 /* operating in synchronous (frame oriented) mode */ 2287 if ( info->tx_active ) 2288 return info->max_frame_size; 2289 else 2290 return 0; 2291 } 2292 2293 return info->xmit_cnt; 2294} /* end of mgsl_chars_in_buffer() */ 2295 2296/* mgsl_flush_buffer() 2297 * 2298 * Discard all data in the send buffer 2299 * 2300 * Arguments: tty pointer to tty info structure 2301 * Return Value: None 2302 */ 2303static void mgsl_flush_buffer(struct tty_struct *tty) 2304{ 2305 struct mgsl_struct *info = tty->driver_data; 2306 unsigned long flags; 2307 2308 if (debug_level >= DEBUG_LEVEL_INFO) 2309 printk("%s(%d):mgsl_flush_buffer(%s) entry\n", 2310 __FILE__,__LINE__, info->device_name ); 2311 2312 if (mgsl_paranoia_check(info, tty->name, 
"mgsl_flush_buffer")) 2313 return; 2314 2315 spin_lock_irqsave(&info->irq_spinlock,flags); 2316 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 2317 del_timer(&info->tx_timer); 2318 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2319 2320 tty_wakeup(tty); 2321} 2322 2323/* mgsl_send_xchar() 2324 * 2325 * Send a high-priority XON/XOFF character 2326 * 2327 * Arguments: tty pointer to tty info structure 2328 * ch character to send 2329 * Return Value: None 2330 */ 2331static void mgsl_send_xchar(struct tty_struct *tty, char ch) 2332{ 2333 struct mgsl_struct *info = tty->driver_data; 2334 unsigned long flags; 2335 2336 if (debug_level >= DEBUG_LEVEL_INFO) 2337 printk("%s(%d):mgsl_send_xchar(%s,%d)\n", 2338 __FILE__,__LINE__, info->device_name, ch ); 2339 2340 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar")) 2341 return; 2342 2343 info->x_char = ch; 2344 if (ch) { 2345 /* Make sure transmit interrupts are on */ 2346 spin_lock_irqsave(&info->irq_spinlock,flags); 2347 if (!info->tx_enabled) 2348 usc_start_transmitter(info); 2349 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2350 } 2351} /* end of mgsl_send_xchar() */ 2352 2353/* mgsl_throttle() 2354 * 2355 * Signal remote device to throttle send data (our receive data) 2356 * 2357 * Arguments: tty pointer to tty info structure 2358 * Return Value: None 2359 */ 2360static void mgsl_throttle(struct tty_struct * tty) 2361{ 2362 struct mgsl_struct *info = tty->driver_data; 2363 unsigned long flags; 2364 2365 if (debug_level >= DEBUG_LEVEL_INFO) 2366 printk("%s(%d):mgsl_throttle(%s) entry\n", 2367 __FILE__,__LINE__, info->device_name ); 2368 2369 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle")) 2370 return; 2371 2372 if (I_IXOFF(tty)) 2373 mgsl_send_xchar(tty, STOP_CHAR(tty)); 2374 2375 if (tty->termios->c_cflag & CRTSCTS) { 2376 spin_lock_irqsave(&info->irq_spinlock,flags); 2377 info->serial_signals &= ~SerialSignal_RTS; 2378 usc_set_serial_signals(info); 2379 
spin_unlock_irqrestore(&info->irq_spinlock,flags); 2380 } 2381} /* end of mgsl_throttle() */ 2382 2383/* mgsl_unthrottle() 2384 * 2385 * Signal remote device to stop throttling send data (our receive data) 2386 * 2387 * Arguments: tty pointer to tty info structure 2388 * Return Value: None 2389 */ 2390static void mgsl_unthrottle(struct tty_struct * tty) 2391{ 2392 struct mgsl_struct *info = tty->driver_data; 2393 unsigned long flags; 2394 2395 if (debug_level >= DEBUG_LEVEL_INFO) 2396 printk("%s(%d):mgsl_unthrottle(%s) entry\n", 2397 __FILE__,__LINE__, info->device_name ); 2398 2399 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle")) 2400 return; 2401 2402 if (I_IXOFF(tty)) { 2403 if (info->x_char) 2404 info->x_char = 0; 2405 else 2406 mgsl_send_xchar(tty, START_CHAR(tty)); 2407 } 2408 2409 if (tty->termios->c_cflag & CRTSCTS) { 2410 spin_lock_irqsave(&info->irq_spinlock,flags); 2411 info->serial_signals |= SerialSignal_RTS; 2412 usc_set_serial_signals(info); 2413 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2414 } 2415 2416} /* end of mgsl_unthrottle() */ 2417 2418/* mgsl_get_stats() 2419 * 2420 * get the current serial parameters information 2421 * 2422 * Arguments: info pointer to device instance data 2423 * user_icount pointer to buffer to hold returned stats 2424 * 2425 * Return Value: 0 if success, otherwise error code 2426 */ 2427static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount) 2428{ 2429 int err; 2430 2431 if (debug_level >= DEBUG_LEVEL_INFO) 2432 printk("%s(%d):mgsl_get_params(%s)\n", 2433 __FILE__,__LINE__, info->device_name); 2434 2435 if (!user_icount) { 2436 memset(&info->icount, 0, sizeof(info->icount)); 2437 } else { 2438 mutex_lock(&info->port.mutex); 2439 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); 2440 mutex_unlock(&info->port.mutex); 2441 if (err) 2442 return -EFAULT; 2443 } 2444 2445 return 0; 2446 2447} /* end of mgsl_get_stats() */ 2448 2449/* 
mgsl_get_params() 2450 * 2451 * get the current serial parameters information 2452 * 2453 * Arguments: info pointer to device instance data 2454 * user_params pointer to buffer to hold returned params 2455 * 2456 * Return Value: 0 if success, otherwise error code 2457 */ 2458static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params) 2459{ 2460 int err; 2461 if (debug_level >= DEBUG_LEVEL_INFO) 2462 printk("%s(%d):mgsl_get_params(%s)\n", 2463 __FILE__,__LINE__, info->device_name); 2464 2465 mutex_lock(&info->port.mutex); 2466 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); 2467 mutex_unlock(&info->port.mutex); 2468 if (err) { 2469 if ( debug_level >= DEBUG_LEVEL_INFO ) 2470 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n", 2471 __FILE__,__LINE__,info->device_name); 2472 return -EFAULT; 2473 } 2474 2475 return 0; 2476 2477} /* end of mgsl_get_params() */ 2478 2479/* mgsl_set_params() 2480 * 2481 * set the serial parameters 2482 * 2483 * Arguments: 2484 * 2485 * info pointer to device instance data 2486 * new_params user buffer containing new serial params 2487 * 2488 * Return Value: 0 if success, otherwise error code 2489 */ 2490static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params) 2491{ 2492 unsigned long flags; 2493 MGSL_PARAMS tmp_params; 2494 int err; 2495 2496 if (debug_level >= DEBUG_LEVEL_INFO) 2497 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__, 2498 info->device_name ); 2499 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); 2500 if (err) { 2501 if ( debug_level >= DEBUG_LEVEL_INFO ) 2502 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n", 2503 __FILE__,__LINE__,info->device_name); 2504 return -EFAULT; 2505 } 2506 2507 mutex_lock(&info->port.mutex); 2508 spin_lock_irqsave(&info->irq_spinlock,flags); 2509 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); 2510 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2511 2512 
mgsl_change_params(info); 2513 mutex_unlock(&info->port.mutex); 2514 2515 return 0; 2516 2517} /* end of mgsl_set_params() */ 2518 2519/* mgsl_get_txidle() 2520 * 2521 * get the current transmit idle mode 2522 * 2523 * Arguments: info pointer to device instance data 2524 * idle_mode pointer to buffer to hold returned idle mode 2525 * 2526 * Return Value: 0 if success, otherwise error code 2527 */ 2528static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode) 2529{ 2530 int err; 2531 2532 if (debug_level >= DEBUG_LEVEL_INFO) 2533 printk("%s(%d):mgsl_get_txidle(%s)=%d\n", 2534 __FILE__,__LINE__, info->device_name, info->idle_mode); 2535 2536 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); 2537 if (err) { 2538 if ( debug_level >= DEBUG_LEVEL_INFO ) 2539 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n", 2540 __FILE__,__LINE__,info->device_name); 2541 return -EFAULT; 2542 } 2543 2544 return 0; 2545 2546} /* end of mgsl_get_txidle() */ 2547 2548/* mgsl_set_txidle() service ioctl to set transmit idle mode 2549 * 2550 * Arguments: info pointer to device instance data 2551 * idle_mode new idle mode 2552 * 2553 * Return Value: 0 if success, otherwise error code 2554 */ 2555static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode) 2556{ 2557 unsigned long flags; 2558 2559 if (debug_level >= DEBUG_LEVEL_INFO) 2560 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__, 2561 info->device_name, idle_mode ); 2562 2563 spin_lock_irqsave(&info->irq_spinlock,flags); 2564 info->idle_mode = idle_mode; 2565 usc_set_txidle( info ); 2566 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2567 return 0; 2568 2569} /* end of mgsl_set_txidle() */ 2570 2571/* mgsl_txenable() 2572 * 2573 * enable or disable the transmitter 2574 * 2575 * Arguments: 2576 * 2577 * info pointer to device instance data 2578 * enable 1 = enable, 0 = disable 2579 * 2580 * Return Value: 0 if success, otherwise error code 2581 */ 2582static int 
mgsl_txenable(struct mgsl_struct * info, int enable) 2583{ 2584 unsigned long flags; 2585 2586 if (debug_level >= DEBUG_LEVEL_INFO) 2587 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__, 2588 info->device_name, enable); 2589 2590 spin_lock_irqsave(&info->irq_spinlock,flags); 2591 if ( enable ) { 2592 if ( !info->tx_enabled ) { 2593 2594 usc_start_transmitter(info); 2595 /*-------------------------------------------------- 2596 * if HDLC/SDLC Loop mode, attempt to insert the 2597 * station in the 'loop' by setting CMR:13. Upon 2598 * receipt of the next GoAhead (RxAbort) sequence, 2599 * the OnLoop indicator (CCSR:7) should go active 2600 * to indicate that we are on the loop 2601 *--------------------------------------------------*/ 2602 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 2603 usc_loopmode_insert_request( info ); 2604 } 2605 } else { 2606 if ( info->tx_enabled ) 2607 usc_stop_transmitter(info); 2608 } 2609 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2610 return 0; 2611 2612} /* end of mgsl_txenable() */ 2613 2614/* mgsl_txabort() abort send HDLC frame 2615 * 2616 * Arguments: info pointer to device instance data 2617 * Return Value: 0 if success, otherwise error code 2618 */ 2619static int mgsl_txabort(struct mgsl_struct * info) 2620{ 2621 unsigned long flags; 2622 2623 if (debug_level >= DEBUG_LEVEL_INFO) 2624 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__, 2625 info->device_name); 2626 2627 spin_lock_irqsave(&info->irq_spinlock,flags); 2628 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) 2629 { 2630 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 2631 usc_loopmode_cancel_transmit( info ); 2632 else 2633 usc_TCmd(info,TCmd_SendAbort); 2634 } 2635 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2636 return 0; 2637 2638} /* end of mgsl_txabort() */ 2639 2640/* mgsl_rxenable() enable or disable the receiver 2641 * 2642 * Arguments: info pointer to device instance data 2643 * enable 1 = enable, 0 = disable 
2644 * Return Value: 0 if success, otherwise error code 2645 */ 2646static int mgsl_rxenable(struct mgsl_struct * info, int enable) 2647{ 2648 unsigned long flags; 2649 2650 if (debug_level >= DEBUG_LEVEL_INFO) 2651 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__, 2652 info->device_name, enable); 2653 2654 spin_lock_irqsave(&info->irq_spinlock,flags); 2655 if ( enable ) { 2656 if ( !info->rx_enabled ) 2657 usc_start_receiver(info); 2658 } else { 2659 if ( info->rx_enabled ) 2660 usc_stop_receiver(info); 2661 } 2662 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2663 return 0; 2664 2665} /* end of mgsl_rxenable() */ 2666 2667/* mgsl_wait_event() wait for specified event to occur 2668 * 2669 * Arguments: info pointer to device instance data 2670 * mask pointer to bitmask of events to wait for 2671 * Return Value: 0 if successful and bit mask updated with 2672 * of events triggerred, 2673 * otherwise error code 2674 */ 2675static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr) 2676{ 2677 unsigned long flags; 2678 int s; 2679 int rc=0; 2680 struct mgsl_icount cprev, cnow; 2681 int events; 2682 int mask; 2683 struct _input_signal_events oldsigs, newsigs; 2684 DECLARE_WAITQUEUE(wait, current); 2685 2686 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); 2687 if (rc) { 2688 return -EFAULT; 2689 } 2690 2691 if (debug_level >= DEBUG_LEVEL_INFO) 2692 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__, 2693 info->device_name, mask); 2694 2695 spin_lock_irqsave(&info->irq_spinlock,flags); 2696 2697 /* return immediately if state matches requested events */ 2698 usc_get_serial_signals(info); 2699 s = info->serial_signals; 2700 events = mask & 2701 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + 2702 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + 2703 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + 2704 ((s & SerialSignal_RI) ? 
MgslEvent_RiActive :MgslEvent_RiInactive) ); 2705 if (events) { 2706 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2707 goto exit; 2708 } 2709 2710 /* save current irq counts */ 2711 cprev = info->icount; 2712 oldsigs = info->input_signal_events; 2713 2714 /* enable hunt and idle irqs if needed */ 2715 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2716 u16 oldreg = usc_InReg(info,RICR); 2717 u16 newreg = oldreg + 2718 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) + 2719 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0); 2720 if (oldreg != newreg) 2721 usc_OutReg(info, RICR, newreg); 2722 } 2723 2724 set_current_state(TASK_INTERRUPTIBLE); 2725 add_wait_queue(&info->event_wait_q, &wait); 2726 2727 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2728 2729 2730 for(;;) { 2731 schedule(); 2732 if (signal_pending(current)) { 2733 rc = -ERESTARTSYS; 2734 break; 2735 } 2736 2737 /* get current irq counts */ 2738 spin_lock_irqsave(&info->irq_spinlock,flags); 2739 cnow = info->icount; 2740 newsigs = info->input_signal_events; 2741 set_current_state(TASK_INTERRUPTIBLE); 2742 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2743 2744 /* if no change, wait aborted for some reason */ 2745 if (newsigs.dsr_up == oldsigs.dsr_up && 2746 newsigs.dsr_down == oldsigs.dsr_down && 2747 newsigs.dcd_up == oldsigs.dcd_up && 2748 newsigs.dcd_down == oldsigs.dcd_down && 2749 newsigs.cts_up == oldsigs.cts_up && 2750 newsigs.cts_down == oldsigs.cts_down && 2751 newsigs.ri_up == oldsigs.ri_up && 2752 newsigs.ri_down == oldsigs.ri_down && 2753 cnow.exithunt == cprev.exithunt && 2754 cnow.rxidle == cprev.rxidle) { 2755 rc = -EIO; 2756 break; 2757 } 2758 2759 events = mask & 2760 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + 2761 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + 2762 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + 2763 (newsigs.dcd_down != oldsigs.dcd_down ? 
MgslEvent_DcdInactive:0) + 2764 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + 2765 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + 2766 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + 2767 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + 2768 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + 2769 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) ); 2770 if (events) 2771 break; 2772 2773 cprev = cnow; 2774 oldsigs = newsigs; 2775 } 2776 2777 remove_wait_queue(&info->event_wait_q, &wait); 2778 set_current_state(TASK_RUNNING); 2779 2780 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2781 spin_lock_irqsave(&info->irq_spinlock,flags); 2782 if (!waitqueue_active(&info->event_wait_q)) { 2783 /* disable enable exit hunt mode/idle rcvd IRQs */ 2784 usc_OutReg(info, RICR, usc_InReg(info,RICR) & 2785 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)); 2786 } 2787 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2788 } 2789exit: 2790 if ( rc == 0 ) 2791 PUT_USER(rc, events, mask_ptr); 2792 2793 return rc; 2794 2795} /* end of mgsl_wait_event() */ 2796 2797static int modem_input_wait(struct mgsl_struct *info,int arg) 2798{ 2799 unsigned long flags; 2800 int rc; 2801 struct mgsl_icount cprev, cnow; 2802 DECLARE_WAITQUEUE(wait, current); 2803 2804 /* save current irq counts */ 2805 spin_lock_irqsave(&info->irq_spinlock,flags); 2806 cprev = info->icount; 2807 add_wait_queue(&info->status_event_wait_q, &wait); 2808 set_current_state(TASK_INTERRUPTIBLE); 2809 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2810 2811 for(;;) { 2812 schedule(); 2813 if (signal_pending(current)) { 2814 rc = -ERESTARTSYS; 2815 break; 2816 } 2817 2818 /* get new irq counts */ 2819 spin_lock_irqsave(&info->irq_spinlock,flags); 2820 cnow = info->icount; 2821 set_current_state(TASK_INTERRUPTIBLE); 2822 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2823 2824 /* if no change, wait aborted for some reason 
*/ 2825 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 2826 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { 2827 rc = -EIO; 2828 break; 2829 } 2830 2831 /* check for change in caller specified modem input */ 2832 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || 2833 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || 2834 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || 2835 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { 2836 rc = 0; 2837 break; 2838 } 2839 2840 cprev = cnow; 2841 } 2842 remove_wait_queue(&info->status_event_wait_q, &wait); 2843 set_current_state(TASK_RUNNING); 2844 return rc; 2845} 2846 2847/* return the state of the serial control and status signals 2848 */ 2849static int tiocmget(struct tty_struct *tty, struct file *file) 2850{ 2851 struct mgsl_struct *info = tty->driver_data; 2852 unsigned int result; 2853 unsigned long flags; 2854 2855 spin_lock_irqsave(&info->irq_spinlock,flags); 2856 usc_get_serial_signals(info); 2857 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2858 2859 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + 2860 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + 2861 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + 2862 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + 2863 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + 2864 ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); 2865 2866 if (debug_level >= DEBUG_LEVEL_INFO) 2867 printk("%s(%d):%s tiocmget() value=%08X\n", 2868 __FILE__,__LINE__, info->device_name, result ); 2869 return result; 2870} 2871 2872/* set modem control signals (DTR/RTS) 2873 */ 2874static int tiocmset(struct tty_struct *tty, struct file *file, 2875 unsigned int set, unsigned int clear) 2876{ 2877 struct mgsl_struct *info = tty->driver_data; 2878 unsigned long flags; 2879 2880 if (debug_level >= DEBUG_LEVEL_INFO) 2881 printk("%s(%d):%s tiocmset(%x,%x)\n", 2882 __FILE__,__LINE__,info->device_name, set, clear); 2883 2884 if (set & TIOCM_RTS) 2885 info->serial_signals |= SerialSignal_RTS; 2886 if (set & TIOCM_DTR) 2887 info->serial_signals |= SerialSignal_DTR; 2888 if (clear & TIOCM_RTS) 2889 info->serial_signals &= ~SerialSignal_RTS; 2890 if (clear & TIOCM_DTR) 2891 info->serial_signals &= ~SerialSignal_DTR; 2892 2893 spin_lock_irqsave(&info->irq_spinlock,flags); 2894 usc_set_serial_signals(info); 2895 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2896 2897 return 0; 2898} 2899 2900/* mgsl_break() Set or clear transmit break condition 2901 * 2902 * Arguments: tty pointer to tty instance data 2903 * break_state -1=set break condition, 0=clear 2904 * Return Value: error code 2905 */ 2906static int mgsl_break(struct tty_struct *tty, int break_state) 2907{ 2908 struct mgsl_struct * info = tty->driver_data; 2909 unsigned long flags; 2910 2911 if (debug_level >= DEBUG_LEVEL_INFO) 2912 printk("%s(%d):mgsl_break(%s,%d)\n", 2913 __FILE__,__LINE__, info->device_name, break_state); 2914 2915 if (mgsl_paranoia_check(info, tty->name, "mgsl_break")) 2916 return -EINVAL; 2917 2918 spin_lock_irqsave(&info->irq_spinlock,flags); 2919 if (break_state == -1) 2920 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7)); 2921 else 2922 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7)); 2923 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2924 return 0; 2925 2926} /* end of mgsl_break() */ 2927 2928/* 
mgsl_ioctl()	Service an IOCTL request
 *
 * Arguments:
 *
 *	tty	pointer to tty instance data
 *	file	pointer to associated file object for device
 *	cmd	IOCTL command code
 *	arg	command argument/context
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
		    unsigned int cmd, unsigned long arg)
{
	struct mgsl_struct * info = tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
			info->device_name, cmd );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
		return -ENODEV;

	/* The modem-status ioctls must remain usable while the port is
	 * in an error state; every other command fails with -EIO. */
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
		    return -EIO;
	}

	return mgsl_ioctl_common(info, cmd, arg);
}

/* mgsl_ioctl_common()
 *
 *	Dispatch a device ioctl that has already passed the generic
 *	sanity/error-state checks in mgsl_ioctl().  Also shared by the
 *	network-device path (no tty involved).
 *
 * Arguments:
 *
 *	info	pointer to device instance data
 *	cmd	IOCTL command code
 *	arg	command argument/context (integer or user pointer,
 *		depending on cmd)
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
{
	int error;
	struct mgsl_icount cnow;	/* kernel counter temps */
	void __user *argp = (void __user *)arg;
	struct serial_icounter_struct __user *p_cuser;	/* user space */
	unsigned long flags;

	switch (cmd) {
		case MGSL_IOCGPARAMS:
			return mgsl_get_params(info, argp);
		case MGSL_IOCSPARAMS:
			return mgsl_set_params(info, argp);
		case MGSL_IOCGTXIDLE:
			return mgsl_get_txidle(info, argp);
		case MGSL_IOCSTXIDLE:
			return mgsl_set_txidle(info,(int)arg);
		case MGSL_IOCTXENABLE:
			return mgsl_txenable(info,(int)arg);
		case MGSL_IOCRXENABLE:
			return mgsl_rxenable(info,(int)arg);
		case MGSL_IOCTXABORT:
			return mgsl_txabort(info);
		case MGSL_IOCGSTATS:
			return mgsl_get_stats(info, argp);
		case MGSL_IOCWAITEVENT:
			return mgsl_wait_event(info, argp);
		case MGSL_IOCLOOPTXDONE:
			return mgsl_loopmode_send_done(info);
		/* Wait for modem input (DCD,RI,DSR,CTS) change
		 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
		 */
		case TIOCMIWAIT:
			return modem_input_wait(info,(int)arg);

		/* 
		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
		 * Return: write counters to the user passed counter struct
		 * NB: both 1->0 and 0->1 transitions are counted except for
		 *     RI where only 0->1 is counted.
		 */
		case TIOCGICOUNT:
			/* Snapshot the counters under the IRQ lock so the copy
			 * to user space sees a consistent set of values. */
			spin_lock_irqsave(&info->irq_spinlock,flags);
			cnow = info->icount;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			p_cuser = argp;
			PUT_USER(error,cnow.cts, &p_cuser->cts);
			if (error) return error;
			PUT_USER(error,cnow.dsr, &p_cuser->dsr);
			if (error) return error;
			PUT_USER(error,cnow.rng, &p_cuser->rng);
			if (error) return error;
			PUT_USER(error,cnow.dcd, &p_cuser->dcd);
			if (error) return error;
			PUT_USER(error,cnow.rx, &p_cuser->rx);
			if (error) return error;
			PUT_USER(error,cnow.tx, &p_cuser->tx);
			if (error) return error;
			PUT_USER(error,cnow.frame, &p_cuser->frame);
			if (error) return error;
			PUT_USER(error,cnow.overrun, &p_cuser->overrun);
			if (error) return error;
			PUT_USER(error,cnow.parity, &p_cuser->parity);
			if (error) return error;
			PUT_USER(error,cnow.brk, &p_cuser->brk);
			if (error) return error;
			PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
			if (error) return error;
			return 0;
		default:
			return -ENOIOCTLCMD;
	}
	return 0;
}

/* mgsl_set_termios()
 * 
 * 	Set new termios settings
 * 	
 * Arguments:
 * 
 * 	tty		pointer to tty structure
 * 	termios		pointer to buffer to hold returned old termios
 * 	
 * Return Value:		None
 */
static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
			tty->driver->name );

	/* Reprogram the hardware for the new line settings first. */
	mgsl_change_params(info);

	/* Handle transition to B0 status: baud 0 means drop DTR/RTS. */
	if (old_termios->c_cflag & CBAUD &&
	    !(tty->termios->c_cflag & CBAUD)) {
		/* SerialSignal_* are distinct bit masks, so '+' here acts
		 * as a bitwise OR of the two flags. */
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
		spin_lock_irqsave(&info->irq_spinlock,flags);
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle transition away from B0 status: reassert DTR, and RTS
	 * unless we are throttled with hardware flow control active. */
	if (!(old_termios->c_cflag & CBAUD) &&
	    tty->termios->c_cflag & CBAUD) {
		info->serial_signals |= SerialSignal_DTR;
		if (!(tty->termios->c_cflag & CRTSCTS) ||
		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			info->serial_signals |= SerialSignal_RTS;
		}
		spin_lock_irqsave(&info->irq_spinlock,flags);
		usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle turning off CRTSCTS: release any hw flow-control stop. */
	if (old_termios->c_cflag & CRTSCTS &&
	    !(tty->termios->c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		mgsl_start(tty);
	}

}	/* end of mgsl_set_termios() */

/* mgsl_close()
 * 
 * 	Called when port is closed. Wait for remaining data to be
 * 	sent. Disable port and free resources.
 * 	
 * Arguments:
 * 
 * 	tty	pointer to open tty structure
 * 	filp	pointer to open file object
 * 	
 * Return Value:	None
 */
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct * info = tty->driver_data;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->port.count);

	/* tty_port_close_start() returns 0 when this is not the final
	 * close (or the port was already hung up) - nothing to tear down. */
	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	mutex_lock(&info->port.mutex);
	/* Drain pending transmit data before shutting the hardware down,
	 * but only if the port was ever initialized. */
	if (info->port.flags & ASYNC_INITIALIZED)
		mgsl_wait_until_sent(tty, info->timeout);
	mgsl_flush_buffer(tty);
	tty_ldisc_flush(tty);
	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->port.count);

}	/* end of mgsl_close() */

/* mgsl_wait_until_sent()
 *
 *	Wait until the transmitter is empty.
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	timeout		time to wait for send completion
 *
 * Return Value:	None
 */
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct mgsl_struct * info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
		return;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if ( info->params.data_rate ) {
	       	char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* Synchronous modes: poll the software tx_active flag,
		 * which the ISR clears when the frame completes. */
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		/* Async mode: poll the USC transmit status register until
		 * the all-sent bit is set or transmit is disabled. */
		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
			info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
			 __FILE__,__LINE__, info->device_name );

}	/* end of mgsl_wait_until_sent()
*/ 3204 3205/* mgsl_hangup() 3206 * 3207 * Called by tty_hangup() when a hangup is signaled. 3208 * This is the same as to closing all open files for the port. 3209 * 3210 * Arguments: tty pointer to associated tty object 3211 * Return Value: None 3212 */ 3213static void mgsl_hangup(struct tty_struct *tty) 3214{ 3215 struct mgsl_struct * info = tty->driver_data; 3216 3217 if (debug_level >= DEBUG_LEVEL_INFO) 3218 printk("%s(%d):mgsl_hangup(%s)\n", 3219 __FILE__,__LINE__, info->device_name ); 3220 3221 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup")) 3222 return; 3223 3224 mgsl_flush_buffer(tty); 3225 shutdown(info); 3226 3227 info->port.count = 0; 3228 info->port.flags &= ~ASYNC_NORMAL_ACTIVE; 3229 info->port.tty = NULL; 3230 3231 wake_up_interruptible(&info->port.open_wait); 3232 3233} /* end of mgsl_hangup() */ 3234 3235/* 3236 * carrier_raised() 3237 * 3238 * Return true if carrier is raised 3239 */ 3240 3241static int carrier_raised(struct tty_port *port) 3242{ 3243 unsigned long flags; 3244 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3245 3246 spin_lock_irqsave(&info->irq_spinlock, flags); 3247 usc_get_serial_signals(info); 3248 spin_unlock_irqrestore(&info->irq_spinlock, flags); 3249 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; 3250} 3251 3252static void dtr_rts(struct tty_port *port, int on) 3253{ 3254 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3255 unsigned long flags; 3256 3257 spin_lock_irqsave(&info->irq_spinlock,flags); 3258 if (on) 3259 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 3260 else 3261 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); 3262 usc_set_serial_signals(info); 3263 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3264} 3265 3266 3267/* block_til_ready() 3268 * 3269 * Block the current process until the specified port 3270 * is ready to be opened. 
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	filp		pointer to open file object
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise error code
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mgsl_struct *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	bool		do_clocal = false;
	bool		extra_count = false;
	unsigned long	flags;
	int		dcd;
	struct tty_port *port = &info->port;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready on %s\n",
			 __FILE__,__LINE__, tty->driver->name );

	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
		/* nonblock mode is set or port is not enabled */
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	/* CLOCAL means ignore carrier detect - succeed immediately below. */
	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = true;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, port->count is dropped by one, so that
	 * mgsl_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */

	retval = 0;
	add_wait_queue(&port->open_wait, &wait);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready before block on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	spin_lock_irqsave(&info->irq_spinlock, flags);
	if (!tty_hung_up_p(filp)) {
		/* remember the decrement so it can be undone on exit */
		extra_count = true;
		port->count--;
	}
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	port->blocked_open++;

	while (1) {
		/* Keep DTR/RTS asserted while waiting (unless baud is B0). */
		if (tty->termios->c_cflag & CBAUD)
			tty_port_raise_dtr_rts(port);

		set_current_state(TASK_INTERRUPTIBLE);

		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		dcd = tty_port_carrier_raised(&info->port);

		/* Open may proceed once the port is not closing and either
		 * carrier is present or CLOCAL says we don't care. */
		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
			break;

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
				 __FILE__,__LINE__, tty->driver->name, port->count );

		/* Drop the BTM around the sleep so other tty ops can run. */
		tty_unlock();
		schedule();
		tty_lock();
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);

	/* FIXME: Racy on hangup during close wait */
	if (extra_count)
		port->count++;
	port->blocked_open--;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	if (!retval)
		port->flags |= ASYNC_NORMAL_ACTIVE;

	return retval;

}	/* end of block_til_ready() */

/* mgsl_open()
 *
 *	Called when a port is opened.  Init and enable port.
 *	Perform serial-specific initialization for the tty structure.
3380 * 3381 * Arguments: tty pointer to tty info structure 3382 * filp associated file pointer 3383 * 3384 * Return Value: 0 if success, otherwise error code 3385 */ 3386static int mgsl_open(struct tty_struct *tty, struct file * filp) 3387{ 3388 struct mgsl_struct *info; 3389 int retval, line; 3390 unsigned long flags; 3391 3392 /* verify range of specified line number */ 3393 line = tty->index; 3394 if ((line < 0) || (line >= mgsl_device_count)) { 3395 printk("%s(%d):mgsl_open with invalid line #%d.\n", 3396 __FILE__,__LINE__,line); 3397 return -ENODEV; 3398 } 3399 3400 /* find the info structure for the specified line */ 3401 info = mgsl_device_list; 3402 while(info && info->line != line) 3403 info = info->next_device; 3404 if (mgsl_paranoia_check(info, tty->name, "mgsl_open")) 3405 return -ENODEV; 3406 3407 tty->driver_data = info; 3408 info->port.tty = tty; 3409 3410 if (debug_level >= DEBUG_LEVEL_INFO) 3411 printk("%s(%d):mgsl_open(%s), old ref count = %d\n", 3412 __FILE__,__LINE__,tty->driver->name, info->port.count); 3413 3414 /* If port is closing, signal caller to try again */ 3415 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ 3416 if (info->port.flags & ASYNC_CLOSING) 3417 interruptible_sleep_on(&info->port.close_wait); 3418 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ? 3419 -EAGAIN : -ERESTARTSYS); 3420 goto cleanup; 3421 } 3422 3423 info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 
1 : 0; 3424 3425 spin_lock_irqsave(&info->netlock, flags); 3426 if (info->netcount) { 3427 retval = -EBUSY; 3428 spin_unlock_irqrestore(&info->netlock, flags); 3429 goto cleanup; 3430 } 3431 info->port.count++; 3432 spin_unlock_irqrestore(&info->netlock, flags); 3433 3434 if (info->port.count == 1) { 3435 /* 1st open on this device, init hardware */ 3436 retval = startup(info); 3437 if (retval < 0) 3438 goto cleanup; 3439 } 3440 3441 retval = block_til_ready(tty, filp, info); 3442 if (retval) { 3443 if (debug_level >= DEBUG_LEVEL_INFO) 3444 printk("%s(%d):block_til_ready(%s) returned %d\n", 3445 __FILE__,__LINE__, info->device_name, retval); 3446 goto cleanup; 3447 } 3448 3449 if (debug_level >= DEBUG_LEVEL_INFO) 3450 printk("%s(%d):mgsl_open(%s) success\n", 3451 __FILE__,__LINE__, info->device_name); 3452 retval = 0; 3453 3454cleanup: 3455 if (retval) { 3456 if (tty->count == 1) 3457 info->port.tty = NULL; /* tty layer will release tty struct */ 3458 if(info->port.count) 3459 info->port.count--; 3460 } 3461 3462 return retval; 3463 3464} /* end of mgsl_open() */ 3465 3466/* 3467 * /proc fs routines.... 
 */

/*
 * line_info()
 *
 *	Emit one /proc line describing a single adapter: bus resources,
 *	current modem signal states, mode-specific statistics, and a
 *	snapshot of the USC hardware registers.
 */
static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
{
	char	stat_buf[30];
	unsigned long flags;

	if (info->bus_type == MGSL_BUS_TYPE_PCI) {
		seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
			info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base);
	} else {
		seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
			info->device_name, info->io_base,
			info->irq_level, info->dma_level);
	}

	/* output current serial signal states */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* Build "|RTS|CTS|..." string; the leading '|' is skipped when
	 * printed (stat_buf+1 below).  Max content is 22 chars + NUL,
	 * which fits in stat_buf[30]. */
	stat_buf[0] = 0;
	stat_buf[1] = 0;
	if (info->serial_signals & SerialSignal_RTS)
		strcat(stat_buf, "|RTS");
	if (info->serial_signals & SerialSignal_CTS)
		strcat(stat_buf, "|CTS");
	if (info->serial_signals & SerialSignal_DTR)
		strcat(stat_buf, "|DTR");
	if (info->serial_signals & SerialSignal_DSR)
		strcat(stat_buf, "|DSR");
	if (info->serial_signals & SerialSignal_DCD)
		strcat(stat_buf, "|CD");
	if (info->serial_signals & SerialSignal_RI)
		strcat(stat_buf, "|RI");

	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ) {
		seq_printf(m, " HDLC txok:%d rxok:%d",
			      info->icount.txok, info->icount.rxok);
		if (info->icount.txunder)
			seq_printf(m, " txunder:%d", info->icount.txunder);
		if (info->icount.txabort)
			seq_printf(m, " txabort:%d", info->icount.txabort);
		if (info->icount.rxshort)
			seq_printf(m, " rxshort:%d", info->icount.rxshort);
		if (info->icount.rxlong)
			seq_printf(m, " rxlong:%d", info->icount.rxlong);
		if (info->icount.rxover)
			seq_printf(m, " rxover:%d", info->icount.rxover);
		if (info->icount.rxcrc)
			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
	} else {
		seq_printf(m, " ASYNC tx:%d rx:%d",
			      info->icount.tx, info->icount.rx);
		if (info->icount.frame)
			seq_printf(m, " fe:%d", info->icount.frame);
		if (info->icount.parity)
			seq_printf(m, " pe:%d", info->icount.parity);
		if (info->icount.brk)
			seq_printf(m, " brk:%d", info->icount.brk);
		if (info->icount.overrun)
			seq_printf(m, " oe:%d", info->icount.overrun);
	}

	/* Append serial signal status to end */
	seq_printf(m, " %s\n", stat_buf+1);

	seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
	 info->tx_active,info->bh_requested,info->bh_running,
	 info->pending_bh);

	/* Dump the USC registers atomically for a consistent snapshot. */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	{
	u16 Tcsr = usc_InReg( info, TCSR );
	u16 Tdmr = usc_InDmaReg( info, TDMR );
	u16 Ticr = usc_InReg( info, TICR );
	u16 Rscr = usc_InReg( info, RCSR );
	u16 Rdmr = usc_InDmaReg( info, RDMR );
	u16 Ricr = usc_InReg( info, RICR );
	u16 Icr = usc_InReg( info, ICR );
	u16 Dccr = usc_InReg( info, DCCR );
	u16 Tmr = usc_InReg( info, TMR );
	u16 Tccr = usc_InReg( info, TCCR );
	u16 Ccar = inw( info->io_base + CCAR );
	seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
                        "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
	 		Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Called to print information about devices */
static int mgsl_proc_show(struct seq_file *m, void *v)
{
	struct mgsl_struct *info;

	seq_printf(m, "synclink driver:%s\n", driver_version);

	info = mgsl_device_list;
	while( info ) {
		line_info(m, info);
		info = info->next_device;
	}
	return 0;
}

static int mgsl_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mgsl_proc_show, NULL);
}

static const struct file_operations mgsl_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mgsl_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* mgsl_allocate_dma_buffers()
 *
 *	Allocate and format DMA buffers (ISA adapter)
 *	or format shared memory buffers (PCI adapter).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
{
	unsigned short BuffersPerFrame;

	info->last_mem_alloc = 0;

	/* Calculate the number of DMA buffers necessary to hold the */
	/* largest allowable frame size. Note: If the max frame size is */
	/* not an even multiple of the DMA buffer size then we need to */
	/* round the buffer count per frame up one. */

	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
	if ( info->max_frame_size % DMABUFFERSIZE )
		BuffersPerFrame++;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/*
		 * The PCI adapter has 256KBytes of shared memory to use.
		 * This is 64 PAGE_SIZE buffers.
		 *
		 * The first page is used for padding at this time so the
		 * buffer list does not begin at offset 0 of the PCI
		 * adapter's shared memory.
		 *
		 * The 2nd page is used for the buffer list. A 4K buffer
		 * list can hold 128 DMA_BUFFER structures at 32 bytes
		 * each.
		 *
		 * This leaves 62 4K pages.
		 *
		 * The next N pages are used for transmit frame(s). We
		 * reserve enough 4K page blocks to hold the required
		 * number of transmit dma buffers (num_tx_dma_buffers),
		 * each of MaxFrameSize size.
		 *
		 * Of the remaining pages (62-N), determine how many can
		 * be used to receive full MaxFrameSize inbound frames
		 */
		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = 62 - info->tx_buffer_count;
	} else {
		/* Calculate the number of PAGE_SIZE buffers needed for */
		/* receive and transmit DMA buffers. */


		/* Calculate the number of DMA buffers necessary to */
		/* hold 7 max size receive frames and one max size transmit frame. */
		/* The receive buffer count is bumped by one so we avoid an */
		/* End of List condition if all receive buffers are used when */
		/* using linked list DMA buffers. */

		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;

		/*
		 * limit total TxBuffers & RxBuffers to 62 4K total
		 * (ala PCI Allocation)
		 */

		if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
			info->rx_buffer_count = 62 - info->tx_buffer_count;

	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);

	/* Any single allocation failure aborts; the caller is expected
	 * to release partial allocations via mgsl_free_dma_buffers(). */
	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
		  mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
		  mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
		  mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
		  mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
		return -ENOMEM;
	}

	mgsl_reset_rx_dma_buffers( info );
	mgsl_reset_tx_dma_buffers( info );

	return 0;

}	/* end of mgsl_allocate_dma_buffers() */

/*
 * mgsl_alloc_buffer_list_memory()
 *
 * Allocate a common DMA buffer for use
as the
 * receive and transmit buffer lists.
 *
 * A buffer list is a set of buffer entries where each entry contains
 * a pointer to an actual buffer and a pointer to the next buffer entry
 * (plus some other info about the buffer).
 *
 * The buffer entries for a list are built to form a circular list so
 * that when the entire list has been traversed you start back at the
 * beginning.
 *
 * This function allocates memory for just the buffer entries.
 * The links (pointer to next entry) are filled in with the physical
 * address of the next entry so the adapter can navigate the list
 * using bus master DMA.  The pointers to the actual buffers are filled
 * out later when the actual buffers are allocated.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
{
	unsigned int i;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI adapter uses shared memory; carve the list out of
		 * the adapter's on-board window (bump allocator). */
		info->buffer_list = info->memory_base + info->last_mem_alloc;
		info->buffer_list_phys = info->last_mem_alloc;
		info->last_mem_alloc += BUFFERLISTSIZE;
	} else {
		/* ISA adapter uses system memory. */
		/* The buffer lists are allocated as a common buffer that both */
		/* the processor and adapter can access. This allows the driver to */
		/* inspect portions of the buffer while other portions are being */
		/* updated by the adapter using Bus Master DMA. */

		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
		if (info->buffer_list == NULL)
			return -ENOMEM;
		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
	}

	/* We got the memory for the buffer entry lists. */
	/* Initialize the memory block to all zeros. */
	memset( info->buffer_list, 0, BUFFERLISTSIZE );

	/* Save virtual address pointers to the receive and */
	/* transmit buffer lists. (Receive 1st). These pointers will */
	/* be used by the processor to access the lists. */
	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list += info->rx_buffer_count;

	/*
	 * Build the links for the buffer entry lists such that
	 * two circular lists are built. (Transmit and Receive).
	 *
	 * Note: the links are physical addresses
	 * which are read by the adapter to determine the next
	 * buffer entry to use.
	 */

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->rx_buffer_list[i].phys_entry =
			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		info->rx_buffer_list[i].link = info->buffer_list_phys;

		/* last entry's link stays pointing at entry 0 => circular */
		if ( i < info->rx_buffer_count - 1 )
			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		info->tx_buffer_list[i].link = info->buffer_list_phys +
			info->rx_buffer_count * sizeof(DMABUFFERENTRY);

		/* last entry's link wraps to the first tx entry => circular */
		if ( i < info->tx_buffer_count - 1 )
			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	return 0;

}	/* end of mgsl_alloc_buffer_list_memory() */

/* Free DMA buffers allocated for use as the
 * receive and transmit
buffer lists.
 * Warning:
 *
 *	The data transfer buffers associated with the buffer list
 *	MUST be freed before freeing the buffer list itself because
 *	the buffer list contains the information necessary to free
 *	the individual buffers!
 */
static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
{
	/* PCI buffer list lives in the adapter's shared memory window,
	 * so only the ISA (coherent DMA) allocation needs freeing. */
	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);

	info->buffer_list = NULL;
	info->rx_buffer_list = NULL;
	info->tx_buffer_list = NULL;

}	/* end of mgsl_free_buffer_list_memory() */

/*
 * mgsl_alloc_frame_memory()
 *
 * 	Allocate the frame DMA buffers used by the specified buffer list.
 * 	Each DMA buffer will be one memory page in size. This is necessary
 * 	because memory can fragment enough that it may be impossible to get
 * 	contiguous pages.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	BufferList	pointer to list of buffer entries
 *	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
{
	int i;
	u32 phys_addr;

	/* Allocate page sized buffers for the receive buffer list */

	for ( i = 0; i < Buffercount; i++ ) {
		if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
			/* PCI adapter uses shared memory buffers. */
			BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
			phys_addr = info->last_mem_alloc;
			info->last_mem_alloc += DMABUFFERSIZE;
		} else {
			/* ISA adapter uses system memory. */
			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
			if (BufferList[i].virt_addr == NULL)
				return -ENOMEM;
			phys_addr = (u32)(BufferList[i].dma_addr);
		}
		BufferList[i].phys_addr = phys_addr;
	}

	return 0;

}	/* end of mgsl_alloc_frame_memory() */

/*
 * mgsl_free_frame_memory()
 *
 *	Free the buffers associated with
 *	each buffer entry of a buffer list.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	BufferList	pointer to list of buffer entries
 *	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	None
 */
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
{
	int i;

	if ( BufferList ) {
		for ( i = 0 ; i < Buffercount ; i++ ) {
			if ( BufferList[i].virt_addr ) {
				/* PCI buffers are in the shared memory window
				 * and are not individually freed. */
				if ( info->bus_type != MGSL_BUS_TYPE_PCI )
					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
				BufferList[i].virt_addr = NULL;
			}
		}
	}

}	/* end of mgsl_free_frame_memory() */

/* mgsl_free_dma_buffers()
 *
 *	Free DMA buffers
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
	/* frame buffers first - the list describes how to free them */
	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
	mgsl_free_buffer_list_memory( info );

}	/* end of mgsl_free_dma_buffers() */


/*
 * mgsl_alloc_intermediate_rxbuffer_memory()
 *
 *	Allocate a buffer large enough to hold max_frame_size. This buffer
 *	is used to pass an assembled frame to the line discipline.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	/* GFP_DMA: buffer must be reachable by the ISA DMA controller */
	info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
	if ( info->intermediate_rxbuffer == NULL )
		return -ENOMEM;

	return 0;

}	/* end of mgsl_alloc_intermediate_rxbuffer_memory() */

/*
 * mgsl_free_intermediate_rxbuffer_memory()
 *
 *	Release the intermediate receive buffer (kfree(NULL) is a no-op).
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	kfree(info->intermediate_rxbuffer);
	info->intermediate_rxbuffer = NULL;

}	/* end of mgsl_free_intermediate_rxbuffer_memory() */

/*
 * mgsl_alloc_intermediate_txbuffer_memory()
 *
 * 	Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size.
 * 	This buffer is used to load transmit frames into the adapter's dma transfer
 * 	buffers when there is sufficient space.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s %s(%d) allocating %d tx holding buffers\n",
			info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);

	memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));

	for ( i=0; i<info->num_tx_holding_buffers; ++i) {
		info->tx_holding_buffers[i].buffer =
			kmalloc(info->max_frame_size, GFP_KERNEL);
		if (info->tx_holding_buffers[i].buffer == NULL) {
			/* unwind the buffers already allocated */
			for (--i; i >= 0; i--) {
				kfree(info->tx_holding_buffers[i].buffer);
				info->tx_holding_buffers[i].buffer = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;

}	/* end of mgsl_alloc_intermediate_txbuffer_memory() */

/*
 * mgsl_free_intermediate_txbuffer_memory()
 *
 *	Release all tx holding buffers and reset the ring indices.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
		kfree(info->tx_holding_buffers[i].buffer);
		info->tx_holding_buffers[i].buffer = NULL;
	}

	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_free_intermediate_txbuffer_memory() */


/*
 * load_next_tx_holding_buffer()
 *
 * attempts to load the next buffered tx request into the
 * tx dma buffers
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	true if next buffered tx request loaded
 * 			into adapter's tx dma buffer,
 * 			false otherwise
 */
static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
	bool ret = false;

	if ( info->tx_holding_count ) {
		/* determine if we have enough tx dma buffers
		 * to accommodate the next tx frame
		 */
		struct tx_holding_buffer *ptx =
			&info->tx_holding_buffers[info->get_tx_holding_index];
		int num_free = num_free_tx_dma_buffers(info);
		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
		if ( ptx->buffer_size % DMABUFFERSIZE )
			++num_needed;

		if (num_needed <= num_free) {
			info->xmit_cnt = ptx->buffer_size;
			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);

			/* consume this entry of the holding-buffer ring */
			--info->tx_holding_count;
			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
				info->get_tx_holding_index=0;

			/* restart transmit timer */
			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));

			ret = true;
		}
	}

	return ret;
}

/*
 * save_tx_buffer_request()
 *
 * attempt to store transmit frame request for later transmission
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	Buffer		pointer to buffer containing frame to load
 *	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value:	1 if able to store, 0 otherwise
 */
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
{
	struct tx_holding_buffer *ptx;

	if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
		return 0;	        /* all buffers in use */
	}

	/* copy the frame into the next free slot of the holding ring */
	ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
	ptx->buffer_size = BufferSize;
	memcpy( ptx->buffer, Buffer, BufferSize);

	++info->tx_holding_count;
	if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
		info->put_tx_holding_index=0;

	return 1;
}

static int mgsl_claim_resources(struct mgsl_struct *info)
{
	if
(request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
		printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->io_base);
		return -ENODEV;
	}
	info->io_addr_requested = true;

	if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
		info->device_name, info ) < 0 ) {
		printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, info->irq_level );
		goto errout;
	}
	info->irq_requested = true;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI adapter: claim and map the 256K shared memory window
		 * and the 128-byte LCR region, then sanity-check the
		 * shared memory before use.
		 */
		if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
			printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base);
			goto errout;
		}
		info->shared_mem_requested = true;
		if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
			printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
			goto errout;
		}
		info->lcr_mem_requested = true;

		info->memory_base = ioremap_nocache(info->phys_memory_base,
								0x40000);
		if (!info->memory_base) {
			printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		if ( !mgsl_memory_test(info) ) {
			printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		info->lcr_base = ioremap_nocache(info->phys_lcr_base,
								PAGE_SIZE);
		if (!info->lcr_base) {
			printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
			goto errout;
		}
		/* lcr_base keeps the offset added in; mgsl_release_resources()
		 * subtracts it back before iounmap()
		 */
		info->lcr_base += info->lcr_offset;

	} else {
		/* claim DMA channel */

		if (request_dma(info->dma_level,info->device_name) < 0){
			printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
				__FILE__,__LINE__,info->device_name, info->dma_level );
			mgsl_release_resources( info );
			return -ENODEV;
		}
		info->dma_requested = true;

		/* ISA adapter uses bus master DMA */
		set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
		enable_dma(info->dma_level);
	}

	if ( mgsl_allocate_dma_buffers(info) < 0 ) {
		printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, info->dma_level );
		goto errout;
	}

	return 0;
errout:
	/* unified unwind path: release everything claimed so far */
	mgsl_release_resources(info);
	return -ENODEV;

} /* end of mgsl_claim_resources() */

/*
 * mgsl_release_resources()
 *
 * Release all resources claimed by mgsl_claim_resources(). Each
 * release is guarded by the corresponding *_requested flag (or a
 * non-NULL mapping), so this is safe to call after a partial claim.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_release_resources(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) entry\n",
			__FILE__,__LINE__,info->device_name );

	if ( info->irq_requested ) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}
	if ( info->dma_requested ) {
		disable_dma(info->dma_level);
		free_dma(info->dma_level);
		info->dma_requested = false;
	}
	mgsl_free_dma_buffers(info);
	mgsl_free_intermediate_rxbuffer_memory(info);
	mgsl_free_intermediate_txbuffer_memory(info);

	if ( info->io_addr_requested ) {
		release_region(info->io_base,info->io_addr_size);
		info->io_addr_requested = false;
	}
	if ( info->shared_mem_requested ) {
		release_mem_region(info->phys_memory_base,0x40000);
		info->shared_mem_requested = false;
	}
	if ( info->lcr_mem_requested ) {
		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
		info->lcr_mem_requested = false;
	}
	if (info->memory_base){
		iounmap(info->memory_base);
		info->memory_base = NULL;
	}
	if (info->lcr_base){
		/* undo the lcr_offset added after ioremap in
		 * mgsl_claim_resources() so we unmap the original address
		 */
		iounmap(info->lcr_base - info->lcr_offset);
		info->lcr_base = NULL;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) exit\n",
			__FILE__,__LINE__,info->device_name );

} /* end of mgsl_release_resources() */

/* mgsl_add_device()
 *
 * Add the specified device instance data structure to the
 * global linked list of devices and increment the device count.
 * Also applies the per-line module parameters (maxframe, txdmabufs,
 * txholdbufs) and clamps max_frame_size to [4096,65535].
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_add_device( struct mgsl_struct *info )
{
	info->next_device = NULL;
	info->line = mgsl_device_count;
	sprintf(info->device_name,"ttySL%d",info->line);

	if (info->line < MAX_TOTAL_DEVICES) {
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];

		if (txdmabufs[info->line]) {
			info->num_tx_dma_buffers = txdmabufs[info->line];
			if (info->num_tx_dma_buffers < 1)
				info->num_tx_dma_buffers = 1;
		}

		if (txholdbufs[info->line]) {
			info->num_tx_holding_buffers = txholdbufs[info->line];
			if (info->num_tx_holding_buffers < 1)
				info->num_tx_holding_buffers = 1;
			else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
				info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
		}
	}

	mgsl_device_count++;

	/* append to the tail of the singly linked global device list */
	if ( !mgsl_device_list )
		mgsl_device_list = info;
	else {
		struct mgsl_struct *current_dev = mgsl_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	if ( info->max_frame_size < 4096 )
		info->max_frame_size = 4096;
	else if ( info->max_frame_size > 65535 )
		info->max_frame_size = 65535;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
			info->hw_version +
1, info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base,
			info->max_frame_size );
	} else {
		printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
			info->device_name, info->io_base, info->irq_level, info->dma_level,
			info->max_frame_size );
	}

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif

} /* end of mgsl_add_device() */

/* tty_port callbacks for modem control (carrier detect / DTR+RTS) */
static const struct tty_port_operations mgsl_port_ops = {
	.carrier_raised = carrier_raised,
	.dtr_rts = dtr_rts,
};


/* mgsl_allocate_device()
 *
 * Allocate and initialize a device instance structure
 *
 * Arguments:		none
 * Return Value:	pointer to mgsl_struct if success, otherwise NULL
 */
static struct mgsl_struct* mgsl_allocate_device(void)
{
	struct mgsl_struct *info;

	info = kzalloc(sizeof(struct mgsl_struct),
		 GFP_KERNEL);

	if (!info) {
		printk("Error can't allocate device instance data\n");
	} else {
		/* set defaults; per-line module parameters may override
		 * some of these later in mgsl_add_device()
		 */
		tty_port_init(&info->port);
		info->port.ops = &mgsl_port_ops;
		info->magic = MGSL_MAGIC;
		INIT_WORK(&info->task, mgsl_bh_handler);
		info->max_frame_size = 4096;
		info->port.close_delay = 5*HZ/10;
		info->port.closing_wait = 30*HZ;
		init_waitqueue_head(&info->status_event_wait_q);
		init_waitqueue_head(&info->event_wait_q);
		spin_lock_init(&info->irq_spinlock);
		spin_lock_init(&info->netlock);
		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
		info->idle_mode = HDLC_TXIDLE_FLAGS;
		info->num_tx_dma_buffers = 1;
		info->num_tx_holding_buffers = 0;
	}

	return info;

} /* end of mgsl_allocate_device()*/

/* tty operations implemented by this driver */
static const struct tty_operations mgsl_ops = {
	.open = mgsl_open,
	.close = mgsl_close,
	.write = mgsl_write,
	.put_char = mgsl_put_char,
	.flush_chars = mgsl_flush_chars,
	.write_room = mgsl_write_room,
	.chars_in_buffer = mgsl_chars_in_buffer,
	.flush_buffer = mgsl_flush_buffer,
	.ioctl = mgsl_ioctl,
	.throttle = mgsl_throttle,
	.unthrottle = mgsl_unthrottle,
	.send_xchar = mgsl_send_xchar,
	.break_ctl = mgsl_break,
	.wait_until_sent = mgsl_wait_until_sent,
	.set_termios = mgsl_set_termios,
	.stop = mgsl_stop,
	.start = mgsl_start,
	.hangup = mgsl_hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
	.proc_fops = &mgsl_proc_fops,
};

/*
 * perform tty device initialization
 *
 * Allocates and registers the "ttySL" tty driver (up to 128 lines,
 * 9600 8N1 default termios). Returns 0 on success or a negative
 * error code; on registration failure the driver is freed and
 * serial_driver reset to NULL.
 */
static int mgsl_init_tty(void)
{
	int rc;

	serial_driver = alloc_tty_driver(128);
	if (!serial_driver)
		return -ENOMEM;

	serial_driver->owner = THIS_MODULE;
	serial_driver->driver_name = "synclink";
	serial_driver->name = "ttySL";
	serial_driver->major = ttymajor;
	serial_driver->minor_start = 64;
	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver->subtype = SERIAL_TYPE_NORMAL;
	serial_driver->init_termios = tty_std_termios;
	serial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	serial_driver->init_termios.c_ispeed = 9600;
	serial_driver->init_termios.c_ospeed = 9600;
	serial_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(serial_driver, &mgsl_ops);
	if ((rc = tty_register_driver(serial_driver)) < 0) {
		printk("%s(%d):Couldn't register serial driver\n",
			__FILE__,__LINE__);
		put_tty_driver(serial_driver);
		serial_driver = NULL;
		return rc;
	}

	printk("%s %s, tty major#%d\n",
		driver_name, driver_version,
		serial_driver->major);
	return 0;
}

/* enumerate user specified ISA adapters
 */
static void mgsl_enum_isa_devices(void)
{
	struct mgsl_struct *info;
	int i;

	/* Check for user specified ISA devices */

	for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
		if ( debug_level >=
DEBUG_LEVEL_INFO )
			printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
				io[i], irq[i], dma[i] );

		info = mgsl_allocate_device();
		if ( !info ) {
			/* error allocating device instance data */
			if ( debug_level >= DEBUG_LEVEL_ERROR )
				printk( "can't allocate device instance data.\n");
			continue;
		}

		/* Copy user configuration info to device instance data */
		info->io_base = (unsigned int)io[i];
		info->irq_level = (unsigned int)irq[i];
		info->irq_level = irq_canonicalize(info->irq_level);
		info->dma_level = (unsigned int)dma[i];
		info->bus_type = MGSL_BUS_TYPE_ISA;
		info->io_addr_size = 16;
		info->irq_flags = 0;

		mgsl_add_device( info );
	}
}

/*
 * synclink_cleanup()
 *
 * Module unload / init-failure teardown: unregister the tty driver
 * (if registered), release resources and free every device on the
 * global list, and unregister the PCI driver if it was registered.
 */
static void synclink_cleanup(void)
{
	int rc;
	struct mgsl_struct *info;
	struct mgsl_struct *tmp;

	printk("Unloading %s: %s\n", driver_name, driver_version);

	if (serial_driver) {
		if ((rc = tty_unregister_driver(serial_driver)))
			printk("%s(%d) failed to unregister tty driver err=%d\n",
			       __FILE__,__LINE__,rc);
		put_tty_driver(serial_driver);
	}

	info = mgsl_device_list;
	while(info) {
#if SYNCLINK_GENERIC_HDLC
		hdlcdev_exit(info);
#endif
		mgsl_release_resources(info);
		/* save pointer before freeing the node we iterate from */
		tmp = info;
		info = info->next_device;
		kfree(tmp);
	}

	if (pci_registered)
		pci_unregister_driver(&synclink_pci_driver);
}

/*
 * synclink_init()
 *
 * Module entry point: enumerate ISA devices from module parameters,
 * register the PCI driver (failure is logged but not fatal), then
 * register the tty driver. On tty failure everything is torn down
 * via synclink_cleanup().
 */
static int __init synclink_init(void)
{
	int rc;

	if (break_on_load) {
		mgsl_get_text_ptr();
		BREAKPOINT();
	}

	printk("%s %s\n", driver_name, driver_version);

	mgsl_enum_isa_devices();
	if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
		printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
	else
		pci_registered = true;

	if ((rc = mgsl_init_tty()) < 0)
		goto error;

	return 0;

error:
	synclink_cleanup();
	return rc;
}

static void __exit synclink_exit(void)
{
	synclink_cleanup();
}

module_init(synclink_init);
module_exit(synclink_exit);

/*
 * usc_RTCmd()
 *
 * Issue a USC Receive/Transmit command to the
 * Channel Command/Address Register (CCAR).
 *
 * Notes:
 *
 *    The command is encoded in the most significant 5 bits <15..11>
 *    of the CCAR value. Bits <10..7> of the CCAR must be preserved
 *    and Bits <6..0> must be written as zeros.
 *
 * Arguments:
 *
 *	info	pointer to device information structure
 *	Cmd	command mask (use symbolic macros)
 *
 * Return Value:
 *
 *	None
 */
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* output command to CCAR in bits <15..11> */
	/* preserve bits <10..7>, bits <6..0> must be zero */

	outw( Cmd + info->loopback_bits, info->io_base + CCAR );

	/* Read to flush write to CCAR */
	/* (PCI writes are posted; the read forces completion) */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base + CCAR );

} /* end of usc_RTCmd() */

/*
 * usc_DmaCmd()
 *
 * Issue a DMA command to the DMA Command/Address Register (DCAR).
 *
 * Arguments:
 *
 *	info	pointer to device information structure
 *	Cmd	DMA command mask (usc_DmaCmd_XX Macros)
 *
 * Return Value:
 *
 *	None
 */
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* write command mask to DCAR */
	/* (mbre_bit preserves the Master Bus Request Enable state) */
	outw( Cmd + info->mbre_bit, info->io_base );

	/* Read to flush write to DCAR */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base );

} /* end of usc_DmaCmd() */

/*
 * usc_OutDmaReg()
 *
 * Write a 16-bit value to a USC DMA register
 *
 * Arguments:
 *
 *	info	pointer to device info structure
 *	RegAddr	register address (number) for write
 *	RegValue 16-bit value to write to register
 *
 * Return Value:
 *
 *	None
 *
 */
static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* two-step access: select register via DCAR, then write value */
	outw( RegAddr + info->mbre_bit, info->io_base );
	outw( RegValue, info->io_base );

	/* Read to flush write to DCAR */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base );

} /* end of usc_OutDmaReg() */

/*
 * usc_InDmaReg()
 *
 * Read a 16-bit value from a DMA register
 *
 * Arguments:
 *
 *	info	pointer to device info structure
 *	RegAddr	register address (number) to read from
 *
 * Return Value:
 *
 *	The 16-bit value read from register
 *
 */
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* two-step access: select register via DCAR, then read value */
	outw( RegAddr + info->mbre_bit, info->io_base );
	return inw( info->io_base );

} /* end of usc_InDmaReg() */

/*
 *
 * usc_OutReg()
 *
 * Write a
16-bit value to a USC serial channel register 4592 * 4593 * Arguments: 4594 * 4595 * info pointer to device info structure 4596 * RegAddr register address (number) to write to 4597 * RegValue 16-bit value to write to register 4598 * 4599 * Return Value: 4600 * 4601 * None 4602 * 4603 */ 4604static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4605{ 4606 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4607 outw( RegValue, info->io_base + CCAR ); 4608 4609 /* Read to flush write to CCAR */ 4610 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4611 inw( info->io_base + CCAR ); 4612 4613} /* end of usc_OutReg() */ 4614 4615/* 4616 * usc_InReg() 4617 * 4618 * Reads a 16-bit value from a USC serial channel register 4619 * 4620 * Arguments: 4621 * 4622 * info pointer to device extension 4623 * RegAddr register address (number) to read from 4624 * 4625 * Return Value: 4626 * 4627 * 16-bit value read from register 4628 */ 4629static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) 4630{ 4631 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4632 return inw( info->io_base + CCAR ); 4633 4634} /* end of usc_InReg() */ 4635 4636/* usc_set_sdlc_mode() 4637 * 4638 * Set up the adapter for SDLC DMA communications. 4639 * 4640 * Arguments: info pointer to device instance data 4641 * Return Value: NONE 4642 */ 4643static void usc_set_sdlc_mode( struct mgsl_struct *info ) 4644{ 4645 u16 RegValue; 4646 bool PreSL1660; 4647 4648 /* 4649 * determine if the IUSC on the adapter is pre-SL1660. If 4650 * not, take advantage of the UnderWait feature of more 4651 * modern chips. If an underrun occurs and this bit is set, 4652 * the transmitter will idle the programmed idle pattern 4653 * until the driver has time to service the underrun. Otherwise, 4654 * the dma controller may get the cycles previously requested 4655 * and begin transmitting queued tx data. 
4656 */ 4657 usc_OutReg(info,TMCR,0x1f); 4658 RegValue=usc_InReg(info,TMDR); 4659 PreSL1660 = (RegValue == IUSC_PRE_SL1660); 4660 4661 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 4662 { 4663 /* 4664 ** Channel Mode Register (CMR) 4665 ** 4666 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun 4667 ** <13> 0 0 = Transmit Disabled (initially) 4668 ** <12> 0 1 = Consecutive Idles share common 0 4669 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop 4670 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling 4671 ** <3..0> 0110 Receiver Mode = HDLC/SDLC 4672 ** 4673 ** 1000 1110 0000 0110 = 0x8e06 4674 */ 4675 RegValue = 0x8e06; 4676 4677 /*-------------------------------------------------- 4678 * ignore user options for UnderRun Actions and 4679 * preambles 4680 *--------------------------------------------------*/ 4681 } 4682 else 4683 { 4684 /* Channel mode Register (CMR) 4685 * 4686 * <15..14> 00 Tx Sub modes, Underrun Action 4687 * <13> 0 1 = Send Preamble before opening flag 4688 * <12> 0 1 = Consecutive Idles share common 0 4689 * <11..8> 0110 Transmitter mode = HDLC/SDLC 4690 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling 4691 * <3..0> 0110 Receiver mode = HDLC/SDLC 4692 * 4693 * 0000 0110 0000 0110 = 0x0606 4694 */ 4695 if (info->params.mode == MGSL_MODE_RAW) { 4696 RegValue = 0x0001; /* Set Receive mode = external sync */ 4697 4698 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ 4699 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); 4700 4701 /* 4702 * TxSubMode: 4703 * CMR <15> 0 Don't send CRC on Tx Underrun 4704 * CMR <14> x undefined 4705 * CMR <13> 0 Send preamble before openning sync 4706 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength 4707 * 4708 * TxMode: 4709 * CMR <11-8) 0100 MonoSync 4710 * 4711 * 0x00 0100 xxxx xxxx 04xx 4712 */ 4713 RegValue |= 0x0400; 4714 } 4715 else { 4716 4717 RegValue = 0x0606; 4718 4719 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 ) 4720 RegValue |= BIT14; 
4721 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) 4722 RegValue |= BIT15; 4723 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) 4724 RegValue |= BIT15 + BIT14; 4725 } 4726 4727 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) 4728 RegValue |= BIT13; 4729 } 4730 4731 if ( info->params.mode == MGSL_MODE_HDLC && 4732 (info->params.flags & HDLC_FLAG_SHARE_ZERO) ) 4733 RegValue |= BIT12; 4734 4735 if ( info->params.addr_filter != 0xff ) 4736 { 4737 /* set up receive address filtering */ 4738 usc_OutReg( info, RSR, info->params.addr_filter ); 4739 RegValue |= BIT4; 4740 } 4741 4742 usc_OutReg( info, CMR, RegValue ); 4743 info->cmr_value = RegValue; 4744 4745 /* Receiver mode Register (RMR) 4746 * 4747 * <15..13> 000 encoding 4748 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4749 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) 4750 * <9> 0 1 = Include Receive chars in CRC 4751 * <8> 1 1 = Use Abort/PE bit as abort indicator 4752 * <7..6> 00 Even parity 4753 * <5> 0 parity disabled 4754 * <4..2> 000 Receive Char Length = 8 bits 4755 * <1..0> 00 Disable Receiver 4756 * 4757 * 0000 0101 0000 0000 = 0x0500 4758 */ 4759 4760 RegValue = 0x0500; 4761 4762 switch ( info->params.encoding ) { 4763 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4764 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4765 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4766 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4767 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4768 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4769 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4770 } 4771 4772 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4773 RegValue |= BIT9; 4774 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4775 RegValue |= ( BIT12 | BIT10 | BIT9 ); 4776 4777 usc_OutReg( info, RMR, RegValue ); 
4778 4779 /* Set the Receive count Limit Register (RCLR) to 0xffff. */ 4780 /* When an opening flag of an SDLC frame is recognized the */ 4781 /* Receive Character count (RCC) is loaded with the value in */ 4782 /* RCLR. The RCC is decremented for each received byte. The */ 4783 /* value of RCC is stored after the closing flag of the frame */ 4784 /* allowing the frame size to be computed. */ 4785 4786 usc_OutReg( info, RCLR, RCLRVALUE ); 4787 4788 usc_RCmd( info, RCmd_SelectRicrdma_level ); 4789 4790 /* Receive Interrupt Control Register (RICR) 4791 * 4792 * <15..8> ? RxFIFO DMA Request Level 4793 * <7> 0 Exited Hunt IA (Interrupt Arm) 4794 * <6> 0 Idle Received IA 4795 * <5> 0 Break/Abort IA 4796 * <4> 0 Rx Bound IA 4797 * <3> 1 Queued status reflects oldest 2 bytes in FIFO 4798 * <2> 0 Abort/PE IA 4799 * <1> 1 Rx Overrun IA 4800 * <0> 0 Select TC0 value for readback 4801 * 4802 * 0000 0000 0000 1000 = 0x000a 4803 */ 4804 4805 /* Carry over the Exit Hunt and Idle Received bits */ 4806 /* in case they have been armed by usc_ArmEvents. 
*/ 4807 4808 RegValue = usc_InReg( info, RICR ) & 0xc0; 4809 4810 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4811 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); 4812 else 4813 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) ); 4814 4815 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ 4816 4817 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 4818 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 4819 4820 /* Transmit mode Register (TMR) 4821 * 4822 * <15..13> 000 encoding 4823 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4824 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) 4825 * <9> 0 1 = Tx CRC Enabled 4826 * <8> 0 1 = Append CRC to end of transmit frame 4827 * <7..6> 00 Transmit parity Even 4828 * <5> 0 Transmit parity Disabled 4829 * <4..2> 000 Tx Char Length = 8 bits 4830 * <1..0> 00 Disable Transmitter 4831 * 4832 * 0000 0100 0000 0000 = 0x0400 4833 */ 4834 4835 RegValue = 0x0400; 4836 4837 switch ( info->params.encoding ) { 4838 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4839 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4840 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4841 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4842 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4843 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4844 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4845 } 4846 4847 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4848 RegValue |= BIT9 + BIT8; 4849 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4850 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); 4851 4852 usc_OutReg( info, TMR, RegValue ); 4853 4854 usc_set_txidle( info ); 4855 4856 4857 usc_TCmd( info, TCmd_SelectTicrdma_level ); 4858 4859 /* Transmit Interrupt Control Register (TICR) 4860 * 4861 * <15..8> ? 
Transmit FIFO DMA Level 4862 * <7> 0 Present IA (Interrupt Arm) 4863 * <6> 0 Idle Sent IA 4864 * <5> 1 Abort Sent IA 4865 * <4> 1 EOF/EOM Sent IA 4866 * <3> 0 CRC Sent IA 4867 * <2> 1 1 = Wait for SW Trigger to Start Frame 4868 * <1> 1 Tx Underrun IA 4869 * <0> 0 TC0 constant on read back 4870 * 4871 * 0000 0000 0011 0110 = 0x0036 4872 */ 4873 4874 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4875 usc_OutReg( info, TICR, 0x0736 ); 4876 else 4877 usc_OutReg( info, TICR, 0x1436 ); 4878 4879 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 4880 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 4881 4882 /* 4883 ** Transmit Command/Status Register (TCSR) 4884 ** 4885 ** <15..12> 0000 TCmd 4886 ** <11> 0/1 UnderWait 4887 ** <10..08> 000 TxIdle 4888 ** <7> x PreSent 4889 ** <6> x IdleSent 4890 ** <5> x AbortSent 4891 ** <4> x EOF/EOM Sent 4892 ** <3> x CRC Sent 4893 ** <2> x All Sent 4894 ** <1> x TxUnder 4895 ** <0> x TxEmpty 4896 ** 4897 ** 0000 0000 0000 0000 = 0x0000 4898 */ 4899 info->tcsr_value = 0; 4900 4901 if ( !PreSL1660 ) 4902 info->tcsr_value |= TCSR_UNDERWAIT; 4903 4904 usc_OutReg( info, TCSR, info->tcsr_value ); 4905 4906 /* Clock mode Control Register (CMCR) 4907 * 4908 * <15..14> 00 counter 1 Source = Disabled 4909 * <13..12> 00 counter 0 Source = Disabled 4910 * <11..10> 11 BRG1 Input is TxC Pin 4911 * <9..8> 11 BRG0 Input is TxC Pin 4912 * <7..6> 01 DPLL Input is BRG1 Output 4913 * <5..3> XXX TxCLK comes from Port 0 4914 * <2..0> XXX RxCLK comes from Port 1 4915 * 4916 * 0000 1111 0111 0111 = 0x0f77 4917 */ 4918 4919 RegValue = 0x0f40; 4920 4921 if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) 4922 RegValue |= 0x0003; /* RxCLK from DPLL */ 4923 else if ( info->params.flags & HDLC_FLAG_RXC_BRG ) 4924 RegValue |= 0x0004; /* RxCLK from BRG0 */ 4925 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) 4926 RegValue |= 0x0006; /* RxCLK from TXC Input */ 4927 else 4928 RegValue |= 0x0007; /* RxCLK from Port1 */ 4929 4930 if ( info->params.flags & HDLC_FLAG_TXC_DPLL 
) 4931 RegValue |= 0x0018; /* TxCLK from DPLL */ 4932 else if ( info->params.flags & HDLC_FLAG_TXC_BRG ) 4933 RegValue |= 0x0020; /* TxCLK from BRG0 */ 4934 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN) 4935 RegValue |= 0x0038; /* RxCLK from TXC Input */ 4936 else 4937 RegValue |= 0x0030; /* TxCLK from Port0 */ 4938 4939 usc_OutReg( info, CMCR, RegValue ); 4940 4941 4942 /* Hardware Configuration Register (HCR) 4943 * 4944 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4 4945 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div 4946 * <12> 0 CVOK:0=report code violation in biphase 4947 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4 4948 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level 4949 * <7..6> 00 reserved 4950 * <5> 0 BRG1 mode:0=continuous,1=single cycle 4951 * <4> X BRG1 Enable 4952 * <3..2> 00 reserved 4953 * <1> 0 BRG0 mode:0=continuous,1=single cycle 4954 * <0> 0 BRG0 Enable 4955 */ 4956 4957 RegValue = 0x0000; 4958 4959 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) { 4960 u32 XtalSpeed; 4961 u32 DpllDivisor; 4962 u16 Tc; 4963 4964 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */ 4965 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */ 4966 4967 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4968 XtalSpeed = 11059200; 4969 else 4970 XtalSpeed = 14745600; 4971 4972 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { 4973 DpllDivisor = 16; 4974 RegValue |= BIT10; 4975 } 4976 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { 4977 DpllDivisor = 8; 4978 RegValue |= BIT11; 4979 } 4980 else 4981 DpllDivisor = 32; 4982 4983 /* Tc = (Xtal/Speed) - 1 */ 4984 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 4985 /* then rounding up gives a more precise time constant. Instead */ 4986 /* of rounding up and then subtracting 1 we just don't subtract */ 4987 /* the one in this case. 
*/ 4988 4989 /*-------------------------------------------------- 4990 * ejz: for DPLL mode, application should use the 4991 * same clock speed as the partner system, even 4992 * though clocking is derived from the input RxData. 4993 * In case the user uses a 0 for the clock speed, 4994 * default to 0xffffffff and don't try to divide by 4995 * zero 4996 *--------------------------------------------------*/ 4997 if ( info->params.clock_speed ) 4998 { 4999 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); 5000 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) 5001 / info->params.clock_speed) ) 5002 Tc--; 5003 } 5004 else 5005 Tc = -1; 5006 5007 5008 /* Write 16-bit Time Constant for BRG1 */ 5009 usc_OutReg( info, TC1R, Tc ); 5010 5011 RegValue |= BIT4; /* enable BRG1 */ 5012 5013 switch ( info->params.encoding ) { 5014 case HDLC_ENCODING_NRZ: 5015 case HDLC_ENCODING_NRZB: 5016 case HDLC_ENCODING_NRZI_MARK: 5017 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; 5018 case HDLC_ENCODING_BIPHASE_MARK: 5019 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; 5020 case HDLC_ENCODING_BIPHASE_LEVEL: 5021 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break; 5022 } 5023 } 5024 5025 usc_OutReg( info, HCR, RegValue ); 5026 5027 5028 /* Channel Control/status Register (CCSR) 5029 * 5030 * <15> X RCC FIFO Overflow status (RO) 5031 * <14> X RCC FIFO Not Empty status (RO) 5032 * <13> 0 1 = Clear RCC FIFO (WO) 5033 * <12> X DPLL Sync (RW) 5034 * <11> X DPLL 2 Missed Clocks status (RO) 5035 * <10> X DPLL 1 Missed Clock status (RO) 5036 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 5037 * <7> X SDLC Loop On status (RO) 5038 * <6> X SDLC Loop Send status (RO) 5039 * <5> 1 Bypass counters for TxClk and RxClk (RW) 5040 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 5041 * <1..0> 00 reserved 5042 * 5043 * 0000 0000 0010 0000 = 0x0020 5044 */ 5045 5046 usc_OutReg( info, CCSR, 0x1020 ); 5047 5048 5049 if ( 
info->params.flags & HDLC_FLAG_AUTO_CTS ) { 5050 usc_OutReg( info, SICR, 5051 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); 5052 } 5053 5054 5055 /* enable Master Interrupt Enable bit (MIE) */ 5056 usc_EnableMasterIrqBit( info ); 5057 5058 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA + 5059 TRANSMIT_STATUS + TRANSMIT_DATA + MISC); 5060 5061 /* arm RCC underflow interrupt */ 5062 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); 5063 usc_EnableInterrupts(info, MISC); 5064 5065 info->mbre_bit = 0; 5066 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5067 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5068 info->mbre_bit = BIT8; 5069 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ 5070 5071 if (info->bus_type == MGSL_BUS_TYPE_ISA) { 5072 /* Enable DMAEN (Port 7, Bit 14) */ 5073 /* This connects the DMA request signal to the ISA bus */ 5074 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); 5075 } 5076 5077 /* DMA Control Register (DCR) 5078 * 5079 * <15..14> 10 Priority mode = Alternating Tx/Rx 5080 * 01 Rx has priority 5081 * 00 Tx has priority 5082 * 5083 * <13> 1 Enable Priority Preempt per DCR<15..14> 5084 * (WARNING DCR<11..10> must be 00 when this is 1) 5085 * 0 Choose activate channel per DCR<11..10> 5086 * 5087 * <12> 0 Little Endian for Array/List 5088 * <11..10> 00 Both Channels can use each bus grant 5089 * <9..6> 0000 reserved 5090 * <5> 0 7 CLK - Minimum Bus Re-request Interval 5091 * <4> 0 1 = drive D/C and S/D pins 5092 * <3> 1 1 = Add one wait state to all DMA cycles. 5093 * <2> 0 1 = Strobe /UAS on every transfer. 
5094 * <1..0> 11 Addr incrementing only affects LS24 bits 5095 * 5096 * 0110 0000 0000 1011 = 0x600b 5097 */ 5098 5099 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 5100 /* PCI adapter does not need DMA wait state */ 5101 usc_OutDmaReg( info, DCR, 0xa00b ); 5102 } 5103 else 5104 usc_OutDmaReg( info, DCR, 0x800b ); 5105 5106 5107 /* Receive DMA mode Register (RDMR) 5108 * 5109 * <15..14> 11 DMA mode = Linked List Buffer mode 5110 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry 5111 * <12> 1 Clear count of List Entry after fetching 5112 * <11..10> 00 Address mode = Increment 5113 * <9> 1 Terminate Buffer on RxBound 5114 * <8> 0 Bus Width = 16bits 5115 * <7..0> ? status Bits (write as 0s) 5116 * 5117 * 1111 0010 0000 0000 = 0xf200 5118 */ 5119 5120 usc_OutDmaReg( info, RDMR, 0xf200 ); 5121 5122 5123 /* Transmit DMA mode Register (TDMR) 5124 * 5125 * <15..14> 11 DMA mode = Linked List Buffer mode 5126 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry 5127 * <12> 1 Clear count of List Entry after fetching 5128 * <11..10> 00 Address mode = Increment 5129 * <9> 1 Terminate Buffer on end of frame 5130 * <8> 0 Bus Width = 16bits 5131 * <7..0> ? 
status Bits (Read Only so write as 0) 5132 * 5133 * 1111 0010 0000 0000 = 0xf200 5134 */ 5135 5136 usc_OutDmaReg( info, TDMR, 0xf200 ); 5137 5138 5139 /* DMA Interrupt Control Register (DICR) 5140 * 5141 * <15> 1 DMA Interrupt Enable 5142 * <14> 0 1 = Disable IEO from USC 5143 * <13> 0 1 = Don't provide vector during IntAck 5144 * <12> 1 1 = Include status in Vector 5145 * <10..2> 0 reserved, Must be 0s 5146 * <1> 0 1 = Rx DMA Interrupt Enabled 5147 * <0> 0 1 = Tx DMA Interrupt Enabled 5148 * 5149 * 1001 0000 0000 0000 = 0x9000 5150 */ 5151 5152 usc_OutDmaReg( info, DICR, 0x9000 ); 5153 5154 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ 5155 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ 5156 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ 5157 5158 /* Channel Control Register (CCR) 5159 * 5160 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) 5161 * <13> 0 Trigger Tx on SW Command Disabled 5162 * <12> 0 Flag Preamble Disabled 5163 * <11..10> 00 Preamble Length 5164 * <9..8> 00 Preamble Pattern 5165 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) 5166 * <5> 0 Trigger Rx on SW Command Disabled 5167 * <4..0> 0 reserved 5168 * 5169 * 1000 0000 1000 0000 = 0x8080 5170 */ 5171 5172 RegValue = 0x8080; 5173 5174 switch ( info->params.preamble_length ) { 5175 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; 5176 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; 5177 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break; 5178 } 5179 5180 switch ( info->params.preamble ) { 5181 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break; 5182 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; 5183 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; 5184 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break; 5185 } 5186 5187 usc_OutReg( info, CCR, RegValue ); 5188 5189 5190 /* 5191 * Burst/Dwell Control Register 5192 * 5193 * <15..8> 0x20 
Maximum number of transfers per bus grant
 * <7..0>	0x00	Maximum number of clock cycles per bus grant
 */

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* don't limit bus occupancy on PCI adapter */
		usc_OutDmaReg( info, BDCR, 0x0000 );
	}
	else
		usc_OutDmaReg( info, BDCR, 0x2000 );

	/* leave Tx/Rx disabled; they are enabled later by the caller */
	usc_stop_transmitter(info);
	usc_stop_receiver(info);

}	/* end of usc_set_sdlc_mode() */

/* usc_enable_loopback()
 *
 * Set the 16C32 for internal loopback mode.
 * The TxCLK and RxCLK signals are generated from the BRG0 and
 * the TxD is looped back to the RxD internally.
 *
 * Arguments:		info	pointer to device instance data
 *			enable	1 = enable loopback, 0 = disable
 * Return Value:	None
 */
static void usc_enable_loopback(struct mgsl_struct *info, int enable)
{
	if (enable) {
		/* blank external TXD output */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));

		/* Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 * 0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );

		/* Write 16-bit Time Constant for BRG0 */
		/* use clock speed if available, otherwise use 8 for diagnostics */
		/* (PCI adapters use an 11.0592 MHz xtal, ISA a 14.7456 MHz xtal) */
		if (info->params.clock_speed) {
			if (info->bus_type == MGSL_BUS_TYPE_PCI)
				usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
			else
				usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
		} else
			usc_OutReg(info, TC0R, (u16)8);

		/* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
		   mode = Continuous Set Bit 0 to enable BRG0.  */
		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
		usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));

		/* set Internal Data loopback mode */
		info->loopback_bits = 0x300;
		outw( 0x0300, info->io_base + CCAR );
	} else {
		/* enable external TXD output */
		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));

		/* clear Internal Data loopback mode */
		info->loopback_bits = 0;
		outw( 0,info->io_base + CCAR );
	}

}	/* end of usc_enable_loopback() */

/* usc_enable_aux_clock()
 *
 * Enable the AUX clock output at the specified frequency.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *	data_rate	data rate of clock in bits per second
 *			A data rate of 0 disables the AUX clock.
 *
 * Return Value:	None
 */
static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
{
	u32 XtalSpeed;
	u16 Tc;

	if ( data_rate ) {
		/* select crystal frequency for this adapter's bus type */
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			XtalSpeed = 11059200;
		else
			XtalSpeed = 14745600;

		/* Tc = (Xtal/Speed) - 1 */
		/* If twice the remainder of (Xtal/Speed) is greater than Speed */
		/* then rounding up gives a more precise time constant. Instead */
		/* of rounding up and then subtracting 1 we just don't subtract */
		/* the one in this case. */

		Tc = (u16)(XtalSpeed/data_rate);
		if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
			Tc--;

		/* Write 16-bit Time Constant for BRG0 */
		usc_OutReg( info, TC0R, Tc );

		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
 */

		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );

		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
		usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

}	/* end of usc_enable_aux_clock() */

/*
 *
 * usc_process_rxoverrun_sync()
 *
 *		This function processes a receive overrun by resetting the
 *		receive DMA buffers and issuing a Purge Rx FIFO command
 *		to allow the receiver to continue receiving.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *
 * Return Value: None
 */
static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
{
	int start_index;
	int end_index;
	int frame_start_index;
	bool start_of_frame_found = false;
	bool end_of_frame_found = false;
	bool reprogram_dma = false;

	DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
	u32 phys_addr;

	/* halt receive DMA and flush receive FIFO before examining buffers */
	usc_DmaCmd( info, DmaCmd_PauseRxChannel );
	usc_RCmd( info, RCmd_EnterHuntmode );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* CurrentRxBuffer points to the 1st buffer of the next */
	/* possibly available receive frame. */

	frame_start_index = start_index = end_index = info->current_rx_buffer;

	/* Search for an unfinished string of buffers. This means */
	/* that a receive frame started (at least one buffer with */
	/* count set to zero) but there is no terminating buffer */
	/* (status set to non-zero). */

	while( !buffer_list[end_index].count )
	{
		/* Count field has been reset to zero by 16C32. */
		/* This buffer is currently in use.
 */

		if ( !start_of_frame_found )
		{
			start_of_frame_found = true;
			frame_start_index = end_index;
			end_of_frame_found = false;
		}

		if ( buffer_list[end_index].status )
		{
			/* Status field has been set by 16C32. */
			/* This is the last buffer of a received frame. */

			/* We want to leave the buffers for this frame intact. */
			/* Move on to next possible frame. */

			start_of_frame_found = false;
			end_of_frame_found = true;
		}

		/* advance to next buffer entry in linked list */
		end_index++;
		if ( end_index == info->rx_buffer_count )
			end_index = 0;

		if ( start_index == end_index )
		{
			/* The entire list has been searched with all Counts == 0 and */
			/* all Status == 0. The receive buffers are */
			/* completely screwed, reset all receive buffers! */
			mgsl_reset_rx_dma_buffers( info );
			frame_start_index = 0;
			start_of_frame_found = false;
			reprogram_dma = true;
			break;
		}
	}

	if ( start_of_frame_found && !end_of_frame_found )
	{
		/* There is an unfinished string of receive DMA buffers */
		/* as a result of the receiver overrun. */

		/* Reset the buffers for the unfinished frame */
		/* and reprogram the receive DMA controller to start */
		/* at the 1st buffer of unfinished frame. */

		start_index = frame_start_index;

		do
		{
			/* NOTE(review): type-punned 32/64-bit store over the 16-bit
			 * count field and whatever follows it in DMABUFFERENTRY;
			 * relies on struct layout, and `unsigned long` is 8 bytes
			 * on 64-bit targets -- verify against DMABUFFERENTRY layout.
			 */
			*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;

			/* Adjust index for wrap around.
 */
			if ( start_index == info->rx_buffer_count )
				start_index = 0;

		} while( start_index != end_index );

		reprogram_dma = true;
	}

	if ( reprogram_dma )
	{
		usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
		usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);

		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}
	else
	{
		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
		usc_RTCmd( info, RTCmd_PurgeRxFifo );
	}

}	/* end of usc_process_rxoverrun_sync() */

/* usc_stop_receiver()
 *
 *	Disable USC receiver
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_receiver( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* Disable receive DMA channel.
 */
	/* This also disables receive DMA channel interrupts */
	usc_DmaCmd( info, DmaCmd_ResetRxChannel );

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
	usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

	/* This empties the receive FIFO and loads the RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	info->rx_enabled = false;
	info->rx_overflow = false;
	info->rx_rcc_underrun = false;

}	/* end of usc_stop_receiver() */

/* usc_start_receiver()
 *
 *	Enable the USC receiver
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_receiver( struct mgsl_struct *info )
{
	u32 phys_addr;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* start from a clean state: reset DMA buffer list and receiver */
	mgsl_reset_rx_dma_buffers( info );
	usc_stop_receiver( info );

	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* DMA mode Transfers */
		/* Program the DMA controller. */
		/* Enable the DMA controller end of buffer interrupt. */

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[0].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	} else {
		/* async mode: per-character interrupts, no DMA */
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
		usc_EnableInterrupts(info, RECEIVE_DATA);

		usc_RTCmd( info, RTCmd_PurgeRxFifo );
		usc_RCmd( info, RCmd_EnterHuntmode );

		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}

	usc_OutReg( info, CCSR, 0x1020 );

	info->rx_enabled = true;

}	/* end of usc_start_receiver() */

/* usc_start_transmitter()
 *
 *	Enable the USC transmitter and send a transmit frame if
 *	one is loaded in the DMA buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_transmitter( struct mgsl_struct *info )
{
	u32 phys_addr;
	unsigned int FrameSize;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if ( info->xmit_cnt ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes.
 */

		info->drop_rts_on_tx_done = false;

		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
			usc_get_serial_signals( info );
			if ( !(info->serial_signals & SerialSignal_RTS) ) {
				info->serial_signals |= SerialSignal_RTS;
				usc_set_serial_signals( info );
				info->drop_rts_on_tx_done = true;
			}
		}


		if ( info->params.mode == MGSL_MODE_ASYNC ) {
			/* async mode: interrupt-driven FIFO loading, no DMA */
			if ( !info->tx_active ) {
				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
				usc_EnableInterrupts(info, TRANSMIT_DATA);
				usc_load_txfifo(info);
			}
		} else {
			/* Disable transmit DMA controller while programming. */
			usc_DmaCmd( info, DmaCmd_ResetTxChannel );

			/* Transmit DMA buffer is loaded, so program USC */
			/* to send the frame contained in the buffers.	*/

			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;

			/* if operating in Raw sync mode, reset the rcc component
			 * of the tx dma buffer entry, otherwise, the serial controller
			 * will send a closing sync char after this count.
			 */
			if ( info->params.mode == MGSL_MODE_RAW )
				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;

			/* Program the Transmit Character Length Register (TCLR) */
			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
			usc_OutReg( info, TCLR, (u16)FrameSize );

			usc_RTCmd( info, RTCmd_PurgeTxFifo );

			/* Program the address of the 1st DMA Buffer Entry in linked list */
			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );

			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
			usc_EnableInterrupts( info, TRANSMIT_STATUS );

			if ( info->params.mode == MGSL_MODE_RAW &&
					info->num_tx_dma_buffers > 1 ) {
			   /* When running external sync mode, attempt to 'stream' transmit  */
			   /* by filling tx dma buffers as they become available. To do this */
			   /* we need to enable Tx DMA EOB Status interrupts :		     */
			   /*								     */
			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */

			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
			}

			/* Initialize Transmit DMA Channel */
			usc_DmaCmd( info, DmaCmd_InitTxChannel );

			usc_TCmd( info, TCmd_SendFrame );

			/* 5 second watchdog in case the frame never completes */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		info->tx_active = true;
	}

	if ( !info->tx_enabled ) {
		info->tx_enabled = true;
		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
		else
			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
	}

}	/* end of usc_start_transmitter() */

/* usc_stop_transmitter()
 *
 *	Stops the transmitter and DMA
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_transmitter( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* cancel the tx completion watchdog armed by usc_start_transmitter() */
	del_timer(&info->tx_timer);

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );

	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	info->tx_enabled = false;
	info->tx_active = false;

}	/* end of usc_stop_transmitter() */

/* usc_load_txfifo()
 *
 *	Fill the transmit FIFO until the FIFO is full or
 *	there is no more data to load.
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void usc_load_txfifo( struct mgsl_struct *info )
{
	int Fifocount;
	u8 TwoBytes[2];

	if ( !info->xmit_cnt && !info->x_char )
		return;

	/* Select transmit FIFO status readback in TICR */
	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	/* load the Transmit FIFO until FIFOs full or all data sent */

	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
			/* write a 16-bit word from transmit buffer to 16C32 */

			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);

			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);

			info->xmit_cnt -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
				info->io_base + CCAR );

			if (info->x_char) {
				/* transmit pending high priority char */
				/* (x_char takes precedence over buffered data) */
				outw( info->x_char,info->io_base + CCAR );
				info->x_char = 0;
			} else {
				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
				info->xmit_cnt--;
			}
			info->icount.tx++;
		}
	}

}	/* end of usc_load_txfifo() */

/* usc_reset()
 *
 *	Reset the adapter to a known state and prepare it for further use.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_reset( struct mgsl_struct *info )
{
	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		int i;
		u32 readval;

		/* Set BIT30 of Misc Control Register */
		/* (Local Control Register 0x50) to force reset of USC. */

		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

		info->misc_ctrl_value |= BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/*
		 * Force at least 170ns delay before clearing
		 * reset bit. Each read from LCR takes at least
		 * 30ns so 10 times for 300ns to be safe.
		 * (readval is intentionally discarded; the reads
		 * exist only to generate bus cycles for the delay)
		 */
		for(i=0;i<10;i++)
			readval = *MiscCtrl;

		info->misc_ctrl_value &= ~BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		*LCR0BRDR = BUS_DESCRIPTOR(
			1,		// Write Strobe Hold (0-3)
			2,		// Write Strobe Delay (0-3)
			2,		// Read Strobe Delay  (0-3)
			0,		// NWDD (Write data-data) (0-3)
			4,		// NWAD (Write Addr-data) (0-31)
			0,		// NXDA (Read/Write Data-Addr) (0-3)
			0,		// NRDD (Read Data-Data) (0-3)
			5		// NRAD (Read Addr-Data) (0-31)
			);
	} else {
		/* do HW reset */
		outb( 0,info->io_base + 8 );
	}

	info->mbre_bit = 0;
	info->loopback_bits = 0;
	info->usc_idle_mode = 0;

	/*
	 * Program the Bus Configuration Register (BCR)
	 *
	 * <15>		0	Don't use separate address
	 * <14..6>	0	reserved
	 * <5..4>	00	IAckmode = Default, don't care
	 * <3>		1	Bus Request Totem Pole output
	 * <2>		1	Use 16 Bit data bus
	 * <1>		0	IRQ Totem Pole output
	 * <0>		0	Don't Shift Right Addr
	 *
	 * 0000 0000 0000 1100 = 0x000c
	 *
	 * By writing to io_base + SDPIN the Wait/Ack pin is
	 * programmed to work as a Wait pin.
	 */

	outw( 0x000c,info->io_base + SDPIN );


	outw( 0,info->io_base );
	outw( 0,info->io_base + CCAR );

	/* select little endian byte ordering */
	usc_RTCmd( info, RTCmd_SelectLittleEndian );


	/* Port Control Register (PCR)
	 *
	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
	 * <11..10>	00	Port 5 is Input (No Connect, Don't Care)
	 * <9..8>	00	Port 4 is Input (No Connect, Don't Care)
	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
	 *
	 * 1111 0000 1111 0101 = 0xf0f5
	 */

	usc_OutReg( info, PCR, 0xf0f5 );


	/*
	 * Input/Output Control Register
	 *
	 * <15..14>	00	CTS is active low input
	 * <13..12>	00	DCD is active low input
	 * <11..10>	00	TxREQ pin is input (DSR)
	 * <9..8>	00	RxREQ pin is input (RI)
	 * <7..6>	00	TxD is output (Transmit Data)
	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
	 * <2..0>	100	RxC is Output (drive with BRG0)
	 *
	 * 0000 0000 0000 0100 = 0x0004
	 */

	usc_OutReg( info, IOCR, 0x0004 );

}	/* end of usc_reset() */

/* usc_set_async_mode()
 *
 *	Program adapter for asynchronous communications.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_async_mode( struct mgsl_struct *info )
{
	u16 RegValue;

	/* disable interrupts while programming USC */
	usc_DisableMasterIrqBit( info );

	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */

	usc_loopback_frame( info );

	/* Channel mode Register (CMR)
	 *
	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
	 * <13..12>	00	00 = 16X Clock
	 * <11..8>	0000	Transmitter mode = Asynchronous
	 * <7..6>	00	reserved?
	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
	 * <3..0>	0000	Receiver mode = Asynchronous
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;
	if ( info->params.stop_bits != 1 )
		RegValue |= BIT14;
	usc_OutReg( info, CMR, RegValue );


	/* Receiver mode Register (RMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Even parity
	 * <5>		0	parity disabled
	 * <4..2>	000	Receive Char Length = 8 bits
	 * <1..0>	00	Disable Receiver
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4+BIT3+BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;
	}

	usc_OutReg( info, RMR, RegValue );


	/* Set IRQ trigger level */

	usc_RCmd( info, RCmd_SelectRicrIntLevel );


	/* Receive Interrupt Control Register (RICR)
	 *
	 * <15..8>	?	RxFIFO IRQ Request Level
	 *
	 * Note: For async mode the receive FIFO level must be set
	 * to 0 to avoid the situation where the FIFO contains fewer bytes
	 * than the trigger level and no more data is expected.
	 *
	 * <7>		0	Exited Hunt IA (Interrupt Arm)
	 * <6>		0	Idle Received IA
	 * <5>		0	Break/Abort IA
	 * <4>		0	Rx Bound IA
	 * <3>		0	Queued status reflects oldest byte in FIFO
	 * <2>		0	Abort/PE IA
	 * <1>		0	Rx Overrun IA
	 * <0>		0	Select TC0 value for readback
	 *
	 * 0000 0000 0000 0000 = 0x0000 (FIFO level 0 in MSB)
	 */

	usc_OutReg( info, RICR, 0x0000 );

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );


	/* Transmit mode Register (TMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Transmit parity Even
	 * <5>		0	Transmit parity Disabled
	 * <4..2>	000	Tx Char Length = 8 bits
	 * <1..0>	00	Disable Transmitter
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4+BIT3+BIT2;

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;
	}

	usc_OutReg( info, TMR, RegValue );

	usc_set_txidle( info );


	/* Set IRQ trigger level */

	usc_TCmd( info, TCmd_SelectTicrIntLevel );


	/* Transmit Interrupt Control Register (TICR)
	 *
	 * <15..8>	?	Transmit FIFO IRQ Level
	 * <7>		0	Present IA (Interrupt Arm)
	 * <6>		1	Idle Sent IA
	 * <5>		0	Abort Sent IA
	 * <4>		0	EOF/EOM Sent IA
	 * <3>		0	CRC Sent IA
	 * <2>		0	1 = Wait for SW Trigger to Start Frame
	 * <1>		0	Tx Underrun IA
	 * <0>		0	TC0 constant on read back
	 *
	 * 0001 1111 0100 0000 = 0x1f40 (FIFO level 0x1f in MSB)
	 */

	usc_OutReg( info, TICR, 0x1f40 );

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );

	usc_enable_async_clock( info, info->params.data_rate );


	/* Channel Control/status Register (CCSR)
	 *
	 * <15>		X	RCC FIFO Overflow status (RO)
	 * <14>		X	RCC FIFO Not Empty status (RO)
	 * <13>		0	1 = Clear RCC FIFO (WO)
	 * <12>		X	DPLL in Sync status (RO)
	 * <11>		X	DPLL 2 Missed Clocks status (RO)
	 * <10>		X	DPLL 1 Missed Clock status (RO)
	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
	 * <7>		X	SDLC Loop On status (RO)
	 * <6>		X	SDLC Loop Send status (RO)
	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
	 * <4..2>	000	Last Char of SDLC frame has 8 bits (RW)
	 * <1..0>	00	reserved
	 *
	 * 0000 0000 0010 0000 = 0x0020
	 */

	usc_OutReg( info, CCSR, 0x0020 );

	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableMasterIrqBit( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	if (info->params.loopback) {
		info->loopback_bits = 0x300;
		outw(0x0300, info->io_base + CCAR);
	}

}	/* end of usc_set_async_mode() */

/* usc_loopback_frame()
 *
 *	Loop back a small (2
 byte) dummy SDLC frame.
 *	Interrupts and DMA are NOT used. The purpose of this is to
 *	clear any 'stale' status info left over from running in async mode.
 *
 *	The 16C32 shows the strange behaviour of marking the 1st
 *	received SDLC frame with a CRC error even when there is no
 *	CRC error. To get around this a small dummy frame of 2 bytes
 *	is looped back when switching from async to sync mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_loopback_frame( struct mgsl_struct *info )
{
	int i;
	unsigned long oldmode = info->params.mode;

	/* temporarily force HDLC mode for the dummy frame */
	info->params.mode = MGSL_MODE_HDLC;

	usc_DisableMasterIrqBit( info );

	usc_set_sdlc_mode( info );
	usc_enable_loopback( info, 1 );

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, 0 );

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length = 8-Bits
	 * <9..8>	01	Preamble Pattern = flags
	 * <7..6>	10	Don't use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 * 0000 0001 0000 0000 = 0x0100
	 */

	usc_OutReg( info, CCR, 0x0100 );

	/* SETUP RECEIVER */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );
	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

	/* SETUP TRANSMITTER */
	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
	usc_OutReg( info, TCLR, 2 );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* unlatch Tx status bits, and start transmit channel. */
	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
	outw(0,info->io_base + DATAREG);

	/* ENABLE TRANSMITTER */
	usc_TCmd( info, TCmd_SendFrame );
	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

	/* WAIT FOR RECEIVE COMPLETE */
	/* bounded busy-poll of RCSR (no IRQs enabled here); gives up
	 * silently after 1000 reads if none of the status bits set */
	for (i=0 ; i<1000 ; i++)
		if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
			break;

	/* clear Internal Data loopback mode */
	usc_enable_loopback(info, 0);

	usc_EnableMasterIrqBit(info);

	info->params.mode = oldmode;

}	/* end of usc_loopback_frame() */

/* usc_set_sync_mode()	Programs the USC for SDLC communications.
 *
 * Arguments:		info	pointer to adapter info structure
 * Return Value:	None
 */
static void usc_set_sync_mode( struct mgsl_struct *info )
{
	usc_loopback_frame( info );
	usc_set_sdlc_mode( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	usc_enable_aux_clock(info, info->params.clock_speed);

	if (info->params.loopback)
		usc_enable_loopback(info,1);

}	/* end of usc_set_sync_mode() */

/* usc_set_txidle()	Set the HDLC idle mode for the transmitter.
6167 * 6168 * Arguments: info pointer to device instance data 6169 * Return Value: None 6170 */ 6171static void usc_set_txidle( struct mgsl_struct *info ) 6172{ 6173 u16 usc_idle_mode = IDLEMODE_FLAGS; 6174 6175 /* Map API idle mode to USC register bits */ 6176 6177 switch( info->idle_mode ){ 6178 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break; 6179 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break; 6180 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break; 6181 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break; 6182 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break; 6183 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break; 6184 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break; 6185 } 6186 6187 info->usc_idle_mode = usc_idle_mode; 6188 //usc_OutReg(info, TCSR, usc_idle_mode); 6189 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */ 6190 info->tcsr_value += usc_idle_mode; 6191 usc_OutReg(info, TCSR, info->tcsr_value); 6192 6193 /* 6194 * if SyncLink WAN adapter is running in external sync mode, the 6195 * transmitter has been set to Monosync in order to try to mimic 6196 * a true raw outbound bit stream. Monosync still sends an open/close 6197 * sync char at the start/end of a frame. 
Try to match those sync 6198 * patterns to the idle mode set here 6199 */ 6200 if ( info->params.mode == MGSL_MODE_RAW ) { 6201 unsigned char syncpat = 0; 6202 switch( info->idle_mode ) { 6203 case HDLC_TXIDLE_FLAGS: 6204 syncpat = 0x7e; 6205 break; 6206 case HDLC_TXIDLE_ALT_ZEROS_ONES: 6207 syncpat = 0x55; 6208 break; 6209 case HDLC_TXIDLE_ZEROS: 6210 case HDLC_TXIDLE_SPACE: 6211 syncpat = 0x00; 6212 break; 6213 case HDLC_TXIDLE_ONES: 6214 case HDLC_TXIDLE_MARK: 6215 syncpat = 0xff; 6216 break; 6217 case HDLC_TXIDLE_ALT_MARK_SPACE: 6218 syncpat = 0xaa; 6219 break; 6220 } 6221 6222 usc_SetTransmitSyncChars(info,syncpat,syncpat); 6223 } 6224 6225} /* end of usc_set_txidle() */ 6226 6227/* usc_get_serial_signals() 6228 * 6229 * Query the adapter for the state of the V24 status (input) signals. 6230 * 6231 * Arguments: info pointer to device instance data 6232 * Return Value: None 6233 */ 6234static void usc_get_serial_signals( struct mgsl_struct *info ) 6235{ 6236 u16 status; 6237 6238 /* clear all serial signals except DTR and RTS */ 6239 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS; 6240 6241 /* Read the Misc Interrupt status Register (MISR) to get */ 6242 /* the V24 status signals. */ 6243 6244 status = usc_InReg( info, MISR ); 6245 6246 /* set serial signal bits to reflect MISR */ 6247 6248 if ( status & MISCSTATUS_CTS ) 6249 info->serial_signals |= SerialSignal_CTS; 6250 6251 if ( status & MISCSTATUS_DCD ) 6252 info->serial_signals |= SerialSignal_DCD; 6253 6254 if ( status & MISCSTATUS_RI ) 6255 info->serial_signals |= SerialSignal_RI; 6256 6257 if ( status & MISCSTATUS_DSR ) 6258 info->serial_signals |= SerialSignal_DSR; 6259 6260} /* end of usc_get_serial_signals() */ 6261 6262/* usc_set_serial_signals() 6263 * 6264 * Set the state of DTR and RTS based on contents of 6265 * serial_signals member of device extension. 
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_serial_signals( struct mgsl_struct *info )
{
	u16 Control;
	unsigned char V24Out = info->serial_signals;

	/* get the current value of the Port Control Register (PCR) */

	Control = usc_InReg( info, PCR );

	/* NOTE(review): the PCR output bits appear to be active low --
	 * the bit is CLEARED to assert the signal and SET to negate it.
	 * Confirm against the 16C32 Port Control Register description.
	 */
	if ( V24Out & SerialSignal_RTS )
		Control &= ~(BIT6);
	else
		Control |= BIT6;

	if ( V24Out & SerialSignal_DTR )
		Control &= ~(BIT4);
	else
		Control |= BIT4;

	usc_OutReg( info, PCR, Control );

}	/* end of usc_set_serial_signals() */

/* usc_enable_async_clock()
 *
 * Enable the async clock at the specified frequency.
 *
 * Arguments:		info		pointer to device instance data
 *			data_rate	data rate of clock in bps
 *					0 disables the AUX clock.
 * Return Value:	None
 */
static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
{
	if ( data_rate ) {
		/*
		 * Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 * 0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );


		/*
		 * Write 16-bit Time Constant for BRG0
		 * Time Constant = (ClkSpeed / data_rate) - 1
		 * ClkSpeed = 921600 (ISA), 691200 (PCI)
		 */

		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
		else
			usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );


		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
		 */

		usc_OutReg( info, HCR,
			    (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );


		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */

		usc_OutReg( info, IOCR,
			    (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

}	/* end of usc_enable_async_clock() */

/*
 * Buffer Structures:
 *
 * Normal memory access uses virtual addresses that can make discontiguous
 * physical memory pages appear to be contiguous in the virtual address
 * space (the processors memory mapping handles the conversions).
 *
 * DMA transfers require physically contiguous memory. This is because
 * the DMA system controller and DMA bus masters deal with memory using
 * only physical addresses.
 *
 * This causes a problem under Windows NT when large DMA buffers are
 * needed. Fragmentation of the nonpaged pool prevents allocations of
 * physically contiguous buffers larger than the PAGE_SIZE.
 *
 * However the 16C32 supports Bus Master Scatter/Gather DMA which
 * allows DMA transfers to physically discontiguous buffers. Information
 * about each data transfer buffer is contained in a memory structure
 * called a 'buffer entry'. A list of buffer entries is maintained
 * to track and control the use of the data transfer buffers.
 *
 * To support this strategy we will allocate sufficient PAGE_SIZE
 * contiguous memory buffers to allow for the total required buffer
 * space.
 *
 * The 16C32 accesses the list of buffer entries using Bus Master
 * DMA. Control information is read from the buffer entries by the
 * 16C32 to control data transfers. status information is written to
 * the buffer entries by the 16C32 to indicate the status of completed
 *
 * The CPU writes control information to the buffer entries to control
 * the 16C32 and reads status information from the buffer entries to
 * determine information about received and transmitted frames.
 *
 * Because the CPU and 16C32 (adapter) both need simultaneous access
 * to the buffer entries, the buffer entry memory is allocated with
 * HalAllocateCommonBuffer(). This restricts the size of the buffer
 * entry list to PAGE_SIZE.
 *
 * The actual data buffers on the other hand will only be accessed
 * by the CPU or the adapter but not by both simultaneously. This allows
 * Scatter/Gather packet based DMA procedures for using physically
 * discontiguous pages.
 */

/*
 * mgsl_reset_tx_dma_buffers()
 *
 * 	Set the count for all transmit buffers to 0 to indicate the
 * 	buffer is available for use and set the current buffer to the
 * 	first buffer. This effectively makes all buffers free and
 * 	discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* single 32-bit store clears the 16-bit count field and the
		 * adjacent field of the shared buffer entry in one access
		 * (NOTE(review): assumes the two u16 fields are adjacent in
		 * DMABUFFERENTRY -- confirm against the structure layout)
		 */
		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
	}

	info->current_tx_buffer = 0;
	info->start_tx_dma_buffer = 0;
	info->tx_dma_buffers_used = 0;

	/* reset the intermediate tx holding buffer bookkeeping as well */
	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_reset_tx_dma_buffers() */

/*
 * num_free_tx_dma_buffers()
 *
 * 	returns the number of free tx dma buffers available
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	number of free tx dma buffers
 */
static int num_free_tx_dma_buffers(struct mgsl_struct *info)
{
	return info->tx_buffer_count - info->tx_dma_buffers_used;
}

/*
 * mgsl_reset_rx_dma_buffers()
 *
 * 	Set the count for all receive buffers to DMABUFFERSIZE
 * 	and set the current buffer to the first buffer. This effectively
 * 	makes all buffers free and discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* single 32-bit store: sets count to DMABUFFERSIZE and zeroes
		 * the status field at once (equivalent to the two
		 * commented-out per-field assignments below)
		 */
		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
//		info->rx_buffer_list[i].count = DMABUFFERSIZE;
//		info->rx_buffer_list[i].status = 0;
	}

	info->current_rx_buffer = 0;

}	/* end of mgsl_reset_rx_dma_buffers() */

/*
 * mgsl_free_rx_frame_buffers()
 *
 * 	Free the receive buffers used by a received SDLC
 * 	frame such that the buffers can be reused.
 *
 * Arguments:
 *
 * 	info		pointer to device instance data
 * 	StartIndex	index of 1st receive buffer of frame
 * 	EndIndex	index of last receive buffer of frame
 *
 * Return Value:	None
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
{
	bool Done = false;
	DMABUFFERENTRY *pBufEntry;
	unsigned int Index;

	/* Starting with 1st buffer entry of the frame clear the status */
	/* field and set the count field to DMA Buffer Size. */

	Index = StartIndex;

	while( !Done ) {
		pBufEntry = &(info->rx_buffer_list[Index]);

		if ( Index == EndIndex ) {
			/* This is the last buffer of the frame! */
			Done = true;
		}

		/* reset current buffer for reuse */
//		pBufEntry->status = 0;
//		pBufEntry->count = DMABUFFERSIZE;
		*((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;

		/* advance to next buffer entry in circular linked list */
		Index++;
		if ( Index == info->rx_buffer_count )
			Index = 0;
	}

	/* set current buffer to next buffer after last buffer of frame */
	info->current_rx_buffer = Index;

}	/* end of mgsl_free_rx_frame_buffers() */

/* mgsl_get_rx_frame()
 *
 * 	This function attempts to return a received SDLC frame from the
 * 	receive DMA buffers. Only frames received without errors are returned.
6520 * 6521 * Arguments: info pointer to device extension 6522 * Return Value: true if frame returned, otherwise false 6523 */ 6524static bool mgsl_get_rx_frame(struct mgsl_struct *info) 6525{ 6526 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ 6527 unsigned short status; 6528 DMABUFFERENTRY *pBufEntry; 6529 unsigned int framesize = 0; 6530 bool ReturnCode = false; 6531 unsigned long flags; 6532 struct tty_struct *tty = info->port.tty; 6533 bool return_frame = false; 6534 6535 /* 6536 * current_rx_buffer points to the 1st buffer of the next available 6537 * receive frame. To find the last buffer of the frame look for 6538 * a non-zero status field in the buffer entries. (The status 6539 * field is set by the 16C32 after completing a receive frame. 6540 */ 6541 6542 StartIndex = EndIndex = info->current_rx_buffer; 6543 6544 while( !info->rx_buffer_list[EndIndex].status ) { 6545 /* 6546 * If the count field of the buffer entry is non-zero then 6547 * this buffer has not been used. (The 16C32 clears the count 6548 * field when it starts using the buffer.) If an unused buffer 6549 * is encountered then there are no frames available. 6550 */ 6551 6552 if ( info->rx_buffer_list[EndIndex].count ) 6553 goto Cleanup; 6554 6555 /* advance to next buffer entry in linked list */ 6556 EndIndex++; 6557 if ( EndIndex == info->rx_buffer_count ) 6558 EndIndex = 0; 6559 6560 /* if entire list searched then no frame available */ 6561 if ( EndIndex == StartIndex ) { 6562 /* If this occurs then something bad happened, 6563 * all buffers have been 'used' but none mark 6564 * the end of a frame. Reset buffers and receiver. 
6565 */ 6566 6567 if ( info->rx_enabled ){ 6568 spin_lock_irqsave(&info->irq_spinlock,flags); 6569 usc_start_receiver(info); 6570 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6571 } 6572 goto Cleanup; 6573 } 6574 } 6575 6576 6577 /* check status of receive frame */ 6578 6579 status = info->rx_buffer_list[EndIndex].status; 6580 6581 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + 6582 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { 6583 if ( status & RXSTATUS_SHORT_FRAME ) 6584 info->icount.rxshort++; 6585 else if ( status & RXSTATUS_ABORT ) 6586 info->icount.rxabort++; 6587 else if ( status & RXSTATUS_OVERRUN ) 6588 info->icount.rxover++; 6589 else { 6590 info->icount.rxcrc++; 6591 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) 6592 return_frame = true; 6593 } 6594 framesize = 0; 6595#if SYNCLINK_GENERIC_HDLC 6596 { 6597 info->netdev->stats.rx_errors++; 6598 info->netdev->stats.rx_frame_errors++; 6599 } 6600#endif 6601 } else 6602 return_frame = true; 6603 6604 if ( return_frame ) { 6605 /* receive frame has no errors, get frame size. 6606 * The frame size is the starting value of the RCC (which was 6607 * set to 0xffff) minus the ending value of the RCC (decremented 6608 * once for each receive character) minus 2 for the 16-bit CRC. 
6609 */ 6610 6611 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc; 6612 6613 /* adjust frame size for CRC if any */ 6614 if ( info->params.crc_type == HDLC_CRC_16_CCITT ) 6615 framesize -= 2; 6616 else if ( info->params.crc_type == HDLC_CRC_32_CCITT ) 6617 framesize -= 4; 6618 } 6619 6620 if ( debug_level >= DEBUG_LEVEL_BH ) 6621 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n", 6622 __FILE__,__LINE__,info->device_name,status,framesize); 6623 6624 if ( debug_level >= DEBUG_LEVEL_DATA ) 6625 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr, 6626 min_t(int, framesize, DMABUFFERSIZE),0); 6627 6628 if (framesize) { 6629 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) && 6630 ((framesize+1) > info->max_frame_size) ) || 6631 (framesize > info->max_frame_size) ) 6632 info->icount.rxlong++; 6633 else { 6634 /* copy dma buffer(s) to contiguous intermediate buffer */ 6635 int copy_count = framesize; 6636 int index = StartIndex; 6637 unsigned char *ptmp = info->intermediate_rxbuffer; 6638 6639 if ( !(status & RXSTATUS_CRC_ERROR)) 6640 info->icount.rxok++; 6641 6642 while(copy_count) { 6643 int partial_count; 6644 if ( copy_count > DMABUFFERSIZE ) 6645 partial_count = DMABUFFERSIZE; 6646 else 6647 partial_count = copy_count; 6648 6649 pBufEntry = &(info->rx_buffer_list[index]); 6650 memcpy( ptmp, pBufEntry->virt_addr, partial_count ); 6651 ptmp += partial_count; 6652 copy_count -= partial_count; 6653 6654 if ( ++index == info->rx_buffer_count ) 6655 index = 0; 6656 } 6657 6658 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) { 6659 ++framesize; 6660 *ptmp = (status & RXSTATUS_CRC_ERROR ? 
6661 RX_CRC_ERROR : 6662 RX_OK); 6663 6664 if ( debug_level >= DEBUG_LEVEL_DATA ) 6665 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n", 6666 __FILE__,__LINE__,info->device_name, 6667 *ptmp); 6668 } 6669 6670#if SYNCLINK_GENERIC_HDLC 6671 if (info->netcount) 6672 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); 6673 else 6674#endif 6675 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6676 } 6677 } 6678 /* Free the buffers used by this frame. */ 6679 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex ); 6680 6681 ReturnCode = true; 6682 6683Cleanup: 6684 6685 if ( info->rx_enabled && info->rx_overflow ) { 6686 /* The receiver needs to restarted because of 6687 * a receive overflow (buffer or FIFO). If the 6688 * receive buffers are now empty, then restart receiver. 6689 */ 6690 6691 if ( !info->rx_buffer_list[EndIndex].status && 6692 info->rx_buffer_list[EndIndex].count ) { 6693 spin_lock_irqsave(&info->irq_spinlock,flags); 6694 usc_start_receiver(info); 6695 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6696 } 6697 } 6698 6699 return ReturnCode; 6700 6701} /* end of mgsl_get_rx_frame() */ 6702 6703/* mgsl_get_raw_rx_frame() 6704 * 6705 * This function attempts to return a received frame from the 6706 * receive DMA buffers when running in external loop mode. In this mode, 6707 * we will return at most one DMABUFFERSIZE frame to the application. 6708 * The USC receiver is triggering off of DCD going active to start a new 6709 * frame, and DCD going inactive to terminate the frame (similar to 6710 * processing a closing flag character). 6711 * 6712 * In this routine, we will return DMABUFFERSIZE "chunks" at a time. 6713 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero 6714 * status field and the RCC field will indicate the length of the 6715 * entire received frame. 
We take this RCC field and get the modulus 6716 * of RCC and DMABUFFERSIZE to determine if number of bytes in the 6717 * last Rx DMA buffer and return that last portion of the frame. 6718 * 6719 * Arguments: info pointer to device extension 6720 * Return Value: true if frame returned, otherwise false 6721 */ 6722static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info) 6723{ 6724 unsigned int CurrentIndex, NextIndex; 6725 unsigned short status; 6726 DMABUFFERENTRY *pBufEntry; 6727 unsigned int framesize = 0; 6728 bool ReturnCode = false; 6729 unsigned long flags; 6730 struct tty_struct *tty = info->port.tty; 6731 6732 /* 6733 * current_rx_buffer points to the 1st buffer of the next available 6734 * receive frame. The status field is set by the 16C32 after 6735 * completing a receive frame. If the status field of this buffer 6736 * is zero, either the USC is still filling this buffer or this 6737 * is one of a series of buffers making up a received frame. 6738 * 6739 * If the count field of this buffer is zero, the USC is either 6740 * using this buffer or has used this buffer. Look at the count 6741 * field of the next buffer. If that next buffer's count is 6742 * non-zero, the USC is still actively using the current buffer. 6743 * Otherwise, if the next buffer's count field is zero, the 6744 * current buffer is complete and the USC is using the next 6745 * buffer. 6746 */ 6747 CurrentIndex = NextIndex = info->current_rx_buffer; 6748 ++NextIndex; 6749 if ( NextIndex == info->rx_buffer_count ) 6750 NextIndex = 0; 6751 6752 if ( info->rx_buffer_list[CurrentIndex].status != 0 || 6753 (info->rx_buffer_list[CurrentIndex].count == 0 && 6754 info->rx_buffer_list[NextIndex].count == 0)) { 6755 /* 6756 * Either the status field of this dma buffer is non-zero 6757 * (indicating the last buffer of a receive frame) or the next 6758 * buffer is marked as in use -- implying this buffer is complete 6759 * and an intermediate buffer for this received frame. 
6760 */ 6761 6762 status = info->rx_buffer_list[CurrentIndex].status; 6763 6764 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + 6765 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { 6766 if ( status & RXSTATUS_SHORT_FRAME ) 6767 info->icount.rxshort++; 6768 else if ( status & RXSTATUS_ABORT ) 6769 info->icount.rxabort++; 6770 else if ( status & RXSTATUS_OVERRUN ) 6771 info->icount.rxover++; 6772 else 6773 info->icount.rxcrc++; 6774 framesize = 0; 6775 } else { 6776 /* 6777 * A receive frame is available, get frame size and status. 6778 * 6779 * The frame size is the starting value of the RCC (which was 6780 * set to 0xffff) minus the ending value of the RCC (decremented 6781 * once for each receive character) minus 2 or 4 for the 16-bit 6782 * or 32-bit CRC. 6783 * 6784 * If the status field is zero, this is an intermediate buffer. 6785 * It's size is 4K. 6786 * 6787 * If the DMA Buffer Entry's Status field is non-zero, the 6788 * receive operation completed normally (ie: DCD dropped). The 6789 * RCC field is valid and holds the received frame size. 6790 * It is possible that the RCC field will be zero on a DMA buffer 6791 * entry with a non-zero status. This can occur if the total 6792 * frame size (number of bytes between the time DCD goes active 6793 * to the time DCD goes inactive) exceeds 65535 bytes. In this 6794 * case the 16C32 has underrun on the RCC count and appears to 6795 * stop updating this counter to let us know the actual received 6796 * frame size. If this happens (non-zero status and zero RCC), 6797 * simply return the entire RxDMA Buffer 6798 */ 6799 if ( status ) { 6800 /* 6801 * In the event that the final RxDMA Buffer is 6802 * terminated with a non-zero status and the RCC 6803 * field is zero, we interpret this as the RCC 6804 * having underflowed (received frame > 65535 bytes). 
6805 * 6806 * Signal the event to the user by passing back 6807 * a status of RxStatus_CrcError returning the full 6808 * buffer and let the app figure out what data is 6809 * actually valid 6810 */ 6811 if ( info->rx_buffer_list[CurrentIndex].rcc ) 6812 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc; 6813 else 6814 framesize = DMABUFFERSIZE; 6815 } 6816 else 6817 framesize = DMABUFFERSIZE; 6818 } 6819 6820 if ( framesize > DMABUFFERSIZE ) { 6821 /* 6822 * if running in raw sync mode, ISR handler for 6823 * End Of Buffer events terminates all buffers at 4K. 6824 * If this frame size is said to be >4K, get the 6825 * actual number of bytes of the frame in this buffer. 6826 */ 6827 framesize = framesize % DMABUFFERSIZE; 6828 } 6829 6830 6831 if ( debug_level >= DEBUG_LEVEL_BH ) 6832 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n", 6833 __FILE__,__LINE__,info->device_name,status,framesize); 6834 6835 if ( debug_level >= DEBUG_LEVEL_DATA ) 6836 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr, 6837 min_t(int, framesize, DMABUFFERSIZE),0); 6838 6839 if (framesize) { 6840 /* copy dma buffer(s) to contiguous intermediate buffer */ 6841 /* NOTE: we never copy more than DMABUFFERSIZE bytes */ 6842 6843 pBufEntry = &(info->rx_buffer_list[CurrentIndex]); 6844 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize); 6845 info->icount.rxok++; 6846 6847 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6848 } 6849 6850 /* Free the buffers used by this frame. */ 6851 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex ); 6852 6853 ReturnCode = true; 6854 } 6855 6856 6857 if ( info->rx_enabled && info->rx_overflow ) { 6858 /* The receiver needs to restarted because of 6859 * a receive overflow (buffer or FIFO). If the 6860 * receive buffers are now empty, then restart receiver. 
6861 */ 6862 6863 if ( !info->rx_buffer_list[CurrentIndex].status && 6864 info->rx_buffer_list[CurrentIndex].count ) { 6865 spin_lock_irqsave(&info->irq_spinlock,flags); 6866 usc_start_receiver(info); 6867 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6868 } 6869 } 6870 6871 return ReturnCode; 6872 6873} /* end of mgsl_get_raw_rx_frame() */ 6874 6875/* mgsl_load_tx_dma_buffer() 6876 * 6877 * Load the transmit DMA buffer with the specified data. 6878 * 6879 * Arguments: 6880 * 6881 * info pointer to device extension 6882 * Buffer pointer to buffer containing frame to load 6883 * BufferSize size in bytes of frame in Buffer 6884 * 6885 * Return Value: None 6886 */ 6887static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info, 6888 const char *Buffer, unsigned int BufferSize) 6889{ 6890 unsigned short Copycount; 6891 unsigned int i = 0; 6892 DMABUFFERENTRY *pBufEntry; 6893 6894 if ( debug_level >= DEBUG_LEVEL_DATA ) 6895 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1); 6896 6897 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { 6898 /* set CMR:13 to start transmit when 6899 * next GoAhead (abort) is received 6900 */ 6901 info->cmr_value |= BIT13; 6902 } 6903 6904 /* begin loading the frame in the next available tx dma 6905 * buffer, remember it's starting location for setting 6906 * up tx dma operation 6907 */ 6908 i = info->current_tx_buffer; 6909 info->start_tx_dma_buffer = i; 6910 6911 /* Setup the status and RCC (Frame Size) fields of the 1st */ 6912 /* buffer entry in the transmit DMA buffer list. */ 6913 6914 info->tx_buffer_list[i].status = info->cmr_value & 0xf000; 6915 info->tx_buffer_list[i].rcc = BufferSize; 6916 info->tx_buffer_list[i].count = BufferSize; 6917 6918 /* Copy frame data from 1st source buffer to the DMA buffers. */ 6919 /* The frame data may span multiple DMA buffers. */ 6920 6921 while( BufferSize ){ 6922 /* Get a pointer to next DMA buffer entry. 
 */
		pBufEntry = &info->tx_buffer_list[i++];

		if ( i == info->tx_buffer_count )
			i=0;

		/* Calculate the number of bytes that can be copied from */
		/* the source buffer to this DMA buffer. */
		if ( BufferSize > DMABUFFERSIZE )
			Copycount = DMABUFFERSIZE;
		else
			Copycount = BufferSize;

		/* Actually copy data from source buffer to DMA buffer. */
		/* Also set the data count for this individual DMA buffer. */
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
		else
			memcpy(pBufEntry->virt_addr, Buffer, Copycount);

		pBufEntry->count = Copycount;

		/* Advance source pointer and reduce remaining data count. */
		Buffer += Copycount;
		BufferSize -= Copycount;

		++info->tx_dma_buffers_used;
	}

	/* remember next available tx dma buffer */
	info->current_tx_buffer = i;

}	/* end of mgsl_load_tx_dma_buffer() */

/*
 * mgsl_register_test()
 *
 * 	Performs a register test of the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_register_test( struct mgsl_struct *info )
{
	static unsigned short BitPatterns[] =
		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned int i;
	bool rc = true;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/* Verify the reset state of some registers. */

	if ( (usc_InReg( info, SICR ) != 0) ||
		  (usc_InReg( info, IVR  ) != 0) ||
		  (usc_InDmaReg( info, DIVR ) != 0) ){
		rc = false;
	}

	if ( rc ){
		/* Write bit patterns to various registers but do it out of */
		/* sync, then read back and verify values. */

		for ( i = 0 ; i < Patterncount ; i++ ) {
			usc_OutReg( info, TC0R, BitPatterns[i] );
			usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
			usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
			usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
			usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
			usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );

			/* read each register back; any mismatch fails the test */
			if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
				  (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
				  (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
				  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
				  (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
				  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
				rc = false;
				break;
			}
		}
	}

	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;

}	/* end of mgsl_register_test() */

/* mgsl_irq_test() 	Perform interrupt test of the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_irq_test( struct mgsl_struct *info )
{
	unsigned long EndTime;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/*
	 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
	 * The ISR sets irq_occurred to true.
	 */

	info->irq_occurred = false;

	/* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
	/* Enable INTEN (Port 6, Bit12) */
	/* This connects the IRQ request signal to the ISA bus */
	/* on the ISA adapter.
This has no effect for the PCI adapter */
	usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );

	usc_EnableMasterIrqBit(info);
	usc_EnableInterrupts(info, IO_PIN);
	usc_ClearIrqPendingBits(info, IO_PIN);

	usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
	usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* poll up to ~1s (100 x 10ms) for the ISR to set irq_occurred */
	EndTime=100;
	while( EndTime-- && !info->irq_occurred ) {
		msleep_interruptible(10);
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return info->irq_occurred;

} /* end of mgsl_irq_test() */

/* mgsl_dma_test()
 *
 * 	Perform a DMA test of the 16C32. A small frame is
 * 	transmitted via DMA from a transmit buffer to a receive buffer
 * 	using single buffer DMA mode. Runs with the device in internal
 * 	loopback so no external cabling is required.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_dma_test( struct mgsl_struct *info )
{
	unsigned short FifoLevel;
	unsigned long phys_addr;
	unsigned int FrameSize;
	unsigned int i;
	char *TmpPtr;
	bool rc = true;
	unsigned short status=0;
	unsigned long EndTime;
	unsigned long flags;
	MGSL_PARAMS tmp_params;

	/* save current port options */
	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
	/* load default port options */
	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));

#define TESTFRAMESIZE 40

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup 16C32 for SDLC DMA transfer mode */

	usc_reset(info);
	usc_set_sdlc_mode(info);
	usc_enable_loopback(info,1);

	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
	 * field of the buffer entry after fetching buffer address. This
	 * way we can detect a DMA failure for a DMA read (which should be
	 * non-destructive to system memory) before we try and write to
	 * memory (where a failure could corrupt system memory).
	 */

	/* Receive DMA mode Register (RDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	RSBinA/L = store Rx status Block in List entry
	 * <12>		0	1 = Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on RxBound
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (write as 0s)
	 *
	 * 1110 0010 0000 0000 = 0xe200
	 */

	usc_OutDmaReg( info, RDMR, 0xe200 );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */

	FrameSize = TESTFRAMESIZE;

	/* setup 1st transmit buffer entry: */
	/* with frame size and transmit control word */

	info->tx_buffer_list[0].count = FrameSize;
	info->tx_buffer_list[0].rcc = FrameSize;
	info->tx_buffer_list[0].status = 0x4000;

	/* build a transmit frame in 1st transmit DMA buffer */
	/* (incrementing byte pattern, compared against rx below) */

	TmpPtr = info->tx_buffer_list[0].virt_addr;
	for (i = 0; i < FrameSize; i++ )
		*TmpPtr++ = i;

	/* setup 1st receive buffer entry: */
	/* clear status, set max receive buffer size */

	info->rx_buffer_list[0].status = 0;
	info->rx_buffer_list[0].count = FrameSize + 4;

	/* zero out the 1st receive buffer */

	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );

	/* Set count field of next buffer entries to prevent */
	/* 16C32 from using buffers after the 1st one. */

	info->tx_buffer_list[1].count = 0;
	info->rx_buffer_list[1].count = 0;


	/***************************/
	/* Program 16C32 receiver. */
	/***************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup DMA transfers */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
	phys_addr = info->rx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );

	/* Clear the Rx DMA status bits (read RDMR) and start channel */
	usc_InDmaReg( info, RDMR );
	usc_DmaCmd( info, DmaCmd_InitRxChannel );

	/* Enable Receiver (RMR <1..0> = 10) */
	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/*************************************************************/
	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
	/*************************************************************/

	/* Wait 100ms for interrupt. */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InDmaReg( info, RDMR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( !(status & BIT4) && (status & BIT5) ) {
			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
			/* BUSY  (BIT 5) is active (channel still active). */
			/* This means the buffer entry read has completed. */
			break;
		}
	}


	/******************************/
	/* Program 16C32 transmitter. */
	/******************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */

	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* Program the address of the 1st DMA Buffer Entry in linked list */

	phys_addr = info->tx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );

	/* unlatch Tx status bits, and start transmit channel. */

	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
	usc_DmaCmd( info, DmaCmd_InitTxChannel );

	/* wait for DMA controller to fill transmit FIFO */

	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/**********************************/
	/* WAIT FOR TRANSMIT FIFO TO FILL */
	/**********************************/

	/* Wait 100ms */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		FifoLevel = usc_InReg(info, TICR) >> 8;
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( FifoLevel < 16 )
			break;
		else
			if ( FrameSize < 32 ) {
				/* This frame is smaller than the entire transmit FIFO */
				/* so wait for the entire frame to be loaded. */
				if ( FifoLevel <= (32 - FrameSize) )
					break;
			}
	}


	if ( rc )
	{
		/* Enable 16C32 transmitter. */

		spin_lock_irqsave(&info->irq_spinlock,flags);

		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
		usc_TCmd( info, TCmd_SendFrame );
		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );

		spin_unlock_irqrestore(&info->irq_spinlock,flags);


		/******************************/
		/* WAIT FOR TRANSMIT COMPLETE */
		/******************************/

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* While timer not expired wait for transmit complete */

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InReg( info, TCSR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}

			spin_lock_irqsave(&info->irq_spinlock,flags);
			status = usc_InReg( info, TCSR );
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}


	if ( rc ){
		/* CHECK FOR TRANSMIT ERRORS */
		if ( status & (BIT5 + BIT1) )
			rc = false;
	}

	if ( rc ) {
		/* WAIT FOR RECEIVE COMPLETE */

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* Wait for 16C32 to write receive status to buffer entry. */
		status=info->rx_buffer_list[0].status;
		while ( status == 0 ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}
			status=info->rx_buffer_list[0].status;
		}
	}


	if ( rc ) {
		/* CHECK FOR RECEIVE ERRORS */
		status = info->rx_buffer_list[0].status;

		if ( status & (BIT8 + BIT3 + BIT1) ) {
			/* receive error has occurred */
			rc = false;
		} else {
			/* verify received data matches the transmitted pattern */
			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
				rc = false;
			}
		}
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* restore current port options */
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));

	return rc;

} /* end of mgsl_dma_test() */

/* mgsl_adapter_test()
 *
 * 	Perform the register, IRQ, and DMA tests for the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise -ENODEV
 */
static int mgsl_adapter_test( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):Testing device %s\n",
			__FILE__,__LINE__,info->device_name );

	if ( !mgsl_register_test( info ) ) {
		info->init_error = DiagStatus_AddressFailure;
		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
		return -ENODEV;
	}

	if ( !mgsl_irq_test( info ) ) {
		info->init_error = DiagStatus_IrqFailure;
		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
		return -ENODEV;
	}

	if ( !mgsl_dma_test( info ) ) {
		info->init_error = DiagStatus_DmaFailure;
		printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
		return -ENODEV;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):device %s passed diagnostics\n",
			__FILE__,__LINE__,info->device_name );

	return 0;

} /* end of mgsl_adapter_test() */

/* mgsl_memory_test()
 *
 * 	Test the shared memory on a PCI adapter.
 *	Walks data lines with bit patterns at one location, then
 *	address lines with an incrementing pattern over the whole range.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_memory_test( struct mgsl_struct *info )
{
	static unsigned long BitPatterns[] =
		{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
	unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned long i;
	unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
	unsigned long * TestAddr;

	/* only PCI adapters have shared memory; trivially pass otherwise */
	if ( info->bus_type != MGSL_BUS_TYPE_PCI )
		return true;

	TestAddr = (unsigned long *)info->memory_base;

	/* Test data lines with test pattern at one location. */

	for ( i = 0 ; i < Patterncount ; i++ ) {
		*TestAddr = BitPatterns[i];
		if ( *TestAddr != BitPatterns[i] )
			return false;
	}

	/* Test address lines with incrementing pattern over */
	/* entire address range. */

	for ( i = 0 ; i < TestLimit ; i++ ) {
		*TestAddr = i * 4;
		TestAddr++;
	}

	TestAddr = (unsigned long *)info->memory_base;

	for ( i = 0 ; i < TestLimit ; i++ ) {
		if ( *TestAddr != i * 4 )
			return false;
		TestAddr++;
	}

	/* leave shared memory zeroed for normal operation */
	memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );

	return true;

} /* End Of mgsl_memory_test() */


/* mgsl_load_pci_memory()
 *
 * 	Load a large block of data into the PCI shared memory.
 * 	Use this instead of memcpy() or memmove() to move data
 * 	into the PCI shared memory.
7456 * 7457 * Notes: 7458 * 7459 * This function prevents the PCI9050 interface chip from hogging 7460 * the adapter local bus, which can starve the 16C32 by preventing 7461 * 16C32 bus master cycles. 7462 * 7463 * The PCI9050 documentation says that the 9050 will always release 7464 * control of the local bus after completing the current read 7465 * or write operation. 7466 * 7467 * It appears that as long as the PCI9050 write FIFO is full, the 7468 * PCI9050 treats all of the writes as a single burst transaction 7469 * and will not release the bus. This causes DMA latency problems 7470 * at high speeds when copying large data blocks to the shared 7471 * memory. 7472 * 7473 * This function in effect, breaks the a large shared memory write 7474 * into multiple transations by interleaving a shared memory read 7475 * which will flush the write FIFO and 'complete' the write 7476 * transation. This allows any pending DMA request to gain control 7477 * of the local bus in a timely fasion. 7478 * 7479 * Arguments: 7480 * 7481 * TargetPtr pointer to target address in PCI shared memory 7482 * SourcePtr pointer to source buffer for data 7483 * count count in bytes of data to copy 7484 * 7485 * Return Value: None 7486 */ 7487static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr, 7488 unsigned short count ) 7489{ 7490 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */ 7491#define PCI_LOAD_INTERVAL 64 7492 7493 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL; 7494 unsigned short Index; 7495 unsigned long Dummy; 7496 7497 for ( Index = 0 ; Index < Intervalcount ; Index++ ) 7498 { 7499 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL); 7500 Dummy = *((volatile unsigned long *)TargetPtr); 7501 TargetPtr += PCI_LOAD_INTERVAL; 7502 SourcePtr += PCI_LOAD_INTERVAL; 7503 } 7504 7505 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL ); 7506 7507} /* End Of mgsl_load_pci_memory() */ 7508 7509static void mgsl_trace_block(struct mgsl_struct 
*info,const char* data, int count, int xmit) 7510{ 7511 int i; 7512 int linecount; 7513 if (xmit) 7514 printk("%s tx data:\n",info->device_name); 7515 else 7516 printk("%s rx data:\n",info->device_name); 7517 7518 while(count) { 7519 if (count > 16) 7520 linecount = 16; 7521 else 7522 linecount = count; 7523 7524 for(i=0;i<linecount;i++) 7525 printk("%02X ",(unsigned char)data[i]); 7526 for(;i<17;i++) 7527 printk(" "); 7528 for(i=0;i<linecount;i++) { 7529 if (data[i]>=040 && data[i]<=0176) 7530 printk("%c",data[i]); 7531 else 7532 printk("."); 7533 } 7534 printk("\n"); 7535 7536 data += linecount; 7537 count -= linecount; 7538 } 7539} /* end of mgsl_trace_block() */ 7540 7541/* mgsl_tx_timeout() 7542 * 7543 * called when HDLC frame times out 7544 * update stats and do tx completion processing 7545 * 7546 * Arguments: context pointer to device instance data 7547 * Return Value: None 7548 */ 7549static void mgsl_tx_timeout(unsigned long context) 7550{ 7551 struct mgsl_struct *info = (struct mgsl_struct*)context; 7552 unsigned long flags; 7553 7554 if ( debug_level >= DEBUG_LEVEL_INFO ) 7555 printk( "%s(%d):mgsl_tx_timeout(%s)\n", 7556 __FILE__,__LINE__,info->device_name); 7557 if(info->tx_active && 7558 (info->params.mode == MGSL_MODE_HDLC || 7559 info->params.mode == MGSL_MODE_RAW) ) { 7560 info->icount.txtimeout++; 7561 } 7562 spin_lock_irqsave(&info->irq_spinlock,flags); 7563 info->tx_active = false; 7564 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 7565 7566 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 7567 usc_loopmode_cancel_transmit( info ); 7568 7569 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7570 7571#if SYNCLINK_GENERIC_HDLC 7572 if (info->netcount) 7573 hdlcdev_tx_done(info); 7574 else 7575#endif 7576 mgsl_bh_transmit(info); 7577 7578} /* end of mgsl_tx_timeout() */ 7579 7580/* signal that there are no more frames to send, so that 7581 * line is 'released' by echoing RxD to TxD when current 7582 * transmission is complete (or 
immediately if no tx in progress). 7583 */ 7584static int mgsl_loopmode_send_done( struct mgsl_struct * info ) 7585{ 7586 unsigned long flags; 7587 7588 spin_lock_irqsave(&info->irq_spinlock,flags); 7589 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { 7590 if (info->tx_active) 7591 info->loopmode_send_done_requested = true; 7592 else 7593 usc_loopmode_send_done(info); 7594 } 7595 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7596 7597 return 0; 7598} 7599 7600/* release the line by echoing RxD to TxD 7601 * upon completion of a transmit frame 7602 */ 7603static void usc_loopmode_send_done( struct mgsl_struct * info ) 7604{ 7605 info->loopmode_send_done_requested = false; 7606 /* clear CMR:13 to 0 to start echoing RxData to TxData */ 7607 info->cmr_value &= ~BIT13; 7608 usc_OutReg(info, CMR, info->cmr_value); 7609} 7610 7611/* abort a transmit in progress while in HDLC LoopMode 7612 */ 7613static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ) 7614{ 7615 /* reset tx dma channel and purge TxFifo */ 7616 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 7617 usc_DmaCmd( info, DmaCmd_ResetTxChannel ); 7618 usc_loopmode_send_done( info ); 7619} 7620 7621/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled 7622 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort) 7623 * we must clear CMR:13 to begin repeating TxData to RxData 7624 */ 7625static void usc_loopmode_insert_request( struct mgsl_struct * info ) 7626{ 7627 info->loopmode_insert_requested = true; 7628 7629 /* enable RxAbort irq. 
On next RxAbort, clear CMR:13 to 7630 * begin repeating TxData on RxData (complete insertion) 7631 */ 7632 usc_OutReg( info, RICR, 7633 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) ); 7634 7635 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */ 7636 info->cmr_value |= BIT13; 7637 usc_OutReg(info, CMR, info->cmr_value); 7638} 7639 7640/* return 1 if station is inserted into the loop, otherwise 0 7641 */ 7642static int usc_loopmode_active( struct mgsl_struct * info) 7643{ 7644 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ; 7645} 7646 7647#if SYNCLINK_GENERIC_HDLC 7648 7649/** 7650 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 7651 * set encoding and frame check sequence (FCS) options 7652 * 7653 * dev pointer to network device structure 7654 * encoding serial encoding setting 7655 * parity FCS setting 7656 * 7657 * returns 0 if success, otherwise error code 7658 */ 7659static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, 7660 unsigned short parity) 7661{ 7662 struct mgsl_struct *info = dev_to_port(dev); 7663 unsigned char new_encoding; 7664 unsigned short new_crctype; 7665 7666 /* return error if TTY interface open */ 7667 if (info->port.count) 7668 return -EBUSY; 7669 7670 switch (encoding) 7671 { 7672 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; 7673 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; 7674 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; 7675 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; 7676 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; 7677 default: return -EINVAL; 7678 } 7679 7680 switch (parity) 7681 { 7682 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; 7683 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; 7684 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; 7685 default: return -EINVAL; 7686 } 7687 7688 
info->params.encoding = new_encoding; 7689 info->params.crc_type = new_crctype; 7690 7691 /* if network interface up, reprogram hardware */ 7692 if (info->netcount) 7693 mgsl_program_hw(info); 7694 7695 return 0; 7696} 7697 7698/** 7699 * called by generic HDLC layer to send frame 7700 * 7701 * skb socket buffer containing HDLC frame 7702 * dev pointer to network device structure 7703 */ 7704static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, 7705 struct net_device *dev) 7706{ 7707 struct mgsl_struct *info = dev_to_port(dev); 7708 unsigned long flags; 7709 7710 if (debug_level >= DEBUG_LEVEL_INFO) 7711 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name); 7712 7713 /* stop sending until this frame completes */ 7714 netif_stop_queue(dev); 7715 7716 /* copy data to device buffers */ 7717 info->xmit_cnt = skb->len; 7718 mgsl_load_tx_dma_buffer(info, skb->data, skb->len); 7719 7720 /* update network statistics */ 7721 dev->stats.tx_packets++; 7722 dev->stats.tx_bytes += skb->len; 7723 7724 /* done with socket buffer, so free it */ 7725 dev_kfree_skb(skb); 7726 7727 /* save start time for transmit timeout detection */ 7728 dev->trans_start = jiffies; 7729 7730 /* start hardware transmitter if necessary */ 7731 spin_lock_irqsave(&info->irq_spinlock,flags); 7732 if (!info->tx_active) 7733 usc_start_transmitter(info); 7734 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7735 7736 return NETDEV_TX_OK; 7737} 7738 7739/** 7740 * called by network layer when interface enabled 7741 * claim resources and initialize hardware 7742 * 7743 * dev pointer to network device structure 7744 * 7745 * returns 0 if success, otherwise error code 7746 */ 7747static int hdlcdev_open(struct net_device *dev) 7748{ 7749 struct mgsl_struct *info = dev_to_port(dev); 7750 int rc; 7751 unsigned long flags; 7752 7753 if (debug_level >= DEBUG_LEVEL_INFO) 7754 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name); 7755 7756 /* generic HDLC layer open processing */ 7757 if ((rc = hdlc_open(dev))) 
7758 return rc; 7759 7760 /* arbitrate between network and tty opens */ 7761 spin_lock_irqsave(&info->netlock, flags); 7762 if (info->port.count != 0 || info->netcount != 0) { 7763 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); 7764 spin_unlock_irqrestore(&info->netlock, flags); 7765 return -EBUSY; 7766 } 7767 info->netcount=1; 7768 spin_unlock_irqrestore(&info->netlock, flags); 7769 7770 /* claim resources and init adapter */ 7771 if ((rc = startup(info)) != 0) { 7772 spin_lock_irqsave(&info->netlock, flags); 7773 info->netcount=0; 7774 spin_unlock_irqrestore(&info->netlock, flags); 7775 return rc; 7776 } 7777 7778 /* assert DTR and RTS, apply hardware settings */ 7779 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 7780 mgsl_program_hw(info); 7781 7782 /* enable network layer transmit */ 7783 dev->trans_start = jiffies; 7784 netif_start_queue(dev); 7785 7786 /* inform generic HDLC layer of current DCD status */ 7787 spin_lock_irqsave(&info->irq_spinlock, flags); 7788 usc_get_serial_signals(info); 7789 spin_unlock_irqrestore(&info->irq_spinlock, flags); 7790 if (info->serial_signals & SerialSignal_DCD) 7791 netif_carrier_on(dev); 7792 else 7793 netif_carrier_off(dev); 7794 return 0; 7795} 7796 7797/** 7798 * called by network layer when interface is disabled 7799 * shutdown hardware and release resources 7800 * 7801 * dev pointer to network device structure 7802 * 7803 * returns 0 if success, otherwise error code 7804 */ 7805static int hdlcdev_close(struct net_device *dev) 7806{ 7807 struct mgsl_struct *info = dev_to_port(dev); 7808 unsigned long flags; 7809 7810 if (debug_level >= DEBUG_LEVEL_INFO) 7811 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name); 7812 7813 netif_stop_queue(dev); 7814 7815 /* shutdown adapter and release resources */ 7816 shutdown(info); 7817 7818 hdlc_close(dev); 7819 7820 spin_lock_irqsave(&info->netlock, flags); 7821 info->netcount=0; 7822 spin_unlock_irqrestore(&info->netlock, flags); 7823 7824 return 
0; 7825} 7826 7827/** 7828 * called by network layer to process IOCTL call to network device 7829 * 7830 * dev pointer to network device structure 7831 * ifr pointer to network interface request structure 7832 * cmd IOCTL command code 7833 * 7834 * returns 0 if success, otherwise error code 7835 */ 7836static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 7837{ 7838 const size_t size = sizeof(sync_serial_settings); 7839 sync_serial_settings new_line; 7840 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; 7841 struct mgsl_struct *info = dev_to_port(dev); 7842 unsigned int flags; 7843 7844 if (debug_level >= DEBUG_LEVEL_INFO) 7845 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); 7846 7847 /* return error if TTY interface open */ 7848 if (info->port.count) 7849 return -EBUSY; 7850 7851 if (cmd != SIOCWANDEV) 7852 return hdlc_ioctl(dev, ifr, cmd); 7853 7854 switch(ifr->ifr_settings.type) { 7855 case IF_GET_IFACE: /* return current sync_serial_settings */ 7856 7857 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; 7858 if (ifr->ifr_settings.size < size) { 7859 ifr->ifr_settings.size = size; /* data size wanted */ 7860 return -ENOBUFS; 7861 } 7862 7863 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | 7864 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | 7865 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | 7866 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); 7867 7868 switch (flags){ 7869 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; 7870 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; 7871 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; 7872 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; 7873 default: new_line.clock_type = CLOCK_DEFAULT; 7874 } 7875 7876 new_line.clock_rate = info->params.clock_speed; 7877 new_line.loopback = 
info->params.loopback ? 1:0; 7878 7879 if (copy_to_user(line, &new_line, size)) 7880 return -EFAULT; 7881 return 0; 7882 7883 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ 7884 7885 if(!capable(CAP_NET_ADMIN)) 7886 return -EPERM; 7887 if (copy_from_user(&new_line, line, size)) 7888 return -EFAULT; 7889 7890 switch (new_line.clock_type) 7891 { 7892 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; 7893 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; 7894 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; 7895 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; 7896 case CLOCK_DEFAULT: flags = info->params.flags & 7897 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | 7898 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | 7899 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | 7900 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; 7901 default: return -EINVAL; 7902 } 7903 7904 if (new_line.loopback != 0 && new_line.loopback != 1) 7905 return -EINVAL; 7906 7907 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | 7908 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | 7909 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | 7910 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); 7911 info->params.flags |= flags; 7912 7913 info->params.loopback = new_line.loopback; 7914 7915 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) 7916 info->params.clock_speed = new_line.clock_rate; 7917 else 7918 info->params.clock_speed = 0; 7919 7920 /* if network interface up, reprogram hardware */ 7921 if (info->netcount) 7922 mgsl_program_hw(info); 7923 return 0; 7924 7925 default: 7926 return hdlc_ioctl(dev, ifr, cmd); 7927 } 7928} 7929 7930/** 7931 * called by network layer when transmit timeout is detected 7932 * 7933 * dev pointer to network device structure 7934 */ 7935static void hdlcdev_tx_timeout(struct net_device *dev) 7936{ 7937 struct mgsl_struct *info = dev_to_port(dev); 7938 unsigned 
long flags; 7939 7940 if (debug_level >= DEBUG_LEVEL_INFO) 7941 printk("hdlcdev_tx_timeout(%s)\n",dev->name); 7942 7943 dev->stats.tx_errors++; 7944 dev->stats.tx_aborted_errors++; 7945 7946 spin_lock_irqsave(&info->irq_spinlock,flags); 7947 usc_stop_transmitter(info); 7948 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7949 7950 netif_wake_queue(dev); 7951} 7952 7953/** 7954 * called by device driver when transmit completes 7955 * reenable network layer transmit if stopped 7956 * 7957 * info pointer to device instance information 7958 */ 7959static void hdlcdev_tx_done(struct mgsl_struct *info) 7960{ 7961 if (netif_queue_stopped(info->netdev)) 7962 netif_wake_queue(info->netdev); 7963} 7964 7965/** 7966 * called by device driver when frame received 7967 * pass frame to network layer 7968 * 7969 * info pointer to device instance information 7970 * buf pointer to buffer contianing frame data 7971 * size count of data bytes in buf 7972 */ 7973static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size) 7974{ 7975 struct sk_buff *skb = dev_alloc_skb(size); 7976 struct net_device *dev = info->netdev; 7977 7978 if (debug_level >= DEBUG_LEVEL_INFO) 7979 printk("hdlcdev_rx(%s)\n", dev->name); 7980 7981 if (skb == NULL) { 7982 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", 7983 dev->name); 7984 dev->stats.rx_dropped++; 7985 return; 7986 } 7987 7988 memcpy(skb_put(skb, size), buf, size); 7989 7990 skb->protocol = hdlc_type_trans(skb, dev); 7991 7992 dev->stats.rx_packets++; 7993 dev->stats.rx_bytes += size; 7994 7995 netif_rx(skb); 7996} 7997 7998static const struct net_device_ops hdlcdev_ops = { 7999 .ndo_open = hdlcdev_open, 8000 .ndo_stop = hdlcdev_close, 8001 .ndo_change_mtu = hdlc_change_mtu, 8002 .ndo_start_xmit = hdlc_start_xmit, 8003 .ndo_do_ioctl = hdlcdev_ioctl, 8004 .ndo_tx_timeout = hdlcdev_tx_timeout, 8005}; 8006 8007/** 8008 * called by device driver when adding device instance 8009 * do generic HDLC initialization 8010 * 8011 * info 
pointer to device instance information 8012 * 8013 * returns 0 if success, otherwise error code 8014 */ 8015static int hdlcdev_init(struct mgsl_struct *info) 8016{ 8017 int rc; 8018 struct net_device *dev; 8019 hdlc_device *hdlc; 8020 8021 /* allocate and initialize network and HDLC layer objects */ 8022 8023 if (!(dev = alloc_hdlcdev(info))) { 8024 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__); 8025 return -ENOMEM; 8026 } 8027 8028 /* for network layer reporting purposes only */ 8029 dev->base_addr = info->io_base; 8030 dev->irq = info->irq_level; 8031 dev->dma = info->dma_level; 8032 8033 /* network layer callbacks and settings */ 8034 dev->netdev_ops = &hdlcdev_ops; 8035 dev->watchdog_timeo = 10 * HZ; 8036 dev->tx_queue_len = 50; 8037 8038 /* generic HDLC layer callbacks and settings */ 8039 hdlc = dev_to_hdlc(dev); 8040 hdlc->attach = hdlcdev_attach; 8041 hdlc->xmit = hdlcdev_xmit; 8042 8043 /* register objects with HDLC layer */ 8044 if ((rc = register_hdlc_device(dev))) { 8045 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); 8046 free_netdev(dev); 8047 return rc; 8048 } 8049 8050 info->netdev = dev; 8051 return 0; 8052} 8053 8054/** 8055 * called by device driver when removing device instance 8056 * do generic HDLC cleanup 8057 * 8058 * info pointer to device instance information 8059 */ 8060static void hdlcdev_exit(struct mgsl_struct *info) 8061{ 8062 unregister_hdlc_device(info->netdev); 8063 free_netdev(info->netdev); 8064 info->netdev = NULL; 8065} 8066 8067#endif /* CONFIG_HDLC */ 8068 8069 8070static int __devinit synclink_init_one (struct pci_dev *dev, 8071 const struct pci_device_id *ent) 8072{ 8073 struct mgsl_struct *info; 8074 8075 if (pci_enable_device(dev)) { 8076 printk("error enabling pci device %p\n", dev); 8077 return -EIO; 8078 } 8079 8080 if (!(info = mgsl_allocate_device())) { 8081 printk("can't allocate device instance data.\n"); 8082 return -EIO; 8083 } 8084 8085 /* Copy user configuration info to 
device instance data */ 8086 8087 info->io_base = pci_resource_start(dev, 2); 8088 info->irq_level = dev->irq; 8089 info->phys_memory_base = pci_resource_start(dev, 3); 8090 8091 /* Because veremap only works on page boundaries we must map 8092 * a larger area than is actually implemented for the LCR 8093 * memory range. We map a full page starting at the page boundary. 8094 */ 8095 info->phys_lcr_base = pci_resource_start(dev, 0); 8096 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1); 8097 info->phys_lcr_base &= ~(PAGE_SIZE-1); 8098 8099 info->bus_type = MGSL_BUS_TYPE_PCI; 8100 info->io_addr_size = 8; 8101 info->irq_flags = IRQF_SHARED; 8102 8103 if (dev->device == 0x0210) { 8104 /* Version 1 PCI9030 based universal PCI adapter */ 8105 info->misc_ctrl_value = 0x007c4080; 8106 info->hw_version = 1; 8107 } else { 8108 /* Version 0 PCI9050 based 5V PCI adapter 8109 * A PCI9050 bug prevents reading LCR registers if 8110 * LCR base address bit 7 is set. Maintain shadow 8111 * value so we can write to LCR misc control reg. 8112 */ 8113 info->misc_ctrl_value = 0x087e4546; 8114 info->hw_version = 0; 8115 } 8116 8117 mgsl_add_device(info); 8118 8119 return 0; 8120} 8121 8122static void __devexit synclink_remove_one (struct pci_dev *dev) 8123{ 8124} 8125