Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.16-rc2 1449 lines 37 kB view raw
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 */


#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"


/* Number of buffers per channel */

#define NUM_TX_BUF      2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF      6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE        1576	/* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported. Initializers for struct scc_hardware, in field order:
   name, io_region, io_delta, io_size, num_devs, scc_offset, tmr_offset,
   tmr_hz, pclk_hz */

#define HW_PI           { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
			    0, 8, 1843200, 3686400 }
#define HW_PI2          { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
			    0, 8, 3686400, 7372800 }
#define HW_TWIN         { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
			    0, 4, 6144000, 6144000 }
#define HW_S5           { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
			  0, 8, 4915200, 9830400 }

#define HARDWARE        { HW_PI, HW_PI2, HW_TWIN, HW_S5 }

#define TMR_0_HZ        25600	/* Frequency of timer 0 */

/* Indices into hw[]; order must match HARDWARE above */
#define TYPE_PI         0
#define TYPE_PI2        1
#define TYPE_TWIN       2
#define TYPE_S5         3
#define NUM_TYPES       4

#define MAX_NUM_DEVS    32


/* SCC chips supported */

#define Z8530           0
#define Z85C30          1
#define Z85230          2

#define CHIPNAMES       { "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD        0x00
#define SCCB_DATA       0x01
#define SCCA_CMD        0x02
#define SCCA_DATA       0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0        0x00
#define TMR_CNT1        0x01
#define TMR_CNT2        0x02
#define TMR_CTRL        0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK    0x04

/* Additional PackeTwin registers relative to card base.
   Note: reads and writes at the same offset hit different registers
   (e.g. 0x08 reads INT_REG but writes DMA_CFG). */
#define TWIN_INT_REG    0x08
#define TWIN_CLR_TMR1   0x09
#define TWIN_CLR_TMR2   0x0a
#define TWIN_SPARE_1    0x0b
#define TWIN_DMA_CFG    0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2    0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK    0x01
#define TWIN_TMR1_MSK   0x02
#define TWIN_TMR2_MSK   0x04
#define TWIN_INT_MSK    0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON    0x01
#define TWIN_DTRB_ON    0x02
#define TWIN_EXTCLKA    0x04
#define TWIN_EXTCLKB    0x08
#define TWIN_LOOPA_ON   0x10
#define TWIN_LOOPB_ON   0x20
#define TWIN_EI         0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1 0x08
#define TWIN_DMA_HDX_R1 0x0a
#define TWIN_DMA_HDX_T3 0x14
#define TWIN_DMA_HDX_R3 0x16
#define TWIN_DMA_FDX_T3R1 0x1b
#define TWIN_DMA_FDX_T1R3 0x1d


/* Status values (TX/RX state machine, see scc_priv.state) */

#define IDLE      0
#define TX_HEAD   1
#define TX_DATA   2
#define TX_PAUSE  3
#define TX_TAIL   4
#define RTS_OFF   5
#define WAIT      6
#define DCD_ON    7
#define RX_ON     8
#define DCD_OFF   9


/* Ioctls */

#define SIOCGSCCPARAM SIOCDEVPRIVATE
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)


/* Data types */

/* Per-channel tunable parameters, read/written wholesale via the
   SIOCGSCCPARAM/SIOCSSCCPARAM private ioctls */
struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};

/* Static description of one supported card type (see HW_* initializers) */
struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

/* Per-channel driver state (two per card) */
struct scc_priv {
	int type;		/* TYPE_* card type */
	int chip;		/* Z8530/Z85C30/Z85230 */
	struct net_device *dev;
	struct scc_info *info;

	int channel;		/* 0 = channel A, 1 = channel B */
	int card_base, scc_cmd, scc_data;	/* I/O port addresses */
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;	/* RX ring bookkeeping */
	int rx_over;		/* 0 = ok, 1 = HW overrun, 2 = SW overrun */
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;	/* TX ring bookkeeping */
	int state;		/* IDLE/TX_*/RX_* state machine */
	unsigned long tx_start;
	int rr0;		/* cached copy of SCC read register 0 */
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

/* Per-card state: two channels sharing one register lock */
struct scc_info {
	int irq_used;
	int twin_serial_cfg;	/* shadow of the PackeTwin SERIAL_CFG port */
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};


/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;


/* Global variables */

static struct scc_info *first;	/* head of the list of probed cards */
static unsigned long rand;	/* state of the LCG in random() */


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_hw_array(io, int, ioport, NULL, 0);
MODULE_LICENSE("GPL");

/* Module unload: unregister, reset and release every card on the list */
static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}

/* Module load: autoprobe all card types by programming the on-board
   8253/8254 timer 1 to expire after ~10 jiffies and checking that the
   measured delay (9..11 jiffies) matches; each good hit is handed to
   setup_adapter(). Returns 0 if at least one adapter was set up. */
static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs &&
				    hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					/* Cache the timer port addresses */
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0 ||
					    t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

/* netdev constructor callback: basic AX.25 device defaults */
static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	dev_addr_set(dev, (u8 *)&ax25_defaddr);
}

static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_siocdevprivate = scc_siocdevprivate,
	.ndo_set_mac_address = scc_set_mac_address,
};

/* Set up one detected card: identify the SCC chip, autodetect its IRQ,
   and register both channels as network devices. Returns 0 on success,
   a negative errno otherwise (I/O region is released by the caller). */
static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip, err;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}

	info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	/* Minimal channel-A setup so the register helpers work below */
	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ; busy-wait a couple of jiffies for the
	   timer interrupt to fire */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		err = -ENODEV;
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		err = -ENODEV;
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		err = -ENODEV;
		goto out4;
	}


	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

      out4:
	unregister_netdev(info->dev[0]);
      out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return err;
}


/* Driver functions */

/* Write val to SCC register reg (reg 0 writes the command/R0 register
   directly). On the PI/PI2 (default case) DREQ must be masked around
   the access, under the shared register lock. */
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}


static void
write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		/* fast != 0 skips the DREQ masking and locking on PI/PI2
		   (used when the caller already holds the register lock) */
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}


/* Read SCC register reg (reg 0 reads R0 directly); counterpart of
   write_scc() */
static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


/* Read one byte from the SCC data register */
static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


/* ndo_open: acquire IRQ/DMA, reset the channel and program the SCC for
   SDLC operation according to priv->param */
static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}


/* ndo_stop: reset the channel and release DMA/IRQ resources */
static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}


/* Private ioctls: get/set the whole scc_param block. Setting requires
   CAP_NET_ADMIN and a down interface. */
static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
	struct scc_priv *priv = dev->ml_priv;

	switch (cmd) {
	case SIOCGSCCPARAM:
		if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	case SIOCSSCCPARAM:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user(&priv->param, data,
				   sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}


/* ndo_start_xmit: queue the frame (minus the leading KISS byte) in the
   TX ring and kick the transmitter if it is idle */
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	unsigned long flags;
	int i;

	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer; byte 0 is the KISS command byte
	   and is not transmitted */
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
	return 0;
}


/* Start transmitting the frame at the TX ring tail, either via DMA or
   interrupt-driven through tx_isr() */
static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		/* First n bytes are fed by hand (ESCC has a 4-byte FIFO) */
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}


/* Enable the receiver, in DMA or interrupt-per-character mode */
static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     virt_to_bus(priv->rx_buf[priv->rx_head]));
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}


/* Disable the receiver and its DREQ/interrupt sources */
static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}


/* Arm the channel's 8253/8254 timer for t ticks of TMR_0_HZ; t == 0
   invokes tm_isr() immediately, t < 0 leaves the timer disarmed */
static void start_timer(struct scc_priv *priv, int t, int r15)
{
	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
	}
}


/* Pseudo-random byte for p-persistence CSMA (linear congruential) */
static inline unsigned char random(void)
{
	/* See "Numerical Recipes in C", second edition, p.
284 */ 1081 rand = rand * 1664525L + 1013904223L; 1082 return (unsigned char) (rand >> 24); 1083} 1084 1085static inline void z8530_isr(struct scc_info *info) 1086{ 1087 int is, i = 100; 1088 1089 while ((is = read_scc(&info->priv[0], R3)) && i--) { 1090 if (is & CHARxIP) { 1091 rx_isr(&info->priv[0]); 1092 } else if (is & CHATxIP) { 1093 tx_isr(&info->priv[0]); 1094 } else if (is & CHAEXT) { 1095 es_isr(&info->priv[0]); 1096 } else if (is & CHBRxIP) { 1097 rx_isr(&info->priv[1]); 1098 } else if (is & CHBTxIP) { 1099 tx_isr(&info->priv[1]); 1100 } else { 1101 es_isr(&info->priv[1]); 1102 } 1103 write_scc(&info->priv[0], R0, RES_H_IUS); 1104 i++; 1105 } 1106 if (i < 0) { 1107 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n", 1108 is); 1109 } 1110 /* Ok, no interrupts pending from this 8530. The INT line should 1111 be inactive now. */ 1112} 1113 1114 1115static irqreturn_t scc_isr(int irq, void *dev_id) 1116{ 1117 struct scc_info *info = dev_id; 1118 1119 spin_lock(info->priv[0].register_lock); 1120 /* At this point interrupts are enabled, and the interrupt under service 1121 is already acknowledged, but masked off. 1122 1123 Interrupt processing: We loop until we know that the IRQ line is 1124 low. If another positive edge occurs afterwards during the ISR, 1125 another interrupt will be triggered by the interrupt controller 1126 as soon as the IRQ level is enabled again (see asm/irq.h). 1127 1128 Bottom-half handlers will be processed after scc_isr(). This is 1129 important, since we only have small ringbuffers and want new data 1130 to be fetched/delivered immediately. 
	 */

	if (info->priv[0].type == TYPE_TWIN) {
		/* PackeTwin: demultiplex SCC and the two timer sources via
		   the (active-low) INT_REG */
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}


/* RX interrupt: in DMA mode only special conditions arrive here; in
   interrupt mode, drain the FIFO into the current RX ring buffer */
static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->
							    rx_ptr++] =
				    read_scc_data(priv);
			else {
				/* Buffer full: drop the byte, flag overflow */
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}


/* Handle an RR1 special condition: overrun or end-of-frame. On EOF,
   account errors or queue the completed frame for rx_bh(). */
static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame.
Get byte count */ 1193 if (priv->param.dma >= 0) { 1194 flags = claim_dma_lock(); 1195 cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 1196 2; 1197 release_dma_lock(flags); 1198 } else { 1199 cb = priv->rx_ptr - 2; 1200 } 1201 if (priv->rx_over) { 1202 /* We had an overrun */ 1203 priv->dev->stats.rx_errors++; 1204 if (priv->rx_over == 2) 1205 priv->dev->stats.rx_length_errors++; 1206 else 1207 priv->dev->stats.rx_fifo_errors++; 1208 priv->rx_over = 0; 1209 } else if (rc & CRC_ERR) { 1210 /* Count invalid CRC only if packet length >= minimum */ 1211 if (cb >= 15) { 1212 priv->dev->stats.rx_errors++; 1213 priv->dev->stats.rx_crc_errors++; 1214 } 1215 } else { 1216 if (cb >= 15) { 1217 if (priv->rx_count < NUM_RX_BUF - 1) { 1218 /* Put good frame in FIFO */ 1219 priv->rx_len[priv->rx_head] = cb; 1220 priv->rx_head = 1221 (priv->rx_head + 1222 1) % NUM_RX_BUF; 1223 priv->rx_count++; 1224 schedule_work(&priv->rx_work); 1225 } else { 1226 priv->dev->stats.rx_errors++; 1227 priv->dev->stats.rx_over_errors++; 1228 } 1229 } 1230 } 1231 /* Get ready for new frame */ 1232 if (priv->param.dma >= 0) { 1233 flags = claim_dma_lock(); 1234 set_dma_addr(priv->param.dma, 1235 virt_to_bus(priv->rx_buf[priv->rx_head])); 1236 set_dma_count(priv->param.dma, BUF_SIZE); 1237 release_dma_lock(flags); 1238 } else { 1239 priv->rx_ptr = 0; 1240 } 1241 } 1242} 1243 1244 1245static void rx_bh(struct work_struct *ugli_api) 1246{ 1247 struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work); 1248 int i = priv->rx_tail; 1249 int cb; 1250 unsigned long flags; 1251 struct sk_buff *skb; 1252 unsigned char *data; 1253 1254 spin_lock_irqsave(&priv->ring_lock, flags); 1255 while (priv->rx_count) { 1256 spin_unlock_irqrestore(&priv->ring_lock, flags); 1257 cb = priv->rx_len[i]; 1258 /* Allocate buffer */ 1259 skb = dev_alloc_skb(cb + 1); 1260 if (skb == NULL) { 1261 /* Drop packet */ 1262 priv->dev->stats.rx_dropped++; 1263 } else { 1264 /* Fill buffer */ 1265 data = skb_put(skb, cb + 
1); 1266 data[0] = 0; 1267 memcpy(&data[1], priv->rx_buf[i], cb); 1268 skb->protocol = ax25_type_trans(skb, priv->dev); 1269 netif_rx(skb); 1270 priv->dev->stats.rx_packets++; 1271 priv->dev->stats.rx_bytes += cb; 1272 } 1273 spin_lock_irqsave(&priv->ring_lock, flags); 1274 /* Move tail */ 1275 priv->rx_tail = i = (i + 1) % NUM_RX_BUF; 1276 priv->rx_count--; 1277 } 1278 spin_unlock_irqrestore(&priv->ring_lock, flags); 1279} 1280 1281 1282static void tx_isr(struct scc_priv *priv) 1283{ 1284 int i = priv->tx_tail, p = priv->tx_ptr; 1285 1286 /* Suspend TX interrupts if we don't want to send anything. 1287 See Figure 2-22. */ 1288 if (p == priv->tx_len[i]) { 1289 write_scc(priv, R0, RES_Tx_P); 1290 return; 1291 } 1292 1293 /* Write characters */ 1294 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) { 1295 write_scc_data(priv, priv->tx_buf[i][p++], 0); 1296 } 1297 1298 /* Reset EOM latch of Z8530 */ 1299 if (!priv->tx_ptr && p && priv->chip == Z8530) 1300 write_scc(priv, R0, RES_EOM_L); 1301 1302 priv->tx_ptr = p; 1303} 1304 1305 1306static void es_isr(struct scc_priv *priv) 1307{ 1308 int i, rr0, drr0, res; 1309 unsigned long flags; 1310 1311 /* Read status, reset interrupt bit (open latches) */ 1312 rr0 = read_scc(priv, R0); 1313 write_scc(priv, R0, RES_EXT_INT); 1314 drr0 = priv->rr0 ^ rr0; 1315 priv->rr0 = rr0; 1316 1317 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since 1318 it might have already been cleared again by AUTOEOM. 
*/ 1319 if (priv->state == TX_DATA) { 1320 /* Get remaining bytes */ 1321 i = priv->tx_tail; 1322 if (priv->param.dma >= 0) { 1323 disable_dma(priv->param.dma); 1324 flags = claim_dma_lock(); 1325 res = get_dma_residue(priv->param.dma); 1326 release_dma_lock(flags); 1327 } else { 1328 res = priv->tx_len[i] - priv->tx_ptr; 1329 priv->tx_ptr = 0; 1330 } 1331 /* Disable DREQ / TX interrupt */ 1332 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN) 1333 outb(0, priv->card_base + TWIN_DMA_CFG); 1334 else 1335 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); 1336 if (res) { 1337 /* Update packet statistics */ 1338 priv->dev->stats.tx_errors++; 1339 priv->dev->stats.tx_fifo_errors++; 1340 /* Other underrun interrupts may already be waiting */ 1341 write_scc(priv, R0, RES_EXT_INT); 1342 write_scc(priv, R0, RES_EXT_INT); 1343 } else { 1344 /* Update packet statistics */ 1345 priv->dev->stats.tx_packets++; 1346 priv->dev->stats.tx_bytes += priv->tx_len[i]; 1347 /* Remove frame from FIFO */ 1348 priv->tx_tail = (i + 1) % NUM_TX_BUF; 1349 priv->tx_count--; 1350 /* Inform upper layers */ 1351 netif_wake_queue(priv->dev); 1352 } 1353 /* Switch state */ 1354 write_scc(priv, R15, 0); 1355 if (priv->tx_count && 1356 (jiffies - priv->tx_start) < priv->param.txtimeout) { 1357 priv->state = TX_PAUSE; 1358 start_timer(priv, priv->param.txpause, 0); 1359 } else { 1360 priv->state = TX_TAIL; 1361 start_timer(priv, priv->param.txtail, 0); 1362 } 1363 } 1364 1365 /* DCD transition */ 1366 if (drr0 & DCD) { 1367 if (rr0 & DCD) { 1368 switch (priv->state) { 1369 case IDLE: 1370 case WAIT: 1371 priv->state = DCD_ON; 1372 write_scc(priv, R15, 0); 1373 start_timer(priv, priv->param.dcdon, 0); 1374 } 1375 } else { 1376 switch (priv->state) { 1377 case RX_ON: 1378 rx_off(priv); 1379 priv->state = DCD_OFF; 1380 write_scc(priv, R15, 0); 1381 start_timer(priv, priv->param.dcdoff, 0); 1382 } 1383 } 1384 } 1385 1386 /* CTS transition */ 1387 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != 
TYPE_TWIN) 1388 tm_isr(priv); 1389 1390} 1391 1392 1393static void tm_isr(struct scc_priv *priv) 1394{ 1395 switch (priv->state) { 1396 case TX_HEAD: 1397 case TX_PAUSE: 1398 tx_on(priv); 1399 priv->state = TX_DATA; 1400 break; 1401 case TX_TAIL: 1402 write_scc(priv, R5, TxCRC_ENAB | Tx8); 1403 priv->state = RTS_OFF; 1404 if (priv->type != TYPE_TWIN) 1405 write_scc(priv, R15, 0); 1406 start_timer(priv, priv->param.rtsoff, 0); 1407 break; 1408 case RTS_OFF: 1409 write_scc(priv, R15, DCDIE); 1410 priv->rr0 = read_scc(priv, R0); 1411 if (priv->rr0 & DCD) { 1412 priv->dev->stats.collisions++; 1413 rx_on(priv); 1414 priv->state = RX_ON; 1415 } else { 1416 priv->state = WAIT; 1417 start_timer(priv, priv->param.waittime, DCDIE); 1418 } 1419 break; 1420 case WAIT: 1421 if (priv->tx_count) { 1422 priv->state = TX_HEAD; 1423 priv->tx_start = jiffies; 1424 write_scc(priv, R5, 1425 TxCRC_ENAB | RTS | TxENAB | Tx8); 1426 write_scc(priv, R15, 0); 1427 start_timer(priv, priv->param.txdelay, 0); 1428 } else { 1429 priv->state = IDLE; 1430 if (priv->type != TYPE_TWIN) 1431 write_scc(priv, R15, DCDIE); 1432 } 1433 break; 1434 case DCD_ON: 1435 case DCD_OFF: 1436 write_scc(priv, R15, DCDIE); 1437 priv->rr0 = read_scc(priv, R0); 1438 if (priv->rr0 & DCD) { 1439 rx_on(priv); 1440 priv->state = RX_ON; 1441 } else { 1442 priv->state = WAIT; 1443 start_timer(priv, 1444 random() / priv->param.persist * 1445 priv->param.slottime, DCDIE); 1446 } 1447 break; 1448 } 1449}