Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
vlsi_ir.c at v3.18-rc2 (1883 lines, 52 kB)
/*********************************************************************
 *
 *	vlsi_ir.c:	VLSI82C147 PCI IrDA controller driver for Linux
 *
 *	Copyright (c) 2001-2003 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 *	This program is distributed in the hope that it will be useful,
 *	but WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *	GNU General Public License for more details.
 *
 *	You should have received a copy of the GNU General Public License
 *	along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 ********************************************************************/

#include <linux/module.h>

#define DRIVER_NAME		"vlsi_ir"
#define DRIVER_VERSION		"v0.5"
#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"

MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

/********************************************************/

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>

#include "vlsi_ir.h"

/********************************************************/

static /* const */ char drivername[] = DRIVER_NAME;

static const struct pci_device_id vlsi_irda_table[] = {
	{
		.class =	PCI_CLASS_WIRELESS_IRDA << 8,
		.class_mask =	PCI_CLASS_SUBCLASS_MASK << 8,
		.vendor =	PCI_VENDOR_ID_VLSI,
		.device =	PCI_DEVICE_ID_VLSI_82C147,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ /* all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, vlsi_irda_table);

/********************************************************/

/*	clksrc: which clock source to be used
 *		0: auto - try PLL, fallback to 40MHz XCLK
 *		1: on-chip 48MHz PLL
 *		2: external 48MHz XCLK
 *		3: external 40MHz XCLK (HP OB-800)
 */

static int clksrc = 0;			/* default is 0(auto) */
module_param(clksrc, int, 0);
MODULE_PARM_DESC(clksrc, "clock input source selection");

/*	ringsize: size of the tx and rx descriptor rings
 *		independent for tx and rx
 *		specify as ringsize=tx[,rx]
 *		allowed values: 4, 8, 16, 32, 64
 *		Due to the IrDA 1.x max. allowed window size=7,
 *		there should be no gain when using rings larger than 8
 */

static int ringsize[] = {8,8};		/* default is tx=8 / rx=8 */
module_param_array(ringsize, int, NULL, 0);
MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");

/*	sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
 *		0: very short, 1.5us (exception: 6us at 2.4 kbaud)
 *		1: nominal 3/16 bittime width
 *	note: IrDA compliant peer devices should be happy regardless
 *		which one is used.
Primary goal is to save some power 105 * on the sender's side - at 9.6kbaud for example the short 106 * pulse width saves more than 90% of the transmitted IR power. 107 */ 108 109static int sirpulse = 1; /* default is 3/16 bittime */ 110module_param(sirpulse, int, 0); 111MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning"); 112 113/* qos_mtt_bits: encoded min-turn-time value we require the peer device 114 * to use before transmitting to us. "Type 1" (per-station) 115 * bitfield according to IrLAP definition (section 6.6.8) 116 * Don't know which transceiver is used by my OB800 - the 117 * pretty common HP HDLS-1100 requires 1 msec - so lets use this. 118 */ 119 120static int qos_mtt_bits = 0x07; /* default is 1 ms or more */ 121module_param(qos_mtt_bits, int, 0); 122MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time"); 123 124/********************************************************/ 125 126static void vlsi_reg_debug(unsigned iobase, const char *s) 127{ 128 int i; 129 130 printk(KERN_DEBUG "%s: ", s); 131 for (i = 0; i < 0x20; i++) 132 printk("%02x", (unsigned)inb((iobase+i))); 133 printk("\n"); 134} 135 136static void vlsi_ring_debug(struct vlsi_ring *r) 137{ 138 struct ring_descr *rd; 139 unsigned i; 140 141 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 142 __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); 143 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__, 144 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); 145 for (i = 0; i < r->size; i++) { 146 rd = &r->rd[i]; 147 printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i); 148 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 149 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n", 150 __func__, (unsigned) rd_get_status(rd), 151 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 152 } 153} 154 155/********************************************************/ 156 157/* needed regardless of CONFIG_PROC_FS */ 158static struct proc_dir_entry *vlsi_proc_root = NULL; 159 160#ifdef CONFIG_PROC_FS 161 162static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev) 163{ 164 unsigned iobase = pci_resource_start(pdev, 0); 165 unsigned i; 166 167 seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n", 168 pci_name(pdev), (int)pdev->vendor, (int)pdev->device); 169 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state); 170 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n", 171 pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask); 172 seq_printf(seq, "hw registers: "); 173 for (i = 0; i < 0x20; i++) 174 seq_printf(seq, "%02x", (unsigned)inb((iobase+i))); 175 seq_printf(seq, "\n"); 176} 177 178static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev) 179{ 180 vlsi_irda_dev_t *idev = netdev_priv(ndev); 181 u8 byte; 182 u16 word; 183 unsigned delta1, delta2; 184 struct timeval now; 185 unsigned iobase = ndev->base_addr; 186 187 seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name, 188 netif_device_present(ndev) ? "attached" : "detached", 189 netif_running(ndev) ? "running" : "not running", 190 netif_carrier_ok(ndev) ? "carrier ok" : "no carrier", 191 netif_queue_stopped(ndev) ? 
"queue stopped" : "queue running"); 192 193 if (!netif_running(ndev)) 194 return; 195 196 seq_printf(seq, "\nhw-state:\n"); 197 pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte); 198 seq_printf(seq, "IRMISC:%s%s%s uart%s", 199 (byte&IRMISC_IRRAIL) ? " irrail" : "", 200 (byte&IRMISC_IRPD) ? " irpd" : "", 201 (byte&IRMISC_UARTTST) ? " uarttest" : "", 202 (byte&IRMISC_UARTEN) ? "@" : " disabled\n"); 203 if (byte&IRMISC_UARTEN) { 204 seq_printf(seq, "0x%s\n", 205 (byte&2) ? ((byte&1) ? "3e8" : "2e8") 206 : ((byte&1) ? "3f8" : "2f8")); 207 } 208 pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte); 209 seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n", 210 (byte&CLKCTL_PD_INV) ? "powered" : "down", 211 (byte&CLKCTL_LOCK) ? " locked" : "", 212 (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "", 213 (byte&CLKCTL_CLKSTP) ? "stopped" : "running", 214 (byte&CLKCTL_WAKE) ? "enabled" : "disabled"); 215 pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte); 216 seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte); 217 218 byte = inb(iobase+VLSI_PIO_IRINTR); 219 seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n", 220 (byte&IRINTR_ACTEN) ? " ACTEN" : "", 221 (byte&IRINTR_RPKTEN) ? " RPKTEN" : "", 222 (byte&IRINTR_TPKTEN) ? " TPKTEN" : "", 223 (byte&IRINTR_OE_EN) ? " OE_EN" : "", 224 (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "", 225 (byte&IRINTR_RPKTINT) ? " RPKTINT" : "", 226 (byte&IRINTR_TPKTINT) ? " TPKTINT" : "", 227 (byte&IRINTR_OE_INT) ? " OE_INT" : ""); 228 word = inw(iobase+VLSI_PIO_RINGPTR); 229 seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word)); 230 word = inw(iobase+VLSI_PIO_RINGBASE); 231 seq_printf(seq, "RINGBASE: busmap=0x%08x\n", 232 ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24)); 233 word = inw(iobase+VLSI_PIO_RINGSIZE); 234 seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word), 235 RINGSIZE_TO_TXSIZE(word)); 236 237 word = inw(iobase+VLSI_PIO_IRCFG); 238 seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 239 (word&IRCFG_LOOP) ? " LOOP" : "", 240 (word&IRCFG_ENTX) ? " ENTX" : "", 241 (word&IRCFG_ENRX) ? " ENRX" : "", 242 (word&IRCFG_MSTR) ? " MSTR" : "", 243 (word&IRCFG_RXANY) ? " RXANY" : "", 244 (word&IRCFG_CRC16) ? " CRC16" : "", 245 (word&IRCFG_FIR) ? " FIR" : "", 246 (word&IRCFG_MIR) ? " MIR" : "", 247 (word&IRCFG_SIR) ? " SIR" : "", 248 (word&IRCFG_SIRFILT) ? " SIRFILT" : "", 249 (word&IRCFG_SIRTEST) ? " SIRTEST" : "", 250 (word&IRCFG_TXPOL) ? " TXPOL" : "", 251 (word&IRCFG_RXPOL) ? " RXPOL" : ""); 252 word = inw(iobase+VLSI_PIO_IRENABLE); 253 seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n", 254 (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "", 255 (word&IRENABLE_CFGER) ? " CFGERR" : "", 256 (word&IRENABLE_FIR_ON) ? " FIR_ON" : "", 257 (word&IRENABLE_MIR_ON) ? " MIR_ON" : "", 258 (word&IRENABLE_SIR_ON) ? " SIR_ON" : "", 259 (word&IRENABLE_ENTXST) ? " ENTXST" : "", 260 (word&IRENABLE_ENRXST) ? " ENRXST" : "", 261 (word&IRENABLE_CRC16_ON) ? 
" CRC16_ON" : ""); 262 word = inw(iobase+VLSI_PIO_PHYCTL); 263 seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 264 (unsigned)PHYCTL_TO_BAUD(word), 265 (unsigned)PHYCTL_TO_PLSWID(word), 266 (unsigned)PHYCTL_TO_PREAMB(word)); 267 word = inw(iobase+VLSI_PIO_NPHYCTL); 268 seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 269 (unsigned)PHYCTL_TO_BAUD(word), 270 (unsigned)PHYCTL_TO_PLSWID(word), 271 (unsigned)PHYCTL_TO_PREAMB(word)); 272 word = inw(iobase+VLSI_PIO_MAXPKT); 273 seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word); 274 word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 275 seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word); 276 277 seq_printf(seq, "\nsw-state:\n"); 278 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 279 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR")); 280 do_gettimeofday(&now); 281 if (now.tv_usec >= idev->last_rx.tv_usec) { 282 delta2 = now.tv_usec - idev->last_rx.tv_usec; 283 delta1 = 0; 284 } 285 else { 286 delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec; 287 delta1 = 1; 288 } 289 seq_printf(seq, "last rx: %lu.%06u sec\n", 290 now.tv_sec - idev->last_rx.tv_sec - delta1, delta2); 291 292 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 293 ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors, 294 ndev->stats.rx_dropped); 295 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n", 296 ndev->stats.rx_over_errors, ndev->stats.rx_length_errors, 297 ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors); 298 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n", 299 ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors, 300 ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors); 301 302} 303 304static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) 305{ 306 struct ring_descr *rd; 307 unsigned i, j; 308 int h, t; 309 310 seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 311 r->size, r->mask, r->len, r->dir, r->rd[0].hw); 312 h = atomic_read(&r->head) & r->mask; 313 t = atomic_read(&r->tail) & r->mask; 314 seq_printf(seq, "head = %d / tail = %d ", h, t); 315 if (h == t) 316 seq_printf(seq, "(empty)\n"); 317 else { 318 if (((t+1)&r->mask) == h) 319 seq_printf(seq, "(full)\n"); 320 else 321 seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 322 rd = &r->rd[h]; 323 j = (unsigned) rd_get_count(rd); 324 seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", 325 h, (unsigned)rd_get_status(rd), j); 326 if (j > 0) { 327 seq_printf(seq, " data: %*ph\n", 328 min_t(unsigned, j, 20), rd->buf); 329 } 330 } 331 for (i = 0; i < r->size; i++) { 332 rd = &r->rd[i]; 333 seq_printf(seq, "> ring descr %u: ", i); 334 seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 335 seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n", 336 (unsigned) rd_get_status(rd), 337 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 338 } 339} 340 341static int vlsi_seq_show(struct seq_file *seq, void *v) 342{ 343 struct net_device *ndev = seq->private; 344 vlsi_irda_dev_t *idev = netdev_priv(ndev); 345 unsigned long flags; 346 347 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION); 348 seq_printf(seq, "clksrc: %s\n", 349 (clksrc>=2) ? 
((clksrc==3)?"40MHz XCLK":"48MHz XCLK") 350 : ((clksrc==1)?"48MHz PLL":"autodetect")); 351 seq_printf(seq, "ringsize: tx=%d / rx=%d\n", 352 ringsize[0], ringsize[1]); 353 seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short"); 354 seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits); 355 356 spin_lock_irqsave(&idev->lock, flags); 357 if (idev->pdev != NULL) { 358 vlsi_proc_pdev(seq, idev->pdev); 359 360 if (idev->pdev->current_state == 0) 361 vlsi_proc_ndev(seq, ndev); 362 else 363 seq_printf(seq, "\nPCI controller down - resume_ok = %d\n", 364 idev->resume_ok); 365 if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) { 366 seq_printf(seq, "\n--------- RX ring -----------\n\n"); 367 vlsi_proc_ring(seq, idev->rx_ring); 368 seq_printf(seq, "\n--------- TX ring -----------\n\n"); 369 vlsi_proc_ring(seq, idev->tx_ring); 370 } 371 } 372 seq_printf(seq, "\n"); 373 spin_unlock_irqrestore(&idev->lock, flags); 374 375 return 0; 376} 377 378static int vlsi_seq_open(struct inode *inode, struct file *file) 379{ 380 return single_open(file, vlsi_seq_show, PDE_DATA(inode)); 381} 382 383static const struct file_operations vlsi_proc_fops = { 384 .owner = THIS_MODULE, 385 .open = vlsi_seq_open, 386 .read = seq_read, 387 .llseek = seq_lseek, 388 .release = single_release, 389}; 390 391#define VLSI_PROC_FOPS (&vlsi_proc_fops) 392 393#else /* CONFIG_PROC_FS */ 394#define VLSI_PROC_FOPS NULL 395#endif 396 397/********************************************************/ 398 399static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, 400 unsigned size, unsigned len, int dir) 401{ 402 struct vlsi_ring *r; 403 struct ring_descr *rd; 404 unsigned i, j; 405 dma_addr_t busaddr; 406 407 if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */ 408 return NULL; 409 410 r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL); 411 if (!r) 412 return NULL; 413 memset(r, 0, sizeof(*r)); 414 415 r->pdev = pdev; 416 r->dir = dir; 417 r->len = len; 418 r->rd = (struct ring_descr *)(r+1); 419 r->mask = size - 1; 420 r->size = size; 421 atomic_set(&r->head, 0); 422 atomic_set(&r->tail, 0); 423 424 for (i = 0; i < size; i++) { 425 rd = r->rd + i; 426 memset(rd, 0, sizeof(*rd)); 427 rd->hw = hwmap + i; 428 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 429 if (rd->buf == NULL || 430 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 431 if (rd->buf) { 432 IRDA_ERROR("%s: failed to create PCI-MAP for %p", 433 __func__, rd->buf); 434 kfree(rd->buf); 435 rd->buf = NULL; 436 } 437 for (j = 0; j < i; j++) { 438 rd = r->rd + j; 439 busaddr = rd_get_addr(rd); 440 rd_set_addr_status(rd, 0, 0); 441 if (busaddr) 442 pci_unmap_single(pdev, busaddr, len, dir); 443 kfree(rd->buf); 444 rd->buf = NULL; 445 } 446 kfree(r); 447 return NULL; 448 } 449 rd_set_addr_status(rd, busaddr, 0); 450 /* initially, the dma buffer is owned by the CPU */ 451 rd->skb = NULL; 452 } 453 return r; 454} 455 456static int vlsi_free_ring(struct vlsi_ring *r) 457{ 458 struct ring_descr *rd; 459 unsigned i; 460 dma_addr_t busaddr; 461 462 for (i = 0; i < r->size; i++) { 463 rd = r->rd + i; 464 if (rd->skb) 465 dev_kfree_skb_any(rd->skb); 466 busaddr = rd_get_addr(rd); 467 rd_set_addr_status(rd, 0, 0); 468 if (busaddr) 469 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 470 kfree(rd->buf); 471 } 472 kfree(r); 473 return 0; 474} 475 476static int vlsi_create_hwif(vlsi_irda_dev_t *idev) 477{ 478 char *ringarea; 479 struct ring_descr_hw *hwmap; 480 481 idev->virtaddr = NULL; 482 
idev->busaddr = 0; 483 484 ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE, 485 &idev->busaddr); 486 if (!ringarea) { 487 IRDA_ERROR("%s: insufficient memory for descriptor rings\n", 488 __func__); 489 goto out; 490 } 491 492 hwmap = (struct ring_descr_hw *)ringarea; 493 idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1], 494 XFER_BUF_SIZE, PCI_DMA_FROMDEVICE); 495 if (idev->rx_ring == NULL) 496 goto out_unmap; 497 498 hwmap += MAX_RING_DESCR; 499 idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0], 500 XFER_BUF_SIZE, PCI_DMA_TODEVICE); 501 if (idev->tx_ring == NULL) 502 goto out_free_rx; 503 504 idev->virtaddr = ringarea; 505 return 0; 506 507out_free_rx: 508 vlsi_free_ring(idev->rx_ring); 509out_unmap: 510 idev->rx_ring = idev->tx_ring = NULL; 511 pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr); 512 idev->busaddr = 0; 513out: 514 return -ENOMEM; 515} 516 517static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev) 518{ 519 vlsi_free_ring(idev->rx_ring); 520 vlsi_free_ring(idev->tx_ring); 521 idev->rx_ring = idev->tx_ring = NULL; 522 523 if (idev->busaddr) 524 pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr); 525 526 idev->virtaddr = NULL; 527 idev->busaddr = 0; 528 529 return 0; 530} 531 532/********************************************************/ 533 534static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) 535{ 536 u16 status; 537 int crclen, len = 0; 538 struct sk_buff *skb; 539 int ret = 0; 540 struct net_device *ndev = pci_get_drvdata(r->pdev); 541 vlsi_irda_dev_t *idev = netdev_priv(ndev); 542 543 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 544 /* dma buffer now owned by the CPU */ 545 status = rd_get_status(rd); 546 if (status & RD_RX_ERROR) { 547 if (status & RD_RX_OVER) 548 ret |= VLSI_RX_OVER; 549 if (status & RD_RX_LENGTH) 550 ret |= VLSI_RX_LENGTH; 551 if (status & RD_RX_PHYERR) 552 ret |= VLSI_RX_FRAME; 553 if (status & RD_RX_CRCERR) 554 ret |= VLSI_RX_CRC; 555 goto done; 556 } 557 558 len = rd_get_count(rd); 559 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16); 560 len -= crclen; /* remove trailing CRC */ 561 if (len <= 0) { 562 IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len); 563 ret |= VLSI_RX_DROP; 564 goto done; 565 } 566 567 if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */ 568 569 /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the 570 * endian-adjustment there just in place will dirty a cache line 571 * which belongs to the map and thus we must be sure it will 572 * get flushed before giving the buffer back to hardware. 573 * vlsi_fill_rx() will do this anyway - but here we rely on. 574 */ 575 le16_to_cpus(rd->buf+len); 576 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { 577 IRDA_DEBUG(0, "%s: crc error\n", __func__); 578 ret |= VLSI_RX_CRC; 579 goto done; 580 } 581 } 582 583 if (!rd->skb) { 584 IRDA_WARNING("%s: rx packet lost\n", __func__); 585 ret |= VLSI_RX_DROP; 586 goto done; 587 } 588 589 skb = rd->skb; 590 rd->skb = NULL; 591 skb->dev = ndev; 592 memcpy(skb_put(skb,len), rd->buf, len); 593 skb_reset_mac_header(skb); 594 if (in_interrupt()) 595 netif_rx(skb); 596 else 597 netif_rx_ni(skb); 598 599done: 600 rd_set_status(rd, 0); 601 rd_set_count(rd, 0); 602 /* buffer still owned by CPU */ 603 604 return (ret) ? 
-ret : len; 605} 606 607static void vlsi_fill_rx(struct vlsi_ring *r) 608{ 609 struct ring_descr *rd; 610 611 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { 612 if (rd_is_active(rd)) { 613 IRDA_WARNING("%s: driver bug: rx descr race with hw\n", 614 __func__); 615 vlsi_ring_debug(r); 616 break; 617 } 618 if (!rd->skb) { 619 rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE); 620 if (rd->skb) { 621 skb_reserve(rd->skb,1); 622 rd->skb->protocol = htons(ETH_P_IRDA); 623 } 624 else 625 break; /* probably not worth logging? */ 626 } 627 /* give dma buffer back to busmaster */ 628 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 629 rd_activate(rd); 630 } 631} 632 633static void vlsi_rx_interrupt(struct net_device *ndev) 634{ 635 vlsi_irda_dev_t *idev = netdev_priv(ndev); 636 struct vlsi_ring *r = idev->rx_ring; 637 struct ring_descr *rd; 638 int ret; 639 640 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 641 642 if (rd_is_active(rd)) 643 break; 644 645 ret = vlsi_process_rx(r, rd); 646 647 if (ret < 0) { 648 ret = -ret; 649 ndev->stats.rx_errors++; 650 if (ret & VLSI_RX_DROP) 651 ndev->stats.rx_dropped++; 652 if (ret & VLSI_RX_OVER) 653 ndev->stats.rx_over_errors++; 654 if (ret & VLSI_RX_LENGTH) 655 ndev->stats.rx_length_errors++; 656 if (ret & VLSI_RX_FRAME) 657 ndev->stats.rx_frame_errors++; 658 if (ret & VLSI_RX_CRC) 659 ndev->stats.rx_crc_errors++; 660 } 661 else if (ret > 0) { 662 ndev->stats.rx_packets++; 663 ndev->stats.rx_bytes += ret; 664 } 665 } 666 667 do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */ 668 669 vlsi_fill_rx(r); 670 671 if (ring_first(r) == NULL) { 672 /* we are in big trouble, if this should ever happen */ 673 IRDA_ERROR("%s: rx ring exhausted!\n", __func__); 674 vlsi_ring_debug(r); 675 } 676 else 677 outw(0, ndev->base_addr+VLSI_PIO_PROMPT); 678} 679 680/* caller must have stopped the controller from busmastering */ 681 682static void vlsi_unarm_rx(vlsi_irda_dev_t *idev) 683{ 684 struct net_device *ndev = pci_get_drvdata(idev->pdev); 685 struct vlsi_ring *r = idev->rx_ring; 686 struct ring_descr *rd; 687 int ret; 688 689 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 690 691 ret = 0; 692 if (rd_is_active(rd)) { 693 rd_set_status(rd, 0); 694 if (rd_get_count(rd)) { 695 IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__); 696 ret = -VLSI_RX_DROP; 697 } 698 rd_set_count(rd, 0); 699 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 700 if (rd->skb) { 701 dev_kfree_skb_any(rd->skb); 702 rd->skb = NULL; 703 } 704 } 705 else 706 ret = vlsi_process_rx(r, rd); 707 708 if (ret < 0) { 709 ret = -ret; 710 ndev->stats.rx_errors++; 711 if (ret & VLSI_RX_DROP) 712 ndev->stats.rx_dropped++; 713 if (ret & VLSI_RX_OVER) 714 ndev->stats.rx_over_errors++; 715 if (ret & VLSI_RX_LENGTH) 716 ndev->stats.rx_length_errors++; 717 if (ret & VLSI_RX_FRAME) 718 ndev->stats.rx_frame_errors++; 719 if (ret & VLSI_RX_CRC) 720 ndev->stats.rx_crc_errors++; 721 } 722 else if (ret > 0) { 723 ndev->stats.rx_packets++; 724 ndev->stats.rx_bytes += ret; 725 } 726 } 727} 728 729/********************************************************/ 730 731static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd) 732{ 733 u16 status; 734 int len; 735 int ret; 736 737 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 738 /* dma buffer now owned by the CPU */ 739 status = rd_get_status(rd); 740 if (status & RD_TX_UNDRN) 741 ret = VLSI_TX_FIFO; 742 else 743 ret = 0; 744 rd_set_status(rd, 0); 745 746 
if (rd->skb) { 747 len = rd->skb->len; 748 dev_kfree_skb_any(rd->skb); 749 rd->skb = NULL; 750 } 751 else /* tx-skb already freed? - should never happen */ 752 len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */ 753 754 rd_set_count(rd, 0); 755 /* dma buffer still owned by the CPU */ 756 757 return (ret) ? -ret : len; 758} 759 760static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) 761{ 762 u16 nphyctl; 763 u16 config; 764 unsigned mode; 765 int ret; 766 int baudrate; 767 int fifocnt; 768 769 baudrate = idev->new_baud; 770 IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud); 771 if (baudrate == 4000000) { 772 mode = IFF_FIR; 773 config = IRCFG_FIR; 774 nphyctl = PHYCTL_FIR; 775 } 776 else if (baudrate == 1152000) { 777 mode = IFF_MIR; 778 config = IRCFG_MIR | IRCFG_CRC16; 779 nphyctl = PHYCTL_MIR(clksrc==3); 780 } 781 else { 782 mode = IFF_SIR; 783 config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY; 784 switch(baudrate) { 785 default: 786 IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n", 787 __func__, baudrate); 788 baudrate = 9600; 789 /* fallthru */ 790 case 2400: 791 case 9600: 792 case 19200: 793 case 38400: 794 case 57600: 795 case 115200: 796 nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3); 797 break; 798 } 799 } 800 config |= IRCFG_MSTR | IRCFG_ENRX; 801 802 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 803 if (fifocnt != 0) { 804 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt); 805 } 806 807 outw(0, iobase+VLSI_PIO_IRENABLE); 808 outw(config, iobase+VLSI_PIO_IRCFG); 809 outw(nphyctl, iobase+VLSI_PIO_NPHYCTL); 810 wmb(); 811 outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE); 812 mb(); 813 814 udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */ 815 816 /* read back settings for validation */ 817 818 config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK; 819 820 if (mode == IFF_FIR) 821 config ^= IRENABLE_FIR_ON; 822 else if (mode == IFF_MIR) 823 config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON); 824 else 825 config ^= IRENABLE_SIR_ON; 826 827 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { 828 IRDA_WARNING("%s: failed to set %s mode!\n", __func__, 829 (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR")); 830 ret = -1; 831 } 832 else { 833 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { 834 IRDA_WARNING("%s: failed to apply baudrate %d\n", 835 __func__, baudrate); 836 ret = -1; 837 } 838 else { 839 idev->mode = mode; 840 idev->baud = baudrate; 841 idev->new_baud = 0; 842 ret = 0; 843 } 844 } 845 846 if (ret) 847 vlsi_reg_debug(iobase,__func__); 848 849 return ret; 850} 851 852static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, 853 struct net_device *ndev) 854{ 855 vlsi_irda_dev_t *idev = netdev_priv(ndev); 856 struct vlsi_ring *r = idev->tx_ring; 857 struct ring_descr *rd; 858 unsigned long flags; 859 unsigned iobase = ndev->base_addr; 860 u8 status; 861 u16 config; 862 int mtt; 863 int len, speed; 864 struct timeval now, ready; 865 char *msg = NULL; 866 867 speed = irda_get_next_speed(skb); 868 spin_lock_irqsave(&idev->lock, flags); 869 if (speed != -1 && speed != idev->baud) { 870 netif_stop_queue(ndev); 871 idev->new_baud = speed; 872 status = RD_TX_CLRENTX; /* stop tx-ring after this frame */ 873 } 874 else 875 status = 0; 876 877 if (skb->len == 0) { 878 /* handle zero packets - should be speed change */ 879 if (status == 0) { 880 msg = "bogus zero-length packet"; 881 goto drop_unlock; 882 } 883 884 /* due to the completely asynch tx operation we might have 885 
* IrLAP racing with the hardware here, f.e. if the controller 886 * is just sending the last packet with current speed while 887 * the LAP is already switching the speed using synchronous 888 * len=0 packet. Immediate execution would lead to hw lockup 889 * requiring a powercycle to reset. Good candidate to trigger 890 * this is the final UA:RSP packet after receiving a DISC:CMD 891 * when getting the LAP down. 892 * Note that we are not protected by the queue_stop approach 893 * because the final UA:RSP arrives _without_ request to apply 894 * new-speed-after-this-packet - hence the driver doesn't know 895 * this was the last packet and doesn't stop the queue. So the 896 * forced switch to default speed from LAP gets through as fast 897 * as only some 10 usec later while the UA:RSP is still processed 898 * by the hardware and we would get screwed. 899 */ 900 901 if (ring_first(idev->tx_ring) == NULL) { 902 /* no race - tx-ring already empty */ 903 vlsi_set_baud(idev, iobase); 904 netif_wake_queue(ndev); 905 } 906 else 907 ; 908 /* keep the speed change pending like it would 909 * for any len>0 packet. tx completion interrupt 910 * will apply it when the tx ring becomes empty. 911 */ 912 spin_unlock_irqrestore(&idev->lock, flags); 913 dev_kfree_skb_any(skb); 914 return NETDEV_TX_OK; 915 } 916 917 /* sanity checks - simply drop the packet */ 918 919 rd = ring_last(r); 920 if (!rd) { 921 msg = "ring full, but queue wasn't stopped"; 922 goto drop_unlock; 923 } 924 925 if (rd_is_active(rd)) { 926 msg = "entry still owned by hw"; 927 goto drop_unlock; 928 } 929 930 if (!rd->buf) { 931 msg = "tx ring entry without pci buffer"; 932 goto drop_unlock; 933 } 934 935 if (rd->skb) { 936 msg = "ring entry with old skb still attached"; 937 goto drop_unlock; 938 } 939 940 /* no need for serialization or interrupt disable during mtt */ 941 spin_unlock_irqrestore(&idev->lock, flags); 942 943 if ((mtt = irda_get_mtt(skb)) > 0) { 944 945 ready.tv_usec = idev->last_rx.tv_usec + mtt; 946 ready.tv_sec = idev->last_rx.tv_sec; 947 if (ready.tv_usec >= 1000000) { 948 ready.tv_usec -= 1000000; 949 ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */ 950 } 951 for(;;) { 952 do_gettimeofday(&now); 953 if (now.tv_sec > ready.tv_sec || 954 (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec)) 955 break; 956 udelay(100); 957 /* must not sleep here - called under netif_tx_lock! */ 958 } 959 } 960 961 /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu() 962 * after subsequent tx-completion 963 */ 964 965 if (idev->mode == IFF_SIR) { 966 status |= RD_TX_DISCRC; /* no hw-crc creation */ 967 len = async_wrap_skb(skb, rd->buf, r->len); 968 969 /* Some rare worst case situation in SIR mode might lead to 970 * potential buffer overflow. The wrapper detects this, returns 971 * with a shortened frame (without FCS/EOF) but doesn't provide 972 * any error indication about the invalid packet which we are 973 * going to transmit. 974 * Therefore we log if the buffer got filled to the point, where the 975 * wrapper would abort, i.e. when there are less than 5 bytes left to 976 * allow appending the FCS/EOF. 
977 */ 978 979 if (len >= r->len-5) 980 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n", 981 __func__); 982 } 983 else { 984 /* hw deals with MIR/FIR mode wrapping */ 985 status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */ 986 len = skb->len; 987 if (len > r->len) { 988 msg = "frame exceeds tx buffer length"; 989 goto drop; 990 } 991 else 992 skb_copy_from_linear_data(skb, rd->buf, len); 993 } 994 995 rd->skb = skb; /* remember skb for tx-complete stats */ 996 997 rd_set_count(rd, len); 998 rd_set_status(rd, status); /* not yet active! */ 999 1000 /* give dma buffer back to busmaster-hw (flush caches to make 1001 * CPU-driven changes visible from the pci bus). 1002 */ 1003 1004 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 1005 1006/* Switching to TX mode here races with the controller 1007 * which may stop TX at any time when fetching an inactive descriptor 1008 * or one with CLR_ENTX set. So we switch on TX only, if TX was not running 1009 * _after_ the new descriptor was activated on the ring. This ensures 1010 * we will either find TX already stopped or we can be sure, there 1011 * will be a TX-complete interrupt even if the chip stopped doing 1012 * TX just after we found it still running. The ISR will then find 1013 * the non-empty ring and restart TX processing. The enclosing 1014 * spinlock provides the correct serialization to prevent race with isr. 1015 */ 1016 1017 spin_lock_irqsave(&idev->lock,flags); 1018 1019 rd_activate(rd); 1020 1021 if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1022 int fifocnt; 1023 1024 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1025 if (fifocnt != 0) { 1026 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt); 1027 } 1028 1029 config = inw(iobase+VLSI_PIO_IRCFG); 1030 mb(); 1031 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1032 wmb(); 1033 outw(0, iobase+VLSI_PIO_PROMPT); 1034 } 1035 1036 if (ring_put(r) == NULL) { 1037 netif_stop_queue(ndev); 1038 IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__); 1039 } 1040 spin_unlock_irqrestore(&idev->lock, flags); 1041 1042 return NETDEV_TX_OK; 1043 1044drop_unlock: 1045 spin_unlock_irqrestore(&idev->lock, flags); 1046drop: 1047 IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg); 1048 dev_kfree_skb_any(skb); 1049 ndev->stats.tx_errors++; 1050 ndev->stats.tx_dropped++; 1051 /* Don't even think about returning NET_XMIT_DROP (=1) here! 
1052 * In fact any retval!=0 causes the packet scheduler to requeue the 1053 * packet for later retry of transmission - which isn't exactly 1054 * what we want after we've just called dev_kfree_skb_any ;-) 1055 */ 1056 return NETDEV_TX_OK; 1057} 1058 1059static void vlsi_tx_interrupt(struct net_device *ndev) 1060{ 1061 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1062 struct vlsi_ring *r = idev->tx_ring; 1063 struct ring_descr *rd; 1064 unsigned iobase; 1065 int ret; 1066 u16 config; 1067 1068 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1069 1070 if (rd_is_active(rd)) 1071 break; 1072 1073 ret = vlsi_process_tx(r, rd); 1074 1075 if (ret < 0) { 1076 ret = -ret; 1077 ndev->stats.tx_errors++; 1078 if (ret & VLSI_TX_DROP) 1079 ndev->stats.tx_dropped++; 1080 if (ret & VLSI_TX_FIFO) 1081 ndev->stats.tx_fifo_errors++; 1082 } 1083 else if (ret > 0){ 1084 ndev->stats.tx_packets++; 1085 ndev->stats.tx_bytes += ret; 1086 } 1087 } 1088 1089 iobase = ndev->base_addr; 1090 1091 if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */ 1092 vlsi_set_baud(idev, iobase); 1093 1094 config = inw(iobase+VLSI_PIO_IRCFG); 1095 if (rd == NULL) /* tx ring empty: re-enable rx */ 1096 outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG); 1097 1098 else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1099 int fifocnt; 1100 1101 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1102 if (fifocnt != 0) { 1103 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", 1104 __func__, fifocnt); 1105 } 1106 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1107 } 1108 1109 outw(0, iobase+VLSI_PIO_PROMPT); 1110 1111 if (netif_queue_stopped(ndev) && !idev->new_baud) { 1112 netif_wake_queue(ndev); 1113 IRDA_DEBUG(3, "%s: queue awoken\n", __func__); 1114 } 1115} 1116 1117/* caller must have stopped the controller from busmastering */ 1118 1119static void vlsi_unarm_tx(vlsi_irda_dev_t *idev) 1120{ 1121 struct net_device *ndev = pci_get_drvdata(idev->pdev); 1122 struct vlsi_ring *r = idev->tx_ring; 1123 struct ring_descr *rd; 1124 int ret; 1125 1126 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1127 1128 ret = 0; 1129 if (rd_is_active(rd)) { 1130 rd_set_status(rd, 0); 1131 rd_set_count(rd, 0); 1132 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 1133 if (rd->skb) { 1134 dev_kfree_skb_any(rd->skb); 1135 rd->skb = NULL; 1136 } 1137 IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__); 1138 ret = -VLSI_TX_DROP; 1139 } 1140 else 1141 ret = vlsi_process_tx(r, rd); 1142 1143 if (ret < 0) { 1144 ret = -ret; 1145 ndev->stats.tx_errors++; 1146 if (ret & VLSI_TX_DROP) 1147 ndev->stats.tx_dropped++; 1148 if (ret & VLSI_TX_FIFO) 1149 ndev->stats.tx_fifo_errors++; 1150 } 1151 else if (ret > 0){ 1152 ndev->stats.tx_packets++; 1153 ndev->stats.tx_bytes += ret; 1154 } 1155 } 1156 1157} 1158 1159/********************************************************/ 1160 1161static int vlsi_start_clock(struct pci_dev *pdev) 1162{ 1163 u8 clkctl, lock; 1164 int i, count; 1165 1166 if (clksrc < 2) { /* auto or PLL: try PLL */ 1167 clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP; 1168 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1169 1170 /* procedure to detect PLL lock synchronisation: 1171 * after 0.5 msec initial delay we expect to find 3 PLL lock 1172 * indications within 10 msec for successful PLL detection. 
1173 */ 1174 udelay(500); 1175 count = 0; 1176 for (i = 500; i <= 10000; i += 50) { /* max 10 msec */ 1177 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock); 1178 if (lock&CLKCTL_LOCK) { 1179 if (++count >= 3) 1180 break; 1181 } 1182 udelay(50); 1183 } 1184 if (count < 3) { 1185 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ 1186 IRDA_ERROR("%s: no PLL or failed to lock!\n", 1187 __func__); 1188 clkctl = CLKCTL_CLKSTP; 1189 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1190 return -1; 1191 } 1192 else /* was: clksrc=0(auto) */ 1193 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ 1194 1195 IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n", 1196 __func__, clksrc); 1197 } 1198 else 1199 clksrc = 1; /* got successful PLL lock */ 1200 } 1201 1202 if (clksrc != 1) { 1203 /* we get here if either no PLL detected in auto-mode or 1204 an external clock source was explicitly specified */ 1205 1206 clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP; 1207 if (clksrc == 3) 1208 clkctl |= CLKCTL_XCKSEL; 1209 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1210 1211 /* no way to test for working XCLK */ 1212 } 1213 else 1214 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1215 1216 /* ok, now going to connect the chip with the clock source */ 1217 1218 clkctl &= ~CLKCTL_CLKSTP; 1219 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1220 1221 return 0; 1222} 1223 1224static void vlsi_stop_clock(struct pci_dev *pdev) 1225{ 1226 u8 clkctl; 1227 1228 /* disconnect chip from clock source */ 1229 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1230 clkctl |= CLKCTL_CLKSTP; 1231 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1232 1233 /* disable all clock sources */ 1234 clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV); 1235 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1236} 1237 1238/********************************************************/ 1239 1240/* writing all-zero to the VLSI PCI IO register area seems to prevent 1241 * some occasional situations where the hardware fails (symptoms are 1242 * what appears as stalled tx/rx state machines, i.e. everything ok for 1243 * receive or transmit but hw makes no progress or is unable to access 1244 * the bus memory locations). 1245 * Best place to call this is immediately after/before the internal clock 1246 * gets started/stopped. 
1247 */ 1248 1249static inline void vlsi_clear_regs(unsigned iobase) 1250{ 1251 unsigned i; 1252 const unsigned chip_io_extent = 32; 1253 1254 for (i = 0; i < chip_io_extent; i += sizeof(u16)) 1255 outw(0, iobase + i); 1256} 1257 1258static int vlsi_init_chip(struct pci_dev *pdev) 1259{ 1260 struct net_device *ndev = pci_get_drvdata(pdev); 1261 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1262 unsigned iobase; 1263 u16 ptr; 1264 1265 /* start the clock and clean the registers */ 1266 1267 if (vlsi_start_clock(pdev)) { 1268 IRDA_ERROR("%s: no valid clock source\n", __func__); 1269 return -1; 1270 } 1271 iobase = ndev->base_addr; 1272 vlsi_clear_regs(iobase); 1273 1274 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */ 1275 1276 outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */ 1277 1278 /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */ 1279 1280 outw(0, iobase+VLSI_PIO_IRCFG); 1281 wmb(); 1282 1283 outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */ 1284 1285 outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE); 1286 1287 outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size), 1288 iobase+VLSI_PIO_RINGSIZE); 1289 1290 ptr = inw(iobase+VLSI_PIO_RINGPTR); 1291 atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr)); 1292 atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr)); 1293 atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr)); 1294 atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr)); 1295 1296 vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */ 1297 1298 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */ 1299 wmb(); 1300 1301 /* DO NOT BLINDLY ENABLE IRINTR_ACTEN! 
1302 * basically every received pulse fires an ACTIVITY-INT 1303 * leading to >>1000 INT's per second instead of few 10 1304 */ 1305 1306 outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR); 1307 1308 return 0; 1309} 1310 1311static int vlsi_start_hw(vlsi_irda_dev_t *idev) 1312{ 1313 struct pci_dev *pdev = idev->pdev; 1314 struct net_device *ndev = pci_get_drvdata(pdev); 1315 unsigned iobase = ndev->base_addr; 1316 u8 byte; 1317 1318 /* we don't use the legacy UART, disable its address decoding */ 1319 1320 pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte); 1321 byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST); 1322 pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte); 1323 1324 /* enable PCI busmaster access to our 16MB page */ 1325 1326 pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE); 1327 pci_set_master(pdev); 1328 1329 if (vlsi_init_chip(pdev) < 0) { 1330 pci_disable_device(pdev); 1331 return -1; 1332 } 1333 1334 vlsi_fill_rx(idev->rx_ring); 1335 1336 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */ 1337 1338 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */ 1339 1340 return 0; 1341} 1342 1343static int vlsi_stop_hw(vlsi_irda_dev_t *idev) 1344{ 1345 struct pci_dev *pdev = idev->pdev; 1346 struct net_device *ndev = pci_get_drvdata(pdev); 1347 unsigned iobase = ndev->base_addr; 1348 unsigned long flags; 1349 1350 spin_lock_irqsave(&idev->lock,flags); 1351 outw(0, iobase+VLSI_PIO_IRENABLE); 1352 outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */ 1353 1354 /* disable and w/c irqs */ 1355 outb(0, iobase+VLSI_PIO_IRINTR); 1356 wmb(); 1357 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); 1358 spin_unlock_irqrestore(&idev->lock,flags); 1359 1360 vlsi_unarm_tx(idev); 1361 vlsi_unarm_rx(idev); 1362 1363 vlsi_clear_regs(iobase); 1364 vlsi_stop_clock(pdev); 1365 1366 pci_disable_device(pdev); 1367 1368 return 0; 1369} 1370 1371/**************************************************************/ 1372 1373static void vlsi_tx_timeout(struct net_device *ndev) 1374{ 1375 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1376 1377 1378 vlsi_reg_debug(ndev->base_addr, __func__); 1379 vlsi_ring_debug(idev->tx_ring); 1380 1381 if (netif_running(ndev)) 1382 netif_stop_queue(ndev); 1383 1384 vlsi_stop_hw(idev); 1385 1386 /* now simply restart the whole thing */ 1387 1388 if (!idev->new_baud) 1389 idev->new_baud = idev->baud; /* keep current baudrate */ 1390 1391 if (vlsi_start_hw(idev)) 1392 IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n", 1393 __func__, pci_name(idev->pdev), ndev->name); 1394 else 1395 netif_start_queue(ndev); 1396} 1397 1398static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1399{ 1400 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1401 struct if_irda_req *irq = (struct if_irda_req *) rq; 1402 unsigned long flags; 1403 u16 fifocnt; 1404 int ret = 0; 1405 1406 switch (cmd) { 1407 case SIOCSBANDWIDTH: 1408 if (!capable(CAP_NET_ADMIN)) { 1409 ret = -EPERM; 1410 break; 1411 } 1412 spin_lock_irqsave(&idev->lock, flags); 1413 idev->new_baud = irq->ifr_baudrate; 1414 /* when called from userland there might be a minor race window here 1415 * if the stack tries to change speed concurrently - which would be 1416 * pretty strange anyway with the userland having full control... 
1417 */ 1418 vlsi_set_baud(idev, ndev->base_addr); 1419 spin_unlock_irqrestore(&idev->lock, flags); 1420 break; 1421 case SIOCSMEDIABUSY: 1422 if (!capable(CAP_NET_ADMIN)) { 1423 ret = -EPERM; 1424 break; 1425 } 1426 irda_device_set_media_busy(ndev, TRUE); 1427 break; 1428 case SIOCGRECEIVING: 1429 /* the best we can do: check whether there are any bytes in rx fifo. 1430 * The trustable window (in case some data arrives just afterwards) 1431 * may be as short as 1usec or so at 4Mbps. 1432 */ 1433 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1434 irq->ifr_receiving = (fifocnt!=0) ? 1 : 0; 1435 break; 1436 default: 1437 IRDA_WARNING("%s: notsupp - cmd=%04x\n", 1438 __func__, cmd); 1439 ret = -EOPNOTSUPP; 1440 } 1441 1442 return ret; 1443} 1444 1445/********************************************************/ 1446 1447static irqreturn_t vlsi_interrupt(int irq, void *dev_instance) 1448{ 1449 struct net_device *ndev = dev_instance; 1450 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1451 unsigned iobase; 1452 u8 irintr; 1453 int boguscount = 5; 1454 unsigned long flags; 1455 int handled = 0; 1456 1457 iobase = ndev->base_addr; 1458 spin_lock_irqsave(&idev->lock,flags); 1459 do { 1460 irintr = inb(iobase+VLSI_PIO_IRINTR); 1461 mb(); 1462 outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */ 1463 1464 if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */ 1465 break; 1466 1467 handled = 1; 1468 1469 if (unlikely(!(irintr & ~IRINTR_ACTIVITY))) 1470 break; /* nothing todo if only activity */ 1471 1472 if (irintr&IRINTR_RPKTINT) 1473 vlsi_rx_interrupt(ndev); 1474 1475 if (irintr&IRINTR_TPKTINT) 1476 vlsi_tx_interrupt(ndev); 1477 1478 } while (--boguscount > 0); 1479 spin_unlock_irqrestore(&idev->lock,flags); 1480 1481 if (boguscount <= 0) 1482 IRDA_MESSAGE("%s: too much work in interrupt!\n", 1483 __func__); 1484 return IRQ_RETVAL(handled); 1485} 1486 1487/********************************************************/ 1488 1489static int vlsi_open(struct net_device *ndev) 1490{ 1491 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1492 int err = -EAGAIN; 1493 char hwname[32]; 1494 1495 if (pci_request_regions(idev->pdev, drivername)) { 1496 IRDA_WARNING("%s: io resource busy\n", __func__); 1497 goto errout; 1498 } 1499 ndev->base_addr = pci_resource_start(idev->pdev,0); 1500 ndev->irq = idev->pdev->irq; 1501 1502 /* under some rare occasions the chip apparently comes up with 1503 * IRQ's pending. 
We better w/c pending IRQ and disable them all 1504 */ 1505 1506 outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR); 1507 1508 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, 1509 drivername, ndev)) { 1510 IRDA_WARNING("%s: couldn't get IRQ: %d\n", 1511 __func__, ndev->irq); 1512 goto errout_io; 1513 } 1514 1515 if ((err = vlsi_create_hwif(idev)) != 0) 1516 goto errout_irq; 1517 1518 sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr); 1519 idev->irlap = irlap_open(ndev,&idev->qos,hwname); 1520 if (!idev->irlap) 1521 goto errout_free_ring; 1522 1523 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */ 1524 1525 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */ 1526 1527 if ((err = vlsi_start_hw(idev)) != 0) 1528 goto errout_close_irlap; 1529 1530 netif_start_queue(ndev); 1531 1532 IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name); 1533 1534 return 0; 1535 1536errout_close_irlap: 1537 irlap_close(idev->irlap); 1538errout_free_ring: 1539 vlsi_destroy_hwif(idev); 1540errout_irq: 1541 free_irq(ndev->irq,ndev); 1542errout_io: 1543 pci_release_regions(idev->pdev); 1544errout: 1545 return err; 1546} 1547 1548static int vlsi_close(struct net_device *ndev) 1549{ 1550 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1551 1552 netif_stop_queue(ndev); 1553 1554 if (idev->irlap) 1555 irlap_close(idev->irlap); 1556 idev->irlap = NULL; 1557 1558 vlsi_stop_hw(idev); 1559 1560 vlsi_destroy_hwif(idev); 1561 1562 free_irq(ndev->irq,ndev); 1563 1564 pci_release_regions(idev->pdev); 1565 1566 IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name); 1567 1568 return 0; 1569} 1570 1571static const struct net_device_ops vlsi_netdev_ops = { 1572 .ndo_open = vlsi_open, 1573 .ndo_stop = vlsi_close, 1574 .ndo_start_xmit = vlsi_hard_start_xmit, 1575 .ndo_do_ioctl = vlsi_ioctl, 1576 .ndo_tx_timeout = vlsi_tx_timeout, 1577}; 1578 1579static int vlsi_irda_init(struct net_device *ndev) 1580{ 1581 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1582 struct pci_dev *pdev = idev->pdev; 1583 1584 ndev->irq = pdev->irq; 1585 ndev->base_addr = pci_resource_start(pdev,0); 1586 1587 /* PCI busmastering 1588 * see include file for details why we need these 2 masks, in this order! 1589 */ 1590 1591 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) || 1592 pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { 1593 IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__); 1594 return -1; 1595 } 1596 1597 irda_init_max_qos_capabilies(&idev->qos); 1598 1599 /* the VLSI82C147 does not support 576000! */ 1600 1601 idev->qos.baud_rate.bits = IR_2400 | IR_9600 1602 | IR_19200 | IR_38400 | IR_57600 | IR_115200 1603 | IR_1152000 | (IR_4000000 << 8); 1604 1605 idev->qos.min_turn_time.bits = qos_mtt_bits; 1606 1607 irda_qos_bits_to_value(&idev->qos); 1608 1609 /* currently no public media definitions for IrDA */ 1610 1611 ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA; 1612 ndev->if_port = IF_PORT_UNKNOWN; 1613 1614 ndev->netdev_ops = &vlsi_netdev_ops; 1615 ndev->watchdog_timeo = 500*HZ/1000; /* max. 
allowed turn time for IrLAP */ 1616 1617 SET_NETDEV_DEV(ndev, &pdev->dev); 1618 1619 return 0; 1620} 1621 1622/**************************************************************/ 1623 1624static int 1625vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1626{ 1627 struct net_device *ndev; 1628 vlsi_irda_dev_t *idev; 1629 1630 if (pci_enable_device(pdev)) 1631 goto out; 1632 else 1633 pdev->current_state = 0; /* hw must be running now */ 1634 1635 IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n", 1636 drivername, pci_name(pdev)); 1637 1638 if ( !pci_resource_start(pdev,0) || 1639 !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { 1640 IRDA_ERROR("%s: bar 0 invalid", __func__); 1641 goto out_disable; 1642 } 1643 1644 ndev = alloc_irdadev(sizeof(*idev)); 1645 if (ndev==NULL) { 1646 IRDA_ERROR("%s: Unable to allocate device memory.\n", 1647 __func__); 1648 goto out_disable; 1649 } 1650 1651 idev = netdev_priv(ndev); 1652 1653 spin_lock_init(&idev->lock); 1654 mutex_init(&idev->mtx); 1655 mutex_lock(&idev->mtx); 1656 idev->pdev = pdev; 1657 1658 if (vlsi_irda_init(ndev) < 0) 1659 goto out_freedev; 1660 1661 if (register_netdev(ndev) < 0) { 1662 IRDA_ERROR("%s: register_netdev failed\n", __func__); 1663 goto out_freedev; 1664 } 1665 1666 if (vlsi_proc_root != NULL) { 1667 struct proc_dir_entry *ent; 1668 1669 ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO, 1670 vlsi_proc_root, VLSI_PROC_FOPS, ndev); 1671 if (!ent) { 1672 IRDA_WARNING("%s: failed to create proc entry\n", 1673 __func__); 1674 } else { 1675 proc_set_size(ent, 0); 1676 } 1677 idev->proc_entry = ent; 1678 } 1679 IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name); 1680 1681 pci_set_drvdata(pdev, ndev); 1682 mutex_unlock(&idev->mtx); 1683 1684 return 0; 1685 1686out_freedev: 1687 mutex_unlock(&idev->mtx); 1688 free_netdev(ndev); 1689out_disable: 1690 pci_disable_device(pdev); 1691out: 1692 return -ENODEV; 1693} 1694 1695static void vlsi_irda_remove(struct pci_dev *pdev) 1696{ 1697 struct net_device *ndev = pci_get_drvdata(pdev); 1698 vlsi_irda_dev_t *idev; 1699 1700 if (!ndev) { 1701 IRDA_ERROR("%s: lost netdevice?\n", drivername); 1702 return; 1703 } 1704 1705 unregister_netdev(ndev); 1706 1707 idev = netdev_priv(ndev); 1708 mutex_lock(&idev->mtx); 1709 if (idev->proc_entry) { 1710 remove_proc_entry(ndev->name, vlsi_proc_root); 1711 idev->proc_entry = NULL; 1712 } 1713 mutex_unlock(&idev->mtx); 1714 1715 free_netdev(ndev); 1716 1717 IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev)); 1718} 1719 1720#ifdef CONFIG_PM 1721 1722/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs. 1723 * Some of the Linux PCI-PM code however depends on this, for example in 1724 * pci_set_power_state(). So we have to take care to perform the required 1725 * operations on our own (particularly reflecting the pdev->current_state) 1726 * otherwise we might get cheated by pci-pm. 
1727 */ 1728 1729 1730static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) 1731{ 1732 struct net_device *ndev = pci_get_drvdata(pdev); 1733 vlsi_irda_dev_t *idev; 1734 1735 if (!ndev) { 1736 IRDA_ERROR("%s - %s: no netdevice\n", 1737 __func__, pci_name(pdev)); 1738 return 0; 1739 } 1740 idev = netdev_priv(ndev); 1741 mutex_lock(&idev->mtx); 1742 if (pdev->current_state != 0) { /* already suspended */ 1743 if (state.event > pdev->current_state) { /* simply go deeper */ 1744 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1745 pdev->current_state = state.event; 1746 } 1747 else 1748 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event); 1749 mutex_unlock(&idev->mtx); 1750 return 0; 1751 } 1752 1753 if (netif_running(ndev)) { 1754 netif_device_detach(ndev); 1755 vlsi_stop_hw(idev); 1756 pci_save_state(pdev); 1757 if (!idev->new_baud) 1758 /* remember speed settings to restore on resume */ 1759 idev->new_baud = idev->baud; 1760 } 1761 1762 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1763 pdev->current_state = state.event; 1764 idev->resume_ok = 1; 1765 mutex_unlock(&idev->mtx); 1766 return 0; 1767} 1768 1769static int vlsi_irda_resume(struct pci_dev *pdev) 1770{ 1771 struct net_device *ndev = pci_get_drvdata(pdev); 1772 vlsi_irda_dev_t *idev; 1773 1774 if (!ndev) { 1775 IRDA_ERROR("%s - %s: no netdevice\n", 1776 __func__, pci_name(pdev)); 1777 return 0; 1778 } 1779 idev = netdev_priv(ndev); 1780 mutex_lock(&idev->mtx); 1781 if (pdev->current_state == 0) { 1782 mutex_unlock(&idev->mtx); 1783 IRDA_WARNING("%s - %s: already resumed\n", 1784 __func__, pci_name(pdev)); 1785 return 0; 1786 } 1787 1788 pci_set_power_state(pdev, PCI_D0); 1789 pdev->current_state = PM_EVENT_ON; 1790 1791 if (!idev->resume_ok) { 1792 /* should be obsolete now - but used to happen due to: 1793 * - pci layer initially setting pdev->current_state = 4 (unknown) 1794 * - pci layer did not walk the save_state-tree (might be APM problem) 1795 * so we could not refuse to suspend from undefined state 1796 * - vlsi_irda_suspend detected invalid state and refused to save 1797 * configuration for resume - but was too late to stop suspending 1798 * - vlsi_irda_resume got screwed when trying to resume from garbage 1799 * 1800 * now we explicitly set pdev->current_state = 0 after enabling the 1801 * device and independently resume_ok should catch any garbage config. 
1802 */ 1803 IRDA_WARNING("%s - hm, nothing to resume?\n", __func__); 1804 mutex_unlock(&idev->mtx); 1805 return 0; 1806 } 1807 1808 if (netif_running(ndev)) { 1809 pci_restore_state(pdev); 1810 vlsi_start_hw(idev); 1811 netif_device_attach(ndev); 1812 } 1813 idev->resume_ok = 0; 1814 mutex_unlock(&idev->mtx); 1815 return 0; 1816} 1817 1818#endif /* CONFIG_PM */ 1819 1820/*********************************************************/ 1821 1822static struct pci_driver vlsi_irda_driver = { 1823 .name = drivername, 1824 .id_table = vlsi_irda_table, 1825 .probe = vlsi_irda_probe, 1826 .remove = vlsi_irda_remove, 1827#ifdef CONFIG_PM 1828 .suspend = vlsi_irda_suspend, 1829 .resume = vlsi_irda_resume, 1830#endif 1831}; 1832 1833#define PROC_DIR ("driver/" DRIVER_NAME) 1834 1835static int __init vlsi_mod_init(void) 1836{ 1837 int i, ret; 1838 1839 if (clksrc < 0 || clksrc > 3) { 1840 IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc); 1841 return -1; 1842 } 1843 1844 for (i = 0; i < 2; i++) { 1845 switch(ringsize[i]) { 1846 case 4: 1847 case 8: 1848 case 16: 1849 case 32: 1850 case 64: 1851 break; 1852 default: 1853 IRDA_WARNING("%s: invalid %s ringsize %d, using default=8", drivername, (i)?"rx":"tx", ringsize[i]); 1854 ringsize[i] = 8; 1855 break; 1856 } 1857 } 1858 1859 sirpulse = !!sirpulse; 1860 1861 /* proc_mkdir returns NULL if !CONFIG_PROC_FS. 1862 * Failure to create the procfs entry is handled like running 1863 * without procfs - it's not required for the driver to work. 1864 */ 1865 vlsi_proc_root = proc_mkdir(PROC_DIR, NULL); 1866 1867 ret = pci_register_driver(&vlsi_irda_driver); 1868 1869 if (ret && vlsi_proc_root) 1870 remove_proc_entry(PROC_DIR, NULL); 1871 return ret; 1872 1873} 1874 1875static void __exit vlsi_mod_exit(void) 1876{ 1877 pci_unregister_driver(&vlsi_irda_driver); 1878 if (vlsi_proc_root) 1879 remove_proc_entry(PROC_DIR, NULL); 1880} 1881 1882module_init(vlsi_mod_init); 1883module_exit(vlsi_mod_exit);