Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.16-rc2 957 lines 24 kB view raw
/*
 * Alchemy Semi Au1000 IrDA driver
 *
 * Copyright 2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *	ppopov@mvista.com or source@mvista.com
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/ioport.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/mach-au1x00/au1000.h>

/* registers (byte offsets from the ioremapped peripheral base) */
#define IR_RING_PTR_STATUS	0x00
#define IR_RING_BASE_ADDR_H	0x04
#define IR_RING_BASE_ADDR_L	0x08
#define IR_RING_SIZE		0x0C
#define IR_RING_PROMPT		0x10
#define IR_RING_ADDR_CMPR	0x14
#define IR_INT_CLEAR		0x18
#define IR_CONFIG_1		0x20
#define IR_SIR_FLAGS		0x24
#define IR_STATUS		0x28
#define IR_READ_PHY_CONFIG	0x2C
#define IR_WRITE_PHY_CONFIG	0x30
#define IR_MAX_PKT_LEN		0x34
#define IR_RX_BYTE_CNT		0x38
#define IR_CONFIG_2		0x3C
#define IR_ENABLE		0x40

/* Config1 */
#define IR_RX_INVERT_LED	(1 << 0)
#define IR_TX_INVERT_LED	(1 << 1)
#define IR_ST			(1 << 2)
#define IR_SF			(1 << 3)
#define IR_SIR			(1 << 4)
#define IR_MIR			(1 << 5)
#define IR_FIR			(1 << 6)
#define IR_16CRC		(1 << 7)
#define IR_TD			(1 << 8)
#define IR_RX_ALL		(1 << 9)
#define IR_DMA_ENABLE		(1 << 10)
#define IR_RX_ENABLE		(1 << 11)
#define IR_TX_ENABLE		(1 << 12)
#define IR_LOOPBACK		(1 << 14)
/* composite mode used for all SIR baud rates */
#define IR_SIR_MODE		(IR_SIR | IR_DMA_ENABLE | \
				 IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
				 IR_16CRC)

/* ir_status */
#define IR_RX_STATUS		(1 << 9)
#define IR_TX_STATUS		(1 << 10)
#define IR_PHYEN		(1 << 15)

/* ir_write_phy_config */
#define IR_BR(x)		(((x) & 0x3f) << 10)	/* baud rate */
#define IR_PW(x)		(((x) & 0x1f) << 5)	/* pulse width */
#define IR_P(x)			((x) & 0x1f)		/* preamble bits */

/* Config2 */
#define IR_MODE_INV		(1 << 0)
#define IR_ONE_PIN		(1 << 1)
#define IR_PHYCLK_40MHZ		(0 << 2)
#define IR_PHYCLK_48MHZ		(1 << 2)
#define IR_PHYCLK_56MHZ		(2 << 2)
#define IR_PHYCLK_64MHZ		(3 << 2)
#define IR_DP			(1 << 4)
#define IR_DA			(1 << 5)
#define IR_FLT_HIGH		(0 << 6)
#define IR_FLT_MEDHI		(1 << 6)
#define IR_FLT_MEDLO		(2 << 6)
#define IR_FLT_LO		(3 << 6)
#define IR_IEN			(1 << 8)

/* ir_enable */
#define IR_HC			(1 << 3)	/* divide SBUS clock by 2 */
#define IR_CE			(1 << 2)	/* clock enable */
#define IR_C			(1 << 1)	/* coherency bit */
#define IR_BE			(1 << 0)	/* set in big endian mode */

#define NUM_IR_DESC	64
#define RING_SIZE_4	0x0
#define RING_SIZE_16	0x3
#define RING_SIZE_64	0xF
#define MAX_NUM_IR_DESC	64
#define MAX_BUF_SIZE	2048

/* Ring descriptor flags */
#define AU_OWN		(1 << 7)	/* tx,rx */
#define IR_DIS_CRC	(1 << 6)	/* tx */
#define IR_BAD_CRC	(1 << 5)	/* tx */
#define IR_NEED_PULSE	(1 << 4)	/* tx */
#define IR_FORCE_UNDER	(1 << 3)	/* tx */
#define IR_DISABLE_TX	(1 << 2)	/* tx */
#define IR_HW_UNDER	(1 << 0)	/* tx */
#define IR_TX_ERROR	(IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)

#define IR_PHY_ERROR	(1 << 6)	/* rx */
#define IR_CRC_ERROR	(1 << 5)	/* rx */
#define IR_MAX_LEN	(1 << 4)	/* rx */
#define IR_FIFO_OVER	(1 << 3)	/* rx */
#define IR_SIR_ERROR	(1 << 2)	/* rx */
#define IR_RX_ERROR	(IR_PHY_ERROR | IR_CRC_ERROR | \
			 IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)

/* Node of the free-list of DMA data buffers; vaddr/dma_addr describe one
 * MAX_BUF_SIZE buffer usable for either receive or transmit. */
struct db_dest {
	struct db_dest *pnext;
	volatile u32 *vaddr;
	dma_addr_t dma_addr;
};

/* Hardware ring descriptor layout (byte-wide fields as the IrDA unit
 * expects them): a 13-bit count, a flags byte and a 32-bit buffer address
 * split into four bytes. */
struct ring_dest {
	u8 count_0;	/* 7:0  */
	u8 count_1;	/* 12:8 */
	u8 reserved;
	u8 flags;
	u8 addr_0;	/* 7:0   */
	u8 addr_1;	/* 15:8  */
	u8 addr_2;	/* 23:16 */
	u8 addr_3;	/* 31:24 */
};

/* Private data for each instance */
struct au1k_private {
	void __iomem *iobase;
	int irq_rx, irq_tx;

	struct db_dest *pDBfree;			/* free-list head */
	struct db_dest db[2 * NUM_IR_DESC];
	volatile struct ring_dest *rx_ring[NUM_IR_DESC];
	volatile struct ring_dest *tx_ring[NUM_IR_DESC];
	struct db_dest *rx_db_inuse[NUM_IR_DESC];
	struct db_dest *tx_db_inuse[NUM_IR_DESC];
	u32 rx_head;
	u32 tx_head;
	u32 tx_tail;
	u32 tx_full;

	iobuff_t rx_buff;

	struct net_device *netdev;
	struct timeval stamp;
	struct timeval now;
	struct qos_info qos;
	struct irlap_cb *irlap;

	u8 open;
	u32 speed;
	u32 newspeed;		/* pending speed change, applied when tx idle */

	struct timer_list timer;

	struct resource *ioarea;
	struct au1k_irda_platform_data *platdata;
};

static int qos_mtt_bits = 0x07;  /* 1 ms or more */

#define RUN_AT(x) (jiffies + (x))

/* Forward a PHY mode change to the board-specific hook, if one exists. */
static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
{
	if (p->platdata && p->platdata->set_phy_mode)
		p->platdata->set_phy_mode(mode);
}

static inline unsigned long irda_read(struct au1k_private *p,
				      unsigned long ofs)
{
	/*
	 * IrDA peripheral bug. You have to read the register
	 * twice to get the right value.
	 */
	(void)__raw_readl(p->iobase + ofs);
	return __raw_readl(p->iobase + ofs);
}

static inline void irda_write(struct au1k_private *p, unsigned long ofs,
			      unsigned long val)
{
	__raw_writel(val, p->iobase + ofs);
	wmb();	/* make sure the write reaches the device before we proceed */
}
The buffer descriptor returned 210 * has the virtual and dma address of a buffer suitable for 211 * both, receive and transmit operations. 212 */ 213static struct db_dest *GetFreeDB(struct au1k_private *aup) 214{ 215 struct db_dest *db; 216 db = aup->pDBfree; 217 218 if (db) 219 aup->pDBfree = db->pnext; 220 return db; 221} 222 223/* 224 DMA memory allocation, derived from pci_alloc_consistent. 225 However, the Au1000 data cache is coherent (when programmed 226 so), therefore we return KSEG0 address, not KSEG1. 227*/ 228static void *dma_alloc(size_t size, dma_addr_t *dma_handle) 229{ 230 void *ret; 231 int gfp = GFP_ATOMIC | GFP_DMA; 232 233 ret = (void *)__get_free_pages(gfp, get_order(size)); 234 235 if (ret != NULL) { 236 memset(ret, 0, size); 237 *dma_handle = virt_to_bus(ret); 238 ret = (void *)KSEG0ADDR(ret); 239 } 240 return ret; 241} 242 243static void dma_free(void *vaddr, size_t size) 244{ 245 vaddr = (void *)KSEG0ADDR(vaddr); 246 free_pages((unsigned long) vaddr, get_order(size)); 247} 248 249 250static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base) 251{ 252 int i; 253 for (i = 0; i < NUM_IR_DESC; i++) { 254 aup->rx_ring[i] = (volatile struct ring_dest *) 255 (rx_base + sizeof(struct ring_dest) * i); 256 } 257 for (i = 0; i < NUM_IR_DESC; i++) { 258 aup->tx_ring[i] = (volatile struct ring_dest *) 259 (tx_base + sizeof(struct ring_dest) * i); 260 } 261} 262 263static int au1k_irda_init_iobuf(iobuff_t *io, int size) 264{ 265 io->head = kmalloc(size, GFP_KERNEL); 266 if (io->head != NULL) { 267 io->truesize = size; 268 io->in_frame = FALSE; 269 io->state = OUTSIDE_FRAME; 270 io->data = io->head; 271 } 272 return io->head ? 0 : -ENOMEM; 273} 274 275/* 276 * Set the IrDA communications speed. 
277 */ 278static int au1k_irda_set_speed(struct net_device *dev, int speed) 279{ 280 struct au1k_private *aup = netdev_priv(dev); 281 volatile struct ring_dest *ptxd; 282 unsigned long control; 283 int ret = 0, timeout = 10, i; 284 285 if (speed == aup->speed) 286 return ret; 287 288 /* disable PHY first */ 289 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF); 290 irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN); 291 292 /* disable RX/TX */ 293 irda_write(aup, IR_CONFIG_1, 294 irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE)); 295 msleep(20); 296 while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) { 297 msleep(20); 298 if (!timeout--) { 299 printk(KERN_ERR "%s: rx/tx disable timeout\n", 300 dev->name); 301 break; 302 } 303 } 304 305 /* disable DMA */ 306 irda_write(aup, IR_CONFIG_1, 307 irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE); 308 msleep(20); 309 310 /* After we disable tx/rx. the index pointers go back to zero. */ 311 aup->tx_head = aup->tx_tail = aup->rx_head = 0; 312 for (i = 0; i < NUM_IR_DESC; i++) { 313 ptxd = aup->tx_ring[i]; 314 ptxd->flags = 0; 315 ptxd->count_0 = 0; 316 ptxd->count_1 = 0; 317 } 318 319 for (i = 0; i < NUM_IR_DESC; i++) { 320 ptxd = aup->rx_ring[i]; 321 ptxd->count_0 = 0; 322 ptxd->count_1 = 0; 323 ptxd->flags = AU_OWN; 324 } 325 326 if (speed == 4000000) 327 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR); 328 else 329 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR); 330 331 switch (speed) { 332 case 9600: 333 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12)); 334 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE); 335 break; 336 case 19200: 337 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12)); 338 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE); 339 break; 340 case 38400: 341 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12)); 342 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE); 343 break; 344 case 57600: 345 irda_write(aup, IR_WRITE_PHY_CONFIG, 
IR_BR(1) | IR_PW(12)); 346 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE); 347 break; 348 case 115200: 349 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12)); 350 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE); 351 break; 352 case 4000000: 353 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15)); 354 irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE | 355 IR_RX_ENABLE); 356 break; 357 default: 358 printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed); 359 ret = -EINVAL; 360 break; 361 } 362 363 aup->speed = speed; 364 irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN); 365 366 control = irda_read(aup, IR_STATUS); 367 irda_write(aup, IR_RING_PROMPT, 0); 368 369 if (control & (1 << 14)) { 370 printk(KERN_ERR "%s: configuration error\n", dev->name); 371 } else { 372 if (control & (1 << 11)) 373 printk(KERN_DEBUG "%s Valid SIR config\n", dev->name); 374 if (control & (1 << 12)) 375 printk(KERN_DEBUG "%s Valid MIR config\n", dev->name); 376 if (control & (1 << 13)) 377 printk(KERN_DEBUG "%s Valid FIR config\n", dev->name); 378 if (control & (1 << 10)) 379 printk(KERN_DEBUG "%s TX enabled\n", dev->name); 380 if (control & (1 << 9)) 381 printk(KERN_DEBUG "%s RX enabled\n", dev->name); 382 } 383 384 return ret; 385} 386 387static void update_rx_stats(struct net_device *dev, u32 status, u32 count) 388{ 389 struct net_device_stats *ps = &dev->stats; 390 391 ps->rx_packets++; 392 393 if (status & IR_RX_ERROR) { 394 ps->rx_errors++; 395 if (status & (IR_PHY_ERROR | IR_FIFO_OVER)) 396 ps->rx_missed_errors++; 397 if (status & IR_MAX_LEN) 398 ps->rx_length_errors++; 399 if (status & IR_CRC_ERROR) 400 ps->rx_crc_errors++; 401 } else 402 ps->rx_bytes += count; 403} 404 405static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len) 406{ 407 struct net_device_stats *ps = &dev->stats; 408 409 ps->tx_packets++; 410 ps->tx_bytes += pkt_len; 411 412 if (status & IR_TX_ERROR) { 413 ps->tx_errors++; 414 ps->tx_aborted_errors++; 415 } 416} 417 418static void 
au1k_tx_ack(struct net_device *dev) 419{ 420 struct au1k_private *aup = netdev_priv(dev); 421 volatile struct ring_dest *ptxd; 422 423 ptxd = aup->tx_ring[aup->tx_tail]; 424 while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) { 425 update_tx_stats(dev, ptxd->flags, 426 (ptxd->count_1 << 8) | ptxd->count_0); 427 ptxd->count_0 = 0; 428 ptxd->count_1 = 0; 429 wmb(); 430 aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1); 431 ptxd = aup->tx_ring[aup->tx_tail]; 432 433 if (aup->tx_full) { 434 aup->tx_full = 0; 435 netif_wake_queue(dev); 436 } 437 } 438 439 if (aup->tx_tail == aup->tx_head) { 440 if (aup->newspeed) { 441 au1k_irda_set_speed(dev, aup->newspeed); 442 aup->newspeed = 0; 443 } else { 444 irda_write(aup, IR_CONFIG_1, 445 irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE); 446 irda_write(aup, IR_CONFIG_1, 447 irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE); 448 irda_write(aup, IR_RING_PROMPT, 0); 449 } 450 } 451} 452 453static int au1k_irda_rx(struct net_device *dev) 454{ 455 struct au1k_private *aup = netdev_priv(dev); 456 volatile struct ring_dest *prxd; 457 struct sk_buff *skb; 458 struct db_dest *pDB; 459 u32 flags, count; 460 461 prxd = aup->rx_ring[aup->rx_head]; 462 flags = prxd->flags; 463 464 while (!(flags & AU_OWN)) { 465 pDB = aup->rx_db_inuse[aup->rx_head]; 466 count = (prxd->count_1 << 8) | prxd->count_0; 467 if (!(flags & IR_RX_ERROR)) { 468 /* good frame */ 469 update_rx_stats(dev, flags, count); 470 skb = alloc_skb(count + 1, GFP_ATOMIC); 471 if (skb == NULL) { 472 dev->stats.rx_dropped++; 473 continue; 474 } 475 skb_reserve(skb, 1); 476 if (aup->speed == 4000000) 477 skb_put(skb, count); 478 else 479 skb_put(skb, count - 2); 480 skb_copy_to_linear_data(skb, (void *)pDB->vaddr, 481 count - 2); 482 skb->dev = dev; 483 skb_reset_mac_header(skb); 484 skb->protocol = htons(ETH_P_IRDA); 485 netif_rx(skb); 486 prxd->count_0 = 0; 487 prxd->count_1 = 0; 488 } 489 prxd->flags |= AU_OWN; 490 aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1); 
491 irda_write(aup, IR_RING_PROMPT, 0); 492 493 /* next descriptor */ 494 prxd = aup->rx_ring[aup->rx_head]; 495 flags = prxd->flags; 496 497 } 498 return 0; 499} 500 501static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id) 502{ 503 struct net_device *dev = dev_id; 504 struct au1k_private *aup = netdev_priv(dev); 505 506 irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */ 507 508 au1k_irda_rx(dev); 509 au1k_tx_ack(dev); 510 511 return IRQ_HANDLED; 512} 513 514static int au1k_init(struct net_device *dev) 515{ 516 struct au1k_private *aup = netdev_priv(dev); 517 u32 enable, ring_address; 518 int i; 519 520 enable = IR_HC | IR_CE | IR_C; 521#ifndef CONFIG_CPU_LITTLE_ENDIAN 522 enable |= IR_BE; 523#endif 524 aup->tx_head = 0; 525 aup->tx_tail = 0; 526 aup->rx_head = 0; 527 528 for (i = 0; i < NUM_IR_DESC; i++) 529 aup->rx_ring[i]->flags = AU_OWN; 530 531 irda_write(aup, IR_ENABLE, enable); 532 msleep(20); 533 534 /* disable PHY */ 535 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF); 536 irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN); 537 msleep(20); 538 539 irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE); 540 541 ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]); 542 irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26); 543 irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff); 544 545 irda_write(aup, IR_RING_SIZE, 546 (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12)); 547 548 irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN); 549 irda_write(aup, IR_RING_ADDR_CMPR, 0); 550 551 au1k_irda_set_speed(dev, 9600); 552 return 0; 553} 554 555static int au1k_irda_start(struct net_device *dev) 556{ 557 struct au1k_private *aup = netdev_priv(dev); 558 char hwname[32]; 559 int retval; 560 561 retval = au1k_init(dev); 562 if (retval) { 563 printk(KERN_ERR "%s: error in au1k_init\n", dev->name); 564 return retval; 565 } 566 567 retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0, 568 dev->name, dev); 
569 if (retval) { 570 printk(KERN_ERR "%s: unable to get IRQ %d\n", 571 dev->name, dev->irq); 572 return retval; 573 } 574 retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0, 575 dev->name, dev); 576 if (retval) { 577 free_irq(aup->irq_tx, dev); 578 printk(KERN_ERR "%s: unable to get IRQ %d\n", 579 dev->name, dev->irq); 580 return retval; 581 } 582 583 /* Give self a hardware name */ 584 sprintf(hwname, "Au1000 SIR/FIR"); 585 aup->irlap = irlap_open(dev, &aup->qos, hwname); 586 netif_start_queue(dev); 587 588 /* int enable */ 589 irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN); 590 591 /* power up */ 592 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR); 593 594 aup->timer.expires = RUN_AT((3 * HZ)); 595 aup->timer.data = (unsigned long)dev; 596 return 0; 597} 598 599static int au1k_irda_stop(struct net_device *dev) 600{ 601 struct au1k_private *aup = netdev_priv(dev); 602 603 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF); 604 605 /* disable interrupts */ 606 irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN); 607 irda_write(aup, IR_CONFIG_1, 0); 608 irda_write(aup, IR_ENABLE, 0); /* disable clock */ 609 610 if (aup->irlap) { 611 irlap_close(aup->irlap); 612 aup->irlap = NULL; 613 } 614 615 netif_stop_queue(dev); 616 del_timer(&aup->timer); 617 618 /* disable the interrupt */ 619 free_irq(aup->irq_tx, dev); 620 free_irq(aup->irq_rx, dev); 621 622 return 0; 623} 624 625/* 626 * Au1000 transmit routine. 
627 */ 628static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) 629{ 630 struct au1k_private *aup = netdev_priv(dev); 631 int speed = irda_get_next_speed(skb); 632 volatile struct ring_dest *ptxd; 633 struct db_dest *pDB; 634 u32 len, flags; 635 636 if (speed != aup->speed && speed != -1) 637 aup->newspeed = speed; 638 639 if ((skb->len == 0) && (aup->newspeed)) { 640 if (aup->tx_tail == aup->tx_head) { 641 au1k_irda_set_speed(dev, speed); 642 aup->newspeed = 0; 643 } 644 dev_kfree_skb(skb); 645 return NETDEV_TX_OK; 646 } 647 648 ptxd = aup->tx_ring[aup->tx_head]; 649 flags = ptxd->flags; 650 651 if (flags & AU_OWN) { 652 printk(KERN_DEBUG "%s: tx_full\n", dev->name); 653 netif_stop_queue(dev); 654 aup->tx_full = 1; 655 return 1; 656 } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) { 657 printk(KERN_DEBUG "%s: tx_full\n", dev->name); 658 netif_stop_queue(dev); 659 aup->tx_full = 1; 660 return 1; 661 } 662 663 pDB = aup->tx_db_inuse[aup->tx_head]; 664 665#if 0 666 if (irda_read(aup, IR_RX_BYTE_CNT) != 0) { 667 printk(KERN_DEBUG "tx warning: rx byte cnt %x\n", 668 irda_read(aup, IR_RX_BYTE_CNT)); 669 } 670#endif 671 672 if (aup->speed == 4000000) { 673 /* FIR */ 674 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); 675 ptxd->count_0 = skb->len & 0xff; 676 ptxd->count_1 = (skb->len >> 8) & 0xff; 677 } else { 678 /* SIR */ 679 len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE); 680 ptxd->count_0 = len & 0xff; 681 ptxd->count_1 = (len >> 8) & 0xff; 682 ptxd->flags |= IR_DIS_CRC; 683 } 684 ptxd->flags |= AU_OWN; 685 wmb(); 686 687 irda_write(aup, IR_CONFIG_1, 688 irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE); 689 irda_write(aup, IR_RING_PROMPT, 0); 690 691 dev_kfree_skb(skb); 692 aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1); 693 return NETDEV_TX_OK; 694} 695 696/* 697 * The Tx ring has been full longer than the watchdog timeout 698 * value. The transmitter must be hung? 
699 */ 700static void au1k_tx_timeout(struct net_device *dev) 701{ 702 u32 speed; 703 struct au1k_private *aup = netdev_priv(dev); 704 705 printk(KERN_ERR "%s: tx timeout\n", dev->name); 706 speed = aup->speed; 707 aup->speed = 0; 708 au1k_irda_set_speed(dev, speed); 709 aup->tx_full = 0; 710 netif_wake_queue(dev); 711} 712 713static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) 714{ 715 struct if_irda_req *rq = (struct if_irda_req *)ifreq; 716 struct au1k_private *aup = netdev_priv(dev); 717 int ret = -EOPNOTSUPP; 718 719 switch (cmd) { 720 case SIOCSBANDWIDTH: 721 if (capable(CAP_NET_ADMIN)) { 722 /* 723 * We are unable to set the speed if the 724 * device is not running. 725 */ 726 if (aup->open) 727 ret = au1k_irda_set_speed(dev, 728 rq->ifr_baudrate); 729 else { 730 printk(KERN_ERR "%s ioctl: !netif_running\n", 731 dev->name); 732 ret = 0; 733 } 734 } 735 break; 736 737 case SIOCSMEDIABUSY: 738 ret = -EPERM; 739 if (capable(CAP_NET_ADMIN)) { 740 irda_device_set_media_busy(dev, TRUE); 741 ret = 0; 742 } 743 break; 744 745 case SIOCGRECEIVING: 746 rq->ifr_receiving = 0; 747 break; 748 default: 749 break; 750 } 751 return ret; 752} 753 754static const struct net_device_ops au1k_irda_netdev_ops = { 755 .ndo_open = au1k_irda_start, 756 .ndo_stop = au1k_irda_stop, 757 .ndo_start_xmit = au1k_irda_hard_xmit, 758 .ndo_tx_timeout = au1k_tx_timeout, 759 .ndo_do_ioctl = au1k_irda_ioctl, 760}; 761 762static int au1k_irda_net_init(struct net_device *dev) 763{ 764 struct au1k_private *aup = netdev_priv(dev); 765 struct db_dest *pDB, *pDBfree; 766 int i, err, retval = 0; 767 dma_addr_t temp; 768 769 err = au1k_irda_init_iobuf(&aup->rx_buff, 14384); 770 if (err) 771 goto out1; 772 773 dev->netdev_ops = &au1k_irda_netdev_ops; 774 775 irda_init_max_qos_capabilies(&aup->qos); 776 777 /* The only value we must override it the baudrate */ 778 aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | 779 IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 
8); 780 781 aup->qos.min_turn_time.bits = qos_mtt_bits; 782 irda_qos_bits_to_value(&aup->qos); 783 784 retval = -ENOMEM; 785 786 /* Tx ring follows rx ring + 512 bytes */ 787 /* we need a 1k aligned buffer */ 788 aup->rx_ring[0] = (struct ring_dest *) 789 dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)), 790 &temp); 791 if (!aup->rx_ring[0]) 792 goto out2; 793 794 /* allocate the data buffers */ 795 aup->db[0].vaddr = 796 dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp); 797 if (!aup->db[0].vaddr) 798 goto out3; 799 800 setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512); 801 802 pDBfree = NULL; 803 pDB = aup->db; 804 for (i = 0; i < (2 * NUM_IR_DESC); i++) { 805 pDB->pnext = pDBfree; 806 pDBfree = pDB; 807 pDB->vaddr = 808 (u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i)); 809 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); 810 pDB++; 811 } 812 aup->pDBfree = pDBfree; 813 814 /* attach a data buffer to each descriptor */ 815 for (i = 0; i < NUM_IR_DESC; i++) { 816 pDB = GetFreeDB(aup); 817 if (!pDB) 818 goto out3; 819 aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); 820 aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff); 821 aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff); 822 aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff); 823 aup->rx_db_inuse[i] = pDB; 824 } 825 for (i = 0; i < NUM_IR_DESC; i++) { 826 pDB = GetFreeDB(aup); 827 if (!pDB) 828 goto out3; 829 aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); 830 aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff); 831 aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff); 832 aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff); 833 aup->tx_ring[i]->count_0 = 0; 834 aup->tx_ring[i]->count_1 = 0; 835 aup->tx_ring[i]->flags = 0; 836 aup->tx_db_inuse[i] = pDB; 837 } 838 839 return 0; 840 841out3: 842 dma_free((void *)aup->rx_ring[0], 843 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest))); 844out2: 845 
kfree(aup->rx_buff.head); 846out1: 847 printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval); 848 return retval; 849} 850 851static int au1k_irda_probe(struct platform_device *pdev) 852{ 853 struct au1k_private *aup; 854 struct net_device *dev; 855 struct resource *r; 856 int err; 857 858 dev = alloc_irdadev(sizeof(struct au1k_private)); 859 if (!dev) 860 return -ENOMEM; 861 862 aup = netdev_priv(dev); 863 864 aup->platdata = pdev->dev.platform_data; 865 866 err = -EINVAL; 867 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 868 if (!r) 869 goto out; 870 871 aup->irq_tx = r->start; 872 873 r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 874 if (!r) 875 goto out; 876 877 aup->irq_rx = r->start; 878 879 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 880 if (!r) 881 goto out; 882 883 err = -EBUSY; 884 aup->ioarea = request_mem_region(r->start, resource_size(r), 885 pdev->name); 886 if (!aup->ioarea) 887 goto out; 888 889 aup->iobase = ioremap_nocache(r->start, resource_size(r)); 890 if (!aup->iobase) 891 goto out2; 892 893 dev->irq = aup->irq_rx; 894 895 err = au1k_irda_net_init(dev); 896 if (err) 897 goto out3; 898 err = register_netdev(dev); 899 if (err) 900 goto out4; 901 902 platform_set_drvdata(pdev, dev); 903 904 printk(KERN_INFO "IrDA: Registered device %s\n", dev->name); 905 return 0; 906 907out4: 908 dma_free((void *)aup->db[0].vaddr, 909 MAX_BUF_SIZE * 2 * NUM_IR_DESC); 910 dma_free((void *)aup->rx_ring[0], 911 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest))); 912 kfree(aup->rx_buff.head); 913out3: 914 iounmap(aup->iobase); 915out2: 916 release_resource(aup->ioarea); 917 kfree(aup->ioarea); 918out: 919 free_netdev(dev); 920 return err; 921} 922 923static int au1k_irda_remove(struct platform_device *pdev) 924{ 925 struct net_device *dev = platform_get_drvdata(pdev); 926 struct au1k_private *aup = netdev_priv(dev); 927 928 unregister_netdev(dev); 929 930 dma_free((void *)aup->db[0].vaddr, 931 MAX_BUF_SIZE * 2 * NUM_IR_DESC); 932 
dma_free((void *)aup->rx_ring[0], 933 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest))); 934 kfree(aup->rx_buff.head); 935 936 iounmap(aup->iobase); 937 release_resource(aup->ioarea); 938 kfree(aup->ioarea); 939 940 free_netdev(dev); 941 942 return 0; 943} 944 945static struct platform_driver au1k_irda_driver = { 946 .driver = { 947 .name = "au1000-irda", 948 .owner = THIS_MODULE, 949 }, 950 .probe = au1k_irda_probe, 951 .remove = au1k_irda_remove, 952}; 953 954module_platform_driver(au1k_irda_driver); 955 956MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>"); 957MODULE_DESCRIPTION("Au1000 IrDA Device Driver");