Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.32-rc2 (841 lines, 20 kB)
/*
 * Alchemy Semi Au1000 IrDA driver
 *
 * Copyright 2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *         ppopov@mvista.com or source@mvista.com
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/au1000.h>
#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100)
#include <asm/pb1000.h>
#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
#include <asm/db1x00.h>
#else
#error au1k_ir: unsupported board
#endif

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "au1000_ircc.h"

static int au1k_irda_net_init(struct net_device *);
static int au1k_irda_start(struct net_device *);
static int au1k_irda_stop(struct net_device *dev);
static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
static int au1k_irda_rx(struct net_device *);
static irqreturn_t au1k_irda_interrupt(int, void *);
static void au1k_tx_timeout(struct net_device *);
static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
static int au1k_irda_set_speed(struct net_device *dev, int speed);

static void *dma_alloc(size_t, dma_addr_t *);
static void dma_free(void *, size_t);

static int qos_mtt_bits = 0x07; /* 1 ms or more */
static struct net_device *ir_devs[NUM_IR_IFF];
static char version[] __devinitdata =
        "au1k_ircc:1.2 ppopov@mvista.com\n";

#define RUN_AT(x) (jiffies + (x))

#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
static BCSR * const bcsr = (BCSR *)0xAE000000;
#endif

static DEFINE_SPINLOCK(ir_lock);

/*
 * IrDA peripheral bug. You have to read the register
 * twice to get the right value.
 */
u32 read_ir_reg(u32 addr)
{
        readl(addr);
        return readl(addr);
}


/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and dma address of a buffer suitable for
 * both receive and transmit operations.
 */
static db_dest_t *GetFreeDB(struct au1k_private *aup)
{
        db_dest_t *pDB;
        pDB = aup->pDBfree;

        if (pDB) {
                aup->pDBfree = pDB->pnext;
        }
        return pDB;
}

static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
{
        db_dest_t *pDBfree = aup->pDBfree;
        if (pDBfree)
                pDBfree->pnext = pDB;
        aup->pDBfree = pDB;
}


/*
 * DMA memory allocation, derived from pci_alloc_consistent.
 * However, the Au1000 data cache is coherent (when programmed
 * so), therefore we return KSEG0 address, not KSEG1.
 */
static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
        void *ret;
        int gfp = GFP_ATOMIC | GFP_DMA;

        ret = (void *)__get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
                ret = (void *)KSEG0ADDR(ret);
        }
        return ret;
}


static void dma_free(void *vaddr, size_t size)
{
        vaddr = (void *)KSEG0ADDR(vaddr);
        free_pages((unsigned long)vaddr, get_order(size));
}


static void
setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
        int i;
        for (i = 0; i < NUM_IR_DESC; i++) {
                aup->rx_ring[i] = (volatile ring_dest_t *)
                        (rx_base + sizeof(ring_dest_t) * i);
        }
        for (i = 0; i < NUM_IR_DESC; i++) {
                aup->tx_ring[i] = (volatile ring_dest_t *)
                        (tx_base + sizeof(ring_dest_t) * i);
        }
}
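/*
 * NB: the head/tail arithmetic throughout this driver wraps ring
 * indices with "& (NUM_IR_DESC - 1)", which is only correct when
 * NUM_IR_DESC (defined in au1000_ircc.h) is a power of two -
 * presumably 64, matching the RING_SIZE_64 value programmed into
 * IR_RING_SIZE in au1k_init() below.
 */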
static int au1k_irda_init(void)
{
        static unsigned version_printed = 0;
        struct au1k_private *aup;
        struct net_device *dev;
        int err;

        if (version_printed++ == 0)
                printk(version);

        dev = alloc_irdadev(sizeof(struct au1k_private));
        if (!dev)
                return -ENOMEM;

        dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
        err = au1k_irda_net_init(dev);
        if (err)
                goto out;
        err = register_netdev(dev);
        if (err)
                goto out1;
        ir_devs[0] = dev;
        printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
        return 0;

out1:
        aup = netdev_priv(dev);
        dma_free((void *)aup->db[0].vaddr,
                MAX_BUF_SIZE * 2 * NUM_IR_DESC);
        dma_free((void *)aup->rx_ring[0],
                2 * MAX_NUM_IR_DESC * (sizeof(ring_dest_t)));
        kfree(aup->rx_buff.head);
out:
        free_netdev(dev);
        return err;
}

static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
        io->head = kmalloc(size, GFP_KERNEL);
        if (io->head != NULL) {
                io->truesize = size;
                io->in_frame = FALSE;
                io->state = OUTSIDE_FRAME;
                io->data = io->head;
        }
        return io->head ? 0 : -ENOMEM;
}

static const struct net_device_ops au1k_irda_netdev_ops = {
        .ndo_open               = au1k_irda_start,
        .ndo_stop               = au1k_irda_stop,
        .ndo_start_xmit         = au1k_irda_hard_xmit,
        .ndo_tx_timeout         = au1k_tx_timeout,
        .ndo_do_ioctl           = au1k_irda_ioctl,
};
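/*
 * DMA memory layout set up in au1k_irda_net_init() below: one
 * contiguous allocation holds the rx descriptor ring with the tx ring
 * starting 512 bytes in, and a second allocation is carved into
 * 2 * NUM_IR_DESC data buffers of MAX_BUF_SIZE bytes each, threaded
 * onto the pDBfree list and then attached to the descriptors.
 */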
static int au1k_irda_net_init(struct net_device *dev)
{
        struct au1k_private *aup = netdev_priv(dev);
        int i, retval = 0, err;
        db_dest_t *pDB, *pDBfree;
        dma_addr_t temp;

        err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
        if (err)
                goto out1;

        dev->netdev_ops = &au1k_irda_netdev_ops;

        irda_init_max_qos_capabilies(&aup->qos);

        /* The only value we must override is the baudrate */
        aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
                IR_115200 | IR_576000 | (IR_4000000 << 8);

        aup->qos.min_turn_time.bits = qos_mtt_bits;
        irda_qos_bits_to_value(&aup->qos);

        retval = -ENOMEM;

        /* Tx ring follows rx ring + 512 bytes */
        /* we need a 1k aligned buffer */
        aup->rx_ring[0] = (ring_dest_t *)
                dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(ring_dest_t)), &temp);
        if (!aup->rx_ring[0])
                goto out2;

        /* allocate the data buffers */
        aup->db[0].vaddr =
                (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
        if (!aup->db[0].vaddr)
                goto out3;

        setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);

        pDBfree = NULL;
        pDB = aup->db;
        for (i = 0; i < (2 * NUM_IR_DESC); i++) {
                pDB->pnext = pDBfree;
                pDBfree = pDB;
                pDB->vaddr =
                        (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE * i);
                pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
                pDB++;
        }
        aup->pDBfree = pDBfree;

        /* attach a data buffer to each descriptor */
        for (i = 0; i < NUM_IR_DESC; i++) {
                pDB = GetFreeDB(aup);
                if (!pDB)
                        goto out;
                /* the descriptor takes the 32-bit DMA address split
                   into four bytes, least significant byte first */
                aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
                aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
                aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
                aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
                aup->rx_db_inuse[i] = pDB;
        }
        for (i = 0; i < NUM_IR_DESC; i++) {
                pDB = GetFreeDB(aup);
                if (!pDB)
                        goto out;
                aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
                aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
                aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
                aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
                aup->tx_ring[i]->count_0 = 0;
                aup->tx_ring[i]->count_1 = 0;
                aup->tx_ring[i]->flags = 0;
                aup->tx_db_inuse[i] = pDB;
        }

#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
        /* power on */
        bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
        bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL;
        au_sync();
#endif

        return 0;

out:
        dma_free((void *)aup->db[0].vaddr,
                MAX_BUF_SIZE * 2 * NUM_IR_DESC);
out3:
        dma_free((void *)aup->rx_ring[0],
                2 * MAX_NUM_IR_DESC * (sizeof(ring_dest_t)));
out2:
        kfree(aup->rx_buff.head);
out1:
        printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
        return retval;
}
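/*
 * au1k_init() programs the controller proper: interface clocking and
 * coherency via IR_INTERFACE_CONFIG, the ring base/size registers, and
 * a 48 MHz reference in IR_CONFIG_2, then drops to 9600 baud SIR. The
 * rx descriptors are handed to the hardware (AU_OWN) here; the PHY
 * itself is re-enabled at the end of au1k_irda_set_speed().
 */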
static int au1k_init(struct net_device *dev)
{
        struct au1k_private *aup = netdev_priv(dev);
        int i;
        u32 control;
        u32 ring_address;

        /* bring the device out of reset */
        control = 0xe; /* coherent, clock enable, one half system clock */

#ifndef CONFIG_CPU_LITTLE_ENDIAN
        control |= 1;
#endif
        aup->tx_head = 0;
        aup->tx_tail = 0;
        aup->rx_head = 0;

        for (i = 0; i < NUM_IR_DESC; i++) {
                aup->rx_ring[i]->flags = AU_OWN;
        }

        writel(control, IR_INTERFACE_CONFIG);
        au_sync_delay(10);

        writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
        au_sync_delay(1);

        writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);

        ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
        writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
        writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);

        writel(RING_SIZE_64 << 8 | RING_SIZE_64 << 12, IR_RING_SIZE);

        writel(1 << 2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
        writel(0, IR_RING_ADDR_CMPR);

        au1k_irda_set_speed(dev, 9600);
        return 0;
}

static int au1k_irda_start(struct net_device *dev)
{
        int retval;
        char hwname[32];
        struct au1k_private *aup = netdev_priv(dev);

        if ((retval = au1k_init(dev))) {
                printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
                return retval;
        }

        if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt,
                        0, dev->name, dev))) {
                printk(KERN_ERR "%s: unable to get IRQ %d\n",
                                dev->name, dev->irq);
                return retval;
        }
        if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt,
                        0, dev->name, dev))) {
                free_irq(AU1000_IRDA_TX_INT, dev);
                printk(KERN_ERR "%s: unable to get IRQ %d\n",
                                dev->name, dev->irq);
                return retval;
        }

        /* Give self a hardware name */
        sprintf(hwname, "Au1000 SIR/FIR");
        aup->irlap = irlap_open(dev, &aup->qos, hwname);
        netif_start_queue(dev);

        writel(read_ir_reg(IR_CONFIG_2) | 1 << 8, IR_CONFIG_2); /* int enable */

        aup->timer.expires = RUN_AT((3 * HZ));
        aup->timer.data = (unsigned long)dev;
        return 0;
}

static int au1k_irda_stop(struct net_device *dev)
{
        struct au1k_private *aup = netdev_priv(dev);

        /* disable interrupts */
        writel(read_ir_reg(IR_CONFIG_2) & ~(1 << 8), IR_CONFIG_2);
        writel(0, IR_CONFIG_1);
        writel(0, IR_INTERFACE_CONFIG); /* disable clock */
        au_sync();

        if (aup->irlap) {
                irlap_close(aup->irlap);
                aup->irlap = NULL;
        }

        netif_stop_queue(dev);
        del_timer(&aup->timer);

        /* disable the interrupt */
        free_irq(AU1000_IRDA_TX_INT, dev);
        free_irq(AU1000_IRDA_RX_INT, dev);
        return 0;
}

static void __exit au1k_irda_exit(void)
{
        struct net_device *dev = ir_devs[0];
        struct au1k_private *aup = netdev_priv(dev);

        unregister_netdev(dev);

        dma_free((void *)aup->db[0].vaddr,
                MAX_BUF_SIZE * 2 * NUM_IR_DESC);
        dma_free((void *)aup->rx_ring[0],
                2 * MAX_NUM_IR_DESC * (sizeof(ring_dest_t)));
        kfree(aup->rx_buff.head);
        free_netdev(dev);
}


static inline void
update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
        struct au1k_private *aup = netdev_priv(dev);
        struct net_device_stats *ps = &aup->stats;

        ps->tx_packets++;
        ps->tx_bytes += pkt_len;

        if (status & IR_TX_ERROR) {
                ps->tx_errors++;
                ps->tx_aborted_errors++;
        }
}
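/*
 * Reclaim completed tx descriptors: walk from tx_tail while the
 * hardware has released ownership (AU_OWN clear), account each frame,
 * and wake the queue if it had filled. Once the ring drains, either
 * apply a pending speed change or switch the block back to receive.
 */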
static void au1k_tx_ack(struct net_device *dev)
{
        struct au1k_private *aup = netdev_priv(dev);
        volatile ring_dest_t *ptxd;

        ptxd = aup->tx_ring[aup->tx_tail];
        while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
                update_tx_stats(dev, ptxd->flags,
                                ptxd->count_1 << 8 | ptxd->count_0);
                ptxd->count_0 = 0;
                ptxd->count_1 = 0;
                au_sync();

                aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
                ptxd = aup->tx_ring[aup->tx_tail];

                if (aup->tx_full) {
                        aup->tx_full = 0;
                        netif_wake_queue(dev);
                }
        }

        if (aup->tx_tail == aup->tx_head) {
                if (aup->newspeed) {
                        au1k_irda_set_speed(dev, aup->newspeed);
                        aup->newspeed = 0;
                } else {
                        writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
                                        IR_CONFIG_1);
                        au_sync();
                        writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
                                        IR_CONFIG_1);
                        writel(0, IR_RING_PROMPT);
                        au_sync();
                }
        }
}


/*
 * Au1000 transmit routine.
 */
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct au1k_private *aup = netdev_priv(dev);
        int speed = irda_get_next_speed(skb);
        volatile ring_dest_t *ptxd;
        u32 len;

        u32 flags;
        db_dest_t *pDB;

        if (speed != aup->speed && speed != -1) {
                aup->newspeed = speed;
        }

        if ((skb->len == 0) && (aup->newspeed)) {
                if (aup->tx_tail == aup->tx_head) {
                        au1k_irda_set_speed(dev, speed);
                        aup->newspeed = 0;
                }
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        ptxd = aup->tx_ring[aup->tx_head];
        flags = ptxd->flags;

        if (flags & AU_OWN) {
                printk(KERN_DEBUG "%s: tx_full\n", dev->name);
                netif_stop_queue(dev);
                aup->tx_full = 1;
                return NETDEV_TX_BUSY;
        } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
                printk(KERN_DEBUG "%s: tx_full\n", dev->name);
                netif_stop_queue(dev);
                aup->tx_full = 1;
                return NETDEV_TX_BUSY;
        }

        pDB = aup->tx_db_inuse[aup->tx_head];

#if 0
        if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
                printk("tx warning: rx byte cnt %x\n",
                                read_ir_reg(IR_RX_BYTE_CNT));
        }
#endif

        if (aup->speed == 4000000) {
                /* FIR */
                skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
                ptxd->count_0 = skb->len & 0xff;
                ptxd->count_1 = (skb->len >> 8) & 0xff;
        } else {
                /* SIR */
                len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
                ptxd->count_0 = len & 0xff;
                ptxd->count_1 = (len >> 8) & 0xff;
                ptxd->flags |= IR_DIS_CRC;
                /* 0xae00000c is bcsr->resets on the Db1x00 boards (bcsr
                   lives at 0xAE000000); clearing bit 13, apparently
                   BCSR_RESETS_FIR_SEL, forces the SIR transceiver path */
                au_writel(au_readl(0xae00000c) & ~(1 << 13), 0xae00000c);
        }
        ptxd->flags |= AU_OWN;
        au_sync();

        writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
        writel(0, IR_RING_PROMPT);
        au_sync();

        dev_kfree_skb(skb);
        aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
        dev->trans_start = jiffies;
        return NETDEV_TX_OK;
}


static inline void
update_rx_stats(struct net_device *dev, u32 status, u32 count)
{
        struct au1k_private *aup = netdev_priv(dev);
        struct net_device_stats *ps = &aup->stats;

        ps->rx_packets++;

        if (status & IR_RX_ERROR) {
                ps->rx_errors++;
                if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
                        ps->rx_missed_errors++;
                if (status & IR_MAX_LEN)
                        ps->rx_length_errors++;
                if (status & IR_CRC_ERROR)
                        ps->rx_crc_errors++;
        } else
                ps->rx_bytes += count;
}

/*
 * Au1000 receive routine.
 */
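/*
 * Descriptors the hardware has released are passed up here. The skb
 * is sized to the full hardware byte count at 4 Mbps but to count - 2
 * in SIR mode, presumably trimming the 16-bit FCS. Note that when
 * alloc_skb() fails the loop retries the same descriptor rather than
 * returning it to the hardware.
 */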
static int au1k_irda_rx(struct net_device *dev)
{
        struct au1k_private *aup = netdev_priv(dev);
        struct sk_buff *skb;
        volatile ring_dest_t *prxd;
        u32 flags, count;
        db_dest_t *pDB;

        prxd = aup->rx_ring[aup->rx_head];
        flags = prxd->flags;

        while (!(flags & AU_OWN)) {
                pDB = aup->rx_db_inuse[aup->rx_head];
                count = prxd->count_1 << 8 | prxd->count_0;
                if (!(flags & IR_RX_ERROR)) {
                        /* good frame */
                        update_rx_stats(dev, flags, count);
                        skb = alloc_skb(count + 1, GFP_ATOMIC);
                        if (skb == NULL) {
                                aup->netdev->stats.rx_dropped++;
                                continue;
                        }
                        skb_reserve(skb, 1);
                        if (aup->speed == 4000000)
                                skb_put(skb, count);
                        else
                                skb_put(skb, count - 2);
                        skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
                        skb->dev = dev;
                        skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                        prxd->count_0 = 0;
                        prxd->count_1 = 0;
                }
                prxd->flags |= AU_OWN;
                aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
                writel(0, IR_RING_PROMPT);
                au_sync();

                /* next descriptor */
                prxd = aup->rx_ring[aup->rx_head];
                flags = prxd->flags;
        }
        return 0;
}


static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
{
        struct net_device *dev = dev_id;

        writel(0, IR_INT_CLEAR); /* ack irda interrupts */

        au1k_irda_rx(dev);
        au1k_tx_ack(dev);

        return IRQ_HANDLED;
}


/*
 * The Tx ring has been full longer than the watchdog timeout
 * value. The transmitter must be hung?
 */
static void au1k_tx_timeout(struct net_device *dev)
{
        u32 speed;
        struct au1k_private *aup = netdev_priv(dev);

        printk(KERN_ERR "%s: tx timeout\n", dev->name);
        speed = aup->speed;
        aup->speed = 0;
        au1k_irda_set_speed(dev, speed);
        aup->tx_full = 0;
        netif_wake_queue(dev);
}
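/*
 * Besides the PHY configuration, changing speed also flips the
 * board-level SIR/FIR transceiver select (BCSR_RESETS_FIR_SEL on
 * Db1x00, CPLD_AUX1 bit 13 on Pb1000/Pb1100) before writing the
 * IR_WRITE_PHY_CONFIG value for the requested rate.
 */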
/*
 * Set the IrDA communications speed.
 */
static int
au1k_irda_set_speed(struct net_device *dev, int speed)
{
        unsigned long flags;
        struct au1k_private *aup = netdev_priv(dev);
        u32 control;
        int ret = 0, timeout = 10, i;
        volatile ring_dest_t *ptxd;
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
        unsigned long irda_resets;
#endif

        if (speed == aup->speed)
                return ret;

        spin_lock_irqsave(&ir_lock, flags);

        /* disable PHY first */
        writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);

        /* disable RX/TX */
        writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE),
                        IR_CONFIG_1);
        au_sync_delay(1);
        while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
                mdelay(1);
                if (!timeout--) {
                        printk(KERN_ERR "%s: rx/tx disable timeout\n",
                                        dev->name);
                        break;
                }
        }

        /* disable DMA */
        writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
        au_sync_delay(1);

        /*
         * After we disable tx/rx, the index pointers
         * go back to zero.
         */
        aup->tx_head = aup->tx_tail = aup->rx_head = 0;
        for (i = 0; i < NUM_IR_DESC; i++) {
                ptxd = aup->tx_ring[i];
                ptxd->flags = 0;
                ptxd->count_0 = 0;
                ptxd->count_1 = 0;
        }

        for (i = 0; i < NUM_IR_DESC; i++) {
                ptxd = aup->rx_ring[i];
                ptxd->count_0 = 0;
                ptxd->count_1 = 0;
                ptxd->flags = AU_OWN;
        }

        if (speed == 4000000) {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
                bcsr->resets |= BCSR_RESETS_FIR_SEL;
#else /* Pb1000 and Pb1100 */
                writel(1 << 13, CPLD_AUX1);
#endif
        } else {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
                bcsr->resets &= ~BCSR_RESETS_FIR_SEL;
#else /* Pb1000 and Pb1100 */
                writel(readl(CPLD_AUX1) & ~(1 << 13), CPLD_AUX1);
#endif
        }

        switch (speed) {
        case 9600:
                writel(11 << 10 | 12 << 5, IR_WRITE_PHY_CONFIG);
                writel(IR_SIR_MODE, IR_CONFIG_1);
                break;
        case 19200:
                writel(5 << 10 | 12 << 5, IR_WRITE_PHY_CONFIG);
                writel(IR_SIR_MODE, IR_CONFIG_1);
                break;
        case 38400:
                writel(2 << 10 | 12 << 5, IR_WRITE_PHY_CONFIG);
                writel(IR_SIR_MODE, IR_CONFIG_1);
                break;
        case 57600:
                writel(1 << 10 | 12 << 5, IR_WRITE_PHY_CONFIG);
                writel(IR_SIR_MODE, IR_CONFIG_1);
                break;
        case 115200:
                writel(12 << 5, IR_WRITE_PHY_CONFIG);
                writel(IR_SIR_MODE, IR_CONFIG_1);
                break;
        case 4000000:
                writel(0xF, IR_WRITE_PHY_CONFIG);
                writel(IR_FIR | IR_DMA_ENABLE | IR_RX_ENABLE, IR_CONFIG_1);
                break;
        default:
                printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
                ret = -EINVAL;
                break;
        }

        aup->speed = speed;
        writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
        au_sync();

        control = read_ir_reg(IR_ENABLE);
        writel(0, IR_RING_PROMPT);
        au_sync();

        if (control & (1 << 14)) {
                printk(KERN_ERR "%s: configuration error\n", dev->name);
        } else {
                if (control & (1 << 11))
                        printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
                if (control & (1 << 12))
                        printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
                if (control & (1 << 13))
                        printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
                if (control & (1 << 10))
                        printk(KERN_DEBUG "%s TX enabled\n", dev->name);
                if (control & (1 << 9))
                        printk(KERN_DEBUG "%s RX enabled\n", dev->name);
        }

        spin_unlock_irqrestore(&ir_lock, flags);
        return ret;
}

static int
au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
        struct if_irda_req *rq = (struct if_irda_req *)ifreq;
        struct au1k_private *aup = netdev_priv(dev);
        int ret = -EOPNOTSUPP;

        switch (cmd) {
        case SIOCSBANDWIDTH:
                if (capable(CAP_NET_ADMIN)) {
                        /*
                         * We are unable to set the speed if the
                         * device is not running.
                         */
                        if (aup->open)
                                ret = au1k_irda_set_speed(dev,
                                                rq->ifr_baudrate);
                        else {
                                printk(KERN_ERR "%s ioctl: !netif_running\n",
                                                dev->name);
                                ret = 0;
                        }
                }
                break;

        case SIOCSMEDIABUSY:
                ret = -EPERM;
                if (capable(CAP_NET_ADMIN)) {
                        irda_device_set_media_busy(dev, TRUE);
                        ret = 0;
                }
                break;

        case SIOCGRECEIVING:
                rq->ifr_receiving = 0;
                break;
        default:
                break;
        }
        return ret;
}

MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");

module_init(au1k_irda_init);
module_exit(au1k_irda_exit);