Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.22-rc1 (4011 lines, 121 kB)
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>

#include <asm/of_platform.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "ucc_geth_mii.h"

#undef DEBUG

#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
#define DRV_NAME "ucc_geth"
#define DRV_VERSION "1.1"

#define ugeth_printk(level, format, arg...) \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...) \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...) \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...) \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...) \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif /* UGETH_VERBOSE_DEBUG */
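/* Worked example of the wrappers above (the format string is illustrative):
 * ugeth_err("bad ring %d", i) expands to
 * printk(KERN_ERR "bad ring %d" "\n", i), so every message gets a severity
 * prefix and a trailing newline automatically. */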

static DEFINE_SPINLOCK(ugeth_lock);

static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		.bd_mem_part = MEM_PART_SYSTEM,
		.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		.max_rx_buf_length = 1536,
		/* adjusted at startup if max-speed 1000 */
		.urfs = UCC_GETH_URFS_INIT,
		.urfet = UCC_GETH_URFET_INIT,
		.urfset = UCC_GETH_URFSET_INIT,
		.utfs = UCC_GETH_UTFS_INIT,
		.utfet = UCC_GETH_UTFET_INIT,
		.utftt = UCC_GETH_UTFTT_INIT,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN,
		TX_BD_RING_LEN},

	.bdRingLenRx = {
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN,
		RX_BD_RING_LEN},

	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static struct ucc_geth_info ugeth_info[8];

#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
			(u32) i,
			*((u32 *) (i)),
			*((u32 *) (i + 4)),
			*((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */

#ifdef CONFIG_UGETH_FILTERING
static void enqueue(struct list_head *node, struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	list_add_tail(node, lh);
	spin_unlock_irqrestore(&ugeth_lock, flags);
}
#endif /* CONFIG_UGETH_FILTERING */

static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}
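/* Both enqueue() and dequeue() serialize on the single global ugeth_lock
 * with spin_lock_irqsave(), so the address-container lists can be
 * manipulated even from interrupt context; dequeue() returns NULL when
 * the list is empty. */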

static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
			UCC_GETH_RX_DATA_BUF_ALIGNMENT);

	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		(((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					  1)));

	skb->dev = ugeth->dev;

	out_be32(&((struct qe_bd *)bd)->buf,
		 dma_map_single(NULL,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));

	out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W)));

	return skb;
}

static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = in_be32((u32 *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If can not allocate data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}

static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  volatile u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				ugeth_err
				    ("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}

static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    volatile u32 *p_start,
				    u8 num_entries,
				    enum qe_risc_allocation risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*(p_start++) = 0;	/* Just for cosmetics */
		}
	}

	return 0;
}

#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  volatile u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					(u32)
					qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_UGETH_FILTERING
static struct enet_addr_container *get_enet_addr_container(void)
{
	struct enet_addr_container *enet_addr_cont;

	/* allocate memory */
	enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
	if (!enet_addr_cont) {
		ugeth_err("%s: No memory for enet_addr_container object.",
			__FUNCTION__);
		return NULL;
	}

	return enet_addr_cont;
}
#endif /* CONFIG_UGETH_FILTERING */

static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}

static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}
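/* Worked example for set_mac_addr(): for a station address of
 * 12:34:56:78:9a:bc (mac[0] = 0x12 ... mac[5] = 0xbc) the three 16-bit
 * writes store 0xbc9a, 0x7856 and 0x3412, i.e. the address goes out
 * byte-reversed, as the filtering helpers below explain. */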

#ifdef CONFIG_UGETH_FILTERING
static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram *)
	    ugeth->p_rx_glbl_pram->addressfiltering;

	/* Ethernet frames are defined in Little Endian mode, */
	/* therefore to insert the address we reverse the bytes. */
	set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);
	return 0;
}
#endif /* CONFIG_UGETH_FILTERING */

static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram *)
	    ugeth->p_rx_glbl_pram->addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}

static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram *)
	    ugeth->p_rx_glbl_pram->addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode, therefore to
	   insert the address into the hash (Big Endian mode), we reverse
	   the bytes. */
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}

#ifdef CONFIG_UGETH_MAGIC_PACKET
static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Enable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm |= UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Enable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 |= MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}

static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Disable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm &= ~UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Disable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}
#endif /* MAGIC_PACKET */

static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}

#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast *uf_regs;
	struct ucc_geth *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	   gathers hardware statistics */
	if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}

static void dump_bds(struct ucc_geth_private *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}

static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->maccfg1,
		in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->maccfg2,
		in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->ipgifg,
		in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->hafdup,
		in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->ifctl,
		in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->ifstat,
		in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->macstnaddr1,
		in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->macstnaddr2,
		in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->uempr,
		in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->utbipar,
		in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
		(u32) &ugeth->ug_regs->uescr,
		in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->tx64,
		in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->tx127,
		in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->tx255,
		in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rx64,
		in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rx127,
		in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rx255,
		in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->txok,
		in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
		(u32) &ugeth->ug_regs->txcf,
		in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->tmca,
		in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->tbca,
		in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rxfok,
		in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rxbok,
		in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rbyt,
		in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rmca,
		in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->rbca,
		in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->scar,
		in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
		(u32) &ugeth->ug_regs->scam,
		in_be32(&ugeth->ug_regs->scam));

	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				(u32) &ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) &ugeth->p_thread_data_tx[i],
				sizeof(struct ucc_geth_thread_data_tx));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				(u32) &ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) &ugeth->p_thread_data_rx[i],
				sizeof(struct ucc_geth_thread_data_rx));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_tx_glbl_pram->temoder,
			in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->sqptr,
			in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->schedulerbasepointer,
			in_be32(&ugeth->p_tx_glbl_pram->
				schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->txrmonbaseptr,
			in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->tstate,
			in_be32(&ugeth->p_tx_glbl_pram->tstate));
		ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[0],
			ugeth->p_tx_glbl_pram->iphoffset[0]);
		ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[1],
			ugeth->p_tx_glbl_pram->iphoffset[1]);
		ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[2],
			ugeth->p_tx_glbl_pram->iphoffset[2]);
		ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[3],
			ugeth->p_tx_glbl_pram->iphoffset[3]);
		ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[4],
			ugeth->p_tx_glbl_pram->iphoffset[4]);
		ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[5],
			ugeth->p_tx_glbl_pram->iphoffset[5]);
		ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[6],
			ugeth->p_tx_glbl_pram->iphoffset[6]);
		ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_tx_glbl_pram->iphoffset[7],
			ugeth->p_tx_glbl_pram->iphoffset[7]);
		ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[0],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
		ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[1],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
		ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[2],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
		ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[3],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
		ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[4],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
		ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[5],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
		ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[6],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
		ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->vtagtable[7],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
		ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_tx_glbl_pram->tqptr,
			in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->remoder,
			in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->rqptr,
			in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->typeorlen,
			in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_rx_glbl_pram->rxgstpack,
			ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->intcoalescingptr,
			in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
			(u32) &ugeth->p_rx_glbl_pram->rstate,
			ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->mrblr,
			in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->rbdqptr,
			in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->mflr,
			in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->minflr,
			in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->maxd1,
			in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->maxd2,
			in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->ecamptr,
			in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l2qt,
			in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[0],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
		ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[1],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
		ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[2],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
		ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[3],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
		ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[4],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
		ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[5],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
		ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[6],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
		ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->l3qt[7],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
		ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->vlantype,
			in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
			(u32) &ugeth->p_rx_glbl_pram->vlantci,
			in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
			    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) &ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
			(u32) &ugeth->p_rx_glbl_pram->exfGlobalParam,
			in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				(u32) &ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) &ugeth->p_send_q_mem_reg->sqqd[i],
				sizeof(struct ucc_geth_send_queue_qd));
		}
	}
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				(u32) &ugeth->p_rx_irq_coalescing_tbl->
				coalescingentry[i]);
			ugeth_info
			    ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingmaxvalue,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingmaxvalue));
			ugeth_info
			    ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingcounter,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				(u32) &ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info
			    ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info
			    ("bdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].bdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info
			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
				     externalbdbaseptr));
			ugeth_info
			    ("externalbdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				(u32)
				qe_muram_addr(in_be32
					      (&ugeth->p_rx_bd_qs_tbl[i].
					       bdbaseptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdbaseptr)),
				 sizeof(struct ucc_geth_rx_prefetched_bds));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			(u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(struct ucc_geth_thread_rx_pram);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(struct ucc_geth_thread_tx_pram),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
#endif /* DEBUG */

static void init_default_reg_vals(volatile u32 *upsmr_register,
				  volatile u32 *maccfg1_register,
				  volatile u32 *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}

static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   volatile u32 *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);

	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}

static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       volatile u32 *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}

static int init_flow_control_params(u32 automatic_flow_control_mode,
				    int rx_flow_control_enable,
				    int tx_flow_control_enable,
				    u16 pause_period,
				    u16 extension_field,
				    volatile u32 *upsmr_register,
				    volatile u32 *uempr_register,
				    volatile u32 *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	value = in_be32(upsmr_register);
	value |= automatic_flow_control_mode;
	out_be32(upsmr_register, value);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}

static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     volatile u32 *upsmr_register,
					     volatile u16 *uescr_register)
{
	u32 upsmr_value = 0;
	u16 uescr_value = 0;
	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics) {
		upsmr_value = in_be32(upsmr_register);
		upsmr_value |= UPSMR_HSE;
		out_be32(upsmr_register, upsmr_value);
	}

	/* Clear hardware statistics counters */
	uescr_value = in_be16(uescr_register);
	uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
	if (auto_zero_hardware_statistics)
		uescr_value |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr_value);

	return 0;
}

static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		volatile u32 *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		volatile u32 *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		volatile u16 *temoder_register,
		volatile u32 *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */
	u16 temoder_value;
	u32 remoder_value;

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		temoder_value = in_be16(temoder_register);
		temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
		out_be16(temoder_register, temoder_value);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		remoder_value = in_be32(remoder_register);
		remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
		out_be32(remoder_register, remoder_value);
	}

	return 0;
}

static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      volatile u32 *macstnaddr1_register,
				      volatile u32 *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */

	/* 0                      7   8                      15 */
	/* station address byte 5     station address byte 4   */
	/* 16                     23  24                     31 */
	/* station address byte 3     station address byte 2   */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */

	/* 0                      7   8                      15 */
	/* station address byte 1     station address byte 0   */
	/* 16                     23  24                     31 */
	/* reserved                   reserved                 */
	value = 0;
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}
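/* Worked example: plugging the 0x12345678ABCD address from the comment
 * above into the code gives MACSTNADDR1 = 0xCDAB7856 and
 * MACSTNADDR2 = 0x34120000 (the low half of MACSTNADDR2 is reserved and
 * left at zero). */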

static int init_check_frame_length_mode(int length_check,
					volatile u32 *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}

static int init_preamble_length(u8 preamble_length,
				volatile u32 *maccfg2_register)
{
	u32 value = 0;

	if ((preamble_length < 3) || (preamble_length > 7))
		return -EINVAL;

	value = in_be32(maccfg2_register);
	value &= ~MACCFG2_PREL_MASK;
	value |= (preamble_length << MACCFG2_PREL_SHIFT);
	out_be32(maccfg2_register, value);
	return 0;
}

static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, volatile u32 *upsmr_register)
{
	u32 value = 0;

	value = in_be32(upsmr_register);

	if (reject_broadcast)
		value |= UPSMR_BRO;
	else
		value &= ~UPSMR_BRO;

	if (receive_short_frames)
		value |= UPSMR_RSH;
	else
		value &= ~UPSMR_RSH;

	if (promiscuous)
		value |= UPSMR_PRO;
	else
		value &= ~UPSMR_PRO;

	out_be32(upsmr_register, value);

	return 0;
}

static int init_max_rx_buff_len(u16 max_rx_buf_len,
				volatile u16 *mrblr_register)
{
	/* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0)
	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}

static int init_min_frame_len(u16 min_frame_length,
			      volatile u16 *minflr_register,
			      volatile u16 *mrblr_register)
{
	u16 mrblr_value = 0;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}

static int adjust_enet_interface(struct ucc_geth_private *ugeth)
{
	struct ucc_geth_info *ug_info;
	struct ucc_geth *ug_regs;
	struct ucc_fast *uf_regs;
	int ret_val;
	u32 upsmr, maccfg2, tbiBaseAddress;
	u16 value;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ug_info = ugeth->ug_info;
	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	/* Set MACCFG2 */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
	if ((ugeth->max_speed == SPEED_10) ||
	    (ugeth->max_speed == SPEED_100))
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
	else if (ugeth->max_speed == SPEED_1000)
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
	maccfg2 |= ug_info->padAndCrc;
	out_be32(&ug_regs->maccfg2, maccfg2);

	/* Set UPSMR */
	upsmr = in_be32(&uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		upsmr |= UPSMR_RPM;
		switch (ugeth->max_speed) {
		case SPEED_10:
			upsmr |= UPSMR_R10M;
			/* FALLTHROUGH */
		case SPEED_100:
			if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
				upsmr |= UPSMR_RMM;
		}
	}
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		upsmr |= UPSMR_TBIM;
	}
	out_be32(&uf_regs->upsmr, upsmr);

	/* Disable autonegotiation in tbi mode, because by default it
	   comes up in autonegotiation mode. */
	/* Note that this depends on proper setting in utbipar register. */
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		tbiBaseAddress = in_be32(&ug_regs->utbipar);
		tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
		tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
		value = ugeth->phydev->bus->read(ugeth->phydev->bus,
				(u8) tbiBaseAddress, ENET_TBI_MII_CR);
		value &= ~0x1000;	/* Turn off autonegotiation */
		ugeth->phydev->bus->write(ugeth->phydev->bus,
				(u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
	}
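	/* Note: the 0x1000 bit cleared above is BMCR_ANENABLE, the MII
	 * autonegotiation-enable flag, applied here to the TBI copy of
	 * the MII control register. */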

	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);

	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
	if (ret_val != 0) {
		ugeth_err
		    ("%s: Preamble length must be between 3 and 7 inclusive.",
		     __FUNCTION__);
		return ret_val;
	}

	return 0;
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the ugeth structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */

static void adjust_link(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_geth *ug_regs;
	struct ucc_fast *uf_regs;
	struct phy_device *phydev = ugeth->phydev;
	unsigned long flags;
	int new_state = 0;

	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	spin_lock_irqsave(&ugeth->lock, flags);

	if (phydev->link) {
		u32 tempval = in_be32(&ug_regs->maccfg2);
		u32 upsmr = in_be32(&uf_regs->upsmr);
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != ugeth->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FDX);
			else
				tempval |= MACCFG2_FDX;
			ugeth->oldduplex = phydev->duplex;
		}

		if (phydev->speed != ugeth->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_1000:
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					   MACCFG2_INTERFACE_MODE_BYTE);
				break;
			case SPEED_100:
			case SPEED_10:
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					   MACCFG2_INTERFACE_MODE_NIBBLE);
				/* if reduced mode, re-set UPSMR.R10M */
				if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
					if (phydev->speed == SPEED_10)
						upsmr |= UPSMR_R10M;
					else
						upsmr &= ~(UPSMR_R10M);
				}
				break;
			default:
				if (netif_msg_link(ugeth))
					ugeth_warn(
						"%s: Ack! Speed (%d) is not 10/100/1000!",
						dev->name, phydev->speed);
				break;
			}
			ugeth->oldspeed = phydev->speed;
		}

		out_be32(&ug_regs->maccfg2, tempval);
		out_be32(&uf_regs->upsmr, upsmr);

		if (!ugeth->oldlink) {
			new_state = 1;
			ugeth->oldlink = 1;
			netif_schedule(dev);
		}
	} else if (ugeth->oldlink) {
		new_state = 1;
		ugeth->oldlink = 0;
		ugeth->oldspeed = 0;
		ugeth->oldduplex = -1;
	}

	if (new_state && netif_msg_link(ugeth))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&ugeth->lock, flags);
}

/* Configure the PHY for dev.
 * returns 0 if success. -1 if failure
 */
static int init_phy(struct net_device *dev)
{
	struct ucc_geth_private *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->ug_info->mdio_bus,
		 priv->ug_info->phy_address);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);

	if (IS_ERR(phydev)) {
		printk("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	phydev->supported &= (ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full |
			      ADVERTISED_100baseT_Half |
			      ADVERTISED_100baseT_Full);

	if (priv->max_speed == SPEED_1000)
		phydev->supported |= ADVERTISED_1000baseT_Full;

	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u32 temp;

	uccf = ugeth->uccf;

	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
	temp = in_be32(uccf->p_uccm);
	temp &= ~UCCE_GRA;
	out_be32(uccf->p_uccm, temp);
	out_be32(uccf->p_ucce, UCCE_GRA);	/* clear by writing 1 */

	/* Issue host command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		temp = in_be32(uccf->p_ucce);
	} while (!(temp & UCCE_GRA));

	uccf->stopped_tx = 1;

	return 0;
}

static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u8 temp;

	uccf = ugeth->uccf;

	/* Clear acknowledge bit */
	temp = ugeth->p_rx_glbl_pram->rxgstpack;
	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	ugeth->p_rx_glbl_pram->rxgstpack = temp;

	/* Keep issuing command and checking acknowledge bit until
	   it is asserted, according to spec */
	do {
		/* Issue host command */
		cecr_subblock =
		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
						ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     QE_CR_PROTOCOL_ETHERNET, 0);

		temp = ugeth->p_rx_glbl_pram->rxgstpack;
	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uccf->stopped_rx = 1;

	return 0;
}

static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
	uccf->stopped_tx = 0;

	return 0;
}

static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     0);
	uccf->stopped_rx = 0;

	return 0;
}

static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;
	int enabled_tx, enabled_rx;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
		return -EINVAL;
	}

	enabled_tx = uccf->enabled_tx;
	enabled_rx = uccf->enabled_rx;

	/* Get Tx and Rx going again, in case this channel was actively
	   disabled. */
	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
		ugeth_restart_tx(ugeth);
	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
		ugeth_restart_rx(ugeth);

	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */

	return 0;
}

static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
*/ 1759 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1760 ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1761 return -EINVAL; 1762 } 1763 1764 /* Stop any transmissions */ 1765 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) 1766 ugeth_graceful_stop_tx(ugeth); 1767 1768 /* Stop any receptions */ 1769 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) 1770 ugeth_graceful_stop_rx(ugeth); 1771 1772 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ 1773 1774 return 0; 1775} 1776 1777static void ugeth_dump_regs(struct ucc_geth_private *ugeth) 1778{ 1779#ifdef DEBUG 1780 ucc_fast_dump_regs(ugeth->uccf); 1781 dump_regs(ugeth); 1782 dump_bds(ugeth); 1783#endif 1784} 1785 1786#ifdef CONFIG_UGETH_FILTERING 1787static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params * 1788 p_UccGethTadParams, 1789 struct qe_fltr_tad *qe_fltr_tad) 1790{ 1791 u16 temp; 1792 1793 /* Zero serialized TAD */ 1794 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE); 1795 1796 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */ 1797 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode || 1798 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) 1799 || (p_UccGethTadParams->vnontag_op != 1800 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP) 1801 ) 1802 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF; 1803 if (p_UccGethTadParams->reject_frame) 1804 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ; 1805 temp = 1806 (u16) (((u16) p_UccGethTadParams-> 1807 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT); 1808 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */ 1809 1810 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */ 1811 if (p_UccGethTadParams->vnontag_op == 1812 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT) 1813 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP; 1814 qe_fltr_tad->serialized[1] |= 1815 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT; 1816 1817 qe_fltr_tad->serialized[2] |= 1818 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT; 1819 /* upper bits */ 1820 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8); 1821 /* lower bits */ 1822 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff); 1823 1824 return 0; 1825} 1826 1827static struct enet_addr_container_t 1828 *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth, 1829 struct enet_addr *p_enet_addr) 1830{ 1831 struct enet_addr_container *enet_addr_cont; 1832 struct list_head *p_lh; 1833 u16 i, num; 1834 int32_t j; 1835 u8 *p_counter; 1836 1837 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { 1838 p_lh = &ugeth->group_hash_q; 1839 p_counter = &(ugeth->numGroupAddrInHash); 1840 } else { 1841 p_lh = &ugeth->ind_hash_q; 1842 p_counter = &(ugeth->numIndAddrInHash); 1843 } 1844 1845 if (!p_lh) 1846 return NULL; 1847 1848 num = *p_counter; 1849 1850 for (i = 0; i < num; i++) { 1851 enet_addr_cont = 1852 (struct enet_addr_container *) 1853 ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); 1854 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) { 1855 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j]) 1856 break; 1857 if (j == 0) 1858 return enet_addr_cont; /* Found */ 1859 } 1860 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 1861 } 1862 return NULL; 1863} 1864 1865static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth, 1866 struct enet_addr *p_enet_addr) 1867{ 1868 enum ucc_geth_enet_address_recognition_location location; 1869 struct 
enet_addr_container *enet_addr_cont; 1870 struct list_head *p_lh; 1871 u8 i; 1872 u32 limit; 1873 u8 *p_counter; 1874 1875 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { 1876 p_lh = &ugeth->group_hash_q; 1877 limit = ugeth->ug_info->maxGroupAddrInHash; 1878 location = 1879 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH; 1880 p_counter = &(ugeth->numGroupAddrInHash); 1881 } else { 1882 p_lh = &ugeth->ind_hash_q; 1883 limit = ugeth->ug_info->maxIndAddrInHash; 1884 location = 1885 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH; 1886 p_counter = &(ugeth->numIndAddrInHash); 1887 } 1888 1889 if ((enet_addr_cont = 1890 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) { 1891 list_add(p_lh, &enet_addr_cont->node); /* Put it back */ 1892 return 0; 1893 } 1894 if ((!p_lh) || (!(*p_counter < limit))) 1895 return -EBUSY; 1896 if (!(enet_addr_cont = get_enet_addr_container())) 1897 return -ENOMEM; 1898 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) 1899 (enet_addr_cont->address)[i] = (*p_enet_addr)[i]; 1900 enet_addr_cont->location = location; 1901 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 1902 ++(*p_counter); 1903 1904 hw_add_addr_in_hash(ugeth, enet_addr_cont->address); 1905 return 0; 1906} 1907 1908static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth, 1909 struct enet_addr *p_enet_addr) 1910{ 1911 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 1912 struct enet_addr_container *enet_addr_cont; 1913 struct ucc_fast_private *uccf; 1914 enum comm_dir comm_dir; 1915 u16 i, num; 1916 struct list_head *p_lh; 1917 u32 *addr_h, *addr_l; 1918 u8 *p_counter; 1919 1920 uccf = ugeth->uccf; 1921 1922 p_82xx_addr_filt = 1923 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 1924 addressfiltering; 1925 1926 if (! 1927 (enet_addr_cont = 1928 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) 1929 return -ENOENT; 1930 1931 /* It's been found and removed from the CQ. */ 1932 /* Now destroy its container */ 1933 put_enet_addr_container(enet_addr_cont); 1934 1935 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { 1936 addr_h = &(p_82xx_addr_filt->gaddr_h); 1937 addr_l = &(p_82xx_addr_filt->gaddr_l); 1938 p_lh = &ugeth->group_hash_q; 1939 p_counter = &(ugeth->numGroupAddrInHash); 1940 } else { 1941 addr_h = &(p_82xx_addr_filt->iaddr_h); 1942 addr_l = &(p_82xx_addr_filt->iaddr_l); 1943 p_lh = &ugeth->ind_hash_q; 1944 p_counter = &(ugeth->numIndAddrInHash); 1945 } 1946 1947 comm_dir = 0; 1948 if (uccf->enabled_tx) 1949 comm_dir |= COMM_DIR_TX; 1950 if (uccf->enabled_rx) 1951 comm_dir |= COMM_DIR_RX; 1952 if (comm_dir) 1953 ugeth_disable(ugeth, comm_dir); 1954 1955 /* Clear the hash table. 
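*/

/* Added commentary, not part of the original source: the group/individual
 * hash registers implement a lossy, many-to-one filter, so several
 * addresses may map onto the same bit and a single entry cannot simply be
 * "un-hashed".  Removing one address therefore means wiping both 32-bit
 * halves of the register and re-hashing every address still queued in the
 * software list, which is exactly what the code below does. */

/* Clear the hash table in hardware: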
*/ 1956 out_be32(addr_h, 0x00000000); 1957 out_be32(addr_l, 0x00000000); 1958 1959 /* Add all remaining CQ elements back into hash */ 1960 num = --(*p_counter); 1961 for (i = 0; i < num; i++) { 1962 enet_addr_cont = 1963 (struct enet_addr_container *) 1964 ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); 1965 hw_add_addr_in_hash(ugeth, enet_addr_cont->address); 1966 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 1967 } 1968 1969 if (comm_dir) 1970 ugeth_enable(ugeth, comm_dir); 1971 1972 return 0; 1973} 1974#endif /* CONFIG_UGETH_FILTERING */ 1975 1976static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * 1977 ugeth, 1978 enum enet_addr_type 1979 enet_addr_type) 1980{ 1981 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 1982 struct ucc_fast_private *uccf; 1983 enum comm_dir comm_dir; 1984 struct list_head *p_lh; 1985 u16 i, num; 1986 u32 *addr_h, *addr_l; 1987 u8 *p_counter; 1988 1989 uccf = ugeth->uccf; 1990 1991 p_82xx_addr_filt = 1992 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 1993 addressfiltering; 1994 1995 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { 1996 addr_h = &(p_82xx_addr_filt->gaddr_h); 1997 addr_l = &(p_82xx_addr_filt->gaddr_l); 1998 p_lh = &ugeth->group_hash_q; 1999 p_counter = &(ugeth->numGroupAddrInHash); 2000 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { 2001 addr_h = &(p_82xx_addr_filt->iaddr_h); 2002 addr_l = &(p_82xx_addr_filt->iaddr_l); 2003 p_lh = &ugeth->ind_hash_q; 2004 p_counter = &(ugeth->numIndAddrInHash); 2005 } else 2006 return -EINVAL; 2007 2008 comm_dir = 0; 2009 if (uccf->enabled_tx) 2010 comm_dir |= COMM_DIR_TX; 2011 if (uccf->enabled_rx) 2012 comm_dir |= COMM_DIR_RX; 2013 if (comm_dir) 2014 ugeth_disable(ugeth, comm_dir); 2015 2016 /* Clear the hash table. 
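*/

/* Added commentary, not part of the original source: as in the
 * single-address case above, any live Rx/Tx directions were gracefully
 * stopped first (comm_dir records which ones), so the QE never matches
 * frames against a half-cleared filter; they are re-enabled once the
 * table and the software list have been emptied. */

/* Clear the hash table in hardware: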
*/ 2017 out_be32(addr_h, 0x00000000); 2018 out_be32(addr_l, 0x00000000); 2019 2020 if (!p_lh) 2021 return 0; 2022 2023 num = *p_counter; 2024 2025 /* Delete all remaining CQ elements */ 2026 for (i = 0; i < num; i++) 2027 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); 2028 2029 *p_counter = 0; 2030 2031 if (comm_dir) 2032 ugeth_enable(ugeth, comm_dir); 2033 2034 return 0; 2035} 2036 2037#ifdef CONFIG_UGETH_FILTERING 2038static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth, 2039 struct enet_addr *p_enet_addr, 2040 u8 paddr_num) 2041{ 2042 int i; 2043 2044 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) 2045 ugeth_warn 2046 ("%s: multicast address added to paddr will have no " 2047 "effect - is this what you wanted?", 2048 __FUNCTION__); 2049 2050 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ 2051 /* store address in our database */ 2052 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) 2053 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i]; 2054 /* put in hardware */ 2055 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num); 2056} 2057#endif /* CONFIG_UGETH_FILTERING */ 2058 2059static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, 2060 u8 paddr_num) 2061{ 2062 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ 2063 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ 2064} 2065 2066static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 2067{ 2068 u16 i, j; 2069 u8 *bd; 2070 2071 if (!ugeth) 2072 return; 2073 2074 if (ugeth->uccf) 2075 ucc_fast_free(ugeth->uccf); 2076 2077 if (ugeth->p_thread_data_tx) { 2078 qe_muram_free(ugeth->thread_dat_tx_offset); 2079 ugeth->p_thread_data_tx = NULL; 2080 } 2081 if (ugeth->p_thread_data_rx) { 2082 qe_muram_free(ugeth->thread_dat_rx_offset); 2083 ugeth->p_thread_data_rx = NULL; 2084 } 2085 if (ugeth->p_exf_glbl_param) { 2086 qe_muram_free(ugeth->exf_glbl_param_offset); 2087 ugeth->p_exf_glbl_param = NULL; 2088 } 2089 if (ugeth->p_rx_glbl_pram) { 2090 qe_muram_free(ugeth->rx_glbl_pram_offset); 2091 ugeth->p_rx_glbl_pram = NULL; 2092 } 2093 if (ugeth->p_tx_glbl_pram) { 2094 qe_muram_free(ugeth->tx_glbl_pram_offset); 2095 ugeth->p_tx_glbl_pram = NULL; 2096 } 2097 if (ugeth->p_send_q_mem_reg) { 2098 qe_muram_free(ugeth->send_q_mem_reg_offset); 2099 ugeth->p_send_q_mem_reg = NULL; 2100 } 2101 if (ugeth->p_scheduler) { 2102 qe_muram_free(ugeth->scheduler_offset); 2103 ugeth->p_scheduler = NULL; 2104 } 2105 if (ugeth->p_tx_fw_statistics_pram) { 2106 qe_muram_free(ugeth->tx_fw_statistics_pram_offset); 2107 ugeth->p_tx_fw_statistics_pram = NULL; 2108 } 2109 if (ugeth->p_rx_fw_statistics_pram) { 2110 qe_muram_free(ugeth->rx_fw_statistics_pram_offset); 2111 ugeth->p_rx_fw_statistics_pram = NULL; 2112 } 2113 if (ugeth->p_rx_irq_coalescing_tbl) { 2114 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); 2115 ugeth->p_rx_irq_coalescing_tbl = NULL; 2116 } 2117 if (ugeth->p_rx_bd_qs_tbl) { 2118 qe_muram_free(ugeth->rx_bd_qs_tbl_offset); 2119 ugeth->p_rx_bd_qs_tbl = NULL; 2120 } 2121 if (ugeth->p_init_enet_param_shadow) { 2122 return_init_enet_entries(ugeth, 2123 &(ugeth->p_init_enet_param_shadow-> 2124 rxthread[0]), 2125 ENET_INIT_PARAM_MAX_ENTRIES_RX, 2126 ugeth->ug_info->riscRx, 1); 2127 return_init_enet_entries(ugeth, 2128 &(ugeth->p_init_enet_param_shadow-> 2129 txthread[0]), 2130 ENET_INIT_PARAM_MAX_ENTRIES_TX, 2131 ugeth->ug_info->riscTx, 0); 2132 kfree(ugeth->p_init_enet_param_shadow); 2133 ugeth->p_init_enet_param_shadow = 
NULL; 2134 } 2135 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 2136 bd = ugeth->p_tx_bd_ring[i]; 2137 if (!bd) 2138 continue; 2139 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2140 if (ugeth->tx_skbuff[i][j]) { 2141 dma_unmap_single(NULL, 2142 ((qe_bd_t *)bd)->buf, 2143 (in_be32((u32 *)bd) & 2144 BD_LENGTH_MASK), 2145 DMA_TO_DEVICE); 2146 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); 2147 ugeth->tx_skbuff[i][j] = NULL; 2148 } 2149 } 2150 2151 kfree(ugeth->tx_skbuff[i]); 2152 2153 if (ugeth->p_tx_bd_ring[i]) { 2154 if (ugeth->ug_info->uf_info.bd_mem_part == 2155 MEM_PART_SYSTEM) 2156 kfree((void *)ugeth->tx_bd_ring_offset[i]); 2157 else if (ugeth->ug_info->uf_info.bd_mem_part == 2158 MEM_PART_MURAM) 2159 qe_muram_free(ugeth->tx_bd_ring_offset[i]); 2160 ugeth->p_tx_bd_ring[i] = NULL; 2161 } 2162 } 2163 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 2164 if (ugeth->p_rx_bd_ring[i]) { 2165 /* Return existing data buffers in ring */ 2166 bd = ugeth->p_rx_bd_ring[i]; 2167 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2168 if (ugeth->rx_skbuff[i][j]) { 2169 dma_unmap_single(NULL, 2170 ((struct qe_bd *)bd)->buf, 2171 ugeth->ug_info-> 2172 uf_info.max_rx_buf_length + 2173 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 2174 DMA_FROM_DEVICE); 2175 dev_kfree_skb_any( 2176 ugeth->rx_skbuff[i][j]); 2177 ugeth->rx_skbuff[i][j] = NULL; 2178 } 2179 bd += sizeof(struct qe_bd); 2180 } 2181 2182 kfree(ugeth->rx_skbuff[i]); 2183 2184 if (ugeth->ug_info->uf_info.bd_mem_part == 2185 MEM_PART_SYSTEM) 2186 kfree((void *)ugeth->rx_bd_ring_offset[i]); 2187 else if (ugeth->ug_info->uf_info.bd_mem_part == 2188 MEM_PART_MURAM) 2189 qe_muram_free(ugeth->rx_bd_ring_offset[i]); 2190 ugeth->p_rx_bd_ring[i] = NULL; 2191 } 2192 } 2193 while (!list_empty(&ugeth->group_hash_q)) 2194 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 2195 (dequeue(&ugeth->group_hash_q))); 2196 while (!list_empty(&ugeth->ind_hash_q)) 2197 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 2198 (dequeue(&ugeth->ind_hash_q))); 2199 2200} 2201 2202static void ucc_geth_set_multi(struct net_device *dev) 2203{ 2204 struct ucc_geth_private *ugeth; 2205 struct dev_mc_list *dmi; 2206 struct ucc_fast *uf_regs; 2207 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2208 u8 tempaddr[6]; 2209 u8 *mcptr, *tdptr; 2210 int i, j; 2211 2212 ugeth = netdev_priv(dev); 2213 2214 uf_regs = ugeth->uccf->uf_regs; 2215 2216 if (dev->flags & IFF_PROMISC) { 2217 2218 uf_regs->upsmr |= UPSMR_PRO; 2219 2220 } else { 2221 2222 uf_regs->upsmr &= ~UPSMR_PRO; 2223 2224 p_82xx_addr_filt = 2225 (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 2226 p_rx_glbl_pram->addressfiltering; 2227 2228 if (dev->flags & IFF_ALLMULTI) { 2229 /* Catch all multicast addresses, so set the 2230 * filter to all 1's. 2231 */ 2232 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); 2233 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); 2234 } else { 2235 /* Clear filter and add the addresses in the list. 2236 */ 2237 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2238 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2239 2240 dmi = dev->mc_list; 2241 2242 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { 2243 2244 /* Only support group multicast for now. 2245 */ 2246 if (!(dmi->dmi_addr[0] & 1)) 2247 continue; 2248 2249 /* The address in dmi_addr is LSB first, 2250 * and taddr is MSB first. We have to 2251 * copy bytes MSB first from dmi_addr. 
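*/

/* Worked example, added commentary with a hypothetical address: if
 * dmi_addr[] holds 01:00:5e:00:00:01, the reversal loop below yields
 * tempaddr[] = 01:00:00:5e:00:01, i.e. tempaddr[j] = dmi_addr[5 - j],
 * so the CPM sees the most significant byte first when it computes the
 * hash CRC. */

/* The byte-reversal copy: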
2252 */ 2253 mcptr = (u8 *) dmi->dmi_addr + 5; 2254 tdptr = (u8 *) tempaddr; 2255 for (j = 0; j < 6; j++) 2256 *tdptr++ = *mcptr--; 2257 2258 /* Ask CPM to run CRC and set bit in 2259 * filter mask. 2260 */ 2261 hw_add_addr_in_hash(ugeth, tempaddr); 2262 } 2263 } 2264 } 2265} 2266 2267static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2268{ 2269 struct ucc_geth *ug_regs = ugeth->ug_regs; 2270 struct phy_device *phydev = ugeth->phydev; 2271 u32 tempval; 2272 2273 ugeth_vdbg("%s: IN", __FUNCTION__); 2274 2275 /* Disable the controller */ 2276 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2277 2278 /* Tell the kernel the link is down */ 2279 phy_stop(phydev); 2280 2281 /* Mask all interrupts */ 2282 out_be32(ugeth->uccf->p_ucce, 0x00000000); 2283 2284 /* Clear all interrupts */ 2285 out_be32(ugeth->uccf->p_ucce, 0xffffffff); 2286 2287 /* Disable Rx and Tx */ 2288 tempval = in_be32(&ug_regs->maccfg1); 2289 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2290 out_be32(&ug_regs->maccfg1, tempval); 2291 2292 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); 2293 2294 ucc_geth_memclean(ugeth); 2295} 2296 2297static int ucc_struct_init(struct ucc_geth_private *ugeth) 2298{ 2299 struct ucc_geth_info *ug_info; 2300 struct ucc_fast_info *uf_info; 2301 int i; 2302 2303 ug_info = ugeth->ug_info; 2304 uf_info = &ug_info->uf_info; 2305 2306 /* Create CQs for hash tables */ 2307 INIT_LIST_HEAD(&ugeth->group_hash_q); 2308 INIT_LIST_HEAD(&ugeth->ind_hash_q); 2309 2310 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || 2311 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2312 ugeth_err("%s: Bad memory partition value.", __FUNCTION__); 2313 return -EINVAL; 2314 } 2315 2316 /* Rx BD lengths */ 2317 for (i = 0; i < ug_info->numQueuesRx; i++) { 2318 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || 2319 (ug_info->bdRingLenRx[i] % 2320 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { 2321 ugeth_err 2322 ("%s: Rx BD ring length must be multiple of 4," 2323 " no smaller than 8.", __FUNCTION__); 2324 return -EINVAL; 2325 } 2326 } 2327 2328 /* Tx BD lengths */ 2329 for (i = 0; i < ug_info->numQueuesTx; i++) { 2330 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { 2331 ugeth_err 2332 ("%s: Tx BD ring length must be no smaller than 2.", 2333 __FUNCTION__); 2334 return -EINVAL; 2335 } 2336 } 2337 2338 /* mrblr */ 2339 if ((uf_info->max_rx_buf_length == 0) || 2340 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { 2341 ugeth_err 2342 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2343 __FUNCTION__); 2344 return -EINVAL; 2345 } 2346 2347 /* num Tx queues */ 2348 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2349 ugeth_err("%s: number of tx queues too large.", __FUNCTION__); 2350 return -EINVAL; 2351 } 2352 2353 /* num Rx queues */ 2354 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2355 ugeth_err("%s: number of rx queues too large.", __FUNCTION__); 2356 return -EINVAL; 2357 } 2358 2359 /* l2qt */ 2360 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { 2361 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { 2362 ugeth_err 2363 ("%s: VLAN priority table entry must not be" 2364 " larger than number of Rx queues.", 2365 __FUNCTION__); 2366 return -EINVAL; 2367 } 2368 } 2369 2370 /* l3qt */ 2371 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { 2372 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { 2373 ugeth_err 2374 ("%s: IP priority table entry must not be" 2375 " larger than number of Rx queues.", 2376 __FUNCTION__); 2377 return -EINVAL; 2378 } 2379 } 2380 2381 if (ug_info->cam && 
!ug_info->ecamptr) { 2382 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2383 __FUNCTION__); 2384 return -EINVAL; 2385 } 2386 2387 if ((ug_info->numStationAddresses != 2388 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) 2389 && ug_info->rxExtendedFiltering) { 2390 ugeth_err("%s: Number of station addresses greater than 1 " 2391 "not allowed in extended parsing mode.", 2392 __FUNCTION__); 2393 return -EINVAL; 2394 } 2395 2396 /* Generate uccm_mask for receive */ 2397 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ 2398 for (i = 0; i < ug_info->numQueuesRx; i++) 2399 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); 2400 2401 for (i = 0; i < ug_info->numQueuesTx; i++) 2402 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); 2403 /* Initialize the general fast UCC block. */ 2404 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2405 ugeth_err("%s: Failed to init uccf.", __FUNCTION__); 2406 ucc_geth_memclean(ugeth); 2407 return -ENOMEM; 2408 } 2409 2410 ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); 2411 2412 return 0; 2413} 2414 2415static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2416{ 2417 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2418 struct ucc_geth_init_pram *p_init_enet_pram; 2419 struct ucc_fast_private *uccf; 2420 struct ucc_geth_info *ug_info; 2421 struct ucc_fast_info *uf_info; 2422 struct ucc_fast *uf_regs; 2423 struct ucc_geth *ug_regs; 2424 int ret_val = -EINVAL; 2425 u32 remoder = UCC_GETH_REMODER_INIT; 2426 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; 2427 u32 ifstat, i, j, size, l2qt, l3qt, length; 2428 u16 temoder = UCC_GETH_TEMODER_INIT; 2429 u16 test; 2430 u8 function_code = 0; 2431 u8 *bd, *endOfRing; 2432 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2433 2434 ugeth_vdbg("%s: IN", __FUNCTION__); 2435 uccf = ugeth->uccf; 2436 ug_info = ugeth->ug_info; 2437 uf_info = &ug_info->uf_info; 2438 uf_regs = uccf->uf_regs; 2439 ug_regs = ugeth->ug_regs; 2440 2441 switch (ug_info->numThreadsRx) { 2442 case UCC_GETH_NUM_OF_THREADS_1: 2443 numThreadsRxNumerical = 1; 2444 break; 2445 case UCC_GETH_NUM_OF_THREADS_2: 2446 numThreadsRxNumerical = 2; 2447 break; 2448 case UCC_GETH_NUM_OF_THREADS_4: 2449 numThreadsRxNumerical = 4; 2450 break; 2451 case UCC_GETH_NUM_OF_THREADS_6: 2452 numThreadsRxNumerical = 6; 2453 break; 2454 case UCC_GETH_NUM_OF_THREADS_8: 2455 numThreadsRxNumerical = 8; 2456 break; 2457 default: 2458 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__); 2459 ucc_geth_memclean(ugeth); 2460 return -EINVAL; 2461 break; 2462 } 2463 2464 switch (ug_info->numThreadsTx) { 2465 case UCC_GETH_NUM_OF_THREADS_1: 2466 numThreadsTxNumerical = 1; 2467 break; 2468 case UCC_GETH_NUM_OF_THREADS_2: 2469 numThreadsTxNumerical = 2; 2470 break; 2471 case UCC_GETH_NUM_OF_THREADS_4: 2472 numThreadsTxNumerical = 4; 2473 break; 2474 case UCC_GETH_NUM_OF_THREADS_6: 2475 numThreadsTxNumerical = 6; 2476 break; 2477 case UCC_GETH_NUM_OF_THREADS_8: 2478 numThreadsTxNumerical = 8; 2479 break; 2480 default: 2481 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__); 2482 ucc_geth_memclean(ugeth); 2483 return -EINVAL; 2484 break; 2485 } 2486 2487 /* Calculate rx_extended_features */ 2488 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || 2489 ug_info->ipAddressAlignment || 2490 (ug_info->numStationAddresses != 2491 UCC_GETH_NUM_OF_STATION_ADDRESSES_1); 2492 2493 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || 2494 
(ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) 2495 || (ug_info->vlanOperationNonTagged != 2496 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); 2497 2498 init_default_reg_vals(&uf_regs->upsmr, 2499 &ug_regs->maccfg1, &ug_regs->maccfg2); 2500 2501 /* Set UPSMR */ 2502 /* For more details see the hardware spec. */ 2503 init_rx_parameters(ug_info->bro, 2504 ug_info->rsh, ug_info->pro, &uf_regs->upsmr); 2505 2506 /* We're going to ignore other registers for now, */ 2507 /* except as needed to get up and running */ 2508 2509 /* Set MACCFG1 */ 2510 /* For more details see the hardware spec. */ 2511 init_flow_control_params(ug_info->aufc, 2512 ug_info->receiveFlowControl, 2513 1, 2514 ug_info->pausePeriod, 2515 ug_info->extensionField, 2516 &uf_regs->upsmr, 2517 &ug_regs->uempr, &ug_regs->maccfg1); 2518 2519 maccfg1 = in_be32(&ug_regs->maccfg1); 2520 maccfg1 |= MACCFG1_ENABLE_RX; 2521 maccfg1 |= MACCFG1_ENABLE_TX; 2522 out_be32(&ug_regs->maccfg1, maccfg1); 2523 2524 /* Set IPGIFG */ 2525 /* For more details see the hardware spec. */ 2526 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, 2527 ug_info->nonBackToBackIfgPart2, 2528 ug_info-> 2529 miminumInterFrameGapEnforcement, 2530 ug_info->backToBackInterFrameGap, 2531 &ug_regs->ipgifg); 2532 if (ret_val != 0) { 2533 ugeth_err("%s: IPGIFG initialization parameter too large.", 2534 __FUNCTION__); 2535 ucc_geth_memclean(ugeth); 2536 return ret_val; 2537 } 2538 2539 /* Set HAFDUP */ 2540 /* For more details see the hardware spec. */ 2541 ret_val = init_half_duplex_params(ug_info->altBeb, 2542 ug_info->backPressureNoBackoff, 2543 ug_info->noBackoff, 2544 ug_info->excessDefer, 2545 ug_info->altBebTruncation, 2546 ug_info->maxRetransmission, 2547 ug_info->collisionWindow, 2548 &ug_regs->hafdup); 2549 if (ret_val != 0) { 2550 ugeth_err("%s: Half Duplex initialization parameter too large.", 2551 __FUNCTION__); 2552 ucc_geth_memclean(ugeth); 2553 return ret_val; 2554 } 2555 2556 /* Set IFSTAT */ 2557 /* For more details see the hardware spec. */ 2558 /* Read only - resets upon read */ 2559 ifstat = in_be32(&ug_regs->ifstat); 2560 2561 /* Clear UEMPR */ 2562 /* For more details see the hardware spec. */ 2563 out_be32(&ug_regs->uempr, 0); 2564 2565 /* Set UESCR */ 2566 /* For more details see the hardware spec. 
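*/

/* Added commentary, not part of the original source: this driver supports
 * three statistics schemes.  Hardware mode, selected just below through
 * UESCR/UPSMR, uses the MAC's own counters; the firmware modes
 * (UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX/_RX) instead have the
 * QE firmware keep counters in dedicated PRAM pages, which are allocated
 * further down in this function. */

/* UESCR setup: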
*/ 2567 init_hw_statistics_gathering_mode((ug_info->statisticsMode & 2568 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), 2569 0, &uf_regs->upsmr, &ug_regs->uescr); 2570 2571 /* Allocate Tx bds */ 2572 for (j = 0; j < ug_info->numQueuesTx; j++) { 2573 /* Allocate in multiple of 2574 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, 2575 according to spec */ 2576 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) 2577 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2578 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2579 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % 2580 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2581 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2582 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2583 u32 align = 4; 2584 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2585 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2586 ugeth->tx_bd_ring_offset[j] = 2587 kmalloc((u32) (length + align), GFP_KERNEL); 2588 2589 if (ugeth->tx_bd_ring_offset[j] != 0) 2590 ugeth->p_tx_bd_ring[j] = 2591 (void*)((ugeth->tx_bd_ring_offset[j] + 2592 align) & ~(align - 1)); 2593 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2594 ugeth->tx_bd_ring_offset[j] = 2595 qe_muram_alloc(length, 2596 UCC_GETH_TX_BD_RING_ALIGNMENT); 2597 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2598 ugeth->p_tx_bd_ring[j] = 2599 (u8 *) qe_muram_addr(ugeth-> 2600 tx_bd_ring_offset[j]); 2601 } 2602 if (!ugeth->p_tx_bd_ring[j]) { 2603 ugeth_err 2604 ("%s: Can not allocate memory for Tx bd rings.", 2605 __FUNCTION__); 2606 ucc_geth_memclean(ugeth); 2607 return -ENOMEM; 2608 } 2609 /* Zero unused end of bd ring, according to spec */ 2610 memset(ugeth->p_tx_bd_ring[j] + 2611 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0, 2612 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2613 } 2614 2615 /* Allocate Rx bds */ 2616 for (j = 0; j < ug_info->numQueuesRx; j++) { 2617 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); 2618 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2619 u32 align = 4; 2620 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2621 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2622 ugeth->rx_bd_ring_offset[j] = 2623 kmalloc((u32) (length + align), GFP_KERNEL); 2624 if (ugeth->rx_bd_ring_offset[j] != 0) 2625 ugeth->p_rx_bd_ring[j] = 2626 (void*)((ugeth->rx_bd_ring_offset[j] + 2627 align) & ~(align - 1)); 2628 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2629 ugeth->rx_bd_ring_offset[j] = 2630 qe_muram_alloc(length, 2631 UCC_GETH_RX_BD_RING_ALIGNMENT); 2632 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2633 ugeth->p_rx_bd_ring[j] = 2634 (u8 *) qe_muram_addr(ugeth-> 2635 rx_bd_ring_offset[j]); 2636 } 2637 if (!ugeth->p_rx_bd_ring[j]) { 2638 ugeth_err 2639 ("%s: Can not allocate memory for Rx bd rings.", 2640 __FUNCTION__); 2641 ucc_geth_memclean(ugeth); 2642 return -ENOMEM; 2643 } 2644 } 2645 2646 /* Init Tx bds */ 2647 for (j = 0; j < ug_info->numQueuesTx; j++) { 2648 /* Setup the skbuff rings */ 2649 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2650 ugeth->ug_info->bdRingLenTx[j], 2651 GFP_KERNEL); 2652 2653 if (ugeth->tx_skbuff[j] == NULL) { 2654 ugeth_err("%s: Could not allocate tx_skbuff", 2655 __FUNCTION__); 2656 ucc_geth_memclean(ugeth); 2657 return -ENOMEM; 2658 } 2659 2660 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) 2661 ugeth->tx_skbuff[j][i] = NULL; 2662 2663 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; 2664 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2665 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { 2666 /* clear bd buffer */ 2667 
out_be32(&((struct qe_bd *)bd)->buf, 0); 2668 /* set bd status and length */ 2669 out_be32((u32 *)bd, 0); 2670 bd += sizeof(struct qe_bd); 2671 } 2672 bd -= sizeof(struct qe_bd); 2673 /* set bd status and length */ 2674 out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */ 2675 } 2676 2677 /* Init Rx bds */ 2678 for (j = 0; j < ug_info->numQueuesRx; j++) { 2679 /* Setup the skbuff rings */ 2680 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2681 ugeth->ug_info->bdRingLenRx[j], 2682 GFP_KERNEL); 2683 2684 if (ugeth->rx_skbuff[j] == NULL) { 2685 ugeth_err("%s: Could not allocate rx_skbuff", 2686 __FUNCTION__); 2687 ucc_geth_memclean(ugeth); 2688 return -ENOMEM; 2689 } 2690 2691 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) 2692 ugeth->rx_skbuff[j][i] = NULL; 2693 2694 ugeth->skb_currx[j] = 0; 2695 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; 2696 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2697 /* set bd status and length */ 2698 out_be32((u32 *)bd, R_I); 2699 /* clear bd buffer */ 2700 out_be32(&((struct qe_bd *)bd)->buf, 0); 2701 bd += sizeof(struct qe_bd); 2702 } 2703 bd -= sizeof(struct qe_bd); 2704 /* set bd status and length */ 2705 out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */ 2706 } 2707 2708 /* 2709 * Global PRAM 2710 */ 2711 /* Tx global PRAM */ 2712 /* Allocate global tx parameter RAM page */ 2713 ugeth->tx_glbl_pram_offset = 2714 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), 2715 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2716 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { 2717 ugeth_err 2718 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2719 __FUNCTION__); 2720 ucc_geth_memclean(ugeth); 2721 return -ENOMEM; 2722 } 2723 ugeth->p_tx_glbl_pram = 2724 (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth-> 2725 tx_glbl_pram_offset); 2726 /* Zero out p_tx_glbl_pram */ 2727 memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 2728 2729 /* Fill global PRAM */ 2730 2731 /* TQPTR */ 2732 /* Size varies with number of Tx threads */ 2733 ugeth->thread_dat_tx_offset = 2734 qe_muram_alloc(numThreadsTxNumerical * 2735 sizeof(struct ucc_geth_thread_data_tx) + 2736 32 * (numThreadsTxNumerical == 1), 2737 UCC_GETH_THREAD_DATA_ALIGNMENT); 2738 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { 2739 ugeth_err 2740 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2741 __FUNCTION__); 2742 ucc_geth_memclean(ugeth); 2743 return -ENOMEM; 2744 } 2745 2746 ugeth->p_thread_data_tx = 2747 (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth-> 2748 thread_dat_tx_offset); 2749 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 2750 2751 /* vtagtable */ 2752 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) 2753 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], 2754 ug_info->vtagtable[i]); 2755 2756 /* iphoffset */ 2757 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) 2758 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; 2759 2760 /* SQPTR */ 2761 /* Size varies with number of Tx queues */ 2762 ugeth->send_q_mem_reg_offset = 2763 qe_muram_alloc(ug_info->numQueuesTx * 2764 sizeof(struct ucc_geth_send_queue_qd), 2765 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 2766 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { 2767 ugeth_err 2768 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2769 __FUNCTION__); 2770 ucc_geth_memclean(ugeth); 2771 return -ENOMEM; 2772 } 2773 2774 ugeth->p_send_q_mem_reg = 2775 (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth-> 2776 
send_q_mem_reg_offset); 2777 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 2778 2779 /* Setup the table */ 2780 /* Assume BD rings are already established */ 2781 for (i = 0; i < ug_info->numQueuesTx; i++) { 2782 endOfRing = 2783 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - 2784 1) * sizeof(struct qe_bd); 2785 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2786 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2787 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); 2788 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2789 last_bd_completed_address, 2790 (u32) virt_to_phys(endOfRing)); 2791 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2792 MEM_PART_MURAM) { 2793 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2794 (u32) immrbar_virt_to_phys(ugeth-> 2795 p_tx_bd_ring[i])); 2796 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2797 last_bd_completed_address, 2798 (u32) immrbar_virt_to_phys(endOfRing)); 2799 } 2800 } 2801 2802 /* schedulerbasepointer */ 2803 2804 if (ug_info->numQueuesTx > 1) { 2805 /* scheduler exists only if more than 1 tx queue */ 2806 ugeth->scheduler_offset = 2807 qe_muram_alloc(sizeof(struct ucc_geth_scheduler), 2808 UCC_GETH_SCHEDULER_ALIGNMENT); 2809 if (IS_ERR_VALUE(ugeth->scheduler_offset)) { 2810 ugeth_err 2811 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2812 __FUNCTION__); 2813 ucc_geth_memclean(ugeth); 2814 return -ENOMEM; 2815 } 2816 2817 ugeth->p_scheduler = 2818 (struct ucc_geth_scheduler *) qe_muram_addr(ugeth-> 2819 scheduler_offset); 2820 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 2821 ugeth->scheduler_offset); 2822 /* Zero out p_scheduler */ 2823 memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 2824 2825 /* Set values in scheduler */ 2826 out_be32(&ugeth->p_scheduler->mblinterval, 2827 ug_info->mblinterval); 2828 out_be16(&ugeth->p_scheduler->nortsrbytetime, 2829 ug_info->nortsrbytetime); 2830 ugeth->p_scheduler->fracsiz = ug_info->fracsiz; 2831 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; 2832 ugeth->p_scheduler->txasap = ug_info->txasap; 2833 ugeth->p_scheduler->extrabw = ug_info->extrabw; 2834 for (i = 0; i < NUM_TX_QUEUES; i++) 2835 ugeth->p_scheduler->weightfactor[i] = 2836 ug_info->weightfactor[i]; 2837 2838 /* Set pointers to cpucount registers in scheduler */ 2839 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); 2840 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); 2841 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); 2842 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); 2843 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); 2844 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); 2845 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); 2846 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); 2847 } 2848 2849 /* schedulerbasepointer */ 2850 /* TxRMON_PTR (statistics) */ 2851 if (ug_info-> 2852 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 2853 ugeth->tx_fw_statistics_pram_offset = 2854 qe_muram_alloc(sizeof 2855 (struct ucc_geth_tx_firmware_statistics_pram), 2856 UCC_GETH_TX_STATISTICS_ALIGNMENT); 2857 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { 2858 ugeth_err 2859 ("%s: Can not allocate DPRAM memory for" 2860 " p_tx_fw_statistics_pram.", __FUNCTION__); 2861 ucc_geth_memclean(ugeth); 2862 return -ENOMEM; 2863 } 2864 ugeth->p_tx_fw_statistics_pram = 2865 (struct ucc_geth_tx_firmware_statistics_pram *) 2866 
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 2867 /* Zero out p_tx_fw_statistics_pram */ 2868 memset(ugeth->p_tx_fw_statistics_pram, 2869 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 2870 } 2871 2872 /* temoder */ 2873 /* Already has speed set */ 2874 2875 if (ug_info->numQueuesTx > 1) 2876 temoder |= TEMODER_SCHEDULER_ENABLE; 2877 if (ug_info->ipCheckSumGenerate) 2878 temoder |= TEMODER_IP_CHECKSUM_GENERATE; 2879 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); 2880 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); 2881 2882 test = in_be16(&ugeth->p_tx_glbl_pram->temoder); 2883 2884 /* Function code register value to be used later */ 2885 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL; 2886 /* Required for QE */ 2887 2888 /* function code register */ 2889 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); 2890 2891 /* Rx global PRAM */ 2892 /* Allocate global rx parameter RAM page */ 2893 ugeth->rx_glbl_pram_offset = 2894 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), 2895 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 2896 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { 2897 ugeth_err 2898 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2899 __FUNCTION__); 2900 ucc_geth_memclean(ugeth); 2901 return -ENOMEM; 2902 } 2903 ugeth->p_rx_glbl_pram = 2904 (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth-> 2905 rx_glbl_pram_offset); 2906 /* Zero out p_rx_glbl_pram */ 2907 memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 2908 2909 /* Fill global PRAM */ 2910 2911 /* RQPTR */ 2912 /* Size varies with number of Rx threads */ 2913 ugeth->thread_dat_rx_offset = 2914 qe_muram_alloc(numThreadsRxNumerical * 2915 sizeof(struct ucc_geth_thread_data_rx), 2916 UCC_GETH_THREAD_DATA_ALIGNMENT); 2917 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { 2918 ugeth_err 2919 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2920 __FUNCTION__); 2921 ucc_geth_memclean(ugeth); 2922 return -ENOMEM; 2923 } 2924 2925 ugeth->p_thread_data_rx = 2926 (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth-> 2927 thread_dat_rx_offset); 2928 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 2929 2930 /* typeorlen */ 2931 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); 2932 2933 /* rxrmonbaseptr (statistics) */ 2934 if (ug_info-> 2935 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 2936 ugeth->rx_fw_statistics_pram_offset = 2937 qe_muram_alloc(sizeof 2938 (struct ucc_geth_rx_firmware_statistics_pram), 2939 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2940 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2941 ugeth_err 2942 ("%s: Can not allocate DPRAM memory for" 2943 " p_rx_fw_statistics_pram.", __FUNCTION__); 2944 ucc_geth_memclean(ugeth); 2945 return -ENOMEM; 2946 } 2947 ugeth->p_rx_fw_statistics_pram = 2948 (struct ucc_geth_rx_firmware_statistics_pram *) 2949 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 2950 /* Zero out p_rx_fw_statistics_pram */ 2951 memset(ugeth->p_rx_fw_statistics_pram, 0, 2952 sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 2953 } 2954 2955 /* intCoalescingPtr */ 2956 2957 /* Size varies with number of Rx queues */ 2958 ugeth->rx_irq_coalescing_tbl_offset = 2959 qe_muram_alloc(ug_info->numQueuesRx * 2960 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) 2961 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 2962 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 2963 ugeth_err 2964 ("%s: Can not 
allocate DPRAM memory for" 2965 " p_rx_irq_coalescing_tbl.", __FUNCTION__); 2966 ucc_geth_memclean(ugeth); 2967 return -ENOMEM; 2968 } 2969 2970 ugeth->p_rx_irq_coalescing_tbl = 2971 (struct ucc_geth_rx_interrupt_coalescing_table *) 2972 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 2973 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 2974 ugeth->rx_irq_coalescing_tbl_offset); 2975 2976 /* Fill interrupt coalescing table */ 2977 for (i = 0; i < ug_info->numQueuesRx; i++) { 2978 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2979 interruptcoalescingmaxvalue, 2980 ug_info->interruptcoalescingmaxvalue[i]); 2981 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2982 interruptcoalescingcounter, 2983 ug_info->interruptcoalescingmaxvalue[i]); 2984 } 2985 2986 /* MRBLR */ 2987 init_max_rx_buff_len(uf_info->max_rx_buf_length, 2988 &ugeth->p_rx_glbl_pram->mrblr); 2989 /* MFLR */ 2990 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); 2991 /* MINFLR */ 2992 init_min_frame_len(ug_info->minFrameLength, 2993 &ugeth->p_rx_glbl_pram->minflr, 2994 &ugeth->p_rx_glbl_pram->mrblr); 2995 /* MAXD1 */ 2996 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); 2997 /* MAXD2 */ 2998 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); 2999 3000 /* l2qt */ 3001 l2qt = 0; 3002 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) 3003 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); 3004 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); 3005 3006 /* l3qt */ 3007 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { 3008 l3qt = 0; 3009 for (i = 0; i < 8; i++) 3010 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); 3011 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); 3012 } 3013 3014 /* vlantype */ 3015 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); 3016 3017 /* vlantci */ 3018 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); 3019 3020 /* ecamptr */ 3021 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); 3022 3023 /* RBDQPTR */ 3024 /* Size varies with number of Rx queues */ 3025 ugeth->rx_bd_qs_tbl_offset = 3026 qe_muram_alloc(ug_info->numQueuesRx * 3027 (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3028 sizeof(struct ucc_geth_rx_prefetched_bds)), 3029 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 3030 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 3031 ugeth_err 3032 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 3033 __FUNCTION__); 3034 ucc_geth_memclean(ugeth); 3035 return -ENOMEM; 3036 } 3037 3038 ugeth->p_rx_bd_qs_tbl = 3039 (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth-> 3040 rx_bd_qs_tbl_offset); 3041 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 3042 /* Zero out p_rx_bd_qs_tbl */ 3043 memset(ugeth->p_rx_bd_qs_tbl, 3044 0, 3045 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3046 sizeof(struct ucc_geth_rx_prefetched_bds))); 3047 3048 /* Setup the table */ 3049 /* Assume BD rings are already established */ 3050 for (i = 0; i < ug_info->numQueuesRx; i++) { 3051 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 3052 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 3053 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); 3054 } else if (ugeth->ug_info->uf_info.bd_mem_part == 3055 MEM_PART_MURAM) { 3056 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 3057 (u32) immrbar_virt_to_phys(ugeth-> 3058 p_rx_bd_ring[i])); 3059 } 3060 /* rest of fields handled by QE */ 3061 } 3062 3063 /* remoder */ 3064 /* Already has speed set */ 3065 3066 if 
(ugeth->rx_extended_features) 3067 remoder |= REMODER_RX_EXTENDED_FEATURES; 3068 if (ug_info->rxExtendedFiltering) 3069 remoder |= REMODER_RX_EXTENDED_FILTERING; 3070 if (ug_info->dynamicMaxFrameLength) 3071 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; 3072 if (ug_info->dynamicMinFrameLength) 3073 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; 3074 remoder |= 3075 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; 3076 remoder |= 3077 ug_info-> 3078 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; 3079 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; 3080 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); 3081 if (ug_info->ipCheckSumCheck) 3082 remoder |= REMODER_IP_CHECKSUM_CHECK; 3083 if (ug_info->ipAddressAlignment) 3084 remoder |= REMODER_IP_ADDRESS_ALIGNMENT; 3085 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); 3086 3087 /* Note that this function must be called */ 3088 /* ONLY AFTER p_tx_fw_statistics_pram */ 3089 /* andp_UccGethRxFirmwareStatisticsPram are allocated ! */ 3090 init_firmware_statistics_gathering_mode((ug_info-> 3091 statisticsMode & 3092 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), 3093 (ug_info->statisticsMode & 3094 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), 3095 &ugeth->p_tx_glbl_pram->txrmonbaseptr, 3096 ugeth->tx_fw_statistics_pram_offset, 3097 &ugeth->p_rx_glbl_pram->rxrmonbaseptr, 3098 ugeth->rx_fw_statistics_pram_offset, 3099 &ugeth->p_tx_glbl_pram->temoder, 3100 &ugeth->p_rx_glbl_pram->remoder); 3101 3102 /* function code register */ 3103 ugeth->p_rx_glbl_pram->rstate = function_code; 3104 3105 /* initialize extended filtering */ 3106 if (ug_info->rxExtendedFiltering) { 3107 if (!ug_info->extendedFilteringChainPointer) { 3108 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 3109 __FUNCTION__); 3110 ucc_geth_memclean(ugeth); 3111 return -EINVAL; 3112 } 3113 3114 /* Allocate memory for extended filtering Mode Global 3115 Parameters */ 3116 ugeth->exf_glbl_param_offset = 3117 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 3118 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 3119 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 3120 ugeth_err 3121 ("%s: Can not allocate DPRAM memory for" 3122 " p_exf_glbl_param.", __FUNCTION__); 3123 ucc_geth_memclean(ugeth); 3124 return -ENOMEM; 3125 } 3126 3127 ugeth->p_exf_glbl_param = 3128 (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth-> 3129 exf_glbl_param_offset); 3130 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 3131 ugeth->exf_glbl_param_offset); 3132 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, 3133 (u32) ug_info->extendedFilteringChainPointer); 3134 3135 } else { /* initialize 82xx style address filtering */ 3136 3137 /* Init individual address recognition registers to disabled */ 3138 3139 for (j = 0; j < NUM_OF_PADDRS; j++) 3140 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); 3141 3142 p_82xx_addr_filt = 3143 (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 3144 p_rx_glbl_pram->addressfiltering; 3145 3146 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3147 ENET_ADDR_TYPE_GROUP); 3148 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3149 ENET_ADDR_TYPE_INDIVIDUAL); 3150 } 3151 3152 /* 3153 * Initialize UCC at QE level 3154 */ 3155 3156 command = QE_INIT_TX_RX; 3157 3158 /* Allocate shadow InitEnet command parameter structure. 
3159 * This is needed because after the InitEnet command is executed, 3160 * the structure in DPRAM is released, because DPRAM is a premium 3161 * resource. 3162 * This shadow structure keeps a copy of what was done so that the 3163 * allocated resources can be released when the channel is freed. 3164 */ 3165 if (!(ugeth->p_init_enet_param_shadow = 3166 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { 3167 ugeth_err 3168 ("%s: Can not allocate memory for" 3169 " p_UccInitEnetParamShadows.", __FUNCTION__); 3170 ucc_geth_memclean(ugeth); 3171 return -ENOMEM; 3172 } 3173 /* Zero out *p_init_enet_param_shadow */ 3174 memset((char *)ugeth->p_init_enet_param_shadow, 3175 0, sizeof(struct ucc_geth_init_pram)); 3176 3177 /* Fill shadow InitEnet command parameter structure */ 3178 3179 ugeth->p_init_enet_param_shadow->resinit1 = 3180 ENET_INIT_PARAM_MAGIC_RES_INIT1; 3181 ugeth->p_init_enet_param_shadow->resinit2 = 3182 ENET_INIT_PARAM_MAGIC_RES_INIT2; 3183 ugeth->p_init_enet_param_shadow->resinit3 = 3184 ENET_INIT_PARAM_MAGIC_RES_INIT3; 3185 ugeth->p_init_enet_param_shadow->resinit4 = 3186 ENET_INIT_PARAM_MAGIC_RES_INIT4; 3187 ugeth->p_init_enet_param_shadow->resinit5 = 3188 ENET_INIT_PARAM_MAGIC_RES_INIT5; 3189 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3190 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; 3191 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3192 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; 3193 3194 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3195 ugeth->rx_glbl_pram_offset | ug_info->riscRx; 3196 if ((ug_info->largestexternallookupkeysize != 3197 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) 3198 && (ug_info->largestexternallookupkeysize != 3199 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3200 && (ug_info->largestexternallookupkeysize != 3201 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3202 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3203 __FUNCTION__); 3204 ucc_geth_memclean(ugeth); 3205 return -EINVAL; 3206 } 3207 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 3208 ug_info->largestexternallookupkeysize; 3209 size = sizeof(struct ucc_geth_thread_rx_pram); 3210 if (ug_info->rxExtendedFiltering) { 3211 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 3212 if (ug_info->largestexternallookupkeysize == 3213 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3214 size += 3215 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 3216 if (ug_info->largestexternallookupkeysize == 3217 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 3218 size += 3219 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 3220 } 3221 3222 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> 3223 p_init_enet_param_shadow->rxthread[0]), 3224 (u8) (numThreadsRxNumerical + 1) 3225 /* Rx needs one extra for terminator */ 3226 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, 3227 ug_info->riscRx, 1)) != 0) { 3228 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3229 __FUNCTION__); 3230 ucc_geth_memclean(ugeth); 3231 return ret_val; 3232 } 3233 3234 ugeth->p_init_enet_param_shadow->txglobal = 3235 ugeth->tx_glbl_pram_offset | ug_info->riscTx; 3236 if ((ret_val = 3237 fill_init_enet_entries(ugeth, 3238 &(ugeth->p_init_enet_param_shadow-> 3239 txthread[0]), numThreadsTxNumerical, 3240 sizeof(struct ucc_geth_thread_tx_pram), 3241 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3242 ug_info->riscTx, 0)) != 0) { 3243 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3244 __FUNCTION__); 3245 ucc_geth_memclean(ugeth); 
		return ret_val;
	}

	/* Load Rx bds with buffers */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
			ugeth_err("%s: Can not fill Rx bds with buffers.",
				  __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return ret_val;
		}
	}

	/* Allocate InitEnet command parameter structure */
	init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
	if (IS_ERR_VALUE(init_enet_pram_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}
	p_init_enet_pram =
	    (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset);

	/* Copy shadow InitEnet command parameter structure into PRAM */
	p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
	p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
	p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
	p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
	out_be16(&p_init_enet_pram->resinit5,
		 ugeth->p_init_enet_param_shadow->resinit5);
	p_init_enet_pram->largestexternallookupkeysize =
	    ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
	out_be32(&p_init_enet_pram->rgftgfrxglobal,
		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
		out_be32(&p_init_enet_pram->rxthread[i],
			 ugeth->p_init_enet_param_shadow->rxthread[i]);
	out_be32(&p_init_enet_pram->txglobal,
		 ugeth->p_init_enet_param_shadow->txglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
		out_be32(&p_init_enet_pram->txthread[i],
			 ugeth->p_init_enet_param_shadow->txthread[i]);

	/* Issue QE command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     init_enet_pram_offset);

	/* Free InitEnet command parameter */
	qe_muram_free(init_enet_pram_offset);

	return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	return &(ugeth->stats);
}

/* ucc_geth_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void ucc_geth_timeout(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ugeth->stats.tx_errors++;

	ugeth_dump_regs(ugeth);

	if (dev->flags & IFF_UP) {
		ucc_geth_stop(ugeth);
		ucc_geth_startup(ugeth);
	}

	netif_schedule(dev);
}

/* This is called by the kernel when a frame is ready for transmission.
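 */

/* Added commentary, not part of the original source: each Tx queue is a
 * ring of buffer descriptors with two cursors.  txBd[txQ] is the producer:
 * ucc_geth_start_xmit() fills it, sets T_R (ready) and hands the BD to the
 * QE.  confBd[txQ] is the consumer: ucc_geth_tx() walks it, reclaiming BDs
 * whose T_R bit the hardware has cleared.  The T_W bit marks the last BD
 * and wraps both cursors back to p_tx_bd_ring[txQ]; when the producer
 * catches up with the consumer the ring is full and the kernel queue is
 * stopped until a buffer is freed. */

/* The hard_start_xmit entry point: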
*/ 3334/* It is pointed to by the dev->hard_start_xmit function pointer */ 3335static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 3336{ 3337 struct ucc_geth_private *ugeth = netdev_priv(dev); 3338#ifdef CONFIG_UGETH_TX_ON_DEMAND 3339 struct ucc_fast_private *uccf; 3340#endif 3341 u8 *bd; /* BD pointer */ 3342 u32 bd_status; 3343 u8 txQ = 0; 3344 3345 ugeth_vdbg("%s: IN", __FUNCTION__); 3346 3347 spin_lock_irq(&ugeth->lock); 3348 3349 ugeth->stats.tx_bytes += skb->len; 3350 3351 /* Start from the next BD that should be filled */ 3352 bd = ugeth->txBd[txQ]; 3353 bd_status = in_be32((u32 *)bd); 3354 /* Save the skb pointer so we can free it later */ 3355 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; 3356 3357 /* Update the current skb pointer (wrapping if this was the last) */ 3358 ugeth->skb_curtx[txQ] = 3359 (ugeth->skb_curtx[txQ] + 3360 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3361 3362 /* set up the buffer descriptor */ 3363 out_be32(&((struct qe_bd *)bd)->buf, 3364 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); 3365 3366 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3367 3368 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; 3369 3370 /* set bd status and length */ 3371 out_be32((u32 *)bd, bd_status); 3372 3373 dev->trans_start = jiffies; 3374 3375 /* Move to next BD in the ring */ 3376 if (!(bd_status & T_W)) 3377 bd += sizeof(struct qe_bd); 3378 else 3379 bd = ugeth->p_tx_bd_ring[txQ]; 3380 3381 /* If the next BD still needs to be cleaned up, then the bds 3382 are full. We need to tell the kernel to stop sending us stuff. */ 3383 if (bd == ugeth->confBd[txQ]) { 3384 if (!netif_queue_stopped(dev)) 3385 netif_stop_queue(dev); 3386 } 3387 3388 ugeth->txBd[txQ] = bd; 3389 3390 if (ugeth->p_scheduler) { 3391 ugeth->cpucount[txQ]++; 3392 /* Indicate to QE that there are more Tx bds ready for 3393 transmission */ 3394 /* This is done by writing a running counter of the bd 3395 count to the scheduler PRAM. */ 3396 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); 3397 } 3398 3399#ifdef CONFIG_UGETH_TX_ON_DEMAND 3400 uccf = ugeth->uccf; 3401 out_be16(uccf->p_utodr, UCC_FAST_TOD); 3402#endif 3403 spin_unlock_irq(&ugeth->lock); 3404 3405 return 0; 3406} 3407 3408static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3409{ 3410 struct sk_buff *skb; 3411 u8 *bd; 3412 u16 length, howmany = 0; 3413 u32 bd_status; 3414 u8 *bdBuffer; 3415 3416 ugeth_vdbg("%s: IN", __FUNCTION__); 3417 3418 /* collect received buffers */ 3419 bd = ugeth->rxBd[rxQ]; 3420 3421 bd_status = in_be32((u32 *)bd); 3422 3423 /* while there are received buffers and BD is full (~R_E) */ 3424 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { 3425 bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf); 3426 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); 3427 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; 3428 3429 /* determine whether buffer is first, last, first and last 3430 (single buffer frame) or middle (not first and not last) */ 3431 if (!skb || 3432 (!(bd_status & (R_F | R_L))) || 3433 (bd_status & R_ERRORS_FATAL)) { 3434 ugeth_vdbg("%s, %d: ERROR!!! 
skb - 0x%08x", 3435 __FUNCTION__, __LINE__, (u32) skb); 3436 if (skb) 3437 dev_kfree_skb_any(skb); 3438 3439 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3440 ugeth->stats.rx_dropped++; 3441 } else { 3442 ugeth->stats.rx_packets++; 3443 howmany++; 3444 3445 /* Prep the skb for the packet */ 3446 skb_put(skb, length); 3447 3448 /* Tell the skb what kind of packet this is */ 3449 skb->protocol = eth_type_trans(skb, ugeth->dev); 3450 3451 ugeth->stats.rx_bytes += length; 3452 /* Send the packet up the stack */ 3453#ifdef CONFIG_UGETH_NAPI 3454 netif_receive_skb(skb); 3455#else 3456 netif_rx(skb); 3457#endif /* CONFIG_UGETH_NAPI */ 3458 } 3459 3460 ugeth->dev->last_rx = jiffies; 3461 3462 skb = get_new_skb(ugeth, bd); 3463 if (!skb) { 3464 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); 3465 ugeth->stats.rx_dropped++; 3466 break; 3467 } 3468 3469 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; 3470 3471 /* update to point at the next skb */ 3472 ugeth->skb_currx[rxQ] = 3473 (ugeth->skb_currx[rxQ] + 3474 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); 3475 3476 if (bd_status & R_W) 3477 bd = ugeth->p_rx_bd_ring[rxQ]; 3478 else 3479 bd += sizeof(struct qe_bd); 3480 3481 bd_status = in_be32((u32 *)bd); 3482 } 3483 3484 ugeth->rxBd[rxQ] = bd; 3485 return howmany; 3486} 3487 3488static int ucc_geth_tx(struct net_device *dev, u8 txQ) 3489{ 3490 /* Start from the next BD that should be filled */ 3491 struct ucc_geth_private *ugeth = netdev_priv(dev); 3492 u8 *bd; /* BD pointer */ 3493 u32 bd_status; 3494 3495 bd = ugeth->confBd[txQ]; 3496 bd_status = in_be32((u32 *)bd); 3497 3498 /* Normal processing. */ 3499 while ((bd_status & T_R) == 0) { 3500 /* BD contains already transmitted buffer. */ 3501 /* Handle the transmitted buffer and release */ 3502 /* the BD to be used with the current frame */ 3503 3504 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) 3505 break; 3506 3507 ugeth->stats.tx_packets++; 3508 3509 /* Free the sk buffer associated with this TxBD */ 3510 dev_kfree_skb_irq(ugeth-> 3511 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); 3512 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3513 ugeth->skb_dirtytx[txQ] = 3514 (ugeth->skb_dirtytx[txQ] + 3515 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3516 3517 /* We freed a buffer, so now we can restart transmission */ 3518 if (netif_queue_stopped(dev)) 3519 netif_wake_queue(dev); 3520 3521 /* Advance the confirmation BD pointer */ 3522 if (!(bd_status & T_W)) 3523 bd += sizeof(struct qe_bd); 3524 else 3525 bd = ugeth->p_tx_bd_ring[txQ]; 3526 bd_status = in_be32((u32 *)bd); 3527 } 3528 ugeth->confBd[txQ] = bd; 3529 return 0; 3530} 3531 3532#ifdef CONFIG_UGETH_NAPI 3533static int ucc_geth_poll(struct net_device *dev, int *budget) 3534{ 3535 struct ucc_geth_private *ugeth = netdev_priv(dev); 3536 struct ucc_geth_info *ug_info; 3537 struct ucc_fast_private *uccf; 3538 int howmany; 3539 u8 i; 3540 int rx_work_limit; 3541 register u32 uccm; 3542 3543 ug_info = ugeth->ug_info; 3544 3545 rx_work_limit = *budget; 3546 if (rx_work_limit > dev->quota) 3547 rx_work_limit = dev->quota; 3548 3549 howmany = 0; 3550 3551 for (i = 0; i < ug_info->numQueuesRx; i++) { 3552 howmany += ucc_geth_rx(ugeth, i, rx_work_limit); 3553 } 3554 3555 dev->quota -= howmany; 3556 rx_work_limit -= howmany; 3557 *budget -= howmany; 3558 3559 if (rx_work_limit > 0) { 3560 netif_rx_complete(dev); 3561 uccf = ugeth->uccf; 3562 uccm = in_be32(uccf->p_uccm); 3563 uccm |= UCCE_RX_EVENTS; 3564 out_be32(uccf->p_uccm, uccm); 3565 } 3566 
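	/* Added commentary, not part of the original source: under the NAPI
	 * interface of this kernel generation, poll() returns 0 when the
	 * budget was not exhausted (netif_rx_complete() was called and Rx
	 * interrupts were re-enabled above), and 1 when more work remains
	 * and the poll should be rescheduled. */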
#ifdef CONFIG_UGETH_NAPI
static int ucc_geth_poll(struct net_device *dev, int *budget)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_geth_info *ug_info;
	struct ucc_fast_private *uccf;
	int howmany;
	u8 i;
	int rx_work_limit;
	register u32 uccm;

	ug_info = ugeth->ug_info;

	rx_work_limit = *budget;
	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

	howmany = 0;

	for (i = 0; i < ug_info->numQueuesRx; i++) {
		howmany += ucc_geth_rx(ugeth, i, rx_work_limit);
	}

	dev->quota -= howmany;
	rx_work_limit -= howmany;
	*budget -= howmany;

	if (rx_work_limit > 0) {
		/* All pending packets processed: re-enable RX interrupts */
		netif_rx_complete(dev);
		uccf = ugeth->uccf;
		uccm = in_be32(uccf->p_uccm);
		uccm |= UCCE_RX_EVENTS;
		out_be32(uccf->p_uccm, uccm);
	}

	return (rx_work_limit > 0) ? 0 : 1;
}
#endif /* CONFIG_UGETH_NAPI */

static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
	struct net_device *dev = (struct net_device *)info;
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_fast_private *uccf;
	struct ucc_geth_info *ug_info;
	register u32 ucce;
	register u32 uccm;
#ifndef CONFIG_UGETH_NAPI
	register u32 rx_mask;
#endif
	register u32 tx_mask;
	u8 i;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	if (!ugeth)
		return IRQ_NONE;

	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;

	/* read and clear events */
	ucce = (u32) in_be32(uccf->p_ucce);
	uccm = (u32) in_be32(uccf->p_uccm);
	ucce &= uccm;
	out_be32(uccf->p_ucce, ucce);

	/* check for receive events that require processing */
	if (ucce & UCCE_RX_EVENTS) {
#ifdef CONFIG_UGETH_NAPI
		/* Mask further RX events and let the poll routine run;
		 * ucc_geth_poll() unmasks them when it is done. */
		if (netif_rx_schedule_prep(dev)) {
			uccm &= ~UCCE_RX_EVENTS;
			out_be32(uccf->p_uccm, uccm);
			__netif_rx_schedule(dev);
		}
#else
		rx_mask = UCCE_RXBF_SINGLE_MASK;
		for (i = 0; i < ug_info->numQueuesRx; i++) {
			if (ucce & rx_mask)
				ucc_geth_rx(ugeth, i,
					    (int)ugeth->ug_info->
					    bdRingLenRx[i]);
			ucce &= ~rx_mask;
			rx_mask <<= 1;
		}
#endif /* CONFIG_UGETH_NAPI */
	}

	/* Tx event processing */
	if (ucce & UCCE_TX_EVENTS) {
		spin_lock(&ugeth->lock);
		tx_mask = UCCE_TXBF_SINGLE_MASK;
		for (i = 0; i < ug_info->numQueuesTx; i++) {
			if (ucce & tx_mask)
				ucc_geth_tx(dev, i);
			ucce &= ~tx_mask;
			tx_mask <<= 1;
		}
		spin_unlock(&ugeth->lock);
	}

	/* Errors and other events */
	if (ucce & UCCE_OTHER) {
		if (ucce & UCCE_BSY)
			ugeth->stats.rx_errors++;
		if (ucce & UCCE_TXE)
			ugeth->stats.tx_errors++;
	}

	return IRQ_HANDLED;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int err;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	/* Test station address */
	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
		ugeth_err("%s: Multicast address used for station address"
			  " - is this what you wanted?", __FUNCTION__);
		return -EINVAL;
	}

	err = ucc_struct_init(ugeth);
	if (err) {
		ugeth_err("%s: Cannot configure internal struct, aborting.",
			  dev->name);
		return err;
	}

	err = ucc_geth_startup(ugeth);
	if (err) {
		ugeth_err("%s: Cannot configure net device, aborting.",
			  dev->name);
		return err;
	}

	err = adjust_enet_interface(ugeth);
	if (err) {
		ugeth_err("%s: Cannot configure net device, aborting.",
			  dev->name);
		return err;
	}

	/* Set MACSTNADDR1, MACSTNADDR2 */
	/* For more details see the hardware spec. */
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);

	err = init_phy(dev);
	if (err) {
		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
		return err;
	}

	phy_start(ugeth->phydev);

	err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
			  0, "UCC Geth", dev);
	if (err) {
		ugeth_err("%s: Cannot get IRQ for net device, aborting.",
			  dev->name);
		ucc_geth_stop(ugeth);
		return err;
	}

	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	if (err) {
		ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
		ucc_geth_stop(ugeth);
		return err;
	}

	netif_start_queue(dev);

	return err;
}

/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ucc_geth_stop(ugeth);

	phy_disconnect(ugeth->phydev);
	ugeth->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

const struct ethtool_ops ucc_geth_ethtool_ops = { };

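/*
 * ucc_geth_probe() below takes all of its configuration from the device
 * tree.  The sketch that follows is a hypothetical node layout (every
 * address and value is illustrative, not taken from any real board); it
 * shows the properties the probe code looks for:
 *
 *	mdio@2120 {
 *		reg = <2120 18>;
 *		phy0: ethernet-phy@0 {
 *			reg = <0>;		// PHY address on the bus
 *		};
 *	};
 *
 *	ethernet@2000 {
 *		device_type = "network";	// matched by ucc_geth_match
 *		compatible = "ucc_geth";
 *		device-id = <1>;		// UCC number, 1..8
 *		reg = <2000 200>;
 *		interrupts = <32 8>;
 *		rx-clock = <0>;
 *		tx-clock = <19>;
 *		phy-handle = <&phy0>;
 *		interface-type = "rgmii-id";	// optional; old trees put an
 *						// "interface" cell on the PHY
 *		max-speed = <1000>;		// optional; otherwise derived
 *						// from the interface type
 *		mac-address = [ 00 00 00 00 00 00 ];
 *	};
 */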
static phy_interface_t to_phy_interface(const char *interface_type)
{
	if (strcasecmp(interface_type, "mii") == 0)
		return PHY_INTERFACE_MODE_MII;
	if (strcasecmp(interface_type, "gmii") == 0)
		return PHY_INTERFACE_MODE_GMII;
	if (strcasecmp(interface_type, "tbi") == 0)
		return PHY_INTERFACE_MODE_TBI;
	if (strcasecmp(interface_type, "rmii") == 0)
		return PHY_INTERFACE_MODE_RMII;
	if (strcasecmp(interface_type, "rgmii") == 0)
		return PHY_INTERFACE_MODE_RGMII;
	if (strcasecmp(interface_type, "rgmii-id") == 0)
		return PHY_INTERFACE_MODE_RGMII_ID;
	if (strcasecmp(interface_type, "rtbi") == 0)
		return PHY_INTERFACE_MODE_RTBI;

	return PHY_INTERFACE_MODE_MII;
}

static int ucc_geth_probe(struct of_device *ofdev,
			  const struct of_device_id *match)
{
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct device_node *mdio;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	struct resource res;
	struct device_node *phy;
	int err, ucc_num, max_speed = 0;
	const phandle *ph;
	const unsigned int *prop;
	const void *mac_addr;
	phy_interface_t phy_interface;
	static const int enet_to_speed[] = {
		SPEED_10, SPEED_10, SPEED_10,
		SPEED_100, SPEED_100, SPEED_100,
		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
	};
	static const phy_interface_t enet_to_phy_interface[] = {
		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
	};

	ugeth_vdbg("%s: IN", __FUNCTION__);

	prop = of_get_property(np, "device-id", NULL);
	ucc_num = *prop - 1;
	if ((ucc_num < 0) || (ucc_num > 7))
		return -ENODEV;

	ug_info = &ugeth_info[ucc_num];
	if (ug_info == NULL) {
		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
			  ucc_num);
		return -ENODEV;
	}
	ug_info->uf_info.ucc_num = ucc_num;

	prop = of_get_property(np, "rx-clock", NULL);
	ug_info->uf_info.rx_clock = *prop;
	prop = of_get_property(np, "tx-clock", NULL);
	ug_info->uf_info.tx_clock = *prop;
	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;

	ug_info->uf_info.regs = res.start;
	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	ph = of_get_property(np, "phy-handle", NULL);
	phy = of_find_node_by_phandle(*ph);

	if (phy == NULL)
		return -ENODEV;

	/* set the PHY address */
	prop = of_get_property(phy, "reg", NULL);
	if (prop == NULL)
		return -1;
	ug_info->phy_address = *prop;

	/* get the phy interface type, or default to MII */
	prop = of_get_property(np, "interface-type", NULL);
	if (!prop) {
		/* handle interface property present in old trees */
		prop = of_get_property(phy, "interface", NULL);
		if (prop != NULL)
			phy_interface = enet_to_phy_interface[*prop];
		else
			phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		phy_interface = to_phy_interface((const char *)prop);
	}

	/* get speed, or derive from interface */
	prop = of_get_property(np, "max-speed", NULL);
	if (!prop) {
		/* handle interface property present in old trees */
		prop = of_get_property(phy, "interface", NULL);
		if (prop != NULL)
			max_speed = enet_to_speed[*prop];
	} else {
		max_speed = *prop;
	}
	if (!max_speed) {
		switch (phy_interface) {
		case PHY_INTERFACE_MODE_GMII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_TBI:
		case PHY_INTERFACE_MODE_RTBI:
			max_speed = SPEED_1000;
			break;
		default:
			max_speed = SPEED_100;
			break;
		}
	}

	if (max_speed == SPEED_1000) {
		/* bump the UCC FIFO sizes/thresholds to the gigabit values */
		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
	}

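	/*
	 * A PHY is located by the pair (MDIO bus, address on that bus).
	 * ug_info->phy_address was set from the PHY node's "reg" above;
	 * ug_info->mdio_bus is set below to the physical address of the
	 * parent MDIO controller's registers, the identifier under which
	 * the companion MII code (ucc_geth_mii.c) registers the bus.
	 * init_phy(), called from ucc_geth_open(), uses both values to
	 * attach to the PHY.
	 */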
	/* Set the bus id */
	mdio = of_get_parent(phy);

	if (mdio == NULL)
		return -1;

	err = of_address_to_resource(mdio, 0, &res);
	of_node_put(mdio);

	if (err)
		return -1;

	ug_info->mdio_bus = res.start;

	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%08x (irq = %d)\n",
	       ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
	       ug_info->uf_info.irq);

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));

	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the UCC register region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	dev->open = ucc_geth_open;
	dev->hard_start_xmit = ucc_geth_start_xmit;
	dev->tx_timeout = ucc_geth_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_UGETH_NAPI
	dev->poll = ucc_geth_poll;
	dev->weight = UCC_GETH_DEV_WEIGHT;
#endif /* CONFIG_UGETH_NAPI */
	dev->stop = ucc_geth_close;
	dev->get_stats = ucc_geth_get_stats;
//	dev->change_mtu = ucc_geth_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = ucc_geth_set_multi;
	dev->ethtool_ops = &ucc_geth_ethtool_ops;

	/* enable all messages up to and including IFUP */
	ugeth->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	ugeth->phy_interface = phy_interface;
	ugeth->max_speed = max_speed;

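	/*
	 * Ordering note: dev->dev_addr is copied from the device tree only
	 * after register_netdev() succeeds.  The hardware itself is not
	 * programmed until ucc_geth_open() writes MACSTNADDR1/2, but the
	 * address is still all-zero at the moment the netdev is registered.
	 */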
	err = register_netdev(dev);
	if (err) {
		ugeth_err("%s: Cannot register net device, aborting.",
			  dev->name);
		free_netdev(dev);
		return err;
	}

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, 6);

	ugeth->ug_info = ug_info;
	ugeth->dev = dev;

	return 0;
}

static int ucc_geth_remove(struct of_device *ofdev)
{
	struct device *device = &ofdev->dev;
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	dev_set_drvdata(device, NULL);
	unregister_netdev(dev);
	ucc_geth_memclean(ugeth);
	free_netdev(dev);

	return 0;
}

static struct of_device_id ucc_geth_match[] = {
	{
		.type = "network",
		.compatible = "ucc_geth",
	},
	{},
};

MODULE_DEVICE_TABLE(of, ucc_geth_match);

static struct of_platform_driver ucc_geth_driver = {
	.name = DRV_NAME,
	.match_table = ucc_geth_match,
	.probe = ucc_geth_probe,
	.remove = ucc_geth_remove,
};

static int __init ucc_geth_init(void)
{
	int i, ret;

	ret = uec_mdio_init();

	if (ret)
		return ret;

	printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	ret = of_register_platform_driver(&ucc_geth_driver);

	if (ret)
		uec_mdio_exit();

	return ret;
}

static void __exit ucc_geth_exit(void)
{
	of_unregister_platform_driver(&ucc_geth_driver);
	uec_mdio_exit();
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");