Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (QE UCC Gigabit Ethernet driver source, at v2.6.28)
/*
 * Copyright (C) 2006-2007 Freescale Semicondutor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_platform.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "ucc_geth_mii.h"

#undef DEBUG

#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
#define UGETH_MSG_DEFAULT	(NETIF_MSG_IFUP << 1 ) - 1


static DEFINE_SPINLOCK(ugeth_lock);

static struct {
	u32 msg_enable;
} debug = { -1 };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");

static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .ufpt = 256,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN},

	.bdRingLenRx = {
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN},

	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static struct ucc_geth_info ugeth_info[8];

#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */

#ifdef CONFIG_UGETH_FILTERING
static void enqueue(struct list_head *node, struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	list_add_tail(node, lh);
	spin_unlock_irqrestore(&ugeth_lock, flags);
}
#endif /* CONFIG_UGETH_FILTERING */

static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}

static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
			    UCC_GETH_RX_DATA_BUF_ALIGNMENT);

	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly.
We will reserve 230 * as many bytes as needed to align the data properly 231 */ 232 skb_reserve(skb, 233 UCC_GETH_RX_DATA_BUF_ALIGNMENT - 234 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - 235 1))); 236 237 skb->dev = ugeth->dev; 238 239 out_be32(&((struct qe_bd __iomem *)bd)->buf, 240 dma_map_single(&ugeth->dev->dev, 241 skb->data, 242 ugeth->ug_info->uf_info.max_rx_buf_length + 243 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 244 DMA_FROM_DEVICE)); 245 246 out_be32((u32 __iomem *)bd, 247 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); 248 249 return skb; 250} 251 252static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) 253{ 254 u8 __iomem *bd; 255 u32 bd_status; 256 struct sk_buff *skb; 257 int i; 258 259 bd = ugeth->p_rx_bd_ring[rxQ]; 260 i = 0; 261 262 do { 263 bd_status = in_be32((u32 __iomem *)bd); 264 skb = get_new_skb(ugeth, bd); 265 266 if (!skb) /* If can not allocate data buffer, 267 abort. Cleanup will be elsewhere */ 268 return -ENOMEM; 269 270 ugeth->rx_skbuff[rxQ][i] = skb; 271 272 /* advance the BD pointer */ 273 bd += sizeof(struct qe_bd); 274 i++; 275 } while (!(bd_status & R_W)); 276 277 return 0; 278} 279 280static int fill_init_enet_entries(struct ucc_geth_private *ugeth, 281 u32 *p_start, 282 u8 num_entries, 283 u32 thread_size, 284 u32 thread_alignment, 285 enum qe_risc_allocation risc, 286 int skip_page_for_first_entry) 287{ 288 u32 init_enet_offset; 289 u8 i; 290 int snum; 291 292 for (i = 0; i < num_entries; i++) { 293 if ((snum = qe_get_snum()) < 0) { 294 if (netif_msg_ifup(ugeth)) 295 ugeth_err("fill_init_enet_entries: Can not get SNUM."); 296 return snum; 297 } 298 if ((i == 0) && skip_page_for_first_entry) 299 /* First entry of Rx does not have page */ 300 init_enet_offset = 0; 301 else { 302 init_enet_offset = 303 qe_muram_alloc(thread_size, thread_alignment); 304 if (IS_ERR_VALUE(init_enet_offset)) { 305 if (netif_msg_ifup(ugeth)) 306 ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); 307 qe_put_snum((u8) snum); 308 return -ENOMEM; 309 } 310 } 311 *(p_start++) = 312 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset 313 | risc; 314 } 315 316 return 0; 317} 318 319static int return_init_enet_entries(struct ucc_geth_private *ugeth, 320 u32 *p_start, 321 u8 num_entries, 322 enum qe_risc_allocation risc, 323 int skip_page_for_first_entry) 324{ 325 u32 init_enet_offset; 326 u8 i; 327 int snum; 328 329 for (i = 0; i < num_entries; i++) { 330 u32 val = *p_start; 331 332 /* Check that this entry was actually valid -- 333 needed in case failed in allocations */ 334 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 335 snum = 336 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> 337 ENET_INIT_PARAM_SNUM_SHIFT; 338 qe_put_snum((u8) snum); 339 if (!((i == 0) && skip_page_for_first_entry)) { 340 /* First entry of Rx does not have page */ 341 init_enet_offset = 342 (val & ENET_INIT_PARAM_PTR_MASK); 343 qe_muram_free(init_enet_offset); 344 } 345 *p_start++ = 0; 346 } 347 } 348 349 return 0; 350} 351 352#ifdef DEBUG 353static int dump_init_enet_entries(struct ucc_geth_private *ugeth, 354 u32 __iomem *p_start, 355 u8 num_entries, 356 u32 thread_size, 357 enum qe_risc_allocation risc, 358 int skip_page_for_first_entry) 359{ 360 u32 init_enet_offset; 361 u8 i; 362 int snum; 363 364 for (i = 0; i < num_entries; i++) { 365 u32 val = in_be32(p_start); 366 367 /* Check that this entry was actually valid -- 368 needed in case failed in allocations */ 369 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 370 snum = 371 (u32) (val & 
ENET_INIT_PARAM_SNUM_MASK) >> 372 ENET_INIT_PARAM_SNUM_SHIFT; 373 qe_put_snum((u8) snum); 374 if (!((i == 0) && skip_page_for_first_entry)) { 375 /* First entry of Rx does not have page */ 376 init_enet_offset = 377 (in_be32(p_start) & 378 ENET_INIT_PARAM_PTR_MASK); 379 ugeth_info("Init enet entry %d:", i); 380 ugeth_info("Base address: 0x%08x", 381 (u32) 382 qe_muram_addr(init_enet_offset)); 383 mem_disp(qe_muram_addr(init_enet_offset), 384 thread_size); 385 } 386 p_start++; 387 } 388 } 389 390 return 0; 391} 392#endif 393 394#ifdef CONFIG_UGETH_FILTERING 395static struct enet_addr_container *get_enet_addr_container(void) 396{ 397 struct enet_addr_container *enet_addr_cont; 398 399 /* allocate memory */ 400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL); 401 if (!enet_addr_cont) { 402 ugeth_err("%s: No memory for enet_addr_container object.", 403 __func__); 404 return NULL; 405 } 406 407 return enet_addr_cont; 408} 409#endif /* CONFIG_UGETH_FILTERING */ 410 411static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) 412{ 413 kfree(enet_addr_cont); 414} 415 416static void set_mac_addr(__be16 __iomem *reg, u8 *mac) 417{ 418 out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]); 419 out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]); 420 out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]); 421} 422 423#ifdef CONFIG_UGETH_FILTERING 424static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth, 425 u8 *p_enet_addr, u8 paddr_num) 426{ 427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 428 429 if (!(paddr_num < NUM_OF_PADDRS)) { 430 ugeth_warn("%s: Illegal paddr_num.", __func__); 431 return -EINVAL; 432 } 433 434 p_82xx_addr_filt = 435 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 436 addressfiltering; 437 438 /* Ethernet frames are defined in Little Endian mode, */ 439 /* therefore to insert the address we reverse the bytes. 
*/ 440 set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr); 441 return 0; 442} 443#endif /* CONFIG_UGETH_FILTERING */ 444 445static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 446{ 447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 448 449 if (!(paddr_num < NUM_OF_PADDRS)) { 450 ugeth_warn("%s: Illagel paddr_num.", __func__); 451 return -EINVAL; 452 } 453 454 p_82xx_addr_filt = 455 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> 456 addressfiltering; 457 458 /* Writing address ff.ff.ff.ff.ff.ff disables address 459 recognition for this register */ 460 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); 461 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); 462 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); 463 464 return 0; 465} 466 467static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, 468 u8 *p_enet_addr) 469{ 470 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 471 u32 cecr_subblock; 472 473 p_82xx_addr_filt = 474 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> 475 addressfiltering; 476 477 cecr_subblock = 478 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 479 480 /* Ethernet frames are defined in Little Endian mode, 481 therefor to insert */ 482 /* the address to the hash (Big Endian mode), we reverse the bytes.*/ 483 484 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); 485 486 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, 487 QE_CR_PROTOCOL_ETHERNET, 0); 488} 489 490#ifdef CONFIG_UGETH_MAGIC_PACKET 491static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) 492{ 493 struct ucc_fast_private *uccf; 494 struct ucc_geth __iomem *ug_regs; 495 u32 maccfg2, uccm; 496 497 uccf = ugeth->uccf; 498 ug_regs = ugeth->ug_regs; 499 500 /* Enable interrupts for magic packet detection */ 501 uccm = in_be32(uccf->p_uccm); 502 uccm |= UCCE_MPD; 503 out_be32(uccf->p_uccm, uccm); 504 505 /* Enable magic packet detection */ 506 maccfg2 = in_be32(&ug_regs->maccfg2); 507 maccfg2 |= MACCFG2_MPE; 508 out_be32(&ug_regs->maccfg2, maccfg2); 509} 510 511static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) 512{ 513 struct ucc_fast_private *uccf; 514 struct ucc_geth __iomem *ug_regs; 515 u32 maccfg2, uccm; 516 517 uccf = ugeth->uccf; 518 ug_regs = ugeth->ug_regs; 519 520 /* Disable interrupts for magic packet detection */ 521 uccm = in_be32(uccf->p_uccm); 522 uccm &= ~UCCE_MPD; 523 out_be32(uccf->p_uccm, uccm); 524 525 /* Disable magic packet detection */ 526 maccfg2 = in_be32(&ug_regs->maccfg2); 527 maccfg2 &= ~MACCFG2_MPE; 528 out_be32(&ug_regs->maccfg2, maccfg2); 529} 530#endif /* MAGIC_PACKET */ 531 532static inline int compare_addr(u8 **addr1, u8 **addr2) 533{ 534 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); 535} 536 537#ifdef DEBUG 538static void get_statistics(struct ucc_geth_private *ugeth, 539 struct ucc_geth_tx_firmware_statistics * 540 tx_firmware_statistics, 541 struct ucc_geth_rx_firmware_statistics * 542 rx_firmware_statistics, 543 struct ucc_geth_hardware_statistics *hardware_statistics) 544{ 545 struct ucc_fast __iomem *uf_regs; 546 struct ucc_geth __iomem *ug_regs; 547 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 548 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 549 550 ug_regs = ugeth->ug_regs; 551 uf_regs = (struct ucc_fast __iomem *) ug_regs; 552 p_tx_fw_statistics_pram = 
ugeth->p_tx_fw_statistics_pram; 553 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; 554 555 /* Tx firmware only if user handed pointer and driver actually 556 gathers Tx firmware statistics */ 557 if (tx_firmware_statistics && p_tx_fw_statistics_pram) { 558 tx_firmware_statistics->sicoltx = 559 in_be32(&p_tx_fw_statistics_pram->sicoltx); 560 tx_firmware_statistics->mulcoltx = 561 in_be32(&p_tx_fw_statistics_pram->mulcoltx); 562 tx_firmware_statistics->latecoltxfr = 563 in_be32(&p_tx_fw_statistics_pram->latecoltxfr); 564 tx_firmware_statistics->frabortduecol = 565 in_be32(&p_tx_fw_statistics_pram->frabortduecol); 566 tx_firmware_statistics->frlostinmactxer = 567 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); 568 tx_firmware_statistics->carriersenseertx = 569 in_be32(&p_tx_fw_statistics_pram->carriersenseertx); 570 tx_firmware_statistics->frtxok = 571 in_be32(&p_tx_fw_statistics_pram->frtxok); 572 tx_firmware_statistics->txfrexcessivedefer = 573 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); 574 tx_firmware_statistics->txpkts256 = 575 in_be32(&p_tx_fw_statistics_pram->txpkts256); 576 tx_firmware_statistics->txpkts512 = 577 in_be32(&p_tx_fw_statistics_pram->txpkts512); 578 tx_firmware_statistics->txpkts1024 = 579 in_be32(&p_tx_fw_statistics_pram->txpkts1024); 580 tx_firmware_statistics->txpktsjumbo = 581 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); 582 } 583 584 /* Rx firmware only if user handed pointer and driver actually 585 * gathers Rx firmware statistics */ 586 if (rx_firmware_statistics && p_rx_fw_statistics_pram) { 587 int i; 588 rx_firmware_statistics->frrxfcser = 589 in_be32(&p_rx_fw_statistics_pram->frrxfcser); 590 rx_firmware_statistics->fraligner = 591 in_be32(&p_rx_fw_statistics_pram->fraligner); 592 rx_firmware_statistics->inrangelenrxer = 593 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); 594 rx_firmware_statistics->outrangelenrxer = 595 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); 596 rx_firmware_statistics->frtoolong = 597 in_be32(&p_rx_fw_statistics_pram->frtoolong); 598 rx_firmware_statistics->runt = 599 in_be32(&p_rx_fw_statistics_pram->runt); 600 rx_firmware_statistics->verylongevent = 601 in_be32(&p_rx_fw_statistics_pram->verylongevent); 602 rx_firmware_statistics->symbolerror = 603 in_be32(&p_rx_fw_statistics_pram->symbolerror); 604 rx_firmware_statistics->dropbsy = 605 in_be32(&p_rx_fw_statistics_pram->dropbsy); 606 for (i = 0; i < 0x8; i++) 607 rx_firmware_statistics->res0[i] = 608 p_rx_fw_statistics_pram->res0[i]; 609 rx_firmware_statistics->mismatchdrop = 610 in_be32(&p_rx_fw_statistics_pram->mismatchdrop); 611 rx_firmware_statistics->underpkts = 612 in_be32(&p_rx_fw_statistics_pram->underpkts); 613 rx_firmware_statistics->pkts256 = 614 in_be32(&p_rx_fw_statistics_pram->pkts256); 615 rx_firmware_statistics->pkts512 = 616 in_be32(&p_rx_fw_statistics_pram->pkts512); 617 rx_firmware_statistics->pkts1024 = 618 in_be32(&p_rx_fw_statistics_pram->pkts1024); 619 rx_firmware_statistics->pktsjumbo = 620 in_be32(&p_rx_fw_statistics_pram->pktsjumbo); 621 rx_firmware_statistics->frlossinmacer = 622 in_be32(&p_rx_fw_statistics_pram->frlossinmacer); 623 rx_firmware_statistics->pausefr = 624 in_be32(&p_rx_fw_statistics_pram->pausefr); 625 for (i = 0; i < 0x4; i++) 626 rx_firmware_statistics->res1[i] = 627 p_rx_fw_statistics_pram->res1[i]; 628 rx_firmware_statistics->removevlan = 629 in_be32(&p_rx_fw_statistics_pram->removevlan); 630 rx_firmware_statistics->replacevlan = 631 in_be32(&p_rx_fw_statistics_pram->replacevlan); 632 
rx_firmware_statistics->insertvlan = 633 in_be32(&p_rx_fw_statistics_pram->insertvlan); 634 } 635 636 /* Hardware only if user handed pointer and driver actually 637 gathers hardware statistics */ 638 if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) { 639 hardware_statistics->tx64 = in_be32(&ug_regs->tx64); 640 hardware_statistics->tx127 = in_be32(&ug_regs->tx127); 641 hardware_statistics->tx255 = in_be32(&ug_regs->tx255); 642 hardware_statistics->rx64 = in_be32(&ug_regs->rx64); 643 hardware_statistics->rx127 = in_be32(&ug_regs->rx127); 644 hardware_statistics->rx255 = in_be32(&ug_regs->rx255); 645 hardware_statistics->txok = in_be32(&ug_regs->txok); 646 hardware_statistics->txcf = in_be16(&ug_regs->txcf); 647 hardware_statistics->tmca = in_be32(&ug_regs->tmca); 648 hardware_statistics->tbca = in_be32(&ug_regs->tbca); 649 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); 650 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); 651 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); 652 hardware_statistics->rmca = in_be32(&ug_regs->rmca); 653 hardware_statistics->rbca = in_be32(&ug_regs->rbca); 654 } 655} 656 657static void dump_bds(struct ucc_geth_private *ugeth) 658{ 659 int i; 660 int length; 661 662 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 663 if (ugeth->p_tx_bd_ring[i]) { 664 length = 665 (ugeth->ug_info->bdRingLenTx[i] * 666 sizeof(struct qe_bd)); 667 ugeth_info("TX BDs[%d]", i); 668 mem_disp(ugeth->p_tx_bd_ring[i], length); 669 } 670 } 671 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 672 if (ugeth->p_rx_bd_ring[i]) { 673 length = 674 (ugeth->ug_info->bdRingLenRx[i] * 675 sizeof(struct qe_bd)); 676 ugeth_info("RX BDs[%d]", i); 677 mem_disp(ugeth->p_rx_bd_ring[i], length); 678 } 679 } 680} 681 682static void dump_regs(struct ucc_geth_private *ugeth) 683{ 684 int i; 685 686 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); 687 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); 688 689 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", 690 (u32) & ugeth->ug_regs->maccfg1, 691 in_be32(&ugeth->ug_regs->maccfg1)); 692 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", 693 (u32) & ugeth->ug_regs->maccfg2, 694 in_be32(&ugeth->ug_regs->maccfg2)); 695 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", 696 (u32) & ugeth->ug_regs->ipgifg, 697 in_be32(&ugeth->ug_regs->ipgifg)); 698 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", 699 (u32) & ugeth->ug_regs->hafdup, 700 in_be32(&ugeth->ug_regs->hafdup)); 701 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", 702 (u32) & ugeth->ug_regs->ifctl, 703 in_be32(&ugeth->ug_regs->ifctl)); 704 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", 705 (u32) & ugeth->ug_regs->ifstat, 706 in_be32(&ugeth->ug_regs->ifstat)); 707 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", 708 (u32) & ugeth->ug_regs->macstnaddr1, 709 in_be32(&ugeth->ug_regs->macstnaddr1)); 710 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", 711 (u32) & ugeth->ug_regs->macstnaddr2, 712 in_be32(&ugeth->ug_regs->macstnaddr2)); 713 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", 714 (u32) & ugeth->ug_regs->uempr, 715 in_be32(&ugeth->ug_regs->uempr)); 716 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", 717 (u32) & ugeth->ug_regs->utbipar, 718 in_be32(&ugeth->ug_regs->utbipar)); 719 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", 720 (u32) & ugeth->ug_regs->uescr, 721 in_be16(&ugeth->ug_regs->uescr)); 722 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", 723 (u32) & ugeth->ug_regs->tx64, 724 
in_be32(&ugeth->ug_regs->tx64)); 725 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", 726 (u32) & ugeth->ug_regs->tx127, 727 in_be32(&ugeth->ug_regs->tx127)); 728 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", 729 (u32) & ugeth->ug_regs->tx255, 730 in_be32(&ugeth->ug_regs->tx255)); 731 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", 732 (u32) & ugeth->ug_regs->rx64, 733 in_be32(&ugeth->ug_regs->rx64)); 734 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", 735 (u32) & ugeth->ug_regs->rx127, 736 in_be32(&ugeth->ug_regs->rx127)); 737 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", 738 (u32) & ugeth->ug_regs->rx255, 739 in_be32(&ugeth->ug_regs->rx255)); 740 ugeth_info("txok : addr - 0x%08x, val - 0x%08x", 741 (u32) & ugeth->ug_regs->txok, 742 in_be32(&ugeth->ug_regs->txok)); 743 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", 744 (u32) & ugeth->ug_regs->txcf, 745 in_be16(&ugeth->ug_regs->txcf)); 746 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", 747 (u32) & ugeth->ug_regs->tmca, 748 in_be32(&ugeth->ug_regs->tmca)); 749 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", 750 (u32) & ugeth->ug_regs->tbca, 751 in_be32(&ugeth->ug_regs->tbca)); 752 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", 753 (u32) & ugeth->ug_regs->rxfok, 754 in_be32(&ugeth->ug_regs->rxfok)); 755 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", 756 (u32) & ugeth->ug_regs->rxbok, 757 in_be32(&ugeth->ug_regs->rxbok)); 758 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", 759 (u32) & ugeth->ug_regs->rbyt, 760 in_be32(&ugeth->ug_regs->rbyt)); 761 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", 762 (u32) & ugeth->ug_regs->rmca, 763 in_be32(&ugeth->ug_regs->rmca)); 764 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", 765 (u32) & ugeth->ug_regs->rbca, 766 in_be32(&ugeth->ug_regs->rbca)); 767 ugeth_info("scar : addr - 0x%08x, val - 0x%08x", 768 (u32) & ugeth->ug_regs->scar, 769 in_be32(&ugeth->ug_regs->scar)); 770 ugeth_info("scam : addr - 0x%08x, val - 0x%08x", 771 (u32) & ugeth->ug_regs->scam, 772 in_be32(&ugeth->ug_regs->scam)); 773 774 if (ugeth->p_thread_data_tx) { 775 int numThreadsTxNumerical; 776 switch (ugeth->ug_info->numThreadsTx) { 777 case UCC_GETH_NUM_OF_THREADS_1: 778 numThreadsTxNumerical = 1; 779 break; 780 case UCC_GETH_NUM_OF_THREADS_2: 781 numThreadsTxNumerical = 2; 782 break; 783 case UCC_GETH_NUM_OF_THREADS_4: 784 numThreadsTxNumerical = 4; 785 break; 786 case UCC_GETH_NUM_OF_THREADS_6: 787 numThreadsTxNumerical = 6; 788 break; 789 case UCC_GETH_NUM_OF_THREADS_8: 790 numThreadsTxNumerical = 8; 791 break; 792 default: 793 numThreadsTxNumerical = 0; 794 break; 795 } 796 797 ugeth_info("Thread data TXs:"); 798 ugeth_info("Base address: 0x%08x", 799 (u32) ugeth->p_thread_data_tx); 800 for (i = 0; i < numThreadsTxNumerical; i++) { 801 ugeth_info("Thread data TX[%d]:", i); 802 ugeth_info("Base address: 0x%08x", 803 (u32) & ugeth->p_thread_data_tx[i]); 804 mem_disp((u8 *) & ugeth->p_thread_data_tx[i], 805 sizeof(struct ucc_geth_thread_data_tx)); 806 } 807 } 808 if (ugeth->p_thread_data_rx) { 809 int numThreadsRxNumerical; 810 switch (ugeth->ug_info->numThreadsRx) { 811 case UCC_GETH_NUM_OF_THREADS_1: 812 numThreadsRxNumerical = 1; 813 break; 814 case UCC_GETH_NUM_OF_THREADS_2: 815 numThreadsRxNumerical = 2; 816 break; 817 case UCC_GETH_NUM_OF_THREADS_4: 818 numThreadsRxNumerical = 4; 819 break; 820 case UCC_GETH_NUM_OF_THREADS_6: 821 numThreadsRxNumerical = 6; 822 break; 823 case UCC_GETH_NUM_OF_THREADS_8: 824 numThreadsRxNumerical = 8; 825 break; 826 default: 827 numThreadsRxNumerical = 0; 828 break; 829 } 
830 831 ugeth_info("Thread data RX:"); 832 ugeth_info("Base address: 0x%08x", 833 (u32) ugeth->p_thread_data_rx); 834 for (i = 0; i < numThreadsRxNumerical; i++) { 835 ugeth_info("Thread data RX[%d]:", i); 836 ugeth_info("Base address: 0x%08x", 837 (u32) & ugeth->p_thread_data_rx[i]); 838 mem_disp((u8 *) & ugeth->p_thread_data_rx[i], 839 sizeof(struct ucc_geth_thread_data_rx)); 840 } 841 } 842 if (ugeth->p_exf_glbl_param) { 843 ugeth_info("EXF global param:"); 844 ugeth_info("Base address: 0x%08x", 845 (u32) ugeth->p_exf_glbl_param); 846 mem_disp((u8 *) ugeth->p_exf_glbl_param, 847 sizeof(*ugeth->p_exf_glbl_param)); 848 } 849 if (ugeth->p_tx_glbl_pram) { 850 ugeth_info("TX global param:"); 851 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); 852 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", 853 (u32) & ugeth->p_tx_glbl_pram->temoder, 854 in_be16(&ugeth->p_tx_glbl_pram->temoder)); 855 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", 856 (u32) & ugeth->p_tx_glbl_pram->sqptr, 857 in_be32(&ugeth->p_tx_glbl_pram->sqptr)); 858 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", 859 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, 860 in_be32(&ugeth->p_tx_glbl_pram-> 861 schedulerbasepointer)); 862 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", 863 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, 864 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); 865 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", 866 (u32) & ugeth->p_tx_glbl_pram->tstate, 867 in_be32(&ugeth->p_tx_glbl_pram->tstate)); 868 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", 869 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], 870 ugeth->p_tx_glbl_pram->iphoffset[0]); 871 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", 872 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], 873 ugeth->p_tx_glbl_pram->iphoffset[1]); 874 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", 875 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], 876 ugeth->p_tx_glbl_pram->iphoffset[2]); 877 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", 878 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], 879 ugeth->p_tx_glbl_pram->iphoffset[3]); 880 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", 881 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], 882 ugeth->p_tx_glbl_pram->iphoffset[4]); 883 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", 884 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], 885 ugeth->p_tx_glbl_pram->iphoffset[5]); 886 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", 887 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], 888 ugeth->p_tx_glbl_pram->iphoffset[6]); 889 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", 890 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], 891 ugeth->p_tx_glbl_pram->iphoffset[7]); 892 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", 893 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], 894 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); 895 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", 896 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], 897 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); 898 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", 899 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], 900 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); 901 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", 902 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], 903 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); 904 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", 905 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], 906 
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); 907 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", 908 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], 909 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); 910 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", 911 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], 912 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); 913 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", 914 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], 915 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); 916 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", 917 (u32) & ugeth->p_tx_glbl_pram->tqptr, 918 in_be32(&ugeth->p_tx_glbl_pram->tqptr)); 919 } 920 if (ugeth->p_rx_glbl_pram) { 921 ugeth_info("RX global param:"); 922 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); 923 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", 924 (u32) & ugeth->p_rx_glbl_pram->remoder, 925 in_be32(&ugeth->p_rx_glbl_pram->remoder)); 926 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", 927 (u32) & ugeth->p_rx_glbl_pram->rqptr, 928 in_be32(&ugeth->p_rx_glbl_pram->rqptr)); 929 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", 930 (u32) & ugeth->p_rx_glbl_pram->typeorlen, 931 in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); 932 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", 933 (u32) & ugeth->p_rx_glbl_pram->rxgstpack, 934 ugeth->p_rx_glbl_pram->rxgstpack); 935 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", 936 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, 937 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); 938 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", 939 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, 940 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); 941 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", 942 (u32) & ugeth->p_rx_glbl_pram->rstate, 943 ugeth->p_rx_glbl_pram->rstate); 944 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", 945 (u32) & ugeth->p_rx_glbl_pram->mrblr, 946 in_be16(&ugeth->p_rx_glbl_pram->mrblr)); 947 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", 948 (u32) & ugeth->p_rx_glbl_pram->rbdqptr, 949 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); 950 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", 951 (u32) & ugeth->p_rx_glbl_pram->mflr, 952 in_be16(&ugeth->p_rx_glbl_pram->mflr)); 953 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", 954 (u32) & ugeth->p_rx_glbl_pram->minflr, 955 in_be16(&ugeth->p_rx_glbl_pram->minflr)); 956 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", 957 (u32) & ugeth->p_rx_glbl_pram->maxd1, 958 in_be16(&ugeth->p_rx_glbl_pram->maxd1)); 959 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", 960 (u32) & ugeth->p_rx_glbl_pram->maxd2, 961 in_be16(&ugeth->p_rx_glbl_pram->maxd2)); 962 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", 963 (u32) & ugeth->p_rx_glbl_pram->ecamptr, 964 in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); 965 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", 966 (u32) & ugeth->p_rx_glbl_pram->l2qt, 967 in_be32(&ugeth->p_rx_glbl_pram->l2qt)); 968 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", 969 (u32) & ugeth->p_rx_glbl_pram->l3qt[0], 970 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); 971 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", 972 (u32) & ugeth->p_rx_glbl_pram->l3qt[1], 973 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); 974 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", 975 (u32) & ugeth->p_rx_glbl_pram->l3qt[2], 976 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); 977 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", 978 (u32) & ugeth->p_rx_glbl_pram->l3qt[3], 
979 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); 980 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", 981 (u32) & ugeth->p_rx_glbl_pram->l3qt[4], 982 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); 983 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", 984 (u32) & ugeth->p_rx_glbl_pram->l3qt[5], 985 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); 986 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", 987 (u32) & ugeth->p_rx_glbl_pram->l3qt[6], 988 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); 989 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", 990 (u32) & ugeth->p_rx_glbl_pram->l3qt[7], 991 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); 992 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", 993 (u32) & ugeth->p_rx_glbl_pram->vlantype, 994 in_be16(&ugeth->p_rx_glbl_pram->vlantype)); 995 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", 996 (u32) & ugeth->p_rx_glbl_pram->vlantci, 997 in_be16(&ugeth->p_rx_glbl_pram->vlantci)); 998 for (i = 0; i < 64; i++) 999 ugeth_info 1000 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", 1001 i, 1002 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], 1003 ugeth->p_rx_glbl_pram->addressfiltering[i]); 1004 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", 1005 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, 1006 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); 1007 } 1008 if (ugeth->p_send_q_mem_reg) { 1009 ugeth_info("Send Q memory registers:"); 1010 ugeth_info("Base address: 0x%08x", 1011 (u32) ugeth->p_send_q_mem_reg); 1012 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 1013 ugeth_info("SQQD[%d]:", i); 1014 ugeth_info("Base address: 0x%08x", 1015 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); 1016 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], 1017 sizeof(struct ucc_geth_send_queue_qd)); 1018 } 1019 } 1020 if (ugeth->p_scheduler) { 1021 ugeth_info("Scheduler:"); 1022 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); 1023 mem_disp((u8 *) ugeth->p_scheduler, 1024 sizeof(*ugeth->p_scheduler)); 1025 } 1026 if (ugeth->p_tx_fw_statistics_pram) { 1027 ugeth_info("TX FW statistics pram:"); 1028 ugeth_info("Base address: 0x%08x", 1029 (u32) ugeth->p_tx_fw_statistics_pram); 1030 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, 1031 sizeof(*ugeth->p_tx_fw_statistics_pram)); 1032 } 1033 if (ugeth->p_rx_fw_statistics_pram) { 1034 ugeth_info("RX FW statistics pram:"); 1035 ugeth_info("Base address: 0x%08x", 1036 (u32) ugeth->p_rx_fw_statistics_pram); 1037 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, 1038 sizeof(*ugeth->p_rx_fw_statistics_pram)); 1039 } 1040 if (ugeth->p_rx_irq_coalescing_tbl) { 1041 ugeth_info("RX IRQ coalescing tables:"); 1042 ugeth_info("Base address: 0x%08x", 1043 (u32) ugeth->p_rx_irq_coalescing_tbl); 1044 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 1045 ugeth_info("RX IRQ coalescing table entry[%d]:", i); 1046 ugeth_info("Base address: 0x%08x", 1047 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 1048 coalescingentry[i]); 1049 ugeth_info 1050 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", 1051 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 1052 coalescingentry[i].interruptcoalescingmaxvalue, 1053 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 1054 coalescingentry[i]. 1055 interruptcoalescingmaxvalue)); 1056 ugeth_info 1057 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", 1058 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 1059 coalescingentry[i].interruptcoalescingcounter, 1060 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 1061 coalescingentry[i]. 
1062 interruptcoalescingcounter)); 1063 } 1064 } 1065 if (ugeth->p_rx_bd_qs_tbl) { 1066 ugeth_info("RX BD QS tables:"); 1067 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); 1068 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 1069 ugeth_info("RX BD QS table[%d]:", i); 1070 ugeth_info("Base address: 0x%08x", 1071 (u32) & ugeth->p_rx_bd_qs_tbl[i]); 1072 ugeth_info 1073 ("bdbaseptr : addr - 0x%08x, val - 0x%08x", 1074 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, 1075 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); 1076 ugeth_info 1077 ("bdptr : addr - 0x%08x, val - 0x%08x", 1078 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, 1079 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); 1080 ugeth_info 1081 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", 1082 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 1083 in_be32(&ugeth->p_rx_bd_qs_tbl[i]. 1084 externalbdbaseptr)); 1085 ugeth_info 1086 ("externalbdptr : addr - 0x%08x, val - 0x%08x", 1087 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, 1088 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); 1089 ugeth_info("ucode RX Prefetched BDs:"); 1090 ugeth_info("Base address: 0x%08x", 1091 (u32) 1092 qe_muram_addr(in_be32 1093 (&ugeth->p_rx_bd_qs_tbl[i]. 1094 bdbaseptr))); 1095 mem_disp((u8 *) 1096 qe_muram_addr(in_be32 1097 (&ugeth->p_rx_bd_qs_tbl[i]. 1098 bdbaseptr)), 1099 sizeof(struct ucc_geth_rx_prefetched_bds)); 1100 } 1101 } 1102 if (ugeth->p_init_enet_param_shadow) { 1103 int size; 1104 ugeth_info("Init enet param shadow:"); 1105 ugeth_info("Base address: 0x%08x", 1106 (u32) ugeth->p_init_enet_param_shadow); 1107 mem_disp((u8 *) ugeth->p_init_enet_param_shadow, 1108 sizeof(*ugeth->p_init_enet_param_shadow)); 1109 1110 size = sizeof(struct ucc_geth_thread_rx_pram); 1111 if (ugeth->ug_info->rxExtendedFiltering) { 1112 size += 1113 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 1114 if (ugeth->ug_info->largestexternallookupkeysize == 1115 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 1116 size += 1117 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 1118 if (ugeth->ug_info->largestexternallookupkeysize == 1119 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 1120 size += 1121 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 1122 } 1123 1124 dump_init_enet_entries(ugeth, 1125 &(ugeth->p_init_enet_param_shadow-> 1126 txthread[0]), 1127 ENET_INIT_PARAM_MAX_ENTRIES_TX, 1128 sizeof(struct ucc_geth_thread_tx_pram), 1129 ugeth->ug_info->riscTx, 0); 1130 dump_init_enet_entries(ugeth, 1131 &(ugeth->p_init_enet_param_shadow-> 1132 rxthread[0]), 1133 ENET_INIT_PARAM_MAX_ENTRIES_RX, size, 1134 ugeth->ug_info->riscRx, 1); 1135 } 1136} 1137#endif /* DEBUG */ 1138 1139static void init_default_reg_vals(u32 __iomem *upsmr_register, 1140 u32 __iomem *maccfg1_register, 1141 u32 __iomem *maccfg2_register) 1142{ 1143 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); 1144 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); 1145 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); 1146} 1147 1148static int init_half_duplex_params(int alt_beb, 1149 int back_pressure_no_backoff, 1150 int no_backoff, 1151 int excess_defer, 1152 u8 alt_beb_truncation, 1153 u8 max_retransmissions, 1154 u8 collision_window, 1155 u32 __iomem *hafdup_register) 1156{ 1157 u32 value = 0; 1158 1159 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || 1160 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || 1161 (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) 1162 return -EINVAL; 1163 1164 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); 1165 1166 if (alt_beb) 
1167 value |= HALFDUP_ALT_BEB; 1168 if (back_pressure_no_backoff) 1169 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; 1170 if (no_backoff) 1171 value |= HALFDUP_NO_BACKOFF; 1172 if (excess_defer) 1173 value |= HALFDUP_EXCESSIVE_DEFER; 1174 1175 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); 1176 1177 value |= collision_window; 1178 1179 out_be32(hafdup_register, value); 1180 return 0; 1181} 1182 1183static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, 1184 u8 non_btb_ipg, 1185 u8 min_ifg, 1186 u8 btb_ipg, 1187 u32 __iomem *ipgifg_register) 1188{ 1189 u32 value = 0; 1190 1191 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back 1192 IPG part 2 */ 1193 if (non_btb_cs_ipg > non_btb_ipg) 1194 return -EINVAL; 1195 1196 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || 1197 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || 1198 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ 1199 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) 1200 return -EINVAL; 1201 1202 value |= 1203 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & 1204 IPGIFG_NBTB_CS_IPG_MASK); 1205 value |= 1206 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & 1207 IPGIFG_NBTB_IPG_MASK); 1208 value |= 1209 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & 1210 IPGIFG_MIN_IFG_MASK); 1211 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); 1212 1213 out_be32(ipgifg_register, value); 1214 return 0; 1215} 1216 1217int init_flow_control_params(u32 automatic_flow_control_mode, 1218 int rx_flow_control_enable, 1219 int tx_flow_control_enable, 1220 u16 pause_period, 1221 u16 extension_field, 1222 u32 __iomem *upsmr_register, 1223 u32 __iomem *uempr_register, 1224 u32 __iomem *maccfg1_register) 1225{ 1226 u32 value = 0; 1227 1228 /* Set UEMPR register */ 1229 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; 1230 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; 1231 out_be32(uempr_register, value); 1232 1233 /* Set UPSMR register */ 1234 value = in_be32(upsmr_register); 1235 value |= automatic_flow_control_mode; 1236 out_be32(upsmr_register, value); 1237 1238 value = in_be32(maccfg1_register); 1239 if (rx_flow_control_enable) 1240 value |= MACCFG1_FLOW_RX; 1241 if (tx_flow_control_enable) 1242 value |= MACCFG1_FLOW_TX; 1243 out_be32(maccfg1_register, value); 1244 1245 return 0; 1246} 1247 1248static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, 1249 int auto_zero_hardware_statistics, 1250 u32 __iomem *upsmr_register, 1251 u16 __iomem *uescr_register) 1252{ 1253 u32 upsmr_value = 0; 1254 u16 uescr_value = 0; 1255 /* Enable hardware statistics gathering if requested */ 1256 if (enable_hardware_statistics) { 1257 upsmr_value = in_be32(upsmr_register); 1258 upsmr_value |= UPSMR_HSE; 1259 out_be32(upsmr_register, upsmr_value); 1260 } 1261 1262 /* Clear hardware statistics counters */ 1263 uescr_value = in_be16(uescr_register); 1264 uescr_value |= UESCR_CLRCNT; 1265 /* Automatically zero hardware statistics counters on read, 1266 if requested */ 1267 if (auto_zero_hardware_statistics) 1268 uescr_value |= UESCR_AUTOZ; 1269 out_be16(uescr_register, uescr_value); 1270 1271 return 0; 1272} 1273 1274static int init_firmware_statistics_gathering_mode(int 1275 enable_tx_firmware_statistics, 1276 int enable_rx_firmware_statistics, 1277 u32 __iomem *tx_rmon_base_ptr, 1278 u32 tx_firmware_statistics_structure_address, 1279 u32 __iomem *rx_rmon_base_ptr, 1280 u32 rx_firmware_statistics_structure_address, 1281 u16 __iomem *temoder_register, 
1282 u32 __iomem *remoder_register) 1283{ 1284 /* Note: this function does not check if */ 1285 /* the parameters it receives are NULL */ 1286 u16 temoder_value; 1287 u32 remoder_value; 1288 1289 if (enable_tx_firmware_statistics) { 1290 out_be32(tx_rmon_base_ptr, 1291 tx_firmware_statistics_structure_address); 1292 temoder_value = in_be16(temoder_register); 1293 temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE; 1294 out_be16(temoder_register, temoder_value); 1295 } 1296 1297 if (enable_rx_firmware_statistics) { 1298 out_be32(rx_rmon_base_ptr, 1299 rx_firmware_statistics_structure_address); 1300 remoder_value = in_be32(remoder_register); 1301 remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE; 1302 out_be32(remoder_register, remoder_value); 1303 } 1304 1305 return 0; 1306} 1307 1308static int init_mac_station_addr_regs(u8 address_byte_0, 1309 u8 address_byte_1, 1310 u8 address_byte_2, 1311 u8 address_byte_3, 1312 u8 address_byte_4, 1313 u8 address_byte_5, 1314 u32 __iomem *macstnaddr1_register, 1315 u32 __iomem *macstnaddr2_register) 1316{ 1317 u32 value = 0; 1318 1319 /* Example: for a station address of 0x12345678ABCD, */ 1320 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ 1321 1322 /* MACSTNADDR1 Register: */ 1323 1324 /* 0 7 8 15 */ 1325 /* station address byte 5 station address byte 4 */ 1326 /* 16 23 24 31 */ 1327 /* station address byte 3 station address byte 2 */ 1328 value |= (u32) ((address_byte_2 << 0) & 0x000000FF); 1329 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); 1330 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); 1331 value |= (u32) ((address_byte_5 << 24) & 0xFF000000); 1332 1333 out_be32(macstnaddr1_register, value); 1334 1335 /* MACSTNADDR2 Register: */ 1336 1337 /* 0 7 8 15 */ 1338 /* station address byte 1 station address byte 0 */ 1339 /* 16 23 24 31 */ 1340 /* reserved reserved */ 1341 value = 0; 1342 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); 1343 value |= (u32) ((address_byte_1 << 24) & 0xFF000000); 1344 1345 out_be32(macstnaddr2_register, value); 1346 1347 return 0; 1348} 1349 1350static int init_check_frame_length_mode(int length_check, 1351 u32 __iomem *maccfg2_register) 1352{ 1353 u32 value = 0; 1354 1355 value = in_be32(maccfg2_register); 1356 1357 if (length_check) 1358 value |= MACCFG2_LC; 1359 else 1360 value &= ~MACCFG2_LC; 1361 1362 out_be32(maccfg2_register, value); 1363 return 0; 1364} 1365 1366static int init_preamble_length(u8 preamble_length, 1367 u32 __iomem *maccfg2_register) 1368{ 1369 u32 value = 0; 1370 1371 if ((preamble_length < 3) || (preamble_length > 7)) 1372 return -EINVAL; 1373 1374 value = in_be32(maccfg2_register); 1375 value &= ~MACCFG2_PREL_MASK; 1376 value |= (preamble_length << MACCFG2_PREL_SHIFT); 1377 out_be32(maccfg2_register, value); 1378 return 0; 1379} 1380 1381static int init_rx_parameters(int reject_broadcast, 1382 int receive_short_frames, 1383 int promiscuous, u32 __iomem *upsmr_register) 1384{ 1385 u32 value = 0; 1386 1387 value = in_be32(upsmr_register); 1388 1389 if (reject_broadcast) 1390 value |= UPSMR_BRO; 1391 else 1392 value &= ~UPSMR_BRO; 1393 1394 if (receive_short_frames) 1395 value |= UPSMR_RSH; 1396 else 1397 value &= ~UPSMR_RSH; 1398 1399 if (promiscuous) 1400 value |= UPSMR_PRO; 1401 else 1402 value &= ~UPSMR_PRO; 1403 1404 out_be32(upsmr_register, value); 1405 1406 return 0; 1407} 1408 1409static int init_max_rx_buff_len(u16 max_rx_buf_len, 1410 u16 __iomem *mrblr_register) 1411{ 1412 /* max_rx_buf_len value must be a multiple of 128 */ 1413 if ((max_rx_buf_len 
== 0) 1414 || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) 1415 return -EINVAL; 1416 1417 out_be16(mrblr_register, max_rx_buf_len); 1418 return 0; 1419} 1420 1421static int init_min_frame_len(u16 min_frame_length, 1422 u16 __iomem *minflr_register, 1423 u16 __iomem *mrblr_register) 1424{ 1425 u16 mrblr_value = 0; 1426 1427 mrblr_value = in_be16(mrblr_register); 1428 if (min_frame_length >= (mrblr_value - 4)) 1429 return -EINVAL; 1430 1431 out_be16(minflr_register, min_frame_length); 1432 return 0; 1433} 1434 1435static int adjust_enet_interface(struct ucc_geth_private *ugeth) 1436{ 1437 struct ucc_geth_info *ug_info; 1438 struct ucc_geth __iomem *ug_regs; 1439 struct ucc_fast __iomem *uf_regs; 1440 int ret_val; 1441 u32 upsmr, maccfg2, tbiBaseAddress; 1442 u16 value; 1443 1444 ugeth_vdbg("%s: IN", __func__); 1445 1446 ug_info = ugeth->ug_info; 1447 ug_regs = ugeth->ug_regs; 1448 uf_regs = ugeth->uccf->uf_regs; 1449 1450 /* Set MACCFG2 */ 1451 maccfg2 = in_be32(&ug_regs->maccfg2); 1452 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; 1453 if ((ugeth->max_speed == SPEED_10) || 1454 (ugeth->max_speed == SPEED_100)) 1455 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; 1456 else if (ugeth->max_speed == SPEED_1000) 1457 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; 1458 maccfg2 |= ug_info->padAndCrc; 1459 out_be32(&ug_regs->maccfg2, maccfg2); 1460 1461 /* Set UPSMR */ 1462 upsmr = in_be32(&uf_regs->upsmr); 1463 upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM); 1464 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1465 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1466 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1467 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1468 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1469 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1470 upsmr |= UPSMR_RPM; 1471 switch (ugeth->max_speed) { 1472 case SPEED_10: 1473 upsmr |= UPSMR_R10M; 1474 /* FALLTHROUGH */ 1475 case SPEED_100: 1476 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) 1477 upsmr |= UPSMR_RMM; 1478 } 1479 } 1480 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1481 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1482 upsmr |= UPSMR_TBIM; 1483 } 1484 out_be32(&uf_regs->upsmr, upsmr); 1485 1486 /* Disable autonegotiation in tbi mode, because by default it 1487 comes up in autonegotiation mode. */ 1488 /* Note that this depends on proper setting in utbipar register. */ 1489 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1490 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1491 tbiBaseAddress = in_be32(&ug_regs->utbipar); 1492 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; 1493 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; 1494 value = ugeth->phydev->bus->read(ugeth->phydev->bus, 1495 (u8) tbiBaseAddress, ENET_TBI_MII_CR); 1496 value &= ~0x1000; /* Turn off autonegotiation */ 1497 ugeth->phydev->bus->write(ugeth->phydev->bus, 1498 (u8) tbiBaseAddress, ENET_TBI_MII_CR, value); 1499 } 1500 1501 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1502 1503 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); 1504 if (ret_val != 0) { 1505 if (netif_msg_probe(ugeth)) 1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1507 __func__); 1508 return ret_val; 1509 } 1510 1511 return 0; 1512} 1513 1514/* Called every time the controller might need to be made 1515 * aware of new link state. 
The PHY code conveys this 1516 * information through variables in the ugeth structure, and this 1517 * function converts those variables into the appropriate 1518 * register values, and can bring down the device if needed. 1519 */ 1520 1521static void adjust_link(struct net_device *dev) 1522{ 1523 struct ucc_geth_private *ugeth = netdev_priv(dev); 1524 struct ucc_geth __iomem *ug_regs; 1525 struct ucc_fast __iomem *uf_regs; 1526 struct phy_device *phydev = ugeth->phydev; 1527 unsigned long flags; 1528 int new_state = 0; 1529 1530 ug_regs = ugeth->ug_regs; 1531 uf_regs = ugeth->uccf->uf_regs; 1532 1533 spin_lock_irqsave(&ugeth->lock, flags); 1534 1535 if (phydev->link) { 1536 u32 tempval = in_be32(&ug_regs->maccfg2); 1537 u32 upsmr = in_be32(&uf_regs->upsmr); 1538 /* Now we make sure that we can be in full duplex mode. 1539 * If not, we operate in half-duplex mode. */ 1540 if (phydev->duplex != ugeth->oldduplex) { 1541 new_state = 1; 1542 if (!(phydev->duplex)) 1543 tempval &= ~(MACCFG2_FDX); 1544 else 1545 tempval |= MACCFG2_FDX; 1546 ugeth->oldduplex = phydev->duplex; 1547 } 1548 1549 if (phydev->speed != ugeth->oldspeed) { 1550 new_state = 1; 1551 switch (phydev->speed) { 1552 case SPEED_1000: 1553 tempval = ((tempval & 1554 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1555 MACCFG2_INTERFACE_MODE_BYTE); 1556 break; 1557 case SPEED_100: 1558 case SPEED_10: 1559 tempval = ((tempval & 1560 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1561 MACCFG2_INTERFACE_MODE_NIBBLE); 1562 /* if reduced mode, re-set UPSMR.R10M */ 1563 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1564 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1565 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1566 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1567 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1568 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1569 if (phydev->speed == SPEED_10) 1570 upsmr |= UPSMR_R10M; 1571 else 1572 upsmr &= ~(UPSMR_R10M); 1573 } 1574 break; 1575 default: 1576 if (netif_msg_link(ugeth)) 1577 ugeth_warn( 1578 "%s: Ack! Speed (%d) is not 10/100/1000!", 1579 dev->name, phydev->speed); 1580 break; 1581 } 1582 ugeth->oldspeed = phydev->speed; 1583 } 1584 1585 out_be32(&ug_regs->maccfg2, tempval); 1586 out_be32(&uf_regs->upsmr, upsmr); 1587 1588 if (!ugeth->oldlink) { 1589 new_state = 1; 1590 ugeth->oldlink = 1; 1591 } 1592 } else if (ugeth->oldlink) { 1593 new_state = 1; 1594 ugeth->oldlink = 0; 1595 ugeth->oldspeed = 0; 1596 ugeth->oldduplex = -1; 1597 } 1598 1599 if (new_state && netif_msg_link(ugeth)) 1600 phy_print_status(phydev); 1601 1602 spin_unlock_irqrestore(&ugeth->lock, flags); 1603} 1604 1605/* Configure the PHY for dev. 1606 * returns 0 if success. 
-1 if failure 1607 */ 1608static int init_phy(struct net_device *dev) 1609{ 1610 struct ucc_geth_private *priv = netdev_priv(dev); 1611 struct phy_device *phydev; 1612 char phy_id[BUS_ID_SIZE]; 1613 1614 priv->oldlink = 0; 1615 priv->oldspeed = 0; 1616 priv->oldduplex = -1; 1617 1618 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->ug_info->mdio_bus, 1619 priv->ug_info->phy_address); 1620 1621 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface); 1622 1623 if (IS_ERR(phydev)) { 1624 printk("%s: Could not attach to PHY\n", dev->name); 1625 return PTR_ERR(phydev); 1626 } 1627 1628 phydev->supported &= (ADVERTISED_10baseT_Half | 1629 ADVERTISED_10baseT_Full | 1630 ADVERTISED_100baseT_Half | 1631 ADVERTISED_100baseT_Full); 1632 1633 if (priv->max_speed == SPEED_1000) 1634 phydev->supported |= ADVERTISED_1000baseT_Full; 1635 1636 phydev->advertising = phydev->supported; 1637 1638 priv->phydev = phydev; 1639 1640 return 0; 1641} 1642 1643 1644 1645static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) 1646{ 1647 struct ucc_fast_private *uccf; 1648 u32 cecr_subblock; 1649 u32 temp; 1650 1651 uccf = ugeth->uccf; 1652 1653 /* Mask GRACEFUL STOP TX interrupt bit and clear it */ 1654 temp = in_be32(uccf->p_uccm); 1655 temp &= ~UCCE_GRA; 1656 out_be32(uccf->p_uccm, temp); 1657 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */ 1658 1659 /* Issue host command */ 1660 cecr_subblock = 1661 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1662 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 1663 QE_CR_PROTOCOL_ETHERNET, 0); 1664 1665 /* Wait for command to complete */ 1666 do { 1667 temp = in_be32(uccf->p_ucce); 1668 } while (!(temp & UCCE_GRA)); 1669 1670 uccf->stopped_tx = 1; 1671 1672 return 0; 1673} 1674 1675static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) 1676{ 1677 struct ucc_fast_private *uccf; 1678 u32 cecr_subblock; 1679 u8 temp; 1680 1681 uccf = ugeth->uccf; 1682 1683 /* Clear acknowledge bit */ 1684 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1685 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; 1686 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); 1687 1688 /* Keep issuing command and checking acknowledge bit until 1689 it is asserted, according to spec */ 1690 do { 1691 /* Issue host command */ 1692 cecr_subblock = 1693 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 
1694 ucc_num); 1695 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1696 QE_CR_PROTOCOL_ETHERNET, 0); 1697 1698 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1699 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); 1700 1701 uccf->stopped_rx = 1; 1702 1703 return 0; 1704} 1705 1706static int ugeth_restart_tx(struct ucc_geth_private *ugeth) 1707{ 1708 struct ucc_fast_private *uccf; 1709 u32 cecr_subblock; 1710 1711 uccf = ugeth->uccf; 1712 1713 cecr_subblock = 1714 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1715 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); 1716 uccf->stopped_tx = 0; 1717 1718 return 0; 1719} 1720 1721static int ugeth_restart_rx(struct ucc_geth_private *ugeth) 1722{ 1723 struct ucc_fast_private *uccf; 1724 u32 cecr_subblock; 1725 1726 uccf = ugeth->uccf; 1727 1728 cecr_subblock = 1729 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1730 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 1731 0); 1732 uccf->stopped_rx = 0; 1733 1734 return 0; 1735} 1736 1737static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) 1738{ 1739 struct ucc_fast_private *uccf; 1740 int enabled_tx, enabled_rx; 1741 1742 uccf = ugeth->uccf; 1743 1744 /* check if the UCC number is in range. */ 1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1746 if (netif_msg_probe(ugeth)) 1747 ugeth_err("%s: ucc_num out of range.", __func__); 1748 return -EINVAL; 1749 } 1750 1751 enabled_tx = uccf->enabled_tx; 1752 enabled_rx = uccf->enabled_rx; 1753 1754 /* Get Tx and Rx going again, in case this channel was actively 1755 disabled. */ 1756 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) 1757 ugeth_restart_tx(ugeth); 1758 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) 1759 ugeth_restart_rx(ugeth); 1760 1761 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ 1762 1763 return 0; 1764 1765} 1766 1767static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode) 1768{ 1769 struct ucc_fast_private *uccf; 1770 1771 uccf = ugeth->uccf; 1772 1773 /* check if the UCC number is in range. 
*/ 1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1775 if (netif_msg_probe(ugeth)) 1776 ugeth_err("%s: ucc_num out of range.", __func__); 1777 return -EINVAL; 1778 } 1779 1780 /* Stop any transmissions */ 1781 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) 1782 ugeth_graceful_stop_tx(ugeth); 1783 1784 /* Stop any receptions */ 1785 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) 1786 ugeth_graceful_stop_rx(ugeth); 1787 1788 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ 1789 1790 return 0; 1791} 1792 1793static void ugeth_dump_regs(struct ucc_geth_private *ugeth) 1794{ 1795#ifdef DEBUG 1796 ucc_fast_dump_regs(ugeth->uccf); 1797 dump_regs(ugeth); 1798 dump_bds(ugeth); 1799#endif 1800} 1801 1802#ifdef CONFIG_UGETH_FILTERING 1803static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params * 1804 p_UccGethTadParams, 1805 struct qe_fltr_tad *qe_fltr_tad) 1806{ 1807 u16 temp; 1808 1809 /* Zero serialized TAD */ 1810 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE); 1811 1812 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */ 1813 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode || 1814 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) 1815 || (p_UccGethTadParams->vnontag_op != 1816 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP) 1817 ) 1818 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF; 1819 if (p_UccGethTadParams->reject_frame) 1820 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ; 1821 temp = 1822 (u16) (((u16) p_UccGethTadParams-> 1823 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT); 1824 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */ 1825 1826 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */ 1827 if (p_UccGethTadParams->vnontag_op == 1828 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT) 1829 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP; 1830 qe_fltr_tad->serialized[1] |= 1831 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT; 1832 1833 qe_fltr_tad->serialized[2] |= 1834 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT; 1835 /* upper bits */ 1836 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8); 1837 /* lower bits */ 1838 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff); 1839 1840 return 0; 1841} 1842 1843static struct enet_addr_container_t 1844 *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth, 1845 struct enet_addr *p_enet_addr) 1846{ 1847 struct enet_addr_container *enet_addr_cont; 1848 struct list_head *p_lh; 1849 u16 i, num; 1850 int32_t j; 1851 u8 *p_counter; 1852 1853 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { 1854 p_lh = &ugeth->group_hash_q; 1855 p_counter = &(ugeth->numGroupAddrInHash); 1856 } else { 1857 p_lh = &ugeth->ind_hash_q; 1858 p_counter = &(ugeth->numIndAddrInHash); 1859 } 1860 1861 if (!p_lh) 1862 return NULL; 1863 1864 num = *p_counter; 1865 1866 for (i = 0; i < num; i++) { 1867 enet_addr_cont = 1868 (struct enet_addr_container *) 1869 ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); 1870 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) { 1871 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j]) 1872 break; 1873 if (j == 0) 1874 return enet_addr_cont; /* Found */ 1875 } 1876 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 1877 } 1878 return NULL; 1879} 1880 1881static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth, 1882 struct enet_addr *p_enet_addr) 1883{ 1884 enum ucc_geth_enet_address_recognition_location 
location; 1885 struct enet_addr_container *enet_addr_cont; 1886 struct list_head *p_lh; 1887 u8 i; 1888 u32 limit; 1889 u8 *p_counter; 1890 1891 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { 1892 p_lh = &ugeth->group_hash_q; 1893 limit = ugeth->ug_info->maxGroupAddrInHash; 1894 location = 1895 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH; 1896 p_counter = &(ugeth->numGroupAddrInHash); 1897 } else { 1898 p_lh = &ugeth->ind_hash_q; 1899 limit = ugeth->ug_info->maxIndAddrInHash; 1900 location = 1901 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH; 1902 p_counter = &(ugeth->numIndAddrInHash); 1903 } 1904 1905 if ((enet_addr_cont = 1906 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) { 1907 list_add(p_lh, &enet_addr_cont->node); /* Put it back */ 1908 return 0; 1909 } 1910 if ((!p_lh) || (!(*p_counter < limit))) 1911 return -EBUSY; 1912 if (!(enet_addr_cont = get_enet_addr_container())) 1913 return -ENOMEM; 1914 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) 1915 (enet_addr_cont->address)[i] = (*p_enet_addr)[i]; 1916 enet_addr_cont->location = location; 1917 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 1918 ++(*p_counter); 1919 1920 hw_add_addr_in_hash(ugeth, enet_addr_cont->address); 1921 return 0; 1922} 1923 1924static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth, 1925 struct enet_addr *p_enet_addr) 1926{ 1927 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 1928 struct enet_addr_container *enet_addr_cont; 1929 struct ucc_fast_private *uccf; 1930 enum comm_dir comm_dir; 1931 u16 i, num; 1932 struct list_head *p_lh; 1933 u32 *addr_h, *addr_l; 1934 u8 *p_counter; 1935 1936 uccf = ugeth->uccf; 1937 1938 p_82xx_addr_filt = 1939 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 1940 addressfiltering; 1941 1942 if (! 1943 (enet_addr_cont = 1944 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) 1945 return -ENOENT; 1946 1947 /* It's been found and removed from the CQ. */ 1948 /* Now destroy its container */ 1949 put_enet_addr_container(enet_addr_cont); 1950 1951 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { 1952 addr_h = &(p_82xx_addr_filt->gaddr_h); 1953 addr_l = &(p_82xx_addr_filt->gaddr_l); 1954 p_lh = &ugeth->group_hash_q; 1955 p_counter = &(ugeth->numGroupAddrInHash); 1956 } else { 1957 addr_h = &(p_82xx_addr_filt->iaddr_h); 1958 addr_l = &(p_82xx_addr_filt->iaddr_l); 1959 p_lh = &ugeth->ind_hash_q; 1960 p_counter = &(ugeth->numIndAddrInHash); 1961 } 1962 1963 comm_dir = 0; 1964 if (uccf->enabled_tx) 1965 comm_dir |= COMM_DIR_TX; 1966 if (uccf->enabled_rx) 1967 comm_dir |= COMM_DIR_RX; 1968 if (comm_dir) 1969 ugeth_disable(ugeth, comm_dir); 1970 1971 /* Clear the hash table. 
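
The group/individual hash is only a summary of every address added to it: several addresses can map onto the same filter bit, so a single entry cannot simply be subtracted from the registers. That is why this function zeroes both filter words and then replays every address still held in the software queue. A self-contained sketch of the same rebuild idea; hash_bucket() below is a stand-in, since in the driver the CPM computes the real bucket when hw_add_addr_in_hash() runs:

	#include <stdint.h>

	/* Stand-in bucket function; the real mapping is a CRC done in hardware. */
	static unsigned int hash_bucket(const uint8_t mac[6])
	{
		unsigned int i, b = 0;

		for (i = 0; i < 6; i++)
			b ^= mac[i];
		return b & 63;			/* the two filter words give 64 bits */
	}

	static uint64_t rebuild_group_filter(const uint8_t macs[][6], unsigned int n)
	{
		uint64_t filter = 0;
		unsigned int i;

		/* Clear, then re-add everything that should remain. */
		for (i = 0; i < n; i++)
			filter |= 1ULL << hash_bucket(macs[i]);
		return filter;
	}
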
*/ 1972 out_be32(addr_h, 0x00000000); 1973 out_be32(addr_l, 0x00000000); 1974 1975 /* Add all remaining CQ elements back into hash */ 1976 num = --(*p_counter); 1977 for (i = 0; i < num; i++) { 1978 enet_addr_cont = 1979 (struct enet_addr_container *) 1980 ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); 1981 hw_add_addr_in_hash(ugeth, enet_addr_cont->address); 1982 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 1983 } 1984 1985 if (comm_dir) 1986 ugeth_enable(ugeth, comm_dir); 1987 1988 return 0; 1989} 1990#endif /* CONFIG_UGETH_FILTERING */ 1991 1992static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * 1993 ugeth, 1994 enum enet_addr_type 1995 enet_addr_type) 1996{ 1997 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 1998 struct ucc_fast_private *uccf; 1999 enum comm_dir comm_dir; 2000 struct list_head *p_lh; 2001 u16 i, num; 2002 u32 __iomem *addr_h; 2003 u32 __iomem *addr_l; 2004 u8 *p_counter; 2005 2006 uccf = ugeth->uccf; 2007 2008 p_82xx_addr_filt = 2009 (struct ucc_geth_82xx_address_filtering_pram __iomem *) 2010 ugeth->p_rx_glbl_pram->addressfiltering; 2011 2012 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { 2013 addr_h = &(p_82xx_addr_filt->gaddr_h); 2014 addr_l = &(p_82xx_addr_filt->gaddr_l); 2015 p_lh = &ugeth->group_hash_q; 2016 p_counter = &(ugeth->numGroupAddrInHash); 2017 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { 2018 addr_h = &(p_82xx_addr_filt->iaddr_h); 2019 addr_l = &(p_82xx_addr_filt->iaddr_l); 2020 p_lh = &ugeth->ind_hash_q; 2021 p_counter = &(ugeth->numIndAddrInHash); 2022 } else 2023 return -EINVAL; 2024 2025 comm_dir = 0; 2026 if (uccf->enabled_tx) 2027 comm_dir |= COMM_DIR_TX; 2028 if (uccf->enabled_rx) 2029 comm_dir |= COMM_DIR_RX; 2030 if (comm_dir) 2031 ugeth_disable(ugeth, comm_dir); 2032 2033 /* Clear the hash table. 
*/ 2034 out_be32(addr_h, 0x00000000); 2035 out_be32(addr_l, 0x00000000); 2036 2037 if (!p_lh) 2038 return 0; 2039 2040 num = *p_counter; 2041 2042 /* Delete all remaining CQ elements */ 2043 for (i = 0; i < num; i++) 2044 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); 2045 2046 *p_counter = 0; 2047 2048 if (comm_dir) 2049 ugeth_enable(ugeth, comm_dir); 2050 2051 return 0; 2052} 2053 2054#ifdef CONFIG_UGETH_FILTERING 2055static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth, 2056 struct enet_addr *p_enet_addr, 2057 u8 paddr_num) 2058{ 2059 int i; 2060 2061 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) 2062 ugeth_warn 2063 ("%s: multicast address added to paddr will have no " 2064 "effect - is this what you wanted?", 2065 __func__); 2066 2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ 2068 /* store address in our database */ 2069 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) 2070 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i]; 2071 /* put in hardware */ 2072 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num); 2073} 2074#endif /* CONFIG_UGETH_FILTERING */ 2075 2076static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, 2077 u8 paddr_num) 2078{ 2079 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ 2080 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ 2081} 2082 2083static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 2084{ 2085 u16 i, j; 2086 u8 __iomem *bd; 2087 2088 if (!ugeth) 2089 return; 2090 2091 if (ugeth->uccf) { 2092 ucc_fast_free(ugeth->uccf); 2093 ugeth->uccf = NULL; 2094 } 2095 2096 if (ugeth->p_thread_data_tx) { 2097 qe_muram_free(ugeth->thread_dat_tx_offset); 2098 ugeth->p_thread_data_tx = NULL; 2099 } 2100 if (ugeth->p_thread_data_rx) { 2101 qe_muram_free(ugeth->thread_dat_rx_offset); 2102 ugeth->p_thread_data_rx = NULL; 2103 } 2104 if (ugeth->p_exf_glbl_param) { 2105 qe_muram_free(ugeth->exf_glbl_param_offset); 2106 ugeth->p_exf_glbl_param = NULL; 2107 } 2108 if (ugeth->p_rx_glbl_pram) { 2109 qe_muram_free(ugeth->rx_glbl_pram_offset); 2110 ugeth->p_rx_glbl_pram = NULL; 2111 } 2112 if (ugeth->p_tx_glbl_pram) { 2113 qe_muram_free(ugeth->tx_glbl_pram_offset); 2114 ugeth->p_tx_glbl_pram = NULL; 2115 } 2116 if (ugeth->p_send_q_mem_reg) { 2117 qe_muram_free(ugeth->send_q_mem_reg_offset); 2118 ugeth->p_send_q_mem_reg = NULL; 2119 } 2120 if (ugeth->p_scheduler) { 2121 qe_muram_free(ugeth->scheduler_offset); 2122 ugeth->p_scheduler = NULL; 2123 } 2124 if (ugeth->p_tx_fw_statistics_pram) { 2125 qe_muram_free(ugeth->tx_fw_statistics_pram_offset); 2126 ugeth->p_tx_fw_statistics_pram = NULL; 2127 } 2128 if (ugeth->p_rx_fw_statistics_pram) { 2129 qe_muram_free(ugeth->rx_fw_statistics_pram_offset); 2130 ugeth->p_rx_fw_statistics_pram = NULL; 2131 } 2132 if (ugeth->p_rx_irq_coalescing_tbl) { 2133 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); 2134 ugeth->p_rx_irq_coalescing_tbl = NULL; 2135 } 2136 if (ugeth->p_rx_bd_qs_tbl) { 2137 qe_muram_free(ugeth->rx_bd_qs_tbl_offset); 2138 ugeth->p_rx_bd_qs_tbl = NULL; 2139 } 2140 if (ugeth->p_init_enet_param_shadow) { 2141 return_init_enet_entries(ugeth, 2142 &(ugeth->p_init_enet_param_shadow-> 2143 rxthread[0]), 2144 ENET_INIT_PARAM_MAX_ENTRIES_RX, 2145 ugeth->ug_info->riscRx, 1); 2146 return_init_enet_entries(ugeth, 2147 &(ugeth->p_init_enet_param_shadow-> 2148 txthread[0]), 2149 ENET_INIT_PARAM_MAX_ENTRIES_TX, 2150 ugeth->ug_info->riscTx, 0); 2151 kfree(ugeth->p_init_enet_param_shadow); 2152 
ugeth->p_init_enet_param_shadow = NULL; 2153 } 2154 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 2155 bd = ugeth->p_tx_bd_ring[i]; 2156 if (!bd) 2157 continue; 2158 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2159 if (ugeth->tx_skbuff[i][j]) { 2160 dma_unmap_single(&ugeth->dev->dev, 2161 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2162 (in_be32((u32 __iomem *)bd) & 2163 BD_LENGTH_MASK), 2164 DMA_TO_DEVICE); 2165 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); 2166 ugeth->tx_skbuff[i][j] = NULL; 2167 } 2168 } 2169 2170 kfree(ugeth->tx_skbuff[i]); 2171 2172 if (ugeth->p_tx_bd_ring[i]) { 2173 if (ugeth->ug_info->uf_info.bd_mem_part == 2174 MEM_PART_SYSTEM) 2175 kfree((void *)ugeth->tx_bd_ring_offset[i]); 2176 else if (ugeth->ug_info->uf_info.bd_mem_part == 2177 MEM_PART_MURAM) 2178 qe_muram_free(ugeth->tx_bd_ring_offset[i]); 2179 ugeth->p_tx_bd_ring[i] = NULL; 2180 } 2181 } 2182 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 2183 if (ugeth->p_rx_bd_ring[i]) { 2184 /* Return existing data buffers in ring */ 2185 bd = ugeth->p_rx_bd_ring[i]; 2186 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2187 if (ugeth->rx_skbuff[i][j]) { 2188 dma_unmap_single(&ugeth->dev->dev, 2189 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2190 ugeth->ug_info-> 2191 uf_info.max_rx_buf_length + 2192 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 2193 DMA_FROM_DEVICE); 2194 dev_kfree_skb_any( 2195 ugeth->rx_skbuff[i][j]); 2196 ugeth->rx_skbuff[i][j] = NULL; 2197 } 2198 bd += sizeof(struct qe_bd); 2199 } 2200 2201 kfree(ugeth->rx_skbuff[i]); 2202 2203 if (ugeth->ug_info->uf_info.bd_mem_part == 2204 MEM_PART_SYSTEM) 2205 kfree((void *)ugeth->rx_bd_ring_offset[i]); 2206 else if (ugeth->ug_info->uf_info.bd_mem_part == 2207 MEM_PART_MURAM) 2208 qe_muram_free(ugeth->rx_bd_ring_offset[i]); 2209 ugeth->p_rx_bd_ring[i] = NULL; 2210 } 2211 } 2212 while (!list_empty(&ugeth->group_hash_q)) 2213 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 2214 (dequeue(&ugeth->group_hash_q))); 2215 while (!list_empty(&ugeth->ind_hash_q)) 2216 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 2217 (dequeue(&ugeth->ind_hash_q))); 2218 2219} 2220 2221static void ucc_geth_set_multi(struct net_device *dev) 2222{ 2223 struct ucc_geth_private *ugeth; 2224 struct dev_mc_list *dmi; 2225 struct ucc_fast __iomem *uf_regs; 2226 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2227 int i; 2228 2229 ugeth = netdev_priv(dev); 2230 2231 uf_regs = ugeth->uccf->uf_regs; 2232 2233 if (dev->flags & IFF_PROMISC) { 2234 2235 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO); 2236 2237 } else { 2238 2239 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO); 2240 2241 p_82xx_addr_filt = 2242 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 2243 p_rx_glbl_pram->addressfiltering; 2244 2245 if (dev->flags & IFF_ALLMULTI) { 2246 /* Catch all multicast addresses, so set the 2247 * filter to all 1's. 2248 */ 2249 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); 2250 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); 2251 } else { 2252 /* Clear filter and add the addresses in the list. 2253 */ 2254 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2255 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2256 2257 dmi = dev->mc_list; 2258 2259 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { 2260 2261 /* Only support group multicast for now. 2262 */ 2263 if (!(dmi->dmi_addr[0] & 1)) 2264 continue; 2265 2266 /* Ask CPM to run CRC and set bit in 2267 * filter mask. 
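
ucc_geth_set_multi() picks one of three receive-filter modes: promiscuous (UPSMR_PRO accepts everything), all-multicast (both group filter words forced to all ones), or per-address hashing, where the CPM runs a CRC over each listed address and sets a single bit in the group filter. Only group addresses are eligible for the hash; the list walk just above skips anything else using the I/G bit of the first octet, as in this stand-alone sketch:

	#include <stdbool.h>
	#include <stdint.h>

	/* A destination is a group (multicast or broadcast) address when the
	 * I/G bit - the least significant bit of the first octet - is set;
	 * that is the test performed before asking the CPM to hash it. */
	static bool is_group_addr(const uint8_t mac[6])
	{
		return mac[0] & 1;
	}
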
2268 */ 2269 hw_add_addr_in_hash(ugeth, dmi->dmi_addr); 2270 } 2271 } 2272 } 2273} 2274 2275static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2276{ 2277 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; 2278 struct phy_device *phydev = ugeth->phydev; 2279 u32 tempval; 2280 2281 ugeth_vdbg("%s: IN", __func__); 2282 2283 /* Disable the controller */ 2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2285 2286 /* Tell the kernel the link is down */ 2287 phy_stop(phydev); 2288 2289 /* Mask all interrupts */ 2290 out_be32(ugeth->uccf->p_uccm, 0x00000000); 2291 2292 /* Clear all interrupts */ 2293 out_be32(ugeth->uccf->p_ucce, 0xffffffff); 2294 2295 /* Disable Rx and Tx */ 2296 tempval = in_be32(&ug_regs->maccfg1); 2297 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2298 out_be32(&ug_regs->maccfg1, tempval); 2299 2300 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); 2301 2302 ucc_geth_memclean(ugeth); 2303} 2304 2305static int ucc_struct_init(struct ucc_geth_private *ugeth) 2306{ 2307 struct ucc_geth_info *ug_info; 2308 struct ucc_fast_info *uf_info; 2309 int i; 2310 2311 ug_info = ugeth->ug_info; 2312 uf_info = &ug_info->uf_info; 2313 2314 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || 2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2316 if (netif_msg_probe(ugeth)) 2317 ugeth_err("%s: Bad memory partition value.", 2318 __func__); 2319 return -EINVAL; 2320 } 2321 2322 /* Rx BD lengths */ 2323 for (i = 0; i < ug_info->numQueuesRx; i++) { 2324 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || 2325 (ug_info->bdRingLenRx[i] % 2326 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { 2327 if (netif_msg_probe(ugeth)) 2328 ugeth_err 2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2330 __func__); 2331 return -EINVAL; 2332 } 2333 } 2334 2335 /* Tx BD lengths */ 2336 for (i = 0; i < ug_info->numQueuesTx; i++) { 2337 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { 2338 if (netif_msg_probe(ugeth)) 2339 ugeth_err 2340 ("%s: Tx BD ring length must be no smaller than 2.", 2341 __func__); 2342 return -EINVAL; 2343 } 2344 } 2345 2346 /* mrblr */ 2347 if ((uf_info->max_rx_buf_length == 0) || 2348 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { 2349 if (netif_msg_probe(ugeth)) 2350 ugeth_err 2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2352 __func__); 2353 return -EINVAL; 2354 } 2355 2356 /* num Tx queues */ 2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2358 if (netif_msg_probe(ugeth)) 2359 ugeth_err("%s: number of tx queues too large.", __func__); 2360 return -EINVAL; 2361 } 2362 2363 /* num Rx queues */ 2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2365 if (netif_msg_probe(ugeth)) 2366 ugeth_err("%s: number of rx queues too large.", __func__); 2367 return -EINVAL; 2368 } 2369 2370 /* l2qt */ 2371 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { 2372 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { 2373 if (netif_msg_probe(ugeth)) 2374 ugeth_err 2375 ("%s: VLAN priority table entry must not be" 2376 " larger than number of Rx queues.", 2377 __func__); 2378 return -EINVAL; 2379 } 2380 } 2381 2382 /* l3qt */ 2383 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { 2384 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { 2385 if (netif_msg_probe(ugeth)) 2386 ugeth_err 2387 ("%s: IP priority table entry must not be" 2388 " larger than number of Rx queues.", 2389 __func__); 2390 return -EINVAL; 2391 } 2392 } 2393 2394 if (ug_info->cam && !ug_info->ecamptr) { 2395 if (netif_msg_probe(ugeth)) 2396 ugeth_err("%s: If cam mode is 
chosen, must supply cam ptr.", 2397 __func__); 2398 return -EINVAL; 2399 } 2400 2401 if ((ug_info->numStationAddresses != 2402 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) 2403 && ug_info->rxExtendedFiltering) { 2404 if (netif_msg_probe(ugeth)) 2405 ugeth_err("%s: Number of station addresses greater than 1 " 2406 "not allowed in extended parsing mode.", 2407 __func__); 2408 return -EINVAL; 2409 } 2410 2411 /* Generate uccm_mask for receive */ 2412 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ 2413 for (i = 0; i < ug_info->numQueuesRx; i++) 2414 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); 2415 2416 for (i = 0; i < ug_info->numQueuesTx; i++) 2417 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); 2418 /* Initialize the general fast UCC block. */ 2419 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2420 if (netif_msg_probe(ugeth)) 2421 ugeth_err("%s: Failed to init uccf.", __func__); 2422 ucc_geth_memclean(ugeth); 2423 return -ENOMEM; 2424 } 2425 2426 ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); 2427 2428 return 0; 2429} 2430 2431static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2432{ 2433 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2434 struct ucc_geth_init_pram __iomem *p_init_enet_pram; 2435 struct ucc_fast_private *uccf; 2436 struct ucc_geth_info *ug_info; 2437 struct ucc_fast_info *uf_info; 2438 struct ucc_fast __iomem *uf_regs; 2439 struct ucc_geth __iomem *ug_regs; 2440 int ret_val = -EINVAL; 2441 u32 remoder = UCC_GETH_REMODER_INIT; 2442 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; 2443 u32 ifstat, i, j, size, l2qt, l3qt, length; 2444 u16 temoder = UCC_GETH_TEMODER_INIT; 2445 u16 test; 2446 u8 function_code = 0; 2447 u8 __iomem *bd; 2448 u8 __iomem *endOfRing; 2449 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2450 2451 ugeth_vdbg("%s: IN", __func__); 2452 uccf = ugeth->uccf; 2453 ug_info = ugeth->ug_info; 2454 uf_info = &ug_info->uf_info; 2455 uf_regs = uccf->uf_regs; 2456 ug_regs = ugeth->ug_regs; 2457 2458 switch (ug_info->numThreadsRx) { 2459 case UCC_GETH_NUM_OF_THREADS_1: 2460 numThreadsRxNumerical = 1; 2461 break; 2462 case UCC_GETH_NUM_OF_THREADS_2: 2463 numThreadsRxNumerical = 2; 2464 break; 2465 case UCC_GETH_NUM_OF_THREADS_4: 2466 numThreadsRxNumerical = 4; 2467 break; 2468 case UCC_GETH_NUM_OF_THREADS_6: 2469 numThreadsRxNumerical = 6; 2470 break; 2471 case UCC_GETH_NUM_OF_THREADS_8: 2472 numThreadsRxNumerical = 8; 2473 break; 2474 default: 2475 if (netif_msg_ifup(ugeth)) 2476 ugeth_err("%s: Bad number of Rx threads value.", 2477 __func__); 2478 ucc_geth_memclean(ugeth); 2479 return -EINVAL; 2480 break; 2481 } 2482 2483 switch (ug_info->numThreadsTx) { 2484 case UCC_GETH_NUM_OF_THREADS_1: 2485 numThreadsTxNumerical = 1; 2486 break; 2487 case UCC_GETH_NUM_OF_THREADS_2: 2488 numThreadsTxNumerical = 2; 2489 break; 2490 case UCC_GETH_NUM_OF_THREADS_4: 2491 numThreadsTxNumerical = 4; 2492 break; 2493 case UCC_GETH_NUM_OF_THREADS_6: 2494 numThreadsTxNumerical = 6; 2495 break; 2496 case UCC_GETH_NUM_OF_THREADS_8: 2497 numThreadsTxNumerical = 8; 2498 break; 2499 default: 2500 if (netif_msg_ifup(ugeth)) 2501 ugeth_err("%s: Bad number of Tx threads value.", 2502 __func__); 2503 ucc_geth_memclean(ugeth); 2504 return -EINVAL; 2505 break; 2506 } 2507 2508 /* Calculate rx_extended_features */ 2509 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || 2510 ug_info->ipAddressAlignment || 2511 (ug_info->numStationAddresses != 2512 
UCC_GETH_NUM_OF_STATION_ADDRESSES_1); 2513 2514 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || 2515 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) 2516 || (ug_info->vlanOperationNonTagged != 2517 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); 2518 2519 init_default_reg_vals(&uf_regs->upsmr, 2520 &ug_regs->maccfg1, &ug_regs->maccfg2); 2521 2522 /* Set UPSMR */ 2523 /* For more details see the hardware spec. */ 2524 init_rx_parameters(ug_info->bro, 2525 ug_info->rsh, ug_info->pro, &uf_regs->upsmr); 2526 2527 /* We're going to ignore other registers for now, */ 2528 /* except as needed to get up and running */ 2529 2530 /* Set MACCFG1 */ 2531 /* For more details see the hardware spec. */ 2532 init_flow_control_params(ug_info->aufc, 2533 ug_info->receiveFlowControl, 2534 ug_info->transmitFlowControl, 2535 ug_info->pausePeriod, 2536 ug_info->extensionField, 2537 &uf_regs->upsmr, 2538 &ug_regs->uempr, &ug_regs->maccfg1); 2539 2540 maccfg1 = in_be32(&ug_regs->maccfg1); 2541 maccfg1 |= MACCFG1_ENABLE_RX; 2542 maccfg1 |= MACCFG1_ENABLE_TX; 2543 out_be32(&ug_regs->maccfg1, maccfg1); 2544 2545 /* Set IPGIFG */ 2546 /* For more details see the hardware spec. */ 2547 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, 2548 ug_info->nonBackToBackIfgPart2, 2549 ug_info-> 2550 miminumInterFrameGapEnforcement, 2551 ug_info->backToBackInterFrameGap, 2552 &ug_regs->ipgifg); 2553 if (ret_val != 0) { 2554 if (netif_msg_ifup(ugeth)) 2555 ugeth_err("%s: IPGIFG initialization parameter too large.", 2556 __func__); 2557 ucc_geth_memclean(ugeth); 2558 return ret_val; 2559 } 2560 2561 /* Set HAFDUP */ 2562 /* For more details see the hardware spec. */ 2563 ret_val = init_half_duplex_params(ug_info->altBeb, 2564 ug_info->backPressureNoBackoff, 2565 ug_info->noBackoff, 2566 ug_info->excessDefer, 2567 ug_info->altBebTruncation, 2568 ug_info->maxRetransmission, 2569 ug_info->collisionWindow, 2570 &ug_regs->hafdup); 2571 if (ret_val != 0) { 2572 if (netif_msg_ifup(ugeth)) 2573 ugeth_err("%s: Half Duplex initialization parameter too large.", 2574 __func__); 2575 ucc_geth_memclean(ugeth); 2576 return ret_val; 2577 } 2578 2579 /* Set IFSTAT */ 2580 /* For more details see the hardware spec. */ 2581 /* Read only - resets upon read */ 2582 ifstat = in_be32(&ug_regs->ifstat); 2583 2584 /* Clear UEMPR */ 2585 /* For more details see the hardware spec. */ 2586 out_be32(&ug_regs->uempr, 0); 2587 2588 /* Set UESCR */ 2589 /* For more details see the hardware spec. 
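
In the BD-ring allocation loops that follow, a ring placed in system memory is kmalloc'ed with `align` spare bytes and then rounded up by hand, while a MURAM-backed ring gets its alignment from qe_muram_alloc() directly. The round-up used there is sketched stand-alone below; it assumes a power-of-two alignment and relies on the over-allocation, since it can skip up to `align` bytes at the start of the buffer:

	#include <stdint.h>

	/* Round an address up to an 'align' boundary (power of two).  Adding
	 * 'align' before masking always lands inside the over-allocated
	 * buffer, even though it advances past 'raw' when 'raw' was already
	 * aligned. */
	static uintptr_t ring_align(uintptr_t raw, uintptr_t align)
	{
		return (raw + align) & ~(align - 1);
	}
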
*/ 2590 init_hw_statistics_gathering_mode((ug_info->statisticsMode & 2591 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), 2592 0, &uf_regs->upsmr, &ug_regs->uescr); 2593 2594 /* Allocate Tx bds */ 2595 for (j = 0; j < ug_info->numQueuesTx; j++) { 2596 /* Allocate in multiple of 2597 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, 2598 according to spec */ 2599 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) 2600 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2601 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2602 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % 2603 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2604 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2605 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2606 u32 align = 4; 2607 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2608 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2609 ugeth->tx_bd_ring_offset[j] = 2610 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2611 2612 if (ugeth->tx_bd_ring_offset[j] != 0) 2613 ugeth->p_tx_bd_ring[j] = 2614 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + 2615 align) & ~(align - 1)); 2616 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2617 ugeth->tx_bd_ring_offset[j] = 2618 qe_muram_alloc(length, 2619 UCC_GETH_TX_BD_RING_ALIGNMENT); 2620 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2621 ugeth->p_tx_bd_ring[j] = 2622 (u8 __iomem *) qe_muram_addr(ugeth-> 2623 tx_bd_ring_offset[j]); 2624 } 2625 if (!ugeth->p_tx_bd_ring[j]) { 2626 if (netif_msg_ifup(ugeth)) 2627 ugeth_err 2628 ("%s: Can not allocate memory for Tx bd rings.", 2629 __func__); 2630 ucc_geth_memclean(ugeth); 2631 return -ENOMEM; 2632 } 2633 /* Zero unused end of bd ring, according to spec */ 2634 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + 2635 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, 2636 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2637 } 2638 2639 /* Allocate Rx bds */ 2640 for (j = 0; j < ug_info->numQueuesRx; j++) { 2641 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); 2642 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2643 u32 align = 4; 2644 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2645 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2646 ugeth->rx_bd_ring_offset[j] = 2647 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2648 if (ugeth->rx_bd_ring_offset[j] != 0) 2649 ugeth->p_rx_bd_ring[j] = 2650 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + 2651 align) & ~(align - 1)); 2652 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2653 ugeth->rx_bd_ring_offset[j] = 2654 qe_muram_alloc(length, 2655 UCC_GETH_RX_BD_RING_ALIGNMENT); 2656 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2657 ugeth->p_rx_bd_ring[j] = 2658 (u8 __iomem *) qe_muram_addr(ugeth-> 2659 rx_bd_ring_offset[j]); 2660 } 2661 if (!ugeth->p_rx_bd_ring[j]) { 2662 if (netif_msg_ifup(ugeth)) 2663 ugeth_err 2664 ("%s: Can not allocate memory for Rx bd rings.", 2665 __func__); 2666 ucc_geth_memclean(ugeth); 2667 return -ENOMEM; 2668 } 2669 } 2670 2671 /* Init Tx bds */ 2672 for (j = 0; j < ug_info->numQueuesTx; j++) { 2673 /* Setup the skbuff rings */ 2674 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2675 ugeth->ug_info->bdRingLenTx[j], 2676 GFP_KERNEL); 2677 2678 if (ugeth->tx_skbuff[j] == NULL) { 2679 if (netif_msg_ifup(ugeth)) 2680 ugeth_err("%s: Could not allocate tx_skbuff", 2681 __func__); 2682 ucc_geth_memclean(ugeth); 2683 return -ENOMEM; 2684 } 2685 2686 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) 2687 ugeth->tx_skbuff[j][i] = NULL; 2688 2689 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; 2690 bd = 
ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2691 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { 2692 /* clear bd buffer */ 2693 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2694 /* set bd status and length */ 2695 out_be32((u32 __iomem *)bd, 0); 2696 bd += sizeof(struct qe_bd); 2697 } 2698 bd -= sizeof(struct qe_bd); 2699 /* set bd status and length */ 2700 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ 2701 } 2702 2703 /* Init Rx bds */ 2704 for (j = 0; j < ug_info->numQueuesRx; j++) { 2705 /* Setup the skbuff rings */ 2706 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2707 ugeth->ug_info->bdRingLenRx[j], 2708 GFP_KERNEL); 2709 2710 if (ugeth->rx_skbuff[j] == NULL) { 2711 if (netif_msg_ifup(ugeth)) 2712 ugeth_err("%s: Could not allocate rx_skbuff", 2713 __func__); 2714 ucc_geth_memclean(ugeth); 2715 return -ENOMEM; 2716 } 2717 2718 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) 2719 ugeth->rx_skbuff[j][i] = NULL; 2720 2721 ugeth->skb_currx[j] = 0; 2722 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; 2723 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2724 /* set bd status and length */ 2725 out_be32((u32 __iomem *)bd, R_I); 2726 /* clear bd buffer */ 2727 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2728 bd += sizeof(struct qe_bd); 2729 } 2730 bd -= sizeof(struct qe_bd); 2731 /* set bd status and length */ 2732 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ 2733 } 2734 2735 /* 2736 * Global PRAM 2737 */ 2738 /* Tx global PRAM */ 2739 /* Allocate global tx parameter RAM page */ 2740 ugeth->tx_glbl_pram_offset = 2741 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), 2742 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2743 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { 2744 if (netif_msg_ifup(ugeth)) 2745 ugeth_err 2746 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2747 __func__); 2748 ucc_geth_memclean(ugeth); 2749 return -ENOMEM; 2750 } 2751 ugeth->p_tx_glbl_pram = 2752 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> 2753 tx_glbl_pram_offset); 2754 /* Zero out p_tx_glbl_pram */ 2755 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 2756 2757 /* Fill global PRAM */ 2758 2759 /* TQPTR */ 2760 /* Size varies with number of Tx threads */ 2761 ugeth->thread_dat_tx_offset = 2762 qe_muram_alloc(numThreadsTxNumerical * 2763 sizeof(struct ucc_geth_thread_data_tx) + 2764 32 * (numThreadsTxNumerical == 1), 2765 UCC_GETH_THREAD_DATA_ALIGNMENT); 2766 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { 2767 if (netif_msg_ifup(ugeth)) 2768 ugeth_err 2769 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2770 __func__); 2771 ucc_geth_memclean(ugeth); 2772 return -ENOMEM; 2773 } 2774 2775 ugeth->p_thread_data_tx = 2776 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> 2777 thread_dat_tx_offset); 2778 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 2779 2780 /* vtagtable */ 2781 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) 2782 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], 2783 ug_info->vtagtable[i]); 2784 2785 /* iphoffset */ 2786 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) 2787 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], 2788 ug_info->iphoffset[i]); 2789 2790 /* SQPTR */ 2791 /* Size varies with number of Tx queues */ 2792 ugeth->send_q_mem_reg_offset = 2793 qe_muram_alloc(ug_info->numQueuesTx * 2794 sizeof(struct ucc_geth_send_queue_qd), 2795 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 2796 if 
(IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { 2797 if (netif_msg_ifup(ugeth)) 2798 ugeth_err 2799 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2800 __func__); 2801 ucc_geth_memclean(ugeth); 2802 return -ENOMEM; 2803 } 2804 2805 ugeth->p_send_q_mem_reg = 2806 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> 2807 send_q_mem_reg_offset); 2808 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 2809 2810 /* Setup the table */ 2811 /* Assume BD rings are already established */ 2812 for (i = 0; i < ug_info->numQueuesTx; i++) { 2813 endOfRing = 2814 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - 2815 1) * sizeof(struct qe_bd); 2816 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2817 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2818 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); 2819 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2820 last_bd_completed_address, 2821 (u32) virt_to_phys(endOfRing)); 2822 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2823 MEM_PART_MURAM) { 2824 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2825 (u32) immrbar_virt_to_phys(ugeth-> 2826 p_tx_bd_ring[i])); 2827 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2828 last_bd_completed_address, 2829 (u32) immrbar_virt_to_phys(endOfRing)); 2830 } 2831 } 2832 2833 /* schedulerbasepointer */ 2834 2835 if (ug_info->numQueuesTx > 1) { 2836 /* scheduler exists only if more than 1 tx queue */ 2837 ugeth->scheduler_offset = 2838 qe_muram_alloc(sizeof(struct ucc_geth_scheduler), 2839 UCC_GETH_SCHEDULER_ALIGNMENT); 2840 if (IS_ERR_VALUE(ugeth->scheduler_offset)) { 2841 if (netif_msg_ifup(ugeth)) 2842 ugeth_err 2843 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2844 __func__); 2845 ucc_geth_memclean(ugeth); 2846 return -ENOMEM; 2847 } 2848 2849 ugeth->p_scheduler = 2850 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> 2851 scheduler_offset); 2852 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 2853 ugeth->scheduler_offset); 2854 /* Zero out p_scheduler */ 2855 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 2856 2857 /* Set values in scheduler */ 2858 out_be32(&ugeth->p_scheduler->mblinterval, 2859 ug_info->mblinterval); 2860 out_be16(&ugeth->p_scheduler->nortsrbytetime, 2861 ug_info->nortsrbytetime); 2862 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); 2863 out_8(&ugeth->p_scheduler->strictpriorityq, 2864 ug_info->strictpriorityq); 2865 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); 2866 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); 2867 for (i = 0; i < NUM_TX_QUEUES; i++) 2868 out_8(&ugeth->p_scheduler->weightfactor[i], 2869 ug_info->weightfactor[i]); 2870 2871 /* Set pointers to cpucount registers in scheduler */ 2872 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); 2873 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); 2874 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); 2875 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); 2876 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); 2877 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); 2878 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); 2879 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); 2880 } 2881 2882 /* schedulerbasepointer */ 2883 /* TxRMON_PTR (statistics) */ 2884 if (ug_info-> 2885 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 2886 ugeth->tx_fw_statistics_pram_offset = 2887 qe_muram_alloc(sizeof 2888 (struct 
ucc_geth_tx_firmware_statistics_pram), 2889 UCC_GETH_TX_STATISTICS_ALIGNMENT); 2890 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { 2891 if (netif_msg_ifup(ugeth)) 2892 ugeth_err 2893 ("%s: Can not allocate DPRAM memory for" 2894 " p_tx_fw_statistics_pram.", 2895 __func__); 2896 ucc_geth_memclean(ugeth); 2897 return -ENOMEM; 2898 } 2899 ugeth->p_tx_fw_statistics_pram = 2900 (struct ucc_geth_tx_firmware_statistics_pram __iomem *) 2901 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 2902 /* Zero out p_tx_fw_statistics_pram */ 2903 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, 2904 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 2905 } 2906 2907 /* temoder */ 2908 /* Already has speed set */ 2909 2910 if (ug_info->numQueuesTx > 1) 2911 temoder |= TEMODER_SCHEDULER_ENABLE; 2912 if (ug_info->ipCheckSumGenerate) 2913 temoder |= TEMODER_IP_CHECKSUM_GENERATE; 2914 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); 2915 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); 2916 2917 test = in_be16(&ugeth->p_tx_glbl_pram->temoder); 2918 2919 /* Function code register value to be used later */ 2920 function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; 2921 /* Required for QE */ 2922 2923 /* function code register */ 2924 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); 2925 2926 /* Rx global PRAM */ 2927 /* Allocate global rx parameter RAM page */ 2928 ugeth->rx_glbl_pram_offset = 2929 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), 2930 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 2931 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { 2932 if (netif_msg_ifup(ugeth)) 2933 ugeth_err 2934 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2935 __func__); 2936 ucc_geth_memclean(ugeth); 2937 return -ENOMEM; 2938 } 2939 ugeth->p_rx_glbl_pram = 2940 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> 2941 rx_glbl_pram_offset); 2942 /* Zero out p_rx_glbl_pram */ 2943 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 2944 2945 /* Fill global PRAM */ 2946 2947 /* RQPTR */ 2948 /* Size varies with number of Rx threads */ 2949 ugeth->thread_dat_rx_offset = 2950 qe_muram_alloc(numThreadsRxNumerical * 2951 sizeof(struct ucc_geth_thread_data_rx), 2952 UCC_GETH_THREAD_DATA_ALIGNMENT); 2953 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { 2954 if (netif_msg_ifup(ugeth)) 2955 ugeth_err 2956 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2957 __func__); 2958 ucc_geth_memclean(ugeth); 2959 return -ENOMEM; 2960 } 2961 2962 ugeth->p_thread_data_rx = 2963 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> 2964 thread_dat_rx_offset); 2965 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 2966 2967 /* typeorlen */ 2968 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); 2969 2970 /* rxrmonbaseptr (statistics) */ 2971 if (ug_info-> 2972 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 2973 ugeth->rx_fw_statistics_pram_offset = 2974 qe_muram_alloc(sizeof 2975 (struct ucc_geth_rx_firmware_statistics_pram), 2976 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2977 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2978 if (netif_msg_ifup(ugeth)) 2979 ugeth_err 2980 ("%s: Can not allocate DPRAM memory for" 2981 " p_rx_fw_statistics_pram.", __func__); 2982 ucc_geth_memclean(ugeth); 2983 return -ENOMEM; 2984 } 2985 ugeth->p_rx_fw_statistics_pram = 2986 (struct ucc_geth_rx_firmware_statistics_pram __iomem *) 2987 
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 2988 /* Zero out p_rx_fw_statistics_pram */ 2989 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, 2990 sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 2991 } 2992 2993 /* intCoalescingPtr */ 2994 2995 /* Size varies with number of Rx queues */ 2996 ugeth->rx_irq_coalescing_tbl_offset = 2997 qe_muram_alloc(ug_info->numQueuesRx * 2998 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) 2999 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 3000 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 3001 if (netif_msg_ifup(ugeth)) 3002 ugeth_err 3003 ("%s: Can not allocate DPRAM memory for" 3004 " p_rx_irq_coalescing_tbl.", __func__); 3005 ucc_geth_memclean(ugeth); 3006 return -ENOMEM; 3007 } 3008 3009 ugeth->p_rx_irq_coalescing_tbl = 3010 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) 3011 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 3012 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 3013 ugeth->rx_irq_coalescing_tbl_offset); 3014 3015 /* Fill interrupt coalescing table */ 3016 for (i = 0; i < ug_info->numQueuesRx; i++) { 3017 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 3018 interruptcoalescingmaxvalue, 3019 ug_info->interruptcoalescingmaxvalue[i]); 3020 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 3021 interruptcoalescingcounter, 3022 ug_info->interruptcoalescingmaxvalue[i]); 3023 } 3024 3025 /* MRBLR */ 3026 init_max_rx_buff_len(uf_info->max_rx_buf_length, 3027 &ugeth->p_rx_glbl_pram->mrblr); 3028 /* MFLR */ 3029 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); 3030 /* MINFLR */ 3031 init_min_frame_len(ug_info->minFrameLength, 3032 &ugeth->p_rx_glbl_pram->minflr, 3033 &ugeth->p_rx_glbl_pram->mrblr); 3034 /* MAXD1 */ 3035 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); 3036 /* MAXD2 */ 3037 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); 3038 3039 /* l2qt */ 3040 l2qt = 0; 3041 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) 3042 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); 3043 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); 3044 3045 /* l3qt */ 3046 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { 3047 l3qt = 0; 3048 for (i = 0; i < 8; i++) 3049 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); 3050 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); 3051 } 3052 3053 /* vlantype */ 3054 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); 3055 3056 /* vlantci */ 3057 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); 3058 3059 /* ecamptr */ 3060 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); 3061 3062 /* RBDQPTR */ 3063 /* Size varies with number of Rx queues */ 3064 ugeth->rx_bd_qs_tbl_offset = 3065 qe_muram_alloc(ug_info->numQueuesRx * 3066 (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3067 sizeof(struct ucc_geth_rx_prefetched_bds)), 3068 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 3069 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 3070 if (netif_msg_ifup(ugeth)) 3071 ugeth_err 3072 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 3073 __func__); 3074 ucc_geth_memclean(ugeth); 3075 return -ENOMEM; 3076 } 3077 3078 ugeth->p_rx_bd_qs_tbl = 3079 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> 3080 rx_bd_qs_tbl_offset); 3081 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 3082 /* Zero out p_rx_bd_qs_tbl */ 3083 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, 3084 0, 3085 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 
3086 sizeof(struct ucc_geth_rx_prefetched_bds))); 3087 3088 /* Setup the table */ 3089 /* Assume BD rings are already established */ 3090 for (i = 0; i < ug_info->numQueuesRx; i++) { 3091 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 3092 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 3093 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); 3094 } else if (ugeth->ug_info->uf_info.bd_mem_part == 3095 MEM_PART_MURAM) { 3096 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 3097 (u32) immrbar_virt_to_phys(ugeth-> 3098 p_rx_bd_ring[i])); 3099 } 3100 /* rest of fields handled by QE */ 3101 } 3102 3103 /* remoder */ 3104 /* Already has speed set */ 3105 3106 if (ugeth->rx_extended_features) 3107 remoder |= REMODER_RX_EXTENDED_FEATURES; 3108 if (ug_info->rxExtendedFiltering) 3109 remoder |= REMODER_RX_EXTENDED_FILTERING; 3110 if (ug_info->dynamicMaxFrameLength) 3111 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; 3112 if (ug_info->dynamicMinFrameLength) 3113 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; 3114 remoder |= 3115 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; 3116 remoder |= 3117 ug_info-> 3118 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; 3119 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; 3120 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); 3121 if (ug_info->ipCheckSumCheck) 3122 remoder |= REMODER_IP_CHECKSUM_CHECK; 3123 if (ug_info->ipAddressAlignment) 3124 remoder |= REMODER_IP_ADDRESS_ALIGNMENT; 3125 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); 3126 3127 /* Note that this function must be called */ 3128 /* ONLY AFTER p_tx_fw_statistics_pram */ 3129 /* andp_UccGethRxFirmwareStatisticsPram are allocated ! */ 3130 init_firmware_statistics_gathering_mode((ug_info-> 3131 statisticsMode & 3132 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), 3133 (ug_info->statisticsMode & 3134 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), 3135 &ugeth->p_tx_glbl_pram->txrmonbaseptr, 3136 ugeth->tx_fw_statistics_pram_offset, 3137 &ugeth->p_rx_glbl_pram->rxrmonbaseptr, 3138 ugeth->rx_fw_statistics_pram_offset, 3139 &ugeth->p_tx_glbl_pram->temoder, 3140 &ugeth->p_rx_glbl_pram->remoder); 3141 3142 /* function code register */ 3143 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); 3144 3145 /* initialize extended filtering */ 3146 if (ug_info->rxExtendedFiltering) { 3147 if (!ug_info->extendedFilteringChainPointer) { 3148 if (netif_msg_ifup(ugeth)) 3149 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 3150 __func__); 3151 ucc_geth_memclean(ugeth); 3152 return -EINVAL; 3153 } 3154 3155 /* Allocate memory for extended filtering Mode Global 3156 Parameters */ 3157 ugeth->exf_glbl_param_offset = 3158 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 3159 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 3160 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 3161 if (netif_msg_ifup(ugeth)) 3162 ugeth_err 3163 ("%s: Can not allocate DPRAM memory for" 3164 " p_exf_glbl_param.", __func__); 3165 ucc_geth_memclean(ugeth); 3166 return -ENOMEM; 3167 } 3168 3169 ugeth->p_exf_glbl_param = 3170 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> 3171 exf_glbl_param_offset); 3172 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 3173 ugeth->exf_glbl_param_offset); 3174 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, 3175 (u32) ug_info->extendedFilteringChainPointer); 3176 3177 } else { /* initialize 82xx style address filtering */ 3178 3179 /* Init individual address recognition 
registers to disabled */ 3180 3181 for (j = 0; j < NUM_OF_PADDRS; j++) 3182 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); 3183 3184 p_82xx_addr_filt = 3185 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 3186 p_rx_glbl_pram->addressfiltering; 3187 3188 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3189 ENET_ADDR_TYPE_GROUP); 3190 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3191 ENET_ADDR_TYPE_INDIVIDUAL); 3192 } 3193 3194 /* 3195 * Initialize UCC at QE level 3196 */ 3197 3198 command = QE_INIT_TX_RX; 3199 3200 /* Allocate shadow InitEnet command parameter structure. 3201 * This is needed because after the InitEnet command is executed, 3202 * the structure in DPRAM is released, because DPRAM is a premium 3203 * resource. 3204 * This shadow structure keeps a copy of what was done so that the 3205 * allocated resources can be released when the channel is freed. 3206 */ 3207 if (!(ugeth->p_init_enet_param_shadow = 3208 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { 3209 if (netif_msg_ifup(ugeth)) 3210 ugeth_err 3211 ("%s: Can not allocate memory for" 3212 " p_UccInitEnetParamShadows.", __func__); 3213 ucc_geth_memclean(ugeth); 3214 return -ENOMEM; 3215 } 3216 /* Zero out *p_init_enet_param_shadow */ 3217 memset((char *)ugeth->p_init_enet_param_shadow, 3218 0, sizeof(struct ucc_geth_init_pram)); 3219 3220 /* Fill shadow InitEnet command parameter structure */ 3221 3222 ugeth->p_init_enet_param_shadow->resinit1 = 3223 ENET_INIT_PARAM_MAGIC_RES_INIT1; 3224 ugeth->p_init_enet_param_shadow->resinit2 = 3225 ENET_INIT_PARAM_MAGIC_RES_INIT2; 3226 ugeth->p_init_enet_param_shadow->resinit3 = 3227 ENET_INIT_PARAM_MAGIC_RES_INIT3; 3228 ugeth->p_init_enet_param_shadow->resinit4 = 3229 ENET_INIT_PARAM_MAGIC_RES_INIT4; 3230 ugeth->p_init_enet_param_shadow->resinit5 = 3231 ENET_INIT_PARAM_MAGIC_RES_INIT5; 3232 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3233 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; 3234 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3235 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; 3236 3237 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3238 ugeth->rx_glbl_pram_offset | ug_info->riscRx; 3239 if ((ug_info->largestexternallookupkeysize != 3240 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) 3241 && (ug_info->largestexternallookupkeysize != 3242 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3243 && (ug_info->largestexternallookupkeysize != 3244 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3245 if (netif_msg_ifup(ugeth)) 3246 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3247 __func__); 3248 ucc_geth_memclean(ugeth); 3249 return -EINVAL; 3250 } 3251 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 3252 ug_info->largestexternallookupkeysize; 3253 size = sizeof(struct ucc_geth_thread_rx_pram); 3254 if (ug_info->rxExtendedFiltering) { 3255 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 3256 if (ug_info->largestexternallookupkeysize == 3257 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3258 size += 3259 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 3260 if (ug_info->largestexternallookupkeysize == 3261 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 3262 size += 3263 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 3264 } 3265 3266 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> 3267 p_init_enet_param_shadow->rxthread[0]), 3268 (u8) (numThreadsRxNumerical + 1) 3269 /* Rx needs one extra for terminator */ 3270 , size, 
UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, 3271 ug_info->riscRx, 1)) != 0) { 3272 if (netif_msg_ifup(ugeth)) 3273 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3274 __func__); 3275 ucc_geth_memclean(ugeth); 3276 return ret_val; 3277 } 3278 3279 ugeth->p_init_enet_param_shadow->txglobal = 3280 ugeth->tx_glbl_pram_offset | ug_info->riscTx; 3281 if ((ret_val = 3282 fill_init_enet_entries(ugeth, 3283 &(ugeth->p_init_enet_param_shadow-> 3284 txthread[0]), numThreadsTxNumerical, 3285 sizeof(struct ucc_geth_thread_tx_pram), 3286 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3287 ug_info->riscTx, 0)) != 0) { 3288 if (netif_msg_ifup(ugeth)) 3289 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3290 __func__); 3291 ucc_geth_memclean(ugeth); 3292 return ret_val; 3293 } 3294 3295 /* Load Rx bds with buffers */ 3296 for (i = 0; i < ug_info->numQueuesRx; i++) { 3297 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3298 if (netif_msg_ifup(ugeth)) 3299 ugeth_err("%s: Can not fill Rx bds with buffers.", 3300 __func__); 3301 ucc_geth_memclean(ugeth); 3302 return ret_val; 3303 } 3304 } 3305 3306 /* Allocate InitEnet command parameter structure */ 3307 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3308 if (IS_ERR_VALUE(init_enet_pram_offset)) { 3309 if (netif_msg_ifup(ugeth)) 3310 ugeth_err 3311 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3312 __func__); 3313 ucc_geth_memclean(ugeth); 3314 return -ENOMEM; 3315 } 3316 p_init_enet_pram = 3317 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); 3318 3319 /* Copy shadow InitEnet command parameter structure into PRAM */ 3320 out_8(&p_init_enet_pram->resinit1, 3321 ugeth->p_init_enet_param_shadow->resinit1); 3322 out_8(&p_init_enet_pram->resinit2, 3323 ugeth->p_init_enet_param_shadow->resinit2); 3324 out_8(&p_init_enet_pram->resinit3, 3325 ugeth->p_init_enet_param_shadow->resinit3); 3326 out_8(&p_init_enet_pram->resinit4, 3327 ugeth->p_init_enet_param_shadow->resinit4); 3328 out_be16(&p_init_enet_pram->resinit5, 3329 ugeth->p_init_enet_param_shadow->resinit5); 3330 out_8(&p_init_enet_pram->largestexternallookupkeysize, 3331 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); 3332 out_be32(&p_init_enet_pram->rgftgfrxglobal, 3333 ugeth->p_init_enet_param_shadow->rgftgfrxglobal); 3334 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) 3335 out_be32(&p_init_enet_pram->rxthread[i], 3336 ugeth->p_init_enet_param_shadow->rxthread[i]); 3337 out_be32(&p_init_enet_pram->txglobal, 3338 ugeth->p_init_enet_param_shadow->txglobal); 3339 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) 3340 out_be32(&p_init_enet_pram->txthread[i], 3341 ugeth->p_init_enet_param_shadow->txthread[i]); 3342 3343 /* Issue QE command */ 3344 cecr_subblock = 3345 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 3346 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 3347 init_enet_pram_offset); 3348 3349 /* Free InitEnet command parameter */ 3350 qe_muram_free(init_enet_pram_offset); 3351 3352 return 0; 3353} 3354 3355/* ucc_geth_timeout gets called when a packet has not been 3356 * transmitted after a set amount of time. 3357 * For now, assume that clearing out all the structures, and 3358 * starting over will fix the problem. 
*/ 3359static void ucc_geth_timeout(struct net_device *dev) 3360{ 3361 struct ucc_geth_private *ugeth = netdev_priv(dev); 3362 3363 ugeth_vdbg("%s: IN", __func__); 3364 3365 dev->stats.tx_errors++; 3366 3367 ugeth_dump_regs(ugeth); 3368 3369 if (dev->flags & IFF_UP) { 3370 ucc_geth_stop(ugeth); 3371 ucc_geth_startup(ugeth); 3372 } 3373 3374 netif_tx_schedule_all(dev); 3375} 3376 3377/* This is called by the kernel when a frame is ready for transmission. */ 3378/* It is pointed to by the dev->hard_start_xmit function pointer */ 3379static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 3380{ 3381 struct ucc_geth_private *ugeth = netdev_priv(dev); 3382#ifdef CONFIG_UGETH_TX_ON_DEMAND 3383 struct ucc_fast_private *uccf; 3384#endif 3385 u8 __iomem *bd; /* BD pointer */ 3386 u32 bd_status; 3387 u8 txQ = 0; 3388 3389 ugeth_vdbg("%s: IN", __func__); 3390 3391 spin_lock_irq(&ugeth->lock); 3392 3393 dev->stats.tx_bytes += skb->len; 3394 3395 /* Start from the next BD that should be filled */ 3396 bd = ugeth->txBd[txQ]; 3397 bd_status = in_be32((u32 __iomem *)bd); 3398 /* Save the skb pointer so we can free it later */ 3399 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; 3400 3401 /* Update the current skb pointer (wrapping if this was the last) */ 3402 ugeth->skb_curtx[txQ] = 3403 (ugeth->skb_curtx[txQ] + 3404 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3405 3406 /* set up the buffer descriptor */ 3407 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3408 dma_map_single(&ugeth->dev->dev, skb->data, 3409 skb->len, DMA_TO_DEVICE)); 3410 3411 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3412 3413 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; 3414 3415 /* set bd status and length */ 3416 out_be32((u32 __iomem *)bd, bd_status); 3417 3418 dev->trans_start = jiffies; 3419 3420 /* Move to next BD in the ring */ 3421 if (!(bd_status & T_W)) 3422 bd += sizeof(struct qe_bd); 3423 else 3424 bd = ugeth->p_tx_bd_ring[txQ]; 3425 3426 /* If the next BD still needs to be cleaned up, then the bds 3427 are full. We need to tell the kernel to stop sending us stuff. */ 3428 if (bd == ugeth->confBd[txQ]) { 3429 if (!netif_queue_stopped(dev)) 3430 netif_stop_queue(dev); 3431 } 3432 3433 ugeth->txBd[txQ] = bd; 3434 3435 if (ugeth->p_scheduler) { 3436 ugeth->cpucount[txQ]++; 3437 /* Indicate to QE that there are more Tx bds ready for 3438 transmission */ 3439 /* This is done by writing a running counter of the bd 3440 count to the scheduler PRAM. 
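
The transmit path above keeps two cursors per queue: txBd, the next descriptor to fill, and confBd, the oldest descriptor still awaiting confirmation, with the matching skb indices wrapped by TX_RING_MOD_MASK(). The queue is stopped as soon as the freshly advanced fill cursor lands on confBd, i.e. when the ring is full. A stand-alone sketch of that cursor arithmetic (plain modulo is used here purely for illustration):

	/* Illustrative cursor handling for a ring of 'ring_len' descriptors:
	 * 'fill' is where the next frame goes (txBd), 'conf' is the oldest
	 * entry still awaiting completion (confBd). */
	struct tx_cursors {
		unsigned int fill;
		unsigned int conf;
		unsigned int ring_len;
	};

	static void tx_post_one(struct tx_cursors *c, int *stop_queue)
	{
		/* ... the descriptor at c->fill has just been filled ... */
		c->fill = (c->fill + 1) % c->ring_len;
		/* Stop the queue once the next free slot is the oldest
		 * unconfirmed one - the ring is full. */
		if (c->fill == c->conf)
			*stop_queue = 1;
	}
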
*/ 3441 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); 3442 } 3443 3444#ifdef CONFIG_UGETH_TX_ON_DEMAND 3445 uccf = ugeth->uccf; 3446 out_be16(uccf->p_utodr, UCC_FAST_TOD); 3447#endif 3448 spin_unlock_irq(&ugeth->lock); 3449 3450 return 0; 3451} 3452 3453static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3454{ 3455 struct sk_buff *skb; 3456 u8 __iomem *bd; 3457 u16 length, howmany = 0; 3458 u32 bd_status; 3459 u8 *bdBuffer; 3460 struct net_device *dev; 3461 3462 ugeth_vdbg("%s: IN", __func__); 3463 3464 dev = ugeth->dev; 3465 3466 /* collect received buffers */ 3467 bd = ugeth->rxBd[rxQ]; 3468 3469 bd_status = in_be32((u32 __iomem *)bd); 3470 3471 /* while there are received buffers and BD is full (~R_E) */ 3472 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { 3473 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); 3474 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); 3475 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; 3476 3477 /* determine whether buffer is first, last, first and last 3478 (single buffer frame) or middle (not first and not last) */ 3479 if (!skb || 3480 (!(bd_status & (R_F | R_L))) || 3481 (bd_status & R_ERRORS_FATAL)) { 3482 if (netif_msg_rx_err(ugeth)) 3483 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3484 __func__, __LINE__, (u32) skb); 3485 if (skb) 3486 dev_kfree_skb_any(skb); 3487 3488 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3489 dev->stats.rx_dropped++; 3490 } else { 3491 dev->stats.rx_packets++; 3492 howmany++; 3493 3494 /* Prep the skb for the packet */ 3495 skb_put(skb, length); 3496 3497 /* Tell the skb what kind of packet this is */ 3498 skb->protocol = eth_type_trans(skb, ugeth->dev); 3499 3500 dev->stats.rx_bytes += length; 3501 /* Send the packet up the stack */ 3502 netif_receive_skb(skb); 3503 } 3504 3505 ugeth->dev->last_rx = jiffies; 3506 3507 skb = get_new_skb(ugeth, bd); 3508 if (!skb) { 3509 if (netif_msg_rx_err(ugeth)) 3510 ugeth_warn("%s: No Rx Data Buffer", __func__); 3511 dev->stats.rx_dropped++; 3512 break; 3513 } 3514 3515 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; 3516 3517 /* update to point at the next skb */ 3518 ugeth->skb_currx[rxQ] = 3519 (ugeth->skb_currx[rxQ] + 3520 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); 3521 3522 if (bd_status & R_W) 3523 bd = ugeth->p_rx_bd_ring[rxQ]; 3524 else 3525 bd += sizeof(struct qe_bd); 3526 3527 bd_status = in_be32((u32 __iomem *)bd); 3528 } 3529 3530 ugeth->rxBd[rxQ] = bd; 3531 return howmany; 3532} 3533 3534static int ucc_geth_tx(struct net_device *dev, u8 txQ) 3535{ 3536 /* Start from the next BD that should be filled */ 3537 struct ucc_geth_private *ugeth = netdev_priv(dev); 3538 u8 __iomem *bd; /* BD pointer */ 3539 u32 bd_status; 3540 3541 bd = ugeth->confBd[txQ]; 3542 bd_status = in_be32((u32 __iomem *)bd); 3543 3544 /* Normal processing. */ 3545 while ((bd_status & T_R) == 0) { 3546 /* BD contains already transmitted buffer. 
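
Descriptor ownership is a simple handshake: ucc_geth_start_xmit() above posts a frame by writing the length plus the T_R, T_I and T_L bits while preserving only the wrap bit, and the controller clears T_R once the frame has gone out, which is exactly what the reclaim loop below tests. A reduced, stand-alone sketch with stand-in bit values:

	#include <stdint.h>

	/* Stand-in values; the driver's real masks are T_R, T_W and
	 * BD_LENGTH_MASK from ucc_geth.h. */
	#define BD_READY	0x80000000u
	#define BD_WRAP		0x20000000u
	#define BD_LEN_MASK	0x0000ffffu

	/* Posting: keep only the wrap bit, set ready, write the length. */
	static uint32_t bd_post(uint32_t status, uint16_t len)
	{
		return (status & BD_WRAP) | BD_READY | (len & BD_LEN_MASK);
	}

	/* Reclaiming: a descriptor may be recycled only after the controller
	 * has cleared the ready bit again. */
	static int bd_completed(uint32_t status)
	{
		return !(status & BD_READY);
	}
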
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
			break;

		dev->stats.tx_packets++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(ugeth->
				  tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
		ugeth->skb_dirtytx[txQ] =
		    (ugeth->skb_dirtytx[txQ] +
		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W))
			bd += sizeof(struct qe_bd);
		else
			bd = ugeth->p_tx_bd_ring[txQ];
		bd_status = in_be32((u32 __iomem *)bd);
	}
	ugeth->confBd[txQ] = bd;
	return 0;
}

static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
	struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
	struct net_device *dev = ugeth->dev;
	struct ucc_geth_info *ug_info;
	int howmany, i;

	ug_info = ugeth->ug_info;

	howmany = 0;
	for (i = 0; i < ug_info->numQueuesRx; i++)
		howmany += ucc_geth_rx(ugeth, i, budget - howmany);

	if (howmany < budget) {
		struct ucc_fast_private *uccf;
		u32 uccm;

		netif_rx_complete(dev, napi);
		uccf = ugeth->uccf;
		uccm = in_be32(uccf->p_uccm);
		uccm |= UCCE_RX_EVENTS;
		out_be32(uccf->p_uccm, uccm);
	}

	return howmany;
}

static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
	struct net_device *dev = info;
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_fast_private *uccf;
	struct ucc_geth_info *ug_info;
	register u32 ucce;
	register u32 uccm;
	register u32 tx_mask;
	u8 i;

	ugeth_vdbg("%s: IN", __func__);

	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;

	/* read and clear events */
	ucce = (u32) in_be32(uccf->p_ucce);
	uccm = (u32) in_be32(uccf->p_uccm);
	ucce &= uccm;
	out_be32(uccf->p_ucce, ucce);

	/* check for receive events that require processing */
	if (ucce & UCCE_RX_EVENTS) {
		if (netif_rx_schedule_prep(dev, &ugeth->napi)) {
			uccm &= ~UCCE_RX_EVENTS;
			out_be32(uccf->p_uccm, uccm);
			__netif_rx_schedule(dev, &ugeth->napi);
		}
	}

	/* Tx event processing */
	if (ucce & UCCE_TX_EVENTS) {
		spin_lock(&ugeth->lock);
		tx_mask = UCCE_TXBF_SINGLE_MASK;
		for (i = 0; i < ug_info->numQueuesTx; i++) {
			if (ucce & tx_mask)
				ucc_geth_tx(dev, i);
			ucce &= ~tx_mask;
			tx_mask <<= 1;
		}
		spin_unlock(&ugeth->lock);
	}

	/* Errors and other events */
	if (ucce & UCCE_OTHER) {
		if (ucce & UCCE_BSY)
			dev->stats.rx_errors++;
		if (ucce & UCCE_TXE)
			dev->stats.tx_errors++;
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ucc_netpoll(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int irq = ugeth->ug_info->uf_info.irq;

	disable_irq(irq);
	ucc_geth_irq_handler(irq, dev);
	enable_irq(irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	int err;

	ugeth_vdbg("%s: IN", __func__);

	/* Test station address */
	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Multicast address used for station address"
				  " - is this what you wanted?", __func__);
		return -EINVAL;
	}

	err = ucc_struct_init(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure internal struct, aborting.",
				  dev->name);
		return err;
	}

	napi_enable(&ugeth->napi);

	err = ucc_geth_startup(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto out_err;
	}

	err = adjust_enet_interface(ugeth);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot configure net device, aborting.",
				  dev->name);
		goto out_err;
	}

	/* Set MACSTNADDR1, MACSTNADDR2 */
	/* For more details see the hardware spec. */
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);

	err = init_phy(dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot initialize PHY, aborting.",
				  dev->name);
		goto out_err;
	}

	phy_start(ugeth->phydev);

	err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
			  0, "UCC Geth", dev);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot get IRQ for net device, aborting.",
				  dev->name);
		ucc_geth_stop(ugeth);
		goto out_err;
	}

	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	if (err) {
		if (netif_msg_ifup(ugeth))
			ugeth_err("%s: Cannot enable net device, aborting.",
				  dev->name);
		ucc_geth_stop(ugeth);
		goto out_err;
	}

	netif_start_queue(dev);

	return err;

out_err:
	napi_disable(&ugeth->napi);

	return err;
}

/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __func__);

	napi_disable(&ugeth->napi);

	ucc_geth_stop(ugeth);

	phy_disconnect(ugeth->phydev);
	ugeth->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
	if (strcasecmp(phy_connection_type, "mii") == 0)
		return PHY_INTERFACE_MODE_MII;
	if (strcasecmp(phy_connection_type, "gmii") == 0)
		return PHY_INTERFACE_MODE_GMII;
	if (strcasecmp(phy_connection_type, "tbi") == 0)
		return PHY_INTERFACE_MODE_TBI;
	if (strcasecmp(phy_connection_type, "rmii") == 0)
		return PHY_INTERFACE_MODE_RMII;
	if (strcasecmp(phy_connection_type, "rgmii") == 0)
		return PHY_INTERFACE_MODE_RGMII;
	if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
		return PHY_INTERFACE_MODE_RGMII_ID;
	if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
		return PHY_INTERFACE_MODE_RGMII_TXID;
	if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
		return PHY_INTERFACE_MODE_RGMII_RXID;
	if (strcasecmp(phy_connection_type, "rtbi") == 0)
		return PHY_INTERFACE_MODE_RTBI;

	return PHY_INTERFACE_MODE_MII;
}

static int ucc_geth_probe(struct of_device *ofdev, const struct of_device_id *match)
{
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct device_node *mdio;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	struct resource res;
	struct device_node *phy;
	int err, ucc_num, max_speed = 0;
	const phandle *ph;
	const u32 *fixed_link;
	const unsigned int *prop;
	const char *sprop;
	const void *mac_addr;
	phy_interface_t phy_interface;
	static const int enet_to_speed[] = {
		SPEED_10, SPEED_10, SPEED_10,
		SPEED_100, SPEED_100, SPEED_100,
		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
	};
	static const phy_interface_t enet_to_phy_interface[] = {
		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
	};

	ugeth_vdbg("%s: IN", __func__);

	prop = of_get_property(np, "cell-index", NULL);
	if (!prop) {
		prop = of_get_property(np, "device-id", NULL);
		if (!prop)
			return -ENODEV;
	}

	ucc_num = *prop - 1;
	if ((ucc_num < 0) || (ucc_num > 7))
		return -ENODEV;

	ug_info = &ugeth_info[ucc_num];
	if (ug_info == NULL) {
		if (netif_msg_probe(&debug))
			ugeth_err("%s: [%d] Missing additional data!",
				  __func__, ucc_num);
		return -ENODEV;
	}

	ug_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.rx_clock > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "rx-clock", NULL);
		if (!prop) {
			/* If both rx-clock-name and rx-clock are missing,
			   we want to tell people to use rx-clock-name. */
			printk(KERN_ERR
			       "ucc_geth: missing rx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid rx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.rx_clock = *prop;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.tx_clock > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "tx-clock", NULL);
		if (!prop) {
			printk(KERN_ERR
			       "ucc_geth: missing tx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
			       "ucc_geth: invalid tx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.tx_clock = *prop;
	}

	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;

	ug_info->uf_info.regs = res.start;
	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
	fixed_link = of_get_property(np, "fixed-link", NULL);
	if (fixed_link) {
		snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0");
		ug_info->phy_address = fixed_link[0];
		phy = NULL;
	} else {
		ph = of_get_property(np, "phy-handle", NULL);
		phy = of_find_node_by_phandle(*ph);

		if (phy == NULL)
			return -ENODEV;

		/* set the PHY address */
		prop = of_get_property(phy, "reg", NULL);
		if (prop == NULL)
			return -1;
		ug_info->phy_address = *prop;

		/* Set the bus id */
		mdio = of_get_parent(phy);

		if (mdio == NULL)
			return -1;

		err = of_address_to_resource(mdio, 0, &res);
		of_node_put(mdio);

		if (err)
			return -1;

		snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start);
	}

	/* get the phy interface type, or default to MII */
	prop = of_get_property(np, "phy-connection-type", NULL);
	if (!prop) {
		/* handle interface property present in old trees */
		prop = of_get_property(phy, "interface", NULL);
		if (prop != NULL) {
			phy_interface = enet_to_phy_interface[*prop];
			max_speed = enet_to_speed[*prop];
		} else
			phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		phy_interface = to_phy_interface((const char *)prop);
	}

	/* get speed, or derive from PHY interface */
	if (max_speed == 0)
		switch (phy_interface) {
		case PHY_INTERFACE_MODE_GMII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_TBI:
		case PHY_INTERFACE_MODE_RTBI:
			max_speed = SPEED_1000;
			break;
		default:
			max_speed = SPEED_100;
			break;
		}

	if (max_speed == SPEED_1000) {
		/* configure muram FIFOs for gigabit operation */
		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
		ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
		ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
	}
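
	/*
	 * Editorial illustration, not part of the original driver: a sketch
	 * of the device tree node this probe routine reads.  The property
	 * names are the ones looked up in this function ("cell-index" or
	 * "device-id", "rx-clock-name"/"rx-clock", "tx-clock-name"/"tx-clock",
	 * "reg", "interrupts", "phy-handle" or "fixed-link",
	 * "phy-connection-type", plus the MAC address consumed by
	 * of_get_mac_address() below); the unit address, register range,
	 * interrupt specifier, clock names and PHY phandle are hypothetical
	 * placeholders, not values taken from any particular board.
	 *
	 *	ucc@2000 {
	 *		device_type = "network";
	 *		compatible = "ucc_geth";
	 *		cell-index = <1>;
	 *		reg = <0x2000 0x200>;
	 *		interrupts = <32 0x8>;
	 *		rx-clock-name = "none";
	 *		tx-clock-name = "clk9";
	 *		local-mac-address = [ 00 00 00 00 00 00 ];
	 *		phy-connection-type = "rgmii-id";
	 *		phy-handle = <&phy0>;
	 *	};
	 */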

	if (netif_msg_probe(&debug))
		printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
		       ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
		       ug_info->uf_info.irq);

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));

	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	/* Create CQs for hash tables */
	INIT_LIST_HEAD(&ugeth->group_hash_q);
	INIT_LIST_HEAD(&ugeth->ind_hash_q);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the UCC register region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	uec_set_ethtool_ops(dev);
	dev->open = ucc_geth_open;
	dev->hard_start_xmit = ucc_geth_start_xmit;
	dev->tx_timeout = ucc_geth_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = ucc_netpoll;
#endif
	dev->stop = ucc_geth_close;
//	dev->change_mtu = ucc_geth_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = ucc_geth_set_multi;

	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
	ugeth->phy_interface = phy_interface;
	ugeth->max_speed = max_speed;

	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Cannot register net device, aborting.",
				  dev->name);
		free_netdev(dev);
		return err;
	}

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, 6);

	ugeth->ug_info = ug_info;
	ugeth->dev = dev;

	return 0;
}

static int ucc_geth_remove(struct of_device *ofdev)
{
	struct device *device = &ofdev->dev;
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	unregister_netdev(dev);
	free_netdev(dev);
	ucc_geth_memclean(ugeth);
	dev_set_drvdata(device, NULL);

	return 0;
}

static struct of_device_id ucc_geth_match[] = {
	{
		.type = "network",
		.compatible = "ucc_geth",
	},
	{},
};

MODULE_DEVICE_TABLE(of, ucc_geth_match);

static struct of_platform_driver ucc_geth_driver = {
	.name		= DRV_NAME,
	.match_table	= ucc_geth_match,
	.probe		= ucc_geth_probe,
	.remove		= ucc_geth_remove,
};

static int __init ucc_geth_init(void)
{
	int i, ret;

	ret = uec_mdio_init();

	if (ret)
		return ret;

	if (netif_msg_drv(&debug))
		printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	ret = of_register_platform_driver(&ucc_geth_driver);

	if (ret)
		uec_mdio_exit();

	return ret;
}

static void __exit ucc_geth_exit(void)
{
	of_unregister_platform_driver(&ucc_geth_driver);
	uec_mdio_exit();
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");