Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.33-rc7 3999 lines 122 kB view raw
1/* 2 * Copyright (C) 2006-2009 Freescale Semicondutor, Inc. All rights reserved. 3 * 4 * Author: Shlomi Gridish <gridish@freescale.com> 5 * Li Yang <leoli@freescale.com> 6 * 7 * Description: 8 * QE UCC Gigabit Ethernet Driver 9 * 10 * This program is free software; you can redistribute it and/or modify it 11 * under the terms of the GNU General Public License as published by the 12 * Free Software Foundation; either version 2 of the License, or (at your 13 * option) any later version. 14 */ 15#include <linux/kernel.h> 16#include <linux/init.h> 17#include <linux/errno.h> 18#include <linux/slab.h> 19#include <linux/stddef.h> 20#include <linux/interrupt.h> 21#include <linux/netdevice.h> 22#include <linux/etherdevice.h> 23#include <linux/skbuff.h> 24#include <linux/spinlock.h> 25#include <linux/mm.h> 26#include <linux/dma-mapping.h> 27#include <linux/mii.h> 28#include <linux/phy.h> 29#include <linux/workqueue.h> 30#include <linux/of_mdio.h> 31#include <linux/of_platform.h> 32 33#include <asm/uaccess.h> 34#include <asm/irq.h> 35#include <asm/io.h> 36#include <asm/immap_qe.h> 37#include <asm/qe.h> 38#include <asm/ucc.h> 39#include <asm/ucc_fast.h> 40 41#include "ucc_geth.h" 42#include "fsl_pq_mdio.h" 43 44#undef DEBUG 45 46#define ugeth_printk(level, format, arg...) \ 47 printk(level format "\n", ## arg) 48 49#define ugeth_dbg(format, arg...) \ 50 ugeth_printk(KERN_DEBUG , format , ## arg) 51#define ugeth_err(format, arg...) \ 52 ugeth_printk(KERN_ERR , format , ## arg) 53#define ugeth_info(format, arg...) \ 54 ugeth_printk(KERN_INFO , format , ## arg) 55#define ugeth_warn(format, arg...) \ 56 ugeth_printk(KERN_WARNING , format , ## arg) 57 58#ifdef UGETH_VERBOSE_DEBUG 59#define ugeth_vdbg ugeth_dbg 60#else 61#define ugeth_vdbg(fmt, args...) do { } while (0) 62#endif /* UGETH_VERBOSE_DEBUG */ 63#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 64 65 66static DEFINE_SPINLOCK(ugeth_lock); 67 68static struct { 69 u32 msg_enable; 70} debug = { -1 }; 71 72module_param_named(debug, debug.msg_enable, int, 0); 73MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); 74 75static struct ucc_geth_info ugeth_primary_info = { 76 .uf_info = { 77 .bd_mem_part = MEM_PART_SYSTEM, 78 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, 79 .max_rx_buf_length = 1536, 80 /* adjusted at startup if max-speed 1000 */ 81 .urfs = UCC_GETH_URFS_INIT, 82 .urfet = UCC_GETH_URFET_INIT, 83 .urfset = UCC_GETH_URFSET_INIT, 84 .utfs = UCC_GETH_UTFS_INIT, 85 .utfet = UCC_GETH_UTFET_INIT, 86 .utftt = UCC_GETH_UTFTT_INIT, 87 .ufpt = 256, 88 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, 89 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, 90 .tenc = UCC_FAST_TX_ENCODING_NRZ, 91 .renc = UCC_FAST_RX_ENCODING_NRZ, 92 .tcrc = UCC_FAST_16_BIT_CRC, 93 .synl = UCC_FAST_SYNC_LEN_NOT_USED, 94 }, 95 .numQueuesTx = 1, 96 .numQueuesRx = 1, 97 .extendedFilteringChainPointer = ((uint32_t) NULL), 98 .typeorlen = 3072 /*1536 */ , 99 .nonBackToBackIfgPart1 = 0x40, 100 .nonBackToBackIfgPart2 = 0x60, 101 .miminumInterFrameGapEnforcement = 0x50, 102 .backToBackInterFrameGap = 0x60, 103 .mblinterval = 128, 104 .nortsrbytetime = 5, 105 .fracsiz = 1, 106 .strictpriorityq = 0xff, 107 .altBebTruncation = 0xa, 108 .excessDefer = 1, 109 .maxRetransmission = 0xf, 110 .collisionWindow = 0x37, 111 .receiveFlowControl = 1, 112 .transmitFlowControl = 1, 113 .maxGroupAddrInHash = 4, 114 .maxIndAddrInHash = 4, 115 .prel = 7, 116 .maxFrameLength = 1518, 117 .minFrameLength = 64, 118 .maxD1Length = 1520, 119 .maxD2Length = 1520, 120 .vlantype = 0x8100, 121 .ecamptr = ((uint32_t) NULL), 122 .eventRegMask = UCCE_OTHER, 123 .pausePeriod = 0xf000, 124 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, 125 .bdRingLenTx = { 126 TX_BD_RING_LEN, 127 TX_BD_RING_LEN, 128 TX_BD_RING_LEN, 129 TX_BD_RING_LEN, 130 TX_BD_RING_LEN, 131 TX_BD_RING_LEN, 132 TX_BD_RING_LEN, 133 TX_BD_RING_LEN}, 134 135 .bdRingLenRx = { 136 RX_BD_RING_LEN, 137 RX_BD_RING_LEN, 138 RX_BD_RING_LEN, 139 RX_BD_RING_LEN, 140 RX_BD_RING_LEN, 141 RX_BD_RING_LEN, 142 RX_BD_RING_LEN, 143 RX_BD_RING_LEN}, 144 145 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, 146 .largestexternallookupkeysize = 147 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, 148 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | 149 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | 150 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, 151 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, 152 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, 153 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, 154 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, 155 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, 156 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1, 157 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1, 158 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, 159 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, 160}; 161 162static struct ucc_geth_info ugeth_info[8]; 163 164#ifdef DEBUG 165static void mem_disp(u8 *addr, int size) 166{ 167 u8 *i; 168 int size16Aling = (size >> 4) << 4; 169 int size4Aling = (size >> 2) << 2; 170 int notAlign = 0; 171 if (size % 16) 172 notAlign = 1; 173 174 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) 175 printk("0x%08x: %08x %08x %08x %08x\r\n", 176 (u32) i, 177 *((u32 *) (i)), 178 *((u32 *) (i + 4)), 179 *((u32 *) (i + 8)), *((u32 *) (i + 12))); 180 if (notAlign == 1) 181 printk("0x%08x: ", (u32) i); 182 for (; (u32) i < (u32) addr + size4Aling; i += 4) 183 printk("%08x ", *((u32 *) (i))); 184 for (; (u32) i < (u32) addr + size; i++) 185 printk("%02x", *((u8 *) (i))); 186 if (notAlign == 1) 187 printk("\r\n"); 188} 189#endif /* DEBUG */ 190 191static struct list_head *dequeue(struct list_head *lh) 192{ 193 unsigned long flags; 194 195 spin_lock_irqsave(&ugeth_lock, flags); 196 if (!list_empty(lh)) { 197 struct list_head *node = lh->next; 198 list_del(node); 199 spin_unlock_irqrestore(&ugeth_lock, flags); 200 return node; 201 } else { 202 spin_unlock_irqrestore(&ugeth_lock, flags); 203 return NULL; 204 } 205} 206 207static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, 208 u8 __iomem *bd) 209{ 210 struct sk_buff *skb = NULL; 211 212 skb = __skb_dequeue(&ugeth->rx_recycle); 213 if (!skb) 214 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + 215 UCC_GETH_RX_DATA_BUF_ALIGNMENT); 216 if (skb == NULL) 217 return NULL; 218 219 /* We need the data buffer to be aligned properly. We will reserve 220 * as many bytes as needed to align the data properly 221 */ 222 skb_reserve(skb, 223 UCC_GETH_RX_DATA_BUF_ALIGNMENT - 224 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - 225 1))); 226 227 skb->dev = ugeth->ndev; 228 229 out_be32(&((struct qe_bd __iomem *)bd)->buf, 230 dma_map_single(ugeth->dev, 231 skb->data, 232 ugeth->ug_info->uf_info.max_rx_buf_length + 233 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 234 DMA_FROM_DEVICE)); 235 236 out_be32((u32 __iomem *)bd, 237 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); 238 239 return skb; 240} 241 242static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) 243{ 244 u8 __iomem *bd; 245 u32 bd_status; 246 struct sk_buff *skb; 247 int i; 248 249 bd = ugeth->p_rx_bd_ring[rxQ]; 250 i = 0; 251 252 do { 253 bd_status = in_be32((u32 __iomem *)bd); 254 skb = get_new_skb(ugeth, bd); 255 256 if (!skb) /* If can not allocate data buffer, 257 abort. Cleanup will be elsewhere */ 258 return -ENOMEM; 259 260 ugeth->rx_skbuff[rxQ][i] = skb; 261 262 /* advance the BD pointer */ 263 bd += sizeof(struct qe_bd); 264 i++; 265 } while (!(bd_status & R_W)); 266 267 return 0; 268} 269 270static int fill_init_enet_entries(struct ucc_geth_private *ugeth, 271 u32 *p_start, 272 u8 num_entries, 273 u32 thread_size, 274 u32 thread_alignment, 275 unsigned int risc, 276 int skip_page_for_first_entry) 277{ 278 u32 init_enet_offset; 279 u8 i; 280 int snum; 281 282 for (i = 0; i < num_entries; i++) { 283 if ((snum = qe_get_snum()) < 0) { 284 if (netif_msg_ifup(ugeth)) 285 ugeth_err("fill_init_enet_entries: Can not get SNUM."); 286 return snum; 287 } 288 if ((i == 0) && skip_page_for_first_entry) 289 /* First entry of Rx does not have page */ 290 init_enet_offset = 0; 291 else { 292 init_enet_offset = 293 qe_muram_alloc(thread_size, thread_alignment); 294 if (IS_ERR_VALUE(init_enet_offset)) { 295 if (netif_msg_ifup(ugeth)) 296 ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); 297 qe_put_snum((u8) snum); 298 return -ENOMEM; 299 } 300 } 301 *(p_start++) = 302 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset 303 | risc; 304 } 305 306 return 0; 307} 308 309static int return_init_enet_entries(struct ucc_geth_private *ugeth, 310 u32 *p_start, 311 u8 num_entries, 312 unsigned int risc, 313 int skip_page_for_first_entry) 314{ 315 u32 init_enet_offset; 316 u8 i; 317 int snum; 318 319 for (i = 0; i < num_entries; i++) { 320 u32 val = *p_start; 321 322 /* Check that this entry was actually valid -- 323 needed in case failed in allocations */ 324 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 325 snum = 326 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> 327 ENET_INIT_PARAM_SNUM_SHIFT; 328 qe_put_snum((u8) snum); 329 if (!((i == 0) && skip_page_for_first_entry)) { 330 /* First entry of Rx does not have page */ 331 init_enet_offset = 332 (val & ENET_INIT_PARAM_PTR_MASK); 333 qe_muram_free(init_enet_offset); 334 } 335 *p_start++ = 0; 336 } 337 } 338 339 return 0; 340} 341 342#ifdef DEBUG 343static int dump_init_enet_entries(struct ucc_geth_private *ugeth, 344 u32 __iomem *p_start, 345 u8 num_entries, 346 u32 thread_size, 347 unsigned int risc, 348 int skip_page_for_first_entry) 349{ 350 u32 init_enet_offset; 351 u8 i; 352 int snum; 353 354 for (i = 0; i < num_entries; i++) { 355 u32 val = in_be32(p_start); 356 357 /* Check that this entry was actually valid -- 358 needed in case failed in allocations */ 359 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 360 snum = 361 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> 362 ENET_INIT_PARAM_SNUM_SHIFT; 363 qe_put_snum((u8) snum); 364 if (!((i == 0) && skip_page_for_first_entry)) { 365 /* First entry of Rx does not have page */ 366 init_enet_offset = 367 (in_be32(p_start) & 368 ENET_INIT_PARAM_PTR_MASK); 369 ugeth_info("Init enet entry %d:", i); 370 ugeth_info("Base address: 0x%08x", 371 (u32) 372 qe_muram_addr(init_enet_offset)); 373 mem_disp(qe_muram_addr(init_enet_offset), 374 thread_size); 375 } 376 p_start++; 377 } 378 } 379 380 return 0; 381} 382#endif 383 384static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) 385{ 386 kfree(enet_addr_cont); 387} 388 389static void set_mac_addr(__be16 __iomem *reg, u8 *mac) 390{ 391 out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]); 392 out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]); 393 out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]); 394} 395 396static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 397{ 398 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 399 400 if (!(paddr_num < NUM_OF_PADDRS)) { 401 ugeth_warn("%s: Illagel paddr_num.", __func__); 402 return -EINVAL; 403 } 404 405 p_82xx_addr_filt = 406 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> 407 addressfiltering; 408 409 /* Writing address ff.ff.ff.ff.ff.ff disables address 410 recognition for this register */ 411 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); 412 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); 413 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); 414 415 return 0; 416} 417 418static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, 419 u8 *p_enet_addr) 420{ 421 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 422 u32 cecr_subblock; 423 424 p_82xx_addr_filt = 425 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> 426 addressfiltering; 427 428 cecr_subblock = 429 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 430 431 /* Ethernet frames are defined in Little Endian mode, 432 therefor to insert */ 433 /* the address to the hash (Big Endian mode), we reverse the bytes.*/ 434 435 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); 436 437 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, 438 QE_CR_PROTOCOL_ETHERNET, 0); 439} 440 441static inline int compare_addr(u8 **addr1, u8 **addr2) 442{ 443 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); 444} 445 446#ifdef DEBUG 447static void get_statistics(struct ucc_geth_private *ugeth, 448 struct ucc_geth_tx_firmware_statistics * 449 tx_firmware_statistics, 450 struct ucc_geth_rx_firmware_statistics * 451 rx_firmware_statistics, 452 struct ucc_geth_hardware_statistics *hardware_statistics) 453{ 454 struct ucc_fast __iomem *uf_regs; 455 struct ucc_geth __iomem *ug_regs; 456 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 457 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 458 459 ug_regs = ugeth->ug_regs; 460 uf_regs = (struct ucc_fast __iomem *) ug_regs; 461 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; 462 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; 463 464 /* Tx firmware only if user handed pointer and driver actually 465 gathers Tx firmware statistics */ 466 if (tx_firmware_statistics && p_tx_fw_statistics_pram) { 467 tx_firmware_statistics->sicoltx = 468 in_be32(&p_tx_fw_statistics_pram->sicoltx); 469 tx_firmware_statistics->mulcoltx = 470 in_be32(&p_tx_fw_statistics_pram->mulcoltx); 471 tx_firmware_statistics->latecoltxfr = 472 in_be32(&p_tx_fw_statistics_pram->latecoltxfr); 473 tx_firmware_statistics->frabortduecol = 474 in_be32(&p_tx_fw_statistics_pram->frabortduecol); 475 tx_firmware_statistics->frlostinmactxer = 476 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); 477 tx_firmware_statistics->carriersenseertx = 478 in_be32(&p_tx_fw_statistics_pram->carriersenseertx); 479 tx_firmware_statistics->frtxok = 480 in_be32(&p_tx_fw_statistics_pram->frtxok); 481 tx_firmware_statistics->txfrexcessivedefer = 482 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); 483 tx_firmware_statistics->txpkts256 = 484 in_be32(&p_tx_fw_statistics_pram->txpkts256); 485 tx_firmware_statistics->txpkts512 = 486 in_be32(&p_tx_fw_statistics_pram->txpkts512); 487 tx_firmware_statistics->txpkts1024 = 488 in_be32(&p_tx_fw_statistics_pram->txpkts1024); 489 tx_firmware_statistics->txpktsjumbo = 490 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); 491 } 492 493 /* Rx firmware only if user handed pointer and driver actually 494 * gathers Rx firmware statistics */ 495 if (rx_firmware_statistics && p_rx_fw_statistics_pram) { 496 int i; 497 rx_firmware_statistics->frrxfcser = 498 in_be32(&p_rx_fw_statistics_pram->frrxfcser); 499 rx_firmware_statistics->fraligner = 500 in_be32(&p_rx_fw_statistics_pram->fraligner); 501 rx_firmware_statistics->inrangelenrxer = 502 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); 503 rx_firmware_statistics->outrangelenrxer = 504 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); 505 rx_firmware_statistics->frtoolong = 506 in_be32(&p_rx_fw_statistics_pram->frtoolong); 507 rx_firmware_statistics->runt = 508 in_be32(&p_rx_fw_statistics_pram->runt); 509 rx_firmware_statistics->verylongevent = 510 in_be32(&p_rx_fw_statistics_pram->verylongevent); 511 rx_firmware_statistics->symbolerror = 512 in_be32(&p_rx_fw_statistics_pram->symbolerror); 513 rx_firmware_statistics->dropbsy = 514 in_be32(&p_rx_fw_statistics_pram->dropbsy); 515 for (i = 0; i < 0x8; i++) 516 rx_firmware_statistics->res0[i] = 517 p_rx_fw_statistics_pram->res0[i]; 518 rx_firmware_statistics->mismatchdrop = 519 in_be32(&p_rx_fw_statistics_pram->mismatchdrop); 520 rx_firmware_statistics->underpkts = 521 in_be32(&p_rx_fw_statistics_pram->underpkts); 522 rx_firmware_statistics->pkts256 = 523 in_be32(&p_rx_fw_statistics_pram->pkts256); 524 rx_firmware_statistics->pkts512 = 525 in_be32(&p_rx_fw_statistics_pram->pkts512); 526 rx_firmware_statistics->pkts1024 = 527 in_be32(&p_rx_fw_statistics_pram->pkts1024); 528 rx_firmware_statistics->pktsjumbo = 529 in_be32(&p_rx_fw_statistics_pram->pktsjumbo); 530 rx_firmware_statistics->frlossinmacer = 531 in_be32(&p_rx_fw_statistics_pram->frlossinmacer); 532 rx_firmware_statistics->pausefr = 533 in_be32(&p_rx_fw_statistics_pram->pausefr); 534 for (i = 0; i < 0x4; i++) 535 rx_firmware_statistics->res1[i] = 536 p_rx_fw_statistics_pram->res1[i]; 537 rx_firmware_statistics->removevlan = 538 in_be32(&p_rx_fw_statistics_pram->removevlan); 539 rx_firmware_statistics->replacevlan = 540 in_be32(&p_rx_fw_statistics_pram->replacevlan); 541 rx_firmware_statistics->insertvlan = 542 in_be32(&p_rx_fw_statistics_pram->insertvlan); 543 } 544 545 /* Hardware only if user handed pointer and driver actually 546 gathers hardware statistics */ 547 if (hardware_statistics && 548 (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { 549 hardware_statistics->tx64 = in_be32(&ug_regs->tx64); 550 hardware_statistics->tx127 = in_be32(&ug_regs->tx127); 551 hardware_statistics->tx255 = in_be32(&ug_regs->tx255); 552 hardware_statistics->rx64 = in_be32(&ug_regs->rx64); 553 hardware_statistics->rx127 = in_be32(&ug_regs->rx127); 554 hardware_statistics->rx255 = in_be32(&ug_regs->rx255); 555 hardware_statistics->txok = in_be32(&ug_regs->txok); 556 hardware_statistics->txcf = in_be16(&ug_regs->txcf); 557 hardware_statistics->tmca = in_be32(&ug_regs->tmca); 558 hardware_statistics->tbca = in_be32(&ug_regs->tbca); 559 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); 560 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); 561 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); 562 hardware_statistics->rmca = in_be32(&ug_regs->rmca); 563 hardware_statistics->rbca = in_be32(&ug_regs->rbca); 564 } 565} 566 567static void dump_bds(struct ucc_geth_private *ugeth) 568{ 569 int i; 570 int length; 571 572 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 573 if (ugeth->p_tx_bd_ring[i]) { 574 length = 575 (ugeth->ug_info->bdRingLenTx[i] * 576 sizeof(struct qe_bd)); 577 ugeth_info("TX BDs[%d]", i); 578 mem_disp(ugeth->p_tx_bd_ring[i], length); 579 } 580 } 581 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 582 if (ugeth->p_rx_bd_ring[i]) { 583 length = 584 (ugeth->ug_info->bdRingLenRx[i] * 585 sizeof(struct qe_bd)); 586 ugeth_info("RX BDs[%d]", i); 587 mem_disp(ugeth->p_rx_bd_ring[i], length); 588 } 589 } 590} 591 592static void dump_regs(struct ucc_geth_private *ugeth) 593{ 594 int i; 595 596 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); 597 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); 598 599 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", 600 (u32) & ugeth->ug_regs->maccfg1, 601 in_be32(&ugeth->ug_regs->maccfg1)); 602 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", 603 (u32) & ugeth->ug_regs->maccfg2, 604 in_be32(&ugeth->ug_regs->maccfg2)); 605 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", 606 (u32) & ugeth->ug_regs->ipgifg, 607 in_be32(&ugeth->ug_regs->ipgifg)); 608 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", 609 (u32) & ugeth->ug_regs->hafdup, 610 in_be32(&ugeth->ug_regs->hafdup)); 611 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", 612 (u32) & ugeth->ug_regs->ifctl, 613 in_be32(&ugeth->ug_regs->ifctl)); 614 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", 615 (u32) & ugeth->ug_regs->ifstat, 616 in_be32(&ugeth->ug_regs->ifstat)); 617 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", 618 (u32) & ugeth->ug_regs->macstnaddr1, 619 in_be32(&ugeth->ug_regs->macstnaddr1)); 620 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", 621 (u32) & ugeth->ug_regs->macstnaddr2, 622 in_be32(&ugeth->ug_regs->macstnaddr2)); 623 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", 624 (u32) & ugeth->ug_regs->uempr, 625 in_be32(&ugeth->ug_regs->uempr)); 626 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", 627 (u32) & ugeth->ug_regs->utbipar, 628 in_be32(&ugeth->ug_regs->utbipar)); 629 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", 630 (u32) & ugeth->ug_regs->uescr, 631 in_be16(&ugeth->ug_regs->uescr)); 632 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", 633 (u32) & ugeth->ug_regs->tx64, 634 in_be32(&ugeth->ug_regs->tx64)); 635 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", 636 (u32) & ugeth->ug_regs->tx127, 637 in_be32(&ugeth->ug_regs->tx127)); 638 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", 639 (u32) & ugeth->ug_regs->tx255, 640 in_be32(&ugeth->ug_regs->tx255)); 641 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", 642 (u32) & ugeth->ug_regs->rx64, 643 in_be32(&ugeth->ug_regs->rx64)); 644 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", 645 (u32) & ugeth->ug_regs->rx127, 646 in_be32(&ugeth->ug_regs->rx127)); 647 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", 648 (u32) & ugeth->ug_regs->rx255, 649 in_be32(&ugeth->ug_regs->rx255)); 650 ugeth_info("txok : addr - 0x%08x, val - 0x%08x", 651 (u32) & ugeth->ug_regs->txok, 652 in_be32(&ugeth->ug_regs->txok)); 653 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", 654 (u32) & ugeth->ug_regs->txcf, 655 in_be16(&ugeth->ug_regs->txcf)); 656 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", 657 (u32) & ugeth->ug_regs->tmca, 658 in_be32(&ugeth->ug_regs->tmca)); 659 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", 660 (u32) & ugeth->ug_regs->tbca, 661 in_be32(&ugeth->ug_regs->tbca)); 662 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", 663 (u32) & ugeth->ug_regs->rxfok, 664 in_be32(&ugeth->ug_regs->rxfok)); 665 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", 666 (u32) & ugeth->ug_regs->rxbok, 667 in_be32(&ugeth->ug_regs->rxbok)); 668 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", 669 (u32) & ugeth->ug_regs->rbyt, 670 in_be32(&ugeth->ug_regs->rbyt)); 671 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", 672 (u32) & ugeth->ug_regs->rmca, 673 in_be32(&ugeth->ug_regs->rmca)); 674 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", 675 (u32) & ugeth->ug_regs->rbca, 676 in_be32(&ugeth->ug_regs->rbca)); 677 ugeth_info("scar : addr - 0x%08x, val - 0x%08x", 678 (u32) & ugeth->ug_regs->scar, 679 in_be32(&ugeth->ug_regs->scar)); 680 ugeth_info("scam : addr - 0x%08x, val - 0x%08x", 681 (u32) & ugeth->ug_regs->scam, 682 in_be32(&ugeth->ug_regs->scam)); 683 684 if (ugeth->p_thread_data_tx) { 685 int numThreadsTxNumerical; 686 switch (ugeth->ug_info->numThreadsTx) { 687 case UCC_GETH_NUM_OF_THREADS_1: 688 numThreadsTxNumerical = 1; 689 break; 690 case UCC_GETH_NUM_OF_THREADS_2: 691 numThreadsTxNumerical = 2; 692 break; 693 case UCC_GETH_NUM_OF_THREADS_4: 694 numThreadsTxNumerical = 4; 695 break; 696 case UCC_GETH_NUM_OF_THREADS_6: 697 numThreadsTxNumerical = 6; 698 break; 699 case UCC_GETH_NUM_OF_THREADS_8: 700 numThreadsTxNumerical = 8; 701 break; 702 default: 703 numThreadsTxNumerical = 0; 704 break; 705 } 706 707 ugeth_info("Thread data TXs:"); 708 ugeth_info("Base address: 0x%08x", 709 (u32) ugeth->p_thread_data_tx); 710 for (i = 0; i < numThreadsTxNumerical; i++) { 711 ugeth_info("Thread data TX[%d]:", i); 712 ugeth_info("Base address: 0x%08x", 713 (u32) & ugeth->p_thread_data_tx[i]); 714 mem_disp((u8 *) & ugeth->p_thread_data_tx[i], 715 sizeof(struct ucc_geth_thread_data_tx)); 716 } 717 } 718 if (ugeth->p_thread_data_rx) { 719 int numThreadsRxNumerical; 720 switch (ugeth->ug_info->numThreadsRx) { 721 case UCC_GETH_NUM_OF_THREADS_1: 722 numThreadsRxNumerical = 1; 723 break; 724 case UCC_GETH_NUM_OF_THREADS_2: 725 numThreadsRxNumerical = 2; 726 break; 727 case UCC_GETH_NUM_OF_THREADS_4: 728 numThreadsRxNumerical = 4; 729 break; 730 case UCC_GETH_NUM_OF_THREADS_6: 731 numThreadsRxNumerical = 6; 732 break; 733 case UCC_GETH_NUM_OF_THREADS_8: 734 numThreadsRxNumerical = 8; 735 break; 736 default: 737 numThreadsRxNumerical = 0; 738 break; 739 } 740 741 ugeth_info("Thread data RX:"); 742 ugeth_info("Base address: 0x%08x", 743 (u32) ugeth->p_thread_data_rx); 744 for (i = 0; i < numThreadsRxNumerical; i++) { 745 ugeth_info("Thread data RX[%d]:", i); 746 ugeth_info("Base address: 0x%08x", 747 (u32) & ugeth->p_thread_data_rx[i]); 748 mem_disp((u8 *) & ugeth->p_thread_data_rx[i], 749 sizeof(struct ucc_geth_thread_data_rx)); 750 } 751 } 752 if (ugeth->p_exf_glbl_param) { 753 ugeth_info("EXF global param:"); 754 ugeth_info("Base address: 0x%08x", 755 (u32) ugeth->p_exf_glbl_param); 756 mem_disp((u8 *) ugeth->p_exf_glbl_param, 757 sizeof(*ugeth->p_exf_glbl_param)); 758 } 759 if (ugeth->p_tx_glbl_pram) { 760 ugeth_info("TX global param:"); 761 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); 762 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", 763 (u32) & ugeth->p_tx_glbl_pram->temoder, 764 in_be16(&ugeth->p_tx_glbl_pram->temoder)); 765 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", 766 (u32) & ugeth->p_tx_glbl_pram->sqptr, 767 in_be32(&ugeth->p_tx_glbl_pram->sqptr)); 768 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", 769 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, 770 in_be32(&ugeth->p_tx_glbl_pram-> 771 schedulerbasepointer)); 772 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", 773 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, 774 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); 775 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", 776 (u32) & ugeth->p_tx_glbl_pram->tstate, 777 in_be32(&ugeth->p_tx_glbl_pram->tstate)); 778 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", 779 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], 780 ugeth->p_tx_glbl_pram->iphoffset[0]); 781 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", 782 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], 783 ugeth->p_tx_glbl_pram->iphoffset[1]); 784 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", 785 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], 786 ugeth->p_tx_glbl_pram->iphoffset[2]); 787 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", 788 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], 789 ugeth->p_tx_glbl_pram->iphoffset[3]); 790 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", 791 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], 792 ugeth->p_tx_glbl_pram->iphoffset[4]); 793 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", 794 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], 795 ugeth->p_tx_glbl_pram->iphoffset[5]); 796 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", 797 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], 798 ugeth->p_tx_glbl_pram->iphoffset[6]); 799 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", 800 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], 801 ugeth->p_tx_glbl_pram->iphoffset[7]); 802 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", 803 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], 804 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); 805 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", 806 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], 807 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); 808 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", 809 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], 810 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); 811 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", 812 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], 813 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); 814 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", 815 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], 816 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); 817 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", 818 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], 819 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); 820 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", 821 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], 822 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); 823 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", 824 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], 825 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); 826 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", 827 (u32) & ugeth->p_tx_glbl_pram->tqptr, 828 in_be32(&ugeth->p_tx_glbl_pram->tqptr)); 829 } 830 if (ugeth->p_rx_glbl_pram) { 831 ugeth_info("RX global param:"); 832 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); 833 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", 834 (u32) & ugeth->p_rx_glbl_pram->remoder, 835 in_be32(&ugeth->p_rx_glbl_pram->remoder)); 836 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", 837 (u32) & ugeth->p_rx_glbl_pram->rqptr, 838 in_be32(&ugeth->p_rx_glbl_pram->rqptr)); 839 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", 840 (u32) & ugeth->p_rx_glbl_pram->typeorlen, 841 in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); 842 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", 843 (u32) & ugeth->p_rx_glbl_pram->rxgstpack, 844 ugeth->p_rx_glbl_pram->rxgstpack); 845 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", 846 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, 847 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); 848 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", 849 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, 850 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); 851 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", 852 (u32) & ugeth->p_rx_glbl_pram->rstate, 853 ugeth->p_rx_glbl_pram->rstate); 854 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", 855 (u32) & ugeth->p_rx_glbl_pram->mrblr, 856 in_be16(&ugeth->p_rx_glbl_pram->mrblr)); 857 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", 858 (u32) & ugeth->p_rx_glbl_pram->rbdqptr, 859 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); 860 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", 861 (u32) & ugeth->p_rx_glbl_pram->mflr, 862 in_be16(&ugeth->p_rx_glbl_pram->mflr)); 863 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", 864 (u32) & ugeth->p_rx_glbl_pram->minflr, 865 in_be16(&ugeth->p_rx_glbl_pram->minflr)); 866 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", 867 (u32) & ugeth->p_rx_glbl_pram->maxd1, 868 in_be16(&ugeth->p_rx_glbl_pram->maxd1)); 869 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", 870 (u32) & ugeth->p_rx_glbl_pram->maxd2, 871 in_be16(&ugeth->p_rx_glbl_pram->maxd2)); 872 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", 873 (u32) & ugeth->p_rx_glbl_pram->ecamptr, 874 in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); 875 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", 876 (u32) & ugeth->p_rx_glbl_pram->l2qt, 877 in_be32(&ugeth->p_rx_glbl_pram->l2qt)); 878 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", 879 (u32) & ugeth->p_rx_glbl_pram->l3qt[0], 880 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); 881 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", 882 (u32) & ugeth->p_rx_glbl_pram->l3qt[1], 883 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); 884 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", 885 (u32) & ugeth->p_rx_glbl_pram->l3qt[2], 886 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); 887 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", 888 (u32) & ugeth->p_rx_glbl_pram->l3qt[3], 889 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); 890 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", 891 (u32) & ugeth->p_rx_glbl_pram->l3qt[4], 892 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); 893 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", 894 (u32) & ugeth->p_rx_glbl_pram->l3qt[5], 895 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); 896 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", 897 (u32) & ugeth->p_rx_glbl_pram->l3qt[6], 898 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); 899 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", 900 (u32) & ugeth->p_rx_glbl_pram->l3qt[7], 901 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); 902 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", 903 (u32) & ugeth->p_rx_glbl_pram->vlantype, 904 in_be16(&ugeth->p_rx_glbl_pram->vlantype)); 905 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", 906 (u32) & ugeth->p_rx_glbl_pram->vlantci, 907 in_be16(&ugeth->p_rx_glbl_pram->vlantci)); 908 for (i = 0; i < 64; i++) 909 ugeth_info 910 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", 911 i, 912 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], 913 ugeth->p_rx_glbl_pram->addressfiltering[i]); 914 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", 915 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, 916 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); 917 } 918 if (ugeth->p_send_q_mem_reg) { 919 ugeth_info("Send Q memory registers:"); 920 ugeth_info("Base address: 0x%08x", 921 (u32) ugeth->p_send_q_mem_reg); 922 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 923 ugeth_info("SQQD[%d]:", i); 924 ugeth_info("Base address: 0x%08x", 925 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); 926 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], 927 sizeof(struct ucc_geth_send_queue_qd)); 928 } 929 } 930 if (ugeth->p_scheduler) { 931 ugeth_info("Scheduler:"); 932 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); 933 mem_disp((u8 *) ugeth->p_scheduler, 934 sizeof(*ugeth->p_scheduler)); 935 } 936 if (ugeth->p_tx_fw_statistics_pram) { 937 ugeth_info("TX FW statistics pram:"); 938 ugeth_info("Base address: 0x%08x", 939 (u32) ugeth->p_tx_fw_statistics_pram); 940 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, 941 sizeof(*ugeth->p_tx_fw_statistics_pram)); 942 } 943 if (ugeth->p_rx_fw_statistics_pram) { 944 ugeth_info("RX FW statistics pram:"); 945 ugeth_info("Base address: 0x%08x", 946 (u32) ugeth->p_rx_fw_statistics_pram); 947 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, 948 sizeof(*ugeth->p_rx_fw_statistics_pram)); 949 } 950 if (ugeth->p_rx_irq_coalescing_tbl) { 951 ugeth_info("RX IRQ coalescing tables:"); 952 ugeth_info("Base address: 0x%08x", 953 (u32) ugeth->p_rx_irq_coalescing_tbl); 954 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 955 ugeth_info("RX IRQ coalescing table entry[%d]:", i); 956 ugeth_info("Base address: 0x%08x", 957 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 958 coalescingentry[i]); 959 ugeth_info 960 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", 961 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 962 coalescingentry[i].interruptcoalescingmaxvalue, 963 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 964 coalescingentry[i]. 965 interruptcoalescingmaxvalue)); 966 ugeth_info 967 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", 968 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 969 coalescingentry[i].interruptcoalescingcounter, 970 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 971 coalescingentry[i]. 972 interruptcoalescingcounter)); 973 } 974 } 975 if (ugeth->p_rx_bd_qs_tbl) { 976 ugeth_info("RX BD QS tables:"); 977 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); 978 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 979 ugeth_info("RX BD QS table[%d]:", i); 980 ugeth_info("Base address: 0x%08x", 981 (u32) & ugeth->p_rx_bd_qs_tbl[i]); 982 ugeth_info 983 ("bdbaseptr : addr - 0x%08x, val - 0x%08x", 984 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, 985 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); 986 ugeth_info 987 ("bdptr : addr - 0x%08x, val - 0x%08x", 988 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, 989 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); 990 ugeth_info 991 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", 992 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 993 in_be32(&ugeth->p_rx_bd_qs_tbl[i]. 994 externalbdbaseptr)); 995 ugeth_info 996 ("externalbdptr : addr - 0x%08x, val - 0x%08x", 997 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, 998 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); 999 ugeth_info("ucode RX Prefetched BDs:"); 1000 ugeth_info("Base address: 0x%08x", 1001 (u32) 1002 qe_muram_addr(in_be32 1003 (&ugeth->p_rx_bd_qs_tbl[i]. 1004 bdbaseptr))); 1005 mem_disp((u8 *) 1006 qe_muram_addr(in_be32 1007 (&ugeth->p_rx_bd_qs_tbl[i]. 1008 bdbaseptr)), 1009 sizeof(struct ucc_geth_rx_prefetched_bds)); 1010 } 1011 } 1012 if (ugeth->p_init_enet_param_shadow) { 1013 int size; 1014 ugeth_info("Init enet param shadow:"); 1015 ugeth_info("Base address: 0x%08x", 1016 (u32) ugeth->p_init_enet_param_shadow); 1017 mem_disp((u8 *) ugeth->p_init_enet_param_shadow, 1018 sizeof(*ugeth->p_init_enet_param_shadow)); 1019 1020 size = sizeof(struct ucc_geth_thread_rx_pram); 1021 if (ugeth->ug_info->rxExtendedFiltering) { 1022 size += 1023 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 1024 if (ugeth->ug_info->largestexternallookupkeysize == 1025 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 1026 size += 1027 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 1028 if (ugeth->ug_info->largestexternallookupkeysize == 1029 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 1030 size += 1031 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 1032 } 1033 1034 dump_init_enet_entries(ugeth, 1035 &(ugeth->p_init_enet_param_shadow-> 1036 txthread[0]), 1037 ENET_INIT_PARAM_MAX_ENTRIES_TX, 1038 sizeof(struct ucc_geth_thread_tx_pram), 1039 ugeth->ug_info->riscTx, 0); 1040 dump_init_enet_entries(ugeth, 1041 &(ugeth->p_init_enet_param_shadow-> 1042 rxthread[0]), 1043 ENET_INIT_PARAM_MAX_ENTRIES_RX, size, 1044 ugeth->ug_info->riscRx, 1); 1045 } 1046} 1047#endif /* DEBUG */ 1048 1049static void init_default_reg_vals(u32 __iomem *upsmr_register, 1050 u32 __iomem *maccfg1_register, 1051 u32 __iomem *maccfg2_register) 1052{ 1053 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); 1054 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); 1055 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); 1056} 1057 1058static int init_half_duplex_params(int alt_beb, 1059 int back_pressure_no_backoff, 1060 int no_backoff, 1061 int excess_defer, 1062 u8 alt_beb_truncation, 1063 u8 max_retransmissions, 1064 u8 collision_window, 1065 u32 __iomem *hafdup_register) 1066{ 1067 u32 value = 0; 1068 1069 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || 1070 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || 1071 (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) 1072 return -EINVAL; 1073 1074 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); 1075 1076 if (alt_beb) 1077 value |= HALFDUP_ALT_BEB; 1078 if (back_pressure_no_backoff) 1079 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; 1080 if (no_backoff) 1081 value |= HALFDUP_NO_BACKOFF; 1082 if (excess_defer) 1083 value |= HALFDUP_EXCESSIVE_DEFER; 1084 1085 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); 1086 1087 value |= collision_window; 1088 1089 out_be32(hafdup_register, value); 1090 return 0; 1091} 1092 1093static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, 1094 u8 non_btb_ipg, 1095 u8 min_ifg, 1096 u8 btb_ipg, 1097 u32 __iomem *ipgifg_register) 1098{ 1099 u32 value = 0; 1100 1101 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back 1102 IPG part 2 */ 1103 if (non_btb_cs_ipg > non_btb_ipg) 1104 return -EINVAL; 1105 1106 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || 1107 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || 1108 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ 1109 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) 1110 return -EINVAL; 1111 1112 value |= 1113 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & 1114 IPGIFG_NBTB_CS_IPG_MASK); 1115 value |= 1116 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & 1117 IPGIFG_NBTB_IPG_MASK); 1118 value |= 1119 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & 1120 IPGIFG_MIN_IFG_MASK); 1121 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); 1122 1123 out_be32(ipgifg_register, value); 1124 return 0; 1125} 1126 1127int init_flow_control_params(u32 automatic_flow_control_mode, 1128 int rx_flow_control_enable, 1129 int tx_flow_control_enable, 1130 u16 pause_period, 1131 u16 extension_field, 1132 u32 __iomem *upsmr_register, 1133 u32 __iomem *uempr_register, 1134 u32 __iomem *maccfg1_register) 1135{ 1136 u32 value = 0; 1137 1138 /* Set UEMPR register */ 1139 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; 1140 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; 1141 out_be32(uempr_register, value); 1142 1143 /* Set UPSMR register */ 1144 setbits32(upsmr_register, automatic_flow_control_mode); 1145 1146 value = in_be32(maccfg1_register); 1147 if (rx_flow_control_enable) 1148 value |= MACCFG1_FLOW_RX; 1149 if (tx_flow_control_enable) 1150 value |= MACCFG1_FLOW_TX; 1151 out_be32(maccfg1_register, value); 1152 1153 return 0; 1154} 1155 1156static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, 1157 int auto_zero_hardware_statistics, 1158 u32 __iomem *upsmr_register, 1159 u16 __iomem *uescr_register) 1160{ 1161 u16 uescr_value = 0; 1162 1163 /* Enable hardware statistics gathering if requested */ 1164 if (enable_hardware_statistics) 1165 setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); 1166 1167 /* Clear hardware statistics counters */ 1168 uescr_value = in_be16(uescr_register); 1169 uescr_value |= UESCR_CLRCNT; 1170 /* Automatically zero hardware statistics counters on read, 1171 if requested */ 1172 if (auto_zero_hardware_statistics) 1173 uescr_value |= UESCR_AUTOZ; 1174 out_be16(uescr_register, uescr_value); 1175 1176 return 0; 1177} 1178 1179static int init_firmware_statistics_gathering_mode(int 1180 enable_tx_firmware_statistics, 1181 int enable_rx_firmware_statistics, 1182 u32 __iomem *tx_rmon_base_ptr, 1183 u32 tx_firmware_statistics_structure_address, 1184 u32 __iomem *rx_rmon_base_ptr, 1185 u32 rx_firmware_statistics_structure_address, 1186 u16 __iomem *temoder_register, 1187 u32 __iomem *remoder_register) 1188{ 1189 /* Note: this function does not check if */ 1190 /* the parameters it receives are NULL */ 1191 1192 if (enable_tx_firmware_statistics) { 1193 out_be32(tx_rmon_base_ptr, 1194 tx_firmware_statistics_structure_address); 1195 setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); 1196 } 1197 1198 if (enable_rx_firmware_statistics) { 1199 out_be32(rx_rmon_base_ptr, 1200 rx_firmware_statistics_structure_address); 1201 setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); 1202 } 1203 1204 return 0; 1205} 1206 1207static int init_mac_station_addr_regs(u8 address_byte_0, 1208 u8 address_byte_1, 1209 u8 address_byte_2, 1210 u8 address_byte_3, 1211 u8 address_byte_4, 1212 u8 address_byte_5, 1213 u32 __iomem *macstnaddr1_register, 1214 u32 __iomem *macstnaddr2_register) 1215{ 1216 u32 value = 0; 1217 1218 /* Example: for a station address of 0x12345678ABCD, */ 1219 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ 1220 1221 /* MACSTNADDR1 Register: */ 1222 1223 /* 0 7 8 15 */ 1224 /* station address byte 5 station address byte 4 */ 1225 /* 16 23 24 31 */ 1226 /* station address byte 3 station address byte 2 */ 1227 value |= (u32) ((address_byte_2 << 0) & 0x000000FF); 1228 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); 1229 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); 1230 value |= (u32) ((address_byte_5 << 24) & 0xFF000000); 1231 1232 out_be32(macstnaddr1_register, value); 1233 1234 /* MACSTNADDR2 Register: */ 1235 1236 /* 0 7 8 15 */ 1237 /* station address byte 1 station address byte 0 */ 1238 /* 16 23 24 31 */ 1239 /* reserved reserved */ 1240 value = 0; 1241 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); 1242 value |= (u32) ((address_byte_1 << 24) & 0xFF000000); 1243 1244 out_be32(macstnaddr2_register, value); 1245 1246 return 0; 1247} 1248 1249static int init_check_frame_length_mode(int length_check, 1250 u32 __iomem *maccfg2_register) 1251{ 1252 u32 value = 0; 1253 1254 value = in_be32(maccfg2_register); 1255 1256 if (length_check) 1257 value |= MACCFG2_LC; 1258 else 1259 value &= ~MACCFG2_LC; 1260 1261 out_be32(maccfg2_register, value); 1262 return 0; 1263} 1264 1265static int init_preamble_length(u8 preamble_length, 1266 u32 __iomem *maccfg2_register) 1267{ 1268 if ((preamble_length < 3) || (preamble_length > 7)) 1269 return -EINVAL; 1270 1271 clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, 1272 preamble_length << MACCFG2_PREL_SHIFT); 1273 1274 return 0; 1275} 1276 1277static int init_rx_parameters(int reject_broadcast, 1278 int receive_short_frames, 1279 int promiscuous, u32 __iomem *upsmr_register) 1280{ 1281 u32 value = 0; 1282 1283 value = in_be32(upsmr_register); 1284 1285 if (reject_broadcast) 1286 value |= UCC_GETH_UPSMR_BRO; 1287 else 1288 value &= ~UCC_GETH_UPSMR_BRO; 1289 1290 if (receive_short_frames) 1291 value |= UCC_GETH_UPSMR_RSH; 1292 else 1293 value &= ~UCC_GETH_UPSMR_RSH; 1294 1295 if (promiscuous) 1296 value |= UCC_GETH_UPSMR_PRO; 1297 else 1298 value &= ~UCC_GETH_UPSMR_PRO; 1299 1300 out_be32(upsmr_register, value); 1301 1302 return 0; 1303} 1304 1305static int init_max_rx_buff_len(u16 max_rx_buf_len, 1306 u16 __iomem *mrblr_register) 1307{ 1308 /* max_rx_buf_len value must be a multiple of 128 */ 1309 if ((max_rx_buf_len == 0) || 1310 (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) 1311 return -EINVAL; 1312 1313 out_be16(mrblr_register, max_rx_buf_len); 1314 return 0; 1315} 1316 1317static int init_min_frame_len(u16 min_frame_length, 1318 u16 __iomem *minflr_register, 1319 u16 __iomem *mrblr_register) 1320{ 1321 u16 mrblr_value = 0; 1322 1323 mrblr_value = in_be16(mrblr_register); 1324 if (min_frame_length >= (mrblr_value - 4)) 1325 return -EINVAL; 1326 1327 out_be16(minflr_register, min_frame_length); 1328 return 0; 1329} 1330 1331static int adjust_enet_interface(struct ucc_geth_private *ugeth) 1332{ 1333 struct ucc_geth_info *ug_info; 1334 struct ucc_geth __iomem *ug_regs; 1335 struct ucc_fast __iomem *uf_regs; 1336 int ret_val; 1337 u32 upsmr, maccfg2, tbiBaseAddress; 1338 u16 value; 1339 1340 ugeth_vdbg("%s: IN", __func__); 1341 1342 ug_info = ugeth->ug_info; 1343 ug_regs = ugeth->ug_regs; 1344 uf_regs = ugeth->uccf->uf_regs; 1345 1346 /* Set MACCFG2 */ 1347 maccfg2 = in_be32(&ug_regs->maccfg2); 1348 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; 1349 if ((ugeth->max_speed == SPEED_10) || 1350 (ugeth->max_speed == SPEED_100)) 1351 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; 1352 else if (ugeth->max_speed == SPEED_1000) 1353 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; 1354 maccfg2 |= ug_info->padAndCrc; 1355 out_be32(&ug_regs->maccfg2, maccfg2); 1356 1357 /* Set UPSMR */ 1358 upsmr = in_be32(&uf_regs->upsmr); 1359 upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | 1360 UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); 1361 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1362 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1363 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1364 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1365 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1366 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1367 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) 1368 upsmr |= UCC_GETH_UPSMR_RPM; 1369 switch (ugeth->max_speed) { 1370 case SPEED_10: 1371 upsmr |= UCC_GETH_UPSMR_R10M; 1372 /* FALLTHROUGH */ 1373 case SPEED_100: 1374 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) 1375 upsmr |= UCC_GETH_UPSMR_RMM; 1376 } 1377 } 1378 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1379 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1380 upsmr |= UCC_GETH_UPSMR_TBIM; 1381 } 1382 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)) 1383 upsmr |= UCC_GETH_UPSMR_SGMM; 1384 1385 out_be32(&uf_regs->upsmr, upsmr); 1386 1387 /* Disable autonegotiation in tbi mode, because by default it 1388 comes up in autonegotiation mode. */ 1389 /* Note that this depends on proper setting in utbipar register. */ 1390 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1391 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1392 tbiBaseAddress = in_be32(&ug_regs->utbipar); 1393 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; 1394 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; 1395 value = ugeth->phydev->bus->read(ugeth->phydev->bus, 1396 (u8) tbiBaseAddress, ENET_TBI_MII_CR); 1397 value &= ~0x1000; /* Turn off autonegotiation */ 1398 ugeth->phydev->bus->write(ugeth->phydev->bus, 1399 (u8) tbiBaseAddress, ENET_TBI_MII_CR, value); 1400 } 1401 1402 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1403 1404 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); 1405 if (ret_val != 0) { 1406 if (netif_msg_probe(ugeth)) 1407 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1408 __func__); 1409 return ret_val; 1410 } 1411 1412 return 0; 1413} 1414 1415static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) 1416{ 1417 struct ucc_fast_private *uccf; 1418 u32 cecr_subblock; 1419 u32 temp; 1420 int i = 10; 1421 1422 uccf = ugeth->uccf; 1423 1424 /* Mask GRACEFUL STOP TX interrupt bit and clear it */ 1425 clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); 1426 out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ 1427 1428 /* Issue host command */ 1429 cecr_subblock = 1430 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1431 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 1432 QE_CR_PROTOCOL_ETHERNET, 0); 1433 1434 /* Wait for command to complete */ 1435 do { 1436 msleep(10); 1437 temp = in_be32(uccf->p_ucce); 1438 } while (!(temp & UCC_GETH_UCCE_GRA) && --i); 1439 1440 uccf->stopped_tx = 1; 1441 1442 return 0; 1443} 1444 1445static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) 1446{ 1447 struct ucc_fast_private *uccf; 1448 u32 cecr_subblock; 1449 u8 temp; 1450 int i = 10; 1451 1452 uccf = ugeth->uccf; 1453 1454 /* Clear acknowledge bit */ 1455 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1456 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; 1457 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); 1458 1459 /* Keep issuing command and checking acknowledge bit until 1460 it is asserted, according to spec */ 1461 do { 1462 /* Issue host command */ 1463 cecr_subblock = 1464 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 1465 ucc_num); 1466 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1467 QE_CR_PROTOCOL_ETHERNET, 0); 1468 msleep(10); 1469 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1470 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); 1471 1472 uccf->stopped_rx = 1; 1473 1474 return 0; 1475} 1476 1477static int ugeth_restart_tx(struct ucc_geth_private *ugeth) 1478{ 1479 struct ucc_fast_private *uccf; 1480 u32 cecr_subblock; 1481 1482 uccf = ugeth->uccf; 1483 1484 cecr_subblock = 1485 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1486 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); 1487 uccf->stopped_tx = 0; 1488 1489 return 0; 1490} 1491 1492static int ugeth_restart_rx(struct ucc_geth_private *ugeth) 1493{ 1494 struct ucc_fast_private *uccf; 1495 u32 cecr_subblock; 1496 1497 uccf = ugeth->uccf; 1498 1499 cecr_subblock = 1500 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1501 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 1502 0); 1503 uccf->stopped_rx = 0; 1504 1505 return 0; 1506} 1507 1508static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) 1509{ 1510 struct ucc_fast_private *uccf; 1511 int enabled_tx, enabled_rx; 1512 1513 uccf = ugeth->uccf; 1514 1515 /* check if the UCC number is in range. */ 1516 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1517 if (netif_msg_probe(ugeth)) 1518 ugeth_err("%s: ucc_num out of range.", __func__); 1519 return -EINVAL; 1520 } 1521 1522 enabled_tx = uccf->enabled_tx; 1523 enabled_rx = uccf->enabled_rx; 1524 1525 /* Get Tx and Rx going again, in case this channel was actively 1526 disabled. */ 1527 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) 1528 ugeth_restart_tx(ugeth); 1529 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) 1530 ugeth_restart_rx(ugeth); 1531 1532 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ 1533 1534 return 0; 1535 1536} 1537 1538static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) 1539{ 1540 struct ucc_fast_private *uccf; 1541 1542 uccf = ugeth->uccf; 1543 1544 /* check if the UCC number is in range. */ 1545 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1546 if (netif_msg_probe(ugeth)) 1547 ugeth_err("%s: ucc_num out of range.", __func__); 1548 return -EINVAL; 1549 } 1550 1551 /* Stop any transmissions */ 1552 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) 1553 ugeth_graceful_stop_tx(ugeth); 1554 1555 /* Stop any receptions */ 1556 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) 1557 ugeth_graceful_stop_rx(ugeth); 1558 1559 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ 1560 1561 return 0; 1562} 1563 1564static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1565{ 1566 /* Prevent any further xmits, plus detach the device. */ 1567 netif_device_detach(ugeth->ndev); 1568 1569 /* Wait for any current xmits to finish. */ 1570 netif_tx_disable(ugeth->ndev); 1571 1572 /* Disable the interrupt to avoid NAPI rescheduling. */ 1573 disable_irq(ugeth->ug_info->uf_info.irq); 1574 1575 /* Stop NAPI, and possibly wait for its completion. */ 1576 napi_disable(&ugeth->napi); 1577} 1578 1579static void ugeth_activate(struct ucc_geth_private *ugeth) 1580{ 1581 napi_enable(&ugeth->napi); 1582 enable_irq(ugeth->ug_info->uf_info.irq); 1583 netif_device_attach(ugeth->ndev); 1584} 1585 1586/* Called every time the controller might need to be made 1587 * aware of new link state. The PHY code conveys this 1588 * information through variables in the ugeth structure, and this 1589 * function converts those variables into the appropriate 1590 * register values, and can bring down the device if needed. 1591 */ 1592 1593static void adjust_link(struct net_device *dev) 1594{ 1595 struct ucc_geth_private *ugeth = netdev_priv(dev); 1596 struct ucc_geth __iomem *ug_regs; 1597 struct ucc_fast __iomem *uf_regs; 1598 struct phy_device *phydev = ugeth->phydev; 1599 int new_state = 0; 1600 1601 ug_regs = ugeth->ug_regs; 1602 uf_regs = ugeth->uccf->uf_regs; 1603 1604 if (phydev->link) { 1605 u32 tempval = in_be32(&ug_regs->maccfg2); 1606 u32 upsmr = in_be32(&uf_regs->upsmr); 1607 /* Now we make sure that we can be in full duplex mode. 1608 * If not, we operate in half-duplex mode. */ 1609 if (phydev->duplex != ugeth->oldduplex) { 1610 new_state = 1; 1611 if (!(phydev->duplex)) 1612 tempval &= ~(MACCFG2_FDX); 1613 else 1614 tempval |= MACCFG2_FDX; 1615 ugeth->oldduplex = phydev->duplex; 1616 } 1617 1618 if (phydev->speed != ugeth->oldspeed) { 1619 new_state = 1; 1620 switch (phydev->speed) { 1621 case SPEED_1000: 1622 tempval = ((tempval & 1623 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1624 MACCFG2_INTERFACE_MODE_BYTE); 1625 break; 1626 case SPEED_100: 1627 case SPEED_10: 1628 tempval = ((tempval & 1629 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1630 MACCFG2_INTERFACE_MODE_NIBBLE); 1631 /* if reduced mode, re-set UPSMR.R10M */ 1632 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1633 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1634 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1635 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1636 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1637 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1638 if (phydev->speed == SPEED_10) 1639 upsmr |= UCC_GETH_UPSMR_R10M; 1640 else 1641 upsmr &= ~UCC_GETH_UPSMR_R10M; 1642 } 1643 break; 1644 default: 1645 if (netif_msg_link(ugeth)) 1646 ugeth_warn( 1647 "%s: Ack! Speed (%d) is not 10/100/1000!", 1648 dev->name, phydev->speed); 1649 break; 1650 } 1651 ugeth->oldspeed = phydev->speed; 1652 } 1653 1654 if (!ugeth->oldlink) { 1655 new_state = 1; 1656 ugeth->oldlink = 1; 1657 } 1658 1659 if (new_state) { 1660 /* 1661 * To change the MAC configuration we need to disable 1662 * the controller. To do so, we have to either grab 1663 * ugeth->lock, which is a bad idea since 'graceful 1664 * stop' commands might take quite a while, or we can 1665 * quiesce driver's activity. 1666 */ 1667 ugeth_quiesce(ugeth); 1668 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 1669 1670 out_be32(&ug_regs->maccfg2, tempval); 1671 out_be32(&uf_regs->upsmr, upsmr); 1672 1673 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 1674 ugeth_activate(ugeth); 1675 } 1676 } else if (ugeth->oldlink) { 1677 new_state = 1; 1678 ugeth->oldlink = 0; 1679 ugeth->oldspeed = 0; 1680 ugeth->oldduplex = -1; 1681 } 1682 1683 if (new_state && netif_msg_link(ugeth)) 1684 phy_print_status(phydev); 1685} 1686 1687/* Initialize TBI PHY interface for communicating with the 1688 * SERDES lynx PHY on the chip. We communicate with this PHY 1689 * through the MDIO bus on each controller, treating it as a 1690 * "normal" PHY at the address found in the UTBIPA register. We assume 1691 * that the UTBIPA register is valid. Either the MDIO bus code will set 1692 * it to a value that doesn't conflict with other PHYs on the bus, or the 1693 * value doesn't matter, as there are no other PHYs on the bus. 1694 */ 1695static void uec_configure_serdes(struct net_device *dev) 1696{ 1697 struct ucc_geth_private *ugeth = netdev_priv(dev); 1698 struct ucc_geth_info *ug_info = ugeth->ug_info; 1699 struct phy_device *tbiphy; 1700 1701 if (!ug_info->tbi_node) { 1702 dev_warn(&dev->dev, "SGMII mode requires that the device " 1703 "tree specify a tbi-handle\n"); 1704 return; 1705 } 1706 1707 tbiphy = of_phy_find_device(ug_info->tbi_node); 1708 if (!tbiphy) { 1709 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1710 return; 1711 } 1712 1713 /* 1714 * If the link is already up, we must already be ok, and don't need to 1715 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1716 * everything for us? Resetting it takes the link down and requires 1717 * several seconds for it to come back. 1718 */ 1719 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) 1720 return; 1721 1722 /* Single clk mode, mii mode off(for serdes communication) */ 1723 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); 1724 1725 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); 1726 1727 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); 1728} 1729 1730/* Configure the PHY for dev. 1731 * returns 0 if success. -1 if failure 1732 */ 1733static int init_phy(struct net_device *dev) 1734{ 1735 struct ucc_geth_private *priv = netdev_priv(dev); 1736 struct ucc_geth_info *ug_info = priv->ug_info; 1737 struct phy_device *phydev; 1738 1739 priv->oldlink = 0; 1740 priv->oldspeed = 0; 1741 priv->oldduplex = -1; 1742 1743 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, 1744 priv->phy_interface); 1745 if (!phydev) 1746 phydev = of_phy_connect_fixed_link(dev, &adjust_link, 1747 priv->phy_interface); 1748 if (!phydev) { 1749 dev_err(&dev->dev, "Could not attach to PHY\n"); 1750 return -ENODEV; 1751 } 1752 1753 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) 1754 uec_configure_serdes(dev); 1755 1756 phydev->supported &= (ADVERTISED_10baseT_Half | 1757 ADVERTISED_10baseT_Full | 1758 ADVERTISED_100baseT_Half | 1759 ADVERTISED_100baseT_Full); 1760 1761 if (priv->max_speed == SPEED_1000) 1762 phydev->supported |= ADVERTISED_1000baseT_Full; 1763 1764 phydev->advertising = phydev->supported; 1765 1766 priv->phydev = phydev; 1767 1768 return 0; 1769} 1770 1771static void ugeth_dump_regs(struct ucc_geth_private *ugeth) 1772{ 1773#ifdef DEBUG 1774 ucc_fast_dump_regs(ugeth->uccf); 1775 dump_regs(ugeth); 1776 dump_bds(ugeth); 1777#endif 1778} 1779 1780static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * 1781 ugeth, 1782 enum enet_addr_type 1783 enet_addr_type) 1784{ 1785 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 1786 struct ucc_fast_private *uccf; 1787 enum comm_dir comm_dir; 1788 struct list_head *p_lh; 1789 u16 i, num; 1790 u32 __iomem *addr_h; 1791 u32 __iomem *addr_l; 1792 u8 *p_counter; 1793 1794 uccf = ugeth->uccf; 1795 1796 p_82xx_addr_filt = 1797 (struct ucc_geth_82xx_address_filtering_pram __iomem *) 1798 ugeth->p_rx_glbl_pram->addressfiltering; 1799 1800 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { 1801 addr_h = &(p_82xx_addr_filt->gaddr_h); 1802 addr_l = &(p_82xx_addr_filt->gaddr_l); 1803 p_lh = &ugeth->group_hash_q; 1804 p_counter = &(ugeth->numGroupAddrInHash); 1805 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { 1806 addr_h = &(p_82xx_addr_filt->iaddr_h); 1807 addr_l = &(p_82xx_addr_filt->iaddr_l); 1808 p_lh = &ugeth->ind_hash_q; 1809 p_counter = &(ugeth->numIndAddrInHash); 1810 } else 1811 return -EINVAL; 1812 1813 comm_dir = 0; 1814 if (uccf->enabled_tx) 1815 comm_dir |= COMM_DIR_TX; 1816 if (uccf->enabled_rx) 1817 comm_dir |= COMM_DIR_RX; 1818 if (comm_dir) 1819 ugeth_disable(ugeth, comm_dir); 1820 1821 /* Clear the hash table. */ 1822 out_be32(addr_h, 0x00000000); 1823 out_be32(addr_l, 0x00000000); 1824 1825 if (!p_lh) 1826 return 0; 1827 1828 num = *p_counter; 1829 1830 /* Delete all remaining CQ elements */ 1831 for (i = 0; i < num; i++) 1832 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); 1833 1834 *p_counter = 0; 1835 1836 if (comm_dir) 1837 ugeth_enable(ugeth, comm_dir); 1838 1839 return 0; 1840} 1841 1842static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, 1843 u8 paddr_num) 1844{ 1845 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ 1846 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ 1847} 1848 1849static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 1850{ 1851 u16 i, j; 1852 u8 __iomem *bd; 1853 1854 if (!ugeth) 1855 return; 1856 1857 if (ugeth->uccf) { 1858 ucc_fast_free(ugeth->uccf); 1859 ugeth->uccf = NULL; 1860 } 1861 1862 if (ugeth->p_thread_data_tx) { 1863 qe_muram_free(ugeth->thread_dat_tx_offset); 1864 ugeth->p_thread_data_tx = NULL; 1865 } 1866 if (ugeth->p_thread_data_rx) { 1867 qe_muram_free(ugeth->thread_dat_rx_offset); 1868 ugeth->p_thread_data_rx = NULL; 1869 } 1870 if (ugeth->p_exf_glbl_param) { 1871 qe_muram_free(ugeth->exf_glbl_param_offset); 1872 ugeth->p_exf_glbl_param = NULL; 1873 } 1874 if (ugeth->p_rx_glbl_pram) { 1875 qe_muram_free(ugeth->rx_glbl_pram_offset); 1876 ugeth->p_rx_glbl_pram = NULL; 1877 } 1878 if (ugeth->p_tx_glbl_pram) { 1879 qe_muram_free(ugeth->tx_glbl_pram_offset); 1880 ugeth->p_tx_glbl_pram = NULL; 1881 } 1882 if (ugeth->p_send_q_mem_reg) { 1883 qe_muram_free(ugeth->send_q_mem_reg_offset); 1884 ugeth->p_send_q_mem_reg = NULL; 1885 } 1886 if (ugeth->p_scheduler) { 1887 qe_muram_free(ugeth->scheduler_offset); 1888 ugeth->p_scheduler = NULL; 1889 } 1890 if (ugeth->p_tx_fw_statistics_pram) { 1891 qe_muram_free(ugeth->tx_fw_statistics_pram_offset); 1892 ugeth->p_tx_fw_statistics_pram = NULL; 1893 } 1894 if (ugeth->p_rx_fw_statistics_pram) { 1895 qe_muram_free(ugeth->rx_fw_statistics_pram_offset); 1896 ugeth->p_rx_fw_statistics_pram = NULL; 1897 } 1898 if (ugeth->p_rx_irq_coalescing_tbl) { 1899 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); 1900 ugeth->p_rx_irq_coalescing_tbl = NULL; 1901 } 1902 if (ugeth->p_rx_bd_qs_tbl) { 1903 qe_muram_free(ugeth->rx_bd_qs_tbl_offset); 1904 ugeth->p_rx_bd_qs_tbl = NULL; 1905 } 1906 if (ugeth->p_init_enet_param_shadow) { 1907 return_init_enet_entries(ugeth, 1908 &(ugeth->p_init_enet_param_shadow-> 1909 rxthread[0]), 1910 ENET_INIT_PARAM_MAX_ENTRIES_RX, 1911 ugeth->ug_info->riscRx, 1); 1912 return_init_enet_entries(ugeth, 1913 &(ugeth->p_init_enet_param_shadow-> 1914 txthread[0]), 1915 ENET_INIT_PARAM_MAX_ENTRIES_TX, 1916 ugeth->ug_info->riscTx, 0); 1917 kfree(ugeth->p_init_enet_param_shadow); 1918 ugeth->p_init_enet_param_shadow = NULL; 1919 } 1920 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 1921 bd = ugeth->p_tx_bd_ring[i]; 1922 if (!bd) 1923 continue; 1924 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 1925 if (ugeth->tx_skbuff[i][j]) { 1926 dma_unmap_single(ugeth->dev, 1927 in_be32(&((struct qe_bd __iomem *)bd)->buf), 1928 (in_be32((u32 __iomem *)bd) & 1929 BD_LENGTH_MASK), 1930 DMA_TO_DEVICE); 1931 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); 1932 ugeth->tx_skbuff[i][j] = NULL; 1933 } 1934 } 1935 1936 kfree(ugeth->tx_skbuff[i]); 1937 1938 if (ugeth->p_tx_bd_ring[i]) { 1939 if (ugeth->ug_info->uf_info.bd_mem_part == 1940 MEM_PART_SYSTEM) 1941 kfree((void *)ugeth->tx_bd_ring_offset[i]); 1942 else if (ugeth->ug_info->uf_info.bd_mem_part == 1943 MEM_PART_MURAM) 1944 qe_muram_free(ugeth->tx_bd_ring_offset[i]); 1945 ugeth->p_tx_bd_ring[i] = NULL; 1946 } 1947 } 1948 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 1949 if (ugeth->p_rx_bd_ring[i]) { 1950 /* Return existing data buffers in ring */ 1951 bd = ugeth->p_rx_bd_ring[i]; 1952 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 1953 if (ugeth->rx_skbuff[i][j]) { 1954 dma_unmap_single(ugeth->dev, 1955 in_be32(&((struct qe_bd __iomem *)bd)->buf), 1956 ugeth->ug_info-> 1957 uf_info.max_rx_buf_length + 1958 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 1959 DMA_FROM_DEVICE); 1960 dev_kfree_skb_any( 1961 ugeth->rx_skbuff[i][j]); 1962 ugeth->rx_skbuff[i][j] = NULL; 1963 } 1964 bd += sizeof(struct qe_bd); 1965 } 1966 1967 kfree(ugeth->rx_skbuff[i]); 1968 1969 if (ugeth->ug_info->uf_info.bd_mem_part == 1970 MEM_PART_SYSTEM) 1971 kfree((void *)ugeth->rx_bd_ring_offset[i]); 1972 else if (ugeth->ug_info->uf_info.bd_mem_part == 1973 MEM_PART_MURAM) 1974 qe_muram_free(ugeth->rx_bd_ring_offset[i]); 1975 ugeth->p_rx_bd_ring[i] = NULL; 1976 } 1977 } 1978 while (!list_empty(&ugeth->group_hash_q)) 1979 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 1980 (dequeue(&ugeth->group_hash_q))); 1981 while (!list_empty(&ugeth->ind_hash_q)) 1982 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 1983 (dequeue(&ugeth->ind_hash_q))); 1984 if (ugeth->ug_regs) { 1985 iounmap(ugeth->ug_regs); 1986 ugeth->ug_regs = NULL; 1987 } 1988 1989 skb_queue_purge(&ugeth->rx_recycle); 1990} 1991 1992static void ucc_geth_set_multi(struct net_device *dev) 1993{ 1994 struct ucc_geth_private *ugeth; 1995 struct dev_mc_list *dmi; 1996 struct ucc_fast __iomem *uf_regs; 1997 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 1998 int i; 1999 2000 ugeth = netdev_priv(dev); 2001 2002 uf_regs = ugeth->uccf->uf_regs; 2003 2004 if (dev->flags & IFF_PROMISC) { 2005 setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); 2006 } else { 2007 clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); 2008 2009 p_82xx_addr_filt = 2010 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 2011 p_rx_glbl_pram->addressfiltering; 2012 2013 if (dev->flags & IFF_ALLMULTI) { 2014 /* Catch all multicast addresses, so set the 2015 * filter to all 1's. 2016 */ 2017 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); 2018 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); 2019 } else { 2020 /* Clear filter and add the addresses in the list. 2021 */ 2022 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2023 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2024 2025 dmi = dev->mc_list; 2026 2027 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { 2028 2029 /* Only support group multicast for now. 2030 */ 2031 if (!(dmi->dmi_addr[0] & 1)) 2032 continue; 2033 2034 /* Ask CPM to run CRC and set bit in 2035 * filter mask. 2036 */ 2037 hw_add_addr_in_hash(ugeth, dmi->dmi_addr); 2038 } 2039 } 2040 } 2041} 2042 2043static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2044{ 2045 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; 2046 struct phy_device *phydev = ugeth->phydev; 2047 2048 ugeth_vdbg("%s: IN", __func__); 2049 2050 /* Disable the controller */ 2051 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2052 2053 /* Tell the kernel the link is down */ 2054 phy_stop(phydev); 2055 2056 /* Mask all interrupts */ 2057 out_be32(ugeth->uccf->p_uccm, 0x00000000); 2058 2059 /* Clear all interrupts */ 2060 out_be32(ugeth->uccf->p_ucce, 0xffffffff); 2061 2062 /* Disable Rx and Tx */ 2063 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2064 2065 phy_disconnect(ugeth->phydev); 2066 ugeth->phydev = NULL; 2067 2068 ucc_geth_memclean(ugeth); 2069} 2070 2071static int ucc_struct_init(struct ucc_geth_private *ugeth) 2072{ 2073 struct ucc_geth_info *ug_info; 2074 struct ucc_fast_info *uf_info; 2075 int i; 2076 2077 ug_info = ugeth->ug_info; 2078 uf_info = &ug_info->uf_info; 2079 2080 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || 2081 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2082 if (netif_msg_probe(ugeth)) 2083 ugeth_err("%s: Bad memory partition value.", 2084 __func__); 2085 return -EINVAL; 2086 } 2087 2088 /* Rx BD lengths */ 2089 for (i = 0; i < ug_info->numQueuesRx; i++) { 2090 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || 2091 (ug_info->bdRingLenRx[i] % 2092 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { 2093 if (netif_msg_probe(ugeth)) 2094 ugeth_err 2095 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2096 __func__); 2097 return -EINVAL; 2098 } 2099 } 2100 2101 /* Tx BD lengths */ 2102 for (i = 0; i < ug_info->numQueuesTx; i++) { 2103 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { 2104 if (netif_msg_probe(ugeth)) 2105 ugeth_err 2106 ("%s: Tx BD ring length must be no smaller than 2.", 2107 __func__); 2108 return -EINVAL; 2109 } 2110 } 2111 2112 /* mrblr */ 2113 if ((uf_info->max_rx_buf_length == 0) || 2114 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { 2115 if (netif_msg_probe(ugeth)) 2116 ugeth_err 2117 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2118 __func__); 2119 return -EINVAL; 2120 } 2121 2122 /* num Tx queues */ 2123 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2124 if (netif_msg_probe(ugeth)) 2125 ugeth_err("%s: number of tx queues too large.", __func__); 2126 return -EINVAL; 2127 } 2128 2129 /* num Rx queues */ 2130 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2131 if (netif_msg_probe(ugeth)) 2132 ugeth_err("%s: number of rx queues too large.", __func__); 2133 return -EINVAL; 2134 } 2135 2136 /* l2qt */ 2137 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { 2138 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { 2139 if (netif_msg_probe(ugeth)) 2140 ugeth_err 2141 ("%s: VLAN priority table entry must not be" 2142 " larger than number of Rx queues.", 2143 __func__); 2144 return -EINVAL; 2145 } 2146 } 2147 2148 /* l3qt */ 2149 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { 2150 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { 2151 if (netif_msg_probe(ugeth)) 2152 ugeth_err 2153 ("%s: IP priority table entry must not be" 2154 " larger than number of Rx queues.", 2155 __func__); 2156 return -EINVAL; 2157 } 2158 } 2159 2160 if (ug_info->cam && !ug_info->ecamptr) { 2161 if (netif_msg_probe(ugeth)) 2162 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2163 __func__); 2164 return -EINVAL; 2165 } 2166 2167 if ((ug_info->numStationAddresses != 2168 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && 2169 ug_info->rxExtendedFiltering) { 2170 if (netif_msg_probe(ugeth)) 2171 ugeth_err("%s: Number of station addresses greater than 1 " 2172 "not allowed in extended parsing mode.", 2173 __func__); 2174 return -EINVAL; 2175 } 2176 2177 /* Generate uccm_mask for receive */ 2178 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ 2179 for (i = 0; i < ug_info->numQueuesRx; i++) 2180 uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); 2181 2182 for (i = 0; i < ug_info->numQueuesTx; i++) 2183 uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); 2184 /* Initialize the general fast UCC block. */ 2185 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2186 if (netif_msg_probe(ugeth)) 2187 ugeth_err("%s: Failed to init uccf.", __func__); 2188 return -ENOMEM; 2189 } 2190 2191 /* read the number of risc engines, update the riscTx and riscRx 2192 * if there are 4 riscs in QE 2193 */ 2194 if (qe_get_num_of_risc() == 4) { 2195 ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; 2196 ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; 2197 } 2198 2199 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); 2200 if (!ugeth->ug_regs) { 2201 if (netif_msg_probe(ugeth)) 2202 ugeth_err("%s: Failed to ioremap regs.", __func__); 2203 return -ENOMEM; 2204 } 2205 2206 skb_queue_head_init(&ugeth->rx_recycle); 2207 2208 return 0; 2209} 2210 2211static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2212{ 2213 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2214 struct ucc_geth_init_pram __iomem *p_init_enet_pram; 2215 struct ucc_fast_private *uccf; 2216 struct ucc_geth_info *ug_info; 2217 struct ucc_fast_info *uf_info; 2218 struct ucc_fast __iomem *uf_regs; 2219 struct ucc_geth __iomem *ug_regs; 2220 int ret_val = -EINVAL; 2221 u32 remoder = UCC_GETH_REMODER_INIT; 2222 u32 init_enet_pram_offset, cecr_subblock, command; 2223 u32 ifstat, i, j, size, l2qt, l3qt, length; 2224 u16 temoder = UCC_GETH_TEMODER_INIT; 2225 u16 test; 2226 u8 function_code = 0; 2227 u8 __iomem *bd; 2228 u8 __iomem *endOfRing; 2229 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2230 2231 ugeth_vdbg("%s: IN", __func__); 2232 uccf = ugeth->uccf; 2233 ug_info = ugeth->ug_info; 2234 uf_info = &ug_info->uf_info; 2235 uf_regs = uccf->uf_regs; 2236 ug_regs = ugeth->ug_regs; 2237 2238 switch (ug_info->numThreadsRx) { 2239 case UCC_GETH_NUM_OF_THREADS_1: 2240 numThreadsRxNumerical = 1; 2241 break; 2242 case UCC_GETH_NUM_OF_THREADS_2: 2243 numThreadsRxNumerical = 2; 2244 break; 2245 case UCC_GETH_NUM_OF_THREADS_4: 2246 numThreadsRxNumerical = 4; 2247 break; 2248 case UCC_GETH_NUM_OF_THREADS_6: 2249 numThreadsRxNumerical = 6; 2250 break; 2251 case UCC_GETH_NUM_OF_THREADS_8: 2252 numThreadsRxNumerical = 8; 2253 break; 2254 default: 2255 if (netif_msg_ifup(ugeth)) 2256 ugeth_err("%s: Bad number of Rx threads value.", 2257 __func__); 2258 return -EINVAL; 2259 break; 2260 } 2261 2262 switch (ug_info->numThreadsTx) { 2263 case UCC_GETH_NUM_OF_THREADS_1: 2264 numThreadsTxNumerical = 1; 2265 break; 2266 case UCC_GETH_NUM_OF_THREADS_2: 2267 numThreadsTxNumerical = 2; 2268 break; 2269 case UCC_GETH_NUM_OF_THREADS_4: 2270 numThreadsTxNumerical = 4; 2271 break; 2272 case UCC_GETH_NUM_OF_THREADS_6: 2273 numThreadsTxNumerical = 6; 2274 break; 2275 case UCC_GETH_NUM_OF_THREADS_8: 2276 numThreadsTxNumerical = 8; 2277 break; 2278 default: 2279 if (netif_msg_ifup(ugeth)) 2280 ugeth_err("%s: Bad number of Tx threads value.", 2281 __func__); 2282 return -EINVAL; 2283 break; 2284 } 2285 2286 /* Calculate rx_extended_features */ 2287 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || 2288 ug_info->ipAddressAlignment || 2289 (ug_info->numStationAddresses != 2290 UCC_GETH_NUM_OF_STATION_ADDRESSES_1); 2291 2292 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || 2293 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || 2294 (ug_info->vlanOperationNonTagged != 2295 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); 2296 2297 init_default_reg_vals(&uf_regs->upsmr, 2298 &ug_regs->maccfg1, &ug_regs->maccfg2); 2299 2300 /* Set UPSMR */ 2301 /* For more details see the hardware spec. */ 2302 init_rx_parameters(ug_info->bro, 2303 ug_info->rsh, ug_info->pro, &uf_regs->upsmr); 2304 2305 /* We're going to ignore other registers for now, */ 2306 /* except as needed to get up and running */ 2307 2308 /* Set MACCFG1 */ 2309 /* For more details see the hardware spec. */ 2310 init_flow_control_params(ug_info->aufc, 2311 ug_info->receiveFlowControl, 2312 ug_info->transmitFlowControl, 2313 ug_info->pausePeriod, 2314 ug_info->extensionField, 2315 &uf_regs->upsmr, 2316 &ug_regs->uempr, &ug_regs->maccfg1); 2317 2318 setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2319 2320 /* Set IPGIFG */ 2321 /* For more details see the hardware spec. */ 2322 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, 2323 ug_info->nonBackToBackIfgPart2, 2324 ug_info-> 2325 miminumInterFrameGapEnforcement, 2326 ug_info->backToBackInterFrameGap, 2327 &ug_regs->ipgifg); 2328 if (ret_val != 0) { 2329 if (netif_msg_ifup(ugeth)) 2330 ugeth_err("%s: IPGIFG initialization parameter too large.", 2331 __func__); 2332 return ret_val; 2333 } 2334 2335 /* Set HAFDUP */ 2336 /* For more details see the hardware spec. */ 2337 ret_val = init_half_duplex_params(ug_info->altBeb, 2338 ug_info->backPressureNoBackoff, 2339 ug_info->noBackoff, 2340 ug_info->excessDefer, 2341 ug_info->altBebTruncation, 2342 ug_info->maxRetransmission, 2343 ug_info->collisionWindow, 2344 &ug_regs->hafdup); 2345 if (ret_val != 0) { 2346 if (netif_msg_ifup(ugeth)) 2347 ugeth_err("%s: Half Duplex initialization parameter too large.", 2348 __func__); 2349 return ret_val; 2350 } 2351 2352 /* Set IFSTAT */ 2353 /* For more details see the hardware spec. */ 2354 /* Read only - resets upon read */ 2355 ifstat = in_be32(&ug_regs->ifstat); 2356 2357 /* Clear UEMPR */ 2358 /* For more details see the hardware spec. */ 2359 out_be32(&ug_regs->uempr, 0); 2360 2361 /* Set UESCR */ 2362 /* For more details see the hardware spec. */ 2363 init_hw_statistics_gathering_mode((ug_info->statisticsMode & 2364 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), 2365 0, &uf_regs->upsmr, &ug_regs->uescr); 2366 2367 /* Allocate Tx bds */ 2368 for (j = 0; j < ug_info->numQueuesTx; j++) { 2369 /* Allocate in multiple of 2370 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, 2371 according to spec */ 2372 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) 2373 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2374 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2375 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % 2376 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2377 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2378 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2379 u32 align = 4; 2380 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2381 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2382 ugeth->tx_bd_ring_offset[j] = 2383 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2384 2385 if (ugeth->tx_bd_ring_offset[j] != 0) 2386 ugeth->p_tx_bd_ring[j] = 2387 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + 2388 align) & ~(align - 1)); 2389 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2390 ugeth->tx_bd_ring_offset[j] = 2391 qe_muram_alloc(length, 2392 UCC_GETH_TX_BD_RING_ALIGNMENT); 2393 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2394 ugeth->p_tx_bd_ring[j] = 2395 (u8 __iomem *) qe_muram_addr(ugeth-> 2396 tx_bd_ring_offset[j]); 2397 } 2398 if (!ugeth->p_tx_bd_ring[j]) { 2399 if (netif_msg_ifup(ugeth)) 2400 ugeth_err 2401 ("%s: Can not allocate memory for Tx bd rings.", 2402 __func__); 2403 return -ENOMEM; 2404 } 2405 /* Zero unused end of bd ring, according to spec */ 2406 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + 2407 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, 2408 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2409 } 2410 2411 /* Allocate Rx bds */ 2412 for (j = 0; j < ug_info->numQueuesRx; j++) { 2413 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); 2414 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2415 u32 align = 4; 2416 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2417 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2418 ugeth->rx_bd_ring_offset[j] = 2419 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2420 if (ugeth->rx_bd_ring_offset[j] != 0) 2421 ugeth->p_rx_bd_ring[j] = 2422 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + 2423 align) & ~(align - 1)); 2424 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2425 ugeth->rx_bd_ring_offset[j] = 2426 qe_muram_alloc(length, 2427 UCC_GETH_RX_BD_RING_ALIGNMENT); 2428 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2429 ugeth->p_rx_bd_ring[j] = 2430 (u8 __iomem *) qe_muram_addr(ugeth-> 2431 rx_bd_ring_offset[j]); 2432 } 2433 if (!ugeth->p_rx_bd_ring[j]) { 2434 if (netif_msg_ifup(ugeth)) 2435 ugeth_err 2436 ("%s: Can not allocate memory for Rx bd rings.", 2437 __func__); 2438 return -ENOMEM; 2439 } 2440 } 2441 2442 /* Init Tx bds */ 2443 for (j = 0; j < ug_info->numQueuesTx; j++) { 2444 /* Setup the skbuff rings */ 2445 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2446 ugeth->ug_info->bdRingLenTx[j], 2447 GFP_KERNEL); 2448 2449 if (ugeth->tx_skbuff[j] == NULL) { 2450 if (netif_msg_ifup(ugeth)) 2451 ugeth_err("%s: Could not allocate tx_skbuff", 2452 __func__); 2453 return -ENOMEM; 2454 } 2455 2456 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) 2457 ugeth->tx_skbuff[j][i] = NULL; 2458 2459 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; 2460 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2461 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { 2462 /* clear bd buffer */ 2463 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2464 /* set bd status and length */ 2465 out_be32((u32 __iomem *)bd, 0); 2466 bd += sizeof(struct qe_bd); 2467 } 2468 bd -= sizeof(struct qe_bd); 2469 /* set bd status and length */ 2470 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ 2471 } 2472 2473 /* Init Rx bds */ 2474 for (j = 0; j < ug_info->numQueuesRx; j++) { 2475 /* Setup the skbuff rings */ 2476 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2477 ugeth->ug_info->bdRingLenRx[j], 2478 GFP_KERNEL); 2479 2480 if (ugeth->rx_skbuff[j] == NULL) { 2481 if (netif_msg_ifup(ugeth)) 2482 ugeth_err("%s: Could not allocate rx_skbuff", 2483 __func__); 2484 return -ENOMEM; 2485 } 2486 2487 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) 2488 ugeth->rx_skbuff[j][i] = NULL; 2489 2490 ugeth->skb_currx[j] = 0; 2491 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; 2492 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2493 /* set bd status and length */ 2494 out_be32((u32 __iomem *)bd, R_I); 2495 /* clear bd buffer */ 2496 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2497 bd += sizeof(struct qe_bd); 2498 } 2499 bd -= sizeof(struct qe_bd); 2500 /* set bd status and length */ 2501 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ 2502 } 2503 2504 /* 2505 * Global PRAM 2506 */ 2507 /* Tx global PRAM */ 2508 /* Allocate global tx parameter RAM page */ 2509 ugeth->tx_glbl_pram_offset = 2510 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), 2511 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2512 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { 2513 if (netif_msg_ifup(ugeth)) 2514 ugeth_err 2515 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2516 __func__); 2517 return -ENOMEM; 2518 } 2519 ugeth->p_tx_glbl_pram = 2520 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> 2521 tx_glbl_pram_offset); 2522 /* Zero out p_tx_glbl_pram */ 2523 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 2524 2525 /* Fill global PRAM */ 2526 2527 /* TQPTR */ 2528 /* Size varies with number of Tx threads */ 2529 ugeth->thread_dat_tx_offset = 2530 qe_muram_alloc(numThreadsTxNumerical * 2531 sizeof(struct ucc_geth_thread_data_tx) + 2532 32 * (numThreadsTxNumerical == 1), 2533 UCC_GETH_THREAD_DATA_ALIGNMENT); 2534 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { 2535 if (netif_msg_ifup(ugeth)) 2536 ugeth_err 2537 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2538 __func__); 2539 return -ENOMEM; 2540 } 2541 2542 ugeth->p_thread_data_tx = 2543 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> 2544 thread_dat_tx_offset); 2545 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 2546 2547 /* vtagtable */ 2548 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) 2549 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], 2550 ug_info->vtagtable[i]); 2551 2552 /* iphoffset */ 2553 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) 2554 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], 2555 ug_info->iphoffset[i]); 2556 2557 /* SQPTR */ 2558 /* Size varies with number of Tx queues */ 2559 ugeth->send_q_mem_reg_offset = 2560 qe_muram_alloc(ug_info->numQueuesTx * 2561 sizeof(struct ucc_geth_send_queue_qd), 2562 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 2563 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { 2564 if (netif_msg_ifup(ugeth)) 2565 ugeth_err 2566 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2567 __func__); 2568 return -ENOMEM; 2569 } 2570 2571 ugeth->p_send_q_mem_reg = 2572 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> 2573 send_q_mem_reg_offset); 2574 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 2575 2576 /* Setup the table */ 2577 /* Assume BD rings are already established */ 2578 for (i = 0; i < ug_info->numQueuesTx; i++) { 2579 endOfRing = 2580 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - 2581 1) * sizeof(struct qe_bd); 2582 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2583 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2584 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); 2585 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2586 last_bd_completed_address, 2587 (u32) virt_to_phys(endOfRing)); 2588 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2589 MEM_PART_MURAM) { 2590 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2591 (u32) immrbar_virt_to_phys(ugeth-> 2592 p_tx_bd_ring[i])); 2593 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2594 last_bd_completed_address, 2595 (u32) immrbar_virt_to_phys(endOfRing)); 2596 } 2597 } 2598 2599 /* schedulerbasepointer */ 2600 2601 if (ug_info->numQueuesTx > 1) { 2602 /* scheduler exists only if more than 1 tx queue */ 2603 ugeth->scheduler_offset = 2604 qe_muram_alloc(sizeof(struct ucc_geth_scheduler), 2605 UCC_GETH_SCHEDULER_ALIGNMENT); 2606 if (IS_ERR_VALUE(ugeth->scheduler_offset)) { 2607 if (netif_msg_ifup(ugeth)) 2608 ugeth_err 2609 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2610 __func__); 2611 return -ENOMEM; 2612 } 2613 2614 ugeth->p_scheduler = 2615 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> 2616 scheduler_offset); 2617 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 2618 ugeth->scheduler_offset); 2619 /* Zero out p_scheduler */ 2620 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 2621 2622 /* Set values in scheduler */ 2623 out_be32(&ugeth->p_scheduler->mblinterval, 2624 ug_info->mblinterval); 2625 out_be16(&ugeth->p_scheduler->nortsrbytetime, 2626 ug_info->nortsrbytetime); 2627 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); 2628 out_8(&ugeth->p_scheduler->strictpriorityq, 2629 ug_info->strictpriorityq); 2630 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); 2631 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); 2632 for (i = 0; i < NUM_TX_QUEUES; i++) 2633 out_8(&ugeth->p_scheduler->weightfactor[i], 2634 ug_info->weightfactor[i]); 2635 2636 /* Set pointers to cpucount registers in scheduler */ 2637 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); 2638 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); 2639 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); 2640 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); 2641 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); 2642 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); 2643 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); 2644 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); 2645 } 2646 2647 /* schedulerbasepointer */ 2648 /* TxRMON_PTR (statistics) */ 2649 if (ug_info-> 2650 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 2651 ugeth->tx_fw_statistics_pram_offset = 2652 qe_muram_alloc(sizeof 2653 (struct ucc_geth_tx_firmware_statistics_pram), 2654 UCC_GETH_TX_STATISTICS_ALIGNMENT); 2655 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { 2656 if (netif_msg_ifup(ugeth)) 2657 ugeth_err 2658 ("%s: Can not allocate DPRAM memory for" 2659 " p_tx_fw_statistics_pram.", 2660 __func__); 2661 return -ENOMEM; 2662 } 2663 ugeth->p_tx_fw_statistics_pram = 2664 (struct ucc_geth_tx_firmware_statistics_pram __iomem *) 2665 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 2666 /* Zero out p_tx_fw_statistics_pram */ 2667 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, 2668 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 2669 } 2670 2671 /* temoder */ 2672 /* Already has speed set */ 2673 2674 if (ug_info->numQueuesTx > 1) 2675 temoder |= TEMODER_SCHEDULER_ENABLE; 2676 if (ug_info->ipCheckSumGenerate) 2677 temoder |= TEMODER_IP_CHECKSUM_GENERATE; 2678 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); 2679 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); 2680 2681 test = in_be16(&ugeth->p_tx_glbl_pram->temoder); 2682 2683 /* Function code register value to be used later */ 2684 function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; 2685 /* Required for QE */ 2686 2687 /* function code register */ 2688 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); 2689 2690 /* Rx global PRAM */ 2691 /* Allocate global rx parameter RAM page */ 2692 ugeth->rx_glbl_pram_offset = 2693 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), 2694 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 2695 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { 2696 if (netif_msg_ifup(ugeth)) 2697 ugeth_err 2698 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2699 __func__); 2700 return -ENOMEM; 2701 } 2702 ugeth->p_rx_glbl_pram = 2703 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> 2704 rx_glbl_pram_offset); 2705 /* Zero out p_rx_glbl_pram */ 2706 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 2707 2708 /* Fill global PRAM */ 2709 2710 /* RQPTR */ 2711 /* Size varies with number of Rx threads */ 2712 ugeth->thread_dat_rx_offset = 2713 qe_muram_alloc(numThreadsRxNumerical * 2714 sizeof(struct ucc_geth_thread_data_rx), 2715 UCC_GETH_THREAD_DATA_ALIGNMENT); 2716 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { 2717 if (netif_msg_ifup(ugeth)) 2718 ugeth_err 2719 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2720 __func__); 2721 return -ENOMEM; 2722 } 2723 2724 ugeth->p_thread_data_rx = 2725 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> 2726 thread_dat_rx_offset); 2727 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 2728 2729 /* typeorlen */ 2730 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); 2731 2732 /* rxrmonbaseptr (statistics) */ 2733 if (ug_info-> 2734 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 2735 ugeth->rx_fw_statistics_pram_offset = 2736 qe_muram_alloc(sizeof 2737 (struct ucc_geth_rx_firmware_statistics_pram), 2738 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2739 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2740 if (netif_msg_ifup(ugeth)) 2741 ugeth_err 2742 ("%s: Can not allocate DPRAM memory for" 2743 " p_rx_fw_statistics_pram.", __func__); 2744 return -ENOMEM; 2745 } 2746 ugeth->p_rx_fw_statistics_pram = 2747 (struct ucc_geth_rx_firmware_statistics_pram __iomem *) 2748 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 2749 /* Zero out p_rx_fw_statistics_pram */ 2750 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, 2751 sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 2752 } 2753 2754 /* intCoalescingPtr */ 2755 2756 /* Size varies with number of Rx queues */ 2757 ugeth->rx_irq_coalescing_tbl_offset = 2758 qe_muram_alloc(ug_info->numQueuesRx * 2759 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) 2760 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 2761 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 2762 if (netif_msg_ifup(ugeth)) 2763 ugeth_err 2764 ("%s: Can not allocate DPRAM memory for" 2765 " p_rx_irq_coalescing_tbl.", __func__); 2766 return -ENOMEM; 2767 } 2768 2769 ugeth->p_rx_irq_coalescing_tbl = 2770 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) 2771 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 2772 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 2773 ugeth->rx_irq_coalescing_tbl_offset); 2774 2775 /* Fill interrupt coalescing table */ 2776 for (i = 0; i < ug_info->numQueuesRx; i++) { 2777 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2778 interruptcoalescingmaxvalue, 2779 ug_info->interruptcoalescingmaxvalue[i]); 2780 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2781 interruptcoalescingcounter, 2782 ug_info->interruptcoalescingmaxvalue[i]); 2783 } 2784 2785 /* MRBLR */ 2786 init_max_rx_buff_len(uf_info->max_rx_buf_length, 2787 &ugeth->p_rx_glbl_pram->mrblr); 2788 /* MFLR */ 2789 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); 2790 /* MINFLR */ 2791 init_min_frame_len(ug_info->minFrameLength, 2792 &ugeth->p_rx_glbl_pram->minflr, 2793 &ugeth->p_rx_glbl_pram->mrblr); 2794 /* MAXD1 */ 2795 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); 2796 /* MAXD2 */ 2797 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); 2798 2799 /* l2qt */ 2800 l2qt = 0; 2801 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) 2802 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); 2803 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); 2804 2805 /* l3qt */ 2806 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { 2807 l3qt = 0; 2808 for (i = 0; i < 8; i++) 2809 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); 2810 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); 2811 } 2812 2813 /* vlantype */ 2814 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); 2815 2816 /* vlantci */ 2817 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); 2818 2819 /* ecamptr */ 2820 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); 2821 2822 /* RBDQPTR */ 2823 /* Size varies with number of Rx queues */ 2824 ugeth->rx_bd_qs_tbl_offset = 2825 qe_muram_alloc(ug_info->numQueuesRx * 2826 (sizeof(struct ucc_geth_rx_bd_queues_entry) + 2827 sizeof(struct ucc_geth_rx_prefetched_bds)), 2828 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 2829 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 2830 if (netif_msg_ifup(ugeth)) 2831 ugeth_err 2832 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 2833 __func__); 2834 return -ENOMEM; 2835 } 2836 2837 ugeth->p_rx_bd_qs_tbl = 2838 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> 2839 rx_bd_qs_tbl_offset); 2840 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 2841 /* Zero out p_rx_bd_qs_tbl */ 2842 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, 2843 0, 2844 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 2845 sizeof(struct ucc_geth_rx_prefetched_bds))); 2846 2847 /* Setup the table */ 2848 /* Assume BD rings are already established */ 2849 for (i = 0; i < ug_info->numQueuesRx; i++) { 2850 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2851 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 2852 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); 2853 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2854 MEM_PART_MURAM) { 2855 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 2856 (u32) immrbar_virt_to_phys(ugeth-> 2857 p_rx_bd_ring[i])); 2858 } 2859 /* rest of fields handled by QE */ 2860 } 2861 2862 /* remoder */ 2863 /* Already has speed set */ 2864 2865 if (ugeth->rx_extended_features) 2866 remoder |= REMODER_RX_EXTENDED_FEATURES; 2867 if (ug_info->rxExtendedFiltering) 2868 remoder |= REMODER_RX_EXTENDED_FILTERING; 2869 if (ug_info->dynamicMaxFrameLength) 2870 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; 2871 if (ug_info->dynamicMinFrameLength) 2872 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; 2873 remoder |= 2874 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; 2875 remoder |= 2876 ug_info-> 2877 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; 2878 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; 2879 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); 2880 if (ug_info->ipCheckSumCheck) 2881 remoder |= REMODER_IP_CHECKSUM_CHECK; 2882 if (ug_info->ipAddressAlignment) 2883 remoder |= REMODER_IP_ADDRESS_ALIGNMENT; 2884 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); 2885 2886 /* Note that this function must be called */ 2887 /* ONLY AFTER p_tx_fw_statistics_pram */ 2888 /* andp_UccGethRxFirmwareStatisticsPram are allocated ! */ 2889 init_firmware_statistics_gathering_mode((ug_info-> 2890 statisticsMode & 2891 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), 2892 (ug_info->statisticsMode & 2893 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), 2894 &ugeth->p_tx_glbl_pram->txrmonbaseptr, 2895 ugeth->tx_fw_statistics_pram_offset, 2896 &ugeth->p_rx_glbl_pram->rxrmonbaseptr, 2897 ugeth->rx_fw_statistics_pram_offset, 2898 &ugeth->p_tx_glbl_pram->temoder, 2899 &ugeth->p_rx_glbl_pram->remoder); 2900 2901 /* function code register */ 2902 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); 2903 2904 /* initialize extended filtering */ 2905 if (ug_info->rxExtendedFiltering) { 2906 if (!ug_info->extendedFilteringChainPointer) { 2907 if (netif_msg_ifup(ugeth)) 2908 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 2909 __func__); 2910 return -EINVAL; 2911 } 2912 2913 /* Allocate memory for extended filtering Mode Global 2914 Parameters */ 2915 ugeth->exf_glbl_param_offset = 2916 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 2917 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 2918 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 2919 if (netif_msg_ifup(ugeth)) 2920 ugeth_err 2921 ("%s: Can not allocate DPRAM memory for" 2922 " p_exf_glbl_param.", __func__); 2923 return -ENOMEM; 2924 } 2925 2926 ugeth->p_exf_glbl_param = 2927 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> 2928 exf_glbl_param_offset); 2929 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 2930 ugeth->exf_glbl_param_offset); 2931 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, 2932 (u32) ug_info->extendedFilteringChainPointer); 2933 2934 } else { /* initialize 82xx style address filtering */ 2935 2936 /* Init individual address recognition registers to disabled */ 2937 2938 for (j = 0; j < NUM_OF_PADDRS; j++) 2939 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); 2940 2941 p_82xx_addr_filt = 2942 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 2943 p_rx_glbl_pram->addressfiltering; 2944 2945 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 2946 ENET_ADDR_TYPE_GROUP); 2947 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 2948 ENET_ADDR_TYPE_INDIVIDUAL); 2949 } 2950 2951 /* 2952 * Initialize UCC at QE level 2953 */ 2954 2955 command = QE_INIT_TX_RX; 2956 2957 /* Allocate shadow InitEnet command parameter structure. 2958 * This is needed because after the InitEnet command is executed, 2959 * the structure in DPRAM is released, because DPRAM is a premium 2960 * resource. 2961 * This shadow structure keeps a copy of what was done so that the 2962 * allocated resources can be released when the channel is freed. 2963 */ 2964 if (!(ugeth->p_init_enet_param_shadow = 2965 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { 2966 if (netif_msg_ifup(ugeth)) 2967 ugeth_err 2968 ("%s: Can not allocate memory for" 2969 " p_UccInitEnetParamShadows.", __func__); 2970 return -ENOMEM; 2971 } 2972 /* Zero out *p_init_enet_param_shadow */ 2973 memset((char *)ugeth->p_init_enet_param_shadow, 2974 0, sizeof(struct ucc_geth_init_pram)); 2975 2976 /* Fill shadow InitEnet command parameter structure */ 2977 2978 ugeth->p_init_enet_param_shadow->resinit1 = 2979 ENET_INIT_PARAM_MAGIC_RES_INIT1; 2980 ugeth->p_init_enet_param_shadow->resinit2 = 2981 ENET_INIT_PARAM_MAGIC_RES_INIT2; 2982 ugeth->p_init_enet_param_shadow->resinit3 = 2983 ENET_INIT_PARAM_MAGIC_RES_INIT3; 2984 ugeth->p_init_enet_param_shadow->resinit4 = 2985 ENET_INIT_PARAM_MAGIC_RES_INIT4; 2986 ugeth->p_init_enet_param_shadow->resinit5 = 2987 ENET_INIT_PARAM_MAGIC_RES_INIT5; 2988 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 2989 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; 2990 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 2991 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; 2992 2993 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 2994 ugeth->rx_glbl_pram_offset | ug_info->riscRx; 2995 if ((ug_info->largestexternallookupkeysize != 2996 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && 2997 (ug_info->largestexternallookupkeysize != 2998 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && 2999 (ug_info->largestexternallookupkeysize != 3000 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3001 if (netif_msg_ifup(ugeth)) 3002 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3003 __func__); 3004 return -EINVAL; 3005 } 3006 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 3007 ug_info->largestexternallookupkeysize; 3008 size = sizeof(struct ucc_geth_thread_rx_pram); 3009 if (ug_info->rxExtendedFiltering) { 3010 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 3011 if (ug_info->largestexternallookupkeysize == 3012 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3013 size += 3014 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 3015 if (ug_info->largestexternallookupkeysize == 3016 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 3017 size += 3018 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 3019 } 3020 3021 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> 3022 p_init_enet_param_shadow->rxthread[0]), 3023 (u8) (numThreadsRxNumerical + 1) 3024 /* Rx needs one extra for terminator */ 3025 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, 3026 ug_info->riscRx, 1)) != 0) { 3027 if (netif_msg_ifup(ugeth)) 3028 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3029 __func__); 3030 return ret_val; 3031 } 3032 3033 ugeth->p_init_enet_param_shadow->txglobal = 3034 ugeth->tx_glbl_pram_offset | ug_info->riscTx; 3035 if ((ret_val = 3036 fill_init_enet_entries(ugeth, 3037 &(ugeth->p_init_enet_param_shadow-> 3038 txthread[0]), numThreadsTxNumerical, 3039 sizeof(struct ucc_geth_thread_tx_pram), 3040 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3041 ug_info->riscTx, 0)) != 0) { 3042 if (netif_msg_ifup(ugeth)) 3043 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3044 __func__); 3045 return ret_val; 3046 } 3047 3048 /* Load Rx bds with buffers */ 3049 for (i = 0; i < ug_info->numQueuesRx; i++) { 3050 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3051 if (netif_msg_ifup(ugeth)) 3052 ugeth_err("%s: Can not fill Rx bds with buffers.", 3053 __func__); 3054 return ret_val; 3055 } 3056 } 3057 3058 /* Allocate InitEnet command parameter structure */ 3059 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3060 if (IS_ERR_VALUE(init_enet_pram_offset)) { 3061 if (netif_msg_ifup(ugeth)) 3062 ugeth_err 3063 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3064 __func__); 3065 return -ENOMEM; 3066 } 3067 p_init_enet_pram = 3068 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); 3069 3070 /* Copy shadow InitEnet command parameter structure into PRAM */ 3071 out_8(&p_init_enet_pram->resinit1, 3072 ugeth->p_init_enet_param_shadow->resinit1); 3073 out_8(&p_init_enet_pram->resinit2, 3074 ugeth->p_init_enet_param_shadow->resinit2); 3075 out_8(&p_init_enet_pram->resinit3, 3076 ugeth->p_init_enet_param_shadow->resinit3); 3077 out_8(&p_init_enet_pram->resinit4, 3078 ugeth->p_init_enet_param_shadow->resinit4); 3079 out_be16(&p_init_enet_pram->resinit5, 3080 ugeth->p_init_enet_param_shadow->resinit5); 3081 out_8(&p_init_enet_pram->largestexternallookupkeysize, 3082 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); 3083 out_be32(&p_init_enet_pram->rgftgfrxglobal, 3084 ugeth->p_init_enet_param_shadow->rgftgfrxglobal); 3085 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) 3086 out_be32(&p_init_enet_pram->rxthread[i], 3087 ugeth->p_init_enet_param_shadow->rxthread[i]); 3088 out_be32(&p_init_enet_pram->txglobal, 3089 ugeth->p_init_enet_param_shadow->txglobal); 3090 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) 3091 out_be32(&p_init_enet_pram->txthread[i], 3092 ugeth->p_init_enet_param_shadow->txthread[i]); 3093 3094 /* Issue QE command */ 3095 cecr_subblock = 3096 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 3097 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 3098 init_enet_pram_offset); 3099 3100 /* Free InitEnet command parameter */ 3101 qe_muram_free(init_enet_pram_offset); 3102 3103 return 0; 3104} 3105 3106/* This is called by the kernel when a frame is ready for transmission. */ 3107/* It is pointed to by the dev->hard_start_xmit function pointer */ 3108static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 3109{ 3110 struct ucc_geth_private *ugeth = netdev_priv(dev); 3111#ifdef CONFIG_UGETH_TX_ON_DEMAND 3112 struct ucc_fast_private *uccf; 3113#endif 3114 u8 __iomem *bd; /* BD pointer */ 3115 u32 bd_status; 3116 u8 txQ = 0; 3117 unsigned long flags; 3118 3119 ugeth_vdbg("%s: IN", __func__); 3120 3121 spin_lock_irqsave(&ugeth->lock, flags); 3122 3123 dev->stats.tx_bytes += skb->len; 3124 3125 /* Start from the next BD that should be filled */ 3126 bd = ugeth->txBd[txQ]; 3127 bd_status = in_be32((u32 __iomem *)bd); 3128 /* Save the skb pointer so we can free it later */ 3129 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; 3130 3131 /* Update the current skb pointer (wrapping if this was the last) */ 3132 ugeth->skb_curtx[txQ] = 3133 (ugeth->skb_curtx[txQ] + 3134 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3135 3136 /* set up the buffer descriptor */ 3137 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3138 dma_map_single(ugeth->dev, skb->data, 3139 skb->len, DMA_TO_DEVICE)); 3140 3141 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3142 3143 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; 3144 3145 /* set bd status and length */ 3146 out_be32((u32 __iomem *)bd, bd_status); 3147 3148 dev->trans_start = jiffies; 3149 3150 /* Move to next BD in the ring */ 3151 if (!(bd_status & T_W)) 3152 bd += sizeof(struct qe_bd); 3153 else 3154 bd = ugeth->p_tx_bd_ring[txQ]; 3155 3156 /* If the next BD still needs to be cleaned up, then the bds 3157 are full. We need to tell the kernel to stop sending us stuff. */ 3158 if (bd == ugeth->confBd[txQ]) { 3159 if (!netif_queue_stopped(dev)) 3160 netif_stop_queue(dev); 3161 } 3162 3163 ugeth->txBd[txQ] = bd; 3164 3165 if (ugeth->p_scheduler) { 3166 ugeth->cpucount[txQ]++; 3167 /* Indicate to QE that there are more Tx bds ready for 3168 transmission */ 3169 /* This is done by writing a running counter of the bd 3170 count to the scheduler PRAM. */ 3171 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); 3172 } 3173 3174#ifdef CONFIG_UGETH_TX_ON_DEMAND 3175 uccf = ugeth->uccf; 3176 out_be16(uccf->p_utodr, UCC_FAST_TOD); 3177#endif 3178 spin_unlock_irqrestore(&ugeth->lock, flags); 3179 3180 return NETDEV_TX_OK; 3181} 3182 3183static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3184{ 3185 struct sk_buff *skb; 3186 u8 __iomem *bd; 3187 u16 length, howmany = 0; 3188 u32 bd_status; 3189 u8 *bdBuffer; 3190 struct net_device *dev; 3191 3192 ugeth_vdbg("%s: IN", __func__); 3193 3194 dev = ugeth->ndev; 3195 3196 /* collect received buffers */ 3197 bd = ugeth->rxBd[rxQ]; 3198 3199 bd_status = in_be32((u32 __iomem *)bd); 3200 3201 /* while there are received buffers and BD is full (~R_E) */ 3202 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { 3203 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); 3204 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); 3205 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; 3206 3207 /* determine whether buffer is first, last, first and last 3208 (single buffer frame) or middle (not first and not last) */ 3209 if (!skb || 3210 (!(bd_status & (R_F | R_L))) || 3211 (bd_status & R_ERRORS_FATAL)) { 3212 if (netif_msg_rx_err(ugeth)) 3213 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3214 __func__, __LINE__, (u32) skb); 3215 if (skb) { 3216 skb->data = skb->head + NET_SKB_PAD; 3217 __skb_queue_head(&ugeth->rx_recycle, skb); 3218 } 3219 3220 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3221 dev->stats.rx_dropped++; 3222 } else { 3223 dev->stats.rx_packets++; 3224 howmany++; 3225 3226 /* Prep the skb for the packet */ 3227 skb_put(skb, length); 3228 3229 /* Tell the skb what kind of packet this is */ 3230 skb->protocol = eth_type_trans(skb, ugeth->ndev); 3231 3232 dev->stats.rx_bytes += length; 3233 /* Send the packet up the stack */ 3234 netif_receive_skb(skb); 3235 } 3236 3237 skb = get_new_skb(ugeth, bd); 3238 if (!skb) { 3239 if (netif_msg_rx_err(ugeth)) 3240 ugeth_warn("%s: No Rx Data Buffer", __func__); 3241 dev->stats.rx_dropped++; 3242 break; 3243 } 3244 3245 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; 3246 3247 /* update to point at the next skb */ 3248 ugeth->skb_currx[rxQ] = 3249 (ugeth->skb_currx[rxQ] + 3250 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); 3251 3252 if (bd_status & R_W) 3253 bd = ugeth->p_rx_bd_ring[rxQ]; 3254 else 3255 bd += sizeof(struct qe_bd); 3256 3257 bd_status = in_be32((u32 __iomem *)bd); 3258 } 3259 3260 ugeth->rxBd[rxQ] = bd; 3261 return howmany; 3262} 3263 3264static int ucc_geth_tx(struct net_device *dev, u8 txQ) 3265{ 3266 /* Start from the next BD that should be filled */ 3267 struct ucc_geth_private *ugeth = netdev_priv(dev); 3268 u8 __iomem *bd; /* BD pointer */ 3269 u32 bd_status; 3270 3271 bd = ugeth->confBd[txQ]; 3272 bd_status = in_be32((u32 __iomem *)bd); 3273 3274 /* Normal processing. */ 3275 while ((bd_status & T_R) == 0) { 3276 struct sk_buff *skb; 3277 3278 /* BD contains already transmitted buffer. */ 3279 /* Handle the transmitted buffer and release */ 3280 /* the BD to be used with the current frame */ 3281 3282 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; 3283 if (!skb) 3284 break; 3285 3286 dev->stats.tx_packets++; 3287 3288 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3289 skb_recycle_check(skb, 3290 ugeth->ug_info->uf_info.max_rx_buf_length + 3291 UCC_GETH_RX_DATA_BUF_ALIGNMENT)) 3292 __skb_queue_head(&ugeth->rx_recycle, skb); 3293 else 3294 dev_kfree_skb(skb); 3295 3296 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3297 ugeth->skb_dirtytx[txQ] = 3298 (ugeth->skb_dirtytx[txQ] + 3299 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3300 3301 /* We freed a buffer, so now we can restart transmission */ 3302 if (netif_queue_stopped(dev)) 3303 netif_wake_queue(dev); 3304 3305 /* Advance the confirmation BD pointer */ 3306 if (!(bd_status & T_W)) 3307 bd += sizeof(struct qe_bd); 3308 else 3309 bd = ugeth->p_tx_bd_ring[txQ]; 3310 bd_status = in_be32((u32 __iomem *)bd); 3311 } 3312 ugeth->confBd[txQ] = bd; 3313 return 0; 3314} 3315 3316static int ucc_geth_poll(struct napi_struct *napi, int budget) 3317{ 3318 struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); 3319 struct ucc_geth_info *ug_info; 3320 int howmany, i; 3321 3322 ug_info = ugeth->ug_info; 3323 3324 /* Tx event processing */ 3325 spin_lock(&ugeth->lock); 3326 for (i = 0; i < ug_info->numQueuesTx; i++) 3327 ucc_geth_tx(ugeth->ndev, i); 3328 spin_unlock(&ugeth->lock); 3329 3330 howmany = 0; 3331 for (i = 0; i < ug_info->numQueuesRx; i++) 3332 howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3333 3334 if (howmany < budget) { 3335 napi_complete(napi); 3336 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); 3337 } 3338 3339 return howmany; 3340} 3341 3342static irqreturn_t ucc_geth_irq_handler(int irq, void *info) 3343{ 3344 struct net_device *dev = info; 3345 struct ucc_geth_private *ugeth = netdev_priv(dev); 3346 struct ucc_fast_private *uccf; 3347 struct ucc_geth_info *ug_info; 3348 register u32 ucce; 3349 register u32 uccm; 3350 3351 ugeth_vdbg("%s: IN", __func__); 3352 3353 uccf = ugeth->uccf; 3354 ug_info = ugeth->ug_info; 3355 3356 /* read and clear events */ 3357 ucce = (u32) in_be32(uccf->p_ucce); 3358 uccm = (u32) in_be32(uccf->p_uccm); 3359 ucce &= uccm; 3360 out_be32(uccf->p_ucce, ucce); 3361 3362 /* check for receive events that require processing */ 3363 if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { 3364 if (napi_schedule_prep(&ugeth->napi)) { 3365 uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); 3366 out_be32(uccf->p_uccm, uccm); 3367 __napi_schedule(&ugeth->napi); 3368 } 3369 } 3370 3371 /* Errors and other events */ 3372 if (ucce & UCCE_OTHER) { 3373 if (ucce & UCC_GETH_UCCE_BSY) 3374 dev->stats.rx_errors++; 3375 if (ucce & UCC_GETH_UCCE_TXE) 3376 dev->stats.tx_errors++; 3377 } 3378 3379 return IRQ_HANDLED; 3380} 3381 3382#ifdef CONFIG_NET_POLL_CONTROLLER 3383/* 3384 * Polling 'interrupt' - used by things like netconsole to send skbs 3385 * without having to re-enable interrupts. It's not called while 3386 * the interrupt routine is executing. 3387 */ 3388static void ucc_netpoll(struct net_device *dev) 3389{ 3390 struct ucc_geth_private *ugeth = netdev_priv(dev); 3391 int irq = ugeth->ug_info->uf_info.irq; 3392 3393 disable_irq(irq); 3394 ucc_geth_irq_handler(irq, dev); 3395 enable_irq(irq); 3396} 3397#endif /* CONFIG_NET_POLL_CONTROLLER */ 3398 3399static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) 3400{ 3401 struct ucc_geth_private *ugeth = netdev_priv(dev); 3402 struct sockaddr *addr = p; 3403 3404 if (!is_valid_ether_addr(addr->sa_data)) 3405 return -EADDRNOTAVAIL; 3406 3407 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 3408 3409 /* 3410 * If device is not running, we will set mac addr register 3411 * when opening the device. 3412 */ 3413 if (!netif_running(dev)) 3414 return 0; 3415 3416 spin_lock_irq(&ugeth->lock); 3417 init_mac_station_addr_regs(dev->dev_addr[0], 3418 dev->dev_addr[1], 3419 dev->dev_addr[2], 3420 dev->dev_addr[3], 3421 dev->dev_addr[4], 3422 dev->dev_addr[5], 3423 &ugeth->ug_regs->macstnaddr1, 3424 &ugeth->ug_regs->macstnaddr2); 3425 spin_unlock_irq(&ugeth->lock); 3426 3427 return 0; 3428} 3429 3430static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) 3431{ 3432 struct net_device *dev = ugeth->ndev; 3433 int err; 3434 3435 err = ucc_struct_init(ugeth); 3436 if (err) { 3437 if (netif_msg_ifup(ugeth)) 3438 ugeth_err("%s: Cannot configure internal struct, " 3439 "aborting.", dev->name); 3440 goto err; 3441 } 3442 3443 err = ucc_geth_startup(ugeth); 3444 if (err) { 3445 if (netif_msg_ifup(ugeth)) 3446 ugeth_err("%s: Cannot configure net device, aborting.", 3447 dev->name); 3448 goto err; 3449 } 3450 3451 err = adjust_enet_interface(ugeth); 3452 if (err) { 3453 if (netif_msg_ifup(ugeth)) 3454 ugeth_err("%s: Cannot configure net device, aborting.", 3455 dev->name); 3456 goto err; 3457 } 3458 3459 /* Set MACSTNADDR1, MACSTNADDR2 */ 3460 /* For more details see the hardware spec. */ 3461 init_mac_station_addr_regs(dev->dev_addr[0], 3462 dev->dev_addr[1], 3463 dev->dev_addr[2], 3464 dev->dev_addr[3], 3465 dev->dev_addr[4], 3466 dev->dev_addr[5], 3467 &ugeth->ug_regs->macstnaddr1, 3468 &ugeth->ug_regs->macstnaddr2); 3469 3470 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3471 if (err) { 3472 if (netif_msg_ifup(ugeth)) 3473 ugeth_err("%s: Cannot enable net device, aborting.", dev->name); 3474 goto err; 3475 } 3476 3477 return 0; 3478err: 3479 ucc_geth_stop(ugeth); 3480 return err; 3481} 3482 3483/* Called when something needs to use the ethernet device */ 3484/* Returns 0 for success. */ 3485static int ucc_geth_open(struct net_device *dev) 3486{ 3487 struct ucc_geth_private *ugeth = netdev_priv(dev); 3488 int err; 3489 3490 ugeth_vdbg("%s: IN", __func__); 3491 3492 /* Test station address */ 3493 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3494 if (netif_msg_ifup(ugeth)) 3495 ugeth_err("%s: Multicast address used for station " 3496 "address - is this what you wanted?", 3497 __func__); 3498 return -EINVAL; 3499 } 3500 3501 err = init_phy(dev); 3502 if (err) { 3503 if (netif_msg_ifup(ugeth)) 3504 ugeth_err("%s: Cannot initialize PHY, aborting.", 3505 dev->name); 3506 return err; 3507 } 3508 3509 err = ucc_geth_init_mac(ugeth); 3510 if (err) { 3511 if (netif_msg_ifup(ugeth)) 3512 ugeth_err("%s: Cannot initialize MAC, aborting.", 3513 dev->name); 3514 goto err; 3515 } 3516 3517 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 3518 0, "UCC Geth", dev); 3519 if (err) { 3520 if (netif_msg_ifup(ugeth)) 3521 ugeth_err("%s: Cannot get IRQ for net device, aborting.", 3522 dev->name); 3523 goto err; 3524 } 3525 3526 phy_start(ugeth->phydev); 3527 napi_enable(&ugeth->napi); 3528 netif_start_queue(dev); 3529 3530 device_set_wakeup_capable(&dev->dev, 3531 qe_alive_during_sleep() || ugeth->phydev->irq); 3532 device_set_wakeup_enable(&dev->dev, ugeth->wol_en); 3533 3534 return err; 3535 3536err: 3537 ucc_geth_stop(ugeth); 3538 return err; 3539} 3540 3541/* Stops the kernel queue, and halts the controller */ 3542static int ucc_geth_close(struct net_device *dev) 3543{ 3544 struct ucc_geth_private *ugeth = netdev_priv(dev); 3545 3546 ugeth_vdbg("%s: IN", __func__); 3547 3548 napi_disable(&ugeth->napi); 3549 3550 ucc_geth_stop(ugeth); 3551 3552 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); 3553 3554 netif_stop_queue(dev); 3555 3556 return 0; 3557} 3558 3559/* Reopen device. This will reset the MAC and PHY. */ 3560static void ucc_geth_timeout_work(struct work_struct *work) 3561{ 3562 struct ucc_geth_private *ugeth; 3563 struct net_device *dev; 3564 3565 ugeth = container_of(work, struct ucc_geth_private, timeout_work); 3566 dev = ugeth->ndev; 3567 3568 ugeth_vdbg("%s: IN", __func__); 3569 3570 dev->stats.tx_errors++; 3571 3572 ugeth_dump_regs(ugeth); 3573 3574 if (dev->flags & IFF_UP) { 3575 /* 3576 * Must reset MAC *and* PHY. This is done by reopening 3577 * the device. 3578 */ 3579 ucc_geth_close(dev); 3580 ucc_geth_open(dev); 3581 } 3582 3583 netif_tx_schedule_all(dev); 3584} 3585 3586/* 3587 * ucc_geth_timeout gets called when a packet has not been 3588 * transmitted after a set amount of time. 3589 */ 3590static void ucc_geth_timeout(struct net_device *dev) 3591{ 3592 struct ucc_geth_private *ugeth = netdev_priv(dev); 3593 3594 netif_carrier_off(dev); 3595 schedule_work(&ugeth->timeout_work); 3596} 3597 3598 3599#ifdef CONFIG_PM 3600 3601static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state) 3602{ 3603 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 3604 struct ucc_geth_private *ugeth = netdev_priv(ndev); 3605 3606 if (!netif_running(ndev)) 3607 return 0; 3608 3609 netif_device_detach(ndev); 3610 napi_disable(&ugeth->napi); 3611 3612 /* 3613 * Disable the controller, otherwise we'll wakeup on any network 3614 * activity. 3615 */ 3616 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 3617 3618 if (ugeth->wol_en & WAKE_MAGIC) { 3619 setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); 3620 setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); 3621 ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); 3622 } else if (!(ugeth->wol_en & WAKE_PHY)) { 3623 phy_stop(ugeth->phydev); 3624 } 3625 3626 return 0; 3627} 3628 3629static int ucc_geth_resume(struct of_device *ofdev) 3630{ 3631 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 3632 struct ucc_geth_private *ugeth = netdev_priv(ndev); 3633 int err; 3634 3635 if (!netif_running(ndev)) 3636 return 0; 3637 3638 if (qe_alive_during_sleep()) { 3639 if (ugeth->wol_en & WAKE_MAGIC) { 3640 ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX); 3641 clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); 3642 clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); 3643 } 3644 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3645 } else { 3646 /* 3647 * Full reinitialization is required if QE shuts down 3648 * during sleep. 3649 */ 3650 ucc_geth_memclean(ugeth); 3651 3652 err = ucc_geth_init_mac(ugeth); 3653 if (err) { 3654 ugeth_err("%s: Cannot initialize MAC, aborting.", 3655 ndev->name); 3656 return err; 3657 } 3658 } 3659 3660 ugeth->oldlink = 0; 3661 ugeth->oldspeed = 0; 3662 ugeth->oldduplex = -1; 3663 3664 phy_stop(ugeth->phydev); 3665 phy_start(ugeth->phydev); 3666 3667 napi_enable(&ugeth->napi); 3668 netif_device_attach(ndev); 3669 3670 return 0; 3671} 3672 3673#else 3674#define ucc_geth_suspend NULL 3675#define ucc_geth_resume NULL 3676#endif 3677 3678static phy_interface_t to_phy_interface(const char *phy_connection_type) 3679{ 3680 if (strcasecmp(phy_connection_type, "mii") == 0) 3681 return PHY_INTERFACE_MODE_MII; 3682 if (strcasecmp(phy_connection_type, "gmii") == 0) 3683 return PHY_INTERFACE_MODE_GMII; 3684 if (strcasecmp(phy_connection_type, "tbi") == 0) 3685 return PHY_INTERFACE_MODE_TBI; 3686 if (strcasecmp(phy_connection_type, "rmii") == 0) 3687 return PHY_INTERFACE_MODE_RMII; 3688 if (strcasecmp(phy_connection_type, "rgmii") == 0) 3689 return PHY_INTERFACE_MODE_RGMII; 3690 if (strcasecmp(phy_connection_type, "rgmii-id") == 0) 3691 return PHY_INTERFACE_MODE_RGMII_ID; 3692 if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) 3693 return PHY_INTERFACE_MODE_RGMII_TXID; 3694 if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) 3695 return PHY_INTERFACE_MODE_RGMII_RXID; 3696 if (strcasecmp(phy_connection_type, "rtbi") == 0) 3697 return PHY_INTERFACE_MODE_RTBI; 3698 if (strcasecmp(phy_connection_type, "sgmii") == 0) 3699 return PHY_INTERFACE_MODE_SGMII; 3700 3701 return PHY_INTERFACE_MODE_MII; 3702} 3703 3704static const struct net_device_ops ucc_geth_netdev_ops = { 3705 .ndo_open = ucc_geth_open, 3706 .ndo_stop = ucc_geth_close, 3707 .ndo_start_xmit = ucc_geth_start_xmit, 3708 .ndo_validate_addr = eth_validate_addr, 3709 .ndo_set_mac_address = ucc_geth_set_mac_addr, 3710 .ndo_change_mtu = eth_change_mtu, 3711 .ndo_set_multicast_list = ucc_geth_set_multi, 3712 .ndo_tx_timeout = ucc_geth_timeout, 3713#ifdef CONFIG_NET_POLL_CONTROLLER 3714 .ndo_poll_controller = ucc_netpoll, 3715#endif 3716}; 3717 3718static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match) 3719{ 3720 struct device *device = &ofdev->dev; 3721 struct device_node *np = ofdev->node; 3722 struct net_device *dev = NULL; 3723 struct ucc_geth_private *ugeth = NULL; 3724 struct ucc_geth_info *ug_info; 3725 struct resource res; 3726 int err, ucc_num, max_speed = 0; 3727 const unsigned int *prop; 3728 const char *sprop; 3729 const void *mac_addr; 3730 phy_interface_t phy_interface; 3731 static const int enet_to_speed[] = { 3732 SPEED_10, SPEED_10, SPEED_10, 3733 SPEED_100, SPEED_100, SPEED_100, 3734 SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, 3735 }; 3736 static const phy_interface_t enet_to_phy_interface[] = { 3737 PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, 3738 PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, 3739 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, 3740 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, 3741 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, 3742 PHY_INTERFACE_MODE_SGMII, 3743 }; 3744 3745 ugeth_vdbg("%s: IN", __func__); 3746 3747 prop = of_get_property(np, "cell-index", NULL); 3748 if (!prop) { 3749 prop = of_get_property(np, "device-id", NULL); 3750 if (!prop) 3751 return -ENODEV; 3752 } 3753 3754 ucc_num = *prop - 1; 3755 if ((ucc_num < 0) || (ucc_num > 7)) 3756 return -ENODEV; 3757 3758 ug_info = &ugeth_info[ucc_num]; 3759 if (ug_info == NULL) { 3760 if (netif_msg_probe(&debug)) 3761 ugeth_err("%s: [%d] Missing additional data!", 3762 __func__, ucc_num); 3763 return -ENODEV; 3764 } 3765 3766 ug_info->uf_info.ucc_num = ucc_num; 3767 3768 sprop = of_get_property(np, "rx-clock-name", NULL); 3769 if (sprop) { 3770 ug_info->uf_info.rx_clock = qe_clock_source(sprop); 3771 if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || 3772 (ug_info->uf_info.rx_clock > QE_CLK24)) { 3773 printk(KERN_ERR 3774 "ucc_geth: invalid rx-clock-name property\n"); 3775 return -EINVAL; 3776 } 3777 } else { 3778 prop = of_get_property(np, "rx-clock", NULL); 3779 if (!prop) { 3780 /* If both rx-clock-name and rx-clock are missing, 3781 we want to tell people to use rx-clock-name. */ 3782 printk(KERN_ERR 3783 "ucc_geth: missing rx-clock-name property\n"); 3784 return -EINVAL; 3785 } 3786 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { 3787 printk(KERN_ERR 3788 "ucc_geth: invalid rx-clock propperty\n"); 3789 return -EINVAL; 3790 } 3791 ug_info->uf_info.rx_clock = *prop; 3792 } 3793 3794 sprop = of_get_property(np, "tx-clock-name", NULL); 3795 if (sprop) { 3796 ug_info->uf_info.tx_clock = qe_clock_source(sprop); 3797 if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || 3798 (ug_info->uf_info.tx_clock > QE_CLK24)) { 3799 printk(KERN_ERR 3800 "ucc_geth: invalid tx-clock-name property\n"); 3801 return -EINVAL; 3802 } 3803 } else { 3804 prop = of_get_property(np, "tx-clock", NULL); 3805 if (!prop) { 3806 printk(KERN_ERR 3807 "ucc_geth: missing tx-clock-name property\n"); 3808 return -EINVAL; 3809 } 3810 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { 3811 printk(KERN_ERR 3812 "ucc_geth: invalid tx-clock property\n"); 3813 return -EINVAL; 3814 } 3815 ug_info->uf_info.tx_clock = *prop; 3816 } 3817 3818 err = of_address_to_resource(np, 0, &res); 3819 if (err) 3820 return -EINVAL; 3821 3822 ug_info->uf_info.regs = res.start; 3823 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3824 3825 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); 3826 3827 /* Find the TBI PHY node. If it's not there, we don't support SGMII */ 3828 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 3829 3830 /* get the phy interface type, or default to MII */ 3831 prop = of_get_property(np, "phy-connection-type", NULL); 3832 if (!prop) { 3833 /* handle interface property present in old trees */ 3834 prop = of_get_property(ug_info->phy_node, "interface", NULL); 3835 if (prop != NULL) { 3836 phy_interface = enet_to_phy_interface[*prop]; 3837 max_speed = enet_to_speed[*prop]; 3838 } else 3839 phy_interface = PHY_INTERFACE_MODE_MII; 3840 } else { 3841 phy_interface = to_phy_interface((const char *)prop); 3842 } 3843 3844 /* get speed, or derive from PHY interface */ 3845 if (max_speed == 0) 3846 switch (phy_interface) { 3847 case PHY_INTERFACE_MODE_GMII: 3848 case PHY_INTERFACE_MODE_RGMII: 3849 case PHY_INTERFACE_MODE_RGMII_ID: 3850 case PHY_INTERFACE_MODE_RGMII_RXID: 3851 case PHY_INTERFACE_MODE_RGMII_TXID: 3852 case PHY_INTERFACE_MODE_TBI: 3853 case PHY_INTERFACE_MODE_RTBI: 3854 case PHY_INTERFACE_MODE_SGMII: 3855 max_speed = SPEED_1000; 3856 break; 3857 default: 3858 max_speed = SPEED_100; 3859 break; 3860 } 3861 3862 if (max_speed == SPEED_1000) { 3863 /* configure muram FIFOs for gigabit operation */ 3864 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; 3865 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; 3866 ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; 3867 ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; 3868 ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; 3869 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; 3870 ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; 3871 3872 /* If QE's snum number is 46 which means we need to support 3873 * 4 UECs at 1000Base-T simultaneously, we need to allocate 3874 * more Threads to Rx. 3875 */ 3876 if (qe_get_num_of_snums() == 46) 3877 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; 3878 else 3879 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; 3880 } 3881 3882 if (netif_msg_probe(&debug)) 3883 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", 3884 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, 3885 ug_info->uf_info.irq); 3886 3887 /* Create an ethernet device instance */ 3888 dev = alloc_etherdev(sizeof(*ugeth)); 3889 3890 if (dev == NULL) 3891 return -ENOMEM; 3892 3893 ugeth = netdev_priv(dev); 3894 spin_lock_init(&ugeth->lock); 3895 3896 /* Create CQs for hash tables */ 3897 INIT_LIST_HEAD(&ugeth->group_hash_q); 3898 INIT_LIST_HEAD(&ugeth->ind_hash_q); 3899 3900 dev_set_drvdata(device, dev); 3901 3902 /* Set the dev->base_addr to the gfar reg region */ 3903 dev->base_addr = (unsigned long)(ug_info->uf_info.regs); 3904 3905 SET_NETDEV_DEV(dev, device); 3906 3907 /* Fill in the dev structure */ 3908 uec_set_ethtool_ops(dev); 3909 dev->netdev_ops = &ucc_geth_netdev_ops; 3910 dev->watchdog_timeo = TX_TIMEOUT; 3911 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); 3912 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64); 3913 dev->mtu = 1500; 3914 3915 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); 3916 ugeth->phy_interface = phy_interface; 3917 ugeth->max_speed = max_speed; 3918 3919 err = register_netdev(dev); 3920 if (err) { 3921 if (netif_msg_probe(ugeth)) 3922 ugeth_err("%s: Cannot register net device, aborting.", 3923 dev->name); 3924 free_netdev(dev); 3925 return err; 3926 } 3927 3928 mac_addr = of_get_mac_address(np); 3929 if (mac_addr) 3930 memcpy(dev->dev_addr, mac_addr, 6); 3931 3932 ugeth->ug_info = ug_info; 3933 ugeth->dev = device; 3934 ugeth->ndev = dev; 3935 ugeth->node = np; 3936 3937 return 0; 3938} 3939 3940static int ucc_geth_remove(struct of_device* ofdev) 3941{ 3942 struct device *device = &ofdev->dev; 3943 struct net_device *dev = dev_get_drvdata(device); 3944 struct ucc_geth_private *ugeth = netdev_priv(dev); 3945 3946 unregister_netdev(dev); 3947 free_netdev(dev); 3948 ucc_geth_memclean(ugeth); 3949 dev_set_drvdata(device, NULL); 3950 3951 return 0; 3952} 3953 3954static struct of_device_id ucc_geth_match[] = { 3955 { 3956 .type = "network", 3957 .compatible = "ucc_geth", 3958 }, 3959 {}, 3960}; 3961 3962MODULE_DEVICE_TABLE(of, ucc_geth_match); 3963 3964static struct of_platform_driver ucc_geth_driver = { 3965 .name = DRV_NAME, 3966 .match_table = ucc_geth_match, 3967 .probe = ucc_geth_probe, 3968 .remove = ucc_geth_remove, 3969 .suspend = ucc_geth_suspend, 3970 .resume = ucc_geth_resume, 3971}; 3972 3973static int __init ucc_geth_init(void) 3974{ 3975 int i, ret; 3976 3977 if (netif_msg_drv(&debug)) 3978 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); 3979 for (i = 0; i < 8; i++) 3980 memcpy(&(ugeth_info[i]), &ugeth_primary_info, 3981 sizeof(ugeth_primary_info)); 3982 3983 ret = of_register_platform_driver(&ucc_geth_driver); 3984 3985 return ret; 3986} 3987 3988static void __exit ucc_geth_exit(void) 3989{ 3990 of_unregister_platform_driver(&ucc_geth_driver); 3991} 3992 3993module_init(ucc_geth_init); 3994module_exit(ucc_geth_exit); 3995 3996MODULE_AUTHOR("Freescale Semiconductor, Inc"); 3997MODULE_DESCRIPTION(DRV_DESC); 3998MODULE_VERSION(DRV_VERSION); 3999MODULE_LICENSE("GPL");