Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] ucc_geth: changes to ucc_geth driver as a result of qe_lib changes and bugfixes

changes due to qe_lib changes include:

o removed inclusion of platform header file
o removed platform_device code, replaced with of_device
o removed typedefs
o uint -> u32 conversions
o removed following defines:
QE_SIZEOF_BD, BD_BUFFER_ARG, BD_BUFFER_CLEAR, BD_BUFFER,
BD_STATUS_AND_LENGTH_SET, BD_STATUS_AND_LENGTH, and BD_BUFFER_SET
because they hid sizeof/in_be32/out_be32 operations from the reader.
o removed irrelevant comments; added comments where needed to document the operations formerly hidden by the removed BD_ defines
o const'd and uncasted all get_property() assignments

bugfixes, courtesy of Scott Wood, include:

- Read phy_address as a u32, not u8.
- Match on type == "network" as well as compatible == "ucc_geth", as
device_is_compatible() will only compare up to the length of the
test string, allowing "ucc_geth_phy" to match as well.
- Fixes the MAC setting code in ucc_geth.c; the old code was overwriting and dereferencing random stack contents.

Signed-off-by: Li Yang <leoli@freescale.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>

authored by

Li Yang and committed by
Jeff Garzik
18a8e864 470ea7eb

+476 -437
+1 -1
drivers/net/Kconfig
··· 2288 2288 2289 2289 config UGETH_HAS_GIGA 2290 2290 bool 2291 - depends on UCC_GETH && MPC836x 2291 + depends on UCC_GETH && PPC_MPC836x 2292 2292 2293 2293 config MV643XX_ETH 2294 2294 tristate "MV-643XX Ethernet support"
+345 -290
drivers/net/ucc_geth.c
··· 2 2 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. 3 3 * 4 4 * Author: Shlomi Gridish <gridish@freescale.com> 5 + * Li Yang <leoli@freescale.com> 5 6 * 6 7 * Description: 7 8 * QE UCC Gigabit Ethernet Driver 8 - * 9 - * Changelog: 10 - * Jul 6, 2006 Li Yang <LeoLi@freescale.com> 11 - * - Rearrange code and style fixes 12 9 * 13 10 * This program is free software; you can redistribute it and/or modify it 14 11 * under the terms of the GNU General Public License as published by the ··· 28 31 #include <linux/dma-mapping.h> 29 32 #include <linux/fsl_devices.h> 30 33 #include <linux/ethtool.h> 31 - #include <linux/platform_device.h> 32 34 #include <linux/mii.h> 33 35 36 + #include <asm/of_device.h> 34 37 #include <asm/uaccess.h> 35 38 #include <asm/irq.h> 36 39 #include <asm/io.h> ··· 67 70 68 71 static DEFINE_SPINLOCK(ugeth_lock); 69 72 70 - static ucc_geth_info_t ugeth_primary_info = { 73 + static struct ucc_geth_info ugeth_primary_info = { 71 74 .uf_info = { 72 75 .bd_mem_part = MEM_PART_SYSTEM, 73 76 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, ··· 160 163 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, 161 164 }; 162 165 163 - static ucc_geth_info_t ugeth_info[8]; 166 + static struct ucc_geth_info ugeth_info[8]; 164 167 165 168 #ifdef DEBUG 166 169 static void mem_disp(u8 *addr, int size) ··· 216 219 } 217 220 } 218 221 219 - static int get_interface_details(enet_interface_e enet_interface, 220 - enet_speed_e *speed, 222 + static int get_interface_details(enum enet_interface enet_interface, 223 + enum enet_speed *speed, 221 224 int *r10m, 222 225 int *rmm, 223 226 int *rpm, ··· 280 283 return 0; 281 284 } 282 285 283 - static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd) 286 + static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) 284 287 { 285 288 struct sk_buff *skb = NULL; 286 289 ··· 300 303 301 304 skb->dev = ugeth->dev; 302 305 303 - BD_BUFFER_SET(bd, 306 + out_be32(&((struct qe_bd *)bd)->buf, 304 
307 dma_map_single(NULL, 305 308 skb->data, 306 309 ugeth->ug_info->uf_info.max_rx_buf_length + 307 310 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 308 311 DMA_FROM_DEVICE)); 309 312 310 - BD_STATUS_AND_LENGTH_SET(bd, 311 - (R_E | R_I | 312 - (BD_STATUS_AND_LENGTH(bd) & R_W))); 313 + out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W))); 313 314 314 315 return skb; 315 316 } 316 317 317 - static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ) 318 + static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) 318 319 { 319 320 u8 *bd; 320 321 u32 bd_status; ··· 323 328 i = 0; 324 329 325 330 do { 326 - bd_status = BD_STATUS_AND_LENGTH(bd); 331 + bd_status = in_be32((u32*)bd); 327 332 skb = get_new_skb(ugeth, bd); 328 333 329 334 if (!skb) /* If can not allocate data buffer, ··· 333 338 ugeth->rx_skbuff[rxQ][i] = skb; 334 339 335 340 /* advance the BD pointer */ 336 - bd += UCC_GETH_SIZE_OF_BD; 341 + bd += sizeof(struct qe_bd); 337 342 i++; 338 343 } while (!(bd_status & R_W)); 339 344 340 345 return 0; 341 346 } 342 347 343 - static int fill_init_enet_entries(ucc_geth_private_t *ugeth, 348 + static int fill_init_enet_entries(struct ucc_geth_private *ugeth, 344 349 volatile u32 *p_start, 345 350 u8 num_entries, 346 351 u32 thread_size, 347 352 u32 thread_alignment, 348 - qe_risc_allocation_e risc, 353 + enum qe_risc_allocation risc, 349 354 int skip_page_for_first_entry) 350 355 { 351 356 u32 init_enet_offset; ··· 378 383 return 0; 379 384 } 380 385 381 - static int return_init_enet_entries(ucc_geth_private_t *ugeth, 386 + static int return_init_enet_entries(struct ucc_geth_private *ugeth, 382 387 volatile u32 *p_start, 383 388 u8 num_entries, 384 - qe_risc_allocation_e risc, 389 + enum qe_risc_allocation risc, 385 390 int skip_page_for_first_entry) 386 391 { 387 392 u32 init_enet_offset; ··· 411 416 } 412 417 413 418 #ifdef DEBUG 414 - static int dump_init_enet_entries(ucc_geth_private_t *ugeth, 419 + static int dump_init_enet_entries(struct 
ucc_geth_private *ugeth, 415 420 volatile u32 *p_start, 416 421 u8 num_entries, 417 422 u32 thread_size, 418 - qe_risc_allocation_e risc, 423 + enum qe_risc_allocation risc, 419 424 int skip_page_for_first_entry) 420 425 { 421 426 u32 init_enet_offset; ··· 451 456 #endif 452 457 453 458 #ifdef CONFIG_UGETH_FILTERING 454 - static enet_addr_container_t *get_enet_addr_container(void) 459 + static struct enet_addr_container *get_enet_addr_container(void) 455 460 { 456 - enet_addr_container_t *enet_addr_cont; 461 + struct enet_addr_container *enet_addr_cont; 457 462 458 463 /* allocate memory */ 459 - enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL); 464 + enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL); 460 465 if (!enet_addr_cont) { 461 - ugeth_err("%s: No memory for enet_addr_container_t object.", 466 + ugeth_err("%s: No memory for enet_addr_container object.", 462 467 __FUNCTION__); 463 468 return NULL; 464 469 } ··· 467 472 } 468 473 #endif /* CONFIG_UGETH_FILTERING */ 469 474 470 - static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont) 475 + static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) 471 476 { 472 477 kfree(enet_addr_cont); 473 478 } 474 479 475 - #ifdef CONFIG_UGETH_FILTERING 476 - static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth, 477 - enet_addr_t *p_enet_addr, u8 paddr_num) 480 + static int set_mac_addr(__be16 __iomem *reg, u8 *mac) 478 481 { 479 - ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; 482 + out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]); 483 + out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]); 484 + out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]); 485 + } 486 + 487 + #ifdef CONFIG_UGETH_FILTERING 488 + static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth, 489 + u8 *p_enet_addr, u8 paddr_num) 490 + { 491 + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 480 492 481 493 if (!(paddr_num < NUM_OF_PADDRS)) { 482 - 
ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); 494 + ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); 483 495 return -EINVAL; 484 496 } 485 497 486 498 p_82xx_addr_filt = 487 - (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> 499 + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 488 500 addressfiltering; 489 501 490 502 /* Ethernet frames are defined in Little Endian mode, */ 491 503 /* therefore to insert the address we reverse the bytes. */ 492 - out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 493 - (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | 494 - (u16) (*p_enet_addr)[4])); 495 - out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 496 - (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | 497 - (u16) (*p_enet_addr)[2])); 498 - out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 499 - (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | 500 - (u16) (*p_enet_addr)[0])); 501 - 504 + set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr); 502 505 return 0; 503 506 } 504 507 #endif /* CONFIG_UGETH_FILTERING */ 505 508 506 - static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num) 509 + static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 507 510 { 508 - ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; 511 + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 509 512 510 513 if (!(paddr_num < NUM_OF_PADDRS)) { 511 514 ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); ··· 511 518 } 512 519 513 520 p_82xx_addr_filt = 514 - (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> 521 + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 515 522 addressfiltering; 516 523 517 524 /* Writing address ff.ff.ff.ff.ff.ff disables address ··· 523 530 return 0; 524 531 } 525 532 526 - static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth, 527 - enet_addr_t *p_enet_addr) 533 + static void hw_add_addr_in_hash(struct 
ucc_geth_private *ugeth, 534 + u8 *p_enet_addr) 528 535 { 529 - ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; 536 + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 530 537 u32 cecr_subblock; 531 538 532 539 p_82xx_addr_filt = 533 - (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> 540 + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 534 541 addressfiltering; 535 542 536 543 cecr_subblock = ··· 539 546 /* Ethernet frames are defined in Little Endian mode, 540 547 therefor to insert */ 541 548 /* the address to the hash (Big Endian mode), we reverse the bytes.*/ 542 - out_be16(&p_82xx_addr_filt->taddr.h, 543 - (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | 544 - (u16) (*p_enet_addr)[4])); 545 - out_be16(&p_82xx_addr_filt->taddr.m, 546 - (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | 547 - (u16) (*p_enet_addr)[2])); 548 - out_be16(&p_82xx_addr_filt->taddr.l, 549 - (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | 550 - (u16) (*p_enet_addr)[0])); 549 + 550 + set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); 551 551 552 552 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, 553 - (u8) QE_CR_PROTOCOL_ETHERNET, 0); 553 + QE_CR_PROTOCOL_ETHERNET, 0); 554 554 } 555 555 556 556 #ifdef CONFIG_UGETH_MAGIC_PACKET 557 - static void magic_packet_detection_enable(ucc_geth_private_t *ugeth) 557 + static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) 558 558 { 559 - ucc_fast_private_t *uccf; 560 - ucc_geth_t *ug_regs; 559 + struct ucc_fast_private *uccf; 560 + struct ucc_geth *ug_regs; 561 561 u32 maccfg2, uccm; 562 562 563 563 uccf = ugeth->uccf; ··· 567 581 out_be32(&ug_regs->maccfg2, maccfg2); 568 582 } 569 583 570 - static void magic_packet_detection_disable(ucc_geth_private_t *ugeth) 584 + static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) 571 585 { 572 - ucc_fast_private_t *uccf; 573 - ucc_geth_t *ug_regs; 586 + struct ucc_fast_private *uccf; 587 + 
struct ucc_geth *ug_regs; 574 588 u32 maccfg2, uccm; 575 589 576 590 uccf = ugeth->uccf; ··· 588 602 } 589 603 #endif /* MAGIC_PACKET */ 590 604 591 - static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2) 605 + static inline int compare_addr(u8 **addr1, u8 **addr2) 592 606 { 593 607 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); 594 608 } 595 609 596 610 #ifdef DEBUG 597 - static void get_statistics(ucc_geth_private_t *ugeth, 598 - ucc_geth_tx_firmware_statistics_t * 611 + static void get_statistics(struct ucc_geth_private *ugeth, 612 + struct ucc_geth_tx_firmware_statistics * 599 613 tx_firmware_statistics, 600 - ucc_geth_rx_firmware_statistics_t * 614 + struct ucc_geth_rx_firmware_statistics * 601 615 rx_firmware_statistics, 602 - ucc_geth_hardware_statistics_t *hardware_statistics) 616 + struct ucc_geth_hardware_statistics *hardware_statistics) 603 617 { 604 - ucc_fast_t *uf_regs; 605 - ucc_geth_t *ug_regs; 606 - ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; 607 - ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; 618 + struct ucc_fast *uf_regs; 619 + struct ucc_geth *ug_regs; 620 + struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 621 + struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 608 622 609 623 ug_regs = ugeth->ug_regs; 610 - uf_regs = (ucc_fast_t *) ug_regs; 624 + uf_regs = (struct ucc_fast *) ug_regs; 611 625 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; 612 626 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; 613 627 ··· 713 727 } 714 728 } 715 729 716 - static void dump_bds(ucc_geth_private_t *ugeth) 730 + static void dump_bds(struct ucc_geth_private *ugeth) 717 731 { 718 732 int i; 719 733 int length; ··· 722 736 if (ugeth->p_tx_bd_ring[i]) { 723 737 length = 724 738 (ugeth->ug_info->bdRingLenTx[i] * 725 - UCC_GETH_SIZE_OF_BD); 739 + sizeof(struct qe_bd)); 726 740 ugeth_info("TX BDs[%d]", i); 727 741 
mem_disp(ugeth->p_tx_bd_ring[i], length); 728 742 } ··· 731 745 if (ugeth->p_rx_bd_ring[i]) { 732 746 length = 733 747 (ugeth->ug_info->bdRingLenRx[i] * 734 - UCC_GETH_SIZE_OF_BD); 748 + sizeof(struct qe_bd)); 735 749 ugeth_info("RX BDs[%d]", i); 736 750 mem_disp(ugeth->p_rx_bd_ring[i], length); 737 751 } 738 752 } 739 753 } 740 754 741 - static void dump_regs(ucc_geth_private_t *ugeth) 755 + static void dump_regs(struct ucc_geth_private *ugeth) 742 756 { 743 757 int i; 744 758 ··· 879 893 ugeth_info("Base address: 0x%08x", 880 894 (u32) & ugeth->p_thread_data_tx[i]); 881 895 mem_disp((u8 *) & ugeth->p_thread_data_tx[i], 882 - sizeof(ucc_geth_thread_data_tx_t)); 896 + sizeof(struct ucc_geth_thread_data_tx)); 883 897 } 884 898 } 885 899 if (ugeth->p_thread_data_rx) { ··· 913 927 ugeth_info("Base address: 0x%08x", 914 928 (u32) & ugeth->p_thread_data_rx[i]); 915 929 mem_disp((u8 *) & ugeth->p_thread_data_rx[i], 916 - sizeof(ucc_geth_thread_data_rx_t)); 930 + sizeof(struct ucc_geth_thread_data_rx)); 917 931 } 918 932 } 919 933 if (ugeth->p_exf_glbl_param) { ··· 1091 1105 ugeth_info("Base address: 0x%08x", 1092 1106 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); 1093 1107 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], 1094 - sizeof(ucc_geth_send_queue_qd_t)); 1108 + sizeof(struct ucc_geth_send_queue_qd)); 1095 1109 } 1096 1110 } 1097 1111 if (ugeth->p_scheduler) { ··· 1173 1187 qe_muram_addr(in_be32 1174 1188 (&ugeth->p_rx_bd_qs_tbl[i]. 
1175 1189 bdbaseptr)), 1176 - sizeof(ucc_geth_rx_prefetched_bds_t)); 1190 + sizeof(struct ucc_geth_rx_prefetched_bds)); 1177 1191 } 1178 1192 } 1179 1193 if (ugeth->p_init_enet_param_shadow) { ··· 1184 1198 mem_disp((u8 *) ugeth->p_init_enet_param_shadow, 1185 1199 sizeof(*ugeth->p_init_enet_param_shadow)); 1186 1200 1187 - size = sizeof(ucc_geth_thread_rx_pram_t); 1201 + size = sizeof(struct ucc_geth_thread_rx_pram); 1188 1202 if (ugeth->ug_info->rxExtendedFiltering) { 1189 1203 size += 1190 1204 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; ··· 1202 1216 &(ugeth->p_init_enet_param_shadow-> 1203 1217 txthread[0]), 1204 1218 ENET_INIT_PARAM_MAX_ENTRIES_TX, 1205 - sizeof(ucc_geth_thread_tx_pram_t), 1219 + sizeof(struct ucc_geth_thread_tx_pram), 1206 1220 ugeth->ug_info->riscTx, 0); 1207 1221 dump_init_enet_entries(ugeth, 1208 1222 &(ugeth->p_init_enet_param_shadow-> ··· 1564 1578 return 0; 1565 1579 } 1566 1580 1567 - static int adjust_enet_interface(ucc_geth_private_t *ugeth) 1581 + static int adjust_enet_interface(struct ucc_geth_private *ugeth) 1568 1582 { 1569 - ucc_geth_info_t *ug_info; 1570 - ucc_geth_t *ug_regs; 1571 - ucc_fast_t *uf_regs; 1572 - enet_speed_e speed; 1583 + struct ucc_geth_info *ug_info; 1584 + struct ucc_geth *ug_regs; 1585 + struct ucc_fast *uf_regs; 1586 + enum enet_speed speed; 1573 1587 int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm = 1574 1588 0, limited_to_full_duplex = 0; 1575 1589 u32 upsmr, maccfg2, utbipar, tbiBaseAddress; ··· 1677 1691 */ 1678 1692 static void adjust_link(struct net_device *dev) 1679 1693 { 1680 - ucc_geth_private_t *ugeth = netdev_priv(dev); 1681 - ucc_geth_t *ug_regs; 1694 + struct ucc_geth_private *ugeth = netdev_priv(dev); 1695 + struct ucc_geth *ug_regs; 1682 1696 u32 tempval; 1683 1697 struct ugeth_mii_info *mii_info = ugeth->mii_info; 1684 1698 ··· 1708 1722 if (mii_info->speed != ugeth->oldspeed) { 1709 1723 switch (mii_info->speed) { 1710 1724 case 1000: 1711 - #ifdef CONFIG_MPC836x 1725 + #ifdef 
CONFIG_PPC_MPC836x 1712 1726 /* FIXME: This code is for 100Mbs BUG fixing, 1713 1727 remove this when it is fixed!!! */ 1714 1728 if (ugeth->ug_info->enet_interface == ··· 1754 1768 break; 1755 1769 case 100: 1756 1770 case 10: 1757 - #ifdef CONFIG_MPC836x 1771 + #ifdef CONFIG_PPC_MPC836x 1758 1772 /* FIXME: This code is for 100Mbs BUG fixing, 1759 1773 remove this lines when it will be fixed!!! */ 1760 1774 ugeth->ug_info->enet_interface = ENET_100_RGMII; ··· 1813 1827 */ 1814 1828 static int init_phy(struct net_device *dev) 1815 1829 { 1816 - ucc_geth_private_t *ugeth = netdev_priv(dev); 1830 + struct ucc_geth_private *ugeth = netdev_priv(dev); 1817 1831 struct phy_info *curphy; 1818 - ucc_mii_mng_t *mii_regs; 1832 + struct ucc_mii_mng *mii_regs; 1819 1833 struct ugeth_mii_info *mii_info; 1820 1834 int err; 1821 1835 ··· 1900 1914 } 1901 1915 1902 1916 #ifdef CONFIG_UGETH_TX_ON_DEMOND 1903 - static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth) 1917 + static int ugeth_transmit_on_demand(struct ucc_geth_private *ugeth) 1904 1918 { 1905 - ucc_fast_transmit_on_demand(ugeth->uccf); 1919 + struct ucc_fastransmit_on_demand(ugeth->uccf); 1906 1920 1907 1921 return 0; 1908 1922 } 1909 1923 #endif 1910 1924 1911 - static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth) 1925 + static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) 1912 1926 { 1913 - ucc_fast_private_t *uccf; 1927 + struct ucc_fast_private *uccf; 1914 1928 u32 cecr_subblock; 1915 1929 u32 temp; 1916 1930 ··· 1926 1940 cecr_subblock = 1927 1941 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1928 1942 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 1929 - (u8) QE_CR_PROTOCOL_ETHERNET, 0); 1943 + QE_CR_PROTOCOL_ETHERNET, 0); 1930 1944 1931 1945 /* Wait for command to complete */ 1932 1946 do { ··· 1938 1952 return 0; 1939 1953 } 1940 1954 1941 - static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth) 1955 + static int ugeth_graceful_stop_rx(struct ucc_geth_private 
* ugeth) 1942 1956 { 1943 - ucc_fast_private_t *uccf; 1957 + struct ucc_fast_private *uccf; 1944 1958 u32 cecr_subblock; 1945 1959 u8 temp; 1946 1960 ··· 1959 1973 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 1960 1974 ucc_num); 1961 1975 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1962 - (u8) QE_CR_PROTOCOL_ETHERNET, 0); 1976 + QE_CR_PROTOCOL_ETHERNET, 0); 1963 1977 1964 1978 temp = ugeth->p_rx_glbl_pram->rxgstpack; 1965 1979 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); ··· 1969 1983 return 0; 1970 1984 } 1971 1985 1972 - static int ugeth_restart_tx(ucc_geth_private_t *ugeth) 1986 + static int ugeth_restart_tx(struct ucc_geth_private *ugeth) 1973 1987 { 1974 - ucc_fast_private_t *uccf; 1988 + struct ucc_fast_private *uccf; 1975 1989 u32 cecr_subblock; 1976 1990 1977 1991 uccf = ugeth->uccf; 1978 1992 1979 1993 cecr_subblock = 1980 1994 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1981 - qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, 1982 - 0); 1995 + qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); 1983 1996 uccf->stopped_tx = 0; 1984 1997 1985 1998 return 0; 1986 1999 } 1987 2000 1988 - static int ugeth_restart_rx(ucc_geth_private_t *ugeth) 2001 + static int ugeth_restart_rx(struct ucc_geth_private *ugeth) 1989 2002 { 1990 - ucc_fast_private_t *uccf; 2003 + struct ucc_fast_private *uccf; 1991 2004 u32 cecr_subblock; 1992 2005 1993 2006 uccf = ugeth->uccf; 1994 2007 1995 2008 cecr_subblock = 1996 2009 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1997 - qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, 2010 + qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 1998 2011 0); 1999 2012 uccf->stopped_rx = 0; 2000 2013 2001 2014 return 0; 2002 2015 } 2003 2016 2004 - static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode) 2017 + static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) 2005 2018 { 2006 
- ucc_fast_private_t *uccf; 2019 + struct ucc_fast_private *uccf; 2007 2020 int enabled_tx, enabled_rx; 2008 2021 2009 2022 uccf = ugeth->uccf; ··· 2029 2044 2030 2045 } 2031 2046 2032 - static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode) 2047 + static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode) 2033 2048 { 2034 - ucc_fast_private_t *uccf; 2049 + struct ucc_fast_private *uccf; 2035 2050 2036 2051 uccf = ugeth->uccf; 2037 2052 ··· 2054 2069 return 0; 2055 2070 } 2056 2071 2057 - static void ugeth_dump_regs(ucc_geth_private_t *ugeth) 2072 + static void ugeth_dump_regs(struct ucc_geth_private *ugeth) 2058 2073 { 2059 2074 #ifdef DEBUG 2060 2075 ucc_fast_dump_regs(ugeth->uccf); ··· 2064 2079 } 2065 2080 2066 2081 #ifdef CONFIG_UGETH_FILTERING 2067 - static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t * 2082 + static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params * 2068 2083 p_UccGethTadParams, 2069 - qe_fltr_tad_t *qe_fltr_tad) 2084 + struct qe_fltr_tad *qe_fltr_tad) 2070 2085 { 2071 2086 u16 temp; 2072 2087 ··· 2104 2119 return 0; 2105 2120 } 2106 2121 2107 - static enet_addr_container_t 2108 - *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth, 2109 - enet_addr_t *p_enet_addr) 2122 + static struct enet_addr_container_t 2123 + *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth, 2124 + struct enet_addr *p_enet_addr) 2110 2125 { 2111 - enet_addr_container_t *enet_addr_cont; 2126 + struct enet_addr_container *enet_addr_cont; 2112 2127 struct list_head *p_lh; 2113 2128 u16 i, num; 2114 2129 int32_t j; ··· 2129 2144 2130 2145 for (i = 0; i < num; i++) { 2131 2146 enet_addr_cont = 2132 - (enet_addr_container_t *) 2147 + (struct enet_addr_container *) 2133 2148 ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); 2134 2149 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) { 2135 2150 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j]) ··· 2142 2157 return NULL; 
2143 2158 } 2144 2159 2145 - static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth, 2146 - enet_addr_t *p_enet_addr) 2160 + static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth, 2161 + struct enet_addr *p_enet_addr) 2147 2162 { 2148 - ucc_geth_enet_address_recognition_location_e location; 2149 - enet_addr_container_t *enet_addr_cont; 2163 + enum ucc_geth_enet_address_recognition_location location; 2164 + struct enet_addr_container *enet_addr_cont; 2150 2165 struct list_head *p_lh; 2151 2166 u8 i; 2152 2167 u32 limit; ··· 2181 2196 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 2182 2197 ++(*p_counter); 2183 2198 2184 - hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); 2185 - 2199 + hw_add_addr_in_hash(ugeth, enet_addr_cont->address); 2186 2200 return 0; 2187 2201 } 2188 2202 2189 - static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth, 2190 - enet_addr_t *p_enet_addr) 2203 + static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth, 2204 + struct enet_addr *p_enet_addr) 2191 2205 { 2192 - ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; 2193 - enet_addr_container_t *enet_addr_cont; 2194 - ucc_fast_private_t *uccf; 2195 - comm_dir_e comm_dir; 2206 + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2207 + struct enet_addr_container *enet_addr_cont; 2208 + struct ucc_fast_private *uccf; 2209 + enum comm_dir comm_dir; 2196 2210 u16 i, num; 2197 2211 struct list_head *p_lh; 2198 2212 u32 *addr_h, *addr_l; ··· 2200 2216 uccf = ugeth->uccf; 2201 2217 2202 2218 p_82xx_addr_filt = 2203 - (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> 2219 + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 2204 2220 addressfiltering; 2205 2221 2206 2222 if (! 
··· 2240 2256 num = --(*p_counter); 2241 2257 for (i = 0; i < num; i++) { 2242 2258 enet_addr_cont = 2243 - (enet_addr_container_t *) 2259 + (struct enet_addr_container *) 2244 2260 ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); 2245 - hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); 2261 + hw_add_addr_in_hash(ugeth, enet_addr_cont->address); 2246 2262 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ 2247 2263 } 2248 2264 ··· 2253 2269 } 2254 2270 #endif /* CONFIG_UGETH_FILTERING */ 2255 2271 2256 - static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t * 2272 + static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * 2257 2273 ugeth, 2258 - enet_addr_type_e 2274 + enum enet_addr_type 2259 2275 enet_addr_type) 2260 2276 { 2261 - ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; 2262 - ucc_fast_private_t *uccf; 2263 - comm_dir_e comm_dir; 2277 + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2278 + struct ucc_fast_private *uccf; 2279 + enum comm_dir comm_dir; 2264 2280 struct list_head *p_lh; 2265 2281 u16 i, num; 2266 2282 u32 *addr_h, *addr_l; ··· 2269 2285 uccf = ugeth->uccf; 2270 2286 2271 2287 p_82xx_addr_filt = 2272 - (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> 2288 + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 2273 2289 addressfiltering; 2274 2290 2275 2291 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { ··· 2315 2331 } 2316 2332 2317 2333 #ifdef CONFIG_UGETH_FILTERING 2318 - static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth, 2319 - enet_addr_t *p_enet_addr, 2334 + static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth, 2335 + struct enet_addr *p_enet_addr, 2320 2336 u8 paddr_num) 2321 2337 { 2322 2338 int i; ··· 2336 2352 } 2337 2353 #endif /* CONFIG_UGETH_FILTERING */ 2338 2354 2339 - static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth, 2355 + static int 
ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, 2340 2356 u8 paddr_num) 2341 2357 { 2342 2358 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ 2343 2359 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ 2344 2360 } 2345 2361 2346 - static void ucc_geth_memclean(ucc_geth_private_t *ugeth) 2362 + static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 2347 2363 { 2348 2364 u16 i, j; 2349 2365 u8 *bd; ··· 2417 2433 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2418 2434 if (ugeth->tx_skbuff[i][j]) { 2419 2435 dma_unmap_single(NULL, 2420 - BD_BUFFER_ARG(bd), 2421 - (BD_STATUS_AND_LENGTH(bd) & 2436 + ((qe_bd_t *)bd)->buf, 2437 + (in_be32((u32 *)bd) & 2422 2438 BD_LENGTH_MASK), 2423 2439 DMA_TO_DEVICE); 2424 2440 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); ··· 2444 2460 bd = ugeth->p_rx_bd_ring[i]; 2445 2461 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2446 2462 if (ugeth->rx_skbuff[i][j]) { 2447 - dma_unmap_single(NULL, BD_BUFFER(bd), 2448 - ugeth->ug_info-> 2449 - uf_info. 
2450 - max_rx_buf_length + 2451 - UCC_GETH_RX_DATA_BUF_ALIGNMENT, 2452 - DMA_FROM_DEVICE); 2453 - 2454 - dev_kfree_skb_any(ugeth-> 2455 - rx_skbuff[i][j]); 2463 + dma_unmap_single(NULL, 2464 + ((struct qe_bd *)bd)->buf, 2465 + ugeth->ug_info-> 2466 + uf_info.max_rx_buf_length + 2467 + UCC_GETH_RX_DATA_BUF_ALIGNMENT, 2468 + DMA_FROM_DEVICE); 2469 + dev_kfree_skb_any( 2470 + ugeth->rx_skbuff[i][j]); 2456 2471 ugeth->rx_skbuff[i][j] = NULL; 2457 2472 } 2458 - bd += UCC_GETH_SIZE_OF_BD; 2473 + bd += sizeof(struct qe_bd); 2459 2474 } 2460 2475 2461 2476 kfree(ugeth->rx_skbuff[i]); ··· 2479 2496 2480 2497 static void ucc_geth_set_multi(struct net_device *dev) 2481 2498 { 2482 - ucc_geth_private_t *ugeth; 2499 + struct ucc_geth_private *ugeth; 2483 2500 struct dev_mc_list *dmi; 2484 - ucc_fast_t *uf_regs; 2485 - ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; 2486 - enet_addr_t tempaddr; 2501 + struct ucc_fast *uf_regs; 2502 + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2503 + u8 tempaddr[6]; 2487 2504 u8 *mcptr, *tdptr; 2488 2505 int i, j; 2489 2506 ··· 2500 2517 uf_regs->upsmr &= ~UPSMR_PRO; 2501 2518 2502 2519 p_82xx_addr_filt = 2503 - (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> 2520 + (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 2504 2521 p_rx_glbl_pram->addressfiltering; 2505 2522 2506 2523 if (dev->flags & IFF_ALLMULTI) { ··· 2529 2546 * copy bytes MSB first from dmi_addr. 2530 2547 */ 2531 2548 mcptr = (u8 *) dmi->dmi_addr + 5; 2532 - tdptr = (u8 *) & tempaddr; 2549 + tdptr = (u8 *) tempaddr; 2533 2550 for (j = 0; j < 6; j++) 2534 2551 *tdptr++ = *mcptr--; 2535 2552 2536 2553 /* Ask CPM to run CRC and set bit in 2537 2554 * filter mask. 
2538 2555 */ 2539 - hw_add_addr_in_hash(ugeth, &tempaddr); 2540 - 2556 + hw_add_addr_in_hash(ugeth, tempaddr); 2541 2557 } 2542 2558 } 2543 2559 } 2544 2560 } 2545 2561 2546 - static void ucc_geth_stop(ucc_geth_private_t *ugeth) 2562 + static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2547 2563 { 2548 - ucc_geth_t *ug_regs = ugeth->ug_regs; 2564 + struct ucc_geth *ug_regs = ugeth->ug_regs; 2549 2565 u32 tempval; 2550 2566 2551 2567 ugeth_vdbg("%s: IN", __FUNCTION__); ··· 2587 2605 ucc_geth_memclean(ugeth); 2588 2606 } 2589 2607 2590 - static int ucc_geth_startup(ucc_geth_private_t *ugeth) 2608 + static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2591 2609 { 2592 - ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; 2593 - ucc_geth_init_pram_t *p_init_enet_pram; 2594 - ucc_fast_private_t *uccf; 2595 - ucc_geth_info_t *ug_info; 2596 - ucc_fast_info_t *uf_info; 2597 - ucc_fast_t *uf_regs; 2598 - ucc_geth_t *ug_regs; 2610 + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2611 + struct ucc_geth_init_pram *p_init_enet_pram; 2612 + struct ucc_fast_private *uccf; 2613 + struct ucc_geth_info *ug_info; 2614 + struct ucc_fast_info *uf_info; 2615 + struct ucc_fast *uf_regs; 2616 + struct ucc_geth *ug_regs; 2599 2617 int ret_val = -EINVAL; 2600 2618 u32 remoder = UCC_GETH_REMODER_INIT; 2601 2619 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; ··· 2770 2788 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); 2771 2789 2772 2790 uf_regs = uccf->uf_regs; 2773 - ug_regs = (ucc_geth_t *) (uccf->uf_regs); 2791 + ug_regs = (struct ucc_geth *) (uccf->uf_regs); 2774 2792 ugeth->ug_regs = ug_regs; 2775 2793 2776 2794 init_default_reg_vals(&uf_regs->upsmr, ··· 2851 2869 /* Allocate in multiple of 2852 2870 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, 2853 2871 according to spec */ 2854 - length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) 2872 + length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) 2855 2873 / 
UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2856 2874 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2857 - if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) % 2875 + if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % 2858 2876 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2859 2877 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2860 2878 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { ··· 2886 2904 } 2887 2905 /* Zero unused end of bd ring, according to spec */ 2888 2906 memset(ugeth->p_tx_bd_ring[j] + 2889 - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0, 2890 - length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD); 2907 + ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0, 2908 + length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2891 2909 } 2892 2910 2893 2911 /* Allocate Rx bds */ 2894 2912 for (j = 0; j < ug_info->numQueuesRx; j++) { 2895 - length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD; 2913 + length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); 2896 2914 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2897 2915 u32 align = 4; 2898 2916 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) ··· 2942 2960 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; 2943 2961 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2944 2962 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { 2945 - BD_BUFFER_CLEAR(bd); 2946 - BD_STATUS_AND_LENGTH_SET(bd, 0); 2947 - bd += UCC_GETH_SIZE_OF_BD; 2963 + /* clear bd buffer */ 2964 + out_be32(&((struct qe_bd *)bd)->buf, 0); 2965 + /* set bd status and length */ 2966 + out_be32((u32 *)bd, 0); 2967 + bd += sizeof(struct qe_bd); 2948 2968 } 2949 - bd -= UCC_GETH_SIZE_OF_BD; 2950 - BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */ 2969 + bd -= sizeof(struct qe_bd); 2970 + /* set bd status and length */ 2971 + out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */ 2951 2972 } 2952 2973 2953 2974 /* Init Rx bds */ ··· 2974 2989 ugeth->skb_currx[j] = 0; 2975 2990 bd = ugeth->rxBd[j] = 
ugeth->p_rx_bd_ring[j]; 2976 2991 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2977 - BD_STATUS_AND_LENGTH_SET(bd, R_I); 2978 - BD_BUFFER_CLEAR(bd); 2979 - bd += UCC_GETH_SIZE_OF_BD; 2992 + /* set bd status and length */ 2993 + out_be32((u32 *)bd, R_I); 2994 + /* clear bd buffer */ 2995 + out_be32(&((struct qe_bd *)bd)->buf, 0); 2996 + bd += sizeof(struct qe_bd); 2980 2997 } 2981 - bd -= UCC_GETH_SIZE_OF_BD; 2982 - BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */ 2998 + bd -= sizeof(struct qe_bd); 2999 + /* set bd status and length */ 3000 + out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */ 2983 3001 } 2984 3002 2985 3003 /* ··· 2991 3003 /* Tx global PRAM */ 2992 3004 /* Allocate global tx parameter RAM page */ 2993 3005 ugeth->tx_glbl_pram_offset = 2994 - qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t), 3006 + qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), 2995 3007 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2996 3008 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) { 2997 3009 ugeth_err ··· 3001 3013 return -ENOMEM; 3002 3014 } 3003 3015 ugeth->p_tx_glbl_pram = 3004 - (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth-> 3016 + (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth-> 3005 3017 tx_glbl_pram_offset); 3006 3018 /* Zero out p_tx_glbl_pram */ 3007 - memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t)); 3019 + memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 3008 3020 3009 3021 /* Fill global PRAM */ 3010 3022 ··· 3012 3024 /* Size varies with number of Tx threads */ 3013 3025 ugeth->thread_dat_tx_offset = 3014 3026 qe_muram_alloc(numThreadsTxNumerical * 3015 - sizeof(ucc_geth_thread_data_tx_t) + 3027 + sizeof(struct ucc_geth_thread_data_tx) + 3016 3028 32 * (numThreadsTxNumerical == 1), 3017 3029 UCC_GETH_THREAD_DATA_ALIGNMENT); 3018 3030 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) { ··· 3024 3036 } 3025 3037 3026 3038 ugeth->p_thread_data_tx = 3027 - (ucc_geth_thread_data_tx_t *) 
qe_muram_addr(ugeth-> 3039 + (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth-> 3028 3040 thread_dat_tx_offset); 3029 3041 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 3030 3042 ··· 3041 3053 /* Size varies with number of Tx queues */ 3042 3054 ugeth->send_q_mem_reg_offset = 3043 3055 qe_muram_alloc(ug_info->numQueuesTx * 3044 - sizeof(ucc_geth_send_queue_qd_t), 3056 + sizeof(struct ucc_geth_send_queue_qd), 3045 3057 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 3046 3058 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) { 3047 3059 ugeth_err ··· 3052 3064 } 3053 3065 3054 3066 ugeth->p_send_q_mem_reg = 3055 - (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth-> 3067 + (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth-> 3056 3068 send_q_mem_reg_offset); 3057 3069 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 3058 3070 ··· 3061 3073 for (i = 0; i < ug_info->numQueuesTx; i++) { 3062 3074 endOfRing = 3063 3075 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - 3064 - 1) * UCC_GETH_SIZE_OF_BD; 3076 + 1) * sizeof(struct qe_bd); 3065 3077 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 3066 3078 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 3067 3079 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); ··· 3084 3096 if (ug_info->numQueuesTx > 1) { 3085 3097 /* scheduler exists only if more than 1 tx queue */ 3086 3098 ugeth->scheduler_offset = 3087 - qe_muram_alloc(sizeof(ucc_geth_scheduler_t), 3099 + qe_muram_alloc(sizeof(struct ucc_geth_scheduler), 3088 3100 UCC_GETH_SCHEDULER_ALIGNMENT); 3089 3101 if (IS_MURAM_ERR(ugeth->scheduler_offset)) { 3090 3102 ugeth_err ··· 3095 3107 } 3096 3108 3097 3109 ugeth->p_scheduler = 3098 - (ucc_geth_scheduler_t *) qe_muram_addr(ugeth-> 3110 + (struct ucc_geth_scheduler *) qe_muram_addr(ugeth-> 3099 3111 scheduler_offset); 3100 3112 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 3101 3113 ugeth->scheduler_offset); 3102 3114 /* Zero out 
p_scheduler */ 3103 - memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t)); 3115 + memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 3104 3116 3105 3117 /* Set values in scheduler */ 3106 3118 out_be32(&ugeth->p_scheduler->mblinterval, ··· 3132 3144 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 3133 3145 ugeth->tx_fw_statistics_pram_offset = 3134 3146 qe_muram_alloc(sizeof 3135 - (ucc_geth_tx_firmware_statistics_pram_t), 3147 + (struct ucc_geth_tx_firmware_statistics_pram), 3136 3148 UCC_GETH_TX_STATISTICS_ALIGNMENT); 3137 3149 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) { 3138 3150 ugeth_err ··· 3142 3154 return -ENOMEM; 3143 3155 } 3144 3156 ugeth->p_tx_fw_statistics_pram = 3145 - (ucc_geth_tx_firmware_statistics_pram_t *) 3157 + (struct ucc_geth_tx_firmware_statistics_pram *) 3146 3158 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 3147 3159 /* Zero out p_tx_fw_statistics_pram */ 3148 3160 memset(ugeth->p_tx_fw_statistics_pram, 3149 - 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t)); 3161 + 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 3150 3162 } 3151 3163 3152 3164 /* temoder */ ··· 3171 3183 /* Rx global PRAM */ 3172 3184 /* Allocate global rx parameter RAM page */ 3173 3185 ugeth->rx_glbl_pram_offset = 3174 - qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t), 3186 + qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), 3175 3187 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 3176 3188 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) { 3177 3189 ugeth_err ··· 3181 3193 return -ENOMEM; 3182 3194 } 3183 3195 ugeth->p_rx_glbl_pram = 3184 - (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth-> 3196 + (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth-> 3185 3197 rx_glbl_pram_offset); 3186 3198 /* Zero out p_rx_glbl_pram */ 3187 - memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t)); 3199 + memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 3188 3200 3189 3201 /* 
Fill global PRAM */ 3190 3202 ··· 3192 3204 /* Size varies with number of Rx threads */ 3193 3205 ugeth->thread_dat_rx_offset = 3194 3206 qe_muram_alloc(numThreadsRxNumerical * 3195 - sizeof(ucc_geth_thread_data_rx_t), 3207 + sizeof(struct ucc_geth_thread_data_rx), 3196 3208 UCC_GETH_THREAD_DATA_ALIGNMENT); 3197 3209 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) { 3198 3210 ugeth_err ··· 3203 3215 } 3204 3216 3205 3217 ugeth->p_thread_data_rx = 3206 - (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth-> 3218 + (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth-> 3207 3219 thread_dat_rx_offset); 3208 3220 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 3209 3221 ··· 3215 3227 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 3216 3228 ugeth->rx_fw_statistics_pram_offset = 3217 3229 qe_muram_alloc(sizeof 3218 - (ucc_geth_rx_firmware_statistics_pram_t), 3230 + (struct ucc_geth_rx_firmware_statistics_pram), 3219 3231 UCC_GETH_RX_STATISTICS_ALIGNMENT); 3220 3232 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) { 3221 3233 ugeth_err ··· 3225 3237 return -ENOMEM; 3226 3238 } 3227 3239 ugeth->p_rx_fw_statistics_pram = 3228 - (ucc_geth_rx_firmware_statistics_pram_t *) 3240 + (struct ucc_geth_rx_firmware_statistics_pram *) 3229 3241 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 3230 3242 /* Zero out p_rx_fw_statistics_pram */ 3231 3243 memset(ugeth->p_rx_fw_statistics_pram, 0, 3232 - sizeof(ucc_geth_rx_firmware_statistics_pram_t)); 3244 + sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 3233 3245 } 3234 3246 3235 3247 /* intCoalescingPtr */ ··· 3237 3249 /* Size varies with number of Rx queues */ 3238 3250 ugeth->rx_irq_coalescing_tbl_offset = 3239 3251 qe_muram_alloc(ug_info->numQueuesRx * 3240 - sizeof(ucc_geth_rx_interrupt_coalescing_entry_t), 3252 + sizeof(struct ucc_geth_rx_interrupt_coalescing_entry), 3241 3253 UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 3242 3254 if 
(IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) { 3243 3255 ugeth_err ··· 3248 3260 } 3249 3261 3250 3262 ugeth->p_rx_irq_coalescing_tbl = 3251 - (ucc_geth_rx_interrupt_coalescing_table_t *) 3263 + (struct ucc_geth_rx_interrupt_coalescing_table *) 3252 3264 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 3253 3265 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 3254 3266 ugeth->rx_irq_coalescing_tbl_offset); ··· 3288 3300 l3qt = 0; 3289 3301 for (i = 0; i < 8; i++) 3290 3302 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); 3291 - out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt); 3303 + out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); 3292 3304 } 3293 3305 3294 3306 /* vlantype */ ··· 3304 3316 /* Size varies with number of Rx queues */ 3305 3317 ugeth->rx_bd_qs_tbl_offset = 3306 3318 qe_muram_alloc(ug_info->numQueuesRx * 3307 - (sizeof(ucc_geth_rx_bd_queues_entry_t) + 3308 - sizeof(ucc_geth_rx_prefetched_bds_t)), 3319 + (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3320 + sizeof(struct ucc_geth_rx_prefetched_bds)), 3309 3321 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 3310 3322 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) { 3311 3323 ugeth_err ··· 3316 3328 } 3317 3329 3318 3330 ugeth->p_rx_bd_qs_tbl = 3319 - (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth-> 3331 + (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth-> 3320 3332 rx_bd_qs_tbl_offset); 3321 3333 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 3322 3334 /* Zero out p_rx_bd_qs_tbl */ 3323 3335 memset(ugeth->p_rx_bd_qs_tbl, 3324 3336 0, 3325 - ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) + 3326 - sizeof(ucc_geth_rx_prefetched_bds_t))); 3337 + ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3338 + sizeof(struct ucc_geth_rx_prefetched_bds))); 3327 3339 3328 3340 /* Setup the table */ 3329 3341 /* Assume BD rings are already established */ ··· 3394 3406 /* Allocate memory for extended filtering Mode Global 3395 3407 Parameters */ 
3396 3408 ugeth->exf_glbl_param_offset = 3397 - qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t), 3409 + qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 3398 3410 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 3399 3411 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) { 3400 3412 ugeth_err ··· 3405 3417 } 3406 3418 3407 3419 ugeth->p_exf_glbl_param = 3408 - (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth-> 3420 + (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth-> 3409 3421 exf_glbl_param_offset); 3410 3422 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 3411 3423 ugeth->exf_glbl_param_offset); ··· 3427 3439 INIT_LIST_HEAD(&ugeth->ind_hash_q); 3428 3440 } 3429 3441 p_82xx_addr_filt = 3430 - (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> 3442 + (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 3431 3443 p_rx_glbl_pram->addressfiltering; 3432 3444 3433 3445 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, ··· 3450 3462 * allocated resources can be released when the channel is freed. 
3451 3463 */ 3452 3464 if (!(ugeth->p_init_enet_param_shadow = 3453 - (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t), 3465 + (struct ucc_geth_init_pram *) kmalloc(sizeof(struct ucc_geth_init_pram), 3454 3466 GFP_KERNEL))) { 3455 3467 ugeth_err 3456 3468 ("%s: Can not allocate memory for" ··· 3460 3472 } 3461 3473 /* Zero out *p_init_enet_param_shadow */ 3462 3474 memset((char *)ugeth->p_init_enet_param_shadow, 3463 - 0, sizeof(ucc_geth_init_pram_t)); 3475 + 0, sizeof(struct ucc_geth_init_pram)); 3464 3476 3465 3477 /* Fill shadow InitEnet command parameter structure */ 3466 3478 ··· 3494 3506 } 3495 3507 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 3496 3508 ug_info->largestexternallookupkeysize; 3497 - size = sizeof(ucc_geth_thread_rx_pram_t); 3509 + size = sizeof(struct ucc_geth_thread_rx_pram); 3498 3510 if (ug_info->rxExtendedFiltering) { 3499 3511 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 3500 3512 if (ug_info->largestexternallookupkeysize == ··· 3525 3537 fill_init_enet_entries(ugeth, 3526 3538 &(ugeth->p_init_enet_param_shadow-> 3527 3539 txthread[0]), numThreadsTxNumerical, 3528 - sizeof(ucc_geth_thread_tx_pram_t), 3540 + sizeof(struct ucc_geth_thread_tx_pram), 3529 3541 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3530 3542 ug_info->riscTx, 0)) != 0) { 3531 3543 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", ··· 3545 3557 } 3546 3558 3547 3559 /* Allocate InitEnet command parameter structure */ 3548 - init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4); 3560 + init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3549 3561 if (IS_MURAM_ERR(init_enet_pram_offset)) { 3550 3562 ugeth_err 3551 3563 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", ··· 3554 3566 return -ENOMEM; 3555 3567 } 3556 3568 p_init_enet_pram = 3557 - (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset); 3569 + (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset); 
3558 3570 3559 3571 /* Copy shadow InitEnet command parameter structure into PRAM */ 3560 3572 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; ··· 3579 3591 /* Issue QE command */ 3580 3592 cecr_subblock = 3581 3593 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 3582 - qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, 3594 + qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 3583 3595 init_enet_pram_offset); 3584 3596 3585 3597 /* Free InitEnet command parameter */ ··· 3591 3603 /* returns a net_device_stats structure pointer */ 3592 3604 static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) 3593 3605 { 3594 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3606 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3595 3607 3596 3608 return &(ugeth->stats); 3597 3609 } ··· 3602 3614 * starting over will fix the problem. */ 3603 3615 static void ucc_geth_timeout(struct net_device *dev) 3604 3616 { 3605 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3617 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3606 3618 3607 3619 ugeth_vdbg("%s: IN", __FUNCTION__); 3608 3620 ··· 3622 3634 /* It is pointed to by the dev->hard_start_xmit function pointer */ 3623 3635 static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 3624 3636 { 3625 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3637 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3626 3638 u8 *bd; /* BD pointer */ 3627 3639 u32 bd_status; 3628 3640 u8 txQ = 0; ··· 3635 3647 3636 3648 /* Start from the next BD that should be filled */ 3637 3649 bd = ugeth->txBd[txQ]; 3638 - bd_status = BD_STATUS_AND_LENGTH(bd); 3650 + bd_status = in_be32((u32 *)bd); 3639 3651 /* Save the skb pointer so we can free it later */ 3640 3652 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; 3641 3653 ··· 3645 3657 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3646 3658 3647 3659 /* set up the buffer descriptor */ 
3648 - BD_BUFFER_SET(bd, 3660 + out_be32(&((struct qe_bd *)bd)->buf, 3649 3661 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); 3650 3662 3651 - //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); 3663 + /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3652 3664 3653 3665 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; 3654 3666 3655 - BD_STATUS_AND_LENGTH_SET(bd, bd_status); 3667 + /* set bd status and length */ 3668 + out_be32((u32 *)bd, bd_status); 3656 3669 3657 3670 dev->trans_start = jiffies; 3658 3671 3659 3672 /* Move to next BD in the ring */ 3660 3673 if (!(bd_status & T_W)) 3661 - ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD; 3674 + ugeth->txBd[txQ] = bd + sizeof(struct qe_bd); 3662 3675 else 3663 3676 ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; 3664 3677 ··· 3684 3695 return 0; 3685 3696 } 3686 3697 3687 - static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit) 3698 + static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3688 3699 { 3689 3700 struct sk_buff *skb; 3690 3701 u8 *bd; ··· 3698 3709 /* collect received buffers */ 3699 3710 bd = ugeth->rxBd[rxQ]; 3700 3711 3701 - bd_status = BD_STATUS_AND_LENGTH(bd); 3712 + bd_status = in_be32((u32 *)bd); 3702 3713 3703 3714 /* while there are received buffers and BD is full (~R_E) */ 3704 3715 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { 3705 - bdBuffer = (u8 *) BD_BUFFER(bd); 3716 + bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf); 3706 3717 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); 3707 3718 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; 3708 3719 ··· 3757 3768 if (bd_status & R_W) 3758 3769 bd = ugeth->p_rx_bd_ring[rxQ]; 3759 3770 else 3760 - bd += UCC_GETH_SIZE_OF_BD; 3771 + bd += sizeof(struct qe_bd); 3761 3772 3762 - bd_status = BD_STATUS_AND_LENGTH(bd); 3773 + bd_status = in_be32((u32 *)bd); 3763 3774 } 3764 3775 3765 3776 ugeth->rxBd[rxQ] = bd; ··· 3770 3781 static int ucc_geth_tx(struct 
net_device *dev, u8 txQ) 3771 3782 { 3772 3783 /* Start from the next BD that should be filled */ 3773 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3784 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3774 3785 u8 *bd; /* BD pointer */ 3775 3786 u32 bd_status; 3776 3787 3777 3788 bd = ugeth->confBd[txQ]; 3778 - bd_status = BD_STATUS_AND_LENGTH(bd); 3789 + bd_status = in_be32((u32 *)bd); 3779 3790 3780 3791 /* Normal processing. */ 3781 3792 while ((bd_status & T_R) == 0) { ··· 3802 3813 3803 3814 /* Advance the confirmation BD pointer */ 3804 3815 if (!(bd_status & T_W)) 3805 - ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD; 3816 + ugeth->confBd[txQ] += sizeof(struct qe_bd); 3806 3817 else 3807 3818 ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; 3808 3819 } ··· 3812 3823 #ifdef CONFIG_UGETH_NAPI 3813 3824 static int ucc_geth_poll(struct net_device *dev, int *budget) 3814 3825 { 3815 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3826 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3816 3827 int howmany; 3817 3828 int rx_work_limit = *budget; 3818 3829 u8 rxQ = 0; ··· 3836 3847 static irqreturn_t ucc_geth_irq_handler(int irq, void *info) 3837 3848 { 3838 3849 struct net_device *dev = (struct net_device *)info; 3839 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3840 - ucc_fast_private_t *uccf; 3841 - ucc_geth_info_t *ug_info; 3850 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3851 + struct ucc_fast_private *uccf; 3852 + struct ucc_geth_info *ug_info; 3842 3853 register u32 ucce = 0; 3843 3854 register u32 bit_mask = UCCE_RXBF_SINGLE_MASK; 3844 3855 register u32 tx_mask = UCCE_TXBF_SINGLE_MASK; ··· 3901 3912 static irqreturn_t phy_interrupt(int irq, void *dev_id) 3902 3913 { 3903 3914 struct net_device *dev = (struct net_device *)dev_id; 3904 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3915 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3905 3916 3906 3917 ugeth_vdbg("%s: IN", __FUNCTION__); 3907 3918 ··· 3921 3932 static void 
ugeth_phy_change(void *data) 3922 3933 { 3923 3934 struct net_device *dev = (struct net_device *)data; 3924 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3925 - ucc_geth_t *ug_regs; 3935 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3936 + struct ucc_geth *ug_regs; 3926 3937 int result = 0; 3927 3938 3928 3939 ugeth_vdbg("%s: IN", __FUNCTION__); ··· 3952 3963 static void ugeth_phy_timer(unsigned long data) 3953 3964 { 3954 3965 struct net_device *dev = (struct net_device *)data; 3955 - ucc_geth_private_t *ugeth = netdev_priv(dev); 3966 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3956 3967 3957 3968 schedule_work(&ugeth->tq); 3958 3969 ··· 3968 3979 static void ugeth_phy_startup_timer(unsigned long data) 3969 3980 { 3970 3981 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; 3971 - ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev); 3982 + struct ucc_geth_private *ugeth = netdev_priv(mii_info->dev); 3972 3983 static int secondary = UGETH_AN_TIMEOUT; 3973 3984 int result; 3974 3985 ··· 4023 4034 /* Returns 0 for success. 
*/ 4024 4035 static int ucc_geth_open(struct net_device *dev) 4025 4036 { 4026 - ucc_geth_private_t *ugeth = netdev_priv(dev); 4037 + struct ucc_geth_private *ugeth = netdev_priv(dev); 4027 4038 int err; 4028 4039 4029 4040 ugeth_vdbg("%s: IN", __FUNCTION__); ··· 4100 4111 /* Stops the kernel queue, and halts the controller */ 4101 4112 static int ucc_geth_close(struct net_device *dev) 4102 4113 { 4103 - ucc_geth_private_t *ugeth = netdev_priv(dev); 4114 + struct ucc_geth_private *ugeth = netdev_priv(dev); 4104 4115 4105 4116 ugeth_vdbg("%s: IN", __FUNCTION__); 4106 4117 ··· 4119 4130 4120 4131 const struct ethtool_ops ucc_geth_ethtool_ops = { }; 4121 4132 4122 - static int ucc_geth_probe(struct device *device) 4133 + static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match) 4123 4134 { 4124 - struct platform_device *pdev = to_platform_device(device); 4125 - struct ucc_geth_platform_data *ugeth_pdata; 4135 + struct device *device = &ofdev->dev; 4136 + struct device_node *np = ofdev->node; 4126 4137 struct net_device *dev = NULL; 4127 4138 struct ucc_geth_private *ugeth = NULL; 4128 4139 struct ucc_geth_info *ug_info; 4129 - int err; 4140 + struct resource res; 4141 + struct device_node *phy; 4142 + int err, ucc_num, phy_interface; 4130 4143 static int mii_mng_configured = 0; 4144 + const phandle *ph; 4145 + const unsigned int *prop; 4131 4146 4132 4147 ugeth_vdbg("%s: IN", __FUNCTION__); 4133 4148 4134 - ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data; 4149 + prop = get_property(np, "device-id", NULL); 4150 + ucc_num = *prop - 1; 4151 + if ((ucc_num < 0) || (ucc_num > 7)) 4152 + return -ENODEV; 4135 4153 4136 - ug_info = &ugeth_info[pdev->id]; 4137 - ug_info->uf_info.ucc_num = pdev->id; 4138 - ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock; 4139 - ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock; 4140 - ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr; 4141 - ug_info->uf_info.irq = platform_get_irq(pdev, 0); 
4142 - ug_info->phy_address = ugeth_pdata->phy_id; 4143 - ug_info->enet_interface = ugeth_pdata->phy_interface; 4144 - ug_info->board_flags = ugeth_pdata->board_flags; 4145 - ug_info->phy_interrupt = ugeth_pdata->phy_interrupt; 4154 + ug_info = &ugeth_info[ucc_num]; 4155 + ug_info->uf_info.ucc_num = ucc_num; 4156 + prop = get_property(np, "rx-clock", NULL); 4157 + ug_info->uf_info.rx_clock = *prop; 4158 + prop = get_property(np, "tx-clock", NULL); 4159 + ug_info->uf_info.tx_clock = *prop; 4160 + err = of_address_to_resource(np, 0, &res); 4161 + if (err) 4162 + return -EINVAL; 4163 + 4164 + ug_info->uf_info.regs = res.start; 4165 + ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 4166 + 4167 + ph = get_property(np, "phy-handle", NULL); 4168 + phy = of_find_node_by_phandle(*ph); 4169 + 4170 + if (phy == NULL) 4171 + return -ENODEV; 4172 + 4173 + prop = get_property(phy, "reg", NULL); 4174 + ug_info->phy_address = *prop; 4175 + prop = get_property(phy, "interface", NULL); 4176 + ug_info->enet_interface = *prop; 4177 + ug_info->phy_interrupt = irq_of_parse_and_map(phy, 0); 4178 + ug_info->board_flags = (ug_info->phy_interrupt == NO_IRQ)? 4179 + 0:FSL_UGETH_BRD_HAS_PHY_INTR; 4146 4180 4147 4181 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", 4148 4182 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, ··· 4173 4161 4174 4162 if (ug_info == NULL) { 4175 4163 ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__, 4176 - pdev->id); 4164 + ucc_num); 4177 4165 return -ENODEV; 4178 4166 } 4179 4167 4168 + /* FIXME: Work around for early chip rev. */ 4169 + /* There's a bug in initial chip rev(s) in the RGMII ac */ 4170 + /* timing. */ 4171 + /* The following compensates by writing to the reserved */ 4172 + /* QE Port Output Hold Registers (CPOH1?). 
*/ 4173 + prop = get_property(phy, "interface", NULL); 4174 + phy_interface = *prop; 4175 + if ((phy_interface == ENET_1000_RGMII) || 4176 + (phy_interface == ENET_100_RGMII) || 4177 + (phy_interface == ENET_10_RGMII)) { 4178 + struct device_node *soc; 4179 + phys_addr_t immrbase = -1; 4180 + u32 *tmp_reg; 4181 + u32 tmp_val; 4182 + 4183 + soc = of_find_node_by_type(NULL, "soc"); 4184 + if (soc) { 4185 + unsigned int size; 4186 + const void *prop = get_property(soc, "reg", &size); 4187 + immrbase = of_translate_address(soc, prop); 4188 + of_node_put(soc); 4189 + }; 4190 + 4191 + tmp_reg = (u32 *) ioremap(immrbase + 0x14A8, 0x4); 4192 + tmp_val = in_be32(tmp_reg); 4193 + if (ucc_num == 1) 4194 + out_be32(tmp_reg, tmp_val | 0x00003000); 4195 + else if (ucc_num == 2) 4196 + out_be32(tmp_reg, tmp_val | 0x0c000000); 4197 + iounmap(tmp_reg); 4198 + } 4199 + 4180 4200 if (!mii_mng_configured) { 4181 - ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num); 4201 + ucc_set_qe_mux_mii_mng(ucc_num); 4182 4202 mii_mng_configured = 1; 4183 4203 } 4184 4204 ··· 4257 4213 4258 4214 ugeth->ug_info = ug_info; 4259 4215 ugeth->dev = dev; 4260 - memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6); 4216 + memcpy(dev->dev_addr, get_property(np, "mac-address", NULL), 6); 4261 4217 4262 4218 return 0; 4263 4219 } 4264 4220 4265 - static int ucc_geth_remove(struct device *device) 4221 + static int ucc_geth_remove(struct of_device* ofdev) 4266 4222 { 4223 + struct device *device = &ofdev->dev; 4267 4224 struct net_device *dev = dev_get_drvdata(device); 4268 4225 struct ucc_geth_private *ugeth = netdev_priv(dev); 4269 4226 ··· 4275 4230 return 0; 4276 4231 } 4277 4232 4278 - /* Structure for a device driver */ 4279 - static struct device_driver ucc_geth_driver = { 4280 - .name = DRV_NAME, 4281 - .bus = &platform_bus_type, 4282 - .probe = ucc_geth_probe, 4283 - .remove = ucc_geth_remove, 4233 + static struct of_device_id ucc_geth_match[] = { 4234 + { 4235 + .type = "network", 4236 + .compatible = 
"ucc_geth", 4237 + }, 4238 + {}, 4239 + }; 4240 + 4241 + MODULE_DEVICE_TABLE(of, ucc_geth_match); 4242 + 4243 + static struct of_platform_driver ucc_geth_driver = { 4244 + .name = DRV_NAME, 4245 + .match_table = ucc_geth_match, 4246 + .probe = ucc_geth_probe, 4247 + .remove = ucc_geth_remove, 4284 4248 }; 4285 4249 4286 4250 static int __init ucc_geth_init(void) 4287 4251 { 4288 4252 int i; 4253 + 4289 4254 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); 4290 4255 for (i = 0; i < 8; i++) 4291 4256 memcpy(&(ugeth_info[i]), &ugeth_primary_info, 4292 4257 sizeof(ugeth_primary_info)); 4293 4258 4294 - return driver_register(&ucc_geth_driver); 4259 + return of_register_driver(&ucc_geth_driver); 4295 4260 } 4296 4261 4297 4262 static void __exit ucc_geth_exit(void) 4298 4263 { 4299 - driver_unregister(&ucc_geth_driver); 4264 + of_unregister_driver(&ucc_geth_driver); 4300 4265 } 4301 4266 4302 4267 module_init(ucc_geth_init);
+123 -125
drivers/net/ucc_geth.h
··· 36 36 #define ENET_INIT_PARAM_MAX_ENTRIES_RX 9 37 37 #define ENET_INIT_PARAM_MAX_ENTRIES_TX 8 38 38 39 - typedef struct ucc_mii_mng { 39 + struct ucc_mii_mng { 40 40 u32 miimcfg; /* MII management configuration reg */ 41 41 u32 miimcom; /* MII management command reg */ 42 42 u32 miimadd; /* MII management address reg */ 43 43 u32 miimcon; /* MII management control reg */ 44 44 u32 miimstat; /* MII management status reg */ 45 45 u32 miimind; /* MII management indication reg */ 46 - } __attribute__ ((packed)) ucc_mii_mng_t; 46 + } __attribute__ ((packed)); 47 47 48 - typedef struct ucc_geth { 49 - ucc_fast_t uccf; 48 + struct ucc_geth { 49 + struct ucc_fast uccf; 50 50 51 51 u32 maccfg1; /* mac configuration reg. 1 */ 52 52 u32 maccfg2; /* mac configuration reg. 2 */ 53 53 u32 ipgifg; /* interframe gap reg. */ 54 54 u32 hafdup; /* half-duplex reg. */ 55 55 u8 res1[0x10]; 56 - ucc_mii_mng_t miimng; /* MII management structure */ 56 + struct ucc_mii_mng miimng; /* MII management structure */ 57 57 u32 ifctl; /* interface control reg */ 58 58 u32 ifstat; /* interface statux reg */ 59 59 u32 macstnaddr1; /* mac station address part 1 reg */ ··· 111 111 u32 scar; /* Statistics carry register */ 112 112 u32 scam; /* Statistics caryy mask register */ 113 113 u8 res5[0x200 - 0x1c4]; 114 - } __attribute__ ((packed)) ucc_geth_t; 114 + } __attribute__ ((packed)); 115 115 116 116 /* UCC GETH TEMODR Register */ 117 117 #define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics ··· 508 508 /* UCC GETH UDSR (Data Synchronization Register) */ 509 509 #define UDSR_MAGIC 0x067E 510 510 511 - typedef struct ucc_geth_thread_data_tx { 511 + struct ucc_geth_thread_data_tx { 512 512 u8 res0[104]; 513 - } __attribute__ ((packed)) ucc_geth_thread_data_tx_t; 513 + } __attribute__ ((packed)); 514 514 515 - typedef struct ucc_geth_thread_data_rx { 515 + struct ucc_geth_thread_data_rx { 516 516 u8 res0[40]; 517 - } __attribute__ ((packed)) ucc_geth_thread_data_rx_t; 517 + } 
__attribute__ ((packed)); 518 518 519 519 /* Send Queue Queue-Descriptor */ 520 - typedef struct ucc_geth_send_queue_qd { 520 + struct ucc_geth_send_queue_qd { 521 521 u32 bd_ring_base; /* pointer to BD ring base address */ 522 522 u8 res0[0x8]; 523 523 u32 last_bd_completed_address;/* initialize to last entry in BD ring */ 524 524 u8 res1[0x30]; 525 - } __attribute__ ((packed)) ucc_geth_send_queue_qd_t; 525 + } __attribute__ ((packed)); 526 526 527 - typedef struct ucc_geth_send_queue_mem_region { 528 - ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES]; 529 - } __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t; 527 + struct ucc_geth_send_queue_mem_region { 528 + struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; 529 + } __attribute__ ((packed)); 530 530 531 - typedef struct ucc_geth_thread_tx_pram { 531 + struct ucc_geth_thread_tx_pram { 532 532 u8 res0[64]; 533 - } __attribute__ ((packed)) ucc_geth_thread_tx_pram_t; 533 + } __attribute__ ((packed)); 534 534 535 - typedef struct ucc_geth_thread_rx_pram { 535 + struct ucc_geth_thread_rx_pram { 536 536 u8 res0[128]; 537 - } __attribute__ ((packed)) ucc_geth_thread_rx_pram_t; 537 + } __attribute__ ((packed)); 538 538 539 539 #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 540 540 #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 541 541 #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96 542 542 543 - typedef struct ucc_geth_scheduler { 543 + struct ucc_geth_scheduler { 544 544 u16 cpucount0; /* CPU packet counter */ 545 545 u16 cpucount1; /* CPU packet counter */ 546 546 u16 cecount0; /* QE packet counter */ ··· 574 574 /**< weight factor for queues */ 575 575 u32 minw; /* temporary variable handled by QE */ 576 576 u8 res1[0x70 - 0x64]; 577 - } __attribute__ ((packed)) ucc_geth_scheduler_t; 577 + } __attribute__ ((packed)); 578 578 579 - typedef struct ucc_geth_tx_firmware_statistics_pram { 579 + struct ucc_geth_tx_firmware_statistics_pram { 580 580 u32 sicoltx; /* single 
collision */ 581 581 u32 mulcoltx; /* multiple collision */ 582 582 u32 latecoltxfr; /* late collision */ ··· 596 596 and 1518 octets */ 597 597 u32 txpktsjumbo; /* total packets (including bad) between 1024 598 598 and MAXLength octets */ 599 - } __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t; 599 + } __attribute__ ((packed)); 600 600 601 - typedef struct ucc_geth_rx_firmware_statistics_pram { 601 + struct ucc_geth_rx_firmware_statistics_pram { 602 602 u32 frrxfcser; /* frames with crc error */ 603 603 u32 fraligner; /* frames with alignment error */ 604 604 u32 inrangelenrxer; /* in range length error */ ··· 630 630 replaced */ 631 631 u32 insertvlan; /* total frames that had their VLAN tag 632 632 inserted */ 633 - } __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t; 633 + } __attribute__ ((packed)); 634 634 635 - typedef struct ucc_geth_rx_interrupt_coalescing_entry { 635 + struct ucc_geth_rx_interrupt_coalescing_entry { 636 636 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max 637 637 value */ 638 638 u32 interruptcoalescingcounter; /* interrupt coalescing counter, 639 639 initialize to 640 640 interruptcoalescingmaxvalue */ 641 - } __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t; 641 + } __attribute__ ((packed)); 642 642 643 - typedef struct ucc_geth_rx_interrupt_coalescing_table { 644 - ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES]; 643 + struct ucc_geth_rx_interrupt_coalescing_table { 644 + struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; 645 645 /**< interrupt coalescing entry */ 646 - } __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t; 646 + } __attribute__ ((packed)); 647 647 648 - typedef struct ucc_geth_rx_prefetched_bds { 649 - qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ 650 - } __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t; 648 + struct ucc_geth_rx_prefetched_bds { 649 + struct qe_bd 
bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ 650 + } __attribute__ ((packed)); 651 651 652 - typedef struct ucc_geth_rx_bd_queues_entry { 652 + struct ucc_geth_rx_bd_queues_entry { 653 653 u32 bdbaseptr; /* BD base pointer */ 654 654 u32 bdptr; /* BD pointer */ 655 655 u32 externalbdbaseptr; /* external BD base pointer */ 656 656 u32 externalbdptr; /* external BD pointer */ 657 - } __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t; 657 + } __attribute__ ((packed)); 658 658 659 - typedef struct ucc_geth_tx_global_pram { 659 + struct ucc_geth_tx_global_pram { 660 660 u16 temoder; 661 661 u8 res0[0x38 - 0x02]; 662 662 u32 sqptr; /* a base pointer to send queue memory region */ ··· 670 670 u32 tqptr; /* a base pointer to the Tx Queues Memory 671 671 Region */ 672 672 u8 res2[0x80 - 0x74]; 673 - } __attribute__ ((packed)) ucc_geth_tx_global_pram_t; 673 + } __attribute__ ((packed)); 674 674 675 675 /* structure representing Extended Filtering Global Parameters in PRAM */ 676 - typedef struct ucc_geth_exf_global_pram { 676 + struct ucc_geth_exf_global_pram { 677 677 u32 l2pcdptr; /* individual address filter, high */ 678 678 u8 res0[0x10 - 0x04]; 679 - } __attribute__ ((packed)) ucc_geth_exf_global_pram_t; 679 + } __attribute__ ((packed)); 680 680 681 - typedef struct ucc_geth_rx_global_pram { 681 + struct ucc_geth_rx_global_pram { 682 682 u32 remoder; /* ethernet mode reg. 
*/ 683 683 u32 rqptr; /* base pointer to the Rx Queues Memory Region*/ 684 684 u32 res0[0x1]; ··· 710 710 u32 exfGlobalParam; /* base address for extended filtering global 711 711 parameters */ 712 712 u8 res6[0x100 - 0xC4]; /* Initialize to zero */ 713 - } __attribute__ ((packed)) ucc_geth_rx_global_pram_t; 713 + } __attribute__ ((packed)); 714 714 715 715 #define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 716 716 717 717 /* structure representing InitEnet command */ 718 - typedef struct ucc_geth_init_pram { 718 + struct ucc_geth_init_pram { 719 719 u8 resinit1; 720 720 u8 resinit2; 721 721 u8 resinit3; ··· 729 729 u32 txglobal; /* tx global */ 730 730 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ 731 731 u8 res3[0x1]; 732 - } __attribute__ ((packed)) ucc_geth_init_pram_t; 732 + } __attribute__ ((packed)); 733 733 734 734 #define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) 735 735 #define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) ··· 746 746 #define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400 747 747 748 748 /* structure representing 82xx Address Filtering Enet Address in PRAM */ 749 - typedef struct ucc_geth_82xx_enet_address { 749 + struct ucc_geth_82xx_enet_address { 750 750 u8 res1[0x2]; 751 751 u16 h; /* address (MSB) */ 752 752 u16 m; /* address */ 753 753 u16 l; /* address (LSB) */ 754 - } __attribute__ ((packed)) ucc_geth_82xx_enet_address_t; 754 + } __attribute__ ((packed)); 755 755 756 756 /* structure representing 82xx Address Filtering PRAM */ 757 - typedef struct ucc_geth_82xx_address_filtering_pram { 757 + struct ucc_geth_82xx_address_filtering_pram { 758 758 u32 iaddr_h; /* individual address filter, high */ 759 759 u32 iaddr_l; /* individual address filter, low */ 760 760 u32 gaddr_h; /* group address filter, high */ 761 761 u32 gaddr_l; /* group address filter, low */ 762 - ucc_geth_82xx_enet_address_t taddr; 763 - ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS]; 762 + struct ucc_geth_82xx_enet_address taddr; 763 + struct ucc_geth_82xx_enet_address 
paddr[NUM_OF_PADDRS]; 764 764 u8 res0[0x40 - 0x38]; 765 - } __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t; 765 + } __attribute__ ((packed)); 766 766 767 767 /* GETH Tx firmware statistics structure, used when calling 768 768 UCC_GETH_GetStatistics. */ 769 - typedef struct ucc_geth_tx_firmware_statistics { 769 + struct ucc_geth_tx_firmware_statistics { 770 770 u32 sicoltx; /* single collision */ 771 771 u32 mulcoltx; /* multiple collision */ 772 772 u32 latecoltxfr; /* late collision */ ··· 786 786 and 1518 octets */ 787 787 u32 txpktsjumbo; /* total packets (including bad) between 1024 788 788 and MAXLength octets */ 789 - } __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t; 789 + } __attribute__ ((packed)); 790 790 791 791 /* GETH Rx firmware statistics structure, used when calling 792 792 UCC_GETH_GetStatistics. */ 793 - typedef struct ucc_geth_rx_firmware_statistics { 793 + struct ucc_geth_rx_firmware_statistics { 794 794 u32 frrxfcser; /* frames with crc error */ 795 795 u32 fraligner; /* frames with alignment error */ 796 796 u32 inrangelenrxer; /* in range length error */ ··· 822 822 replaced */ 823 823 u32 insertvlan; /* total frames that had their VLAN tag 824 824 inserted */ 825 - } __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t; 825 + } __attribute__ ((packed)); 826 826 827 827 /* GETH hardware statistics structure, used when calling 828 828 UCC_GETH_GetStatistics. 
*/ 829 - typedef struct ucc_geth_hardware_statistics { 829 + struct ucc_geth_hardware_statistics { 830 830 u32 tx64; /* Total number of frames (including bad 831 831 frames) transmitted that were exactly of the 832 832 minimal length (64 for un tagged, 68 for ··· 871 871 u32 rbca; /* Total number of frames received succesfully 872 872 that had destination address equal to the 873 873 broadcast address */ 874 - } __attribute__ ((packed)) ucc_geth_hardware_statistics_t; 874 + } __attribute__ ((packed)); 875 875 876 876 /* UCC GETH Tx errors returned via TxConf callback */ 877 877 #define TX_ERRORS_DEF 0x0200 ··· 1013 1013 (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112) 1014 1014 1015 1015 /* Ethernet speed */ 1016 - typedef enum enet_speed { 1016 + enum enet_speed { 1017 1017 ENET_SPEED_10BT, /* 10 Base T */ 1018 1018 ENET_SPEED_100BT, /* 100 Base T */ 1019 1019 ENET_SPEED_1000BT /* 1000 Base T */ 1020 - } enet_speed_e; 1020 + }; 1021 1021 1022 1022 /* Ethernet Address Type. */ 1023 - typedef enum enet_addr_type { 1023 + enum enet_addr_type { 1024 1024 ENET_ADDR_TYPE_INDIVIDUAL, 1025 1025 ENET_ADDR_TYPE_GROUP, 1026 1026 ENET_ADDR_TYPE_BROADCAST 1027 - } enet_addr_type_e; 1027 + }; 1028 1028 1029 1029 /* TBI / MII Set Register */ 1030 - typedef enum enet_tbi_mii_reg { 1030 + enum enet_tbi_mii_reg { 1031 1031 ENET_TBI_MII_CR = 0x00, /* Control (CR ) */ 1032 1032 ENET_TBI_MII_SR = 0x01, /* Status (SR ) */ 1033 1033 ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */ ··· 1040 1040 ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */ 1041 1041 ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */ 1042 1042 ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */ 1043 - } enet_tbi_mii_reg_e; 1043 + }; 1044 1044 1045 1045 /* UCC GETH 82xx Ethernet Address Recognition Location */ 1046 - typedef enum ucc_geth_enet_address_recognition_location { 1046 + enum ucc_geth_enet_address_recognition_location { 1047 1047 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* 
station 1048 1048 address */ 1049 1049 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional ··· 1065 1065 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */ 1066 1066 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual 1067 1067 hash */ 1068 - } ucc_geth_enet_address_recognition_location_e; 1068 + }; 1069 1069 1070 1070 /* UCC GETH vlan operation tagged */ 1071 - typedef enum ucc_geth_vlan_operation_tagged { 1071 + enum ucc_geth_vlan_operation_tagged { 1072 1072 UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */ 1073 1073 UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG 1074 1074 = 0x1, /* Tagged - replace vid portion of q tag */ ··· 1076 1076 = 0x2, /* Tagged - if vid0 replace vid with default value */ 1077 1077 UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME 1078 1078 = 0x3 /* Tagged - extract q tag from frame */ 1079 - } ucc_geth_vlan_operation_tagged_e; 1079 + }; 1080 1080 1081 1081 /* UCC GETH vlan operation non-tagged */ 1082 - typedef enum ucc_geth_vlan_operation_non_tagged { 1082 + enum ucc_geth_vlan_operation_non_tagged { 1083 1083 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */ 1084 1084 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged - 1085 1085 q tag insert 1086 1086 */ 1087 - } ucc_geth_vlan_operation_non_tagged_e; 1087 + }; 1088 1088 1089 1089 /* UCC GETH Rx Quality of Service Mode */ 1090 - typedef enum ucc_geth_qos_mode { 1090 + enum ucc_geth_qos_mode { 1091 1091 UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */ 1092 1092 UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue 1093 1093 determined ··· 1097 1097 determined 1098 1098 by L3 1099 1099 criteria */ 1100 - } ucc_geth_qos_mode_e; 1100 + }; 1101 1101 1102 1102 /* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together 1103 1103 for combined functionality */ 1104 - typedef enum ucc_geth_statistics_gathering_mode { 1104 + enum 
ucc_geth_statistics_gathering_mode { 1105 1105 UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No 1106 1106 statistics 1107 1107 gathering */ ··· 1122 1122 statistics 1123 1123 gathering 1124 1124 */ 1125 - } ucc_geth_statistics_gathering_mode_e; 1125 + }; 1126 1126 1127 1127 /* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */ 1128 - typedef enum ucc_geth_maccfg2_pad_and_crc_mode { 1128 + enum ucc_geth_maccfg2_pad_and_crc_mode { 1129 1129 UCC_GETH_PAD_AND_CRC_MODE_NONE 1130 1130 = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding 1131 1131 short frames ··· 1135 1135 CRC only */ 1136 1136 UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC = 1137 1137 MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 1138 - } ucc_geth_maccfg2_pad_and_crc_mode_e; 1138 + }; 1139 1139 1140 1140 /* UCC GETH upsmr Flow Control Mode */ 1141 - typedef enum ucc_geth_flow_control_mode { 1141 + enum ucc_geth_flow_control_mode { 1142 1142 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic 1143 1143 flow control 1144 1144 */ 1145 1145 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY 1146 1146 = 0x00004000 /* Send pause frame when RxFIFO reaches its 1147 1147 emergency threshold */ 1148 - } ucc_geth_flow_control_mode_e; 1148 + }; 1149 1149 1150 1150 /* UCC GETH number of threads */ 1151 - typedef enum ucc_geth_num_of_threads { 1151 + enum ucc_geth_num_of_threads { 1152 1152 UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */ 1153 1153 UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */ 1154 1154 UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */ 1155 1155 UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */ 1156 1156 UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */ 1157 - } ucc_geth_num_of_threads_e; 1157 + }; 1158 1158 1159 1159 /* UCC GETH number of station addresses */ 1160 - typedef enum ucc_geth_num_of_station_addresses { 1160 + enum ucc_geth_num_of_station_addresses { 1161 1161 UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */ 1162 1162 UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */ 1163 - } 
ucc_geth_num_of_station_addresses_e; 1164 - 1165 - typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS]; 1163 + }; 1166 1164 1167 1165 /* UCC GETH 82xx Ethernet Address Container */ 1168 - typedef struct enet_addr_container { 1169 - enet_addr_t address; /* ethernet address */ 1170 - ucc_geth_enet_address_recognition_location_e location; /* location in 1166 + struct enet_addr_container { 1167 + u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ 1168 + enum ucc_geth_enet_address_recognition_location location; /* location in 1171 1169 82xx address 1172 1170 recognition 1173 1171 hardware */ 1174 1172 struct list_head node; 1175 - } enet_addr_container_t; 1173 + }; 1176 1174 1177 - #define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node) 1175 + #define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node) 1178 1176 1179 1177 /* UCC GETH Termination Action Descriptor (TAD) structure. */ 1180 - typedef struct ucc_geth_tad_params { 1178 + struct ucc_geth_tad_params { 1181 1179 int rx_non_dynamic_extended_features_mode; 1182 1180 int reject_frame; 1183 - ucc_geth_vlan_operation_tagged_e vtag_op; 1184 - ucc_geth_vlan_operation_non_tagged_e vnontag_op; 1185 - ucc_geth_qos_mode_e rqos; 1181 + enum ucc_geth_vlan_operation_tagged vtag_op; 1182 + enum ucc_geth_vlan_operation_non_tagged vnontag_op; 1183 + enum ucc_geth_qos_mode rqos; 1186 1184 u8 vpri; 1187 1185 u16 vid; 1188 - } ucc_geth_tad_params_t; 1186 + }; 1189 1187 1190 1188 /* GETH protocol initialization structure */ 1191 - typedef struct ucc_geth_info { 1192 - ucc_fast_info_t uf_info; 1189 + struct ucc_geth_info { 1190 + struct ucc_fast_info uf_info; 1193 1191 u8 numQueuesTx; 1194 1192 u8 numQueuesRx; 1195 1193 int ipCheckSumCheck; ··· 1249 1251 u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; 1250 1252 u16 bdRingLenTx[NUM_TX_QUEUES]; 1251 1253 u16 bdRingLenRx[NUM_RX_QUEUES]; 1252 - enet_interface_e enet_interface; 1253 - ucc_geth_num_of_station_addresses_e 
numStationAddresses; 1254 - qe_fltr_largest_external_tbl_lookup_key_size_e 1254 + enum enet_interface enet_interface; 1255 + enum ucc_geth_num_of_station_addresses numStationAddresses; 1256 + enum qe_fltr_largest_external_tbl_lookup_key_size 1255 1257 largestexternallookupkeysize; 1256 - ucc_geth_statistics_gathering_mode_e statisticsMode; 1257 - ucc_geth_vlan_operation_tagged_e vlanOperationTagged; 1258 - ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged; 1259 - ucc_geth_qos_mode_e rxQoSMode; 1260 - ucc_geth_flow_control_mode_e aufc; 1261 - ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc; 1262 - ucc_geth_num_of_threads_e numThreadsTx; 1263 - ucc_geth_num_of_threads_e numThreadsRx; 1264 - qe_risc_allocation_e riscTx; 1265 - qe_risc_allocation_e riscRx; 1266 - } ucc_geth_info_t; 1258 + enum ucc_geth_statistics_gathering_mode statisticsMode; 1259 + enum ucc_geth_vlan_operation_tagged vlanOperationTagged; 1260 + enum ucc_geth_vlan_operation_non_tagged vlanOperationNonTagged; 1261 + enum ucc_geth_qos_mode rxQoSMode; 1262 + enum ucc_geth_flow_control_mode aufc; 1263 + enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc; 1264 + enum ucc_geth_num_of_threads numThreadsTx; 1265 + enum ucc_geth_num_of_threads numThreadsRx; 1266 + enum qe_risc_allocation riscTx; 1267 + enum qe_risc_allocation riscRx; 1268 + }; 1267 1269 1268 1270 /* structure representing UCC GETH */ 1269 - typedef struct ucc_geth_private { 1270 - ucc_geth_info_t *ug_info; 1271 - ucc_fast_private_t *uccf; 1271 + struct ucc_geth_private { 1272 + struct ucc_geth_info *ug_info; 1273 + struct ucc_fast_private *uccf; 1272 1274 struct net_device *dev; 1273 1275 struct net_device_stats stats; /* linux network statistics */ 1274 - ucc_geth_t *ug_regs; 1275 - ucc_geth_init_pram_t *p_init_enet_param_shadow; 1276 - ucc_geth_exf_global_pram_t *p_exf_glbl_param; 1276 + struct ucc_geth *ug_regs; 1277 + struct ucc_geth_init_pram *p_init_enet_param_shadow; 1278 + struct ucc_geth_exf_global_pram *p_exf_glbl_param; 1277 
1279 u32 exf_glbl_param_offset; 1278 - ucc_geth_rx_global_pram_t *p_rx_glbl_pram; 1280 + struct ucc_geth_rx_global_pram *p_rx_glbl_pram; 1279 1281 u32 rx_glbl_pram_offset; 1280 - ucc_geth_tx_global_pram_t *p_tx_glbl_pram; 1282 + struct ucc_geth_tx_global_pram *p_tx_glbl_pram; 1281 1283 u32 tx_glbl_pram_offset; 1282 - ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg; 1284 + struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg; 1283 1285 u32 send_q_mem_reg_offset; 1284 - ucc_geth_thread_data_tx_t *p_thread_data_tx; 1286 + struct ucc_geth_thread_data_tx *p_thread_data_tx; 1285 1287 u32 thread_dat_tx_offset; 1286 - ucc_geth_thread_data_rx_t *p_thread_data_rx; 1288 + struct ucc_geth_thread_data_rx *p_thread_data_rx; 1287 1289 u32 thread_dat_rx_offset; 1288 - ucc_geth_scheduler_t *p_scheduler; 1290 + struct ucc_geth_scheduler *p_scheduler; 1289 1291 u32 scheduler_offset; 1290 - ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; 1292 + struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 1291 1293 u32 tx_fw_statistics_pram_offset; 1292 - ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; 1294 + struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 1293 1295 u32 rx_fw_statistics_pram_offset; 1294 - ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl; 1296 + struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl; 1295 1297 u32 rx_irq_coalescing_tbl_offset; 1296 - ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl; 1298 + struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl; 1297 1299 u32 rx_bd_qs_tbl_offset; 1298 1300 u8 *p_tx_bd_ring[NUM_TX_QUEUES]; 1299 1301 u32 tx_bd_ring_offset[NUM_TX_QUEUES]; ··· 1306 1308 u16 cpucount[NUM_TX_QUEUES]; 1307 1309 volatile u16 *p_cpucount[NUM_TX_QUEUES]; 1308 1310 int indAddrRegUsed[NUM_OF_PADDRS]; 1309 - enet_addr_t paddr[NUM_OF_PADDRS]; 1311 + u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ 1310 1312 u8 
numGroupAddrInHash; 1311 1313 u8 numIndAddrInHash; 1312 1314 u8 numIndAddrInReg; ··· 1332 1334 int oldspeed; 1333 1335 int oldduplex; 1334 1336 int oldlink; 1335 - } ucc_geth_private_t; 1337 + }; 1336 1338 1337 1339 #endif /* __UCC_GETH_H__ */
+6 -20
drivers/net/ucc_geth_phy.c
··· 42 42 43 43 #include "ucc_geth.h" 44 44 #include "ucc_geth_phy.h" 45 - #include <platforms/83xx/mpc8360e_pb.h> 46 45 47 46 #define ugphy_printk(level, format, arg...) \ 48 47 printk(level format "\n", ## arg) ··· 71 72 u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum); 72 73 void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val); 73 74 74 - static u8 *bcsr_regs = NULL; 75 - 76 75 /* Write value to the PHY for this device to the register at regnum, */ 77 76 /* waiting until the write is done before it returns. All PHY */ 78 77 /* configuration has to be done through the TSEC1 MIIM regs */ 79 78 void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value) 80 79 { 81 - ucc_geth_private_t *ugeth = netdev_priv(dev); 82 - ucc_mii_mng_t *mii_regs; 83 - enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; 80 + struct ucc_geth_private *ugeth = netdev_priv(dev); 81 + struct ucc_mii_mng *mii_regs; 82 + enum enet_tbi_mii_reg mii_reg = (enum enet_tbi_mii_reg) regnum; 84 83 u32 tmp_reg; 85 84 86 85 ugphy_vdbg("%s: IN", __FUNCTION__); ··· 113 116 /* configuration has to be done through the TSEC1 MIIM regs */ 114 117 int read_phy_reg(struct net_device *dev, int mii_id, int regnum) 115 118 { 116 - ucc_geth_private_t *ugeth = netdev_priv(dev); 117 - ucc_mii_mng_t *mii_regs; 118 - enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; 119 + struct ucc_geth_private *ugeth = netdev_priv(dev); 120 + struct ucc_mii_mng *mii_regs; 121 + enum enet_tbi_mii_reg mii_reg = (enum enet_tbi_mii_reg) regnum; 119 122 u32 tmp_reg; 120 123 u16 value; 121 124 ··· 631 634 632 635 static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info) 633 636 { 634 - /* FIXME: This lines are for BUG fixing in the mpc8325. 
635 - Remove this from here when it's fixed */ 636 - if (bcsr_regs == NULL) 637 - bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); 638 - bcsr_regs[14] |= 0x40; 639 637 ugphy_vdbg("%s: IN", __FUNCTION__); 640 638 641 639 /* Clear the interrupts by reading the reg */ ··· 642 650 643 651 static int dm9161_config_intr(struct ugeth_mii_info *mii_info) 644 652 { 645 - /* FIXME: This lines are for BUG fixing in the mpc8325. 646 - Remove this from here when it's fixed */ 647 - if (bcsr_regs == NULL) { 648 - bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); 649 - bcsr_regs[14] &= ~0x40; 650 - } 651 653 ugphy_vdbg("%s: IN", __FUNCTION__); 652 654 653 655 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+1 -1
drivers/net/ucc_geth_phy.h
··· 126 126 /* And management functions */ 127 127 struct phy_info *phyinfo; 128 128 129 - ucc_mii_mng_t *mii_regs; 129 + struct ucc_mii_mng *mii_regs; 130 130 131 131 /* forced speed & duplex (no autoneg) 132 132 * partner speed & duplex & pause (autoneg)