ucc_geth: Fix a bunch of sparse warnings

ucc_geth didn't have anything marked as __iomem. It was also inconsistent
with its use of in/out accessors (using them sometimes, not using them other
times). Cleaning this up cuts the warnings down from hundreds to just over a
dozen.
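
For readers following along, the pattern applied throughout is the standard
sparse one: the register block itself is an ordinary struct, the *pointer* to
it carries the __iomem annotation, and every access goes through the I/O
accessors (in_be32()/out_be32()/in_8()/out_8() on powerpc) rather than a
direct load or store. A minimal sketch with a made-up register block
(foo_regs and foo_set_ctrl_bit are illustrative, not part of this driver):

struct foo_regs {
        u32 ctrl;
        u32 status;
} __attribute__ ((packed));

static void foo_set_ctrl_bit(struct foo_regs __iomem *regs, u32 bit)
{
        /* read-modify-write through the accessors; a plain
         * "regs->ctrl |= bit" would dereference an __iomem pointer
         * directly and trigger a sparse address-space warning */
        out_be32(&regs->ctrl, in_be32(&regs->ctrl) | bit);
}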

Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>


+174 -160
+144 -132
drivers/net/ucc_geth.c
···
 #endif /* UGETH_VERBOSE_DEBUG */
 #define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1
 
-void uec_set_ethtool_ops(struct net_device *netdev);
 
 static DEFINE_SPINLOCK(ugeth_lock);
···
 }
 }
 
-static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
+static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
+                                   u8 __iomem *bd)
 {
         struct sk_buff *skb = NULL;
 
···
 
         skb->dev = ugeth->dev;
 
-        out_be32(&((struct qe_bd *)bd)->buf,
+        out_be32(&((struct qe_bd __iomem *)bd)->buf,
                  dma_map_single(NULL,
                                 skb->data,
                                 ugeth->ug_info->uf_info.max_rx_buf_length +
                                 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
                                 DMA_FROM_DEVICE));
 
-        out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W)));
+        out_be32((u32 __iomem *)bd,
+                 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
 
         return skb;
 }
 
 static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
 {
-        u8 *bd;
+        u8 __iomem *bd;
         u32 bd_status;
         struct sk_buff *skb;
         int i;
···
         i = 0;
 
         do {
-                bd_status = in_be32((u32*)bd);
+                bd_status = in_be32((u32 __iomem *)bd);
                 skb = get_new_skb(ugeth, bd);
 
                 if (!skb)       /* If can not allocate data buffer,
···
 }
 
 static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
-                                  volatile u32 *p_start,
+                                  u32 *p_start,
                                   u8 num_entries,
                                   u32 thread_size,
                                   u32 thread_alignment,
···
 }
 
 static int return_init_enet_entries(struct ucc_geth_private *ugeth,
-                                    volatile u32 *p_start,
+                                    u32 *p_start,
                                     u8 num_entries,
                                     enum qe_risc_allocation risc,
                                     int skip_page_for_first_entry)
···
         int snum;
 
         for (i = 0; i < num_entries; i++) {
+                u32 val = *p_start;
+
                 /* Check that this entry was actually valid --
                    needed in case failed in allocations */
-                if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+                if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
                         snum =
-                            (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+                            (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
                             ENET_INIT_PARAM_SNUM_SHIFT;
                         qe_put_snum((u8) snum);
                         if (!((i == 0) && skip_page_for_first_entry)) {
                                 /* First entry of Rx does not have page */
                                 init_enet_offset =
-                                    (in_be32(p_start) &
-                                     ENET_INIT_PARAM_PTR_MASK);
+                                    (val & ENET_INIT_PARAM_PTR_MASK);
                                 qe_muram_free(init_enet_offset);
                         }
-                        *(p_start++) = 0;       /* Just for cosmetics */
+                        *p_start++ = 0;
                 }
         }
 
···
 
 #ifdef DEBUG
 static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
-                                  volatile u32 *p_start,
+                                  u32 __iomem *p_start,
                                   u8 num_entries,
                                   u32 thread_size,
                                   enum qe_risc_allocation risc,
···
         int snum;
 
         for (i = 0; i < num_entries; i++) {
+                u32 val = in_be32(p_start);
+
                 /* Check that this entry was actually valid --
                    needed in case failed in allocations */
-                if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+                if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
                         snum =
-                            (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+                            (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
                             ENET_INIT_PARAM_SNUM_SHIFT;
                         qe_put_snum((u8) snum);
                         if (!((i == 0) && skip_page_for_first_entry)) {
···
 
 static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
 {
-        struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+        struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
 
         if (!(paddr_num < NUM_OF_PADDRS)) {
                 ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
···
         }
 
         p_82xx_addr_filt =
-            (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
+            (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
             addressfiltering;
 
         /* Writing address ff.ff.ff.ff.ff.ff disables address
···
 static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
                                 u8 *p_enet_addr)
 {
-        struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+        struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
         u32 cecr_subblock;
 
         p_82xx_addr_filt =
-            (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
+            (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
             addressfiltering;
 
         cecr_subblock =
···
 static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
 {
         struct ucc_fast_private *uccf;
-        struct ucc_geth *ug_regs;
+        struct ucc_geth __iomem *ug_regs;
         u32 maccfg2, uccm;
 
         uccf = ugeth->uccf;
···
 static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
 {
         struct ucc_fast_private *uccf;
-        struct ucc_geth *ug_regs;
+        struct ucc_geth __iomem *ug_regs;
         u32 maccfg2, uccm;
 
         uccf = ugeth->uccf;
···
                                 rx_firmware_statistics,
                         struct ucc_geth_hardware_statistics *hardware_statistics)
 {
-        struct ucc_fast *uf_regs;
-        struct ucc_geth *ug_regs;
+        struct ucc_fast __iomem *uf_regs;
+        struct ucc_geth __iomem *ug_regs;
         struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
         struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
 
         ug_regs = ugeth->ug_regs;
-        uf_regs = (struct ucc_fast *) ug_regs;
+        uf_regs = (struct ucc_fast __iomem *) ug_regs;
         p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
         p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
 
···
 }
 #endif /* DEBUG */
 
-static void init_default_reg_vals(volatile u32 *upsmr_register,
-                                  volatile u32 *maccfg1_register,
-                                  volatile u32 *maccfg2_register)
+static void init_default_reg_vals(u32 __iomem *upsmr_register,
+                                  u32 __iomem *maccfg1_register,
+                                  u32 __iomem *maccfg2_register)
 {
         out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
         out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
···
                          u8 alt_beb_truncation,
                          u8 max_retransmissions,
                          u8 collision_window,
-                         volatile u32 *hafdup_register)
+                         u32 __iomem *hafdup_register)
 {
         u32 value = 0;
 
···
                       u8 non_btb_ipg,
                       u8 min_ifg,
                       u8 btb_ipg,
-                      volatile u32 *ipgifg_register)
+                      u32 __iomem *ipgifg_register)
 {
         u32 value = 0;
 
···
                              int tx_flow_control_enable,
                              u16 pause_period,
                              u16 extension_field,
-                             volatile u32 *upsmr_register,
-                             volatile u32 *uempr_register,
-                             volatile u32 *maccfg1_register)
+                             u32 __iomem *upsmr_register,
+                             u32 __iomem *uempr_register,
+                             u32 __iomem *maccfg1_register)
 {
         u32 value = 0;
 
···
 
 static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
                                              int auto_zero_hardware_statistics,
-                                             volatile u32 *upsmr_register,
-                                             volatile u16 *uescr_register)
+                                             u32 __iomem *upsmr_register,
+                                             u16 __iomem *uescr_register)
 {
         u32 upsmr_value = 0;
         u16 uescr_value = 0;
···
 static int init_firmware_statistics_gathering_mode(int
                 enable_tx_firmware_statistics,
                 int enable_rx_firmware_statistics,
-                volatile u32 *tx_rmon_base_ptr,
+                u32 __iomem *tx_rmon_base_ptr,
                 u32 tx_firmware_statistics_structure_address,
-                volatile u32 *rx_rmon_base_ptr,
+                u32 __iomem *rx_rmon_base_ptr,
                 u32 rx_firmware_statistics_structure_address,
-                volatile u16 *temoder_register,
-                volatile u32 *remoder_register)
+                u16 __iomem *temoder_register,
+                u32 __iomem *remoder_register)
 {
         /* Note: this function does not check if */
         /* the parameters it receives are NULL */
···
                           u8 address_byte_3,
                           u8 address_byte_4,
                           u8 address_byte_5,
-                          volatile u32 *macstnaddr1_register,
-                          volatile u32 *macstnaddr2_register)
+                          u32 __iomem *macstnaddr1_register,
+                          u32 __iomem *macstnaddr2_register)
 {
         u32 value = 0;
 
···
 }
 
 static int init_check_frame_length_mode(int length_check,
-                                        volatile u32 *maccfg2_register)
+                                        u32 __iomem *maccfg2_register)
 {
         u32 value = 0;
 
···
 }
 
 static int init_preamble_length(u8 preamble_length,
-                                volatile u32 *maccfg2_register)
+                                u32 __iomem *maccfg2_register)
 {
         u32 value = 0;
 
···
 
 static int init_rx_parameters(int reject_broadcast,
                               int receive_short_frames,
-                              int promiscuous, volatile u32 *upsmr_register)
+                              int promiscuous, u32 __iomem *upsmr_register)
 {
         u32 value = 0;
 
···
 }
 
 static int init_max_rx_buff_len(u16 max_rx_buf_len,
-                                volatile u16 *mrblr_register)
+                                u16 __iomem *mrblr_register)
 {
         /* max_rx_buf_len value must be a multiple of 128 */
         if ((max_rx_buf_len == 0)
···
 }
 
 static int init_min_frame_len(u16 min_frame_length,
-                              volatile u16 *minflr_register,
-                              volatile u16 *mrblr_register)
+                              u16 __iomem *minflr_register,
+                              u16 __iomem *mrblr_register)
 {
         u16 mrblr_value = 0;
 
···
 static int adjust_enet_interface(struct ucc_geth_private *ugeth)
 {
         struct ucc_geth_info *ug_info;
-        struct ucc_geth *ug_regs;
-        struct ucc_fast *uf_regs;
+        struct ucc_geth __iomem *ug_regs;
+        struct ucc_fast __iomem *uf_regs;
         int ret_val;
         u32 upsmr, maccfg2, tbiBaseAddress;
         u16 value;
···
 static void adjust_link(struct net_device *dev)
 {
         struct ucc_geth_private *ugeth = netdev_priv(dev);
-        struct ucc_geth *ug_regs;
-        struct ucc_fast *uf_regs;
+        struct ucc_geth __iomem *ug_regs;
+        struct ucc_fast __iomem *uf_regs;
         struct phy_device *phydev = ugeth->phydev;
         unsigned long flags;
         int new_state = 0;
···
         uccf = ugeth->uccf;
 
         /* Clear acknowledge bit */
-        temp = ugeth->p_rx_glbl_pram->rxgstpack;
+        temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
         temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
-        ugeth->p_rx_glbl_pram->rxgstpack = temp;
+        out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
 
         /* Keep issuing command and checking acknowledge bit until
            it is asserted, according to spec */
···
                 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
                              QE_CR_PROTOCOL_ETHERNET, 0);
 
-                temp = ugeth->p_rx_glbl_pram->rxgstpack;
+                temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
         } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
 
         uccf->stopped_rx = 1;
···
                                                    enum enet_addr_type
                                                    enet_addr_type)
 {
-        struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+        struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
         struct ucc_fast_private *uccf;
         enum comm_dir comm_dir;
         struct list_head *p_lh;
         u16 i, num;
-        u32 *addr_h, *addr_l;
+        u32 __iomem *addr_h;
+        u32 __iomem *addr_l;
         u8 *p_counter;
 
         uccf = ugeth->uccf;
 
         p_82xx_addr_filt =
-            (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
-            addressfiltering;
+            (struct ucc_geth_82xx_address_filtering_pram __iomem *)
+            ugeth->p_rx_glbl_pram->addressfiltering;
 
         if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
                 addr_h = &(p_82xx_addr_filt->gaddr_h);
···
 static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 {
         u16 i, j;
-        u8 *bd;
+        u8 __iomem *bd;
 
         if (!ugeth)
                 return;
···
                 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
                         if (ugeth->tx_skbuff[i][j]) {
                                 dma_unmap_single(NULL,
-                                                 ((struct qe_bd *)bd)->buf,
-                                                 (in_be32((u32 *)bd) &
+                                                 in_be32(&((struct qe_bd __iomem *)bd)->buf),
+                                                 (in_be32((u32 __iomem *)bd) &
                                                   BD_LENGTH_MASK),
                                                  DMA_TO_DEVICE);
                                 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
···
                 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
                         if (ugeth->rx_skbuff[i][j]) {
                                 dma_unmap_single(NULL,
-                                                 ((struct qe_bd *)bd)->buf,
+                                                 in_be32(&((struct qe_bd __iomem *)bd)->buf),
                                                  ugeth->ug_info->
                                                  uf_info.max_rx_buf_length +
                                                  UCC_GETH_RX_DATA_BUF_ALIGNMENT,
···
 {
         struct ucc_geth_private *ugeth;
         struct dev_mc_list *dmi;
-        struct ucc_fast *uf_regs;
-        struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+        struct ucc_fast __iomem *uf_regs;
+        struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
         int i;
 
         ugeth = netdev_priv(dev);
···
 
         if (dev->flags & IFF_PROMISC) {
 
-                uf_regs->upsmr |= UPSMR_PRO;
+                out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
 
         } else {
 
-                uf_regs->upsmr &= ~UPSMR_PRO;
+                out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
 
                 p_82xx_addr_filt =
-                    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
+                    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
                     p_rx_glbl_pram->addressfiltering;
 
                 if (dev->flags & IFF_ALLMULTI) {
···
 
 static void ucc_geth_stop(struct ucc_geth_private *ugeth)
 {
-        struct ucc_geth *ug_regs = ugeth->ug_regs;
+        struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
         struct phy_device *phydev = ugeth->phydev;
         u32 tempval;
 
···
                 return -ENOMEM;
         }
 
-        ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
+        ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
 
         return 0;
 }
 
 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 {
-        struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
-        struct ucc_geth_init_pram *p_init_enet_pram;
+        struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+        struct ucc_geth_init_pram __iomem *p_init_enet_pram;
         struct ucc_fast_private *uccf;
         struct ucc_geth_info *ug_info;
         struct ucc_fast_info *uf_info;
-        struct ucc_fast *uf_regs;
-        struct ucc_geth *ug_regs;
+        struct ucc_fast __iomem *uf_regs;
+        struct ucc_geth __iomem *ug_regs;
         int ret_val = -EINVAL;
         u32 remoder = UCC_GETH_REMODER_INIT;
         u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
···
         u16 temoder = UCC_GETH_TEMODER_INIT;
         u16 test;
         u8 function_code = 0;
-        u8 *bd, *endOfRing;
+        u8 __iomem *bd;
+        u8 __iomem *endOfRing;
         u8 numThreadsRxNumerical, numThreadsTxNumerical;
 
         ugeth_vdbg("%s: IN", __FUNCTION__);
···
                 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
                         align = UCC_GETH_TX_BD_RING_ALIGNMENT;
                 ugeth->tx_bd_ring_offset[j] =
-                        kmalloc((u32) (length + align), GFP_KERNEL);
+                        (u32) kmalloc((u32) (length + align), GFP_KERNEL);
 
                 if (ugeth->tx_bd_ring_offset[j] != 0)
                         ugeth->p_tx_bd_ring[j] =
-                                (void*)((ugeth->tx_bd_ring_offset[j] +
+                                (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
                                 align) & ~(align - 1));
         } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
                 ugeth->tx_bd_ring_offset[j] =
···
                         UCC_GETH_TX_BD_RING_ALIGNMENT);
                 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
                         ugeth->p_tx_bd_ring[j] =
-                                (u8 *) qe_muram_addr(ugeth->
+                                (u8 __iomem *) qe_muram_addr(ugeth->
                                                      tx_bd_ring_offset[j]);
         }
         if (!ugeth->p_tx_bd_ring[j]) {
···
                 return -ENOMEM;
         }
         /* Zero unused end of bd ring, according to spec */
-        memset(ugeth->p_tx_bd_ring[j] +
-               ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0,
+        memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
+                  ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
                length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
 }
 
···
                 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
                         align = UCC_GETH_RX_BD_RING_ALIGNMENT;
                 ugeth->rx_bd_ring_offset[j] =
-                        kmalloc((u32) (length + align), GFP_KERNEL);
+                        (u32) kmalloc((u32) (length + align), GFP_KERNEL);
                 if (ugeth->rx_bd_ring_offset[j] != 0)
                         ugeth->p_rx_bd_ring[j] =
-                                (void*)((ugeth->rx_bd_ring_offset[j] +
+                                (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
                                 align) & ~(align - 1));
         } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
                 ugeth->rx_bd_ring_offset[j] =
···
                         UCC_GETH_RX_BD_RING_ALIGNMENT);
                 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
                         ugeth->p_rx_bd_ring[j] =
-                                (u8 *) qe_muram_addr(ugeth->
+                                (u8 __iomem *) qe_muram_addr(ugeth->
                                                      rx_bd_ring_offset[j]);
         }
         if (!ugeth->p_rx_bd_ring[j]) {
···
         bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
         for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
                 /* clear bd buffer */
-                out_be32(&((struct qe_bd *)bd)->buf, 0);
+                out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
                 /* set bd status and length */
-                out_be32((u32 *)bd, 0);
+                out_be32((u32 __iomem *)bd, 0);
                 bd += sizeof(struct qe_bd);
         }
         bd -= sizeof(struct qe_bd);
         /* set bd status and length */
-        out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */
+        out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
 }
 
 /* Init Rx bds */
···
         bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
         for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
                 /* set bd status and length */
-                out_be32((u32 *)bd, R_I);
+                out_be32((u32 __iomem *)bd, R_I);
                 /* clear bd buffer */
-                out_be32(&((struct qe_bd *)bd)->buf, 0);
+                out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
                 bd += sizeof(struct qe_bd);
         }
         bd -= sizeof(struct qe_bd);
         /* set bd status and length */
-        out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */
+        out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
 }
 
 /*
···
                 return -ENOMEM;
         }
         ugeth->p_tx_glbl_pram =
-            (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth->
+            (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
                                                              tx_glbl_pram_offset);
         /* Zero out p_tx_glbl_pram */
-        memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
+        memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
 
         /* Fill global PRAM */
 
···
         }
 
         ugeth->p_thread_data_tx =
-            (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth->
+            (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
                                                              thread_dat_tx_offset);
         out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
 
···
 
         /* iphoffset */
         for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
-                ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
+                out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
+                      ug_info->iphoffset[i]);
 
         /* SQPTR */
         /* Size varies with number of Tx queues */
···
         }
 
         ugeth->p_send_q_mem_reg =
-            (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth->
+            (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
                         send_q_mem_reg_offset);
         out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
 
···
         }
 
         ugeth->p_scheduler =
-            (struct ucc_geth_scheduler *) qe_muram_addr(ugeth->
+            (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
                                                         scheduler_offset);
         out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
                  ugeth->scheduler_offset);
         /* Zero out p_scheduler */
-        memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
+        memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
 
         /* Set values in scheduler */
         out_be32(&ugeth->p_scheduler->mblinterval,
                  ug_info->mblinterval);
         out_be16(&ugeth->p_scheduler->nortsrbytetime,
                  ug_info->nortsrbytetime);
-        ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
-        ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
-        ugeth->p_scheduler->txasap = ug_info->txasap;
-        ugeth->p_scheduler->extrabw = ug_info->extrabw;
+        out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
+        out_8(&ugeth->p_scheduler->strictpriorityq,
+              ug_info->strictpriorityq);
+        out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
+        out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
         for (i = 0; i < NUM_TX_QUEUES; i++)
-                ugeth->p_scheduler->weightfactor[i] =
-                    ug_info->weightfactor[i];
+                out_8(&ugeth->p_scheduler->weightfactor[i],
+                      ug_info->weightfactor[i]);
 
         /* Set pointers to cpucount registers in scheduler */
         ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
···
                         return -ENOMEM;
                 }
                 ugeth->p_tx_fw_statistics_pram =
-                    (struct ucc_geth_tx_firmware_statistics_pram *)
+                    (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
                     qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
                 /* Zero out p_tx_fw_statistics_pram */
-                memset(ugeth->p_tx_fw_statistics_pram,
+                memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
                        0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
         }
 
···
                 return -ENOMEM;
         }
         ugeth->p_rx_glbl_pram =
-            (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth->
+            (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
                                                              rx_glbl_pram_offset);
         /* Zero out p_rx_glbl_pram */
-        memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
+        memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
 
         /* Fill global PRAM */
 
···
         }
 
         ugeth->p_thread_data_rx =
-            (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth->
+            (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
                                                              thread_dat_rx_offset);
         out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
 
···
                         return -ENOMEM;
                 }
                 ugeth->p_rx_fw_statistics_pram =
-                    (struct ucc_geth_rx_firmware_statistics_pram *)
+                    (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
                     qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
                 /* Zero out p_rx_fw_statistics_pram */
-                memset(ugeth->p_rx_fw_statistics_pram, 0,
+                memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
                        sizeof(struct ucc_geth_rx_firmware_statistics_pram));
         }
 
···
         }
 
         ugeth->p_rx_irq_coalescing_tbl =
-            (struct ucc_geth_rx_interrupt_coalescing_table *)
+            (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
             qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
         out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
                  ugeth->rx_irq_coalescing_tbl_offset);
···
         }
 
         ugeth->p_rx_bd_qs_tbl =
-            (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth->
+            (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
                                                                  rx_bd_qs_tbl_offset);
         out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
         /* Zero out p_rx_bd_qs_tbl */
-        memset(ugeth->p_rx_bd_qs_tbl,
+        memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
                0,
                ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
                                        sizeof(struct ucc_geth_rx_prefetched_bds)));
···
                  &ugeth->p_rx_glbl_pram->remoder);
 
         /* function code register */
-        ugeth->p_rx_glbl_pram->rstate = function_code;
+        out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
 
         /* initialize extended filtering */
         if (ug_info->rxExtendedFiltering) {
···
                 }
 
                 ugeth->p_exf_glbl_param =
-                    (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth->
+                    (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
                                  exf_glbl_param_offset);
                 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
                          ugeth->exf_glbl_param_offset);
···
                         ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
 
                 p_82xx_addr_filt =
-                    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
+                    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
                     p_rx_glbl_pram->addressfiltering;
 
                 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
···
                 return -ENOMEM;
         }
         p_init_enet_pram =
-            (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset);
+            (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
 
         /* Copy shadow InitEnet command parameter structure into PRAM */
-        p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
-        p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
-        p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
-        p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
+        out_8(&p_init_enet_pram->resinit1,
+              ugeth->p_init_enet_param_shadow->resinit1);
+        out_8(&p_init_enet_pram->resinit2,
+              ugeth->p_init_enet_param_shadow->resinit2);
+        out_8(&p_init_enet_pram->resinit3,
+              ugeth->p_init_enet_param_shadow->resinit3);
+        out_8(&p_init_enet_pram->resinit4,
+              ugeth->p_init_enet_param_shadow->resinit4);
         out_be16(&p_init_enet_pram->resinit5,
                  ugeth->p_init_enet_param_shadow->resinit5);
-        p_init_enet_pram->largestexternallookupkeysize =
-            ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
+        out_8(&p_init_enet_pram->largestexternallookupkeysize,
+              ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
         out_be32(&p_init_enet_pram->rgftgfrxglobal,
                  ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
         for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
···
 #ifdef CONFIG_UGETH_TX_ON_DEMAND
         struct ucc_fast_private *uccf;
 #endif
-        u8 *bd;                 /* BD pointer */
+        u8 __iomem *bd;         /* BD pointer */
         u32 bd_status;
         u8 txQ = 0;
 
···
 
         /* Start from the next BD that should be filled */
         bd = ugeth->txBd[txQ];
-        bd_status = in_be32((u32 *)bd);
+        bd_status = in_be32((u32 __iomem *)bd);
         /* Save the skb pointer so we can free it later */
         ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
 
···
               1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
 
         /* set up the buffer descriptor */
-        out_be32(&((struct qe_bd *)bd)->buf,
+        out_be32(&((struct qe_bd __iomem *)bd)->buf,
                  dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
 
         /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
···
         bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
 
         /* set bd status and length */
-        out_be32((u32 *)bd, bd_status);
+        out_be32((u32 __iomem *)bd, bd_status);
 
         dev->trans_start = jiffies;
 
···
 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
 {
         struct sk_buff *skb;
-        u8 *bd;
+        u8 __iomem *bd;
         u16 length, howmany = 0;
         u32 bd_status;
         u8 *bdBuffer;
···
         /* collect received buffers */
         bd = ugeth->rxBd[rxQ];
 
-        bd_status = in_be32((u32 *)bd);
+        bd_status = in_be32((u32 __iomem *)bd);
 
         /* while there are received buffers and BD is full (~R_E) */
         while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
-                bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf);
+                bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
                 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
                 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
 
···
                 else
                         bd += sizeof(struct qe_bd);
 
-                bd_status = in_be32((u32 *)bd);
+                bd_status = in_be32((u32 __iomem *)bd);
         }
 
         ugeth->rxBd[rxQ] = bd;
···
 {
         /* Start from the next BD that should be filled */
         struct ucc_geth_private *ugeth = netdev_priv(dev);
-        u8 *bd;                 /* BD pointer */
+        u8 __iomem *bd;         /* BD pointer */
         u32 bd_status;
 
         bd = ugeth->confBd[txQ];
-        bd_status = in_be32((u32 *)bd);
+        bd_status = in_be32((u32 __iomem *)bd);
 
         /* Normal processing. */
         while ((bd_status & T_R) == 0) {
···
                         bd += sizeof(struct qe_bd);
                 else
                         bd = ugeth->p_tx_bd_ring[txQ];
-                bd_status = in_be32((u32 *)bd);
+                bd_status = in_be32((u32 __iomem *)bd);
         }
         ugeth->confBd[txQ] = bd;
         return 0;
+28 -20
drivers/net/ucc_geth.h
···
         u32 iaddr_l;    /* individual address filter, low */
         u32 gaddr_h;    /* group address filter, high */
         u32 gaddr_l;    /* group address filter, low */
-        struct ucc_geth_82xx_enet_address taddr;
-        struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS];
+        struct ucc_geth_82xx_enet_address __iomem taddr;
+        struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
         u8 res0[0x40 - 0x38];
 } __attribute__ ((packed));
···
         struct ucc_fast_private *uccf;
         struct net_device *dev;
         struct napi_struct napi;
-        struct ucc_geth *ug_regs;
+        struct ucc_geth __iomem *ug_regs;
         struct ucc_geth_init_pram *p_init_enet_param_shadow;
-        struct ucc_geth_exf_global_pram *p_exf_glbl_param;
+        struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
         u32 exf_glbl_param_offset;
-        struct ucc_geth_rx_global_pram *p_rx_glbl_pram;
+        struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
         u32 rx_glbl_pram_offset;
-        struct ucc_geth_tx_global_pram *p_tx_glbl_pram;
+        struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
         u32 tx_glbl_pram_offset;
-        struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg;
+        struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
         u32 send_q_mem_reg_offset;
-        struct ucc_geth_thread_data_tx *p_thread_data_tx;
+        struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
         u32 thread_dat_tx_offset;
-        struct ucc_geth_thread_data_rx *p_thread_data_rx;
+        struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx;
         u32 thread_dat_rx_offset;
-        struct ucc_geth_scheduler *p_scheduler;
+        struct ucc_geth_scheduler __iomem *p_scheduler;
         u32 scheduler_offset;
-        struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
+        struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram;
         u32 tx_fw_statistics_pram_offset;
-        struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
+        struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram;
         u32 rx_fw_statistics_pram_offset;
-        struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl;
+        struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl;
         u32 rx_irq_coalescing_tbl_offset;
-        struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl;
+        struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
         u32 rx_bd_qs_tbl_offset;
-        u8 *p_tx_bd_ring[NUM_TX_QUEUES];
+        u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
         u32 tx_bd_ring_offset[NUM_TX_QUEUES];
-        u8 *p_rx_bd_ring[NUM_RX_QUEUES];
+        u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
         u32 rx_bd_ring_offset[NUM_RX_QUEUES];
-        u8 *confBd[NUM_TX_QUEUES];
-        u8 *txBd[NUM_TX_QUEUES];
-        u8 *rxBd[NUM_RX_QUEUES];
+        u8 __iomem *confBd[NUM_TX_QUEUES];
+        u8 __iomem *txBd[NUM_TX_QUEUES];
+        u8 __iomem *rxBd[NUM_RX_QUEUES];
         int badFrame[NUM_RX_QUEUES];
         u16 cpucount[NUM_TX_QUEUES];
-        volatile u16 *p_cpucount[NUM_TX_QUEUES];
+        u16 __iomem *p_cpucount[NUM_TX_QUEUES];
         int indAddrRegUsed[NUM_OF_PADDRS];
         u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS];   /* ethernet address */
         u8 numGroupAddrInHash;
···
         int oldduplex;
         int oldlink;
 };
+
+void uec_set_ethtool_ops(struct net_device *netdev);
+int init_flow_control_params(u32 automatic_flow_control_mode,
+                int rx_flow_control_enable, int tx_flow_control_enable,
+                u16 pause_period, u16 extension_field,
+                u32 __iomem *upsmr_register, u32 __iomem *uempr_register,
+                u32 __iomem *maccfg1_register);
+
 
 #endif /* __UCC_GETH_H__ */
-6
drivers/net/ucc_geth_ethtool.c
···
 #define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
 #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
 
-extern int init_flow_control_params(u32 automatic_flow_control_mode,
-                int rx_flow_control_enable,
-                int tx_flow_control_enable, u16 pause_period,
-                u16 extension_field, volatile u32 *upsmr_register,
-                volatile u32 *uempr_register, volatile u32 *maccfg1_register);
-
 static int
 uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
+2 -2
drivers/net/ucc_geth_mii.c
···
 }
 
 /* Reset the MIIM registers, and wait for the bus to free */
-int uec_mdio_reset(struct mii_bus *bus)
+static int uec_mdio_reset(struct mii_bus *bus)
 {
         struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
         unsigned int timeout = PHY_INIT_TIMEOUT;
···
         return err;
 }
 
-int uec_mdio_remove(struct of_device *ofdev)
+static int uec_mdio_remove(struct of_device *ofdev)
 {
         struct device *device = &ofdev->dev;
         struct mii_bus *bus = dev_get_drvdata(device);