Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ethernet: Remove casts to same type

Adding casts of objects to the same type is unnecessary
and confusing for a human reader.

For example, this cast:

int y;
int *p = (int *)&y;

I used the coccinelle script below to find and remove these
unnecessary casts. I manually removed the conversions this
script produces of casts with __force, __iomem and __user.

@@
type T;
T *p;
@@

- (T *)p
+ p

A function in atl1e_main.c was passed a const pointer
when it actually modified elements of the structure.

Change the argument to a non-const pointer.

A function in stmmac needed a __force to avoid a sparse
warning. Added it.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by: Joe Perches
Committed by: David S. Miller
Commit: 64699336 (parent: 20d5ec43)

+87 -97
+1 -1
drivers/net/ethernet/8390/apne.c
··· 454 454 buf[count-1] = inb(NE_BASE + NE_DATAPORT); 455 455 } 456 456 } else { 457 - ptrc = (char*)buf; 457 + ptrc = buf; 458 458 for (cnt = 0; cnt < count; cnt++) 459 459 *ptrc++ = inb(NE_BASE + NE_DATAPORT); 460 460 }
+4 -4
drivers/net/ethernet/aeroflex/greth.c
··· 1014 1014 struct greth_regs *regs; 1015 1015 1016 1016 greth = netdev_priv(dev); 1017 - regs = (struct greth_regs *) greth->regs; 1017 + regs = greth->regs; 1018 1018 1019 1019 if (!is_valid_ether_addr(addr->sa_data)) 1020 1020 return -EADDRNOTAVAIL; ··· 1036 1036 { 1037 1037 struct netdev_hw_addr *ha; 1038 1038 struct greth_private *greth = netdev_priv(dev); 1039 - struct greth_regs *regs = (struct greth_regs *) greth->regs; 1039 + struct greth_regs *regs = greth->regs; 1040 1040 u32 mc_filter[2]; 1041 1041 unsigned int bitnr; 1042 1042 ··· 1055 1055 { 1056 1056 int cfg; 1057 1057 struct greth_private *greth = netdev_priv(dev); 1058 - struct greth_regs *regs = (struct greth_regs *) greth->regs; 1058 + struct greth_regs *regs = greth->regs; 1059 1059 1060 1060 cfg = GRETH_REGLOAD(regs->control); 1061 1061 if (dev->flags & IFF_PROMISC) ··· 1414 1414 goto error1; 1415 1415 } 1416 1416 1417 - regs = (struct greth_regs *) greth->regs; 1417 + regs = greth->regs; 1418 1418 greth->irq = ofdev->archdata.irqs[0]; 1419 1419 1420 1420 dev_set_drvdata(greth->dev, dev);
+2 -2
drivers/net/ethernet/amd/declance.c
··· 623 623 skb_put(skb, len); /* make room */ 624 624 625 625 cp_from_buf(lp->type, skb->data, 626 - (char *)lp->rx_buf_ptr_cpu[entry], len); 626 + lp->rx_buf_ptr_cpu[entry], len); 627 627 628 628 skb->protocol = eth_type_trans(skb, dev); 629 629 netif_rx(skb); ··· 919 919 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); 920 920 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0; 921 921 922 - cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len); 922 + cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len); 923 923 924 924 /* Now, give the packet to the lance */ 925 925 *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
+1 -1
drivers/net/ethernet/apple/macmace.c
··· 228 228 * bits are reversed. 229 229 */ 230 230 231 - addr = (void *)MACE_PROM; 231 + addr = MACE_PROM; 232 232 233 233 for (j = 0; j < 6; ++j) { 234 234 u8 v = bitrev8(addr[j<<4]);
+3 -3
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
··· 602 602 603 603 int atl1c_phy_init(struct atl1c_hw *hw) 604 604 { 605 - struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 605 + struct atl1c_adapter *adapter = hw->adapter; 606 606 struct pci_dev *pdev = adapter->pdev; 607 607 int ret_val; 608 608 u16 mii_bmcr_data = BMCR_RESET; ··· 696 696 /* select one link mode to get lower power consumption */ 697 697 int atl1c_phy_to_ps_link(struct atl1c_hw *hw) 698 698 { 699 - struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 699 + struct atl1c_adapter *adapter = hw->adapter; 700 700 struct pci_dev *pdev = adapter->pdev; 701 701 int ret = 0; 702 702 u16 autoneg_advertised = ADVERTISED_10baseT_Half; ··· 768 768 769 769 int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) 770 770 { 771 - struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 771 + struct atl1c_adapter *adapter = hw->adapter; 772 772 struct pci_dev *pdev = adapter->pdev; 773 773 u32 master_ctrl, mac_ctrl, phy_ctrl; 774 774 u32 wol_ctrl, speed;
+4 -5
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 989 989 } 990 990 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { 991 991 tpd_ring[i].buffer_info = 992 - (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 992 + (tpd_ring->buffer_info + count); 993 993 count += tpd_ring[i].count; 994 994 } 995 995 996 996 rfd_ring->buffer_info = 997 - (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 997 + (tpd_ring->buffer_info + count); 998 998 count += rfd_ring->count; 999 999 rx_desc_count += rfd_ring->count; 1000 1000 ··· 1227 1227 */ 1228 1228 static int atl1c_reset_mac(struct atl1c_hw *hw) 1229 1229 { 1230 - struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1230 + struct atl1c_adapter *adapter = hw->adapter; 1231 1231 struct pci_dev *pdev = adapter->pdev; 1232 1232 u32 ctrl_data = 0; 1233 1233 ··· 1531 1531 static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, 1532 1532 enum atl1c_trans_queue type) 1533 1533 { 1534 - struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1535 - &adapter->tpd_ring[type]; 1534 + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; 1536 1535 struct atl1c_buffer *buffer_info; 1537 1536 struct pci_dev *pdev = adapter->pdev; 1538 1537 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+1 -1
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
··· 268 268 if (eeprom_buff == NULL) 269 269 return -ENOMEM; 270 270 271 - ptr = (u32 *)eeprom_buff; 271 + ptr = eeprom_buff; 272 272 273 273 if (eeprom->offset & 3) { 274 274 /* need read/modify/write of first changed EEPROM word */
+15 -20
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
··· 641 641 */ 642 642 static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) 643 643 { 644 - struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) 645 - &adapter->tx_ring; 644 + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; 646 645 struct atl1e_tx_buffer *tx_buffer = NULL; 647 646 struct pci_dev *pdev = adapter->pdev; 648 647 u16 index, ring_count; ··· 685 686 static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) 686 687 { 687 688 struct atl1e_rx_ring *rx_ring = 688 - (struct atl1e_rx_ring *)&adapter->rx_ring; 689 + &adapter->rx_ring; 689 690 struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; 690 691 u16 i, j; 691 692 ··· 883 884 return err; 884 885 } 885 886 886 - static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter) 887 + static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter) 887 888 { 888 889 889 - struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 890 - struct atl1e_rx_ring *rx_ring = 891 - (struct atl1e_rx_ring *)&adapter->rx_ring; 892 - struct atl1e_tx_ring *tx_ring = 893 - (struct atl1e_tx_ring *)&adapter->tx_ring; 890 + struct atl1e_hw *hw = &adapter->hw; 891 + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; 892 + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; 894 893 struct atl1e_rx_page_desc *rx_page_desc = NULL; 895 894 int i, j; 896 895 ··· 929 932 930 933 static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) 931 934 { 932 - struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 935 + struct atl1e_hw *hw = &adapter->hw; 933 936 u32 dev_ctrl_data = 0; 934 937 u32 max_pay_load = 0; 935 938 u32 jumbo_thresh = 0; ··· 972 975 973 976 static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) 974 977 { 975 - struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 978 + struct atl1e_hw *hw = &adapter->hw; 976 979 u32 rxf_len = 0; 977 980 u32 rxf_low = 0; 978 981 u32 rxf_high = 0; ··· 1221 1224 1222 1225 static bool atl1e_clean_tx_irq(struct 
atl1e_adapter *adapter) 1223 1226 { 1224 - struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) 1225 - &adapter->tx_ring; 1227 + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; 1226 1228 struct atl1e_tx_buffer *tx_buffer = NULL; 1227 1229 u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); 1228 1230 u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); ··· 1380 1384 (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; 1381 1385 u8 rx_using = rx_page_desc[que].rx_using; 1382 1386 1383 - return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]); 1387 + return &(rx_page_desc[que].rx_page[rx_using]); 1384 1388 } 1385 1389 1386 1390 static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, 1387 1391 int *work_done, int work_to_do) 1388 1392 { 1389 1393 struct net_device *netdev = adapter->netdev; 1390 - struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *) 1391 - &adapter->rx_ring; 1394 + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; 1392 1395 struct atl1e_rx_page_desc *rx_page_desc = 1393 1396 (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; 1394 1397 struct sk_buff *skb = NULL; ··· 1571 1576 tx_ring->next_to_use = 0; 1572 1577 1573 1578 memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); 1574 - return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use]; 1579 + return &tx_ring->desc[next_to_use]; 1575 1580 } 1576 1581 1577 1582 static struct atl1e_tx_buffer * ··· 2056 2061 2057 2062 if (wufc) { 2058 2063 /* get link status */ 2059 - atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2060 - atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2064 + atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); 2065 + atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); 2061 2066 2062 2067 mii_advertise_data = ADVERTISE_10HALF; 2063 2068 ··· 2081 2086 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { 2082 2087 msleep(100); 2083 2088 atl1e_read_phy_reg(hw, MII_BMSR, 2084 - (u16 
*)&mii_bmsr_data); 2089 + &mii_bmsr_data); 2085 2090 if (mii_bmsr_data & BMSR_LSTATUS) 2086 2091 break; 2087 2092 }
+1 -1
drivers/net/ethernet/atheros/atlx/atl1.c
··· 1061 1061 goto err_nomem; 1062 1062 } 1063 1063 rfd_ring->buffer_info = 1064 - (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); 1064 + (tpd_ring->buffer_info + tpd_ring->count); 1065 1065 1066 1066 /* 1067 1067 * real ring DMA buffer
+1 -2
drivers/net/ethernet/broadcom/bnx2.c
··· 872 872 873 873 bnapi = &bp->bnx2_napi[i]; 874 874 875 - sblk = (void *) (status_blk + 876 - BNX2_SBLK_MSIX_ALIGN_SIZE * i); 875 + sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i); 877 876 bnapi->status_blk.msix = sblk; 878 877 bnapi->hw_tx_cons_ptr = 879 878 &sblk->status_tx_quick_consumer_index;
+6 -6
drivers/net/ethernet/broadcom/cnic.c
··· 2585 2585 return; 2586 2586 } 2587 2587 2588 - cqes[0] = (struct kcqe *) &kcqe; 2588 + cqes[0] = &kcqe; 2589 2589 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); 2590 2590 } 2591 2591 ··· 4665 4665 4666 4666 cp->kcq1.sw_prod_idx = 0; 4667 4667 cp->kcq1.hw_prod_idx_ptr = 4668 - (u16 *) &sblk->status_completion_producer_index; 4668 + &sblk->status_completion_producer_index; 4669 4669 4670 - cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; 4670 + cp->kcq1.status_idx_ptr = &sblk->status_idx; 4671 4671 4672 4672 /* Initialize the kernel complete queue context. */ 4673 4673 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | ··· 4693 4693 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 4694 4694 4695 4695 cp->kcq1.hw_prod_idx_ptr = 4696 - (u16 *) &msblk->status_completion_producer_index; 4697 - cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; 4698 - cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; 4696 + &msblk->status_completion_producer_index; 4697 + cp->kcq1.status_idx_ptr = &msblk->status_idx; 4698 + cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index; 4699 4699 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 4700 4700 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 4701 4701 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+2 -2
drivers/net/ethernet/brocade/bna/cna_fwimg.c
··· 67 67 { 68 68 switch (asic_gen) { 69 69 case BFI_ASIC_GEN_CT: 70 - return (u32 *)(bfi_image_ct_cna + off); 70 + return (bfi_image_ct_cna + off); 71 71 break; 72 72 case BFI_ASIC_GEN_CT2: 73 - return (u32 *)(bfi_image_ct2_cna + off); 73 + return (bfi_image_ct2_cna + off); 74 74 break; 75 75 default: 76 76 return NULL;
+1 -1
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
··· 575 575 if (!skb) { 576 576 spin_lock_bh(&td->tid_release_lock); 577 577 p->ctx = (void *)td->tid_release_list; 578 - td->tid_release_list = (struct t3c_tid_entry *)p; 578 + td->tid_release_list = p; 579 579 break; 580 580 } 581 581 mk_tid_release(skb, p - td->tid_maps.tid_tab);
+1 -1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 753 753 end = (void *)q->desc + part1; 754 754 } 755 755 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 756 - *(u64 *)end = 0; 756 + *end = 0; 757 757 } 758 758 759 759 /**
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
··· 418 418 * restart a TX Ethernet Queue which was stopped for lack of 419 419 * free TX Queue Descriptors ... 420 420 */ 421 - const struct cpl_sge_egr_update *p = (void *)cpl; 421 + const struct cpl_sge_egr_update *p = cpl; 422 422 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); 423 423 struct sge *s = &adapter->sge; 424 424 struct sge_txq *tq;
+2 -3
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
··· 934 934 end = (void *)tq->desc + part1; 935 935 } 936 936 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 937 - *(u64 *)end = 0; 937 + *end = 0; 938 938 } 939 939 940 940 /** ··· 1323 1323 */ 1324 1324 if (unlikely((void *)sgl == (void *)tq->stat)) { 1325 1325 sgl = (void *)tq->desc; 1326 - end = (void *)((void *)tq->desc + 1327 - ((void *)end - (void *)tq->stat)); 1326 + end = ((void *)tq->desc + ((void *)end - (void *)tq->stat)); 1328 1327 } 1329 1328 1330 1329 write_sgl(skb, tq, sgl, end, 0, addr);
+1 -1
drivers/net/ethernet/dec/tulip/de4x5.c
··· 3973 3973 tmp = srom_rd(aprom_addr, i); 3974 3974 *p++ = cpu_to_le16(tmp); 3975 3975 } 3976 - de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); 3976 + de4x5_dbg_srom(&lp->srom); 3977 3977 } 3978 3978 } 3979 3979
+1 -1
drivers/net/ethernet/freescale/ucc_geth.c
··· 185 185 for (; (u32) i < (u32) addr + size4Aling; i += 4) 186 186 printk("%08x ", *((u32 *) (i))); 187 187 for (; (u32) i < (u32) addr + size; i++) 188 - printk("%02x", *((u8 *) (i))); 188 + printk("%02x", *((i))); 189 189 if (notAlign == 1) 190 190 printk("\r\n"); 191 191 }
+3 -3
drivers/net/ethernet/hp/hp100.c
··· 1217 1217 1218 1218 ringptr->pdl = pdlptr + 1; 1219 1219 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1); 1220 - ringptr->skb = (void *) NULL; 1220 + ringptr->skb = NULL; 1221 1221 1222 1222 /* 1223 1223 * Write address and length of first PDL Fragment (which is used for ··· 1243 1243 1244 1244 ringptr->pdl = pdlptr; /* +1; */ 1245 1245 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */ 1246 - ringptr->skb = (void *) NULL; 1246 + ringptr->skb = NULL; 1247 1247 1248 1248 return roundup(MAX_TX_FRAG * 2 + 2, 4); 1249 1249 } ··· 1628 1628 /* Conversion to new PCI API : NOP */ 1629 1629 pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE); 1630 1630 dev_kfree_skb_any(lp->txrhead->skb); 1631 - lp->txrhead->skb = (void *) NULL; 1631 + lp->txrhead->skb = NULL; 1632 1632 lp->txrhead = lp->txrhead->next; 1633 1633 lp->txrcommit--; 1634 1634 }
+4 -4
drivers/net/ethernet/i825xx/lp486e.c
··· 629 629 630 630 memcpy ((void *)lp->eth_addr, dev->dev_addr, 6); 631 631 lp->set_add.command = CmdIASetup; 632 - i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add); 632 + i596_add_cmd(dev, &lp->set_add); 633 633 634 634 lp->tdr.command = CmdTDR; 635 - i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr); 635 + i596_add_cmd(dev, &lp->tdr); 636 636 637 637 if (lp->scb.command && i596_timeout(dev, "i82596 init", 200)) 638 638 return 1; ··· 737 737 738 738 lp = netdev_priv(dev); 739 739 while (lp->cmd_head) { 740 - cmd = (struct i596_cmd *)lp->cmd_head; 740 + cmd = lp->cmd_head; 741 741 742 742 lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); 743 743 lp->cmd_backlog--; ··· 1281 1281 lp->i596_config[8] |= 0x01; 1282 1282 } 1283 1283 1284 - i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf); 1284 + i596_add_cmd(dev, &lp->set_conf); 1285 1285 } 1286 1286 } 1287 1287
+2 -2
drivers/net/ethernet/i825xx/sun3_82586.c
··· 571 571 } 572 572 #endif 573 573 574 - ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ 574 + ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */ 575 575 576 576 /* 577 577 * alloc xmit-buffs / init xmit_cmds ··· 584 584 ptr = (char *) ptr + XMIT_BUFF_SIZE; 585 585 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 586 586 ptr = (char *) ptr + sizeof(struct tbd_struct); 587 - if((void *)ptr > (void *)dev->mem_end) 587 + if(ptr > (void *)dev->mem_end) 588 588 { 589 589 printk("%s: not enough shared-mem for your configuration!\n",dev->name); 590 590 return 1;
+2 -2
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 1894 1894 goto out_free; 1895 1895 } 1896 1896 1897 - rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1897 + rx_desc = rxq->rx_desc_area; 1898 1898 for (i = 0; i < rxq->rx_ring_size; i++) { 1899 1899 int nexti; 1900 1900 ··· 1999 1999 2000 2000 txq->tx_desc_area_size = size; 2001 2001 2002 - tx_desc = (struct tx_desc *)txq->tx_desc_area; 2002 + tx_desc = txq->tx_desc_area; 2003 2003 for (i = 0; i < txq->tx_ring_size; i++) { 2004 2004 struct tx_desc *txd = tx_desc + i; 2005 2005 int nexti;
+2 -2
drivers/net/ethernet/marvell/pxa168_eth.c
··· 1032 1032 } 1033 1033 memset((void *)pep->p_rx_desc_area, 0, size); 1034 1034 /* initialize the next_desc_ptr links in the Rx descriptors ring */ 1035 - p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area; 1035 + p_rx_desc = pep->p_rx_desc_area; 1036 1036 for (i = 0; i < rx_desc_num; i++) { 1037 1037 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + 1038 1038 ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); ··· 1095 1095 } 1096 1096 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); 1097 1097 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1098 - p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area; 1098 + p_tx_desc = pep->p_tx_desc_area; 1099 1099 for (i = 0; i < tx_desc_num; i++) { 1100 1100 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + 1101 1101 ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+2 -2
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 779 779 r->com.to_state = state; 780 780 r->com.state = RES_QP_BUSY; 781 781 if (qp) 782 - *qp = (struct res_qp *)r; 782 + *qp = r; 783 783 } 784 784 } 785 785 ··· 832 832 r->com.to_state = state; 833 833 r->com.state = RES_MPT_BUSY; 834 834 if (mpt) 835 - *mpt = (struct res_mpt *)r; 835 + *mpt = r; 836 836 } 837 837 } 838 838
+7 -7
drivers/net/ethernet/neterion/s2io.c
··· 6946 6946 if (sp->rxd_mode == RXD_MODE_3B) 6947 6947 ba = &ring->ba[j][k]; 6948 6948 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb, 6949 - (u64 *)&temp0_64, 6950 - (u64 *)&temp1_64, 6951 - (u64 *)&temp2_64, 6949 + &temp0_64, 6950 + &temp1_64, 6951 + &temp2_64, 6952 6952 size) == -ENOMEM) { 6953 6953 return 0; 6954 6954 } ··· 7149 7149 int i, ret = 0; 7150 7150 struct config_param *config; 7151 7151 struct mac_info *mac_control; 7152 - struct net_device *dev = (struct net_device *)sp->dev; 7152 + struct net_device *dev = sp->dev; 7153 7153 u16 interruptible; 7154 7154 7155 7155 /* Initialize the H/W I/O registers */ ··· 7325 7325 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) 7326 7326 { 7327 7327 struct s2io_nic *sp = ring_data->nic; 7328 - struct net_device *dev = (struct net_device *)ring_data->dev; 7328 + struct net_device *dev = ring_data->dev; 7329 7329 struct sk_buff *skb = (struct sk_buff *) 7330 7330 ((unsigned long)rxdp->Host_Control); 7331 7331 int ring_no = ring_data->ring_no; ··· 7508 7508 7509 7509 static void s2io_link(struct s2io_nic *sp, int link) 7510 7510 { 7511 - struct net_device *dev = (struct net_device *)sp->dev; 7511 + struct net_device *dev = sp->dev; 7512 7512 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 7513 7513 7514 7514 if (link != sp->last_link_state) { ··· 8280 8280 return -1; 8281 8281 } 8282 8282 8283 - *ip = (struct iphdr *)((u8 *)buffer + ip_off); 8283 + *ip = (struct iphdr *)(buffer + ip_off); 8284 8284 ip_len = (u8)((*ip)->ihl); 8285 8285 ip_len <<= 2; 8286 8286 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
+4 -4
drivers/net/ethernet/neterion/vxge/vxge-config.c
··· 2346 2346 2347 2347 for (i = 0; i < nreq; i++) 2348 2348 vxge_os_dma_malloc_async( 2349 - ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2349 + (blockpool->hldev)->pdev, 2350 2350 blockpool->hldev, VXGE_HW_BLOCK_SIZE); 2351 2351 } 2352 2352 ··· 2428 2428 break; 2429 2429 2430 2430 pci_unmap_single( 2431 - ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2431 + (blockpool->hldev)->pdev, 2432 2432 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, 2433 2433 ((struct __vxge_hw_blockpool_entry *)p)->length, 2434 2434 PCI_DMA_BIDIRECTIONAL); 2435 2435 2436 2436 vxge_os_dma_free( 2437 - ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2437 + (blockpool->hldev)->pdev, 2438 2438 ((struct __vxge_hw_blockpool_entry *)p)->memblock, 2439 2439 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); 2440 2440 ··· 4059 4059 enum vxge_hw_status status = VXGE_HW_OK; 4060 4060 struct __vxge_hw_virtualpath *vpath; 4061 4061 4062 - vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id]; 4062 + vpath = &hldev->virtual_paths[vp_id]; 4063 4063 4064 4064 if (vpath->ringh) { 4065 4065 status = __vxge_hw_ring_reset(vpath->ringh);
+1 -1
drivers/net/ethernet/neterion/vxge/vxge-config.h
··· 1922 1922 /* misaligned, free current one and try allocating 1923 1923 * size + VXGE_CACHE_LINE_SIZE memory 1924 1924 */ 1925 - kfree((void *) vaddr); 1925 + kfree(vaddr); 1926 1926 size += VXGE_CACHE_LINE_SIZE; 1927 1927 realloc_flag = 1; 1928 1928 goto realloc;
+4 -4
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 1134 1134 "%s:%d", __func__, __LINE__); 1135 1135 1136 1136 vdev = netdev_priv(dev); 1137 - hldev = (struct __vxge_hw_device *)vdev->devh; 1137 + hldev = vdev->devh; 1138 1138 1139 1139 if (unlikely(!is_vxge_card_up(vdev))) 1140 1140 return; ··· 3989 3989 continue; 3990 3990 vxge_debug_ll_config(VXGE_TRACE, 3991 3991 "%s: MTU size - %d", vdev->ndev->name, 3992 - ((struct __vxge_hw_device *)(vdev->devh))-> 3992 + ((vdev->devh))-> 3993 3993 config.vp_config[i].mtu); 3994 3994 vxge_debug_init(VXGE_TRACE, 3995 3995 "%s: VLAN tag stripping %s", vdev->ndev->name, 3996 - ((struct __vxge_hw_device *)(vdev->devh))-> 3996 + ((vdev->devh))-> 3997 3997 config.vp_config[i].rpa_strip_vlan_tag 3998 3998 ? "Enabled" : "Disabled"); 3999 3999 vxge_debug_ll_config(VXGE_TRACE, 4000 4000 "%s: Max frags : %d", vdev->ndev->name, 4001 - ((struct __vxge_hw_device *)(vdev->devh))-> 4001 + ((vdev->devh))-> 4002 4002 config.vp_config[i].fifo.max_frags); 4003 4003 break; 4004 4004 }
+2 -3
drivers/net/ethernet/neterion/vxge/vxge-traffic.c
··· 533 533 534 534 /* notify driver */ 535 535 if (hldev->uld_callbacks->crit_err) 536 - hldev->uld_callbacks->crit_err( 537 - (struct __vxge_hw_device *)hldev, 536 + hldev->uld_callbacks->crit_err(hldev, 538 537 type, vp_id); 539 538 out: 540 539 ··· 1321 1322 /* check whether it is not the end */ 1322 1323 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) { 1323 1324 1324 - vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 1325 + vxge_assert((rxdp)->host_control != 1325 1326 0); 1326 1327 1327 1328 ++ring->cmpl_cnt;
+2 -2
drivers/net/ethernet/sgi/ioc3-eth.c
··· 583 583 unsigned long *rxr; 584 584 u32 w0, err; 585 585 586 - rxr = (unsigned long *) ip->rxr; /* Ring base */ 586 + rxr = ip->rxr; /* Ring base */ 587 587 rx_entry = ip->rx_ci; /* RX consume index */ 588 588 n_entry = ip->rx_pi; 589 589 ··· 903 903 if (ip->rxr == NULL) { 904 904 /* Allocate and initialize rx ring. 4kb = 512 entries */ 905 905 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 906 - rxr = (unsigned long *) ip->rxr; 906 + rxr = ip->rxr; 907 907 if (!rxr) 908 908 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n"); 909 909
+1 -2
drivers/net/ethernet/smsc/smsc9420.c
··· 1640 1640 goto out_free_io_4; 1641 1641 1642 1642 /* descriptors are aligned due to the nature of pci_alloc_consistent */ 1643 - pd->tx_ring = (struct smsc9420_dma_desc *) 1644 - (pd->rx_ring + RX_RING_SIZE); 1643 + pd->tx_ring = (pd->rx_ring + RX_RING_SIZE); 1645 1644 pd->tx_dma_addr = pd->rx_dma_addr + 1646 1645 sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; 1647 1646
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 190 190 191 191 platform_set_drvdata(pdev, NULL); 192 192 193 - iounmap((void *)priv->ioaddr); 193 + iounmap((void __force __iomem *)priv->ioaddr); 194 194 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 195 195 release_mem_region(res->start, resource_size(res)); 196 196
+1 -1
drivers/net/ethernet/sun/sunqe.c
··· 441 441 } else { 442 442 skb_reserve(skb, 2); 443 443 skb_put(skb, len); 444 - skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, 444 + skb_copy_to_linear_data(skb, this_qbuf, 445 445 len); 446 446 skb->protocol = eth_type_trans(skb, qep->dev); 447 447 netif_rx(skb);
+1 -1
drivers/net/ethernet/via/via-velocity.c
··· 486 486 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); 487 487 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); 488 488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); 489 - velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 489 + velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 490 490 opts->numrx = (opts->numrx & ~3); 491 491 } 492 492