
drivers/net: Remove casts of void *

Unnecessary casts of void * clutter the code.

These are the casts that remain after several specific
patches removed the casts of netdev_priv and dev_priv.
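
In C a void * converts implicitly to any object pointer type, so removing
such a cast is behavior-preserving.  A minimal stand-alone illustration
(struct foo is invented for this example; it is not a driver type):

        #include <stdlib.h>

        struct foo { int x; };

        int main(void)
        {
                void *pv = malloc(sizeof(struct foo));
                struct foo *pt;

                if (!pv)
                        return 1;

                pt = (struct foo *)pv;  /* before: redundant cast of void * */
                pt = pv;                /* after: implicit conversion yields
                                         * the same pointer value */
                pt->x = 1;

                free(pv);
                return 0;
        }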

Done via coccinelle script (and a little editing):

$ cat cast_void_pointer.cocci
@@
type T;
T *pt;
void *pv;
@@

- pt = (T *)pv;
+ pt = pv;
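
A script like this is applied tree-wide with spatch; with a current
coccinelle the invocation would be something like the following (option
spelling differs between spatch versions, so treat it as illustrative
rather than the exact command used here):

$ spatch --sp-file cast_void_pointer.cocci --in-place --dir drivers/net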

Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Acked-by: Chris Snook <chris.snook@gmail.com>
Acked-by: Jon Mason <jdmason@kudzu.us>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: David Dillow <dave@thedillows.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Joe Perches, committed by David S. Miller
43d620c8 dadbe85a

41 files changed, +124 -147
drivers/net/a2065.c (+3 -8)

@@ -421,15 +421,10 @@
 
 static irqreturn_t lance_interrupt (int irq, void *dev_id)
 {
-        struct net_device *dev;
-        struct lance_private *lp;
-        volatile struct lance_regs *ll;
+        struct net_device *dev = dev_id;
+        struct lance_private *lp = netdev_priv(dev);
+        volatile struct lance_regs *ll = lp->ll;
         int csr0;
-
-        dev = (struct net_device *) dev_id;
-
-        lp = netdev_priv(dev);
-        ll = lp->ll;
 
         ll->rap = LE_CSR0;      /* LANCE Controller Status */
         csr0 = ll->rdp;
drivers/net/appletalk/ltpc.c (+4 -4)

@@ -652,9 +652,9 @@
         int ret;
 
         if(i) {
-                qels[i].cbuf = (unsigned char *) cbuf;
+                qels[i].cbuf = cbuf;
                 qels[i].cbuflen = cbuflen;
-                qels[i].dbuf = (unsigned char *) dbuf;
+                qels[i].dbuf = dbuf;
                 qels[i].dbuflen = dbuflen;
                 qels[i].QWrite = 1;
                 qels[i].mailbox = i;  /* this should be initted rather */
@@ -676,9 +676,9 @@
         int ret;
 
         if(i) {
-                qels[i].cbuf = (unsigned char *) cbuf;
+                qels[i].cbuf = cbuf;
                 qels[i].cbuflen = cbuflen;
-                qels[i].dbuf = (unsigned char *) dbuf;
+                qels[i].dbuf = dbuf;
                 qels[i].dbuflen = dbuflen;
                 qels[i].QWrite = 0;
                 qels[i].mailbox = i;  /* this should be initted rather */
drivers/net/atl1e/atl1e_main.c (+2 -3)

@@ -800,8 +800,7 @@
         /* Init TPD Ring */
         tx_ring->dma = roundup(adapter->ring_dma, 8);
         offset = tx_ring->dma - adapter->ring_dma;
-        tx_ring->desc = (struct atl1e_tpd_desc *)
-                        (adapter->ring_vir_addr + offset);
+        tx_ring->desc = adapter->ring_vir_addr + offset;
         size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
         tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
         if (tx_ring->tx_buffer == NULL) {
@@ -826,7 +827,7 @@
 
         /* Init CMB dma address */
         tx_ring->cmb_dma = adapter->ring_dma + offset;
-        tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
+        tx_ring->cmb = adapter->ring_vir_addr + offset;
         offset += sizeof(u32);
 
         for (i = 0; i < adapter->num_rx_queues; i++) {
drivers/net/atlx/atl2.c (+1 -2)

@@ -311,8 +311,7 @@
         adapter->txd_dma = adapter->ring_dma ;
         offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
         adapter->txd_dma += offset;
-        adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
-                offset);
+        adapter->txd_ring = adapter->ring_vir_addr + offset;
 
         /* Init TXS Ring */
         adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
drivers/net/benet/be_cmds.c (+1 -2)

@@ -2334,8 +2334,7 @@
 
         status = be_mbox_notify_wait(adapter);
         if (!status) {
-                attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
-                                        sizeof(struct be_cmd_resp_hdr));
+                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                 adapter->hba_port_num = attribs->hba_attribs.phy_port;
         }
 
drivers/net/benet/be_ethtool.c (+2 -2)

@@ -408,7 +408,7 @@
         }
         status = be_cmd_get_phy_info(adapter, &phy_cmd);
         if (!status) {
-                resp = (struct be_cmd_resp_get_phy_info *) phy_cmd.va;
+                resp = phy_cmd.va;
                 intf_type = le16_to_cpu(resp->interface_type);
 
                 switch (intf_type) {
@@ -712,7 +712,7 @@
         status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
 
         if (!status) {
-                resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+                resp = eeprom_cmd.va;
                 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
         }
         dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
drivers/net/bna/bfa_cee.c (+1 -1)

@@ -236,7 +236,7 @@
 bfa_cee_hbfail(void *arg)
 {
         struct bfa_cee *cee;
-        cee = (struct bfa_cee *) arg;
+        cee = arg;
 
         if (cee->get_attr_pending == true) {
                 cee->get_attr_status = BFA_STATUS_FAILED;
drivers/net/bna/cna.h (+1 -1)

@@ -74,7 +74,7 @@
                 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
                 bfa_q_qe_init(*((struct list_head **) _qe));               \
         } else {                                                           \
-                *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+                *((struct list_head **)(_qe)) = NULL;                      \
         }                                                                  \
 }
 
drivers/net/caif/caif_shmcore.c (+1 -1)

@@ -134,7 +134,7 @@
         u32 avail_emptybuff = 0;
         unsigned long flags = 0;
 
-        pshm_drv = (struct shmdrv_layer *)priv;
+        pshm_drv = priv;
 
         /* Check for received buffers. */
         if (mbx_msg & SHM_FULL_MASK) {
drivers/net/cnic.c (+2 -2)

@@ -4318,7 +4318,7 @@
         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
         cnic_ctx_wr(dev, cid_addr, offset1, val);
 
-        txbd = (struct tx_bd *) udev->l2_ring;
+        txbd = udev->l2_ring;
 
         buf_map = udev->l2_buf_map;
         for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
@@ -4377,7 +4377,7 @@
         val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
         cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
-        rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
+        rxbd = udev->l2_ring + BCM_PAGE_SIZE;
         for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
                 dma_addr_t buf_map;
                 int n = (i % cp->l2_rx_ring_size) + 1;
drivers/net/cxgb3/cxgb3_offload.c (+1 -1)

@@ -567,7 +567,7 @@
         while (td->tid_release_list) {
                 struct t3c_tid_entry *p = td->tid_release_list;
 
-                td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
+                td->tid_release_list = p->ctx;
                 spin_unlock_bh(&td->tid_release_lock);
 
                 skb = alloc_skb(sizeof(struct cpl_tid_release),
drivers/net/davinci_cpdma.c (+1 -1)

@@ -167,7 +167,7 @@
         } else {
                 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
                                                   GFP_KERNEL);
-                pool->iomap = (void __force __iomem *)pool->cpumap;
+                pool->iomap = pool->cpumap;
                 pool->hw_addr = pool->phys;
         }
 
drivers/net/declance.c (+22 -16)

@@ -326,15 +326,18 @@
  */
 static void cp_to_buf(const int type, void *to, const void *from, int len)
 {
-        unsigned short *tp, *fp, clen;
-        unsigned char *rtp, *rfp;
+        unsigned short *tp;
+        const unsigned short *fp;
+        unsigned short clen;
+        unsigned char *rtp;
+        const unsigned char *rfp;
 
         if (type == PMAD_LANCE) {
                 memcpy(to, from, len);
         } else if (type == PMAX_LANCE) {
                 clen = len >> 1;
-                tp = (unsigned short *) to;
-                fp = (unsigned short *) from;
+                tp = to;
+                fp = from;
 
                 while (clen--) {
                         *tp++ = *fp++;
@@ -345,8 +342,8 @@
                 }
 
                 clen = len & 1;
-                rtp = (unsigned char *) tp;
-                rfp = (unsigned char *) fp;
+                rtp = tp;
+                rfp = fp;
                 while (clen--) {
                         *rtp++ = *rfp++;
                 }
@@ -355,8 +352,8 @@
                  * copy 16 Byte chunks
                  */
                 clen = len >> 4;
-                tp = (unsigned short *) to;
-                fp = (unsigned short *) from;
+                tp = to;
+                fp = from;
                 while (clen--) {
                         *tp++ = *fp++;
                         *tp++ = *fp++;
@@ -385,15 +382,18 @@
 
 static void cp_from_buf(const int type, void *to, const void *from, int len)
 {
-        unsigned short *tp, *fp, clen;
-        unsigned char *rtp, *rfp;
+        unsigned short *tp;
+        const unsigned short *fp;
+        unsigned short clen;
+        unsigned char *rtp;
+        const unsigned char *rfp;
 
         if (type == PMAD_LANCE) {
                 memcpy(to, from, len);
         } else if (type == PMAX_LANCE) {
                 clen = len >> 1;
-                tp = (unsigned short *) to;
-                fp = (unsigned short *) from;
+                tp = to;
+                fp = from;
                 while (clen--) {
                         *tp++ = *fp++;
                         fp++;
@@ -404,8 +398,8 @@
 
                 clen = len & 1;
 
-                rtp = (unsigned char *) tp;
-                rfp = (unsigned char *) fp;
+                rtp = tp;
+                rfp = fp;
 
                 while (clen--) {
                         *rtp++ = *rfp++;
@@ -416,8 +410,8 @@
                  * copy 16 Byte chunks
                  */
                 clen = len >> 4;
-                tp = (unsigned short *) to;
-                fp = (unsigned short *) from;
+                tp = to;
+                fp = from;
                 while (clen--) {
                         *tp++ = *fp++;
                         *tp++ = *fp++;
drivers/net/depca.c (+2 -2)

@@ -708,11 +708,11 @@
 
         /* Tx & Rx descriptors (aligned to a quadword boundary) */
         offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
-        lp->rx_ring = (struct depca_rx_desc __iomem *) (lp->sh_mem + offset);
+        lp->rx_ring = lp->sh_mem + offset;
         lp->rx_ring_offset = offset;
 
         offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
-        lp->tx_ring = (struct depca_tx_desc __iomem *) (lp->sh_mem + offset);
+        lp->tx_ring = lp->sh_mem + offset;
         lp->tx_ring_offset = offset;
 
         offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
drivers/net/dl2k.c (+2 -2)

@@ -221,13 +221,13 @@
         ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_iounmap;
-        np->tx_ring = (struct netdev_desc *) ring_space;
+        np->tx_ring = ring_space;
         np->tx_ring_dma = ring_dma;
 
         ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_unmap_tx;
-        np->rx_ring = (struct netdev_desc *) ring_space;
+        np->rx_ring = ring_space;
         np->rx_ring_dma = ring_dma;
 
         /* Parse eeprom data */
drivers/net/ehea/ehea_qmr.c (+1 -1)

@@ -331,7 +331,7 @@
         unsigned long flags;
 
         spin_lock_irqsave(&eq->spinlock, flags);
-        eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+        eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
         spin_unlock_irqrestore(&eq->spinlock, flags);
 
         return eqe;
drivers/net/epic100.c (+2 -2)

@@ -391,13 +391,13 @@
         ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_iounmap;
-        ep->tx_ring = (struct epic_tx_desc *)ring_space;
+        ep->tx_ring = ring_space;
         ep->tx_ring_dma = ring_dma;
 
         ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_unmap_tx;
-        ep->rx_ring = (struct epic_rx_desc *)ring_space;
+        ep->rx_ring = ring_space;
         ep->rx_ring_dma = ring_dma;
 
         if (dev->mem_start) {
drivers/net/fealnx.c (+2 -2)

@@ -566,7 +566,7 @@
                 err = -ENOMEM;
                 goto err_out_free_dev;
         }
-        np->rx_ring = (struct fealnx_desc *)ring_space;
+        np->rx_ring = ring_space;
         np->rx_ring_dma = ring_dma;
 
         ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
@@ -574,7 +574,7 @@
                 err = -ENOMEM;
                 goto err_out_free_rx;
         }
-        np->tx_ring = (struct fealnx_desc *)ring_space;
+        np->tx_ring = ring_space;
         np->tx_ring_dma = ring_dma;
 
         /* find the connected MII xcvrs */
drivers/net/gianfar.c (+2 -2)

@@ -267,7 +267,7 @@
 
         for (i = 0; i < priv->num_tx_queues; i++) {
                 tx_queue = priv->tx_queue[i];
-                tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
+                tx_queue->tx_bd_base = vaddr;
                 tx_queue->tx_bd_dma_base = addr;
                 tx_queue->dev = ndev;
                 /* enet DMA only understands physical addresses */
@@ -278,7 +278,7 @@
         /* Start the rx descriptor ring where the tx ring leaves off */
         for (i = 0; i < priv->num_rx_queues; i++) {
                 rx_queue = priv->rx_queue[i];
-                rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
+                rx_queue->rx_bd_base = vaddr;
                 rx_queue->rx_bd_dma_base = addr;
                 rx_queue->dev = ndev;
                 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
drivers/net/hamachi.c (+2 -2)

@@ -648,13 +648,13 @@
         ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_cleardev;
-        hmp->tx_ring = (struct hamachi_desc *)ring_space;
+        hmp->tx_ring = ring_space;
         hmp->tx_ring_dma = ring_dma;
 
         ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_unmap_tx;
-        hmp->rx_ring = (struct hamachi_desc *)ring_space;
+        hmp->rx_ring = ring_space;
         hmp->rx_ring_dma = ring_dma;
 
         /* Check for options being passed in */
drivers/net/macmace.c (+1 -1)

@@ -221,7 +221,7 @@
         SET_NETDEV_DEV(dev, &pdev->dev);
 
         dev->base_addr = (u32)MACE_BASE;
-        mp->mace = (volatile struct mace *) MACE_BASE;
+        mp->mace = MACE_BASE;
 
         dev->irq = IRQ_MAC_MACE;
         mp->dma_intr = IRQ_MAC_MACE_DMA;
drivers/net/mlx4/en_rx.c (+1 -1)

@@ -859,7 +859,7 @@
                                 priv->rx_ring[0].cqn, &context);
 
         ptr = ((void *) &context) + 0x3c;
-        rss_context = (struct mlx4_en_rss_context *) ptr;
+        rss_context = ptr;
         rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
                                             (rss_map->base_qpn));
         rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
drivers/net/mlx4/en_tx.c (+2 -3)

@@ -238,8 +238,7 @@
         } else {
                 if (!tx_info->inl) {
                         if ((void *) data >= end) {
-                                data = (struct mlx4_wqe_data_seg *)
-                                        (ring->buf + ((void *) data - end));
+                                data = ring->buf + ((void *)data - end);
                         }
 
                         if (tx_info->linear) {
@@ -252,7 +253,7 @@
                         for (i = 0; i < frags; i++) {
                                 /* Check for wraparound before unmapping */
                                 if ((void *) data >= end)
-                                        data = (struct mlx4_wqe_data_seg *) ring->buf;
+                                        data = ring->buf;
                                 frag = &skb_shinfo(skb)->frags[i];
                                 pci_unmap_page(mdev->pdev,
                                         (dma_addr_t) be64_to_cpu(data->addr),
drivers/net/netxen/netxen_nic_ctx.c (+8 -8)

@@ -163,7 +163,7 @@
                         rq_size, &hostrq_phys_addr);
         if (addr == NULL)
                 return -ENOMEM;
-        prq = (nx_hostrq_rx_ctx_t *)addr;
+        prq = addr;
 
         addr = pci_alloc_consistent(adapter->pdev,
                         rsp_size, &cardrsp_phys_addr);
@@ -171,7 +171,7 @@
                 err = -ENOMEM;
                 goto out_free_rq;
         }
-        prsp = (nx_cardrsp_rx_ctx_t *)addr;
+        prsp = addr;
 
         prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
 
@@ -318,10 +318,10 @@
         }
 
         memset(rq_addr, 0, rq_size);
-        prq = (nx_hostrq_tx_ctx_t *)rq_addr;
+        prq = rq_addr;
 
         memset(rsp_addr, 0, rsp_size);
-        prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
+        prsp = rsp_addr;
 
         prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
 
@@ -629,7 +629,7 @@
         }
 
         memset(addr, 0, sizeof(struct netxen_ring_ctx));
-        recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
+        recv_ctx->hwctx = addr;
         recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
         recv_ctx->hwctx->cmd_consumer_offset =
                 cpu_to_le64(recv_ctx->phys_addr +
@@ -648,7 +648,7 @@
                 goto err_out_free;
         }
 
-        tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+        tx_ring->desc_head = addr;
 
         for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                 rds_ring = &recv_ctx->rds_rings[ring];
@@ -662,7 +662,7 @@
                         err = -ENOMEM;
                         goto err_out_free;
                 }
-                rds_ring->desc_head = (struct rcv_desc *)addr;
+                rds_ring->desc_head = addr;
 
                 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
                         rds_ring->crb_rcv_producer =
@@ -683,7 +683,7 @@
                         err = -ENOMEM;
                         goto err_out_free;
                 }
-                sds_ring->desc_head = (struct status_desc *)addr;
+                sds_ring->desc_head = addr;
 
                 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                         sds_ring->crb_sts_consumer =
drivers/net/pxa168_eth.c (+1 -1)

@@ -502,7 +502,7 @@
          * Pick the appropriate table, start scanning for free/reusable
          * entries at the index obtained by hashing the specified MAC address
          */
-        start = (struct addr_table_entry *)(pep->htpr);
+        start = pep->htpr;
         entry = start + hash_function(mac_addr);
         for (i = 0; i < HOP_NUMBER; i++) {
                 if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
drivers/net/qla3xxx.c (+2 -3)

@@ -2873,7 +2873,7 @@
                         PAGE_SIZE, &qdev->shadow_reg_phy_addr);
 
         if (qdev->shadow_reg_virt_addr != NULL) {
-                qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
+                qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
                 qdev->req_consumer_index_phy_addr_high =
                         MS_64BITS(qdev->shadow_reg_phy_addr);
                 qdev->req_consumer_index_phy_addr_low =
@@ -3114,8 +3114,7 @@
         qdev->small_buf_release_cnt = 8;
         qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
         qdev->lrg_buf_release_cnt = 8;
-        qdev->lrg_buf_next_free =
-                (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
+        qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
         qdev->small_buf_index = 0;
         qdev->lrg_buf_index = 0;
         qdev->lrg_buf_free_count = 0;
drivers/net/qlcnic/qlcnic_ctx.c (+13 -13)

@@ -126,7 +126,7 @@
                 err = -EIO;
                 goto error;
         }
-        tmp_tmpl = (struct qlcnic_dump_template_hdr *) tmp_addr;
+        tmp_tmpl = tmp_addr;
         csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
         if (csum) {
                 dev_err(&adapter->pdev->dev,
@@ -139,7 +139,7 @@
                 err = -EIO;
                 goto error;
         }
-        tmp_buf = (u32 *) tmp_addr;
+        tmp_buf = tmp_addr;
         template = (u32 *) ahw->fw_dump.tmpl_hdr;
         for (i = 0; i < temp_size/sizeof(u32); i++)
                 *template++ = __le32_to_cpu(*tmp_buf++);
@@ -214,7 +214,7 @@
                         &hostrq_phys_addr, GFP_KERNEL);
         if (addr == NULL)
                 return -ENOMEM;
-        prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+        prq = addr;
 
         addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
                         &cardrsp_phys_addr, GFP_KERNEL);
@@ -222,7 +222,7 @@
                 err = -ENOMEM;
                 goto out_free_rq;
         }
-        prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+        prsp = addr;
 
         prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
 
@@ -380,10 +380,10 @@
         }
 
         memset(rq_addr, 0, rq_size);
-        prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+        prq = rq_addr;
 
         memset(rsp_addr, 0, rsp_size);
-        prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+        prsp = rsp_addr;
 
         prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
 
@@ -493,7 +493,7 @@
                 goto err_out_free;
         }
 
-        tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+        tx_ring->desc_head = addr;
 
         for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                 rds_ring = &recv_ctx->rds_rings[ring];
@@ -506,7 +506,7 @@
                         err = -ENOMEM;
                         goto err_out_free;
                 }
-                rds_ring->desc_head = (struct rcv_desc *)addr;
+                rds_ring->desc_head = addr;
 
         }
 
@@ -522,7 +522,7 @@
                         err = -ENOMEM;
                         goto err_out_free;
                 }
-                sds_ring->desc_head = (struct status_desc *)addr;
+                sds_ring->desc_head = addr;
         }
 
         return 0;
@@ -662,7 +662,7 @@
                 return -ENOMEM;
         memset(nic_info_addr, 0, nic_size);
 
-        nic_info = (struct qlcnic_info *) nic_info_addr;
+        nic_info = nic_info_addr;
         err = qlcnic_issue_cmd(adapter,
                         adapter->ahw->pci_func,
                         adapter->fw_hal_version,
@@ -720,7 +720,7 @@
                 return -ENOMEM;
 
         memset(nic_info_addr, 0, nic_size);
-        nic_info = (struct qlcnic_info *)nic_info_addr;
+        nic_info = nic_info_addr;
 
         nic_info->pci_func = cpu_to_le16(nic->pci_func);
         nic_info->op_mode = cpu_to_le16(nic->op_mode);
@@ -769,7 +769,7 @@
                 return -ENOMEM;
         memset(pci_info_addr, 0, pci_size);
 
-        npar = (struct qlcnic_pci_info *) pci_info_addr;
+        npar = pci_info_addr;
         err = qlcnic_issue_cmd(adapter,
                         adapter->ahw->pci_func,
                         adapter->fw_hal_version,
@@ -877,7 +877,7 @@
                         QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
 
         if (!err) {
-                stats = (struct __qlcnic_esw_statistics *)stats_addr;
+                stats = stats_addr;
                 esw_stats->context_id = le16_to_cpu(stats->context_id);
                 esw_stats->version = le16_to_cpu(stats->version);
                 esw_stats->size = le16_to_cpu(stats->size);
drivers/net/qlcnic/qlcnic_ethtool.c (+1 -1)

@@ -996,7 +996,7 @@
         /* Copy template header first */
         copy_sz = fw_dump->tmpl_hdr->size;
         hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
-        data = (u32 *) buffer;
+        data = buffer;
         for (i = 0; i < copy_sz/sizeof(u32); i++)
                 *data++ = cpu_to_le32(*hdr_ptr++);
 
drivers/net/qlcnic/qlcnic_hw.c (+1 -2)

@@ -1673,8 +1673,7 @@
         tmpl_hdr->sys_info[1] = adapter->fw_version;
 
         for (i = 0; i < no_entries; i++) {
-                entry = (struct qlcnic_dump_entry *) ((void *) tmpl_hdr +
-                        entry_offset);
+                entry = (void *)tmpl_hdr + entry_offset;
                 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
                         entry->hdr.flags |= QLCNIC_DUMP_SKIP;
                         entry_offset += entry->hdr.offset;
drivers/net/qlcnic/qlcnic_main.c (+1 -1)

@@ -1861,7 +1861,7 @@
                 return;
 
         adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
-        adapter->fhash.fhead = (struct hlist_head *)head;
+        adapter->fhash.fhead = head;
 
         for (i = 0; i < adapter->fhash.fmax; i++)
                 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
drivers/net/qlge/qlge_main.c (+2 -2)

@@ -3096,7 +3096,7 @@
         if (rx_ring->lbq_len) {
                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
                 tmp = (u64)rx_ring->lbq_base_dma;
-                base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+                base_indirect_ptr = rx_ring->lbq_base_indirect;
                 page_entries = 0;
                 do {
                         *base_indirect_ptr = cpu_to_le64(tmp);
@@ -3120,7 +3120,7 @@
         if (rx_ring->sbq_len) {
                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
                 tmp = (u64)rx_ring->sbq_base_dma;
-                base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+                base_indirect_ptr = rx_ring->sbq_base_indirect;
                 page_entries = 0;
                 do {
                         *base_indirect_ptr = cpu_to_le64(tmp);
drivers/net/s2io.c (+7 -11)

@@ -841,7 +841,7 @@
                         tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
                         tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
 
-                        pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
+                        pre_rxd_blk = tmp_v_addr;
                         pre_rxd_blk->reserved_2_pNext_RxD_block =
                                 (unsigned long)tmp_v_addr_next;
                         pre_rxd_blk->pNext_RxD_Blk_physical =
@@ -918,7 +918,7 @@
         mac_control->stats_mem_sz = size;
 
         tmp_v_addr = mac_control->stats_mem;
-        mac_control->stats_info = (struct stat_block *)tmp_v_addr;
+        mac_control->stats_info = tmp_v_addr;
         memset(tmp_v_addr, 0, size);
         DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
                   dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
@@ -2439,7 +2439,7 @@
 
                 spin_lock_irqsave(&fifo->tx_lock, flags);
                 for (j = 0; j < tx_cfg->fifo_len; j++) {
-                        txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
+                        txdp = fifo->list_info[j].list_virt_addr;
                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
                         if (skb) {
                                 swstats->mem_freed += skb->truesize;
@@ -3075,8 +3075,7 @@
 
         get_info = fifo_data->tx_curr_get_info;
         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
-        txdlp = (struct TxD *)
-                fifo_data->list_info[get_info.offset].list_virt_addr;
+        txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
                (get_info.offset != put_info.offset) &&
                (txdlp->Host_Control)) {
@@ -3128,8 +3129,7 @@
                 get_info.offset++;
                 if (get_info.offset == get_info.fifo_len + 1)
                         get_info.offset = 0;
-                txdlp = (struct TxD *)
-                        fifo_data->list_info[get_info.offset].list_virt_addr;
+                txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
                 fifo_data->tx_curr_get_info.offset = get_info.offset;
         }
 
@@ -4161,7 +4163,7 @@
 
         put_off = (u16)fifo->tx_curr_put_info.offset;
         get_off = (u16)fifo->tx_curr_get_info.offset;
-        txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
+        txdp = fifo->list_info[put_off].list_virt_addr;
 
         queue_len = fifo->tx_curr_put_info.fifo_len + 1;
         /* Avoid "put" pointer going beyond "get" pointer */
@@ -7970,9 +7972,7 @@
 
         /* Initializing the BAR1 address as the start of the FIFO pointer. */
         for (j = 0; j < MAX_TX_FIFOS; j++) {
-                mac_control->tx_FIFO_start[j] =
-                        (struct TxFIFO_element __iomem *)
-                        (sp->bar1 + (j * 0x00020000));
+                mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
         }
 
         /* Driver entry points */
drivers/net/sfc/siena.c (+1 -1)

@@ -400,7 +400,7 @@
         u64 generation_end;
 
         mac_stats = &efx->mac_stats;
-        dma_stats = (u64 *)efx->stats_buffer.addr;
+        dma_stats = efx->stats_buffer.addr;
 
         generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
         if (generation_end == STATS_GENERATION_INVALID)
drivers/net/sis900.c (+2 -2)

@@ -482,7 +482,7 @@
                 ret = -ENOMEM;
                 goto err_out_cleardev;
         }
-        sis_priv->tx_ring = (BufferDesc *)ring_space;
+        sis_priv->tx_ring = ring_space;
         sis_priv->tx_ring_dma = ring_dma;
 
         ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
@@ -490,7 +490,7 @@
                 ret = -ENOMEM;
                 goto err_unmap_tx;
         }
-        sis_priv->rx_ring = (BufferDesc *)ring_space;
+        sis_priv->rx_ring = ring_space;
         sis_priv->rx_ring_dma = ring_dma;
 
         /* The SiS900-specific entries in the device structure. */
drivers/net/tokenring/madgemc.c (+1 -1)

@@ -418,7 +418,7 @@
                 return IRQ_NONE;
         }
 
-        dev = (struct net_device *)dev_id;
+        dev = dev_id;
 
         /* Make sure its really us. -- the Madge way */
         pending = inb(dev->base_addr + MC_CONTROL_REG0);
drivers/net/typhoon.c (+1 -1)

@@ -2367,7 +2367,7 @@
 
         dev->irq = pdev->irq;
         tp = netdev_priv(dev);
-        tp->shared = (struct typhoon_shared *) shared;
+        tp->shared = shared;
         tp->shared_dma = shared_dma;
         tp->pdev = pdev;
         tp->tx_pdev = pdev;
drivers/net/vxge/vxge-config.c (+15 -29)

@@ -582,7 +582,7 @@
                 goto exit;
 
         val64 = readq(&legacy_reg->toc_first_pointer);
-        toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
+        toc = bar0 + val64;
 exit:
         return toc;
 }
@@ -600,7 +600,7 @@
         u32 i;
         enum vxge_hw_status status = VXGE_HW_OK;
 
-        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
+        hldev->legacy_reg = hldev->bar0;
 
         hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
         if (hldev->toc_reg == NULL) {
@@ -609,39 +609,31 @@
         }
 
         val64 = readq(&hldev->toc_reg->toc_common_pointer);
-        hldev->common_reg =
-        (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
+        hldev->common_reg = hldev->bar0 + val64;
 
         val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
-        hldev->mrpcim_reg =
-                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
+        hldev->mrpcim_reg = hldev->bar0 + val64;
 
         for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
-                hldev->srpcim_reg[i] =
-                        (struct vxge_hw_srpcim_reg __iomem *)
-                                (hldev->bar0 + val64);
+                hldev->srpcim_reg[i] = hldev->bar0 + val64;
         }
 
         for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
-                hldev->vpmgmt_reg[i] =
-                (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
+                hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
         }
 
         for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
-                hldev->vpath_reg[i] =
-                        (struct vxge_hw_vpath_reg __iomem *)
-                        (hldev->bar0 + val64);
+                hldev->vpath_reg[i] = hldev->bar0 + val64;
         }
 
         val64 = readq(&hldev->toc_reg->toc_kdfc);
 
         switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
         case 0:
-                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
-                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
+                hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
                 break;
         default:
                 break;
@@ -1016,7 +1024,7 @@
         }
 
         val64 = readq(&toc->toc_common_pointer);
-        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
+        common_reg = bar0 + val64;
 
         status = __vxge_hw_device_vpath_reset_in_prog_check(
                 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
@@ -1036,8 +1044,7 @@
 
                 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
 
-                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
-                                (bar0 + val64);
+                vpmgmt_reg = bar0 + val64;
 
                 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
                 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
@@ -1045,8 +1054,7 @@
 
         val64 = readq(&toc->toc_mrpcim_pointer);
 
-        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
-                        (bar0 + val64);
+        mrpcim_reg = bar0 + val64;
 
         writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
         wmb();
@@ -1054,8 +1064,7 @@
         val64 = readq(&toc->toc_vpath_pointer[i]);
 
         spin_lock_init(&vpath.lock);
-        vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
-                       (bar0 + val64);
+        vpath.vp_reg = bar0 + val64;
         vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
 
         status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
@@ -1077,8 +1088,7 @@
                         continue;
 
                 val64 = readq(&toc->toc_vpath_pointer[i]);
-                vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
-                               (bar0 + val64);
+                vpath.vp_reg = bar0 + val64;
                 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
 
                 status = __vxge_hw_vpath_addr_get(&vpath,
@@ -2128,7 +2140,6 @@
                                                 memblock_index, item,
                                                 &memblock_item_idx);
 
-                rxdp = (struct vxge_hw_ring_rxd_1 *)
-                                ring->channel.reserve_arr[reserve_index];
+                rxdp = ring->channel.reserve_arr[reserve_index];
 
                 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
@@ -4867,8 +4880,7 @@
                 goto vpath_open_exit8;
         }
 
-        vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
-                        stats_block->memblock;
+        vpath->hw_stats = vpath->stats_block->memblock;
         memset(vpath->hw_stats, 0,
                 sizeof(struct vxge_hw_vpath_stats_hw_info));
 
drivers/net/vxge/vxge-traffic.c (+3 -3)

@@ -1309,7 +1309,7 @@
 
         vxge_hw_channel_dtr_try_complete(channel, rxdh);
 
-        rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
+        rxdp = *rxdh;
         if (rxdp == NULL) {
                 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                 goto exit;
@@ -1565,7 +1565,7 @@
         channel = &fifo->channel;
 
         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
-        txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
+        txdp_first = txdlh;
 
         txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
         txdp_last->control_0 |=
@@ -1631,7 +1631,7 @@
 
         vxge_hw_channel_dtr_try_complete(channel, txdlh);
 
-        txdp = (struct vxge_hw_fifo_txd *)*txdlh;
+        txdp = *txdlh;
         if (txdp == NULL) {
                 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                 goto exit;
drivers/net/wan/pc300_tty.c (+1 -1)

@@ -755,7 +755,7 @@
 
         dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch));
 
-        cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty;
+        cpc_tty = pc300dev->cpc_tty;
 
         while (1) {
                 rx_len = 0;
drivers/net/xilinx_emaclite.c (+2 -2)

@@ -252,11 +252,11 @@
         u16 *from_u16_ptr, *to_u16_ptr;
 
         to_u32_ptr = dest_ptr;
-        from_u16_ptr = (u16 *) src_ptr;
+        from_u16_ptr = src_ptr;
         align_buffer = 0;
 
         for (; length > 3; length -= 4) {
-                to_u16_ptr = (u16 *) ((void *) &align_buffer);
+                to_u16_ptr = (u16 *)&align_buffer;
                 *to_u16_ptr++ = *from_u16_ptr++;
                 *to_u16_ptr++ = *from_u16_ptr++;
 
drivers/net/yellowfin.c (+3 -3)

@@ -442,19 +442,19 @@
         ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_cleardev;
-        np->tx_ring = (struct yellowfin_desc *)ring_space;
+        np->tx_ring = ring_space;
         np->tx_ring_dma = ring_dma;
 
         ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_unmap_tx;
-        np->rx_ring = (struct yellowfin_desc *)ring_space;
+        np->rx_ring = ring_space;
         np->rx_ring_dma = ring_dma;
 
         ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
         if (!ring_space)
                 goto err_out_unmap_rx;
-        np->tx_status = (struct tx_status_words *)ring_space;
+        np->tx_status = ring_space;
         np->tx_status_dma = ring_dma;
 
         if (dev->mem_start)