Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

eth: sundance: fix endian issues

Fix sparse warnings about endianness. Store the DMA address in a variable
of the correct type and only convert it when writing to the descriptor.
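
The pattern, as a minimal sketch outside the driver (the demo_desc struct and
demo_map_rx() helper below are hypothetical, not sundance code): keep the
mapping in a plain dma_addr_t so dma_mapping_error() sees the native value,
and apply cpu_to_le32() only where the address is written into the
little-endian descriptor field.

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Hypothetical hardware descriptor with little-endian fields. */
    struct demo_desc {
            __le32 addr;
            __le32 length;
    };

    static int demo_map_rx(struct device *dev, struct demo_desc *desc,
                           struct sk_buff *skb, unsigned int len)
    {
            dma_addr_t addr;        /* native CPU type, no __le32 here */

            addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;

            /* Convert only when the value crosses into the LE descriptor. */
            desc->addr = cpu_to_le32(addr);
            desc->length = cpu_to_le32(len);
            return 0;
    }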

Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250901210818.1025316-2-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+20 -15
drivers/net/ethernet/dlink/sundance.c
···
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
+		dma_addr_t addr;
+
 		struct sk_buff *skb =
 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
-		np->rx_ring[i].frag.addr = cpu_to_le32(
-			dma_map_single(&np->pci_dev->dev, skb->data,
-				np->rx_buf_sz, DMA_FROM_DEVICE));
-		if (dma_mapping_error(&np->pci_dev->dev,
-				      np->rx_ring[i].frag.addr)) {
+		addr = dma_map_single(&np->pci_dev->dev, skb->data,
+				      np->rx_buf_sz, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pci_dev->dev, addr)) {
 			dev_kfree_skb(skb);
 			np->rx_skbuff[i] = NULL;
 			break;
 		}
+		np->rx_ring[i].frag.addr = cpu_to_le32(addr);
 		np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
 	}
 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
···
 {
 	struct netdev_private *np = netdev_priv(dev);
 	struct netdev_desc *txdesc;
+	dma_addr_t addr;
 	unsigned entry;
 
 	/* Calculate the next Tx descriptor entry. */
···
 	np->tx_skbuff[entry] = skb;
 	txdesc = &np->tx_ring[entry];
 
+	addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (dma_mapping_error(&np->pci_dev->dev, addr))
+		goto drop_frame;
+
 	txdesc->next_desc = 0;
 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
-	txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
-			skb->data, skb->len, DMA_TO_DEVICE));
-	if (dma_mapping_error(&np->pci_dev->dev,
-			      txdesc->frag.addr))
-		goto drop_frame;
+	txdesc->frag.addr = cpu_to_le32(addr);
 	txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
 
 	/* Increment cur_tx before tasklet_schedule() */
···
 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
 		struct sk_buff *skb;
+		dma_addr_t addr;
+
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
···
 			if (skb == NULL)
 				break;		/* Better luck next round. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-			np->rx_ring[entry].frag.addr = cpu_to_le32(
-				dma_map_single(&np->pci_dev->dev, skb->data,
-					np->rx_buf_sz, DMA_FROM_DEVICE));
-			if (dma_mapping_error(&np->pci_dev->dev,
-					      np->rx_ring[entry].frag.addr)) {
+			addr = dma_map_single(&np->pci_dev->dev, skb->data,
+					      np->rx_buf_sz, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&np->pci_dev->dev, addr)) {
 				dev_kfree_skb_irq(skb);
 				np->rx_skbuff[entry] = NULL;
 				break;
 			}
+
+			np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
 		}
 		/* Perhaps we need not reset this field. */
 		np->rx_ring[entry].frag.length =
···
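
(Endianness warnings like the ones this patch addresses come from sparse;
one common way to reproduce them for a single file is a sparse-enabled
build, e.g.

    make C=1 drivers/net/ethernet/dlink/sundance.o

where C=1 checks only files being recompiled and C=2 re-checks all source
files regardless.)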