Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sungem, sunhme, sunvnet: Update drivers to use dma_wmb/rmb

This patch goes through and replaces wmb/rmb with dma_wmb/dma_rmb in cases
where the barrier is being used to order writes or reads to just memory and
doesn't involve any programmed I/O.

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Alexander Duyck, committed by David S. Miller
b4468cc6 04abac5f

+18 -18
+7 -7
drivers/net/ethernet/sun/sungem.c
··· 718 718 cluster_start = curr = (gp->rx_new & ~(4 - 1)); 719 719 count = 0; 720 720 kick = -1; 721 - wmb(); 721 + dma_wmb(); 722 722 while (curr != limit) { 723 723 curr = NEXT_RX(curr); 724 724 if (++count == 4) { ··· 1038 1038 if (gem_intme(entry)) 1039 1039 ctrl |= TXDCTRL_INTME; 1040 1040 txd->buffer = cpu_to_le64(mapping); 1041 - wmb(); 1041 + dma_wmb(); 1042 1042 txd->control_word = cpu_to_le64(ctrl); 1043 1043 entry = NEXT_TX(entry); 1044 1044 } else { ··· 1076 1076 1077 1077 txd = &gp->init_block->txd[entry]; 1078 1078 txd->buffer = cpu_to_le64(mapping); 1079 - wmb(); 1079 + dma_wmb(); 1080 1080 txd->control_word = cpu_to_le64(this_ctrl | len); 1081 1081 1082 1082 if (gem_intme(entry)) ··· 1086 1086 } 1087 1087 txd = &gp->init_block->txd[first_entry]; 1088 1088 txd->buffer = cpu_to_le64(first_mapping); 1089 - wmb(); 1089 + dma_wmb(); 1090 1090 txd->control_word = 1091 1091 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); 1092 1092 } ··· 1585 1585 gp->rx_skbs[i] = NULL; 1586 1586 } 1587 1587 rxd->status_word = 0; 1588 - wmb(); 1588 + dma_wmb(); 1589 1589 rxd->buffer = 0; 1590 1590 } 1591 1591 ··· 1647 1647 RX_BUF_ALLOC_SIZE(gp), 1648 1648 PCI_DMA_FROMDEVICE); 1649 1649 rxd->buffer = cpu_to_le64(dma_addr); 1650 - wmb(); 1650 + dma_wmb(); 1651 1651 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 1652 1652 skb_reserve(skb, RX_OFFSET); 1653 1653 } ··· 1656 1656 struct gem_txd *txd = &gb->txd[i]; 1657 1657 1658 1658 txd->control_word = 0; 1659 - wmb(); 1659 + dma_wmb(); 1660 1660 txd->buffer = 0; 1661 1661 } 1662 1662 wmb();
+8 -8
drivers/net/ethernet/sun/sunhme.c
··· 196 196 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) 197 197 { 198 198 rxd->rx_addr = (__force hme32)addr; 199 - wmb(); 199 + dma_wmb(); 200 200 rxd->rx_flags = (__force hme32)flags; 201 201 } 202 202 203 203 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) 204 204 { 205 205 txd->tx_addr = (__force hme32)addr; 206 - wmb(); 206 + dma_wmb(); 207 207 txd->tx_flags = (__force hme32)flags; 208 208 } 209 209 ··· 225 225 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) 226 226 { 227 227 rxd->rx_addr = (__force hme32)cpu_to_le32(addr); 228 - wmb(); 228 + dma_wmb(); 229 229 rxd->rx_flags = (__force hme32)cpu_to_le32(flags); 230 230 } 231 231 232 232 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) 233 233 { 234 234 txd->tx_addr = (__force hme32)cpu_to_le32(addr); 235 - wmb(); 235 + dma_wmb(); 236 236 txd->tx_flags = (__force hme32)cpu_to_le32(flags); 237 237 } 238 238 ··· 268 268 sbus_readl(__reg) 269 269 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ 270 270 do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \ 271 - wmb(); \ 271 + dma_wmb(); \ 272 272 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \ 273 273 } while(0) 274 274 #define hme_write_txd(__hp, __txd, __flags, __addr) \ 275 275 do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ 276 - wmb(); \ 276 + dma_wmb(); \ 277 277 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ 278 278 } while(0) 279 279 #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) ··· 293 293 readl(__reg) 294 294 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ 295 295 do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \ 296 - wmb(); \ 296 + dma_wmb(); \ 297 297 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \ 298 298 } while(0) 299 299 #define hme_write_txd(__hp, __txd, __flags, __addr) \ 300 300 do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \ 301 - wmb(); \ 301 + dma_wmb(); \ 302 302 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \ 303 303 } while(0) 304 304 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
+3 -3
drivers/net/ethernet/sun/sunvnet.c
··· 519 519 if (desc->hdr.state != VIO_DESC_READY) 520 520 return 1; 521 521 522 - rmb(); 522 + dma_rmb(); 523 523 524 524 viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 525 525 desc->hdr.state, desc->hdr.ack, ··· 1380 1380 /* This has to be a non-SMP write barrier because we are writing 1381 1381 * to memory which is shared with the peer LDOM. 1382 1382 */ 1383 - wmb(); 1383 + dma_wmb(); 1384 1384 1385 1385 d->hdr.state = VIO_DESC_READY; 1386 1386 ··· 1395 1395 * is marked READY, but start_cons was false. 1396 1396 * If so, vnet_ack() should send out the missed "start" trigger. 1397 1397 * 1398 - * Note that the wmb() above makes sure the cookies et al. are 1398 + * Note that the dma_wmb() above makes sure the cookies et al. are 1399 1399 * not globally visible before the VIO_DESC_READY, and that the 1400 1400 * stores are ordered correctly by the compiler. The consumer will 1401 1401 * not proceed until the VIO_DESC_READY is visible assuring that