Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sh_eth-remove-BE-desc-support'

Sergei Shtylyov says:

====================
sh_eth: remove unused BE descriptor support

Here's a set of 2 patches against DaveM's 'net-next.git' repo, plus the fix
for the 16-bit descriptor endianness recently merged into the 'net.git' repo.
We get rid of ~30 LoCs and ~300 bytes of code.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+30 -59
+29 -57
drivers/net/ethernet/renesas/sh_eth.c
··· 967 967 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); 968 968 } 969 969 970 - 971 - /* CPU <-> EDMAC endian convert */ 972 - static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) 973 - { 974 - switch (mdp->edmac_endian) { 975 - case EDMAC_LITTLE_ENDIAN: 976 - return cpu_to_le32(x); 977 - case EDMAC_BIG_ENDIAN: 978 - return cpu_to_be32(x); 979 - } 980 - return x; 981 - } 982 - 983 - static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) 984 - { 985 - switch (mdp->edmac_endian) { 986 - case EDMAC_LITTLE_ENDIAN: 987 - return le32_to_cpu(x); 988 - case EDMAC_BIG_ENDIAN: 989 - return be32_to_cpu(x); 990 - } 991 - return x; 992 - } 993 - 994 970 /* Program the hardware MAC address from dev->dev_addr. */ 995 971 static void update_mac_address(struct net_device *ndev) 996 972 { ··· 1140 1164 rxdesc = &mdp->rx_ring[i]; 1141 1165 /* The size of the buffer is a multiple of 32 bytes. */ 1142 1166 buf_len = ALIGN(mdp->rx_buf_sz, 32); 1143 - rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); 1167 + rxdesc->len = cpu_to_le32(buf_len << 16); 1144 1168 dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, 1145 1169 DMA_FROM_DEVICE); 1146 1170 if (dma_mapping_error(&ndev->dev, dma_addr)) { ··· 1148 1172 break; 1149 1173 } 1150 1174 mdp->rx_skbuff[i] = skb; 1151 - rxdesc->addr = cpu_to_edmac(mdp, dma_addr); 1152 - rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1175 + rxdesc->addr = cpu_to_le32(dma_addr); 1176 + rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); 1153 1177 1154 1178 /* Rx descriptor address set */ 1155 1179 if (i == 0) { ··· 1163 1187 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); 1164 1188 1165 1189 /* Mark the last entry as wrapping the ring. 
*/ 1166 - rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE); 1190 + rxdesc->status |= cpu_to_le32(RD_RDLE); 1167 1191 1168 1192 memset(mdp->tx_ring, 0, tx_ringsize); 1169 1193 ··· 1171 1195 for (i = 0; i < mdp->num_tx_ring; i++) { 1172 1196 mdp->tx_skbuff[i] = NULL; 1173 1197 txdesc = &mdp->tx_ring[i]; 1174 - txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1175 - txdesc->len = cpu_to_edmac(mdp, 0); 1198 + txdesc->status = cpu_to_le32(TD_TFP); 1199 + txdesc->len = cpu_to_le32(0); 1176 1200 if (i == 0) { 1177 1201 /* Tx descriptor address set */ 1178 1202 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); ··· 1182 1206 } 1183 1207 } 1184 1208 1185 - txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1209 + txdesc->status |= cpu_to_le32(TD_TDLE); 1186 1210 } 1187 1211 1188 1212 /* Get skb and descriptor buffer */ ··· 1338 1362 * packet boundary if it's currently running 1339 1363 */ 1340 1364 for (i = 0; i < mdp->num_tx_ring; i++) 1341 - mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); 1365 + mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); 1342 1366 1343 1367 /* Disable TX FIFO egress to MAC */ 1344 1368 sh_eth_rcv_snd_disable(ndev); ··· 1370 1394 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1371 1395 entry = mdp->dirty_tx % mdp->num_tx_ring; 1372 1396 txdesc = &mdp->tx_ring[entry]; 1373 - if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1397 + if (txdesc->status & cpu_to_le32(TD_TACT)) 1374 1398 break; 1375 1399 /* TACT bit must be checked before all the following reads */ 1376 1400 dma_rmb(); 1377 1401 netif_info(mdp, tx_done, ndev, 1378 1402 "tx entry %d status 0x%08x\n", 1379 - entry, edmac_to_cpu(mdp, txdesc->status)); 1403 + entry, le32_to_cpu(txdesc->status)); 1380 1404 /* Free the original skb. 
*/ 1381 1405 if (mdp->tx_skbuff[entry]) { 1382 - dma_unmap_single(&ndev->dev, 1383 - edmac_to_cpu(mdp, txdesc->addr), 1384 - edmac_to_cpu(mdp, txdesc->len) >> 16, 1406 + dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), 1407 + le32_to_cpu(txdesc->len) >> 16, 1385 1408 DMA_TO_DEVICE); 1386 1409 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1387 1410 mdp->tx_skbuff[entry] = NULL; 1388 1411 free_num++; 1389 1412 } 1390 - txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1413 + txdesc->status = cpu_to_le32(TD_TFP); 1391 1414 if (entry >= mdp->num_tx_ring - 1) 1392 - txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1415 + txdesc->status |= cpu_to_le32(TD_TDLE); 1393 1416 1394 1417 ndev->stats.tx_packets++; 1395 - ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16; 1418 + ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; 1396 1419 } 1397 1420 return free_num; 1398 1421 } ··· 1415 1440 boguscnt = min(boguscnt, *quota); 1416 1441 limit = boguscnt; 1417 1442 rxdesc = &mdp->rx_ring[entry]; 1418 - while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1443 + while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { 1419 1444 /* RACT bit must be checked before all the following reads */ 1420 1445 dma_rmb(); 1421 - desc_status = edmac_to_cpu(mdp, rxdesc->status); 1422 - pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL; 1446 + desc_status = le32_to_cpu(rxdesc->status); 1447 + pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL; 1423 1448 1424 1449 if (--boguscnt < 0) 1425 1450 break; ··· 1457 1482 if (desc_status & RD_RFS10) 1458 1483 ndev->stats.rx_over_errors++; 1459 1484 } else if (skb) { 1460 - dma_addr = edmac_to_cpu(mdp, rxdesc->addr); 1485 + dma_addr = le32_to_cpu(rxdesc->addr); 1461 1486 if (!mdp->cd->hw_swap) 1462 1487 sh_eth_soft_swap( 1463 1488 phys_to_virt(ALIGN(dma_addr, 4)), ··· 1486 1511 rxdesc = &mdp->rx_ring[entry]; 1487 1512 /* The size of the buffer is 32 byte boundary. 
*/ 1488 1513 buf_len = ALIGN(mdp->rx_buf_sz, 32); 1489 - rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); 1514 + rxdesc->len = cpu_to_le32(buf_len << 16); 1490 1515 1491 1516 if (mdp->rx_skbuff[entry] == NULL) { 1492 1517 skb = netdev_alloc_skb(ndev, skbuff_size); ··· 1502 1527 mdp->rx_skbuff[entry] = skb; 1503 1528 1504 1529 skb_checksum_none_assert(skb); 1505 - rxdesc->addr = cpu_to_edmac(mdp, dma_addr); 1530 + rxdesc->addr = cpu_to_le32(dma_addr); 1506 1531 } 1507 1532 dma_wmb(); /* RACT bit must be set after all the above writes */ 1508 1533 if (entry >= mdp->num_rx_ring - 1) 1509 1534 rxdesc->status |= 1510 - cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE); 1535 + cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE); 1511 1536 else 1512 - rxdesc->status |= 1513 - cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1537 + rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP); 1514 1538 } 1515 1539 1516 1540 /* Restart Rx engine if stopped. */ ··· 2309 2335 /* Free all the skbuffs in the Rx queue. */ 2310 2336 for (i = 0; i < mdp->num_rx_ring; i++) { 2311 2337 rxdesc = &mdp->rx_ring[i]; 2312 - rxdesc->status = cpu_to_edmac(mdp, 0); 2313 - rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0); 2338 + rxdesc->status = cpu_to_le32(0); 2339 + rxdesc->addr = cpu_to_le32(0xBADF00D0); 2314 2340 dev_kfree_skb(mdp->rx_skbuff[i]); 2315 2341 mdp->rx_skbuff[i] = NULL; 2316 2342 } ··· 2358 2384 kfree_skb(skb); 2359 2385 return NETDEV_TX_OK; 2360 2386 } 2361 - txdesc->addr = cpu_to_edmac(mdp, dma_addr); 2362 - txdesc->len = cpu_to_edmac(mdp, skb->len << 16); 2387 + txdesc->addr = cpu_to_le32(dma_addr); 2388 + txdesc->len = cpu_to_le32(skb->len << 16); 2363 2389 2364 2390 dma_wmb(); /* TACT bit must be set after all the above writes */ 2365 2391 if (entry >= mdp->num_tx_ring - 1) 2366 - txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2392 + txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); 2367 2393 else 2368 - txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 2394 + txdesc->status |= cpu_to_le32(TD_TACT); 2369 2395 
2370 2396 mdp->cur_tx++; 2371 2397 ··· 3071 3097 /* get PHY ID */ 3072 3098 mdp->phy_id = pd->phy; 3073 3099 mdp->phy_interface = pd->phy_interface; 3074 - /* EDMAC endian */ 3075 - mdp->edmac_endian = pd->edmac_endian; 3076 3100 mdp->no_ether_link = pd->no_ether_link; 3077 3101 mdp->ether_link_active_low = pd->ether_link_active_low; 3078 3102
-1
drivers/net/ethernet/renesas/sh_eth.h
··· 513 513 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 514 514 u32 cur_tx, dirty_tx; 515 515 u32 rx_buf_sz; /* Based on MTU+slack. */ 516 - int edmac_endian; 517 516 struct napi_struct napi; 518 517 bool irq_enabled; 519 518 /* MII transceiver section. */
+1 -1
include/linux/sh_eth.h
··· 4 4 #include <linux/phy.h> 5 5 #include <linux/if_ether.h> 6 6 7 - enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; 7 + enum {EDMAC_LITTLE_ENDIAN}; 8 8 9 9 struct sh_eth_plat_data { 10 10 int phy;