Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'add-functional-support-for-gigabit-ethernet-driver'

Biju Das says:

====================
Add functional support for Gigabit Ethernet driver

The DMAC and EMAC blocks of Gigabit Ethernet IP found on RZ/G2L SoC are
similar to the R-Car Ethernet AVB IP.

The Gigabit Ethernet IP consists of Ethernet controller (E-MAC), Internal
TCP/IP Offload Engine (TOE) and Dedicated Direct memory access controller
(DMAC).

With a few changes in the driver we can support both IPs.

This patch series aims to add functional support for the Gigabit Ethernet
driver by filling in all the stubs except set_features.

The set_feature patch will be sent as a separate RFC patch along with the
rx_checksum patch, as it needs further discussion related to HW checksum.

With this series, we can boot the kernel with rootFS mounted over NFS on
RZ/G2L platforms.
====================

Link: https://lore.kernel.org/r/20211012163613.30030-1-biju.das.jz@bp.renesas.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+295 -52
+16 -3
drivers/net/ethernet/renesas/ravb.h
··· 196 196 MAHR = 0x05c0, 197 197 MALR = 0x05c8, 198 198 TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */ 199 + CXR41 = 0x0708, /* RZ/G2L only */ 200 + CXR42 = 0x0710, /* RZ/G2L only */ 199 201 CEFCR = 0x0740, 200 202 FRECR = 0x0748, 201 203 TSFRCR = 0x0750, 202 204 TLFRCR = 0x0758, 203 205 RFCR = 0x0760, 204 206 MAFCR = 0x0778, 207 + CSR0 = 0x0800, /* RZ/G2L only */ 205 208 }; 206 209 207 210 ··· 829 826 ECSR_MPD = 0x00000002, 830 827 ECSR_LCHNG = 0x00000004, 831 828 ECSR_PHYI = 0x00000008, 832 - ECSR_PFRI = 0x00000010, 829 + ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */ 833 830 }; 834 831 835 832 /* ECSIPR */ ··· 965 962 CXR31_SEL_LINK1 = 0x00000008, 966 963 }; 967 964 965 + enum CSR0_BIT { 966 + CSR0_TPE = 0x00000010, 967 + CSR0_RPE = 0x00000020, 968 + }; 969 + 968 970 #define DBAT_ENTRY_NUM 22 969 971 #define RX_QUEUE_OFFSET 4 970 972 #define NUM_RX_QUEUE 2 ··· 978 970 #define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16)) 979 971 980 972 #define GBETH_RX_BUFF_MAX 8192 973 + #define GBETH_RX_DESC_DATA_SIZE 4080 981 974 982 975 struct ravb_tstamp_skb { 983 976 struct list_head list; ··· 1018 1009 netdev_features_t net_features; 1019 1010 int stats_len; 1020 1011 size_t max_rx_len; 1021 - u32 tsrq; 1012 + u32 tccr_mask; 1013 + u32 rx_max_buf_size; 1022 1014 unsigned aligned_tx: 1; 1023 1015 1024 1016 /* hardware features */ 1025 1017 unsigned internal_delay:1; /* AVB-DMAC has internal delays */ 1026 1018 unsigned tx_counters:1; /* E-MAC has TX counters */ 1019 + unsigned carrier_counters:1; /* E-MAC has carrier counters */ 1027 1020 unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */ 1028 1021 unsigned gptp:1; /* AVB-DMAC has gPTP support */ 1029 1022 unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */ 1030 - unsigned nc_queue:1; /* AVB-DMAC has NC queue */ 1023 + unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */ 1031 1024 unsigned magic_pkt:1; /* E-MAC supports magic packet detection */ 1032 
1025 unsigned half_duplex:1; /* E-MAC supports half duplex mode */ 1033 1026 }; ··· 1048 1037 struct ravb_desc *desc_bat; 1049 1038 dma_addr_t rx_desc_dma[NUM_RX_QUEUE]; 1050 1039 dma_addr_t tx_desc_dma[NUM_TX_QUEUE]; 1040 + struct ravb_rx_desc *gbeth_rx_ring; 1051 1041 struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE]; 1052 1042 struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; 1053 1043 void *tx_align[NUM_TX_QUEUE]; 1044 + struct sk_buff *rx_1st_skb; 1054 1045 struct sk_buff **rx_skb[NUM_RX_QUEUE]; 1055 1046 struct sk_buff **tx_skb[NUM_TX_QUEUE]; 1056 1047 u32 rx_over_errors;
+279 -49
drivers/net/ethernet/renesas/ravb_main.c
··· 236 236 237 237 static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q) 238 238 { 239 - /* Place holder */ 239 + struct ravb_private *priv = netdev_priv(ndev); 240 + unsigned int ring_size; 241 + unsigned int i; 242 + 243 + if (!priv->gbeth_rx_ring) 244 + return; 245 + 246 + for (i = 0; i < priv->num_rx_ring[q]; i++) { 247 + struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i]; 248 + 249 + if (!dma_mapping_error(ndev->dev.parent, 250 + le32_to_cpu(desc->dptr))) 251 + dma_unmap_single(ndev->dev.parent, 252 + le32_to_cpu(desc->dptr), 253 + GBETH_RX_BUFF_MAX, 254 + DMA_FROM_DEVICE); 255 + } 256 + ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); 257 + dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring, 258 + priv->rx_desc_dma[q]); 259 + priv->gbeth_rx_ring = NULL; 240 260 } 241 261 242 - static void ravb_rx_ring_free(struct net_device *ndev, int q) 262 + static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q) 243 263 { 244 264 struct ravb_private *priv = netdev_priv(ndev); 245 265 unsigned int ring_size; ··· 327 307 328 308 static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q) 329 309 { 330 - /* Place holder */ 310 + struct ravb_private *priv = netdev_priv(ndev); 311 + struct ravb_rx_desc *rx_desc; 312 + unsigned int rx_ring_size; 313 + dma_addr_t dma_addr; 314 + unsigned int i; 315 + 316 + rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; 317 + memset(priv->gbeth_rx_ring, 0, rx_ring_size); 318 + /* Build RX ring buffer */ 319 + for (i = 0; i < priv->num_rx_ring[q]; i++) { 320 + /* RX descriptor */ 321 + rx_desc = &priv->gbeth_rx_ring[i]; 322 + rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); 323 + dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, 324 + GBETH_RX_BUFF_MAX, 325 + DMA_FROM_DEVICE); 326 + /* We just set the data size to 0 for a failed mapping which 327 + * should prevent DMA from happening... 
328 + */ 329 + if (dma_mapping_error(ndev->dev.parent, dma_addr)) 330 + rx_desc->ds_cc = cpu_to_le16(0); 331 + rx_desc->dptr = cpu_to_le32(dma_addr); 332 + rx_desc->die_dt = DT_FEMPTY; 333 + } 334 + rx_desc = &priv->gbeth_rx_ring[i]; 335 + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 336 + rx_desc->die_dt = DT_LINKFIX; /* type */ 331 337 } 332 338 333 - static void ravb_rx_ring_format(struct net_device *ndev, int q) 339 + static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q) 334 340 { 335 341 struct ravb_private *priv = netdev_priv(ndev); 336 342 struct ravb_ex_rx_desc *rx_desc; ··· 431 385 432 386 static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q) 433 387 { 434 - /* Place holder */ 435 - return NULL; 388 + struct ravb_private *priv = netdev_priv(ndev); 389 + unsigned int ring_size; 390 + 391 + ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); 392 + 393 + priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size, 394 + &priv->rx_desc_dma[q], 395 + GFP_KERNEL); 396 + return priv->gbeth_rx_ring; 436 397 } 437 398 438 - static void *ravb_alloc_rx_desc(struct net_device *ndev, int q) 399 + static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q) 439 400 { 440 401 struct ravb_private *priv = netdev_priv(ndev); 441 402 unsigned int ring_size; ··· 519 466 /* Receive frame limit set register */ 520 467 ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); 521 468 522 - /* PAUSE prohibition */ 469 + /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */ 523 470 ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? 
ECMR_DM : 0) | 524 471 ECMR_TE | ECMR_RE | ECMR_RCPT | 525 - ECMR_TXF | ECMR_RXF | ECMR_PRM, ECMR); 472 + ECMR_TXF | ECMR_RXF, ECMR); 526 473 527 474 ravb_set_rate_gbeth(ndev); 528 475 ··· 534 481 535 482 /* E-MAC status register clear */ 536 483 ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR); 484 + ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0); 537 485 538 486 /* E-MAC interrupt enable register */ 539 487 ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); 540 488 541 - ravb_modify(ndev, CXR31, CXR31_SEL_LINK1, 0); 542 - ravb_modify(ndev, CXR31, CXR31_SEL_LINK0, CXR31_SEL_LINK0); 489 + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0); 543 490 } 544 491 545 492 static void ravb_emac_init_rcar(struct net_device *ndev) ··· 588 535 /* Descriptor format */ 589 536 ravb_ring_format(ndev, RAVB_BE); 590 537 591 - /* Set AVB RX */ 538 + /* Set DMAC RX */ 592 539 ravb_write(ndev, 0x60000000, RCR); 593 540 594 541 /* Set Max Frame Length (RTC) */ ··· 734 681 skb_trim(skb, skb->len - sizeof(__sum16)); 735 682 } 736 683 684 + static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry, 685 + struct ravb_rx_desc *desc) 686 + { 687 + struct ravb_private *priv = netdev_priv(ndev); 688 + struct sk_buff *skb; 689 + 690 + skb = priv->rx_skb[RAVB_BE][entry]; 691 + priv->rx_skb[RAVB_BE][entry] = NULL; 692 + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), 693 + ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE); 694 + 695 + return skb; 696 + } 697 + 737 698 /* Packet receive function for Gigabit Ethernet */ 738 699 static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) 739 700 { 740 - /* Place holder */ 741 - return true; 701 + struct ravb_private *priv = netdev_priv(ndev); 702 + const struct ravb_hw_info *info = priv->info; 703 + struct net_device_stats *stats; 704 + struct ravb_rx_desc *desc; 705 + struct sk_buff *skb; 706 + dma_addr_t dma_addr; 707 + u8 desc_status; 708 + int boguscnt; 709 + u16 pkt_len; 710 + u8 
die_dt; 711 + int entry; 712 + int limit; 713 + 714 + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; 715 + boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; 716 + stats = &priv->stats[q]; 717 + 718 + boguscnt = min(boguscnt, *quota); 719 + limit = boguscnt; 720 + desc = &priv->gbeth_rx_ring[entry]; 721 + while (desc->die_dt != DT_FEMPTY) { 722 + /* Descriptor type must be checked before all other reads */ 723 + dma_rmb(); 724 + desc_status = desc->msc; 725 + pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; 726 + 727 + if (--boguscnt < 0) 728 + break; 729 + 730 + /* We use 0-byte descriptors to mark the DMA mapping errors */ 731 + if (!pkt_len) 732 + continue; 733 + 734 + if (desc_status & MSC_MC) 735 + stats->multicast++; 736 + 737 + if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) { 738 + stats->rx_errors++; 739 + if (desc_status & MSC_CRC) 740 + stats->rx_crc_errors++; 741 + if (desc_status & MSC_RFE) 742 + stats->rx_frame_errors++; 743 + if (desc_status & (MSC_RTLF | MSC_RTSF)) 744 + stats->rx_length_errors++; 745 + if (desc_status & MSC_CEEF) 746 + stats->rx_missed_errors++; 747 + } else { 748 + die_dt = desc->die_dt & 0xF0; 749 + switch (die_dt) { 750 + case DT_FSINGLE: 751 + skb = ravb_get_skb_gbeth(ndev, entry, desc); 752 + skb_put(skb, pkt_len); 753 + skb->protocol = eth_type_trans(skb, ndev); 754 + napi_gro_receive(&priv->napi[q], skb); 755 + stats->rx_packets++; 756 + stats->rx_bytes += pkt_len; 757 + break; 758 + case DT_FSTART: 759 + priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); 760 + skb_put(priv->rx_1st_skb, pkt_len); 761 + break; 762 + case DT_FMID: 763 + skb = ravb_get_skb_gbeth(ndev, entry, desc); 764 + skb_copy_to_linear_data_offset(priv->rx_1st_skb, 765 + priv->rx_1st_skb->len, 766 + skb->data, 767 + pkt_len); 768 + skb_put(priv->rx_1st_skb, pkt_len); 769 + dev_kfree_skb(skb); 770 + break; 771 + case DT_FEND: 772 + skb = ravb_get_skb_gbeth(ndev, entry, desc); 773 + 
skb_copy_to_linear_data_offset(priv->rx_1st_skb, 774 + priv->rx_1st_skb->len, 775 + skb->data, 776 + pkt_len); 777 + skb_put(priv->rx_1st_skb, pkt_len); 778 + dev_kfree_skb(skb); 779 + priv->rx_1st_skb->protocol = 780 + eth_type_trans(priv->rx_1st_skb, ndev); 781 + napi_gro_receive(&priv->napi[q], 782 + priv->rx_1st_skb); 783 + stats->rx_packets++; 784 + stats->rx_bytes += priv->rx_1st_skb->len; 785 + break; 786 + } 787 + } 788 + 789 + entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; 790 + desc = &priv->gbeth_rx_ring[entry]; 791 + } 792 + 793 + /* Refill the RX ring buffers. */ 794 + for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { 795 + entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; 796 + desc = &priv->gbeth_rx_ring[entry]; 797 + desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); 798 + 799 + if (!priv->rx_skb[q][entry]) { 800 + skb = netdev_alloc_skb(ndev, info->max_rx_len); 801 + if (!skb) 802 + break; 803 + ravb_set_buffer_align(skb); 804 + dma_addr = dma_map_single(ndev->dev.parent, 805 + skb->data, 806 + GBETH_RX_BUFF_MAX, 807 + DMA_FROM_DEVICE); 808 + skb_checksum_none_assert(skb); 809 + /* We just set the data size to 0 for a failed mapping 810 + * which should prevent DMA from happening... 
811 + */ 812 + if (dma_mapping_error(ndev->dev.parent, dma_addr)) 813 + desc->ds_cc = cpu_to_le16(0); 814 + desc->dptr = cpu_to_le32(dma_addr); 815 + priv->rx_skb[q][entry] = skb; 816 + } 817 + /* Descriptor type must be set after all the above writes */ 818 + dma_wmb(); 819 + desc->die_dt = DT_FEMPTY; 820 + } 821 + 822 + *quota -= limit - (++boguscnt); 823 + 824 + return boguscnt <= 0; 742 825 } 743 826 744 827 /* Packet receive function for Ethernet AVB */ 745 - static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q) 828 + static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) 746 829 { 747 830 struct ravb_private *priv = netdev_priv(ndev); 748 831 const struct ravb_hw_info *info = priv->info; ··· 1021 832 int error; 1022 833 1023 834 /* Wait for stopping the hardware TX process */ 1024 - error = ravb_wait(ndev, TCCR, info->tsrq, 0); 835 + error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); 1025 836 1026 837 if (error) 1027 838 return error; ··· 1177 988 result = IRQ_HANDLED; 1178 989 1179 990 /* Network control and best effort queue RX/TX */ 1180 - if (info->nc_queue) { 991 + if (info->nc_queues) { 1181 992 for (q = RAVB_NC; q >= RAVB_BE; q--) { 1182 993 if (ravb_queue_interrupt(ndev, q)) 1183 994 result = IRQ_HANDLED; ··· 1273 1084 struct net_device *ndev = napi->dev; 1274 1085 struct ravb_private *priv = netdev_priv(ndev); 1275 1086 const struct ravb_hw_info *info = priv->info; 1087 + bool gptp = info->gptp || info->ccc_gac; 1088 + struct ravb_rx_desc *desc; 1276 1089 unsigned long flags; 1277 1090 int q = napi - priv->napi; 1278 1091 int mask = BIT(q); 1279 1092 int quota = budget; 1093 + unsigned int entry; 1280 1094 1095 + if (!gptp) { 1096 + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; 1097 + desc = &priv->gbeth_rx_ring[entry]; 1098 + } 1281 1099 /* Processing RX Descriptor Ring */ 1282 1100 /* Clear RX interrupt */ 1283 1101 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); 1284 - if (ravb_rx(ndev, &quota, q)) 1285 - goto out; 
1102 + if (gptp || desc->die_dt != DT_FEMPTY) { 1103 + if (ravb_rx(ndev, &quota, q)) 1104 + goto out; 1105 + } 1286 1106 1287 1107 /* Processing TX Descriptor Ring */ 1288 1108 spin_lock_irqsave(&priv->lock, flags); ··· 1316 1118 1317 1119 /* Receive error message handling */ 1318 1120 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; 1319 - if (info->nc_queue) 1121 + if (info->nc_queues) 1320 1122 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; 1321 1123 if (priv->rx_over_errors != ndev->stats.rx_over_errors) 1322 1124 ndev->stats.rx_over_errors = priv->rx_over_errors; ··· 1493 1295 priv->msg_enable = value; 1494 1296 } 1495 1297 1298 + static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = { 1299 + "rx_queue_0_current", 1300 + "tx_queue_0_current", 1301 + "rx_queue_0_dirty", 1302 + "tx_queue_0_dirty", 1303 + "rx_queue_0_packets", 1304 + "tx_queue_0_packets", 1305 + "rx_queue_0_bytes", 1306 + "tx_queue_0_bytes", 1307 + "rx_queue_0_mcast_packets", 1308 + "rx_queue_0_errors", 1309 + "rx_queue_0_crc_errors", 1310 + "rx_queue_0_frame_errors", 1311 + "rx_queue_0_length_errors", 1312 + "rx_queue_0_csum_offload_errors", 1313 + "rx_queue_0_over_errors", 1314 + }; 1315 + 1496 1316 static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { 1497 1317 "rx_queue_0_current", 1498 1318 "tx_queue_0_current", ··· 1567 1351 int i = 0; 1568 1352 int q; 1569 1353 1570 - num_rx_q = info->nc_queue ? NUM_RX_QUEUE : 1; 1354 + num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1; 1571 1355 /* Device-specific stats */ 1572 1356 for (q = RAVB_BE; q < num_rx_q; q++) { 1573 1357 struct net_device_stats *stats = &priv->stats[q]; ··· 1644 1428 1645 1429 /* Free all the skb's in the RX queue and the DMA buffers. 
*/ 1646 1430 ravb_ring_free(ndev, RAVB_BE); 1647 - if (info->nc_queue) 1431 + if (info->nc_queues) 1648 1432 ravb_ring_free(ndev, RAVB_NC); 1649 1433 } 1650 1434 ··· 1764 1548 int error; 1765 1549 1766 1550 napi_enable(&priv->napi[RAVB_BE]); 1767 - if (info->nc_queue) 1551 + if (info->nc_queues) 1768 1552 napi_enable(&priv->napi[RAVB_NC]); 1769 1553 1770 1554 if (!info->multi_irqs) { ··· 1839 1623 out_free_irq: 1840 1624 free_irq(ndev->irq, ndev); 1841 1625 out_napi_off: 1842 - if (info->nc_queue) 1626 + if (info->nc_queues) 1843 1627 napi_disable(&priv->napi[RAVB_NC]); 1844 1628 napi_disable(&priv->napi[RAVB_BE]); 1845 1629 return error; ··· 1889 1673 } 1890 1674 1891 1675 ravb_ring_free(ndev, RAVB_BE); 1892 - if (info->nc_queue) 1676 + if (info->nc_queues) 1893 1677 ravb_ring_free(ndev, RAVB_NC); 1894 1678 1895 1679 /* Device init */ ··· 2071 1855 ravb_write(ndev, 0, TROCR); /* (write clear) */ 2072 1856 } 2073 1857 1858 + if (info->carrier_counters) { 1859 + nstats->collisions += ravb_read(ndev, CXR41); 1860 + ravb_write(ndev, 0, CXR41); /* (write clear) */ 1861 + nstats->tx_carrier_errors += ravb_read(ndev, CXR42); 1862 + ravb_write(ndev, 0, CXR42); /* (write clear) */ 1863 + } 1864 + 2074 1865 nstats->rx_packets = stats0->rx_packets; 2075 1866 nstats->tx_packets = stats0->tx_packets; 2076 1867 nstats->rx_bytes = stats0->rx_bytes; ··· 2089 1866 nstats->rx_length_errors = stats0->rx_length_errors; 2090 1867 nstats->rx_missed_errors = stats0->rx_missed_errors; 2091 1868 nstats->rx_over_errors = stats0->rx_over_errors; 2092 - if (info->nc_queue) { 1869 + if (info->nc_queues) { 2093 1870 stats1 = &priv->stats[RAVB_NC]; 2094 1871 2095 1872 nstats->rx_packets += stats1->rx_packets; ··· 2170 1947 } 2171 1948 free_irq(ndev->irq, ndev); 2172 1949 2173 - if (info->nc_queue) 1950 + if (info->nc_queues) 2174 1951 napi_disable(&priv->napi[RAVB_NC]); 2175 1952 napi_disable(&priv->napi[RAVB_BE]); 2176 1953 2177 1954 /* Free all the skb's in the RX queue and the DMA buffers. 
*/ 2178 1955 ravb_ring_free(ndev, RAVB_BE); 2179 - if (info->nc_queue) 1956 + if (info->nc_queues) 2180 1957 ravb_ring_free(ndev, RAVB_NC); 2181 1958 2182 1959 return 0; ··· 2396 2173 } 2397 2174 2398 2175 static const struct ravb_hw_info ravb_gen3_hw_info = { 2399 - .rx_ring_free = ravb_rx_ring_free, 2400 - .rx_ring_format = ravb_rx_ring_format, 2401 - .alloc_rx_desc = ravb_alloc_rx_desc, 2402 - .receive = ravb_rcar_rx, 2176 + .rx_ring_free = ravb_rx_ring_free_rcar, 2177 + .rx_ring_format = ravb_rx_ring_format_rcar, 2178 + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, 2179 + .receive = ravb_rx_rcar, 2403 2180 .set_rate = ravb_set_rate_rcar, 2404 2181 .set_feature = ravb_set_features_rcar, 2405 2182 .dmac_init = ravb_dmac_init_rcar, ··· 2410 2187 .net_features = NETIF_F_RXCSUM, 2411 2188 .stats_len = ARRAY_SIZE(ravb_gstrings_stats), 2412 2189 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, 2413 - .tsrq = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 2190 + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 2191 + .rx_max_buf_size = SZ_2K, 2414 2192 .internal_delay = 1, 2415 2193 .tx_counters = 1, 2416 2194 .multi_irqs = 1, 2417 2195 .ccc_gac = 1, 2418 - .nc_queue = 1, 2196 + .nc_queues = 1, 2419 2197 .magic_pkt = 1, 2420 2198 }; 2421 2199 2422 2200 static const struct ravb_hw_info ravb_gen2_hw_info = { 2423 - .rx_ring_free = ravb_rx_ring_free, 2424 - .rx_ring_format = ravb_rx_ring_format, 2425 - .alloc_rx_desc = ravb_alloc_rx_desc, 2426 - .receive = ravb_rcar_rx, 2201 + .rx_ring_free = ravb_rx_ring_free_rcar, 2202 + .rx_ring_format = ravb_rx_ring_format_rcar, 2203 + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, 2204 + .receive = ravb_rx_rcar, 2427 2205 .set_rate = ravb_set_rate_rcar, 2428 2206 .set_feature = ravb_set_features_rcar, 2429 2207 .dmac_init = ravb_dmac_init_rcar, ··· 2435 2211 .net_features = NETIF_F_RXCSUM, 2436 2212 .stats_len = ARRAY_SIZE(ravb_gstrings_stats), 2437 2213 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, 2438 - .tsrq = TCCR_TSRQ0 | 
TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 2214 + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 2215 + .rx_max_buf_size = SZ_2K, 2439 2216 .aligned_tx = 1, 2440 2217 .gptp = 1, 2441 - .nc_queue = 1, 2218 + .nc_queues = 1, 2442 2219 .magic_pkt = 1, 2443 2220 }; 2444 2221 ··· 2452 2227 .set_feature = ravb_set_features_gbeth, 2453 2228 .dmac_init = ravb_dmac_init_gbeth, 2454 2229 .emac_init = ravb_emac_init_gbeth, 2455 - .max_rx_len = GBETH_RX_BUFF_MAX + RAVB_ALIGN - 1, 2456 - .tsrq = TCCR_TSRQ0, 2230 + .gstrings_stats = ravb_gstrings_stats_gbeth, 2231 + .gstrings_size = sizeof(ravb_gstrings_stats_gbeth), 2232 + .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), 2233 + .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN), 2234 + .tccr_mask = TCCR_TSRQ0, 2235 + .rx_max_buf_size = SZ_8K, 2457 2236 .aligned_tx = 1, 2458 2237 .tx_counters = 1, 2238 + .carrier_counters = 1, 2459 2239 .half_duplex = 1, 2460 2240 }; 2461 2241 ··· 2619 2389 priv->pdev = pdev; 2620 2390 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; 2621 2391 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; 2622 - if (info->nc_queue) { 2392 + if (info->nc_queues) { 2623 2393 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; 2624 2394 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; 2625 2395 } ··· 2682 2452 } 2683 2453 clk_prepare_enable(priv->refclk); 2684 2454 2685 - ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); 2455 + ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); 2686 2456 ndev->min_mtu = ETH_MIN_MTU; 2687 2457 2688 2458 /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer ··· 2755 2525 } 2756 2526 2757 2527 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); 2758 - if (info->nc_queue) 2528 + if (info->nc_queues) 2759 2529 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); 2760 2530 2761 2531 /* Network device register */ ··· 2774 2544 return 0; 2775 2545 2776 2546 out_napi_del: 2777 - if (info->nc_queue) 2547 + if 
(info->nc_queues) 2778 2548 netif_napi_del(&priv->napi[RAVB_NC]); 2779 2549 2780 2550 netif_napi_del(&priv->napi[RAVB_BE]); ··· 2815 2585 ravb_write(ndev, CCC_OPC_RESET, CCC); 2816 2586 pm_runtime_put_sync(&pdev->dev); 2817 2587 unregister_netdev(ndev); 2818 - if (info->nc_queue) 2588 + if (info->nc_queues) 2819 2589 netif_napi_del(&priv->napi[RAVB_NC]); 2820 2590 netif_napi_del(&priv->napi[RAVB_BE]); 2821 2591 ravb_mdio_release(priv); ··· 2839 2609 2840 2610 /* Only allow ECI interrupts */ 2841 2611 synchronize_irq(priv->emac_irq); 2842 - if (info->nc_queue) 2612 + if (info->nc_queues) 2843 2613 napi_disable(&priv->napi[RAVB_NC]); 2844 2614 napi_disable(&priv->napi[RAVB_BE]); 2845 2615 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); ··· 2856 2626 const struct ravb_hw_info *info = priv->info; 2857 2627 int ret; 2858 2628 2859 - if (info->nc_queue) 2629 + if (info->nc_queues) 2860 2630 napi_enable(&priv->napi[RAVB_NC]); 2861 2631 napi_enable(&priv->napi[RAVB_BE]); 2862 2632