Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'r8152-RX-improve'

Hayes says:

====================
v2:
For patch #2, replace list_for_each_safe with list_for_each_entry_safe.
Remove unlikely in WARN_ON. Adjust the coding style.

For patch #4, replace list_for_each_safe with list_for_each_entry_safe.
Remove "else" after "continue".

For patch #5, replace sysfs with ethtool to modify rx_copybreak and
rx_pending.

v1:
The different chips use different rx buffer size.

Use skb_add_rx_frag() to reduce memory copy for RX.
====================

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>

+307 -73
+307 -73
drivers/net/usb/r8152.c
··· 22 22 #include <linux/mdio.h> 23 23 #include <linux/usb/cdc.h> 24 24 #include <linux/suspend.h> 25 + #include <linux/atomic.h> 25 26 #include <linux/acpi.h> 26 27 27 28 /* Information for net-next */ 28 - #define NETNEXT_VERSION "09" 29 + #define NETNEXT_VERSION "10" 29 30 30 31 /* Information for net */ 31 32 #define NET_VERSION "10" ··· 584 583 #define TX_ALIGN 4 585 584 #define RX_ALIGN 8 586 585 586 + #define RTL8152_RX_MAX_PENDING 4096 587 + #define RTL8152_RXFG_HEADSZ 256 588 + 587 589 #define INTR_LINK 0x0004 588 590 589 591 #define RTL8152_REQT_READ 0xc0 ··· 698 694 struct r8152; 699 695 700 696 struct rx_agg { 701 - struct list_head list; 697 + struct list_head list, info_list; 702 698 struct urb *urb; 703 699 struct r8152 *context; 700 + struct page *page; 704 701 void *buffer; 705 - void *head; 706 702 }; 707 703 708 704 struct tx_agg { ··· 723 719 struct net_device *netdev; 724 720 struct urb *intr_urb; 725 721 struct tx_agg tx_info[RTL8152_MAX_TX]; 726 - struct rx_agg rx_info[RTL8152_MAX_RX]; 722 + struct list_head rx_info, rx_used; 727 723 struct list_head rx_done, tx_free; 728 724 struct sk_buff_head tx_queue, rx_queue; 729 725 spinlock_t rx_lock, tx_lock; ··· 748 744 void (*autosuspend_en)(struct r8152 *tp, bool enable); 749 745 } rtl_ops; 750 746 747 + atomic_t rx_count; 748 + 751 749 int intr_interval; 752 750 u32 saved_wolopts; 753 751 u32 msg_enable; 754 752 u32 tx_qlen; 755 753 u32 coalesce; 754 + u32 rx_buf_sz; 755 + u32 rx_copybreak; 756 + u32 rx_pending; 757 + 756 758 u16 ocp_base; 757 759 u16 speed; 758 760 u8 *intr_buff; ··· 1477 1467 return (void *)ALIGN((uintptr_t)data, TX_ALIGN); 1478 1468 } 1479 1469 1470 + static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg) 1471 + { 1472 + list_del(&agg->info_list); 1473 + 1474 + usb_free_urb(agg->urb); 1475 + put_page(agg->page); 1476 + kfree(agg); 1477 + 1478 + atomic_dec(&tp->rx_count); 1479 + } 1480 + 1481 + static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags) 1482 + { 
1483 + struct net_device *netdev = tp->netdev; 1484 + int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; 1485 + unsigned int order = get_order(tp->rx_buf_sz); 1486 + struct rx_agg *rx_agg; 1487 + unsigned long flags; 1488 + 1489 + rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node); 1490 + if (!rx_agg) 1491 + return NULL; 1492 + 1493 + rx_agg->page = alloc_pages(mflags | __GFP_COMP, order); 1494 + if (!rx_agg->page) 1495 + goto free_rx; 1496 + 1497 + rx_agg->buffer = page_address(rx_agg->page); 1498 + 1499 + rx_agg->urb = usb_alloc_urb(0, mflags); 1500 + if (!rx_agg->urb) 1501 + goto free_buf; 1502 + 1503 + rx_agg->context = tp; 1504 + 1505 + INIT_LIST_HEAD(&rx_agg->list); 1506 + INIT_LIST_HEAD(&rx_agg->info_list); 1507 + spin_lock_irqsave(&tp->rx_lock, flags); 1508 + list_add_tail(&rx_agg->info_list, &tp->rx_info); 1509 + spin_unlock_irqrestore(&tp->rx_lock, flags); 1510 + 1511 + atomic_inc(&tp->rx_count); 1512 + 1513 + return rx_agg; 1514 + 1515 + free_buf: 1516 + __free_pages(rx_agg->page, order); 1517 + free_rx: 1518 + kfree(rx_agg); 1519 + return NULL; 1520 + } 1521 + 1480 1522 static void free_all_mem(struct r8152 *tp) 1481 1523 { 1524 + struct rx_agg *agg, *agg_next; 1525 + unsigned long flags; 1482 1526 int i; 1483 1527 1484 - for (i = 0; i < RTL8152_MAX_RX; i++) { 1485 - usb_free_urb(tp->rx_info[i].urb); 1486 - tp->rx_info[i].urb = NULL; 1528 + spin_lock_irqsave(&tp->rx_lock, flags); 1487 1529 1488 - kfree(tp->rx_info[i].buffer); 1489 - tp->rx_info[i].buffer = NULL; 1490 - tp->rx_info[i].head = NULL; 1491 - } 1530 + list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list) 1531 + free_rx_agg(tp, agg); 1532 + 1533 + spin_unlock_irqrestore(&tp->rx_lock, flags); 1534 + 1535 + WARN_ON(atomic_read(&tp->rx_count)); 1492 1536 1493 1537 for (i = 0; i < RTL8152_MAX_TX; i++) { 1494 1538 usb_free_urb(tp->tx_info[i].urb); ··· 1566 1502 struct usb_interface *intf = tp->intf; 1567 1503 struct usb_host_interface *alt = intf->cur_altsetting; 
1568 1504 struct usb_host_endpoint *ep_intr = alt->endpoint + 2; 1569 - struct urb *urb; 1570 1505 int node, i; 1571 - u8 *buf; 1572 1506 1573 1507 node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; 1574 1508 1575 1509 spin_lock_init(&tp->rx_lock); 1576 1510 spin_lock_init(&tp->tx_lock); 1511 + INIT_LIST_HEAD(&tp->rx_info); 1577 1512 INIT_LIST_HEAD(&tp->tx_free); 1578 1513 INIT_LIST_HEAD(&tp->rx_done); 1579 1514 skb_queue_head_init(&tp->tx_queue); 1580 1515 skb_queue_head_init(&tp->rx_queue); 1516 + atomic_set(&tp->rx_count, 0); 1581 1517 1582 1518 for (i = 0; i < RTL8152_MAX_RX; i++) { 1583 - buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); 1584 - if (!buf) 1519 + if (!alloc_rx_agg(tp, GFP_KERNEL)) 1585 1520 goto err1; 1586 - 1587 - if (buf != rx_agg_align(buf)) { 1588 - kfree(buf); 1589 - buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL, 1590 - node); 1591 - if (!buf) 1592 - goto err1; 1593 - } 1594 - 1595 - urb = usb_alloc_urb(0, GFP_KERNEL); 1596 - if (!urb) { 1597 - kfree(buf); 1598 - goto err1; 1599 - } 1600 - 1601 - INIT_LIST_HEAD(&tp->rx_info[i].list); 1602 - tp->rx_info[i].context = tp; 1603 - tp->rx_info[i].urb = urb; 1604 - tp->rx_info[i].buffer = buf; 1605 - tp->rx_info[i].head = rx_agg_align(buf); 1606 1521 } 1607 1522 1608 1523 for (i = 0; i < RTL8152_MAX_TX; i++) { 1524 + struct urb *urb; 1525 + u8 *buf; 1526 + 1609 1527 buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); 1610 1528 if (!buf) 1611 1529 goto err1; ··· 1953 1907 return checksum; 1954 1908 } 1955 1909 1910 + static inline bool rx_count_exceed(struct r8152 *tp) 1911 + { 1912 + return atomic_read(&tp->rx_count) > RTL8152_MAX_RX; 1913 + } 1914 + 1915 + static inline int agg_offset(struct rx_agg *agg, void *addr) 1916 + { 1917 + return (int)(addr - agg->buffer); 1918 + } 1919 + 1920 + static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags) 1921 + { 1922 + struct rx_agg *agg, *agg_next, *agg_free = NULL; 1923 + unsigned long flags; 1924 + 1925 + 
spin_lock_irqsave(&tp->rx_lock, flags); 1926 + 1927 + list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) { 1928 + if (page_count(agg->page) == 1) { 1929 + if (!agg_free) { 1930 + list_del_init(&agg->list); 1931 + agg_free = agg; 1932 + continue; 1933 + } 1934 + if (rx_count_exceed(tp)) { 1935 + list_del_init(&agg->list); 1936 + free_rx_agg(tp, agg); 1937 + } 1938 + break; 1939 + } 1940 + } 1941 + 1942 + spin_unlock_irqrestore(&tp->rx_lock, flags); 1943 + 1944 + if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending) 1945 + agg_free = alloc_rx_agg(tp, mflags); 1946 + 1947 + return agg_free; 1948 + } 1949 + 1956 1950 static int rx_bottom(struct r8152 *tp, int budget) 1957 1951 { 1958 1952 unsigned long flags; ··· 2028 1942 2029 1943 list_for_each_safe(cursor, next, &rx_queue) { 2030 1944 struct rx_desc *rx_desc; 2031 - struct rx_agg *agg; 1945 + struct rx_agg *agg, *agg_free; 2032 1946 int len_used = 0; 2033 1947 struct urb *urb; 2034 1948 u8 *rx_data; ··· 2040 1954 if (urb->actual_length < ETH_ZLEN) 2041 1955 goto submit; 2042 1956 2043 - rx_desc = agg->head; 2044 - rx_data = agg->head; 1957 + agg_free = rtl_get_free_rx(tp, GFP_ATOMIC); 1958 + 1959 + rx_desc = agg->buffer; 1960 + rx_data = agg->buffer; 2045 1961 len_used += sizeof(struct rx_desc); 2046 1962 2047 1963 while (urb->actual_length > len_used) { 2048 1964 struct net_device *netdev = tp->netdev; 2049 1965 struct net_device_stats *stats = &netdev->stats; 2050 - unsigned int pkt_len; 1966 + unsigned int pkt_len, rx_frag_head_sz; 2051 1967 struct sk_buff *skb; 2052 1968 2053 1969 /* limite the skb numbers for rx_queue */ ··· 2067 1979 pkt_len -= ETH_FCS_LEN; 2068 1980 rx_data += sizeof(struct rx_desc); 2069 1981 2070 - skb = napi_alloc_skb(napi, pkt_len); 1982 + if (!agg_free || tp->rx_copybreak > pkt_len) 1983 + rx_frag_head_sz = pkt_len; 1984 + else 1985 + rx_frag_head_sz = tp->rx_copybreak; 1986 + 1987 + skb = napi_alloc_skb(napi, rx_frag_head_sz); 2071 1988 if (!skb) { 2072 1989 
stats->rx_dropped++; 2073 1990 goto find_next_rx; 2074 1991 } 2075 1992 2076 1993 skb->ip_summed = r8152_rx_csum(tp, rx_desc); 2077 - memcpy(skb->data, rx_data, pkt_len); 2078 - skb_put(skb, pkt_len); 1994 + memcpy(skb->data, rx_data, rx_frag_head_sz); 1995 + skb_put(skb, rx_frag_head_sz); 1996 + pkt_len -= rx_frag_head_sz; 1997 + rx_data += rx_frag_head_sz; 1998 + if (pkt_len) { 1999 + skb_add_rx_frag(skb, 0, agg->page, 2000 + agg_offset(agg, rx_data), 2001 + pkt_len, 2002 + SKB_DATA_ALIGN(pkt_len)); 2003 + get_page(agg->page); 2004 + } 2005 + 2079 2006 skb->protocol = eth_type_trans(skb, netdev); 2080 2007 rtl_rx_vlan_tag(rx_desc, skb); 2081 2008 if (work_done < budget) { 2082 2009 napi_gro_receive(napi, skb); 2083 2010 work_done++; 2084 2011 stats->rx_packets++; 2085 - stats->rx_bytes += pkt_len; 2012 + stats->rx_bytes += skb->len; 2086 2013 } else { 2087 2014 __skb_queue_tail(&tp->rx_queue, skb); 2088 2015 } ··· 2105 2002 find_next_rx: 2106 2003 rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN); 2107 2004 rx_desc = (struct rx_desc *)rx_data; 2108 - len_used = (int)(rx_data - (u8 *)agg->head); 2005 + len_used = agg_offset(agg, rx_data); 2109 2006 len_used += sizeof(struct rx_desc); 2007 + } 2008 + 2009 + WARN_ON(!agg_free && page_count(agg->page) > 1); 2010 + 2011 + if (agg_free) { 2012 + spin_lock_irqsave(&tp->rx_lock, flags); 2013 + if (page_count(agg->page) == 1) { 2014 + list_add(&agg_free->list, &tp->rx_used); 2015 + } else { 2016 + list_add_tail(&agg->list, &tp->rx_used); 2017 + agg = agg_free; 2018 + urb = agg->urb; 2019 + } 2020 + spin_unlock_irqrestore(&tp->rx_lock, flags); 2110 2021 } 2111 2022 2112 2023 submit: ··· 2230 2113 return 0; 2231 2114 2232 2115 usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), 2233 - agg->head, agg_buf_sz, 2116 + agg->buffer, tp->rx_buf_sz, 2234 2117 (usb_complete_t)read_bulk_callback, agg); 2235 2118 2236 2119 ret = usb_submit_urb(agg->urb, mem_flags); ··· 2447 2330 2448 2331 static int 
rtl_start_rx(struct r8152 *tp) 2449 2332 { 2450 - int i, ret = 0; 2333 + struct rx_agg *agg, *agg_next; 2334 + struct list_head tmp_list; 2335 + unsigned long flags; 2336 + int ret = 0, i = 0; 2337 + 2338 + INIT_LIST_HEAD(&tmp_list); 2339 + 2340 + spin_lock_irqsave(&tp->rx_lock, flags); 2451 2341 2452 2342 INIT_LIST_HEAD(&tp->rx_done); 2453 - for (i = 0; i < RTL8152_MAX_RX; i++) { 2454 - INIT_LIST_HEAD(&tp->rx_info[i].list); 2455 - ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); 2456 - if (ret) 2457 - break; 2343 + INIT_LIST_HEAD(&tp->rx_used); 2344 + 2345 + list_splice_init(&tp->rx_info, &tmp_list); 2346 + 2347 + spin_unlock_irqrestore(&tp->rx_lock, flags); 2348 + 2349 + list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { 2350 + INIT_LIST_HEAD(&agg->list); 2351 + 2352 + /* Only RTL8152_MAX_RX rx_agg need to be submitted. */ 2353 + if (++i > RTL8152_MAX_RX) { 2354 + spin_lock_irqsave(&tp->rx_lock, flags); 2355 + list_add_tail(&agg->list, &tp->rx_used); 2356 + spin_unlock_irqrestore(&tp->rx_lock, flags); 2357 + } else if (unlikely(ret < 0)) { 2358 + spin_lock_irqsave(&tp->rx_lock, flags); 2359 + list_add_tail(&agg->list, &tp->rx_done); 2360 + spin_unlock_irqrestore(&tp->rx_lock, flags); 2361 + } else { 2362 + ret = r8152_submit_rx(tp, agg, GFP_KERNEL); 2363 + } 2458 2364 } 2459 2365 2460 - if (ret && ++i < RTL8152_MAX_RX) { 2461 - struct list_head rx_queue; 2462 - unsigned long flags; 2463 - 2464 - INIT_LIST_HEAD(&rx_queue); 2465 - 2466 - do { 2467 - struct rx_agg *agg = &tp->rx_info[i++]; 2468 - struct urb *urb = agg->urb; 2469 - 2470 - urb->actual_length = 0; 2471 - list_add_tail(&agg->list, &rx_queue); 2472 - } while (i < RTL8152_MAX_RX); 2473 - 2474 - spin_lock_irqsave(&tp->rx_lock, flags); 2475 - list_splice_tail(&rx_queue, &tp->rx_done); 2476 - spin_unlock_irqrestore(&tp->rx_lock, flags); 2477 - } 2366 + spin_lock_irqsave(&tp->rx_lock, flags); 2367 + WARN_ON(!list_empty(&tp->rx_info)); 2368 + list_splice(&tmp_list, &tp->rx_info); 2369 + 
spin_unlock_irqrestore(&tp->rx_lock, flags); 2478 2370 2479 2371 return ret; 2480 2372 } 2481 2373 2482 2374 static int rtl_stop_rx(struct r8152 *tp) 2483 2375 { 2484 - int i; 2376 + struct rx_agg *agg, *agg_next; 2377 + struct list_head tmp_list; 2378 + unsigned long flags; 2485 2379 2486 - for (i = 0; i < RTL8152_MAX_RX; i++) 2487 - usb_kill_urb(tp->rx_info[i].urb); 2380 + INIT_LIST_HEAD(&tmp_list); 2381 + 2382 + /* The usb_kill_urb() couldn't be used in atomic. 2383 + * Therefore, move the list of rx_info to a tmp one. 2384 + * Then, list_for_each_entry_safe could be used without 2385 + * spin lock. 2386 + */ 2387 + 2388 + spin_lock_irqsave(&tp->rx_lock, flags); 2389 + list_splice_init(&tp->rx_info, &tmp_list); 2390 + spin_unlock_irqrestore(&tp->rx_lock, flags); 2391 + 2392 + list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { 2393 + /* At least RTL8152_MAX_RX rx_agg have the page_count being 2394 + * equal to 1, so the other ones could be freed safely. 2395 + */ 2396 + if (page_count(agg->page) > 1) 2397 + free_rx_agg(tp, agg); 2398 + else 2399 + usb_kill_urb(agg->urb); 2400 + } 2401 + 2402 + /* Move back the list of temp to the rx_info */ 2403 + spin_lock_irqsave(&tp->rx_lock, flags); 2404 + WARN_ON(!list_empty(&tp->rx_info)); 2405 + list_splice(&tmp_list, &tp->rx_info); 2406 + spin_unlock_irqrestore(&tp->rx_lock, flags); 2488 2407 2489 2408 while (!skb_queue_empty(&tp->rx_queue)) 2490 2409 dev_kfree_skb(__skb_dequeue(&tp->rx_queue)); ··· 2600 2447 2601 2448 static void r8153_set_rx_early_size(struct r8152 *tp) 2602 2449 { 2603 - u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu); 2450 + u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu); 2604 2451 2605 2452 switch (tp->version) { 2606 2453 case RTL_VER_03: ··· 5107 4954 return ret; 5108 4955 } 5109 4956 4957 + static int rtl8152_get_tunable(struct net_device *netdev, 4958 + const struct ethtool_tunable *tunable, void *d) 4959 + { 4960 + struct r8152 *tp = 
netdev_priv(netdev); 4961 + 4962 + switch (tunable->id) { 4963 + case ETHTOOL_RX_COPYBREAK: 4964 + *(u32 *)d = tp->rx_copybreak; 4965 + break; 4966 + default: 4967 + return -EOPNOTSUPP; 4968 + } 4969 + 4970 + return 0; 4971 + } 4972 + 4973 + static int rtl8152_set_tunable(struct net_device *netdev, 4974 + const struct ethtool_tunable *tunable, 4975 + const void *d) 4976 + { 4977 + struct r8152 *tp = netdev_priv(netdev); 4978 + u32 val; 4979 + 4980 + switch (tunable->id) { 4981 + case ETHTOOL_RX_COPYBREAK: 4982 + val = *(u32 *)d; 4983 + if (val < ETH_ZLEN) { 4984 + netif_err(tp, rx_err, netdev, 4985 + "Invalid rx copy break value\n"); 4986 + return -EINVAL; 4987 + } 4988 + 4989 + if (tp->rx_copybreak != val) { 4990 + napi_disable(&tp->napi); 4991 + tp->rx_copybreak = val; 4992 + napi_enable(&tp->napi); 4993 + } 4994 + break; 4995 + default: 4996 + return -EOPNOTSUPP; 4997 + } 4998 + 4999 + return 0; 5000 + } 5001 + 5002 + static void rtl8152_get_ringparam(struct net_device *netdev, 5003 + struct ethtool_ringparam *ring) 5004 + { 5005 + struct r8152 *tp = netdev_priv(netdev); 5006 + 5007 + ring->rx_max_pending = RTL8152_RX_MAX_PENDING; 5008 + ring->rx_pending = tp->rx_pending; 5009 + } 5010 + 5011 + static int rtl8152_set_ringparam(struct net_device *netdev, 5012 + struct ethtool_ringparam *ring) 5013 + { 5014 + struct r8152 *tp = netdev_priv(netdev); 5015 + 5016 + if (ring->rx_pending < (RTL8152_MAX_RX * 2)) 5017 + return -EINVAL; 5018 + 5019 + if (tp->rx_pending != ring->rx_pending) { 5020 + napi_disable(&tp->napi); 5021 + tp->rx_pending = ring->rx_pending; 5022 + napi_enable(&tp->napi); 5023 + } 5024 + 5025 + return 0; 5026 + } 5027 + 5110 5028 static const struct ethtool_ops ops = { 5111 5029 .get_drvinfo = rtl8152_get_drvinfo, 5112 5030 .get_link = ethtool_op_get_link, ··· 5195 4971 .set_eee = rtl_ethtool_set_eee, 5196 4972 .get_link_ksettings = rtl8152_get_link_ksettings, 5197 4973 .set_link_ksettings = rtl8152_set_link_ksettings, 4974 + .get_tunable = 
rtl8152_get_tunable, 4975 + .set_tunable = rtl8152_set_tunable, 4976 + .get_ringparam = rtl8152_get_ringparam, 4977 + .set_ringparam = rtl8152_set_ringparam, 5198 4978 }; 5199 4979 5200 4980 static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) ··· 5343 5115 ops->in_nway = rtl8152_in_nway; 5344 5116 ops->hw_phy_cfg = r8152b_hw_phy_cfg; 5345 5117 ops->autosuspend_en = rtl_runtime_suspend_enable; 5118 + tp->rx_buf_sz = 16 * 1024; 5346 5119 break; 5347 5120 5348 5121 case RTL_VER_03: ··· 5361 5132 ops->in_nway = rtl8153_in_nway; 5362 5133 ops->hw_phy_cfg = r8153_hw_phy_cfg; 5363 5134 ops->autosuspend_en = rtl8153_runtime_enable; 5135 + tp->rx_buf_sz = 32 * 1024; 5364 5136 break; 5365 5137 5366 5138 case RTL_VER_08: ··· 5377 5147 ops->in_nway = rtl8153_in_nway; 5378 5148 ops->hw_phy_cfg = r8153b_hw_phy_cfg; 5379 5149 ops->autosuspend_en = rtl8153b_runtime_enable; 5150 + tp->rx_buf_sz = 32 * 1024; 5380 5151 break; 5381 5152 5382 5153 default: ··· 5551 5320 tp->autoneg = AUTONEG_ENABLE; 5552 5321 tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; 5553 5322 tp->duplex = DUPLEX_FULL; 5323 + 5324 + tp->rx_copybreak = RTL8152_RXFG_HEADSZ; 5325 + tp->rx_pending = 10 * RTL8152_MAX_RX; 5554 5326 5555 5327 intf->needs_remote_wakeup = 1; 5556 5328