Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'qdisc-RED-offload'

Jiri Pirko says:

====================
qdisc RED offload

Nogah says:

Add an offload support for RED qdisc for mlxsw driver.
The first patch adds the ability to offload RED qdisc by using
ndo_setup_tc. It gives RED three commands, to offload, change or delete
the qdisc, to get the qdisc generic stats and to get its RED xstats.
There is no enforcement on a driver to offload or not offload the qdisc and
it is up to the driver to decide.
RED qdisc is first being created and only later grafted to a parent (unless
it is a root qdisc). For that reason the return value of the offload
replace command that is called in the init process doesn't reflect actual
offload state. The offload state is determined in the dump function so it
can be reflected to the user. This function is also responsible for stats
update.

Patches 2-3 change the name of TC_SETUP_MQPRIO & TC_SETUP_CBS to match
with the new convention of QDISC prefix.
The rest of the patchset is driver support for the qdisc. Currently only
as root qdisc that is being set on the default traffic class. It supports
only the following parameters of RED: min, max, probability and ECN mode.
Limit and burst size related params are being ignored at this moment.

---
v7->v8 internal: (external RFC->v1)
- patch 1/9:
- unite the offload and un-offload functions
- clean the OFFLOAD flag when the qdisc is not offloaded
- patch 2/9:
- minor change to avoid a conflict
- patch 5/9:
- check for bad min/max values
- clean the offloaded qdisc after a bad config call
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+690 -21
+1 -1
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 2206 2206 struct tc_mqprio_qopt *mqprio = type_data; 2207 2207 u8 tc; 2208 2208 2209 - if (type != TC_SETUP_MQPRIO) 2209 + if (type != TC_SETUP_QDISC_MQPRIO) 2210 2210 return -EOPNOTSUPP; 2211 2211 2212 2212 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 4289 4289 { 4290 4290 struct tc_mqprio_qopt *mqprio = type_data; 4291 4291 4292 - if (type != TC_SETUP_MQPRIO) 4292 + if (type != TC_SETUP_QDISC_MQPRIO) 4293 4293 return -EOPNOTSUPP; 4294 4294 4295 4295 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 7388 7388 switch (type) { 7389 7389 case TC_SETUP_BLOCK: 7390 7390 return bnxt_setup_tc_block(dev, type_data); 7391 - case TC_SETUP_MQPRIO: { 7391 + case TC_SETUP_QDISC_MQPRIO: { 7392 7392 struct tc_mqprio_qopt *mqprio = type_data; 7393 7393 7394 7394 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 351 351 u8 num_tc; 352 352 int i; 353 353 354 - if (type != TC_SETUP_MQPRIO) 354 + if (type != TC_SETUP_QDISC_MQPRIO) 355 355 return -EOPNOTSUPP; 356 356 357 357 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
··· 1252 1252 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1253 1253 void *type_data) 1254 1254 { 1255 - if (type != TC_SETUP_MQPRIO) 1255 + if (type != TC_SETUP_QDISC_MQPRIO) 1256 1256 return -EOPNOTSUPP; 1257 1257 1258 1258 return hns3_setup_tc(dev, type_data);
+1 -1
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
··· 1389 1389 { 1390 1390 struct tc_mqprio_qopt *mqprio = type_data; 1391 1391 1392 - if (type != TC_SETUP_MQPRIO) 1392 + if (type != TC_SETUP_QDISC_MQPRIO) 1393 1393 return -EOPNOTSUPP; 1394 1394 1395 1395 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+1 -1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 7550 7550 void *type_data) 7551 7551 { 7552 7552 switch (type) { 7553 - case TC_SETUP_MQPRIO: 7553 + case TC_SETUP_QDISC_MQPRIO: 7554 7554 return i40e_setup_tc(netdev, type_data); 7555 7555 case TC_SETUP_BLOCK: 7556 7556 return i40e_setup_tc_block(netdev, type_data);
+1 -1
drivers/net/ethernet/intel/igb/igb_main.c
··· 2488 2488 struct igb_adapter *adapter = netdev_priv(dev); 2489 2489 2490 2490 switch (type) { 2491 - case TC_SETUP_CBS: 2491 + case TC_SETUP_QDISC_CBS: 2492 2492 return igb_offload_cbs(adapter, type_data); 2493 2493 2494 2494 default:
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 9431 9431 switch (type) { 9432 9432 case TC_SETUP_BLOCK: 9433 9433 return ixgbe_setup_tc_block(dev, type_data); 9434 - case TC_SETUP_MQPRIO: 9434 + case TC_SETUP_QDISC_MQPRIO: 9435 9435 return ixgbe_setup_tc_mqprio(dev, type_data); 9436 9436 default: 9437 9437 return -EOPNOTSUPP;
+1 -1
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 135 135 { 136 136 struct tc_mqprio_qopt *mqprio = type_data; 137 137 138 - if (type != TC_SETUP_MQPRIO) 138 + if (type != TC_SETUP_QDISC_MQPRIO) 139 139 return -EOPNOTSUPP; 140 140 141 141 if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3146 3146 case TC_SETUP_BLOCK: 3147 3147 return mlx5e_setup_tc_block(dev, type_data); 3148 3148 #endif 3149 - case TC_SETUP_MQPRIO: 3149 + case TC_SETUP_QDISC_MQPRIO: 3150 3150 return mlx5e_setup_tc_mqprio(dev, type_data); 3151 3151 default: 3152 3152 return -EOPNOTSUPP;
+2 -1
drivers/net/ethernet/mellanox/mlxsw/Makefile
··· 19 19 spectrum_acl.o spectrum_flower.o \ 20 20 spectrum_cnt.o spectrum_fid.o \ 21 21 spectrum_ipip.o spectrum_acl_flex_actions.o \ 22 - spectrum_mr.o spectrum_mr_tcam.o 22 + spectrum_mr.o spectrum_mr_tcam.o \ 23 + spectrum_qdisc.o 23 24 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o 24 25 mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o 25 26 obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
+206
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 1758 1758 } 1759 1759 } 1760 1760 1761 + /* CWTP - Congetion WRED ECN TClass Profile 1762 + * ---------------------------------------- 1763 + * Configures the profiles for queues of egress port and traffic class 1764 + */ 1765 + #define MLXSW_REG_CWTP_ID 0x2802 1766 + #define MLXSW_REG_CWTP_BASE_LEN 0x28 1767 + #define MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN 0x08 1768 + #define MLXSW_REG_CWTP_LEN 0x40 1769 + 1770 + MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN); 1771 + 1772 + /* reg_cwtp_local_port 1773 + * Local port number 1774 + * Not supported for CPU port 1775 + * Access: Index 1776 + */ 1777 + MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8); 1778 + 1779 + /* reg_cwtp_traffic_class 1780 + * Traffic Class to configure 1781 + * Access: Index 1782 + */ 1783 + MLXSW_ITEM32(reg, cwtp, traffic_class, 32, 0, 8); 1784 + 1785 + /* reg_cwtp_profile_min 1786 + * Minimum Average Queue Size of the profile in cells. 1787 + * Access: RW 1788 + */ 1789 + MLXSW_ITEM32_INDEXED(reg, cwtp, profile_min, MLXSW_REG_CWTP_BASE_LEN, 1790 + 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 0, false); 1791 + 1792 + /* reg_cwtp_profile_percent 1793 + * Percentage of WRED and ECN marking for maximum Average Queue size 1794 + * Range is 0 to 100, units of integer percentage 1795 + * Access: RW 1796 + */ 1797 + MLXSW_ITEM32_INDEXED(reg, cwtp, profile_percent, MLXSW_REG_CWTP_BASE_LEN, 1798 + 24, 7, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false); 1799 + 1800 + /* reg_cwtp_profile_max 1801 + * Maximum Average Queue size of the profile in cells 1802 + * Access: RW 1803 + */ 1804 + MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN, 1805 + 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false); 1806 + 1807 + #define MLXSW_REG_CWTP_MIN_VALUE 64 1808 + #define MLXSW_REG_CWTP_MAX_PROFILE 2 1809 + #define MLXSW_REG_CWTP_DEFAULT_PROFILE 1 1810 + 1811 + static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port, 1812 + u8 traffic_class) 1813 + { 1814 + int i; 1815 + 
1816 + MLXSW_REG_ZERO(cwtp, payload); 1817 + mlxsw_reg_cwtp_local_port_set(payload, local_port); 1818 + mlxsw_reg_cwtp_traffic_class_set(payload, traffic_class); 1819 + 1820 + for (i = 0; i <= MLXSW_REG_CWTP_MAX_PROFILE; i++) { 1821 + mlxsw_reg_cwtp_profile_min_set(payload, i, 1822 + MLXSW_REG_CWTP_MIN_VALUE); 1823 + mlxsw_reg_cwtp_profile_max_set(payload, i, 1824 + MLXSW_REG_CWTP_MIN_VALUE); 1825 + } 1826 + } 1827 + 1828 + #define MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile) (profile - 1) 1829 + 1830 + static inline void 1831 + mlxsw_reg_cwtp_profile_pack(char *payload, u8 profile, u32 min, u32 max, 1832 + u32 probability) 1833 + { 1834 + u8 index = MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile); 1835 + 1836 + mlxsw_reg_cwtp_profile_min_set(payload, index, min); 1837 + mlxsw_reg_cwtp_profile_max_set(payload, index, max); 1838 + mlxsw_reg_cwtp_profile_percent_set(payload, index, probability); 1839 + } 1840 + 1841 + /* CWTPM - Congestion WRED ECN TClass and Pool Mapping 1842 + * --------------------------------------------------- 1843 + * The CWTPM register maps each egress port and traffic class to profile num. 
1844 + */ 1845 + #define MLXSW_REG_CWTPM_ID 0x2803 1846 + #define MLXSW_REG_CWTPM_LEN 0x44 1847 + 1848 + MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN); 1849 + 1850 + /* reg_cwtpm_local_port 1851 + * Local port number 1852 + * Not supported for CPU port 1853 + * Access: Index 1854 + */ 1855 + MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8); 1856 + 1857 + /* reg_cwtpm_traffic_class 1858 + * Traffic Class to configure 1859 + * Access: Index 1860 + */ 1861 + MLXSW_ITEM32(reg, cwtpm, traffic_class, 32, 0, 8); 1862 + 1863 + /* reg_cwtpm_ew 1864 + * Control enablement of WRED for traffic class: 1865 + * 0 - Disable 1866 + * 1 - Enable 1867 + * Access: RW 1868 + */ 1869 + MLXSW_ITEM32(reg, cwtpm, ew, 36, 1, 1); 1870 + 1871 + /* reg_cwtpm_ee 1872 + * Control enablement of ECN for traffic class: 1873 + * 0 - Disable 1874 + * 1 - Enable 1875 + * Access: RW 1876 + */ 1877 + MLXSW_ITEM32(reg, cwtpm, ee, 36, 0, 1); 1878 + 1879 + /* reg_cwtpm_tcp_g 1880 + * TCP Green Profile. 1881 + * Index of the profile within {port, traffic class} to use. 1882 + * 0 for disabling both WRED and ECN for this type of traffic. 1883 + * Access: RW 1884 + */ 1885 + MLXSW_ITEM32(reg, cwtpm, tcp_g, 52, 0, 2); 1886 + 1887 + /* reg_cwtpm_tcp_y 1888 + * TCP Yellow Profile. 1889 + * Index of the profile within {port, traffic class} to use. 1890 + * 0 for disabling both WRED and ECN for this type of traffic. 1891 + * Access: RW 1892 + */ 1893 + MLXSW_ITEM32(reg, cwtpm, tcp_y, 56, 16, 2); 1894 + 1895 + /* reg_cwtpm_tcp_r 1896 + * TCP Red Profile. 1897 + * Index of the profile within {port, traffic class} to use. 1898 + * 0 for disabling both WRED and ECN for this type of traffic. 1899 + * Access: RW 1900 + */ 1901 + MLXSW_ITEM32(reg, cwtpm, tcp_r, 56, 0, 2); 1902 + 1903 + /* reg_cwtpm_ntcp_g 1904 + * Non-TCP Green Profile. 1905 + * Index of the profile within {port, traffic class} to use. 1906 + * 0 for disabling both WRED and ECN for this type of traffic. 
1907 + * Access: RW 1908 + */ 1909 + MLXSW_ITEM32(reg, cwtpm, ntcp_g, 60, 0, 2); 1910 + 1911 + /* reg_cwtpm_ntcp_y 1912 + * Non-TCP Yellow Profile. 1913 + * Index of the profile within {port, traffic class} to use. 1914 + * 0 for disabling both WRED and ECN for this type of traffic. 1915 + * Access: RW 1916 + */ 1917 + MLXSW_ITEM32(reg, cwtpm, ntcp_y, 64, 16, 2); 1918 + 1919 + /* reg_cwtpm_ntcp_r 1920 + * Non-TCP Red Profile. 1921 + * Index of the profile within {port, traffic class} to use. 1922 + * 0 for disabling both WRED and ECN for this type of traffic. 1923 + * Access: RW 1924 + */ 1925 + MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2); 1926 + 1927 + #define MLXSW_REG_CWTPM_RESET_PROFILE 0 1928 + 1929 + static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port, 1930 + u8 traffic_class, u8 profile, 1931 + bool wred, bool ecn) 1932 + { 1933 + MLXSW_REG_ZERO(cwtpm, payload); 1934 + mlxsw_reg_cwtpm_local_port_set(payload, local_port); 1935 + mlxsw_reg_cwtpm_traffic_class_set(payload, traffic_class); 1936 + mlxsw_reg_cwtpm_ew_set(payload, wred); 1937 + mlxsw_reg_cwtpm_ee_set(payload, ecn); 1938 + mlxsw_reg_cwtpm_tcp_g_set(payload, profile); 1939 + mlxsw_reg_cwtpm_tcp_y_set(payload, profile); 1940 + mlxsw_reg_cwtpm_tcp_r_set(payload, profile); 1941 + mlxsw_reg_cwtpm_ntcp_g_set(payload, profile); 1942 + mlxsw_reg_cwtpm_ntcp_y_set(payload, profile); 1943 + mlxsw_reg_cwtpm_ntcp_r_set(payload, profile); 1944 + } 1945 + 1761 1946 /* PPBT - Policy-Engine Port Binding Table 1762 1947 * --------------------------------------- 1763 1948 * This register is used for configuration of the Port Binding Table. 
··· 3341 3156 3342 3157 enum mlxsw_reg_ppcnt_grp { 3343 3158 MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, 3159 + MLXSW_REG_PPCNT_EXT_CNT = 0x5, 3344 3160 MLXSW_REG_PPCNT_PRIO_CNT = 0x10, 3345 3161 MLXSW_REG_PPCNT_TC_CNT = 0x11, 3162 + MLXSW_REG_PPCNT_TC_CONG_TC = 0x13, 3346 3163 }; 3347 3164 3348 3165 /* reg_ppcnt_grp ··· 3360 3173 * 0x10: Per Priority Counters 3361 3174 * 0x11: Per Traffic Class Counters 3362 3175 * 0x12: Physical Layer Counters 3176 + * 0x13: Per Traffic Class Congestion Counters 3363 3177 * Access: Index 3364 3178 */ 3365 3179 MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6); ··· 3499 3311 MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, 3500 3312 MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); 3501 3313 3314 + /* Ethernet Extended Counter Group Counters */ 3315 + 3316 + /* reg_ppcnt_ecn_marked 3317 + * Access: RO 3318 + */ 3319 + MLXSW_ITEM64(reg, ppcnt, ecn_marked, 3320 + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); 3321 + 3502 3322 /* Ethernet Per Priority Group Counters */ 3503 3323 3504 3324 /* reg_ppcnt_rx_octets ··· 3581 3385 */ 3582 3386 MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 3583 3387 MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); 3388 + 3389 + /* Ethernet Per Traffic Class Congestion Group Counters */ 3390 + 3391 + /* reg_ppcnt_wred_discard 3392 + * Access: RO 3393 + */ 3394 + MLXSW_ITEM64(reg, ppcnt, wred_discard, 3395 + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); 3584 3396 3585 3397 static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, 3586 3398 enum mlxsw_reg_ppcnt_grp grp, ··· 7609 7405 MLXSW_REG(svpe), 7610 7406 MLXSW_REG(sfmr), 7611 7407 MLXSW_REG(spvmlr), 7408 + MLXSW_REG(cwtp), 7409 + MLXSW_REG(cwtpm), 7612 7410 MLXSW_REG(ppbt), 7613 7411 MLXSW_REG(pacl), 7614 7412 MLXSW_REG(pagt),
+36
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 1324 1324 return err; 1325 1325 } 1326 1326 1327 + static void 1328 + mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1329 + struct mlxsw_sp_port_xstats *xstats) 1330 + { 1331 + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1332 + int err, i; 1333 + 1334 + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1335 + ppcnt_pl); 1336 + if (!err) 1337 + xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1338 + 1339 + for (i = 0; i < TC_MAX_QUEUE; i++) { 1340 + err = mlxsw_sp_port_get_stats_raw(dev, 1341 + MLXSW_REG_PPCNT_TC_CONG_TC, 1342 + i, ppcnt_pl); 1343 + if (!err) 1344 + xstats->wred_drop[i] = 1345 + mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1346 + 1347 + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1348 + i, ppcnt_pl); 1349 + if (err) 1350 + continue; 1351 + 1352 + xstats->backlog[i] = 1353 + mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1354 + xstats->tail_drop[i] = 1355 + mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1356 + } 1357 + } 1358 + 1327 1359 static void update_stats_cache(struct work_struct *work) 1328 1360 { 1329 1361 struct mlxsw_sp_port *mlxsw_sp_port = ··· 1367 1335 1368 1336 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1369 1337 &mlxsw_sp_port->periodic_hw_stats.stats); 1338 + mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1339 + &mlxsw_sp_port->periodic_hw_stats.xstats); 1370 1340 1371 1341 out: 1372 1342 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, ··· 1831 1797 switch (type) { 1832 1798 case TC_SETUP_BLOCK: 1833 1799 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1800 + case TC_SETUP_QDISC_RED: 1801 + return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1834 1802 default: 1835 1803 return -EOPNOTSUPP; 1836 1804 }
+38
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 48 48 #include <linux/notifier.h> 49 49 #include <net/psample.h> 50 50 #include <net/pkt_cls.h> 51 + #include <net/red.h> 51 52 52 53 #include "port.h" 53 54 #include "core.h" ··· 204 203 struct list_head bridge_vlan_node; 205 204 }; 206 205 206 + enum mlxsw_sp_qdisc_type { 207 + MLXSW_SP_QDISC_NO_QDISC, 208 + MLXSW_SP_QDISC_RED, 209 + }; 210 + 211 + struct mlxsw_sp_qdisc { 212 + u32 handle; 213 + enum mlxsw_sp_qdisc_type type; 214 + struct red_stats xstats_base; 215 + union { 216 + struct { 217 + u64 tail_drop_base; 218 + u64 ecn_base; 219 + u64 wred_drop_base; 220 + } red; 221 + } xstats; 222 + 223 + u64 tx_bytes; 224 + u64 tx_packets; 225 + u64 drops; 226 + u64 overlimits; 227 + }; 228 + 229 + /* No need an internal lock; At worse - miss a single periodic iteration */ 230 + struct mlxsw_sp_port_xstats { 231 + u64 ecn; 232 + u64 wred_drop[TC_MAX_QUEUE]; 233 + u64 tail_drop[TC_MAX_QUEUE]; 234 + u64 backlog[TC_MAX_QUEUE]; 235 + }; 236 + 207 237 struct mlxsw_sp_port { 208 238 struct net_device *dev; 209 239 struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; ··· 264 232 struct { 265 233 #define MLXSW_HW_STATS_UPDATE_TIME HZ 266 234 struct rtnl_link_stats64 stats; 235 + struct mlxsw_sp_port_xstats xstats; 267 236 struct delayed_work update_dw; 268 237 } periodic_hw_stats; 269 238 struct mlxsw_sp_port_sample *sample; 270 239 struct list_head vlans_list; 240 + struct mlxsw_sp_qdisc root_qdisc; 271 241 }; 272 242 273 243 static inline bool ··· 579 545 struct tc_cls_flower_offload *f); 580 546 int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, 581 547 struct tc_cls_flower_offload *f); 548 + 549 + /* spectrum_qdisc.c */ 550 + int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, 551 + struct tc_red_qopt_offload *p); 582 552 583 553 /* spectrum_fid.c */ 584 554 int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+276
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/errno.h> 37 + #include <linux/netdevice.h> 38 + #include <net/pkt_cls.h> 39 + #include <net/red.h> 40 + 41 + #include "spectrum.h" 42 + #include "reg.h" 43 + 44 + static int 45 + mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, 46 + int tclass_num, u32 min, u32 max, 47 + u32 probability, bool is_ecn) 48 + { 49 + char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; 50 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 51 + int err; 52 + 53 + mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num); 54 + mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE, 55 + roundup(min, MLXSW_REG_CWTP_MIN_VALUE), 56 + roundup(max, MLXSW_REG_CWTP_MIN_VALUE), 57 + probability); 58 + 59 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd); 60 + if (err) 61 + return err; 62 + 63 + mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, 64 + MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); 65 + 66 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); 67 + } 68 + 69 + static int 70 + mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port, 71 + int tclass_num) 72 + { 73 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 74 + char cwtpm_cmd[MLXSW_REG_CWTPM_LEN]; 75 + 76 + mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num, 77 + MLXSW_REG_CWTPM_RESET_PROFILE, false, false); 78 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd); 79 + } 80 + 81 + static void 82 + mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, 83 + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 84 + int tclass_num) 85 + { 86 + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; 87 + struct mlxsw_sp_port_xstats *xstats; 88 + struct rtnl_link_stats64 *stats; 89 + 90 + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; 91 + stats = 
&mlxsw_sp_port->periodic_hw_stats.stats; 92 + 93 + mlxsw_sp_qdisc->tx_packets = stats->tx_packets; 94 + mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes; 95 + 96 + switch (mlxsw_sp_qdisc->type) { 97 + case MLXSW_SP_QDISC_RED: 98 + xstats_base->prob_mark = xstats->ecn; 99 + xstats_base->prob_drop = xstats->wred_drop[tclass_num]; 100 + xstats_base->pdrop = xstats->tail_drop[tclass_num]; 101 + 102 + mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop + 103 + xstats_base->prob_mark; 104 + mlxsw_sp_qdisc->drops = xstats_base->prob_drop + 105 + xstats_base->pdrop; 106 + break; 107 + default: 108 + break; 109 + } 110 + } 111 + 112 + static int 113 + mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, 114 + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 115 + int tclass_num) 116 + { 117 + int err; 118 + 119 + if (mlxsw_sp_qdisc->handle != handle) 120 + return 0; 121 + 122 + err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num); 123 + mlxsw_sp_qdisc->handle = TC_H_UNSPEC; 124 + mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC; 125 + 126 + return err; 127 + } 128 + 129 + static int 130 + mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, 131 + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 132 + int tclass_num, 133 + struct tc_red_qopt_offload_params *p) 134 + { 135 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 136 + u32 min, max; 137 + u64 prob; 138 + int err = 0; 139 + 140 + if (p->min > p->max) { 141 + dev_err(mlxsw_sp->bus_info->dev, 142 + "spectrum: RED: min %u is bigger then max %u\n", p->min, 143 + p->max); 144 + goto err_bad_param; 145 + } 146 + if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) { 147 + dev_err(mlxsw_sp->bus_info->dev, 148 + "spectrum: RED: max value %u is too big\n", p->max); 149 + goto err_bad_param; 150 + } 151 + if (p->min == 0 || p->max == 0) { 152 + dev_err(mlxsw_sp->bus_info->dev, 153 + "spectrum: RED: 0 value is illegal for min and max\n"); 154 + goto err_bad_param; 155 + } 156 + 
157 + /* calculate probability in percentage */ 158 + prob = p->probability; 159 + prob *= 100; 160 + prob = DIV_ROUND_UP(prob, 1 << 16); 161 + prob = DIV_ROUND_UP(prob, 1 << 16); 162 + min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min); 163 + max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max); 164 + err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, 165 + max, prob, p->is_ecn); 166 + if (err) 167 + goto err_config; 168 + 169 + mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED; 170 + if (mlxsw_sp_qdisc->handle != handle) 171 + mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port, 172 + mlxsw_sp_qdisc, 173 + tclass_num); 174 + 175 + mlxsw_sp_qdisc->handle = handle; 176 + return 0; 177 + 178 + err_bad_param: 179 + err = -EINVAL; 180 + err_config: 181 + mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle, 182 + mlxsw_sp_qdisc, tclass_num); 183 + return err; 184 + } 185 + 186 + static int 187 + mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, 188 + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 189 + int tclass_num, struct red_stats *res) 190 + { 191 + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; 192 + struct mlxsw_sp_port_xstats *xstats; 193 + 194 + if (mlxsw_sp_qdisc->handle != handle || 195 + mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) 196 + return -EOPNOTSUPP; 197 + 198 + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; 199 + 200 + res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; 201 + res->prob_mark = xstats->ecn - xstats_base->prob_mark; 202 + res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop; 203 + return 0; 204 + } 205 + 206 + static int 207 + mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, 208 + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, 209 + int tclass_num, 210 + struct tc_red_qopt_offload_stats *res) 211 + { 212 + u64 tx_bytes, tx_packets, overlimits, drops; 213 + struct mlxsw_sp_port_xstats *xstats; 214 + struct rtnl_link_stats64 *stats; 215 
+ 216 + if (mlxsw_sp_qdisc->handle != handle || 217 + mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) 218 + return -EOPNOTSUPP; 219 + 220 + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; 221 + stats = &mlxsw_sp_port->periodic_hw_stats.stats; 222 + 223 + tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes; 224 + tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets; 225 + overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - 226 + mlxsw_sp_qdisc->overlimits; 227 + drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - 228 + mlxsw_sp_qdisc->drops; 229 + 230 + _bstats_update(res->bstats, tx_bytes, tx_packets); 231 + res->qstats->overlimits += overlimits; 232 + res->qstats->drops += drops; 233 + res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, 234 + xstats->backlog[tclass_num]); 235 + 236 + mlxsw_sp_qdisc->drops += drops; 237 + mlxsw_sp_qdisc->overlimits += overlimits; 238 + mlxsw_sp_qdisc->tx_bytes += tx_bytes; 239 + mlxsw_sp_qdisc->tx_packets += tx_packets; 240 + return 0; 241 + } 242 + 243 + #define MLXSW_SP_PORT_DEFAULT_TCLASS 0 244 + 245 + int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, 246 + struct tc_red_qopt_offload *p) 247 + { 248 + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; 249 + int tclass_num; 250 + 251 + if (p->parent != TC_H_ROOT) 252 + return -EOPNOTSUPP; 253 + 254 + mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc; 255 + tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; 256 + 257 + switch (p->command) { 258 + case TC_RED_REPLACE: 259 + return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle, 260 + mlxsw_sp_qdisc, tclass_num, 261 + &p->set); 262 + case TC_RED_DESTROY: 263 + return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle, 264 + mlxsw_sp_qdisc, tclass_num); 265 + case TC_RED_XSTATS: 266 + return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle, 267 + mlxsw_sp_qdisc, tclass_num, 268 + p->xstats); 269 + case TC_RED_STATS: 270 + return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle, 
271 + mlxsw_sp_qdisc, tclass_num, 272 + &p->stats); 273 + default: 274 + return -EOPNOTSUPP; 275 + } 276 + }
+1 -1
drivers/net/ethernet/sfc/falcon/tx.c
··· 435 435 unsigned tc, num_tc; 436 436 int rc; 437 437 438 - if (type != TC_SETUP_MQPRIO) 438 + if (type != TC_SETUP_QDISC_MQPRIO) 439 439 return -EOPNOTSUPP; 440 440 441 441 num_tc = mqprio->num_tc;
+1 -1
drivers/net/ethernet/sfc/tx.c
··· 663 663 unsigned tc, num_tc; 664 664 int rc; 665 665 666 - if (type != TC_SETUP_MQPRIO) 666 + if (type != TC_SETUP_QDISC_MQPRIO) 667 667 return -EOPNOTSUPP; 668 668 669 669 num_tc = mqprio->num_tc;
+1 -1
drivers/net/ethernet/ti/netcp_core.c
··· 1887 1887 /* setup tc must be called under rtnl lock */ 1888 1888 ASSERT_RTNL(); 1889 1889 1890 - if (type != TC_SETUP_MQPRIO) 1890 + if (type != TC_SETUP_QDISC_MQPRIO) 1891 1891 return -EOPNOTSUPP; 1892 1892 1893 1893 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+3 -2
include/linux/netdevice.h
··· 770 770 struct sk_buff *skb); 771 771 772 772 enum tc_setup_type { 773 - TC_SETUP_MQPRIO, 773 + TC_SETUP_QDISC_MQPRIO, 774 774 TC_SETUP_CLSU32, 775 775 TC_SETUP_CLSFLOWER, 776 776 TC_SETUP_CLSMATCHALL, 777 777 TC_SETUP_CLSBPF, 778 778 TC_SETUP_BLOCK, 779 - TC_SETUP_CBS, 779 + TC_SETUP_QDISC_CBS, 780 + TC_SETUP_QDISC_RED, 780 781 }; 781 782 782 783 /* These structures hold the attributes of bpf state that are being passed
+30
include/net/pkt_cls.h
··· 703 703 u8 *data; 704 704 u32 len; 705 705 }; 706 + 707 + enum tc_red_command { 708 + TC_RED_REPLACE, 709 + TC_RED_DESTROY, 710 + TC_RED_STATS, 711 + TC_RED_XSTATS, 712 + }; 713 + 714 + struct tc_red_qopt_offload_params { 715 + u32 min; 716 + u32 max; 717 + u32 probability; 718 + bool is_ecn; 719 + }; 720 + struct tc_red_qopt_offload_stats { 721 + struct gnet_stats_basic_packed *bstats; 722 + struct gnet_stats_queue *qstats; 723 + }; 724 + 725 + struct tc_red_qopt_offload { 726 + enum tc_red_command command; 727 + u32 handle; 728 + u32 parent; 729 + union { 730 + struct tc_red_qopt_offload_params set; 731 + struct tc_red_qopt_offload_stats stats; 732 + struct red_stats *xstats; 733 + }; 734 + }; 735 + 706 736 #endif
+1
include/uapi/linux/pkt_sched.h
··· 256 256 #define TC_RED_ECN 1 257 257 #define TC_RED_HARDDROP 2 258 258 #define TC_RED_ADAPTATIVE 4 259 + #define TC_RED_OFFLOADED 8 259 260 }; 260 261 261 262 struct tc_red_xstats {
+2 -2
net/sched/sch_cbs.c
··· 212 212 cbs.queue = q->queue; 213 213 cbs.enable = 0; 214 214 215 - err = ops->ndo_setup_tc(dev, TC_SETUP_CBS, &cbs); 215 + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); 216 216 if (err < 0) 217 217 pr_warn("Couldn't disable CBS offload for queue %d\n", 218 218 cbs.queue); ··· 236 236 cbs.idleslope = opt->idleslope; 237 237 cbs.sendslope = opt->sendslope; 238 238 239 - err = ops->ndo_setup_tc(dev, TC_SETUP_CBS, &cbs); 239 + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); 240 240 if (err < 0) 241 241 return err; 242 242
+3 -2
net/sched/sch_mqprio.c
··· 50 50 switch (priv->mode) { 51 51 case TC_MQPRIO_MODE_DCB: 52 52 case TC_MQPRIO_MODE_CHANNEL: 53 - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, 53 + dev->netdev_ops->ndo_setup_tc(dev, 54 + TC_SETUP_QDISC_MQPRIO, 54 55 &mqprio); 55 56 break; 56 57 default: ··· 266 265 return -EINVAL; 267 266 } 268 267 err = dev->netdev_ops->ndo_setup_tc(dev, 269 - TC_SETUP_MQPRIO, 268 + TC_SETUP_QDISC_MQPRIO, 270 269 &mqprio); 271 270 if (err) 272 271 return err;
+79
net/sched/sch_red.c
··· 19 19 #include <linux/kernel.h> 20 20 #include <linux/skbuff.h> 21 21 #include <net/pkt_sched.h> 22 + #include <net/pkt_cls.h> 22 23 #include <net/inet_ecn.h> 23 24 #include <net/red.h> 24 25 ··· 149 148 red_restart(&q->vars); 150 149 } 151 150 151 + static int red_offload(struct Qdisc *sch, bool enable) 152 + { 153 + struct red_sched_data *q = qdisc_priv(sch); 154 + struct net_device *dev = qdisc_dev(sch); 155 + struct tc_red_qopt_offload opt = { 156 + .handle = sch->handle, 157 + .parent = sch->parent, 158 + }; 159 + 160 + if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 161 + return -EOPNOTSUPP; 162 + 163 + if (enable) { 164 + opt.command = TC_RED_REPLACE; 165 + opt.set.min = q->parms.qth_min >> q->parms.Wlog; 166 + opt.set.max = q->parms.qth_max >> q->parms.Wlog; 167 + opt.set.probability = q->parms.max_P; 168 + opt.set.is_ecn = red_use_ecn(q); 169 + } else { 170 + opt.command = TC_RED_DESTROY; 171 + } 172 + 173 + return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); 174 + } 175 + 152 176 static void red_destroy(struct Qdisc *sch) 153 177 { 154 178 struct red_sched_data *q = qdisc_priv(sch); 155 179 156 180 del_timer_sync(&q->adapt_timer); 181 + red_offload(sch, false); 157 182 qdisc_destroy(q->qdisc); 158 183 } 159 184 ··· 246 219 red_start_of_idle_period(&q->vars); 247 220 248 221 sch_tree_unlock(sch); 222 + red_offload(sch, true); 249 223 return 0; 250 224 } 251 225 ··· 272 244 return red_change(sch, opt); 273 245 } 274 246 247 + static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt) 248 + { 249 + struct net_device *dev = qdisc_dev(sch); 250 + struct tc_red_qopt_offload hw_stats = { 251 + .handle = sch->handle, 252 + .parent = sch->parent, 253 + .command = TC_RED_STATS, 254 + .stats.bstats = &sch->bstats, 255 + .stats.qstats = &sch->qstats, 256 + }; 257 + int err; 258 + 259 + opt->flags &= ~TC_RED_OFFLOADED; 260 + if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 261 + return 0; 262 + 263 + err = 
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, 264 + &hw_stats); 265 + if (err == -EOPNOTSUPP) 266 + return 0; 267 + 268 + if (!err) 269 + opt->flags |= TC_RED_OFFLOADED; 270 + 271 + return err; 272 + } 273 + 275 274 static int red_dump(struct Qdisc *sch, struct sk_buff *skb) 276 275 { 277 276 struct red_sched_data *q = qdisc_priv(sch); ··· 312 257 .Plog = q->parms.Plog, 313 258 .Scell_log = q->parms.Scell_log, 314 259 }; 260 + int err; 315 261 316 262 sch->qstats.backlog = q->qdisc->qstats.backlog; 263 + err = red_dump_offload(sch, &opt); 264 + if (err) 265 + goto nla_put_failure; 266 + 317 267 opts = nla_nest_start(skb, TCA_OPTIONS); 318 268 if (opts == NULL) 319 269 goto nla_put_failure; ··· 335 275 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) 336 276 { 337 277 struct red_sched_data *q = qdisc_priv(sch); 278 + struct net_device *dev = qdisc_dev(sch); 338 279 struct tc_red_xstats st = { 339 280 .early = q->stats.prob_drop + q->stats.forced_drop, 340 281 .pdrop = q->stats.pdrop, 341 282 .other = q->stats.other, 342 283 .marked = q->stats.prob_mark + q->stats.forced_mark, 343 284 }; 285 + 286 + if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc) { 287 + struct red_stats hw_stats = {0}; 288 + struct tc_red_qopt_offload hw_stats_request = { 289 + .handle = sch->handle, 290 + .parent = sch->parent, 291 + .command = TC_RED_XSTATS, 292 + .xstats = &hw_stats, 293 + }; 294 + if (!dev->netdev_ops->ndo_setup_tc(dev, 295 + TC_SETUP_QDISC_RED, 296 + &hw_stats_request)) { 297 + st.early += hw_stats.prob_drop + hw_stats.forced_drop; 298 + st.pdrop += hw_stats.pdrop; 299 + st.other += hw_stats.other; 300 + st.marked += hw_stats.prob_mark + hw_stats.forced_mark; 301 + } 302 + } 344 303 345 304 return gnet_stats_copy_app(d, &st, sizeof(st)); 346 305 }