Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlxsw-Add-support-for-buffer-drops-mirroring'

Petr Machata says:

====================
mlxsw: Add support for buffer drops mirroring

This set offloads the recently introduced qevent infrastructure in TC and
allows mlxsw to support mirroring of packets that were dropped due to
buffer related reasons (e.g., early drops) during forwarding.

Up until now mlxsw only supported mirroring that was either triggered by
per-port triggers (i.e., via matchall) or by the policy engine (i.e.,
via flower). Packets that are dropped due to buffer related reasons are
mirrored using a third type of trigger, a global trigger.

Global triggers are bound once to a mirroring (SPAN) agent and enabled
on a per-{port, TC} basis. This allows users, for example, to request
that only packets that were early dropped on a specific netdev be
mirrored.

Patch set overview:

Patch #1 extends flow_block_offload and the indirect offload structure to
pass a scheduler (Qdisc) in addition to a netdevice. That is necessary
because binding type and netdevice no longer uniquely identify the block.

Patches #2-#3 add the required registers to support above mentioned
functionality.

Patches #4-#6 gradually add support for global mirroring triggers.

Patch #7 adds support for enablement of global mirroring triggers.

Patches #8-#11 are cleanups in the flow offload code and shuffle some
code around to make the qevent offload easier.

Patch #12 implements offload of RED early_drop qevent.

Patch #13 extends the RED selftest for offloaded datapath to cover
early_drop qevent.

v2:
- Patch #1:
- In struct flow_block_indr, track both sch and dev.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1179 -142
+4 -5
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
··· 1888 1888 kfree(priv); 1889 1889 } 1890 1890 1891 - static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, 1891 + static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp, 1892 1892 struct flow_block_offload *f, void *data, 1893 1893 void (*cleanup)(struct flow_block_cb *block_cb)) 1894 1894 { ··· 1911 1911 block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, 1912 1912 cb_priv, cb_priv, 1913 1913 bnxt_tc_setup_indr_rel, f, 1914 - netdev, data, bp, cleanup); 1914 + netdev, sch, data, bp, cleanup); 1915 1915 if (IS_ERR(block_cb)) { 1916 1916 list_del(&cb_priv->list); 1917 1917 kfree(cb_priv); ··· 1946 1946 return netif_is_vxlan(netdev); 1947 1947 } 1948 1948 1949 - static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, 1949 + static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, 1950 1950 enum tc_setup_type type, void *type_data, 1951 1951 void *data, 1952 1952 void (*cleanup)(struct flow_block_cb *block_cb)) ··· 1956 1956 1957 1957 switch (type) { 1958 1958 case TC_SETUP_BLOCK: 1959 - return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data, 1960 - cleanup); 1959 + return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup); 1961 1960 default: 1962 1961 break; 1963 1962 }
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
··· 404 404 static LIST_HEAD(mlx5e_block_cb_list); 405 405 406 406 static int 407 - mlx5e_rep_indr_setup_block(struct net_device *netdev, 407 + mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, 408 408 struct mlx5e_rep_priv *rpriv, 409 409 struct flow_block_offload *f, 410 410 flow_setup_cb_t *setup_cb, ··· 442 442 443 443 block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv, 444 444 mlx5e_rep_indr_block_unbind, 445 - f, netdev, data, rpriv, 445 + f, netdev, sch, data, rpriv, 446 446 cleanup); 447 447 if (IS_ERR(block_cb)) { 448 448 list_del(&indr_priv->list); ··· 472 472 } 473 473 474 474 static 475 - int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv, 475 + int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, 476 476 enum tc_setup_type type, void *type_data, 477 477 void *data, 478 478 void (*cleanup)(struct flow_block_cb *block_cb)) 479 479 { 480 480 switch (type) { 481 481 case TC_SETUP_BLOCK: 482 - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, 482 + return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, 483 483 mlx5e_rep_indr_setup_tc_cb, 484 484 data, cleanup); 485 485 case TC_SETUP_FT: 486 - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, 486 + return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, 487 487 mlx5e_rep_indr_setup_ft_cb, 488 488 data, cleanup); 489 489 default:
+102
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 9502 9502 */ 9503 9503 MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1); 9504 9504 9505 + /* MPAGR - Monitoring Port Analyzer Global Register 9506 + * ------------------------------------------------ 9507 + * This register is used for global port analyzer configurations. 9508 + * Note: This register is not supported by current FW versions for Spectrum-1. 9509 + */ 9510 + #define MLXSW_REG_MPAGR_ID 0x9089 9511 + #define MLXSW_REG_MPAGR_LEN 0x0C 9512 + 9513 + MLXSW_REG_DEFINE(mpagr, MLXSW_REG_MPAGR_ID, MLXSW_REG_MPAGR_LEN); 9514 + 9515 + enum mlxsw_reg_mpagr_trigger { 9516 + MLXSW_REG_MPAGR_TRIGGER_EGRESS, 9517 + MLXSW_REG_MPAGR_TRIGGER_INGRESS, 9518 + MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED, 9519 + MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER, 9520 + MLXSW_REG_MPAGR_TRIGGER_INGRESS_ING_CONG, 9521 + MLXSW_REG_MPAGR_TRIGGER_INGRESS_EGR_CONG, 9522 + MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN, 9523 + MLXSW_REG_MPAGR_TRIGGER_EGRESS_HIGH_LATENCY, 9524 + }; 9525 + 9526 + /* reg_mpagr_trigger 9527 + * Mirror trigger. 9528 + * Access: Index 9529 + */ 9530 + MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4); 9531 + 9532 + /* reg_mpagr_pa_id 9533 + * Port analyzer ID. 9534 + * Access: RW 9535 + */ 9536 + MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4); 9537 + 9538 + /* reg_mpagr_probability_rate 9539 + * Sampling rate. 9540 + * Valid values are: 1 to 3.5*10^9 9541 + * Value of 1 means "sample all". Default is 1. 
9542 + * Access: RW 9543 + */ 9544 + MLXSW_ITEM32(reg, mpagr, probability_rate, 0x08, 0, 32); 9545 + 9546 + static inline void mlxsw_reg_mpagr_pack(char *payload, 9547 + enum mlxsw_reg_mpagr_trigger trigger, 9548 + u8 pa_id, u32 probability_rate) 9549 + { 9550 + MLXSW_REG_ZERO(mpagr, payload); 9551 + mlxsw_reg_mpagr_trigger_set(payload, trigger); 9552 + mlxsw_reg_mpagr_pa_id_set(payload, pa_id); 9553 + mlxsw_reg_mpagr_probability_rate_set(payload, probability_rate); 9554 + } 9555 + 9556 + /* MOMTE - Monitoring Mirror Trigger Enable Register 9557 + * ------------------------------------------------- 9558 + * This register is used to configure the mirror enable for different mirror 9559 + * reasons. 9560 + */ 9561 + #define MLXSW_REG_MOMTE_ID 0x908D 9562 + #define MLXSW_REG_MOMTE_LEN 0x10 9563 + 9564 + MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN); 9565 + 9566 + /* reg_momte_local_port 9567 + * Local port number. 9568 + * Access: Index 9569 + */ 9570 + MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8); 9571 + 9572 + enum mlxsw_reg_momte_type { 9573 + MLXSW_REG_MOMTE_TYPE_WRED = 0x20, 9574 + MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS = 0x31, 9575 + MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS_DESCRIPTORS = 0x32, 9576 + MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_EGRESS_PORT = 0x33, 9577 + MLXSW_REG_MOMTE_TYPE_ING_CONG = 0x40, 9578 + MLXSW_REG_MOMTE_TYPE_EGR_CONG = 0x50, 9579 + MLXSW_REG_MOMTE_TYPE_ECN = 0x60, 9580 + MLXSW_REG_MOMTE_TYPE_HIGH_LATENCY = 0x70, 9581 + }; 9582 + 9583 + /* reg_momte_type 9584 + * Type of mirroring. 9585 + * Access: Index 9586 + */ 9587 + MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8); 9588 + 9589 + /* reg_momte_tclass_en 9590 + * TClass/PG mirror enable. Each bit represents corresponding tclass. 
9591 + * 0: disable (default) 9592 + * 1: enable 9593 + * Access: RW 9594 + */ 9595 + MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1); 9596 + 9597 + static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port, 9598 + enum mlxsw_reg_momte_type type) 9599 + { 9600 + MLXSW_REG_ZERO(momte, payload); 9601 + mlxsw_reg_momte_local_port_set(payload, local_port); 9602 + mlxsw_reg_momte_type_set(payload, type); 9603 + } 9604 + 9505 9605 /* MTPPPC - Time Precision Packet Port Configuration 9506 9606 * ------------------------------------------------- 9507 9607 * This register serves for configuration of which PTP messages should be ··· 10953 10853 MLXSW_REG(mgpc), 10954 10854 MLXSW_REG(mprs), 10955 10855 MLXSW_REG(mogcr), 10856 + MLXSW_REG(mpagr), 10857 + MLXSW_REG(momte), 10956 10858 MLXSW_REG(mtpppc), 10957 10859 MLXSW_REG(mtpptr), 10958 10860 MLXSW_REG(mtptpt),
+15 -50
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 175 175 struct mlxsw_sp *mlxsw_sp; 176 176 }; 177 177 178 - struct mlxsw_sp_span_ops { 179 - u32 (*buffsize_get)(int mtu, u32 speed); 180 - }; 181 - 182 178 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 183 179 u16 component_index, u32 *p_max_size, 184 180 u8 *p_align_bits, u16 *p_max_write_size) ··· 1327 1331 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1328 1332 1329 1333 return 0; 1334 + } 1335 + 1336 + static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1337 + struct flow_block_offload *f) 1338 + { 1339 + switch (f->binder_type) { 1340 + case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: 1341 + return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true); 1342 + case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: 1343 + return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); 1344 + case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP: 1345 + return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f); 1346 + default: 1347 + return -EOPNOTSUPP; 1348 + } 1330 1349 } 1331 1350 1332 1351 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, ··· 2822 2811 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2823 2812 .get_stats = mlxsw_sp2_get_stats, 2824 2813 }; 2825 - 2826 - static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) 2827 - { 2828 - return mtu * 5 / 2; 2829 - } 2830 - 2831 - static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 2832 - .buffsize_get = mlxsw_sp1_span_buffsize_get, 2833 - }; 2834 - 2835 - #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 2836 - #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 2837 - 2838 - static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) 2839 - { 2840 - return 3 * mtu + buffer_factor * speed / 1000; 2841 - } 2842 - 2843 - static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 2844 - { 2845 - int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; 2846 - 2847 - return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 2848 - 
} 2849 - 2850 - static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 2851 - .buffsize_get = mlxsw_sp2_span_buffsize_get, 2852 - }; 2853 - 2854 - static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 2855 - { 2856 - int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 2857 - 2858 - return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 2859 - } 2860 - 2861 - static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 2862 - .buffsize_get = mlxsw_sp3_span_buffsize_get, 2863 - }; 2864 - 2865 - u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 2866 - { 2867 - u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 2868 - 2869 - return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 2870 - } 2871 2814 2872 2815 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2873 2816 unsigned long event, void *ptr);
+29 -4
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 539 539 unsigned int *p_counter_index); 540 540 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 541 541 unsigned int counter_index); 542 - u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed); 543 542 bool mlxsw_sp_port_dev_check(const struct net_device *dev); 544 543 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev); 545 544 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); ··· 710 711 711 712 struct mlxsw_sp_flow_block_binding { 712 713 struct list_head list; 713 - struct net_device *dev; 714 714 struct mlxsw_sp_port *mlxsw_sp_port; 715 715 bool ingress; 716 716 }; ··· 767 769 struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, 768 770 struct net *net); 769 771 void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block); 770 - int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 771 - struct flow_block_offload *f); 772 + int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port, 773 + struct flow_block_offload *f, 774 + bool ingress); 772 775 773 776 /* spectrum_acl.c */ 774 777 struct mlxsw_sp_acl_ruleset; ··· 961 962 extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; 962 963 963 964 /* spectrum_matchall.c */ 965 + enum mlxsw_sp_mall_action_type { 966 + MLXSW_SP_MALL_ACTION_TYPE_MIRROR, 967 + MLXSW_SP_MALL_ACTION_TYPE_SAMPLE, 968 + MLXSW_SP_MALL_ACTION_TYPE_TRAP, 969 + }; 970 + 971 + struct mlxsw_sp_mall_mirror_entry { 972 + const struct net_device *to_dev; 973 + int span_id; 974 + }; 975 + 976 + struct mlxsw_sp_mall_entry { 977 + struct list_head list; 978 + unsigned long cookie; 979 + unsigned int priority; 980 + enum mlxsw_sp_mall_action_type type; 981 + bool ingress; 982 + union { 983 + struct mlxsw_sp_mall_mirror_entry mirror; 984 + struct mlxsw_sp_port_sample sample; 985 + }; 986 + struct rcu_head rcu; 987 + }; 988 + 964 989 int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, 965 990 struct mlxsw_sp_flow_block 
*block, 966 991 struct tc_cls_matchall_offload *f); ··· 1031 1008 struct tc_tbf_qopt_offload *p); 1032 1009 int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, 1033 1010 struct tc_fifo_qopt_offload *p); 1011 + int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, 1012 + struct flow_block_offload *f); 1034 1013 1035 1014 /* spectrum_fid.c */ 1036 1015 bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
+4 -14
drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
··· 219 219 mlxsw_sp_tc_block_release); 220 220 if (IS_ERR(block_cb)) { 221 221 mlxsw_sp_flow_block_destroy(flow_block); 222 - err = PTR_ERR(block_cb); 223 - goto err_cb_register; 222 + return PTR_ERR(block_cb); 224 223 } 225 224 register_block = true; 226 225 } else { ··· 246 247 err_block_bind: 247 248 if (!flow_block_cb_decref(block_cb)) 248 249 flow_block_cb_free(block_cb); 249 - err_cb_register: 250 250 return err; 251 251 } 252 252 ··· 277 279 } 278 280 } 279 281 280 - int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 281 - struct flow_block_offload *f) 282 + int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port, 283 + struct flow_block_offload *f, 284 + bool ingress) 282 285 { 283 - bool ingress; 284 - 285 - if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 286 - ingress = true; 287 - else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) 288 - ingress = false; 289 - else 290 - return -EOPNOTSUPP; 291 - 292 286 f->driver_block_list = &mlxsw_sp_block_cb_list; 293 287 294 288 switch (f->command) {
-23
drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
··· 10 10 #include "spectrum_span.h" 11 11 #include "reg.h" 12 12 13 - enum mlxsw_sp_mall_action_type { 14 - MLXSW_SP_MALL_ACTION_TYPE_MIRROR, 15 - MLXSW_SP_MALL_ACTION_TYPE_SAMPLE, 16 - }; 17 - 18 - struct mlxsw_sp_mall_mirror_entry { 19 - const struct net_device *to_dev; 20 - int span_id; 21 - }; 22 - 23 - struct mlxsw_sp_mall_entry { 24 - struct list_head list; 25 - unsigned long cookie; 26 - unsigned int priority; 27 - enum mlxsw_sp_mall_action_type type; 28 - bool ingress; 29 - union { 30 - struct mlxsw_sp_mall_mirror_entry mirror; 31 - struct mlxsw_sp_port_sample sample; 32 - }; 33 - struct rcu_head rcu; 34 - }; 35 - 36 13 static struct mlxsw_sp_mall_entry * 37 14 mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie) 38 15 {
+472
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
··· 8 8 #include <net/red.h> 9 9 10 10 #include "spectrum.h" 11 + #include "spectrum_span.h" 11 12 #include "reg.h" 12 13 13 14 #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1) ··· 1271 1270 default: 1272 1271 return -EOPNOTSUPP; 1273 1272 } 1273 + } 1274 + 1275 + struct mlxsw_sp_qevent_block { 1276 + struct list_head binding_list; 1277 + struct list_head mall_entry_list; 1278 + struct mlxsw_sp *mlxsw_sp; 1279 + }; 1280 + 1281 + struct mlxsw_sp_qevent_binding { 1282 + struct list_head list; 1283 + struct mlxsw_sp_port *mlxsw_sp_port; 1284 + u32 handle; 1285 + int tclass_num; 1286 + enum mlxsw_sp_span_trigger span_trigger; 1287 + }; 1288 + 1289 + static LIST_HEAD(mlxsw_sp_qevent_block_cb_list); 1290 + 1291 + static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, 1292 + struct mlxsw_sp_mall_entry *mall_entry, 1293 + struct mlxsw_sp_qevent_binding *qevent_binding) 1294 + { 1295 + struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; 1296 + struct mlxsw_sp_span_trigger_parms trigger_parms = {}; 1297 + int span_id; 1298 + int err; 1299 + 1300 + err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, &span_id); 1301 + if (err) 1302 + return err; 1303 + 1304 + err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true); 1305 + if (err) 1306 + goto err_analyzed_port_get; 1307 + 1308 + trigger_parms.span_id = span_id; 1309 + err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, 1310 + &trigger_parms); 1311 + if (err) 1312 + goto err_agent_bind; 1313 + 1314 + err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger, 1315 + qevent_binding->tclass_num); 1316 + if (err) 1317 + goto err_trigger_enable; 1318 + 1319 + mall_entry->mirror.span_id = span_id; 1320 + return 0; 1321 + 1322 + err_trigger_enable: 1323 + mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, 1324 + &trigger_parms); 1325 + err_agent_bind: 1326 + 
mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); 1327 + err_analyzed_port_get: 1328 + mlxsw_sp_span_agent_put(mlxsw_sp, span_id); 1329 + return err; 1330 + } 1331 + 1332 + static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp, 1333 + struct mlxsw_sp_mall_entry *mall_entry, 1334 + struct mlxsw_sp_qevent_binding *qevent_binding) 1335 + { 1336 + struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; 1337 + struct mlxsw_sp_span_trigger_parms trigger_parms = { 1338 + .span_id = mall_entry->mirror.span_id, 1339 + }; 1340 + 1341 + mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger, 1342 + qevent_binding->tclass_num); 1343 + mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, 1344 + &trigger_parms); 1345 + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); 1346 + mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id); 1347 + } 1348 + 1349 + static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, 1350 + struct mlxsw_sp_mall_entry *mall_entry, 1351 + struct mlxsw_sp_qevent_binding *qevent_binding) 1352 + { 1353 + switch (mall_entry->type) { 1354 + case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: 1355 + return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding); 1356 + default: 1357 + /* This should have been validated away. 
*/ 1358 + WARN_ON(1); 1359 + return -EOPNOTSUPP; 1360 + } 1361 + } 1362 + 1363 + static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp, 1364 + struct mlxsw_sp_mall_entry *mall_entry, 1365 + struct mlxsw_sp_qevent_binding *qevent_binding) 1366 + { 1367 + switch (mall_entry->type) { 1368 + case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: 1369 + return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding); 1370 + default: 1371 + WARN_ON(1); 1372 + return; 1373 + } 1374 + } 1375 + 1376 + static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block, 1377 + struct mlxsw_sp_qevent_binding *qevent_binding) 1378 + { 1379 + struct mlxsw_sp_mall_entry *mall_entry; 1380 + int err; 1381 + 1382 + list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) { 1383 + err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry, 1384 + qevent_binding); 1385 + if (err) 1386 + goto err_entry_configure; 1387 + } 1388 + 1389 + return 0; 1390 + 1391 + err_entry_configure: 1392 + list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list) 1393 + mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry, 1394 + qevent_binding); 1395 + return err; 1396 + } 1397 + 1398 + static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block, 1399 + struct mlxsw_sp_qevent_binding *qevent_binding) 1400 + { 1401 + struct mlxsw_sp_mall_entry *mall_entry; 1402 + 1403 + list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) 1404 + mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry, 1405 + qevent_binding); 1406 + } 1407 + 1408 + static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block) 1409 + { 1410 + struct mlxsw_sp_qevent_binding *qevent_binding; 1411 + int err; 1412 + 1413 + list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) { 1414 + err = 
mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); 1415 + if (err) 1416 + goto err_binding_configure; 1417 + } 1418 + 1419 + return 0; 1420 + 1421 + err_binding_configure: 1422 + list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list) 1423 + mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding); 1424 + return err; 1425 + } 1426 + 1427 + static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block) 1428 + { 1429 + struct mlxsw_sp_qevent_binding *qevent_binding; 1430 + 1431 + list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) 1432 + mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding); 1433 + } 1434 + 1435 + static struct mlxsw_sp_mall_entry * 1436 + mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie) 1437 + { 1438 + struct mlxsw_sp_mall_entry *mall_entry; 1439 + 1440 + list_for_each_entry(mall_entry, &block->mall_entry_list, list) 1441 + if (mall_entry->cookie == cookie) 1442 + return mall_entry; 1443 + 1444 + return NULL; 1445 + } 1446 + 1447 + static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp, 1448 + struct mlxsw_sp_qevent_block *qevent_block, 1449 + struct tc_cls_matchall_offload *f) 1450 + { 1451 + struct mlxsw_sp_mall_entry *mall_entry; 1452 + struct flow_action_entry *act; 1453 + int err; 1454 + 1455 + /* It should not currently be possible to replace a matchall rule. So 1456 + * this must be a new rule. 
1457 + */ 1458 + if (!list_empty(&qevent_block->mall_entry_list)) { 1459 + NL_SET_ERR_MSG(f->common.extack, "At most one filter supported"); 1460 + return -EOPNOTSUPP; 1461 + } 1462 + if (f->rule->action.num_entries != 1) { 1463 + NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported"); 1464 + return -EOPNOTSUPP; 1465 + } 1466 + if (f->common.chain_index) { 1467 + NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported"); 1468 + return -EOPNOTSUPP; 1469 + } 1470 + if (f->common.protocol != htons(ETH_P_ALL)) { 1471 + NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported"); 1472 + return -EOPNOTSUPP; 1473 + } 1474 + 1475 + act = &f->rule->action.entries[0]; 1476 + if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) { 1477 + NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents"); 1478 + return -EOPNOTSUPP; 1479 + } 1480 + 1481 + mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL); 1482 + if (!mall_entry) 1483 + return -ENOMEM; 1484 + mall_entry->cookie = f->cookie; 1485 + 1486 + if (act->id == FLOW_ACTION_MIRRED) { 1487 + mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR; 1488 + mall_entry->mirror.to_dev = act->dev; 1489 + } else { 1490 + NL_SET_ERR_MSG(f->common.extack, "Unsupported action"); 1491 + err = -EOPNOTSUPP; 1492 + goto err_unsupported_action; 1493 + } 1494 + 1495 + list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list); 1496 + 1497 + err = mlxsw_sp_qevent_block_configure(qevent_block); 1498 + if (err) 1499 + goto err_block_configure; 1500 + 1501 + return 0; 1502 + 1503 + err_block_configure: 1504 + list_del(&mall_entry->list); 1505 + err_unsupported_action: 1506 + kfree(mall_entry); 1507 + return err; 1508 + } 1509 + 1510 + static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block, 1511 + struct tc_cls_matchall_offload *f) 1512 + { 1513 + struct mlxsw_sp_mall_entry *mall_entry; 1514 + 1515 + mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie); 1516 + 
if (!mall_entry) 1517 + return; 1518 + 1519 + mlxsw_sp_qevent_block_deconfigure(qevent_block); 1520 + 1521 + list_del(&mall_entry->list); 1522 + kfree(mall_entry); 1523 + } 1524 + 1525 + static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block, 1526 + struct tc_cls_matchall_offload *f) 1527 + { 1528 + struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp; 1529 + 1530 + switch (f->command) { 1531 + case TC_CLSMATCHALL_REPLACE: 1532 + return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f); 1533 + case TC_CLSMATCHALL_DESTROY: 1534 + mlxsw_sp_qevent_mall_destroy(qevent_block, f); 1535 + return 0; 1536 + default: 1537 + return -EOPNOTSUPP; 1538 + } 1539 + } 1540 + 1541 + static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) 1542 + { 1543 + struct mlxsw_sp_qevent_block *qevent_block = cb_priv; 1544 + 1545 + switch (type) { 1546 + case TC_SETUP_CLSMATCHALL: 1547 + return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data); 1548 + default: 1549 + return -EOPNOTSUPP; 1550 + } 1551 + } 1552 + 1553 + static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp, 1554 + struct net *net) 1555 + { 1556 + struct mlxsw_sp_qevent_block *qevent_block; 1557 + 1558 + qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL); 1559 + if (!qevent_block) 1560 + return NULL; 1561 + 1562 + INIT_LIST_HEAD(&qevent_block->binding_list); 1563 + INIT_LIST_HEAD(&qevent_block->mall_entry_list); 1564 + qevent_block->mlxsw_sp = mlxsw_sp; 1565 + return qevent_block; 1566 + } 1567 + 1568 + static void 1569 + mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block) 1570 + { 1571 + WARN_ON(!list_empty(&qevent_block->binding_list)); 1572 + WARN_ON(!list_empty(&qevent_block->mall_entry_list)); 1573 + kfree(qevent_block); 1574 + } 1575 + 1576 + static void mlxsw_sp_qevent_block_release(void *cb_priv) 1577 + { 1578 + struct mlxsw_sp_qevent_block *qevent_block = cb_priv; 1579 + 1580 + 
mlxsw_sp_qevent_block_destroy(qevent_block); 1581 + } 1582 + 1583 + static struct mlxsw_sp_qevent_binding * 1584 + mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num, 1585 + enum mlxsw_sp_span_trigger span_trigger) 1586 + { 1587 + struct mlxsw_sp_qevent_binding *binding; 1588 + 1589 + binding = kzalloc(sizeof(*binding), GFP_KERNEL); 1590 + if (!binding) 1591 + return ERR_PTR(-ENOMEM); 1592 + 1593 + binding->mlxsw_sp_port = mlxsw_sp_port; 1594 + binding->handle = handle; 1595 + binding->tclass_num = tclass_num; 1596 + binding->span_trigger = span_trigger; 1597 + return binding; 1598 + } 1599 + 1600 + static void 1601 + mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding) 1602 + { 1603 + kfree(binding); 1604 + } 1605 + 1606 + static struct mlxsw_sp_qevent_binding * 1607 + mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block, 1608 + struct mlxsw_sp_port *mlxsw_sp_port, 1609 + u32 handle, 1610 + enum mlxsw_sp_span_trigger span_trigger) 1611 + { 1612 + struct mlxsw_sp_qevent_binding *qevent_binding; 1613 + 1614 + list_for_each_entry(qevent_binding, &block->binding_list, list) 1615 + if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port && 1616 + qevent_binding->handle == handle && 1617 + qevent_binding->span_trigger == span_trigger) 1618 + return qevent_binding; 1619 + return NULL; 1620 + } 1621 + 1622 + static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1623 + struct flow_block_offload *f, 1624 + enum mlxsw_sp_span_trigger span_trigger) 1625 + { 1626 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1627 + struct mlxsw_sp_qevent_binding *qevent_binding; 1628 + struct mlxsw_sp_qevent_block *qevent_block; 1629 + struct flow_block_cb *block_cb; 1630 + struct mlxsw_sp_qdisc *qdisc; 1631 + bool register_block = false; 1632 + int err; 1633 + 1634 + block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp); 1635 + if (!block_cb) { 1636 + 
qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net); 1637 + if (!qevent_block) 1638 + return -ENOMEM; 1639 + block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block, 1640 + mlxsw_sp_qevent_block_release); 1641 + if (IS_ERR(block_cb)) { 1642 + mlxsw_sp_qevent_block_destroy(qevent_block); 1643 + return PTR_ERR(block_cb); 1644 + } 1645 + register_block = true; 1646 + } else { 1647 + qevent_block = flow_block_cb_priv(block_cb); 1648 + } 1649 + flow_block_cb_incref(block_cb); 1650 + 1651 + qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle); 1652 + if (!qdisc) { 1653 + NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded"); 1654 + err = -ENOENT; 1655 + goto err_find_qdisc; 1656 + } 1657 + 1658 + if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle, 1659 + span_trigger))) { 1660 + err = -EEXIST; 1661 + goto err_binding_exists; 1662 + } 1663 + 1664 + qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle, 1665 + qdisc->tclass_num, span_trigger); 1666 + if (IS_ERR(qevent_binding)) { 1667 + err = PTR_ERR(qevent_binding); 1668 + goto err_binding_create; 1669 + } 1670 + 1671 + err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); 1672 + if (err) 1673 + goto err_binding_configure; 1674 + 1675 + list_add(&qevent_binding->list, &qevent_block->binding_list); 1676 + 1677 + if (register_block) { 1678 + flow_block_cb_add(block_cb, f); 1679 + list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list); 1680 + } 1681 + 1682 + return 0; 1683 + 1684 + err_binding_configure: 1685 + mlxsw_sp_qevent_binding_destroy(qevent_binding); 1686 + err_binding_create: 1687 + err_binding_exists: 1688 + err_find_qdisc: 1689 + if (!flow_block_cb_decref(block_cb)) 1690 + flow_block_cb_free(block_cb); 1691 + return err; 1692 + } 1693 + 1694 + static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1695 + struct flow_block_offload *f, 1696 + enum 
mlxsw_sp_span_trigger span_trigger) 1697 + { 1698 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1699 + struct mlxsw_sp_qevent_binding *qevent_binding; 1700 + struct mlxsw_sp_qevent_block *qevent_block; 1701 + struct flow_block_cb *block_cb; 1702 + 1703 + block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp); 1704 + if (!block_cb) 1705 + return; 1706 + qevent_block = flow_block_cb_priv(block_cb); 1707 + 1708 + qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle, 1709 + span_trigger); 1710 + if (!qevent_binding) 1711 + return; 1712 + 1713 + list_del(&qevent_binding->list); 1714 + mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding); 1715 + mlxsw_sp_qevent_binding_destroy(qevent_binding); 1716 + 1717 + if (!flow_block_cb_decref(block_cb)) { 1718 + flow_block_cb_remove(block_cb, f); 1719 + list_del(&block_cb->driver_list); 1720 + } 1721 + } 1722 + 1723 + static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, 1724 + struct flow_block_offload *f, 1725 + enum mlxsw_sp_span_trigger span_trigger) 1726 + { 1727 + f->driver_block_list = &mlxsw_sp_qevent_block_cb_list; 1728 + 1729 + switch (f->command) { 1730 + case FLOW_BLOCK_BIND: 1731 + return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger); 1732 + case FLOW_BLOCK_UNBIND: 1733 + mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger); 1734 + return 0; 1735 + default: 1736 + return -EOPNOTSUPP; 1737 + } 1738 + } 1739 + 1740 + int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, 1741 + struct flow_block_offload *f) 1742 + { 1743 + return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP); 1274 1744 } 1275 1745 1276 1746 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+381 -16
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
··· 21 21 struct mlxsw_sp_span { 22 22 struct work_struct work; 23 23 struct mlxsw_sp *mlxsw_sp; 24 + const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr; 24 25 struct list_head analyzed_ports_list; 25 26 struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */ 26 27 struct list_head trigger_entries_list; ··· 39 38 40 39 struct mlxsw_sp_span_trigger_entry { 41 40 struct list_head list; /* Member of trigger_entries_list */ 41 + struct mlxsw_sp_span *span; 42 + const struct mlxsw_sp_span_trigger_ops *ops; 42 43 refcount_t ref_count; 43 44 u8 local_port; 44 45 enum mlxsw_sp_span_trigger trigger; 45 46 struct mlxsw_sp_span_trigger_parms parms; 47 + }; 48 + 49 + enum mlxsw_sp_span_trigger_type { 50 + MLXSW_SP_SPAN_TRIGGER_TYPE_PORT, 51 + MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL, 52 + }; 53 + 54 + struct mlxsw_sp_span_trigger_ops { 55 + int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry); 56 + void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry); 57 + bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry, 58 + enum mlxsw_sp_span_trigger trigger, 59 + struct mlxsw_sp_port *mlxsw_sp_port); 60 + int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry, 61 + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc); 62 + void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry, 63 + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc); 46 64 }; 47 65 48 66 static void mlxsw_sp_span_respin_work(struct work_struct *work); ··· 77 57 { 78 58 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); 79 59 struct mlxsw_sp_span *span; 80 - int i, entries_count; 60 + int i, entries_count, err; 81 61 82 62 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) 83 63 return -EIO; ··· 97 77 for (i = 0; i < mlxsw_sp->span->entries_count; i++) 98 78 mlxsw_sp->span->entries[i].id = i; 99 79 80 + err = mlxsw_sp->span_ops->init(mlxsw_sp); 81 + if (err) 82 + goto err_init; 83 + 100 84 devlink_resource_occ_get_register(devlink, 
MLXSW_SP_RESOURCE_SPAN, 101 85 mlxsw_sp_span_occ_get, mlxsw_sp); 102 86 INIT_WORK(&span->work, mlxsw_sp_span_respin_work); 103 87 104 88 return 0; 89 + 90 + err_init: 91 + mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock); 92 + kfree(mlxsw_sp->span); 93 + return err; 105 94 } 106 95 107 96 void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) ··· 795 766 return 0; 796 767 } 797 768 769 + static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, 770 + u32 speed) 771 + { 772 + u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 773 + 774 + return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 775 + } 776 + 798 777 static int 799 778 mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 800 779 { ··· 1088 1051 } 1089 1052 1090 1053 static int 1091 - __mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span, 1092 - struct mlxsw_sp_span_trigger_entry * 1093 - trigger_entry, bool enable) 1054 + __mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span, 1055 + struct mlxsw_sp_span_trigger_entry * 1056 + trigger_entry, bool enable) 1094 1057 { 1095 1058 char mpar_pl[MLXSW_REG_MPAR_LEN]; 1096 1059 enum mlxsw_reg_mpar_i_e i_e; ··· 1113 1076 } 1114 1077 1115 1078 static int 1116 - mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span, 1117 - struct mlxsw_sp_span_trigger_entry * 1118 - trigger_entry) 1079 + mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry * 1080 + trigger_entry) 1119 1081 { 1120 - return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true); 1082 + return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, 1083 + trigger_entry, true); 1121 1084 } 1122 1085 1123 1086 static void 1124 - mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span, 1125 - struct mlxsw_sp_span_trigger_entry * 1087 + mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry * 1088 + trigger_entry) 1089 + { 1090 + __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry, 
1091 + false); 1092 + } 1093 + 1094 + static bool 1095 + mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry * 1096 + trigger_entry, 1097 + enum mlxsw_sp_span_trigger trigger, 1098 + struct mlxsw_sp_port *mlxsw_sp_port) 1099 + { 1100 + return trigger_entry->trigger == trigger && 1101 + trigger_entry->local_port == mlxsw_sp_port->local_port; 1102 + } 1103 + 1104 + static int 1105 + mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry * 1106 + trigger_entry, 1107 + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc) 1108 + { 1109 + /* Port trigger are enabled during binding. */ 1110 + return 0; 1111 + } 1112 + 1113 + static void 1114 + mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry * 1115 + trigger_entry, 1116 + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc) 1117 + { 1118 + } 1119 + 1120 + static const struct mlxsw_sp_span_trigger_ops 1121 + mlxsw_sp_span_trigger_port_ops = { 1122 + .bind = mlxsw_sp_span_trigger_port_bind, 1123 + .unbind = mlxsw_sp_span_trigger_port_unbind, 1124 + .matches = mlxsw_sp_span_trigger_port_matches, 1125 + .enable = mlxsw_sp_span_trigger_port_enable, 1126 + .disable = mlxsw_sp_span_trigger_port_disable, 1127 + }; 1128 + 1129 + static int 1130 + mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry * 1126 1131 trigger_entry) 1127 1132 { 1128 - __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false); 1133 + return -EOPNOTSUPP; 1134 + } 1135 + 1136 + static void 1137 + mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry * 1138 + trigger_entry) 1139 + { 1140 + } 1141 + 1142 + static bool 1143 + mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry * 1144 + trigger_entry, 1145 + enum mlxsw_sp_span_trigger trigger, 1146 + struct mlxsw_sp_port *mlxsw_sp_port) 1147 + { 1148 + WARN_ON_ONCE(1); 1149 + return false; 1150 + } 1151 + 1152 + static int 1153 + mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry * 1154 + 
trigger_entry, 1155 + struct mlxsw_sp_port *mlxsw_sp_port, 1156 + u8 tc) 1157 + { 1158 + return -EOPNOTSUPP; 1159 + } 1160 + 1161 + static void 1162 + mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry * 1163 + trigger_entry, 1164 + struct mlxsw_sp_port *mlxsw_sp_port, 1165 + u8 tc) 1166 + { 1167 + } 1168 + 1169 + static const struct mlxsw_sp_span_trigger_ops 1170 + mlxsw_sp1_span_trigger_global_ops = { 1171 + .bind = mlxsw_sp1_span_trigger_global_bind, 1172 + .unbind = mlxsw_sp1_span_trigger_global_unbind, 1173 + .matches = mlxsw_sp1_span_trigger_global_matches, 1174 + .enable = mlxsw_sp1_span_trigger_global_enable, 1175 + .disable = mlxsw_sp1_span_trigger_global_disable, 1176 + }; 1177 + 1178 + static const struct mlxsw_sp_span_trigger_ops * 1179 + mlxsw_sp1_span_trigger_ops_arr[] = { 1180 + [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops, 1181 + [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] = 1182 + &mlxsw_sp1_span_trigger_global_ops, 1183 + }; 1184 + 1185 + static int 1186 + mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry * 1187 + trigger_entry) 1188 + { 1189 + struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp; 1190 + enum mlxsw_reg_mpagr_trigger trigger; 1191 + char mpagr_pl[MLXSW_REG_MPAGR_LEN]; 1192 + 1193 + switch (trigger_entry->trigger) { 1194 + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: 1195 + trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER; 1196 + break; 1197 + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: 1198 + trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED; 1199 + break; 1200 + case MLXSW_SP_SPAN_TRIGGER_ECN: 1201 + trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN; 1202 + break; 1203 + default: 1204 + WARN_ON_ONCE(1); 1205 + return -EINVAL; 1206 + } 1207 + 1208 + mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id, 1209 + 1); 1210 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl); 1211 + } 1212 + 1213 + static void 1214 + 
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry * 1215 + trigger_entry) 1216 + { 1217 + /* There is no unbinding for global triggers. The trigger should be 1218 + * disabled on all ports by now. 1219 + */ 1220 + } 1221 + 1222 + static bool 1223 + mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry * 1224 + trigger_entry, 1225 + enum mlxsw_sp_span_trigger trigger, 1226 + struct mlxsw_sp_port *mlxsw_sp_port) 1227 + { 1228 + return trigger_entry->trigger == trigger; 1229 + } 1230 + 1231 + static int 1232 + __mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry * 1233 + trigger_entry, 1234 + struct mlxsw_sp_port *mlxsw_sp_port, 1235 + u8 tc, bool enable) 1236 + { 1237 + struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp; 1238 + char momte_pl[MLXSW_REG_MOMTE_LEN]; 1239 + enum mlxsw_reg_momte_type type; 1240 + int err; 1241 + 1242 + switch (trigger_entry->trigger) { 1243 + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: 1244 + type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS; 1245 + break; 1246 + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: 1247 + type = MLXSW_REG_MOMTE_TYPE_WRED; 1248 + break; 1249 + case MLXSW_SP_SPAN_TRIGGER_ECN: 1250 + type = MLXSW_REG_MOMTE_TYPE_ECN; 1251 + break; 1252 + default: 1253 + WARN_ON_ONCE(1); 1254 + return -EINVAL; 1255 + } 1256 + 1257 + /* Query existing configuration in order to only change the state of 1258 + * the specified traffic class. 
1259 + */ 1260 + mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type); 1261 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl); 1262 + if (err) 1263 + return err; 1264 + 1265 + mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable); 1266 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl); 1267 + } 1268 + 1269 + static int 1270 + mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry * 1271 + trigger_entry, 1272 + struct mlxsw_sp_port *mlxsw_sp_port, 1273 + u8 tc) 1274 + { 1275 + return __mlxsw_sp2_span_trigger_global_enable(trigger_entry, 1276 + mlxsw_sp_port, tc, true); 1277 + } 1278 + 1279 + static void 1280 + mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry * 1281 + trigger_entry, 1282 + struct mlxsw_sp_port *mlxsw_sp_port, 1283 + u8 tc) 1284 + { 1285 + __mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc, 1286 + false); 1287 + } 1288 + 1289 + static const struct mlxsw_sp_span_trigger_ops 1290 + mlxsw_sp2_span_trigger_global_ops = { 1291 + .bind = mlxsw_sp2_span_trigger_global_bind, 1292 + .unbind = mlxsw_sp2_span_trigger_global_unbind, 1293 + .matches = mlxsw_sp2_span_trigger_global_matches, 1294 + .enable = mlxsw_sp2_span_trigger_global_enable, 1295 + .disable = mlxsw_sp2_span_trigger_global_disable, 1296 + }; 1297 + 1298 + static const struct mlxsw_sp_span_trigger_ops * 1299 + mlxsw_sp2_span_trigger_ops_arr[] = { 1300 + [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops, 1301 + [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] = 1302 + &mlxsw_sp2_span_trigger_global_ops, 1303 + }; 1304 + 1305 + static void 1306 + mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry) 1307 + { 1308 + struct mlxsw_sp_span *span = trigger_entry->span; 1309 + enum mlxsw_sp_span_trigger_type type; 1310 + 1311 + switch (trigger_entry->trigger) { 1312 + case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */ 1313 + case MLXSW_SP_SPAN_TRIGGER_EGRESS: 1314 
+ type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT; 1315 + break; 1316 + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */ 1317 + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */ 1318 + case MLXSW_SP_SPAN_TRIGGER_ECN: 1319 + type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL; 1320 + break; 1321 + default: 1322 + WARN_ON_ONCE(1); 1323 + return; 1324 + } 1325 + 1326 + trigger_entry->ops = span->span_trigger_ops_arr[type]; 1129 1327 } 1130 1328 1131 1329 static struct mlxsw_sp_span_trigger_entry * ··· 1378 1106 return ERR_PTR(-ENOMEM); 1379 1107 1380 1108 refcount_set(&trigger_entry->ref_count, 1); 1381 - trigger_entry->local_port = mlxsw_sp_port->local_port; 1109 + trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port : 1110 + 0; 1382 1111 trigger_entry->trigger = trigger; 1383 1112 memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms)); 1113 + trigger_entry->span = span; 1114 + mlxsw_sp_span_trigger_ops_set(trigger_entry); 1384 1115 list_add_tail(&trigger_entry->list, &span->trigger_entries_list); 1385 1116 1386 - err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry); 1117 + err = trigger_entry->ops->bind(trigger_entry); 1387 1118 if (err) 1388 1119 goto err_trigger_entry_bind; 1389 1120 ··· 1403 1128 struct mlxsw_sp_span_trigger_entry * 1404 1129 trigger_entry) 1405 1130 { 1406 - mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry); 1131 + trigger_entry->ops->unbind(trigger_entry); 1407 1132 list_del(&trigger_entry->list); 1408 1133 kfree(trigger_entry); 1409 1134 } ··· 1416 1141 struct mlxsw_sp_span_trigger_entry *trigger_entry; 1417 1142 1418 1143 list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) { 1419 - if (trigger_entry->trigger == trigger && 1420 - trigger_entry->local_port == mlxsw_sp_port->local_port) 1144 + if (trigger_entry->ops->matches(trigger_entry, trigger, 1145 + mlxsw_sp_port)) 1421 1146 return trigger_entry; 1422 1147 } 1423 1148 ··· 1482 1207 1483 1208 
mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry); 1484 1209 } 1210 + 1211 + int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port, 1212 + enum mlxsw_sp_span_trigger trigger, u8 tc) 1213 + { 1214 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1215 + struct mlxsw_sp_span_trigger_entry *trigger_entry; 1216 + 1217 + ASSERT_RTNL(); 1218 + 1219 + trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span, 1220 + trigger, 1221 + mlxsw_sp_port); 1222 + if (WARN_ON_ONCE(!trigger_entry)) 1223 + return -EINVAL; 1224 + 1225 + return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc); 1226 + } 1227 + 1228 + void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, 1229 + enum mlxsw_sp_span_trigger trigger, u8 tc) 1230 + { 1231 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1232 + struct mlxsw_sp_span_trigger_entry *trigger_entry; 1233 + 1234 + ASSERT_RTNL(); 1235 + 1236 + trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span, 1237 + trigger, 1238 + mlxsw_sp_port); 1239 + if (WARN_ON_ONCE(!trigger_entry)) 1240 + return; 1241 + 1242 + return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc); 1243 + } 1244 + 1245 + static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) 1246 + { 1247 + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr; 1248 + 1249 + return 0; 1250 + } 1251 + 1252 + static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) 1253 + { 1254 + return mtu * 5 / 2; 1255 + } 1256 + 1257 + const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 1258 + .init = mlxsw_sp1_span_init, 1259 + .buffsize_get = mlxsw_sp1_span_buffsize_get, 1260 + }; 1261 + 1262 + static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) 1263 + { 1264 + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr; 1265 + 1266 + return 0; 1267 + } 1268 + 1269 + #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 1270 + #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 1271 + 
1272 + static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) 1273 + { 1274 + return 3 * mtu + buffer_factor * speed / 1000; 1275 + } 1276 + 1277 + static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 1278 + { 1279 + int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; 1280 + 1281 + return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 1282 + } 1283 + 1284 + const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 1285 + .init = mlxsw_sp2_span_init, 1286 + .buffsize_get = mlxsw_sp2_span_buffsize_get, 1287 + }; 1288 + 1289 + static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 1290 + { 1291 + int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 1292 + 1293 + return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 1294 + } 1295 + 1296 + const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 1297 + .init = mlxsw_sp2_span_init, 1298 + .buffsize_get = mlxsw_sp3_span_buffsize_get, 1299 + };
+16
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
··· 26 26 enum mlxsw_sp_span_trigger { 27 27 MLXSW_SP_SPAN_TRIGGER_INGRESS, 28 28 MLXSW_SP_SPAN_TRIGGER_EGRESS, 29 + MLXSW_SP_SPAN_TRIGGER_TAIL_DROP, 30 + MLXSW_SP_SPAN_TRIGGER_EARLY_DROP, 31 + MLXSW_SP_SPAN_TRIGGER_ECN, 29 32 }; 30 33 31 34 struct mlxsw_sp_span_trigger_parms { ··· 36 33 }; 37 34 38 35 struct mlxsw_sp_span_entry_ops; 36 + 37 + struct mlxsw_sp_span_ops { 38 + int (*init)(struct mlxsw_sp *mlxsw_sp); 39 + u32 (*buffsize_get)(int mtu, u32 speed); 40 + }; 39 41 40 42 struct mlxsw_sp_span_entry { 41 43 const struct net_device *to_dev; ··· 89 81 enum mlxsw_sp_span_trigger trigger, 90 82 struct mlxsw_sp_port *mlxsw_sp_port, 91 83 const struct mlxsw_sp_span_trigger_parms *parms); 84 + int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port, 85 + enum mlxsw_sp_span_trigger trigger, u8 tc); 86 + void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, 87 + enum mlxsw_sp_span_trigger trigger, u8 tc); 88 + 89 + extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops; 90 + extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops; 91 + extern const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops; 92 92 93 93 #endif
+1 -1
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 458 458 int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, 459 459 struct tc_cls_matchall_offload *flow); 460 460 void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb); 461 - int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, 461 + int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, 462 462 enum tc_setup_type type, void *type_data, 463 463 void *data, 464 464 void (*cleanup)(struct flow_block_cb *block_cb));
+4 -4
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1646 1646 } 1647 1647 1648 1648 static int 1649 - nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, 1649 + nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app, 1650 1650 struct flow_block_offload *f, void *data, 1651 1651 void (*cleanup)(struct flow_block_cb *block_cb)) 1652 1652 { ··· 1680 1680 block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, 1681 1681 cb_priv, cb_priv, 1682 1682 nfp_flower_setup_indr_tc_release, 1683 - f, netdev, data, app, cleanup); 1683 + f, netdev, sch, data, app, cleanup); 1684 1684 if (IS_ERR(block_cb)) { 1685 1685 list_del(&cb_priv->list); 1686 1686 kfree(cb_priv); ··· 1711 1711 } 1712 1712 1713 1713 int 1714 - nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, 1714 + nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, 1715 1715 enum tc_setup_type type, void *type_data, 1716 1716 void *data, 1717 1717 void (*cleanup)(struct flow_block_cb *block_cb)) ··· 1721 1721 1722 1722 switch (type) { 1723 1723 case TC_SETUP_BLOCK: 1724 - return nfp_flower_setup_indr_tc_block(netdev, cb_priv, 1724 + return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv, 1725 1725 type_data, data, cleanup); 1726 1726 default: 1727 1727 return -EOPNOTSUPP;
+6 -3
include/net/flow_offload.h
··· 444 444 struct list_head cb_list; 445 445 struct list_head *driver_block_list; 446 446 struct netlink_ext_ack *extack; 447 + struct Qdisc *sch; 447 448 }; 448 449 449 450 enum tc_setup_type; ··· 456 455 struct flow_block_indr { 457 456 struct list_head list; 458 457 struct net_device *dev; 458 + struct Qdisc *sch; 459 459 enum flow_block_binder_type binder_type; 460 460 void *data; 461 461 void *cb_priv; ··· 481 479 void *cb_ident, void *cb_priv, 482 480 void (*release)(void *cb_priv), 483 481 struct flow_block_offload *bo, 484 - struct net_device *dev, void *data, 482 + struct net_device *dev, 483 + struct Qdisc *sch, void *data, 485 484 void *indr_cb_priv, 486 485 void (*cleanup)(struct flow_block_cb *block_cb)); 487 486 void flow_block_cb_free(struct flow_block_cb *block_cb); ··· 556 553 INIT_LIST_HEAD(&flow_block->cb_list); 557 554 } 558 555 559 - typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, 556 + typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv, 560 557 enum tc_setup_type type, void *type_data, 561 558 void *data, 562 559 void (*cleanup)(struct flow_block_cb *block_cb)); ··· 564 561 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv); 565 562 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, 566 563 void (*release)(void *cb_priv)); 567 - int flow_indr_dev_setup_offload(struct net_device *dev, 564 + int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, 568 565 enum tc_setup_type type, void *data, 569 566 struct flow_block_offload *bo, 570 567 void (*cleanup)(struct flow_block_cb *block_cb));
+7 -5
net/core/flow_offload.c
··· 429 429 430 430 static void flow_block_indr_init(struct flow_block_cb *flow_block, 431 431 struct flow_block_offload *bo, 432 - struct net_device *dev, void *data, 432 + struct net_device *dev, struct Qdisc *sch, void *data, 433 433 void *cb_priv, 434 434 void (*cleanup)(struct flow_block_cb *block_cb)) 435 435 { ··· 437 437 flow_block->indr.data = data; 438 438 flow_block->indr.cb_priv = cb_priv; 439 439 flow_block->indr.dev = dev; 440 + flow_block->indr.sch = sch; 440 441 flow_block->indr.cleanup = cleanup; 441 442 } 442 443 ··· 445 444 void *cb_ident, void *cb_priv, 446 445 void (*release)(void *cb_priv), 447 446 struct flow_block_offload *bo, 448 - struct net_device *dev, void *data, 447 + struct net_device *dev, 448 + struct Qdisc *sch, void *data, 449 449 void *indr_cb_priv, 450 450 void (*cleanup)(struct flow_block_cb *block_cb)) 451 451 { ··· 456 454 if (IS_ERR(block_cb)) 457 455 goto out; 458 456 459 - flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup); 457 + flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup); 460 458 list_add(&block_cb->indr.list, &flow_block_indr_list); 461 459 462 460 out: ··· 464 462 } 465 463 EXPORT_SYMBOL(flow_indr_block_cb_alloc); 466 464 467 - int flow_indr_dev_setup_offload(struct net_device *dev, 465 + int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, 468 466 enum tc_setup_type type, void *data, 469 467 struct flow_block_offload *bo, 470 468 void (*cleanup)(struct flow_block_cb *block_cb)) ··· 473 471 474 472 mutex_lock(&flow_indr_block_lock); 475 473 list_for_each_entry(this, &flow_block_indr_dev_list, list) 476 - this->cb(dev, this->cb_priv, type, bo, data, cleanup); 474 + this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); 477 475 478 476 mutex_unlock(&flow_indr_block_lock); 479 477
+1 -1
net/netfilter/nf_flow_table_offload.c
··· 964 964 nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable, 965 965 extack); 966 966 967 - return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo, 967 + return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo, 968 968 nf_flow_table_indr_cleanup); 969 969 } 970 970
+1 -1
net/netfilter/nf_tables_offload.c
··· 312 312 313 313 nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack); 314 314 315 - err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo, 315 + err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo, 316 316 nft_indr_block_cleanup); 317 317 if (err < 0) 318 318 return err;
+9 -7
net/sched/cls_api.c
··· 622 622 struct flow_block_offload *bo); 623 623 624 624 static void tcf_block_offload_init(struct flow_block_offload *bo, 625 - struct net_device *dev, 625 + struct net_device *dev, struct Qdisc *sch, 626 626 enum flow_block_command command, 627 627 enum flow_block_binder_type binder_type, 628 628 struct flow_block *flow_block, ··· 634 634 bo->block = flow_block; 635 635 bo->block_shared = shared; 636 636 bo->extack = extack; 637 + bo->sch = sch; 637 638 INIT_LIST_HEAD(&bo->cb_list); 638 639 } 639 640 ··· 645 644 { 646 645 struct tcf_block *block = block_cb->indr.data; 647 646 struct net_device *dev = block_cb->indr.dev; 647 + struct Qdisc *sch = block_cb->indr.sch; 648 648 struct netlink_ext_ack extack = {}; 649 649 struct flow_block_offload bo; 650 650 651 - tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND, 651 + tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, 652 652 block_cb->indr.binder_type, 653 653 &block->flow_block, tcf_block_shared(block), 654 654 &extack); ··· 668 666 } 669 667 670 668 static int tcf_block_offload_cmd(struct tcf_block *block, 671 - struct net_device *dev, 669 + struct net_device *dev, struct Qdisc *sch, 672 670 struct tcf_block_ext_info *ei, 673 671 enum flow_block_command command, 674 672 struct netlink_ext_ack *extack) 675 673 { 676 674 struct flow_block_offload bo = {}; 677 675 678 - tcf_block_offload_init(&bo, dev, command, ei->binder_type, 676 + tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type, 679 677 &block->flow_block, tcf_block_shared(block), 680 678 extack); 681 679 ··· 692 690 return tcf_block_setup(block, &bo); 693 691 } 694 692 695 - flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo, 693 + flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, 696 694 tc_block_indr_cleanup); 697 695 tcf_block_setup(block, &bo); 698 696 ··· 719 717 goto err_unlock; 720 718 } 721 719 722 - err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack); 720 + err = 
tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); 723 721 if (err == -EOPNOTSUPP) 724 722 goto no_offload_dev_inc; 725 723 if (err) ··· 746 744 int err; 747 745 748 746 down_write(&block->cb_lock); 749 - err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL); 747 + err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); 750 748 if (err == -EOPNOTSUPP) 751 749 goto no_offload_dev_dec; 752 750 up_write(&block->cb_lock);
+103 -3
tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
··· 121 121 h2_create() 122 122 { 123 123 host_create $h2 2 124 + tc qdisc add dev $h2 clsact 124 125 125 126 # Some of the tests in this suite use multicast traffic. As this traffic 126 127 # enters BR2_10 resp. BR2_11, it is flooded to all other ports. Thus ··· 142 141 h2_destroy() 143 142 { 144 143 ethtool -s $h2 autoneg on 144 + tc qdisc del dev $h2 clsact 145 145 host_destroy $h2 146 146 } 147 147 ··· 338 336 qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .packets 339 337 } 340 338 339 + send_packets() 340 + { 341 + local vlan=$1; shift 342 + local proto=$1; shift 343 + local pkts=$1; shift 344 + 345 + $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \ 346 + -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \ 347 + -t $proto -q -c $pkts "$@" 348 + } 349 + 341 350 # This sends traffic in an attempt to build a backlog of $size. Returns 0 on 342 351 # success. After 10 failed attempts it bails out and returns 1. It dumps the 343 352 # backlog size to stdout. ··· 377 364 return 1 378 365 fi 379 366 380 - $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \ 381 - -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \ 382 - -t $proto -q -c $pkts "$@" 367 + send_packets $vlan $proto $pkts "$@" 383 368 done 384 369 } 385 370 ··· 541 530 stop_traffic 542 531 543 532 log_test "TC $((vlan - 10)): Qdisc reports MC backlog" 533 + } 534 + 535 + do_drop_test() 536 + { 537 + local vlan=$1; shift 538 + local limit=$1; shift 539 + local trigger=$1; shift 540 + local subtest=$1; shift 541 + local fetch_counter=$1; shift 542 + local backlog 543 + local base 544 + local now 545 + local pct 546 + 547 + RET=0 548 + 549 + start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac 550 + 551 + # Create a bit of a backlog and observe no mirroring due to drops. 552 + qevent_rule_install_$subtest 553 + base=$($fetch_counter) 554 + 555 + build_backlog $vlan $((2 * limit / 3)) udp >/dev/null 556 + 557 + busywait 1100 until_counter_is ">= $((base + 1))" $fetch_counter >/dev/null 558 + check_fail $? 
"Spurious packets observed without buffer pressure" 559 + 560 + qevent_rule_uninstall_$subtest 561 + 562 + # Push to the queue until it's at the limit. The configured limit is 563 + # rounded by the qdisc and then by the driver, so this is the best we 564 + # can do to get to the real limit of the system. Do this with the rules 565 + # uninstalled so that the inevitable drops don't get counted. 566 + build_backlog $vlan $((3 * limit / 2)) udp >/dev/null 567 + 568 + qevent_rule_install_$subtest 569 + base=$($fetch_counter) 570 + 571 + send_packets $vlan udp 11 572 + 573 + now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter) 574 + check_err $? "Dropped packets not observed: 11 expected, $((now - base)) seen" 575 + 576 + # When no extra traffic is injected, there should be no mirroring. 577 + busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null 578 + check_fail $? "Spurious packets observed" 579 + 580 + # When the rule is uninstalled, there should be no mirroring. 581 + qevent_rule_uninstall_$subtest 582 + send_packets $vlan udp 11 583 + busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null 584 + check_fail $? 
"Spurious packets observed after uninstall" 585 + 586 + log_test "TC $((vlan - 10)): ${trigger}ped packets $subtest'd" 587 + 588 + stop_traffic 589 + sleep 1 590 + } 591 + 592 + qevent_rule_install_mirror() 593 + { 594 + tc filter add block 10 pref 1234 handle 102 matchall skip_sw \ 595 + action mirred egress mirror dev $swp2 hw_stats disabled 596 + } 597 + 598 + qevent_rule_uninstall_mirror() 599 + { 600 + tc filter del block 10 pref 1234 handle 102 matchall 601 + } 602 + 603 + qevent_counter_fetch_mirror() 604 + { 605 + tc_rule_handle_stats_get "dev $h2 ingress" 101 606 + } 607 + 608 + do_drop_mirror_test() 609 + { 610 + local vlan=$1; shift 611 + local limit=$1; shift 612 + local qevent_name=$1; shift 613 + 614 + tc filter add dev $h2 ingress pref 1 handle 101 prot ip \ 615 + flower skip_sw ip_proto udp \ 616 + action drop 617 + 618 + do_drop_test "$vlan" "$limit" "$qevent_name" mirror \ 619 + qevent_counter_fetch_mirror 620 + 621 + tc filter del dev $h2 ingress pref 1 handle 101 flower 544 622 }
+11
tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
··· 7 7 ecn_nodrop_test 8 8 red_test 9 9 mc_backlog_test 10 + red_mirror_test 10 11 " 11 12 : ${QDISC:=ets} 12 13 source sch_red_core.sh ··· 80 79 # configuration, but are arbitrary. 81 80 do_mc_backlog_test 10 $BACKLOG1 82 81 do_mc_backlog_test 11 $BACKLOG2 82 + 83 + uninstall_qdisc 84 + } 85 + 86 + red_mirror_test() 87 + { 88 + install_qdisc qevent early_drop block 10 89 + 90 + do_drop_mirror_test 10 $BACKLOG1 early_drop 91 + do_drop_mirror_test 11 $BACKLOG2 early_drop 83 92 84 93 uninstall_qdisc 85 94 }
+8
tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
··· 7 7 ecn_nodrop_test 8 8 red_test 9 9 mc_backlog_test 10 + red_mirror_test 10 11 " 11 12 source sch_red_core.sh 12 13 ··· 55 54 # Note that the backlog value here does not correspond to RED 56 55 # configuration, but is arbitrary. 57 56 do_mc_backlog_test 10 $BACKLOG 57 + uninstall_qdisc 58 + } 59 + 60 + red_mirror_test() 61 + { 62 + install_qdisc qevent early_drop block 10 63 + do_drop_mirror_test 10 $BACKLOG 58 64 uninstall_qdisc 59 65 } 60 66