Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlxsw-Mirror-to-CPU-preparations'

Ido Schimmel says:

====================
mlxsw: Mirror to CPU preparations

A future patch set will add the ability to trap packets that were
dropped due to buffer related reasons (e.g., early drop). Internally
this is implemented by mirroring these packets towards the CPU port.
This patch set adds the required infrastructure to enable such
mirroring.

Patches #1-#2 extend two registers needed for the above-mentioned
functionality.

Patches #3-#6 gradually add support for setting the mirroring target of
a SPAN (mirroring) agent as the CPU port. This is only supported from
Spectrum-2 onwards, so an error is returned for Spectrum-1.

Patches #7-#8 add the ability to set a policer on a SPAN agent. This is
required because unlike regularly trapped packets, a policer cannot be
set on the trap group with which the mirroring trap is associated.

Patches #9-#12 parse the mirror reason field from the Completion Queue
Element (CQE). Unlike other trapped packets, the trap identifier of
mirrored packets only indicates that the packet was mirrored, but not
why. The reason (e.g., tail drop) is encoded in the mirror reason field.

Patch #13 utilizes the mirror reason field in order to lookup the
matching Rx listener. This allows us to maintain the abstraction that an
Rx listener is mapped to a single trap reason. Without taking the mirror
reason into account we would need to register a single Rx listener for
all mirrored packets.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+318 -31
+4 -2
drivers/net/ethernet/mellanox/mlxsw/core.c
··· 1524 1524 { 1525 1525 return (rxl_a->func == rxl_b->func && 1526 1526 rxl_a->local_port == rxl_b->local_port && 1527 - rxl_a->trap_id == rxl_b->trap_id); 1527 + rxl_a->trap_id == rxl_b->trap_id && 1528 + rxl_a->mirror_reason == rxl_b->mirror_reason); 1528 1529 } 1529 1530 1530 1531 static struct mlxsw_rx_listener_item * ··· 2045 2044 rxl = &rxl_item->rxl; 2046 2045 if ((rxl->local_port == MLXSW_PORT_DONT_CARE || 2047 2046 rxl->local_port == local_port) && 2048 - rxl->trap_id == rx_info->trap_id) { 2047 + rxl->trap_id == rx_info->trap_id && 2048 + rxl->mirror_reason == rx_info->mirror_reason) { 2049 2049 if (rxl_item->enabled) 2050 2050 found = true; 2051 2051 break;
+2
drivers/net/ethernet/mellanox/mlxsw/core.h
··· 61 61 struct mlxsw_rx_listener { 62 62 void (*func)(struct sk_buff *skb, u8 local_port, void *priv); 63 63 u8 local_port; 64 + u8 mirror_reason; 64 65 u16 trap_id; 65 66 }; 66 67 ··· 177 176 u16 lag_id; 178 177 } u; 179 178 u8 lag_port_index; 179 + u8 mirror_reason; 180 180 int trap_id; 181 181 }; 182 182
+5 -1
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 547 547 { 548 548 struct pci_dev *pdev = mlxsw_pci->pdev; 549 549 struct mlxsw_pci_queue_elem_info *elem_info; 550 + struct mlxsw_rx_info rx_info = {}; 550 551 char *wqe; 551 552 struct sk_buff *skb; 552 - struct mlxsw_rx_info rx_info; 553 553 u16 byte_count; 554 554 int err; 555 555 ··· 582 582 if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) 583 583 cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe); 584 584 mlxsw_skb_cb(skb)->cookie_index = cookie_index; 585 + } else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 && 586 + rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 && 587 + mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { 588 + rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe); 585 589 } 586 590 587 591 byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+6 -1
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
··· 176 176 /* pci_cqe_trap_id 177 177 * Trap ID that captured the packet. 178 178 */ 179 - MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9); 179 + MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 10); 180 180 181 181 /* pci_cqe_crc 182 182 * Length include CRC. Indicates the length field includes ··· 212 212 * When trap_id is an ACL: User defined value from policy engine action. 213 213 */ 214 214 MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20); 215 + 216 + /* pci_cqe_mirror_reason 217 + * Mirror reason. 218 + */ 219 + MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8); 215 220 216 221 /* pci_cqe_owner 217 222 * Ownership bit.
+28 -1
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 5778 5778 * Note: A trap ID can only be associated with a single trap group. The device 5779 5779 * will associate the trap ID with the last trap group configured. 5780 5780 */ 5781 - MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9); 5781 + MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 10); 5782 5782 5783 5783 enum { 5784 5784 MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT, ··· 8662 8662 */ 8663 8663 MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4); 8664 8664 8665 + /* reg_mpat_session_id 8666 + * Mirror Session ID. 8667 + * Used for MIRROR_SESSION<i> trap. 8668 + * Access: RW 8669 + */ 8670 + MLXSW_ITEM32(reg, mpat, session_id, 0x00, 24, 4); 8671 + 8665 8672 /* reg_mpat_system_port 8666 8673 * A unique port identifier for the final destination of the packet. 8667 8674 * Access: RW ··· 8725 8718 * Access: RW 8726 8719 */ 8727 8720 MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4); 8721 + 8722 + /* reg_mpat_pide 8723 + * Policer enable. 8724 + * Access: RW 8725 + */ 8726 + MLXSW_ITEM32(reg, mpat, pide, 0x0C, 15, 1); 8727 + 8728 + /* reg_mpat_pid 8729 + * Policer ID. 8730 + * Access: RW 8731 + */ 8732 + MLXSW_ITEM32(reg, mpat, pid, 0x0C, 0, 14); 8728 8733 8729 8734 /* Remote SPAN - Ethernet VLAN 8730 8735 * - - - - - - - - - - - - - - ··· 9520 9501 * Access: RW 9521 9502 */ 9522 9503 MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1); 9504 + 9505 + /* reg_mogcr_mirroring_pid_base 9506 + * Base policer id for mirroring policers. 9507 + * Must have an even value (e.g. 1000, not 1001). 9508 + * Reserved when SwitchX/-2, Switch-IB/2, Spectrum-1 and Quantum. 9509 + * Access: RW 9510 + */ 9511 + MLXSW_ITEM32(reg, mogcr, mirroring_pid_base, 0x0C, 0, 14); 9523 9512 9524 9513 /* MPAGR - Monitoring Port Analyzer Global Register 9525 9514 * ------------------------------------------------
+3 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
··· 136 136 const struct net_device *out_dev, 137 137 bool ingress, int *p_span_id) 138 138 { 139 + struct mlxsw_sp_span_agent_parms agent_parms = {}; 139 140 struct mlxsw_sp_port *mlxsw_sp_port; 140 141 struct mlxsw_sp *mlxsw_sp = priv; 141 142 int err; 142 143 143 - err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id); 144 + agent_parms.to_dev = out_dev; 145 + err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms); 144 146 if (err) 145 147 return err; 146 148
+4 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
··· 27 27 struct mlxsw_sp_mall_entry *mall_entry) 28 28 { 29 29 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 30 + struct mlxsw_sp_span_agent_parms agent_parms = {}; 30 31 struct mlxsw_sp_span_trigger_parms parms; 31 32 enum mlxsw_sp_span_trigger trigger; 32 33 int err; ··· 37 36 return -EINVAL; 38 37 } 39 38 40 - err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, 41 - &mall_entry->mirror.span_id); 39 + agent_parms.to_dev = mall_entry->mirror.to_dev; 40 + err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id, 41 + &agent_parms); 42 42 if (err) 43 43 return err; 44 44
+4 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
··· 1295 1295 { 1296 1296 struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; 1297 1297 struct mlxsw_sp_span_trigger_parms trigger_parms = {}; 1298 + struct mlxsw_sp_span_agent_parms agent_parms = { 1299 + .to_dev = mall_entry->mirror.to_dev, 1300 + }; 1298 1301 int span_id; 1299 1302 int err; 1300 1303 1301 - err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, &span_id); 1304 + err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms); 1302 1305 if (err) 1303 1306 return err; 1304 1307
+239 -18
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
··· 22 22 struct work_struct work; 23 23 struct mlxsw_sp *mlxsw_sp; 24 24 const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr; 25 + const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr; 26 + size_t span_entry_ops_arr_size; 25 27 struct list_head analyzed_ports_list; 26 28 struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */ 27 29 struct list_head trigger_entries_list; 30 + u16 policer_id_base; 31 + refcount_t policer_id_base_ref_count; 28 32 atomic_t active_entries_count; 29 33 int entries_count; 30 34 struct mlxsw_sp_span_entry entries[]; ··· 90 86 span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL); 91 87 if (!span) 92 88 return -ENOMEM; 89 + refcount_set(&span->policer_id_base_ref_count, 0); 93 90 span->entries_count = entries_count; 94 91 atomic_set(&span->active_entries_count, 0); 95 92 mutex_init(&span->analyzed_ports_lock); ··· 131 126 kfree(mlxsw_sp->span); 132 127 } 133 128 129 + static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev) 130 + { 131 + return !dev; 132 + } 133 + 134 + static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp, 135 + const struct net_device *to_dev, 136 + struct mlxsw_sp_span_parms *sparmsp) 137 + { 138 + return -EOPNOTSUPP; 139 + } 140 + 134 141 static int 135 - mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev, 142 + mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry, 143 + struct mlxsw_sp_span_parms sparms) 144 + { 145 + return -EOPNOTSUPP; 146 + } 147 + 148 + static void 149 + mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) 150 + { 151 + } 152 + 153 + static const 154 + struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = { 155 + .can_handle = mlxsw_sp1_span_cpu_can_handle, 156 + .parms_set = mlxsw_sp1_span_entry_cpu_parms, 157 + .configure = mlxsw_sp1_span_entry_cpu_configure, 158 + .deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure, 159 + }; 160 + 161 + static int 162 + 
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp, 163 + const struct net_device *to_dev, 136 164 struct mlxsw_sp_span_parms *sparmsp) 137 165 { 138 166 sparmsp->dest_port = netdev_priv(to_dev); ··· 185 147 /* Create a new port analayzer entry for local_port. */ 186 148 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, 187 149 MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); 150 + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); 151 + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); 188 152 189 153 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); 190 154 } ··· 443 403 } 444 404 445 405 static int 446 - mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev, 406 + mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, 407 + const struct net_device *to_dev, 447 408 struct mlxsw_sp_span_parms *sparmsp) 448 409 { 449 410 struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); ··· 483 442 /* Create a new port analayzer entry for local_port. */ 484 443 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, 485 444 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); 445 + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); 446 + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); 486 447 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); 487 448 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, 488 449 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, ··· 547 504 } 548 505 549 506 static int 550 - mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev, 507 + mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp, 508 + const struct net_device *to_dev, 551 509 struct mlxsw_sp_span_parms *sparmsp) 552 510 { 553 511 struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev); ··· 587 543 /* Create a new port analayzer entry for local_port. 
*/ 588 544 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, 589 545 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); 546 + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); 547 + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); 590 548 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); 591 549 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, 592 550 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, ··· 624 578 } 625 579 626 580 static int 627 - mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev, 581 + mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp, 582 + const struct net_device *to_dev, 628 583 struct mlxsw_sp_span_parms *sparmsp) 629 584 { 630 585 struct net_device *real_dev; ··· 652 605 653 606 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, 654 607 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH); 608 + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); 609 + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); 655 610 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); 656 611 657 612 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); ··· 675 626 }; 676 627 677 628 static const 678 - struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { 629 + struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = { 630 + &mlxsw_sp1_span_entry_ops_cpu, 631 + &mlxsw_sp_span_entry_ops_phys, 632 + #if IS_ENABLED(CONFIG_NET_IPGRE) 633 + &mlxsw_sp_span_entry_ops_gretap4, 634 + #endif 635 + #if IS_ENABLED(CONFIG_IPV6_GRE) 636 + &mlxsw_sp_span_entry_ops_gretap6, 637 + #endif 638 + &mlxsw_sp_span_entry_ops_vlan, 639 + }; 640 + 641 + static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev) 642 + { 643 + return !dev; 644 + } 645 + 646 + static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp, 647 + const struct net_device *to_dev, 648 + struct mlxsw_sp_span_parms *sparmsp) 649 + { 650 + sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 651 + return 0; 652 + } 653 + 654 + static int 655 + 
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry, 656 + struct mlxsw_sp_span_parms sparms) 657 + { 658 + /* Mirroring to the CPU port is like mirroring to any other physical 659 + * port. Its local port is used instead of that of the physical port. 660 + */ 661 + return mlxsw_sp_span_entry_phys_configure(span_entry, sparms); 662 + } 663 + 664 + static void 665 + mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) 666 + { 667 + enum mlxsw_reg_mpat_span_type span_type; 668 + 669 + span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH; 670 + mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type); 671 + } 672 + 673 + static const 674 + struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = { 675 + .can_handle = mlxsw_sp2_span_cpu_can_handle, 676 + .parms_set = mlxsw_sp2_span_entry_cpu_parms, 677 + .configure = mlxsw_sp2_span_entry_cpu_configure, 678 + .deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure, 679 + }; 680 + 681 + static const 682 + struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = { 683 + &mlxsw_sp2_span_entry_ops_cpu, 679 684 &mlxsw_sp_span_entry_ops_phys, 680 685 #if IS_ENABLED(CONFIG_NET_IPGRE) 681 686 &mlxsw_sp_span_entry_ops_gretap4, ··· 741 638 }; 742 639 743 640 static int 744 - mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev, 641 + mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp, 642 + const struct net_device *to_dev, 745 643 struct mlxsw_sp_span_parms *sparmsp) 746 644 { 747 645 return mlxsw_sp_span_entry_unoffloadable(sparmsp); ··· 777 673 goto set_parms; 778 674 779 675 if (sparms.dest_port->mlxsw_sp != mlxsw_sp) { 780 - netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance", 781 - sparms.dest_port->dev->name); 676 + dev_err(mlxsw_sp->bus_info->dev, 677 + "Cannot mirror to a port which belongs to a different mlxsw instance\n"); 782 678 sparms.dest_port = NULL; 783 679 goto set_parms; 784 680 } 785 681 786 682 err 
= span_entry->ops->configure(span_entry, sparms); 787 683 if (err) { 788 - netdev_err(span_entry->to_dev, "Failed to offload mirror to %s", 789 - sparms.dest_port->dev->name); 684 + dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n"); 790 685 sparms.dest_port = NULL; 791 686 goto set_parms; 792 687 } ··· 799 696 { 800 697 if (span_entry->parms.dest_port) 801 698 span_entry->ops->deconfigure(span_entry); 699 + } 700 + 701 + static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span, 702 + u16 policer_id) 703 + { 704 + struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp; 705 + u16 policer_id_base; 706 + int err; 707 + 708 + /* Policers set on SPAN agents must be in the range of 709 + * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the 710 + * base is set and the new policer is not within the range, then we 711 + * must error out. 712 + */ 713 + if (refcount_read(&span->policer_id_base_ref_count)) { 714 + if (policer_id < span->policer_id_base || 715 + policer_id >= span->policer_id_base + span->entries_count) 716 + return -EINVAL; 717 + 718 + refcount_inc(&span->policer_id_base_ref_count); 719 + return 0; 720 + } 721 + 722 + /* Base must be even. */ 723 + policer_id_base = policer_id % 2 == 0 ? 
policer_id : policer_id - 1; 724 + err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp, 725 + policer_id_base); 726 + if (err) 727 + return err; 728 + 729 + span->policer_id_base = policer_id_base; 730 + refcount_set(&span->policer_id_base_ref_count, 1); 731 + 732 + return 0; 733 + } 734 + 735 + static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span) 736 + { 737 + refcount_dec(&span->policer_id_base_ref_count); 802 738 } 803 739 804 740 static struct mlxsw_sp_span_entry * ··· 859 717 if (!span_entry) 860 718 return NULL; 861 719 720 + if (sparms.policer_enable) { 721 + int err; 722 + 723 + err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span, 724 + sparms.policer_id); 725 + if (err) 726 + return NULL; 727 + } 728 + 862 729 atomic_inc(&mlxsw_sp->span->active_entries_count); 863 730 span_entry->ops = ops; 864 731 refcount_set(&span_entry->ref_count, 1); ··· 882 731 { 883 732 mlxsw_sp_span_entry_deconfigure(span_entry); 884 733 atomic_dec(&mlxsw_sp->span->active_entries_count); 734 + if (span_entry->parms.policer_enable) 735 + mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span); 885 736 } 886 737 887 738 struct mlxsw_sp_span_entry * ··· 923 770 } 924 771 925 772 static struct mlxsw_sp_span_entry * 773 + mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp, 774 + const struct net_device *to_dev, 775 + const struct mlxsw_sp_span_parms *sparms) 776 + { 777 + int i; 778 + 779 + for (i = 0; i < mlxsw_sp->span->entries_count; i++) { 780 + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i]; 781 + 782 + if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev && 783 + curr->parms.policer_enable == sparms->policer_enable && 784 + curr->parms.policer_id == sparms->policer_id) 785 + return curr; 786 + } 787 + return NULL; 788 + } 789 + 790 + static struct mlxsw_sp_span_entry * 926 791 mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp, 927 792 const struct net_device *to_dev, 928 793 const struct mlxsw_sp_span_entry_ops *ops, ··· 
948 777 { 949 778 struct mlxsw_sp_span_entry *span_entry; 950 779 951 - span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev); 780 + span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev, 781 + &sparms); 952 782 if (span_entry) { 953 783 /* Already exists, just take a reference */ 954 784 refcount_inc(&span_entry->ref_count); ··· 1066 894 mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp, 1067 895 const struct net_device *to_dev) 1068 896 { 897 + struct mlxsw_sp_span *span = mlxsw_sp->span; 1069 898 size_t i; 1070 899 1071 - for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i) 1072 - if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev)) 1073 - return mlxsw_sp_span_entry_types[i]; 900 + for (i = 0; i < span->span_entry_ops_arr_size; ++i) 901 + if (span->span_entry_ops_arr[i]->can_handle(to_dev)) 902 + return span->span_entry_ops_arr[i]; 1074 903 1075 904 return NULL; 1076 905 } ··· 1093 920 if (!refcount_read(&curr->ref_count)) 1094 921 continue; 1095 922 1096 - err = curr->ops->parms_set(curr->to_dev, &sparms); 923 + err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms); 1097 924 if (err) 1098 925 continue; 1099 926 ··· 1112 939 mlxsw_core_schedule_work(&mlxsw_sp->span->work); 1113 940 } 1114 941 1115 - int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, 1116 - const struct net_device *to_dev, int *p_span_id) 942 + int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, 943 + const struct mlxsw_sp_span_agent_parms *parms) 1117 944 { 945 + const struct net_device *to_dev = parms->to_dev; 1118 946 const struct mlxsw_sp_span_entry_ops *ops; 1119 947 struct mlxsw_sp_span_entry *span_entry; 1120 948 struct mlxsw_sp_span_parms sparms; ··· 1130 956 } 1131 957 1132 958 memset(&sparms, 0, sizeof(sparms)); 1133 - err = ops->parms_set(to_dev, &sparms); 959 + err = ops->parms_set(mlxsw_sp, to_dev, &sparms); 1134 960 if (err) 1135 961 return err; 1136 962 963 + sparms.policer_id = parms->policer_id; 964 + sparms.policer_enable 
= parms->policer_enable; 1137 965 span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); 1138 966 if (!span_entry) 1139 967 return -ENOBUFS; ··· 1695 1519 1696 1520 static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) 1697 1521 { 1522 + size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr); 1523 + 1524 + /* Must be first to avoid NULL pointer dereference by subsequent 1525 + * can_handle() callbacks. 1526 + */ 1527 + if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] != 1528 + &mlxsw_sp1_span_entry_ops_cpu)) 1529 + return -EINVAL; 1530 + 1698 1531 mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr; 1532 + mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr; 1533 + mlxsw_sp->span->span_entry_ops_arr_size = arr_size; 1699 1534 1700 1535 return 0; 1701 1536 } ··· 1716 1529 return mtu * 5 / 2; 1717 1530 } 1718 1531 1532 + static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp, 1533 + u16 policer_id_base) 1534 + { 1535 + return -EOPNOTSUPP; 1536 + } 1537 + 1719 1538 const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 1720 1539 .init = mlxsw_sp1_span_init, 1721 1540 .buffsize_get = mlxsw_sp1_span_buffsize_get, 1541 + .policer_id_base_set = mlxsw_sp1_span_policer_id_base_set, 1722 1542 }; 1723 1543 1724 1544 static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) 1725 1545 { 1546 + size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr); 1547 + 1548 + /* Must be first to avoid NULL pointer dereference by subsequent 1549 + * can_handle() callbacks. 
1550 + */ 1551 + if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] != 1552 + &mlxsw_sp2_span_entry_ops_cpu)) 1553 + return -EINVAL; 1554 + 1726 1555 mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr; 1556 + mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr; 1557 + mlxsw_sp->span->span_entry_ops_arr_size = arr_size; 1727 1558 1728 1559 return 0; 1729 1560 } ··· 1761 1556 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 1762 1557 } 1763 1558 1559 + static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp, 1560 + u16 policer_id_base) 1561 + { 1562 + char mogcr_pl[MLXSW_REG_MOGCR_LEN]; 1563 + int err; 1564 + 1565 + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl); 1566 + if (err) 1567 + return err; 1568 + 1569 + mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base); 1570 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl); 1571 + } 1572 + 1764 1573 const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 1765 1574 .init = mlxsw_sp2_span_init, 1766 1575 .buffsize_get = mlxsw_sp2_span_buffsize_get, 1576 + .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set, 1767 1577 }; 1768 1578 1769 1579 static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) ··· 1791 1571 const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 1792 1572 .init = mlxsw_sp2_span_init, 1793 1573 .buffsize_get = mlxsw_sp3_span_buffsize_get, 1574 + .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set, 1794 1575 };
+14 -3
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
··· 21 21 union mlxsw_sp_l3addr daddr; 22 22 union mlxsw_sp_l3addr saddr; 23 23 u16 vid; 24 + u16 policer_id; 25 + bool policer_enable; 24 26 }; 25 27 26 28 enum mlxsw_sp_span_trigger { ··· 37 35 int span_id; 38 36 }; 39 37 38 + struct mlxsw_sp_span_agent_parms { 39 + const struct net_device *to_dev; 40 + u16 policer_id; 41 + bool policer_enable; 42 + }; 43 + 40 44 struct mlxsw_sp_span_entry_ops; 41 45 42 46 struct mlxsw_sp_span_ops { 43 47 int (*init)(struct mlxsw_sp *mlxsw_sp); 44 48 u32 (*buffsize_get)(int mtu, u32 speed); 49 + int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp, 50 + u16 policer_id_base); 45 51 }; 46 52 47 53 struct mlxsw_sp_span_entry { ··· 62 52 63 53 struct mlxsw_sp_span_entry_ops { 64 54 bool (*can_handle)(const struct net_device *to_dev); 65 - int (*parms_set)(const struct net_device *to_dev, 55 + int (*parms_set)(struct mlxsw_sp *mlxsw_sp, 56 + const struct net_device *to_dev, 66 57 struct mlxsw_sp_span_parms *sparmsp); 67 58 int (*configure)(struct mlxsw_sp_span_entry *span_entry, 68 59 struct mlxsw_sp_span_parms sparms); ··· 84 73 int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu); 85 74 void mlxsw_sp_span_speed_update_work(struct work_struct *work); 86 75 87 - int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, 88 - const struct net_device *to_dev, int *p_span_id); 76 + int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, 77 + const struct mlxsw_sp_span_agent_parms *parms); 89 78 void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id); 90 79 int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port, 91 80 bool ingress);
+9 -1
drivers/net/ethernet/mellanox/mlxsw/trap.h
··· 107 107 MLXSW_TRAP_ID_ACL2 = 0x1C2, 108 108 MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3, 109 109 MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4, 110 + MLXSW_TRAP_ID_MIRROR_SESSION0 = 0x220, 111 + MLXSW_TRAP_ID_MIRROR_SESSION1 = 0x221, 112 + MLXSW_TRAP_ID_MIRROR_SESSION2 = 0x222, 113 + MLXSW_TRAP_ID_MIRROR_SESSION3 = 0x223, 114 + MLXSW_TRAP_ID_MIRROR_SESSION4 = 0x224, 115 + MLXSW_TRAP_ID_MIRROR_SESSION5 = 0x225, 116 + MLXSW_TRAP_ID_MIRROR_SESSION6 = 0x226, 117 + MLXSW_TRAP_ID_MIRROR_SESSION7 = 0x227, 110 118 111 - MLXSW_TRAP_ID_MAX = 0x1FF 119 + MLXSW_TRAP_ID_MAX = 0x3FF, 112 120 }; 113 121 114 122 enum mlxsw_event_trap_id {