Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlxsw-Introduce-TC-Flower-offload-using-TCAM'

Jiri Pirko says:

====================
mlxsw: Introduce TC Flower offload using TCAM

This patchset introduces support for offloading TC cls_flower and actions
to the Spectrum TCAM-based policy engine.

The patchset contains patches that allow working with the flexible keys and
actions which are used in the Spectrum TCAM.

It also contains in-driver infrastructure for offloading TC rules to TCAM HW.
The TCAM management code is simple and limited for now. It is going to be
extended as a follow-up work.

The last patch uses the previously introduced infra to allow implementing
cls_flower offloading. Initially, only a limited set of match-keys and only
drop and forward actions are supported.

As a dependency, this patchset introduces parman - priority array
area manager - as a library.

v1->v2:
- patch11:
- use __set_bit and __test_and_clear_bit as suggested by DaveM
- patch16:
- Added documentation to the API functions as suggested by Tom Herbert
- patch17:
- use __set_bit and __clear_bit as suggested by DaveM
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+5184 -16
+8
MAINTAINERS
··· 9382 9382 F: drivers/video/console/sti* 9383 9383 F: drivers/video/logo/logo_parisc* 9384 9384 9385 + PARMAN 9386 + M: Jiri Pirko <jiri@mellanox.com> 9387 + L: netdev@vger.kernel.org 9388 + S: Supported 9389 + F: lib/parman.c 9390 + F: lib/test_parman.c 9391 + F: include/linux/parman.h 9392 + 9385 9393 PC87360 HARDWARE MONITORING DRIVER 9386 9394 M: Jim Cromie <jim.cromie@gmail.com> 9387 9395 L: linux-hwmon@vger.kernel.org
+1
drivers/net/ethernet/mellanox/mlxsw/Kconfig
··· 73 73 config MLXSW_SPECTRUM 74 74 tristate "Mellanox Technologies Spectrum support" 75 75 depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q 76 + select PARMAN 76 77 default m 77 78 ---help--- 78 79 This driver supports Mellanox Technologies Spectrum Ethernet
+4 -2
drivers/net/ethernet/mellanox/mlxsw/Makefile
··· 1 1 obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o 2 - mlxsw_core-objs := core.o 2 + mlxsw_core-objs := core.o core_acl_flex_keys.o \ 3 + core_acl_flex_actions.o 3 4 mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o 4 5 mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o 5 6 obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o ··· 14 13 obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o 15 14 mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ 16 15 spectrum_switchdev.o spectrum_router.o \ 17 - spectrum_kvdl.o 16 + spectrum_kvdl.o spectrum_acl_tcam.o \ 17 + spectrum_acl.o spectrum_flower.o 18 18 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o 19 19 obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o 20 20 mlxsw_minimal-objs := minimal.o
+685
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/types.h> 37 + #include <linux/slab.h> 38 + #include <linux/errno.h> 39 + #include <linux/rhashtable.h> 40 + #include <linux/list.h> 41 + 42 + #include "item.h" 43 + #include "core_acl_flex_actions.h" 44 + 45 + enum mlxsw_afa_set_type { 46 + MLXSW_AFA_SET_TYPE_NEXT, 47 + MLXSW_AFA_SET_TYPE_GOTO, 48 + }; 49 + 50 + /* afa_set_type 51 + * Type of the record at the end of the action set. 52 + */ 53 + MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4); 54 + 55 + /* afa_set_next_action_set_ptr 56 + * A pointer to the next action set in the KVD Centralized database. 57 + */ 58 + MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24); 59 + 60 + /* afa_set_goto_g 61 + * group - When set, the binding is of an ACL group. When cleared, 62 + * the binding is of an ACL. 63 + * Must be set to 1 for Spectrum. 64 + */ 65 + MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1); 66 + 67 + enum mlxsw_afa_set_goto_binding_cmd { 68 + /* continue go the next binding point */ 69 + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 70 + /* jump to the next binding point no return */ 71 + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, 72 + /* terminate the acl binding */ 73 + MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4, 74 + }; 75 + 76 + /* afa_set_goto_binding_cmd */ 77 + MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3); 78 + 79 + /* afa_set_goto_next_binding 80 + * ACL/ACL group identifier. If the g bit is set, this field should hold 81 + * the acl_group_id, else it should hold the acl_id. 82 + */ 83 + MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16); 84 + 85 + /* afa_all_action_type 86 + * Action Type. 
87 + */ 88 + MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6); 89 + 90 + struct mlxsw_afa { 91 + unsigned int max_acts_per_set; 92 + const struct mlxsw_afa_ops *ops; 93 + void *ops_priv; 94 + struct rhashtable set_ht; 95 + struct rhashtable fwd_entry_ht; 96 + }; 97 + 98 + #define MLXSW_AFA_SET_LEN 0xA8 99 + 100 + struct mlxsw_afa_set_ht_key { 101 + char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */ 102 + bool is_first; 103 + }; 104 + 105 + /* Set structure holds one action set record. It contains up to three 106 + * actions (depends on size of particular actions). The set is either 107 + * put directly to a rule, or it is stored in KVD linear area. 108 + * To prevent duplicate entries in KVD linear area, a hashtable is 109 + * used to track sets that were previously inserted and may be shared. 110 + */ 111 + 112 + struct mlxsw_afa_set { 113 + struct rhash_head ht_node; 114 + struct mlxsw_afa_set_ht_key ht_key; 115 + u32 kvdl_index; 116 + bool shared; /* Inserted in hashtable (doesn't mean that 117 + * kvdl_index is valid). 118 + */ 119 + unsigned int ref_count; 120 + struct mlxsw_afa_set *next; /* Pointer to the next set. */ 121 + struct mlxsw_afa_set *prev; /* Pointer to the previous set, 122 + * note that set may have multiple 123 + * sets from multiple blocks 124 + * pointing at it. This is only 125 + * usable until commit. 
126 + */ 127 + }; 128 + 129 + static const struct rhashtable_params mlxsw_afa_set_ht_params = { 130 + .key_len = sizeof(struct mlxsw_afa_set_ht_key), 131 + .key_offset = offsetof(struct mlxsw_afa_set, ht_key), 132 + .head_offset = offsetof(struct mlxsw_afa_set, ht_node), 133 + .automatic_shrinking = true, 134 + }; 135 + 136 + struct mlxsw_afa_fwd_entry_ht_key { 137 + u8 local_port; 138 + }; 139 + 140 + struct mlxsw_afa_fwd_entry { 141 + struct rhash_head ht_node; 142 + struct mlxsw_afa_fwd_entry_ht_key ht_key; 143 + u32 kvdl_index; 144 + unsigned int ref_count; 145 + }; 146 + 147 + static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = { 148 + .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key), 149 + .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key), 150 + .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node), 151 + .automatic_shrinking = true, 152 + }; 153 + 154 + struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, 155 + const struct mlxsw_afa_ops *ops, 156 + void *ops_priv) 157 + { 158 + struct mlxsw_afa *mlxsw_afa; 159 + int err; 160 + 161 + mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL); 162 + if (!mlxsw_afa) 163 + return ERR_PTR(-ENOMEM); 164 + err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params); 165 + if (err) 166 + goto err_set_rhashtable_init; 167 + err = rhashtable_init(&mlxsw_afa->fwd_entry_ht, 168 + &mlxsw_afa_fwd_entry_ht_params); 169 + if (err) 170 + goto err_fwd_entry_rhashtable_init; 171 + mlxsw_afa->max_acts_per_set = max_acts_per_set; 172 + mlxsw_afa->ops = ops; 173 + mlxsw_afa->ops_priv = ops_priv; 174 + return mlxsw_afa; 175 + 176 + err_fwd_entry_rhashtable_init: 177 + rhashtable_destroy(&mlxsw_afa->set_ht); 178 + err_set_rhashtable_init: 179 + kfree(mlxsw_afa); 180 + return ERR_PTR(err); 181 + } 182 + EXPORT_SYMBOL(mlxsw_afa_create); 183 + 184 + void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa) 185 + { 186 + rhashtable_destroy(&mlxsw_afa->fwd_entry_ht); 187 + 
rhashtable_destroy(&mlxsw_afa->set_ht); 188 + kfree(mlxsw_afa); 189 + } 190 + EXPORT_SYMBOL(mlxsw_afa_destroy); 191 + 192 + static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set, 193 + enum mlxsw_afa_set_goto_binding_cmd cmd, 194 + u16 group_id) 195 + { 196 + char *actions = set->ht_key.enc_actions; 197 + 198 + mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO); 199 + mlxsw_afa_set_goto_g_set(actions, true); 200 + mlxsw_afa_set_goto_binding_cmd_set(actions, cmd); 201 + mlxsw_afa_set_goto_next_binding_set(actions, group_id); 202 + } 203 + 204 + static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set, 205 + u32 next_set_kvdl_index) 206 + { 207 + char *actions = set->ht_key.enc_actions; 208 + 209 + mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT); 210 + mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index); 211 + } 212 + 213 + static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first) 214 + { 215 + struct mlxsw_afa_set *set; 216 + 217 + set = kzalloc(sizeof(*set), GFP_KERNEL); 218 + if (!set) 219 + return NULL; 220 + /* Need to initialize the set to pass by default */ 221 + mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); 222 + set->ht_key.is_first = is_first; 223 + set->ref_count = 1; 224 + return set; 225 + } 226 + 227 + static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set) 228 + { 229 + kfree(set); 230 + } 231 + 232 + static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa, 233 + struct mlxsw_afa_set *set) 234 + { 235 + int err; 236 + 237 + err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node, 238 + mlxsw_afa_set_ht_params); 239 + if (err) 240 + return err; 241 + err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv, 242 + &set->kvdl_index, 243 + set->ht_key.enc_actions, 244 + set->ht_key.is_first); 245 + if (err) 246 + goto err_kvdl_set_add; 247 + set->shared = true; 248 + set->prev = NULL; 249 + return 0; 250 + 251 + err_kvdl_set_add: 252 + 
rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, 253 + mlxsw_afa_set_ht_params); 254 + return err; 255 + } 256 + 257 + static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa, 258 + struct mlxsw_afa_set *set) 259 + { 260 + mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv, 261 + set->kvdl_index, 262 + set->ht_key.is_first); 263 + rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, 264 + mlxsw_afa_set_ht_params); 265 + set->shared = false; 266 + } 267 + 268 + static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa, 269 + struct mlxsw_afa_set *set) 270 + { 271 + if (--set->ref_count) 272 + return; 273 + if (set->shared) 274 + mlxsw_afa_set_unshare(mlxsw_afa, set); 275 + mlxsw_afa_set_destroy(set); 276 + } 277 + 278 + static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa, 279 + struct mlxsw_afa_set *orig_set) 280 + { 281 + struct mlxsw_afa_set *set; 282 + int err; 283 + 284 + /* There is a hashtable of sets maintained. If a set with the exact 285 + * same encoding exists, we reuse it. Otherwise, the current set 286 + * is shared by making it available to others using the hash table. 287 + */ 288 + set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key, 289 + mlxsw_afa_set_ht_params); 290 + if (set) { 291 + set->ref_count++; 292 + mlxsw_afa_set_put(mlxsw_afa, orig_set); 293 + } else { 294 + set = orig_set; 295 + err = mlxsw_afa_set_share(mlxsw_afa, set); 296 + if (err) 297 + return ERR_PTR(err); 298 + } 299 + return set; 300 + } 301 + 302 + /* Block structure holds a list of action sets. One action block 303 + * represents one chain of actions executed upon match of a rule. 304 + */ 305 + 306 + struct mlxsw_afa_block { 307 + struct mlxsw_afa *afa; 308 + bool finished; 309 + struct mlxsw_afa_set *first_set; 310 + struct mlxsw_afa_set *cur_set; 311 + unsigned int cur_act_index; /* In current set. 
*/ 312 + struct list_head fwd_entry_ref_list; 313 + }; 314 + 315 + struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) 316 + { 317 + struct mlxsw_afa_block *block; 318 + 319 + block = kzalloc(sizeof(*block), GFP_KERNEL); 320 + if (!block) 321 + return NULL; 322 + INIT_LIST_HEAD(&block->fwd_entry_ref_list); 323 + block->afa = mlxsw_afa; 324 + 325 + /* At least one action set is always present, so just create it here */ 326 + block->first_set = mlxsw_afa_set_create(true); 327 + if (!block->first_set) 328 + goto err_first_set_create; 329 + block->cur_set = block->first_set; 330 + return block; 331 + 332 + err_first_set_create: 333 + kfree(block); 334 + return NULL; 335 + } 336 + EXPORT_SYMBOL(mlxsw_afa_block_create); 337 + 338 + static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block); 339 + 340 + void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block) 341 + { 342 + struct mlxsw_afa_set *set = block->first_set; 343 + struct mlxsw_afa_set *next_set; 344 + 345 + do { 346 + next_set = set->next; 347 + mlxsw_afa_set_put(block->afa, set); 348 + set = next_set; 349 + } while (set); 350 + mlxsw_afa_fwd_entry_refs_destroy(block); 351 + kfree(block); 352 + } 353 + EXPORT_SYMBOL(mlxsw_afa_block_destroy); 354 + 355 + int mlxsw_afa_block_commit(struct mlxsw_afa_block *block) 356 + { 357 + struct mlxsw_afa_set *set = block->cur_set; 358 + struct mlxsw_afa_set *prev_set; 359 + int err; 360 + 361 + block->cur_set = NULL; 362 + 363 + /* Go over all linked sets starting from last 364 + * and try to find existing set in the hash table. 365 + * In case it is not there, assign a KVD linear index 366 + * and insert it. 
367 + */ 368 + do { 369 + prev_set = set->prev; 370 + set = mlxsw_afa_set_get(block->afa, set); 371 + if (IS_ERR(set)) { 372 + err = PTR_ERR(set); 373 + goto rollback; 374 + } 375 + if (prev_set) { 376 + prev_set->next = set; 377 + mlxsw_afa_set_next_set(prev_set, set->kvdl_index); 378 + set = prev_set; 379 + } 380 + } while (prev_set); 381 + 382 + block->first_set = set; 383 + block->finished = true; 384 + return 0; 385 + 386 + rollback: 387 + while ((set = set->next)) 388 + mlxsw_afa_set_put(block->afa, set); 389 + return err; 390 + } 391 + EXPORT_SYMBOL(mlxsw_afa_block_commit); 392 + 393 + char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) 394 + { 395 + return block->first_set->ht_key.enc_actions; 396 + } 397 + EXPORT_SYMBOL(mlxsw_afa_block_first_set); 398 + 399 + u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) 400 + { 401 + return block->first_set->kvdl_index; 402 + } 403 + EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); 404 + 405 + void mlxsw_afa_block_continue(struct mlxsw_afa_block *block) 406 + { 407 + if (WARN_ON(block->finished)) 408 + return; 409 + mlxsw_afa_set_goto_set(block->cur_set, 410 + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0); 411 + block->finished = true; 412 + } 413 + EXPORT_SYMBOL(mlxsw_afa_block_continue); 414 + 415 + void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) 416 + { 417 + if (WARN_ON(block->finished)) 418 + return; 419 + mlxsw_afa_set_goto_set(block->cur_set, 420 + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id); 421 + block->finished = true; 422 + } 423 + EXPORT_SYMBOL(mlxsw_afa_block_jump); 424 + 425 + static struct mlxsw_afa_fwd_entry * 426 + mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) 427 + { 428 + struct mlxsw_afa_fwd_entry *fwd_entry; 429 + int err; 430 + 431 + fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL); 432 + if (!fwd_entry) 433 + return ERR_PTR(-ENOMEM); 434 + fwd_entry->ht_key.local_port = local_port; 435 + fwd_entry->ref_count = 
1; 436 + 437 + err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht, 438 + &fwd_entry->ht_node, 439 + mlxsw_afa_fwd_entry_ht_params); 440 + if (err) 441 + goto err_rhashtable_insert; 442 + 443 + err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv, 444 + &fwd_entry->kvdl_index, 445 + local_port); 446 + if (err) 447 + goto err_kvdl_fwd_entry_add; 448 + return fwd_entry; 449 + 450 + err_kvdl_fwd_entry_add: 451 + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, 452 + mlxsw_afa_fwd_entry_ht_params); 453 + err_rhashtable_insert: 454 + kfree(fwd_entry); 455 + return ERR_PTR(err); 456 + } 457 + 458 + static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa, 459 + struct mlxsw_afa_fwd_entry *fwd_entry) 460 + { 461 + mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv, 462 + fwd_entry->kvdl_index); 463 + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, 464 + mlxsw_afa_fwd_entry_ht_params); 465 + kfree(fwd_entry); 466 + } 467 + 468 + static struct mlxsw_afa_fwd_entry * 469 + mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port) 470 + { 471 + struct mlxsw_afa_fwd_entry_ht_key ht_key = {0}; 472 + struct mlxsw_afa_fwd_entry *fwd_entry; 473 + 474 + ht_key.local_port = local_port; 475 + fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key, 476 + mlxsw_afa_fwd_entry_ht_params); 477 + if (fwd_entry) { 478 + fwd_entry->ref_count++; 479 + return fwd_entry; 480 + } 481 + return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port); 482 + } 483 + 484 + static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa, 485 + struct mlxsw_afa_fwd_entry *fwd_entry) 486 + { 487 + if (--fwd_entry->ref_count) 488 + return; 489 + mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry); 490 + } 491 + 492 + struct mlxsw_afa_fwd_entry_ref { 493 + struct list_head list; 494 + struct mlxsw_afa_fwd_entry *fwd_entry; 495 + }; 496 + 497 + static struct mlxsw_afa_fwd_entry_ref * 498 + 
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) 499 + { 500 + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; 501 + struct mlxsw_afa_fwd_entry *fwd_entry; 502 + int err; 503 + 504 + fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL); 505 + if (!fwd_entry_ref) 506 + return ERR_PTR(-ENOMEM); 507 + fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port); 508 + if (IS_ERR(fwd_entry)) { 509 + err = PTR_ERR(fwd_entry); 510 + goto err_fwd_entry_get; 511 + } 512 + fwd_entry_ref->fwd_entry = fwd_entry; 513 + list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list); 514 + return fwd_entry_ref; 515 + 516 + err_fwd_entry_get: 517 + kfree(fwd_entry_ref); 518 + return ERR_PTR(err); 519 + } 520 + 521 + static void 522 + mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, 523 + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) 524 + { 525 + list_del(&fwd_entry_ref->list); 526 + mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); 527 + kfree(fwd_entry_ref); 528 + } 529 + 530 + static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block) 531 + { 532 + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; 533 + struct mlxsw_afa_fwd_entry_ref *tmp; 534 + 535 + list_for_each_entry_safe(fwd_entry_ref, tmp, 536 + &block->fwd_entry_ref_list, list) 537 + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); 538 + } 539 + 540 + #define MLXSW_AFA_ONE_ACTION_LEN 32 541 + #define MLXSW_AFA_PAYLOAD_OFFSET 4 542 + 543 + static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, 544 + u8 action_code, u8 action_size) 545 + { 546 + char *oneact; 547 + char *actions; 548 + 549 + if (WARN_ON(block->finished)) 550 + return NULL; 551 + if (block->cur_act_index + action_size > 552 + block->afa->max_acts_per_set) { 553 + struct mlxsw_afa_set *set; 554 + 555 + /* The appended action won't fit into the current action set, 556 + * so create a new set. 
557 + */ 558 + set = mlxsw_afa_set_create(false); 559 + if (!set) 560 + return NULL; 561 + set->prev = block->cur_set; 562 + block->cur_act_index = 0; 563 + block->cur_set->next = set; 564 + block->cur_set = set; 565 + } 566 + 567 + actions = block->cur_set->ht_key.enc_actions; 568 + oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN; 569 + block->cur_act_index += action_size; 570 + mlxsw_afa_all_action_type_set(oneact, action_code); 571 + return oneact + MLXSW_AFA_PAYLOAD_OFFSET; 572 + } 573 + 574 + /* Trap / Discard Action 575 + * --------------------- 576 + * The Trap / Discard action enables trapping / mirroring packets to the CPU 577 + * as well as discarding packets. 578 + * The ACL Trap / Discard separates the forward/discard control from CPU 579 + * trap control. In addition, the Trap / Discard action enables activating 580 + * SPAN (port mirroring). 581 + */ 582 + 583 + #define MLXSW_AFA_TRAPDISC_CODE 0x03 584 + #define MLXSW_AFA_TRAPDISC_SIZE 1 585 + 586 + enum mlxsw_afa_trapdisc_forward_action { 587 + MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3, 588 + }; 589 + 590 + /* afa_trapdisc_forward_action 591 + * Forward Action. 
592 + */ 593 + MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4); 594 + 595 + static inline void 596 + mlxsw_afa_trapdisc_pack(char *payload, 597 + enum mlxsw_afa_trapdisc_forward_action forward_action) 598 + { 599 + mlxsw_afa_trapdisc_forward_action_set(payload, forward_action); 600 + } 601 + 602 + int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) 603 + { 604 + char *act = mlxsw_afa_block_append_action(block, 605 + MLXSW_AFA_TRAPDISC_CODE, 606 + MLXSW_AFA_TRAPDISC_SIZE); 607 + 608 + if (!act) 609 + return -ENOBUFS; 610 + mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD); 611 + return 0; 612 + } 613 + EXPORT_SYMBOL(mlxsw_afa_block_append_drop); 614 + 615 + /* Forwarding Action 616 + * ----------------- 617 + * Forwarding Action can be used to implement Policy Based Switching (PBS) 618 + * as well as OpenFlow related "Output" action. 619 + */ 620 + 621 + #define MLXSW_AFA_FORWARD_CODE 0x07 622 + #define MLXSW_AFA_FORWARD_SIZE 1 623 + 624 + enum mlxsw_afa_forward_type { 625 + /* PBS, Policy Based Switching */ 626 + MLXSW_AFA_FORWARD_TYPE_PBS, 627 + /* Output, OpenFlow output type */ 628 + MLXSW_AFA_FORWARD_TYPE_OUTPUT, 629 + }; 630 + 631 + /* afa_forward_type */ 632 + MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2); 633 + 634 + /* afa_forward_pbs_ptr 635 + * A pointer to the PBS entry configured by PPBS register. 636 + * Reserved when in_port is set. 637 + */ 638 + MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24); 639 + 640 + /* afa_forward_in_port 641 + * Packet is forwarded back to the ingress port. 
642 + */ 643 + MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1); 644 + 645 + static inline void 646 + mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, 647 + u32 pbs_ptr, bool in_port) 648 + { 649 + mlxsw_afa_forward_type_set(payload, type); 650 + mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr); 651 + mlxsw_afa_forward_in_port_set(payload, in_port); 652 + } 653 + 654 + int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, 655 + u8 local_port, bool in_port) 656 + { 657 + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; 658 + u32 kvdl_index = 0; 659 + char *act; 660 + int err; 661 + 662 + if (!in_port) { 663 + fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, 664 + local_port); 665 + if (IS_ERR(fwd_entry_ref)) 666 + return PTR_ERR(fwd_entry_ref); 667 + kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; 668 + } 669 + 670 + act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, 671 + MLXSW_AFA_FORWARD_SIZE); 672 + if (!act) { 673 + err = -ENOBUFS; 674 + goto err_append_action; 675 + } 676 + mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_OUTPUT, 677 + kvdl_index, in_port); 678 + return 0; 679 + 680 + err_append_action: 681 + if (!in_port) 682 + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); 683 + return err; 684 + } 685 + EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
+66
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H 36 + #define _MLXSW_CORE_ACL_FLEX_ACTIONS_H 37 + 38 + #include <linux/types.h> 39 + 40 + struct mlxsw_afa; 41 + struct mlxsw_afa_block; 42 + 43 + struct mlxsw_afa_ops { 44 + int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index, 45 + char *enc_actions, bool is_first); 46 + void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); 47 + int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); 48 + void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); 49 + }; 50 + 51 + struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, 52 + const struct mlxsw_afa_ops *ops, 53 + void *ops_priv); 54 + void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa); 55 + struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); 56 + void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); 57 + int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); 58 + char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); 59 + u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); 60 + void mlxsw_afa_block_continue(struct mlxsw_afa_block *block); 61 + void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); 62 + int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); 63 + int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, 64 + u8 local_port, bool in_port); 65 + 66 + #endif
+475
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/slab.h> 37 + #include <linux/list.h> 38 + #include <linux/errno.h> 39 + 40 + #include "item.h" 41 + #include "core_acl_flex_keys.h" 42 + 43 + struct mlxsw_afk { 44 + struct list_head key_info_list; 45 + unsigned int max_blocks; 46 + const struct mlxsw_afk_block *blocks; 47 + unsigned int blocks_count; 48 + }; 49 + 50 + static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) 51 + { 52 + int i; 53 + int j; 54 + 55 + for (i = 0; i < mlxsw_afk->blocks_count; i++) { 56 + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; 57 + 58 + for (j = 0; j < block->instances_count; j++) { 59 + struct mlxsw_afk_element_inst *elinst; 60 + 61 + elinst = &block->instances[j]; 62 + if (elinst->type != elinst->info->type || 63 + elinst->item.size.bits != 64 + elinst->info->item.size.bits) 65 + return false; 66 + } 67 + } 68 + return true; 69 + } 70 + 71 + struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, 72 + const struct mlxsw_afk_block *blocks, 73 + unsigned int blocks_count) 74 + { 75 + struct mlxsw_afk *mlxsw_afk; 76 + 77 + mlxsw_afk = kzalloc(sizeof(*mlxsw_afk), GFP_KERNEL); 78 + if (!mlxsw_afk) 79 + return NULL; 80 + INIT_LIST_HEAD(&mlxsw_afk->key_info_list); 81 + mlxsw_afk->max_blocks = max_blocks; 82 + mlxsw_afk->blocks = blocks; 83 + mlxsw_afk->blocks_count = blocks_count; 84 + WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk)); 85 + return mlxsw_afk; 86 + } 87 + EXPORT_SYMBOL(mlxsw_afk_create); 88 + 89 + void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk) 90 + { 91 + WARN_ON(!list_empty(&mlxsw_afk->key_info_list)); 92 + kfree(mlxsw_afk); 93 + } 94 + EXPORT_SYMBOL(mlxsw_afk_destroy); 95 + 96 + struct mlxsw_afk_key_info { 97 + struct list_head list; 98 + unsigned int ref_count; 99 + unsigned int blocks_count; 100 + int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value 101 + * is index inside "blocks" 102 + */ 103 + struct mlxsw_afk_element_usage elusage; 104 + const struct 
mlxsw_afk_block *blocks[0]; 105 + }; 106 + 107 + static bool 108 + mlxsw_afk_key_info_elements_eq(struct mlxsw_afk_key_info *key_info, 109 + struct mlxsw_afk_element_usage *elusage) 110 + { 111 + return memcmp(&key_info->elusage, elusage, sizeof(*elusage)) == 0; 112 + } 113 + 114 + static struct mlxsw_afk_key_info * 115 + mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk, 116 + struct mlxsw_afk_element_usage *elusage) 117 + { 118 + struct mlxsw_afk_key_info *key_info; 119 + 120 + list_for_each_entry(key_info, &mlxsw_afk->key_info_list, list) { 121 + if (mlxsw_afk_key_info_elements_eq(key_info, elusage)) 122 + return key_info; 123 + } 124 + return NULL; 125 + } 126 + 127 + struct mlxsw_afk_picker { 128 + struct { 129 + DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); 130 + unsigned int total; 131 + } hits[0]; 132 + }; 133 + 134 + static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk, 135 + struct mlxsw_afk_picker *picker, 136 + enum mlxsw_afk_element element) 137 + { 138 + int i; 139 + int j; 140 + 141 + for (i = 0; i < mlxsw_afk->blocks_count; i++) { 142 + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; 143 + 144 + for (j = 0; j < block->instances_count; j++) { 145 + struct mlxsw_afk_element_inst *elinst; 146 + 147 + elinst = &block->instances[j]; 148 + if (elinst->info->element == element) { 149 + __set_bit(element, picker->hits[i].element); 150 + picker->hits[i].total++; 151 + } 152 + } 153 + } 154 + } 155 + 156 + static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk, 157 + struct mlxsw_afk_picker *picker, 158 + int block_index) 159 + { 160 + DECLARE_BITMAP(hits_element, MLXSW_AFK_ELEMENT_MAX); 161 + int i; 162 + int j; 163 + 164 + memcpy(&hits_element, &picker->hits[block_index].element, 165 + sizeof(hits_element)); 166 + 167 + for (i = 0; i < mlxsw_afk->blocks_count; i++) { 168 + for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) { 169 + if (__test_and_clear_bit(j, picker->hits[i].element)) 170 + 
picker->hits[i].total--; 171 + } 172 + } 173 + } 174 + 175 + static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk, 176 + struct mlxsw_afk_picker *picker) 177 + { 178 + int most_index = -EINVAL; /* Should never happen to return this */ 179 + int most_hits = 0; 180 + int i; 181 + 182 + for (i = 0; i < mlxsw_afk->blocks_count; i++) { 183 + if (picker->hits[i].total > most_hits) { 184 + most_hits = picker->hits[i].total; 185 + most_index = i; 186 + } 187 + } 188 + return most_index; 189 + } 190 + 191 + static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, 192 + struct mlxsw_afk_picker *picker, 193 + int block_index, 194 + struct mlxsw_afk_key_info *key_info) 195 + { 196 + enum mlxsw_afk_element element; 197 + 198 + if (key_info->blocks_count == mlxsw_afk->max_blocks) 199 + return -EINVAL; 200 + 201 + for_each_set_bit(element, picker->hits[block_index].element, 202 + MLXSW_AFK_ELEMENT_MAX) { 203 + key_info->element_to_block[element] = key_info->blocks_count; 204 + mlxsw_afk_element_usage_add(&key_info->elusage, element); 205 + } 206 + 207 + key_info->blocks[key_info->blocks_count] = 208 + &mlxsw_afk->blocks[block_index]; 209 + key_info->blocks_count++; 210 + return 0; 211 + } 212 + 213 + static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, 214 + struct mlxsw_afk_key_info *key_info, 215 + struct mlxsw_afk_element_usage *elusage) 216 + { 217 + struct mlxsw_afk_picker *picker; 218 + enum mlxsw_afk_element element; 219 + size_t alloc_size; 220 + int err; 221 + 222 + alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count; 223 + picker = kzalloc(alloc_size, GFP_KERNEL); 224 + if (!picker) 225 + return -ENOMEM; 226 + 227 + /* Since the same elements could be present in multiple blocks, 228 + * we must find out optimal block list in order to make the 229 + * block count as low as possible. 230 + * 231 + * First, we count hits. We go over all available blocks and count 232 + * how many of requested elements are covered by each. 
233 + * 234 + * Then in loop, we find block with most hits and add it to 235 + * output key_info. Then we have to subtract this block hits so 236 + * the next iteration will find most suitable block for 237 + * the rest of requested elements. 238 + */ 239 + 240 + mlxsw_afk_element_usage_for_each(element, elusage) 241 + mlxsw_afk_picker_count_hits(mlxsw_afk, picker, element); 242 + 243 + do { 244 + int block_index; 245 + 246 + block_index = mlxsw_afk_picker_most_hits_get(mlxsw_afk, picker); 247 + if (block_index < 0) { 248 + err = block_index; 249 + goto out; 250 + } 251 + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, 252 + block_index, key_info); 253 + if (err) 254 + goto out; 255 + mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index); 256 + } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage)); 257 + 258 + err = 0; 259 + out: 260 + kfree(picker); 261 + return err; 262 + } 263 + 264 + static struct mlxsw_afk_key_info * 265 + mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk, 266 + struct mlxsw_afk_element_usage *elusage) 267 + { 268 + struct mlxsw_afk_key_info *key_info; 269 + size_t alloc_size; 270 + int err; 271 + 272 + alloc_size = sizeof(*key_info) + 273 + sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks; 274 + key_info = kzalloc(alloc_size, GFP_KERNEL); 275 + if (!key_info) 276 + return ERR_PTR(-ENOMEM); 277 + err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage); 278 + if (err) 279 + goto err_picker; 280 + list_add(&key_info->list, &mlxsw_afk->key_info_list); 281 + key_info->ref_count = 1; 282 + return key_info; 283 + 284 + err_picker: 285 + kfree(key_info); 286 + return ERR_PTR(err); 287 + } 288 + 289 + static void mlxsw_afk_key_info_destroy(struct mlxsw_afk_key_info *key_info) 290 + { 291 + list_del(&key_info->list); 292 + kfree(key_info); 293 + } 294 + 295 + struct mlxsw_afk_key_info * 296 + mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, 297 + struct mlxsw_afk_element_usage *elusage) 298 + { 299 + struct 
mlxsw_afk_key_info *key_info; 300 + 301 + key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage); 302 + if (key_info) { 303 + key_info->ref_count++; 304 + return key_info; 305 + } 306 + return mlxsw_afk_key_info_create(mlxsw_afk, elusage); 307 + } 308 + EXPORT_SYMBOL(mlxsw_afk_key_info_get); 309 + 310 + void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info) 311 + { 312 + if (--key_info->ref_count) 313 + return; 314 + mlxsw_afk_key_info_destroy(key_info); 315 + } 316 + EXPORT_SYMBOL(mlxsw_afk_key_info_put); 317 + 318 + bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, 319 + struct mlxsw_afk_element_usage *elusage) 320 + { 321 + return mlxsw_afk_element_usage_subset(elusage, &key_info->elusage); 322 + } 323 + EXPORT_SYMBOL(mlxsw_afk_key_info_subset); 324 + 325 + static const struct mlxsw_afk_element_inst * 326 + mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block, 327 + enum mlxsw_afk_element element) 328 + { 329 + int i; 330 + 331 + for (i = 0; i < block->instances_count; i++) { 332 + struct mlxsw_afk_element_inst *elinst; 333 + 334 + elinst = &block->instances[i]; 335 + if (elinst->info->element == element) 336 + return elinst; 337 + } 338 + return NULL; 339 + } 340 + 341 + static const struct mlxsw_afk_element_inst * 342 + mlxsw_afk_key_info_elinst_get(struct mlxsw_afk_key_info *key_info, 343 + enum mlxsw_afk_element element, 344 + int *p_block_index) 345 + { 346 + const struct mlxsw_afk_element_inst *elinst; 347 + const struct mlxsw_afk_block *block; 348 + int block_index; 349 + 350 + if (WARN_ON(!test_bit(element, key_info->elusage.usage))) 351 + return NULL; 352 + block_index = key_info->element_to_block[element]; 353 + block = key_info->blocks[block_index]; 354 + 355 + elinst = mlxsw_afk_block_elinst_get(block, element); 356 + if (WARN_ON(!elinst)) 357 + return NULL; 358 + 359 + *p_block_index = block_index; 360 + return elinst; 361 + } 362 + 363 + u16 364 + mlxsw_afk_key_info_block_encoding_get(const struct 
mlxsw_afk_key_info *key_info, 365 + int block_index) 366 + { 367 + return key_info->blocks[block_index]->encoding; 368 + } 369 + EXPORT_SYMBOL(mlxsw_afk_key_info_block_encoding_get); 370 + 371 + unsigned int 372 + mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info) 373 + { 374 + return key_info->blocks_count; 375 + } 376 + EXPORT_SYMBOL(mlxsw_afk_key_info_blocks_count_get); 377 + 378 + void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, 379 + enum mlxsw_afk_element element, 380 + u32 key_value, u32 mask_value) 381 + { 382 + const struct mlxsw_afk_element_info *elinfo = 383 + &mlxsw_afk_element_infos[element]; 384 + const struct mlxsw_item *storage_item = &elinfo->item; 385 + 386 + if (!mask_value) 387 + return; 388 + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_U32)) 389 + return; 390 + __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value); 391 + __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value); 392 + mlxsw_afk_element_usage_add(&values->elusage, element); 393 + } 394 + EXPORT_SYMBOL(mlxsw_afk_values_add_u32); 395 + 396 + void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, 397 + enum mlxsw_afk_element element, 398 + const char *key_value, const char *mask_value, 399 + unsigned int len) 400 + { 401 + const struct mlxsw_afk_element_info *elinfo = 402 + &mlxsw_afk_element_infos[element]; 403 + const struct mlxsw_item *storage_item = &elinfo->item; 404 + 405 + if (!memchr_inv(mask_value, 0, len)) /* If mask is zero */ 406 + return; 407 + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_BUF) || 408 + WARN_ON(elinfo->item.size.bytes != len)) 409 + return; 410 + __mlxsw_item_memcpy_to(values->storage.key, key_value, 411 + storage_item, 0); 412 + __mlxsw_item_memcpy_to(values->storage.mask, mask_value, 413 + storage_item, 0); 414 + mlxsw_afk_element_usage_add(&values->elusage, element); 415 + } 416 + EXPORT_SYMBOL(mlxsw_afk_values_add_buf); 417 + 418 + static void 
mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item, 419 + const struct mlxsw_item *output_item, 420 + char *storage, char *output_indexed) 421 + { 422 + u32 value; 423 + 424 + value = __mlxsw_item_get32(storage, storage_item, 0); 425 + __mlxsw_item_set32(output_indexed, output_item, 0, value); 426 + } 427 + 428 + static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item, 429 + const struct mlxsw_item *output_item, 430 + char *storage, char *output_indexed) 431 + { 432 + char *storage_data = __mlxsw_item_data(storage, storage_item, 0); 433 + char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); 434 + size_t len = output_item->size.bytes; 435 + 436 + memcpy(output_data, storage_data, len); 437 + } 438 + 439 + #define MLXSW_AFK_KEY_BLOCK_SIZE 16 440 + 441 + static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, 442 + int block_index, char *storage, char *output) 443 + { 444 + char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE; 445 + const struct mlxsw_item *storage_item = &elinst->info->item; 446 + const struct mlxsw_item *output_item = &elinst->item; 447 + 448 + if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) 449 + mlxsw_afk_encode_u32(storage_item, output_item, 450 + storage, output_indexed); 451 + else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) 452 + mlxsw_afk_encode_buf(storage_item, output_item, 453 + storage, output_indexed); 454 + } 455 + 456 + void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, 457 + struct mlxsw_afk_element_values *values, 458 + char *key, char *mask) 459 + { 460 + const struct mlxsw_afk_element_inst *elinst; 461 + enum mlxsw_afk_element element; 462 + int block_index; 463 + 464 + mlxsw_afk_element_usage_for_each(element, &values->elusage) { 465 + elinst = mlxsw_afk_key_info_elinst_get(key_info, element, 466 + &block_index); 467 + if (!elinst) 468 + continue; 469 + mlxsw_afk_encode_one(elinst, block_index, 470 + values->storage.key, key); 471 + 
mlxsw_afk_encode_one(elinst, block_index, 472 + values->storage.mask, mask); 473 + } 474 + } 475 + EXPORT_SYMBOL(mlxsw_afk_encode);
+238
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H 36 + #define _MLXSW_CORE_ACL_FLEX_KEYS_H 37 + 38 + #include <linux/types.h> 39 + #include <linux/bitmap.h> 40 + 41 + #include "item.h" 42 + 43 + enum mlxsw_afk_element { 44 + MLXSW_AFK_ELEMENT_SRC_SYS_PORT, 45 + MLXSW_AFK_ELEMENT_DMAC, 46 + MLXSW_AFK_ELEMENT_SMAC, 47 + MLXSW_AFK_ELEMENT_ETHERTYPE, 48 + MLXSW_AFK_ELEMENT_IP_PROTO, 49 + MLXSW_AFK_ELEMENT_SRC_IP4, 50 + MLXSW_AFK_ELEMENT_DST_IP4, 51 + MLXSW_AFK_ELEMENT_SRC_IP6_HI, 52 + MLXSW_AFK_ELEMENT_SRC_IP6_LO, 53 + MLXSW_AFK_ELEMENT_DST_IP6_HI, 54 + MLXSW_AFK_ELEMENT_DST_IP6_LO, 55 + MLXSW_AFK_ELEMENT_DST_L4_PORT, 56 + MLXSW_AFK_ELEMENT_SRC_L4_PORT, 57 + MLXSW_AFK_ELEMENT_MAX, 58 + }; 59 + 60 + enum mlxsw_afk_element_type { 61 + MLXSW_AFK_ELEMENT_TYPE_U32, 62 + MLXSW_AFK_ELEMENT_TYPE_BUF, 63 + }; 64 + 65 + struct mlxsw_afk_element_info { 66 + enum mlxsw_afk_element element; /* element ID */ 67 + enum mlxsw_afk_element_type type; 68 + struct mlxsw_item item; /* element geometry in internal storage */ 69 + }; 70 + 71 + #define MLXSW_AFK_ELEMENT_INFO(_type, _element, _offset, _shift, _size) \ 72 + [MLXSW_AFK_ELEMENT_##_element] = { \ 73 + .element = MLXSW_AFK_ELEMENT_##_element, \ 74 + .type = _type, \ 75 + .item = { \ 76 + .offset = _offset, \ 77 + .shift = _shift, \ 78 + .size = {.bits = _size}, \ 79 + .name = #_element, \ 80 + }, \ 81 + } 82 + 83 + #define MLXSW_AFK_ELEMENT_INFO_U32(_element, _offset, _shift, _size) \ 84 + MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_U32, \ 85 + _element, _offset, _shift, _size) 86 + 87 + #define MLXSW_AFK_ELEMENT_INFO_BUF(_element, _offset, _size) \ 88 + MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \ 89 + _element, _offset, 0, _size) 90 + 91 + /* For the purpose of the driver, define a internal storage scratchpad 92 + * that will be used to store key/mask values. For each defined element type 93 + * define an internal storage geometry. 
94 + */ 95 + static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { 96 + MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), 97 + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6), 98 + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), 99 + MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), 100 + MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), 101 + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), 102 + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), 103 + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), 104 + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), 105 + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), 106 + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), 107 + MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), 108 + MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), 109 + }; 110 + 111 + #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 112 + 113 + struct mlxsw_afk_element_inst { /* element instance in actual block */ 114 + const struct mlxsw_afk_element_info *info; 115 + enum mlxsw_afk_element_type type; 116 + struct mlxsw_item item; /* element geometry in block */ 117 + }; 118 + 119 + #define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, _shift, _size) \ 120 + { \ 121 + .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \ 122 + .type = _type, \ 123 + .item = { \ 124 + .offset = _offset, \ 125 + .shift = _shift, \ 126 + .size = {.bits = _size}, \ 127 + .name = #_element, \ 128 + }, \ 129 + } 130 + 131 + #define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size) \ 132 + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \ 133 + _element, _offset, _shift, _size) 134 + 135 + #define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size) \ 136 + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF, \ 137 + _element, _offset, 0, _size) 138 + 139 + struct mlxsw_afk_block { 140 + u16 encoding; /* block ID */ 141 + struct mlxsw_afk_element_inst *instances; 142 + unsigned int instances_count; 143 + }; 144 + 145 + #define 
MLXSW_AFK_BLOCK(_encoding, _instances) \ 146 + { \ 147 + .encoding = _encoding, \ 148 + .instances = _instances, \ 149 + .instances_count = ARRAY_SIZE(_instances), \ 150 + } 151 + 152 + struct mlxsw_afk_element_usage { 153 + DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX); 154 + }; 155 + 156 + #define mlxsw_afk_element_usage_for_each(element, elusage) \ 157 + for_each_set_bit(element, (elusage)->usage, MLXSW_AFK_ELEMENT_MAX) 158 + 159 + static inline void 160 + mlxsw_afk_element_usage_add(struct mlxsw_afk_element_usage *elusage, 161 + enum mlxsw_afk_element element) 162 + { 163 + __set_bit(element, elusage->usage); 164 + } 165 + 166 + static inline void 167 + mlxsw_afk_element_usage_zero(struct mlxsw_afk_element_usage *elusage) 168 + { 169 + bitmap_zero(elusage->usage, MLXSW_AFK_ELEMENT_MAX); 170 + } 171 + 172 + static inline void 173 + mlxsw_afk_element_usage_fill(struct mlxsw_afk_element_usage *elusage, 174 + const enum mlxsw_afk_element *elements, 175 + unsigned int elements_count) 176 + { 177 + int i; 178 + 179 + mlxsw_afk_element_usage_zero(elusage); 180 + for (i = 0; i < elements_count; i++) 181 + mlxsw_afk_element_usage_add(elusage, elements[i]); 182 + } 183 + 184 + static inline bool 185 + mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small, 186 + struct mlxsw_afk_element_usage *elusage_big) 187 + { 188 + int i; 189 + 190 + for (i = 0; i < MLXSW_AFK_ELEMENT_MAX; i++) 191 + if (test_bit(i, elusage_small->usage) && 192 + !test_bit(i, elusage_big->usage)) 193 + return false; 194 + return true; 195 + } 196 + 197 + struct mlxsw_afk; 198 + 199 + struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, 200 + const struct mlxsw_afk_block *blocks, 201 + unsigned int blocks_count); 202 + void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk); 203 + 204 + struct mlxsw_afk_key_info; 205 + 206 + struct mlxsw_afk_key_info * 207 + mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, 208 + struct mlxsw_afk_element_usage *elusage); 209 + void 
mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info); 210 + bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, 211 + struct mlxsw_afk_element_usage *elusage); 212 + 213 + u16 214 + mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info, 215 + int block_index); 216 + unsigned int 217 + mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info); 218 + 219 + struct mlxsw_afk_element_values { 220 + struct mlxsw_afk_element_usage elusage; 221 + struct { 222 + char key[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; 223 + char mask[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; 224 + } storage; 225 + }; 226 + 227 + void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, 228 + enum mlxsw_afk_element element, 229 + u32 key_value, u32 mask_value); 230 + void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, 231 + enum mlxsw_afk_element element, 232 + const char *key_value, const char *mask_value, 233 + unsigned int len); 234 + void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, 235 + struct mlxsw_afk_element_values *values, 236 + char *key, char *mask); 237 + 238 + #endif
+96 -2
drivers/net/ethernet/mellanox/mlxsw/item.h
··· 1 1 /* 2 2 * drivers/net/ethernet/mellanox/mlxsw/item.h 3 - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 3 + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> 5 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 6 * 7 7 * Redistribution and use in source and binary forms, with or without ··· 70 70 71 71 return ((item->offset + item->step * index + item->in_step_offset) / 72 72 typesize); 73 + } 74 + 75 + static inline u8 __mlxsw_item_get8(const char *buf, 76 + const struct mlxsw_item *item, 77 + unsigned short index) 78 + { 79 + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8)); 80 + u8 *b = (u8 *) buf; 81 + u8 tmp; 82 + 83 + tmp = b[offset]; 84 + tmp >>= item->shift; 85 + tmp &= GENMASK(item->size.bits - 1, 0); 86 + if (item->no_real_shift) 87 + tmp <<= item->shift; 88 + return tmp; 89 + } 90 + 91 + static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item, 92 + unsigned short index, u8 val) 93 + { 94 + unsigned int offset = __mlxsw_item_offset(item, index, 95 + sizeof(u8)); 96 + u8 *b = (u8 *) buf; 97 + u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift; 98 + u8 tmp; 99 + 100 + if (!item->no_real_shift) 101 + val <<= item->shift; 102 + val &= mask; 103 + tmp = b[offset]; 104 + tmp &= ~mask; 105 + tmp |= val; 106 + b[offset] = tmp; 73 107 } 74 108 75 109 static inline u16 __mlxsw_item_get16(const char *buf, ··· 225 191 memcpy(&buf[offset], src, item->size.bytes); 226 192 } 227 193 194 + static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item, 195 + unsigned short index) 196 + { 197 + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); 198 + 199 + return &buf[offset]; 200 + } 201 + 228 202 static inline u16 229 203 __mlxsw_item_bit_array_offset(const struct mlxsw_item *item, 230 204 u16 index, u8 *shift) ··· 294 252 
* _cname: containter name (e.g. command name, register name) 295 253 * _iname: item name within the container 296 254 */ 255 + 256 + #define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \ 257 + static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ 258 + .offset = _offset, \ 259 + .shift = _shift, \ 260 + .size = {.bits = _sizebits,}, \ 261 + .name = #_type "_" #_cname "_" #_iname, \ 262 + }; \ 263 + static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ 264 + { \ 265 + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ 266 + } \ 267 + static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\ 268 + { \ 269 + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ 270 + } 271 + 272 + #define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ 273 + _step, _instepoffset, _norealshift) \ 274 + static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ 275 + .offset = _offset, \ 276 + .step = _step, \ 277 + .in_step_offset = _instepoffset, \ 278 + .shift = _shift, \ 279 + .no_real_shift = _norealshift, \ 280 + .size = {.bits = _sizebits,}, \ 281 + .name = #_type "_" #_cname "_" #_iname, \ 282 + }; \ 283 + static inline u8 \ 284 + mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ 285 + { \ 286 + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \ 287 + index); \ 288 + } \ 289 + static inline void \ 290 + mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ 291 + u8 val) \ 292 + { \ 293 + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \ 294 + index, val); \ 295 + } 297 296 298 297 #define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \ 299 298 static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ ··· 476 393 { \ 477 394 __mlxsw_item_memcpy_to(buf, src, \ 478 395 &__ITEM_NAME(_type, _cname, _iname), 0); \ 396 + } \ 397 + 
static inline char * \ 398 + mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \ 399 + { \ 400 + return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ 479 401 } 480 402 481 403 #define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \ ··· 507 419 { \ 508 420 __mlxsw_item_memcpy_to(buf, src, \ 509 421 &__ITEM_NAME(_type, _cname, _iname), index); \ 422 + } \ 423 + static inline char * \ 424 + mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \ 425 + { \ 426 + return __mlxsw_item_data(buf, \ 427 + &__ITEM_NAME(_type, _cname, _iname), index); \ 510 428 } 511 429 512 430 #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
+509 -2
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 1 1 /* 2 2 * drivers/net/ethernet/mellanox/mlxsw/reg.h 3 - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 3 + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 4 4 * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com> 5 5 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 6 - * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com> 6 + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> 7 7 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> 8 8 * 9 9 * Redistribution and use in source and binary forms, with or without ··· 1755 1755 mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable); 1756 1756 mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i); 1757 1757 } 1758 + } 1759 + 1760 + /* PPBT - Policy-Engine Port Binding Table 1761 + * --------------------------------------- 1762 + * This register is used for configuration of the Port Binding Table. 1763 + */ 1764 + #define MLXSW_REG_PPBT_ID 0x3002 1765 + #define MLXSW_REG_PPBT_LEN 0x14 1766 + 1767 + MLXSW_REG_DEFINE(ppbt, MLXSW_REG_PPBT_ID, MLXSW_REG_PPBT_LEN); 1768 + 1769 + enum mlxsw_reg_pxbt_e { 1770 + MLXSW_REG_PXBT_E_IACL, 1771 + MLXSW_REG_PXBT_E_EACL, 1772 + }; 1773 + 1774 + /* reg_ppbt_e 1775 + * Access: Index 1776 + */ 1777 + MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1); 1778 + 1779 + enum mlxsw_reg_pxbt_op { 1780 + MLXSW_REG_PXBT_OP_BIND, 1781 + MLXSW_REG_PXBT_OP_UNBIND, 1782 + }; 1783 + 1784 + /* reg_ppbt_op 1785 + * Access: RW 1786 + */ 1787 + MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3); 1788 + 1789 + /* reg_ppbt_local_port 1790 + * Local port. Not including CPU port. 1791 + * Access: Index 1792 + */ 1793 + MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8); 1794 + 1795 + /* reg_ppbt_g 1796 + * group - When set, the binding is of an ACL group. When cleared, 1797 + * the binding is of an ACL. 1798 + * Must be set to 1 for Spectrum. 
1799 + * Access: RW 1800 + */ 1801 + MLXSW_ITEM32(reg, ppbt, g, 0x10, 31, 1); 1802 + 1803 + /* reg_ppbt_acl_info 1804 + * ACL/ACL group identifier. If the g bit is set, this field should hold 1805 + * the acl_group_id, else it should hold the acl_id. 1806 + * Access: RW 1807 + */ 1808 + MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16); 1809 + 1810 + static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e, 1811 + enum mlxsw_reg_pxbt_op op, 1812 + u8 local_port, u16 acl_info) 1813 + { 1814 + MLXSW_REG_ZERO(ppbt, payload); 1815 + mlxsw_reg_ppbt_e_set(payload, e); 1816 + mlxsw_reg_ppbt_op_set(payload, op); 1817 + mlxsw_reg_ppbt_local_port_set(payload, local_port); 1818 + mlxsw_reg_ppbt_g_set(payload, true); 1819 + mlxsw_reg_ppbt_acl_info_set(payload, acl_info); 1820 + } 1821 + 1822 + /* PACL - Policy-Engine ACL Register 1823 + * --------------------------------- 1824 + * This register is used for configuration of the ACL. 1825 + */ 1826 + #define MLXSW_REG_PACL_ID 0x3004 1827 + #define MLXSW_REG_PACL_LEN 0x70 1828 + 1829 + MLXSW_REG_DEFINE(pacl, MLXSW_REG_PACL_ID, MLXSW_REG_PACL_LEN); 1830 + 1831 + /* reg_pacl_v 1832 + * Valid. Setting the v bit makes the ACL valid. It should not be cleared 1833 + * while the ACL is bounded to either a port, VLAN or ACL rule. 1834 + * Access: RW 1835 + */ 1836 + MLXSW_ITEM32(reg, pacl, v, 0x00, 24, 1); 1837 + 1838 + /* reg_pacl_acl_id 1839 + * An identifier representing the ACL (managed by software) 1840 + * Range 0 .. cap_max_acl_regions - 1 1841 + * Access: Index 1842 + */ 1843 + MLXSW_ITEM32(reg, pacl, acl_id, 0x08, 0, 16); 1844 + 1845 + #define MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN 16 1846 + 1847 + /* reg_pacl_tcam_region_info 1848 + * Opaque object that represents a TCAM region. 1849 + * Obtained through PTAR register. 
1850 + * Access: RW 1851 + */ 1852 + MLXSW_ITEM_BUF(reg, pacl, tcam_region_info, 0x30, 1853 + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); 1854 + 1855 + static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id, 1856 + bool valid, const char *tcam_region_info) 1857 + { 1858 + MLXSW_REG_ZERO(pacl, payload); 1859 + mlxsw_reg_pacl_acl_id_set(payload, acl_id); 1860 + mlxsw_reg_pacl_v_set(payload, valid); 1861 + mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info); 1862 + } 1863 + 1864 + /* PAGT - Policy-Engine ACL Group Table 1865 + * ------------------------------------ 1866 + * This register is used for configuration of the ACL Group Table. 1867 + */ 1868 + #define MLXSW_REG_PAGT_ID 0x3005 1869 + #define MLXSW_REG_PAGT_BASE_LEN 0x30 1870 + #define MLXSW_REG_PAGT_ACL_LEN 4 1871 + #define MLXSW_REG_PAGT_ACL_MAX_NUM 16 1872 + #define MLXSW_REG_PAGT_LEN (MLXSW_REG_PAGT_BASE_LEN + \ 1873 + MLXSW_REG_PAGT_ACL_MAX_NUM * MLXSW_REG_PAGT_ACL_LEN) 1874 + 1875 + MLXSW_REG_DEFINE(pagt, MLXSW_REG_PAGT_ID, MLXSW_REG_PAGT_LEN); 1876 + 1877 + /* reg_pagt_size 1878 + * Number of ACLs in the group. 1879 + * Size 0 invalidates a group. 1880 + * Range 0 .. cap_max_acl_group_size (hard coded to 16 for now) 1881 + * Total number of ACLs in all groups must be lower or equal 1882 + * to cap_max_acl_tot_groups 1883 + * Note: a group which is binded must not be invalidated 1884 + * Access: Index 1885 + */ 1886 + MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8); 1887 + 1888 + /* reg_pagt_acl_group_id 1889 + * An identifier (numbered from 0..cap_max_acl_groups-1) representing 1890 + * the ACL Group identifier (managed by software). 
1891 + * Access: Index 1892 + */ 1893 + MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16); 1894 + 1895 + /* reg_pagt_acl_id 1896 + * ACL identifier 1897 + * Access: RW 1898 + */ 1899 + MLXSW_ITEM32_INDEXED(reg, pagt, acl_id, 0x30, 0, 16, 0x04, 0x00, false); 1900 + 1901 + static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id) 1902 + { 1903 + MLXSW_REG_ZERO(pagt, payload); 1904 + mlxsw_reg_pagt_acl_group_id_set(payload, acl_group_id); 1905 + } 1906 + 1907 + static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index, 1908 + u16 acl_id) 1909 + { 1910 + u8 size = mlxsw_reg_pagt_size_get(payload); 1911 + 1912 + if (index >= size) 1913 + mlxsw_reg_pagt_size_set(payload, index + 1); 1914 + mlxsw_reg_pagt_acl_id_set(payload, index, acl_id); 1915 + } 1916 + 1917 + /* PTAR - Policy-Engine TCAM Allocation Register 1918 + * --------------------------------------------- 1919 + * This register is used for allocation of regions in the TCAM. 1920 + * Note: Query method is not supported on this register. 1921 + */ 1922 + #define MLXSW_REG_PTAR_ID 0x3006 1923 + #define MLXSW_REG_PTAR_BASE_LEN 0x20 1924 + #define MLXSW_REG_PTAR_KEY_ID_LEN 1 1925 + #define MLXSW_REG_PTAR_KEY_ID_MAX_NUM 16 1926 + #define MLXSW_REG_PTAR_LEN (MLXSW_REG_PTAR_BASE_LEN + \ 1927 + MLXSW_REG_PTAR_KEY_ID_MAX_NUM * MLXSW_REG_PTAR_KEY_ID_LEN) 1928 + 1929 + MLXSW_REG_DEFINE(ptar, MLXSW_REG_PTAR_ID, MLXSW_REG_PTAR_LEN); 1930 + 1931 + enum mlxsw_reg_ptar_op { 1932 + /* allocate a TCAM region */ 1933 + MLXSW_REG_PTAR_OP_ALLOC, 1934 + /* resize a TCAM region */ 1935 + MLXSW_REG_PTAR_OP_RESIZE, 1936 + /* deallocate TCAM region */ 1937 + MLXSW_REG_PTAR_OP_FREE, 1938 + /* test allocation */ 1939 + MLXSW_REG_PTAR_OP_TEST, 1940 + }; 1941 + 1942 + /* reg_ptar_op 1943 + * Access: OP 1944 + */ 1945 + MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4); 1946 + 1947 + /* reg_ptar_action_set_type 1948 + * Type of action set to be used on this region. 
1949 + * For Spectrum, this is always type 2 - "flexible"
1950 + * Access: WO
1951 + */
1952 + MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
1953 +
1954 + /* reg_ptar_key_type
1955 + * TCAM key type for the region.
1956 + * For Spectrum, this is always type 0x50 - "FLEX_KEY"
1957 + * Access: WO
1958 + */
1959 + MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
1960 +
1961 + /* reg_ptar_region_size
1962 + * TCAM region size. When allocating/resizing this is the requested size,
1963 + * the response is the actual size. Note that actual size may be
1964 + * larger than requested.
1965 + * Allowed range 1 .. cap_max_rules-1
1966 + * Reserved during op deallocate.
1967 + * Access: WO
1968 + */
1969 + MLXSW_ITEM32(reg, ptar, region_size, 0x04, 0, 16);
1970 +
1971 + /* reg_ptar_region_id
1972 + * Region identifier
1973 + * Range 0 .. cap_max_regions-1
1974 + * Access: Index
1975 + */
1976 + MLXSW_ITEM32(reg, ptar, region_id, 0x08, 0, 16);
1977 +
1978 + /* reg_ptar_tcam_region_info
1979 + * Opaque object that represents the TCAM region.
1980 + * Returned when allocating a region.
1981 + * Provided by software for ACL generation and region deallocation and resize.
1982 + * Access: RW
1983 + */
1984 + MLXSW_ITEM_BUF(reg, ptar, tcam_region_info, 0x10,
1985 + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
1986 +
1987 + /* reg_ptar_flexible_key_id
1988 + * Identifier of the Flexible Key.
1989 + * Only valid if key_type == "FLEX_KEY"
1990 + * The key size will be rounded up to one of the following values:
1991 + * 9B, 18B, 36B, 54B.
1992 + * This field is reserved during the resize operation. 
1993 + * Access: WO 1994 + */ 1995 + MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8, 1996 + MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false); 1997 + 1998 + static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op, 1999 + u16 region_size, u16 region_id, 2000 + const char *tcam_region_info) 2001 + { 2002 + MLXSW_REG_ZERO(ptar, payload); 2003 + mlxsw_reg_ptar_op_set(payload, op); 2004 + mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */ 2005 + mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */ 2006 + mlxsw_reg_ptar_region_size_set(payload, region_size); 2007 + mlxsw_reg_ptar_region_id_set(payload, region_id); 2008 + mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info); 2009 + } 2010 + 2011 + static inline void mlxsw_reg_ptar_key_id_pack(char *payload, int index, 2012 + u16 key_id) 2013 + { 2014 + mlxsw_reg_ptar_flexible_key_id_set(payload, index, key_id); 2015 + } 2016 + 2017 + static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info) 2018 + { 2019 + mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); 2020 + } 2021 + 2022 + /* PPBS - Policy-Engine Policy Based Switching Register 2023 + * ---------------------------------------------------- 2024 + * This register retrieves and sets Policy Based Switching Table entries. 2025 + */ 2026 + #define MLXSW_REG_PPBS_ID 0x300C 2027 + #define MLXSW_REG_PPBS_LEN 0x14 2028 + 2029 + MLXSW_REG_DEFINE(ppbs, MLXSW_REG_PPBS_ID, MLXSW_REG_PPBS_LEN); 2030 + 2031 + /* reg_ppbs_pbs_ptr 2032 + * Index into the PBS table. 2033 + * For Spectrum, the index points to the KVD Linear. 2034 + * Access: Index 2035 + */ 2036 + MLXSW_ITEM32(reg, ppbs, pbs_ptr, 0x08, 0, 24); 2037 + 2038 + /* reg_ppbs_system_port 2039 + * Unique port identifier for the final destination of the packet. 
2040 + * Access: RW 2041 + */ 2042 + MLXSW_ITEM32(reg, ppbs, system_port, 0x10, 0, 16); 2043 + 2044 + static inline void mlxsw_reg_ppbs_pack(char *payload, u32 pbs_ptr, 2045 + u16 system_port) 2046 + { 2047 + MLXSW_REG_ZERO(ppbs, payload); 2048 + mlxsw_reg_ppbs_pbs_ptr_set(payload, pbs_ptr); 2049 + mlxsw_reg_ppbs_system_port_set(payload, system_port); 2050 + } 2051 + 2052 + /* PRCR - Policy-Engine Rules Copy Register 2053 + * ---------------------------------------- 2054 + * This register is used for accessing rules within a TCAM region. 2055 + */ 2056 + #define MLXSW_REG_PRCR_ID 0x300D 2057 + #define MLXSW_REG_PRCR_LEN 0x40 2058 + 2059 + MLXSW_REG_DEFINE(prcr, MLXSW_REG_PRCR_ID, MLXSW_REG_PRCR_LEN); 2060 + 2061 + enum mlxsw_reg_prcr_op { 2062 + /* Move rules. Moves the rules from "tcam_region_info" starting 2063 + * at offset "offset" to "dest_tcam_region_info" 2064 + * at offset "dest_offset." 2065 + */ 2066 + MLXSW_REG_PRCR_OP_MOVE, 2067 + /* Copy rules. Copies the rules from "tcam_region_info" starting 2068 + * at offset "offset" to "dest_tcam_region_info" 2069 + * at offset "dest_offset." 2070 + */ 2071 + MLXSW_REG_PRCR_OP_COPY, 2072 + }; 2073 + 2074 + /* reg_prcr_op 2075 + * Access: OP 2076 + */ 2077 + MLXSW_ITEM32(reg, prcr, op, 0x00, 28, 4); 2078 + 2079 + /* reg_prcr_offset 2080 + * Offset within the source region to copy/move from. 2081 + * Access: Index 2082 + */ 2083 + MLXSW_ITEM32(reg, prcr, offset, 0x00, 0, 16); 2084 + 2085 + /* reg_prcr_size 2086 + * The number of rules to copy/move. 2087 + * Access: WO 2088 + */ 2089 + MLXSW_ITEM32(reg, prcr, size, 0x04, 0, 16); 2090 + 2091 + /* reg_prcr_tcam_region_info 2092 + * Opaque object that represents the source TCAM region. 2093 + * Access: Index 2094 + */ 2095 + MLXSW_ITEM_BUF(reg, prcr, tcam_region_info, 0x10, 2096 + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); 2097 + 2098 + /* reg_prcr_dest_offset 2099 + * Offset within the source region to copy/move to. 
2100 + * Access: Index 2101 + */ 2102 + MLXSW_ITEM32(reg, prcr, dest_offset, 0x20, 0, 16); 2103 + 2104 + /* reg_prcr_dest_tcam_region_info 2105 + * Opaque object that represents the destination TCAM region. 2106 + * Access: Index 2107 + */ 2108 + MLXSW_ITEM_BUF(reg, prcr, dest_tcam_region_info, 0x30, 2109 + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); 2110 + 2111 + static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op, 2112 + const char *src_tcam_region_info, 2113 + u16 src_offset, 2114 + const char *dest_tcam_region_info, 2115 + u16 dest_offset, u16 size) 2116 + { 2117 + MLXSW_REG_ZERO(prcr, payload); 2118 + mlxsw_reg_prcr_op_set(payload, op); 2119 + mlxsw_reg_prcr_offset_set(payload, src_offset); 2120 + mlxsw_reg_prcr_size_set(payload, size); 2121 + mlxsw_reg_prcr_tcam_region_info_memcpy_to(payload, 2122 + src_tcam_region_info); 2123 + mlxsw_reg_prcr_dest_offset_set(payload, dest_offset); 2124 + mlxsw_reg_prcr_dest_tcam_region_info_memcpy_to(payload, 2125 + dest_tcam_region_info); 2126 + } 2127 + 2128 + /* PEFA - Policy-Engine Extended Flexible Action Register 2129 + * ------------------------------------------------------ 2130 + * This register is used for accessing an extended flexible action entry 2131 + * in the central KVD Linear Database. 2132 + */ 2133 + #define MLXSW_REG_PEFA_ID 0x300F 2134 + #define MLXSW_REG_PEFA_LEN 0xB0 2135 + 2136 + MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); 2137 + 2138 + /* reg_pefa_index 2139 + * Index in the KVD Linear Centralized Database. 2140 + * Access: Index 2141 + */ 2142 + MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); 2143 + 2144 + #define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8 2145 + 2146 + /* reg_pefa_flex_action_set 2147 + * Action-set to perform when rule is matched. 2148 + * Must be zero padded if action set is shorter. 
2149 + * Access: RW 2150 + */ 2151 + MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, 2152 + MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); 2153 + 2154 + static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, 2155 + const char *flex_action_set) 2156 + { 2157 + MLXSW_REG_ZERO(pefa, payload); 2158 + mlxsw_reg_pefa_index_set(payload, index); 2159 + mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set); 2160 + } 2161 + 2162 + /* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 2163 + * ----------------------------------------------------- 2164 + * This register is used for accessing rules within a TCAM region. 2165 + * It is a new version of PTCE in order to support wider key, 2166 + * mask and action within a TCAM region. This register is not supported 2167 + * by SwitchX and SwitchX-2. 2168 + */ 2169 + #define MLXSW_REG_PTCE2_ID 0x3017 2170 + #define MLXSW_REG_PTCE2_LEN 0x1D8 2171 + 2172 + MLXSW_REG_DEFINE(ptce2, MLXSW_REG_PTCE2_ID, MLXSW_REG_PTCE2_LEN); 2173 + 2174 + /* reg_ptce2_v 2175 + * Valid. 2176 + * Access: RW 2177 + */ 2178 + MLXSW_ITEM32(reg, ptce2, v, 0x00, 31, 1); 2179 + 2180 + /* reg_ptce2_a 2181 + * Activity. Set if a packet lookup has hit on the specific entry. 2182 + * To clear the "a" bit, use "clear activity" op or "clear on read" op. 2183 + * Access: RO 2184 + */ 2185 + MLXSW_ITEM32(reg, ptce2, a, 0x00, 30, 1); 2186 + 2187 + enum mlxsw_reg_ptce2_op { 2188 + /* Read operation. */ 2189 + MLXSW_REG_PTCE2_OP_QUERY_READ = 0, 2190 + /* clear on read operation. Used to read entry 2191 + * and clear Activity bit. 2192 + */ 2193 + MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ = 1, 2194 + /* Write operation. Used to write a new entry to the table. 2195 + * All R/W fields are relevant for new entry. Activity bit is set 2196 + * for new entries - Note write with v = 0 will delete the entry. 2197 + */ 2198 + MLXSW_REG_PTCE2_OP_WRITE_WRITE = 0, 2199 + /* Update action. Only action set will be updated. 
*/ 2200 + MLXSW_REG_PTCE2_OP_WRITE_UPDATE = 1, 2201 + /* Clear activity. A bit is cleared for the entry. */ 2202 + MLXSW_REG_PTCE2_OP_WRITE_CLEAR_ACTIVITY = 2, 2203 + }; 2204 + 2205 + /* reg_ptce2_op 2206 + * Access: OP 2207 + */ 2208 + MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3); 2209 + 2210 + /* reg_ptce2_offset 2211 + * Access: Index 2212 + */ 2213 + MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16); 2214 + 2215 + /* reg_ptce2_tcam_region_info 2216 + * Opaque object that represents the TCAM region. 2217 + * Access: Index 2218 + */ 2219 + MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10, 2220 + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); 2221 + 2222 + #define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96 2223 + 2224 + /* reg_ptce2_flex_key_blocks 2225 + * ACL Key. 2226 + * Access: RW 2227 + */ 2228 + MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, 2229 + MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); 2230 + 2231 + /* reg_ptce2_mask 2232 + * mask- in the same size as key. A bit that is set directs the TCAM 2233 + * to compare the corresponding bit in key. A bit that is clear directs 2234 + * the TCAM to ignore the corresponding bit in key. 2235 + * Access: RW 2236 + */ 2237 + MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, 2238 + MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); 2239 + 2240 + /* reg_ptce2_flex_action_set 2241 + * ACL action set. 
2242 + * Access: RW 2243 + */ 2244 + MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, 2245 + MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); 2246 + 2247 + static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, 2248 + enum mlxsw_reg_ptce2_op op, 2249 + const char *tcam_region_info, 2250 + u16 offset) 2251 + { 2252 + MLXSW_REG_ZERO(ptce2, payload); 2253 + mlxsw_reg_ptce2_v_set(payload, valid); 2254 + mlxsw_reg_ptce2_op_set(payload, op); 2255 + mlxsw_reg_ptce2_offset_set(payload, offset); 2256 + mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); 1758 2257 } 1759 2258 1760 2259 /* QPCR - QoS Policer Configuration Register ··· 5933 5434 MLXSW_REG(svpe), 5934 5435 MLXSW_REG(sfmr), 5935 5436 MLXSW_REG(spvmlr), 5437 + MLXSW_REG(ppbt), 5438 + MLXSW_REG(pacl), 5439 + MLXSW_REG(pagt), 5440 + MLXSW_REG(ptar), 5441 + MLXSW_REG(ppbs), 5442 + MLXSW_REG(prcr), 5443 + MLXSW_REG(pefa), 5444 + MLXSW_REG(ptce2), 5936 5445 MLXSW_REG(qpcr), 5937 5446 MLXSW_REG(qtct), 5938 5447 MLXSW_REG(qeec),
+18 -2
drivers/net/ethernet/mellanox/mlxsw/resources.h
··· 1 1 /* 2 2 * drivers/net/ethernet/mellanox/mlxsw/resources.h 3 - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. 4 - * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> 3 + * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com> 5 5 * 6 6 * Redistribution and use in source and binary forms, with or without 7 7 * modification, are permitted provided that the following conditions are met: ··· 48 48 MLXSW_RES_ID_MAX_LAG, 49 49 MLXSW_RES_ID_MAX_LAG_MEMBERS, 50 50 MLXSW_RES_ID_MAX_BUFFER_SIZE, 51 + MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, 52 + MLXSW_RES_ID_ACL_MAX_TCAM_RULES, 53 + MLXSW_RES_ID_ACL_MAX_REGIONS, 54 + MLXSW_RES_ID_ACL_MAX_GROUPS, 55 + MLXSW_RES_ID_ACL_MAX_GROUP_SIZE, 56 + MLXSW_RES_ID_ACL_FLEX_KEYS, 57 + MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, 58 + MLXSW_RES_ID_ACL_ACTIONS_PER_SET, 51 59 MLXSW_RES_ID_MAX_CPU_POLICERS, 52 60 MLXSW_RES_ID_MAX_VRS, 53 61 MLXSW_RES_ID_MAX_RIFS, ··· 80 72 [MLXSW_RES_ID_MAX_LAG] = 0x2520, 81 73 [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, 82 74 [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ 75 + [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, 76 + [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, 77 + [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, 78 + [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904, 79 + [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905, 80 + [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, 81 + [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, 82 + [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, 83 83 [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, 84 84 [MLXSW_RES_ID_MAX_VRS] = 0x2C01, 85 85 [MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+26 -6
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 1 1 /* 2 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c 3 - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 3 + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> 5 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 7 * ··· 137 137 * 6 - Control packets 138 138 */ 139 139 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 140 - 141 - static bool mlxsw_sp_port_dev_check(const struct net_device *dev); 142 140 143 141 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 144 142 const struct mlxsw_tx_info *tx_info) ··· 1355 1357 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1356 1358 bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); 1357 1359 1358 - if (tc->type == TC_SETUP_MATCHALL) { 1360 + switch (tc->type) { 1361 + case TC_SETUP_MATCHALL: 1359 1362 switch (tc->cls_mall->command) { 1360 1363 case TC_CLSMATCHALL_REPLACE: 1361 1364 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, ··· 1369 1370 return 0; 1370 1371 default: 1371 1372 return -EINVAL; 1373 + } 1374 + case TC_SETUP_CLSFLOWER: 1375 + switch (tc->cls_flower->command) { 1376 + case TC_CLSFLOWER_REPLACE: 1377 + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, 1378 + proto, tc->cls_flower); 1379 + case TC_CLSFLOWER_DESTROY: 1380 + mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, 1381 + tc->cls_flower); 1382 + return 0; 1383 + default: 1384 + return -EOPNOTSUPP; 1372 1385 } 1373 1386 } 1374 1387 ··· 3214 3203 goto err_span_init; 3215 3204 } 3216 3205 3206 + err = mlxsw_sp_acl_init(mlxsw_sp); 3207 + if (err) { 3208 + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3209 + goto err_acl_init; 3210 + } 3211 + 3217 3212 err = mlxsw_sp_ports_create(mlxsw_sp); 3218 3213 if (err) { 3219 3214 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); ··· 3229 
3212 return 0; 3230 3213 3231 3214 err_ports_create: 3215 + mlxsw_sp_acl_fini(mlxsw_sp); 3216 + err_acl_init: 3232 3217 mlxsw_sp_span_fini(mlxsw_sp); 3233 3218 err_span_init: 3234 3219 mlxsw_sp_router_fini(mlxsw_sp); ··· 3251 3232 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3252 3233 3253 3234 mlxsw_sp_ports_remove(mlxsw_sp); 3235 + mlxsw_sp_acl_fini(mlxsw_sp); 3254 3236 mlxsw_sp_span_fini(mlxsw_sp); 3255 3237 mlxsw_sp_router_fini(mlxsw_sp); 3256 3238 mlxsw_sp_switchdev_fini(mlxsw_sp); ··· 3317 3297 .profile = &mlxsw_sp_config_profile, 3318 3298 }; 3319 3299 3320 - static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3300 + bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3321 3301 { 3322 3302 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3323 3303 }
+104 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 1 1 /* 2 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h 3 - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 3 + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> 5 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 7 * ··· 47 47 #include <linux/in6.h> 48 48 #include <linux/notifier.h> 49 49 #include <net/psample.h> 50 + #include <net/pkt_cls.h> 50 51 51 52 #include "port.h" 52 53 #include "core.h" 54 + #include "core_acl_flex_keys.h" 55 + #include "core_acl_flex_actions.h" 53 56 54 57 #define MLXSW_SP_VFID_BASE VLAN_N_VID 55 58 #define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */ ··· 265 262 bool aborted; 266 263 }; 267 264 265 + struct mlxsw_sp_acl; 266 + 268 267 struct mlxsw_sp { 269 268 struct { 270 269 struct list_head list; ··· 296 291 u8 port_to_module[MLXSW_PORT_MAX_PORTS]; 297 292 struct mlxsw_sp_sb sb; 298 293 struct mlxsw_sp_router router; 294 + struct mlxsw_sp_acl *acl; 299 295 struct { 300 296 DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); 301 297 } kvdl; ··· 379 373 struct mlxsw_sp_port_sample *sample; 380 374 }; 381 375 376 + bool mlxsw_sp_port_dev_check(const struct net_device *dev); 382 377 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); 383 378 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); 384 379 ··· 608 601 609 602 int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); 610 603 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); 604 + 605 + struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); 606 + 607 + struct mlxsw_sp_acl_rule_info { 608 + unsigned int priority; 609 + struct mlxsw_afk_element_values values; 610 + struct mlxsw_afa_block *act_block; 611 + }; 612 + 613 + enum mlxsw_sp_acl_profile { 614 + 
MLXSW_SP_ACL_PROFILE_FLOWER, 615 + }; 616 + 617 + struct mlxsw_sp_acl_profile_ops { 618 + size_t ruleset_priv_size; 619 + int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, 620 + void *priv, void *ruleset_priv); 621 + void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); 622 + int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, 623 + struct net_device *dev, bool ingress); 624 + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); 625 + size_t rule_priv_size; 626 + int (*rule_add)(struct mlxsw_sp *mlxsw_sp, 627 + void *ruleset_priv, void *rule_priv, 628 + struct mlxsw_sp_acl_rule_info *rulei); 629 + void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); 630 + }; 631 + 632 + struct mlxsw_sp_acl_ops { 633 + size_t priv_size; 634 + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); 635 + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); 636 + const struct mlxsw_sp_acl_profile_ops * 637 + (*profile_ops)(struct mlxsw_sp *mlxsw_sp, 638 + enum mlxsw_sp_acl_profile profile); 639 + }; 640 + 641 + struct mlxsw_sp_acl_ruleset; 642 + 643 + struct mlxsw_sp_acl_ruleset * 644 + mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, 645 + struct net_device *dev, bool ingress, 646 + enum mlxsw_sp_acl_profile profile); 647 + void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, 648 + struct mlxsw_sp_acl_ruleset *ruleset); 649 + 650 + struct mlxsw_sp_acl_rule_info * 651 + mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl); 652 + void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei); 653 + int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei); 654 + void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, 655 + unsigned int priority); 656 + void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, 657 + enum mlxsw_afk_element element, 658 + u32 key_value, u32 mask_value); 659 + void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, 660 + enum 
mlxsw_afk_element element, 661 + const char *key_value, 662 + const char *mask_value, unsigned int len); 663 + void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); 664 + void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, 665 + u16 group_id); 666 + int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); 667 + int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, 668 + struct mlxsw_sp_acl_rule_info *rulei, 669 + struct net_device *out_dev); 670 + 671 + struct mlxsw_sp_acl_rule; 672 + 673 + struct mlxsw_sp_acl_rule * 674 + mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, 675 + struct mlxsw_sp_acl_ruleset *ruleset, 676 + unsigned long cookie); 677 + void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, 678 + struct mlxsw_sp_acl_rule *rule); 679 + int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, 680 + struct mlxsw_sp_acl_rule *rule); 681 + void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, 682 + struct mlxsw_sp_acl_rule *rule); 683 + struct mlxsw_sp_acl_rule * 684 + mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, 685 + struct mlxsw_sp_acl_ruleset *ruleset, 686 + unsigned long cookie); 687 + struct mlxsw_sp_acl_rule_info * 688 + mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); 689 + 690 + int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); 691 + void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); 692 + 693 + extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; 694 + 695 + int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, 696 + __be16 protocol, struct tc_cls_flower_offload *f); 697 + void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, 698 + struct tc_cls_flower_offload *f); 611 699 612 700 #endif
+572
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/slab.h> 37 + #include <linux/errno.h> 38 + #include <linux/list.h> 39 + #include <linux/string.h> 40 + #include <linux/rhashtable.h> 41 + #include <linux/netdevice.h> 42 + 43 + #include "reg.h" 44 + #include "core.h" 45 + #include "resources.h" 46 + #include "spectrum.h" 47 + #include "core_acl_flex_keys.h" 48 + #include "core_acl_flex_actions.h" 49 + #include "spectrum_acl_flex_keys.h" 50 + 51 + struct mlxsw_sp_acl { 52 + struct mlxsw_afk *afk; 53 + struct mlxsw_afa *afa; 54 + const struct mlxsw_sp_acl_ops *ops; 55 + struct rhashtable ruleset_ht; 56 + unsigned long priv[0]; 57 + /* priv has to be always the last item */ 58 + }; 59 + 60 + struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) 61 + { 62 + return acl->afk; 63 + } 64 + 65 + struct mlxsw_sp_acl_ruleset_ht_key { 66 + struct net_device *dev; /* dev this ruleset is bound to */ 67 + bool ingress; 68 + const struct mlxsw_sp_acl_profile_ops *ops; 69 + }; 70 + 71 + struct mlxsw_sp_acl_ruleset { 72 + struct rhash_head ht_node; /* Member of acl HT */ 73 + struct mlxsw_sp_acl_ruleset_ht_key ht_key; 74 + struct rhashtable rule_ht; 75 + unsigned int ref_count; 76 + unsigned long priv[0]; 77 + /* priv has to be always the last item */ 78 + }; 79 + 80 + struct mlxsw_sp_acl_rule { 81 + struct rhash_head ht_node; /* Member of rule HT */ 82 + unsigned long cookie; /* HT key */ 83 + struct mlxsw_sp_acl_ruleset *ruleset; 84 + struct mlxsw_sp_acl_rule_info *rulei; 85 + unsigned long priv[0]; 86 + /* priv has to be always the last item */ 87 + }; 88 + 89 + static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = { 90 + .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key), 91 + .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key), 92 + .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node), 93 + .automatic_shrinking = true, 94 + }; 95 + 96 + static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = { 97 + .key_len = 
sizeof(unsigned long), 98 + .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie), 99 + .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node), 100 + .automatic_shrinking = true, 101 + }; 102 + 103 + static struct mlxsw_sp_acl_ruleset * 104 + mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, 105 + const struct mlxsw_sp_acl_profile_ops *ops) 106 + { 107 + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; 108 + struct mlxsw_sp_acl_ruleset *ruleset; 109 + size_t alloc_size; 110 + int err; 111 + 112 + alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size; 113 + ruleset = kzalloc(alloc_size, GFP_KERNEL); 114 + if (!ruleset) 115 + return ERR_PTR(-ENOMEM); 116 + ruleset->ref_count = 1; 117 + ruleset->ht_key.ops = ops; 118 + 119 + err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params); 120 + if (err) 121 + goto err_rhashtable_init; 122 + 123 + err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv); 124 + if (err) 125 + goto err_ops_ruleset_add; 126 + 127 + return ruleset; 128 + 129 + err_ops_ruleset_add: 130 + rhashtable_destroy(&ruleset->rule_ht); 131 + err_rhashtable_init: 132 + kfree(ruleset); 133 + return ERR_PTR(err); 134 + } 135 + 136 + static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, 137 + struct mlxsw_sp_acl_ruleset *ruleset) 138 + { 139 + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; 140 + 141 + ops->ruleset_del(mlxsw_sp, ruleset->priv); 142 + rhashtable_destroy(&ruleset->rule_ht); 143 + kfree(ruleset); 144 + } 145 + 146 + static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, 147 + struct mlxsw_sp_acl_ruleset *ruleset, 148 + struct net_device *dev, bool ingress) 149 + { 150 + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; 151 + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; 152 + int err; 153 + 154 + ruleset->ht_key.dev = dev; 155 + ruleset->ht_key.ingress = ingress; 156 + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, 157 + mlxsw_sp_acl_ruleset_ht_params); 
158 + if (err) 159 + return err; 160 + err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); 161 + if (err) 162 + goto err_ops_ruleset_bind; 163 + return 0; 164 + 165 + err_ops_ruleset_bind: 166 + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, 167 + mlxsw_sp_acl_ruleset_ht_params); 168 + return err; 169 + } 170 + 171 + static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, 172 + struct mlxsw_sp_acl_ruleset *ruleset) 173 + { 174 + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; 175 + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; 176 + 177 + ops->ruleset_unbind(mlxsw_sp, ruleset->priv); 178 + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, 179 + mlxsw_sp_acl_ruleset_ht_params); 180 + } 181 + 182 + static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset) 183 + { 184 + ruleset->ref_count++; 185 + } 186 + 187 + static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, 188 + struct mlxsw_sp_acl_ruleset *ruleset) 189 + { 190 + if (--ruleset->ref_count) 191 + return; 192 + mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset); 193 + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); 194 + } 195 + 196 + struct mlxsw_sp_acl_ruleset * 197 + mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, 198 + struct net_device *dev, bool ingress, 199 + enum mlxsw_sp_acl_profile profile) 200 + { 201 + const struct mlxsw_sp_acl_profile_ops *ops; 202 + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; 203 + struct mlxsw_sp_acl_ruleset_ht_key ht_key; 204 + struct mlxsw_sp_acl_ruleset *ruleset; 205 + int err; 206 + 207 + ops = acl->ops->profile_ops(mlxsw_sp, profile); 208 + if (!ops) 209 + return ERR_PTR(-EINVAL); 210 + 211 + memset(&ht_key, 0, sizeof(ht_key)); 212 + ht_key.dev = dev; 213 + ht_key.ingress = ingress; 214 + ht_key.ops = ops; 215 + ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, 216 + mlxsw_sp_acl_ruleset_ht_params); 217 + if (ruleset) { 218 + mlxsw_sp_acl_ruleset_ref_inc(ruleset); 219 + 
return ruleset; 220 + } 221 + ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops); 222 + if (IS_ERR(ruleset)) 223 + return ruleset; 224 + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress); 225 + if (err) 226 + goto err_ruleset_bind; 227 + return ruleset; 228 + 229 + err_ruleset_bind: 230 + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); 231 + return ERR_PTR(err); 232 + } 233 + 234 + void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, 235 + struct mlxsw_sp_acl_ruleset *ruleset) 236 + { 237 + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); 238 + } 239 + 240 + struct mlxsw_sp_acl_rule_info * 241 + mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) 242 + { 243 + struct mlxsw_sp_acl_rule_info *rulei; 244 + int err; 245 + 246 + rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); 247 + if (!rulei) 248 + return NULL; 249 + rulei->act_block = mlxsw_afa_block_create(acl->afa); 250 + if (IS_ERR(rulei->act_block)) { 251 + err = PTR_ERR(rulei->act_block); 252 + goto err_afa_block_create; 253 + } 254 + return rulei; 255 + 256 + err_afa_block_create: 257 + kfree(rulei); 258 + return ERR_PTR(err); 259 + } 260 + 261 + void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei) 262 + { 263 + mlxsw_afa_block_destroy(rulei->act_block); 264 + kfree(rulei); 265 + } 266 + 267 + int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) 268 + { 269 + return mlxsw_afa_block_commit(rulei->act_block); 270 + } 271 + 272 + void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, 273 + unsigned int priority) 274 + { 275 + rulei->priority = priority; 276 + } 277 + 278 + void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, 279 + enum mlxsw_afk_element element, 280 + u32 key_value, u32 mask_value) 281 + { 282 + mlxsw_afk_values_add_u32(&rulei->values, element, 283 + key_value, mask_value); 284 + } 285 + 286 + void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, 287 + enum mlxsw_afk_element element, 288 + 
const char *key_value, 289 + const char *mask_value, unsigned int len) 290 + { 291 + mlxsw_afk_values_add_buf(&rulei->values, element, 292 + key_value, mask_value, len); 293 + } 294 + 295 + void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei) 296 + { 297 + mlxsw_afa_block_continue(rulei->act_block); 298 + } 299 + 300 + void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, 301 + u16 group_id) 302 + { 303 + mlxsw_afa_block_jump(rulei->act_block, group_id); 304 + } 305 + 306 + int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) 307 + { 308 + return mlxsw_afa_block_append_drop(rulei->act_block); 309 + } 310 + 311 + int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, 312 + struct mlxsw_sp_acl_rule_info *rulei, 313 + struct net_device *out_dev) 314 + { 315 + struct mlxsw_sp_port *mlxsw_sp_port; 316 + u8 local_port; 317 + bool in_port; 318 + 319 + if (out_dev) { 320 + if (!mlxsw_sp_port_dev_check(out_dev)) 321 + return -EINVAL; 322 + mlxsw_sp_port = netdev_priv(out_dev); 323 + if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) 324 + return -EINVAL; 325 + local_port = mlxsw_sp_port->local_port; 326 + in_port = false; 327 + } else { 328 + /* If out_dev is NULL, the caller wants to 329 + * set forward to ingress port. 
330 + */ 331 + local_port = 0; 332 + in_port = true; 333 + } 334 + return mlxsw_afa_block_append_fwd(rulei->act_block, 335 + local_port, in_port); 336 + } 337 + 338 + struct mlxsw_sp_acl_rule * 339 + mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, 340 + struct mlxsw_sp_acl_ruleset *ruleset, 341 + unsigned long cookie) 342 + { 343 + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; 344 + struct mlxsw_sp_acl_rule *rule; 345 + int err; 346 + 347 + mlxsw_sp_acl_ruleset_ref_inc(ruleset); 348 + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); 349 + if (!rule) { 350 + err = -ENOMEM; 351 + goto err_alloc; 352 + } 353 + rule->cookie = cookie; 354 + rule->ruleset = ruleset; 355 + 356 + rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); 357 + if (IS_ERR(rule->rulei)) { 358 + err = PTR_ERR(rule->rulei); 359 + goto err_rulei_create; 360 + } 361 + return rule; 362 + 363 + err_rulei_create: 364 + kfree(rule); 365 + err_alloc: 366 + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); 367 + return ERR_PTR(err); 368 + } 369 + 370 + void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, 371 + struct mlxsw_sp_acl_rule *rule) 372 + { 373 + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; 374 + 375 + mlxsw_sp_acl_rulei_destroy(rule->rulei); 376 + kfree(rule); 377 + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); 378 + } 379 + 380 + int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, 381 + struct mlxsw_sp_acl_rule *rule) 382 + { 383 + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; 384 + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; 385 + int err; 386 + 387 + err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei); 388 + if (err) 389 + return err; 390 + 391 + err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, 392 + mlxsw_sp_acl_rule_ht_params); 393 + if (err) 394 + goto err_rhashtable_insert; 395 + 396 + return 0; 397 + 398 + err_rhashtable_insert: 399 + ops->rule_del(mlxsw_sp, 
rule->priv); 400 + return err; 401 + } 402 + 403 + void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, 404 + struct mlxsw_sp_acl_rule *rule) 405 + { 406 + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; 407 + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; 408 + 409 + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, 410 + mlxsw_sp_acl_rule_ht_params); 411 + ops->rule_del(mlxsw_sp, rule->priv); 412 + } 413 + 414 + struct mlxsw_sp_acl_rule * 415 + mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, 416 + struct mlxsw_sp_acl_ruleset *ruleset, 417 + unsigned long cookie) 418 + { 419 + return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie, 420 + mlxsw_sp_acl_rule_ht_params); 421 + } 422 + 423 + struct mlxsw_sp_acl_rule_info * 424 + mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule) 425 + { 426 + return rule->rulei; 427 + } 428 + 429 + #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 430 + 431 + static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, 432 + char *enc_actions, bool is_first) 433 + { 434 + struct mlxsw_sp *mlxsw_sp = priv; 435 + char pefa_pl[MLXSW_REG_PEFA_LEN]; 436 + u32 kvdl_index; 437 + int ret; 438 + int err; 439 + 440 + /* The first action set of a TCAM entry is stored directly in TCAM, 441 + * not KVD linear area. 
442 + */ 443 + if (is_first) 444 + return 0; 445 + 446 + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE); 447 + if (ret < 0) 448 + return ret; 449 + kvdl_index = ret; 450 + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); 451 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); 452 + if (err) 453 + goto err_pefa_write; 454 + *p_kvdl_index = kvdl_index; 455 + return 0; 456 + 457 + err_pefa_write: 458 + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); 459 + return err; 460 + } 461 + 462 + static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, 463 + bool is_first) 464 + { 465 + struct mlxsw_sp *mlxsw_sp = priv; 466 + 467 + if (is_first) 468 + return; 469 + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); 470 + } 471 + 472 + static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, 473 + u8 local_port) 474 + { 475 + struct mlxsw_sp *mlxsw_sp = priv; 476 + char ppbs_pl[MLXSW_REG_PPBS_LEN]; 477 + u32 kvdl_index; 478 + int ret; 479 + int err; 480 + 481 + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1); 482 + if (ret < 0) 483 + return ret; 484 + kvdl_index = ret; 485 + mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); 486 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); 487 + if (err) 488 + goto err_ppbs_write; 489 + *p_kvdl_index = kvdl_index; 490 + return 0; 491 + 492 + err_ppbs_write: 493 + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); 494 + return err; 495 + } 496 + 497 + static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) 498 + { 499 + struct mlxsw_sp *mlxsw_sp = priv; 500 + 501 + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); 502 + } 503 + 504 + static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { 505 + .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, 506 + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, 507 + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, 508 + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, 509 + }; 510 + 511 + int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) 512 + { 
513 + const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; 514 + struct mlxsw_sp_acl *acl; 515 + int err; 516 + 517 + acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL); 518 + if (!acl) 519 + return -ENOMEM; 520 + mlxsw_sp->acl = acl; 521 + 522 + acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, 523 + ACL_FLEX_KEYS), 524 + mlxsw_sp_afk_blocks, 525 + MLXSW_SP_AFK_BLOCKS_COUNT); 526 + if (!acl->afk) { 527 + err = -ENOMEM; 528 + goto err_afk_create; 529 + } 530 + 531 + acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, 532 + ACL_ACTIONS_PER_SET), 533 + &mlxsw_sp_act_afa_ops, mlxsw_sp); 534 + if (IS_ERR(acl->afa)) { 535 + err = PTR_ERR(acl->afa); 536 + goto err_afa_create; 537 + } 538 + 539 + err = rhashtable_init(&acl->ruleset_ht, 540 + &mlxsw_sp_acl_ruleset_ht_params); 541 + if (err) 542 + goto err_rhashtable_init; 543 + 544 + err = acl_ops->init(mlxsw_sp, acl->priv); 545 + if (err) 546 + goto err_acl_ops_init; 547 + 548 + acl->ops = acl_ops; 549 + return 0; 550 + 551 + err_acl_ops_init: 552 + rhashtable_destroy(&acl->ruleset_ht); 553 + err_rhashtable_init: 554 + mlxsw_afa_destroy(acl->afa); 555 + err_afa_create: 556 + mlxsw_afk_destroy(acl->afk); 557 + err_afk_create: 558 + kfree(acl); 559 + return err; 560 + } 561 + 562 + void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) 563 + { 564 + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; 565 + const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; 566 + 567 + acl_ops->fini(mlxsw_sp, acl->priv); 568 + rhashtable_destroy(&acl->ruleset_ht); 569 + mlxsw_afa_destroy(acl->afa); 570 + mlxsw_afk_destroy(acl->afk); 571 + kfree(acl); 572 + }
+109
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H 36 + #define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H 37 + 38 + #include "core_acl_flex_keys.h" 39 + 40 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { 41 + MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), 42 + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), 43 + }; 44 + 45 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { 46 + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), 47 + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), 48 + }; 49 + 50 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { 51 + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6), 52 + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), 53 + }; 54 + 55 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { 56 + MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), 57 + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), 58 + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), 59 + }; 60 + 61 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { 62 + MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32), 63 + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), 64 + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), 65 + }; 66 + 67 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { 68 + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), 69 + MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), 70 + }; 71 + 72 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { 73 + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8), 74 + }; 75 + 76 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { 77 + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8), 78 + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), 79 + }; 80 + 81 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { 82 + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8), 83 
+ }; 84 + 85 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { 86 + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8), 87 + }; 88 + 89 + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { 90 + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), 91 + }; 92 + 93 + static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { 94 + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), 95 + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), 96 + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), 97 + MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), 98 + MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), 99 + MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), 100 + MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), 101 + MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), 102 + MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), 103 + MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), 104 + MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), 105 + }; 106 + 107 + #define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks) 108 + 109 + #endif
+1084
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/slab.h> 37 + #include <linux/errno.h> 38 + #include <linux/bitops.h> 39 + #include <linux/list.h> 40 + #include <linux/rhashtable.h> 41 + #include <linux/netdevice.h> 42 + #include <linux/parman.h> 43 + 44 + #include "reg.h" 45 + #include "core.h" 46 + #include "resources.h" 47 + #include "spectrum.h" 48 + #include "core_acl_flex_keys.h" 49 + 50 + struct mlxsw_sp_acl_tcam { 51 + unsigned long *used_regions; /* bit array */ 52 + unsigned int max_regions; 53 + unsigned long *used_groups; /* bit array */ 54 + unsigned int max_groups; 55 + unsigned int max_group_size; 56 + }; 57 + 58 + static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) 59 + { 60 + struct mlxsw_sp_acl_tcam *tcam = priv; 61 + u64 max_tcam_regions; 62 + u64 max_regions; 63 + u64 max_groups; 64 + size_t alloc_size; 65 + int err; 66 + 67 + max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, 68 + ACL_MAX_TCAM_REGIONS); 69 + max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); 70 + 71 + /* Use 1:1 mapping between ACL region and TCAM region */ 72 + if (max_tcam_regions < max_regions) 73 + max_regions = max_tcam_regions; 74 + 75 + alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions); 76 + tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL); 77 + if (!tcam->used_regions) 78 + return -ENOMEM; 79 + tcam->max_regions = max_regions; 80 + 81 + max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS); 82 + alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups); 83 + tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL); 84 + if (!tcam->used_groups) { 85 + err = -ENOMEM; 86 + goto err_alloc_used_groups; 87 + } 88 + tcam->max_groups = max_groups; 89 + tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, 90 + ACL_MAX_GROUP_SIZE); 91 + return 0; 92 + 93 + err_alloc_used_groups: 94 + kfree(tcam->used_regions); 95 + return err; 96 + } 97 + 98 + static void 
mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) 99 + { 100 + struct mlxsw_sp_acl_tcam *tcam = priv; 101 + 102 + kfree(tcam->used_groups); 103 + kfree(tcam->used_regions); 104 + } 105 + 106 + static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam, 107 + u16 *p_id) 108 + { 109 + u16 id; 110 + 111 + id = find_first_zero_bit(tcam->used_regions, tcam->max_regions); 112 + if (id < tcam->max_regions) { 113 + __set_bit(id, tcam->used_regions); 114 + *p_id = id; 115 + return 0; 116 + } 117 + return -ENOBUFS; 118 + } 119 + 120 + static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam, 121 + u16 id) 122 + { 123 + __clear_bit(id, tcam->used_regions); 124 + } 125 + 126 + static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam, 127 + u16 *p_id) 128 + { 129 + u16 id; 130 + 131 + id = find_first_zero_bit(tcam->used_groups, tcam->max_groups); 132 + if (id < tcam->max_groups) { 133 + __set_bit(id, tcam->used_groups); 134 + *p_id = id; 135 + return 0; 136 + } 137 + return -ENOBUFS; 138 + } 139 + 140 + static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam, 141 + u16 id) 142 + { 143 + __clear_bit(id, tcam->used_groups); 144 + } 145 + 146 + struct mlxsw_sp_acl_tcam_pattern { 147 + const enum mlxsw_afk_element *elements; 148 + unsigned int elements_count; 149 + }; 150 + 151 + struct mlxsw_sp_acl_tcam_group { 152 + struct mlxsw_sp_acl_tcam *tcam; 153 + u16 id; 154 + struct list_head region_list; 155 + unsigned int region_count; 156 + struct rhashtable chunk_ht; 157 + struct { 158 + u16 local_port; 159 + bool ingress; 160 + } bound; 161 + struct mlxsw_sp_acl_tcam_group_ops *ops; 162 + const struct mlxsw_sp_acl_tcam_pattern *patterns; 163 + unsigned int patterns_count; 164 + }; 165 + 166 + struct mlxsw_sp_acl_tcam_region { 167 + struct list_head list; /* Member of a TCAM group */ 168 + struct list_head chunk_list; /* List of chunks under this region */ 169 + struct parman *parman; 170 + struct mlxsw_sp 
*mlxsw_sp; 171 + struct mlxsw_sp_acl_tcam_group *group; 172 + u16 id; /* ACL ID and region ID - they are same */ 173 + char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; 174 + struct mlxsw_afk_key_info *key_info; 175 + struct { 176 + struct parman_prio parman_prio; 177 + struct parman_item parman_item; 178 + struct mlxsw_sp_acl_rule_info *rulei; 179 + } catchall; 180 + }; 181 + 182 + struct mlxsw_sp_acl_tcam_chunk { 183 + struct list_head list; /* Member of a TCAM region */ 184 + struct rhash_head ht_node; /* Member of a chunk HT */ 185 + unsigned int priority; /* Priority within the region and group */ 186 + struct parman_prio parman_prio; 187 + struct mlxsw_sp_acl_tcam_group *group; 188 + struct mlxsw_sp_acl_tcam_region *region; 189 + unsigned int ref_count; 190 + }; 191 + 192 + struct mlxsw_sp_acl_tcam_entry { 193 + struct parman_item parman_item; 194 + struct mlxsw_sp_acl_tcam_chunk *chunk; 195 + }; 196 + 197 + static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { 198 + .key_len = sizeof(unsigned int), 199 + .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority), 200 + .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node), 201 + .automatic_shrinking = true, 202 + }; 203 + 204 + static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp, 205 + struct mlxsw_sp_acl_tcam_group *group) 206 + { 207 + struct mlxsw_sp_acl_tcam_region *region; 208 + char pagt_pl[MLXSW_REG_PAGT_LEN]; 209 + int acl_index = 0; 210 + 211 + mlxsw_reg_pagt_pack(pagt_pl, group->id); 212 + list_for_each_entry(region, &group->region_list, list) 213 + mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id); 214 + mlxsw_reg_pagt_size_set(pagt_pl, acl_index); 215 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl); 216 + } 217 + 218 + static int 219 + mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, 220 + struct mlxsw_sp_acl_tcam *tcam, 221 + struct mlxsw_sp_acl_tcam_group *group, 222 + const struct 
mlxsw_sp_acl_tcam_pattern *patterns, 223 + unsigned int patterns_count) 224 + { 225 + int err; 226 + 227 + group->tcam = tcam; 228 + group->patterns = patterns; 229 + group->patterns_count = patterns_count; 230 + INIT_LIST_HEAD(&group->region_list); 231 + err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); 232 + if (err) 233 + return err; 234 + 235 + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); 236 + if (err) 237 + goto err_group_update; 238 + 239 + err = rhashtable_init(&group->chunk_ht, 240 + &mlxsw_sp_acl_tcam_chunk_ht_params); 241 + if (err) 242 + goto err_rhashtable_init; 243 + 244 + return 0; 245 + 246 + err_rhashtable_init: 247 + err_group_update: 248 + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); 249 + return err; 250 + } 251 + 252 + static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp, 253 + struct mlxsw_sp_acl_tcam_group *group) 254 + { 255 + struct mlxsw_sp_acl_tcam *tcam = group->tcam; 256 + 257 + rhashtable_destroy(&group->chunk_ht); 258 + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); 259 + WARN_ON(!list_empty(&group->region_list)); 260 + } 261 + 262 + static int 263 + mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp, 264 + struct mlxsw_sp_acl_tcam_group *group, 265 + struct net_device *dev, bool ingress) 266 + { 267 + struct mlxsw_sp_port *mlxsw_sp_port; 268 + char ppbt_pl[MLXSW_REG_PPBT_LEN]; 269 + 270 + if (!mlxsw_sp_port_dev_check(dev)) 271 + return -EINVAL; 272 + 273 + mlxsw_sp_port = netdev_priv(dev); 274 + group->bound.local_port = mlxsw_sp_port->local_port; 275 + group->bound.ingress = ingress; 276 + mlxsw_reg_ppbt_pack(ppbt_pl, 277 + group->bound.ingress ? 
MLXSW_REG_PXBT_E_IACL : 278 + MLXSW_REG_PXBT_E_EACL, 279 + MLXSW_REG_PXBT_OP_BIND, group->bound.local_port, 280 + group->id); 281 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); 282 + } 283 + 284 + static void 285 + mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp, 286 + struct mlxsw_sp_acl_tcam_group *group) 287 + { 288 + char ppbt_pl[MLXSW_REG_PPBT_LEN]; 289 + 290 + mlxsw_reg_ppbt_pack(ppbt_pl, 291 + group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : 292 + MLXSW_REG_PXBT_E_EACL, 293 + MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port, 294 + group->id); 295 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); 296 + } 297 + 298 + static unsigned int 299 + mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) 300 + { 301 + struct mlxsw_sp_acl_tcam_chunk *chunk; 302 + 303 + if (list_empty(&region->chunk_list)) 304 + return 0; 305 + /* As a priority of a region, return priority of the first chunk */ 306 + chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list); 307 + return chunk->priority; 308 + } 309 + 310 + static unsigned int 311 + mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region) 312 + { 313 + struct mlxsw_sp_acl_tcam_chunk *chunk; 314 + 315 + if (list_empty(&region->chunk_list)) 316 + return 0; 317 + chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list); 318 + return chunk->priority; 319 + } 320 + 321 + static void 322 + mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group, 323 + struct mlxsw_sp_acl_tcam_region *region) 324 + { 325 + struct mlxsw_sp_acl_tcam_region *region2; 326 + struct list_head *pos; 327 + 328 + /* Position the region inside the list according to priority */ 329 + list_for_each(pos, &group->region_list) { 330 + region2 = list_entry(pos, typeof(*region2), list); 331 + if (mlxsw_sp_acl_tcam_region_prio(region2) > 332 + mlxsw_sp_acl_tcam_region_prio(region)) 333 + break; 334 + } 335 + list_add_tail(&region->list, pos); 336 + 
group->region_count++; 337 + } 338 + 339 + static void 340 + mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group, 341 + struct mlxsw_sp_acl_tcam_region *region) 342 + { 343 + group->region_count--; 344 + list_del(&region->list); 345 + } 346 + 347 + static int 348 + mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, 349 + struct mlxsw_sp_acl_tcam_group *group, 350 + struct mlxsw_sp_acl_tcam_region *region) 351 + { 352 + int err; 353 + 354 + if (group->region_count == group->tcam->max_group_size) 355 + return -ENOBUFS; 356 + 357 + mlxsw_sp_acl_tcam_group_list_add(group, region); 358 + 359 + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); 360 + if (err) 361 + goto err_group_update; 362 + region->group = group; 363 + 364 + return 0; 365 + 366 + err_group_update: 367 + mlxsw_sp_acl_tcam_group_list_del(group, region); 368 + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); 369 + return err; 370 + } 371 + 372 + static void 373 + mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, 374 + struct mlxsw_sp_acl_tcam_region *region) 375 + { 376 + struct mlxsw_sp_acl_tcam_group *group = region->group; 377 + 378 + mlxsw_sp_acl_tcam_group_list_del(group, region); 379 + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); 380 + } 381 + 382 + static struct mlxsw_sp_acl_tcam_region * 383 + mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group, 384 + unsigned int priority, 385 + struct mlxsw_afk_element_usage *elusage, 386 + bool *p_need_split) 387 + { 388 + struct mlxsw_sp_acl_tcam_region *region, *region2; 389 + struct list_head *pos; 390 + bool issubset; 391 + 392 + list_for_each(pos, &group->region_list) { 393 + region = list_entry(pos, typeof(*region), list); 394 + 395 + /* First, check if the requested priority does not rather belong 396 + * under some of the next regions. 
397 + */ 398 + if (pos->next != &group->region_list) { /* not last */ 399 + region2 = list_entry(pos->next, typeof(*region2), list); 400 + if (priority >= mlxsw_sp_acl_tcam_region_prio(region2)) 401 + continue; 402 + } 403 + 404 + issubset = mlxsw_afk_key_info_subset(region->key_info, elusage); 405 + 406 + /* If requested element usage would not fit and the priority 407 + * is lower than the currently inspected region we cannot 408 + * use this region, so return NULL to indicate new region has 409 + * to be created. 410 + */ 411 + if (!issubset && 412 + priority < mlxsw_sp_acl_tcam_region_prio(region)) 413 + return NULL; 414 + 415 + /* If requested element usage would not fit and the priority 416 + * is higher than the currently inspected region we cannot 417 + * use this region. There is still some hope that the next 418 + * region would be the fit. So let it be processed and 419 + * eventually break at the check right above this. 420 + */ 421 + if (!issubset && 422 + priority > mlxsw_sp_acl_tcam_region_max_prio(region)) 423 + continue; 424 + 425 + /* Indicate if the region needs to be split in order to add 426 + * the requested priority. Split is needed when requested 427 + * element usage won't fit into the found region. 428 + */ 429 + *p_need_split = !issubset; 430 + return region; 431 + } 432 + return NULL; /* New region has to be created. 
*/ 433 + } 434 + 435 + static void 436 + mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, 437 + struct mlxsw_afk_element_usage *elusage, 438 + struct mlxsw_afk_element_usage *out) 439 + { 440 + const struct mlxsw_sp_acl_tcam_pattern *pattern; 441 + int i; 442 + 443 + for (i = 0; i < group->patterns_count; i++) { 444 + pattern = &group->patterns[i]; 445 + mlxsw_afk_element_usage_fill(out, pattern->elements, 446 + pattern->elements_count); 447 + if (mlxsw_afk_element_usage_subset(elusage, out)) 448 + return; 449 + } 450 + memcpy(out, elusage, sizeof(*out)); 451 + } 452 + 453 + #define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 454 + #define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 455 + 456 + static int 457 + mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, 458 + struct mlxsw_sp_acl_tcam_region *region) 459 + { 460 + struct mlxsw_afk_key_info *key_info = region->key_info; 461 + char ptar_pl[MLXSW_REG_PTAR_LEN]; 462 + unsigned int encodings_count; 463 + int i; 464 + int err; 465 + 466 + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC, 467 + MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, 468 + region->id, region->tcam_region_info); 469 + encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info); 470 + for (i = 0; i < encodings_count; i++) { 471 + u16 encoding; 472 + 473 + encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i); 474 + mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding); 475 + } 476 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); 477 + if (err) 478 + return err; 479 + mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info); 480 + return 0; 481 + } 482 + 483 + static void 484 + mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, 485 + struct mlxsw_sp_acl_tcam_region *region) 486 + { 487 + char ptar_pl[MLXSW_REG_PTAR_LEN]; 488 + 489 + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id, 490 + region->tcam_region_info); 491 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); 
492 + } 493 + 494 + static int 495 + mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, 496 + struct mlxsw_sp_acl_tcam_region *region, 497 + u16 new_size) 498 + { 499 + char ptar_pl[MLXSW_REG_PTAR_LEN]; 500 + 501 + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, 502 + new_size, region->id, region->tcam_region_info); 503 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); 504 + } 505 + 506 + static int 507 + mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, 508 + struct mlxsw_sp_acl_tcam_region *region) 509 + { 510 + char pacl_pl[MLXSW_REG_PACL_LEN]; 511 + 512 + mlxsw_reg_pacl_pack(pacl_pl, region->id, true, 513 + region->tcam_region_info); 514 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); 515 + } 516 + 517 + static void 518 + mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, 519 + struct mlxsw_sp_acl_tcam_region *region) 520 + { 521 + char pacl_pl[MLXSW_REG_PACL_LEN]; 522 + 523 + mlxsw_reg_pacl_pack(pacl_pl, region->id, false, 524 + region->tcam_region_info); 525 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); 526 + } 527 + 528 + static int 529 + mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, 530 + struct mlxsw_sp_acl_tcam_region *region, 531 + unsigned int offset, 532 + struct mlxsw_sp_acl_rule_info *rulei) 533 + { 534 + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; 535 + char *act_set; 536 + char *mask; 537 + char *key; 538 + 539 + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, 540 + region->tcam_region_info, offset); 541 + key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); 542 + mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); 543 + mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); 544 + 545 + /* Only the first action set belongs here, the rest is in KVD */ 546 + act_set = mlxsw_afa_block_first_set(rulei->act_block); 547 + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); 548 + 549 + return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(ptce2), ptce2_pl); 550 + } 551 + 552 + static void 553 + mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, 554 + struct mlxsw_sp_acl_tcam_region *region, 555 + unsigned int offset) 556 + { 557 + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; 558 + 559 + mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, 560 + region->tcam_region_info, offset); 561 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); 562 + } 563 + 564 + #define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (-1UL) 565 + 566 + static int 567 + mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, 568 + struct mlxsw_sp_acl_tcam_region *region) 569 + { 570 + struct parman_prio *parman_prio = &region->catchall.parman_prio; 571 + struct parman_item *parman_item = &region->catchall.parman_item; 572 + struct mlxsw_sp_acl_rule_info *rulei; 573 + int err; 574 + 575 + parman_prio_init(region->parman, parman_prio, 576 + MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); 577 + err = parman_item_add(region->parman, parman_prio, parman_item); 578 + if (err) 579 + goto err_parman_item_add; 580 + 581 + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); 582 + if (IS_ERR(rulei)) { 583 + err = PTR_ERR(rulei); 584 + goto err_rulei_create; 585 + } 586 + 587 + mlxsw_sp_acl_rulei_act_continue(rulei); 588 + err = mlxsw_sp_acl_rulei_commit(rulei); 589 + if (err) 590 + goto err_rulei_commit; 591 + 592 + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, 593 + parman_item->index, rulei); 594 + region->catchall.rulei = rulei; 595 + if (err) 596 + goto err_rule_insert; 597 + 598 + return 0; 599 + 600 + err_rule_insert: 601 + err_rulei_commit: 602 + mlxsw_sp_acl_rulei_destroy(rulei); 603 + err_rulei_create: 604 + parman_item_remove(region->parman, parman_prio, parman_item); 605 + err_parman_item_add: 606 + parman_prio_fini(parman_prio); 607 + return err; 608 + } 609 + 610 + static void 611 + mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, 612 + struct mlxsw_sp_acl_tcam_region 
*region) 613 + { 614 + struct parman_prio *parman_prio = &region->catchall.parman_prio; 615 + struct parman_item *parman_item = &region->catchall.parman_item; 616 + struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; 617 + 618 + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, 619 + parman_item->index); 620 + mlxsw_sp_acl_rulei_destroy(rulei); 621 + parman_item_remove(region->parman, parman_prio, parman_item); 622 + parman_prio_fini(parman_prio); 623 + } 624 + 625 + static void 626 + mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp, 627 + struct mlxsw_sp_acl_tcam_region *region, 628 + u16 src_offset, u16 dst_offset, u16 size) 629 + { 630 + char prcr_pl[MLXSW_REG_PRCR_LEN]; 631 + 632 + mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, 633 + region->tcam_region_info, src_offset, 634 + region->tcam_region_info, dst_offset, size); 635 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); 636 + } 637 + 638 + static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv, 639 + unsigned long new_count) 640 + { 641 + struct mlxsw_sp_acl_tcam_region *region = priv; 642 + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; 643 + u64 max_tcam_rules; 644 + 645 + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); 646 + if (new_count > max_tcam_rules) 647 + return -EINVAL; 648 + return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count); 649 + } 650 + 651 + static void mlxsw_sp_acl_tcam_region_parman_move(void *priv, 652 + unsigned long from_index, 653 + unsigned long to_index, 654 + unsigned long count) 655 + { 656 + struct mlxsw_sp_acl_tcam_region *region = priv; 657 + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; 658 + 659 + mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region, 660 + from_index, to_index, count); 661 + } 662 + 663 + static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = { 664 + .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, 665 + .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, 666 + 
.resize = mlxsw_sp_acl_tcam_region_parman_resize, 667 + .move = mlxsw_sp_acl_tcam_region_parman_move, 668 + .algo = PARMAN_ALGO_TYPE_LSORT, 669 + }; 670 + 671 + static struct mlxsw_sp_acl_tcam_region * 672 + mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, 673 + struct mlxsw_sp_acl_tcam *tcam, 674 + struct mlxsw_afk_element_usage *elusage) 675 + { 676 + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); 677 + struct mlxsw_sp_acl_tcam_region *region; 678 + int err; 679 + 680 + region = kzalloc(sizeof(*region), GFP_KERNEL); 681 + if (!region) 682 + return ERR_PTR(-ENOMEM); 683 + INIT_LIST_HEAD(&region->chunk_list); 684 + region->mlxsw_sp = mlxsw_sp; 685 + 686 + region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops, 687 + region); 688 + if (!region->parman) { 689 + err = -ENOMEM; 690 + goto err_parman_create; 691 + } 692 + 693 + region->key_info = mlxsw_afk_key_info_get(afk, elusage); 694 + if (IS_ERR(region->key_info)) { 695 + err = PTR_ERR(region->key_info); 696 + goto err_key_info_get; 697 + } 698 + 699 + err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id); 700 + if (err) 701 + goto err_region_id_get; 702 + 703 + err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); 704 + if (err) 705 + goto err_tcam_region_alloc; 706 + 707 + err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region); 708 + if (err) 709 + goto err_tcam_region_enable; 710 + 711 + err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region); 712 + if (err) 713 + goto err_tcam_region_catchall_add; 714 + 715 + return region; 716 + 717 + err_tcam_region_catchall_add: 718 + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); 719 + err_tcam_region_enable: 720 + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); 721 + err_tcam_region_alloc: 722 + mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); 723 + err_region_id_get: 724 + mlxsw_afk_key_info_put(region->key_info); 725 + err_key_info_get: 726 + parman_destroy(region->parman); 727 + err_parman_create: 728 + kfree(region); 
729 + return ERR_PTR(err); 730 + } 731 + 732 + static void 733 + mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, 734 + struct mlxsw_sp_acl_tcam_region *region) 735 + { 736 + mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region); 737 + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); 738 + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); 739 + mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); 740 + mlxsw_afk_key_info_put(region->key_info); 741 + parman_destroy(region->parman); 742 + kfree(region); 743 + } 744 + 745 + static int 746 + mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp, 747 + struct mlxsw_sp_acl_tcam_group *group, 748 + unsigned int priority, 749 + struct mlxsw_afk_element_usage *elusage, 750 + struct mlxsw_sp_acl_tcam_chunk *chunk) 751 + { 752 + struct mlxsw_sp_acl_tcam_region *region; 753 + bool region_created = false; 754 + bool need_split; 755 + int err; 756 + 757 + region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage, 758 + &need_split); 759 + if (region && need_split) { 760 + /* According to priority, the chunk should belong to an 761 + * existing region. However, this chunk needs elements 762 + * that region does not contain. We need to split the existing 763 + * region into two and create a new region for this chunk 764 + * in between. This is not supported now. 
765 + */ 766 + return -EOPNOTSUPP; 767 + } 768 + if (!region) { 769 + struct mlxsw_afk_element_usage region_elusage; 770 + 771 + mlxsw_sp_acl_tcam_group_use_patterns(group, elusage, 772 + &region_elusage); 773 + region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam, 774 + &region_elusage); 775 + if (IS_ERR(region)) 776 + return PTR_ERR(region); 777 + region_created = true; 778 + } 779 + 780 + chunk->region = region; 781 + list_add_tail(&chunk->list, &region->chunk_list); 782 + 783 + if (!region_created) 784 + return 0; 785 + 786 + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region); 787 + if (err) 788 + goto err_group_region_attach; 789 + 790 + return 0; 791 + 792 + err_group_region_attach: 793 + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); 794 + return err; 795 + } 796 + 797 + static void 798 + mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp, 799 + struct mlxsw_sp_acl_tcam_chunk *chunk) 800 + { 801 + struct mlxsw_sp_acl_tcam_region *region = chunk->region; 802 + 803 + list_del(&chunk->list); 804 + if (list_empty(&region->chunk_list)) { 805 + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region); 806 + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); 807 + } 808 + } 809 + 810 + static struct mlxsw_sp_acl_tcam_chunk * 811 + mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, 812 + struct mlxsw_sp_acl_tcam_group *group, 813 + unsigned int priority, 814 + struct mlxsw_afk_element_usage *elusage) 815 + { 816 + struct mlxsw_sp_acl_tcam_chunk *chunk; 817 + int err; 818 + 819 + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) 820 + return ERR_PTR(-EINVAL); 821 + 822 + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); 823 + if (!chunk) 824 + return ERR_PTR(-ENOMEM); 825 + chunk->priority = priority; 826 + chunk->group = group; 827 + chunk->ref_count = 1; 828 + 829 + err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority, 830 + elusage, chunk); 831 + if (err) 832 + goto err_chunk_assoc; 833 + 834 + 
parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority); 835 + 836 + err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, 837 + mlxsw_sp_acl_tcam_chunk_ht_params); 838 + if (err) 839 + goto err_rhashtable_insert; 840 + 841 + return chunk; 842 + 843 + err_rhashtable_insert: 844 + parman_prio_fini(&chunk->parman_prio); 845 + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); 846 + err_chunk_assoc: 847 + kfree(chunk); 848 + return ERR_PTR(err); 849 + } 850 + 851 + static void 852 + mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, 853 + struct mlxsw_sp_acl_tcam_chunk *chunk) 854 + { 855 + struct mlxsw_sp_acl_tcam_group *group = chunk->group; 856 + 857 + rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, 858 + mlxsw_sp_acl_tcam_chunk_ht_params); 859 + parman_prio_fini(&chunk->parman_prio); 860 + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); 861 + kfree(chunk); 862 + } 863 + 864 + static struct mlxsw_sp_acl_tcam_chunk * 865 + mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp, 866 + struct mlxsw_sp_acl_tcam_group *group, 867 + unsigned int priority, 868 + struct mlxsw_afk_element_usage *elusage) 869 + { 870 + struct mlxsw_sp_acl_tcam_chunk *chunk; 871 + 872 + chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority, 873 + mlxsw_sp_acl_tcam_chunk_ht_params); 874 + if (chunk) { 875 + if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info, 876 + elusage))) 877 + return ERR_PTR(-EINVAL); 878 + chunk->ref_count++; 879 + return chunk; 880 + } 881 + return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group, 882 + priority, elusage); 883 + } 884 + 885 + static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, 886 + struct mlxsw_sp_acl_tcam_chunk *chunk) 887 + { 888 + if (--chunk->ref_count) 889 + return; 890 + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); 891 + } 892 + 893 + static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, 894 + struct mlxsw_sp_acl_tcam_group *group, 895 + struct 
mlxsw_sp_acl_tcam_entry *entry, 896 + struct mlxsw_sp_acl_rule_info *rulei) 897 + { 898 + struct mlxsw_sp_acl_tcam_chunk *chunk; 899 + struct mlxsw_sp_acl_tcam_region *region; 900 + int err; 901 + 902 + chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority, 903 + &rulei->values.elusage); 904 + if (IS_ERR(chunk)) 905 + return PTR_ERR(chunk); 906 + 907 + region = chunk->region; 908 + err = parman_item_add(region->parman, &chunk->parman_prio, 909 + &entry->parman_item); 910 + if (err) 911 + goto err_parman_item_add; 912 + 913 + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, 914 + entry->parman_item.index, 915 + rulei); 916 + if (err) 917 + goto err_rule_insert; 918 + entry->chunk = chunk; 919 + 920 + return 0; 921 + 922 + err_rule_insert: 923 + parman_item_remove(region->parman, &chunk->parman_prio, 924 + &entry->parman_item); 925 + err_parman_item_add: 926 + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); 927 + return err; 928 + } 929 + 930 + static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, 931 + struct mlxsw_sp_acl_tcam_entry *entry) 932 + { 933 + struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; 934 + struct mlxsw_sp_acl_tcam_region *region = chunk->region; 935 + 936 + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, 937 + entry->parman_item.index); 938 + parman_item_remove(region->parman, &chunk->parman_prio, 939 + &entry->parman_item); 940 + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); 941 + } 942 + 943 + static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { 944 + MLXSW_AFK_ELEMENT_SRC_SYS_PORT, 945 + MLXSW_AFK_ELEMENT_DMAC, 946 + MLXSW_AFK_ELEMENT_SMAC, 947 + MLXSW_AFK_ELEMENT_ETHERTYPE, 948 + MLXSW_AFK_ELEMENT_IP_PROTO, 949 + MLXSW_AFK_ELEMENT_SRC_IP4, 950 + MLXSW_AFK_ELEMENT_DST_IP4, 951 + MLXSW_AFK_ELEMENT_DST_L4_PORT, 952 + MLXSW_AFK_ELEMENT_SRC_L4_PORT, 953 + }; 954 + 955 + static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { 956 + MLXSW_AFK_ELEMENT_ETHERTYPE, 
957 + MLXSW_AFK_ELEMENT_IP_PROTO, 958 + MLXSW_AFK_ELEMENT_SRC_IP6_HI, 959 + MLXSW_AFK_ELEMENT_SRC_IP6_LO, 960 + MLXSW_AFK_ELEMENT_DST_IP6_HI, 961 + MLXSW_AFK_ELEMENT_DST_IP6_LO, 962 + MLXSW_AFK_ELEMENT_DST_L4_PORT, 963 + MLXSW_AFK_ELEMENT_SRC_L4_PORT, 964 + }; 965 + 966 + static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = { 967 + { 968 + .elements = mlxsw_sp_acl_tcam_pattern_ipv4, 969 + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4), 970 + }, 971 + { 972 + .elements = mlxsw_sp_acl_tcam_pattern_ipv6, 973 + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6), 974 + }, 975 + }; 976 + 977 + #define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \ 978 + ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns) 979 + 980 + struct mlxsw_sp_acl_tcam_flower_ruleset { 981 + struct mlxsw_sp_acl_tcam_group group; 982 + }; 983 + 984 + struct mlxsw_sp_acl_tcam_flower_rule { 985 + struct mlxsw_sp_acl_tcam_entry entry; 986 + }; 987 + 988 + static int 989 + mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, 990 + void *priv, void *ruleset_priv) 991 + { 992 + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; 993 + struct mlxsw_sp_acl_tcam *tcam = priv; 994 + 995 + return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, 996 + mlxsw_sp_acl_tcam_patterns, 997 + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT); 998 + } 999 + 1000 + static void 1001 + mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp, 1002 + void *ruleset_priv) 1003 + { 1004 + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; 1005 + 1006 + mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); 1007 + } 1008 + 1009 + static int 1010 + mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp, 1011 + void *ruleset_priv, 1012 + struct net_device *dev, bool ingress) 1013 + { 1014 + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; 1015 + 1016 + return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group, 1017 + dev, ingress); 1018 + 
} 1019 + 1020 + static void 1021 + mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, 1022 + void *ruleset_priv) 1023 + { 1024 + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; 1025 + 1026 + mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group); 1027 + } 1028 + 1029 + static int 1030 + mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, 1031 + void *ruleset_priv, void *rule_priv, 1032 + struct mlxsw_sp_acl_rule_info *rulei) 1033 + { 1034 + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; 1035 + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; 1036 + 1037 + return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, 1038 + &rule->entry, rulei); 1039 + } 1040 + 1041 + static void 1042 + mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) 1043 + { 1044 + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; 1045 + 1046 + mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); 1047 + } 1048 + 1049 + static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { 1050 + .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset), 1051 + .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add, 1052 + .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del, 1053 + .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, 1054 + .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, 1055 + .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), 1056 + .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, 1057 + .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, 1058 + }; 1059 + 1060 + static const struct mlxsw_sp_acl_profile_ops * 1061 + mlxsw_sp_acl_tcam_profile_ops_arr[] = { 1062 + [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, 1063 + }; 1064 + 1065 + static const struct mlxsw_sp_acl_profile_ops * 1066 + mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, 1067 + enum mlxsw_sp_acl_profile profile) 1068 + { 1069 + const struct 
mlxsw_sp_acl_profile_ops *ops; 1070 + 1071 + if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr))) 1072 + return NULL; 1073 + ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile]; 1074 + if (WARN_ON(!ops)) 1075 + return NULL; 1076 + return ops; 1077 + } 1078 + 1079 + const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = { 1080 + .priv_size = sizeof(struct mlxsw_sp_acl_tcam), 1081 + .init = mlxsw_sp_acl_tcam_init, 1082 + .fini = mlxsw_sp_acl_tcam_fini, 1083 + .profile_ops = mlxsw_sp_acl_tcam_profile_ops, 1084 + };
+309
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
··· 1 + /* 2 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/errno.h> 37 + #include <linux/netdevice.h> 38 + #include <net/flow_dissector.h> 39 + #include <net/pkt_cls.h> 40 + #include <net/tc_act/tc_gact.h> 41 + #include <net/tc_act/tc_mirred.h> 42 + 43 + #include "spectrum.h" 44 + #include "core_acl_flex_keys.h" 45 + 46 + static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, 47 + struct net_device *dev, 48 + struct mlxsw_sp_acl_rule_info *rulei, 49 + struct tcf_exts *exts) 50 + { 51 + const struct tc_action *a; 52 + LIST_HEAD(actions); 53 + int err; 54 + 55 + if (tc_no_actions(exts)) 56 + return 0; 57 + 58 + tcf_exts_to_list(exts, &actions); 59 + list_for_each_entry(a, &actions, list) { 60 + if (is_tcf_gact_shot(a)) { 61 + err = mlxsw_sp_acl_rulei_act_drop(rulei); 62 + if (err) 63 + return err; 64 + } else if (is_tcf_mirred_egress_redirect(a)) { 65 + int ifindex = tcf_mirred_ifindex(a); 66 + struct net_device *out_dev; 67 + 68 + out_dev = __dev_get_by_index(dev_net(dev), ifindex); 69 + if (out_dev == dev) 70 + out_dev = NULL; 71 + 72 + err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, 73 + out_dev); 74 + if (err) 75 + return err; 76 + } else { 77 + dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); 78 + return -EOPNOTSUPP; 79 + } 80 + } 81 + return 0; 82 + } 83 + 84 + static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, 85 + struct tc_cls_flower_offload *f) 86 + { 87 + struct flow_dissector_key_ipv4_addrs *key = 88 + skb_flow_dissector_target(f->dissector, 89 + FLOW_DISSECTOR_KEY_IPV4_ADDRS, 90 + f->key); 91 + struct flow_dissector_key_ipv4_addrs *mask = 92 + skb_flow_dissector_target(f->dissector, 93 + FLOW_DISSECTOR_KEY_IPV4_ADDRS, 94 + f->mask); 95 + 96 + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4, 97 + ntohl(key->src), ntohl(mask->src)); 98 + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4, 99 + ntohl(key->dst), ntohl(mask->dst)); 100 + } 101 + 102 + static void 
mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, 103 + struct tc_cls_flower_offload *f) 104 + { 105 + struct flow_dissector_key_ipv6_addrs *key = 106 + skb_flow_dissector_target(f->dissector, 107 + FLOW_DISSECTOR_KEY_IPV6_ADDRS, 108 + f->key); 109 + struct flow_dissector_key_ipv6_addrs *mask = 110 + skb_flow_dissector_target(f->dissector, 111 + FLOW_DISSECTOR_KEY_IPV6_ADDRS, 112 + f->mask); 113 + size_t addr_half_size = sizeof(key->src) / 2; 114 + 115 + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI, 116 + &key->src.s6_addr[0], 117 + &mask->src.s6_addr[0], 118 + addr_half_size); 119 + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO, 120 + &key->src.s6_addr[addr_half_size], 121 + &mask->src.s6_addr[addr_half_size], 122 + addr_half_size); 123 + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI, 124 + &key->dst.s6_addr[0], 125 + &mask->dst.s6_addr[0], 126 + addr_half_size); 127 + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO, 128 + &key->dst.s6_addr[addr_half_size], 129 + &mask->dst.s6_addr[addr_half_size], 130 + addr_half_size); 131 + } 132 + 133 + static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, 134 + struct mlxsw_sp_acl_rule_info *rulei, 135 + struct tc_cls_flower_offload *f, 136 + u8 ip_proto) 137 + { 138 + struct flow_dissector_key_ports *key, *mask; 139 + 140 + if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) 141 + return 0; 142 + 143 + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { 144 + dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n"); 145 + return -EINVAL; 146 + } 147 + 148 + key = skb_flow_dissector_target(f->dissector, 149 + FLOW_DISSECTOR_KEY_PORTS, 150 + f->key); 151 + mask = skb_flow_dissector_target(f->dissector, 152 + FLOW_DISSECTOR_KEY_PORTS, 153 + f->mask); 154 + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT, 155 + ntohs(key->dst), ntohs(mask->dst)); 156 + 
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT, 157 + ntohs(key->src), ntohs(mask->src)); 158 + return 0; 159 + } 160 + 161 + static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, 162 + struct net_device *dev, 163 + struct mlxsw_sp_acl_rule_info *rulei, 164 + struct tc_cls_flower_offload *f) 165 + { 166 + u16 addr_type = 0; 167 + u8 ip_proto = 0; 168 + int err; 169 + 170 + if (f->dissector->used_keys & 171 + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 172 + BIT(FLOW_DISSECTOR_KEY_BASIC) | 173 + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 174 + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 175 + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 176 + BIT(FLOW_DISSECTOR_KEY_PORTS))) { 177 + dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); 178 + return -EOPNOTSUPP; 179 + } 180 + 181 + mlxsw_sp_acl_rulei_priority(rulei, f->prio); 182 + 183 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { 184 + struct flow_dissector_key_control *key = 185 + skb_flow_dissector_target(f->dissector, 186 + FLOW_DISSECTOR_KEY_CONTROL, 187 + f->key); 188 + addr_type = key->addr_type; 189 + } 190 + 191 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 192 + struct flow_dissector_key_basic *key = 193 + skb_flow_dissector_target(f->dissector, 194 + FLOW_DISSECTOR_KEY_BASIC, 195 + f->key); 196 + struct flow_dissector_key_basic *mask = 197 + skb_flow_dissector_target(f->dissector, 198 + FLOW_DISSECTOR_KEY_BASIC, 199 + f->mask); 200 + ip_proto = key->ip_proto; 201 + mlxsw_sp_acl_rulei_keymask_u32(rulei, 202 + MLXSW_AFK_ELEMENT_ETHERTYPE, 203 + ntohs(key->n_proto), 204 + ntohs(mask->n_proto)); 205 + mlxsw_sp_acl_rulei_keymask_u32(rulei, 206 + MLXSW_AFK_ELEMENT_IP_PROTO, 207 + key->ip_proto, mask->ip_proto); 208 + } 209 + 210 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 211 + struct flow_dissector_key_eth_addrs *key = 212 + skb_flow_dissector_target(f->dissector, 213 + FLOW_DISSECTOR_KEY_ETH_ADDRS, 214 + f->key); 215 + struct 
flow_dissector_key_eth_addrs *mask = 216 + skb_flow_dissector_target(f->dissector, 217 + FLOW_DISSECTOR_KEY_ETH_ADDRS, 218 + f->mask); 219 + 220 + mlxsw_sp_acl_rulei_keymask_buf(rulei, 221 + MLXSW_AFK_ELEMENT_DMAC, 222 + key->dst, mask->dst, 223 + sizeof(key->dst)); 224 + mlxsw_sp_acl_rulei_keymask_buf(rulei, 225 + MLXSW_AFK_ELEMENT_SMAC, 226 + key->src, mask->src, 227 + sizeof(key->src)); 228 + } 229 + 230 + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) 231 + mlxsw_sp_flower_parse_ipv4(rulei, f); 232 + 233 + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) 234 + mlxsw_sp_flower_parse_ipv6(rulei, f); 235 + 236 + err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto); 237 + if (err) 238 + return err; 239 + 240 + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); 241 + } 242 + 243 + int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, 244 + __be16 protocol, struct tc_cls_flower_offload *f) 245 + { 246 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 247 + struct net_device *dev = mlxsw_sp_port->dev; 248 + struct mlxsw_sp_acl_rule_info *rulei; 249 + struct mlxsw_sp_acl_ruleset *ruleset; 250 + struct mlxsw_sp_acl_rule *rule; 251 + int err; 252 + 253 + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, 254 + MLXSW_SP_ACL_PROFILE_FLOWER); 255 + if (IS_ERR(ruleset)) 256 + return PTR_ERR(ruleset); 257 + 258 + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie); 259 + if (IS_ERR(rule)) { 260 + err = PTR_ERR(rule); 261 + goto err_rule_create; 262 + } 263 + 264 + rulei = mlxsw_sp_acl_rule_rulei(rule); 265 + err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f); 266 + if (err) 267 + goto err_flower_parse; 268 + 269 + err = mlxsw_sp_acl_rulei_commit(rulei); 270 + if (err) 271 + goto err_rulei_commit; 272 + 273 + err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule); 274 + if (err) 275 + goto err_rule_add; 276 + 277 + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); 278 + return 0; 279 + 280 + err_rule_add: 281 + 
err_rulei_commit: 282 + err_flower_parse: 283 + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); 284 + err_rule_create: 285 + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); 286 + return err; 287 + } 288 + 289 + void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, 290 + struct tc_cls_flower_offload *f) 291 + { 292 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 293 + struct mlxsw_sp_acl_ruleset *ruleset; 294 + struct mlxsw_sp_acl_rule *rule; 295 + 296 + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, 297 + ingress, 298 + MLXSW_SP_ACL_PROFILE_FLOWER); 299 + if (WARN_ON(IS_ERR(ruleset))) 300 + return; 301 + 302 + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); 303 + if (!WARN_ON(!rule)) { 304 + mlxsw_sp_acl_rule_del(mlxsw_sp, rule); 305 + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); 306 + } 307 + 308 + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); 309 + }
+13
include/linux/list.h
··· 527 527 pos = list_next_entry(pos, member)) 528 528 529 529 /** 530 + * list_for_each_entry_from_reverse - iterate backwards over list of given type 531 + * from the current point 532 + * @pos: the type * to use as a loop cursor. 533 + * @head: the head for your list. 534 + * @member: the name of the list_head within the struct. 535 + * 536 + * Iterate backwards over list of given type, continuing from current position. 537 + */ 538 + #define list_for_each_entry_from_reverse(pos, head, member) \ 539 + for (; &pos->member != (head); \ 540 + pos = list_prev_entry(pos, member)) 541 + 542 + /** 530 543 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 531 544 * @pos: the type * to use as a loop cursor. 532 545 * @n: another type * to use as temporary storage
+76
include/linux/parman.h
··· 1 + /* 2 + * include/linux/parman.h - Manager for linear priority array areas 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #ifndef _PARMAN_H 36 + #define _PARMAN_H 37 + 38 + #include <linux/list.h> 39 + 40 + enum parman_algo_type { 41 + PARMAN_ALGO_TYPE_LSORT, 42 + }; 43 + 44 + struct parman_item { 45 + struct list_head list; 46 + unsigned long index; 47 + }; 48 + 49 + struct parman_prio { 50 + struct list_head list; 51 + struct list_head item_list; 52 + unsigned long priority; 53 + }; 54 + 55 + struct parman_ops { 56 + unsigned long base_count; 57 + unsigned long resize_step; 58 + int (*resize)(void *priv, unsigned long new_count); 59 + void (*move)(void *priv, unsigned long from_index, 60 + unsigned long to_index, unsigned long count); 61 + enum parman_algo_type algo; 62 + }; 63 + 64 + struct parman; 65 + 66 + struct parman *parman_create(const struct parman_ops *ops, void *priv); 67 + void parman_destroy(struct parman *parman); 68 + void parman_prio_init(struct parman *parman, struct parman_prio *prio, 69 + unsigned long priority); 70 + void parman_prio_fini(struct parman_prio *prio); 71 + int parman_item_add(struct parman *parman, struct parman_prio *prio, 72 + struct parman_item *item); 73 + void parman_item_remove(struct parman *parman, struct parman_prio *prio, 74 + struct parman_item *item); 75 + 76 + #endif
+1
include/net/pkt_cls.h
··· 481 481 482 482 struct tc_cls_flower_offload { 483 483 enum tc_fl_command command; 484 + u32 prio; 484 485 unsigned long cookie; 485 486 struct flow_dissector *dissector; 486 487 struct fl_flow_key *mask;
+3
lib/Kconfig
··· 550 550 config SBITMAP 551 551 bool 552 552 553 + config PARMAN 554 + tristate "parman" 555 + 553 556 endmenu
+10
lib/Kconfig.debug
··· 1826 1826 This is intended to help people writing architecture-specific 1827 1827 optimized versions. If unsure, say N. 1828 1828 1829 + config TEST_PARMAN 1830 + tristate "Perform selftest on priority array manager" 1831 + default n 1832 + depends on PARMAN 1833 + help 1834 + Enable this option to test priority array manager on boot 1835 + (or module load). 1836 + 1837 + If unsure, say N. 1838 + 1829 1839 endmenu # runtime tests 1830 1840 1831 1841 config PROVIDE_OHCI1394_DMA_INIT
+3
lib/Makefile
··· 56 56 obj-$(CONFIG_TEST_PRINTF) += test_printf.o 57 57 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o 58 58 obj-$(CONFIG_TEST_UUID) += test_uuid.o 59 + obj-$(CONFIG_TEST_PARMAN) += test_parman.o 59 60 60 61 ifeq ($(CONFIG_DEBUG_KOBJECT),y) 61 62 CFLAGS_kobject.o += -DDEBUG ··· 231 230 UBSAN_SANITIZE_ubsan.o := n 232 231 233 232 obj-$(CONFIG_SBITMAP) += sbitmap.o 233 + 234 + obj-$(CONFIG_PARMAN) += parman.o
+376
lib/parman.c
··· 1 + /* 2 + * lib/parman.c - Manager for linear priority array areas 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/module.h> 37 + #include <linux/slab.h> 38 + #include <linux/export.h> 39 + #include <linux/list.h> 40 + #include <linux/err.h> 41 + #include <linux/parman.h> 42 + 43 + struct parman_algo { 44 + int (*item_add)(struct parman *parman, struct parman_prio *prio, 45 + struct parman_item *item); 46 + void (*item_remove)(struct parman *parman, struct parman_prio *prio, 47 + struct parman_item *item); 48 + }; 49 + 50 + struct parman { 51 + const struct parman_ops *ops; 52 + void *priv; 53 + const struct parman_algo *algo; 54 + unsigned long count; 55 + unsigned long limit_count; 56 + struct list_head prio_list; 57 + }; 58 + 59 + static int parman_enlarge(struct parman *parman) 60 + { 61 + unsigned long new_count = parman->limit_count + 62 + parman->ops->resize_step; 63 + int err; 64 + 65 + err = parman->ops->resize(parman->priv, new_count); 66 + if (err) 67 + return err; 68 + parman->limit_count = new_count; 69 + return 0; 70 + } 71 + 72 + static int parman_shrink(struct parman *parman) 73 + { 74 + unsigned long new_count = parman->limit_count - 75 + parman->ops->resize_step; 76 + int err; 77 + 78 + if (new_count < parman->ops->base_count) 79 + return 0; 80 + err = parman->ops->resize(parman->priv, new_count); 81 + if (err) 82 + return err; 83 + parman->limit_count = new_count; 84 + return 0; 85 + } 86 + 87 + static bool parman_prio_used(struct parman_prio *prio) 88 + 89 + { 90 + return !list_empty(&prio->item_list); 91 + } 92 + 93 + static struct parman_item *parman_prio_first_item(struct parman_prio *prio) 94 + { 95 + return list_first_entry(&prio->item_list, 96 + typeof(struct parman_item), list); 97 + } 98 + 99 + static unsigned long parman_prio_first_index(struct parman_prio *prio) 100 + { 101 + return parman_prio_first_item(prio)->index; 102 + } 103 + 104 + static struct parman_item *parman_prio_last_item(struct parman_prio *prio) 105 + { 106 + return list_last_entry(&prio->item_list, 107 + 
typeof(struct parman_item), list); 108 + } 109 + 110 + static unsigned long parman_prio_last_index(struct parman_prio *prio) 111 + { 112 + return parman_prio_last_item(prio)->index; 113 + } 114 + 115 + static unsigned long parman_lsort_new_index_find(struct parman *parman, 116 + struct parman_prio *prio) 117 + { 118 + list_for_each_entry_from_reverse(prio, &parman->prio_list, list) { 119 + if (!parman_prio_used(prio)) 120 + continue; 121 + return parman_prio_last_index(prio) + 1; 122 + } 123 + return 0; 124 + } 125 + 126 + static void __parman_prio_move(struct parman *parman, struct parman_prio *prio, 127 + struct parman_item *item, unsigned long to_index, 128 + unsigned long count) 129 + { 130 + parman->ops->move(parman->priv, item->index, to_index, count); 131 + } 132 + 133 + static void parman_prio_shift_down(struct parman *parman, 134 + struct parman_prio *prio) 135 + { 136 + struct parman_item *item; 137 + unsigned long to_index; 138 + 139 + if (!parman_prio_used(prio)) 140 + return; 141 + item = parman_prio_first_item(prio); 142 + to_index = parman_prio_last_index(prio) + 1; 143 + __parman_prio_move(parman, prio, item, to_index, 1); 144 + list_move_tail(&item->list, &prio->item_list); 145 + item->index = to_index; 146 + } 147 + 148 + static void parman_prio_shift_up(struct parman *parman, 149 + struct parman_prio *prio) 150 + { 151 + struct parman_item *item; 152 + unsigned long to_index; 153 + 154 + if (!parman_prio_used(prio)) 155 + return; 156 + item = parman_prio_last_item(prio); 157 + to_index = parman_prio_first_index(prio) - 1; 158 + __parman_prio_move(parman, prio, item, to_index, 1); 159 + list_move(&item->list, &prio->item_list); 160 + item->index = to_index; 161 + } 162 + 163 + static void parman_prio_item_remove(struct parman *parman, 164 + struct parman_prio *prio, 165 + struct parman_item *item) 166 + { 167 + struct parman_item *last_item; 168 + unsigned long to_index; 169 + 170 + last_item = parman_prio_last_item(prio); 171 + if (last_item == 
item) { 172 + list_del(&item->list); 173 + return; 174 + } 175 + to_index = item->index; 176 + __parman_prio_move(parman, prio, last_item, to_index, 1); 177 + list_del(&last_item->list); 178 + list_replace(&item->list, &last_item->list); 179 + last_item->index = to_index; 180 + } 181 + 182 + static int parman_lsort_item_add(struct parman *parman, 183 + struct parman_prio *prio, 184 + struct parman_item *item) 185 + { 186 + struct parman_prio *prio2; 187 + unsigned long new_index; 188 + int err; 189 + 190 + if (parman->count + 1 > parman->limit_count) { 191 + err = parman_enlarge(parman); 192 + if (err) 193 + return err; 194 + } 195 + 196 + new_index = parman_lsort_new_index_find(parman, prio); 197 + list_for_each_entry_reverse(prio2, &parman->prio_list, list) { 198 + if (prio2 == prio) 199 + break; 200 + parman_prio_shift_down(parman, prio2); 201 + } 202 + item->index = new_index; 203 + list_add_tail(&item->list, &prio->item_list); 204 + parman->count++; 205 + return 0; 206 + } 207 + 208 + static void parman_lsort_item_remove(struct parman *parman, 209 + struct parman_prio *prio, 210 + struct parman_item *item) 211 + { 212 + parman_prio_item_remove(parman, prio, item); 213 + list_for_each_entry_continue(prio, &parman->prio_list, list) 214 + parman_prio_shift_up(parman, prio); 215 + parman->count--; 216 + if (parman->limit_count - parman->count >= parman->ops->resize_step) 217 + parman_shrink(parman); 218 + } 219 + 220 + static const struct parman_algo parman_lsort = { 221 + .item_add = parman_lsort_item_add, 222 + .item_remove = parman_lsort_item_remove, 223 + }; 224 + 225 + static const struct parman_algo *parman_algos[] = { 226 + &parman_lsort, 227 + }; 228 + 229 + /** 230 + * parman_create - creates a new parman instance 231 + * @ops: caller-specific callbacks 232 + * @priv: pointer to a private data passed to the ops 233 + * 234 + * Note: all locking must be provided by the caller. 
235 + * 236 + * Each parman instance manages an array area with chunks of entries 237 + * with the same priority. Consider following example: 238 + * 239 + * item 1 with prio 10 240 + * item 2 with prio 10 241 + * item 3 with prio 10 242 + * item 4 with prio 20 243 + * item 5 with prio 20 244 + * item 6 with prio 30 245 + * item 7 with prio 30 246 + * item 8 with prio 30 247 + * 248 + * In this example, there are 3 priority chunks. The order of the priorities 249 + * matters, however the order of items within a single priority chunk does not 250 + * matter. So the same array could be ordered as follows: 251 + * 252 + * item 2 with prio 10 253 + * item 3 with prio 10 254 + * item 1 with prio 10 255 + * item 5 with prio 20 256 + * item 4 with prio 20 257 + * item 7 with prio 30 258 + * item 8 with prio 30 259 + * item 6 with prio 30 260 + * 261 + * The goal of parman is to maintain the priority ordering. The caller 262 + * provides @ops with callbacks parman uses to move the items 263 + * and resize the array area. 264 + * 265 + * Returns a pointer to newly created parman instance in case of success, 266 + * otherwise it returns NULL. 267 + */ 268 + struct parman *parman_create(const struct parman_ops *ops, void *priv) 269 + { 270 + struct parman *parman; 271 + 272 + parman = kzalloc(sizeof(*parman), GFP_KERNEL); 273 + if (!parman) 274 + return NULL; 275 + INIT_LIST_HEAD(&parman->prio_list); 276 + parman->ops = ops; 277 + parman->priv = priv; 278 + parman->limit_count = ops->base_count; 279 + parman->algo = parman_algos[ops->algo]; 280 + return parman; 281 + } 282 + EXPORT_SYMBOL(parman_create); 283 + 284 + /** 285 + * parman_destroy - destroys existing parman instance 286 + * @parman: parman instance 287 + * 288 + * Note: all locking must be provided by the caller. 
289 + */ 290 + void parman_destroy(struct parman *parman) 291 + { 292 + WARN_ON(!list_empty(&parman->prio_list)); 293 + kfree(parman); 294 + } 295 + EXPORT_SYMBOL(parman_destroy); 296 + 297 + /** 298 + * parman_prio_init - initializes a parman priority chunk 299 + * @parman: parman instance 300 + * @prio: parman prio structure to be initialized 301 + * @priority: desired priority of the chunk 302 + * 303 + * Note: all locking must be provided by the caller. 304 + * 305 + * Before the caller can add an item with a certain priority, it has to 306 + * initialize a priority chunk for it using this function. 307 + */ 308 + void parman_prio_init(struct parman *parman, struct parman_prio *prio, 309 + unsigned long priority) 310 + { 311 + struct parman_prio *prio2; 312 + struct list_head *pos; 313 + 314 + INIT_LIST_HEAD(&prio->item_list); 315 + prio->priority = priority; 316 + 317 + /* Position inside the list according to priority */ 318 + list_for_each(pos, &parman->prio_list) { 319 + prio2 = list_entry(pos, typeof(*prio2), list); 320 + if (prio2->priority > prio->priority) 321 + break; 322 + } 323 + list_add_tail(&prio->list, pos); 324 + } 325 + EXPORT_SYMBOL(parman_prio_init); 326 + 327 + /** 328 + * parman_prio_fini - finalizes use of parman priority chunk 329 + * @prio: parman prio structure 330 + * 331 + * Note: all locking must be provided by the caller. 332 + */ 333 + void parman_prio_fini(struct parman_prio *prio) 334 + { 335 + WARN_ON(parman_prio_used(prio)); 336 + list_del(&prio->list); 337 + } 338 + EXPORT_SYMBOL(parman_prio_fini); 339 + 340 + /** 341 + * parman_item_add - adds a parman item under defined priority 342 + * @parman: parman instance 343 + * @prio: parman prio instance to add the item to 344 + * @item: parman item instance 345 + * 346 + * Note: all locking must be provided by the caller. 347 + * 348 + * Adds an item to an array managed by parman instance under the specified priority. 
349 + * 350 + * Returns 0 in case of success, negative number to indicate an error. 351 + */ 352 + int parman_item_add(struct parman *parman, struct parman_prio *prio, 353 + struct parman_item *item) 354 + { 355 + return parman->algo->item_add(parman, prio, item); 356 + } 357 + EXPORT_SYMBOL(parman_item_add); 358 + 359 + /** 360 + * parman_item_remove - deletes parman item 361 + * @parman: parman instance 362 + * @prio: parman prio instance to delete the item from 363 + * @item: parman item instance 364 + * 365 + * Note: all locking must be provided by the caller. 366 + */ 367 + void parman_item_remove(struct parman *parman, struct parman_prio *prio, 368 + struct parman_item *item) 369 + { 370 + parman->algo->item_remove(parman, prio, item); 371 + } 372 + EXPORT_SYMBOL(parman_item_remove); 373 + 374 + MODULE_LICENSE("Dual BSD/GPL"); 375 + MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 376 + MODULE_DESCRIPTION("Priority-based array manager");
+395
lib/test_parman.c
··· 1 + /* 2 + * lib/test_parman.c - Test module for parman 3 + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> 5 + * 6 + * Redistribution and use in source and binary forms, with or without 7 + * modification, are permitted provided that the following conditions are met: 8 + * 9 + * 1. Redistributions of source code must retain the above copyright 10 + * notice, this list of conditions and the following disclaimer. 11 + * 2. Redistributions in binary form must reproduce the above copyright 12 + * notice, this list of conditions and the following disclaimer in the 13 + * documentation and/or other materials provided with the distribution. 14 + * 3. Neither the names of the copyright holders nor the names of its 15 + * contributors may be used to endorse or promote products derived from 16 + * this software without specific prior written permission. 17 + * 18 + * Alternatively, this software may be distributed under the terms of the 19 + * GNU General Public License ("GPL") version 2 as published by the Free 20 + * Software Foundation. 21 + * 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 + * POSSIBILITY OF SUCH DAMAGE. 
33 + */ 34 + 35 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 36 + 37 + #include <linux/kernel.h> 38 + #include <linux/module.h> 39 + #include <linux/slab.h> 40 + #include <linux/bitops.h> 41 + #include <linux/err.h> 42 + #include <linux/random.h> 43 + #include <linux/parman.h> 44 + 45 + #define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */ 46 + #define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT) 47 + #define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1) 48 + 49 + #define TEST_PARMAN_ITEM_SHIFT 13 /* defines a total number 50 + * of items for testing 51 + */ 52 + #define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT) 53 + #define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1) 54 + 55 + #define TEST_PARMAN_BASE_SHIFT 8 56 + #define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT) 57 + #define TEST_PARMAN_RESIZE_STEP_SHIFT 7 58 + #define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT) 59 + 60 + #define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT) 61 + #define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT) 62 + #define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1) 63 + 64 + #define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256) 65 + 66 + struct test_parman_prio { 67 + struct parman_prio parman_prio; 68 + unsigned long priority; 69 + }; 70 + 71 + struct test_parman_item { 72 + struct parman_item parman_item; 73 + struct test_parman_prio *prio; 74 + bool used; 75 + }; 76 + 77 + struct test_parman { 78 + struct parman *parman; 79 + struct test_parman_item **prio_array; 80 + unsigned long prio_array_limit; 81 + struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT]; 82 + struct test_parman_item items[TEST_PARMAN_ITEM_COUNT]; 83 + struct rnd_state rnd; 84 + unsigned long run_budget; 85 + unsigned long bulk_budget; 86 + bool bulk_noop; 87 + unsigned int used_items; 88 + }; 89 + 90 + #define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count)) 91 + 92 + 
static int test_parman_resize(void *priv, unsigned long new_count) 93 + { 94 + struct test_parman *test_parman = priv; 95 + struct test_parman_item **prio_array; 96 + unsigned long old_count; 97 + 98 + prio_array = krealloc(test_parman->prio_array, 99 + ITEM_PTRS_SIZE(new_count), GFP_KERNEL); 100 + if (new_count == 0) 101 + return 0; 102 + if (!prio_array) 103 + return -ENOMEM; 104 + old_count = test_parman->prio_array_limit; 105 + if (new_count > old_count) 106 + memset(&prio_array[old_count], 0, 107 + ITEM_PTRS_SIZE(new_count - old_count)); 108 + test_parman->prio_array = prio_array; 109 + test_parman->prio_array_limit = new_count; 110 + return 0; 111 + } 112 + 113 + static void test_parman_move(void *priv, unsigned long from_index, 114 + unsigned long to_index, unsigned long count) 115 + { 116 + struct test_parman *test_parman = priv; 117 + struct test_parman_item **prio_array = test_parman->prio_array; 118 + 119 + memmove(&prio_array[to_index], &prio_array[from_index], 120 + ITEM_PTRS_SIZE(count)); 121 + memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count)); 122 + } 123 + 124 + static const struct parman_ops test_parman_lsort_ops = { 125 + .base_count = TEST_PARMAN_BASE_COUNT, 126 + .resize_step = TEST_PARMAN_RESIZE_STEP_COUNT, 127 + .resize = test_parman_resize, 128 + .move = test_parman_move, 129 + .algo = PARMAN_ALGO_TYPE_LSORT, 130 + }; 131 + 132 + static void test_parman_rnd_init(struct test_parman *test_parman) 133 + { 134 + prandom_seed_state(&test_parman->rnd, 3141592653589793238ULL); 135 + } 136 + 137 + static u32 test_parman_rnd_get(struct test_parman *test_parman) 138 + { 139 + return prandom_u32_state(&test_parman->rnd); 140 + } 141 + 142 + static unsigned long test_parman_priority_gen(struct test_parman *test_parman) 143 + { 144 + unsigned long priority; 145 + int i; 146 + 147 + again: 148 + priority = test_parman_rnd_get(test_parman); 149 + if (priority == 0) 150 + goto again; 151 + 152 + for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { 153 + 
struct test_parman_prio *prio = &test_parman->prios[i]; 154 + 155 + if (prio->priority == 0) 156 + break; 157 + if (prio->priority == priority) 158 + goto again; 159 + } 160 + return priority; 161 + } 162 + 163 + static void test_parman_prios_init(struct test_parman *test_parman) 164 + { 165 + int i; 166 + 167 + for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { 168 + struct test_parman_prio *prio = &test_parman->prios[i]; 169 + 170 + /* Assign random unique priority to each prio structure */ 171 + prio->priority = test_parman_priority_gen(test_parman); 172 + parman_prio_init(test_parman->parman, &prio->parman_prio, 173 + prio->priority); 174 + } 175 + } 176 + 177 + static void test_parman_prios_fini(struct test_parman *test_parman) 178 + { 179 + int i; 180 + 181 + for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { 182 + struct test_parman_prio *prio = &test_parman->prios[i]; 183 + 184 + parman_prio_fini(&prio->parman_prio); 185 + } 186 + } 187 + 188 + static void test_parman_items_init(struct test_parman *test_parman) 189 + { 190 + int i; 191 + 192 + for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { 193 + struct test_parman_item *item = &test_parman->items[i]; 194 + unsigned int prio_index = test_parman_rnd_get(test_parman) & 195 + TEST_PARMAN_PRIO_MASK; 196 + 197 + /* Assign random prio to each item structure */ 198 + item->prio = &test_parman->prios[prio_index]; 199 + } 200 + } 201 + 202 + static void test_parman_items_fini(struct test_parman *test_parman) 203 + { 204 + int i; 205 + 206 + for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { 207 + struct test_parman_item *item = &test_parman->items[i]; 208 + 209 + if (!item->used) 210 + continue; 211 + parman_item_remove(test_parman->parman, 212 + &item->prio->parman_prio, 213 + &item->parman_item); 214 + } 215 + } 216 + 217 + static struct test_parman *test_parman_create(const struct parman_ops *ops) 218 + { 219 + struct test_parman *test_parman; 220 + int err; 221 + 222 + test_parman = kzalloc(sizeof(*test_parman), 
GFP_KERNEL); 223 + if (!test_parman) 224 + return ERR_PTR(-ENOMEM); 225 + err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT); 226 + if (err) 227 + goto err_resize; 228 + test_parman->parman = parman_create(ops, test_parman); 229 + if (!test_parman->parman) { 230 + err = -ENOMEM; 231 + goto err_parman_create; 232 + } 233 + test_parman_rnd_init(test_parman); 234 + test_parman_prios_init(test_parman); 235 + test_parman_items_init(test_parman); 236 + test_parman->run_budget = TEST_PARMAN_RUN_BUDGET; 237 + return test_parman; 238 + 239 + err_parman_create: 240 + test_parman_resize(test_parman, 0); 241 + err_resize: 242 + kfree(test_parman); 243 + return ERR_PTR(err); 244 + } 245 + 246 + static void test_parman_destroy(struct test_parman *test_parman) 247 + { 248 + test_parman_items_fini(test_parman); 249 + test_parman_prios_fini(test_parman); 250 + parman_destroy(test_parman->parman); 251 + test_parman_resize(test_parman, 0); 252 + kfree(test_parman); 253 + } 254 + 255 + static bool test_parman_run_check_budgets(struct test_parman *test_parman) 256 + { 257 + if (test_parman->run_budget-- == 0) 258 + return false; 259 + if (test_parman->bulk_budget-- != 0) 260 + return true; 261 + 262 + test_parman->bulk_budget = test_parman_rnd_get(test_parman) & 263 + TEST_PARMAN_BULK_MAX_MASK; 264 + test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1; 265 + return true; 266 + } 267 + 268 + static int test_parman_run(struct test_parman *test_parman) 269 + { 270 + unsigned int i = test_parman_rnd_get(test_parman); 271 + int err; 272 + 273 + while (test_parman_run_check_budgets(test_parman)) { 274 + unsigned int item_index = i++ & TEST_PARMAN_ITEM_MASK; 275 + struct test_parman_item *item = &test_parman->items[item_index]; 276 + 277 + if (test_parman->bulk_noop) 278 + continue; 279 + 280 + if (!item->used) { 281 + err = parman_item_add(test_parman->parman, 282 + &item->prio->parman_prio, 283 + &item->parman_item); 284 + if (err) 285 + return err; 286 + 
test_parman->prio_array[item->parman_item.index] = item; 287 + test_parman->used_items++; 288 + } else { 289 + test_parman->prio_array[item->parman_item.index] = NULL; 290 + parman_item_remove(test_parman->parman, 291 + &item->prio->parman_prio, 292 + &item->parman_item); 293 + test_parman->used_items--; 294 + } 295 + item->used = !item->used; 296 + } 297 + return 0; 298 + } 299 + 300 + static int test_parman_check_array(struct test_parman *test_parman, 301 + bool gaps_allowed) 302 + { 303 + unsigned int last_unused_items = 0; 304 + unsigned long last_priority = 0; 305 + unsigned int used_items = 0; 306 + int i; 307 + 308 + if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) { 309 + pr_err("Array limit is lower than the base count (%lu < %lu)\n", 310 + test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT); 311 + return -EINVAL; 312 + } 313 + 314 + for (i = 0; i < test_parman->prio_array_limit; i++) { 315 + struct test_parman_item *item = test_parman->prio_array[i]; 316 + 317 + if (!item) { 318 + last_unused_items++; 319 + continue; 320 + } 321 + if (last_unused_items && !gaps_allowed) { 322 + pr_err("Gap found in array even though they are forbidden\n"); 323 + return -EINVAL; 324 + } 325 + 326 + last_unused_items = 0; 327 + used_items++; 328 + 329 + if (item->prio->priority < last_priority) { 330 + pr_err("Item belongs under higher priority then the last one (current: %lu, previous: %lu)\n", 331 + item->prio->priority, last_priority); 332 + return -EINVAL; 333 + } 334 + last_priority = item->prio->priority; 335 + 336 + if (item->parman_item.index != i) { 337 + pr_err("Item has different index in compare to where it actualy is (%lu != %d)\n", 338 + item->parman_item.index, i); 339 + return -EINVAL; 340 + } 341 + } 342 + 343 + if (used_items != test_parman->used_items) { 344 + pr_err("Number of used items in array does not match (%u != %u)\n", 345 + used_items, test_parman->used_items); 346 + return -EINVAL; 347 + } 348 + 349 + if (last_unused_items >= 
TEST_PARMAN_RESIZE_STEP_COUNT) { 350 + pr_err("Number of unused item at the end of array is bigger than resize step (%u >= %lu)\n", 351 + last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT); 352 + return -EINVAL; 353 + } 354 + 355 + pr_info("Priority array check successful\n"); 356 + 357 + return 0; 358 + } 359 + 360 + static int test_parman_lsort(void) 361 + { 362 + struct test_parman *test_parman; 363 + int err; 364 + 365 + test_parman = test_parman_create(&test_parman_lsort_ops); 366 + if (IS_ERR(test_parman)) 367 + return PTR_ERR(test_parman); 368 + 369 + err = test_parman_run(test_parman); 370 + if (err) 371 + goto out; 372 + 373 + err = test_parman_check_array(test_parman, false); 374 + if (err) 375 + goto out; 376 + out: 377 + test_parman_destroy(test_parman); 378 + return err; 379 + } 380 + 381 + static int __init test_parman_init(void) 382 + { 383 + return test_parman_lsort(); 384 + } 385 + 386 + static void __exit test_parman_exit(void) 387 + { 388 + } 389 + 390 + module_init(test_parman_init); 391 + module_exit(test_parman_exit); 392 + 393 + MODULE_LICENSE("Dual BSD/GPL"); 394 + MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 395 + MODULE_DESCRIPTION("Test module for parman");
+3
net/sched/cls_flower.c
··· 229 229 return; 230 230 231 231 offload.command = TC_CLSFLOWER_DESTROY; 232 + offload.prio = tp->prio; 232 233 offload.cookie = (unsigned long)f; 233 234 234 235 tc->type = TC_SETUP_CLSFLOWER; ··· 261 260 } 262 261 263 262 offload.command = TC_CLSFLOWER_REPLACE; 263 + offload.prio = tp->prio; 264 264 offload.cookie = (unsigned long)f; 265 265 offload.dissector = dissector; 266 266 offload.mask = mask; ··· 289 287 return; 290 288 291 289 offload.command = TC_CLSFLOWER_STATS; 290 + offload.prio = tp->prio; 292 291 offload.cookie = (unsigned long)f; 293 292 offload.exts = &f->exts; 294 293