Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlxsw-devlink-shared-buffers'

Jiri Pirko says:

====================
devlink + mlxsw: add support for config and control of shared buffers

ASICs implement a shared buffer for packet forwarding purposes and enable
flexible partitioning of the shared buffer for different flows and ports,
allowing non-blocking progress of different flows as well as separation
of lossy traffic from loss-less traffic when using Per-Priority Flow
Control (PFC). The shared buffer optimizes buffer utilization for better
absorption of packet bursts.

This patchset implements an API which is based on the model SAI uses. That
model is aligned with multiple ASIC vendors, so this API should be vendor-neutral.

Userspace counterpart patchset for devlink iproute2 tool can be found here:
https://github.com/jpirko/iproute2_mlxsw/tree/devlink_sb

A couple of examples of usage:

switch$ devlink sb help
Usage: devlink sb show [ DEV [ sb SB_INDEX ] ]
devlink sb pool show [ DEV [ sb SB_INDEX ] pool POOL_INDEX ]
devlink sb pool set DEV [ sb SB_INDEX ] pool POOL_INDEX
size POOL_SIZE thtype { static | dynamic }
devlink sb port pool show [ DEV/PORT_INDEX [ sb SB_INDEX ]
pool POOL_INDEX ]
devlink sb port pool set DEV/PORT_INDEX [ sb SB_INDEX ]
pool POOL_INDEX th THRESHOLD
devlink sb tc bind show [ DEV/PORT_INDEX [ sb SB_INDEX ] tc TC_INDEX ]
devlink sb tc bind set DEV/PORT_INDEX [ sb SB_INDEX ] tc TC_INDEX
type { ingress | egress } pool POOL_INDEX
th THRESHOLD
devlink sb occupancy show { DEV | DEV/PORT_INDEX } [ sb SB_INDEX ]
devlink sb occupancy snapshot DEV [ sb SB_INDEX ]
devlink sb occupancy clearmax DEV [ sb SB_INDEX ]

switch$ devlink sb show
pci/0000:03:00.0: sb 0 size 16777216 ing_pools 4 eg_pools 4 ing_tcs 8 eg_tcs 8

switch$ devlink sb pool show
pci/0000:03:00.0: sb 0 pool 0 type ingress size 12400032 thtype dynamic
pci/0000:03:00.0: sb 0 pool 1 type ingress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 2 type ingress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 3 type ingress size 200064 thtype dynamic
pci/0000:03:00.0: sb 0 pool 4 type egress size 13220064 thtype dynamic
pci/0000:03:00.0: sb 0 pool 5 type egress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 6 type egress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 7 type egress size 0 thtype dynamic

switch$ devlink sb port pool show sw0p7 pool 0
sw0p7: sb 0 pool 0 threshold 16

switch$ sudo devlink sb port pool set sw0p7 pool 0 th 15

switch$ devlink sb port pool show sw0p7 pool 0
sw0p7: sb 0 pool 0 threshold 15

switch$ devlink sb tc bind show sw0p7 tc 0 type ingress
sw0p7: sb 0 tc 0 type ingress pool 0 threshold 10

switch$ sudo devlink sb tc bind set sw0p7 tc 0 type ingress pool 0 th 9

switch$ devlink sb tc bind show sw0p7 tc 0 type ingress
sw0p7: sb 0 tc 0 type ingress pool 0 threshold 9

switch$ sudo devlink sb occupancy snapshot pci/0000:03:00.0

switch$ devlink sb occupancy show sw0p7
sw0p7:
pool: 0: 82944/3217344 1: 0/0 2: 0/0 3: 0/0
4: 0/384 5: 0/0 6: 0/0 7: 0/0
itc: 0(0): 96768/3217344 1(0): 0/0 2(0): 0/0 3(0): 0/0
4(0): 0/0 5(0): 0/0 6(0): 0/0 7(0): 0/0
etc: 0(4): 0/384 1(4): 0/0 2(4): 0/0 3(4): 0/0
4(4): 0/0 5(4): 0/0 6(4): 0/0 7(4): 0/0

switch$ sudo devlink sb occupancy clearmax pci/0000:03:00.0
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+2691 -426
+512 -184
drivers/net/ethernet/mellanox/mlxsw/core.c
··· 44 44 #include <linux/seq_file.h> 45 45 #include <linux/u64_stats_sync.h> 46 46 #include <linux/netdevice.h> 47 - #include <linux/wait.h> 47 + #include <linux/completion.h> 48 48 #include <linux/skbuff.h> 49 49 #include <linux/etherdevice.h> 50 50 #include <linux/types.h> ··· 55 55 #include <linux/mutex.h> 56 56 #include <linux/rcupdate.h> 57 57 #include <linux/slab.h> 58 + #include <linux/workqueue.h> 58 59 #include <asm/byteorder.h> 59 60 #include <net/devlink.h> 60 61 ··· 73 72 static const char mlxsw_core_driver_name[] = "mlxsw_core"; 74 73 75 74 static struct dentry *mlxsw_core_dbg_root; 75 + 76 + static struct workqueue_struct *mlxsw_wq; 76 77 77 78 struct mlxsw_core_pcpu_stats { 78 79 u64 trap_rx_packets[MLXSW_TRAP_ID_MAX]; ··· 96 93 struct list_head rx_listener_list; 97 94 struct list_head event_listener_list; 98 95 struct { 99 - struct sk_buff *resp_skb; 100 - u64 tid; 101 - wait_queue_head_t wait; 102 - bool trans_active; 103 - struct mutex lock; /* One EMAD transaction at a time. 
*/ 96 + atomic64_t tid; 97 + struct list_head trans_list; 98 + spinlock_t trans_list_lock; /* protects trans_list writes */ 104 99 bool use_emad; 105 100 } emad; 106 101 struct mlxsw_core_pcpu_stats __percpu *pcpu_stats; ··· 291 290 static void mlxsw_emad_pack_op_tlv(char *op_tlv, 292 291 const struct mlxsw_reg_info *reg, 293 292 enum mlxsw_core_reg_access_type type, 294 - struct mlxsw_core *mlxsw_core) 293 + u64 tid) 295 294 { 296 295 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP); 297 296 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN); ··· 307 306 MLXSW_EMAD_OP_TLV_METHOD_WRITE); 308 307 mlxsw_emad_op_tlv_class_set(op_tlv, 309 308 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); 310 - mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid); 309 + mlxsw_emad_op_tlv_tid_set(op_tlv, tid); 311 310 } 312 311 313 312 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) ··· 329 328 const struct mlxsw_reg_info *reg, 330 329 char *payload, 331 330 enum mlxsw_core_reg_access_type type, 332 - struct mlxsw_core *mlxsw_core) 331 + u64 tid) 333 332 { 334 333 char *buf; 335 334 ··· 340 339 mlxsw_emad_pack_reg_tlv(buf, reg, payload); 341 340 342 341 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); 343 - mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core); 342 + mlxsw_emad_pack_op_tlv(buf, reg, type, tid); 344 343 345 344 mlxsw_emad_construct_eth_hdr(skb); 346 345 } ··· 377 376 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE); 378 377 } 379 378 380 - #define MLXSW_EMAD_TIMEOUT_MS 200 381 - 382 - static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, 383 - struct sk_buff *skb, 384 - const struct mlxsw_tx_info *tx_info) 379 + static int mlxsw_emad_process_status(char *op_tlv, 380 + enum mlxsw_emad_op_tlv_status *p_status) 385 381 { 386 - int err; 387 - int ret; 382 + *p_status = mlxsw_emad_op_tlv_status_get(op_tlv); 388 383 389 - mlxsw_core->emad.trans_active = true; 390 - 391 - err = mlxsw_core_skb_transmit(mlxsw_core, skb, 
tx_info); 392 - if (err) { 393 - dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", 394 - mlxsw_core->emad.tid); 395 - dev_kfree_skb(skb); 396 - goto trans_inactive_out; 397 - } 398 - 399 - ret = wait_event_timeout(mlxsw_core->emad.wait, 400 - !(mlxsw_core->emad.trans_active), 401 - msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); 402 - if (!ret) { 403 - dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", 404 - mlxsw_core->emad.tid); 405 - err = -EIO; 406 - goto trans_inactive_out; 407 - } 408 - 409 - return 0; 410 - 411 - trans_inactive_out: 412 - mlxsw_core->emad.trans_active = false; 413 - return err; 414 - } 415 - 416 - static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, 417 - char *op_tlv) 418 - { 419 - enum mlxsw_emad_op_tlv_status status; 420 - u64 tid; 421 - 422 - status = mlxsw_emad_op_tlv_status_get(op_tlv); 423 - tid = mlxsw_emad_op_tlv_tid_get(op_tlv); 424 - 425 - switch (status) { 384 + switch (*p_status) { 426 385 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: 427 386 return 0; 428 387 case MLXSW_EMAD_OP_TLV_STATUS_BUSY: 429 388 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: 430 - dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n", 431 - tid, status, mlxsw_emad_op_tlv_status_str(status)); 432 389 return -EAGAIN; 433 390 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: 434 391 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: ··· 397 438 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: 398 439 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: 399 440 default: 400 - dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n", 401 - tid, status, mlxsw_emad_op_tlv_status_str(status)); 402 441 return -EIO; 403 442 } 404 443 } 405 444 406 - static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core, 407 - struct sk_buff *skb) 445 + static int 446 + mlxsw_emad_process_status_skb(struct sk_buff *skb, 447 + enum mlxsw_emad_op_tlv_status 
*p_status) 408 448 { 409 - return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb)); 449 + return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status); 450 + } 451 + 452 + struct mlxsw_reg_trans { 453 + struct list_head list; 454 + struct list_head bulk_list; 455 + struct mlxsw_core *core; 456 + struct sk_buff *tx_skb; 457 + struct mlxsw_tx_info tx_info; 458 + struct delayed_work timeout_dw; 459 + unsigned int retries; 460 + u64 tid; 461 + struct completion completion; 462 + atomic_t active; 463 + mlxsw_reg_trans_cb_t *cb; 464 + unsigned long cb_priv; 465 + const struct mlxsw_reg_info *reg; 466 + enum mlxsw_core_reg_access_type type; 467 + int err; 468 + enum mlxsw_emad_op_tlv_status emad_status; 469 + struct rcu_head rcu; 470 + }; 471 + 472 + #define MLXSW_EMAD_TIMEOUT_MS 200 473 + 474 + static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) 475 + { 476 + unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); 477 + 478 + mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); 410 479 } 411 480 412 481 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, 413 - struct sk_buff *skb, 414 - const struct mlxsw_tx_info *tx_info) 482 + struct mlxsw_reg_trans *trans) 415 483 { 416 - struct sk_buff *trans_skb; 417 - int n_retry; 484 + struct sk_buff *skb; 418 485 int err; 419 486 420 - n_retry = 0; 421 - retry: 422 - /* We copy the EMAD to a new skb, since we might need 423 - * to retransmit it in case of failure. 
424 - */ 425 - trans_skb = skb_copy(skb, GFP_KERNEL); 426 - if (!trans_skb) { 427 - err = -ENOMEM; 428 - goto out; 487 + skb = skb_copy(trans->tx_skb, GFP_KERNEL); 488 + if (!skb) 489 + return -ENOMEM; 490 + 491 + atomic_set(&trans->active, 1); 492 + err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info); 493 + if (err) { 494 + dev_kfree_skb(skb); 495 + return err; 429 496 } 430 - 431 - err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info); 432 - if (!err) { 433 - struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb; 434 - 435 - err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb); 436 - if (err) 437 - dev_kfree_skb(resp_skb); 438 - if (!err || err != -EAGAIN) 439 - goto out; 440 - } 441 - if (n_retry++ < MLXSW_EMAD_MAX_RETRY) 442 - goto retry; 443 - 444 - out: 445 - dev_kfree_skb(skb); 446 - mlxsw_core->emad.tid++; 447 - return err; 497 + mlxsw_emad_trans_timeout_schedule(trans); 498 + return 0; 448 499 } 449 500 501 + static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err) 502 + { 503 + struct mlxsw_core *mlxsw_core = trans->core; 504 + 505 + dev_kfree_skb(trans->tx_skb); 506 + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); 507 + list_del_rcu(&trans->list); 508 + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); 509 + trans->err = err; 510 + complete(&trans->completion); 511 + } 512 + 513 + static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, 514 + struct mlxsw_reg_trans *trans) 515 + { 516 + int err; 517 + 518 + if (trans->retries < MLXSW_EMAD_MAX_RETRY) { 519 + trans->retries++; 520 + err = mlxsw_emad_transmit(trans->core, trans); 521 + if (err == 0) 522 + return; 523 + } else { 524 + err = -EIO; 525 + } 526 + mlxsw_emad_trans_finish(trans, err); 527 + } 528 + 529 + static void mlxsw_emad_trans_timeout_work(struct work_struct *work) 530 + { 531 + struct mlxsw_reg_trans *trans = container_of(work, 532 + struct mlxsw_reg_trans, 533 + timeout_dw.work); 534 + 535 + if (!atomic_dec_and_test(&trans->active)) 
536 + return; 537 + 538 + mlxsw_emad_transmit_retry(trans->core, trans); 539 + } 540 + 541 + static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, 542 + struct mlxsw_reg_trans *trans, 543 + struct sk_buff *skb) 544 + { 545 + int err; 546 + 547 + if (!atomic_dec_and_test(&trans->active)) 548 + return; 549 + 550 + err = mlxsw_emad_process_status_skb(skb, &trans->emad_status); 551 + if (err == -EAGAIN) { 552 + mlxsw_emad_transmit_retry(mlxsw_core, trans); 553 + } else { 554 + if (err == 0) { 555 + char *op_tlv = mlxsw_emad_op_tlv(skb); 556 + 557 + if (trans->cb) 558 + trans->cb(mlxsw_core, 559 + mlxsw_emad_reg_payload(op_tlv), 560 + trans->reg->len, trans->cb_priv); 561 + } 562 + mlxsw_emad_trans_finish(trans, err); 563 + } 564 + } 565 + 566 + /* called with rcu read lock held */ 450 567 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, 451 568 void *priv) 452 569 { 453 570 struct mlxsw_core *mlxsw_core = priv; 571 + struct mlxsw_reg_trans *trans; 454 572 455 - if (mlxsw_emad_is_resp(skb) && 456 - mlxsw_core->emad.trans_active && 457 - mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) { 458 - mlxsw_core->emad.resp_skb = skb; 459 - mlxsw_core->emad.trans_active = false; 460 - wake_up(&mlxsw_core->emad.wait); 461 - } else { 462 - dev_kfree_skb(skb); 573 + if (!mlxsw_emad_is_resp(skb)) 574 + goto free_skb; 575 + 576 + list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) { 577 + if (mlxsw_emad_get_tid(skb) == trans->tid) { 578 + mlxsw_emad_process_response(mlxsw_core, trans, skb); 579 + break; 580 + } 463 581 } 582 + 583 + free_skb: 584 + dev_kfree_skb(skb); 464 585 } 465 586 466 587 static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = { ··· 567 528 568 529 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) 569 530 { 531 + u64 tid; 570 532 int err; 571 533 572 534 /* Set the upper 32 bits of the transaction ID field to a random 573 535 * number. 
This allows us to discard EMADs addressed to other 574 536 * devices. 575 537 */ 576 - get_random_bytes(&mlxsw_core->emad.tid, 4); 577 - mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32; 538 + get_random_bytes(&tid, 4); 539 + tid <<= 32; 540 + atomic64_set(&mlxsw_core->emad.tid, tid); 578 541 579 - init_waitqueue_head(&mlxsw_core->emad.wait); 580 - mlxsw_core->emad.trans_active = false; 581 - mutex_init(&mlxsw_core->emad.lock); 542 + INIT_LIST_HEAD(&mlxsw_core->emad.trans_list); 543 + spin_lock_init(&mlxsw_core->emad.trans_list_lock); 582 544 583 545 err = mlxsw_core_rx_listener_register(mlxsw_core, 584 546 &mlxsw_emad_rx_listener, ··· 635 595 skb_reserve(skb, emad_len); 636 596 637 597 return skb; 598 + } 599 + 600 + static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, 601 + const struct mlxsw_reg_info *reg, 602 + char *payload, 603 + enum mlxsw_core_reg_access_type type, 604 + struct mlxsw_reg_trans *trans, 605 + struct list_head *bulk_list, 606 + mlxsw_reg_trans_cb_t *cb, 607 + unsigned long cb_priv, u64 tid) 608 + { 609 + struct sk_buff *skb; 610 + int err; 611 + 612 + dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", 613 + trans->tid, reg->id, mlxsw_reg_id_str(reg->id), 614 + mlxsw_core_reg_access_type_str(type)); 615 + 616 + skb = mlxsw_emad_alloc(mlxsw_core, reg->len); 617 + if (!skb) 618 + return -ENOMEM; 619 + 620 + list_add_tail(&trans->bulk_list, bulk_list); 621 + trans->core = mlxsw_core; 622 + trans->tx_skb = skb; 623 + trans->tx_info.local_port = MLXSW_PORT_CPU_PORT; 624 + trans->tx_info.is_emad = true; 625 + INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work); 626 + trans->tid = tid; 627 + init_completion(&trans->completion); 628 + trans->cb = cb; 629 + trans->cb_priv = cb_priv; 630 + trans->reg = reg; 631 + trans->type = type; 632 + 633 + mlxsw_emad_construct(skb, reg, payload, type, trans->tid); 634 + mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info); 635 + 636 + 
spin_lock_bh(&mlxsw_core->emad.trans_list_lock); 637 + list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list); 638 + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); 639 + err = mlxsw_emad_transmit(mlxsw_core, trans); 640 + if (err) 641 + goto err_out; 642 + return 0; 643 + 644 + err_out: 645 + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); 646 + list_del_rcu(&trans->list); 647 + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); 648 + list_del(&trans->bulk_list); 649 + dev_kfree_skb(trans->tx_skb); 650 + return err; 638 651 } 639 652 640 653 /***************** ··· 778 685 .read = seq_read, 779 686 .llseek = seq_lseek 780 687 }; 781 - 782 - static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, 783 - const char *buf, size_t size) 784 - { 785 - __be32 *m = (__be32 *) buf; 786 - int i; 787 - int count = size / sizeof(__be32); 788 - 789 - for (i = count - 1; i >= 0; i--) 790 - if (m[i]) 791 - break; 792 - i++; 793 - count = i ? i : 1; 794 - for (i = 0; i < count; i += 4) 795 - dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", 796 - i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), 797 - be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); 798 - } 799 688 800 689 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) 801 690 { ··· 891 816 return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index); 892 817 } 893 818 819 + static int 820 + mlxsw_devlink_sb_pool_get(struct devlink *devlink, 821 + unsigned int sb_index, u16 pool_index, 822 + struct devlink_sb_pool_info *pool_info) 823 + { 824 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 825 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 826 + 827 + if (!mlxsw_driver->sb_pool_get) 828 + return -EOPNOTSUPP; 829 + return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index, 830 + pool_index, pool_info); 831 + } 832 + 833 + static int 834 + mlxsw_devlink_sb_pool_set(struct devlink *devlink, 835 + unsigned int sb_index, u16 pool_index, u32 size, 836 + enum 
devlink_sb_threshold_type threshold_type) 837 + { 838 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 839 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 840 + 841 + if (!mlxsw_driver->sb_pool_set) 842 + return -EOPNOTSUPP; 843 + return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index, 844 + pool_index, size, threshold_type); 845 + } 846 + 847 + static void *__dl_port(struct devlink_port *devlink_port) 848 + { 849 + return container_of(devlink_port, struct mlxsw_core_port, devlink_port); 850 + } 851 + 852 + static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port, 853 + unsigned int sb_index, u16 pool_index, 854 + u32 *p_threshold) 855 + { 856 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); 857 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 858 + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); 859 + 860 + if (!mlxsw_driver->sb_port_pool_get) 861 + return -EOPNOTSUPP; 862 + return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index, 863 + pool_index, p_threshold); 864 + } 865 + 866 + static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port, 867 + unsigned int sb_index, u16 pool_index, 868 + u32 threshold) 869 + { 870 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); 871 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 872 + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); 873 + 874 + if (!mlxsw_driver->sb_port_pool_set) 875 + return -EOPNOTSUPP; 876 + return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index, 877 + pool_index, threshold); 878 + } 879 + 880 + static int 881 + mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port, 882 + unsigned int sb_index, u16 tc_index, 883 + enum devlink_sb_pool_type pool_type, 884 + u16 *p_pool_index, u32 *p_threshold) 885 + { 886 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); 887 + struct mlxsw_driver *mlxsw_driver = 
mlxsw_core->driver; 888 + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); 889 + 890 + if (!mlxsw_driver->sb_tc_pool_bind_get) 891 + return -EOPNOTSUPP; 892 + return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index, 893 + tc_index, pool_type, 894 + p_pool_index, p_threshold); 895 + } 896 + 897 + static int 898 + mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, 899 + unsigned int sb_index, u16 tc_index, 900 + enum devlink_sb_pool_type pool_type, 901 + u16 pool_index, u32 threshold) 902 + { 903 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); 904 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 905 + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); 906 + 907 + if (!mlxsw_driver->sb_tc_pool_bind_set) 908 + return -EOPNOTSUPP; 909 + return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index, 910 + tc_index, pool_type, 911 + pool_index, threshold); 912 + } 913 + 914 + static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink, 915 + unsigned int sb_index) 916 + { 917 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 918 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 919 + 920 + if (!mlxsw_driver->sb_occ_snapshot) 921 + return -EOPNOTSUPP; 922 + return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index); 923 + } 924 + 925 + static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink, 926 + unsigned int sb_index) 927 + { 928 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 929 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 930 + 931 + if (!mlxsw_driver->sb_occ_max_clear) 932 + return -EOPNOTSUPP; 933 + return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index); 934 + } 935 + 936 + static int 937 + mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port, 938 + unsigned int sb_index, u16 pool_index, 939 + u32 *p_cur, u32 *p_max) 940 + { 941 + struct mlxsw_core *mlxsw_core = 
devlink_priv(devlink_port->devlink); 942 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 943 + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); 944 + 945 + if (!mlxsw_driver->sb_occ_port_pool_get) 946 + return -EOPNOTSUPP; 947 + return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index, 948 + pool_index, p_cur, p_max); 949 + } 950 + 951 + static int 952 + mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, 953 + unsigned int sb_index, u16 tc_index, 954 + enum devlink_sb_pool_type pool_type, 955 + u32 *p_cur, u32 *p_max) 956 + { 957 + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); 958 + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; 959 + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); 960 + 961 + if (!mlxsw_driver->sb_occ_tc_port_bind_get) 962 + return -EOPNOTSUPP; 963 + return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port, 964 + sb_index, tc_index, 965 + pool_type, p_cur, p_max); 966 + } 967 + 894 968 static const struct devlink_ops mlxsw_devlink_ops = { 895 - .port_split = mlxsw_devlink_port_split, 896 - .port_unsplit = mlxsw_devlink_port_unsplit, 969 + .port_split = mlxsw_devlink_port_split, 970 + .port_unsplit = mlxsw_devlink_port_unsplit, 971 + .sb_pool_get = mlxsw_devlink_sb_pool_get, 972 + .sb_pool_set = mlxsw_devlink_sb_pool_set, 973 + .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get, 974 + .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set, 975 + .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get, 976 + .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set, 977 + .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot, 978 + .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear, 979 + .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get, 980 + .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get, 897 981 }; 898 982 899 983 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, ··· 1336 1102 } 1337 
1103 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister); 1338 1104 1105 + static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core) 1106 + { 1107 + return atomic64_inc_return(&mlxsw_core->emad.tid); 1108 + } 1109 + 1339 1110 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, 1340 1111 const struct mlxsw_reg_info *reg, 1341 1112 char *payload, 1342 - enum mlxsw_core_reg_access_type type) 1113 + enum mlxsw_core_reg_access_type type, 1114 + struct list_head *bulk_list, 1115 + mlxsw_reg_trans_cb_t *cb, 1116 + unsigned long cb_priv) 1343 1117 { 1118 + u64 tid = mlxsw_core_tid_get(mlxsw_core); 1119 + struct mlxsw_reg_trans *trans; 1344 1120 int err; 1345 - char *op_tlv; 1346 - struct sk_buff *skb; 1347 - struct mlxsw_tx_info tx_info = { 1348 - .local_port = MLXSW_PORT_CPU_PORT, 1349 - .is_emad = true, 1350 - }; 1351 1121 1352 - skb = mlxsw_emad_alloc(mlxsw_core, reg->len); 1353 - if (!skb) 1122 + trans = kzalloc(sizeof(*trans), GFP_KERNEL); 1123 + if (!trans) 1354 1124 return -ENOMEM; 1355 1125 1356 - mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core); 1357 - mlxsw_core->driver->txhdr_construct(skb, &tx_info); 1358 - 1359 - dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n", 1360 - mlxsw_core->emad.tid); 1361 - mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len); 1362 - 1363 - err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info); 1364 - if (!err) { 1365 - op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb); 1366 - memcpy(payload, mlxsw_emad_reg_payload(op_tlv), 1367 - reg->len); 1368 - 1369 - dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n", 1370 - mlxsw_core->emad.tid - 1); 1371 - mlxsw_core_buf_dump_dbg(mlxsw_core, 1372 - mlxsw_core->emad.resp_skb->data, 1373 - mlxsw_core->emad.resp_skb->len); 1374 - 1375 - dev_kfree_skb(mlxsw_core->emad.resp_skb); 1126 + err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, 1127 + bulk_list, cb, cb_priv, tid); 1128 + if (err) { 1129 + kfree(trans); 1130 + return err; 
1376 1131 } 1132 + return 0; 1133 + } 1377 1134 1135 + int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, 1136 + const struct mlxsw_reg_info *reg, char *payload, 1137 + struct list_head *bulk_list, 1138 + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) 1139 + { 1140 + return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, 1141 + MLXSW_CORE_REG_ACCESS_TYPE_QUERY, 1142 + bulk_list, cb, cb_priv); 1143 + } 1144 + EXPORT_SYMBOL(mlxsw_reg_trans_query); 1145 + 1146 + int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, 1147 + const struct mlxsw_reg_info *reg, char *payload, 1148 + struct list_head *bulk_list, 1149 + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) 1150 + { 1151 + return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, 1152 + MLXSW_CORE_REG_ACCESS_TYPE_WRITE, 1153 + bulk_list, cb, cb_priv); 1154 + } 1155 + EXPORT_SYMBOL(mlxsw_reg_trans_write); 1156 + 1157 + static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) 1158 + { 1159 + struct mlxsw_core *mlxsw_core = trans->core; 1160 + int err; 1161 + 1162 + wait_for_completion(&trans->completion); 1163 + cancel_delayed_work_sync(&trans->timeout_dw); 1164 + err = trans->err; 1165 + 1166 + if (trans->retries) 1167 + dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n", 1168 + trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid); 1169 + if (err) 1170 + dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n", 1171 + trans->tid, trans->reg->id, 1172 + mlxsw_reg_id_str(trans->reg->id), 1173 + mlxsw_core_reg_access_type_str(trans->type), 1174 + trans->emad_status, 1175 + mlxsw_emad_op_tlv_status_str(trans->emad_status)); 1176 + 1177 + list_del(&trans->bulk_list); 1178 + kfree_rcu(trans, rcu); 1378 1179 return err; 1379 1180 } 1181 + 1182 + int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list) 1183 + { 1184 + struct mlxsw_reg_trans *trans; 1185 + struct mlxsw_reg_trans *tmp; 1186 + int sum_err = 0; 1187 + int err; 1188 + 
1189 + list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) { 1190 + err = mlxsw_reg_trans_wait(trans); 1191 + if (err && sum_err == 0) 1192 + sum_err = err; /* first error to be returned */ 1193 + } 1194 + return sum_err; 1195 + } 1196 + EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait); 1380 1197 1381 1198 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, 1382 1199 const struct mlxsw_reg_info *reg, 1383 1200 char *payload, 1384 1201 enum mlxsw_core_reg_access_type type) 1385 1202 { 1203 + enum mlxsw_emad_op_tlv_status status; 1386 1204 int err, n_retry; 1387 1205 char *in_mbox, *out_mbox, *tmp; 1206 + 1207 + dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n", 1208 + reg->id, mlxsw_reg_id_str(reg->id), 1209 + mlxsw_core_reg_access_type_str(type)); 1388 1210 1389 1211 in_mbox = mlxsw_cmd_mbox_alloc(); 1390 1212 if (!in_mbox) ··· 1452 1162 goto free_in_mbox; 1453 1163 } 1454 1164 1455 - mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core); 1165 + mlxsw_emad_pack_op_tlv(in_mbox, reg, type, 1166 + mlxsw_core_tid_get(mlxsw_core)); 1456 1167 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); 1457 1168 mlxsw_emad_pack_reg_tlv(tmp, reg, payload); 1458 1169 ··· 1461 1170 retry: 1462 1171 err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox); 1463 1172 if (!err) { 1464 - err = mlxsw_emad_process_status(mlxsw_core, out_mbox); 1465 - if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) 1466 - goto retry; 1173 + err = mlxsw_emad_process_status(out_mbox, &status); 1174 + if (err) { 1175 + if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) 1176 + goto retry; 1177 + dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n", 1178 + status, mlxsw_emad_op_tlv_status_str(status)); 1179 + } 1467 1180 } 1468 1181 1469 1182 if (!err) 1470 1183 memcpy(payload, mlxsw_emad_reg_payload(out_mbox), 1471 1184 reg->len); 1472 1185 1473 - mlxsw_core->emad.tid++; 1474 1186 mlxsw_cmd_mbox_free(out_mbox); 1475 1187 
free_in_mbox: 1476 1188 mlxsw_cmd_mbox_free(in_mbox); 1189 + if (err) 1190 + dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n", 1191 + reg->id, mlxsw_reg_id_str(reg->id), 1192 + mlxsw_core_reg_access_type_str(type)); 1477 1193 return err; 1194 + } 1195 + 1196 + static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core, 1197 + char *payload, size_t payload_len, 1198 + unsigned long cb_priv) 1199 + { 1200 + char *orig_payload = (char *) cb_priv; 1201 + 1202 + memcpy(orig_payload, payload, payload_len); 1478 1203 } 1479 1204 1480 1205 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core, ··· 1498 1191 char *payload, 1499 1192 enum mlxsw_core_reg_access_type type) 1500 1193 { 1501 - u64 cur_tid; 1194 + LIST_HEAD(bulk_list); 1502 1195 int err; 1503 - 1504 - if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) { 1505 - dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n", 1506 - reg->id, mlxsw_reg_id_str(reg->id), 1507 - mlxsw_core_reg_access_type_str(type)); 1508 - return -EINTR; 1509 - } 1510 - 1511 - cur_tid = mlxsw_core->emad.tid; 1512 - dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", 1513 - cur_tid, reg->id, mlxsw_reg_id_str(reg->id), 1514 - mlxsw_core_reg_access_type_str(type)); 1515 1196 1516 1197 /* During initialization EMAD interface is not available to us, 1517 1198 * so we default to command interface. We switch to EMAD interface 1518 1199 * after setting the appropriate traps. 
1519 1200 */ 1520 1201 if (!mlxsw_core->emad.use_emad) 1521 - err = mlxsw_core_reg_access_cmd(mlxsw_core, reg, 1522 - payload, type); 1523 - else 1524 - err = mlxsw_core_reg_access_emad(mlxsw_core, reg, 1202 + return mlxsw_core_reg_access_cmd(mlxsw_core, reg, 1525 1203 payload, type); 1526 1204 1205 + err = mlxsw_core_reg_access_emad(mlxsw_core, reg, 1206 + payload, type, &bulk_list, 1207 + mlxsw_core_reg_access_cb, 1208 + (unsigned long) payload); 1527 1209 if (err) 1528 - dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n", 1529 - cur_tid, reg->id, mlxsw_reg_id_str(reg->id), 1530 - mlxsw_core_reg_access_type_str(type)); 1531 - 1532 - mutex_unlock(&mlxsw_core->emad.lock); 1533 - return err; 1210 + return err; 1211 + return mlxsw_reg_trans_bulk_wait(&bulk_list); 1534 1212 } 1535 1213 1536 1214 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, ··· 1666 1374 } 1667 1375 EXPORT_SYMBOL(mlxsw_core_port_fini); 1668 1376 1377 + static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, 1378 + const char *buf, size_t size) 1379 + { 1380 + __be32 *m = (__be32 *) buf; 1381 + int i; 1382 + int count = size / sizeof(__be32); 1383 + 1384 + for (i = count - 1; i >= 0; i--) 1385 + if (m[i]) 1386 + break; 1387 + i++; 1388 + count = i ? 
i : 1; 1389 + for (i = 0; i < count; i += 4) 1390 + dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", 1391 + i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), 1392 + be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); 1393 + } 1394 + 1669 1395 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, 1670 1396 u32 in_mod, bool out_mbox_direct, 1671 1397 char *in_mbox, size_t in_mbox_size, ··· 1726 1416 } 1727 1417 EXPORT_SYMBOL(mlxsw_cmd_exec); 1728 1418 1419 + int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) 1420 + { 1421 + return queue_delayed_work(mlxsw_wq, dwork, delay); 1422 + } 1423 + EXPORT_SYMBOL(mlxsw_core_schedule_dw); 1424 + 1729 1425 static int __init mlxsw_core_module_init(void) 1730 1426 { 1731 - mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); 1732 - if (!mlxsw_core_dbg_root) 1427 + int err; 1428 + 1429 + mlxsw_wq = create_workqueue(mlxsw_core_driver_name); 1430 + if (!mlxsw_wq) 1733 1431 return -ENOMEM; 1432 + mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); 1433 + if (!mlxsw_core_dbg_root) { 1434 + err = -ENOMEM; 1435 + goto err_debugfs_create_dir; 1436 + } 1734 1437 return 0; 1438 + 1439 + err_debugfs_create_dir: 1440 + destroy_workqueue(mlxsw_wq); 1441 + return err; 1735 1442 } 1736 1443 1737 1444 static void __exit mlxsw_core_module_exit(void) 1738 1445 { 1739 1446 debugfs_remove_recursive(mlxsw_core_dbg_root); 1447 + destroy_workqueue(mlxsw_wq); 1740 1448 } 1741 1449 1742 1450 module_init(mlxsw_core_module_init);
+56
drivers/net/ethernet/mellanox/mlxsw/core.h
··· 43 43 #include <linux/gfp.h> 44 44 #include <linux/types.h> 45 45 #include <linux/skbuff.h> 46 + #include <linux/workqueue.h> 46 47 #include <net/devlink.h> 47 48 48 49 #include "trap.h" ··· 109 108 const struct mlxsw_event_listener *el, 110 109 void *priv); 111 110 111 + typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload, 112 + size_t payload_len, unsigned long cb_priv); 113 + 114 + int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, 115 + const struct mlxsw_reg_info *reg, char *payload, 116 + struct list_head *bulk_list, 117 + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv); 118 + int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, 119 + const struct mlxsw_reg_info *reg, char *payload, 120 + struct list_head *bulk_list, 121 + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv); 122 + int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list); 123 + 112 124 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, 113 125 const struct mlxsw_reg_info *reg, char *payload); 114 126 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, ··· 151 137 struct devlink_port devlink_port; 152 138 }; 153 139 140 + static inline void * 141 + mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) 142 + { 143 + /* mlxsw_core_port is ensured to always be the first field in driver 144 + * port structure. 
145 + */ 146 + return mlxsw_core_port; 147 + } 148 + 154 149 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, 155 150 struct mlxsw_core_port *mlxsw_core_port, u8 local_port, 156 151 struct net_device *dev, bool split, u32 split_group); 157 152 void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port); 153 + 154 + int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); 158 155 159 156 #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 160 157 ··· 225 200 int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, 226 201 unsigned int count); 227 202 int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); 203 + int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, 204 + unsigned int sb_index, u16 pool_index, 205 + struct devlink_sb_pool_info *pool_info); 206 + int (*sb_pool_set)(struct mlxsw_core *mlxsw_core, 207 + unsigned int sb_index, u16 pool_index, u32 size, 208 + enum devlink_sb_threshold_type threshold_type); 209 + int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port, 210 + unsigned int sb_index, u16 pool_index, 211 + u32 *p_threshold); 212 + int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port, 213 + unsigned int sb_index, u16 pool_index, 214 + u32 threshold); 215 + int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port, 216 + unsigned int sb_index, u16 tc_index, 217 + enum devlink_sb_pool_type pool_type, 218 + u16 *p_pool_index, u32 *p_threshold); 219 + int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port, 220 + unsigned int sb_index, u16 tc_index, 221 + enum devlink_sb_pool_type pool_type, 222 + u16 pool_index, u32 threshold); 223 + int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core, 224 + unsigned int sb_index); 225 + int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core, 226 + unsigned int sb_index); 227 + int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port, 228 + unsigned int sb_index, u16 pool_index, 229 + u32 *p_cur, u32 *p_max); 230 + int 
(*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port, 231 + unsigned int sb_index, u16 tc_index, 232 + enum devlink_sb_pool_type pool_type, 233 + u32 *p_cur, u32 *p_max); 228 234 void (*txhdr_construct)(struct sk_buff *skb, 229 235 const struct mlxsw_tx_info *tx_info); 230 236 u8 txhdr_len;
+134 -1
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 3566 3566 */ 3567 3567 MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); 3568 3568 3569 + /* shared max_buff limits for dynamic threshold for SBCM, SBPM */ 3570 + #define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1 3571 + #define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14 3572 + 3569 3573 /* reg_sbcm_max_buff 3570 3574 * When the pool associated to the port-pg/tclass is configured to 3571 3575 * static, Maximum buffer size for the limiter configured in cells. ··· 3636 3632 */ 3637 3633 MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2); 3638 3634 3635 + /* reg_sbpm_buff_occupancy 3636 + * Current buffer occupancy in cells. 3637 + * Access: RO 3638 + */ 3639 + MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24); 3640 + 3641 + /* reg_sbpm_clr 3642 + * Clear Max Buffer Occupancy 3643 + * When this bit is set, max_buff_occupancy field is cleared (and a 3644 + * new max value is tracked from the time the clear was performed). 3645 + * Access: OP 3646 + */ 3647 + MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1); 3648 + 3649 + /* reg_sbpm_max_buff_occupancy 3650 + * Maximum value of buffer occupancy in cells monitored. Cleared by 3651 + * writing to the clr field. 3652 + * Access: RO 3653 + */ 3654 + MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24); 3655 + 3639 3656 /* reg_sbpm_min_buff 3640 3657 * Minimum buffer size for the limiter, in cells. 
3641 3658 * Access: RW ··· 3677 3652 MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); 3678 3653 3679 3654 static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, 3680 - enum mlxsw_reg_sbxx_dir dir, 3655 + enum mlxsw_reg_sbxx_dir dir, bool clr, 3681 3656 u32 min_buff, u32 max_buff) 3682 3657 { 3683 3658 MLXSW_REG_ZERO(sbpm, payload); 3684 3659 mlxsw_reg_sbpm_local_port_set(payload, local_port); 3685 3660 mlxsw_reg_sbpm_pool_set(payload, pool); 3686 3661 mlxsw_reg_sbpm_dir_set(payload, dir); 3662 + mlxsw_reg_sbpm_clr_set(payload, clr); 3687 3663 mlxsw_reg_sbpm_min_buff_set(payload, min_buff); 3688 3664 mlxsw_reg_sbpm_max_buff_set(payload, max_buff); 3665 + } 3666 + 3667 + static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy, 3668 + u32 *p_max_buff_occupancy) 3669 + { 3670 + *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload); 3671 + *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload); 3689 3672 } 3690 3673 3691 3674 /* SBMM - Shared Buffer Multicast Management Register ··· 3749 3716 mlxsw_reg_sbmm_min_buff_set(payload, min_buff); 3750 3717 mlxsw_reg_sbmm_max_buff_set(payload, max_buff); 3751 3718 mlxsw_reg_sbmm_pool_set(payload, pool); 3719 + } 3720 + 3721 + /* SBSR - Shared Buffer Status Register 3722 + * ------------------------------------ 3723 + * The SBSR register retrieves the shared buffer occupancy according to 3724 + * Port-Pool. Note that this register enables reading a large amount of data. 3725 + * It is the user's responsibility to limit the amount of data to ensure the 3726 + * response can match the maximum transfer unit. In case the response exceeds 3727 + * the maximum transport unit, it will be truncated with no special notice. 
3728 + */ 3729 + #define MLXSW_REG_SBSR_ID 0xB005 3730 + #define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */ 3731 + #define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */ 3732 + #define MLXSW_REG_SBSR_REC_MAX_COUNT 120 3733 + #define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \ 3734 + MLXSW_REG_SBSR_REC_LEN * \ 3735 + MLXSW_REG_SBSR_REC_MAX_COUNT) 3736 + 3737 + static const struct mlxsw_reg_info mlxsw_reg_sbsr = { 3738 + .id = MLXSW_REG_SBSR_ID, 3739 + .len = MLXSW_REG_SBSR_LEN, 3740 + }; 3741 + 3742 + /* reg_sbsr_clr 3743 + * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy 3744 + * field is cleared (and a new max value is tracked from the time the clear 3745 + * was performed). 3746 + * Access: OP 3747 + */ 3748 + MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1); 3749 + 3750 + /* reg_sbsr_ingress_port_mask 3751 + * Bit vector for all ingress network ports. 3752 + * Indicates which of the ports (for which the relevant bit is set) 3753 + * are affected by the set operation. Configuration of any other port 3754 + * does not change. 3755 + * Access: Index 3756 + */ 3757 + MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1); 3758 + 3759 + /* reg_sbsr_pg_buff_mask 3760 + * Bit vector for all switch priority groups. 3761 + * Indicates which of the priorities (for which the relevant bit is set) 3762 + * are affected by the set operation. Configuration of any other priority 3763 + * does not change. 3764 + * Range is 0..cap_max_pg_buffers - 1 3765 + * Access: Index 3766 + */ 3767 + MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1); 3768 + 3769 + /* reg_sbsr_egress_port_mask 3770 + * Bit vector for all egress network ports. 3771 + * Indicates which of the ports (for which the relevant bit is set) 3772 + * are affected by the set operation. Configuration of any other port 3773 + * does not change. 
3774 + * Access: Index 3775 + */ 3776 + MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1); 3777 + 3778 + /* reg_sbsr_tclass_mask 3779 + * Bit vector for all traffic classes. 3780 + * Indicates which of the traffic classes (for which the relevant bit is 3781 + * set) are affected by the set operation. Configuration of any other 3782 + * traffic class does not change. 3783 + * Range is 0..cap_max_tclass - 1 3784 + * Access: Index 3785 + */ 3786 + MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1); 3787 + 3788 + static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr) 3789 + { 3790 + MLXSW_REG_ZERO(sbsr, payload); 3791 + mlxsw_reg_sbsr_clr_set(payload, clr); 3792 + } 3793 + 3794 + /* reg_sbsr_rec_buff_occupancy 3795 + * Current buffer occupancy in cells. 3796 + * Access: RO 3797 + */ 3798 + MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN, 3799 + 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false); 3800 + 3801 + /* reg_sbsr_rec_max_buff_occupancy 3802 + * Maximum value of buffer occupancy in cells monitored. Cleared by 3803 + * writing to the clr field. 3804 + * Access: RO 3805 + */ 3806 + MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN, 3807 + 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false); 3808 + 3809 + static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index, 3810 + u32 *p_buff_occupancy, 3811 + u32 *p_max_buff_occupancy) 3812 + { 3813 + *p_buff_occupancy = 3814 + mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index); 3815 + *p_max_buff_occupancy = 3816 + mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index); 3752 3817 } 3753 3818 3754 3819 static inline const char *mlxsw_reg_id_str(u16 reg_id) ··· 3944 3813 return "SBPM"; 3945 3814 case MLXSW_REG_SBMM_ID: 3946 3815 return "SBMM"; 3816 + case MLXSW_REG_SBSR_ID: 3817 + return "SBSR"; 3947 3818 default: 3948 3819 return "*UNKNOWN*"; 3949 3820 }
+22 -10
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 2434 2434 2435 2435 err_switchdev_init: 2436 2436 err_lag_init: 2437 + mlxsw_sp_buffers_fini(mlxsw_sp); 2437 2438 err_buffers_init: 2438 2439 err_flood_init: 2439 2440 mlxsw_sp_traps_fini(mlxsw_sp); ··· 2449 2448 { 2450 2449 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2451 2450 2451 + mlxsw_sp_buffers_fini(mlxsw_sp); 2452 2452 mlxsw_sp_switchdev_fini(mlxsw_sp); 2453 2453 mlxsw_sp_traps_fini(mlxsw_sp); 2454 2454 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); ··· 2493 2491 }; 2494 2492 2495 2493 static struct mlxsw_driver mlxsw_sp_driver = { 2496 - .kind = MLXSW_DEVICE_KIND_SPECTRUM, 2497 - .owner = THIS_MODULE, 2498 - .priv_size = sizeof(struct mlxsw_sp), 2499 - .init = mlxsw_sp_init, 2500 - .fini = mlxsw_sp_fini, 2501 - .port_split = mlxsw_sp_port_split, 2502 - .port_unsplit = mlxsw_sp_port_unsplit, 2503 - .txhdr_construct = mlxsw_sp_txhdr_construct, 2504 - .txhdr_len = MLXSW_TXHDR_LEN, 2505 - .profile = &mlxsw_sp_config_profile, 2494 + .kind = MLXSW_DEVICE_KIND_SPECTRUM, 2495 + .owner = THIS_MODULE, 2496 + .priv_size = sizeof(struct mlxsw_sp), 2497 + .init = mlxsw_sp_init, 2498 + .fini = mlxsw_sp_fini, 2499 + .port_split = mlxsw_sp_port_split, 2500 + .port_unsplit = mlxsw_sp_port_unsplit, 2501 + .sb_pool_get = mlxsw_sp_sb_pool_get, 2502 + .sb_pool_set = mlxsw_sp_sb_pool_set, 2503 + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 2504 + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 2505 + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 2506 + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 2507 + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 2508 + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 2509 + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 2510 + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 2511 + .txhdr_construct = mlxsw_sp_txhdr_construct, 2512 + .txhdr_len = MLXSW_TXHDR_LEN, 2513 + .profile = &mlxsw_sp_config_profile, 2506 2514 }; 2507 2515 2508 2516 static int
+68
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 65 65 #define MLXSW_SP_BYTES_PER_CELL 96 66 66 67 67 #define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) 68 + #define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) 68 69 69 70 /* Maximum delay buffer needed in case of PAUSE frames, in cells. 70 71 * Assumes 100m cable and maximum MTU. ··· 118 117 return fid >= MLXSW_SP_VFID_BASE; 119 118 } 120 119 120 + struct mlxsw_sp_sb_pr { 121 + enum mlxsw_reg_sbpr_mode mode; 122 + u32 size; 123 + }; 124 + 125 + struct mlxsw_cp_sb_occ { 126 + u32 cur; 127 + u32 max; 128 + }; 129 + 130 + struct mlxsw_sp_sb_cm { 131 + u32 min_buff; 132 + u32 max_buff; 133 + u8 pool; 134 + struct mlxsw_cp_sb_occ occ; 135 + }; 136 + 137 + struct mlxsw_sp_sb_pm { 138 + u32 min_buff; 139 + u32 max_buff; 140 + struct mlxsw_cp_sb_occ occ; 141 + }; 142 + 143 + #define MLXSW_SP_SB_POOL_COUNT 4 144 + #define MLXSW_SP_SB_TC_COUNT 8 145 + 146 + struct mlxsw_sp_sb { 147 + struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT]; 148 + struct { 149 + struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; 150 + struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; 151 + } ports[MLXSW_PORT_MAX_PORTS]; 152 + }; 153 + 121 154 struct mlxsw_sp { 122 155 struct { 123 156 struct list_head list; ··· 182 147 struct mlxsw_sp_upper master_bridge; 183 148 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; 184 149 u8 port_to_module[MLXSW_PORT_MAX_PORTS]; 150 + struct mlxsw_sp_sb sb; 185 151 }; 186 152 187 153 static inline struct mlxsw_sp_upper * ··· 313 277 }; 314 278 315 279 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp); 280 + void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp); 316 281 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port); 282 + int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, 283 + unsigned int sb_index, u16 pool_index, 284 + struct devlink_sb_pool_info *pool_info); 285 + int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, 286 + unsigned int sb_index, u16 pool_index, u32 size, 287 + enum 
devlink_sb_threshold_type threshold_type); 288 + int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, 289 + unsigned int sb_index, u16 pool_index, 290 + u32 *p_threshold); 291 + int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, 292 + unsigned int sb_index, u16 pool_index, 293 + u32 threshold); 294 + int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, 295 + unsigned int sb_index, u16 tc_index, 296 + enum devlink_sb_pool_type pool_type, 297 + u16 *p_pool_index, u32 *p_threshold); 298 + int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, 299 + unsigned int sb_index, u16 tc_index, 300 + enum devlink_sb_pool_type pool_type, 301 + u16 pool_index, u32 threshold); 302 + int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, 303 + unsigned int sb_index); 304 + int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, 305 + unsigned int sb_index); 306 + int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, 307 + unsigned int sb_index, u16 pool_index, 308 + u32 *p_cur, u32 *p_max); 309 + int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, 310 + unsigned int sb_index, u16 tc_index, 311 + enum devlink_sb_pool_type pool_type, 312 + u32 *p_cur, u32 *p_max); 317 313 318 314 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp); 319 315 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+748 -228
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
··· 36 36 #include <linux/types.h> 37 37 #include <linux/dcbnl.h> 38 38 #include <linux/if_ether.h> 39 + #include <linux/list.h> 39 40 40 41 #include "spectrum.h" 41 42 #include "core.h" 42 43 #include "port.h" 43 44 #include "reg.h" 44 45 45 - struct mlxsw_sp_pb { 46 - u8 index; 47 - u16 size; 48 - }; 46 + static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, 47 + u8 pool, 48 + enum mlxsw_reg_sbxx_dir dir) 49 + { 50 + return &mlxsw_sp->sb.prs[dir][pool]; 51 + } 49 52 50 - #define MLXSW_SP_PB(_index, _size) \ 51 - { \ 52 - .index = _index, \ 53 - .size = _size, \ 53 + static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, 54 + u8 local_port, u8 pg_buff, 55 + enum mlxsw_reg_sbxx_dir dir) 56 + { 57 + return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff]; 58 + } 59 + 60 + static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, 61 + u8 local_port, u8 pool, 62 + enum mlxsw_reg_sbxx_dir dir) 63 + { 64 + return &mlxsw_sp->sb.ports[local_port].pms[dir][pool]; 65 + } 66 + 67 + static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool, 68 + enum mlxsw_reg_sbxx_dir dir, 69 + enum mlxsw_reg_sbpr_mode mode, u32 size) 70 + { 71 + char sbpr_pl[MLXSW_REG_SBPR_LEN]; 72 + struct mlxsw_sp_sb_pr *pr; 73 + int err; 74 + 75 + mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size); 76 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); 77 + if (err) 78 + return err; 79 + 80 + pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); 81 + pr->mode = mode; 82 + pr->size = size; 83 + return 0; 84 + } 85 + 86 + static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, 87 + u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, 88 + u32 min_buff, u32 max_buff, u8 pool) 89 + { 90 + char sbcm_pl[MLXSW_REG_SBCM_LEN]; 91 + int err; 92 + 93 + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir, 94 + min_buff, max_buff, pool); 95 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); 96 + if (err) 97 + return 
err; 98 + if (pg_buff < MLXSW_SP_SB_TC_COUNT) { 99 + struct mlxsw_sp_sb_cm *cm; 100 + 101 + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); 102 + cm->min_buff = min_buff; 103 + cm->max_buff = max_buff; 104 + cm->pool = pool; 54 105 } 106 + return 0; 107 + } 55 108 56 - static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = { 57 - MLXSW_SP_PB(0, 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN)), 58 - MLXSW_SP_PB(1, 0), 59 - MLXSW_SP_PB(2, 0), 60 - MLXSW_SP_PB(3, 0), 61 - MLXSW_SP_PB(4, 0), 62 - MLXSW_SP_PB(5, 0), 63 - MLXSW_SP_PB(6, 0), 64 - MLXSW_SP_PB(7, 0), 65 - MLXSW_SP_PB(9, 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU)), 109 + static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, 110 + u8 pool, enum mlxsw_reg_sbxx_dir dir, 111 + u32 min_buff, u32 max_buff) 112 + { 113 + char sbpm_pl[MLXSW_REG_SBPM_LEN]; 114 + struct mlxsw_sp_sb_pm *pm; 115 + int err; 116 + 117 + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 118 + min_buff, max_buff); 119 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); 120 + if (err) 121 + return err; 122 + 123 + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); 124 + pm->min_buff = min_buff; 125 + pm->max_buff = max_buff; 126 + return 0; 127 + } 128 + 129 + static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, 130 + u8 pool, enum mlxsw_reg_sbxx_dir dir, 131 + struct list_head *bulk_list) 132 + { 133 + char sbpm_pl[MLXSW_REG_SBPM_LEN]; 134 + 135 + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0); 136 + return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, 137 + bulk_list, NULL, 0); 138 + } 139 + 140 + static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, 141 + char *sbpm_pl, size_t sbpm_pl_len, 142 + unsigned long cb_priv) 143 + { 144 + struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv; 145 + 146 + mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max); 147 + } 148 + 149 + static int 
mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, 150 + u8 pool, enum mlxsw_reg_sbxx_dir dir, 151 + struct list_head *bulk_list) 152 + { 153 + char sbpm_pl[MLXSW_REG_SBPM_LEN]; 154 + struct mlxsw_sp_sb_pm *pm; 155 + 156 + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); 157 + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0); 158 + return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, 159 + bulk_list, 160 + mlxsw_sp_sb_pm_occ_query_cb, 161 + (unsigned long) pm); 162 + } 163 + 164 + static const u16 mlxsw_sp_pbs[] = { 165 + 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), 166 + 0, 167 + 0, 168 + 0, 169 + 0, 170 + 0, 171 + 0, 172 + 0, 173 + 0, /* Unused */ 174 + 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU), 66 175 }; 67 176 68 177 #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) ··· 184 75 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 185 76 0xffff, 0xffff / 2); 186 77 for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { 187 - const struct mlxsw_sp_pb *pb; 188 - 189 - pb = &mlxsw_sp_pbs[i]; 190 - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size); 78 + if (i == 8) 79 + continue; 80 + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]); 191 81 } 192 82 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 193 83 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0); ··· 216 108 return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); 217 109 } 218 110 219 - struct mlxsw_sp_sb_pool { 220 - u8 pool; 221 - enum mlxsw_reg_sbxx_dir dir; 222 - enum mlxsw_reg_sbpr_mode mode; 223 - u32 size; 224 - }; 225 - 226 - #define MLXSW_SP_SB_POOL_INGRESS_SIZE \ 111 + #define MLXSW_SP_SB_PR_INGRESS_SIZE \ 227 112 (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) 228 - #define MLXSW_SP_SB_POOL_EGRESS_SIZE \ 113 + #define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000) 114 + #define MLXSW_SP_SB_PR_EGRESS_SIZE \ 229 115 (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) 230 116 231 - #define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \ 232 - { \ 233 - 
.pool = _pool, \ 234 - .dir = _dir, \ 235 - .mode = _mode, \ 236 - .size = _size, \ 117 + #define MLXSW_SP_SB_PR(_mode, _size) \ 118 + { \ 119 + .mode = _mode, \ 120 + .size = _size, \ 237 121 } 238 122 239 - #define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \ 240 - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \ 241 - MLXSW_REG_SBPR_MODE_DYNAMIC, _size) 242 - 243 - #define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \ 244 - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \ 245 - MLXSW_REG_SBPR_MODE_DYNAMIC, _size) 246 - 247 - static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { 248 - MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_INGRESS_SIZE)), 249 - MLXSW_SP_SB_POOL_INGRESS(1, 0), 250 - MLXSW_SP_SB_POOL_INGRESS(2, 0), 251 - MLXSW_SP_SB_POOL_INGRESS(3, 0), 252 - MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), 253 - MLXSW_SP_SB_POOL_EGRESS(1, 0), 254 - MLXSW_SP_SB_POOL_EGRESS(2, 0), 255 - MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)), 123 + static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { 124 + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 125 + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)), 126 + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), 127 + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), 128 + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 129 + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)), 256 130 }; 257 131 258 - #define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) 132 + #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) 259 133 260 - static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) 134 + static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { 135 + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 136 + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), 137 + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), 138 + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), 139 + 
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), 140 + }; 141 + 142 + #define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress) 143 + 144 + static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, 145 + enum mlxsw_reg_sbxx_dir dir, 146 + const struct mlxsw_sp_sb_pr *prs, 147 + size_t prs_len) 261 148 { 262 - char sbpr_pl[MLXSW_REG_SBPR_LEN]; 263 149 int i; 264 150 int err; 265 151 266 - for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) { 267 - const struct mlxsw_sp_sb_pool *pool; 152 + for (i = 0; i < prs_len; i++) { 153 + const struct mlxsw_sp_sb_pr *pr; 268 154 269 - pool = &mlxsw_sp_sb_pools[i]; 270 - mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir, 271 - pool->mode, pool->size); 272 - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); 155 + pr = &prs[i]; 156 + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, 157 + pr->mode, pr->size); 273 158 if (err) 274 159 return err; 275 160 } 276 161 return 0; 277 162 } 278 163 279 - struct mlxsw_sp_sb_cm { 280 - union { 281 - u8 pg; 282 - u8 tc; 283 - } u; 284 - enum mlxsw_reg_sbxx_dir dir; 285 - u32 min_buff; 286 - u32 max_buff; 287 - u8 pool; 288 - }; 164 + static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) 165 + { 166 + int err; 289 167 290 - #define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \ 291 - { \ 292 - .u.pg = _pg_tc, \ 293 - .dir = _dir, \ 294 - .min_buff = _min_buff, \ 295 - .max_buff = _max_buff, \ 296 - .pool = _pool, \ 168 + err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, 169 + mlxsw_sp_sb_prs_ingress, 170 + MLXSW_SP_SB_PRS_INGRESS_LEN); 171 + if (err) 172 + return err; 173 + return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, 174 + mlxsw_sp_sb_prs_egress, 175 + MLXSW_SP_SB_PRS_EGRESS_LEN); 176 + } 177 + 178 + #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \ 179 + { \ 180 + .min_buff = _min_buff, \ 181 + .max_buff = _max_buff, \ 182 + .pool = _pool, \ 297 183 } 298 184 299 - #define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, 
_max_buff) \ 300 - MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBXX_DIR_INGRESS, \ 301 - _min_buff, _max_buff, 0) 302 - 303 - #define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ 304 - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, \ 305 - _min_buff, _max_buff, 0) 306 - 307 - #define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ 308 - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, 104, 2, 3) 309 - 310 - static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { 311 - MLXSW_SP_SB_CM_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(10000), 8), 312 - MLXSW_SP_SB_CM_INGRESS(1, 0, 0), 313 - MLXSW_SP_SB_CM_INGRESS(2, 0, 0), 314 - MLXSW_SP_SB_CM_INGRESS(3, 0, 0), 315 - MLXSW_SP_SB_CM_INGRESS(4, 0, 0), 316 - MLXSW_SP_SB_CM_INGRESS(5, 0, 0), 317 - MLXSW_SP_SB_CM_INGRESS(6, 0, 0), 318 - MLXSW_SP_SB_CM_INGRESS(7, 0, 0), 319 - MLXSW_SP_SB_CM_INGRESS(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff), 320 - MLXSW_SP_SB_CM_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 321 - MLXSW_SP_SB_CM_EGRESS(1, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 322 - MLXSW_SP_SB_CM_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 323 - MLXSW_SP_SB_CM_EGRESS(3, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 324 - MLXSW_SP_SB_CM_EGRESS(4, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 325 - MLXSW_SP_SB_CM_EGRESS(5, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 326 - MLXSW_SP_SB_CM_EGRESS(6, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 327 - MLXSW_SP_SB_CM_EGRESS(7, MLXSW_SP_BYTES_TO_CELLS(1500), 9), 328 - MLXSW_SP_SB_CM_EGRESS(8, 0, 0), 329 - MLXSW_SP_SB_CM_EGRESS(9, 0, 0), 330 - MLXSW_SP_SB_CM_EGRESS(10, 0, 0), 331 - MLXSW_SP_SB_CM_EGRESS(11, 0, 0), 332 - MLXSW_SP_SB_CM_EGRESS(12, 0, 0), 333 - MLXSW_SP_SB_CM_EGRESS(13, 0, 0), 334 - MLXSW_SP_SB_CM_EGRESS(14, 0, 0), 335 - MLXSW_SP_SB_CM_EGRESS(15, 0, 0), 336 - MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff), 185 + static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { 186 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0), 187 + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), 188 + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), 189 + 
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), 190 + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), 191 + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), 192 + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), 193 + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), 194 + MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ 195 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3), 337 196 }; 338 197 339 - #define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms) 198 + #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) 199 + 200 + static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { 201 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 202 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 203 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 204 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 205 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 206 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 207 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 208 + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), 209 + MLXSW_SP_SB_CM(0, 0, 0), 210 + MLXSW_SP_SB_CM(0, 0, 0), 211 + MLXSW_SP_SB_CM(0, 0, 0), 212 + MLXSW_SP_SB_CM(0, 0, 0), 213 + MLXSW_SP_SB_CM(0, 0, 0), 214 + MLXSW_SP_SB_CM(0, 0, 0), 215 + MLXSW_SP_SB_CM(0, 0, 0), 216 + MLXSW_SP_SB_CM(0, 0, 0), 217 + MLXSW_SP_SB_CM(1, 0xff, 0), 218 + }; 219 + 220 + #define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) 221 + 222 + #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0) 340 223 341 224 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { 342 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0), 343 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1), 344 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2), 345 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3), 346 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4), 347 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5), 348 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6), 349 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7), 350 - 
MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8), 351 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9), 352 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10), 353 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11), 354 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12), 355 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13), 356 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14), 357 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15), 358 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16), 359 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17), 360 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18), 361 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19), 362 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20), 363 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21), 364 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22), 365 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23), 366 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24), 367 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25), 368 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26), 369 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27), 370 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28), 371 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29), 372 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30), 373 - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31), 225 + MLXSW_SP_CPU_PORT_SB_CM, 226 + MLXSW_SP_CPU_PORT_SB_CM, 227 + MLXSW_SP_CPU_PORT_SB_CM, 228 + MLXSW_SP_CPU_PORT_SB_CM, 229 + MLXSW_SP_CPU_PORT_SB_CM, 230 + MLXSW_SP_CPU_PORT_SB_CM, 231 + MLXSW_SP_CPU_PORT_SB_CM, 232 + MLXSW_SP_CPU_PORT_SB_CM, 233 + MLXSW_SP_CPU_PORT_SB_CM, 234 + MLXSW_SP_CPU_PORT_SB_CM, 235 + MLXSW_SP_CPU_PORT_SB_CM, 236 + MLXSW_SP_CPU_PORT_SB_CM, 237 + MLXSW_SP_CPU_PORT_SB_CM, 238 + MLXSW_SP_CPU_PORT_SB_CM, 239 + MLXSW_SP_CPU_PORT_SB_CM, 240 + MLXSW_SP_CPU_PORT_SB_CM, 241 + MLXSW_SP_CPU_PORT_SB_CM, 242 + MLXSW_SP_CPU_PORT_SB_CM, 243 + MLXSW_SP_CPU_PORT_SB_CM, 244 + MLXSW_SP_CPU_PORT_SB_CM, 245 + MLXSW_SP_CPU_PORT_SB_CM, 246 + MLXSW_SP_CPU_PORT_SB_CM, 247 + MLXSW_SP_CPU_PORT_SB_CM, 248 + MLXSW_SP_CPU_PORT_SB_CM, 249 + MLXSW_SP_CPU_PORT_SB_CM, 250 + MLXSW_SP_CPU_PORT_SB_CM, 251 + MLXSW_SP_CPU_PORT_SB_CM, 252 + MLXSW_SP_CPU_PORT_SB_CM, 253 + MLXSW_SP_CPU_PORT_SB_CM, 254 + MLXSW_SP_CPU_PORT_SB_CM, 255 + MLXSW_SP_CPU_PORT_SB_CM, 256 + MLXSW_SP_CPU_PORT_SB_CM, 374 257 
}; 375 258 376 259 #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ 377 260 ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) 378 261 379 - static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, 380 - const struct mlxsw_sp_sb_cm *cms, 381 - size_t cms_len) 262 + static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, 263 + enum mlxsw_reg_sbxx_dir dir, 264 + const struct mlxsw_sp_sb_cm *cms, 265 + size_t cms_len) 382 266 { 383 - char sbcm_pl[MLXSW_REG_SBCM_LEN]; 384 267 int i; 385 268 int err; 386 269 387 270 for (i = 0; i < cms_len; i++) { 388 271 const struct mlxsw_sp_sb_cm *cm; 389 272 273 + if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) 274 + continue; /* PG number 8 does not exist, skip it */ 390 275 cm = &cms[i]; 391 - mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir, 392 - cm->min_buff, cm->max_buff, cm->pool); 393 - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); 276 + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir, 277 + cm->min_buff, cm->max_buff, 278 + cm->pool); 394 279 if (err) 395 280 return err; 396 281 } ··· 392 291 393 292 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) 394 293 { 395 - return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, 396 - mlxsw_sp_port->local_port, mlxsw_sp_sb_cms, 397 - MLXSW_SP_SB_CMS_LEN); 294 + int err; 295 + 296 + err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, 297 + mlxsw_sp_port->local_port, 298 + MLXSW_REG_SBXX_DIR_INGRESS, 299 + mlxsw_sp_sb_cms_ingress, 300 + MLXSW_SP_SB_CMS_INGRESS_LEN); 301 + if (err) 302 + return err; 303 + return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, 304 + mlxsw_sp_port->local_port, 305 + MLXSW_REG_SBXX_DIR_EGRESS, 306 + mlxsw_sp_sb_cms_egress, 307 + MLXSW_SP_SB_CMS_EGRESS_LEN); 398 308 } 399 309 400 310 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) 401 311 { 402 - return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms, 403 - MLXSW_SP_CPU_PORT_SB_MCS_LEN); 312 + return 
__mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS, 313 + mlxsw_sp_cpu_port_sb_cms, 314 + MLXSW_SP_CPU_PORT_SB_MCS_LEN); 404 315 } 405 316 406 - struct mlxsw_sp_sb_pm { 407 - u8 pool; 408 - enum mlxsw_reg_sbxx_dir dir; 409 - u32 min_buff; 410 - u32 max_buff; 411 - }; 412 - 413 - #define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \ 414 - { \ 415 - .pool = _pool, \ 416 - .dir = _dir, \ 417 - .min_buff = _min_buff, \ 418 - .max_buff = _max_buff, \ 317 + #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \ 318 + { \ 319 + .min_buff = _min_buff, \ 320 + .max_buff = _max_buff, \ 419 321 } 420 322 421 - #define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ 422 - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \ 423 - _min_buff, _max_buff) 424 - 425 - #define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ 426 - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \ 427 - _min_buff, _max_buff) 428 - 429 - static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { 430 - MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff), 431 - MLXSW_SP_SB_PM_INGRESS(1, 0, 0), 432 - MLXSW_SP_SB_PM_INGRESS(2, 0, 0), 433 - MLXSW_SP_SB_PM_INGRESS(3, 0, 0), 434 - MLXSW_SP_SB_PM_EGRESS(0, 0, 7), 435 - MLXSW_SP_SB_PM_EGRESS(1, 0, 0), 436 - MLXSW_SP_SB_PM_EGRESS(2, 0, 0), 437 - MLXSW_SP_SB_PM_EGRESS(3, 0, 0), 323 + static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = { 324 + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), 325 + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), 326 + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), 327 + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), 438 328 }; 439 329 440 - #define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) 330 + #define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress) 441 331 442 - static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) 332 + static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = { 333 + MLXSW_SP_SB_PM(0, 7), 334 + MLXSW_SP_SB_PM(0, 
MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), 335 + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), 336 + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), 337 + }; 338 + 339 + #define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress) 340 + 341 + static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, 342 + enum mlxsw_reg_sbxx_dir dir, 343 + const struct mlxsw_sp_sb_pm *pms, 344 + size_t pms_len) 443 345 { 444 - char sbpm_pl[MLXSW_REG_SBPM_LEN]; 445 346 int i; 446 347 int err; 447 348 448 - for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { 349 + for (i = 0; i < pms_len; i++) { 449 350 const struct mlxsw_sp_sb_pm *pm; 450 351 451 - pm = &mlxsw_sp_sb_pms[i]; 452 - mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port, 453 - pm->pool, pm->dir, 454 - pm->min_buff, pm->max_buff); 455 - err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, 456 - MLXSW_REG(sbpm), sbpm_pl); 352 + pm = &pms[i]; 353 + err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir, 354 + pm->min_buff, pm->max_buff); 457 355 if (err) 458 356 return err; 459 357 } 460 358 return 0; 461 359 } 462 360 361 + static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) 362 + { 363 + int err; 364 + 365 + err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, 366 + mlxsw_sp_port->local_port, 367 + MLXSW_REG_SBXX_DIR_INGRESS, 368 + mlxsw_sp_sb_pms_ingress, 369 + MLXSW_SP_SB_PMS_INGRESS_LEN); 370 + if (err) 371 + return err; 372 + return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, 373 + mlxsw_sp_port->local_port, 374 + MLXSW_REG_SBXX_DIR_EGRESS, 375 + mlxsw_sp_sb_pms_egress, 376 + MLXSW_SP_SB_PMS_EGRESS_LEN); 377 + } 378 + 463 379 struct mlxsw_sp_sb_mm { 464 - u8 prio; 465 380 u32 min_buff; 466 381 u32 max_buff; 467 382 u8 pool; 468 383 }; 469 384 470 - #define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \ 471 - { \ 472 - .prio = _prio, \ 473 - .min_buff = _min_buff, \ 474 - .max_buff = _max_buff, \ 475 - .pool = _pool, \ 385 + #define 
MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ 386 + { \ 387 + .min_buff = _min_buff, \ 388 + .max_buff = _max_buff, \ 389 + .pool = _pool, \ 476 390 } 477 391 478 392 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { 479 - MLXSW_SP_SB_MM(0, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 480 - MLXSW_SP_SB_MM(1, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 481 - MLXSW_SP_SB_MM(2, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 482 - MLXSW_SP_SB_MM(3, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 483 - MLXSW_SP_SB_MM(4, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 484 - MLXSW_SP_SB_MM(5, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 485 - MLXSW_SP_SB_MM(6, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 486 - MLXSW_SP_SB_MM(7, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 487 - MLXSW_SP_SB_MM(8, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 488 - MLXSW_SP_SB_MM(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 489 - MLXSW_SP_SB_MM(10, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 490 - MLXSW_SP_SB_MM(11, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 491 - MLXSW_SP_SB_MM(12, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 492 - MLXSW_SP_SB_MM(13, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 493 - MLXSW_SP_SB_MM(14, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 393 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 394 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 395 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 396 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 397 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 398 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 399 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 400 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 401 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 402 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 403 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 404 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 405 + 
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 406 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 407 + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), 494 408 }; 495 409 496 410 #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) ··· 520 404 const struct mlxsw_sp_sb_mm *mc; 521 405 522 406 mc = &mlxsw_sp_sb_mms[i]; 523 - mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff, 407 + mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff, 524 408 mc->max_buff, mc->pool); 525 409 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); 526 410 if (err) ··· 529 413 return 0; 530 414 } 531 415 416 + #define MLXSW_SP_SB_SIZE (16 * 1024 * 1024) 417 + 532 418 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) 533 419 { 534 420 int err; 535 421 536 - err = mlxsw_sp_sb_pools_init(mlxsw_sp); 422 + err = mlxsw_sp_sb_prs_init(mlxsw_sp); 537 423 if (err) 538 424 return err; 539 425 err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); 540 426 if (err) 541 427 return err; 542 428 err = mlxsw_sp_sb_mms_init(mlxsw_sp); 429 + if (err) 430 + return err; 431 + return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, 432 + MLXSW_SP_SB_SIZE, 433 + MLXSW_SP_SB_POOL_COUNT, 434 + MLXSW_SP_SB_POOL_COUNT, 435 + MLXSW_SP_SB_TC_COUNT, 436 + MLXSW_SP_SB_TC_COUNT); 437 + } 543 438 544 - return err; 439 + void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp) 440 + { 441 + devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0); 545 442 } 546 443 547 444 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) ··· 570 441 err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port); 571 442 572 443 return err; 444 + } 445 + 446 + static u8 pool_get(u16 pool_index) 447 + { 448 + return pool_index % MLXSW_SP_SB_POOL_COUNT; 449 + } 450 + 451 + static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir) 452 + { 453 + u16 pool_index; 454 + 455 + pool_index = pool; 456 + if (dir == MLXSW_REG_SBXX_DIR_EGRESS) 457 + pool_index += MLXSW_SP_SB_POOL_COUNT; 458 + return 
pool_index; 459 + } 460 + 461 + static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index) 462 + { 463 + return pool_index < MLXSW_SP_SB_POOL_COUNT ? 464 + MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS; 465 + } 466 + 467 + int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, 468 + unsigned int sb_index, u16 pool_index, 469 + struct devlink_sb_pool_info *pool_info) 470 + { 471 + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 472 + u8 pool = pool_get(pool_index); 473 + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); 474 + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); 475 + 476 + pool_info->pool_type = dir; 477 + pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size); 478 + pool_info->threshold_type = pr->mode; 479 + return 0; 480 + } 481 + 482 + int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, 483 + unsigned int sb_index, u16 pool_index, u32 size, 484 + enum devlink_sb_threshold_type threshold_type) 485 + { 486 + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 487 + u8 pool = pool_get(pool_index); 488 + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); 489 + enum mlxsw_reg_sbpr_mode mode = threshold_type; 490 + u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size); 491 + 492 + return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size); 493 + } 494 + 495 + #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */ 496 + 497 + static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool, 498 + enum mlxsw_reg_sbxx_dir dir, u32 max_buff) 499 + { 500 + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); 501 + 502 + if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) 503 + return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; 504 + return MLXSW_SP_CELLS_TO_BYTES(max_buff); 505 + } 506 + 507 + static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, 508 + enum mlxsw_reg_sbxx_dir dir, u32 threshold, 509 + u32 *p_max_buff) 510 + { 511 + struct 
mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); 512 + 513 + if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) { 514 + int val; 515 + 516 + val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; 517 + if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN || 518 + val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) 519 + return -EINVAL; 520 + *p_max_buff = val; 521 + } else { 522 + *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold); 523 + } 524 + return 0; 525 + } 526 + 527 + int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, 528 + unsigned int sb_index, u16 pool_index, 529 + u32 *p_threshold) 530 + { 531 + struct mlxsw_sp_port *mlxsw_sp_port = 532 + mlxsw_core_port_driver_priv(mlxsw_core_port); 533 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 534 + u8 local_port = mlxsw_sp_port->local_port; 535 + u8 pool = pool_get(pool_index); 536 + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); 537 + struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, 538 + pool, dir); 539 + 540 + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir, 541 + pm->max_buff); 542 + return 0; 543 + } 544 + 545 + int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, 546 + unsigned int sb_index, u16 pool_index, 547 + u32 threshold) 548 + { 549 + struct mlxsw_sp_port *mlxsw_sp_port = 550 + mlxsw_core_port_driver_priv(mlxsw_core_port); 551 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 552 + u8 local_port = mlxsw_sp_port->local_port; 553 + u8 pool = pool_get(pool_index); 554 + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); 555 + u32 max_buff; 556 + int err; 557 + 558 + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, 559 + threshold, &max_buff); 560 + if (err) 561 + return err; 562 + 563 + return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir, 564 + 0, max_buff); 565 + } 566 + 567 + int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, 568 + unsigned int sb_index, u16 tc_index, 569 + enum 
devlink_sb_pool_type pool_type, 570 + u16 *p_pool_index, u32 *p_threshold) 571 + { 572 + struct mlxsw_sp_port *mlxsw_sp_port = 573 + mlxsw_core_port_driver_priv(mlxsw_core_port); 574 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 575 + u8 local_port = mlxsw_sp_port->local_port; 576 + u8 pg_buff = tc_index; 577 + enum mlxsw_reg_sbxx_dir dir = pool_type; 578 + struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, 579 + pg_buff, dir); 580 + 581 + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir, 582 + cm->max_buff); 583 + *p_pool_index = pool_index_get(cm->pool, pool_type); 584 + return 0; 585 + } 586 + 587 + int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, 588 + unsigned int sb_index, u16 tc_index, 589 + enum devlink_sb_pool_type pool_type, 590 + u16 pool_index, u32 threshold) 591 + { 592 + struct mlxsw_sp_port *mlxsw_sp_port = 593 + mlxsw_core_port_driver_priv(mlxsw_core_port); 594 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 595 + u8 local_port = mlxsw_sp_port->local_port; 596 + u8 pg_buff = tc_index; 597 + enum mlxsw_reg_sbxx_dir dir = pool_type; 598 + u8 pool = pool_index; 599 + u32 max_buff; 600 + int err; 601 + 602 + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, 603 + threshold, &max_buff); 604 + if (err) 605 + return err; 606 + 607 + if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) { 608 + if (pool < MLXSW_SP_SB_POOL_COUNT) 609 + return -EINVAL; 610 + pool -= MLXSW_SP_SB_POOL_COUNT; 611 + } else if (pool >= MLXSW_SP_SB_POOL_COUNT) { 612 + return -EINVAL; 613 + } 614 + return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, 615 + 0, max_buff, pool); 616 + } 617 + 618 + #define MASKED_COUNT_MAX \ 619 + (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2)) 620 + 621 + struct mlxsw_sp_sb_sr_occ_query_cb_ctx { 622 + u8 masked_count; 623 + u8 local_port_1; 624 + }; 625 + 626 + static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, 627 + char *sbsr_pl, size_t 
sbsr_pl_len, 628 + unsigned long cb_priv) 629 + { 630 + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 631 + struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; 632 + u8 masked_count; 633 + u8 local_port; 634 + int rec_index = 0; 635 + struct mlxsw_sp_sb_cm *cm; 636 + int i; 637 + 638 + memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx)); 639 + 640 + masked_count = 0; 641 + for (local_port = cb_ctx.local_port_1; 642 + local_port < MLXSW_PORT_MAX_PORTS; local_port++) { 643 + if (!mlxsw_sp->ports[local_port]) 644 + continue; 645 + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { 646 + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, 647 + MLXSW_REG_SBXX_DIR_INGRESS); 648 + mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, 649 + &cm->occ.cur, &cm->occ.max); 650 + } 651 + if (++masked_count == cb_ctx.masked_count) 652 + break; 653 + } 654 + masked_count = 0; 655 + for (local_port = cb_ctx.local_port_1; 656 + local_port < MLXSW_PORT_MAX_PORTS; local_port++) { 657 + if (!mlxsw_sp->ports[local_port]) 658 + continue; 659 + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { 660 + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, 661 + MLXSW_REG_SBXX_DIR_EGRESS); 662 + mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, 663 + &cm->occ.cur, &cm->occ.max); 664 + } 665 + if (++masked_count == cb_ctx.masked_count) 666 + break; 667 + } 668 + } 669 + 670 + int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, 671 + unsigned int sb_index) 672 + { 673 + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 674 + struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; 675 + unsigned long cb_priv; 676 + LIST_HEAD(bulk_list); 677 + char *sbsr_pl; 678 + u8 masked_count; 679 + u8 local_port_1; 680 + u8 local_port = 0; 681 + int i; 682 + int err; 683 + int err2; 684 + 685 + sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); 686 + if (!sbsr_pl) 687 + return -ENOMEM; 688 + 689 + next_batch: 690 + local_port++; 691 + local_port_1 = local_port; 692 + masked_count = 0; 693 + 
mlxsw_reg_sbsr_pack(sbsr_pl, false); 694 + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { 695 + mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); 696 + mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); 697 + } 698 + for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { 699 + if (!mlxsw_sp->ports[local_port]) 700 + continue; 701 + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); 702 + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); 703 + for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { 704 + err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, 705 + MLXSW_REG_SBXX_DIR_INGRESS, 706 + &bulk_list); 707 + if (err) 708 + goto out; 709 + err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, 710 + MLXSW_REG_SBXX_DIR_EGRESS, 711 + &bulk_list); 712 + if (err) 713 + goto out; 714 + } 715 + if (++masked_count == MASKED_COUNT_MAX) 716 + goto do_query; 717 + } 718 + 719 + do_query: 720 + cb_ctx.masked_count = masked_count; 721 + cb_ctx.local_port_1 = local_port_1; 722 + memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx)); 723 + err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl, 724 + &bulk_list, mlxsw_sp_sb_sr_occ_query_cb, 725 + cb_priv); 726 + if (err) 727 + goto out; 728 + if (local_port < MLXSW_PORT_MAX_PORTS) 729 + goto next_batch; 730 + 731 + out: 732 + err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); 733 + if (!err) 734 + err = err2; 735 + kfree(sbsr_pl); 736 + return err; 737 + } 738 + 739 + int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, 740 + unsigned int sb_index) 741 + { 742 + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 743 + LIST_HEAD(bulk_list); 744 + char *sbsr_pl; 745 + unsigned int masked_count; 746 + u8 local_port = 0; 747 + int i; 748 + int err; 749 + int err2; 750 + 751 + sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); 752 + if (!sbsr_pl) 753 + return -ENOMEM; 754 + 755 + next_batch: 756 + local_port++; 757 + masked_count = 0; 758 + mlxsw_reg_sbsr_pack(sbsr_pl, true); 759 + for (i = 0; i < 
MLXSW_SP_SB_TC_COUNT; i++) { 760 + mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); 761 + mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); 762 + } 763 + for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { 764 + if (!mlxsw_sp->ports[local_port]) 765 + continue; 766 + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); 767 + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); 768 + for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { 769 + err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, 770 + MLXSW_REG_SBXX_DIR_INGRESS, 771 + &bulk_list); 772 + if (err) 773 + goto out; 774 + err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, 775 + MLXSW_REG_SBXX_DIR_EGRESS, 776 + &bulk_list); 777 + if (err) 778 + goto out; 779 + } 780 + if (++masked_count == MASKED_COUNT_MAX) 781 + goto do_query; 782 + } 783 + 784 + do_query: 785 + err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl, 786 + &bulk_list, NULL, 0); 787 + if (err) 788 + goto out; 789 + if (local_port < MLXSW_PORT_MAX_PORTS) 790 + goto next_batch; 791 + 792 + out: 793 + err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); 794 + if (!err) 795 + err = err2; 796 + kfree(sbsr_pl); 797 + return err; 798 + } 799 + 800 + int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, 801 + unsigned int sb_index, u16 pool_index, 802 + u32 *p_cur, u32 *p_max) 803 + { 804 + struct mlxsw_sp_port *mlxsw_sp_port = 805 + mlxsw_core_port_driver_priv(mlxsw_core_port); 806 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 807 + u8 local_port = mlxsw_sp_port->local_port; 808 + u8 pool = pool_get(pool_index); 809 + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); 810 + struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, 811 + pool, dir); 812 + 813 + *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur); 814 + *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max); 815 + return 0; 816 + } 817 + 818 + int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, 819 + 
unsigned int sb_index, u16 tc_index, 820 + enum devlink_sb_pool_type pool_type, 821 + u32 *p_cur, u32 *p_max) 822 + { 823 + struct mlxsw_sp_port *mlxsw_sp_port = 824 + mlxsw_core_port_driver_priv(mlxsw_core_port); 825 + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 826 + u8 local_port = mlxsw_sp_port->local_port; 827 + u8 pg_buff = tc_index; 828 + enum mlxsw_reg_sbxx_dir dir = pool_type; 829 + struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, 830 + pg_buff, dir); 831 + 832 + *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur); 833 + *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max); 834 + return 0; 573 835 }
+2 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 1430 1430 1431 1431 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) 1432 1432 { 1433 - schedule_delayed_work(&mlxsw_sp->fdb_notify.dw, 1434 - msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); 1433 + mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw, 1434 + msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); 1435 1435 } 1436 1436 1437 1437 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+59
include/net/devlink.h
··· 24 24 struct devlink { 25 25 struct list_head list; 26 26 struct list_head port_list; 27 + struct list_head sb_list; 27 28 const struct devlink_ops *ops; 28 29 struct device *dev; 29 30 possible_net_t _net; ··· 43 42 u32 split_group; 44 43 }; 45 44 45 + struct devlink_sb_pool_info { 46 + enum devlink_sb_pool_type pool_type; 47 + u32 size; 48 + enum devlink_sb_threshold_type threshold_type; 49 + }; 50 + 46 51 struct devlink_ops { 47 52 size_t priv_size; 48 53 int (*port_type_set)(struct devlink_port *devlink_port, ··· 56 49 int (*port_split)(struct devlink *devlink, unsigned int port_index, 57 50 unsigned int count); 58 51 int (*port_unsplit)(struct devlink *devlink, unsigned int port_index); 52 + int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index, 53 + u16 pool_index, 54 + struct devlink_sb_pool_info *pool_info); 55 + int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index, 56 + u16 pool_index, u32 size, 57 + enum devlink_sb_threshold_type threshold_type); 58 + int (*sb_port_pool_get)(struct devlink_port *devlink_port, 59 + unsigned int sb_index, u16 pool_index, 60 + u32 *p_threshold); 61 + int (*sb_port_pool_set)(struct devlink_port *devlink_port, 62 + unsigned int sb_index, u16 pool_index, 63 + u32 threshold); 64 + int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port, 65 + unsigned int sb_index, 66 + u16 tc_index, 67 + enum devlink_sb_pool_type pool_type, 68 + u16 *p_pool_index, u32 *p_threshold); 69 + int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port, 70 + unsigned int sb_index, 71 + u16 tc_index, 72 + enum devlink_sb_pool_type pool_type, 73 + u16 pool_index, u32 threshold); 74 + int (*sb_occ_snapshot)(struct devlink *devlink, 75 + unsigned int sb_index); 76 + int (*sb_occ_max_clear)(struct devlink *devlink, 77 + unsigned int sb_index); 78 + int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port, 79 + unsigned int sb_index, u16 pool_index, 80 + u32 *p_cur, u32 *p_max); 81 + int 
(*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port, 82 + unsigned int sb_index, 83 + u16 tc_index, 84 + enum devlink_sb_pool_type pool_type, 85 + u32 *p_cur, u32 *p_max); 59 86 }; 60 87 61 88 static inline void *devlink_priv(struct devlink *devlink) ··· 123 82 void devlink_port_type_clear(struct devlink_port *devlink_port); 124 83 void devlink_port_split_set(struct devlink_port *devlink_port, 125 84 u32 split_group); 85 + int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, 86 + u32 size, u16 ingress_pools_count, 87 + u16 egress_pools_count, u16 ingress_tc_count, 88 + u16 egress_tc_count); 89 + void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index); 126 90 127 91 #else 128 92 ··· 178 132 179 133 static inline void devlink_port_split_set(struct devlink_port *devlink_port, 180 134 u32 split_group) 135 + { 136 + } 137 + 138 + static inline int devlink_sb_register(struct devlink *devlink, 139 + unsigned int sb_index, u32 size, 140 + u16 ingress_pools_count, 141 + u16 egress_pools_count, u16 tc_count) 142 + { 143 + return 0; 144 + } 145 + 146 + static inline void devlink_sb_unregister(struct devlink *devlink, 147 + unsigned int sb_index) 181 148 { 182 149 } 183 150
+63
include/uapi/linux/devlink.h
··· 33 33 DEVLINK_CMD_PORT_SPLIT, 34 34 DEVLINK_CMD_PORT_UNSPLIT, 35 35 36 + DEVLINK_CMD_SB_GET, /* can dump */ 37 + DEVLINK_CMD_SB_SET, 38 + DEVLINK_CMD_SB_NEW, 39 + DEVLINK_CMD_SB_DEL, 40 + 41 + DEVLINK_CMD_SB_POOL_GET, /* can dump */ 42 + DEVLINK_CMD_SB_POOL_SET, 43 + DEVLINK_CMD_SB_POOL_NEW, 44 + DEVLINK_CMD_SB_POOL_DEL, 45 + 46 + DEVLINK_CMD_SB_PORT_POOL_GET, /* can dump */ 47 + DEVLINK_CMD_SB_PORT_POOL_SET, 48 + DEVLINK_CMD_SB_PORT_POOL_NEW, 49 + DEVLINK_CMD_SB_PORT_POOL_DEL, 50 + 51 + DEVLINK_CMD_SB_TC_POOL_BIND_GET, /* can dump */ 52 + DEVLINK_CMD_SB_TC_POOL_BIND_SET, 53 + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, 54 + DEVLINK_CMD_SB_TC_POOL_BIND_DEL, 55 + 56 + /* Shared buffer occupancy monitoring commands */ 57 + DEVLINK_CMD_SB_OCC_SNAPSHOT, 58 + DEVLINK_CMD_SB_OCC_MAX_CLEAR, 59 + 36 60 /* add new commands above here */ 37 61 38 62 __DEVLINK_CMD_MAX, ··· 69 45 DEVLINK_PORT_TYPE_ETH, 70 46 DEVLINK_PORT_TYPE_IB, 71 47 }; 48 + 49 + enum devlink_sb_pool_type { 50 + DEVLINK_SB_POOL_TYPE_INGRESS, 51 + DEVLINK_SB_POOL_TYPE_EGRESS, 52 + }; 53 + 54 + /* static threshold - limiting the maximum number of bytes. 55 + * dynamic threshold - limiting the maximum number of bytes 56 + * based on the currently available free space in the shared buffer pool. 57 + * In this mode, the maximum quota is calculated based 58 + * on the following formula: 59 + * max_quota = alpha / (1 + alpha) * Free_Buffer 60 + * While Free_Buffer is the amount of none-occupied buffer associated to 61 + * the relevant pool. 62 + * The value range which can be passed is 0-20 and serves 63 + * for computation of alpha by following formula: 64 + * alpha = 2 ^ (passed_value - 10) 65 + */ 66 + 67 + enum devlink_sb_threshold_type { 68 + DEVLINK_SB_THRESHOLD_TYPE_STATIC, 69 + DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC, 70 + }; 71 + 72 + #define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20 72 73 73 74 enum devlink_attr { 74 75 /* don't change the order or add anything between, this is ABI! 
*/ ··· 111 62 DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */ 112 63 DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */ 113 64 DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */ 65 + DEVLINK_ATTR_SB_INDEX, /* u32 */ 66 + DEVLINK_ATTR_SB_SIZE, /* u32 */ 67 + DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, /* u16 */ 68 + DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, /* u16 */ 69 + DEVLINK_ATTR_SB_INGRESS_TC_COUNT, /* u16 */ 70 + DEVLINK_ATTR_SB_EGRESS_TC_COUNT, /* u16 */ 71 + DEVLINK_ATTR_SB_POOL_INDEX, /* u16 */ 72 + DEVLINK_ATTR_SB_POOL_TYPE, /* u8 */ 73 + DEVLINK_ATTR_SB_POOL_SIZE, /* u32 */ 74 + DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, /* u8 */ 75 + DEVLINK_ATTR_SB_THRESHOLD, /* u32 */ 76 + DEVLINK_ATTR_SB_TC_INDEX, /* u16 */ 77 + DEVLINK_ATTR_SB_OCC_CUR, /* u32 */ 78 + DEVLINK_ATTR_SB_OCC_MAX, /* u32 */ 114 79 115 80 /* add new attributes above here, update the policy in devlink.c */ 116 81
+1027 -1
net/core/devlink.c
··· 119 119 return devlink_port_get_from_attrs(devlink, info->attrs); 120 120 } 121 121 122 + struct devlink_sb { 123 + struct list_head list; 124 + unsigned int index; 125 + u32 size; 126 + u16 ingress_pools_count; 127 + u16 egress_pools_count; 128 + u16 ingress_tc_count; 129 + u16 egress_tc_count; 130 + }; 131 + 132 + static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb) 133 + { 134 + return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count; 135 + } 136 + 137 + static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink, 138 + unsigned int sb_index) 139 + { 140 + struct devlink_sb *devlink_sb; 141 + 142 + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { 143 + if (devlink_sb->index == sb_index) 144 + return devlink_sb; 145 + } 146 + return NULL; 147 + } 148 + 149 + static bool devlink_sb_index_exists(struct devlink *devlink, 150 + unsigned int sb_index) 151 + { 152 + return devlink_sb_get_by_index(devlink, sb_index); 153 + } 154 + 155 + static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink, 156 + struct nlattr **attrs) 157 + { 158 + if (attrs[DEVLINK_ATTR_SB_INDEX]) { 159 + u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]); 160 + struct devlink_sb *devlink_sb; 161 + 162 + devlink_sb = devlink_sb_get_by_index(devlink, sb_index); 163 + if (!devlink_sb) 164 + return ERR_PTR(-ENODEV); 165 + return devlink_sb; 166 + } 167 + return ERR_PTR(-EINVAL); 168 + } 169 + 170 + static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink, 171 + struct genl_info *info) 172 + { 173 + return devlink_sb_get_from_attrs(devlink, info->attrs); 174 + } 175 + 176 + static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb, 177 + struct nlattr **attrs, 178 + u16 *p_pool_index) 179 + { 180 + u16 val; 181 + 182 + if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX]) 183 + return -EINVAL; 184 + 185 + val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]); 186 + if (val >= 
devlink_sb_pool_count(devlink_sb)) 187 + return -EINVAL; 188 + *p_pool_index = val; 189 + return 0; 190 + } 191 + 192 + static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb, 193 + struct genl_info *info, 194 + u16 *p_pool_index) 195 + { 196 + return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs, 197 + p_pool_index); 198 + } 199 + 200 + static int 201 + devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs, 202 + enum devlink_sb_pool_type *p_pool_type) 203 + { 204 + u8 val; 205 + 206 + if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE]) 207 + return -EINVAL; 208 + 209 + val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]); 210 + if (val != DEVLINK_SB_POOL_TYPE_INGRESS && 211 + val != DEVLINK_SB_POOL_TYPE_EGRESS) 212 + return -EINVAL; 213 + *p_pool_type = val; 214 + return 0; 215 + } 216 + 217 + static int 218 + devlink_sb_pool_type_get_from_info(struct genl_info *info, 219 + enum devlink_sb_pool_type *p_pool_type) 220 + { 221 + return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type); 222 + } 223 + 224 + static int 225 + devlink_sb_th_type_get_from_attrs(struct nlattr **attrs, 226 + enum devlink_sb_threshold_type *p_th_type) 227 + { 228 + u8 val; 229 + 230 + if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]) 231 + return -EINVAL; 232 + 233 + val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]); 234 + if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC && 235 + val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC) 236 + return -EINVAL; 237 + *p_th_type = val; 238 + return 0; 239 + } 240 + 241 + static int 242 + devlink_sb_th_type_get_from_info(struct genl_info *info, 243 + enum devlink_sb_threshold_type *p_th_type) 244 + { 245 + return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type); 246 + } 247 + 248 + static int 249 + devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb, 250 + struct nlattr **attrs, 251 + enum devlink_sb_pool_type pool_type, 252 + u16 *p_tc_index) 253 + { 254 + u16 val; 255 + 256 + if 
(!attrs[DEVLINK_ATTR_SB_TC_INDEX]) 257 + return -EINVAL; 258 + 259 + val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]); 260 + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS && 261 + val >= devlink_sb->ingress_tc_count) 262 + return -EINVAL; 263 + if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS && 264 + val >= devlink_sb->egress_tc_count) 265 + return -EINVAL; 266 + *p_tc_index = val; 267 + return 0; 268 + } 269 + 270 + static int 271 + devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb, 272 + struct genl_info *info, 273 + enum devlink_sb_pool_type pool_type, 274 + u16 *p_tc_index) 275 + { 276 + return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs, 277 + pool_type, p_tc_index); 278 + } 279 + 122 280 #define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) 123 281 #define DEVLINK_NL_FLAG_NEED_PORT BIT(1) 282 + #define DEVLINK_NL_FLAG_NEED_SB BIT(2) 283 + #define DEVLINK_NL_FLAG_LOCK_PORTS BIT(3) 284 + /* port is not needed but we need to ensure they don't 285 + * change in the middle of command 286 + */ 124 287 125 288 static int devlink_nl_pre_doit(const struct genl_ops *ops, 126 289 struct sk_buff *skb, struct genl_info *info) ··· 310 147 } 311 148 info->user_ptr[0] = devlink_port; 312 149 } 150 + if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) { 151 + mutex_lock(&devlink_port_mutex); 152 + } 153 + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) { 154 + struct devlink_sb *devlink_sb; 155 + 156 + devlink_sb = devlink_sb_get_from_info(devlink, info); 157 + if (IS_ERR(devlink_sb)) { 158 + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) 159 + mutex_unlock(&devlink_port_mutex); 160 + mutex_unlock(&devlink_mutex); 161 + return PTR_ERR(devlink_sb); 162 + } 163 + info->user_ptr[1] = devlink_sb; 164 + } 313 165 return 0; 314 166 } 315 167 316 168 static void devlink_nl_post_doit(const struct genl_ops *ops, 317 169 struct sk_buff *skb, struct genl_info *info) 318 170 { 319 - if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) 171 + if 
(ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT || 172 + ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) 320 173 mutex_unlock(&devlink_port_mutex); 321 174 mutex_unlock(&devlink_mutex); 322 175 } ··· 678 499 return devlink_port_unsplit(devlink, port_index); 679 500 } 680 501 502 + static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, 503 + struct devlink_sb *devlink_sb, 504 + enum devlink_command cmd, u32 portid, 505 + u32 seq, int flags) 506 + { 507 + void *hdr; 508 + 509 + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); 510 + if (!hdr) 511 + return -EMSGSIZE; 512 + 513 + if (devlink_nl_put_handle(msg, devlink)) 514 + goto nla_put_failure; 515 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) 516 + goto nla_put_failure; 517 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size)) 518 + goto nla_put_failure; 519 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, 520 + devlink_sb->ingress_pools_count)) 521 + goto nla_put_failure; 522 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, 523 + devlink_sb->egress_pools_count)) 524 + goto nla_put_failure; 525 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT, 526 + devlink_sb->ingress_tc_count)) 527 + goto nla_put_failure; 528 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT, 529 + devlink_sb->egress_tc_count)) 530 + goto nla_put_failure; 531 + 532 + genlmsg_end(msg, hdr); 533 + return 0; 534 + 535 + nla_put_failure: 536 + genlmsg_cancel(msg, hdr); 537 + return -EMSGSIZE; 538 + } 539 + 540 + static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb, 541 + struct genl_info *info) 542 + { 543 + struct devlink *devlink = info->user_ptr[0]; 544 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 545 + struct sk_buff *msg; 546 + int err; 547 + 548 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 549 + if (!msg) 550 + return -ENOMEM; 551 + 552 + err = devlink_nl_sb_fill(msg, devlink, devlink_sb, 553 + DEVLINK_CMD_SB_NEW, 554 + 
info->snd_portid, info->snd_seq, 0); 555 + if (err) { 556 + nlmsg_free(msg); 557 + return err; 558 + } 559 + 560 + return genlmsg_reply(msg, info); 561 + } 562 + 563 + static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg, 564 + struct netlink_callback *cb) 565 + { 566 + struct devlink *devlink; 567 + struct devlink_sb *devlink_sb; 568 + int start = cb->args[0]; 569 + int idx = 0; 570 + int err; 571 + 572 + mutex_lock(&devlink_mutex); 573 + list_for_each_entry(devlink, &devlink_list, list) { 574 + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) 575 + continue; 576 + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { 577 + if (idx < start) { 578 + idx++; 579 + continue; 580 + } 581 + err = devlink_nl_sb_fill(msg, devlink, devlink_sb, 582 + DEVLINK_CMD_SB_NEW, 583 + NETLINK_CB(cb->skb).portid, 584 + cb->nlh->nlmsg_seq, 585 + NLM_F_MULTI); 586 + if (err) 587 + goto out; 588 + idx++; 589 + } 590 + } 591 + out: 592 + mutex_unlock(&devlink_mutex); 593 + 594 + cb->args[0] = idx; 595 + return msg->len; 596 + } 597 + 598 + static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink, 599 + struct devlink_sb *devlink_sb, 600 + u16 pool_index, enum devlink_command cmd, 601 + u32 portid, u32 seq, int flags) 602 + { 603 + struct devlink_sb_pool_info pool_info; 604 + void *hdr; 605 + int err; 606 + 607 + err = devlink->ops->sb_pool_get(devlink, devlink_sb->index, 608 + pool_index, &pool_info); 609 + if (err) 610 + return err; 611 + 612 + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); 613 + if (!hdr) 614 + return -EMSGSIZE; 615 + 616 + if (devlink_nl_put_handle(msg, devlink)) 617 + goto nla_put_failure; 618 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) 619 + goto nla_put_failure; 620 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) 621 + goto nla_put_failure; 622 + if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type)) 623 + goto nla_put_failure; 624 + if 
(nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size)) 625 + goto nla_put_failure; 626 + if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, 627 + pool_info.threshold_type)) 628 + goto nla_put_failure; 629 + 630 + genlmsg_end(msg, hdr); 631 + return 0; 632 + 633 + nla_put_failure: 634 + genlmsg_cancel(msg, hdr); 635 + return -EMSGSIZE; 636 + } 637 + 638 + static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb, 639 + struct genl_info *info) 640 + { 641 + struct devlink *devlink = info->user_ptr[0]; 642 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 643 + struct sk_buff *msg; 644 + u16 pool_index; 645 + int err; 646 + 647 + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, 648 + &pool_index); 649 + if (err) 650 + return err; 651 + 652 + if (!devlink->ops || !devlink->ops->sb_pool_get) 653 + return -EOPNOTSUPP; 654 + 655 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 656 + if (!msg) 657 + return -ENOMEM; 658 + 659 + err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index, 660 + DEVLINK_CMD_SB_POOL_NEW, 661 + info->snd_portid, info->snd_seq, 0); 662 + if (err) { 663 + nlmsg_free(msg); 664 + return err; 665 + } 666 + 667 + return genlmsg_reply(msg, info); 668 + } 669 + 670 + static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, 671 + struct devlink *devlink, 672 + struct devlink_sb *devlink_sb, 673 + u32 portid, u32 seq) 674 + { 675 + u16 pool_count = devlink_sb_pool_count(devlink_sb); 676 + u16 pool_index; 677 + int err; 678 + 679 + for (pool_index = 0; pool_index < pool_count; pool_index++) { 680 + if (*p_idx < start) { 681 + (*p_idx)++; 682 + continue; 683 + } 684 + err = devlink_nl_sb_pool_fill(msg, devlink, 685 + devlink_sb, 686 + pool_index, 687 + DEVLINK_CMD_SB_POOL_NEW, 688 + portid, seq, NLM_F_MULTI); 689 + if (err) 690 + return err; 691 + (*p_idx)++; 692 + } 693 + return 0; 694 + } 695 + 696 + static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg, 697 + struct netlink_callback 
*cb) 698 + { 699 + struct devlink *devlink; 700 + struct devlink_sb *devlink_sb; 701 + int start = cb->args[0]; 702 + int idx = 0; 703 + int err; 704 + 705 + mutex_lock(&devlink_mutex); 706 + list_for_each_entry(devlink, &devlink_list, list) { 707 + if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) || 708 + !devlink->ops || !devlink->ops->sb_pool_get) 709 + continue; 710 + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { 711 + err = __sb_pool_get_dumpit(msg, start, &idx, devlink, 712 + devlink_sb, 713 + NETLINK_CB(cb->skb).portid, 714 + cb->nlh->nlmsg_seq); 715 + if (err && err != -EOPNOTSUPP) 716 + goto out; 717 + } 718 + } 719 + out: 720 + mutex_unlock(&devlink_mutex); 721 + 722 + cb->args[0] = idx; 723 + return msg->len; 724 + } 725 + 726 + static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, 727 + u16 pool_index, u32 size, 728 + enum devlink_sb_threshold_type threshold_type) 729 + 730 + { 731 + const struct devlink_ops *ops = devlink->ops; 732 + 733 + if (ops && ops->sb_pool_set) 734 + return ops->sb_pool_set(devlink, sb_index, pool_index, 735 + size, threshold_type); 736 + return -EOPNOTSUPP; 737 + } 738 + 739 + static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, 740 + struct genl_info *info) 741 + { 742 + struct devlink *devlink = info->user_ptr[0]; 743 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 744 + enum devlink_sb_threshold_type threshold_type; 745 + u16 pool_index; 746 + u32 size; 747 + int err; 748 + 749 + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, 750 + &pool_index); 751 + if (err) 752 + return err; 753 + 754 + err = devlink_sb_th_type_get_from_info(info, &threshold_type); 755 + if (err) 756 + return err; 757 + 758 + if (!info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]) 759 + return -EINVAL; 760 + 761 + size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]); 762 + return devlink_sb_pool_set(devlink, devlink_sb->index, 763 + pool_index, size, threshold_type); 764 + } 765 + 766 + 
static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, 767 + struct devlink *devlink, 768 + struct devlink_port *devlink_port, 769 + struct devlink_sb *devlink_sb, 770 + u16 pool_index, 771 + enum devlink_command cmd, 772 + u32 portid, u32 seq, int flags) 773 + { 774 + const struct devlink_ops *ops = devlink->ops; 775 + u32 threshold; 776 + void *hdr; 777 + int err; 778 + 779 + err = ops->sb_port_pool_get(devlink_port, devlink_sb->index, 780 + pool_index, &threshold); 781 + if (err) 782 + return err; 783 + 784 + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); 785 + if (!hdr) 786 + return -EMSGSIZE; 787 + 788 + if (devlink_nl_put_handle(msg, devlink)) 789 + goto nla_put_failure; 790 + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index)) 791 + goto nla_put_failure; 792 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) 793 + goto nla_put_failure; 794 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) 795 + goto nla_put_failure; 796 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold)) 797 + goto nla_put_failure; 798 + 799 + if (ops->sb_occ_port_pool_get) { 800 + u32 cur; 801 + u32 max; 802 + 803 + err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index, 804 + pool_index, &cur, &max); 805 + if (err && err != -EOPNOTSUPP) 806 + return err; 807 + if (!err) { 808 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur)) 809 + goto nla_put_failure; 810 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max)) 811 + goto nla_put_failure; 812 + } 813 + } 814 + 815 + genlmsg_end(msg, hdr); 816 + return 0; 817 + 818 + nla_put_failure: 819 + genlmsg_cancel(msg, hdr); 820 + return -EMSGSIZE; 821 + } 822 + 823 + static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb, 824 + struct genl_info *info) 825 + { 826 + struct devlink_port *devlink_port = info->user_ptr[0]; 827 + struct devlink *devlink = devlink_port->devlink; 828 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 829 + struct 
sk_buff *msg; 830 + u16 pool_index; 831 + int err; 832 + 833 + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, 834 + &pool_index); 835 + if (err) 836 + return err; 837 + 838 + if (!devlink->ops || !devlink->ops->sb_port_pool_get) 839 + return -EOPNOTSUPP; 840 + 841 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 842 + if (!msg) 843 + return -ENOMEM; 844 + 845 + err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port, 846 + devlink_sb, pool_index, 847 + DEVLINK_CMD_SB_PORT_POOL_NEW, 848 + info->snd_portid, info->snd_seq, 0); 849 + if (err) { 850 + nlmsg_free(msg); 851 + return err; 852 + } 853 + 854 + return genlmsg_reply(msg, info); 855 + } 856 + 857 + static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, 858 + struct devlink *devlink, 859 + struct devlink_sb *devlink_sb, 860 + u32 portid, u32 seq) 861 + { 862 + struct devlink_port *devlink_port; 863 + u16 pool_count = devlink_sb_pool_count(devlink_sb); 864 + u16 pool_index; 865 + int err; 866 + 867 + list_for_each_entry(devlink_port, &devlink->port_list, list) { 868 + for (pool_index = 0; pool_index < pool_count; pool_index++) { 869 + if (*p_idx < start) { 870 + (*p_idx)++; 871 + continue; 872 + } 873 + err = devlink_nl_sb_port_pool_fill(msg, devlink, 874 + devlink_port, 875 + devlink_sb, 876 + pool_index, 877 + DEVLINK_CMD_SB_PORT_POOL_NEW, 878 + portid, seq, 879 + NLM_F_MULTI); 880 + if (err) 881 + return err; 882 + (*p_idx)++; 883 + } 884 + } 885 + return 0; 886 + } 887 + 888 + static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg, 889 + struct netlink_callback *cb) 890 + { 891 + struct devlink *devlink; 892 + struct devlink_sb *devlink_sb; 893 + int start = cb->args[0]; 894 + int idx = 0; 895 + int err; 896 + 897 + mutex_lock(&devlink_mutex); 898 + mutex_lock(&devlink_port_mutex); 899 + list_for_each_entry(devlink, &devlink_list, list) { 900 + if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) || 901 + !devlink->ops || 
!devlink->ops->sb_port_pool_get) 902 + continue; 903 + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { 904 + err = __sb_port_pool_get_dumpit(msg, start, &idx, 905 + devlink, devlink_sb, 906 + NETLINK_CB(cb->skb).portid, 907 + cb->nlh->nlmsg_seq); 908 + if (err && err != -EOPNOTSUPP) 909 + goto out; 910 + } 911 + } 912 + out: 913 + mutex_unlock(&devlink_port_mutex); 914 + mutex_unlock(&devlink_mutex); 915 + 916 + cb->args[0] = idx; 917 + return msg->len; 918 + } 919 + 920 + static int devlink_sb_port_pool_set(struct devlink_port *devlink_port, 921 + unsigned int sb_index, u16 pool_index, 922 + u32 threshold) 923 + 924 + { 925 + const struct devlink_ops *ops = devlink_port->devlink->ops; 926 + 927 + if (ops && ops->sb_port_pool_set) 928 + return ops->sb_port_pool_set(devlink_port, sb_index, 929 + pool_index, threshold); 930 + return -EOPNOTSUPP; 931 + } 932 + 933 + static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb, 934 + struct genl_info *info) 935 + { 936 + struct devlink_port *devlink_port = info->user_ptr[0]; 937 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 938 + u16 pool_index; 939 + u32 threshold; 940 + int err; 941 + 942 + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, 943 + &pool_index); 944 + if (err) 945 + return err; 946 + 947 + if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD]) 948 + return -EINVAL; 949 + 950 + threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]); 951 + return devlink_sb_port_pool_set(devlink_port, devlink_sb->index, 952 + pool_index, threshold); 953 + } 954 + 955 + static int 956 + devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink, 957 + struct devlink_port *devlink_port, 958 + struct devlink_sb *devlink_sb, u16 tc_index, 959 + enum devlink_sb_pool_type pool_type, 960 + enum devlink_command cmd, 961 + u32 portid, u32 seq, int flags) 962 + { 963 + const struct devlink_ops *ops = devlink->ops; 964 + u16 pool_index; 965 + u32 threshold; 966 + void *hdr; 967 + 
int err; 968 + 969 + err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index, 970 + tc_index, pool_type, 971 + &pool_index, &threshold); 972 + if (err) 973 + return err; 974 + 975 + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); 976 + if (!hdr) 977 + return -EMSGSIZE; 978 + 979 + if (devlink_nl_put_handle(msg, devlink)) 980 + goto nla_put_failure; 981 + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index)) 982 + goto nla_put_failure; 983 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) 984 + goto nla_put_failure; 985 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index)) 986 + goto nla_put_failure; 987 + if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type)) 988 + goto nla_put_failure; 989 + if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) 990 + goto nla_put_failure; 991 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold)) 992 + goto nla_put_failure; 993 + 994 + if (ops->sb_occ_tc_port_bind_get) { 995 + u32 cur; 996 + u32 max; 997 + 998 + err = ops->sb_occ_tc_port_bind_get(devlink_port, 999 + devlink_sb->index, 1000 + tc_index, pool_type, 1001 + &cur, &max); 1002 + if (err && err != -EOPNOTSUPP) 1003 + return err; 1004 + if (!err) { 1005 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur)) 1006 + goto nla_put_failure; 1007 + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max)) 1008 + goto nla_put_failure; 1009 + } 1010 + } 1011 + 1012 + genlmsg_end(msg, hdr); 1013 + return 0; 1014 + 1015 + nla_put_failure: 1016 + genlmsg_cancel(msg, hdr); 1017 + return -EMSGSIZE; 1018 + } 1019 + 1020 + static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb, 1021 + struct genl_info *info) 1022 + { 1023 + struct devlink_port *devlink_port = info->user_ptr[0]; 1024 + struct devlink *devlink = devlink_port->devlink; 1025 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 1026 + struct sk_buff *msg; 1027 + enum devlink_sb_pool_type pool_type; 1028 + u16 tc_index; 1029 + int err; 
1030 + 1031 + err = devlink_sb_pool_type_get_from_info(info, &pool_type); 1032 + if (err) 1033 + return err; 1034 + 1035 + err = devlink_sb_tc_index_get_from_info(devlink_sb, info, 1036 + pool_type, &tc_index); 1037 + if (err) 1038 + return err; 1039 + 1040 + if (!devlink->ops || !devlink->ops->sb_tc_pool_bind_get) 1041 + return -EOPNOTSUPP; 1042 + 1043 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1044 + if (!msg) 1045 + return -ENOMEM; 1046 + 1047 + err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port, 1048 + devlink_sb, tc_index, pool_type, 1049 + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, 1050 + info->snd_portid, 1051 + info->snd_seq, 0); 1052 + if (err) { 1053 + nlmsg_free(msg); 1054 + return err; 1055 + } 1056 + 1057 + return genlmsg_reply(msg, info); 1058 + } 1059 + 1060 + static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg, 1061 + int start, int *p_idx, 1062 + struct devlink *devlink, 1063 + struct devlink_sb *devlink_sb, 1064 + u32 portid, u32 seq) 1065 + { 1066 + struct devlink_port *devlink_port; 1067 + u16 tc_index; 1068 + int err; 1069 + 1070 + list_for_each_entry(devlink_port, &devlink->port_list, list) { 1071 + for (tc_index = 0; 1072 + tc_index < devlink_sb->ingress_tc_count; tc_index++) { 1073 + if (*p_idx < start) { 1074 + (*p_idx)++; 1075 + continue; 1076 + } 1077 + err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, 1078 + devlink_port, 1079 + devlink_sb, 1080 + tc_index, 1081 + DEVLINK_SB_POOL_TYPE_INGRESS, 1082 + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, 1083 + portid, seq, 1084 + NLM_F_MULTI); 1085 + if (err) 1086 + return err; 1087 + (*p_idx)++; 1088 + } 1089 + for (tc_index = 0; 1090 + tc_index < devlink_sb->egress_tc_count; tc_index++) { 1091 + if (*p_idx < start) { 1092 + (*p_idx)++; 1093 + continue; 1094 + } 1095 + err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, 1096 + devlink_port, 1097 + devlink_sb, 1098 + tc_index, 1099 + DEVLINK_SB_POOL_TYPE_EGRESS, 1100 + DEVLINK_CMD_SB_TC_POOL_BIND_NEW, 1101 + portid, seq, 1102 + 
NLM_F_MULTI); 1103 + if (err) 1104 + return err; 1105 + (*p_idx)++; 1106 + } 1107 + } 1108 + return 0; 1109 + } 1110 + 1111 + static int 1112 + devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg, 1113 + struct netlink_callback *cb) 1114 + { 1115 + struct devlink *devlink; 1116 + struct devlink_sb *devlink_sb; 1117 + int start = cb->args[0]; 1118 + int idx = 0; 1119 + int err; 1120 + 1121 + mutex_lock(&devlink_mutex); 1122 + mutex_lock(&devlink_port_mutex); 1123 + list_for_each_entry(devlink, &devlink_list, list) { 1124 + if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) || 1125 + !devlink->ops || !devlink->ops->sb_tc_pool_bind_get) 1126 + continue; 1127 + list_for_each_entry(devlink_sb, &devlink->sb_list, list) { 1128 + err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx, 1129 + devlink, 1130 + devlink_sb, 1131 + NETLINK_CB(cb->skb).portid, 1132 + cb->nlh->nlmsg_seq); 1133 + if (err && err != -EOPNOTSUPP) 1134 + goto out; 1135 + } 1136 + } 1137 + out: 1138 + mutex_unlock(&devlink_port_mutex); 1139 + mutex_unlock(&devlink_mutex); 1140 + 1141 + cb->args[0] = idx; 1142 + return msg->len; 1143 + } 1144 + 1145 + static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, 1146 + unsigned int sb_index, u16 tc_index, 1147 + enum devlink_sb_pool_type pool_type, 1148 + u16 pool_index, u32 threshold) 1149 + 1150 + { 1151 + const struct devlink_ops *ops = devlink_port->devlink->ops; 1152 + 1153 + if (ops && ops->sb_tc_pool_bind_set) 1154 + return ops->sb_tc_pool_bind_set(devlink_port, sb_index, 1155 + tc_index, pool_type, 1156 + pool_index, threshold); 1157 + return -EOPNOTSUPP; 1158 + } 1159 + 1160 + static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb, 1161 + struct genl_info *info) 1162 + { 1163 + struct devlink_port *devlink_port = info->user_ptr[0]; 1164 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 1165 + enum devlink_sb_pool_type pool_type; 1166 + u16 tc_index; 1167 + u16 pool_index; 1168 + u32 threshold; 1169 + int 
err; 1170 + 1171 + err = devlink_sb_pool_type_get_from_info(info, &pool_type); 1172 + if (err) 1173 + return err; 1174 + 1175 + err = devlink_sb_tc_index_get_from_info(devlink_sb, info, 1176 + pool_type, &tc_index); 1177 + if (err) 1178 + return err; 1179 + 1180 + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, 1181 + &pool_index); 1182 + if (err) 1183 + return err; 1184 + 1185 + if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD]) 1186 + return -EINVAL; 1187 + 1188 + threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]); 1189 + return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index, 1190 + tc_index, pool_type, 1191 + pool_index, threshold); 1192 + } 1193 + 1194 + static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb, 1195 + struct genl_info *info) 1196 + { 1197 + struct devlink *devlink = info->user_ptr[0]; 1198 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 1199 + const struct devlink_ops *ops = devlink->ops; 1200 + 1201 + if (ops && ops->sb_occ_snapshot) 1202 + return ops->sb_occ_snapshot(devlink, devlink_sb->index); 1203 + return -EOPNOTSUPP; 1204 + } 1205 + 1206 + static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb, 1207 + struct genl_info *info) 1208 + { 1209 + struct devlink *devlink = info->user_ptr[0]; 1210 + struct devlink_sb *devlink_sb = info->user_ptr[1]; 1211 + const struct devlink_ops *ops = devlink->ops; 1212 + 1213 + if (ops && ops->sb_occ_max_clear) 1214 + return ops->sb_occ_max_clear(devlink, devlink_sb->index); 1215 + return -EOPNOTSUPP; 1216 + } 1217 + 681 1218 static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { 682 1219 [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, 683 1220 [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, 684 1221 [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 }, 685 1222 [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 }, 686 1223 [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 }, 1224 + [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 
}, 1225 + [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 }, 1226 + [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 }, 1227 + [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 }, 1228 + [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 }, 1229 + [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 }, 1230 + [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 }, 687 1231 }; 688 1232 689 1233 static const struct genl_ops devlink_nl_ops[] = { ··· 1447 545 .flags = GENL_ADMIN_PERM, 1448 546 .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, 1449 547 }, 548 + { 549 + .cmd = DEVLINK_CMD_SB_GET, 550 + .doit = devlink_nl_cmd_sb_get_doit, 551 + .dumpit = devlink_nl_cmd_sb_get_dumpit, 552 + .policy = devlink_nl_policy, 553 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | 554 + DEVLINK_NL_FLAG_NEED_SB, 555 + /* can be retrieved by unprivileged users */ 556 + }, 557 + { 558 + .cmd = DEVLINK_CMD_SB_POOL_GET, 559 + .doit = devlink_nl_cmd_sb_pool_get_doit, 560 + .dumpit = devlink_nl_cmd_sb_pool_get_dumpit, 561 + .policy = devlink_nl_policy, 562 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | 563 + DEVLINK_NL_FLAG_NEED_SB, 564 + /* can be retrieved by unprivileged users */ 565 + }, 566 + { 567 + .cmd = DEVLINK_CMD_SB_POOL_SET, 568 + .doit = devlink_nl_cmd_sb_pool_set_doit, 569 + .policy = devlink_nl_policy, 570 + .flags = GENL_ADMIN_PERM, 571 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | 572 + DEVLINK_NL_FLAG_NEED_SB, 573 + }, 574 + { 575 + .cmd = DEVLINK_CMD_SB_PORT_POOL_GET, 576 + .doit = devlink_nl_cmd_sb_port_pool_get_doit, 577 + .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit, 578 + .policy = devlink_nl_policy, 579 + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | 580 + DEVLINK_NL_FLAG_NEED_SB, 581 + /* can be retrieved by unprivileged users */ 582 + }, 583 + { 584 + .cmd = DEVLINK_CMD_SB_PORT_POOL_SET, 585 + .doit = devlink_nl_cmd_sb_port_pool_set_doit, 586 + .policy = devlink_nl_policy, 587 + .flags = GENL_ADMIN_PERM, 588 + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | 589 
+ DEVLINK_NL_FLAG_NEED_SB, 590 + }, 591 + { 592 + .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET, 593 + .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit, 594 + .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit, 595 + .policy = devlink_nl_policy, 596 + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | 597 + DEVLINK_NL_FLAG_NEED_SB, 598 + /* can be retrieved by unprivileged users */ 599 + }, 600 + { 601 + .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET, 602 + .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit, 603 + .policy = devlink_nl_policy, 604 + .flags = GENL_ADMIN_PERM, 605 + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | 606 + DEVLINK_NL_FLAG_NEED_SB, 607 + }, 608 + { 609 + .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT, 610 + .doit = devlink_nl_cmd_sb_occ_snapshot_doit, 611 + .policy = devlink_nl_policy, 612 + .flags = GENL_ADMIN_PERM, 613 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | 614 + DEVLINK_NL_FLAG_NEED_SB | 615 + DEVLINK_NL_FLAG_LOCK_PORTS, 616 + }, 617 + { 618 + .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR, 619 + .doit = devlink_nl_cmd_sb_occ_max_clear_doit, 620 + .policy = devlink_nl_policy, 621 + .flags = GENL_ADMIN_PERM, 622 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | 623 + DEVLINK_NL_FLAG_NEED_SB | 624 + DEVLINK_NL_FLAG_LOCK_PORTS, 625 + }, 1450 626 }; 1451 627 1452 628 /** ··· 1546 566 devlink->ops = ops; 1547 567 devlink_net_set(devlink, &init_net); 1548 568 INIT_LIST_HEAD(&devlink->port_list); 569 + INIT_LIST_HEAD(&devlink->sb_list); 1549 570 return devlink; 1550 571 } 1551 572 EXPORT_SYMBOL_GPL(devlink_alloc); ··· 1701 720 devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); 1702 721 } 1703 722 EXPORT_SYMBOL_GPL(devlink_port_split_set); 723 + 724 + int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, 725 + u32 size, u16 ingress_pools_count, 726 + u16 egress_pools_count, u16 ingress_tc_count, 727 + u16 egress_tc_count) 728 + { 729 + struct devlink_sb *devlink_sb; 730 + int err = 0; 731 + 732 + mutex_lock(&devlink_mutex); 733 + if 
(devlink_sb_index_exists(devlink, sb_index)) { 734 + err = -EEXIST; 735 + goto unlock; 736 + } 737 + 738 + devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL); 739 + if (!devlink_sb) { 740 + err = -ENOMEM; 741 + goto unlock; 742 + } 743 + devlink_sb->index = sb_index; 744 + devlink_sb->size = size; 745 + devlink_sb->ingress_pools_count = ingress_pools_count; 746 + devlink_sb->egress_pools_count = egress_pools_count; 747 + devlink_sb->ingress_tc_count = ingress_tc_count; 748 + devlink_sb->egress_tc_count = egress_tc_count; 749 + list_add_tail(&devlink_sb->list, &devlink->sb_list); 750 + unlock: 751 + mutex_unlock(&devlink_mutex); 752 + return err; 753 + } 754 + EXPORT_SYMBOL_GPL(devlink_sb_register); 755 + 756 + void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index) 757 + { 758 + struct devlink_sb *devlink_sb; 759 + 760 + mutex_lock(&devlink_mutex); 761 + devlink_sb = devlink_sb_get_by_index(devlink, sb_index); 762 + WARN_ON(!devlink_sb); 763 + list_del(&devlink_sb->list); 764 + mutex_unlock(&devlink_mutex); 765 + kfree(devlink_sb); 766 + } 767 + EXPORT_SYMBOL_GPL(devlink_sb_unregister); 1704 768 1705 769 static int __init devlink_module_init(void) 1706 770 {