Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cxgb4-add-TC-MATCHALL-classifier-offload'

Rahul Lakkireddy says:

====================
cxgb4: add TC-MATCHALL classifier offload

This series of patches adds support to offload the TC-MATCHALL classifier
to hardware to classify all outgoing and incoming traffic on the
underlying port. Only 1 egress and 1 ingress rule each can be
offloaded on the underlying port.

Patch 1 adds support for TC-MATCHALL classifier offload on the egress
side. TC-POLICE is the only action that can be offloaded on the egress
side and is used to rate limit all outgoing traffic to specified max
rate.

Patch 2 adds logic to reject the current rule offload if its priority
conflicts with existing rules in the TCAM.

Patch 3 adds support for TC-MATCHALL classifier offload on the ingress
side. The same set of actions supported by the existing TC-FLOWER
classifier offload can be applied to all the incoming traffic.

v5:
- Fixed commit message and comment to include comparison for equal
priority in patch 2.

v4:
- Removed check in patch 1 to reject police offload if prio is not 1.
- Moved TC_SETUP_BLOCK code to separate function in patch 1.
- Added logic to ensure the prio passed by TC doesn't conflict with
other rules in TCAM in patch 2.
- Higher index has lower priority than lower index in TCAM. So, rework
cxgb4_get_free_ftid() to search free index from end of TCAM in
descending order in patch 2.
- Added check to ensure the matchall rule's prio doesn't conflict with
other rules in TCAM in patch 3.
- Added logic to fill default mask for VIID, if none has been
provided, to prevent conflict with duplicate VIID rules in patch 3.
- Used existing variables in private structure to fill VIID info,
instead of extracting the info manually in patch 3.

v3:
- Added check in patch 1 to reject police offload if prio is not 1.
- Assign block_shared variable only for TC_SETUP_BLOCK in patch 1.

v2:
- Added check to reject flow block sharing for policers in patch 1.
- Removed logic to fetch free index from end of TCAM in patch 2.
Must maintain the same ordering as in the kernel.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+710 -90
+2 -1
drivers/net/ethernet/chelsio/cxgb4/Makefile
··· 8 8 cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ 9 9 cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ 10 10 cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \ 11 - cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o 11 + cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o \ 12 + cxgb4_tc_matchall.o 12 13 cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o 13 14 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o 14 15 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
+10 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 603 603 u8 vivld; 604 604 u8 smt_idx; 605 605 u8 rx_cchan; 606 + 607 + bool tc_block_shared; 606 608 }; 607 609 608 610 struct dentry; ··· 1103 1101 1104 1102 /* TC MQPRIO offload */ 1105 1103 struct cxgb4_tc_mqprio *tc_mqprio; 1104 + 1105 + /* TC MATCHALL classifier offload */ 1106 + struct cxgb4_tc_matchall *tc_matchall; 1106 1107 }; 1107 1108 1108 1109 /* Support for "sched-class" command to allow a TX Scheduling Class to be ··· 1135 1130 1136 1131 enum { 1137 1132 SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */ 1133 + SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */ 1138 1134 }; 1139 1135 1140 1136 enum { ··· 1286 1280 u16 nat_lport; /* local port to use after NAT'ing */ 1287 1281 u16 nat_fport; /* foreign port to use after NAT'ing */ 1288 1282 1283 + u32 tc_prio; /* TC's filter priority index */ 1284 + u64 tc_cookie; /* Unique cookie identifying TC rules */ 1285 + 1289 1286 /* reservation for future additions */ 1290 - u8 rsvd[24]; 1287 + u8 rsvd[12]; 1291 1288 1292 1289 /* Filter rule value/mask pairs. 1293 1290 */
+98 -26
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
··· 440 440 { 441 441 struct adapter *adap = netdev2adap(dev); 442 442 struct tid_info *t = &adap->tids; 443 + bool found = false; 444 + u8 i, n, cnt; 443 445 int ftid; 444 446 445 - spin_lock_bh(&t->ftid_lock); 446 - if (family == PF_INET) { 447 - ftid = find_first_zero_bit(t->ftid_bmap, t->nftids); 448 - if (ftid >= t->nftids) 449 - ftid = -1; 450 - } else { 451 - if (is_t6(adap->params.chip)) { 452 - ftid = bitmap_find_free_region(t->ftid_bmap, 453 - t->nftids, 1); 454 - if (ftid < 0) 455 - goto out_unlock; 456 - 457 - /* this is only a lookup, keep the found region 458 - * unallocated 459 - */ 460 - bitmap_release_region(t->ftid_bmap, ftid, 1); 461 - } else { 462 - ftid = bitmap_find_free_region(t->ftid_bmap, 463 - t->nftids, 2); 464 - if (ftid < 0) 465 - goto out_unlock; 466 - 467 - bitmap_release_region(t->ftid_bmap, ftid, 2); 468 - } 447 + /* IPv4 occupy 1 slot. IPv6 occupy 2 slots on T6 and 4 slots 448 + * on T5. 449 + */ 450 + n = 1; 451 + if (family == PF_INET6) { 452 + n++; 453 + if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) 454 + n += 2; 469 455 } 470 - out_unlock: 456 + 457 + if (n > t->nftids) 458 + return -ENOMEM; 459 + 460 + /* Find free filter slots from the end of TCAM. Appropriate 461 + * checks must be done by caller later to ensure the prio 462 + * passed by TC doesn't conflict with prio saved by existing 463 + * rules in the TCAM. 464 + */ 465 + spin_lock_bh(&t->ftid_lock); 466 + ftid = t->nftids - 1; 467 + while (ftid >= n - 1) { 468 + cnt = 0; 469 + for (i = 0; i < n; i++) { 470 + if (test_bit(ftid - i, t->ftid_bmap)) 471 + break; 472 + cnt++; 473 + } 474 + if (cnt == n) { 475 + ftid &= ~(n - 1); 476 + found = true; 477 + break; 478 + } 479 + 480 + ftid -= n; 481 + } 471 482 spin_unlock_bh(&t->ftid_lock); 472 - return ftid; 483 + 484 + return found ? 
ftid : -ENOMEM; 473 485 } 474 486 475 487 static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family, ··· 520 508 bitmap_release_region(t->ftid_bmap, fidx, 1); 521 509 } 522 510 spin_unlock_bh(&t->ftid_lock); 511 + } 512 + 513 + bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio) 514 + { 515 + struct adapter *adap = netdev2adap(dev); 516 + struct filter_entry *prev_fe, *next_fe; 517 + struct tid_info *t = &adap->tids; 518 + u32 prev_ftid, next_ftid; 519 + bool valid = true; 520 + 521 + /* Only insert the rule if both of the following conditions 522 + * are met: 523 + * 1. The immediate previous rule has priority <= @prio. 524 + * 2. The immediate next rule has priority >= @prio. 525 + */ 526 + spin_lock_bh(&t->ftid_lock); 527 + /* Don't insert if there's a rule already present at @idx. */ 528 + if (test_bit(idx, t->ftid_bmap)) { 529 + valid = false; 530 + goto out_unlock; 531 + } 532 + 533 + next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx); 534 + if (next_ftid >= t->nftids) 535 + next_ftid = idx; 536 + 537 + next_fe = &adap->tids.ftid_tab[next_ftid]; 538 + 539 + prev_ftid = find_last_bit(t->ftid_bmap, idx); 540 + if (prev_ftid >= idx) 541 + prev_ftid = idx; 542 + 543 + /* See if the filter entry belongs to an IPv6 rule, which 544 + * occupy 4 slots on T5 and 2 slots on T6. Adjust the 545 + * reference to the previously inserted filter entry 546 + * accordingly. 
547 + */ 548 + if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) { 549 + prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3]; 550 + if (!prev_fe->fs.type) 551 + prev_fe = &adap->tids.ftid_tab[prev_ftid]; 552 + } else { 553 + prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1]; 554 + if (!prev_fe->fs.type) 555 + prev_fe = &adap->tids.ftid_tab[prev_ftid]; 556 + } 557 + 558 + if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) || 559 + (next_fe->valid && prio > next_fe->fs.tc_prio)) 560 + valid = false; 561 + 562 + out_unlock: 563 + spin_unlock_bh(&t->ftid_lock); 564 + return valid; 523 565 } 524 566 525 567 /* Delete the filter at a specified index. */ ··· 872 806 fs->mask.tos |= ~0; 873 807 if (fs->val.proto && !fs->mask.proto) 874 808 fs->mask.proto |= ~0; 809 + if (fs->val.pfvf_vld && !fs->mask.pfvf_vld) 810 + fs->mask.pfvf_vld |= ~0; 811 + if (fs->val.pf && !fs->mask.pf) 812 + fs->mask.pf |= ~0; 813 + if (fs->val.vf && !fs->mask.vf) 814 + fs->mask.vf |= ~0; 875 815 876 816 for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) { 877 817 lip |= fs->val.lip[i];
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
··· 53 53 void init_hash_filter(struct adapter *adap); 54 54 bool is_filter_exact_match(struct adapter *adap, 55 55 struct ch_filter_specification *fs); 56 + bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio); 56 57 #endif /* __CXGB4_FILTER_H */
+83 -8
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 84 84 #include "cxgb4_tc_u32.h" 85 85 #include "cxgb4_tc_flower.h" 86 86 #include "cxgb4_tc_mqprio.h" 87 + #include "cxgb4_tc_matchall.h" 87 88 #include "cxgb4_ptp.h" 88 89 #include "cxgb4_cudbg.h" 89 90 ··· 3235 3234 } 3236 3235 } 3237 3236 3238 - static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3239 - void *cb_priv) 3237 + static int cxgb_setup_tc_matchall(struct net_device *dev, 3238 + struct tc_cls_matchall_offload *cls_matchall, 3239 + bool ingress) 3240 + { 3241 + struct adapter *adap = netdev2adap(dev); 3242 + 3243 + if (!adap->tc_matchall) 3244 + return -ENOMEM; 3245 + 3246 + switch (cls_matchall->command) { 3247 + case TC_CLSMATCHALL_REPLACE: 3248 + return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress); 3249 + case TC_CLSMATCHALL_DESTROY: 3250 + return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress); 3251 + case TC_CLSMATCHALL_STATS: 3252 + if (ingress) 3253 + return cxgb4_tc_matchall_stats(dev, cls_matchall); 3254 + break; 3255 + default: 3256 + break; 3257 + } 3258 + 3259 + return -EOPNOTSUPP; 3260 + } 3261 + 3262 + static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type, 3263 + void *type_data, void *cb_priv) 3240 3264 { 3241 3265 struct net_device *dev = cb_priv; 3242 3266 struct port_info *pi = netdev2pinfo(dev); ··· 3282 3256 return cxgb_setup_tc_cls_u32(dev, type_data); 3283 3257 case TC_SETUP_CLSFLOWER: 3284 3258 return cxgb_setup_tc_flower(dev, type_data); 3259 + case TC_SETUP_CLSMATCHALL: 3260 + return cxgb_setup_tc_matchall(dev, type_data, true); 3285 3261 default: 3286 3262 return -EOPNOTSUPP; 3287 3263 } 3264 + } 3265 + 3266 + static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type, 3267 + void *type_data, void *cb_priv) 3268 + { 3269 + struct net_device *dev = cb_priv; 3270 + struct port_info *pi = netdev2pinfo(dev); 3271 + struct adapter *adap = netdev2adap(dev); 3272 + 3273 + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { 3274 + dev_err(adap->pdev_dev, 3275 + "Failed to setup tc 
on port %d. Link Down?\n", 3276 + pi->port_id); 3277 + return -EINVAL; 3278 + } 3279 + 3280 + if (!tc_cls_can_offload_and_chain0(dev, type_data)) 3281 + return -EOPNOTSUPP; 3282 + 3283 + switch (type) { 3284 + case TC_SETUP_CLSMATCHALL: 3285 + return cxgb_setup_tc_matchall(dev, type_data, false); 3286 + default: 3287 + break; 3288 + } 3289 + 3290 + return -EOPNOTSUPP; 3288 3291 } 3289 3292 3290 3293 static int cxgb_setup_tc_mqprio(struct net_device *dev, ··· 3329 3274 3330 3275 static LIST_HEAD(cxgb_block_cb_list); 3331 3276 3277 + static int cxgb_setup_tc_block(struct net_device *dev, 3278 + struct flow_block_offload *f) 3279 + { 3280 + struct port_info *pi = netdev_priv(dev); 3281 + flow_setup_cb_t *cb; 3282 + bool ingress_only; 3283 + 3284 + pi->tc_block_shared = f->block_shared; 3285 + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 3286 + cb = cxgb_setup_tc_block_egress_cb; 3287 + ingress_only = false; 3288 + } else { 3289 + cb = cxgb_setup_tc_block_ingress_cb; 3290 + ingress_only = true; 3291 + } 3292 + 3293 + return flow_block_cb_setup_simple(f, &cxgb_block_cb_list, 3294 + cb, pi, dev, ingress_only); 3295 + } 3296 + 3332 3297 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, 3333 3298 void *type_data) 3334 3299 { 3335 - struct port_info *pi = netdev2pinfo(dev); 3336 - 3337 3300 switch (type) { 3338 3301 case TC_SETUP_QDISC_MQPRIO: 3339 3302 return cxgb_setup_tc_mqprio(dev, type_data); 3340 3303 case TC_SETUP_BLOCK: 3341 - return flow_block_cb_setup_simple(type_data, 3342 - &cxgb_block_cb_list, 3343 - cxgb_setup_tc_block_cb, 3344 - pi, dev, true); 3304 + return cxgb_setup_tc_block(dev, type_data); 3345 3305 default: 3346 3306 return -EOPNOTSUPP; 3347 3307 } ··· 5811 5741 kvfree(adapter->srq); 5812 5742 t4_cleanup_sched(adapter); 5813 5743 kvfree(adapter->tids.tid_tab); 5744 + cxgb4_cleanup_tc_matchall(adapter); 5814 5745 cxgb4_cleanup_tc_mqprio(adapter); 5815 5746 cxgb4_cleanup_tc_flower(adapter); 5816 5747 
cxgb4_cleanup_tc_u32(adapter); ··· 6386 6315 if (cxgb4_init_tc_mqprio(adapter)) 6387 6316 dev_warn(&pdev->dev, 6388 6317 "could not offload tc mqprio, continuing\n"); 6318 + 6319 + if (cxgb4_init_tc_matchall(adapter)) 6320 + dev_warn(&pdev->dev, 6321 + "could not offload tc matchall, continuing\n"); 6389 6322 } 6390 6323 6391 6324 if (is_offload(adapter) || is_hashfilter(adapter)) {
+36 -16
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
··· 378 378 } 379 379 } 380 380 381 - static void cxgb4_process_flow_actions(struct net_device *in, 382 - struct flow_cls_offload *cls, 383 - struct ch_filter_specification *fs) 381 + void cxgb4_process_flow_actions(struct net_device *in, 382 + struct flow_action *actions, 383 + struct ch_filter_specification *fs) 384 384 { 385 - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); 386 385 struct flow_action_entry *act; 387 386 int i; 388 387 389 - flow_action_for_each(i, act, &rule->action) { 388 + flow_action_for_each(i, act, actions) { 390 389 switch (act->id) { 391 390 case FLOW_ACTION_ACCEPT: 392 391 fs->action = FILTER_PASS; ··· 543 544 return true; 544 545 } 545 546 546 - static int cxgb4_validate_flow_actions(struct net_device *dev, 547 - struct flow_cls_offload *cls) 547 + int cxgb4_validate_flow_actions(struct net_device *dev, 548 + struct flow_action *actions) 548 549 { 549 - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); 550 550 struct flow_action_entry *act; 551 551 bool act_redir = false; 552 552 bool act_pedit = false; 553 553 bool act_vlan = false; 554 554 int i; 555 555 556 - flow_action_for_each(i, act, &rule->action) { 556 + flow_action_for_each(i, act, actions) { 557 557 switch (act->id) { 558 558 case FLOW_ACTION_ACCEPT: 559 559 case FLOW_ACTION_DROP: ··· 634 636 int cxgb4_tc_flower_replace(struct net_device *dev, 635 637 struct flow_cls_offload *cls) 636 638 { 639 + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); 640 + struct netlink_ext_ack *extack = cls->common.extack; 637 641 struct adapter *adap = netdev2adap(dev); 638 642 struct ch_tc_flower_entry *ch_flower; 639 643 struct ch_filter_specification *fs; 640 644 struct filter_ctx ctx; 641 - int fidx; 642 - int ret; 645 + int fidx, ret; 643 646 644 - if (cxgb4_validate_flow_actions(dev, cls)) 647 + if (cxgb4_validate_flow_actions(dev, &rule->action)) 645 648 return -EOPNOTSUPP; 646 649 647 650 if (cxgb4_validate_flow_match(dev, cls)) ··· 657 658 fs = &ch_flower->fs; 
658 659 fs->hitcnts = 1; 659 660 cxgb4_process_flow_match(dev, cls, fs); 660 - cxgb4_process_flow_actions(dev, cls, fs); 661 + cxgb4_process_flow_actions(dev, &rule->action, fs); 661 662 662 663 fs->hash = is_filter_exact_match(adap, fs); 663 664 if (fs->hash) { 664 665 fidx = 0; 665 666 } else { 666 - fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET); 667 - if (fidx < 0) { 668 - netdev_err(dev, "%s: No fidx for offload.\n", __func__); 667 + u8 inet_family; 668 + 669 + inet_family = fs->type ? PF_INET6 : PF_INET; 670 + 671 + /* Note that TC uses prio 0 to indicate stack to 672 + * generate automatic prio and hence doesn't pass prio 673 + * 0 to driver. However, the hardware TCAM index 674 + * starts from 0. Hence, the -1 here. 675 + */ 676 + if (cls->common.prio <= adap->tids.nftids) 677 + fidx = cls->common.prio - 1; 678 + else 679 + fidx = cxgb4_get_free_ftid(dev, inet_family); 680 + 681 + /* Only insert FLOWER rule if its priority doesn't 682 + * conflict with existing rules in the LETCAM. 683 + */ 684 + if (fidx < 0 || 685 + !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) { 686 + NL_SET_ERR_MSG_MOD(extack, 687 + "No free LETCAM index available"); 669 688 ret = -ENOMEM; 670 689 goto free_entry; 671 690 } 672 691 } 692 + 693 + fs->tc_prio = cls->common.prio; 694 + fs->tc_cookie = cls->cookie; 673 695 674 696 init_completion(&ctx.completion); 675 697 ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
+6
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
··· 108 108 #define PEDIT_TCP_SPORT_DPORT 0x0 109 109 #define PEDIT_UDP_SPORT_DPORT 0x0 110 110 111 + void cxgb4_process_flow_actions(struct net_device *in, 112 + struct flow_action *actions, 113 + struct ch_filter_specification *fs); 114 + int cxgb4_validate_flow_actions(struct net_device *dev, 115 + struct flow_action *actions); 116 + 111 117 int cxgb4_tc_flower_replace(struct net_device *dev, 112 118 struct flow_cls_offload *cls); 113 119 int cxgb4_tc_flower_destroy(struct net_device *dev,
+354
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */ 3 + 4 + #include "cxgb4.h" 5 + #include "cxgb4_tc_matchall.h" 6 + #include "sched.h" 7 + #include "cxgb4_uld.h" 8 + #include "cxgb4_filter.h" 9 + #include "cxgb4_tc_flower.h" 10 + 11 + static int cxgb4_matchall_egress_validate(struct net_device *dev, 12 + struct tc_cls_matchall_offload *cls) 13 + { 14 + struct netlink_ext_ack *extack = cls->common.extack; 15 + struct flow_action *actions = &cls->rule->action; 16 + struct port_info *pi = netdev2pinfo(dev); 17 + struct flow_action_entry *entry; 18 + u64 max_link_rate; 19 + u32 i, speed; 20 + int ret; 21 + 22 + if (!flow_action_has_entries(actions)) { 23 + NL_SET_ERR_MSG_MOD(extack, 24 + "Egress MATCHALL offload needs at least 1 policing action"); 25 + return -EINVAL; 26 + } else if (!flow_offload_has_one_action(actions)) { 27 + NL_SET_ERR_MSG_MOD(extack, 28 + "Egress MATCHALL offload only supports 1 policing action"); 29 + return -EINVAL; 30 + } else if (pi->tc_block_shared) { 31 + NL_SET_ERR_MSG_MOD(extack, 32 + "Egress MATCHALL offload not supported with shared blocks"); 33 + return -EINVAL; 34 + } 35 + 36 + ret = t4_get_link_params(pi, NULL, &speed, NULL); 37 + if (ret) { 38 + NL_SET_ERR_MSG_MOD(extack, 39 + "Failed to get max speed supported by the link"); 40 + return -EINVAL; 41 + } 42 + 43 + /* Convert from Mbps to bps */ 44 + max_link_rate = (u64)speed * 1000 * 1000; 45 + 46 + flow_action_for_each(i, entry, actions) { 47 + switch (entry->id) { 48 + case FLOW_ACTION_POLICE: 49 + /* Convert bytes per second to bits per second */ 50 + if (entry->police.rate_bytes_ps * 8 > max_link_rate) { 51 + NL_SET_ERR_MSG_MOD(extack, 52 + "Specified policing max rate is larger than underlying link speed"); 53 + return -ERANGE; 54 + } 55 + break; 56 + default: 57 + NL_SET_ERR_MSG_MOD(extack, 58 + "Only policing action supported with Egress MATCHALL offload"); 59 + return -EOPNOTSUPP; 60 + } 61 + } 62 + 63 + 
return 0; 64 + } 65 + 66 + static int cxgb4_matchall_alloc_tc(struct net_device *dev, 67 + struct tc_cls_matchall_offload *cls) 68 + { 69 + struct ch_sched_params p = { 70 + .type = SCHED_CLASS_TYPE_PACKET, 71 + .u.params.level = SCHED_CLASS_LEVEL_CH_RL, 72 + .u.params.mode = SCHED_CLASS_MODE_CLASS, 73 + .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS, 74 + .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS, 75 + .u.params.class = SCHED_CLS_NONE, 76 + .u.params.minrate = 0, 77 + .u.params.weight = 0, 78 + .u.params.pktsize = dev->mtu, 79 + }; 80 + struct netlink_ext_ack *extack = cls->common.extack; 81 + struct cxgb4_tc_port_matchall *tc_port_matchall; 82 + struct port_info *pi = netdev2pinfo(dev); 83 + struct adapter *adap = netdev2adap(dev); 84 + struct flow_action_entry *entry; 85 + struct sched_class *e; 86 + u32 i; 87 + 88 + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 89 + 90 + flow_action_for_each(i, entry, &cls->rule->action) 91 + if (entry->id == FLOW_ACTION_POLICE) 92 + break; 93 + 94 + /* Convert from bytes per second to Kbps */ 95 + p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); 96 + p.u.params.channel = pi->tx_chan; 97 + e = cxgb4_sched_class_alloc(dev, &p); 98 + if (!e) { 99 + NL_SET_ERR_MSG_MOD(extack, 100 + "No free traffic class available for policing action"); 101 + return -ENOMEM; 102 + } 103 + 104 + tc_port_matchall->egress.hwtc = e->idx; 105 + tc_port_matchall->egress.cookie = cls->cookie; 106 + tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; 107 + return 0; 108 + } 109 + 110 + static void cxgb4_matchall_free_tc(struct net_device *dev) 111 + { 112 + struct cxgb4_tc_port_matchall *tc_port_matchall; 113 + struct port_info *pi = netdev2pinfo(dev); 114 + struct adapter *adap = netdev2adap(dev); 115 + 116 + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 117 + cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); 118 + 119 + tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; 
120 + tc_port_matchall->egress.cookie = 0; 121 + tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; 122 + } 123 + 124 + static int cxgb4_matchall_alloc_filter(struct net_device *dev, 125 + struct tc_cls_matchall_offload *cls) 126 + { 127 + struct netlink_ext_ack *extack = cls->common.extack; 128 + struct cxgb4_tc_port_matchall *tc_port_matchall; 129 + struct port_info *pi = netdev2pinfo(dev); 130 + struct adapter *adap = netdev2adap(dev); 131 + struct ch_filter_specification *fs; 132 + int ret, fidx; 133 + 134 + /* Note that TC uses prio 0 to indicate stack to generate 135 + * automatic prio and hence doesn't pass prio 0 to driver. 136 + * However, the hardware TCAM index starts from 0. Hence, the 137 + * -1 here. 1 slot is enough to create a wildcard matchall 138 + * VIID rule. 139 + */ 140 + if (cls->common.prio <= adap->tids.nftids) 141 + fidx = cls->common.prio - 1; 142 + else 143 + fidx = cxgb4_get_free_ftid(dev, PF_INET); 144 + 145 + /* Only insert MATCHALL rule if its priority doesn't conflict 146 + * with existing rules in the LETCAM. 
147 + */ 148 + if (fidx < 0 || 149 + !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) { 150 + NL_SET_ERR_MSG_MOD(extack, 151 + "No free LETCAM index available"); 152 + return -ENOMEM; 153 + } 154 + 155 + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 156 + fs = &tc_port_matchall->ingress.fs; 157 + memset(fs, 0, sizeof(*fs)); 158 + 159 + fs->tc_prio = cls->common.prio; 160 + fs->tc_cookie = cls->cookie; 161 + fs->hitcnts = 1; 162 + 163 + fs->val.pfvf_vld = 1; 164 + fs->val.pf = adap->pf; 165 + fs->val.vf = pi->vin; 166 + 167 + cxgb4_process_flow_actions(dev, &cls->rule->action, fs); 168 + 169 + ret = cxgb4_set_filter(dev, fidx, fs); 170 + if (ret) 171 + return ret; 172 + 173 + tc_port_matchall->ingress.tid = fidx; 174 + tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; 175 + return 0; 176 + } 177 + 178 + static int cxgb4_matchall_free_filter(struct net_device *dev) 179 + { 180 + struct cxgb4_tc_port_matchall *tc_port_matchall; 181 + struct port_info *pi = netdev2pinfo(dev); 182 + struct adapter *adap = netdev2adap(dev); 183 + int ret; 184 + 185 + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 186 + 187 + ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid, 188 + &tc_port_matchall->ingress.fs); 189 + if (ret) 190 + return ret; 191 + 192 + tc_port_matchall->ingress.packets = 0; 193 + tc_port_matchall->ingress.bytes = 0; 194 + tc_port_matchall->ingress.last_used = 0; 195 + tc_port_matchall->ingress.tid = 0; 196 + tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED; 197 + return 0; 198 + } 199 + 200 + int cxgb4_tc_matchall_replace(struct net_device *dev, 201 + struct tc_cls_matchall_offload *cls_matchall, 202 + bool ingress) 203 + { 204 + struct netlink_ext_ack *extack = cls_matchall->common.extack; 205 + struct cxgb4_tc_port_matchall *tc_port_matchall; 206 + struct port_info *pi = netdev2pinfo(dev); 207 + struct adapter *adap = netdev2adap(dev); 208 + int ret; 209 + 210 + tc_port_matchall 
= &adap->tc_matchall->port_matchall[pi->port_id]; 211 + if (ingress) { 212 + if (tc_port_matchall->ingress.state == 213 + CXGB4_MATCHALL_STATE_ENABLED) { 214 + NL_SET_ERR_MSG_MOD(extack, 215 + "Only 1 Ingress MATCHALL can be offloaded"); 216 + return -ENOMEM; 217 + } 218 + 219 + ret = cxgb4_validate_flow_actions(dev, 220 + &cls_matchall->rule->action); 221 + if (ret) 222 + return ret; 223 + 224 + return cxgb4_matchall_alloc_filter(dev, cls_matchall); 225 + } 226 + 227 + if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) { 228 + NL_SET_ERR_MSG_MOD(extack, 229 + "Only 1 Egress MATCHALL can be offloaded"); 230 + return -ENOMEM; 231 + } 232 + 233 + ret = cxgb4_matchall_egress_validate(dev, cls_matchall); 234 + if (ret) 235 + return ret; 236 + 237 + return cxgb4_matchall_alloc_tc(dev, cls_matchall); 238 + } 239 + 240 + int cxgb4_tc_matchall_destroy(struct net_device *dev, 241 + struct tc_cls_matchall_offload *cls_matchall, 242 + bool ingress) 243 + { 244 + struct cxgb4_tc_port_matchall *tc_port_matchall; 245 + struct port_info *pi = netdev2pinfo(dev); 246 + struct adapter *adap = netdev2adap(dev); 247 + 248 + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 249 + if (ingress) { 250 + if (cls_matchall->cookie != 251 + tc_port_matchall->ingress.fs.tc_cookie) 252 + return -ENOENT; 253 + 254 + return cxgb4_matchall_free_filter(dev); 255 + } 256 + 257 + if (cls_matchall->cookie != tc_port_matchall->egress.cookie) 258 + return -ENOENT; 259 + 260 + cxgb4_matchall_free_tc(dev); 261 + return 0; 262 + } 263 + 264 + int cxgb4_tc_matchall_stats(struct net_device *dev, 265 + struct tc_cls_matchall_offload *cls_matchall) 266 + { 267 + struct cxgb4_tc_port_matchall *tc_port_matchall; 268 + struct port_info *pi = netdev2pinfo(dev); 269 + struct adapter *adap = netdev2adap(dev); 270 + u64 packets, bytes; 271 + int ret; 272 + 273 + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 274 + if (tc_port_matchall->ingress.state == 
CXGB4_MATCHALL_STATE_DISABLED) 275 + return -ENOENT; 276 + 277 + ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid, 278 + &packets, &bytes, 279 + tc_port_matchall->ingress.fs.hash); 280 + if (ret) 281 + return ret; 282 + 283 + if (tc_port_matchall->ingress.packets != packets) { 284 + flow_stats_update(&cls_matchall->stats, 285 + bytes - tc_port_matchall->ingress.bytes, 286 + packets - tc_port_matchall->ingress.packets, 287 + tc_port_matchall->ingress.last_used); 288 + 289 + tc_port_matchall->ingress.packets = packets; 290 + tc_port_matchall->ingress.bytes = bytes; 291 + tc_port_matchall->ingress.last_used = jiffies; 292 + } 293 + 294 + return 0; 295 + } 296 + 297 + static void cxgb4_matchall_disable_offload(struct net_device *dev) 298 + { 299 + struct cxgb4_tc_port_matchall *tc_port_matchall; 300 + struct port_info *pi = netdev2pinfo(dev); 301 + struct adapter *adap = netdev2adap(dev); 302 + 303 + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 304 + if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) 305 + cxgb4_matchall_free_tc(dev); 306 + 307 + if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED) 308 + cxgb4_matchall_free_filter(dev); 309 + } 310 + 311 + int cxgb4_init_tc_matchall(struct adapter *adap) 312 + { 313 + struct cxgb4_tc_port_matchall *tc_port_matchall; 314 + struct cxgb4_tc_matchall *tc_matchall; 315 + int ret; 316 + 317 + tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL); 318 + if (!tc_matchall) 319 + return -ENOMEM; 320 + 321 + tc_port_matchall = kcalloc(adap->params.nports, 322 + sizeof(*tc_port_matchall), 323 + GFP_KERNEL); 324 + if (!tc_port_matchall) { 325 + ret = -ENOMEM; 326 + goto out_free_matchall; 327 + } 328 + 329 + tc_matchall->port_matchall = tc_port_matchall; 330 + adap->tc_matchall = tc_matchall; 331 + return 0; 332 + 333 + out_free_matchall: 334 + kfree(tc_matchall); 335 + return ret; 336 + } 337 + 338 + void cxgb4_cleanup_tc_matchall(struct adapter *adap) 339 
+ { 340 + u8 i; 341 + 342 + if (adap->tc_matchall) { 343 + if (adap->tc_matchall->port_matchall) { 344 + for (i = 0; i < adap->params.nports; i++) { 345 + struct net_device *dev = adap->port[i]; 346 + 347 + if (dev) 348 + cxgb4_matchall_disable_offload(dev); 349 + } 350 + kfree(adap->tc_matchall->port_matchall); 351 + } 352 + kfree(adap->tc_matchall); 353 + } 354 + }
+49
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */ 3 + 4 + #ifndef __CXGB4_TC_MATCHALL_H__ 5 + #define __CXGB4_TC_MATCHALL_H__ 6 + 7 + #include <net/pkt_cls.h> 8 + 9 + enum cxgb4_matchall_state { 10 + CXGB4_MATCHALL_STATE_DISABLED = 0, 11 + CXGB4_MATCHALL_STATE_ENABLED, 12 + }; 13 + 14 + struct cxgb4_matchall_egress_entry { 15 + enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ 16 + u8 hwtc; /* Traffic class bound to port */ 17 + u64 cookie; /* Used to identify the MATCHALL rule offloaded */ 18 + }; 19 + 20 + struct cxgb4_matchall_ingress_entry { 21 + enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ 22 + u32 tid; /* Index to hardware filter entry */ 23 + struct ch_filter_specification fs; /* Filter entry */ 24 + u64 bytes; /* # of bytes hitting the filter */ 25 + u64 packets; /* # of packets hitting the filter */ 26 + u64 last_used; /* Last updated jiffies time */ 27 + }; 28 + 29 + struct cxgb4_tc_port_matchall { 30 + struct cxgb4_matchall_egress_entry egress; /* Egress offload info */ 31 + struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */ 32 + }; 33 + 34 + struct cxgb4_tc_matchall { 35 + struct cxgb4_tc_port_matchall *port_matchall; /* Per port entry */ 36 + }; 37 + 38 + int cxgb4_tc_matchall_replace(struct net_device *dev, 39 + struct tc_cls_matchall_offload *cls_matchall, 40 + bool ingress); 41 + int cxgb4_tc_matchall_destroy(struct net_device *dev, 42 + struct tc_cls_matchall_offload *cls_matchall, 43 + bool ingress); 44 + int cxgb4_tc_matchall_stats(struct net_device *dev, 45 + struct tc_cls_matchall_offload *cls_matchall); 46 + 47 + int cxgb4_init_tc_matchall(struct adapter *adap); 48 + void cxgb4_cleanup_tc_matchall(struct adapter *adap); 49 + #endif /* __CXGB4_TC_MATCHALL_H__ */
+2 -3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
··· 11 11 u64 min_rate = 0, max_rate = 0, max_link_rate; 12 12 struct port_info *pi = netdev2pinfo(dev); 13 13 struct adapter *adap = netdev2adap(dev); 14 - u32 qcount = 0, qoffset = 0; 15 - u32 link_ok, speed, mtu; 14 + u32 speed, qcount = 0, qoffset = 0; 16 15 int ret; 17 16 u8 i; 18 17 ··· 34 35 return -ERANGE; 35 36 } 36 37 37 - ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); 38 + ret = t4_get_link_params(pi, NULL, &speed, NULL); 38 39 if (ret) { 39 40 netdev_err(dev, "Failed to get link speed, ret: %d\n", ret); 40 41 return -EINVAL;
+22 -14
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
··· 36 36 #include <net/tc_act/tc_mirred.h> 37 37 38 38 #include "cxgb4.h" 39 + #include "cxgb4_filter.h" 39 40 #include "cxgb4_tc_u32_parse.h" 40 41 #include "cxgb4_tc_u32.h" 41 42 ··· 149 148 int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) 150 149 { 151 150 const struct cxgb4_match_field *start, *link_start = NULL; 151 + struct netlink_ext_ack *extack = cls->common.extack; 152 152 struct adapter *adapter = netdev2adap(dev); 153 153 __be16 protocol = cls->common.protocol; 154 154 struct ch_filter_specification fs; ··· 166 164 if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6)) 167 165 return -EOPNOTSUPP; 168 166 169 - /* Fetch the location to insert the filter. */ 170 - filter_id = cls->knode.handle & 0xFFFFF; 167 + /* Note that TC uses prio 0 to indicate stack to generate 168 + * automatic prio and hence doesn't pass prio 0 to driver. 169 + * However, the hardware TCAM index starts from 0. Hence, the 170 + * -1 here. 171 + */ 172 + filter_id = TC_U32_NODE(cls->knode.handle) - 1; 171 173 172 - if (filter_id > adapter->tids.nftids) { 173 - dev_err(adapter->pdev_dev, 174 - "Location %d out of range for insertion. Max: %d\n", 175 - filter_id, adapter->tids.nftids); 176 - return -ERANGE; 174 + /* Only insert U32 rule if its priority doesn't conflict with 175 + * existing rules in the LETCAM. 176 + */ 177 + if (filter_id >= adapter->tids.nftids || 178 + !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) { 179 + NL_SET_ERR_MSG_MOD(extack, 180 + "No free LETCAM index available"); 181 + return -ENOMEM; 177 182 } 178 183 179 184 t = adapter->tc_u32; ··· 198 189 return -EINVAL; 199 190 200 191 memset(&fs, 0, sizeof(fs)); 192 + 193 + fs.tc_prio = cls->common.prio; 194 + fs.tc_cookie = cls->knode.handle; 201 195 202 196 if (protocol == htons(ETH_P_IPV6)) { 203 197 start = cxgb4_ipv6_fields; ··· 362 350 return -EOPNOTSUPP; 363 351 364 352 /* Fetch the location to delete the filter. 
*/ 365 - filter_id = cls->knode.handle & 0xFFFFF; 366 - 367 - if (filter_id > adapter->tids.nftids) { 368 - dev_err(adapter->pdev_dev, 369 - "Location %d out of range for deletion. Max: %d\n", 370 - filter_id, adapter->tids.nftids); 353 + filter_id = TC_U32_NODE(cls->knode.handle) - 1; 354 + if (filter_id >= adapter->tids.nftids || 355 + cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie) 371 356 return -ERANGE; 372 - } 373 357 374 358 t = adapter->tc_u32; 375 359 handle = cls->knode.handle;
+39 -17
drivers/net/ethernet/chelsio/cxgb4/sched.c
··· 50 50 e = &s->tab[p->u.params.class]; 51 51 switch (op) { 52 52 case SCHED_FW_OP_ADD: 53 + case SCHED_FW_OP_DEL: 53 54 err = t4_sched_params(adap, p->type, 54 55 p->u.params.level, p->u.params.mode, 55 56 p->u.params.rateunit, ··· 189 188 e = &pi->sched_tbl->tab[qe->param.class]; 190 189 list_del(&qe->list); 191 190 kvfree(qe); 192 - if (atomic_dec_and_test(&e->refcnt)) { 193 - e->state = SCHED_STATE_UNUSED; 194 - memset(&e->info, 0, sizeof(e->info)); 195 - } 191 + if (atomic_dec_and_test(&e->refcnt)) 192 + cxgb4_sched_class_free(adap->port[pi->port_id], e->idx); 196 193 } 197 194 return err; 198 195 } ··· 260 261 e = &pi->sched_tbl->tab[fe->param.class]; 261 262 list_del(&fe->list); 262 263 kvfree(fe); 263 - if (atomic_dec_and_test(&e->refcnt)) { 264 - e->state = SCHED_STATE_UNUSED; 265 - memset(&e->info, 0, sizeof(e->info)); 266 - } 264 + if (atomic_dec_and_test(&e->refcnt)) 265 + cxgb4_sched_class_free(adap->port[pi->port_id], e->idx); 267 266 } 268 267 return err; 269 268 } ··· 466 469 struct sched_class *found = NULL; 467 470 struct sched_class *e, *end; 468 471 469 - /* Only allow tc to be shared among SCHED_FLOWC types. For 470 - * other types, always allocate a new tc. 471 - */ 472 - if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) { 472 + if (!p) { 473 473 /* Get any available unused class */ 474 474 end = &s->tab[s->sched_size]; 475 475 for (e = &s->tab[0]; e != end; ++e) { ··· 508 514 static struct sched_class *t4_sched_class_alloc(struct port_info *pi, 509 515 struct ch_sched_params *p) 510 516 { 511 - struct sched_class *e; 517 + struct sched_class *e = NULL; 512 518 u8 class_id; 513 519 int err; 514 520 ··· 523 529 if (class_id != SCHED_CLS_NONE) 524 530 return NULL; 525 531 526 - /* See if there's an exisiting class with same 527 - * requested sched params 532 + /* See if there's an exisiting class with same requested sched 533 + * params. Classes can only be shared among FLOWC types. For 534 + * other types, always request a new class. 
528 535 */ 529 - e = t4_sched_class_lookup(pi, p); 536 + if (p->u.params.mode == SCHED_CLASS_MODE_FLOW) 537 + e = t4_sched_class_lookup(pi, p); 538 + 530 539 if (!e) { 531 540 struct ch_sched_params np; 532 541 ··· 589 592 { 590 593 struct port_info *pi = netdev2pinfo(dev); 591 594 struct sched_table *s = pi->sched_tbl; 595 + struct ch_sched_params p; 592 596 struct sched_class *e; 597 + u32 speed; 598 + int ret; 593 599 594 600 e = &s->tab[classid]; 595 - if (!atomic_read(&e->refcnt)) { 601 + if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) { 602 + /* Port based rate limiting needs explicit reset back 603 + * to max rate. But, we'll do explicit reset for all 604 + * types, instead of just port based type, to be on 605 + * the safer side. 606 + */ 607 + memcpy(&p, &e->info, sizeof(p)); 608 + /* Always reset mode to 0. Otherwise, FLOWC mode will 609 + * still be enabled even after resetting the traffic 610 + * class. 611 + */ 612 + p.u.params.mode = 0; 613 + p.u.params.minrate = 0; 614 + p.u.params.pktsize = 0; 615 + 616 + ret = t4_get_link_params(pi, NULL, &speed, NULL); 617 + if (!ret) 618 + p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */ 619 + else 620 + p.u.params.maxrate = SCHED_MAX_RATE_KBPS; 621 + 622 + t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL); 623 + 596 624 e->state = SCHED_STATE_UNUSED; 597 625 memset(&e->info, 0, sizeof(e->info)); 598 626 }
+1
drivers/net/ethernet/chelsio/cxgb4/sched.h
··· 52 52 53 53 enum sched_fw_ops { 54 54 SCHED_FW_OP_ADD, 55 + SCHED_FW_OP_DEL, 55 56 }; 56 57 57 58 enum sched_bind_type {
+7 -4
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 8777 8777 unsigned int *speedp, unsigned int *mtup) 8778 8778 { 8779 8779 unsigned int fw_caps = pi->adapter->params.fw_caps_support; 8780 - struct fw_port_cmd port_cmd; 8781 8780 unsigned int action, link_ok, mtu; 8781 + struct fw_port_cmd port_cmd; 8782 8782 fw_port_cap32_t linkattr; 8783 8783 int ret; 8784 8784 ··· 8813 8813 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); 8814 8814 } 8815 8815 8816 - *link_okp = link_ok; 8817 - *speedp = fwcap_to_speed(linkattr); 8818 - *mtup = mtu; 8816 + if (link_okp) 8817 + *link_okp = link_ok; 8818 + if (speedp) 8819 + *speedp = fwcap_to_speed(linkattr); 8820 + if (mtup) 8821 + *mtup = mtu; 8819 8822 8820 8823 return 0; 8821 8824 }