Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sfc: offload conntrack flow entries (match only) from CT zones

No handling yet for FLOW_ACTION_MANGLE (NAT or NAPT) actions.

Reviewed-by: Pieter Jansen van Vuuren <pieter.jansen-van-vuuren@amd.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Edward Cree and committed by David S. Miller
1909387f 94aa05bd

+429 -7
+2 -2
drivers/net/ethernet/sfc/tc.h
··· 18 18 19 19 #define IS_ALL_ONES(v) (!(typeof (v))~(v)) 20 20 21 - #ifdef CONFIG_IPV6 22 21 static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr) 23 22 { 24 23 return !memchr_inv(addr, 0xff, sizeof(*addr)); 25 24 } 26 - #endif 27 25 28 26 struct efx_tc_encap_action; /* see tc_encap_actions.h */ 29 27 ··· 195 197 * @encap_match_ht: Hashtable of TC encap matches 196 198 * @match_action_ht: Hashtable of TC match-action rules 197 199 * @ct_zone_ht: Hashtable of TC conntrack flowtable bindings 200 + * @ct_ht: Hashtable of TC conntrack flow entries 198 201 * @neigh_ht: Hashtable of neighbour watches (&struct efx_neigh_binder) 199 202 * @meta_ct: MAE table layout for conntrack table 200 203 * @reps_mport_id: MAE port allocated for representor RX ··· 229 230 struct rhashtable encap_match_ht; 230 231 struct rhashtable match_action_ht; 231 232 struct rhashtable ct_zone_ht; 233 + struct rhashtable ct_ht; 232 234 struct rhashtable neigh_ht; 233 235 struct efx_tc_table_ct meta_ct; 234 236 u32 reps_mport_id, reps_mport_vport_id;
+416 -1
drivers/net/ethernet/sfc/tc_conntrack.c
··· 21 21 .head_offset = offsetof(struct efx_tc_ct_zone, linkage), 22 22 }; 23 23 24 + static const struct rhashtable_params efx_tc_ct_ht_params = { 25 + .key_len = offsetof(struct efx_tc_ct_entry, linkage), 26 + .key_offset = 0, 27 + .head_offset = offsetof(struct efx_tc_ct_entry, linkage), 28 + }; 29 + 24 30 static void efx_tc_ct_zone_free(void *ptr, void *arg) 25 31 { 26 32 struct efx_tc_ct_zone *zone = ptr; ··· 40 34 kfree(zone); 41 35 } 42 36 37 + static void efx_tc_ct_free(void *ptr, void *arg) 38 + { 39 + struct efx_tc_ct_entry *conn = ptr; 40 + struct efx_nic *efx = arg; 41 + 42 + netif_err(efx, drv, efx->net_dev, 43 + "tc ct_entry %lx still present at teardown\n", 44 + conn->cookie); 45 + 46 + /* We can release the counter, but we can't remove the CT itself 47 + * from hardware because the table meta is already gone. 48 + */ 49 + efx_tc_flower_release_counter(efx, conn->cnt); 50 + kfree(conn); 51 + } 52 + 43 53 int efx_tc_init_conntrack(struct efx_nic *efx) 44 54 { 45 55 int rc; 46 56 47 57 rc = rhashtable_init(&efx->tc->ct_zone_ht, &efx_tc_ct_zone_ht_params); 48 58 if (rc < 0) 49 - return rc; 59 + goto fail_ct_zone_ht; 60 + rc = rhashtable_init(&efx->tc->ct_ht, &efx_tc_ct_ht_params); 61 + if (rc < 0) 62 + goto fail_ct_ht; 50 63 return 0; 64 + fail_ct_ht: 65 + rhashtable_destroy(&efx->tc->ct_zone_ht); 66 + fail_ct_zone_ht: 67 + return rc; 51 68 } 52 69 53 70 void efx_tc_fini_conntrack(struct efx_nic *efx) 54 71 { 55 72 rhashtable_free_and_destroy(&efx->tc->ct_zone_ht, efx_tc_ct_zone_free, NULL); 73 + rhashtable_free_and_destroy(&efx->tc->ct_ht, efx_tc_ct_free, efx); 74 + } 75 + 76 + #define EFX_NF_TCP_FLAG(flg) cpu_to_be16(be32_to_cpu(TCP_FLAG_##flg) >> 16) 77 + 78 + static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr, 79 + struct efx_tc_ct_entry *conn) 80 + { 81 + struct flow_dissector *dissector = fr->match.dissector; 82 + unsigned char ipv = 0; 83 + bool tcp = false; 84 + 85 + if (flow_rule_match_key(fr, 
FLOW_DISSECTOR_KEY_CONTROL)) { 86 + struct flow_match_control fm; 87 + 88 + flow_rule_match_control(fr, &fm); 89 + if (IS_ALL_ONES(fm.mask->addr_type)) 90 + switch (fm.key->addr_type) { 91 + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 92 + ipv = 4; 93 + break; 94 + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 95 + ipv = 6; 96 + break; 97 + default: 98 + break; 99 + } 100 + } 101 + 102 + if (!ipv) { 103 + netif_dbg(efx, drv, efx->net_dev, 104 + "Conntrack missing ipv specification\n"); 105 + return -EOPNOTSUPP; 106 + } 107 + 108 + if (dissector->used_keys & 109 + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 110 + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 111 + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 112 + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 113 + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 114 + BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | 115 + BIT_ULL(FLOW_DISSECTOR_KEY_META))) { 116 + netif_dbg(efx, drv, efx->net_dev, 117 + "Unsupported conntrack keys %#llx\n", 118 + dissector->used_keys); 119 + return -EOPNOTSUPP; 120 + } 121 + 122 + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_BASIC)) { 123 + struct flow_match_basic fm; 124 + 125 + flow_rule_match_basic(fr, &fm); 126 + if (!IS_ALL_ONES(fm.mask->n_proto)) { 127 + netif_dbg(efx, drv, efx->net_dev, 128 + "Conntrack eth_proto is not exact-match; mask %04x\n", 129 + ntohs(fm.mask->n_proto)); 130 + return -EOPNOTSUPP; 131 + } 132 + conn->eth_proto = fm.key->n_proto; 133 + if (conn->eth_proto != (ipv == 4 ? 
htons(ETH_P_IP) 134 + : htons(ETH_P_IPV6))) { 135 + netif_dbg(efx, drv, efx->net_dev, 136 + "Conntrack eth_proto is not IPv%u, is %04x\n", 137 + ipv, ntohs(conn->eth_proto)); 138 + return -EOPNOTSUPP; 139 + } 140 + if (!IS_ALL_ONES(fm.mask->ip_proto)) { 141 + netif_dbg(efx, drv, efx->net_dev, 142 + "Conntrack ip_proto is not exact-match; mask %02x\n", 143 + fm.mask->ip_proto); 144 + return -EOPNOTSUPP; 145 + } 146 + conn->ip_proto = fm.key->ip_proto; 147 + switch (conn->ip_proto) { 148 + case IPPROTO_TCP: 149 + tcp = true; 150 + break; 151 + case IPPROTO_UDP: 152 + break; 153 + default: 154 + netif_dbg(efx, drv, efx->net_dev, 155 + "Conntrack ip_proto not TCP or UDP, is %02x\n", 156 + conn->ip_proto); 157 + return -EOPNOTSUPP; 158 + } 159 + } else { 160 + netif_dbg(efx, drv, efx->net_dev, 161 + "Conntrack missing eth_proto, ip_proto\n"); 162 + return -EOPNOTSUPP; 163 + } 164 + 165 + if (ipv == 4 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 166 + struct flow_match_ipv4_addrs fm; 167 + 168 + flow_rule_match_ipv4_addrs(fr, &fm); 169 + if (!IS_ALL_ONES(fm.mask->src)) { 170 + netif_dbg(efx, drv, efx->net_dev, 171 + "Conntrack ipv4.src is not exact-match; mask %08x\n", 172 + ntohl(fm.mask->src)); 173 + return -EOPNOTSUPP; 174 + } 175 + conn->src_ip = fm.key->src; 176 + if (!IS_ALL_ONES(fm.mask->dst)) { 177 + netif_dbg(efx, drv, efx->net_dev, 178 + "Conntrack ipv4.dst is not exact-match; mask %08x\n", 179 + ntohl(fm.mask->dst)); 180 + return -EOPNOTSUPP; 181 + } 182 + conn->dst_ip = fm.key->dst; 183 + } else if (ipv == 6 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 184 + struct flow_match_ipv6_addrs fm; 185 + 186 + flow_rule_match_ipv6_addrs(fr, &fm); 187 + if (!efx_ipv6_addr_all_ones(&fm.mask->src)) { 188 + netif_dbg(efx, drv, efx->net_dev, 189 + "Conntrack ipv6.src is not exact-match; mask %pI6\n", 190 + &fm.mask->src); 191 + return -EOPNOTSUPP; 192 + } 193 + conn->src_ip6 = fm.key->src; 194 + if (!efx_ipv6_addr_all_ones(&fm.mask->dst)) { 
195 + netif_dbg(efx, drv, efx->net_dev, 196 + "Conntrack ipv6.dst is not exact-match; mask %pI6\n", 197 + &fm.mask->dst); 198 + return -EOPNOTSUPP; 199 + } 200 + conn->dst_ip6 = fm.key->dst; 201 + } else { 202 + netif_dbg(efx, drv, efx->net_dev, 203 + "Conntrack missing IPv%u addrs\n", ipv); 204 + return -EOPNOTSUPP; 205 + } 206 + 207 + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_PORTS)) { 208 + struct flow_match_ports fm; 209 + 210 + flow_rule_match_ports(fr, &fm); 211 + if (!IS_ALL_ONES(fm.mask->src)) { 212 + netif_dbg(efx, drv, efx->net_dev, 213 + "Conntrack ports.src is not exact-match; mask %04x\n", 214 + ntohs(fm.mask->src)); 215 + return -EOPNOTSUPP; 216 + } 217 + conn->l4_sport = fm.key->src; 218 + if (!IS_ALL_ONES(fm.mask->dst)) { 219 + netif_dbg(efx, drv, efx->net_dev, 220 + "Conntrack ports.dst is not exact-match; mask %04x\n", 221 + ntohs(fm.mask->dst)); 222 + return -EOPNOTSUPP; 223 + } 224 + conn->l4_dport = fm.key->dst; 225 + } else { 226 + netif_dbg(efx, drv, efx->net_dev, "Conntrack missing L4 ports\n"); 227 + return -EOPNOTSUPP; 228 + } 229 + 230 + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_TCP)) { 231 + __be16 tcp_interesting_flags; 232 + struct flow_match_tcp fm; 233 + 234 + if (!tcp) { 235 + netif_dbg(efx, drv, efx->net_dev, 236 + "Conntrack matching on TCP keys but ipproto is not tcp\n"); 237 + return -EOPNOTSUPP; 238 + } 239 + flow_rule_match_tcp(fr, &fm); 240 + tcp_interesting_flags = EFX_NF_TCP_FLAG(SYN) | 241 + EFX_NF_TCP_FLAG(RST) | 242 + EFX_NF_TCP_FLAG(FIN); 243 + /* If any of the tcp_interesting_flags is set, we always 244 + * inhibit CT lookup in LHS (so SW can update CT table). 
245 + */ 246 + if (fm.key->flags & tcp_interesting_flags) { 247 + netif_dbg(efx, drv, efx->net_dev, 248 + "Unsupported conntrack tcp.flags %04x/%04x\n", 249 + ntohs(fm.key->flags), ntohs(fm.mask->flags)); 250 + return -EOPNOTSUPP; 251 + } 252 + /* Other TCP flags cannot be filtered at CT */ 253 + if (fm.mask->flags & ~tcp_interesting_flags) { 254 + netif_dbg(efx, drv, efx->net_dev, 255 + "Unsupported conntrack tcp.flags %04x/%04x\n", 256 + ntohs(fm.key->flags), ntohs(fm.mask->flags)); 257 + return -EOPNOTSUPP; 258 + } 259 + } 260 + 261 + return 0; 262 + } 263 + 264 + static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone, 265 + struct flow_cls_offload *tc) 266 + { 267 + struct flow_rule *fr = flow_cls_offload_flow_rule(tc); 268 + struct efx_tc_ct_entry *conn, *old; 269 + struct efx_nic *efx = ct_zone->efx; 270 + const struct flow_action_entry *fa; 271 + struct efx_tc_counter *cnt; 272 + int rc, i; 273 + 274 + if (WARN_ON(!efx->tc)) 275 + return -ENETDOWN; 276 + if (WARN_ON(!efx->tc->up)) 277 + return -ENETDOWN; 278 + 279 + conn = kzalloc(sizeof(*conn), GFP_USER); 280 + if (!conn) 281 + return -ENOMEM; 282 + conn->cookie = tc->cookie; 283 + old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht, 284 + &conn->linkage, 285 + efx_tc_ct_ht_params); 286 + if (old) { 287 + netif_dbg(efx, drv, efx->net_dev, 288 + "Already offloaded conntrack (cookie %lx)\n", tc->cookie); 289 + rc = -EEXIST; 290 + goto release; 291 + } 292 + 293 + /* Parse match */ 294 + conn->zone = ct_zone; 295 + rc = efx_tc_ct_parse_match(efx, fr, conn); 296 + if (rc) 297 + goto release; 298 + 299 + /* Parse actions */ 300 + flow_action_for_each(i, fa, &fr->action) { 301 + switch (fa->id) { 302 + case FLOW_ACTION_CT_METADATA: 303 + conn->mark = fa->ct_metadata.mark; 304 + if (memchr_inv(fa->ct_metadata.labels, 0, sizeof(fa->ct_metadata.labels))) { 305 + netif_dbg(efx, drv, efx->net_dev, 306 + "Setting CT label not supported\n"); 307 + rc = -EOPNOTSUPP; 308 + goto release; 309 + } 310 + break; 311 + 
default: 312 + netif_dbg(efx, drv, efx->net_dev, 313 + "Unhandled action %u for conntrack\n", fa->id); 314 + rc = -EOPNOTSUPP; 315 + goto release; 316 + } 317 + } 318 + 319 + /* fill in defaults for unmangled values */ 320 + conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip; 321 + conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport; 322 + 323 + cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT); 324 + if (IS_ERR(cnt)) { 325 + rc = PTR_ERR(cnt); 326 + goto release; 327 + } 328 + conn->cnt = cnt; 329 + 330 + rc = efx_mae_insert_ct(efx, conn); 331 + if (rc) { 332 + netif_dbg(efx, drv, efx->net_dev, 333 + "Failed to insert conntrack, %d\n", rc); 334 + goto release; 335 + } 336 + mutex_lock(&ct_zone->mutex); 337 + list_add_tail(&conn->list, &ct_zone->cts); 338 + mutex_unlock(&ct_zone->mutex); 339 + return 0; 340 + release: 341 + if (conn->cnt) 342 + efx_tc_flower_release_counter(efx, conn->cnt); 343 + if (!old) 344 + rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage, 345 + efx_tc_ct_ht_params); 346 + kfree(conn); 347 + return rc; 348 + } 349 + 350 + /* Caller must follow with efx_tc_ct_remove_finish() after RCU grace period! */ 351 + static void efx_tc_ct_remove(struct efx_nic *efx, struct efx_tc_ct_entry *conn) 352 + { 353 + int rc; 354 + 355 + /* Remove it from HW */ 356 + rc = efx_mae_remove_ct(efx, conn); 357 + /* Delete it from SW */ 358 + rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage, 359 + efx_tc_ct_ht_params); 360 + if (rc) { 361 + netif_err(efx, drv, efx->net_dev, 362 + "Failed to remove conntrack %lx from hw, rc %d\n", 363 + conn->cookie, rc); 364 + } else { 365 + netif_dbg(efx, drv, efx->net_dev, "Removed conntrack %lx\n", 366 + conn->cookie); 367 + } 368 + } 369 + 370 + static void efx_tc_ct_remove_finish(struct efx_nic *efx, struct efx_tc_ct_entry *conn) 371 + { 372 + /* Remove related CT counter. This is delayed after the conn object we 373 + * are working with has been successfully removed. 
This protects the 374 + * counter from being used-after-free inside efx_tc_ct_stats. 375 + */ 376 + efx_tc_flower_release_counter(efx, conn->cnt); 377 + kfree(conn); 378 + } 379 + 380 + static int efx_tc_ct_destroy(struct efx_tc_ct_zone *ct_zone, 381 + struct flow_cls_offload *tc) 382 + { 383 + struct efx_nic *efx = ct_zone->efx; 384 + struct efx_tc_ct_entry *conn; 385 + 386 + conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie, 387 + efx_tc_ct_ht_params); 388 + if (!conn) { 389 + netif_warn(efx, drv, efx->net_dev, 390 + "Conntrack %lx not found to remove\n", tc->cookie); 391 + return -ENOENT; 392 + } 393 + 394 + mutex_lock(&ct_zone->mutex); 395 + list_del(&conn->list); 396 + efx_tc_ct_remove(efx, conn); 397 + mutex_unlock(&ct_zone->mutex); 398 + synchronize_rcu(); 399 + efx_tc_ct_remove_finish(efx, conn); 400 + return 0; 401 + } 402 + 403 + static int efx_tc_ct_stats(struct efx_tc_ct_zone *ct_zone, 404 + struct flow_cls_offload *tc) 405 + { 406 + struct efx_nic *efx = ct_zone->efx; 407 + struct efx_tc_ct_entry *conn; 408 + struct efx_tc_counter *cnt; 409 + 410 + rcu_read_lock(); 411 + conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie, 412 + efx_tc_ct_ht_params); 413 + if (!conn) { 414 + netif_warn(efx, drv, efx->net_dev, 415 + "Conntrack %lx not found for stats\n", tc->cookie); 416 + rcu_read_unlock(); 417 + return -ENOENT; 418 + } 419 + 420 + cnt = conn->cnt; 421 + spin_lock_bh(&cnt->lock); 422 + /* Report only last use */ 423 + flow_stats_update(&tc->stats, 0, 0, 0, cnt->touched, 424 + FLOW_ACTION_HW_STATS_DELAYED); 425 + spin_unlock_bh(&cnt->lock); 426 + rcu_read_unlock(); 427 + 428 + return 0; 56 429 } 57 430 58 431 static int efx_tc_flow_block(enum tc_setup_type type, void *type_data, 59 432 void *cb_priv) 60 433 { 434 + struct flow_cls_offload *tcb = type_data; 435 + struct efx_tc_ct_zone *ct_zone = cb_priv; 436 + 437 + if (type != TC_SETUP_CLSFLOWER) 438 + return -EOPNOTSUPP; 439 + 440 + switch (tcb->command) { 441 + case FLOW_CLS_REPLACE: 
442 + return efx_tc_ct_replace(ct_zone, tcb); 443 + case FLOW_CLS_DESTROY: 444 + return efx_tc_ct_destroy(ct_zone, tcb); 445 + case FLOW_CLS_STATS: 446 + return efx_tc_ct_stats(ct_zone, tcb); 447 + default: 448 + break; 449 + }; 450 + 61 451 return -EOPNOTSUPP; 62 452 } 63 453 ··· 483 81 } 484 82 ct_zone->nf_ft = ct_ft; 485 83 ct_zone->efx = efx; 84 + INIT_LIST_HEAD(&ct_zone->cts); 85 + mutex_init(&ct_zone->mutex); 486 86 rc = nf_flow_table_offload_add_cb(ct_ft, efx_tc_flow_block, ct_zone); 487 87 netif_dbg(efx, drv, efx->net_dev, "Adding new ct_zone for %u, rc %d\n", 488 88 zone, rc); ··· 502 98 void efx_tc_ct_unregister_zone(struct efx_nic *efx, 503 99 struct efx_tc_ct_zone *ct_zone) 504 100 { 101 + struct efx_tc_ct_entry *conn, *next; 102 + 505 103 if (!refcount_dec_and_test(&ct_zone->ref)) 506 104 return; /* still in use */ 507 105 nf_flow_table_offload_del_cb(ct_zone->nf_ft, efx_tc_flow_block, ct_zone); 508 106 rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage, 509 107 efx_tc_ct_zone_ht_params); 108 + mutex_lock(&ct_zone->mutex); 109 + list_for_each_entry(conn, &ct_zone->cts, list) 110 + efx_tc_ct_remove(efx, conn); 111 + synchronize_rcu(); 112 + /* need to use _safe because efx_tc_ct_remove_finish() frees conn */ 113 + list_for_each_entry_safe(conn, next, &ct_zone->cts, list) 114 + efx_tc_ct_remove_finish(efx, conn); 115 + mutex_unlock(&ct_zone->mutex); 116 + mutex_destroy(&ct_zone->mutex); 510 117 netif_dbg(efx, drv, efx->net_dev, "Removed ct_zone for %u\n", 511 118 ct_zone->zone); 512 119 kfree(ct_zone);
+3
drivers/net/ethernet/sfc/tc_conntrack.h
··· 22 22 refcount_t ref; 23 23 struct nf_flowtable *nf_ft; 24 24 struct efx_nic *efx; 25 + struct mutex mutex; /* protects cts list */ 26 + struct list_head cts; /* list of efx_tc_ct_entry in this zone */ 25 27 }; 26 28 27 29 /* create/teardown hashtables */ ··· 47 45 struct efx_tc_ct_zone *zone; 48 46 u32 mark; 49 47 struct efx_tc_counter *cnt; 48 + struct list_head list; /* entry on zone->cts */ 50 49 }; 51 50 52 51 #endif /* CONFIG_SFC_SRIOV */
+4 -4
drivers/net/ethernet/sfc/tc_counters.c
··· 129 129 130 130 /* Counter allocation */ 131 131 132 - static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, 133 - int type) 132 + struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, 133 + int type) 134 134 { 135 135 struct efx_tc_counter *cnt; 136 136 int rc, rc2; ··· 169 169 return ERR_PTR(rc > 0 ? -EIO : rc); 170 170 } 171 171 172 - static void efx_tc_flower_release_counter(struct efx_nic *efx, 173 - struct efx_tc_counter *cnt) 172 + void efx_tc_flower_release_counter(struct efx_nic *efx, 173 + struct efx_tc_counter *cnt) 174 174 { 175 175 int rc; 176 176
+4
drivers/net/ethernet/sfc/tc_counters.h
··· 49 49 void efx_tc_destroy_counters(struct efx_nic *efx); 50 50 void efx_tc_fini_counters(struct efx_nic *efx); 51 51 52 + struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, 53 + int type); 54 + void efx_tc_flower_release_counter(struct efx_nic *efx, 55 + struct efx_tc_counter *cnt); 52 56 struct efx_tc_counter_index *efx_tc_flower_get_counter_index( 53 57 struct efx_nic *efx, unsigned long cookie, 54 58 enum efx_tc_counter_type type);