Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'flow_offload-fixes'

Pablo Neira Ayuso says:

====================
flow_offload fixes

The following patchset contains fixes for the flow_offload infrastructure:

1) Fix possible build breakage before patch 3/4. Both the flow_offload
infrastructure and OVS define the flow_stats structure. Patch 3/4 in
this batch indirectly pulls in the flow_stats definition from
include/net/flow_offload.h into OVS, leading to structure redefinition
compile-time errors.

2) Remove netns parameter from flow_block_cb_alloc(), this is not
required as Jiri suggests. The flow_block_cb_is_busy() function uses
the per-driver block list to check for used blocks which was the
original intention for this parameter.

3) Rename tc_setup_cb_t to flow_setup_cb_t. This callback is not
exclusive of tc anymore, this might confuse the reader as Jiri
suggests, fix this semantic inconsistency.

Add #include <linux/list.h> to include/net/netfilter/nf_tables_offload.h
to avoid a compile break with CONFIG_HEADER_TEST=y.

4) Fix block sharing feature: Add flow_block structure and use it,
update flow_block_cb_lookup() to use this flow_block object.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+94 -81
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 735 735 list_add(&indr_priv->list, 736 736 &rpriv->uplink_priv.tc_indr_block_priv_list); 737 737 738 - block_cb = flow_block_cb_alloc(f->net, 739 - mlx5e_rep_indr_setup_block_cb, 738 + block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb, 740 739 indr_priv, indr_priv, 741 740 mlx5e_rep_indr_tc_block_unbind); 742 741 if (IS_ERR(block_cb)) { ··· 752 753 if (!indr_priv) 753 754 return -ENOENT; 754 755 755 - block_cb = flow_block_cb_lookup(f, 756 + block_cb = flow_block_cb_lookup(f->block, 756 757 mlx5e_rep_indr_setup_block_cb, 757 758 indr_priv); 758 759 if (!block_cb)
+8 -7
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 1604 1604 bool register_block = false; 1605 1605 int err; 1606 1606 1607 - block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, 1607 + block_cb = flow_block_cb_lookup(f->block, 1608 + mlxsw_sp_setup_tc_block_cb_flower, 1608 1609 mlxsw_sp); 1609 1610 if (!block_cb) { 1610 1611 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); 1611 1612 if (!acl_block) 1612 1613 return -ENOMEM; 1613 - block_cb = flow_block_cb_alloc(f->net, 1614 - mlxsw_sp_setup_tc_block_cb_flower, 1614 + block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, 1615 1615 mlxsw_sp, acl_block, 1616 1616 mlxsw_sp_tc_block_flower_release); 1617 1617 if (IS_ERR(block_cb)) { ··· 1657 1657 struct flow_block_cb *block_cb; 1658 1658 int err; 1659 1659 1660 - block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, 1660 + block_cb = flow_block_cb_lookup(f->block, 1661 + mlxsw_sp_setup_tc_block_cb_flower, 1661 1662 mlxsw_sp); 1662 1663 if (!block_cb) 1663 1664 return; ··· 1681 1680 struct flow_block_offload *f) 1682 1681 { 1683 1682 struct flow_block_cb *block_cb; 1684 - tc_setup_cb_t *cb; 1683 + flow_setup_cb_t *cb; 1685 1684 bool ingress; 1686 1685 int err; 1687 1686 ··· 1703 1702 &mlxsw_sp_block_cb_list)) 1704 1703 return -EBUSY; 1705 1704 1706 - block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port, 1705 + block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, 1707 1706 mlxsw_sp_port, NULL); 1708 1707 if (IS_ERR(block_cb)) 1709 1708 return PTR_ERR(block_cb); ··· 1719 1718 case FLOW_BLOCK_UNBIND: 1720 1719 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1721 1720 f, ingress); 1722 - block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port); 1721 + block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); 1723 1722 if (!block_cb) 1724 1723 return -ENOENT; 1725 1724
+5 -6
drivers/net/ethernet/mscc/ocelot_flower.c
··· 316 316 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) 317 317 return -EOPNOTSUPP; 318 318 319 - block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, 320 - port); 319 + block_cb = flow_block_cb_lookup(f->block, 320 + ocelot_setup_tc_block_cb_flower, port); 321 321 if (!block_cb) { 322 322 port_block = ocelot_port_block_create(port); 323 323 if (!port_block) 324 324 return -ENOMEM; 325 325 326 - block_cb = flow_block_cb_alloc(f->net, 327 - ocelot_setup_tc_block_cb_flower, 326 + block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower, 328 327 port, port_block, 329 328 ocelot_tc_block_unbind); 330 329 if (IS_ERR(block_cb)) { ··· 350 351 { 351 352 struct flow_block_cb *block_cb; 352 353 353 - block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, 354 - port); 354 + block_cb = flow_block_cb_lookup(f->block, 355 + ocelot_setup_tc_block_cb_flower, port); 355 356 if (!block_cb) 356 357 return; 357 358
+3 -3
drivers/net/ethernet/mscc/ocelot_tc.c
··· 134 134 struct flow_block_offload *f) 135 135 { 136 136 struct flow_block_cb *block_cb; 137 - tc_setup_cb_t *cb; 137 + flow_setup_cb_t *cb; 138 138 int err; 139 139 140 140 netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n", ··· 156 156 if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list)) 157 157 return -EBUSY; 158 158 159 - block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL); 159 + block_cb = flow_block_cb_alloc(cb, port, port, NULL); 160 160 if (IS_ERR(block_cb)) 161 161 return PTR_ERR(block_cb); 162 162 ··· 169 169 list_add_tail(&block_cb->driver_list, f->driver_block_list); 170 170 return 0; 171 171 case FLOW_BLOCK_UNBIND: 172 - block_cb = flow_block_cb_lookup(f, cb, port); 172 + block_cb = flow_block_cb_lookup(f->block, cb, port); 173 173 if (!block_cb) 174 174 return -ENOENT; 175 175
+5 -6
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1318 1318 &nfp_block_cb_list)) 1319 1319 return -EBUSY; 1320 1320 1321 - block_cb = flow_block_cb_alloc(f->net, 1322 - nfp_flower_setup_tc_block_cb, 1321 + block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb, 1323 1322 repr, repr, NULL); 1324 1323 if (IS_ERR(block_cb)) 1325 1324 return PTR_ERR(block_cb); ··· 1327 1328 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); 1328 1329 return 0; 1329 1330 case FLOW_BLOCK_UNBIND: 1330 - block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb, 1331 + block_cb = flow_block_cb_lookup(f->block, 1332 + nfp_flower_setup_tc_block_cb, 1331 1333 repr); 1332 1334 if (!block_cb) 1333 1335 return -ENOENT; ··· 1424 1424 cb_priv->app = app; 1425 1425 list_add(&cb_priv->list, &priv->indr_block_cb_priv); 1426 1426 1427 - block_cb = flow_block_cb_alloc(f->net, 1428 - nfp_flower_setup_indr_block_cb, 1427 + block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb, 1429 1428 cb_priv, cb_priv, 1430 1429 nfp_flower_setup_indr_tc_release); 1431 1430 if (IS_ERR(block_cb)) { ··· 1441 1442 if (!cb_priv) 1442 1443 return -ENOENT; 1443 1444 1444 - block_cb = flow_block_cb_lookup(f, 1445 + block_cb = flow_block_cb_lookup(f->block, 1445 1446 nfp_flower_setup_indr_block_cb, 1446 1447 cb_priv); 1447 1448 if (!block_cb)
+22 -8
include/net/flow_offload.h
··· 2 2 #define _NET_FLOW_OFFLOAD_H 3 3 4 4 #include <linux/kernel.h> 5 + #include <linux/list.h> 5 6 #include <net/flow_dissector.h> 6 - #include <net/sch_generic.h> 7 7 8 8 struct flow_match { 9 9 struct flow_dissector *dissector; ··· 249 249 FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, 250 250 }; 251 251 252 + struct flow_block { 253 + struct list_head cb_list; 254 + }; 255 + 252 256 struct netlink_ext_ack; 253 257 254 258 struct flow_block_offload { ··· 260 256 enum flow_block_binder_type binder_type; 261 257 bool block_shared; 262 258 struct net *net; 259 + struct flow_block *block; 263 260 struct list_head cb_list; 264 261 struct list_head *driver_block_list; 265 262 struct netlink_ext_ack *extack; 266 263 }; 267 264 265 + enum tc_setup_type; 266 + typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data, 267 + void *cb_priv); 268 + 268 269 struct flow_block_cb { 269 270 struct list_head driver_list; 270 271 struct list_head list; 271 - struct net *net; 272 - tc_setup_cb_t *cb; 272 + flow_setup_cb_t *cb; 273 273 void *cb_ident; 274 274 void *cb_priv; 275 275 void (*release)(void *cb_priv); 276 276 unsigned int refcnt; 277 277 }; 278 278 279 - struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, 279 + struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, 280 280 void *cb_ident, void *cb_priv, 281 281 void (*release)(void *cb_priv)); 282 282 void flow_block_cb_free(struct flow_block_cb *block_cb); 283 283 284 - struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload, 285 - tc_setup_cb_t *cb, void *cb_ident); 284 + struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block, 285 + flow_setup_cb_t *cb, void *cb_ident); 286 286 287 287 void *flow_block_cb_priv(struct flow_block_cb *block_cb); 288 288 void flow_block_cb_incref(struct flow_block_cb *block_cb); ··· 304 296 list_move(&block_cb->list, &offload->cb_list); 305 297 } 306 298 307 - bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident, 299 + bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, 308 300 struct list_head *driver_block_list); 309 301 310 302 int flow_block_cb_setup_simple(struct flow_block_offload *f, 311 - struct list_head *driver_list, tc_setup_cb_t *cb, 303 + struct list_head *driver_list, 304 + flow_setup_cb_t *cb, 312 305 void *cb_ident, void *cb_priv, bool ingress_only); 313 306 314 307 enum flow_cls_command { ··· 340 331 flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd) 341 332 { 342 333 return flow_cmd->rule; 334 + } 335 + 336 + static inline void flow_block_init(struct flow_block *flow_block) 337 + { 338 + INIT_LIST_HEAD(&flow_block->cb_list); 343 339 } 344 340 345 341 #endif /* _NET_FLOW_OFFLOAD_H */
+3 -2
include/net/netfilter/nf_tables.h
··· 11 11 #include <linux/rhashtable.h> 12 12 #include <net/netfilter/nf_flow_table.h> 13 13 #include <net/netlink.h> 14 + #include <net/flow_offload.h> 14 15 15 16 struct module; 16 17 ··· 952 951 * @stats: per-cpu chain stats 953 952 * @chain: the chain 954 953 * @dev_name: device name that this base chain is attached to (if any) 955 - * @cb_list: list of flow block callbacks (for hardware offload) 954 + * @flow_block: flow block (for hardware offload) 956 955 */ 957 956 struct nft_base_chain { 958 957 struct nf_hook_ops ops; ··· 962 961 struct nft_stats __percpu *stats; 963 962 struct nft_chain chain; 964 963 char dev_name[IFNAMSIZ]; 965 - struct list_head cb_list; 964 + struct flow_block flow_block; 966 965 }; 967 966 968 967 static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
+2 -3
include/net/pkt_cls.h
··· 6 6 #include <linux/workqueue.h> 7 7 #include <net/sch_generic.h> 8 8 #include <net/act_api.h> 9 - #include <net/flow_offload.h> 10 9 #include <net/net_namespace.h> 11 10 12 11 /* TC action not accessible from user space */ ··· 125 126 } 126 127 127 128 static inline 128 - int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb, 129 + int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb, 129 130 void *cb_priv) 130 131 { 131 132 return 0; 132 133 } 133 134 134 135 static inline 135 - void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb, 136 + void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb, 136 137 void *cb_priv) 137 138 { 138 139 }
+3 -5
include/net/sch_generic.h
··· 15 15 #include <linux/mutex.h> 16 16 #include <net/gen_stats.h> 17 17 #include <net/rtnetlink.h> 18 + #include <net/flow_offload.h> 18 19 19 20 struct Qdisc_ops; 20 21 struct qdisc_walker; 21 22 struct tcf_walker; 22 23 struct module; 23 24 struct bpf_flow_keys; 24 - 25 - typedef int tc_setup_cb_t(enum tc_setup_type type, 26 - void *type_data, void *cb_priv); 27 25 28 26 typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, 29 27 enum tc_setup_type type, void *type_data); ··· 311 313 void (*walk)(struct tcf_proto *tp, 312 314 struct tcf_walker *arg, bool rtnl_held); 313 315 int (*reoffload)(struct tcf_proto *tp, bool add, 314 - tc_setup_cb_t *cb, void *cb_priv, 316 + flow_setup_cb_t *cb, void *cb_priv, 315 317 struct netlink_ext_ack *extack); 316 318 void (*bind_class)(void *, u32, unsigned long); 317 319 void * (*tmplt_create)(struct net *net, ··· 399 401 refcount_t refcnt; 400 402 struct net *net; 401 403 struct Qdisc *q; 402 - struct list_head cb_list; 404 + struct flow_block flow_block; 403 405 struct list_head owner_list; 404 406 bool keep_dst; 405 407 unsigned int offloadcnt; /* Number of offloaded filters */
+10 -12
net/core/flow_offload.c
··· 165 165 } 166 166 EXPORT_SYMBOL(flow_rule_match_enc_opts); 167 167 168 - struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, 168 + struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, 169 169 void *cb_ident, void *cb_priv, 170 170 void (*release)(void *cb_priv)) 171 171 { ··· 175 175 if (!block_cb) 176 176 return ERR_PTR(-ENOMEM); 177 177 178 - block_cb->net = net; 179 178 block_cb->cb = cb; 180 179 block_cb->cb_ident = cb_ident; 181 180 block_cb->cb_priv = cb_priv; ··· 193 194 } 194 195 EXPORT_SYMBOL(flow_block_cb_free); 195 196 196 - struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f, 197 - tc_setup_cb_t *cb, void *cb_ident) 197 + struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block, 198 + flow_setup_cb_t *cb, void *cb_ident) 198 199 { 199 200 struct flow_block_cb *block_cb; 200 201 201 - list_for_each_entry(block_cb, f->driver_block_list, driver_list) { 202 - if (block_cb->net == f->net && 203 - block_cb->cb == cb && 202 + list_for_each_entry(block_cb, &block->cb_list, list) { 203 + if (block_cb->cb == cb && 204 204 block_cb->cb_ident == cb_ident) 205 205 return block_cb; 206 206 } ··· 226 228 } 227 229 EXPORT_SYMBOL(flow_block_cb_decref); 228 230 229 - bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident, 231 + bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, 230 232 struct list_head *driver_block_list) 231 233 { 232 234 struct flow_block_cb *block_cb; ··· 243 245 244 246 int flow_block_cb_setup_simple(struct flow_block_offload *f, 245 247 struct list_head *driver_block_list, 246 - tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, 248 + flow_setup_cb_t *cb, 249 + void *cb_ident, void *cb_priv, 247 250 bool ingress_only) 248 251 { 249 252 struct flow_block_cb *block_cb; ··· 260 261 if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list)) 261 262 return -EBUSY; 262 263 263 - block_cb = flow_block_cb_alloc(f->net, cb, cb_ident, 264 - cb_priv, NULL); 264 + block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL); 265 265 if (IS_ERR(block_cb)) 266 266 return PTR_ERR(block_cb); ··· 268 270 list_add_tail(&block_cb->driver_list, driver_block_list); 269 271 return 0; 270 272 case FLOW_BLOCK_UNBIND: 271 - block_cb = flow_block_cb_lookup(f, cb, cb_ident); 273 + block_cb = flow_block_cb_lookup(f->block, cb, cb_ident); 272 274 if (!block_cb) 273 275 return -ENOENT; 274 276
+3 -3
net/dsa/slave.c
··· 951 951 struct flow_block_offload *f) 952 952 { 953 953 struct flow_block_cb *block_cb; 954 - tc_setup_cb_t *cb; 954 + flow_setup_cb_t *cb; 955 955 956 956 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 957 957 cb = dsa_slave_setup_tc_block_cb_ig; ··· 967 967 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list)) 968 968 return -EBUSY; 969 969 970 - block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL); 970 + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); 971 971 if (IS_ERR(block_cb)) 972 972 return PTR_ERR(block_cb); 973 973 ··· 975 975 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list); 976 976 return 0; 977 977 case FLOW_BLOCK_UNBIND: 978 - block_cb = flow_block_cb_lookup(f, cb, dev); 978 + block_cb = flow_block_cb_lookup(f->block, cb, dev); 979 979 if (!block_cb) 980 980 return -ENOENT; 981 981
+1 -1
net/netfilter/nf_tables_api.c
··· 1662 1662 1663 1663 chain->flags |= NFT_BASE_CHAIN | flags; 1664 1664 basechain->policy = NF_ACCEPT; 1665 - INIT_LIST_HEAD(&basechain->cb_list); 1665 + flow_block_init(&basechain->flow_block); 1666 1666 } else { 1667 1667 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 1668 1668 if (chain == NULL)
+3 -2
net/netfilter/nf_tables_offload.c
··· 116 116 struct flow_block_cb *block_cb; 117 117 int err; 118 118 119 - list_for_each_entry(block_cb, &basechain->cb_list, list) { 119 + list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) { 120 120 err = block_cb->cb(type, type_data, block_cb->cb_priv); 121 121 if (err < 0) 122 122 return err; ··· 154 154 static int nft_flow_offload_bind(struct flow_block_offload *bo, 155 155 struct nft_base_chain *basechain) 156 156 { 157 - list_splice(&bo->cb_list, &basechain->cb_list); 157 + list_splice(&bo->cb_list, &basechain->flow_block.cb_list); 158 158 return 0; 159 159 } 160 160 ··· 198 198 return -EOPNOTSUPP; 199 199 200 200 bo.command = cmd; 201 + bo.block = &basechain->flow_block; 201 202 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; 202 203 bo.extack = &extack; 203 204 INIT_LIST_HEAD(&bo.cb_list);
+4 -4
net/openvswitch/flow.c
··· 59 59 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, 60 60 const struct sk_buff *skb) 61 61 { 62 - struct flow_stats *stats; 62 + struct sw_flow_stats *stats; 63 63 unsigned int cpu = smp_processor_id(); 64 64 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); 65 65 ··· 87 87 if (likely(flow->stats_last_writer != -1) && 88 88 likely(!rcu_access_pointer(flow->stats[cpu]))) { 89 89 /* Try to allocate CPU-specific stats. */ 90 - struct flow_stats *new_stats; 90 + struct sw_flow_stats *new_stats; 91 91 92 92 new_stats = 93 93 kmem_cache_alloc_node(flow_stats_cache, ··· 134 134 135 135 /* We open code this to make sure cpu 0 is always considered */ 136 136 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { 137 - struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); 137 + struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); 138 138 139 139 if (stats) { 140 140 /* Local CPU may write on non-local stats, so we must ··· 158 158 159 159 /* We open code this to make sure cpu 0 is always considered */ 160 160 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { 161 - struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]); 161 + struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]); 162 162 163 163 if (stats) { 164 164 spin_lock_bh(&stats->lock);
+2 -2
net/openvswitch/flow.h
··· 194 194 struct nlattr actions[]; 195 195 }; 196 196 197 - struct flow_stats { 197 + struct sw_flow_stats { 198 198 u64 packet_count; /* Number of packets matched. */ 199 199 u64 byte_count; /* Number of bytes matched. */ 200 200 unsigned long used; /* Last used time (in jiffies). */ ··· 216 216 struct cpumask cpu_used_mask; 217 217 struct sw_flow_mask *mask; 218 218 struct sw_flow_actions __rcu *sf_acts; 219 - struct flow_stats __rcu *stats[]; /* One for each CPU. First one 219 + struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one 220 220 * is allocated at flow creation time, 221 221 * the rest are allocated on demand 222 222 * while holding the 'stats[0].lock'.
+4 -4
net/openvswitch/flow_table.c
··· 66 66 struct sw_flow *ovs_flow_alloc(void) 67 67 { 68 68 struct sw_flow *flow; 69 - struct flow_stats *stats; 69 + struct sw_flow_stats *stats; 70 70 71 71 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); 72 72 if (!flow) ··· 110 110 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) 111 111 if (flow->stats[cpu]) 112 112 kmem_cache_free(flow_stats_cache, 113 - (struct flow_stats __force *)flow->stats[cpu]); 113 + (struct sw_flow_stats __force *)flow->stats[cpu]); 114 114 kmem_cache_free(flow_cache, flow); 115 115 } 116 116 ··· 712 712 713 713 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 714 714 + (nr_cpu_ids 715 - * sizeof(struct flow_stats *)), 715 + * sizeof(struct sw_flow_stats *)), 716 716 0, 0, NULL); 717 717 if (flow_cache == NULL) 718 718 return -ENOMEM; 719 719 720 720 flow_stats_cache 721 - = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats), 721 + = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats), 722 722 0, SLAB_HWCACHE_ALIGN, NULL); 723 723 if (flow_stats_cache == NULL) { 724 724 kmem_cache_destroy(flow_cache);
+8 -4
net/sched/cls_api.c
··· 691 691 if (!indr_dev->block) 692 692 return; 693 693 694 + bo.block = &indr_dev->block->flow_block; 695 + 694 696 indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, 695 697 &bo); 696 698 tcf_block_setup(indr_dev->block, &bo); ··· 777 775 .command = command, 778 776 .binder_type = ei->binder_type, 779 777 .net = dev_net(dev), 778 + .block = &block->flow_block, 780 779 .block_shared = tcf_block_shared(block), 781 780 .extack = extack, 782 781 }; ··· 813 810 bo.net = dev_net(dev); 814 811 bo.command = command; 815 812 bo.binder_type = ei->binder_type; 813 + bo.block = &block->flow_block; 816 814 bo.block_shared = tcf_block_shared(block); 817 815 bo.extack = extack; 818 816 INIT_LIST_HEAD(&bo.cb_list); ··· 991 987 return ERR_PTR(-ENOMEM); 992 988 } 993 989 mutex_init(&block->lock); 990 + flow_block_init(&block->flow_block); 994 991 INIT_LIST_HEAD(&block->chain_list); 995 - INIT_LIST_HEAD(&block->cb_list); 996 992 INIT_LIST_HEAD(&block->owner_list); 997 993 INIT_LIST_HEAD(&block->chain0.filter_chain_list); 998 994 ··· 1518 1514 EXPORT_SYMBOL(tcf_block_put); 1519 1515 1520 1516 static int 1521 - tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb, 1517 + tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, 1522 1518 void *cb_priv, bool add, bool offload_in_use, 1523 1519 struct netlink_ext_ack *extack) 1524 1520 { ··· 1574 1570 1575 1571 i++; 1576 1572 } 1577 - list_splice(&bo->cb_list, &block->cb_list); 1573 + list_splice(&bo->cb_list, &block->flow_block.cb_list); 1578 1574 1579 1575 return 0; 1580 1576 ··· 3160 3156 if (block->nooffloaddevcnt && err_stop) 3161 3157 return -EOPNOTSUPP; 3162 3158 3163 - list_for_each_entry(block_cb, &block->cb_list, list) { 3159 + list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { 3164 3160 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3165 3161 if (err) { 3166 3162 if (err_stop)
+1 -1
net/sched/cls_bpf.c
··· 651 651 } 652 652 } 653 653 654 - static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 654 + static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 655 655 void *cb_priv, struct netlink_ext_ack *extack) 656 656 { 657 657 struct cls_bpf_head *head = rtnl_dereference(tp->root);
+1 -1
net/sched/cls_flower.c
··· 1800 1800 return NULL; 1801 1801 } 1802 1802 1803 - static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 1803 + static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 1804 1804 void *cb_priv, struct netlink_ext_ack *extack) 1805 1805 { 1806 1806 struct tcf_block *block = tp->chain->block;
+1 -1
net/sched/cls_matchall.c
··· 282 282 arg->count++; 283 283 } 284 284 285 - static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 285 + static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 286 286 void *cb_priv, struct netlink_ext_ack *extack) 287 287 { 288 288 struct cls_mall_head *head = rtnl_dereference(tp->root);
+3 -3
net/sched/cls_u32.c
··· 1152 1152 } 1153 1153 1154 1154 static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, 1155 - bool add, tc_setup_cb_t *cb, void *cb_priv, 1155 + bool add, flow_setup_cb_t *cb, void *cb_priv, 1156 1156 struct netlink_ext_ack *extack) 1157 1157 { 1158 1158 struct tc_cls_u32_offload cls_u32 = {}; ··· 1172 1172 } 1173 1173 1174 1174 static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, 1175 - bool add, tc_setup_cb_t *cb, void *cb_priv, 1175 + bool add, flow_setup_cb_t *cb, void *cb_priv, 1176 1176 struct netlink_ext_ack *extack) 1177 1177 { 1178 1178 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); ··· 1213 1213 return 0; 1214 1214 } 1215 1215 1216 - static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 1216 + static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 1217 1217 void *cb_priv, struct netlink_ext_ack *extack) 1218 1218 { 1219 1219 struct tc_u_common *tp_c = tp->data;