Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'nfp-flower-rework'

Simon Horman says:

====================
nfp: flower: decap neighbour table rework

Louis Peens says:

This patch series reworks the way in which flow rules that output to
OVS internal ports get handled by the nfp driver.

Previously this made use of a small pre_tun_table, but this only used
destination MAC addresses, and made the implicit assumption that there is
only a single "source MAC":"destination MAC" mapping per tunnel. In
hindsight this seems to be a pretty obvious oversight, but this was hidden
in plain sight for quite some time.

This series changes the implementation to make use of the same Neighbour
table for decap that is in use for the tunnel encap solution. It stores
any new Neighbour updates in this table. Previously this path was only
triggered for encapsulation candidates, and the entries were
send-and-forget, not saved on the host as they are after this series.
It also keeps
track of any flow rule that outputs to OVS internal ports (and some
other criteria not worth mentioning here), very similar to how it was
done previously, except now these flows are kept track of in a list.

When a new Neighbour entry gets added this list gets iterated for
potential matches, in which case the table gets updated with a reference
to the flow, and the Neighbour entry on the card gets updated with the
relevant host_ctx. The same happens when a new qualifying flow gets
added - the Neighbour table gets iterated for applicable matches, and
once again the firmware gets updated with the host_ctx when any matches
are found.

Since this also requires a firmware change we add a new capability bit,
and keep the old behaviour in case of older firmware without this bit
set.

This series starts by doing some preparation, then adding the new list
and table entries. Next, the functionality to link/unlink these entries
is added, and finally this new functionality is enabled by adding the
DECAP_V2 bit to the driver feature list.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+465 -279
+2 -1
drivers/net/ethernet/netronome/nfp/flower/action.c
··· 220 220 } 221 221 output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid); 222 222 } else if (nfp_flower_internal_port_can_offload(app, out_dev)) { 223 - if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) { 223 + if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES) && 224 + !(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2)) { 224 225 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware"); 225 226 return -EOPNOTSUPP; 226 227 }
+101 -9
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 51 51 #define NFP_FL_FEATS_VLAN_QINQ BIT(8) 52 52 #define NFP_FL_FEATS_QOS_PPS BIT(9) 53 53 #define NFP_FL_FEATS_QOS_METER BIT(10) 54 + #define NFP_FL_FEATS_DECAP_V2 BIT(11) 54 55 #define NFP_FL_FEATS_HOST_ACK BIT(31) 55 56 56 57 #define NFP_FL_ENABLE_FLOW_MERGE BIT(0) ··· 68 67 NFP_FL_FEATS_IPV6_TUN | \ 69 68 NFP_FL_FEATS_VLAN_QINQ | \ 70 69 NFP_FL_FEATS_QOS_PPS | \ 71 - NFP_FL_FEATS_QOS_METER) 70 + NFP_FL_FEATS_QOS_METER | \ 71 + NFP_FL_FEATS_DECAP_V2) 72 72 73 73 struct nfp_fl_mask_id { 74 74 struct circ_buf mask_id_free_list; ··· 88 86 * @offloaded_macs: Hashtable of the offloaded MAC addresses 89 87 * @ipv4_off_list: List of IPv4 addresses to offload 90 88 * @ipv6_off_list: List of IPv6 addresses to offload 91 - * @neigh_off_list_v4: List of IPv4 neighbour offloads 92 - * @neigh_off_list_v6: List of IPv6 neighbour offloads 93 89 * @ipv4_off_lock: Lock for the IPv4 address list 94 90 * @ipv6_off_lock: Lock for the IPv6 address list 95 - * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list 96 - * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list 97 91 * @mac_off_ids: IDA to manage id assignment for offloaded MACs 98 92 * @neigh_nb: Notifier to monitor neighbour state 99 93 */ ··· 97 99 struct rhashtable offloaded_macs; 98 100 struct list_head ipv4_off_list; 99 101 struct list_head ipv6_off_list; 100 - struct list_head neigh_off_list_v4; 101 - struct list_head neigh_off_list_v6; 102 102 struct mutex ipv4_off_lock; 103 103 struct mutex ipv6_off_lock; 104 - spinlock_t neigh_off_lock_v4; 105 - spinlock_t neigh_off_lock_v6; 106 104 struct ida mac_off_ids; 107 105 struct notifier_block neigh_nb; 106 + }; 107 + 108 + /** 109 + * struct nfp_tun_neigh - basic neighbour data 110 + * @dst_addr: Destination MAC address 111 + * @src_addr: Source MAC address 112 + * @port_id: NFP port to output packet on - associated with source IPv4 113 + */ 114 + struct nfp_tun_neigh { 115 + u8 dst_addr[ETH_ALEN]; 116 + u8 src_addr[ETH_ALEN]; 117 + __be32 port_id; 
118 + }; 119 + 120 + /** 121 + * struct nfp_tun_neigh_ext - extended neighbour data 122 + * @vlan_tpid: VLAN_TPID match field 123 + * @vlan_tci: VLAN_TCI match field 124 + * @host_ctx: Host context ID to be saved here 125 + */ 126 + struct nfp_tun_neigh_ext { 127 + __be16 vlan_tpid; 128 + __be16 vlan_tci; 129 + __be32 host_ctx; 130 + }; 131 + 132 + /** 133 + * struct nfp_tun_neigh_v4 - neighbour/route entry on the NFP for IPv4 134 + * @dst_ipv4: Destination IPv4 address 135 + * @src_ipv4: Source IPv4 address 136 + * @common: Neighbour/route common info 137 + * @ext: Neighbour/route extended info 138 + */ 139 + struct nfp_tun_neigh_v4 { 140 + __be32 dst_ipv4; 141 + __be32 src_ipv4; 142 + struct nfp_tun_neigh common; 143 + struct nfp_tun_neigh_ext ext; 144 + }; 145 + 146 + /** 147 + * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP for IPv6 148 + * @dst_ipv6: Destination IPv6 address 149 + * @src_ipv6: Source IPv6 address 150 + * @common: Neighbour/route common info 151 + * @ext: Neighbour/route extended info 152 + */ 153 + struct nfp_tun_neigh_v6 { 154 + struct in6_addr dst_ipv6; 155 + struct in6_addr src_ipv6; 156 + struct nfp_tun_neigh common; 157 + struct nfp_tun_neigh_ext ext; 158 + }; 159 + 160 + /** 161 + * struct nfp_neigh_entry 162 + * @neigh_cookie: Cookie for hashtable lookup 163 + * @ht_node: rhash_head entry for hashtable 164 + * @list_head: Needed as member of linked_nn_entries list 165 + * @payload: The neighbour info payload 166 + * @flow: Linked flow rule 167 + * @is_ipv6: Flag to indicate if payload is ipv6 or ipv4 168 + */ 169 + struct nfp_neigh_entry { 170 + unsigned long neigh_cookie; 171 + struct rhash_head ht_node; 172 + struct list_head list_head; 173 + char *payload; 174 + struct nfp_predt_entry *flow; 175 + bool is_ipv6; 176 + }; 177 + 178 + /** 179 + * struct nfp_predt_entry 180 + * @list_head: List head to attach to predt_list 181 + * @flow_pay: Direct link to flow_payload 182 + * @nn_list: List of linked nfp_neigh_entries 183 + 
*/ 184 + struct nfp_predt_entry { 185 + struct list_head list_head; 186 + struct nfp_fl_payload *flow_pay; 187 + struct list_head nn_list; 108 188 }; 109 189 110 190 /** ··· 278 202 * @ct_zone_table: Hash table used to store the different zones 279 203 * @ct_zone_wc: Special zone entry for wildcarded zone matches 280 204 * @ct_map_table: Hash table used to referennce ct flows 205 + * @predt_list: List to keep track of decap pretun flows 206 + * @neigh_table: Table to keep track of neighbor entries 207 + * @predt_lock: Lock to serialise predt/neigh table updates 281 208 */ 282 209 struct nfp_flower_priv { 283 210 struct nfp_app *app; ··· 320 241 struct rhashtable ct_zone_table; 321 242 struct nfp_fl_ct_zone_entry *ct_zone_wc; 322 243 struct rhashtable ct_map_table; 244 + struct list_head predt_list; 245 + struct rhashtable neigh_table; 246 + spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */ 323 247 }; 324 248 325 249 /** ··· 426 344 struct list_head linked_flows; 427 345 bool in_hw; 428 346 struct { 347 + struct nfp_predt_entry *predt; 429 348 struct net_device *dev; 349 + __be16 vlan_tpid; 430 350 __be16 vlan_tci; 431 351 __be16 port_idx; 352 + u8 loc_mac[ETH_ALEN]; 353 + u8 rem_mac[ETH_ALEN]; 354 + bool is_ipv6; 432 355 } pre_tun_rule; 433 356 }; 434 357 ··· 456 369 457 370 extern const struct rhashtable_params nfp_flower_table_params; 458 371 extern const struct rhashtable_params merge_table_params; 372 + extern const struct rhashtable_params neigh_table_params; 459 373 460 374 struct nfp_merge_info { 461 375 u64 parent_ctx; ··· 668 580 nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); 669 581 u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, 670 582 struct net_device *netdev); 583 + void nfp_tun_link_and_update_nn_entries(struct nfp_app *app, 584 + struct nfp_predt_entry *predt); 585 + void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app, 586 + struct nfp_predt_entry *predt); 671 587 int 
nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, 672 588 struct nfp_fl_payload *flow); 673 589 int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+18 -1
drivers/net/ethernet/netronome/nfp/flower/metadata.c
··· 502 502 .automatic_shrinking = true, 503 503 }; 504 504 505 + const struct rhashtable_params neigh_table_params = { 506 + .key_offset = offsetof(struct nfp_neigh_entry, neigh_cookie), 507 + .head_offset = offsetof(struct nfp_neigh_entry, ht_node), 508 + .key_len = sizeof(unsigned long), 509 + }; 510 + 505 511 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, 506 512 unsigned int host_num_mems) 507 513 { ··· 536 530 if (err) 537 531 goto err_free_ct_zone_table; 538 532 533 + err = rhashtable_init(&priv->neigh_table, &neigh_table_params); 534 + if (err) 535 + goto err_free_ct_map_table; 536 + 537 + INIT_LIST_HEAD(&priv->predt_list); 538 + 539 539 get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); 540 540 541 541 /* Init ring buffer and unallocated mask_ids. */ ··· 549 537 kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, 550 538 NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); 551 539 if (!priv->mask_ids.mask_id_free_list.buf) 552 - goto err_free_ct_map_table; 540 + goto err_free_neigh_table; 553 541 554 542 priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; 555 543 ··· 577 565 goto err_free_ring_buf; 578 566 579 567 spin_lock_init(&priv->stats_lock); 568 + spin_lock_init(&priv->predt_lock); 580 569 581 570 return 0; 582 571 ··· 587 574 kfree(priv->mask_ids.last_used); 588 575 err_free_mask_id: 589 576 kfree(priv->mask_ids.mask_id_free_list.buf); 577 + err_free_neigh_table: 578 + rhashtable_destroy(&priv->neigh_table); 590 579 err_free_ct_map_table: 591 580 rhashtable_destroy(&priv->ct_map_table); 592 581 err_free_ct_zone_table: ··· 715 700 716 701 rhashtable_free_and_destroy(&priv->ct_map_table, 717 702 nfp_free_map_table_entry, NULL); 703 + rhashtable_free_and_destroy(&priv->neigh_table, 704 + nfp_check_rhashtable_empty, NULL); 718 705 kvfree(priv->stats); 719 706 kfree(priv->mask_ids.mask_id_free_list.buf); 720 707 kfree(priv->mask_ids.last_used);
+73 -13
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1170 1170 return -EOPNOTSUPP; 1171 1171 } 1172 1172 1173 + if (key_layer & NFP_FLOWER_LAYER_IPV6) 1174 + flow->pre_tun_rule.is_ipv6 = true; 1175 + else 1176 + flow->pre_tun_rule.is_ipv6 = false; 1177 + 1173 1178 /* Skip fields known to exist. */ 1174 1179 mask += sizeof(struct nfp_flower_meta_tci); 1175 1180 ext += sizeof(struct nfp_flower_meta_tci); ··· 1185 1180 mask += sizeof(struct nfp_flower_in_port); 1186 1181 ext += sizeof(struct nfp_flower_in_port); 1187 1182 1188 - /* Ensure destination MAC address matches pre_tun_dev. */ 1189 - mac = (struct nfp_flower_mac_mpls *)ext; 1190 - if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { 1191 - NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); 1192 - return -EOPNOTSUPP; 1193 - } 1194 - 1195 1183 /* Ensure destination MAC address is fully matched. */ 1196 1184 mac = (struct nfp_flower_mac_mpls *)mask; 1197 1185 if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { ··· 1192 1194 return -EOPNOTSUPP; 1193 1195 } 1194 1196 1197 + /* Ensure source MAC address is fully matched. This is only needed 1198 + * for firmware with the DECAP_V2 feature enabled. Don't do this 1199 + * for firmware without this feature to keep old behaviour. 1200 + */ 1201 + if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { 1202 + mac = (struct nfp_flower_mac_mpls *)mask; 1203 + if (!is_broadcast_ether_addr(&mac->mac_src[0])) { 1204 + NL_SET_ERR_MSG_MOD(extack, 1205 + "unsupported pre-tunnel rule: source MAC field must not be masked"); 1206 + return -EOPNOTSUPP; 1207 + } 1208 + } 1209 + 1195 1210 if (mac->mpls_lse) { 1196 1211 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); 1197 1212 return -EOPNOTSUPP; 1198 1213 } 1214 + 1215 + /* Ensure destination MAC address matches pre_tun_dev. 
*/ 1216 + mac = (struct nfp_flower_mac_mpls *)ext; 1217 + if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { 1218 + NL_SET_ERR_MSG_MOD(extack, 1219 + "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); 1220 + return -EOPNOTSUPP; 1221 + } 1222 + 1223 + /* Save mac addresses in pre_tun_rule entry for later use */ 1224 + memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN); 1225 + memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN); 1199 1226 1200 1227 mask += sizeof(struct nfp_flower_mac_mpls); 1201 1228 ext += sizeof(struct nfp_flower_mac_mpls); ··· 1250 1227 if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { 1251 1228 if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) { 1252 1229 struct nfp_flower_vlan *vlan_tags; 1230 + u16 vlan_tpid; 1253 1231 u16 vlan_tci; 1254 1232 1255 1233 vlan_tags = (struct nfp_flower_vlan *)ext; 1256 1234 1257 1235 vlan_tci = be16_to_cpu(vlan_tags->outer_tci); 1236 + vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid); 1258 1237 1259 1238 vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; 1260 1239 flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); 1240 + flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid); 1261 1241 vlan = true; 1262 1242 } else { 1263 1243 flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); 1244 + flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff); 1264 1245 } 1265 1246 } 1266 1247 ··· 1389 1362 goto err_release_metadata; 1390 1363 } 1391 1364 1392 - if (flow_pay->pre_tun_rule.dev) 1393 - err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); 1394 - else 1365 + if (flow_pay->pre_tun_rule.dev) { 1366 + if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { 1367 + struct nfp_predt_entry *predt; 1368 + 1369 + predt = kzalloc(sizeof(*predt), GFP_KERNEL); 1370 + if (!predt) { 1371 + err = -ENOMEM; 1372 + goto err_remove_rhash; 1373 + } 1374 + predt->flow_pay = flow_pay; 1375 + INIT_LIST_HEAD(&predt->nn_list); 1376 + spin_lock_bh(&priv->predt_lock); 1377 + 
list_add(&predt->list_head, &priv->predt_list); 1378 + flow_pay->pre_tun_rule.predt = predt; 1379 + nfp_tun_link_and_update_nn_entries(app, predt); 1380 + spin_unlock_bh(&priv->predt_lock); 1381 + } else { 1382 + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); 1383 + } 1384 + } else { 1395 1385 err = nfp_flower_xmit_flow(app, flow_pay, 1396 1386 NFP_FLOWER_CMSG_TYPE_FLOW_ADD); 1387 + } 1388 + 1397 1389 if (err) 1398 1390 goto err_remove_rhash; 1399 1391 ··· 1584 1538 goto err_free_merge_flow; 1585 1539 } 1586 1540 1587 - if (nfp_flow->pre_tun_rule.dev) 1588 - err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); 1589 - else 1541 + if (nfp_flow->pre_tun_rule.dev) { 1542 + if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { 1543 + struct nfp_predt_entry *predt; 1544 + 1545 + predt = nfp_flow->pre_tun_rule.predt; 1546 + if (predt) { 1547 + spin_lock_bh(&priv->predt_lock); 1548 + nfp_tun_unlink_and_update_nn_entries(app, predt); 1549 + list_del(&predt->list_head); 1550 + spin_unlock_bh(&priv->predt_lock); 1551 + kfree(predt); 1552 + } 1553 + } else { 1554 + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); 1555 + } 1556 + } else { 1590 1557 err = nfp_flower_xmit_flow(app, nfp_flow, 1591 1558 NFP_FLOWER_CMSG_TYPE_FLOW_DEL); 1559 + } 1592 1560 /* Fall through on error. */ 1593 1561 1594 1562 err_free_merge_flow:
+271 -255
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
··· 77 77 }; 78 78 79 79 /** 80 - * struct nfp_tun_neigh - neighbour/route entry on the NFP 81 - * @dst_ipv4: destination IPv4 address 82 - * @src_ipv4: source IPv4 address 83 - * @dst_addr: destination MAC address 84 - * @src_addr: source MAC address 85 - * @port_id: NFP port to output packet on - associated with source IPv4 86 - */ 87 - struct nfp_tun_neigh { 88 - __be32 dst_ipv4; 89 - __be32 src_ipv4; 90 - u8 dst_addr[ETH_ALEN]; 91 - u8 src_addr[ETH_ALEN]; 92 - __be32 port_id; 93 - }; 94 - 95 - /** 96 - * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP 97 - * @dst_ipv6: destination IPv6 address 98 - * @src_ipv6: source IPv6 address 99 - * @dst_addr: destination MAC address 100 - * @src_addr: source MAC address 101 - * @port_id: NFP port to output packet on - associated with source IPv6 102 - */ 103 - struct nfp_tun_neigh_v6 { 104 - struct in6_addr dst_ipv6; 105 - struct in6_addr src_ipv6; 106 - u8 dst_addr[ETH_ALEN]; 107 - u8 src_addr[ETH_ALEN]; 108 - __be32 port_id; 109 - }; 110 - 111 - /** 112 80 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup 113 81 * @ingress_port: ingress port of packet that signalled request 114 82 * @ipv4_addr: destination ipv4 address for route ··· 281 313 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, 282 314 gfp_t flag) 283 315 { 316 + struct nfp_flower_priv *priv = app->priv; 284 317 struct sk_buff *skb; 285 318 unsigned char *msg; 319 + 320 + if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) && 321 + (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH || 322 + mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6)) 323 + plen -= sizeof(struct nfp_tun_neigh_ext); 286 324 287 325 skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag); 288 326 if (!skb) ··· 301 327 return 0; 302 328 } 303 329 304 - static bool 305 - __nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock, 306 - void *add, int add_len) 330 + static void 331 + nfp_tun_mutual_link(struct nfp_predt_entry *predt, 
332 + struct nfp_neigh_entry *neigh) 307 333 { 308 - struct nfp_offloaded_route *entry; 334 + struct nfp_fl_payload *flow_pay = predt->flow_pay; 335 + struct nfp_tun_neigh_ext *ext; 336 + struct nfp_tun_neigh *common; 309 337 310 - spin_lock_bh(list_lock); 311 - list_for_each_entry(entry, route_list, list) 312 - if (!memcmp(entry->ip_add, add, add_len)) { 313 - spin_unlock_bh(list_lock); 314 - return true; 315 - } 316 - spin_unlock_bh(list_lock); 317 - return false; 338 + if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6) 339 + return; 340 + 341 + /* In the case of bonding it is possible that there might already 342 + * be a flow linked (as the MAC address gets shared). If a flow 343 + * is already linked just return. 344 + */ 345 + if (neigh->flow) 346 + return; 347 + 348 + common = neigh->is_ipv6 ? 349 + &((struct nfp_tun_neigh_v6 *)neigh->payload)->common : 350 + &((struct nfp_tun_neigh_v4 *)neigh->payload)->common; 351 + ext = neigh->is_ipv6 ? 352 + &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext : 353 + &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext; 354 + 355 + if (memcmp(flow_pay->pre_tun_rule.loc_mac, 356 + common->src_addr, ETH_ALEN) || 357 + memcmp(flow_pay->pre_tun_rule.rem_mac, 358 + common->dst_addr, ETH_ALEN)) 359 + return; 360 + 361 + list_add(&neigh->list_head, &predt->nn_list); 362 + neigh->flow = predt; 363 + ext->host_ctx = flow_pay->meta.host_ctx_id; 364 + ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci; 365 + ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid; 318 366 } 319 367 320 - static int 321 - __nfp_tun_add_route_to_cache(struct list_head *route_list, 322 - spinlock_t *list_lock, void *add, int add_len) 368 + static void 369 + nfp_tun_link_predt_entries(struct nfp_app *app, 370 + struct nfp_neigh_entry *nn_entry) 323 371 { 324 - struct nfp_offloaded_route *entry; 372 + struct nfp_flower_priv *priv = app->priv; 373 + struct nfp_predt_entry *predt, *tmp; 325 374 326 - spin_lock_bh(list_lock); 327 - list_for_each_entry(entry, 
route_list, list) 328 - if (!memcmp(entry->ip_add, add, add_len)) { 329 - spin_unlock_bh(list_lock); 330 - return 0; 331 - } 332 - 333 - entry = kmalloc(struct_size(entry, ip_add, add_len), GFP_ATOMIC); 334 - if (!entry) { 335 - spin_unlock_bh(list_lock); 336 - return -ENOMEM; 375 + list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) { 376 + nfp_tun_mutual_link(predt, nn_entry); 337 377 } 378 + } 338 379 339 - memcpy(entry->ip_add, add, add_len); 340 - list_add_tail(&entry->list, route_list); 341 - spin_unlock_bh(list_lock); 380 + void nfp_tun_link_and_update_nn_entries(struct nfp_app *app, 381 + struct nfp_predt_entry *predt) 382 + { 383 + struct nfp_flower_priv *priv = app->priv; 384 + struct nfp_neigh_entry *nn_entry; 385 + struct rhashtable_iter iter; 386 + size_t neigh_size; 387 + u8 type; 342 388 343 - return 0; 389 + rhashtable_walk_enter(&priv->neigh_table, &iter); 390 + rhashtable_walk_start(&iter); 391 + while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) { 392 + if (IS_ERR(nn_entry)) 393 + continue; 394 + nfp_tun_mutual_link(predt, nn_entry); 395 + neigh_size = nn_entry->is_ipv6 ? 396 + sizeof(struct nfp_tun_neigh_v6) : 397 + sizeof(struct nfp_tun_neigh_v4); 398 + type = nn_entry->is_ipv6 ? 
NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : 399 + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; 400 + nfp_flower_xmit_tun_conf(app, type, neigh_size, 401 + nn_entry->payload, 402 + GFP_ATOMIC); 403 + } 404 + rhashtable_walk_stop(&iter); 405 + rhashtable_walk_exit(&iter); 406 + } 407 + 408 + static void nfp_tun_cleanup_nn_entries(struct nfp_app *app) 409 + { 410 + struct nfp_flower_priv *priv = app->priv; 411 + struct nfp_neigh_entry *neigh; 412 + struct nfp_tun_neigh_ext *ext; 413 + struct rhashtable_iter iter; 414 + size_t neigh_size; 415 + u8 type; 416 + 417 + rhashtable_walk_enter(&priv->neigh_table, &iter); 418 + rhashtable_walk_start(&iter); 419 + while ((neigh = rhashtable_walk_next(&iter)) != NULL) { 420 + if (IS_ERR(neigh)) 421 + continue; 422 + ext = neigh->is_ipv6 ? 423 + &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext : 424 + &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext; 425 + ext->host_ctx = cpu_to_be32(U32_MAX); 426 + ext->vlan_tpid = cpu_to_be16(U16_MAX); 427 + ext->vlan_tci = cpu_to_be16(U16_MAX); 428 + 429 + neigh_size = neigh->is_ipv6 ? 430 + sizeof(struct nfp_tun_neigh_v6) : 431 + sizeof(struct nfp_tun_neigh_v4); 432 + type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : 433 + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; 434 + nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload, 435 + GFP_ATOMIC); 436 + 437 + rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node, 438 + neigh_table_params); 439 + if (neigh->flow) 440 + list_del(&neigh->list_head); 441 + kfree(neigh); 442 + } 443 + rhashtable_walk_stop(&iter); 444 + rhashtable_walk_exit(&iter); 445 + } 446 + 447 + void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app, 448 + struct nfp_predt_entry *predt) 449 + { 450 + struct nfp_neigh_entry *neigh, *tmp; 451 + struct nfp_tun_neigh_ext *ext; 452 + size_t neigh_size; 453 + u8 type; 454 + 455 + list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) { 456 + ext = neigh->is_ipv6 ? 
457 + &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext : 458 + &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext; 459 + neigh->flow = NULL; 460 + ext->host_ctx = cpu_to_be32(U32_MAX); 461 + ext->vlan_tpid = cpu_to_be16(U16_MAX); 462 + ext->vlan_tci = cpu_to_be16(U16_MAX); 463 + list_del(&neigh->list_head); 464 + neigh_size = neigh->is_ipv6 ? 465 + sizeof(struct nfp_tun_neigh_v6) : 466 + sizeof(struct nfp_tun_neigh_v4); 467 + type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : 468 + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; 469 + nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload, 470 + GFP_ATOMIC); 471 + } 344 472 } 345 473 346 474 static void 347 - __nfp_tun_del_route_from_cache(struct list_head *route_list, 348 - spinlock_t *list_lock, void *add, int add_len) 475 + nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, 476 + void *flow, struct neighbour *neigh, bool is_ipv6) 349 477 { 350 - struct nfp_offloaded_route *entry; 351 - 352 - spin_lock_bh(list_lock); 353 - list_for_each_entry(entry, route_list, list) 354 - if (!memcmp(entry->ip_add, add, add_len)) { 355 - list_del(&entry->list); 356 - kfree(entry); 357 - break; 358 - } 359 - spin_unlock_bh(list_lock); 360 - } 361 - 362 - static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr) 363 - { 478 + bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead; 479 + size_t neigh_size = is_ipv6 ? 
sizeof(struct nfp_tun_neigh_v6) : 480 + sizeof(struct nfp_tun_neigh_v4); 481 + unsigned long cookie = (unsigned long)neigh; 364 482 struct nfp_flower_priv *priv = app->priv; 365 - 366 - return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4, 367 - &priv->tun.neigh_off_lock_v4, ipv4_addr, 368 - sizeof(*ipv4_addr)); 369 - } 370 - 371 - static bool 372 - nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) 373 - { 374 - struct nfp_flower_priv *priv = app->priv; 375 - 376 - return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6, 377 - &priv->tun.neigh_off_lock_v6, ipv6_addr, 378 - sizeof(*ipv6_addr)); 379 - } 380 - 381 - static void 382 - nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr) 383 - { 384 - struct nfp_flower_priv *priv = app->priv; 385 - 386 - __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4, 387 - &priv->tun.neigh_off_lock_v4, ipv4_addr, 388 - sizeof(*ipv4_addr)); 389 - } 390 - 391 - static void 392 - nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) 393 - { 394 - struct nfp_flower_priv *priv = app->priv; 395 - 396 - __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6, 397 - &priv->tun.neigh_off_lock_v6, ipv6_addr, 398 - sizeof(*ipv6_addr)); 399 - } 400 - 401 - static void 402 - nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr) 403 - { 404 - struct nfp_flower_priv *priv = app->priv; 405 - 406 - __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4, 407 - &priv->tun.neigh_off_lock_v4, ipv4_addr, 408 - sizeof(*ipv4_addr)); 409 - } 410 - 411 - static void 412 - nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) 413 - { 414 - struct nfp_flower_priv *priv = app->priv; 415 - 416 - __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6, 417 - &priv->tun.neigh_off_lock_v6, ipv6_addr, 418 - sizeof(*ipv6_addr)); 419 - } 420 - 421 - static void 422 - nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app, 423 
- struct flowi4 *flow, struct neighbour *neigh, gfp_t flag) 424 - { 425 - struct nfp_tun_neigh payload; 483 + struct nfp_neigh_entry *nn_entry; 426 484 u32 port_id; 485 + u8 mtype; 427 486 428 487 port_id = nfp_flower_get_port_id_from_netdev(app, netdev); 429 488 if (!port_id) 430 489 return; 431 490 432 - memset(&payload, 0, sizeof(struct nfp_tun_neigh)); 433 - payload.dst_ipv4 = flow->daddr; 491 + spin_lock_bh(&priv->predt_lock); 492 + nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie, 493 + neigh_table_params); 494 + if (!nn_entry && !neigh_invalid) { 495 + struct nfp_tun_neigh_ext *ext; 496 + struct nfp_tun_neigh *common; 434 497 435 - /* If entry has expired send dst IP with all other fields 0. */ 436 - if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { 437 - nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4); 498 + nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size, 499 + GFP_ATOMIC); 500 + if (!nn_entry) 501 + goto err; 502 + 503 + nn_entry->payload = (char *)&nn_entry[1]; 504 + nn_entry->neigh_cookie = cookie; 505 + nn_entry->is_ipv6 = is_ipv6; 506 + nn_entry->flow = NULL; 507 + if (is_ipv6) { 508 + struct flowi6 *flowi6 = (struct flowi6 *)flow; 509 + struct nfp_tun_neigh_v6 *payload; 510 + 511 + payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload; 512 + payload->src_ipv6 = flowi6->saddr; 513 + payload->dst_ipv6 = flowi6->daddr; 514 + common = &payload->common; 515 + ext = &payload->ext; 516 + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6; 517 + } else { 518 + struct flowi4 *flowi4 = (struct flowi4 *)flow; 519 + struct nfp_tun_neigh_v4 *payload; 520 + 521 + payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload; 522 + payload->src_ipv4 = flowi4->saddr; 523 + payload->dst_ipv4 = flowi4->daddr; 524 + common = &payload->common; 525 + ext = &payload->ext; 526 + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; 527 + } 528 + ext->host_ctx = cpu_to_be32(U32_MAX); 529 + ext->vlan_tpid = cpu_to_be16(U16_MAX); 530 + ext->vlan_tci = cpu_to_be16(U16_MAX); 531 
+ ether_addr_copy(common->src_addr, netdev->dev_addr); 532 + neigh_ha_snapshot(common->dst_addr, neigh, netdev); 533 + common->port_id = cpu_to_be32(port_id); 534 + 535 + if (rhashtable_insert_fast(&priv->neigh_table, 536 + &nn_entry->ht_node, 537 + neigh_table_params)) 538 + goto err; 539 + 540 + nfp_tun_link_predt_entries(app, nn_entry); 541 + nfp_flower_xmit_tun_conf(app, mtype, neigh_size, 542 + nn_entry->payload, 543 + GFP_ATOMIC); 544 + } else if (nn_entry && neigh_invalid) { 545 + if (is_ipv6) { 546 + struct flowi6 *flowi6 = (struct flowi6 *)flow; 547 + struct nfp_tun_neigh_v6 *payload; 548 + 549 + payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload; 550 + memset(payload, 0, sizeof(struct nfp_tun_neigh_v6)); 551 + payload->dst_ipv6 = flowi6->daddr; 552 + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6; 553 + } else { 554 + struct flowi4 *flowi4 = (struct flowi4 *)flow; 555 + struct nfp_tun_neigh_v4 *payload; 556 + 557 + payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload; 558 + memset(payload, 0, sizeof(struct nfp_tun_neigh_v4)); 559 + payload->dst_ipv4 = flowi4->daddr; 560 + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; 561 + } 438 562 /* Trigger ARP to verify invalid neighbour state. */ 439 563 neigh_event_send(neigh, NULL); 440 - goto send_msg; 564 + rhashtable_remove_fast(&priv->neigh_table, 565 + &nn_entry->ht_node, 566 + neigh_table_params); 567 + 568 + nfp_flower_xmit_tun_conf(app, mtype, neigh_size, 569 + nn_entry->payload, 570 + GFP_ATOMIC); 571 + 572 + if (nn_entry->flow) 573 + list_del(&nn_entry->list_head); 574 + kfree(nn_entry); 441 575 } 442 576 443 - /* Have a valid neighbour so populate rest of entry. */ 444 - payload.src_ipv4 = flow->saddr; 445 - ether_addr_copy(payload.src_addr, netdev->dev_addr); 446 - neigh_ha_snapshot(payload.dst_addr, neigh, netdev); 447 - payload.port_id = cpu_to_be32(port_id); 448 - /* Add destination of new route to NFP cache. 
*/ 449 - nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4); 577 + spin_unlock_bh(&priv->predt_lock); 578 + return; 450 579 451 - send_msg: 452 - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, 453 - sizeof(struct nfp_tun_neigh), 454 - (unsigned char *)&payload, flag); 455 - } 456 - 457 - static void 458 - nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app, 459 - struct flowi6 *flow, struct neighbour *neigh, gfp_t flag) 460 - { 461 - struct nfp_tun_neigh_v6 payload; 462 - u32 port_id; 463 - 464 - port_id = nfp_flower_get_port_id_from_netdev(app, netdev); 465 - if (!port_id) 466 - return; 467 - 468 - memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6)); 469 - payload.dst_ipv6 = flow->daddr; 470 - 471 - /* If entry has expired send dst IP with all other fields 0. */ 472 - if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { 473 - nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6); 474 - /* Trigger probe to verify invalid neighbour state. */ 475 - neigh_event_send(neigh, NULL); 476 - goto send_msg; 477 - } 478 - 479 - /* Have a valid neighbour so populate rest of entry. */ 480 - payload.src_ipv6 = flow->saddr; 481 - ether_addr_copy(payload.src_addr, netdev->dev_addr); 482 - neigh_ha_snapshot(payload.dst_addr, neigh, netdev); 483 - payload.port_id = cpu_to_be32(port_id); 484 - /* Add destination of new route to NFP cache. 
*/ 485 - nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6); 486 - 487 - send_msg: 488 - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, 489 - sizeof(struct nfp_tun_neigh_v6), 490 - (unsigned char *)&payload, flag); 580 + err: 581 + kfree(nn_entry); 582 + spin_unlock_bh(&priv->predt_lock); 583 + nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n"); 491 584 } 492 585 493 586 static int ··· 567 526 struct flowi6 flow6 = {}; 568 527 struct neighbour *n; 569 528 struct nfp_app *app; 570 - struct rtable *rt; 529 + bool neigh_invalid; 571 530 bool ipv6 = false; 572 531 int err; 573 532 ··· 586 545 if (n->tbl->family == AF_INET6) 587 546 ipv6 = true; 588 547 548 + neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead; 549 + 589 550 if (ipv6) 590 551 flow6.daddr = *(struct in6_addr *)n->primary_key; 591 552 else ··· 600 557 !nfp_flower_internal_port_can_offload(app, n->dev)) 601 558 return NOTIFY_DONE; 602 559 603 - /* Only concerned with changes to routes already added to NFP. */ 604 - if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) || 605 - (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr))) 606 - return NOTIFY_DONE; 607 - 608 560 #if IS_ENABLED(CONFIG_INET) 609 561 if (ipv6) { 610 562 #if IS_ENABLED(CONFIG_IPV6) 611 - struct dst_entry *dst; 563 + if (!neigh_invalid) { 564 + struct dst_entry *dst; 565 + /* Use ipv6_dst_lookup_flow to populate flow6->saddr 566 + * and other fields. This information is only needed 567 + * for new entries, lookup can be skipped when an entry 568 + * gets invalidated - as only the daddr is needed for 569 + * deleting. 
570 + */ 571 + dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL, 572 + &flow6, NULL); 573 + if (IS_ERR(dst)) 574 + return NOTIFY_DONE; 612 575 613 - dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL, 614 - &flow6, NULL); 615 - if (IS_ERR(dst)) 616 - return NOTIFY_DONE; 617 - 618 - dst_release(dst); 619 - flow6.flowi6_proto = IPPROTO_UDP; 620 - nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC); 576 + dst_release(dst); 577 + } 578 + nfp_tun_write_neigh(n->dev, app, &flow6, n, true); 621 579 #else 622 580 return NOTIFY_DONE; 623 581 #endif /* CONFIG_IPV6 */ 624 582 } else { 625 - /* Do a route lookup to populate flow data. */ 626 - rt = ip_route_output_key(dev_net(n->dev), &flow4); 627 - err = PTR_ERR_OR_ZERO(rt); 628 - if (err) 629 - return NOTIFY_DONE; 583 + if (!neigh_invalid) { 584 + struct rtable *rt; 585 + /* Use ip_route_output_key to populate flow4->saddr and 586 + * other fields. This information is only needed for 587 + * new entries, lookup can be skipped when an entry 588 + * gets invalidated - as only the daddr is needed for 589 + * deleting. 
590 + */ 591 + rt = ip_route_output_key(dev_net(n->dev), &flow4); 592 + err = PTR_ERR_OR_ZERO(rt); 593 + if (err) 594 + return NOTIFY_DONE; 630 595 631 - ip_rt_put(rt); 632 - 633 - flow4.flowi4_proto = IPPROTO_UDP; 634 - nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC); 596 + ip_rt_put(rt); 597 + } 598 + nfp_tun_write_neigh(n->dev, app, &flow4, n, false); 635 599 } 636 600 #else 637 601 return NOTIFY_DONE; ··· 681 631 ip_rt_put(rt); 682 632 if (!n) 683 633 goto fail_rcu_unlock; 684 - nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC); 634 + nfp_tun_write_neigh(n->dev, app, &flow, n, false); 685 635 neigh_release(n); 686 636 rcu_read_unlock(); 687 637 return; ··· 723 673 if (!n) 724 674 goto fail_rcu_unlock; 725 675 726 - nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC); 676 + nfp_tun_write_neigh(n->dev, app, &flow, n, true); 727 677 neigh_release(n); 728 678 rcu_read_unlock(); 729 679 return; ··· 1418 1368 INIT_LIST_HEAD(&priv->tun.ipv6_off_list); 1419 1369 1420 1370 /* Initialise priv data for neighbour offloading. 
*/ 1421 - spin_lock_init(&priv->tun.neigh_off_lock_v4); 1422 - INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4); 1423 - spin_lock_init(&priv->tun.neigh_off_lock_v6); 1424 - INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6); 1425 1371 priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler; 1426 1372 1427 1373 err = register_netevent_notifier(&priv->tun.neigh_nb); ··· 1432 1386 1433 1387 void nfp_tunnel_config_stop(struct nfp_app *app) 1434 1388 { 1435 - struct nfp_offloaded_route *route_entry, *temp; 1436 1389 struct nfp_flower_priv *priv = app->priv; 1437 1390 struct nfp_ipv4_addr_entry *ip_entry; 1438 - struct nfp_tun_neigh_v6 ipv6_route; 1439 - struct nfp_tun_neigh ipv4_route; 1440 1391 struct list_head *ptr, *storage; 1441 1392 1442 1393 unregister_netevent_notifier(&priv->tun.neigh_nb); ··· 1449 1406 1450 1407 mutex_destroy(&priv->tun.ipv6_off_lock); 1451 1408 1452 - /* Free memory in the route list and remove entries from fw cache. */ 1453 - list_for_each_entry_safe(route_entry, temp, 1454 - &priv->tun.neigh_off_list_v4, list) { 1455 - memset(&ipv4_route, 0, sizeof(ipv4_route)); 1456 - memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add, 1457 - sizeof(ipv4_route.dst_ipv4)); 1458 - list_del(&route_entry->list); 1459 - kfree(route_entry); 1460 - 1461 - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, 1462 - sizeof(struct nfp_tun_neigh), 1463 - (unsigned char *)&ipv4_route, 1464 - GFP_KERNEL); 1465 - } 1466 - 1467 - list_for_each_entry_safe(route_entry, temp, 1468 - &priv->tun.neigh_off_list_v6, list) { 1469 - memset(&ipv6_route, 0, sizeof(ipv6_route)); 1470 - memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add, 1471 - sizeof(ipv6_route.dst_ipv6)); 1472 - list_del(&route_entry->list); 1473 - kfree(route_entry); 1474 - 1475 - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, 1476 - sizeof(struct nfp_tun_neigh), 1477 - (unsigned char *)&ipv6_route, 1478 - GFP_KERNEL); 1479 - } 1480 - 1481 1409 /* Destroy rhash. 
Entries should be cleaned on netdev notifier unreg. */ 1482 1410 rhashtable_free_and_destroy(&priv->tun.offloaded_macs, 1483 1411 nfp_check_rhashtable_empty, NULL); 1412 + 1413 + nfp_tun_cleanup_nn_entries(app); 1484 1414 }