Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: macsec: add nla support for changing the offloading selection

MACsec offloading to underlying hardware devices is disabled by default
(the software implementation is used). This patch adds support for
changing this setting through the MACsec netlink interface. Many checks
are done when enabling offloading on a given MACsec interface as there
are limitations (it must be supported by the hardware, only a single
interface can be offloaded on a given physical device at a time, rules
can't be moved for now).

Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Antoine Tenart; committed by David S. Miller.
dcb780fb 3cf3227a

+153 -3 (commit total, 2 files changed)
+142 -3
drivers/net/macsec.c
··· 1484 1484 [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, 1485 1485 [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, 1486 1486 [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, 1487 + [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED }, 1487 1488 }; 1488 1489 1489 1490 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { ··· 1500 1499 .len = MACSEC_KEYID_LEN, }, 1501 1500 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, 1502 1501 .len = MACSEC_MAX_KEY_LEN, }, 1502 + }; 1503 + 1504 + static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = { 1505 + [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 }, 1503 1506 }; 1504 1507 1505 1508 /* Offloads an operation to a device driver */ ··· 2334 2329 return ret; 2335 2330 } 2336 2331 2332 + static bool macsec_is_configured(struct macsec_dev *macsec) 2333 + { 2334 + struct macsec_secy *secy = &macsec->secy; 2335 + struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2336 + int i; 2337 + 2338 + if (secy->n_rx_sc > 0) 2339 + return true; 2340 + 2341 + for (i = 0; i < MACSEC_NUM_AN; i++) 2342 + if (tx_sc->sa[i]) 2343 + return true; 2344 + 2345 + return false; 2346 + } 2347 + 2348 + static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) 2349 + { 2350 + struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; 2351 + enum macsec_offload offload, prev_offload; 2352 + int (*func)(struct macsec_context *ctx); 2353 + struct nlattr **attrs = info->attrs; 2354 + struct net_device *dev, *loop_dev; 2355 + const struct macsec_ops *ops; 2356 + struct macsec_context ctx; 2357 + struct macsec_dev *macsec; 2358 + struct net *loop_net; 2359 + int ret; 2360 + 2361 + if (!attrs[MACSEC_ATTR_IFINDEX]) 2362 + return -EINVAL; 2363 + 2364 + if (!attrs[MACSEC_ATTR_OFFLOAD]) 2365 + return -EINVAL; 2366 + 2367 + if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, 2368 + attrs[MACSEC_ATTR_OFFLOAD], 2369 + macsec_genl_offload_policy, NULL)) 2370 + return -EINVAL; 2371 + 2372 + dev = 
get_dev_from_nl(genl_info_net(info), attrs); 2373 + if (IS_ERR(dev)) 2374 + return PTR_ERR(dev); 2375 + macsec = macsec_priv(dev); 2376 + 2377 + offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); 2378 + if (macsec->offload == offload) 2379 + return 0; 2380 + 2381 + /* Check if the offloading mode is supported by the underlying layers */ 2382 + if (offload != MACSEC_OFFLOAD_OFF && 2383 + !macsec_check_offload(offload, macsec)) 2384 + return -EOPNOTSUPP; 2385 + 2386 + if (offload == MACSEC_OFFLOAD_OFF) 2387 + goto skip_limitation; 2388 + 2389 + /* Check the physical interface isn't offloading another interface 2390 + * first. 2391 + */ 2392 + for_each_net(loop_net) { 2393 + for_each_netdev(loop_net, loop_dev) { 2394 + struct macsec_dev *priv; 2395 + 2396 + if (!netif_is_macsec(loop_dev)) 2397 + continue; 2398 + 2399 + priv = macsec_priv(loop_dev); 2400 + 2401 + if (priv->real_dev == macsec->real_dev && 2402 + priv->offload != MACSEC_OFFLOAD_OFF) 2403 + return -EBUSY; 2404 + } 2405 + } 2406 + 2407 + skip_limitation: 2408 + /* Check if the net device is busy. */ 2409 + if (netif_running(dev)) 2410 + return -EBUSY; 2411 + 2412 + rtnl_lock(); 2413 + 2414 + prev_offload = macsec->offload; 2415 + macsec->offload = offload; 2416 + 2417 + /* Check if the device already has rules configured: we do not support 2418 + * rules migration. 2419 + */ 2420 + if (macsec_is_configured(macsec)) { 2421 + ret = -EBUSY; 2422 + goto rollback; 2423 + } 2424 + 2425 + ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? 
prev_offload : offload, 2426 + macsec, &ctx); 2427 + if (!ops) { 2428 + ret = -EOPNOTSUPP; 2429 + goto rollback; 2430 + } 2431 + 2432 + if (prev_offload == MACSEC_OFFLOAD_OFF) 2433 + func = ops->mdo_add_secy; 2434 + else 2435 + func = ops->mdo_del_secy; 2436 + 2437 + ctx.secy = &macsec->secy; 2438 + ret = macsec_offload(func, &ctx); 2439 + if (ret) 2440 + goto rollback; 2441 + 2442 + rtnl_unlock(); 2443 + return 0; 2444 + 2445 + rollback: 2446 + macsec->offload = prev_offload; 2447 + 2448 + rtnl_unlock(); 2449 + return ret; 2450 + } 2451 + 2337 2452 static int copy_tx_sa_stats(struct sk_buff *skb, 2338 2453 struct macsec_tx_sa_stats __percpu *pstats) 2339 2454 { ··· 2715 2590 dump_secy(struct macsec_secy *secy, struct net_device *dev, 2716 2591 struct sk_buff *skb, struct netlink_callback *cb) 2717 2592 { 2718 - struct macsec_rx_sc *rx_sc; 2593 + struct macsec_dev *macsec = netdev_priv(dev); 2719 2594 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2720 2595 struct nlattr *txsa_list, *rxsc_list; 2721 - int i, j; 2722 - void *hdr; 2596 + struct macsec_rx_sc *rx_sc; 2723 2597 struct nlattr *attr; 2598 + void *hdr; 2599 + int i, j; 2724 2600 2725 2601 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2726 2602 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); ··· 2732 2606 2733 2607 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2734 2608 goto nla_put_failure; 2609 + 2610 + attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 2611 + if (!attr) 2612 + goto nla_put_failure; 2613 + if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 2614 + goto nla_put_failure; 2615 + nla_nest_end(skb, attr); 2735 2616 2736 2617 if (nla_put_secy(secy, skb)) 2737 2618 goto nla_put_failure; ··· 3003 2870 .cmd = MACSEC_CMD_UPD_RXSA, 3004 2871 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3005 2872 .doit = macsec_upd_rxsa, 2873 + .flags = GENL_ADMIN_PERM, 2874 + }, 2875 + { 2876 + .cmd = MACSEC_CMD_UPD_OFFLOAD, 2877 + .validate = 
GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2878 + .doit = macsec_upd_offload, 3006 2879 .flags = GENL_ADMIN_PERM, 3007 2880 }, 3008 2881 };
+11
include/uapi/linux/if_macsec.h
··· 45 45 MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */ 46 46 MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */ 47 47 MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */ 48 + MACSEC_ATTR_OFFLOAD, /* config, nested, macsec_offload_attrs */ 48 49 __MACSEC_ATTR_END, 49 50 NUM_MACSEC_ATTR = __MACSEC_ATTR_END, 50 51 MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1, ··· 98 97 MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1, 99 98 }; 100 99 100 + enum macsec_offload_attrs { 101 + MACSEC_OFFLOAD_ATTR_UNSPEC, 102 + MACSEC_OFFLOAD_ATTR_TYPE, /* config/dump, u8 0..2 */ 103 + MACSEC_OFFLOAD_ATTR_PAD, 104 + __MACSEC_OFFLOAD_ATTR_END, 105 + NUM_MACSEC_OFFLOAD_ATTR = __MACSEC_OFFLOAD_ATTR_END, 106 + MACSEC_OFFLOAD_ATTR_MAX = __MACSEC_OFFLOAD_ATTR_END - 1, 107 + }; 108 + 101 109 enum macsec_nl_commands { 102 110 MACSEC_CMD_GET_TXSC, 103 111 MACSEC_CMD_ADD_RXSC, ··· 118 108 MACSEC_CMD_ADD_RXSA, 119 109 MACSEC_CMD_DEL_RXSA, 120 110 MACSEC_CMD_UPD_RXSA, 111 + MACSEC_CMD_UPD_OFFLOAD, 121 112 }; 122 113 123 114 /* u64 per-RXSC stats */