Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
ipsec-next 2022-11-26

1) Remove redundant variable in esp6.
From Colin Ian King.

2) Update x->lastused for every packet. It was used only for
outgoing mobile IPv6 packets, but proved to be useful
to check if an SA is still in use in general.
From Antony Antony.

3) Remove unused variable in xfrm_byidx_resize.
From Leon Romanovsky.

4) Finalize extack support for xfrm.
From Sabrina Dubroca.

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next:
xfrm: add extack to xfrm_set_spdinfo
xfrm: add extack to xfrm_alloc_userspi
xfrm: add extack to xfrm_do_migrate
xfrm: add extack to xfrm_new_ae and xfrm_replay_verify_len
xfrm: add extack to xfrm_del_sa
xfrm: add extack to xfrm_add_sa_expire
xfrm: a few coding style clean ups
xfrm: Remove not-used total variable
xfrm: update x->lastused for every packet
esp6: remove redundant variable err
====================

Link: https://lore.kernel.org/r/20221126110303.1859238-1-steffen.klassert@secunet.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+114 -49
+5 -3
include/net/xfrm.h
··· 1681 1681 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); 1682 1682 void xfrm_policy_hash_rebuild(struct net *net); 1683 1683 u32 xfrm_get_acqseq(void); 1684 - int verify_spi_info(u8 proto, u32 min, u32 max); 1685 - int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); 1684 + int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack); 1685 + int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi, 1686 + struct netlink_ext_ack *extack); 1686 1687 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, 1687 1688 u8 mode, u32 reqid, u32 if_id, u8 proto, 1688 1689 const xfrm_address_t *daddr, ··· 1704 1703 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 1705 1704 struct xfrm_migrate *m, int num_bundles, 1706 1705 struct xfrm_kmaddress *k, struct net *net, 1707 - struct xfrm_encap_tmpl *encap, u32 if_id); 1706 + struct xfrm_encap_tmpl *encap, u32 if_id, 1707 + struct netlink_ext_ack *extack); 1708 1708 #endif 1709 1709 1710 1710 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+1 -2
net/ipv6/esp6_offload.c
··· 56 56 __be32 seq; 57 57 __be32 spi; 58 58 int nhoff; 59 - int err; 60 59 61 60 if (!pskb_pull(skb, offset)) 62 61 return NULL; 63 62 64 - if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) 63 + if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0) 65 64 goto out; 66 65 67 66 xo = xfrm_offload(skb);
+3 -3
net/key/af_key.c
··· 1377 1377 max_spi = range->sadb_spirange_max; 1378 1378 } 1379 1379 1380 - err = verify_spi_info(x->id.proto, min_spi, max_spi); 1380 + err = verify_spi_info(x->id.proto, min_spi, max_spi, NULL); 1381 1381 if (err) { 1382 1382 xfrm_state_put(x); 1383 1383 return err; 1384 1384 } 1385 1385 1386 - err = xfrm_alloc_spi(x, min_spi, max_spi); 1386 + err = xfrm_alloc_spi(x, min_spi, max_spi, NULL); 1387 1387 resp_skb = err ? ERR_PTR(err) : pfkey_xfrm_state2msg(x); 1388 1388 1389 1389 if (IS_ERR(resp_skb)) { ··· 2626 2626 } 2627 2627 2628 2628 return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i, 2629 - kma ? &k : NULL, net, NULL, 0); 2629 + kma ? &k : NULL, net, NULL, 0, NULL); 2630 2630 2631 2631 out: 2632 2632 return err;
+1
net/xfrm/xfrm_input.c
··· 671 671 672 672 x->curlft.bytes += skb->len; 673 673 x->curlft.packets++; 674 + x->lastused = ktime_get_real_seconds(); 674 675 675 676 spin_unlock(&x->lock); 676 677
+1 -2
net/xfrm/xfrm_output.c
··· 209 209 __skb_pull(skb, hdr_len); 210 210 memmove(ipv6_hdr(skb), iph, hdr_len); 211 211 212 - x->lastused = ktime_get_real_seconds(); 213 - 214 212 return 0; 215 213 #else 216 214 WARN_ON_ONCE(1); ··· 532 534 533 535 x->curlft.bytes += skb->len; 534 536 x->curlft.packets++; 537 + x->lastused = ktime_get_real_seconds(); 535 538 536 539 spin_unlock_bh(&x->lock); 537 540
+26 -11
net/xfrm/xfrm_policy.c
··· 605 605 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); 606 606 } 607 607 608 - static void xfrm_byidx_resize(struct net *net, int total) 608 + static void xfrm_byidx_resize(struct net *net) 609 609 { 610 610 unsigned int hmask = net->xfrm.policy_idx_hmask; 611 611 unsigned int nhashmask = xfrm_new_hash_mask(hmask); ··· 683 683 xfrm_bydst_resize(net, dir); 684 684 } 685 685 if (xfrm_byidx_should_resize(net, total)) 686 - xfrm_byidx_resize(net, total); 686 + xfrm_byidx_resize(net); 687 687 688 688 mutex_unlock(&hash_resize_mutex); 689 689 } ··· 4333 4333 4334 4334 /* update endpoint address(es) of template(s) */ 4335 4335 static int xfrm_policy_migrate(struct xfrm_policy *pol, 4336 - struct xfrm_migrate *m, int num_migrate) 4336 + struct xfrm_migrate *m, int num_migrate, 4337 + struct netlink_ext_ack *extack) 4337 4338 { 4338 4339 struct xfrm_migrate *mp; 4339 4340 int i, j, n = 0; ··· 4342 4341 write_lock_bh(&pol->lock); 4343 4342 if (unlikely(pol->walk.dead)) { 4344 4343 /* target policy has been deleted */ 4344 + NL_SET_ERR_MSG(extack, "Target policy not found"); 4345 4345 write_unlock_bh(&pol->lock); 4346 4346 return -ENOENT; 4347 4347 } ··· 4374 4372 return 0; 4375 4373 } 4376 4374 4377 - static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) 4375 + static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate, 4376 + struct netlink_ext_ack *extack) 4378 4377 { 4379 4378 int i, j; 4380 4379 4381 - if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) 4380 + if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) { 4381 + NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)"); 4382 4382 return -EINVAL; 4383 + } 4383 4384 4384 4385 for (i = 0; i < num_migrate; i++) { 4385 4386 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 4386 - xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 4387 + xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) { 4388 + 
NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null"); 4387 4389 return -EINVAL; 4390 + } 4388 4391 4389 4392 /* check if there is any duplicated entry */ 4390 4393 for (j = i + 1; j < num_migrate; j++) { ··· 4400 4393 m[i].proto == m[j].proto && 4401 4394 m[i].mode == m[j].mode && 4402 4395 m[i].reqid == m[j].reqid && 4403 - m[i].old_family == m[j].old_family) 4396 + m[i].old_family == m[j].old_family) { 4397 + NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique"); 4404 4398 return -EINVAL; 4399 + } 4405 4400 } 4406 4401 } 4407 4402 ··· 4413 4404 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 4414 4405 struct xfrm_migrate *m, int num_migrate, 4415 4406 struct xfrm_kmaddress *k, struct net *net, 4416 - struct xfrm_encap_tmpl *encap, u32 if_id) 4407 + struct xfrm_encap_tmpl *encap, u32 if_id, 4408 + struct netlink_ext_ack *extack) 4417 4409 { 4418 4410 int i, err, nx_cur = 0, nx_new = 0; 4419 4411 struct xfrm_policy *pol = NULL; ··· 4424 4414 struct xfrm_migrate *mp; 4425 4415 4426 4416 /* Stage 0 - sanity checks */ 4427 - if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 4417 + err = xfrm_migrate_check(m, num_migrate, extack); 4418 + if (err < 0) 4428 4419 goto out; 4429 4420 4430 4421 if (dir >= XFRM_POLICY_MAX) { 4422 + NL_SET_ERR_MSG(extack, "Invalid policy direction"); 4431 4423 err = -EINVAL; 4432 4424 goto out; 4433 4425 } 4434 4426 4435 4427 /* Stage 1 - find policy */ 4436 - if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) { 4428 + pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id); 4429 + if (!pol) { 4430 + NL_SET_ERR_MSG(extack, "Target policy not found"); 4437 4431 err = -ENOENT; 4438 4432 goto out; 4439 4433 } ··· 4459 4445 } 4460 4446 4461 4447 /* Stage 3 - update policy */ 4462 - if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) 4448 + err = xfrm_policy_migrate(pol, m, num_migrate, extack); 4449 + if (err < 0) 4463 4450 goto 
restore_state; 4464 4451 4465 4452 /* Stage 4 - delete old state(s) */
+16 -5
net/xfrm/xfrm_state.c
··· 2017 2017 } 2018 2018 EXPORT_SYMBOL(xfrm_get_acqseq); 2019 2019 2020 - int verify_spi_info(u8 proto, u32 min, u32 max) 2020 + int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack) 2021 2021 { 2022 2022 switch (proto) { 2023 2023 case IPPROTO_AH: ··· 2026 2026 2027 2027 case IPPROTO_COMP: 2028 2028 /* IPCOMP spi is 16-bits. */ 2029 - if (max >= 0x10000) 2029 + if (max >= 0x10000) { 2030 + NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535"); 2030 2031 return -EINVAL; 2032 + } 2031 2033 break; 2032 2034 2033 2035 default: 2036 + NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP"); 2034 2037 return -EINVAL; 2035 2038 } 2036 2039 2037 - if (min > max) 2040 + if (min > max) { 2041 + NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max"); 2038 2042 return -EINVAL; 2043 + } 2039 2044 2040 2045 return 0; 2041 2046 } 2042 2047 EXPORT_SYMBOL(verify_spi_info); 2043 2048 2044 - int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) 2049 + int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high, 2050 + struct netlink_ext_ack *extack) 2045 2051 { 2046 2052 struct net *net = xs_net(x); 2047 2053 unsigned int h; ··· 2059 2053 u32 mark = x->mark.v & x->mark.m; 2060 2054 2061 2055 spin_lock_bh(&x->lock); 2062 - if (x->km.state == XFRM_STATE_DEAD) 2056 + if (x->km.state == XFRM_STATE_DEAD) { 2057 + NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state"); 2063 2058 goto unlock; 2059 + } 2064 2060 2065 2061 err = 0; 2066 2062 if (x->id.spi) ··· 2073 2065 if (minspi == maxspi) { 2074 2066 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family); 2075 2067 if (x0) { 2068 + NL_SET_ERR_MSG(extack, "Requested SPI is already in use"); 2076 2069 xfrm_state_put(x0); 2077 2070 goto unlock; 2078 2071 } ··· 2098 2089 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2099 2090 2100 2091 err = 0; 2092 + } else { 2093 + NL_SET_ERR_MSG(extack, "No SPI available in the requested range"); 2101 2094 } 2102 2095 2103 
2096 unlock:
+61 -23
net/xfrm/xfrm_user.c
··· 515 515 } 516 516 517 517 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, 518 - struct nlattr *rp) 518 + struct nlattr *rp, 519 + struct netlink_ext_ack *extack) 519 520 { 520 521 struct xfrm_replay_state_esn *up; 521 522 unsigned int ulen; ··· 529 528 530 529 /* Check the overall length and the internal bitmap length to avoid 531 530 * potential overflow. */ 532 - if (nla_len(rp) < (int)ulen || 533 - xfrm_replay_state_esn_len(replay_esn) != ulen || 534 - replay_esn->bmp_len != up->bmp_len) 531 + if (nla_len(rp) < (int)ulen) { 532 + NL_SET_ERR_MSG(extack, "ESN attribute is too short"); 535 533 return -EINVAL; 534 + } 536 535 537 - if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) 536 + if (xfrm_replay_state_esn_len(replay_esn) != ulen) { 537 + NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size"); 538 538 return -EINVAL; 539 + } 540 + 541 + if (replay_esn->bmp_len != up->bmp_len) { 542 + NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap"); 543 + return -EINVAL; 544 + } 545 + 546 + if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) { 547 + NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap"); 548 + return -EINVAL; 549 + } 539 550 540 551 return 0; 541 552 } ··· 875 862 goto out; 876 863 877 864 if (xfrm_state_kern(x)) { 865 + NL_SET_ERR_MSG(extack, "SA is in use by tunnels"); 878 866 err = -EPERM; 879 867 goto out; 880 868 } 881 869 882 870 err = xfrm_state_delete(x); 883 - 884 871 if (err < 0) 885 872 goto out; 886 873 ··· 1367 1354 if (attrs[XFRMA_SPD_IPV4_HTHRESH]) { 1368 1355 struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH]; 1369 1356 1370 - if (nla_len(rta) < sizeof(*thresh4)) 1357 + if (nla_len(rta) < sizeof(*thresh4)) { 1358 + NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length"); 1371 1359 return -EINVAL; 1360 + } 1372 1361 thresh4 = nla_data(rta); 1373 - if (thresh4->lbits > 32 || thresh4->rbits > 32) 1362 + if 
(thresh4->lbits > 32 || thresh4->rbits > 32) { 1363 + NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)"); 1374 1364 return -EINVAL; 1365 + } 1375 1366 } 1376 1367 if (attrs[XFRMA_SPD_IPV6_HTHRESH]) { 1377 1368 struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH]; 1378 1369 1379 - if (nla_len(rta) < sizeof(*thresh6)) 1370 + if (nla_len(rta) < sizeof(*thresh6)) { 1371 + NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length"); 1380 1372 return -EINVAL; 1373 + } 1381 1374 thresh6 = nla_data(rta); 1382 - if (thresh6->lbits > 128 || thresh6->rbits > 128) 1375 + if (thresh6->lbits > 128 || thresh6->rbits > 128) { 1376 + NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)"); 1383 1377 return -EINVAL; 1378 + } 1384 1379 } 1385 1380 1386 1381 if (thresh4 || thresh6) { ··· 1531 1510 u32 if_id = 0; 1532 1511 1533 1512 p = nlmsg_data(nlh); 1534 - err = verify_spi_info(p->info.id.proto, p->min, p->max); 1513 + err = verify_spi_info(p->info.id.proto, p->min, p->max, extack); 1535 1514 if (err) 1536 1515 goto out_noput; 1537 1516 ··· 1559 1538 &p->info.saddr, 1, 1560 1539 family); 1561 1540 err = -ENOENT; 1562 - if (x == NULL) 1541 + if (!x) { 1542 + NL_SET_ERR_MSG(extack, "Target ACQUIRE not found"); 1563 1543 goto out_noput; 1544 + } 1564 1545 1565 - err = xfrm_alloc_spi(x, p->min, p->max); 1546 + err = xfrm_alloc_spi(x, p->min, p->max, extack); 1566 1547 if (err) 1567 1548 goto out; 1568 1549 ··· 2456 2433 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; 2457 2434 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; 2458 2435 2459 - if (!lt && !rp && !re && !et && !rt) 2436 + if (!lt && !rp && !re && !et && !rt) { 2437 + NL_SET_ERR_MSG(extack, "Missing required attribute for AE"); 2460 2438 return err; 2439 + } 2461 2440 2462 2441 /* pedantic mode - thou shalt sayeth replaceth */ 2463 - if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 2442 + if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) { 2443 + NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is 
required"); 2464 2444 return err; 2445 + } 2465 2446 2466 2447 mark = xfrm_mark_get(attrs, &m); 2467 2448 ··· 2473 2446 if (x == NULL) 2474 2447 return -ESRCH; 2475 2448 2476 - if (x->km.state != XFRM_STATE_VALID) 2449 + if (x->km.state != XFRM_STATE_VALID) { 2450 + NL_SET_ERR_MSG(extack, "SA must be in VALID state"); 2477 2451 goto out; 2452 + } 2478 2453 2479 - err = xfrm_replay_verify_len(x->replay_esn, re); 2454 + err = xfrm_replay_verify_len(x->replay_esn, re, extack); 2480 2455 if (err) 2481 2456 goto out; 2482 2457 ··· 2613 2584 2614 2585 spin_lock_bh(&x->lock); 2615 2586 err = -EINVAL; 2616 - if (x->km.state != XFRM_STATE_VALID) 2587 + if (x->km.state != XFRM_STATE_VALID) { 2588 + NL_SET_ERR_MSG(extack, "SA must be in VALID state"); 2617 2589 goto out; 2590 + } 2591 + 2618 2592 km_state_expired(x, ue->hard, nlh->nlmsg_pid); 2619 2593 2620 2594 if (ue->hard) { ··· 2697 2665 #ifdef CONFIG_XFRM_MIGRATE 2698 2666 static int copy_from_user_migrate(struct xfrm_migrate *ma, 2699 2667 struct xfrm_kmaddress *k, 2700 - struct nlattr **attrs, int *num) 2668 + struct nlattr **attrs, int *num, 2669 + struct netlink_ext_ack *extack) 2701 2670 { 2702 2671 struct nlattr *rt = attrs[XFRMA_MIGRATE]; 2703 2672 struct xfrm_user_migrate *um; ··· 2717 2684 um = nla_data(rt); 2718 2685 num_migrate = nla_len(rt) / sizeof(*um); 2719 2686 2720 - if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) 2687 + if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) { 2688 + NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)"); 2721 2689 return -EINVAL; 2690 + } 2722 2691 2723 2692 for (i = 0; i < num_migrate; i++, um++, ma++) { 2724 2693 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr)); ··· 2753 2718 struct xfrm_encap_tmpl *encap = NULL; 2754 2719 u32 if_id = 0; 2755 2720 2756 - if (attrs[XFRMA_MIGRATE] == NULL) 2721 + if (!attrs[XFRMA_MIGRATE]) { 2722 + NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute"); 2757 2723 return 
-EINVAL; 2724 + } 2758 2725 2759 2726 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL; 2760 2727 ··· 2764 2727 if (err) 2765 2728 return err; 2766 2729 2767 - err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n); 2730 + err = copy_from_user_migrate(m, kmp, attrs, &n, extack); 2768 2731 if (err) 2769 2732 return err; 2770 2733 ··· 2781 2744 if (attrs[XFRMA_IF_ID]) 2782 2745 if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 2783 2746 2784 - err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id); 2747 + err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, 2748 + if_id, extack); 2785 2749 2786 2750 kfree(encap); 2787 2751