Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mctp-core-protocol-updates-minor-fixes-tests'

Jeremy Kerr says:

====================
MCTP core protocol updates, minor fixes & tests

This series implements some protocol improvements for AF_MCTP,
particularly for systems with multiple MCTP networks defined. For those,
we need to add the network ID to the tag lookups, which then suggests an
updated version of the tag allocate / drop ioctl to allow the net ID to
be specified there too.

The ioctl change affects uapi, so might warrant some extra attention.

There are also a couple of new kunit tests for multiple-net
configurations.

We have a fix for populating the flow data when fragmenting, and a
testcase for that too.

Of course, any queries/comments/etc., please let me know!
====================

Link: https://lore.kernel.org/r/cover.1708335994.git.jk@codeconstruct.com.au
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+630 -55
+4 -2
include/net/mctp.h
··· 87 87 }; 88 88 89 89 /* Key for matching incoming packets to sockets or reassembly contexts. 90 - * Packets are matched on (src,dest,tag). 90 + * Packets are matched on (peer EID, local EID, tag). 91 91 * 92 92 * Lifetime / locking requirements: 93 93 * ··· 133 133 * - through an expiry timeout, on a per-socket timer 134 134 */ 135 135 struct mctp_sk_key { 136 + unsigned int net; 136 137 mctp_eid_t peer_addr; 137 138 mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */ 138 139 __u8 tag; /* incoming tag match; invert TO for local */ ··· 255 254 256 255 void mctp_key_unref(struct mctp_sk_key *key); 257 256 struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, 258 - mctp_eid_t daddr, mctp_eid_t saddr, 257 + unsigned int netid, 258 + mctp_eid_t local, mctp_eid_t peer, 259 259 bool manual, u8 *tagp); 260 260 261 261 /* routing <--> device interface */
+32
include/uapi/linux/mctp.h
··· 50 50 51 51 #define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0) 52 52 #define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1) 53 + #define SIOCMCTPALLOCTAG2 (SIOCPROTOPRIVATE + 2) 54 + #define SIOCMCTPDROPTAG2 (SIOCPROTOPRIVATE + 3) 53 55 56 + /* Deprecated: use mctp_ioc_tag_ctl2 / TAG2 ioctls instead, which defines the 57 + * MCTP network ID as part of the allocated tag. Using this assumes the default 58 + * net ID for allocated tags, which may not give correct behaviour on system 59 + * with multiple networks configured. 60 + */ 54 61 struct mctp_ioc_tag_ctl { 55 62 mctp_eid_t peer_addr; 56 63 ··· 70 63 */ 71 64 __u8 tag; 72 65 __u16 flags; 66 + }; 67 + 68 + struct mctp_ioc_tag_ctl2 { 69 + /* Peer details: network ID, peer EID, local EID. All set by the 70 + * caller. 71 + * 72 + * Local EID must be MCTP_ADDR_NULL or MCTP_ADDR_ANY in current 73 + * kernels. 74 + */ 75 + unsigned int net; 76 + mctp_eid_t peer_addr; 77 + mctp_eid_t local_addr; 78 + 79 + /* Set by caller, but no flags defined currently. Must be 0 */ 80 + __u16 flags; 81 + 82 + /* For SIOCMCTPALLOCTAG2: must be passed as zero, kernel will 83 + * populate with the allocated tag value. Returned tag value will 84 + * always have TO and PREALLOC set. 85 + * 86 + * For SIOCMCTPDROPTAG2: userspace provides tag value to drop, from 87 + * a prior SIOCMCTPALLOCTAG2 call (and so must have TO and PREALLOC set). 88 + */ 89 + __u8 tag; 90 + 73 91 }; 74 92 75 93 #endif /* __UAPI_MCTP_H */
+8
net/core/skbuff.c
··· 6850 6850 xfrm_state_hold(sp->xvec[i]); 6851 6851 } 6852 6852 #endif 6853 + #ifdef CONFIG_MCTP_FLOWS 6854 + if (old_active & (1 << SKB_EXT_MCTP)) { 6855 + struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP); 6856 + 6857 + if (flow->key) 6858 + refcount_inc(&flow->key->refs); 6859 + } 6860 + #endif 6853 6861 __skb_ext_put(old); 6854 6862 return new; 6855 6863 }
+1
net/mctp/Kconfig
··· 14 14 15 15 config MCTP_TEST 16 16 bool "MCTP core tests" if !KUNIT_ALL_TESTS 17 + select MCTP_FLOWS 17 18 depends on MCTP=y && KUNIT=y 18 19 default KUNIT_ALL_TESTS 19 20
+97 -20
net/mctp/af_mctp.c
··· 350 350 return -EINVAL; 351 351 } 352 352 353 - static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg) 353 + /* helpers for reading/writing the tag ioc, handling compatibility across the 354 + * two versions, and some basic API error checking 355 + */ 356 + static int mctp_ioctl_tag_copy_from_user(unsigned long arg, 357 + struct mctp_ioc_tag_ctl2 *ctl, 358 + bool tagv2) 359 + { 360 + struct mctp_ioc_tag_ctl ctl_compat; 361 + unsigned long size; 362 + void *ptr; 363 + int rc; 364 + 365 + if (tagv2) { 366 + size = sizeof(*ctl); 367 + ptr = ctl; 368 + } else { 369 + size = sizeof(ctl_compat); 370 + ptr = &ctl_compat; 371 + } 372 + 373 + rc = copy_from_user(ptr, (void __user *)arg, size); 374 + if (rc) 375 + return -EFAULT; 376 + 377 + if (!tagv2) { 378 + /* compat, using defaults for new fields */ 379 + ctl->net = MCTP_INITIAL_DEFAULT_NET; 380 + ctl->peer_addr = ctl_compat.peer_addr; 381 + ctl->local_addr = MCTP_ADDR_ANY; 382 + ctl->flags = ctl_compat.flags; 383 + ctl->tag = ctl_compat.tag; 384 + } 385 + 386 + if (ctl->flags) 387 + return -EINVAL; 388 + 389 + if (ctl->local_addr != MCTP_ADDR_ANY && 390 + ctl->local_addr != MCTP_ADDR_NULL) 391 + return -EINVAL; 392 + 393 + return 0; 394 + } 395 + 396 + static int mctp_ioctl_tag_copy_to_user(unsigned long arg, 397 + struct mctp_ioc_tag_ctl2 *ctl, 398 + bool tagv2) 399 + { 400 + struct mctp_ioc_tag_ctl ctl_compat; 401 + unsigned long size; 402 + void *ptr; 403 + int rc; 404 + 405 + if (tagv2) { 406 + ptr = ctl; 407 + size = sizeof(*ctl); 408 + } else { 409 + ctl_compat.peer_addr = ctl->peer_addr; 410 + ctl_compat.tag = ctl->tag; 411 + ctl_compat.flags = ctl->flags; 412 + 413 + ptr = &ctl_compat; 414 + size = sizeof(ctl_compat); 415 + } 416 + 417 + rc = copy_to_user((void __user *)arg, ptr, size); 418 + if (rc) 419 + return -EFAULT; 420 + 421 + return 0; 422 + } 423 + 424 + static int mctp_ioctl_alloctag(struct mctp_sock *msk, bool tagv2, 425 + unsigned long arg) 354 426 { 355 427 struct net *net = 
sock_net(&msk->sk); 356 428 struct mctp_sk_key *key = NULL; 357 - struct mctp_ioc_tag_ctl ctl; 429 + struct mctp_ioc_tag_ctl2 ctl; 358 430 unsigned long flags; 359 431 u8 tag; 432 + int rc; 360 433 361 - if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl))) 362 - return -EFAULT; 434 + rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2); 435 + if (rc) 436 + return rc; 363 437 364 438 if (ctl.tag) 365 439 return -EINVAL; 366 440 367 - if (ctl.flags) 368 - return -EINVAL; 369 - 370 - key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY, 371 - true, &tag); 441 + key = mctp_alloc_local_tag(msk, ctl.net, MCTP_ADDR_ANY, 442 + ctl.peer_addr, true, &tag); 372 443 if (IS_ERR(key)) 373 444 return PTR_ERR(key); 374 445 375 446 ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC; 376 - if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) { 447 + rc = mctp_ioctl_tag_copy_to_user(arg, &ctl, tagv2); 448 + if (rc) { 377 449 unsigned long fl2; 378 450 /* Unwind our key allocation: the keys list lock needs to be 379 451 * taken before the individual key locks, and we need a valid ··· 457 385 __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED); 458 386 mctp_key_unref(key); 459 387 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); 460 - return -EFAULT; 388 + return rc; 461 389 } 462 390 463 391 mctp_key_unref(key); 464 392 return 0; 465 393 } 466 394 467 - static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg) 395 + static int mctp_ioctl_droptag(struct mctp_sock *msk, bool tagv2, 396 + unsigned long arg) 468 397 { 469 398 struct net *net = sock_net(&msk->sk); 470 - struct mctp_ioc_tag_ctl ctl; 399 + struct mctp_ioc_tag_ctl2 ctl; 471 400 unsigned long flags, fl2; 472 401 struct mctp_sk_key *key; 473 402 struct hlist_node *tmp; 474 403 int rc; 475 404 u8 tag; 476 405 477 - if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl))) 478 - return -EFAULT; 479 - 480 - if (ctl.flags) 481 - return -EINVAL; 406 + rc = mctp_ioctl_tag_copy_from_user(arg, 
&ctl, tagv2); 407 + if (rc) 408 + return rc; 482 409 483 410 /* Must be a local tag, TO set, preallocated */ 484 411 if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC)) ··· 493 422 */ 494 423 spin_lock_irqsave(&key->lock, fl2); 495 424 if (key->manual_alloc && 425 + ctl.net == key->net && 496 426 ctl.peer_addr == key->peer_addr && 497 427 tag == key->tag) { 498 428 __mctp_key_remove(key, net, fl2, ··· 511 439 static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 512 440 { 513 441 struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk); 442 + bool tagv2 = false; 514 443 515 444 switch (cmd) { 445 + case SIOCMCTPALLOCTAG2: 516 446 case SIOCMCTPALLOCTAG: 517 - return mctp_ioctl_alloctag(msk, arg); 447 + tagv2 = cmd == SIOCMCTPALLOCTAG2; 448 + return mctp_ioctl_alloctag(msk, tagv2, arg); 518 449 case SIOCMCTPDROPTAG: 519 - return mctp_ioctl_droptag(msk, arg); 450 + case SIOCMCTPDROPTAG2: 451 + tagv2 = cmd == SIOCMCTPDROPTAG2; 452 + return mctp_ioctl_droptag(msk, tagv2, arg); 520 453 } 521 454 522 455 return -EINVAL;
+86 -19
net/mctp/route.c
··· 73 73 return NULL; 74 74 } 75 75 76 - static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local, 77 - mctp_eid_t peer, u8 tag) 76 + /* A note on the key allocations. 77 + * 78 + * struct net->mctp.keys contains our set of currently-allocated keys for 79 + * MCTP tag management. The lookup tuple for these is the peer EID, 80 + * local EID and MCTP tag. 81 + * 82 + * In some cases, the peer EID may be MCTP_EID_ANY: for example, when a 83 + * broadcast message is sent, we may receive responses from any peer EID. 84 + * Because the broadcast dest address is equivalent to ANY, we create 85 + * a key with (local = local-eid, peer = ANY). This allows a match on the 86 + * incoming broadcast responses from any peer. 87 + * 88 + * We perform lookups when packets are received, and when tags are allocated 89 + * in two scenarios: 90 + * 91 + * - when a packet is sent, with a locally-owned tag: we need to find an 92 + * unused tag value for the (local, peer) EID pair. 93 + * 94 + * - when a tag is manually allocated: we need to find an unused tag value 95 + * for the peer EID, but don't have a specific local EID at that stage. 96 + * 97 + * in the latter case, on successful allocation, we end up with a tag with 98 + * (local = ANY, peer = peer-eid). 99 + * 100 + * So, the key set allows both a local EID of ANY, as well as a peer EID of 101 + * ANY in the lookup tuple. Both may be ANY if we prealloc for a broadcast. 102 + * The matching (in mctp_key_match()) during lookup allows the match value to 103 + * be ANY in either the dest or source addresses. 104 + * 105 + * When allocating (+ inserting) a tag, we need to check for conflicts amongst 106 + * the existing tag set. This requires matching either exactly on the local 107 + * and peer addresses, or either being ANY. 
108 + */ 109 + 110 + static bool mctp_key_match(struct mctp_sk_key *key, unsigned int net, 111 + mctp_eid_t local, mctp_eid_t peer, u8 tag) 78 112 { 113 + if (key->net != net) 114 + return false; 115 + 79 116 if (!mctp_address_matches(key->local_addr, local)) 80 117 return false; 81 118 82 - if (key->peer_addr != peer) 119 + if (!mctp_address_matches(key->peer_addr, peer)) 83 120 return false; 84 121 85 122 if (key->tag != tag) ··· 129 92 * key exists. 130 93 */ 131 94 static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb, 132 - mctp_eid_t peer, 95 + unsigned int netid, mctp_eid_t peer, 133 96 unsigned long *irqflags) 134 97 __acquires(&key->lock) 135 98 { ··· 145 108 spin_lock_irqsave(&net->mctp.keys_lock, flags); 146 109 147 110 hlist_for_each_entry(key, &net->mctp.keys, hlist) { 148 - if (!mctp_key_match(key, mh->dest, peer, tag)) 111 + if (!mctp_key_match(key, netid, mh->dest, peer, tag)) 149 112 continue; 150 113 151 114 spin_lock(&key->lock); ··· 168 131 } 169 132 170 133 static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk, 134 + unsigned int net, 171 135 mctp_eid_t local, mctp_eid_t peer, 172 136 u8 tag, gfp_t gfp) 173 137 { ··· 178 140 if (!key) 179 141 return NULL; 180 142 143 + key->net = net; 181 144 key->peer_addr = peer; 182 145 key->local_addr = local; 183 146 key->tag = tag; ··· 224 185 } 225 186 226 187 hlist_for_each_entry(tmp, &net->mctp.keys, hlist) { 227 - if (mctp_key_match(tmp, key->local_addr, key->peer_addr, 228 - key->tag)) { 188 + if (mctp_key_match(tmp, key->net, key->local_addr, 189 + key->peer_addr, key->tag)) { 229 190 spin_lock(&tmp->lock); 230 191 if (tmp->valid) 231 192 rc = -EEXIST; ··· 366 327 struct net *net = dev_net(skb->dev); 367 328 struct mctp_sock *msk; 368 329 struct mctp_hdr *mh; 330 + unsigned int netid; 369 331 unsigned long f; 370 332 u8 tag, flags; 371 333 int rc; ··· 385 345 386 346 /* grab header, advance data ptr */ 387 347 mh = mctp_hdr(skb); 348 + netid = mctp_cb(skb)->net; 
388 349 skb_pull(skb, sizeof(struct mctp_hdr)); 389 350 390 351 if (mh->ver != 1) ··· 399 358 /* lookup socket / reasm context, exactly matching (src,dest,tag). 400 359 * we hold a ref on the key, and key->lock held. 401 360 */ 402 - key = mctp_lookup_key(net, skb, mh->src, &f); 361 + key = mctp_lookup_key(net, skb, netid, mh->src, &f); 403 362 404 363 if (flags & MCTP_HDR_FLAG_SOM) { 405 364 if (key) { ··· 409 368 * key lookup to find the socket, but don't use this 410 369 * key for reassembly - we'll create a more specific 411 370 * one for future packets if required (ie, !EOM). 371 + * 372 + * this lookup requires key->peer to be MCTP_ADDR_ANY, 373 + * it doesn't match just any key->peer. 412 374 */ 413 - any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f); 375 + any_key = mctp_lookup_key(net, skb, netid, 376 + MCTP_ADDR_ANY, &f); 414 377 if (any_key) { 415 378 msk = container_of(any_key->sk, 416 379 struct mctp_sock, sk); ··· 451 406 * packets for this message 452 407 */ 453 408 if (!key) { 454 - key = mctp_key_alloc(msk, mh->dest, mh->src, 409 + key = mctp_key_alloc(msk, netid, mh->dest, mh->src, 455 410 tag, GFP_ATOMIC); 456 411 if (!key) { 457 412 rc = -ENOMEM; ··· 641 596 refcount_inc(&key->refs); 642 597 } 643 598 644 - /* Allocate a locally-owned tag value for (saddr, daddr), and reserve 599 + /* Allocate a locally-owned tag value for (local, peer), and reserve 645 600 * it for the socket msk 646 601 */ 647 602 struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, 648 - mctp_eid_t daddr, mctp_eid_t saddr, 603 + unsigned int netid, 604 + mctp_eid_t local, mctp_eid_t peer, 649 605 bool manual, u8 *tagp) 650 606 { 651 607 struct net *net = sock_net(&msk->sk); ··· 656 610 u8 tagbits; 657 611 658 612 /* for NULL destination EIDs, we may get a response from any peer */ 659 - if (daddr == MCTP_ADDR_NULL) 660 - daddr = MCTP_ADDR_ANY; 613 + if (peer == MCTP_ADDR_NULL) 614 + peer = MCTP_ADDR_ANY; 661 615 662 616 /* be optimistic, alloc now */ 663 - key 
= mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL); 617 + key = mctp_key_alloc(msk, netid, local, peer, 0, GFP_KERNEL); 664 618 if (!key) 665 619 return ERR_PTR(-ENOMEM); 666 620 ··· 677 631 * lock held, they don't change over the lifetime of the key. 678 632 */ 679 633 634 + /* tags are net-specific */ 635 + if (tmp->net != netid) 636 + continue; 637 + 680 638 /* if we don't own the tag, it can't conflict */ 681 639 if (tmp->tag & MCTP_HDR_FLAG_TO) 682 640 continue; 683 641 684 - if (!(mctp_address_matches(tmp->peer_addr, daddr) && 685 - mctp_address_matches(tmp->local_addr, saddr))) 642 + /* Since we're avoiding conflicting entries, match peer and 643 + * local addresses, including with a wildcard on ANY. See 644 + * 'A note on key allocations' for background. 645 + */ 646 + if (peer != MCTP_ADDR_ANY && 647 + !mctp_address_matches(tmp->peer_addr, peer)) 648 + continue; 649 + 650 + if (local != MCTP_ADDR_ANY && 651 + !mctp_address_matches(tmp->local_addr, local)) 686 652 continue; 687 653 688 654 spin_lock(&tmp->lock); ··· 729 671 } 730 672 731 673 static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk, 674 + unsigned int netid, 732 675 mctp_eid_t daddr, 733 676 u8 req_tag, u8 *tagp) 734 677 { ··· 744 685 spin_lock_irqsave(&mns->keys_lock, flags); 745 686 746 687 hlist_for_each_entry(tmp, &mns->keys, hlist) { 688 + if (tmp->net != netid) 689 + continue; 690 + 747 691 if (tmp->tag != req_tag) 748 692 continue; 749 693 ··· 905 843 /* copy message payload */ 906 844 skb_copy_bits(skb, pos, skb_transport_header(skb2), size); 907 845 846 + /* we need to copy the extensions, for MCTP flow data */ 847 + skb_ext_copy(skb2, skb); 848 + 908 849 /* do route */ 909 850 rc = rt->output(rt, skb2); 910 851 if (rc) ··· 930 865 struct mctp_sk_key *key; 931 866 struct mctp_hdr *hdr; 932 867 unsigned long flags; 868 + unsigned int netid; 933 869 unsigned int mtu; 934 870 mctp_eid_t saddr; 935 871 bool ext_rt; ··· 981 915 rc = 0; 982 916 } 983 917 
spin_unlock_irqrestore(&rt->dev->addrs_lock, flags); 918 + netid = READ_ONCE(rt->dev->net); 984 919 985 920 if (rc) 986 921 goto out_release; 987 922 988 923 if (req_tag & MCTP_TAG_OWNER) { 989 924 if (req_tag & MCTP_TAG_PREALLOC) 990 - key = mctp_lookup_prealloc_tag(msk, daddr, 925 + key = mctp_lookup_prealloc_tag(msk, netid, daddr, 991 926 req_tag, &tag); 992 927 else 993 - key = mctp_alloc_local_tag(msk, daddr, saddr, 928 + key = mctp_alloc_local_tag(msk, netid, saddr, daddr, 994 929 false, &tag); 995 930 996 931 if (IS_ERR(key)) {
+399 -14
net/mctp/test/route-test.c
··· 79 79 kfree_rcu(&rt->rt, rcu); 80 80 } 81 81 82 + static void mctp_test_skb_set_dev(struct sk_buff *skb, 83 + struct mctp_test_dev *dev) 84 + { 85 + struct mctp_skb_cb *cb; 86 + 87 + cb = mctp_cb(skb); 88 + cb->net = READ_ONCE(dev->mdev->net); 89 + skb->dev = dev->ndev; 90 + } 91 + 82 92 static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr, 83 93 unsigned int data_len) 84 94 { ··· 101 91 if (!skb) 102 92 return NULL; 103 93 94 + __mctp_cb(skb); 104 95 memcpy(skb_put(skb, hdr_len), hdr, hdr_len); 105 96 106 97 buf = skb_put(skb, data_len); ··· 122 111 if (!skb) 123 112 return NULL; 124 113 114 + __mctp_cb(skb); 125 115 memcpy(skb_put(skb, hdr_len), hdr, hdr_len); 126 116 memcpy(skb_put(skb, data_len), data, data_len); 127 117 ··· 261 249 skb = mctp_test_create_skb(&params->hdr, 1); 262 250 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); 263 251 264 - __mctp_cb(skb); 265 - 266 252 mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL); 267 253 268 254 KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input); ··· 293 283 static void __mctp_route_test_init(struct kunit *test, 294 284 struct mctp_test_dev **devp, 295 285 struct mctp_test_route **rtp, 296 - struct socket **sockp) 286 + struct socket **sockp, 287 + unsigned int netid) 297 288 { 298 289 struct sockaddr_mctp addr = {0}; 299 290 struct mctp_test_route *rt; ··· 304 293 305 294 dev = mctp_test_create_dev(); 306 295 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); 296 + if (netid != MCTP_NET_ANY) 297 + WRITE_ONCE(dev->mdev->net, netid); 307 298 308 299 rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68); 309 300 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt); ··· 314 301 KUNIT_ASSERT_EQ(test, rc, 0); 315 302 316 303 addr.smctp_family = AF_MCTP; 317 - addr.smctp_network = MCTP_NET_ANY; 304 + addr.smctp_network = netid; 318 305 addr.smctp_addr.s_addr = 8; 319 306 addr.smctp_type = 0; 320 307 rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr)); ··· 352 339 353 340 params = test->param_value; 354 
341 355 - __mctp_route_test_init(test, &dev, &rt, &sock); 342 + __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY); 356 343 357 344 skb = mctp_test_create_skb_data(&params->hdr, &params->type); 358 345 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); 359 346 360 - skb->dev = dev->ndev; 361 - __mctp_cb(skb); 347 + mctp_test_skb_set_dev(skb, dev); 362 348 363 349 rc = mctp_route_input(&rt->rt, skb); 364 350 ··· 422 410 423 411 params = test->param_value; 424 412 425 - __mctp_route_test_init(test, &dev, &rt, &sock); 413 + __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY); 426 414 427 415 for (i = 0; i < params->n_hdrs; i++) { 428 416 c = i; 429 417 skb = mctp_test_create_skb_data(&params->hdrs[i], &c); 430 418 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); 431 419 432 - skb->dev = dev->ndev; 433 - __mctp_cb(skb); 420 + mctp_test_skb_set_dev(skb, dev); 434 421 435 422 rc = mctp_route_input(&rt->rt, skb); 436 423 } ··· 555 544 struct mctp_sock *msk; 556 545 struct socket *sock; 557 546 unsigned long flags; 547 + unsigned int net; 558 548 int rc; 559 549 u8 c; 560 550 ··· 563 551 564 552 dev = mctp_test_create_dev(); 565 553 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); 554 + net = READ_ONCE(dev->mdev->net); 566 555 567 556 rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68); 568 557 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt); ··· 575 562 mns = &sock_net(sock->sk)->mctp; 576 563 577 564 /* set the incoming tag according to test params */ 578 - key = mctp_key_alloc(msk, params->key_local_addr, params->key_peer_addr, 579 - params->key_tag, GFP_KERNEL); 565 + key = mctp_key_alloc(msk, net, params->key_local_addr, 566 + params->key_peer_addr, params->key_tag, 567 + GFP_KERNEL); 580 568 581 569 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, key); 582 570 ··· 590 576 skb = mctp_test_create_skb_data(&params->hdr, &c); 591 577 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); 592 578 593 - skb->dev = dev->ndev; 594 - __mctp_cb(skb); 579 + mctp_test_skb_set_dev(skb, dev); 595 580 596 581 rc = 
mctp_route_input(&rt->rt, skb); 597 582 ··· 678 665 KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests, 679 666 mctp_route_input_sk_keys_to_desc); 680 667 668 + struct test_net { 669 + unsigned int netid; 670 + struct mctp_test_dev *dev; 671 + struct mctp_test_route *rt; 672 + struct socket *sock; 673 + struct sk_buff *skb; 674 + struct mctp_sk_key *key; 675 + struct { 676 + u8 type; 677 + unsigned int data; 678 + } msg; 679 + }; 680 + 681 + static void 682 + mctp_test_route_input_multiple_nets_bind_init(struct kunit *test, 683 + struct test_net *t) 684 + { 685 + struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO); 686 + 687 + t->msg.data = t->netid; 688 + 689 + __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid); 690 + 691 + t->skb = mctp_test_create_skb_data(&hdr, &t->msg); 692 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb); 693 + mctp_test_skb_set_dev(t->skb, t->dev); 694 + } 695 + 696 + static void 697 + mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test, 698 + struct test_net *t) 699 + { 700 + __mctp_route_test_fini(test, t->dev, t->rt, t->sock); 701 + } 702 + 703 + /* Test that skbs from different nets (otherwise identical) get routed to their 704 + * corresponding socket via the sockets' bind() 705 + */ 706 + static void mctp_test_route_input_multiple_nets_bind(struct kunit *test) 707 + { 708 + struct sk_buff *rx_skb1, *rx_skb2; 709 + struct test_net t1, t2; 710 + int rc; 711 + 712 + t1.netid = 1; 713 + t2.netid = 2; 714 + 715 + t1.msg.type = 0; 716 + t2.msg.type = 0; 717 + 718 + mctp_test_route_input_multiple_nets_bind_init(test, &t1); 719 + mctp_test_route_input_multiple_nets_bind_init(test, &t2); 720 + 721 + rc = mctp_route_input(&t1.rt->rt, t1.skb); 722 + KUNIT_ASSERT_EQ(test, rc, 0); 723 + rc = mctp_route_input(&t2.rt->rt, t2.skb); 724 + KUNIT_ASSERT_EQ(test, rc, 0); 725 + 726 + rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc); 727 + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, 
rx_skb1); 728 + KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg)); 729 + KUNIT_EXPECT_EQ(test, 730 + *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)), 731 + t1.netid); 732 + kfree_skb(rx_skb1); 733 + 734 + rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc); 735 + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2); 736 + KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg)); 737 + KUNIT_EXPECT_EQ(test, 738 + *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)), 739 + t2.netid); 740 + kfree_skb(rx_skb2); 741 + 742 + mctp_test_route_input_multiple_nets_bind_fini(test, &t1); 743 + mctp_test_route_input_multiple_nets_bind_fini(test, &t2); 744 + } 745 + 746 + static void 747 + mctp_test_route_input_multiple_nets_key_init(struct kunit *test, 748 + struct test_net *t) 749 + { 750 + struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1)); 751 + struct mctp_sock *msk; 752 + struct netns_mctp *mns; 753 + unsigned long flags; 754 + 755 + t->msg.data = t->netid; 756 + 757 + __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid); 758 + 759 + msk = container_of(t->sock->sk, struct mctp_sock, sk); 760 + 761 + t->key = mctp_key_alloc(msk, t->netid, hdr.dest, hdr.src, 1, GFP_KERNEL); 762 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key); 763 + 764 + mns = &sock_net(t->sock->sk)->mctp; 765 + spin_lock_irqsave(&mns->keys_lock, flags); 766 + mctp_reserve_tag(&init_net, t->key, msk); 767 + spin_unlock_irqrestore(&mns->keys_lock, flags); 768 + 769 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key); 770 + t->skb = mctp_test_create_skb_data(&hdr, &t->msg); 771 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb); 772 + mctp_test_skb_set_dev(t->skb, t->dev); 773 + } 774 + 775 + static void 776 + mctp_test_route_input_multiple_nets_key_fini(struct kunit *test, 777 + struct test_net *t) 778 + { 779 + mctp_key_unref(t->key); 780 + __mctp_route_test_fini(test, t->dev, t->rt, t->sock); 781 + } 782 + 783 + /* test that skbs from different nets (otherwise identical) get routed to their 
784 + * corresponding socket via the sk_key 785 + */ 786 + static void mctp_test_route_input_multiple_nets_key(struct kunit *test) 787 + { 788 + struct sk_buff *rx_skb1, *rx_skb2; 789 + struct test_net t1, t2; 790 + int rc; 791 + 792 + t1.netid = 1; 793 + t2.netid = 2; 794 + 795 + /* use type 1 which is not bound */ 796 + t1.msg.type = 1; 797 + t2.msg.type = 1; 798 + 799 + mctp_test_route_input_multiple_nets_key_init(test, &t1); 800 + mctp_test_route_input_multiple_nets_key_init(test, &t2); 801 + 802 + rc = mctp_route_input(&t1.rt->rt, t1.skb); 803 + KUNIT_ASSERT_EQ(test, rc, 0); 804 + rc = mctp_route_input(&t2.rt->rt, t2.skb); 805 + KUNIT_ASSERT_EQ(test, rc, 0); 806 + 807 + rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc); 808 + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1); 809 + KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg)); 810 + KUNIT_EXPECT_EQ(test, 811 + *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)), 812 + t1.netid); 813 + kfree_skb(rx_skb1); 814 + 815 + rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc); 816 + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2); 817 + KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg)); 818 + KUNIT_EXPECT_EQ(test, 819 + *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)), 820 + t2.netid); 821 + kfree_skb(rx_skb2); 822 + 823 + mctp_test_route_input_multiple_nets_key_fini(test, &t1); 824 + mctp_test_route_input_multiple_nets_key_fini(test, &t2); 825 + } 826 + 827 + #if IS_ENABLED(CONFIG_MCTP_FLOWS) 828 + 829 + static void mctp_test_flow_init(struct kunit *test, 830 + struct mctp_test_dev **devp, 831 + struct mctp_test_route **rtp, 832 + struct socket **sock, 833 + struct sk_buff **skbp, 834 + unsigned int len) 835 + { 836 + struct mctp_test_route *rt; 837 + struct mctp_test_dev *dev; 838 + struct sk_buff *skb; 839 + 840 + /* we have a slightly odd routing setup here; the test route 841 + * is for EID 8, which is our local EID. 
We don't do a routing 842 + * lookup, so that's fine - all we require is a path through 843 + * mctp_local_output, which will call rt->output on whatever 844 + * route we provide 845 + */ 846 + __mctp_route_test_init(test, &dev, &rt, sock, MCTP_NET_ANY); 847 + 848 + /* Assign a single EID. ->addrs is freed on mctp netdev release */ 849 + dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL); 850 + dev->mdev->num_addrs = 1; 851 + dev->mdev->addrs[0] = 8; 852 + 853 + skb = alloc_skb(len + sizeof(struct mctp_hdr) + 1, GFP_KERNEL); 854 + KUNIT_ASSERT_TRUE(test, skb); 855 + __mctp_cb(skb); 856 + skb_reserve(skb, sizeof(struct mctp_hdr) + 1); 857 + memset(skb_put(skb, len), 0, len); 858 + 859 + /* take a ref for the route, we'll decrement in local output */ 860 + refcount_inc(&rt->rt.refs); 861 + 862 + *devp = dev; 863 + *rtp = rt; 864 + *skbp = skb; 865 + } 866 + 867 + static void mctp_test_flow_fini(struct kunit *test, 868 + struct mctp_test_dev *dev, 869 + struct mctp_test_route *rt, 870 + struct socket *sock) 871 + { 872 + __mctp_route_test_fini(test, dev, rt, sock); 873 + } 874 + 875 + /* test that an outgoing skb has the correct MCTP extension data set */ 876 + static void mctp_test_packet_flow(struct kunit *test) 877 + { 878 + struct sk_buff *skb, *skb2; 879 + struct mctp_test_route *rt; 880 + struct mctp_test_dev *dev; 881 + struct mctp_flow *flow; 882 + struct socket *sock; 883 + u8 dst = 8; 884 + int n, rc; 885 + 886 + mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 30); 887 + 888 + rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER); 889 + KUNIT_ASSERT_EQ(test, rc, 0); 890 + 891 + n = rt->pkts.qlen; 892 + KUNIT_ASSERT_EQ(test, n, 1); 893 + 894 + skb2 = skb_dequeue(&rt->pkts); 895 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2); 896 + 897 + flow = skb_ext_find(skb2, SKB_EXT_MCTP); 898 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow); 899 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow->key); 900 + KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk); 901 + 902 + 
kfree_skb(skb2); 903 + mctp_test_flow_fini(test, dev, rt, sock); 904 + } 905 + 906 + /* test that outgoing skbs, after fragmentation, all have the correct MCTP 907 + * extension data set. 908 + */ 909 + static void mctp_test_fragment_flow(struct kunit *test) 910 + { 911 + struct mctp_flow *flows[2]; 912 + struct sk_buff *tx_skbs[2]; 913 + struct mctp_test_route *rt; 914 + struct mctp_test_dev *dev; 915 + struct sk_buff *skb; 916 + struct socket *sock; 917 + u8 dst = 8; 918 + int n, rc; 919 + 920 + mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 100); 921 + 922 + rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER); 923 + KUNIT_ASSERT_EQ(test, rc, 0); 924 + 925 + n = rt->pkts.qlen; 926 + KUNIT_ASSERT_EQ(test, n, 2); 927 + 928 + /* both resulting packets should have the same flow data */ 929 + tx_skbs[0] = skb_dequeue(&rt->pkts); 930 + tx_skbs[1] = skb_dequeue(&rt->pkts); 931 + 932 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]); 933 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]); 934 + 935 + flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP); 936 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]); 937 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key); 938 + KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk); 939 + 940 + flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP); 941 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]); 942 + KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key); 943 + 944 + kfree_skb(tx_skbs[0]); 945 + kfree_skb(tx_skbs[1]); 946 + mctp_test_flow_fini(test, dev, rt, sock); 947 + } 948 + 949 + #else 950 + static void mctp_test_packet_flow(struct kunit *test) 951 + { 952 + kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y"); 953 + } 954 + 955 + static void mctp_test_fragment_flow(struct kunit *test) 956 + { 957 + kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y"); 958 + } 959 + #endif 960 + 961 + /* Test that outgoing skbs cause a suitable tag to be created */ 962 + static void mctp_test_route_output_key_create(struct kunit *test) 
963 + { 964 + const unsigned int netid = 50; 965 + const u8 dst = 26, src = 15; 966 + struct mctp_test_route *rt; 967 + struct mctp_test_dev *dev; 968 + struct mctp_sk_key *key; 969 + struct netns_mctp *mns; 970 + unsigned long flags; 971 + struct socket *sock; 972 + struct sk_buff *skb; 973 + bool empty, single; 974 + const int len = 2; 975 + int rc; 976 + 977 + dev = mctp_test_create_dev(); 978 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); 979 + WRITE_ONCE(dev->mdev->net, netid); 980 + 981 + rt = mctp_test_create_route(&init_net, dev->mdev, dst, 68); 982 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt); 983 + 984 + rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock); 985 + KUNIT_ASSERT_EQ(test, rc, 0); 986 + 987 + dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL); 988 + dev->mdev->num_addrs = 1; 989 + dev->mdev->addrs[0] = src; 990 + 991 + skb = alloc_skb(sizeof(struct mctp_hdr) + 1 + len, GFP_KERNEL); 992 + KUNIT_ASSERT_TRUE(test, skb); 993 + __mctp_cb(skb); 994 + skb_reserve(skb, sizeof(struct mctp_hdr) + 1 + len); 995 + memset(skb_put(skb, len), 0, len); 996 + 997 + refcount_inc(&rt->rt.refs); 998 + 999 + mns = &sock_net(sock->sk)->mctp; 1000 + 1001 + /* We assume we're starting from an empty keys list, which requires 1002 + * preceding tests to clean up correctly! 
1003 + */ 1004 + spin_lock_irqsave(&mns->keys_lock, flags); 1005 + empty = hlist_empty(&mns->keys); 1006 + spin_unlock_irqrestore(&mns->keys_lock, flags); 1007 + KUNIT_ASSERT_TRUE(test, empty); 1008 + 1009 + rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER); 1010 + KUNIT_ASSERT_EQ(test, rc, 0); 1011 + 1012 + key = NULL; 1013 + single = false; 1014 + spin_lock_irqsave(&mns->keys_lock, flags); 1015 + if (!hlist_empty(&mns->keys)) { 1016 + key = hlist_entry(mns->keys.first, struct mctp_sk_key, hlist); 1017 + single = hlist_is_singular_node(&key->hlist, &mns->keys); 1018 + } 1019 + spin_unlock_irqrestore(&mns->keys_lock, flags); 1020 + 1021 + KUNIT_ASSERT_NOT_NULL(test, key); 1022 + KUNIT_ASSERT_TRUE(test, single); 1023 + 1024 + KUNIT_EXPECT_EQ(test, key->net, netid); 1025 + KUNIT_EXPECT_EQ(test, key->local_addr, src); 1026 + KUNIT_EXPECT_EQ(test, key->peer_addr, dst); 1027 + /* key has incoming tag, so inverse of what we sent */ 1028 + KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER); 1029 + 1030 + sock_release(sock); 1031 + mctp_test_route_destroy(test, rt); 1032 + mctp_test_destroy_dev(dev); 1033 + } 1034 + 681 1035 static struct kunit_case mctp_test_cases[] = { 682 1036 KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params), 683 1037 KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params), ··· 1053 673 mctp_route_input_sk_reasm_gen_params), 1054 674 KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys, 1055 675 mctp_route_input_sk_keys_gen_params), 676 + KUNIT_CASE(mctp_test_route_input_multiple_nets_bind), 677 + KUNIT_CASE(mctp_test_route_input_multiple_nets_key), 678 + KUNIT_CASE(mctp_test_packet_flow), 679 + KUNIT_CASE(mctp_test_fragment_flow), 680 + KUNIT_CASE(mctp_test_route_output_key_create), 1056 681 {} 1057 682 }; 1058 683
+2
net/mctp/test/utils.c
··· 4 4 #include <linux/mctp.h> 5 5 #include <linux/if_arp.h> 6 6 7 + #include <net/mctp.h> 7 8 #include <net/mctpdevice.h> 8 9 #include <net/pkt_sched.h> 9 10 ··· 55 54 56 55 rcu_read_lock(); 57 56 dev->mdev = __mctp_dev_get(ndev); 57 + dev->mdev->net = mctp_default_net(dev_net(ndev)); 58 58 rcu_read_unlock(); 59 59 60 60 return dev;
+1
tools/testing/kunit/configs/all_tests.config
··· 23 23 24 24 CONFIG_NET=y 25 25 CONFIG_MCTP=y 26 + CONFIG_MCTP_FLOWS=y 26 27 27 28 CONFIG_INET=y 28 29 CONFIG_MPTCP=y