Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mctp: Add SIOCMCTP{ALLOC,DROP}TAG ioctls for tag control

This change adds a couple of new ioctls for mctp sockets:
SIOCMCTPALLOCTAG and SIOCMCTPDROPTAG. These ioctls provide facilities
for explicit allocation / release of tags, overriding the automatic
allocate-on-send/release-on-reply and timeout behaviours. This allows
userspace more control over messages that may not fit a simple
request/response model.

In order to indicate a pre-allocated tag to the sendmsg() syscall, we
introduce a new flag to the struct sockaddr_mctp.smctp_tag value:
MCTP_TAG_PREALLOC.

Additional changes from Jeremy Kerr <jk@codeconstruct.com.au>.

Contains a fix that was:
Reported-by: kernel test robot <lkp@intel.com>

Signed-off-by: Matt Johnston <matt@codeconstruct.com.au>
Signed-off-by: Jeremy Kerr <jk@codeconstruct.com.au>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Matt Johnston and committed by David S. Miller
63ed1aab 0de55a7d

+329 -56
+48
Documentation/networking/mctp.rst
··· 212 212 Like the send calls, sockets will only receive responses to requests they have 213 213 sent (TO=1) and may only respond (TO=0) to requests they have received. 214 214 215 + ``ioctl(SIOCMCTPALLOCTAG)`` and ``ioctl(SIOCMCTPDROPTAG)`` 216 + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 217 + 218 + These ioctls give applications more control over MCTP message tags, by allocating 219 + (and dropping) tag values explicitly, rather than the kernel automatically 220 + allocating a per-message tag at ``sendmsg()`` time. 221 + 222 + In general, you will only need to use these ioctls if your MCTP protocol does 223 + not fit the usual request/response model. For example, if you need to persist 224 + tags across multiple requests, or a request may generate more than one response. 225 + In these cases, the ioctls allow you to decouple the tag allocation (and 226 + release) from individual message send and receive operations. 227 + 228 + Both ioctls are passed a pointer to a ``struct mctp_ioc_tag_ctl``: 229 + 230 + .. code-block:: C 231 + 232 + struct mctp_ioc_tag_ctl { 233 + mctp_eid_t peer_addr; 234 + __u8 tag; 235 + __u16 flags; 236 + }; 237 + 238 + ``SIOCMCTPALLOCTAG`` allocates a tag for a specific peer, which an application 239 + can use in future ``sendmsg()`` calls. The application populates the 240 + ``peer_addr`` member with the remote EID. Other fields must be zero. 241 + 242 + On return, the ``tag`` member will be populated with the allocated tag value. 243 + The allocated tag will have the following tag bits set: 244 + 245 + - ``MCTP_TAG_OWNER``: it only makes sense to allocate tags if you're the tag 246 + owner 247 + 248 + - ``MCTP_TAG_PREALLOC``: to indicate to ``sendmsg()`` that this is a 249 + preallocated tag. 250 + 251 + - ... and the actual tag value, within the least-significant three bits 252 + (``MCTP_TAG_MASK``). Note that zero is a valid tag value. 
253 + 254 + The tag value should be used as-is for the ``smctp_tag`` member of ``struct 255 + sockaddr_mctp``. 256 + 257 + ``SIOCMCTPDROPTAG`` releases a tag that has been previously allocated by a 258 + ``SIOCMCTPALLOCTAG`` ioctl. The ``peer_addr`` must be the same as used for the 259 + allocation, and the ``tag`` value must match exactly the tag returned from the 260 + allocation (including the ``MCTP_TAG_OWNER`` and ``MCTP_TAG_PREALLOC`` bits). 261 + The ``flags`` field must be zero. 262 + 215 263 Kernel internals 216 264 ================ 217 265
+10 -1
include/net/mctp.h
··· 126 126 */ 127 127 struct mctp_sk_key { 128 128 mctp_eid_t peer_addr; 129 - mctp_eid_t local_addr; 129 + mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */ 130 130 __u8 tag; /* incoming tag match; invert TO for local */ 131 131 132 132 /* we hold a ref to sk when set */ ··· 163 163 */ 164 164 unsigned long dev_flow_state; 165 165 struct mctp_dev *dev; 166 + 167 + /* a tag allocated with SIOCMCTPALLOCTAG ioctl will not expire 168 + * automatically on timeout or response, instead SIOCMCTPDROPTAG 169 + * is used. 170 + */ 171 + bool manual_alloc; 166 172 }; 167 173 168 174 struct mctp_skb_cb { ··· 245 239 struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag); 246 240 247 241 void mctp_key_unref(struct mctp_sk_key *key); 242 + struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, 243 + mctp_eid_t daddr, mctp_eid_t saddr, 244 + bool manual, u8 *tagp); 248 245 249 246 /* routing <--> device interface */ 250 247 unsigned int mctp_default_net(struct net *net);
+4 -1
include/trace/events/mctp.h
··· 15 15 MCTP_TRACE_KEY_REPLIED, 16 16 MCTP_TRACE_KEY_INVALIDATED, 17 17 MCTP_TRACE_KEY_CLOSED, 18 + MCTP_TRACE_KEY_DROPPED, 18 19 }; 19 20 #endif /* __TRACE_MCTP_ENUMS */ 20 21 ··· 23 22 TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED); 24 23 TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED); 25 24 TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED); 25 + TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_DROPPED); 26 26 27 27 TRACE_EVENT(mctp_key_acquire, 28 28 TP_PROTO(const struct mctp_sk_key *key), ··· 68 66 { MCTP_TRACE_KEY_TIMEOUT, "timeout" }, 69 67 { MCTP_TRACE_KEY_REPLIED, "replied" }, 70 68 { MCTP_TRACE_KEY_INVALIDATED, "invalidated" }, 71 - { MCTP_TRACE_KEY_CLOSED, "closed" }) 69 + { MCTP_TRACE_KEY_CLOSED, "closed" }, 70 + { MCTP_TRACE_KEY_DROPPED, "dropped" }) 72 71 ) 73 72 ); 74 73
+18
include/uapi/linux/mctp.h
··· 44 44 45 45 #define MCTP_TAG_MASK 0x07 46 46 #define MCTP_TAG_OWNER 0x08 47 + #define MCTP_TAG_PREALLOC 0x10 47 48 48 49 #define MCTP_OPT_ADDR_EXT 1 50 + 51 + #define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0) 52 + #define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1) 53 + 54 + struct mctp_ioc_tag_ctl { 55 + mctp_eid_t peer_addr; 56 + 57 + /* For SIOCMCTPALLOCTAG: must be passed as zero, kernel will 58 + * populate with the allocated tag value. Returned tag value will 59 + * always have TO and PREALLOC set. 60 + * 61 + * For SIOCMCTPDROPTAG: userspace provides tag value to drop, from 62 + * a prior SIOCMCTPALLOCTAG call (and so must have TO and PREALLOC set). 63 + */ 64 + __u8 tag; 65 + __u16 flags; 66 + }; 49 67 50 68 #endif /* __UAPI_MCTP_H */
+163 -26
net/mctp/af_mctp.c
··· 6 6 * Copyright (c) 2021 Google 7 7 */ 8 8 9 + #include <linux/compat.h> 9 10 #include <linux/if_arp.h> 10 11 #include <linux/net.h> 11 12 #include <linux/mctp.h> ··· 21 20 #include <trace/events/mctp.h> 22 21 23 22 /* socket implementation */ 23 + 24 + static void mctp_sk_expire_keys(struct timer_list *timer); 24 25 25 26 static int mctp_release(struct socket *sock) 26 27 { ··· 102 99 struct sk_buff *skb; 103 100 104 101 if (addr) { 102 + const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER | 103 + MCTP_TAG_PREALLOC; 104 + 105 105 if (addrlen < sizeof(struct sockaddr_mctp)) 106 106 return -EINVAL; 107 107 if (addr->smctp_family != AF_MCTP) 108 108 return -EINVAL; 109 109 if (!mctp_sockaddr_is_ok(addr)) 110 110 return -EINVAL; 111 - if (addr->smctp_tag & ~(MCTP_TAG_MASK | MCTP_TAG_OWNER)) 111 + if (addr->smctp_tag & ~tagbits) 112 + return -EINVAL; 113 + /* can't preallocate a non-owned tag */ 114 + if (addr->smctp_tag & MCTP_TAG_PREALLOC && 115 + !(addr->smctp_tag & MCTP_TAG_OWNER)) 112 116 return -EINVAL; 113 117 114 118 } else { ··· 258 248 return rc; 259 249 } 260 250 251 + /* We're done with the key; invalidate, stop reassembly, and remove from lists. 
252 + */ 253 + static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net, 254 + unsigned long flags, unsigned long reason) 255 + __releases(&key->lock) 256 + __must_hold(&net->mctp.keys_lock) 257 + { 258 + struct sk_buff *skb; 259 + 260 + trace_mctp_key_release(key, reason); 261 + skb = key->reasm_head; 262 + key->reasm_head = NULL; 263 + key->reasm_dead = true; 264 + key->valid = false; 265 + mctp_dev_release_key(key->dev, key); 266 + spin_unlock_irqrestore(&key->lock, flags); 267 + 268 + hlist_del(&key->hlist); 269 + hlist_del(&key->sklist); 270 + 271 + /* unref for the lists */ 272 + mctp_key_unref(key); 273 + 274 + kfree_skb(skb); 275 + } 276 + 261 277 static int mctp_setsockopt(struct socket *sock, int level, int optname, 262 278 sockptr_t optval, unsigned int optlen) 263 279 { ··· 329 293 return -EINVAL; 330 294 } 331 295 296 + static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg) 297 + { 298 + struct net *net = sock_net(&msk->sk); 299 + struct mctp_sk_key *key = NULL; 300 + struct mctp_ioc_tag_ctl ctl; 301 + unsigned long flags; 302 + u8 tag; 303 + 304 + if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl))) 305 + return -EFAULT; 306 + 307 + if (ctl.tag) 308 + return -EINVAL; 309 + 310 + if (ctl.flags) 311 + return -EINVAL; 312 + 313 + key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY, 314 + true, &tag); 315 + if (IS_ERR(key)) 316 + return PTR_ERR(key); 317 + 318 + ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC; 319 + if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) { 320 + spin_lock_irqsave(&key->lock, flags); 321 + __mctp_key_remove(key, net, flags, MCTP_TRACE_KEY_DROPPED); 322 + mctp_key_unref(key); 323 + return -EFAULT; 324 + } 325 + 326 + mctp_key_unref(key); 327 + return 0; 328 + } 329 + 330 + static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg) 331 + { 332 + struct net *net = sock_net(&msk->sk); 333 + struct mctp_ioc_tag_ctl ctl; 334 + unsigned long flags, fl2; 335 + struct 
mctp_sk_key *key; 336 + struct hlist_node *tmp; 337 + int rc; 338 + u8 tag; 339 + 340 + if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl))) 341 + return -EFAULT; 342 + 343 + if (ctl.flags) 344 + return -EINVAL; 345 + 346 + /* Must be a local tag, TO set, preallocated */ 347 + if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC)) 348 + return -EINVAL; 349 + 350 + tag = ctl.tag & MCTP_TAG_MASK; 351 + rc = -EINVAL; 352 + 353 + spin_lock_irqsave(&net->mctp.keys_lock, flags); 354 + hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { 355 + /* we do an irqsave here, even though we know the irq state, 356 + * so we have the flags to pass to __mctp_key_remove 357 + */ 358 + spin_lock_irqsave(&key->lock, fl2); 359 + if (key->manual_alloc && 360 + ctl.peer_addr == key->peer_addr && 361 + tag == key->tag) { 362 + __mctp_key_remove(key, net, fl2, 363 + MCTP_TRACE_KEY_DROPPED); 364 + rc = 0; 365 + } else { 366 + spin_unlock_irqrestore(&key->lock, fl2); 367 + } 368 + } 369 + spin_unlock_irqrestore(&net->mctp.keys_lock, flags); 370 + 371 + return rc; 372 + } 373 + 374 + static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 375 + { 376 + struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk); 377 + 378 + switch (cmd) { 379 + case SIOCMCTPALLOCTAG: 380 + return mctp_ioctl_alloctag(msk, arg); 381 + case SIOCMCTPDROPTAG: 382 + return mctp_ioctl_droptag(msk, arg); 383 + } 384 + 385 + return -EINVAL; 386 + } 387 + 388 + #ifdef CONFIG_COMPAT 389 + static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd, 390 + unsigned long arg) 391 + { 392 + void __user *argp = compat_ptr(arg); 393 + 394 + switch (cmd) { 395 + /* These have compatible ptr layouts */ 396 + case SIOCMCTPALLOCTAG: 397 + case SIOCMCTPDROPTAG: 398 + return mctp_ioctl(sock, cmd, (unsigned long)argp); 399 + } 400 + 401 + return -ENOIOCTLCMD; 402 + } 403 + #endif 404 + 332 405 static const struct proto_ops mctp_dgram_ops = { 333 406 .family = 
PF_MCTP, 334 407 .release = mctp_release, ··· 447 302 .accept = sock_no_accept, 448 303 .getname = sock_no_getname, 449 304 .poll = datagram_poll, 450 - .ioctl = sock_no_ioctl, 305 + .ioctl = mctp_ioctl, 451 306 .gettstamp = sock_gettstamp, 452 307 .listen = sock_no_listen, 453 308 .shutdown = sock_no_shutdown, ··· 457 312 .recvmsg = mctp_recvmsg, 458 313 .mmap = sock_no_mmap, 459 314 .sendpage = sock_no_sendpage, 315 + #ifdef CONFIG_COMPAT 316 + .compat_ioctl = mctp_compat_ioctl, 317 + #endif 460 318 }; 461 319 462 320 static void mctp_sk_expire_keys(struct timer_list *timer) ··· 467 319 struct mctp_sock *msk = container_of(timer, struct mctp_sock, 468 320 key_expiry); 469 321 struct net *net = sock_net(&msk->sk); 470 - unsigned long next_expiry, flags; 322 + unsigned long next_expiry, flags, fl2; 471 323 struct mctp_sk_key *key; 472 324 struct hlist_node *tmp; 473 325 bool next_expiry_valid = false; ··· 475 327 spin_lock_irqsave(&net->mctp.keys_lock, flags); 476 328 477 329 hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { 478 - spin_lock(&key->lock); 330 + /* don't expire. manual_alloc is immutable, no locking 331 + * required. 
332 + */ 333 + if (key->manual_alloc) 334 + continue; 479 335 336 + spin_lock_irqsave(&key->lock, fl2); 480 337 if (!time_after_eq(key->expiry, jiffies)) { 481 - trace_mctp_key_release(key, MCTP_TRACE_KEY_TIMEOUT); 482 - key->valid = false; 483 - hlist_del_rcu(&key->hlist); 484 - hlist_del_rcu(&key->sklist); 485 - spin_unlock(&key->lock); 486 - mctp_key_unref(key); 338 + __mctp_key_remove(key, net, fl2, 339 + MCTP_TRACE_KEY_TIMEOUT); 487 340 continue; 488 341 } 489 342 ··· 495 346 next_expiry = key->expiry; 496 347 next_expiry_valid = true; 497 348 } 498 - spin_unlock(&key->lock); 349 + spin_unlock_irqrestore(&key->lock, fl2); 499 350 } 500 351 501 352 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); ··· 536 387 { 537 388 struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); 538 389 struct net *net = sock_net(sk); 390 + unsigned long flags, fl2; 539 391 struct mctp_sk_key *key; 540 392 struct hlist_node *tmp; 541 - unsigned long flags; 542 393 543 394 /* remove from any type-based binds */ 544 395 mutex_lock(&net->mctp.bind_lock); ··· 548 399 /* remove tag allocations */ 549 400 spin_lock_irqsave(&net->mctp.keys_lock, flags); 550 401 hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { 551 - hlist_del(&key->sklist); 552 - hlist_del(&key->hlist); 553 - 554 - trace_mctp_key_release(key, MCTP_TRACE_KEY_CLOSED); 555 - 556 - spin_lock(&key->lock); 557 - kfree_skb(key->reasm_head); 558 - key->reasm_head = NULL; 559 - key->reasm_dead = true; 560 - key->valid = false; 561 - spin_unlock(&key->lock); 562 - 563 - /* key is no longer on the lookup lists, unref */ 564 - mctp_key_unref(key); 402 + spin_lock_irqsave(&key->lock, fl2); 403 + __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED); 565 404 } 566 405 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); 567 406 }
+86 -28
net/mctp/route.c
··· 203 203 return rc; 204 204 } 205 205 206 - /* We're done with the key; unset valid and remove from lists. There may still 207 - * be outstanding refs on the key though... 206 + /* Helper for mctp_route_input(). 207 + * We're done with the key; unlock and unref the key. 208 + * For the usual case of automatic expiry we remove the key from lists. 209 + * In the case that manual allocation is set on a key we release the lock 210 + * and local ref, reset reassembly, but don't remove from lists. 208 211 */ 209 - static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net, 210 - unsigned long flags) 211 - __releases(&key->lock) 212 + static void __mctp_key_done_in(struct mctp_sk_key *key, struct net *net, 213 + unsigned long flags, unsigned long reason) 214 + __releases(&key->lock) 212 215 { 213 216 struct sk_buff *skb; 214 217 218 + trace_mctp_key_release(key, reason); 215 219 skb = key->reasm_head; 216 220 key->reasm_head = NULL; 217 - key->reasm_dead = true; 218 - key->valid = false; 219 - mctp_dev_release_key(key->dev, key); 221 + 222 + if (!key->manual_alloc) { 223 + key->reasm_dead = true; 224 + key->valid = false; 225 + mctp_dev_release_key(key->dev, key); 226 + } 220 227 spin_unlock_irqrestore(&key->lock, flags); 221 228 222 - spin_lock_irqsave(&net->mctp.keys_lock, flags); 223 - hlist_del(&key->hlist); 224 - hlist_del(&key->sklist); 225 - spin_unlock_irqrestore(&net->mctp.keys_lock, flags); 229 + if (!key->manual_alloc) { 230 + spin_lock_irqsave(&net->mctp.keys_lock, flags); 231 + hlist_del(&key->hlist); 232 + hlist_del(&key->sklist); 233 + spin_unlock_irqrestore(&net->mctp.keys_lock, flags); 226 234 227 - /* one unref for the lists */ 228 - mctp_key_unref(key); 235 + /* unref for the lists */ 236 + mctp_key_unref(key); 237 + } 229 238 230 239 /* and one for the local reference */ 231 240 mctp_key_unref(key); ··· 388 379 /* we've hit a pending reassembly; not much we 389 380 * can do but drop it 390 381 */ 391 - trace_mctp_key_release(key, 
392 - MCTP_TRACE_KEY_REPLIED); 393 - __mctp_key_unlock_drop(key, net, f); 382 + __mctp_key_done_in(key, net, f, 383 + MCTP_TRACE_KEY_REPLIED); 394 384 key = NULL; 395 385 } 396 386 rc = 0; ··· 431 423 } else { 432 424 if (key->reasm_head || key->reasm_dead) { 433 425 /* duplicate start? drop everything */ 434 - trace_mctp_key_release(key, 435 - MCTP_TRACE_KEY_INVALIDATED); 436 - __mctp_key_unlock_drop(key, net, f); 426 + __mctp_key_done_in(key, net, f, 427 + MCTP_TRACE_KEY_INVALIDATED); 437 428 rc = -EEXIST; 438 429 key = NULL; 439 430 } else { ··· 455 448 * the reassembly/response key 456 449 */ 457 450 if (!rc && flags & MCTP_HDR_FLAG_EOM) { 451 + msk = container_of(key->sk, struct mctp_sock, sk); 458 452 sock_queue_rcv_skb(key->sk, key->reasm_head); 459 453 key->reasm_head = NULL; 460 - trace_mctp_key_release(key, MCTP_TRACE_KEY_REPLIED); 461 - __mctp_key_unlock_drop(key, net, f); 454 + __mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED); 462 455 key = NULL; 463 456 } 464 457 ··· 586 579 /* Allocate a locally-owned tag value for (saddr, daddr), and reserve 587 580 * it for the socket msk 588 581 */ 589 - static struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, 590 - mctp_eid_t saddr, 591 - mctp_eid_t daddr, u8 *tagp) 582 + struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, 583 + mctp_eid_t daddr, mctp_eid_t saddr, 584 + bool manual, u8 *tagp) 592 585 { 593 586 struct net *net = sock_net(&msk->sk); 594 587 struct netns_mctp *mns = &net->mctp; ··· 643 636 mctp_reserve_tag(net, key, msk); 644 637 trace_mctp_key_acquire(key); 645 638 639 + key->manual_alloc = manual; 646 640 *tagp = key->tag; 647 641 } 648 642 ··· 653 645 kfree(key); 654 646 return ERR_PTR(-EBUSY); 655 647 } 648 + 649 + return key; 650 + } 651 + 652 + static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk, 653 + mctp_eid_t daddr, 654 + u8 req_tag, u8 *tagp) 655 + { 656 + struct net *net = sock_net(&msk->sk); 657 + struct netns_mctp *mns = &net->mctp; 
658 + struct mctp_sk_key *key, *tmp; 659 + unsigned long flags; 660 + 661 + req_tag &= ~(MCTP_TAG_PREALLOC | MCTP_TAG_OWNER); 662 + key = NULL; 663 + 664 + spin_lock_irqsave(&mns->keys_lock, flags); 665 + 666 + hlist_for_each_entry(tmp, &mns->keys, hlist) { 667 + if (tmp->tag != req_tag) 668 + continue; 669 + 670 + if (!mctp_address_matches(tmp->peer_addr, daddr)) 671 + continue; 672 + 673 + if (!tmp->manual_alloc) 674 + continue; 675 + 676 + spin_lock(&tmp->lock); 677 + if (tmp->valid) { 678 + key = tmp; 679 + refcount_inc(&key->refs); 680 + spin_unlock(&tmp->lock); 681 + break; 682 + } 683 + spin_unlock(&tmp->lock); 684 + } 685 + spin_unlock_irqrestore(&mns->keys_lock, flags); 686 + 687 + if (!key) 688 + return ERR_PTR(-ENOENT); 689 + 690 + if (tagp) 691 + *tagp = key->tag; 656 692 657 693 return key; 658 694 } ··· 895 843 if (rc) 896 844 goto out_release; 897 845 898 - if (req_tag & MCTP_HDR_FLAG_TO) { 899 - key = mctp_alloc_local_tag(msk, saddr, daddr, &tag); 846 + if (req_tag & MCTP_TAG_OWNER) { 847 + if (req_tag & MCTP_TAG_PREALLOC) 848 + key = mctp_lookup_prealloc_tag(msk, daddr, 849 + req_tag, &tag); 850 + else 851 + key = mctp_alloc_local_tag(msk, daddr, saddr, 852 + false, &tag); 853 + 900 854 if (IS_ERR(key)) { 901 855 rc = PTR_ERR(key); 902 856 goto out_release; ··· 913 855 tag |= MCTP_HDR_FLAG_TO; 914 856 } else { 915 857 key = NULL; 916 - tag = req_tag; 858 + tag = req_tag & MCTP_TAG_MASK; 917 859 } 918 860 919 861 skb->protocol = htons(ETH_P_MCTP);