Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rxrpc: Allow CHALLENGEs to be passed to the app for a RESPONSE

Allow the app to request that CHALLENGEs be passed to it through an
out-of-band queue that allows recvmsg() to pick it up so that the app can
add data to it with sendmsg().

This will allow the application (AFS or userspace) to interact with the
process if it wants to and put values into user-defined fields. This will
be used by AFS when talking to a fileserver to supply that fileserver with
a crypto key by which callback RPCs can be encrypted (ie. notifications
from the fileserver to the client).

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250411095303.2316168-5-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

David Howells and committed by
Jakub Kicinski
5800b1cf 019c8433

+1192 -169
+2
Documentation/networking/rxrpc.rst
··· 1179 1179 1180 1180 .. kernel-doc:: net/rxrpc/af_rxrpc.c 1181 1181 .. kernel-doc:: net/rxrpc/key.c 1182 + .. kernel-doc:: net/rxrpc/oob.c 1182 1183 .. kernel-doc:: net/rxrpc/peer_object.c 1183 1184 .. kernel-doc:: net/rxrpc/recvmsg.c 1185 + .. kernel-doc:: net/rxrpc/rxkad.c 1184 1186 .. kernel-doc:: net/rxrpc/sendmsg.c 1185 1187 .. kernel-doc:: net/rxrpc/server_key.c
+1
fs/afs/Makefile
··· 8 8 addr_prefs.o \ 9 9 callback.o \ 10 10 cell.o \ 11 + cm_security.o \ 11 12 cmservice.o \ 12 13 dir.o \ 13 14 dir_edit.o \
+73
fs/afs/cm_security.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* Cache manager security. 3 + * 4 + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 5 + * Written by David Howells (dhowells@redhat.com) 6 + */ 7 + 8 + #include <linux/slab.h> 9 + #include "internal.h" 10 + #include "afs_fs.h" 11 + #include "protocol_yfs.h" 12 + #define RXRPC_TRACE_ONLY_DEFINE_ENUMS 13 + #include <trace/events/rxrpc.h> 14 + 15 + /* 16 + * Respond to an RxGK challenge, adding appdata. 17 + */ 18 + static int afs_respond_to_challenge(struct sk_buff *challenge) 19 + { 20 + struct rxrpc_peer *peer; 21 + unsigned long peer_data; 22 + u16 service_id; 23 + u8 security_index; 24 + 25 + rxrpc_kernel_query_challenge(challenge, &peer, &peer_data, 26 + &service_id, &security_index); 27 + 28 + _enter("%u,%u", service_id, security_index); 29 + 30 + switch (service_id) { 31 + /* We don't send CM_SERVICE RPCs, so don't expect a challenge 32 + * therefrom. 33 + */ 34 + case FS_SERVICE: 35 + case VL_SERVICE: 36 + case YFS_FS_SERVICE: 37 + case YFS_VL_SERVICE: 38 + break; 39 + default: 40 + pr_warn("Can't respond to unknown challenge %u:%u", 41 + service_id, security_index); 42 + return rxrpc_kernel_reject_challenge(challenge, RX_USER_ABORT, -EPROTO, 43 + afs_abort_unsupported_sec_class); 44 + } 45 + 46 + switch (security_index) { 47 + case RXRPC_SECURITY_RXKAD: 48 + return rxkad_kernel_respond_to_challenge(challenge); 49 + 50 + default: 51 + return rxrpc_kernel_reject_challenge(challenge, RX_USER_ABORT, -EPROTO, 52 + afs_abort_unsupported_sec_class); 53 + } 54 + } 55 + 56 + /* 57 + * Process the OOB message queue, processing challenge packets. 
58 + */ 59 + void afs_process_oob_queue(struct work_struct *work) 60 + { 61 + struct afs_net *net = container_of(work, struct afs_net, rx_oob_work); 62 + struct sk_buff *oob; 63 + enum rxrpc_oob_type type; 64 + 65 + while ((oob = rxrpc_kernel_dequeue_oob(net->socket, &type))) { 66 + switch (type) { 67 + case RXRPC_OOB_CHALLENGE: 68 + afs_respond_to_challenge(oob); 69 + break; 70 + } 71 + rxrpc_kernel_free_oob(oob); 72 + } 73 + }
+6
fs/afs/internal.h
··· 281 281 struct socket *socket; 282 282 struct afs_call *spare_incoming_call; 283 283 struct work_struct charge_preallocation_work; 284 + struct work_struct rx_oob_work; 284 285 struct mutex socket_mutex; 285 286 atomic_t nr_outstanding_calls; 286 287 atomic_t nr_superblocks; ··· 1058 1057 * cmservice.c 1059 1058 */ 1060 1059 extern bool afs_cm_incoming_call(struct afs_call *); 1060 + 1061 + /* 1062 + * cm_security.c 1063 + */ 1064 + void afs_process_oob_queue(struct work_struct *work); 1061 1065 1062 1066 /* 1063 1067 * dir.c
+1
fs/afs/main.c
··· 73 73 generate_random_uuid((unsigned char *)&net->uuid); 74 74 75 75 INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation); 76 + INIT_WORK(&net->rx_oob_work, afs_process_oob_queue); 76 77 mutex_init(&net->socket_mutex); 77 78 78 79 net->cells = RB_ROOT;
+18
fs/afs/rxrpc.c
··· 25 25 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); 26 26 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); 27 27 static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID); 28 + static void afs_rx_notify_oob(struct sock *sk, struct sk_buff *oob); 28 29 static int afs_deliver_cm_op_id(struct afs_call *); 29 30 30 31 static const struct rxrpc_kernel_ops afs_rxrpc_callback_ops = { 31 32 .notify_new_call = afs_rx_new_call, 32 33 .discard_new_call = afs_rx_discard_new_call, 33 34 .user_attach_call = afs_rx_attach, 35 + .notify_oob = afs_rx_notify_oob, 34 36 }; 35 37 36 38 /* asynchronous incoming call initial processing */ ··· 58 56 goto error_1; 59 57 60 58 socket->sk->sk_allocation = GFP_NOFS; 59 + socket->sk->sk_user_data = net; 61 60 62 61 /* bind the callback manager's address to make this a server socket */ 63 62 memset(&srx, 0, sizeof(srx)); ··· 71 68 72 69 ret = rxrpc_sock_set_min_security_level(socket->sk, 73 70 RXRPC_SECURITY_ENCRYPT); 71 + if (ret < 0) 72 + goto error_2; 73 + 74 + ret = rxrpc_sock_set_manage_response(socket->sk, true); 74 75 if (ret < 0) 75 76 goto error_2; 76 77 ··· 138 131 139 132 kernel_sock_shutdown(net->socket, SHUT_RDWR); 140 133 flush_workqueue(afs_async_calls); 134 + net->socket->sk->sk_user_data = NULL; 141 135 sock_release(net->socket); 142 136 143 137 _debug("dework"); ··· 964 956 if (call) 965 957 call->unmarshalling_error = true; 966 958 return -EBADMSG; 959 + } 960 + 961 + /* 962 + * Wake up OOB notification processing. 963 + */ 964 + static void afs_rx_notify_oob(struct sock *sk, struct sk_buff *oob) 965 + { 966 + struct afs_net *net = sk->sk_user_data; 967 + 968 + schedule_work(&net->rx_oob_work); 967 969 }
+24
include/net/af_rxrpc.h
··· 16 16 struct socket; 17 17 struct rxrpc_call; 18 18 struct rxrpc_peer; 19 + struct krb5_buffer; 19 20 enum rxrpc_abort_reason; 20 21 21 22 enum rxrpc_interruptibility { 22 23 RXRPC_INTERRUPTIBLE, /* Call is interruptible */ 23 24 RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */ 24 25 RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */ 26 + }; 27 + 28 + enum rxrpc_oob_type { 29 + RXRPC_OOB_CHALLENGE, /* Security challenge for a connection */ 25 30 }; 26 31 27 32 /* ··· 42 37 unsigned long user_call_ID); 43 38 void (*discard_new_call)(struct rxrpc_call *call, unsigned long user_call_ID); 44 39 void (*user_attach_call)(struct rxrpc_call *call, unsigned long user_call_ID); 40 + void (*notify_oob)(struct sock *sk, struct sk_buff *oob); 45 41 }; 46 42 47 43 typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *, ··· 94 88 95 89 int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val); 96 90 int rxrpc_sock_set_security_keyring(struct sock *, struct key *); 91 + int rxrpc_sock_set_manage_response(struct sock *sk, bool set); 92 + 93 + enum rxrpc_oob_type rxrpc_kernel_query_oob(struct sk_buff *oob, 94 + struct rxrpc_peer **_peer, 95 + unsigned long *_peer_appdata); 96 + struct sk_buff *rxrpc_kernel_dequeue_oob(struct socket *sock, 97 + enum rxrpc_oob_type *_type); 98 + void rxrpc_kernel_free_oob(struct sk_buff *oob); 99 + void rxrpc_kernel_query_challenge(struct sk_buff *challenge, 100 + struct rxrpc_peer **_peer, 101 + unsigned long *_peer_appdata, 102 + u16 *_service_id, u8 *_security_index); 103 + int rxrpc_kernel_reject_challenge(struct sk_buff *challenge, u32 abort_code, 104 + int error, enum rxrpc_abort_reason why); 105 + int rxkad_kernel_respond_to_challenge(struct sk_buff *challenge); 106 + u32 rxgk_kernel_query_challenge(struct sk_buff *challenge); 107 + int rxgk_kernel_respond_to_challenge(struct sk_buff *challenge, 108 + struct krb5_buffer *appdata); 97 109 98 110 #endif /* 
_NET_RXRPC_H */
+17 -1
include/trace/events/rxrpc.h
··· 25 25 EM(afs_abort_probeuuid_negative, "afs-probeuuid-neg") \ 26 26 EM(afs_abort_send_data_error, "afs-send-data") \ 27 27 EM(afs_abort_unmarshal_error, "afs-unmarshal") \ 28 + EM(afs_abort_unsupported_sec_class, "afs-unsup-sec-class") \ 28 29 /* rxperf errors */ \ 29 30 EM(rxperf_abort_general_error, "rxperf-error") \ 30 31 EM(rxperf_abort_oom, "rxperf-oom") \ ··· 78 77 EM(rxrpc_abort_call_timeout, "call-timeout") \ 79 78 EM(rxrpc_abort_no_service_key, "no-serv-key") \ 80 79 EM(rxrpc_abort_nomem, "nomem") \ 80 + EM(rxrpc_abort_response_sendmsg, "resp-sendmsg") \ 81 81 EM(rxrpc_abort_service_not_offered, "serv-not-offered") \ 82 82 EM(rxrpc_abort_shut_down, "shut-down") \ 83 83 EM(rxrpc_abort_unsupported_security, "unsup-sec") \ ··· 135 133 EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \ 136 134 EM(rxrpc_skb_get_conn_work, "GET conn-work") \ 137 135 EM(rxrpc_skb_get_local_work, "GET locl-work") \ 136 + EM(rxrpc_skb_get_post_oob, "GET post-oob ") \ 138 137 EM(rxrpc_skb_get_reject_work, "GET rej-work ") \ 139 138 EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \ 140 139 EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \ 141 140 EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \ 142 141 EM(rxrpc_skb_new_error_report, "NEW error-rpt") \ 143 142 EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \ 143 + EM(rxrpc_skb_new_response_rxgk, "NEW resp-rxgk") \ 144 + EM(rxrpc_skb_new_response_rxkad, "NEW resp-rxkd") \ 144 145 EM(rxrpc_skb_new_unshared, "NEW unshared ") \ 145 146 EM(rxrpc_skb_put_call_rx, "PUT call-rx ") \ 147 + EM(rxrpc_skb_put_challenge, "PUT challenge") \ 146 148 EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \ 147 149 EM(rxrpc_skb_put_conn_work, "PUT conn-work") \ 148 150 EM(rxrpc_skb_put_error_report, "PUT error-rep") \ 149 151 EM(rxrpc_skb_put_input, "PUT input ") \ 150 152 EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \ 153 + EM(rxrpc_skb_put_oob, "PUT oob ") \ 151 154 EM(rxrpc_skb_put_purge, "PUT purge ") \ 155 + EM(rxrpc_skb_put_purge_oob, "PUT 
purge-oob") \ 156 + EM(rxrpc_skb_put_response, "PUT response ") \ 152 157 EM(rxrpc_skb_put_rotate, "PUT rotate ") \ 153 158 EM(rxrpc_skb_put_unknown, "PUT unknown ") \ 154 159 EM(rxrpc_skb_see_conn_work, "SEE conn-work") \ 160 + EM(rxrpc_skb_see_oob_challenge, "SEE oob-chall") \ 155 161 EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \ 162 + EM(rxrpc_skb_see_recvmsg_oob, "SEE recvm-oob") \ 156 163 EM(rxrpc_skb_see_reject, "SEE reject ") \ 157 164 EM(rxrpc_skb_see_rotate, "SEE rotate ") \ 158 165 E_(rxrpc_skb_see_version, "SEE version ") ··· 227 216 EM(rxrpc_conn_free, "FREE ") \ 228 217 EM(rxrpc_conn_get_activate_call, "GET act-call") \ 229 218 EM(rxrpc_conn_get_call_input, "GET inp-call") \ 219 + EM(rxrpc_conn_get_challenge_input, "GET inp-chal") \ 230 220 EM(rxrpc_conn_get_conn_input, "GET inp-conn") \ 231 221 EM(rxrpc_conn_get_idle, "GET idle ") \ 232 222 EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \ 223 + EM(rxrpc_conn_get_poke_response, "GET response") \ 233 224 EM(rxrpc_conn_get_poke_secured, "GET secured ") \ 234 225 EM(rxrpc_conn_get_poke_timer, "GET poke ") \ 235 226 EM(rxrpc_conn_get_service_conn, "GET svc-conn") \ ··· 239 226 EM(rxrpc_conn_new_service, "NEW service ") \ 240 227 EM(rxrpc_conn_put_call, "PUT call ") \ 241 228 EM(rxrpc_conn_put_call_input, "PUT inp-call") \ 229 + EM(rxrpc_conn_put_challenge_input, "PUT inp-chal") \ 242 230 EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \ 243 231 EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \ 244 232 EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \ 245 233 EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \ 234 + EM(rxrpc_conn_put_oob, "PUT oob ") \ 246 235 EM(rxrpc_conn_put_poke, "PUT poke ") \ 247 236 EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \ 248 237 EM(rxrpc_conn_put_unbundle, "PUT unbundle") \ ··· 346 331 EM(rxrpc_recvmsg_full, "FULL") \ 347 332 EM(rxrpc_recvmsg_hole, "HOLE") \ 348 333 EM(rxrpc_recvmsg_next, "NEXT") \ 334 + EM(rxrpc_recvmsg_oobq, "OOBQ") \ 349 335 EM(rxrpc_recvmsg_requeue, "REQU") \ 350 
336 EM(rxrpc_recvmsg_return, "RETN") \ 351 337 EM(rxrpc_recvmsg_terminal, "TERM") \ ··· 472 456 EM(rxrpc_tx_point_conn_abort, "ConnAbort") \ 473 457 EM(rxrpc_tx_point_reject, "Reject") \ 474 458 EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \ 475 - EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \ 459 + EM(rxrpc_tx_point_response, "Response") \ 476 460 EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \ 477 461 E_(rxrpc_tx_point_version_reply, "VerReply") 478 462
+34 -12
include/uapi/linux/rxrpc.h
··· 36 36 #define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ 37 37 #define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */ 38 38 #define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */ 39 + #define RXRPC_MANAGE_RESPONSE 7 /* [clnt] Want to manage RESPONSE packets */ 39 40 40 41 /* 41 42 * RxRPC control messages 42 43 * - If neither abort or accept are specified, the message is a data message. 43 44 * - terminal messages mean that a user call ID tag can be recycled 45 + * - C/S/- indicate whether these are applicable to client, server or both 44 46 * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg() 45 47 */ 46 48 enum rxrpc_cmsg_type { 47 - RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */ 48 - RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */ 49 - RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */ 50 - RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */ 51 - RXRPC_BUSY = 6, /* -r: server busy received [terminal] */ 52 - RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */ 53 - RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */ 54 - RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ 55 - RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ 56 - RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ 57 - RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */ 58 - RXRPC_CHARGE_ACCEPT = 14, /* s-: Charge the accept pool with a user call ID */ 49 + RXRPC_USER_CALL_ID = 1, /* -sr: User call ID specifier */ 50 + RXRPC_ABORT = 2, /* -sr: Abort request / notification [terminal] */ 51 + RXRPC_ACK = 3, /* S-r: RPC op final ACK received [terminal] */ 52 + RXRPC_NET_ERROR = 5, /* --r: Network error received [terminal] */ 53 + RXRPC_BUSY = 6, /* C-r: Server busy received [terminal] */ 54 + RXRPC_LOCAL_ERROR = 7, /* --r: Local error generated 
[terminal] */ 55 + RXRPC_NEW_CALL = 8, /* S-r: New incoming call notification */ 56 + RXRPC_EXCLUSIVE_CALL = 10, /* Cs-: Call should be on exclusive connection */ 57 + RXRPC_UPGRADE_SERVICE = 11, /* Cs-: Request service upgrade for client call */ 58 + RXRPC_TX_LENGTH = 12, /* -s-: Total length of Tx data */ 59 + RXRPC_SET_CALL_TIMEOUT = 13, /* -s-: Set one or more call timeouts */ 60 + RXRPC_CHARGE_ACCEPT = 14, /* Ss-: Charge the accept pool with a user call ID */ 61 + RXRPC_OOB_ID = 15, /* -sr: OOB message ID */ 62 + RXRPC_CHALLENGED = 16, /* C-r: Info on a received CHALLENGE */ 63 + RXRPC_RESPOND = 17, /* Cs-: Respond to a challenge */ 64 + RXRPC_RESPONDED = 18, /* S-r: Data received in RESPONSE */ 65 + RXRPC_RESP_RXGK_APPDATA = 19, /* Cs-: RESPONSE: RxGK app data to include */ 59 66 RXRPC__SUPPORTED 60 67 }; 61 68 ··· 124 117 #define RXKADSEALEDINCON 19270410 /* sealed data inconsistent */ 125 118 #define RXKADDATALEN 19270411 /* user data too long */ 126 119 #define RXKADILLEGALLEVEL 19270412 /* caller not authorised to use encrypted conns */ 120 + 121 + /* 122 + * Challenge information in the RXRPC_CHALLENGED control message. 123 + */ 124 + struct rxrpc_challenge { 125 + __u16 service_id; /* The service ID of the connection (may be upgraded) */ 126 + __u8 security_index; /* The security index of the connection */ 127 + __u8 pad; /* Round out to a multiple of 4 bytes. */ 128 + /* ... The security class gets to append extra information ... */ 129 + }; 130 + 131 + struct rxgk_challenge { 132 + struct rxrpc_challenge base; 133 + __u32 enctype; /* Krb5 encoding type */ 134 + }; 127 135 128 136 #endif /* _UAPI_LINUX_RXRPC_H */
+1
net/rxrpc/Makefile
··· 24 24 local_object.o \ 25 25 misc.o \ 26 26 net_ns.o \ 27 + oob.o \ 27 28 output.o \ 28 29 peer_event.o \ 29 30 peer_object.o \
+50 -2
net/rxrpc/af_rxrpc.c
··· 633 633 fallthrough; 634 634 case RXRPC_SERVER_BOUND: 635 635 case RXRPC_SERVER_LISTENING: 636 - ret = rxrpc_do_sendmsg(rx, m, len); 636 + if (m->msg_flags & MSG_OOB) 637 + ret = rxrpc_sendmsg_oob(rx, m, len); 638 + else 639 + ret = rxrpc_do_sendmsg(rx, m, len); 637 640 /* The socket has been unlocked */ 638 641 goto out; 639 642 default: ··· 671 668 sockptr_t optval, unsigned int optlen) 672 669 { 673 670 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 674 - unsigned int min_sec_level; 671 + unsigned int min_sec_level, val; 675 672 u16 service_upgrade[2]; 676 673 int ret; 677 674 ··· 750 747 goto error; 751 748 rx->service_upgrade.from = service_upgrade[0]; 752 749 rx->service_upgrade.to = service_upgrade[1]; 750 + goto success; 751 + 752 + case RXRPC_MANAGE_RESPONSE: 753 + ret = -EINVAL; 754 + if (optlen != sizeof(unsigned int)) 755 + goto error; 756 + ret = -EISCONN; 757 + if (rx->sk.sk_state != RXRPC_UNBOUND) 758 + goto error; 759 + ret = copy_safe_from_sockptr(&val, sizeof(val), 760 + optval, optlen); 761 + if (ret) 762 + goto error; 763 + ret = -EINVAL; 764 + if (val > 1) 765 + goto error; 766 + if (val) 767 + set_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags); 768 + else 769 + clear_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags); 753 770 goto success; 754 771 755 772 default: ··· 878 855 rx->calls = RB_ROOT; 879 856 880 857 spin_lock_init(&rx->incoming_lock); 858 + skb_queue_head_init(&rx->recvmsg_oobq); 859 + rx->pending_oobq = RB_ROOT; 881 860 INIT_LIST_HEAD(&rx->sock_calls); 882 861 INIT_LIST_HEAD(&rx->to_be_accepted); 883 862 INIT_LIST_HEAD(&rx->recvmsg_q); ··· 913 888 lock_sock(sk); 914 889 915 890 if (sk->sk_state < RXRPC_CLOSE) { 891 + spin_lock_irq(&rx->recvmsg_lock); 916 892 sk->sk_state = RXRPC_CLOSE; 917 893 sk->sk_shutdown = SHUTDOWN_MASK; 894 + spin_unlock_irq(&rx->recvmsg_lock); 918 895 } else { 919 896 ret = -ESHUTDOWN; 920 897 } ··· 928 901 } 929 902 930 903 /* 904 + * Purge the out-of-band queue. 
905 + */ 906 + static void rxrpc_purge_oob_queue(struct sock *sk) 907 + { 908 + struct rxrpc_sock *rx = rxrpc_sk(sk); 909 + struct sk_buff *skb; 910 + 911 + while ((skb = skb_dequeue(&rx->recvmsg_oobq))) 912 + rxrpc_kernel_free_oob(skb); 913 + while (!RB_EMPTY_ROOT(&rx->pending_oobq)) { 914 + skb = rb_entry(rx->pending_oobq.rb_node, struct sk_buff, rbnode); 915 + rb_erase(&skb->rbnode, &rx->pending_oobq); 916 + rxrpc_kernel_free_oob(skb); 917 + } 918 + } 919 + 920 + /* 931 921 * RxRPC socket destructor 932 922 */ 933 923 static void rxrpc_sock_destructor(struct sock *sk) 934 924 { 935 925 _enter("%p", sk); 936 926 927 + rxrpc_purge_oob_queue(sk); 937 928 rxrpc_purge_queue(&sk->sk_receive_queue); 938 929 939 930 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); ··· 990 945 break; 991 946 } 992 947 948 + spin_lock_irq(&rx->recvmsg_lock); 993 949 sk->sk_state = RXRPC_CLOSE; 950 + spin_unlock_irq(&rx->recvmsg_lock); 994 951 995 952 if (rx->local && rx->local->service == rx) { 996 953 write_lock(&rx->local->services_lock); ··· 1004 957 rxrpc_discard_prealloc(rx); 1005 958 rxrpc_release_calls_on_socket(rx); 1006 959 flush_workqueue(rxrpc_workqueue); 960 + rxrpc_purge_oob_queue(sk); 1007 961 rxrpc_purge_queue(&sk->sk_receive_queue); 1008 962 1009 963 rxrpc_unuse_local(rx->local, rxrpc_local_unuse_release_sock);
+48 -3
net/rxrpc/ar-internal.h
··· 39 39 enum rxrpc_skb_mark { 40 40 RXRPC_SKB_MARK_PACKET, /* Received packet */ 41 41 RXRPC_SKB_MARK_ERROR, /* Error notification */ 42 + RXRPC_SKB_MARK_CHALLENGE, /* Challenge notification */ 42 43 RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */ 43 44 RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */ 44 45 RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */ ··· 150 149 const struct rxrpc_kernel_ops *app_ops; /* Table of kernel app notification funcs */ 151 150 struct rxrpc_local *local; /* local endpoint */ 152 151 struct rxrpc_backlog *backlog; /* Preallocation for services */ 152 + struct sk_buff_head recvmsg_oobq; /* OOB messages for recvmsg to pick up */ 153 + struct rb_root pending_oobq; /* OOB messages awaiting userspace to respond to */ 154 + u64 oob_id_counter; /* OOB message ID counter */ 153 155 spinlock_t incoming_lock; /* Incoming call vs service shutdown lock */ 154 156 struct list_head sock_calls; /* List of calls owned by this socket */ 155 157 struct list_head to_be_accepted; /* calls awaiting acceptance */ ··· 163 159 struct rb_root calls; /* User ID -> call mapping */ 164 160 unsigned long flags; 165 161 #define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */ 162 + #define RXRPC_SOCK_MANAGE_RESPONSE 1 /* User wants to manage RESPONSE packets */ 166 163 rwlock_t call_lock; /* lock for calls */ 167 164 u32 min_sec_level; /* minimum security level */ 168 165 #define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT ··· 207 202 */ 208 203 struct rxrpc_skb_priv { 209 204 union { 210 - struct rxrpc_connection *conn; /* Connection referred to (poke packet) */ 205 + struct rxrpc_connection *poke_conn; /* Conn referred to (poke packet) */ 211 206 struct { 212 207 u16 offset; /* Offset of data */ 213 208 u16 len; /* Length of data */ ··· 221 216 u16 nr_acks; /* Number of acks+nacks */ 222 217 u8 reason; /* Reason for ack */ 223 218 } ack; 219 + struct { 220 + struct rxrpc_connection *conn; /* 
Connection referred to */ 221 + union { 222 + u32 rxkad_nonce; 223 + }; 224 + } chall; 225 + struct { 226 + rxrpc_serial_t challenge_serial; 227 + u32 kvno; 228 + u32 version; 229 + u16 len; 230 + u16 ticket_len; 231 + } resp; 224 232 }; 225 233 struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ 226 234 }; ··· 287 269 /* issue a challenge */ 288 270 int (*issue_challenge)(struct rxrpc_connection *); 289 271 272 + /* Validate a challenge packet */ 273 + bool (*validate_challenge)(struct rxrpc_connection *conn, 274 + struct sk_buff *skb); 275 + 276 + /* Fill out the cmsg for recvmsg() to pass on a challenge to userspace. 277 + * The security class gets to add additional information. 278 + */ 279 + int (*challenge_to_recvmsg)(struct rxrpc_connection *conn, 280 + struct sk_buff *challenge, 281 + struct msghdr *msg); 282 + 283 + /* Parse sendmsg() control message and respond to challenge. */ 284 + int (*sendmsg_respond_to_challenge)(struct sk_buff *challenge, 285 + struct msghdr *msg); 286 + 290 287 /* respond to a challenge */ 291 - int (*respond_to_challenge)(struct rxrpc_connection *, 292 - struct sk_buff *); 288 + int (*respond_to_challenge)(struct rxrpc_connection *conn, 289 + struct sk_buff *challenge); 293 290 294 291 /* verify a response */ 295 292 int (*verify_response)(struct rxrpc_connection *, ··· 559 526 u32 nonce; /* response re-use preventer */ 560 527 } rxkad; 561 528 }; 529 + struct sk_buff *tx_response; /* Response packet to be transmitted */ 562 530 unsigned long flags; 563 531 unsigned long events; 564 532 unsigned long idle_timestamp; /* Time at which last became idle */ ··· 1233 1199 bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why, 1234 1200 s32 abort_code, int err); 1235 1201 int rxrpc_io_thread(void *data); 1202 + void rxrpc_post_response(struct rxrpc_connection *conn, struct sk_buff *skb); 1236 1203 static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local) 1237 1204 { 1205 + if 
(!local->io_thread) 1206 + return; 1238 1207 wake_up_process(READ_ONCE(local->io_thread)); 1239 1208 } 1240 1209 ··· 1327 1290 } 1328 1291 1329 1292 /* 1293 + * out_of_band.c 1294 + */ 1295 + void rxrpc_notify_socket_oob(struct rxrpc_call *call, struct sk_buff *skb); 1296 + void rxrpc_add_pending_oob(struct rxrpc_sock *rx, struct sk_buff *skb); 1297 + int rxrpc_sendmsg_oob(struct rxrpc_sock *rx, struct msghdr *msg, size_t len); 1298 + 1299 + /* 1330 1300 * output.c 1331 1301 */ 1332 1302 void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason, ··· 1344 1300 void rxrpc_send_conn_abort(struct rxrpc_connection *conn); 1345 1301 void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb); 1346 1302 void rxrpc_send_keepalive(struct rxrpc_peer *); 1303 + void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *skb); 1347 1304 1348 1305 /* 1349 1306 * peer_event.c
+1 -1
net/rxrpc/call_object.c
··· 145 145 INIT_LIST_HEAD(&call->recvmsg_link); 146 146 INIT_LIST_HEAD(&call->sock_link); 147 147 INIT_LIST_HEAD(&call->attend_link); 148 - skb_queue_head_init(&call->rx_queue); 149 148 skb_queue_head_init(&call->recvmsg_queue); 149 + skb_queue_head_init(&call->rx_queue); 150 150 skb_queue_head_init(&call->rx_oos_queue); 151 151 init_waitqueue_head(&call->waitq); 152 152 spin_lock_init(&call->notify_lock);
+126 -8
net/rxrpc/conn_event.c
··· 19 19 /* 20 20 * Set the completion state on an aborted connection. 21 21 */ 22 - static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb, 22 + static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, 23 23 s32 abort_code, int err, 24 24 enum rxrpc_call_completion compl) 25 25 { ··· 49 49 int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb, 50 50 s32 abort_code, int err, enum rxrpc_abort_reason why) 51 51 { 52 - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 53 52 54 - if (rxrpc_set_conn_aborted(conn, skb, abort_code, err, 53 + u32 cid = conn->proto.cid, call = 0, seq = 0; 54 + 55 + if (skb) { 56 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 57 + 58 + cid = sp->hdr.cid; 59 + call = sp->hdr.callNumber; 60 + seq = sp->hdr.seq; 61 + } 62 + 63 + if (rxrpc_set_conn_aborted(conn, abort_code, err, 55 64 RXRPC_CALL_LOCALLY_ABORTED)) { 56 - trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, 57 - sp->hdr.seq, abort_code, err); 65 + trace_rxrpc_abort(0, why, cid, call, seq, abort_code, err); 58 66 rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort); 59 67 } 60 68 return -EPROTO; ··· 75 67 struct sk_buff *skb) 76 68 { 77 69 trace_rxrpc_rx_conn_abort(conn, skb); 78 - rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED, 70 + rxrpc_set_conn_aborted(conn, skb->priority, -ECONNABORTED, 79 71 RXRPC_CALL_REMOTELY_ABORTED); 80 72 } 81 73 ··· 256 248 257 249 switch (sp->hdr.type) { 258 250 case RXRPC_PACKET_TYPE_CHALLENGE: 259 - return conn->security->respond_to_challenge(conn, skb); 251 + ret = conn->security->respond_to_challenge(conn, skb); 252 + sp->chall.conn = NULL; 253 + rxrpc_put_connection(conn, rxrpc_conn_put_challenge_input); 254 + return ret; 260 255 261 256 case RXRPC_PACKET_TYPE_RESPONSE: 262 257 ret = conn->security->verify_response(conn, skb); ··· 281 270 * we've already received the packet, put it on the 282 271 * front of the queue. 
283 272 */ 284 - sp->conn = rxrpc_get_connection(conn, rxrpc_conn_get_poke_secured); 273 + sp->poke_conn = rxrpc_get_connection( 274 + conn, rxrpc_conn_get_poke_secured); 285 275 skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED; 286 276 rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured); 287 277 skb_queue_head(&conn->local->rx_queue, skb); ··· 404 392 } 405 393 406 394 /* 395 + * Post a CHALLENGE packet to the socket of one of a connection's calls so that 396 + * it can get application data to include in the packet, possibly querying 397 + * userspace. 398 + */ 399 + static bool rxrpc_post_challenge(struct rxrpc_connection *conn, 400 + struct sk_buff *skb) 401 + { 402 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 403 + struct rxrpc_call *call = NULL; 404 + struct rxrpc_sock *rx; 405 + bool respond = false; 406 + 407 + sp->chall.conn = 408 + rxrpc_get_connection(conn, rxrpc_conn_get_challenge_input); 409 + 410 + if (!conn->security->challenge_to_recvmsg) { 411 + rxrpc_post_packet_to_conn(conn, skb); 412 + return true; 413 + } 414 + 415 + rcu_read_lock(); 416 + 417 + for (int i = 0; i < ARRAY_SIZE(conn->channels); i++) { 418 + if (conn->channels[i].call) { 419 + call = conn->channels[i].call; 420 + rx = rcu_dereference(call->socket); 421 + if (!rx) { 422 + call = NULL; 423 + continue; 424 + } 425 + 426 + respond = true; 427 + if (test_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags)) 428 + break; 429 + call = NULL; 430 + } 431 + } 432 + 433 + if (!respond) { 434 + rcu_read_unlock(); 435 + rxrpc_put_connection(conn, rxrpc_conn_put_challenge_input); 436 + sp->chall.conn = NULL; 437 + return false; 438 + } 439 + 440 + if (call) 441 + rxrpc_notify_socket_oob(call, skb); 442 + rcu_read_unlock(); 443 + 444 + if (!call) 445 + rxrpc_post_packet_to_conn(conn, skb); 446 + return true; 447 + } 448 + 449 + /* 407 450 * Input a connection-level packet. 
408 451 */ 409 452 bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb) ··· 478 411 return true; 479 412 480 413 case RXRPC_PACKET_TYPE_CHALLENGE: 414 + rxrpc_see_skb(skb, rxrpc_skb_see_oob_challenge); 415 + if (rxrpc_is_conn_aborted(conn)) { 416 + if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED) 417 + rxrpc_send_conn_abort(conn); 418 + return true; 419 + } 420 + if (!conn->security->validate_challenge(conn, skb)) 421 + return false; 422 + return rxrpc_post_challenge(conn, skb); 423 + 481 424 case RXRPC_PACKET_TYPE_RESPONSE: 482 425 if (rxrpc_is_conn_aborted(conn)) { 483 426 if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED) ··· 513 436 if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events)) 514 437 rxrpc_abort_calls(conn); 515 438 439 + if (conn->tx_response) { 440 + struct sk_buff *skb; 441 + 442 + spin_lock_irq(&conn->local->lock); 443 + skb = conn->tx_response; 444 + conn->tx_response = NULL; 445 + spin_unlock_irq(&conn->local->lock); 446 + 447 + if (conn->state != RXRPC_CONN_ABORTED) 448 + rxrpc_send_response(conn, skb); 449 + rxrpc_free_skb(skb, rxrpc_skb_put_response); 450 + } 451 + 516 452 if (skb) { 517 453 switch (skb->mark) { 518 454 case RXRPC_SKB_MARK_SERVICE_CONN_SECURED: ··· 541 451 /* Process delayed ACKs whose time has come. */ 542 452 if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 543 453 rxrpc_process_delayed_final_acks(conn, false); 454 + } 455 + 456 + /* 457 + * Post a RESPONSE message to the I/O thread for transmission. 458 + */ 459 + void rxrpc_post_response(struct rxrpc_connection *conn, struct sk_buff *skb) 460 + { 461 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 462 + struct rxrpc_local *local = conn->local; 463 + struct sk_buff *old; 464 + 465 + _enter("%x", sp->resp.challenge_serial); 466 + 467 + spin_lock_irq(&local->lock); 468 + old = conn->tx_response; 469 + if (old) { 470 + struct rxrpc_skb_priv *osp = rxrpc_skb(skb); 471 + 472 + /* Always go with the response to the most recent challenge. 
*/ 473 + if (after(sp->resp.challenge_serial, osp->resp.challenge_serial)) 474 + conn->tx_response = old; 475 + else 476 + old = skb; 477 + } else { 478 + conn->tx_response = skb; 479 + } 480 + spin_unlock_irq(&local->lock); 481 + rxrpc_poke_conn(conn, rxrpc_conn_get_poke_response); 544 482 }
+1
net/rxrpc/conn_object.c
··· 329 329 } 330 330 331 331 rxrpc_purge_queue(&conn->rx_queue); 332 + rxrpc_free_skb(conn->tx_response, rxrpc_skb_put_response); 332 333 333 334 rxrpc_kill_client_conn(conn); 334 335
+10 -3
net/rxrpc/insecure.c
··· 42 42 { 43 43 } 44 44 45 - static int none_respond_to_challenge(struct rxrpc_connection *conn, 46 - struct sk_buff *skb) 45 + static bool none_validate_challenge(struct rxrpc_connection *conn, 46 + struct sk_buff *skb) 47 47 { 48 48 return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, 49 49 rxrpc_eproto_rxnull_challenge); 50 + } 51 + 52 + static int none_sendmsg_respond_to_challenge(struct sk_buff *challenge, 53 + struct msghdr *msg) 54 + { 55 + return -EINVAL; 50 56 } 51 57 52 58 static int none_verify_response(struct rxrpc_connection *conn, ··· 88 82 .alloc_txbuf = none_alloc_txbuf, 89 83 .secure_packet = none_secure_packet, 90 84 .verify_packet = none_verify_packet, 91 - .respond_to_challenge = none_respond_to_challenge, 85 + .validate_challenge = none_validate_challenge, 86 + .sendmsg_respond_to_challenge = none_sendmsg_respond_to_challenge, 92 87 .verify_response = none_verify_response, 93 88 .clear = none_clear, 94 89 };
+7 -5
net/rxrpc/io_thread.c
··· 489 489 rxrpc_free_skb(skb, rxrpc_skb_put_error_report); 490 490 break; 491 491 case RXRPC_SKB_MARK_SERVICE_CONN_SECURED: 492 - rxrpc_input_conn_event(sp->conn, skb); 493 - rxrpc_put_connection(sp->conn, rxrpc_conn_put_poke); 492 + rxrpc_input_conn_event(sp->poke_conn, skb); 493 + rxrpc_put_connection(sp->poke_conn, rxrpc_conn_put_poke); 494 494 rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured); 495 495 break; 496 496 default: ··· 501 501 } 502 502 503 503 /* Deal with connections that want immediate attention. */ 504 - spin_lock_irq(&local->lock); 505 - list_splice_tail_init(&local->conn_attend_q, &conn_attend_q); 506 - spin_unlock_irq(&local->lock); 504 + if (!list_empty_careful(&local->conn_attend_q)) { 505 + spin_lock_irq(&local->lock); 506 + list_splice_tail_init(&local->conn_attend_q, &conn_attend_q); 507 + spin_unlock_irq(&local->lock); 508 + } 507 509 508 510 while ((conn = list_first_entry_or_null(&conn_attend_q, 509 511 struct rxrpc_connection,
+379
net/rxrpc/oob.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* Out of band message handling (e.g. challenge-response) 3 + * 4 + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 5 + * Written by David Howells (dhowells@redhat.com) 6 + */ 7 + 8 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 + 10 + #include <linux/net.h> 11 + #include <linux/gfp.h> 12 + #include <linux/skbuff.h> 13 + #include <linux/export.h> 14 + #include <linux/sched/signal.h> 15 + #include <net/sock.h> 16 + #include <net/af_rxrpc.h> 17 + #include "ar-internal.h" 18 + 19 + enum rxrpc_oob_command { 20 + RXRPC_OOB_CMD_UNSET, 21 + RXRPC_OOB_CMD_RESPOND, 22 + } __mode(byte); 23 + 24 + struct rxrpc_oob_params { 25 + u64 oob_id; /* ID number of message if reply */ 26 + s32 abort_code; 27 + enum rxrpc_oob_command command; 28 + bool have_oob_id:1; 29 + }; 30 + 31 + /* 32 + * Post an out-of-band message for attention by the socket or kernel service 33 + * associated with a reference call. 34 + */ 35 + void rxrpc_notify_socket_oob(struct rxrpc_call *call, struct sk_buff *skb) 36 + { 37 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 38 + struct rxrpc_sock *rx; 39 + struct sock *sk; 40 + 41 + rcu_read_lock(); 42 + 43 + rx = rcu_dereference(call->socket); 44 + if (rx) { 45 + sk = &rx->sk; 46 + spin_lock_irq(&rx->recvmsg_lock); 47 + 48 + if (sk->sk_state < RXRPC_CLOSE) { 49 + skb->skb_mstamp_ns = rx->oob_id_counter++; 50 + rxrpc_get_skb(skb, rxrpc_skb_get_post_oob); 51 + skb_queue_tail(&rx->recvmsg_oobq, skb); 52 + 53 + trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial); 54 + if (rx->app_ops) 55 + rx->app_ops->notify_oob(sk, skb); 56 + } 57 + 58 + spin_unlock_irq(&rx->recvmsg_lock); 59 + if (!rx->app_ops && !sock_flag(sk, SOCK_DEAD)) 60 + sk->sk_data_ready(sk); 61 + } 62 + 63 + rcu_read_unlock(); 64 + } 65 + 66 + /* 67 + * Locate the OOB message to respond to by its ID. 
68 + */ 69 + static struct sk_buff *rxrpc_find_pending_oob(struct rxrpc_sock *rx, u64 oob_id) 70 + { 71 + struct rb_node *p; 72 + struct sk_buff *skb; 73 + 74 + p = rx->pending_oobq.rb_node; 75 + while (p) { 76 + skb = rb_entry(p, struct sk_buff, rbnode); 77 + 78 + if (oob_id < skb->skb_mstamp_ns) 79 + p = p->rb_left; 80 + else if (oob_id > skb->skb_mstamp_ns) 81 + p = p->rb_right; 82 + else 83 + return skb; 84 + } 85 + 86 + return NULL; 87 + } 88 + 89 + /* 90 + * Add an OOB message into the pending-response set. We always assign the next 91 + * value from a 64-bit counter to the oob_id, so just assume we're always going 92 + * to be on the right-hand edge of the tree and that the counter won't wrap. 93 + * The tree is also given a ref to the message. 94 + */ 95 + void rxrpc_add_pending_oob(struct rxrpc_sock *rx, struct sk_buff *skb) 96 + { 97 + struct rb_node **pp = &rx->pending_oobq.rb_node, *p = NULL; 98 + 99 + while (*pp) { 100 + p = *pp; 101 + pp = &(*pp)->rb_right; 102 + } 103 + 104 + rb_link_node(&skb->rbnode, p, pp); 105 + rb_insert_color(&skb->rbnode, &rx->pending_oobq); 106 + } 107 + 108 + /* 109 + * Extract control messages from the sendmsg() control buffer. 
110 + */ 111 + static int rxrpc_sendmsg_oob_cmsg(struct msghdr *msg, struct rxrpc_oob_params *p) 112 + { 113 + struct cmsghdr *cmsg; 114 + int len; 115 + 116 + if (msg->msg_controllen == 0) 117 + return -EINVAL; 118 + 119 + for_each_cmsghdr(cmsg, msg) { 120 + if (!CMSG_OK(msg, cmsg)) 121 + return -EINVAL; 122 + 123 + len = cmsg->cmsg_len - sizeof(struct cmsghdr); 124 + _debug("CMSG %d, %d, %d", 125 + cmsg->cmsg_level, cmsg->cmsg_type, len); 126 + 127 + if (cmsg->cmsg_level != SOL_RXRPC) 128 + continue; 129 + 130 + switch (cmsg->cmsg_type) { 131 + case RXRPC_OOB_ID: 132 + if (len != sizeof(p->oob_id) || p->have_oob_id) 133 + return -EINVAL; 134 + memcpy(&p->oob_id, CMSG_DATA(cmsg), sizeof(p->oob_id)); 135 + p->have_oob_id = true; 136 + break; 137 + case RXRPC_RESPOND: 138 + if (p->command != RXRPC_OOB_CMD_UNSET) 139 + return -EINVAL; 140 + p->command = RXRPC_OOB_CMD_RESPOND; 141 + break; 142 + case RXRPC_ABORT: 143 + if (len != sizeof(p->abort_code) || p->abort_code) 144 + return -EINVAL; 145 + memcpy(&p->abort_code, CMSG_DATA(cmsg), sizeof(p->abort_code)); 146 + if (p->abort_code == 0) 147 + return -EINVAL; 148 + break; 149 + case RXRPC_RESP_RXGK_APPDATA: 150 + if (p->command != RXRPC_OOB_CMD_RESPOND) 151 + return -EINVAL; 152 + break; 153 + default: 154 + return -EINVAL; 155 + } 156 + } 157 + 158 + switch (p->command) { 159 + case RXRPC_OOB_CMD_RESPOND: 160 + if (!p->have_oob_id) 161 + return -EBADSLT; 162 + break; 163 + default: 164 + return -EINVAL; 165 + } 166 + 167 + return 0; 168 + } 169 + 170 + /* 171 + * Allow userspace to respond to an OOB using sendmsg(). 
172 + */ 173 + static int rxrpc_respond_to_oob(struct rxrpc_sock *rx, 174 + struct rxrpc_oob_params *p, 175 + struct msghdr *msg) 176 + { 177 + struct rxrpc_connection *conn; 178 + struct rxrpc_skb_priv *sp; 179 + struct sk_buff *skb; 180 + int ret; 181 + 182 + skb = rxrpc_find_pending_oob(rx, p->oob_id); 183 + if (skb) 184 + rb_erase(&skb->rbnode, &rx->pending_oobq); 185 + release_sock(&rx->sk); 186 + if (!skb) 187 + return -EBADSLT; 188 + 189 + sp = rxrpc_skb(skb); 190 + 191 + switch (p->command) { 192 + case RXRPC_OOB_CMD_RESPOND: 193 + ret = -EPROTO; 194 + if (skb->mark != RXRPC_OOB_CHALLENGE) 195 + break; 196 + conn = sp->chall.conn; 197 + ret = -EOPNOTSUPP; 198 + if (!conn->security->sendmsg_respond_to_challenge) 199 + break; 200 + if (p->abort_code) { 201 + rxrpc_abort_conn(conn, NULL, p->abort_code, -ECONNABORTED, 202 + rxrpc_abort_response_sendmsg); 203 + ret = 0; 204 + } else { 205 + ret = conn->security->sendmsg_respond_to_challenge(skb, msg); 206 + } 207 + break; 208 + default: 209 + ret = -EINVAL; 210 + break; 211 + } 212 + 213 + rxrpc_free_skb(skb, rxrpc_skb_put_oob); 214 + return ret; 215 + } 216 + 217 + /* 218 + * Send an out-of-band message or respond to a received out-of-band message. 
219 + * - caller gives us the socket lock 220 + * - the socket may be either a client socket or a server socket 221 + */ 222 + int rxrpc_sendmsg_oob(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) 223 + { 224 + struct rxrpc_oob_params p = {}; 225 + int ret; 226 + 227 + _enter(""); 228 + 229 + ret = rxrpc_sendmsg_oob_cmsg(msg, &p); 230 + if (ret < 0) 231 + goto error_release_sock; 232 + 233 + if (p.have_oob_id) 234 + return rxrpc_respond_to_oob(rx, &p, msg); 235 + 236 + release_sock(&rx->sk); 237 + 238 + switch (p.command) { 239 + default: 240 + ret = -EINVAL; 241 + break; 242 + } 243 + 244 + _leave(" = %d", ret); 245 + return ret; 246 + 247 + error_release_sock: 248 + release_sock(&rx->sk); 249 + return ret; 250 + } 251 + 252 + /** 253 + * rxrpc_kernel_query_oob - Query the parameters of an out-of-band message 254 + * @oob: The message to query 255 + * @_peer: Where to return the peer record 256 + * @_peer_appdata: The application data attached to a peer record 257 + * 258 + * Extract useful parameters from an out-of-band message. The source peer 259 + * parameters are returned through the argument list and the message type is 260 + * returned. 261 + * 262 + * Return: 263 + * * %RXRPC_OOB_CHALLENGE - Challenge wanting a response. 
264 + */ 265 + enum rxrpc_oob_type rxrpc_kernel_query_oob(struct sk_buff *oob, 266 + struct rxrpc_peer **_peer, 267 + unsigned long *_peer_appdata) 268 + { 269 + struct rxrpc_skb_priv *sp = rxrpc_skb(oob); 270 + enum rxrpc_oob_type type = oob->mark; 271 + 272 + switch (type) { 273 + case RXRPC_OOB_CHALLENGE: 274 + *_peer = sp->chall.conn->peer; 275 + *_peer_appdata = 0; /* TODO: retrieve appdata */ 276 + break; 277 + default: 278 + WARN_ON_ONCE(1); 279 + *_peer = NULL; 280 + *_peer_appdata = 0; 281 + break; 282 + } 283 + 284 + return type; 285 + } 286 + EXPORT_SYMBOL(rxrpc_kernel_query_oob); 287 + 288 + /** 289 + * rxrpc_kernel_dequeue_oob - Dequeue and return the front OOB message 290 + * @sock: The socket to query 291 + * @_type: Where to return the message type 292 + * 293 + * Dequeue the front OOB message, if there is one, and return it and 294 + * its type. 295 + * 296 + * Return: The sk_buff representing the OOB message or %NULL if the queue was 297 + * empty. 298 + */ 299 + struct sk_buff *rxrpc_kernel_dequeue_oob(struct socket *sock, 300 + enum rxrpc_oob_type *_type) 301 + { 302 + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 303 + struct sk_buff *oob; 304 + 305 + oob = skb_dequeue(&rx->recvmsg_oobq); 306 + if (oob) 307 + *_type = oob->mark; 308 + return oob; 309 + } 310 + EXPORT_SYMBOL(rxrpc_kernel_dequeue_oob); 311 + 312 + /** 313 + * rxrpc_kernel_free_oob - Free an out-of-band message 314 + * @oob: The OOB message to free 315 + * 316 + * Free an OOB message along with any resources it holds. 
317 + */ 318 + void rxrpc_kernel_free_oob(struct sk_buff *oob) 319 + { 320 + struct rxrpc_skb_priv *sp = rxrpc_skb(oob); 321 + 322 + switch (oob->mark) { 323 + case RXRPC_OOB_CHALLENGE: 324 + rxrpc_put_connection(sp->chall.conn, rxrpc_conn_put_oob); 325 + break; 326 + } 327 + 328 + rxrpc_free_skb(oob, rxrpc_skb_put_purge_oob); 329 + } 330 + EXPORT_SYMBOL(rxrpc_kernel_free_oob); 331 + 332 + /** 333 + * rxrpc_kernel_query_challenge - Query the parameters of a challenge 334 + * @challenge: The challenge to query 335 + * @_peer: Where to return the peer record 336 + * @_peer_appdata: The application data attached to a peer record 337 + * @_service_id: Where to return the connection service ID 338 + * @_security_index: Where to return the connection security index 339 + * 340 + * Extract useful parameters from a CHALLENGE message. 341 + */ 342 + void rxrpc_kernel_query_challenge(struct sk_buff *challenge, 343 + struct rxrpc_peer **_peer, 344 + unsigned long *_peer_appdata, 345 + u16 *_service_id, u8 *_security_index) 346 + { 347 + struct rxrpc_skb_priv *sp = rxrpc_skb(challenge); 348 + 349 + *_peer = sp->chall.conn->peer; 350 + *_peer_appdata = 0; /* TODO: retrieve appdata */ 351 + *_service_id = sp->hdr.serviceId; 352 + *_security_index = sp->hdr.securityIndex; 353 + } 354 + EXPORT_SYMBOL(rxrpc_kernel_query_challenge); 355 + 356 + /** 357 + * rxrpc_kernel_reject_challenge - Allow a kernel service to reject a challenge 358 + * @challenge: The challenge to be rejected 359 + * @abort_code: The abort code to stick into the ABORT packet 360 + * @error: Local error value 361 + * @why: Indication as to why. 362 + * 363 + * Allow a kernel service to reject a challenge by aborting the connection if 364 + * it's still in an abortable state. The error is returned so this function 365 + * can be used with a return statement. 366 + * 367 + * Return: The %error parameter. 
368 + */ 369 + int rxrpc_kernel_reject_challenge(struct sk_buff *challenge, u32 abort_code, 370 + int error, enum rxrpc_abort_reason why) 371 + { 372 + struct rxrpc_skb_priv *sp = rxrpc_skb(challenge); 373 + 374 + _enter("{%x},%d,%d,%u", sp->hdr.serial, abort_code, error, why); 375 + 376 + rxrpc_abort_conn(sp->chall.conn, NULL, abort_code, error, why); 377 + return error; 378 + } 379 + EXPORT_SYMBOL(rxrpc_kernel_reject_challenge);
+56
net/rxrpc/output.c
··· 916 916 peer->last_tx_at = ktime_get_seconds(); 917 917 _leave(""); 918 918 } 919 + 920 + /* 921 + * Send a RESPONSE message. 922 + */ 923 + void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *response) 924 + { 925 + struct rxrpc_skb_priv *sp = rxrpc_skb(response); 926 + struct scatterlist sg[16]; 927 + struct bio_vec bvec[16]; 928 + struct msghdr msg; 929 + size_t len = sp->resp.len; 930 + __be32 wserial; 931 + u32 serial = 0; 932 + int ret, nr_sg; 933 + 934 + _enter("C=%x,%x", conn->debug_id, sp->resp.challenge_serial); 935 + 936 + sg_init_table(sg, ARRAY_SIZE(sg)); 937 + ret = skb_to_sgvec(response, sg, 0, len); 938 + if (ret < 0) 939 + goto fail; 940 + nr_sg = ret; 941 + 942 + for (int i = 0; i < nr_sg; i++) 943 + bvec_set_page(&bvec[i], sg_page(&sg[i]), sg[i].length, sg[i].offset); 944 + 945 + iov_iter_bvec(&msg.msg_iter, WRITE, bvec, nr_sg, len); 946 + 947 + msg.msg_name = &conn->peer->srx.transport; 948 + msg.msg_namelen = conn->peer->srx.transport_len; 949 + msg.msg_control = NULL; 950 + msg.msg_controllen = 0; 951 + msg.msg_flags = MSG_SPLICE_PAGES; 952 + 953 + serial = rxrpc_get_next_serials(conn, 1); 954 + wserial = htonl(serial); 955 + 956 + ret = skb_store_bits(response, offsetof(struct rxrpc_wire_header, serial), 957 + &wserial, sizeof(wserial)); 958 + if (ret < 0) 959 + goto fail; 960 + 961 + rxrpc_local_dont_fragment(conn->local, false); 962 + 963 + ret = do_udp_sendmsg(conn->local->socket, &msg, len); 964 + if (ret < 0) 965 + goto fail; 966 + 967 + conn->peer->last_tx_at = ktime_get_seconds(); 968 + return; 969 + 970 + fail: 971 + trace_rxrpc_tx_fail(conn->debug_id, serial, ret, 972 + rxrpc_tx_point_response); 973 + kleave(" = %d", ret); 974 + }
+102 -18
net/rxrpc/recvmsg.c
··· 155 155 } 156 156 157 157 /* 158 + * Transcribe a call's user ID to a control message. 159 + */ 160 + static int rxrpc_recvmsg_user_id(struct rxrpc_call *call, struct msghdr *msg, 161 + int flags) 162 + { 163 + if (!test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) 164 + return 0; 165 + 166 + if (flags & MSG_CMSG_COMPAT) { 167 + unsigned int id32 = call->user_call_ID; 168 + 169 + return put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 170 + sizeof(unsigned int), &id32); 171 + } else { 172 + unsigned long idl = call->user_call_ID; 173 + 174 + return put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 175 + sizeof(unsigned long), &idl); 176 + } 177 + } 178 + 179 + /* 180 + * Deal with a CHALLENGE packet. 181 + */ 182 + static int rxrpc_recvmsg_challenge(struct socket *sock, struct msghdr *msg, 183 + struct sk_buff *challenge, unsigned int flags) 184 + { 185 + struct rxrpc_skb_priv *sp = rxrpc_skb(challenge); 186 + struct rxrpc_connection *conn = sp->chall.conn; 187 + 188 + return conn->security->challenge_to_recvmsg(conn, challenge, msg); 189 + } 190 + 191 + /* 192 + * Process OOB packets. Called with the socket locked. 
193 + */ 194 + static int rxrpc_recvmsg_oob(struct socket *sock, struct msghdr *msg, 195 + unsigned int flags) 196 + { 197 + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 198 + struct sk_buff *skb; 199 + bool need_response = false; 200 + int ret; 201 + 202 + skb = skb_peek(&rx->recvmsg_oobq); 203 + if (!skb) 204 + return -EAGAIN; 205 + rxrpc_see_skb(skb, rxrpc_skb_see_recvmsg); 206 + 207 + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_OOB_ID, sizeof(u64), 208 + &skb->skb_mstamp_ns); 209 + if (ret < 0) 210 + return ret; 211 + 212 + switch ((enum rxrpc_oob_type)skb->mark) { 213 + case RXRPC_OOB_CHALLENGE: 214 + need_response = true; 215 + ret = rxrpc_recvmsg_challenge(sock, msg, skb, flags); 216 + break; 217 + default: 218 + WARN_ONCE(1, "recvmsg() can't process unknown OOB type %u\n", 219 + skb->mark); 220 + ret = -EIO; 221 + break; 222 + } 223 + 224 + if (!(flags & MSG_PEEK)) 225 + skb_unlink(skb, &rx->recvmsg_oobq); 226 + if (need_response) 227 + rxrpc_add_pending_oob(rx, skb); 228 + else 229 + rxrpc_free_skb(skb, rxrpc_skb_put_oob); 230 + return ret; 231 + } 232 + 233 + /* 158 234 * Deliver messages to a call. This keeps processing packets until the buffer 159 235 * is filled and we find either more DATA (returns 0) or the end of the DATA 160 236 * (returns 1). 
If more packets are required, it returns -EAGAIN and if the ··· 241 165 size_t len, int flags, size_t *_offset) 242 166 { 243 167 struct rxrpc_skb_priv *sp; 168 + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 244 169 struct sk_buff *skb; 245 170 rxrpc_seq_t seq = 0; 246 171 size_t remain; ··· 284 207 trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq, 285 208 sp->offset, sp->len, ret2); 286 209 if (ret2 < 0) { 287 - kdebug("verify = %d", ret2); 288 210 ret = ret2; 289 211 goto out; 290 212 } ··· 331 255 332 256 if (!(flags & MSG_PEEK)) 333 257 rxrpc_rotate_rx_window(call); 258 + 259 + if (!rx->app_ops && 260 + !skb_queue_empty_lockless(&rx->recvmsg_oobq)) { 261 + trace_rxrpc_recvdata(call, rxrpc_recvmsg_oobq, seq, 262 + rx_pkt_offset, rx_pkt_len, ret); 263 + break; 264 + } 334 265 } 335 266 336 267 out: ··· 345 262 call->rx_pkt_offset = rx_pkt_offset; 346 263 call->rx_pkt_len = rx_pkt_len; 347 264 } 265 + 348 266 done: 349 267 trace_rxrpc_recvdata(call, rxrpc_recvmsg_data_return, seq, 350 268 rx_pkt_offset, rx_pkt_len, ret); ··· 385 301 /* Return immediately if a client socket has no outstanding calls */ 386 302 if (RB_EMPTY_ROOT(&rx->calls) && 387 303 list_empty(&rx->recvmsg_q) && 304 + skb_queue_empty_lockless(&rx->recvmsg_oobq) && 388 305 rx->sk.sk_state != RXRPC_SERVER_LISTENING) { 389 306 release_sock(&rx->sk); 390 307 return -EAGAIN; ··· 407 322 if (ret) 408 323 goto wait_error; 409 324 410 - if (list_empty(&rx->recvmsg_q)) { 325 + if (list_empty(&rx->recvmsg_q) && 326 + skb_queue_empty_lockless(&rx->recvmsg_oobq)) { 411 327 if (signal_pending(current)) 412 328 goto wait_interrupted; 413 329 trace_rxrpc_recvmsg(0, rxrpc_recvmsg_wait, 0); ··· 416 330 } 417 331 finish_wait(sk_sleep(&rx->sk), &wait); 418 332 goto try_again; 333 + } 334 + 335 + /* Deal with OOB messages before we consider getting normal data. 
*/ 336 + if (!skb_queue_empty_lockless(&rx->recvmsg_oobq)) { 337 + ret = rxrpc_recvmsg_oob(sock, msg, flags); 338 + release_sock(&rx->sk); 339 + if (ret == -EAGAIN) 340 + goto try_again; 341 + goto error_no_call; 419 342 } 420 343 421 344 /* Find the next call and dequeue it if we're not just peeking. If we ··· 437 342 call = list_entry(l, struct rxrpc_call, recvmsg_link); 438 343 439 344 if (!rxrpc_call_is_complete(call) && 440 - skb_queue_empty(&call->recvmsg_queue)) { 345 + skb_queue_empty(&call->recvmsg_queue) && 346 + skb_queue_empty(&rx->recvmsg_oobq)) { 441 347 list_del_init(&call->recvmsg_link); 442 348 spin_unlock_irq(&rx->recvmsg_lock); 443 349 release_sock(&rx->sk); ··· 473 377 if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) 474 378 BUG(); 475 379 476 - if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { 477 - if (flags & MSG_CMSG_COMPAT) { 478 - unsigned int id32 = call->user_call_ID; 479 - 480 - ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 481 - sizeof(unsigned int), &id32); 482 - } else { 483 - unsigned long idl = call->user_call_ID; 484 - 485 - ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 486 - sizeof(unsigned long), &idl); 487 - } 488 - if (ret < 0) 489 - goto error_unlock_call; 490 - } 380 + ret = rxrpc_recvmsg_user_id(call, msg, flags); 381 + if (ret < 0) 382 + goto error_unlock_call; 491 383 492 384 if (msg->msg_name && call->peer) { 493 385 size_t len = sizeof(call->dest_srx);
+184 -112
net/rxrpc/rxkad.c
··· 698 698 } 699 699 700 700 /* 701 - * send a Kerberos security response 702 - */ 703 - static int rxkad_send_response(struct rxrpc_connection *conn, 704 - struct rxrpc_host_header *hdr, 705 - struct rxkad_response *resp, 706 - const struct rxkad_key *s2) 707 - { 708 - struct rxrpc_wire_header whdr; 709 - struct msghdr msg; 710 - struct kvec iov[3]; 711 - size_t len; 712 - u32 serial; 713 - int ret; 714 - 715 - _enter(""); 716 - 717 - msg.msg_name = &conn->peer->srx.transport; 718 - msg.msg_namelen = conn->peer->srx.transport_len; 719 - msg.msg_control = NULL; 720 - msg.msg_controllen = 0; 721 - msg.msg_flags = 0; 722 - 723 - memset(&whdr, 0, sizeof(whdr)); 724 - whdr.epoch = htonl(hdr->epoch); 725 - whdr.cid = htonl(hdr->cid); 726 - whdr.type = RXRPC_PACKET_TYPE_RESPONSE; 727 - whdr.flags = conn->out_clientflag; 728 - whdr.securityIndex = hdr->securityIndex; 729 - whdr.serviceId = htons(hdr->serviceId); 730 - 731 - iov[0].iov_base = &whdr; 732 - iov[0].iov_len = sizeof(whdr); 733 - iov[1].iov_base = resp; 734 - iov[1].iov_len = sizeof(*resp); 735 - iov[2].iov_base = (void *)s2->ticket; 736 - iov[2].iov_len = s2->ticket_len; 737 - 738 - len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; 739 - 740 - serial = rxrpc_get_next_serial(conn); 741 - whdr.serial = htonl(serial); 742 - 743 - rxrpc_local_dont_fragment(conn->local, false); 744 - ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len); 745 - if (ret < 0) { 746 - trace_rxrpc_tx_fail(conn->debug_id, serial, ret, 747 - rxrpc_tx_point_rxkad_response); 748 - return -EAGAIN; 749 - } 750 - 751 - conn->peer->last_tx_at = ktime_get_seconds(); 752 - _leave(" = 0"); 753 - return 0; 754 - } 755 - 756 - /* 757 701 * calculate the response checksum 758 702 */ 759 703 static void rxkad_calc_response_checksum(struct rxkad_response *response) ··· 716 772 * encrypt the response packet 717 773 */ 718 774 static int rxkad_encrypt_response(struct rxrpc_connection *conn, 719 - struct rxkad_response *resp, 775 + struct 
sk_buff *response, 720 776 const struct rxkad_key *s2) 721 777 { 722 778 struct skcipher_request *req; 723 779 struct rxrpc_crypt iv; 724 780 struct scatterlist sg[1]; 781 + size_t encsize = sizeof(((struct rxkad_response *)0)->encrypted); 782 + int ret; 783 + 784 + sg_init_table(sg, ARRAY_SIZE(sg)); 785 + ret = skb_to_sgvec(response, sg, 786 + sizeof(struct rxrpc_wire_header) + 787 + offsetof(struct rxkad_response, encrypted), encsize); 788 + if (ret < 0) 789 + return ret; 725 790 726 791 req = skcipher_request_alloc(&conn->rxkad.cipher->base, GFP_NOFS); 727 792 if (!req) ··· 739 786 /* continue encrypting from where we left off */ 740 787 memcpy(&iv, s2->session_key, sizeof(iv)); 741 788 742 - sg_init_table(sg, 1); 743 - sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted)); 744 789 skcipher_request_set_sync_tfm(req, conn->rxkad.cipher); 745 790 skcipher_request_set_callback(req, 0, NULL, NULL); 746 - skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); 747 - crypto_skcipher_encrypt(req); 791 + skcipher_request_set_crypt(req, sg, sg, encsize, iv.x); 792 + ret = crypto_skcipher_encrypt(req); 748 793 skcipher_request_free(req); 749 - return 0; 794 + return ret; 795 + } 796 + 797 + /* 798 + * Validate a challenge packet. 
799 + */ 800 + static bool rxkad_validate_challenge(struct rxrpc_connection *conn, 801 + struct sk_buff *skb) 802 + { 803 + struct rxkad_challenge challenge; 804 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 805 + u32 version, min_level; 806 + int ret; 807 + 808 + _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); 809 + 810 + if (!conn->key) { 811 + rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, 812 + rxkad_abort_chall_no_key); 813 + return false; 814 + } 815 + 816 + ret = key_validate(conn->key); 817 + if (ret < 0) { 818 + rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret, 819 + rxkad_abort_chall_key_expired); 820 + return false; 821 + } 822 + 823 + if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), 824 + &challenge, sizeof(challenge)) < 0) { 825 + rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, 826 + rxkad_abort_chall_short); 827 + return false; 828 + } 829 + 830 + version = ntohl(challenge.version); 831 + sp->chall.rxkad_nonce = ntohl(challenge.nonce); 832 + min_level = ntohl(challenge.min_level); 833 + 834 + trace_rxrpc_rx_challenge(conn, sp->hdr.serial, version, 835 + sp->chall.rxkad_nonce, min_level); 836 + 837 + if (version != RXKAD_VERSION) { 838 + rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO, 839 + rxkad_abort_chall_version); 840 + return false; 841 + } 842 + 843 + if (conn->security_level < min_level) { 844 + rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EACCES, 845 + rxkad_abort_chall_level); 846 + return false; 847 + } 848 + return true; 849 + } 850 + 851 + /* 852 + * Insert the header into the response. 
853 + */ 854 + static noinline 855 + int rxkad_insert_response_header(struct rxrpc_connection *conn, 856 + const struct rxrpc_key_token *token, 857 + struct sk_buff *challenge, 858 + struct sk_buff *response, 859 + size_t *offset) 860 + { 861 + struct rxrpc_skb_priv *csp = rxrpc_skb(challenge); 862 + struct { 863 + struct rxrpc_wire_header whdr; 864 + struct rxkad_response resp; 865 + } h; 866 + int ret; 867 + 868 + h.whdr.epoch = htonl(conn->proto.epoch); 869 + h.whdr.cid = htonl(conn->proto.cid); 870 + h.whdr.callNumber = 0; 871 + h.whdr.serial = 0; 872 + h.whdr.seq = 0; 873 + h.whdr.type = RXRPC_PACKET_TYPE_RESPONSE; 874 + h.whdr.flags = conn->out_clientflag; 875 + h.whdr.userStatus = 0; 876 + h.whdr.securityIndex = conn->security_ix; 877 + h.whdr.cksum = 0; 878 + h.whdr.serviceId = htons(conn->service_id); 879 + h.resp.version = htonl(RXKAD_VERSION); 880 + h.resp.__pad = 0; 881 + h.resp.encrypted.epoch = htonl(conn->proto.epoch); 882 + h.resp.encrypted.cid = htonl(conn->proto.cid); 883 + h.resp.encrypted.checksum = 0; 884 + h.resp.encrypted.securityIndex = htonl(conn->security_ix); 885 + h.resp.encrypted.call_id[0] = htonl(conn->channels[0].call_counter); 886 + h.resp.encrypted.call_id[1] = htonl(conn->channels[1].call_counter); 887 + h.resp.encrypted.call_id[2] = htonl(conn->channels[2].call_counter); 888 + h.resp.encrypted.call_id[3] = htonl(conn->channels[3].call_counter); 889 + h.resp.encrypted.inc_nonce = htonl(csp->chall.rxkad_nonce + 1); 890 + h.resp.encrypted.level = htonl(conn->security_level); 891 + h.resp.kvno = htonl(token->kad->kvno); 892 + h.resp.ticket_len = htonl(token->kad->ticket_len); 893 + 894 + rxkad_calc_response_checksum(&h.resp); 895 + 896 + ret = skb_store_bits(response, *offset, &h, sizeof(h)); 897 + *offset += sizeof(h); 898 + return ret; 750 899 } 751 900 752 901 /* 753 902 * respond to a challenge packet 754 903 */ 755 904 static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, 756 - struct sk_buff *skb) 905 + struct 
sk_buff *challenge) 757 906 { 758 907 const struct rxrpc_key_token *token; 759 - struct rxkad_challenge challenge; 760 - struct rxkad_response *resp; 761 - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 762 - u32 version, nonce, min_level; 908 + struct rxrpc_skb_priv *csp, *rsp; 909 + struct sk_buff *response; 910 + size_t len, offset = 0; 763 911 int ret = -EPROTO; 764 912 765 913 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); 766 914 767 - if (!conn->key) 768 - return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, 769 - rxkad_abort_chall_no_key); 770 - 771 915 ret = key_validate(conn->key); 772 916 if (ret < 0) 773 - return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret, 917 + return rxrpc_abort_conn(conn, challenge, RXKADEXPIRED, ret, 774 918 rxkad_abort_chall_key_expired); 775 - 776 - if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), 777 - &challenge, sizeof(challenge)) < 0) 778 - return rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, 779 - rxkad_abort_chall_short); 780 - 781 - version = ntohl(challenge.version); 782 - nonce = ntohl(challenge.nonce); 783 - min_level = ntohl(challenge.min_level); 784 - 785 - trace_rxrpc_rx_challenge(conn, sp->hdr.serial, version, nonce, min_level); 786 - 787 - if (version != RXKAD_VERSION) 788 - return rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO, 789 - rxkad_abort_chall_version); 790 - 791 - if (conn->security_level < min_level) 792 - return rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EACCES, 793 - rxkad_abort_chall_level); 794 919 795 920 token = conn->key->payload.data[0]; 796 921 797 922 /* build the response packet */ 798 - resp = kzalloc(sizeof(struct rxkad_response), GFP_NOFS); 799 - if (!resp) 800 - return -ENOMEM; 923 + len = sizeof(struct rxrpc_wire_header) + 924 + sizeof(struct rxkad_response) + 925 + token->kad->ticket_len; 801 926 802 - resp->version = htonl(RXKAD_VERSION); 803 - resp->encrypted.epoch = htonl(conn->proto.epoch); 804 - resp->encrypted.cid = 
htonl(conn->proto.cid); 805 - resp->encrypted.securityIndex = htonl(conn->security_ix); 806 - resp->encrypted.inc_nonce = htonl(nonce + 1); 807 - resp->encrypted.level = htonl(conn->security_level); 808 - resp->kvno = htonl(token->kad->kvno); 809 - resp->ticket_len = htonl(token->kad->ticket_len); 810 - resp->encrypted.call_id[0] = htonl(conn->channels[0].call_counter); 811 - resp->encrypted.call_id[1] = htonl(conn->channels[1].call_counter); 812 - resp->encrypted.call_id[2] = htonl(conn->channels[2].call_counter); 813 - resp->encrypted.call_id[3] = htonl(conn->channels[3].call_counter); 927 + response = alloc_skb_with_frags(0, len, 0, &ret, GFP_NOFS); 928 + if (!response) 929 + goto error; 930 + rxrpc_new_skb(response, rxrpc_skb_new_response_rxkad); 931 + response->len = len; 932 + response->data_len = len; 814 933 815 - /* calculate the response checksum and then do the encryption */ 816 - rxkad_calc_response_checksum(resp); 817 - ret = rxkad_encrypt_response(conn, resp, token->kad); 818 - if (ret == 0) 819 - ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad); 820 - kfree(resp); 934 + offset = 0; 935 + ret = rxkad_insert_response_header(conn, token, challenge, response, 936 + &offset); 937 + if (ret < 0) 938 + goto error; 939 + 940 + ret = rxkad_encrypt_response(conn, response, token->kad); 941 + if (ret < 0) 942 + goto error; 943 + 944 + ret = skb_store_bits(response, offset, token->kad->ticket, 945 + token->kad->ticket_len); 946 + if (ret < 0) 947 + goto error; 948 + 949 + csp = rxrpc_skb(challenge); 950 + rsp = rxrpc_skb(response); 951 + rsp->resp.len = len; 952 + rsp->resp.challenge_serial = csp->hdr.serial; 953 + rxrpc_post_response(conn, response); 954 + response = NULL; 955 + ret = 0; 956 + 957 + error: 958 + rxrpc_free_skb(response, rxrpc_skb_put_response); 821 959 return ret; 822 960 } 961 + 962 + /* 963 + * RxKAD does automatic response only as there's nothing to manage that isn't 964 + * already in the key. 
965 + */ 966 + static int rxkad_sendmsg_respond_to_challenge(struct sk_buff *challenge, 967 + struct msghdr *msg) 968 + { 969 + return -EINVAL; 970 + } 971 + 972 + /** 973 + * rxkad_kernel_respond_to_challenge - Respond to a challenge with appdata 974 + * @challenge: The challenge to respond to 975 + * 976 + * Allow a kernel application to respond to a CHALLENGE. 977 + * 978 + * Return: %0 if successful and a negative error code otherwise. 979 + */ 980 + int rxkad_kernel_respond_to_challenge(struct sk_buff *challenge) 981 + { 982 + struct rxrpc_skb_priv *csp = rxrpc_skb(challenge); 983 + 984 + return rxkad_respond_to_challenge(csp->chall.conn, challenge); 985 + } 986 + EXPORT_SYMBOL(rxkad_kernel_respond_to_challenge); 823 987 824 988 /* 825 989 * decrypt the kerberos IV ticket in the response ··· 1346 1276 .verify_packet = rxkad_verify_packet, 1347 1277 .free_call_crypto = rxkad_free_call_crypto, 1348 1278 .issue_challenge = rxkad_issue_challenge, 1279 + .validate_challenge = rxkad_validate_challenge, 1280 + .sendmsg_respond_to_challenge = rxkad_sendmsg_respond_to_challenge, 1349 1281 .respond_to_challenge = rxkad_respond_to_challenge, 1350 1282 .verify_response = rxkad_verify_response, 1351 1283 .clear = rxkad_clear,
+11 -4
net/rxrpc/sendmsg.c
··· 758 758 if (rxrpc_call_is_complete(call)) { 759 759 /* it's too late for this call */ 760 760 ret = -ESHUTDOWN; 761 - } else if (p.command == RXRPC_CMD_SEND_ABORT) { 761 + goto out_put_unlock; 762 + } 763 + 764 + switch (p.command) { 765 + case RXRPC_CMD_SEND_ABORT: 762 766 rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED, 763 767 rxrpc_abort_call_sendmsg); 764 768 ret = 0; 765 - } else if (p.command != RXRPC_CMD_SEND_DATA) { 766 - ret = -EINVAL; 767 - } else { 769 + break; 770 + case RXRPC_CMD_SEND_DATA: 768 771 ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock); 772 + break; 773 + default: 774 + ret = -EINVAL; 775 + break; 769 776 } 770 777 771 778 out_put_unlock:
+40
net/rxrpc/server_key.c
··· 171 171 return ret; 172 172 } 173 173 EXPORT_SYMBOL(rxrpc_sock_set_security_keyring); 174 + 175 + /** 176 + * rxrpc_sock_set_manage_response - Set the manage-response flag for a kernel service 177 + * @sk: The socket to set the keyring on 178 + * @set: True to set, false to clear the flag 179 + * 180 + * Set the flag on an rxrpc socket to say that the caller wants to manage the 181 + * RESPONSE packet and the user-defined data it may contain. Setting this 182 + * means that recvmsg() will return messages with RXRPC_CHALLENGED in the 183 + * control message buffer containing information about the challenge. 184 + * 185 + * The user should respond to the challenge by passing RXRPC_RESPOND or 186 + * RXRPC_RESPOND_ABORT control messages with sendmsg() to the same call. 187 + * Supplementary control messages, such as RXRPC_RESP_RXGK_APPDATA, may be 188 + * included to indicate the parts the user wants to supply. 189 + * 190 + * The server will be passed the response data with a RXRPC_RESPONDED control 191 + * message when it gets the first data from each call. 192 + * 193 + * Note that this is only honoured by security classes that need auxiliary data 194 + * (e.g. RxGK). Those that don't offer the facility (e.g. RxKAD) respond 195 + * without consulting userspace. 196 + * 197 + * Return: The previous setting. 198 + */ 199 + int rxrpc_sock_set_manage_response(struct sock *sk, bool set) 200 + { 201 + struct rxrpc_sock *rx = rxrpc_sk(sk); 202 + int ret; 203 + 204 + lock_sock(sk); 205 + ret = !!test_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags); 206 + if (set) 207 + set_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags); 208 + else 209 + clear_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags); 210 + release_sock(sk); 211 + return ret; 212 + } 213 + EXPORT_SYMBOL(rxrpc_sock_set_manage_response);