Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'rxrpc-fixes-20201005' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Miscellaneous fixes

Here are some miscellaneous rxrpc fixes:

(1) Fix the xdr encoding of the contents read from an rxrpc key.

(2) Fix a BUG() for an unsupported encoding type.

(3) Fix missing _bh lock annotations.

(4) Fix acceptance handling for an incoming call where the incoming call
is encrypted.

(5) The server token keyring isn't network namespaced - it belongs to the
server, so there's no need. Namespacing it means that request_key()
fails to find it.

(6) Fix a leak of the server keyring.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+66 -290
+1 -1
include/uapi/linux/rxrpc.h
··· 51 51 RXRPC_BUSY = 6, /* -r: server busy received [terminal] */ 52 52 RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */ 53 53 RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */ 54 - RXRPC_ACCEPT = 9, /* s-: [Service] accept request */ 55 54 RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ 56 55 RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ 57 56 RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ 58 57 RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */ 58 + RXRPC_CHARGE_ACCEPT = 14, /* s-: Charge the accept pool with a user call ID */ 59 59 RXRPC__SUPPORTED 60 60 }; 61 61
+2 -5
net/rxrpc/ar-internal.h
··· 518 518 RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */ 519 519 RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */ 520 520 RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */ 521 - RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */ 522 521 RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */ 523 522 RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */ 524 523 RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */ ··· 713 714 enum rxrpc_command { 714 715 RXRPC_CMD_SEND_DATA, /* send data message */ 715 716 RXRPC_CMD_SEND_ABORT, /* request abort generation */ 716 - RXRPC_CMD_ACCEPT, /* [server] accept incoming call */ 717 717 RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ 718 + RXRPC_CMD_CHARGE_ACCEPT, /* [server] charge accept preallocation */ 718 719 }; 719 720 720 721 struct rxrpc_call_params { ··· 754 755 struct rxrpc_sock *, 755 756 struct sk_buff *); 756 757 void rxrpc_accept_incoming_calls(struct rxrpc_local *); 757 - struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, 758 - rxrpc_notify_rx_t); 759 - int rxrpc_reject_call(struct rxrpc_sock *); 758 + int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long); 760 759 761 760 /* 762 761 * call_event.c
+38 -225
net/rxrpc/call_accept.c
··· 39 39 unsigned int debug_id) 40 40 { 41 41 const void *here = __builtin_return_address(0); 42 - struct rxrpc_call *call; 42 + struct rxrpc_call *call, *xcall; 43 43 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); 44 + struct rb_node *parent, **pp; 44 45 int max, tmp; 45 46 unsigned int size = RXRPC_BACKLOG_MAX; 46 47 unsigned int head, tail, call_head, call_tail; ··· 95 94 } 96 95 97 96 /* Now it gets complicated, because calls get registered with the 98 - * socket here, particularly if a user ID is preassigned by the user. 97 + * socket here, with a user ID preassigned by the user. 99 98 */ 100 99 call = rxrpc_alloc_call(rx, gfp, debug_id); 101 100 if (!call) ··· 108 107 here, (const void *)user_call_ID); 109 108 110 109 write_lock(&rx->call_lock); 110 + 111 + /* Check the user ID isn't already in use */ 112 + pp = &rx->calls.rb_node; 113 + parent = NULL; 114 + while (*pp) { 115 + parent = *pp; 116 + xcall = rb_entry(parent, struct rxrpc_call, sock_node); 117 + if (user_call_ID < xcall->user_call_ID) 118 + pp = &(*pp)->rb_left; 119 + else if (user_call_ID > xcall->user_call_ID) 120 + pp = &(*pp)->rb_right; 121 + else 122 + goto id_in_use; 123 + } 124 + 125 + call->user_call_ID = user_call_ID; 126 + call->notify_rx = notify_rx; 111 127 if (user_attach_call) { 112 - struct rxrpc_call *xcall; 113 - struct rb_node *parent, **pp; 114 - 115 - /* Check the user ID isn't already in use */ 116 - pp = &rx->calls.rb_node; 117 - parent = NULL; 118 - while (*pp) { 119 - parent = *pp; 120 - xcall = rb_entry(parent, struct rxrpc_call, sock_node); 121 - if (user_call_ID < xcall->user_call_ID) 122 - pp = &(*pp)->rb_left; 123 - else if (user_call_ID > xcall->user_call_ID) 124 - pp = &(*pp)->rb_right; 125 - else 126 - goto id_in_use; 127 - } 128 - 129 - call->user_call_ID = user_call_ID; 130 - call->notify_rx = notify_rx; 131 128 rxrpc_get_call(call, rxrpc_call_got_kernel); 132 129 user_attach_call(call, user_call_ID); 133 - rxrpc_get_call(call, rxrpc_call_got_userid); 
134 - rb_link_node(&call->sock_node, parent, pp); 135 - rb_insert_color(&call->sock_node, &rx->calls); 136 - set_bit(RXRPC_CALL_HAS_USERID, &call->flags); 137 130 } 131 + 132 + rxrpc_get_call(call, rxrpc_call_got_userid); 133 + rb_link_node(&call->sock_node, parent, pp); 134 + rb_insert_color(&call->sock_node, &rx->calls); 135 + set_bit(RXRPC_CALL_HAS_USERID, &call->flags); 138 136 139 137 list_add(&call->sock_link, &rx->sock_calls); 140 138 ··· 157 157 } 158 158 159 159 /* 160 - * Preallocate sufficient service connections, calls and peers to cover the 161 - * entire backlog of a socket. When a new call comes in, if we don't have 162 - * sufficient of each available, the call gets rejected as busy or ignored. 163 - * 164 - * The backlog is replenished when a connection is accepted or rejected. 160 + * Allocate the preallocation buffers for incoming service calls. These must 161 + * be charged manually. 165 162 */ 166 163 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) 167 164 { ··· 170 173 return -ENOMEM; 171 174 rx->backlog = b; 172 175 } 173 - 174 - if (rx->discard_new_call) 175 - return 0; 176 - 177 - while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp, 178 - atomic_inc_return(&rxrpc_debug_id)) == 0) 179 - ; 180 176 181 177 return 0; 182 178 } ··· 323 333 rxrpc_see_call(call); 324 334 call->conn = conn; 325 335 call->security = conn->security; 336 + call->security_ix = conn->security_ix; 326 337 call->peer = rxrpc_get_peer(conn->params.peer); 327 338 call->cong_cwnd = call->peer->cong_cwnd; 328 339 return call; ··· 393 402 394 403 if (rx->notify_new_call) 395 404 rx->notify_new_call(&rx->sk, call, call->user_call_ID); 396 - else 397 - sk_acceptq_added(&rx->sk); 398 405 399 406 spin_lock(&conn->state_lock); 400 407 switch (conn->state) { ··· 404 415 405 416 case RXRPC_CONN_SERVICE: 406 417 write_lock(&call->state_lock); 407 - if (call->state < RXRPC_CALL_COMPLETE) { 408 - if (rx->discard_new_call) 409 - call->state = 
RXRPC_CALL_SERVER_RECV_REQUEST; 410 - else 411 - call->state = RXRPC_CALL_SERVER_ACCEPTING; 412 - } 418 + if (call->state < RXRPC_CALL_COMPLETE) 419 + call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 413 420 write_unlock(&call->state_lock); 414 421 break; 415 422 ··· 425 440 426 441 rxrpc_send_ping(call, skb); 427 442 428 - if (call->state == RXRPC_CALL_SERVER_ACCEPTING) 429 - rxrpc_notify_socket(call); 430 - 431 443 /* We have to discard the prealloc queue's ref here and rely on a 432 444 * combination of the RCU read lock and refs held either by the socket 433 445 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel ··· 442 460 } 443 461 444 462 /* 445 - * handle acceptance of a call by userspace 446 - * - assign the user call ID to the call at the front of the queue 447 - * - called with the socket locked. 463 + * Charge up socket with preallocated calls, attaching user call IDs. 448 464 */ 449 - struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, 450 - unsigned long user_call_ID, 451 - rxrpc_notify_rx_t notify_rx) 452 - __releases(&rx->sk.sk_lock.slock) 453 - __acquires(call->user_mutex) 465 + int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID) 454 466 { 455 - struct rxrpc_call *call; 456 - struct rb_node *parent, **pp; 457 - int ret; 467 + struct rxrpc_backlog *b = rx->backlog; 458 468 459 - _enter(",%lx", user_call_ID); 469 + if (rx->sk.sk_state == RXRPC_CLOSE) 470 + return -ESHUTDOWN; 460 471 461 - ASSERT(!irqs_disabled()); 462 - 463 - write_lock(&rx->call_lock); 464 - 465 - if (list_empty(&rx->to_be_accepted)) { 466 - write_unlock(&rx->call_lock); 467 - release_sock(&rx->sk); 468 - kleave(" = -ENODATA [empty]"); 469 - return ERR_PTR(-ENODATA); 470 - } 471 - 472 - /* check the user ID isn't already in use */ 473 - pp = &rx->calls.rb_node; 474 - parent = NULL; 475 - while (*pp) { 476 - parent = *pp; 477 - call = rb_entry(parent, struct rxrpc_call, sock_node); 478 - 479 - if (user_call_ID < call->user_call_ID) 
480 - pp = &(*pp)->rb_left; 481 - else if (user_call_ID > call->user_call_ID) 482 - pp = &(*pp)->rb_right; 483 - else 484 - goto id_in_use; 485 - } 486 - 487 - /* Dequeue the first call and check it's still valid. We gain 488 - * responsibility for the queue's reference. 489 - */ 490 - call = list_entry(rx->to_be_accepted.next, 491 - struct rxrpc_call, accept_link); 492 - write_unlock(&rx->call_lock); 493 - 494 - /* We need to gain the mutex from the interrupt handler without 495 - * upsetting lockdep, so we have to release it there and take it here. 496 - * We are, however, still holding the socket lock, so other accepts 497 - * must wait for us and no one can add the user ID behind our backs. 498 - */ 499 - if (mutex_lock_interruptible(&call->user_mutex) < 0) { 500 - release_sock(&rx->sk); 501 - kleave(" = -ERESTARTSYS"); 502 - return ERR_PTR(-ERESTARTSYS); 503 - } 504 - 505 - write_lock(&rx->call_lock); 506 - list_del_init(&call->accept_link); 507 - sk_acceptq_removed(&rx->sk); 508 - rxrpc_see_call(call); 509 - 510 - /* Find the user ID insertion point. 
*/ 511 - pp = &rx->calls.rb_node; 512 - parent = NULL; 513 - while (*pp) { 514 - parent = *pp; 515 - call = rb_entry(parent, struct rxrpc_call, sock_node); 516 - 517 - if (user_call_ID < call->user_call_ID) 518 - pp = &(*pp)->rb_left; 519 - else if (user_call_ID > call->user_call_ID) 520 - pp = &(*pp)->rb_right; 521 - else 522 - BUG(); 523 - } 524 - 525 - write_lock_bh(&call->state_lock); 526 - switch (call->state) { 527 - case RXRPC_CALL_SERVER_ACCEPTING: 528 - call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 529 - break; 530 - case RXRPC_CALL_COMPLETE: 531 - ret = call->error; 532 - goto out_release; 533 - default: 534 - BUG(); 535 - } 536 - 537 - /* formalise the acceptance */ 538 - call->notify_rx = notify_rx; 539 - call->user_call_ID = user_call_ID; 540 - rxrpc_get_call(call, rxrpc_call_got_userid); 541 - rb_link_node(&call->sock_node, parent, pp); 542 - rb_insert_color(&call->sock_node, &rx->calls); 543 - if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags)) 544 - BUG(); 545 - 546 - write_unlock_bh(&call->state_lock); 547 - write_unlock(&rx->call_lock); 548 - rxrpc_notify_socket(call); 549 - rxrpc_service_prealloc(rx, GFP_KERNEL); 550 - release_sock(&rx->sk); 551 - _leave(" = %p{%d}", call, call->debug_id); 552 - return call; 553 - 554 - out_release: 555 - _debug("release %p", call); 556 - write_unlock_bh(&call->state_lock); 557 - write_unlock(&rx->call_lock); 558 - rxrpc_release_call(rx, call); 559 - rxrpc_put_call(call, rxrpc_call_put); 560 - goto out; 561 - 562 - id_in_use: 563 - ret = -EBADSLT; 564 - write_unlock(&rx->call_lock); 565 - out: 566 - rxrpc_service_prealloc(rx, GFP_KERNEL); 567 - release_sock(&rx->sk); 568 - _leave(" = %d", ret); 569 - return ERR_PTR(ret); 570 - } 571 - 572 - /* 573 - * Handle rejection of a call by userspace 574 - * - reject the call at the front of the queue 575 - */ 576 - int rxrpc_reject_call(struct rxrpc_sock *rx) 577 - { 578 - struct rxrpc_call *call; 579 - bool abort = false; 580 - int ret; 581 - 582 - _enter(""); 583 
- 584 - ASSERT(!irqs_disabled()); 585 - 586 - write_lock(&rx->call_lock); 587 - 588 - if (list_empty(&rx->to_be_accepted)) { 589 - write_unlock(&rx->call_lock); 590 - return -ENODATA; 591 - } 592 - 593 - /* Dequeue the first call and check it's still valid. We gain 594 - * responsibility for the queue's reference. 595 - */ 596 - call = list_entry(rx->to_be_accepted.next, 597 - struct rxrpc_call, accept_link); 598 - list_del_init(&call->accept_link); 599 - sk_acceptq_removed(&rx->sk); 600 - rxrpc_see_call(call); 601 - 602 - write_lock_bh(&call->state_lock); 603 - switch (call->state) { 604 - case RXRPC_CALL_SERVER_ACCEPTING: 605 - __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED); 606 - abort = true; 607 - fallthrough; 608 - case RXRPC_CALL_COMPLETE: 609 - ret = call->error; 610 - goto out_discard; 611 - default: 612 - BUG(); 613 - } 614 - 615 - out_discard: 616 - write_unlock_bh(&call->state_lock); 617 - write_unlock(&rx->call_lock); 618 - if (abort) { 619 - rxrpc_send_abort_packet(call); 620 - rxrpc_release_call(rx, call); 621 - rxrpc_put_call(call, rxrpc_call_put); 622 - } 623 - rxrpc_service_prealloc(rx, GFP_KERNEL); 624 - _leave(" = %d", ret); 625 - return ret; 472 + return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID, 473 + GFP_KERNEL, 474 + atomic_inc_return(&rxrpc_debug_id)); 626 475 } 627 476 628 477 /*
+1 -4
net/rxrpc/call_object.c
··· 23 23 [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", 24 24 [RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc", 25 25 [RXRPC_CALL_SERVER_SECURING] = "SvSecure", 26 - [RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept", 27 26 [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq", 28 27 [RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq", 29 28 [RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl", ··· 351 352 call->call_id = sp->hdr.callNumber; 352 353 call->service_id = sp->hdr.serviceId; 353 354 call->cid = sp->hdr.cid; 354 - call->state = RXRPC_CALL_SERVER_ACCEPTING; 355 - if (sp->hdr.securityIndex > 0) 356 - call->state = RXRPC_CALL_SERVER_SECURING; 355 + call->state = RXRPC_CALL_SERVER_SECURING; 357 356 call->cong_tstamp = skb->tstamp; 358 357 359 358 /* Set the channel for this call. We don't get channel_lock as we're
+4 -4
net/rxrpc/conn_event.c
··· 269 269 if (call) { 270 270 write_lock_bh(&call->state_lock); 271 271 if (call->state == RXRPC_CALL_SERVER_SECURING) { 272 - call->state = RXRPC_CALL_SERVER_ACCEPTING; 272 + call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 273 273 rxrpc_notify_socket(call); 274 274 } 275 275 write_unlock_bh(&call->state_lock); ··· 340 340 return ret; 341 341 342 342 spin_lock(&conn->channel_lock); 343 - spin_lock(&conn->state_lock); 343 + spin_lock_bh(&conn->state_lock); 344 344 345 345 if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { 346 346 conn->state = RXRPC_CONN_SERVICE; 347 - spin_unlock(&conn->state_lock); 347 + spin_unlock_bh(&conn->state_lock); 348 348 for (loop = 0; loop < RXRPC_MAXCALLS; loop++) 349 349 rxrpc_call_is_secure( 350 350 rcu_dereference_protected( 351 351 conn->channels[loop].call, 352 352 lockdep_is_held(&conn->channel_lock))); 353 353 } else { 354 - spin_unlock(&conn->state_lock); 354 + spin_unlock_bh(&conn->state_lock); 355 355 } 356 356 357 357 spin_unlock(&conn->channel_lock);
+14 -6
net/rxrpc/key.c
··· 903 903 904 904 _enter(""); 905 905 906 - if (optlen <= 0 || optlen > PAGE_SIZE - 1) 906 + if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities) 907 907 return -EINVAL; 908 908 909 909 description = memdup_sockptr_nul(optval, optlen); ··· 940 940 if (IS_ERR(description)) 941 941 return PTR_ERR(description); 942 942 943 - key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL); 943 + key = request_key(&key_type_keyring, description, NULL); 944 944 if (IS_ERR(key)) { 945 945 kfree(description); 946 946 _leave(" = %ld", PTR_ERR(key)); ··· 1072 1072 1073 1073 switch (token->security_index) { 1074 1074 case RXRPC_SECURITY_RXKAD: 1075 - toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin, 1075 + toksize += 8 * 4; /* viceid, kvno, key*2, begin, 1076 1076 * end, primary, tktlen */ 1077 1077 toksize += RND(token->kad->ticket_len); 1078 1078 break; ··· 1107 1107 break; 1108 1108 1109 1109 default: /* we have a ticket we can't encode */ 1110 - BUG(); 1110 + pr_err("Unsupported key token type (%u)\n", 1111 + token->security_index); 1111 1112 continue; 1112 1113 } 1113 1114 ··· 1134 1133 do { \ 1135 1134 u32 _l = (l); \ 1136 1135 ENCODE(l); \ 1136 + memcpy(xdr, (s), _l); \ 1137 + if (_l & 3) \ 1138 + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ 1139 + xdr += (_l + 3) >> 2; \ 1140 + } while(0) 1141 + #define ENCODE_BYTES(l, s) \ 1142 + do { \ 1143 + u32 _l = (l); \ 1137 1144 memcpy(xdr, (s), _l); \ 1138 1145 if (_l & 3) \ 1139 1146 memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ ··· 1174 1165 case RXRPC_SECURITY_RXKAD: 1175 1166 ENCODE(token->kad->vice_id); 1176 1167 ENCODE(token->kad->kvno); 1177 - ENCODE_DATA(8, token->kad->session_key); 1168 + ENCODE_BYTES(8, token->kad->session_key); 1178 1169 ENCODE(token->kad->start); 1179 1170 ENCODE(token->kad->expiry); 1180 1171 ENCODE(token->kad->primary_flag); ··· 1224 1215 break; 1225 1216 1226 1217 default: 1227 - BUG(); 1228 1218 break; 1229 1219 } 1230 1220
+1 -35
net/rxrpc/recvmsg.c
··· 179 179 } 180 180 181 181 /* 182 - * Pass back notification of a new call. The call is added to the 183 - * to-be-accepted list. This means that the next call to be accepted might not 184 - * be the last call seen awaiting acceptance, but unless we leave this on the 185 - * front of the queue and block all other messages until someone gives us a 186 - * user_ID for it, there's not a lot we can do. 187 - */ 188 - static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx, 189 - struct rxrpc_call *call, 190 - struct msghdr *msg, int flags) 191 - { 192 - int tmp = 0, ret; 193 - 194 - ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp); 195 - 196 - if (ret == 0 && !(flags & MSG_PEEK)) { 197 - _debug("to be accepted"); 198 - write_lock_bh(&rx->recvmsg_lock); 199 - list_del_init(&call->recvmsg_link); 200 - write_unlock_bh(&rx->recvmsg_lock); 201 - 202 - rxrpc_get_call(call, rxrpc_call_got); 203 - write_lock(&rx->call_lock); 204 - list_add_tail(&call->accept_link, &rx->to_be_accepted); 205 - write_unlock(&rx->call_lock); 206 - } 207 - 208 - trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret); 209 - return ret; 210 - } 211 - 212 - /* 213 182 * End the packet reception phase. 214 183 */ 215 184 static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) ··· 599 630 } 600 631 601 632 switch (READ_ONCE(call->state)) { 602 - case RXRPC_CALL_SERVER_ACCEPTING: 603 - ret = rxrpc_recvmsg_new_call(rx, call, msg, flags); 604 - break; 605 633 case RXRPC_CALL_CLIENT_RECV_REPLY: 606 634 case RXRPC_CALL_SERVER_RECV_REQUEST: 607 635 case RXRPC_CALL_SERVER_ACK_REQUEST: ··· 694 728 call->debug_id, rxrpc_call_states[call->state], 695 729 iov_iter_count(iter), want_more); 696 730 697 - ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING); 731 + ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING); 698 732 699 733 mutex_lock(&call->user_mutex); 700 734
+5 -10
net/rxrpc/sendmsg.c
··· 530 530 return -EINVAL; 531 531 break; 532 532 533 - case RXRPC_ACCEPT: 533 + case RXRPC_CHARGE_ACCEPT: 534 534 if (p->command != RXRPC_CMD_SEND_DATA) 535 535 return -EINVAL; 536 - p->command = RXRPC_CMD_ACCEPT; 536 + p->command = RXRPC_CMD_CHARGE_ACCEPT; 537 537 if (len != 0) 538 538 return -EINVAL; 539 539 break; ··· 659 659 if (ret < 0) 660 660 goto error_release_sock; 661 661 662 - if (p.command == RXRPC_CMD_ACCEPT) { 662 + if (p.command == RXRPC_CMD_CHARGE_ACCEPT) { 663 663 ret = -EINVAL; 664 664 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) 665 665 goto error_release_sock; 666 - call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL); 667 - /* The socket is now unlocked. */ 668 - if (IS_ERR(call)) 669 - return PTR_ERR(call); 670 - ret = 0; 671 - goto out_put_unlock; 666 + ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID); 667 + goto error_release_sock; 672 668 } 673 669 674 670 call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID); ··· 686 690 case RXRPC_CALL_CLIENT_AWAIT_CONN: 687 691 case RXRPC_CALL_SERVER_PREALLOC: 688 692 case RXRPC_CALL_SERVER_SECURING: 689 - case RXRPC_CALL_SERVER_ACCEPTING: 690 693 rxrpc_put_call(call, rxrpc_call_put); 691 694 ret = -EBUSY; 692 695 goto error_release_sock;