Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net/rxrpc/ar-ack.c at v4.6-rc5

/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * How long to wait before scheduling ACK generation after seeing a
 * packet with RXRPC_REQUEST_ACK set (in jiffies).
 */
unsigned int rxrpc_requested_ack_delay = 1;

/*
 * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
 *
 * We use this when we've received new data packets.  If those packets aren't
 * all consumed within this time we will send a DELAY ACK if an ACK was not
 * requested to let the sender know it doesn't need to resend.
 */
unsigned int rxrpc_soft_ack_delay = 1 * HZ;

/*
 * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
 *
 * We use this when we've consumed some previously soft-ACK'd packets when
 * further packets aren't immediately received to decide when to send an IDLE
 * ACK to let the other end know that it can free up its Tx buffer space.
 */
unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;

/*
 * Receive window size in packets.  This indicates the maximum number of
 * unconsumed received packets we're willing to retain in memory.  Once this
 * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
 * packets.
 */
unsigned int rxrpc_rx_window_size = 32;

/*
 * Maximum Rx MTU size.  This indicates to the sender the size of jumbo
 * packets made by gluing normal packets together that we're willing to
 * handle.
 */
unsigned int rxrpc_rx_mtu = 5692;

/*
 * The maximum number of fragments in a received jumbo packet that we tell the
 * sender that we're willing to handle.
 */
unsigned int rxrpc_rx_jumbo_max = 4;

static const char *rxrpc_acks(u8 reason)
{
	static const char *const str[] = {
		"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
		"IDL", "-?-"
	};

	if (reason >= ARRAY_SIZE(str))
		reason = ARRAY_SIZE(str) - 1;
	return str[reason];
}
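
/*
 * Relative priorities of the ACK reasons, indexed by reason code.  A
 * proposed ACK is ignored if one of higher priority is already pending on
 * the call; a proposal of equal priority merely refreshes the serial number
 * recorded for the pending ACK (see __rxrpc_propose_ACK() below).
 */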
static const s8 rxrpc_ack_priority[] = {
	[0]				= 0,
	[RXRPC_ACK_DELAY]		= 1,
	[RXRPC_ACK_REQUESTED]		= 2,
	[RXRPC_ACK_IDLE]		= 3,
	[RXRPC_ACK_PING_RESPONSE]	= 4,
	[RXRPC_ACK_DUPLICATE]		= 5,
	[RXRPC_ACK_OUT_OF_SEQUENCE]	= 6,
	[RXRPC_ACK_EXCEEDS_WINDOW]	= 7,
	[RXRPC_ACK_NOSPACE]		= 8,
};

/*
 * propose an ACK be sent
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
			 u32 serial, bool immediate)
{
	unsigned long expiry;
	s8 prior = rxrpc_ack_priority[ack_reason];

	ASSERTCMP(prior, >, 0);

	_enter("{%d},%s,%%%x,%u",
	       call->debug_id, rxrpc_acks(ack_reason), serial, immediate);

	if (prior < rxrpc_ack_priority[call->ackr_reason]) {
		if (immediate)
			goto cancel_timer;
		return;
	}

	/* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers */
	if (prior == rxrpc_ack_priority[call->ackr_reason]) {
		if (prior <= 4)
			call->ackr_serial = serial;
		if (immediate)
			goto cancel_timer;
		return;
	}

	call->ackr_reason = ack_reason;
	call->ackr_serial = serial;

	switch (ack_reason) {
	case RXRPC_ACK_DELAY:
		_debug("run delay timer");
		expiry = rxrpc_soft_ack_delay;
		goto run_timer;

	case RXRPC_ACK_IDLE:
		if (!immediate) {
			_debug("run defer timer");
			expiry = rxrpc_idle_ack_delay;
			goto run_timer;
		}
		goto cancel_timer;

	case RXRPC_ACK_REQUESTED:
		expiry = rxrpc_requested_ack_delay;
		if (!expiry)
			goto cancel_timer;
		if (!immediate || serial == 1) {
			_debug("run defer timer");
			goto run_timer;
		}

	default:
		_debug("immediate ACK");
		goto cancel_timer;
	}

run_timer:
	expiry += jiffies;
	if (!timer_pending(&call->ack_timer) ||
	    time_after(call->ack_timer.expires, expiry))
		mod_timer(&call->ack_timer, expiry);
	return;

cancel_timer:
	_debug("cancel timer %%%u", serial);
	try_to_del_timer_sync(&call->ack_timer);
	read_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate)
{
	s8 prior = rxrpc_ack_priority[ack_reason];

	if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		spin_lock_bh(&call->lock);
		__rxrpc_propose_ACK(call, ack_reason, serial, immediate);
		spin_unlock_bh(&call->lock);
	}
}

/*
 * set the resend timer
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
			     unsigned long resend_at)
{
	read_lock_bh(&call->state_lock);
	if (call->state >= RXRPC_CALL_COMPLETE)
		resend = 0;

	if (resend & 1) {
		_debug("SET RESEND");
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	if (resend & 2) {
		_debug("MODIFY RESEND TIMER");
		set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		mod_timer(&call->resend_timer, resend_at);
	} else {
		_debug("KILL RESEND TIMER");
		del_timer_sync(&call->resend_timer);
		clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}
	read_unlock_bh(&call->state_lock);
}
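
/*
 * Note on the conventions used by the window-scanning code below: each slot
 * in call->acks_window holds a pointer to a queued Tx skb with bit 0
 * overloaded as a flag that is set when the peer soft-ACKs the packet and
 * cleared again if it is subsequently NACK'd.  The "resend" bitmask handed
 * to rxrpc_set_resend() uses bit 0 to mean "packets need resending now" and
 * bit 1 to mean "run the resend timer, expiring at resend_at".
 */
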
/*
 * resend packets
 */
static void rxrpc_resend(struct rxrpc_call *call)
{
	struct rxrpc_wire_header *whdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	bool stop;
	int loop;
	u8 resend;

	_enter("{%d,%d,%d,%d},",
	       call->acks_hard, call->acks_unacked,
	       atomic_read(&call->sequence),
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	stop = false;
	resend = 0;
	resend_at = 0;

	for (loop = call->acks_tail;
	     loop != call->acks_head && !stop;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		if (*p_txb & 1)
			continue;

		txb = (struct sk_buff *) *p_txb;
		sp = rxrpc_skb(txb);

		if (sp->need_resend) {
			sp->need_resend = false;

			/* each Tx packet has a new serial number */
			sp->hdr.serial = atomic_inc_return(&call->conn->serial);

			whdr = (struct rxrpc_wire_header *)txb->head;
			whdr->serial = htonl(sp->hdr.serial);

			_proto("Tx DATA %%%u { #%d }",
			       sp->hdr.serial, sp->hdr.seq);
			if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
				stop = true;
				sp->resend_at = jiffies + 3;
			} else {
				sp->resend_at =
					jiffies + rxrpc_resend_timeout;
			}
		}

		if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}

/*
 * handle resend timer expiry
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 resend;

	_enter("%d,%d,%d",
	       call->acks_tail, call->acks_unacked, call->acks_head);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	resend = 0;
	resend_at = 0;

	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		ASSERT(!(*p_txb & 1));

		if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}
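
/*
 * Note that rxrpc_process_soft_ACKs() below makes two passes over the Tx
 * window: the first applies the explicit ACK/NACK bytes carried in the
 * packet; the second treats every packet beyond the last explicitly
 * acknowledged one as implicitly NACK'd and considers it for resending.
 */
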
/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
				   struct rxrpc_ackpacket *ack,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 sacks[RXRPC_MAXACKS], resend;

	_enter("{%d,%d},{%d},",
	       call->acks_hard,
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
	       ack->nAcks);

	if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
		goto protocol_error;

	resend = 0;
	resend_at = 0;
	for (loop = 0; loop < ack->nAcks; loop++) {
		p_txb = call->acks_window;
		p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		switch (sacks[loop]) {
		case RXRPC_ACK_TYPE_ACK:
			sp->need_resend = false;
			*p_txb |= 1;
			break;
		case RXRPC_ACK_TYPE_NACK:
			sp->need_resend = true;
			*p_txb &= ~1;
			resend = 1;
			break;
		default:
			_debug("Unsupported ACK type %d", sacks[loop]);
			goto protocol_error;
		}
	}

	smp_mb();
	call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

	/* anything not explicitly ACK'd is implicitly NACK'd, but may just not
	 * have been received or processed yet by the far end */
	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		if (*p_txb & 1) {
			/* packet must have been discarded */
			sp->need_resend = true;
			*p_txb &= ~1;
			resend |= 1;
		} else if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = true;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave(" = 0");
	return 0;

protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;
}

/*
 * discard hard-ACK'd packets from the Tx window
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
{
	unsigned long _skb;
	int tail = call->acks_tail, old_tail;
	int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);

	kenter("{%u,%u},%u", call->acks_hard, win, hard);

	ASSERTCMP(hard - call->acks_hard, <=, win);

	while (call->acks_hard < hard) {
		smp_read_barrier_depends();
		_skb = call->acks_window[tail] & ~1;
		rxrpc_free_skb((struct sk_buff *) _skb);
		old_tail = tail;
		tail = (tail + 1) & (call->acks_winsz - 1);
		call->acks_tail = tail;
		if (call->acks_unacked == old_tail)
			call->acks_unacked = tail;
		call->acks_hard++;
	}

	wake_up(&call->tx_waitq);
}

/*
 * clear the Tx window in the event of a failure
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}
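
/*
 * Note the distinction maintained here: a soft ACK only marks a packet in
 * the window, since the peer may yet discard it and ask for retransmission,
 * whereas a hard ACK means the receiving application has consumed the
 * packet, so rxrpc_rotate_tx_window() can free the skb and advance the
 * window tail.
 */
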
/*
 * drain the out of sequence received packet queue into the packet Rx queue
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool terminal;
	int ret;

	_enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

	spin_lock_bh(&call->lock);

	ret = -ECONNRESET;
	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		goto socket_unavailable;

	skb = skb_dequeue(&call->rx_oos_queue);
	if (skb) {
		sp = rxrpc_skb(skb);

		_debug("drain OOS packet %d [%d]",
		       sp->hdr.seq, call->rx_first_oos);

		if (sp->hdr.seq != call->rx_first_oos) {
			skb_queue_head(&call->rx_oos_queue, skb);
			call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			_debug("requeue %p {%u}", skb, call->rx_first_oos);
		} else {
			skb->mark = RXRPC_SKB_MARK_DATA;
			terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
				    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
			ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
			BUG_ON(ret < 0);
			_debug("drain #%u", call->rx_data_post);
			call->rx_data_post++;

			/* find out what the next packet is */
			skb = skb_peek(&call->rx_oos_queue);
			if (skb)
				call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
			else
				call->rx_first_oos = 0;
			_debug("peek %p {%u}", skb, call->rx_first_oos);
		}
	}

	ret = 0;
socket_unavailable:
	spin_unlock_bh(&call->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * insert an out of sequence packet into the buffer
 */
static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp, *psp;
	struct sk_buff *p;
	u32 seq;

	sp = rxrpc_skb(skb);
	seq = sp->hdr.seq;
	_enter(",,{%u}", seq);

	skb->destructor = rxrpc_packet_destructor;
	ASSERTCMP(sp->call, ==, NULL);
	sp->call = call;
	rxrpc_get_call(call);

	/* insert into the buffer in sequence order */
	spin_lock_bh(&call->lock);

	skb_queue_walk(&call->rx_oos_queue, p) {
		psp = rxrpc_skb(p);
		if (psp->hdr.seq > seq) {
			_debug("insert oos #%u before #%u", seq, psp->hdr.seq);
			skb_insert(p, skb, &call->rx_oos_queue);
			goto inserted;
		}
	}

	_debug("append oos #%u", seq);
	skb_queue_tail(&call->rx_oos_queue, skb);
inserted:

	/* we might now have a new front to the queue */
	if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
		call->rx_first_oos = seq;

	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
	}
	read_unlock(&call->state_lock);

	spin_unlock_bh(&call->lock);
	_leave(" [stored #%u]", call->rx_first_oos);
}

/*
 * clear the Tx window on final ACK reception
 */
static void rxrpc_zap_tx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	unsigned long _skb, *acks_window;
	u8 winsz = call->acks_winsz;
	int tail;

	acks_window = call->acks_window;
	call->acks_window = NULL;

	while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
		tail = call->acks_tail;
		smp_read_barrier_depends();
		_skb = acks_window[tail] & ~1;
		smp_mb();
		call->acks_tail = (call->acks_tail + 1) & (winsz - 1);

		skb = (struct sk_buff *) _skb;
		sp = rxrpc_skb(skb);
		_debug("+++ clear Tx %u", sp->hdr.seq);
		rxrpc_free_skb(skb);
	}

	kfree(acks_window);
}

/*
 * process the extra information that may be appended to an ACK packet
 */
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				  unsigned int latest, int nAcks)
{
	struct rxrpc_ackinfo ackinfo;
	struct rxrpc_peer *peer;
	unsigned int mtu;

	if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
		_leave(" [no ackinfo]");
		return;
	}

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       latest,
	       ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
	       ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));

	mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));

	peer = call->conn->trans->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}
}
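
/*
 * Note for rxrpc_extract_ackinfo() above: by the time it is called, the
 * fixed-size ACK header has already been pulled from the skb, so the
 * trailing ackinfo sits after the nAcks soft-ACK bytes plus three bytes of
 * padding - hence the nAcks + 3 offset.
 */
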
/*
 * process packets in the reception queue
 */
static int rxrpc_process_rx_queue(struct rxrpc_call *call,
				  u32 *_abort_code)
{
	struct rxrpc_ackpacket ack;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool post_ACK;
	int latest;
	u32 hard, tx;

	_enter("");

process_further:
	skb = skb_dequeue(&call->rx_queue);
	if (!skb)
		return -EAGAIN;

	_net("deferred skb %p", skb);

	sp = rxrpc_skb(skb);

	_debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);

	post_ACK = false;

	switch (sp->hdr.type) {
		/* data packets that wind up here have been received out of
		 * order, need security processing or are jumbo packets */
	case RXRPC_PACKET_TYPE_DATA:
		_proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

		/* secured packets must be verified and possibly decrypted */
		if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
			goto protocol_error;

		rxrpc_insert_oos_packet(call, skb);
		goto process_further;

		/* partial ACK to process */
	case RXRPC_PACKET_TYPE_ACK:
		if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
			_debug("extraction failure");
			goto protocol_error;
		}
		if (!skb_pull(skb, sizeof(ack)))
			BUG();

		latest = sp->hdr.serial;
		hard = ntohl(ack.firstPacket);
		tx = atomic_read(&call->sequence);

		_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
		       latest,
		       ntohs(ack.maxSkew),
		       hard,
		       ntohl(ack.previousPacket),
		       ntohl(ack.serial),
		       rxrpc_acks(ack.reason),
		       ack.nAcks);

		rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);

		if (ack.reason == RXRPC_ACK_PING) {
			_proto("Rx ACK %%%u PING Request", latest);
			rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
					  sp->hdr.serial, true);
		}

		/* discard any out-of-order or duplicate ACKs */
		if (latest - call->acks_latest <= 0) {
			_debug("discard ACK %d <= %d",
			       latest, call->acks_latest);
			goto discard;
		}
		call->acks_latest = latest;

		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
		    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
		    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
			goto discard;

		_debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);

		if (hard > 0) {
			if (hard - 1 > tx) {
				_debug("hard-ACK'd packet %d not transmitted"
				       " (%d top)",
				       hard - 1, tx);
				goto protocol_error;
			}

			if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
			     call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
			    hard > tx) {
				call->acks_hard = tx;
				goto all_acked;
			}

			smp_rmb();
			rxrpc_rotate_tx_window(call, hard - 1);
		}

		if (ack.nAcks > 0) {
			if (hard - 1 + ack.nAcks > tx) {
				_debug("soft-ACK'd packet %d+%d not"
				       " transmitted (%d top)",
				       hard - 1, ack.nAcks, tx);
				goto protocol_error;
			}

			if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
				goto protocol_error;
		}
		goto discard;

		/* complete ACK to process */
	case RXRPC_PACKET_TYPE_ACKALL:
		goto all_acked;

		/* abort and busy are handled elsewhere */
	case RXRPC_PACKET_TYPE_BUSY:
	case RXRPC_PACKET_TYPE_ABORT:
		BUG();

		/* connection level events - also handled elsewhere */
	case RXRPC_PACKET_TYPE_CHALLENGE:
	case RXRPC_PACKET_TYPE_RESPONSE:
	case RXRPC_PACKET_TYPE_DEBUG:
		BUG();
	}

	/* if we've had a hard ACK that covers all the packets we've sent, then
	 * that ends that phase of the operation */
all_acked:
	write_lock_bh(&call->state_lock);
	_debug("ack all %d", call->state);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		break;
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		_debug("srv complete");
		call->state = RXRPC_CALL_COMPLETE;
		post_ACK = true;
		break;
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
		goto protocol_error_unlock; /* can't occur yet */
	default:
		write_unlock_bh(&call->state_lock);
		goto discard; /* assume packet left over from earlier phase */
	}

	write_unlock_bh(&call->state_lock);

	/* if all the packets we sent are hard-ACK'd, then we can discard
	 * whatever we've got left */
	_debug("clear Tx %d",
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	del_timer_sync(&call->resend_timer);
	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);

	if (call->acks_window)
		rxrpc_zap_tx_window(call);

	if (post_ACK) {
		/* post the final ACK message for userspace to pick up */
		_debug("post ACK");
		skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
		sp->call = call;
		rxrpc_get_call(call);
		spin_lock_bh(&call->lock);
		if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
			BUG();
		spin_unlock_bh(&call->lock);
		goto process_further;
	}

discard:
	rxrpc_free_skb(skb);
	goto process_further;

protocol_error_unlock:
	write_unlock_bh(&call->state_lock);
protocol_error:
	rxrpc_free_skb(skb);
	_leave(" = -EPROTO");
	return -EPROTO;
}
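
/*
 * Note that rxrpc_process_rx_queue() above loops via process_further until
 * the Rx queue is empty and then returns -EAGAIN, so -EAGAIN is its
 * "nothing more to do" indication rather than an error.
 */
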
/*
 * post a message to the socket Rx queue for recvmsg() to pick up
 */
static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
			      bool fatal)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	int ret;

	_enter("{%d,%lx},%u,%u,%d",
	       call->debug_id, call->flags, mark, error, fatal);

	/* remove timers and things for fatal messages */
	if (fatal) {
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}

	if (mark != RXRPC_SKB_MARK_NEW_CALL &&
	    !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		_leave("[no userid]");
		return 0;
	}

	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		skb = alloc_skb(0, GFP_NOFS);
		if (!skb)
			return -ENOMEM;

		rxrpc_new_skb(skb);

		skb->mark = mark;

		sp = rxrpc_skb(skb);
		memset(sp, 0, sizeof(*sp));
		sp->error = error;
		sp->call = call;
		rxrpc_get_call(call);

		spin_lock_bh(&call->lock);
		ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
		spin_unlock_bh(&call->lock);
		BUG_ON(ret < 0);
	}

	return 0;
}
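
/*
 * Note for rxrpc_post_message() above: a fatal message stops the ACK and
 * resend timers first, and nothing further is queued once a terminal
 * message has been posted for the call.
 */
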
/*
 * handle background processing of incoming call packets and ACK / abort
 * generation
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	struct rxrpc_ackinfo ackinfo;
	struct msghdr msg;
	struct kvec iov[5];
	enum rxrpc_call_event genbit;
	unsigned long bits;
	__be32 data, pad;
	size_t len;
	int loop, nbit, ioc, ret, mtu;
	u32 serial, abort_code = RX_PROTOCOL_ERROR;
	u8 *acks = NULL;

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx} [%lu]",
	       call->debug_id, rxrpc_call_states[call->state], call->events,
	       (jiffies - call->creation_jif) / (HZ / 10));

	if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
		_debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
		return;
	}

	/* there's a good chance we're going to have to send a message, so set
	 * one up in advance */
	msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(call->conn->epoch);
	whdr.cid = htonl(call->cid);
	whdr.callNumber = htonl(call->call_id);
	whdr.seq = 0;
	whdr.type = RXRPC_PACKET_TYPE_ACK;
	whdr.flags = call->conn->out_clientflag;
	whdr.userStatus = 0;
	whdr.securityIndex = call->conn->security_ix;
	whdr._rsvd = 0;
	whdr.serviceId = htons(call->service_id);

	memset(iov, 0, sizeof(iov));
	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);

	/* deal with events of a final nature */
	if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
		rxrpc_release_call(call);
		clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
	}

	if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
		int error;

		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		error = call->conn->trans->peer->net_error;
		_debug("post net error %d", error);

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
				       error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_ABORT, &call->events);

		_debug("post conn abort");

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       call->conn->error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
		whdr.type = RXRPC_PACKET_TYPE_BUSY;
		genbit = RXRPC_CALL_EV_REJECT_BUSY;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ECONNABORTED, true) < 0)
			goto no_mem;
		whdr.type = RXRPC_PACKET_TYPE_ABORT;
		data = htonl(call->abort_code);
		iov[1].iov_base = &data;
		iov[1].iov_len = sizeof(data);
		genbit = RXRPC_CALL_EV_ABORT;
		goto send_message;
	}
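
	/* The final ACK is sent with an IDLE reason and firstPacket pointing
	 * one past the last data packet consumed; once it has been
	 * transmitted, the genbit handling at the bottom of this function
	 * can mark the call complete.
	 */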
	if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
		genbit = RXRPC_CALL_EV_ACK_FINAL;

		ack.bufferSpace = htons(8);
		ack.maxSkew = 0;
		ack.serial = 0;
		ack.reason = RXRPC_ACK_IDLE;
		ack.nAcks = 0;
		call->ackr_reason = 0;

		spin_lock_bh(&call->lock);
		ack.serial = htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket = htonl(call->rx_data_eaten + 1);
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len = sizeof(ack);
		iov[2].iov_base = &pad;
		iov[2].iov_len = 3;
		iov[3].iov_base = &ackinfo;
		iov[3].iov_len = sizeof(ackinfo);
		goto send_ACK;
	}

	if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
			    (1 << RXRPC_CALL_EV_RCVD_ABORT))
	    ) {
		u32 mark;

		if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
			mark = RXRPC_SKB_MARK_REMOTE_ABORT;
		else
			mark = RXRPC_SKB_MARK_BUSY;

		_debug("post abort/busy");
		rxrpc_clear_tx_window(call);
		if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
		_debug("do implicit ackall");
		rxrpc_clear_tx_window(call);
	}

	if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
		write_lock_bh(&call->state_lock);
		if (call->state <= RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_TIMEOUT;
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		}
		write_unlock_bh(&call->state_lock);

		_debug("post timeout");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ETIME, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		goto kill_ACKs;
	}

	/* deal with assorted inbound messages */
	if (!skb_queue_empty(&call->rx_queue)) {
		switch (rxrpc_process_rx_queue(call, &abort_code)) {
		case 0:
		case -EAGAIN:
			break;
		case -ENOMEM:
			goto no_mem;
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EPROTO:
			rxrpc_abort_call(call, abort_code);
			goto kill_ACKs;
		}
	}

	/* handle resending */
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_resend_timer(call);
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_resend(call);
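
	/* Events of a final nature and inbound traffic having been dealt
	 * with, the rest of the processor handles routine work: ACK
	 * generation, completion of security negotiation and call
	 * acceptance.
	 */
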
	/* consider sending an ordinary ACK */
	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("send ACK: window: %d - %d { %lx }",
		       call->rx_data_eaten, call->ackr_win_top,
		       call->ackr_window[0]);

		if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
		    call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
			/* ACK by sending reply DATA packet in this state */
			clear_bit(RXRPC_CALL_EV_ACK, &call->events);
			goto maybe_reschedule;
		}

		genbit = RXRPC_CALL_EV_ACK;

		acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
			       GFP_NOFS);
		if (!acks)
			goto no_mem;

		//hdr.flags = RXRPC_SLOW_START_OK;
		ack.bufferSpace = htons(8);
		ack.maxSkew = 0;

		spin_lock_bh(&call->lock);
		ack.reason = call->ackr_reason;
		ack.serial = htonl(call->ackr_serial);
		ack.previousPacket = htonl(call->ackr_prev_seq);
		ack.firstPacket = htonl(call->rx_data_eaten + 1);

		ack.nAcks = 0;
		for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
			nbit = loop * BITS_PER_LONG;
			for (bits = call->ackr_window[loop]; bits; bits >>= 1
			     ) {
				_debug("- l=%d n=%d b=%lx", loop, nbit, bits);
				if (bits & 1) {
					acks[nbit] = RXRPC_ACK_TYPE_ACK;
					ack.nAcks = nbit + 1;
				}
				nbit++;
			}
		}
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len = sizeof(ack);
		iov[2].iov_base = acks;
		iov[2].iov_len = ack.nAcks;
		iov[3].iov_base = &pad;
		iov[3].iov_len = 3;
		iov[4].iov_base = &ackinfo;
		iov[4].iov_len = sizeof(ackinfo);

		switch (ack.reason) {
		case RXRPC_ACK_REQUESTED:
		case RXRPC_ACK_DUPLICATE:
		case RXRPC_ACK_OUT_OF_SEQUENCE:
		case RXRPC_ACK_EXCEEDS_WINDOW:
		case RXRPC_ACK_NOSPACE:
		case RXRPC_ACK_PING:
		case RXRPC_ACK_PING_RESPONSE:
			goto send_ACK_with_skew;
		case RXRPC_ACK_DELAY:
		case RXRPC_ACK_IDLE:
			goto send_ACK;
		}
	}

	/* handle completion of security negotiations on an incoming
	 * connection */
	if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
		_debug("secured");
		spin_lock_bh(&call->lock);

		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			_debug("securing");
			write_lock(&call->conn->lock);
			if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
			    !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
				_debug("not released");
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
				list_move_tail(&call->accept_link,
					       &call->socket->acceptq);
			}
			write_unlock(&call->conn->lock);
			read_lock(&call->state_lock);
			if (call->state < RXRPC_CALL_COMPLETE)
				set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
			read_unlock(&call->state_lock);
		}

		spin_unlock_bh(&call->lock);
		if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
			goto maybe_reschedule;
	}

	/* post a notification of an acceptable connection to the app */
	if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
		_debug("post accept");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
				       0, false) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
		goto maybe_reschedule;
	}

	/* handle incoming call acceptance */
	if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
		_debug("accepted");
		ASSERTCMP(call->rx_data_post, ==, 0);
		call->rx_data_post = 1;
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
		read_unlock_bh(&call->state_lock);
	}

	/* drain the out of sequence received packet queue into the packet Rx
	 * queue */
	if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
		while (call->rx_data_post == call->rx_first_oos)
			if (rxrpc_drain_rx_oos_queue(call) < 0)
				break;
		goto maybe_reschedule;
	}

	/* other events may have been raised since we started checking */
	goto maybe_reschedule;

send_ACK_with_skew:
	ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
			    ntohl(ack.serial));
send_ACK:
	mtu = call->conn->trans->peer->if_mtu;
	mtu -= call->conn->trans->peer->hdrsize;
	ackinfo.maxMTU = htonl(mtu);
	ackinfo.rwind = htonl(rxrpc_rx_window_size);

	/* permit the peer to send us jumbo packets if it wants to */
	ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
	ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       serial,
	       ntohs(ack.maxSkew),
	       ntohl(ack.firstPacket),
	       ntohl(ack.previousPacket),
	       ntohl(ack.serial),
	       rxrpc_acks(ack.reason),
	       ack.nAcks);

	del_timer_sync(&call->ack_timer);
	if (ack.nAcks > 0)
		set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
	goto send_message_2;
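
	/* Note the ACK wire format assembled above: the rxrpc_ackpacket
	 * header, then ack.nAcks soft-ACK type bytes, then three bytes of
	 * padding, then the rxrpc_ackinfo trailer - matching the nAcks + 3
	 * offset used when parsing a peer's ackinfo.
	 */
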
send_message:
	_debug("send message");

	serial = atomic_inc_return(&call->conn->serial);
	whdr.serial = htonl(serial);
	_proto("Tx %s %%%u", rxrpc_pkts[whdr.type], serial);
send_message_2:

	len = iov[0].iov_len;
	ioc = 1;
	if (iov[4].iov_len) {
		ioc = 5;
		len += iov[4].iov_len;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[3].iov_len) {
		ioc = 4;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[2].iov_len) {
		ioc = 3;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[1].iov_len) {
		ioc = 2;
		len += iov[1].iov_len;
	}

	ret = kernel_sendmsg(call->conn->trans->local->socket,
			     &msg, iov, ioc, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
		goto error;
	}

	switch (genbit) {
	case RXRPC_CALL_EV_ABORT:
		clear_bit(genbit, &call->events);
		clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
		goto kill_ACKs;

	case RXRPC_CALL_EV_ACK_FINAL:
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
			call->state = RXRPC_CALL_COMPLETE;
		write_unlock_bh(&call->state_lock);
		goto kill_ACKs;

	default:
		clear_bit(genbit, &call->events);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		case RXRPC_CALL_CLIENT_RECV_REPLY:
		case RXRPC_CALL_SERVER_RECV_REQUEST:
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			_debug("start ACK timer");
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
					  call->ackr_serial, false);
		default:
			break;
		}
		goto maybe_reschedule;
	}
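
	/* A successfully transmitted packet has its generating event cleared
	 * above; aborts and the final ACK then tear down outstanding ACK
	 * state at kill_ACKs, while everything else falls back to
	 * maybe_reschedule, possibly proposing a fresh DELAY ACK first.
	 */
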
kill_ACKs:
	del_timer_sync(&call->ack_timer);
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
		rxrpc_put_call(call);
	clear_bit(RXRPC_CALL_EV_ACK, &call->events);

maybe_reschedule:
	if (call->events || !skb_queue_empty(&call->rx_queue)) {
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

	/* don't leave aborted connections on the accept queue */
	if (call->state >= RXRPC_CALL_COMPLETE &&
	    !list_empty(&call->accept_link)) {
		_debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
		       call, call->events, call->flags, call->conn->cid);

		read_lock_bh(&call->state_lock);
		if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
		    !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

error:
	clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
	kfree(acks);

	/* because we don't want two CPUs both processing the work item for one
	 * call at the same time, we use a flag to note when it's busy; however
	 * this means there's a race between clearing the flag and setting the
	 * work pending bit and the work item being processed again */
	if (call->events && !work_pending(&call->processor)) {
		_debug("jumpstart %x", call->conn->cid);
		rxrpc_queue_call(call);
	}

	_leave("");
	return;

no_mem:
	_debug("out of memory");
	goto maybe_reschedule;
}