Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rxrpc: Implement a mechanism to send an event notification to a connection

Provide a means by which an event notification can be sent to a connection
such that the I/O thread can pick it up and handle it rather than
doing it in a separate workqueue.

This is then used to move the deferred final ACK of a call into the I/O
thread rather than a separate work queue as part of the drive to do all
transmission from the I/O thread.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org

+55 -9
+2 -3
include/trace/events/rxrpc.h
··· 111 111 EM(rxrpc_conn_get_call_input, "GET inp-call") \ 112 112 EM(rxrpc_conn_get_conn_input, "GET inp-conn") \ 113 113 EM(rxrpc_conn_get_idle, "GET idle ") \ 114 - EM(rxrpc_conn_get_poke, "GET poke ") \ 114 + EM(rxrpc_conn_get_poke_timer, "GET poke ") \ 115 115 EM(rxrpc_conn_get_service_conn, "GET svc-conn") \ 116 116 EM(rxrpc_conn_new_client, "NEW client ") \ 117 117 EM(rxrpc_conn_new_service, "NEW service ") \ ··· 126 126 EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \ 127 127 EM(rxrpc_conn_put_unbundle, "PUT unbundle") \ 128 128 EM(rxrpc_conn_put_unidle, "PUT unidle ") \ 129 + EM(rxrpc_conn_put_work, "PUT work ") \ 129 130 EM(rxrpc_conn_queue_challenge, "QUE chall ") \ 130 - EM(rxrpc_conn_queue_retry_work, "QUE retry-wk") \ 131 131 EM(rxrpc_conn_queue_rx_work, "QUE rx-work ") \ 132 - EM(rxrpc_conn_queue_timer, "QUE timer ") \ 133 132 EM(rxrpc_conn_see_new_service_conn, "SEE new-svc ") \ 134 133 EM(rxrpc_conn_see_reap_service, "SEE reap-svc") \ 135 134 E_(rxrpc_conn_see_work, "SEE work ")
+5
net/rxrpc/ar-internal.h
··· 202 202 * - max 48 bytes (struct sk_buff::cb) 203 203 */ 204 204 struct rxrpc_skb_priv { 205 + struct rxrpc_connection *conn; /* Connection referred to (poke packet) */ 205 206 u16 offset; /* Offset of data */ 206 207 u16 len; /* Length of data */ 207 208 u8 flags; ··· 293 292 struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */ 294 293 struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */ 295 294 struct sk_buff_head rx_queue; /* Received packets */ 295 + struct list_head conn_attend_q; /* Conns requiring immediate attention */ 296 296 struct list_head call_attend_q; /* Calls requiring immediate attention */ 297 297 struct rb_root client_bundles; /* Client connection bundles by socket params */ 298 298 spinlock_t client_bundles_lock; /* Lock for client_bundles */ ··· 443 441 struct rxrpc_peer *peer; /* Remote endpoint */ 444 442 struct rxrpc_net *rxnet; /* Network namespace to which call belongs */ 445 443 struct key *key; /* Security details */ 444 + struct list_head attend_link; /* Link in local->conn_attend_q */ 446 445 447 446 refcount_t ref; 448 447 atomic_t active; /* Active count for service conns */ ··· 908 905 void rxrpc_process_connection(struct work_struct *); 909 906 void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool); 910 907 int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb); 908 + void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb); 911 909 912 910 /* 913 911 * conn_object.c ··· 916 912 extern unsigned int rxrpc_connection_expiry; 917 913 extern unsigned int rxrpc_closed_conn_expiry; 918 914 915 + void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why); 919 916 struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t); 920 917 struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *, 921 918 struct sockaddr_rxrpc *,
+10 -4
net/rxrpc/conn_event.c
··· 412 412 if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) 413 413 rxrpc_secure_connection(conn); 414 414 415 - /* Process delayed ACKs whose time has come. */ 416 - if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 417 - rxrpc_process_delayed_final_acks(conn, false); 418 - 419 415 /* go through the conn-level event packets, releasing the ref on this 420 416 * connection that each one has when we've finished with it */ 421 417 while ((skb = skb_dequeue(&conn->rx_queue))) { ··· 510 514 tracepoint_string("bad_conn_pkt")); 511 515 return -EPROTO; 512 516 } 517 + } 518 + 519 + /* 520 + * Input a connection event. 521 + */ 522 + void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb) 523 + { 524 + /* Process delayed ACKs whose time has come. */ 525 + if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 526 + rxrpc_process_delayed_final_acks(conn, false); 513 527 }
+19 -1
net/rxrpc/conn_object.c
··· 23 23 static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet, 24 24 unsigned long reap_at); 25 25 26 + void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why) 27 + { 28 + struct rxrpc_local *local = conn->local; 29 + bool busy; 30 + 31 + if (WARN_ON_ONCE(!local)) 32 + return; 33 + 34 + spin_lock_bh(&local->lock); 35 + busy = !list_empty(&conn->attend_link); 36 + if (!busy) { 37 + rxrpc_get_connection(conn, why); 38 + list_add_tail(&conn->attend_link, &local->conn_attend_q); 39 + } 40 + spin_unlock_bh(&local->lock); 41 + rxrpc_wake_up_io_thread(local); 42 + } 43 + 26 44 static void rxrpc_connection_timer(struct timer_list *timer) 27 45 { 28 46 struct rxrpc_connection *conn = 29 47 container_of(timer, struct rxrpc_connection, timer); 30 48 31 - rxrpc_queue_conn(conn, rxrpc_conn_queue_timer); 49 + rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer); 32 50 } 33 51 34 52 /*
+18 -1
net/rxrpc/io_thread.c
··· 421 421 */ 422 422 int rxrpc_io_thread(void *data) 423 423 { 424 + struct rxrpc_connection *conn; 424 425 struct sk_buff_head rx_queue; 425 426 struct rxrpc_local *local = data; 426 427 struct rxrpc_call *call; ··· 436 435 437 436 for (;;) { 438 437 rxrpc_inc_stat(local->rxnet, stat_io_loop); 438 + 439 + /* Deal with connections that want immediate attention. */ 440 + conn = list_first_entry_or_null(&local->conn_attend_q, 441 + struct rxrpc_connection, 442 + attend_link); 443 + if (conn) { 444 + spin_lock_bh(&local->lock); 445 + list_del_init(&conn->attend_link); 446 + spin_unlock_bh(&local->lock); 447 + 448 + rxrpc_input_conn_event(conn, NULL); 449 + rxrpc_put_connection(conn, rxrpc_conn_put_poke); 450 + continue; 451 + } 439 452 440 453 /* Deal with calls that want immediate attention. */ 441 454 if ((call = list_first_entry_or_null(&local->call_attend_q, ··· 478 463 rxrpc_input_error(local, skb); 479 464 rxrpc_free_skb(skb, rxrpc_skb_put_error_report); 480 465 break; 466 + break; 481 467 default: 482 468 WARN_ON_ONCE(1); 483 469 rxrpc_free_skb(skb, rxrpc_skb_put_unknown); ··· 497 481 set_current_state(TASK_INTERRUPTIBLE); 498 482 should_stop = kthread_should_stop(); 499 483 if (!skb_queue_empty(&local->rx_queue) || 500 - !list_empty(&local->call_attend_q)) { 484 + !list_empty(&local->call_attend_q) || 485 + !list_empty(&local->conn_attend_q)) { 501 486 __set_current_state(TASK_RUNNING); 502 487 continue; 503 488 }
+1
net/rxrpc/local_object.c
··· 100 100 init_rwsem(&local->defrag_sem); 101 101 init_completion(&local->io_thread_ready); 102 102 skb_queue_head_init(&local->rx_queue); 103 + INIT_LIST_HEAD(&local->conn_attend_q); 103 104 INIT_LIST_HEAD(&local->call_attend_q); 104 105 local->client_bundles = RB_ROOT; 105 106 spin_lock_init(&local->client_bundles_lock);