Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] RPC: clean up after nocong was removed

Clean-up: Move some macros that are specific to the Van Jacobson
implementation into xprt.c. Get rid of the cong_wait field in
rpc_xprt, which is no longer used. Get rid of xprt_clear_backlog.

Test-plan:
Compile with CONFIG_NFS enabled.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>

authored by Chuck Lever and committed by Trond Myklebust
555ee3af ed63c003

+19 -34
-22
include/linux/sunrpc/xprt.h
··· 15 15 #include <linux/sunrpc/sched.h> 16 16 #include <linux/sunrpc/xdr.h> 17 17 18 - /* 19 - * The transport code maintains an estimate on the maximum number of out- 20 - * standing RPC requests, using a smoothed version of the congestion 21 - * avoidance implemented in 44BSD. This is basically the Van Jacobson 22 - * congestion algorithm: If a retransmit occurs, the congestion window is 23 - * halved; otherwise, it is incremented by 1/cwnd when 24 - * 25 - * - a reply is received and 26 - * - a full number of requests are outstanding and 27 - * - the congestion window hasn't been updated recently. 28 - * 29 - * Upper procedures may check whether a request would block waiting for 30 - * a free RPC slot by using the RPC_CONGESTED() macro. 31 - */ 32 18 extern unsigned int xprt_udp_slot_table_entries; 33 19 extern unsigned int xprt_tcp_slot_table_entries; 34 20 35 21 #define RPC_MIN_SLOT_TABLE (2U) 36 22 #define RPC_DEF_SLOT_TABLE (16U) 37 23 #define RPC_MAX_SLOT_TABLE (128U) 38 - 39 - #define RPC_CWNDSHIFT (8U) 40 - #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) 41 - #define RPC_INITCWND RPC_CWNDSCALE 42 - #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) 43 - #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) 44 24 45 25 /* Default timeout values */ 46 26 #define RPC_MAX_UDP_TIMEOUT (60*HZ) ··· 193 213 void (*old_data_ready)(struct sock *, int); 194 214 void (*old_state_change)(struct sock *); 195 215 void (*old_write_space)(struct sock *); 196 - 197 - wait_queue_head_t cong_wait; 198 216 }; 199 217 200 218 #define XPRT_LAST_FRAG (1 << 0)
+19 -10
net/sunrpc/xprt.c
··· 62 62 static void xprt_connect_status(struct rpc_task *task); 63 63 static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); 64 64 65 - static int xprt_clear_backlog(struct rpc_xprt *xprt); 65 + /* 66 + * The transport code maintains an estimate on the maximum number of out- 67 + * standing RPC requests, using a smoothed version of the congestion 68 + * avoidance implemented in 44BSD. This is basically the Van Jacobson 69 + * congestion algorithm: If a retransmit occurs, the congestion window is 70 + * halved; otherwise, it is incremented by 1/cwnd when 71 + * 72 + * - a reply is received and 73 + * - a full number of requests are outstanding and 74 + * - the congestion window hasn't been updated recently. 75 + */ 76 + #define RPC_CWNDSHIFT (8U) 77 + #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) 78 + #define RPC_INITCWND RPC_CWNDSCALE 79 + #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) 80 + 81 + #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) 66 82 67 83 /** 68 84 * xprt_reserve_xprt - serialize write access to transports ··· 866 850 867 851 spin_lock(&xprt->reserve_lock); 868 852 list_add(&req->rq_list, &xprt->free); 869 - xprt_clear_backlog(xprt); 853 + rpc_wake_up_next(&xprt->backlog); 870 854 spin_unlock(&xprt->reserve_lock); 871 855 } ··· 918 902 919 903 spin_lock_init(&xprt->transport_lock); 920 904 spin_lock_init(&xprt->reserve_lock); 921 - init_waitqueue_head(&xprt->cong_wait); 922 905 923 906 INIT_LIST_HEAD(&xprt->free); 924 907 INIT_LIST_HEAD(&xprt->recv); ··· 926 911 xprt->timer.function = xprt_init_autodisconnect; 927 912 xprt->timer.data = (unsigned long) xprt; 928 913 xprt->last_used = jiffies; 914 + xprt->cwnd = RPC_INITCWND; 929 915 930 916 rpc_init_wait_queue(&xprt->pending, "xprt_pending"); 931 917 rpc_init_wait_queue(&xprt->sending, "xprt_sending"); ··· 971 955 rpc_wake_up(&xprt->resend); 972 956 xprt_wake_pending_tasks(xprt, -EIO); 973 957 rpc_wake_up(&xprt->backlog); 974 - wake_up(&xprt->cong_wait); 975 958 del_timer_sync(&xprt->timer); 976 - } 977 - 978 - static int xprt_clear_backlog(struct rpc_xprt *xprt) { 979 - rpc_wake_up_next(&xprt->backlog); 980 - wake_up(&xprt->cong_wait); 981 - return 1; 982 959 }
-2
net/sunrpc/xprtsock.c
··· 1100 1100 xprt->prot = IPPROTO_UDP; 1101 1101 xprt->port = XS_MAX_RESVPORT; 1102 1102 xprt->tsh_size = 0; 1103 - xprt->cwnd = RPC_INITCWND; 1104 1103 xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; 1105 1104 /* XXX: header size can vary due to auth type, IPv6, etc. */ 1106 1105 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); ··· 1138 1139 xprt->prot = IPPROTO_TCP; 1139 1140 xprt->port = XS_MAX_RESVPORT; 1140 1141 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 1141 - xprt->cwnd = RPC_MAXCWND(xprt); 1142 1142 xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; 1143 1143 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 1144 1144