1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/net/sunrpc/xprtsock.c
4 *
5 * Client-side transport implementation for sockets.
6 *
7 * TCP callback races fixes (C) 1998 Red Hat
8 * TCP send fixes (C) 1998 Red Hat
9 * TCP NFS related read + write fixes
10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11 *
12 * Rewrite of large parts of the code in order to stabilize the TCP code.
13 * Fix behaviour when socket buffer is full.
14 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
15 *
16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
17 *
18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
19 * <gilles.quillard@bull.net>
20 */
21
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <linux/capability.h>
27#include <linux/pagemap.h>
28#include <linux/errno.h>
29#include <linux/socket.h>
30#include <linux/in.h>
31#include <linux/net.h>
32#include <linux/mm.h>
33#include <linux/un.h>
34#include <linux/udp.h>
35#include <linux/tcp.h>
36#include <linux/sunrpc/clnt.h>
37#include <linux/sunrpc/addr.h>
38#include <linux/sunrpc/sched.h>
39#include <linux/sunrpc/svcsock.h>
40#include <linux/sunrpc/xprtsock.h>
41#include <linux/file.h>
42#ifdef CONFIG_SUNRPC_BACKCHANNEL
43#include <linux/sunrpc/bc_xprt.h>
44#endif
45
46#include <net/sock.h>
47#include <net/checksum.h>
48#include <net/udp.h>
49#include <net/tcp.h>
50#include <linux/bvec.h>
51#include <linux/highmem.h>
52#include <linux/uio.h>
53
54#include <trace/events/sunrpc.h>
55
56#include "sunrpc.h"
57
58static void xs_close(struct rpc_xprt *xprt);
59static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
60 struct socket *sock);
61
62/*
63 * xprtsock tunables
64 */
65static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
66static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
67static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
68
69static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
70static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
71
72#define XS_TCP_LINGER_TO (15U * HZ)
73static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
74
75/*
76 * We can register our own files under /proc/sys/sunrpc by
77 * calling register_sysctl_table() again. The files in that
78 * directory become the union of all files registered there.
79 *
80 * We simply need to make sure that we don't collide with
81 * someone else's file names!
82 */
83
84static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
85static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
86static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
87static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
88static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
89
90static struct ctl_table_header *sunrpc_table_header;
91
92/*
93 * FIXME: changing the UDP slot table size should also resize the UDP
94 * socket buffers for existing UDP transports
95 */
96static struct ctl_table xs_tunables_table[] = {
97 {
98 .procname = "udp_slot_table_entries",
99 .data = &xprt_udp_slot_table_entries,
100 .maxlen = sizeof(unsigned int),
101 .mode = 0644,
102 .proc_handler = proc_dointvec_minmax,
103 .extra1 = &min_slot_table_size,
104 .extra2 = &max_slot_table_size
105 },
106 {
107 .procname = "tcp_slot_table_entries",
108 .data = &xprt_tcp_slot_table_entries,
109 .maxlen = sizeof(unsigned int),
110 .mode = 0644,
111 .proc_handler = proc_dointvec_minmax,
112 .extra1 = &min_slot_table_size,
113 .extra2 = &max_slot_table_size
114 },
115 {
116 .procname = "tcp_max_slot_table_entries",
117 .data = &xprt_max_tcp_slot_table_entries,
118 .maxlen = sizeof(unsigned int),
119 .mode = 0644,
120 .proc_handler = proc_dointvec_minmax,
121 .extra1 = &min_slot_table_size,
122 .extra2 = &max_tcp_slot_table_limit
123 },
124 {
125 .procname = "min_resvport",
126 .data = &xprt_min_resvport,
127 .maxlen = sizeof(unsigned int),
128 .mode = 0644,
129 .proc_handler = proc_dointvec_minmax,
130 .extra1 = &xprt_min_resvport_limit,
131 .extra2 = &xprt_max_resvport_limit
132 },
133 {
134 .procname = "max_resvport",
135 .data = &xprt_max_resvport,
136 .maxlen = sizeof(unsigned int),
137 .mode = 0644,
138 .proc_handler = proc_dointvec_minmax,
139 .extra1 = &xprt_min_resvport_limit,
140 .extra2 = &xprt_max_resvport_limit
141 },
142 {
143 .procname = "tcp_fin_timeout",
144 .data = &xs_tcp_fin_timeout,
145 .maxlen = sizeof(xs_tcp_fin_timeout),
146 .mode = 0644,
147 .proc_handler = proc_dointvec_jiffies,
148 },
149 { },
150};
151
152static struct ctl_table sunrpc_table[] = {
153 {
154 .procname = "sunrpc",
155 .mode = 0555,
156 .child = xs_tunables_table
157 },
158 { },
159};
160
161/*
162 * Wait duration for a reply from the RPC portmapper.
163 */
164#define XS_BIND_TO (60U * HZ)
165
166/*
167 * Delay if a UDP socket connect error occurs. This is most likely some
168 * kind of resource problem on the local host.
169 */
170#define XS_UDP_REEST_TO (2U * HZ)
171
172/*
173 * The reestablish timeout allows clients to delay for a bit before attempting
174 * to reconnect to a server that just dropped our connection.
175 *
176 * We implement an exponential backoff when trying to reestablish a TCP
177 * transport connection with the server. Some servers like to drop a TCP
178 * connection when they are overworked, so we start with a short timeout and
179 * increase over time if the server is down or not responding.
180 */
181#define XS_TCP_INIT_REEST_TO (3U * HZ)
182
183/*
184 * TCP idle timeout; client drops the transport socket if it is idle
185 * for this long. Note that we also time out UDP sockets to prevent
186 * holding port numbers when there is no RPC traffic.
187 */
188#define XS_IDLE_DISC_TO (5U * 60 * HZ)
189
190#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
191# undef RPC_DEBUG_DATA
192# define RPCDBG_FACILITY RPCDBG_TRANS
193#endif
194
195#ifdef RPC_DEBUG_DATA
196static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
197{
198 u8 *buf = (u8 *) packet;
199 int j;
200
201 dprintk("RPC: %s\n", msg);
202 for (j = 0; j < count && j < 128; j += 4) {
203 if (!(j & 31)) {
204 if (j)
205 dprintk("\n");
206 dprintk("0x%04x ", j);
207 }
208 dprintk("%02x%02x%02x%02x ",
209 buf[j], buf[j+1], buf[j+2], buf[j+3]);
210 }
211 dprintk("\n");
212}
213#else
214static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
215{
216 /* NOP */
217}
218#endif
219
220static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
221{
222 return (struct rpc_xprt *) sk->sk_user_data;
223}
224
225static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
226{
227 return (struct sockaddr *) &xprt->addr;
228}
229
230static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
231{
232 return (struct sockaddr_un *) &xprt->addr;
233}
234
235static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
236{
237 return (struct sockaddr_in *) &xprt->addr;
238}
239
240static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
241{
242 return (struct sockaddr_in6 *) &xprt->addr;
243}
244
245static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
246{
247 struct sockaddr *sap = xs_addr(xprt);
248 struct sockaddr_in6 *sin6;
249 struct sockaddr_in *sin;
250 struct sockaddr_un *sun;
251 char buf[128];
252
253 switch (sap->sa_family) {
254 case AF_LOCAL:
255 sun = xs_addr_un(xprt);
256 strlcpy(buf, sun->sun_path, sizeof(buf));
257 xprt->address_strings[RPC_DISPLAY_ADDR] =
258 kstrdup(buf, GFP_KERNEL);
259 break;
260 case AF_INET:
261 (void)rpc_ntop(sap, buf, sizeof(buf));
262 xprt->address_strings[RPC_DISPLAY_ADDR] =
263 kstrdup(buf, GFP_KERNEL);
264 sin = xs_addr_in(xprt);
265 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
266 break;
267 case AF_INET6:
268 (void)rpc_ntop(sap, buf, sizeof(buf));
269 xprt->address_strings[RPC_DISPLAY_ADDR] =
270 kstrdup(buf, GFP_KERNEL);
271 sin6 = xs_addr_in6(xprt);
272 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
273 break;
274 default:
275 BUG();
276 }
277
278 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
279}
280
281static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
282{
283 struct sockaddr *sap = xs_addr(xprt);
284 char buf[128];
285
286 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
287 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
288
289 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
290 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
291}
292
293static void xs_format_peer_addresses(struct rpc_xprt *xprt,
294 const char *protocol,
295 const char *netid)
296{
297 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
298 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
299 xs_format_common_peer_addresses(xprt);
300 xs_format_common_peer_ports(xprt);
301}
302
303static void xs_update_peer_port(struct rpc_xprt *xprt)
304{
305 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
306 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
307
308 xs_format_common_peer_ports(xprt);
309}
310
311static void xs_free_peer_addresses(struct rpc_xprt *xprt)
312{
313 unsigned int i;
314
315 for (i = 0; i < RPC_DISPLAY_MAX; i++)
316 switch (i) {
317 case RPC_DISPLAY_PROTO:
318 case RPC_DISPLAY_NETID:
319 continue;
320 default:
321 kfree(xprt->address_strings[i]);
322 }
323}
324
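/*
 * xs_alloc_sparse_pages - allocate backing pages for a sparse receive buffer
 *
 * For buffers marked XDRBUF_SPARSE_PAGES, allocate any pages still missing
 * from the page vector so that up to @want bytes can be received. Returns
 * @want, or, if an allocation fails, the smaller number of bytes that the
 * already-allocated pages can hold.
 */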
325static size_t
326xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
327{
328	size_t i, n;
329
330 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
331 return want;
332 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
333 for (i = 0; i < n; i++) {
334 if (buf->pages[i])
335 continue;
336 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
337 if (!buf->pages[i]) {
338 i *= PAGE_SIZE;
339 return i > buf->page_base ? i - buf->page_base : 0;
340 }
341 }
342 return want;
343}
344
345static ssize_t
346xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
347{
348 ssize_t ret;
349 if (seek != 0)
350 iov_iter_advance(&msg->msg_iter, seek);
351 ret = sock_recvmsg(sock, msg, flags);
352 return ret > 0 ? ret + seek : ret;
353}
354
355static ssize_t
356xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
357 struct kvec *kvec, size_t count, size_t seek)
358{
359 iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
360 return xs_sock_recvmsg(sock, msg, flags, seek);
361}
362
363static ssize_t
364xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
365 struct bio_vec *bvec, unsigned long nr, size_t count,
366 size_t seek)
367{
368 iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
369 return xs_sock_recvmsg(sock, msg, flags, seek);
370}
371
372static ssize_t
373xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
374 size_t count)
375{
376 iov_iter_discard(&msg->msg_iter, READ, count);
377 return sock_recvmsg(sock, msg, flags);
378}
379
380#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
381static void
382xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
383{
384 struct bvec_iter bi = {
385 .bi_size = count,
386 };
387 struct bio_vec bv;
388
389 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
390 for_each_bvec(bv, bvec, bi, bi)
391 flush_dcache_page(bv.bv_page);
392}
393#else
394static inline void
395xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
396{
397}
398#endif
399
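/*
 * xs_read_xdr_buf - receive data from a socket into an xdr_buf
 *
 * Fill the head kvec, the page vector and the tail kvec of @buf in turn,
 * skipping the first @seek bytes, which were consumed by a previous call.
 * The number of new bytes placed in @buf is returned via @read.
 */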
400static ssize_t
401xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
402 struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
403{
404 size_t want, seek_init = seek, offset = 0;
405 ssize_t ret;
406
407 if (seek < buf->head[0].iov_len) {
408 want = min_t(size_t, count, buf->head[0].iov_len);
409 ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
410 if (ret <= 0)
411 goto sock_err;
412 offset += ret;
413 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
414 goto out;
415 if (ret != want)
416 goto out;
417 seek = 0;
418 } else {
419 seek -= buf->head[0].iov_len;
420 offset += buf->head[0].iov_len;
421 }
422
423 want = xs_alloc_sparse_pages(buf,
424 min_t(size_t, count - offset, buf->page_len),
425 GFP_NOWAIT);
426 if (seek < want) {
427 ret = xs_read_bvec(sock, msg, flags, buf->bvec,
428 xdr_buf_pagecount(buf),
429 want + buf->page_base,
430 seek + buf->page_base);
431 if (ret <= 0)
432 goto sock_err;
433 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
434 offset += ret - buf->page_base;
435 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
436 goto out;
437 if (ret != want)
438 goto out;
439 seek = 0;
440 } else {
441 seek -= want;
442 offset += want;
443 }
444
445 if (seek < buf->tail[0].iov_len) {
446 want = min_t(size_t, count - offset, buf->tail[0].iov_len);
447 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
448 if (ret <= 0)
449 goto sock_err;
450 offset += ret;
451 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
452 goto out;
453 if (ret != want)
454 goto out;
455 } else
456 offset += buf->tail[0].iov_len;
457 ret = -EMSGSIZE;
458out:
459 *read = offset - seek_init;
460 return ret;
461sock_err:
462 offset += seek;
463 goto out;
464}
465
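/*
 * xs_read_header - seed the receive buffer with the record header
 *
 * On the first pass for a request, copy the XID and calldir words that were
 * read along with the record marker into the head of the receive buffer.
 */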
466static void
467xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
468{
469 if (!transport->recv.copied) {
470 if (buf->head[0].iov_len >= transport->recv.offset)
471 memcpy(buf->head[0].iov_base,
472 &transport->recv.xid,
473 transport->recv.offset);
474 transport->recv.copied = transport->recv.offset;
475 }
476}
477
478static bool
479xs_read_stream_request_done(struct sock_xprt *transport)
480{
481 return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
482}
483
484static ssize_t
485xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
486 int flags, struct rpc_rqst *req)
487{
488 struct xdr_buf *buf = &req->rq_private_buf;
489 size_t want, read;
490 ssize_t ret;
491
492 xs_read_header(transport, buf);
493
494 want = transport->recv.len - transport->recv.offset;
495 ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
496 transport->recv.copied + want, transport->recv.copied,
497 &read);
498 transport->recv.offset += read;
499 transport->recv.copied += read;
500 if (transport->recv.offset == transport->recv.len) {
501 if (xs_read_stream_request_done(transport))
502 msg->msg_flags |= MSG_EOR;
503 return read;
504 }
505
506 switch (ret) {
507 default:
508 break;
509 case -EFAULT:
510 case -EMSGSIZE:
511 msg->msg_flags |= MSG_TRUNC;
512 return read;
513 case 0:
514 return -ESHUTDOWN;
515 }
516 return ret < 0 ? ret : read;
517}
518
519static size_t
520xs_read_stream_headersize(bool isfrag)
521{
522 if (isfrag)
523 return sizeof(__be32);
524 return 3 * sizeof(__be32);
525}
526
527static ssize_t
528xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
529 int flags, size_t want, size_t seek)
530{
531 struct kvec kvec = {
532 .iov_base = &transport->recv.fraghdr,
533 .iov_len = want,
534 };
535 return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
536}
537
538#if defined(CONFIG_SUNRPC_BACKCHANNEL)
539static ssize_t
540xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
541{
542 struct rpc_xprt *xprt = &transport->xprt;
543 struct rpc_rqst *req;
544 ssize_t ret;
545
546 /* Look up and lock the request corresponding to the given XID */
547 req = xprt_lookup_bc_request(xprt, transport->recv.xid);
548 if (!req) {
549 printk(KERN_WARNING "Callback slot table overflowed\n");
550 return -ESHUTDOWN;
551 }
552
553 ret = xs_read_stream_request(transport, msg, flags, req);
554 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
555 xprt_complete_bc_request(req, transport->recv.copied);
556
557 return ret;
558}
559#else /* CONFIG_SUNRPC_BACKCHANNEL */
560static ssize_t
561xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
562{
563 return -ESHUTDOWN;
564}
565#endif /* CONFIG_SUNRPC_BACKCHANNEL */
566
567static ssize_t
568xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
569{
570 struct rpc_xprt *xprt = &transport->xprt;
571 struct rpc_rqst *req;
572 ssize_t ret = 0;
573
574 /* Look up and lock the request corresponding to the given XID */
575 spin_lock(&xprt->queue_lock);
576 req = xprt_lookup_rqst(xprt, transport->recv.xid);
577 if (!req) {
578 msg->msg_flags |= MSG_TRUNC;
579 goto out;
580 }
581 xprt_pin_rqst(req);
582 spin_unlock(&xprt->queue_lock);
583
584 ret = xs_read_stream_request(transport, msg, flags, req);
585
586 spin_lock(&xprt->queue_lock);
587 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
588 xprt_complete_rqst(req->rq_task, transport->recv.copied);
589 xprt_unpin_rqst(req);
590out:
591 spin_unlock(&xprt->queue_lock);
592 return ret;
593}
594
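/*
 * xs_read_stream - receive the next record from a stream transport
 *
 * Read the RPC record marker and header if we have not done so yet, then
 * hand the fragment body to the backchannel call or reply handler based on
 * its direction. Anything that cannot be matched to a request is discarded.
 */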
595static ssize_t
596xs_read_stream(struct sock_xprt *transport, int flags)
597{
598 struct msghdr msg = { 0 };
599 size_t want, read = 0;
600 ssize_t ret = 0;
601
602 if (transport->recv.len == 0) {
603 want = xs_read_stream_headersize(transport->recv.copied != 0);
604 ret = xs_read_stream_header(transport, &msg, flags, want,
605 transport->recv.offset);
606 if (ret <= 0)
607 goto out_err;
608 transport->recv.offset = ret;
609 if (transport->recv.offset != want)
610 return transport->recv.offset;
611 transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
612 RPC_FRAGMENT_SIZE_MASK;
613 transport->recv.offset -= sizeof(transport->recv.fraghdr);
614 read = ret;
615 }
616
617 switch (be32_to_cpu(transport->recv.calldir)) {
618 default:
619 msg.msg_flags |= MSG_TRUNC;
620 break;
621 case RPC_CALL:
622 ret = xs_read_stream_call(transport, &msg, flags);
623 break;
624 case RPC_REPLY:
625 ret = xs_read_stream_reply(transport, &msg, flags);
626 }
627 if (msg.msg_flags & MSG_TRUNC) {
628 transport->recv.calldir = cpu_to_be32(-1);
629 transport->recv.copied = -1;
630 }
631 if (ret < 0)
632 goto out_err;
633 read += ret;
634 if (transport->recv.offset < transport->recv.len) {
635 if (!(msg.msg_flags & MSG_TRUNC))
636 return read;
637 msg.msg_flags = 0;
638 ret = xs_read_discard(transport->sock, &msg, flags,
639 transport->recv.len - transport->recv.offset);
640 if (ret <= 0)
641 goto out_err;
642 transport->recv.offset += ret;
643 read += ret;
644 if (transport->recv.offset != transport->recv.len)
645 return read;
646 }
647 if (xs_read_stream_request_done(transport)) {
648 trace_xs_stream_read_request(transport);
649 transport->recv.copied = 0;
650 }
651 transport->recv.offset = 0;
652 transport->recv.len = 0;
653 return read;
654out_err:
655 return ret != 0 ? ret : -ESHUTDOWN;
656}
657
658static void xs_stream_data_receive(struct sock_xprt *transport)
659{
660 size_t read = 0;
661 ssize_t ret = 0;
662
663 mutex_lock(&transport->recv_mutex);
664 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
665 if (transport->sock == NULL)
666 goto out;
667 for (;;) {
668 ret = xs_read_stream(transport, MSG_DONTWAIT);
669 if (ret < 0)
670 break;
671 read += ret;
672 cond_resched();
673 }
674out:
675 mutex_unlock(&transport->recv_mutex);
676 trace_xs_stream_read_data(&transport->xprt, ret, read);
677}
678
679static void xs_stream_data_receive_workfn(struct work_struct *work)
680{
681 struct sock_xprt *transport =
682 container_of(work, struct sock_xprt, recv_worker);
683 xs_stream_data_receive(transport);
684}
685
686static void
687xs_stream_reset_connect(struct sock_xprt *transport)
688{
689 transport->recv.offset = 0;
690 transport->recv.len = 0;
691 transport->recv.copied = 0;
692 transport->xmit.offset = 0;
693 transport->xprt.stat.connect_count++;
694 transport->xprt.stat.connect_start = jiffies;
695}
696
697#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
698
699static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
700{
701 struct msghdr msg = {
702 .msg_name = addr,
703 .msg_namelen = addrlen,
704 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
705 };
706 struct kvec iov = {
707 .iov_base = vec->iov_base + base,
708 .iov_len = vec->iov_len - base,
709 };
710
711 if (iov.iov_len != 0)
712 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
713 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
714}
715
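/*
 * xs_send_pagedata - transmit the page section of an xdr_buf
 *
 * Push the pages out via sendpage() when zero-copy is safe, falling back to
 * sock_no_sendpage() otherwise. The number of bytes queued is added to
 * @sent_p.
 */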
716static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
717{
718 ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
719 int offset, size_t size, int flags);
720 struct page **ppage;
721 unsigned int remainder;
722 int err;
723
724 remainder = xdr->page_len - base;
725 base += xdr->page_base;
726 ppage = xdr->pages + (base >> PAGE_SHIFT);
727 base &= ~PAGE_MASK;
728 do_sendpage = sock->ops->sendpage;
729 if (!zerocopy)
730 do_sendpage = sock_no_sendpage;
731	for (;;) {
732 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
733 int flags = XS_SENDMSG_FLAGS;
734
735 remainder -= len;
736 if (more)
737 flags |= MSG_MORE;
738 if (remainder != 0)
739 flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
740 err = do_sendpage(sock, *ppage, base, len, flags);
741 if (remainder == 0 || err != len)
742 break;
743 *sent_p += err;
744 ppage++;
745 base = 0;
746 }
747 if (err > 0) {
748 *sent_p += err;
749 err = 0;
750 }
751 return err;
752}
753
754/**
755 * xs_sendpages - write pages directly to a socket
756 * @sock: socket to send on
757 * @addr: UDP only -- address of destination
758 * @addrlen: UDP only -- length of destination address
759 * @xdr: buffer containing this request
760 * @base: starting position in the buffer
761 * @zerocopy: true if it is safe to use sendpage()
762 * @sent_p: return the total number of bytes successfully queued for sending
763 *
764 */
765static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
766{
767 unsigned int remainder = xdr->len - base;
768 int err = 0;
769 int sent = 0;
770
771 if (unlikely(!sock))
772 return -ENOTSOCK;
773
774 if (base != 0) {
775 addr = NULL;
776 addrlen = 0;
777 }
778
779 if (base < xdr->head[0].iov_len || addr != NULL) {
780 unsigned int len = xdr->head[0].iov_len - base;
781 remainder -= len;
782 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
783 if (remainder == 0 || err != len)
784 goto out;
785 *sent_p += err;
786 base = 0;
787 } else
788 base -= xdr->head[0].iov_len;
789
790 if (base < xdr->page_len) {
791 unsigned int len = xdr->page_len - base;
792 remainder -= len;
793 err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
794 *sent_p += sent;
795 if (remainder == 0 || sent != len)
796 goto out;
797 base = 0;
798 } else
799 base -= xdr->page_len;
800
801 if (base >= xdr->tail[0].iov_len)
802 return 0;
803 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
804out:
805 if (err > 0) {
806 *sent_p += err;
807 err = 0;
808 }
809 return err;
810}
811
812/**
813 * xs_nospace - handle an incomplete transmission
814 * @req: pointer to RPC request
815 *
816 */
817static int xs_nospace(struct rpc_rqst *req)
818{
819 struct rpc_xprt *xprt = req->rq_xprt;
820 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
821 struct sock *sk = transport->inet;
822 int ret = -EAGAIN;
823
824 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
825 req->rq_task->tk_pid,
826 req->rq_slen - transport->xmit.offset,
827 req->rq_slen);
828
829 /* Protect against races with write_space */
830 spin_lock_bh(&xprt->transport_lock);
831
832 /* Don't race with disconnect */
833 if (xprt_connected(xprt)) {
834 /* wait for more buffer space */
835 sk->sk_write_pending++;
836 xprt_wait_for_buffer_space(xprt);
837 } else
838 ret = -ENOTCONN;
839
840 spin_unlock_bh(&xprt->transport_lock);
841
842 /* Race breaker in case memory is freed before above code is called */
843 if (ret == -EAGAIN) {
844 struct socket_wq *wq;
845
846 rcu_read_lock();
847 wq = rcu_dereference(sk->sk_wq);
848 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
849 rcu_read_unlock();
850
851 sk->sk_write_space(sk);
852 }
853 return ret;
854}
855
856static void
857xs_stream_prepare_request(struct rpc_rqst *req)
858{
859 req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_NOIO);
860}
861
862/*
863 * Determine if the previous message in the stream was aborted before it
864 * could complete transmission.
865 */
866static bool
867xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
868{
869 return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
870}
871
872/*
873 * Construct a stream transport record marker in @buf.
874 */
875static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
876{
877 u32 reclen = buf->len - sizeof(rpc_fraghdr);
878 rpc_fraghdr *base = buf->head[0].iov_base;
879 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
880}
881
882/**
883 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
884 * @req: pointer to RPC request
885 *
886 * Return values:
887 * 0: The request has been sent
888 * EAGAIN: The socket was blocked, please call again later to
889 * complete the request
890 * ENOTCONN: Caller needs to invoke connect logic then call again
891 * other: Some other error occurred, the request was not sent
892 */
893static int xs_local_send_request(struct rpc_rqst *req)
894{
895 struct rpc_xprt *xprt = req->rq_xprt;
896 struct sock_xprt *transport =
897 container_of(xprt, struct sock_xprt, xprt);
898 struct xdr_buf *xdr = &req->rq_snd_buf;
899 int status;
900 int sent = 0;
901
902 /* Close the stream if the previous transmission was incomplete */
903 if (xs_send_request_was_aborted(transport, req)) {
904 xs_close(xprt);
905 return -ENOTCONN;
906 }
907
908 xs_encode_stream_record_marker(&req->rq_snd_buf);
909
910 xs_pktdump("packet data:",
911 req->rq_svec->iov_base, req->rq_svec->iov_len);
912
913 req->rq_xtime = ktime_get();
914 status = xs_sendpages(transport->sock, NULL, 0, xdr,
915 transport->xmit.offset,
916 true, &sent);
917 dprintk("RPC: %s(%u) = %d\n",
918 __func__, xdr->len - transport->xmit.offset, status);
919
920 if (status == -EAGAIN && sock_writeable(transport->inet))
921 status = -ENOBUFS;
922
923 if (likely(sent > 0) || status == 0) {
924 transport->xmit.offset += sent;
925 req->rq_bytes_sent = transport->xmit.offset;
926 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
927 req->rq_xmit_bytes_sent += transport->xmit.offset;
928 req->rq_bytes_sent = 0;
929 transport->xmit.offset = 0;
930 return 0;
931 }
932 status = -EAGAIN;
933 }
934
935 switch (status) {
936 case -ENOBUFS:
937 break;
938 case -EAGAIN:
939 status = xs_nospace(req);
940 break;
941 default:
942 dprintk("RPC: sendmsg returned unrecognized error %d\n",
943 -status);
944 /* fall through */
945 case -EPIPE:
946 xs_close(xprt);
947 status = -ENOTCONN;
948 }
949
950 return status;
951}
952
953/**
954 * xs_udp_send_request - write an RPC request to a UDP socket
955 * @req: pointer to RPC request
956 *
957 * Return values:
958 * 0: The request has been sent
959 * EAGAIN: The socket was blocked, please call again later to
960 * complete the request
961 * ENOTCONN: Caller needs to invoke connect logic then call again
962 * other: Some other error occurred, the request was not sent
963 */
964static int xs_udp_send_request(struct rpc_rqst *req)
965{
966 struct rpc_xprt *xprt = req->rq_xprt;
967 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
968 struct xdr_buf *xdr = &req->rq_snd_buf;
969 int sent = 0;
970 int status;
971
972 xs_pktdump("packet data:",
973 req->rq_svec->iov_base,
974 req->rq_svec->iov_len);
975
976 if (!xprt_bound(xprt))
977 return -ENOTCONN;
978
979 if (!xprt_request_get_cong(xprt, req))
980 return -EBADSLT;
981
982 req->rq_xtime = ktime_get();
983 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
984 xdr, 0, true, &sent);
985
986 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
987 xdr->len, status);
988
989 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
990 if (status == -EPERM)
991 goto process_status;
992
993 if (status == -EAGAIN && sock_writeable(transport->inet))
994 status = -ENOBUFS;
995
996 if (sent > 0 || status == 0) {
997 req->rq_xmit_bytes_sent += sent;
998 if (sent >= req->rq_slen)
999 return 0;
1000 /* Still some bytes left; set up for a retry later. */
1001 status = -EAGAIN;
1002 }
1003
1004process_status:
1005 switch (status) {
1006 case -ENOTSOCK:
1007 status = -ENOTCONN;
1008 /* Should we call xs_close() here? */
1009 break;
1010 case -EAGAIN:
1011 status = xs_nospace(req);
1012 break;
1013 case -ENETUNREACH:
1014 case -ENOBUFS:
1015 case -EPIPE:
1016 case -ECONNREFUSED:
1017 case -EPERM:
1018 /* When the server has died, an ICMP port unreachable message
1019 * prompts ECONNREFUSED. */
1020 break;
1021 default:
1022 dprintk("RPC: sendmsg returned unrecognized error %d\n",
1023 -status);
1024 }
1025
1026 return status;
1027}
1028
1029/**
1030 * xs_tcp_send_request - write an RPC request to a TCP socket
1031 * @req: pointer to RPC request
1032 *
1033 * Return values:
1034 * 0: The request has been sent
1035 * EAGAIN: The socket was blocked, please call again later to
1036 * complete the request
1037 * ENOTCONN: Caller needs to invoke connect logic then call again
1038 * other: Some other error occurred, the request was not sent
1039 *
1040 * XXX: In the case of soft timeouts, should we eventually give up
1041 * if sendmsg is not able to make progress?
1042 */
1043static int xs_tcp_send_request(struct rpc_rqst *req)
1044{
1045 struct rpc_xprt *xprt = req->rq_xprt;
1046 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1047 struct xdr_buf *xdr = &req->rq_snd_buf;
1048 bool zerocopy = true;
1049 bool vm_wait = false;
1050 int status;
1051 int sent;
1052
1053 /* Close the stream if the previous transmission was incomplete */
1054 if (xs_send_request_was_aborted(transport, req)) {
1055 if (transport->sock != NULL)
1056 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
1057 return -ENOTCONN;
1058 }
1059
1060 xs_encode_stream_record_marker(&req->rq_snd_buf);
1061
1062 xs_pktdump("packet data:",
1063 req->rq_svec->iov_base,
1064 req->rq_svec->iov_len);
1065 /* Don't use zero copy if this is a resend. If the RPC call
1066 * completes while the socket holds a reference to the pages,
1067 * then we may end up resending corrupted data.
1068 */
1069 if (req->rq_task->tk_flags & RPC_TASK_SENT)
1070 zerocopy = false;
1071
1072 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
1073 xs_tcp_set_socket_timeouts(xprt, transport->sock);
1074
1075 /* Continue transmitting the packet/record. We must be careful
1076 * to cope with writespace callbacks arriving _after_ we have
1077 * called sendmsg(). */
1078 req->rq_xtime = ktime_get();
1079 while (1) {
1080 sent = 0;
1081 status = xs_sendpages(transport->sock, NULL, 0, xdr,
1082 transport->xmit.offset,
1083 zerocopy, &sent);
1084
1085 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
1086 xdr->len - transport->xmit.offset, status);
1087
1088 /* If we've sent the entire packet, immediately
1089 * reset the count of bytes sent. */
1090 transport->xmit.offset += sent;
1091 req->rq_bytes_sent = transport->xmit.offset;
1092 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
1093 req->rq_xmit_bytes_sent += transport->xmit.offset;
1094 req->rq_bytes_sent = 0;
1095 transport->xmit.offset = 0;
1096 return 0;
1097 }
1098
1099 WARN_ON_ONCE(sent == 0 && status == 0);
1100
1101		if (status == -EAGAIN) {
1102 /*
1103 * Return EAGAIN if we're sure we're hitting the
1104 * socket send buffer limits.
1105 */
1106 if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
1107 break;
1108 /*
1109 * Did we hit a memory allocation failure?
1110 */
1111 if (sent == 0) {
1112 status = -ENOBUFS;
1113 if (vm_wait)
1114 break;
1115 /* Retry, knowing now that we're below the
1116 * socket send buffer limit
1117 */
1118 vm_wait = true;
1119 }
1120 continue;
1121 }
1122 if (status < 0)
1123 break;
1124 vm_wait = false;
1125 }
1126
1127 switch (status) {
1128 case -ENOTSOCK:
1129 status = -ENOTCONN;
1130 /* Should we call xs_close() here? */
1131 break;
1132 case -EAGAIN:
1133 status = xs_nospace(req);
1134 break;
1135 case -ECONNRESET:
1136 case -ECONNREFUSED:
1137 case -ENOTCONN:
1138 case -EADDRINUSE:
1139 case -ENOBUFS:
1140 case -EPIPE:
1141 break;
1142 default:
1143 dprintk("RPC: sendmsg returned unrecognized error %d\n",
1144 -status);
1145 }
1146
1147 return status;
1148}
1149
1150static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1151{
1152 transport->old_data_ready = sk->sk_data_ready;
1153 transport->old_state_change = sk->sk_state_change;
1154 transport->old_write_space = sk->sk_write_space;
1155 transport->old_error_report = sk->sk_error_report;
1156}
1157
1158static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1159{
1160 sk->sk_data_ready = transport->old_data_ready;
1161 sk->sk_state_change = transport->old_state_change;
1162 sk->sk_write_space = transport->old_write_space;
1163 sk->sk_error_report = transport->old_error_report;
1164}
1165
1166static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
1167{
1168 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1169
1170 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1171}
1172
1173static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1174{
1175 smp_mb__before_atomic();
1176 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1177 clear_bit(XPRT_CLOSING, &xprt->state);
1178 xs_sock_reset_state_flags(xprt);
1179 smp_mb__after_atomic();
1180}
1181
1182/**
1183 * xs_error_report - callback to handle TCP socket state errors
1184 * @sk: socket
1185 *
1186 * Note: we don't call sock_error() since there may be a rpc_task
1187 * using the socket, and so we don't want to clear sk->sk_err.
1188 */
1189static void xs_error_report(struct sock *sk)
1190{
1191 struct rpc_xprt *xprt;
1192 int err;
1193
1194 read_lock_bh(&sk->sk_callback_lock);
1195 if (!(xprt = xprt_from_sock(sk)))
1196 goto out;
1197
1198 err = -sk->sk_err;
1199 if (err == 0)
1200 goto out;
1201 dprintk("RPC: xs_error_report client %p, error=%d...\n",
1202 xprt, -err);
1203 trace_rpc_socket_error(xprt, sk->sk_socket, err);
1204 xprt_wake_pending_tasks(xprt, err);
1205 out:
1206 read_unlock_bh(&sk->sk_callback_lock);
1207}
1208
1209static void xs_reset_transport(struct sock_xprt *transport)
1210{
1211 struct socket *sock = transport->sock;
1212 struct sock *sk = transport->inet;
1213 struct rpc_xprt *xprt = &transport->xprt;
1214
1215 if (sk == NULL)
1216 return;
1217
1218 if (atomic_read(&transport->xprt.swapper))
1219 sk_clear_memalloc(sk);
1220
1221 kernel_sock_shutdown(sock, SHUT_RDWR);
1222
1223 mutex_lock(&transport->recv_mutex);
1224 write_lock_bh(&sk->sk_callback_lock);
1225 transport->inet = NULL;
1226 transport->sock = NULL;
1227
1228 sk->sk_user_data = NULL;
1229
1230 xs_restore_old_callbacks(transport, sk);
1231 xprt_clear_connected(xprt);
1232 write_unlock_bh(&sk->sk_callback_lock);
1233 xs_sock_reset_connection_flags(xprt);
1234 mutex_unlock(&transport->recv_mutex);
1235
1236 trace_rpc_socket_close(xprt, sock);
1237 sock_release(sock);
1238
1239 xprt_disconnect_done(xprt);
1240}
1241
1242/**
1243 * xs_close - close a socket
1244 * @xprt: transport
1245 *
1246 * This is used when all requests are complete; i.e., no DRC state remains
1247 * on the server we want to save.
1248 *
1249 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1250 * xs_reset_transport() zeroing the socket from underneath a writer.
1251 */
1252static void xs_close(struct rpc_xprt *xprt)
1253{
1254 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1255
1256 dprintk("RPC: xs_close xprt %p\n", xprt);
1257
1258 xs_reset_transport(transport);
1259 xprt->reestablish_timeout = 0;
1260}
1261
1262static void xs_inject_disconnect(struct rpc_xprt *xprt)
1263{
1264 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
1265 xprt);
1266 xprt_disconnect_done(xprt);
1267}
1268
1269static void xs_xprt_free(struct rpc_xprt *xprt)
1270{
1271 xs_free_peer_addresses(xprt);
1272 xprt_free(xprt);
1273}
1274
1275/**
1276 * xs_destroy - prepare to shutdown a transport
1277 * @xprt: doomed transport
1278 *
1279 */
1280static void xs_destroy(struct rpc_xprt *xprt)
1281{
1282 struct sock_xprt *transport = container_of(xprt,
1283 struct sock_xprt, xprt);
1284 dprintk("RPC: xs_destroy xprt %p\n", xprt);
1285
1286 cancel_delayed_work_sync(&transport->connect_worker);
1287 xs_close(xprt);
1288 cancel_work_sync(&transport->recv_worker);
1289 xs_xprt_free(xprt);
1290 module_put(THIS_MODULE);
1291}
1292
1293/**
1294 * xs_udp_data_read_skb - receive callback for UDP sockets
1295 * @xprt: transport
1296 * @sk: socket
1297 * @skb: skbuff
1298 *
1299 */
1300static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1301 struct sock *sk,
1302 struct sk_buff *skb)
1303{
1304 struct rpc_task *task;
1305 struct rpc_rqst *rovr;
1306 int repsize, copied;
1307 u32 _xid;
1308 __be32 *xp;
1309
1310 repsize = skb->len;
1311 if (repsize < 4) {
1312 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1313 return;
1314 }
1315
1316 /* Copy the XID from the skb... */
1317 xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1318 if (xp == NULL)
1319 return;
1320
1321 /* Look up and lock the request corresponding to the given XID */
1322 spin_lock(&xprt->queue_lock);
1323 rovr = xprt_lookup_rqst(xprt, *xp);
1324 if (!rovr)
1325 goto out_unlock;
1326 xprt_pin_rqst(rovr);
1327 xprt_update_rtt(rovr->rq_task);
1328 spin_unlock(&xprt->queue_lock);
1329 task = rovr->rq_task;
1330
1331 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1332 copied = repsize;
1333
1334 /* Suck it into the iovec, verify checksum if not done by hw. */
1335 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1336 spin_lock(&xprt->queue_lock);
1337 __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1338 goto out_unpin;
1339 }
1340
1341
1342 spin_lock_bh(&xprt->transport_lock);
1343 xprt_adjust_cwnd(xprt, task, copied);
1344 spin_unlock_bh(&xprt->transport_lock);
1345 spin_lock(&xprt->queue_lock);
1346 xprt_complete_rqst(task, copied);
1347 __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1348out_unpin:
1349 xprt_unpin_rqst(rovr);
1350 out_unlock:
1351 spin_unlock(&xprt->queue_lock);
1352}
1353
1354static void xs_udp_data_receive(struct sock_xprt *transport)
1355{
1356 struct sk_buff *skb;
1357 struct sock *sk;
1358 int err;
1359
1360 mutex_lock(&transport->recv_mutex);
1361 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1362 sk = transport->inet;
1363 if (sk == NULL)
1364 goto out;
1365 for (;;) {
1366 skb = skb_recv_udp(sk, 0, 1, &err);
1367 if (skb == NULL)
1368 break;
1369 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1370 consume_skb(skb);
1371 cond_resched();
1372 }
1373out:
1374 mutex_unlock(&transport->recv_mutex);
1375}
1376
1377static void xs_udp_data_receive_workfn(struct work_struct *work)
1378{
1379 struct sock_xprt *transport =
1380 container_of(work, struct sock_xprt, recv_worker);
1381 xs_udp_data_receive(transport);
1382}
1383
1384/**
1385 * xs_data_ready - "data ready" callback for UDP sockets
1386 * @sk: socket with data to read
1387 *
1388 */
1389static void xs_data_ready(struct sock *sk)
1390{
1391 struct rpc_xprt *xprt;
1392
1393 read_lock_bh(&sk->sk_callback_lock);
1394 dprintk("RPC: xs_data_ready...\n");
1395 xprt = xprt_from_sock(sk);
1396 if (xprt != NULL) {
1397 struct sock_xprt *transport = container_of(xprt,
1398 struct sock_xprt, xprt);
1399 transport->old_data_ready(sk);
1400 /* Any data means we had a useful conversation, so
1401 * then we don't need to delay the next reconnect
1402 */
1403 if (xprt->reestablish_timeout)
1404 xprt->reestablish_timeout = 0;
1405 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1406 queue_work(xprtiod_workqueue, &transport->recv_worker);
1407 }
1408 read_unlock_bh(&sk->sk_callback_lock);
1409}
1410
1411/*
1412 * Helper function to force a TCP close if the server is sending
1413 * junk and/or it has put us in CLOSE_WAIT
1414 */
1415static void xs_tcp_force_close(struct rpc_xprt *xprt)
1416{
1417 xprt_force_disconnect(xprt);
1418}
1419
1420#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1421static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1422{
1423 return PAGE_SIZE;
1424}
1425#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1426
1427/**
1428 * xs_tcp_state_change - callback to handle TCP socket state changes
1429 * @sk: socket whose state has changed
1430 *
1431 */
1432static void xs_tcp_state_change(struct sock *sk)
1433{
1434 struct rpc_xprt *xprt;
1435 struct sock_xprt *transport;
1436
1437 read_lock_bh(&sk->sk_callback_lock);
1438 if (!(xprt = xprt_from_sock(sk)))
1439 goto out;
1440 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1441 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1442 sk->sk_state, xprt_connected(xprt),
1443 sock_flag(sk, SOCK_DEAD),
1444 sock_flag(sk, SOCK_ZAPPED),
1445 sk->sk_shutdown);
1446
1447 transport = container_of(xprt, struct sock_xprt, xprt);
1448 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1449 switch (sk->sk_state) {
1450 case TCP_ESTABLISHED:
1451 spin_lock(&xprt->transport_lock);
1452 if (!xprt_test_and_set_connected(xprt)) {
1453 xprt->connect_cookie++;
1454 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1455 xprt_clear_connecting(xprt);
1456
1457 xprt->stat.connect_count++;
1458 xprt->stat.connect_time += (long)jiffies -
1459 xprt->stat.connect_start;
1460 xprt_wake_pending_tasks(xprt, -EAGAIN);
1461 }
1462 spin_unlock(&xprt->transport_lock);
1463 break;
1464 case TCP_FIN_WAIT1:
1465 /* The client initiated a shutdown of the socket */
1466 xprt->connect_cookie++;
1467 xprt->reestablish_timeout = 0;
1468 set_bit(XPRT_CLOSING, &xprt->state);
1469 smp_mb__before_atomic();
1470 clear_bit(XPRT_CONNECTED, &xprt->state);
1471 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1472 smp_mb__after_atomic();
1473 break;
1474 case TCP_CLOSE_WAIT:
1475 /* The server initiated a shutdown of the socket */
1476 xprt->connect_cookie++;
1477 clear_bit(XPRT_CONNECTED, &xprt->state);
1478 xs_tcp_force_close(xprt);
1479 /* fall through */
1480 case TCP_CLOSING:
1481 /*
1482 * If the server closed down the connection, make sure that
1483 * we back off before reconnecting
1484 */
1485 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1486 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1487 break;
1488 case TCP_LAST_ACK:
1489 set_bit(XPRT_CLOSING, &xprt->state);
1490 smp_mb__before_atomic();
1491 clear_bit(XPRT_CONNECTED, &xprt->state);
1492 smp_mb__after_atomic();
1493 break;
1494 case TCP_CLOSE:
1495 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1496 &transport->sock_state))
1497 xprt_clear_connecting(xprt);
1498 clear_bit(XPRT_CLOSING, &xprt->state);
1499 /* Trigger the socket release */
1500 xs_tcp_force_close(xprt);
1501 }
1502 out:
1503 read_unlock_bh(&sk->sk_callback_lock);
1504}
1505
1506static void xs_write_space(struct sock *sk)
1507{
1508 struct socket_wq *wq;
1509 struct rpc_xprt *xprt;
1510
1511 if (!sk->sk_socket)
1512 return;
1513 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1514
1515 if (unlikely(!(xprt = xprt_from_sock(sk))))
1516 return;
1517 rcu_read_lock();
1518 wq = rcu_dereference(sk->sk_wq);
1519 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1520 goto out;
1521
1522 if (xprt_write_space(xprt))
1523 sk->sk_write_pending--;
1524out:
1525 rcu_read_unlock();
1526}
1527
1528/**
1529 * xs_udp_write_space - callback invoked when socket buffer space
1530 * becomes available
1531 * @sk: socket whose state has changed
1532 *
1533 * Called when more output buffer space is available for this socket.
1534 * We try not to wake our writers until they can make "significant"
1535 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1536 * with a bunch of small requests.
1537 */
1538static void xs_udp_write_space(struct sock *sk)
1539{
1540 read_lock_bh(&sk->sk_callback_lock);
1541
1542 /* from net/core/sock.c:sock_def_write_space */
1543 if (sock_writeable(sk))
1544 xs_write_space(sk);
1545
1546 read_unlock_bh(&sk->sk_callback_lock);
1547}
1548
1549/**
1550 * xs_tcp_write_space - callback invoked when socket buffer space
1551 * becomes available
1552 * @sk: socket whose state has changed
1553 *
1554 * Called when more output buffer space is available for this socket.
1555 * We try not to wake our writers until they can make "significant"
1556 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1557 * with a bunch of small requests.
1558 */
1559static void xs_tcp_write_space(struct sock *sk)
1560{
1561 read_lock_bh(&sk->sk_callback_lock);
1562
1563 /* from net/core/stream.c:sk_stream_write_space */
1564 if (sk_stream_is_writeable(sk))
1565 xs_write_space(sk);
1566
1567 read_unlock_bh(&sk->sk_callback_lock);
1568}
1569
1570static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1571{
1572 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1573 struct sock *sk = transport->inet;
1574
1575 if (transport->rcvsize) {
1576 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1577 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1578 }
1579 if (transport->sndsize) {
1580 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1581 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1582 sk->sk_write_space(sk);
1583 }
1584}
1585
1586/**
1587 * xs_udp_set_buffer_size - set send and receive limits
1588 * @xprt: generic transport
1589 * @sndsize: requested size of send buffer, in bytes
1590 * @rcvsize: requested size of receive buffer, in bytes
1591 *
1592 * Set socket send and receive buffer size limits.
1593 */
1594static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1595{
1596 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1597
1598 transport->sndsize = 0;
1599 if (sndsize)
1600 transport->sndsize = sndsize + 1024;
1601 transport->rcvsize = 0;
1602 if (rcvsize)
1603 transport->rcvsize = rcvsize + 1024;
1604
1605 xs_udp_do_set_buffer_size(xprt);
1606}
1607
1608/**
1609 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1610 * @xprt: controlling transport
1611 * @task: task that timed out
1612 *
1613 * Adjust the congestion window after a retransmit timeout has occurred.
1614 */
1615static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1616{
1617 spin_lock_bh(&xprt->transport_lock);
1618 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1619 spin_unlock_bh(&xprt->transport_lock);
1620}
1621
1622static int xs_get_random_port(void)
1623{
1624 unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1625 unsigned short range;
1626 unsigned short rand;
1627
1628 if (max < min)
1629 return -EADDRINUSE;
1630 range = max - min + 1;
1631 rand = (unsigned short) prandom_u32() % range;
1632 return rand + min;
1633}
1634
1635/**
1636 * xs_sock_set_reuseport - set the socket's port and address reuse options
1637 * @sock: socket
1638 *
1639 * Note that this function has to be called on all sockets that share the
1640 * same port, and it must be called before binding.
1641 */
1642static void xs_sock_set_reuseport(struct socket *sock)
1643{
1644 int opt = 1;
1645
1646 kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1647 (char *)&opt, sizeof(opt));
1648}
1649
1650static unsigned short xs_sock_getport(struct socket *sock)
1651{
1652 struct sockaddr_storage buf;
1653 unsigned short port = 0;
1654
1655 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1656 goto out;
1657 switch (buf.ss_family) {
1658 case AF_INET6:
1659 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1660 break;
1661 case AF_INET:
1662 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1663 }
1664out:
1665 return port;
1666}
1667
1668/**
1669 * xs_set_port - reset the port number in the remote endpoint address
1670 * @xprt: generic transport
1671 * @port: new port number
1672 *
1673 */
1674static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1675{
1676 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1677
1678 rpc_set_port(xs_addr(xprt), port);
1679 xs_update_peer_port(xprt);
1680}
1681
1682static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1683{
1684 if (transport->srcport == 0)
1685 transport->srcport = xs_sock_getport(sock);
1686}
1687
1688static int xs_get_srcport(struct sock_xprt *transport)
1689{
1690 int port = transport->srcport;
1691
1692 if (port == 0 && transport->xprt.resvport)
1693 port = xs_get_random_port();
1694 return port;
1695}
1696
1697static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1698{
1699 if (transport->srcport != 0)
1700 transport->srcport = 0;
1701 if (!transport->xprt.resvport)
1702 return 0;
1703 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1704 return xprt_max_resvport;
1705 return --port;
1706}
1707static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1708{
1709 struct sockaddr_storage myaddr;
1710 int err, nloop = 0;
1711 int port = xs_get_srcport(transport);
1712 unsigned short last;
1713
1714 /*
1715 * If we are asking for any ephemeral port (i.e. port == 0 &&
1716 * transport->xprt.resvport == 0), don't bind. Let the local
1717 * port selection happen implicitly when the socket is used
1718 * (for example at connect time).
1719 *
1720 * This ensures that we can continue to establish TCP
1721 * connections even when all local ephemeral ports are already
1722 * a part of some TCP connection. This makes no difference
1723 * for UDP sockets, but also doesn't harm them.
1724 *
1725 * If we're asking for any reserved port (i.e. port == 0 &&
1726 * transport->xprt.resvport == 1) xs_get_srcport above will
1727 * ensure that port is non-zero and we will bind as needed.
1728 */
1729 if (port <= 0)
1730 return port;
1731
1732 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1733 do {
1734 rpc_set_port((struct sockaddr *)&myaddr, port);
1735 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1736 transport->xprt.addrlen);
1737 if (err == 0) {
1738 transport->srcport = port;
1739 break;
1740 }
1741 last = port;
1742 port = xs_next_srcport(transport, port);
1743 if (port > last)
1744 nloop++;
1745 } while (err == -EADDRINUSE && nloop != 2);
1746
1747 if (myaddr.ss_family == AF_INET)
1748 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1749 &((struct sockaddr_in *)&myaddr)->sin_addr,
1750 port, err ? "failed" : "ok", err);
1751 else
1752 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1753 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1754 port, err ? "failed" : "ok", err);
1755 return err;
1756}
1757
1758/*
1759 * We don't support autobind on AF_LOCAL sockets
1760 */
1761static void xs_local_rpcbind(struct rpc_task *task)
1762{
1763 xprt_set_bound(task->tk_xprt);
1764}
1765
1766static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1767{
1768}
1769
1770#ifdef CONFIG_DEBUG_LOCK_ALLOC
1771static struct lock_class_key xs_key[2];
1772static struct lock_class_key xs_slock_key[2];
1773
1774static inline void xs_reclassify_socketu(struct socket *sock)
1775{
1776 struct sock *sk = sock->sk;
1777
1778 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1779 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1780}
1781
1782static inline void xs_reclassify_socket4(struct socket *sock)
1783{
1784 struct sock *sk = sock->sk;
1785
1786 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1787 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1788}
1789
1790static inline void xs_reclassify_socket6(struct socket *sock)
1791{
1792 struct sock *sk = sock->sk;
1793
1794 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1795 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1796}
1797
1798static inline void xs_reclassify_socket(int family, struct socket *sock)
1799{
1800 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1801 return;
1802
1803 switch (family) {
1804 case AF_LOCAL:
1805 xs_reclassify_socketu(sock);
1806 break;
1807 case AF_INET:
1808 xs_reclassify_socket4(sock);
1809 break;
1810 case AF_INET6:
1811 xs_reclassify_socket6(sock);
1812 break;
1813 }
1814}
1815#else
1816static inline void xs_reclassify_socket(int family, struct socket *sock)
1817{
1818}
1819#endif
1820
1821static void xs_dummy_setup_socket(struct work_struct *work)
1822{
1823}
1824
1825static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1826 struct sock_xprt *transport, int family, int type,
1827 int protocol, bool reuseport)
1828{
1829 struct socket *sock;
1830 int err;
1831
1832 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1833 if (err < 0) {
1834 dprintk("RPC: can't create %d transport socket (%d).\n",
1835 protocol, -err);
1836 goto out;
1837 }
1838 xs_reclassify_socket(family, sock);
1839
1840 if (reuseport)
1841 xs_sock_set_reuseport(sock);
1842
1843 err = xs_bind(transport, sock);
1844 if (err) {
1845 sock_release(sock);
1846 goto out;
1847 }
1848
1849 return sock;
1850out:
1851 return ERR_PTR(err);
1852}
1853
1854static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1855 struct socket *sock)
1856{
1857 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1858 xprt);
1859
1860 if (!transport->inet) {
1861 struct sock *sk = sock->sk;
1862
1863 write_lock_bh(&sk->sk_callback_lock);
1864
1865 xs_save_old_callbacks(transport, sk);
1866
1867 sk->sk_user_data = xprt;
1868 sk->sk_data_ready = xs_data_ready;
1869 sk->sk_write_space = xs_udp_write_space;
1870 sock_set_flag(sk, SOCK_FASYNC);
1871 sk->sk_error_report = xs_error_report;
1872 sk->sk_allocation = GFP_NOIO;
1873
1874 xprt_clear_connected(xprt);
1875
1876 /* Reset to new socket */
1877 transport->sock = sock;
1878 transport->inet = sk;
1879
1880 write_unlock_bh(&sk->sk_callback_lock);
1881 }
1882
1883 xs_stream_reset_connect(transport);
1884
1885 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1886}
1887
1888/**
1889 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1890 * @transport: socket transport to connect
1891 */
1892static int xs_local_setup_socket(struct sock_xprt *transport)
1893{
1894 struct rpc_xprt *xprt = &transport->xprt;
1895 struct socket *sock;
1896 int status = -EIO;
1897
1898 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1899 SOCK_STREAM, 0, &sock, 1);
1900 if (status < 0) {
1901 dprintk("RPC: can't create AF_LOCAL "
1902 "transport socket (%d).\n", -status);
1903 goto out;
1904 }
1905 xs_reclassify_socket(AF_LOCAL, sock);
1906
1907 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1908 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1909
1910 status = xs_local_finish_connecting(xprt, sock);
1911 trace_rpc_socket_connect(xprt, sock, status);
1912 switch (status) {
1913 case 0:
1914 dprintk("RPC: xprt %p connected to %s\n",
1915 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1916 xprt->stat.connect_count++;
1917 xprt->stat.connect_time += (long)jiffies -
1918 xprt->stat.connect_start;
1919 xprt_set_connected(xprt);
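		/* fall through */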
1920 case -ENOBUFS:
1921 break;
1922 case -ENOENT:
1923 dprintk("RPC: xprt %p: socket %s does not exist\n",
1924 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1925 break;
1926 case -ECONNREFUSED:
1927 dprintk("RPC: xprt %p: connection refused for %s\n",
1928 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1929 break;
1930 default:
1931 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1932 __func__, -status,
1933 xprt->address_strings[RPC_DISPLAY_ADDR]);
1934 }
1935
1936out:
1937 xprt_clear_connecting(xprt);
1938 xprt_wake_pending_tasks(xprt, status);
1939 return status;
1940}
1941
1942static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1943{
1944 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1945 int ret;
1946
1947 if (RPC_IS_ASYNC(task)) {
1948 /*
1949 * We want the AF_LOCAL connect to be resolved in the
1950 * filesystem namespace of the process making the rpc
1951 * call. Thus we connect synchronously.
1952 *
1953 * If we want to support asynchronous AF_LOCAL calls,
1954 * we'll need to figure out how to pass a namespace to
1955 * connect.
1956 */
1957 rpc_exit(task, -ENOTCONN);
1958 return;
1959 }
1960 ret = xs_local_setup_socket(transport);
1961 if (ret && !RPC_IS_SOFTCONN(task))
1962 msleep_interruptible(15000);
1963}
1964
1965#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1966/*
1967 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
1968 * know that we have exclusive access to the socket), to guard against
1969 * races with xs_reset_transport.
1970 */
1971static void xs_set_memalloc(struct rpc_xprt *xprt)
1972{
1973 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1974 xprt);
1975
1976 /*
1977 * If there's no sock, then we have nothing to set. The
1978 * reconnecting process will get it for us.
1979 */
1980 if (!transport->inet)
1981 return;
1982 if (atomic_read(&xprt->swapper))
1983 sk_set_memalloc(transport->inet);
1984}
1985
1986/**
1987 * xs_enable_swap - Tag this transport as being used for swap.
1988 * @xprt: transport to tag
1989 *
1990 * Take a reference to this transport on behalf of the rpc_clnt, and
1991 * optionally mark it for swapping if it wasn't already.
1992 */
1993static int
1994xs_enable_swap(struct rpc_xprt *xprt)
1995{
1996 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
1997
1998 if (atomic_inc_return(&xprt->swapper) != 1)
1999 return 0;
2000 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2001 return -ERESTARTSYS;
2002 if (xs->inet)
2003 sk_set_memalloc(xs->inet);
2004 xprt_release_xprt(xprt, NULL);
2005 return 0;
2006}
2007
2008/**
2009 * xs_disable_swap - Untag this transport as being used for swap.
2010 * @xprt: transport to tag
2011 *
2012 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2013 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2014 */
2015static void
2016xs_disable_swap(struct rpc_xprt *xprt)
2017{
2018 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2019
2020 if (!atomic_dec_and_test(&xprt->swapper))
2021 return;
2022 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2023 return;
2024 if (xs->inet)
2025 sk_clear_memalloc(xs->inet);
2026 xprt_release_xprt(xprt, NULL);
2027}
2028#else
2029static void xs_set_memalloc(struct rpc_xprt *xprt)
2030{
2031}
2032
2033static int
2034xs_enable_swap(struct rpc_xprt *xprt)
2035{
2036 return -EINVAL;
2037}
2038
2039static void
2040xs_disable_swap(struct rpc_xprt *xprt)
2041{
2042}
2043#endif
2044
2045static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2046{
2047 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2048
2049 if (!transport->inet) {
2050 struct sock *sk = sock->sk;
2051
2052 write_lock_bh(&sk->sk_callback_lock);
2053
2054 xs_save_old_callbacks(transport, sk);
2055
2056 sk->sk_user_data = xprt;
2057 sk->sk_data_ready = xs_data_ready;
2058 sk->sk_write_space = xs_udp_write_space;
2059 sock_set_flag(sk, SOCK_FASYNC);
2060 sk->sk_allocation = GFP_NOIO;
2061
2062 xprt_set_connected(xprt);
2063
2064 /* Reset to new socket */
2065 transport->sock = sock;
2066 transport->inet = sk;
2067
2068 xs_set_memalloc(xprt);
2069
2070 write_unlock_bh(&sk->sk_callback_lock);
2071 }
2072 xs_udp_do_set_buffer_size(xprt);
2073
2074 xprt->stat.connect_start = jiffies;
2075}
2076
2077static void xs_udp_setup_socket(struct work_struct *work)
2078{
2079 struct sock_xprt *transport =
2080 container_of(work, struct sock_xprt, connect_worker.work);
2081 struct rpc_xprt *xprt = &transport->xprt;
2082 struct socket *sock;
2083 int status = -EIO;
2084
2085 sock = xs_create_sock(xprt, transport,
2086 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2087 IPPROTO_UDP, false);
2088 if (IS_ERR(sock))
2089 goto out;
2090
2091 dprintk("RPC: worker connecting xprt %p via %s to "
2092 "%s (port %s)\n", xprt,
2093 xprt->address_strings[RPC_DISPLAY_PROTO],
2094 xprt->address_strings[RPC_DISPLAY_ADDR],
2095 xprt->address_strings[RPC_DISPLAY_PORT]);
2096
2097 xs_udp_finish_connecting(xprt, sock);
2098 trace_rpc_socket_connect(xprt, sock, 0);
2099 status = 0;
2100out:
2101 xprt_clear_connecting(xprt);
2102 xprt_unlock_connect(xprt, transport);
2103 xprt_wake_pending_tasks(xprt, status);
2104}
2105
2106/**
2107 * xs_tcp_shutdown - gracefully shut down a TCP socket
2108 * @xprt: transport
2109 *
2110 * Initiates a graceful shutdown of the TCP socket by calling the
2111 * equivalent of shutdown(SHUT_RDWR);
2112 */
2113static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2114{
2115 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2116 struct socket *sock = transport->sock;
2117 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2118
2119 if (sock == NULL)
2120 return;
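	/* Shut down live sockets gracefully; if the socket is already
	 * closed or lingering in TIME_WAIT, just tear the transport down.
	 */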
2121 switch (skst) {
2122 default:
2123 kernel_sock_shutdown(sock, SHUT_RDWR);
2124 trace_rpc_socket_shutdown(xprt, sock);
2125 break;
2126 case TCP_CLOSE:
2127 case TCP_TIME_WAIT:
2128 xs_reset_transport(transport);
2129 }
2130}
2131
2132static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2133 struct socket *sock)
2134{
2135 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2136 unsigned int keepidle;
2137 unsigned int keepcnt;
2138 unsigned int opt_on = 1;
2139 unsigned int timeo;
2140
2141 spin_lock_bh(&xprt->transport_lock);
2142 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2143 keepcnt = xprt->timeout->to_retries + 1;
2144 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2145 (xprt->timeout->to_retries + 1);
2146 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2147 spin_unlock_bh(&xprt->transport_lock);
2148
2149 /* TCP Keepalive options */
2150 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2151 (char *)&opt_on, sizeof(opt_on));
2152 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2153 (char *)&keepidle, sizeof(keepidle));
2154 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2155 (char *)&keepidle, sizeof(keepidle));
2156 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2157 (char *)&keepcnt, sizeof(keepcnt));
2158
2159 /* TCP user timeout (see RFC5482) */
2160 kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
2161 (char *)&timeo, sizeof(timeo));
2162}
2163
2164static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2165 unsigned long connect_timeout,
2166 unsigned long reconnect_timeout)
2167{
2168 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2169 struct rpc_timeout to;
2170 unsigned long initval;
2171
2172 spin_lock_bh(&xprt->transport_lock);
2173 if (reconnect_timeout < xprt->max_reconnect_timeout)
2174 xprt->max_reconnect_timeout = reconnect_timeout;
2175 if (connect_timeout < xprt->connect_timeout) {
2176 memcpy(&to, xprt->timeout, sizeof(to));
2177 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2178 /* Arbitrary lower limit */
2179 if (initval < XS_TCP_INIT_REEST_TO << 1)
2180 initval = XS_TCP_INIT_REEST_TO << 1;
2181 to.to_initval = initval;
2182 to.to_maxval = initval;
2183 memcpy(&transport->tcp_timeout, &to,
2184 sizeof(transport->tcp_timeout));
2185 xprt->timeout = &transport->tcp_timeout;
2186 xprt->connect_timeout = connect_timeout;
2187 }
2188 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2189 spin_unlock_bh(&xprt->transport_lock);
2190}
2191
2192static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2193{
2194 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2195 int ret = -ENOTCONN;
2196
2197 if (!transport->inet) {
2198 struct sock *sk = sock->sk;
2199 unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
2200
2201 /* Avoid temporary addresses; they are bad for long-lived
2202 * connections such as NFS mounts.
2203 * RFC4941, section 3.6 suggests that:
2204 * Individual applications, which have specific
2205 * knowledge about the normal duration of connections,
2206 * MAY override this as appropriate.
2207 */
2208 kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
2209 (char *)&addr_pref, sizeof(addr_pref));
2210
2211 xs_tcp_set_socket_timeouts(xprt, sock);
2212
2213 write_lock_bh(&sk->sk_callback_lock);
2214
2215 xs_save_old_callbacks(transport, sk);
2216
2217 sk->sk_user_data = xprt;
2218 sk->sk_data_ready = xs_data_ready;
2219 sk->sk_state_change = xs_tcp_state_change;
2220 sk->sk_write_space = xs_tcp_write_space;
2221 sock_set_flag(sk, SOCK_FASYNC);
2222 sk->sk_error_report = xs_error_report;
2223 sk->sk_allocation = GFP_NOIO;
2224
2225 /* socket options */
2226 sock_reset_flag(sk, SOCK_LINGER);
2227 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2228
2229 xprt_clear_connected(xprt);
2230
2231 /* Reset to new socket */
2232 transport->sock = sock;
2233 transport->inet = sk;
2234
2235 write_unlock_bh(&sk->sk_callback_lock);
2236 }
2237
2238 if (!xprt_bound(xprt))
2239 goto out;
2240
2241 xs_set_memalloc(xprt);
2242
2243 /* Reset TCP record info */
2244 xs_stream_reset_connect(transport);
2245
2246 /* Tell the socket layer to start connecting... */
2247 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2248 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2249 switch (ret) {
2250 case 0:
2251 xs_set_srcport(transport, sock);
2252 /* fall through */
2253 case -EINPROGRESS:
2254 /* SYN_SENT! */
2255 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2256 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2257 break;
2258 case -EADDRNOTAVAIL:
2259 /* Source port number is unavailable. Try a new one! */
2260 transport->srcport = 0;
2261 }
2262out:
2263 return ret;
2264}
2265
2266/**
2267 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2268 * @work: queued work item
2269 *
2270 * Invoked from a work queue.
2271 */
2272static void xs_tcp_setup_socket(struct work_struct *work)
2273{
2274 struct sock_xprt *transport =
2275 container_of(work, struct sock_xprt, connect_worker.work);
2276 struct socket *sock = transport->sock;
2277 struct rpc_xprt *xprt = &transport->xprt;
2278 int status = -EIO;
2279
2280 if (!sock) {
2281 sock = xs_create_sock(xprt, transport,
2282 xs_addr(xprt)->sa_family, SOCK_STREAM,
2283 IPPROTO_TCP, true);
2284 if (IS_ERR(sock)) {
2285 status = PTR_ERR(sock);
2286 goto out;
2287 }
2288 }
2289
2290 dprintk("RPC: worker connecting xprt %p via %s to "
2291 "%s (port %s)\n", xprt,
2292 xprt->address_strings[RPC_DISPLAY_PROTO],
2293 xprt->address_strings[RPC_DISPLAY_ADDR],
2294 xprt->address_strings[RPC_DISPLAY_PORT]);
2295
2296 status = xs_tcp_finish_connecting(xprt, sock);
2297 trace_rpc_socket_connect(xprt, sock, status);
2298 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2299 xprt, -status, xprt_connected(xprt),
2300 sock->sk->sk_state);
2301 switch (status) {
2302 default:
2303 printk("%s: connect returned unhandled error %d\n",
2304 __func__, status);
2305 /* fall through */
2306 case -EADDRNOTAVAIL:
2307 /* We're probably in TIME_WAIT. Get rid of existing socket,
2308 * and retry
2309 */
2310 xs_tcp_force_close(xprt);
2311 break;
2312 case 0:
2313 case -EINPROGRESS:
2314 case -EALREADY:
2315 xprt_unlock_connect(xprt, transport);
2316 return;
2317 case -EINVAL:
2318 /* Happens, for instance, if the user specified a link
2319 * local IPv6 address without a scope-id.
2320 */
2321 case -ECONNREFUSED:
2322 case -ECONNRESET:
2323 case -ENETDOWN:
2324 case -ENETUNREACH:
2325 case -EHOSTUNREACH:
2326 case -EADDRINUSE:
2327 case -ENOBUFS:
2328 /*
2329 * xs_tcp_force_close() wakes tasks with -EIO.
2330 * We need to wake them first to ensure the
2331 * correct error code.
2332 */
2333 xprt_wake_pending_tasks(xprt, status);
2334 xs_tcp_force_close(xprt);
2335 goto out;
2336 }
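	/* Transient failure: report -EAGAIN so waiting tasks retry the connect */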
2337 status = -EAGAIN;
2338out:
2339 xprt_clear_connecting(xprt);
2340 xprt_unlock_connect(xprt, transport);
2341 xprt_wake_pending_tasks(xprt, status);
2342}
2343
2344static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
2345{
2346 unsigned long start, now = jiffies;
2347
2348 start = xprt->stat.connect_start + xprt->reestablish_timeout;
2349 if (time_after(start, now))
2350 return start - now;
2351 return 0;
2352}
2353
2354static void xs_reconnect_backoff(struct rpc_xprt *xprt)
2355{
2356 xprt->reestablish_timeout <<= 1;
2357 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
2358 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
2359 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2360 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2361}
2362
2363/**
2364 * xs_connect - connect a socket to a remote endpoint
2365 * @xprt: pointer to transport structure
2366 * @task: address of RPC task that manages state of connect request
2367 *
2368 * TCP: If the remote end dropped the connection, delay reconnecting.
2369 *
2370 * UDP socket connects are synchronous, but we use a work queue anyway
2371 * to guarantee that even unprivileged user processes can set up a
2372 * socket on a privileged port.
2373 *
2374 * If a UDP socket connect fails, the delay behavior here prevents
2375 * retry floods (hard mounts).
2376 */
2377static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2378{
2379 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2380 unsigned long delay = 0;
2381
2382 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2383
2384 if (transport->sock != NULL) {
2385 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2386 "seconds\n",
2387 xprt, xprt->reestablish_timeout / HZ);
2388
2389 /* Start by resetting any existing state */
2390 xs_reset_transport(transport);
2391
2392 delay = xs_reconnect_delay(xprt);
2393 xs_reconnect_backoff(xprt);
2394
2395 } else
2396 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2397
2398 queue_delayed_work(xprtiod_workqueue,
2399 &transport->connect_worker,
2400 delay);
2401}
2402
2403/**
2404 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2405 * @xprt: rpc_xprt struct containing statistics
2406 * @seq: output file
2407 *
2408 */
2409static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2410{
2411 long idle_time = 0;
2412
2413 if (xprt_connected(xprt))
2414 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2415
2416 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2417 "%llu %llu %lu %llu %llu\n",
2418 xprt->stat.bind_count,
2419 xprt->stat.connect_count,
2420 xprt->stat.connect_time / HZ,
2421 idle_time,
2422 xprt->stat.sends,
2423 xprt->stat.recvs,
2424 xprt->stat.bad_xids,
2425 xprt->stat.req_u,
2426 xprt->stat.bklog_u,
2427 xprt->stat.max_slots,
2428 xprt->stat.sending_u,
2429 xprt->stat.pending_u);
2430}
2431
2432/**
2433 * xs_udp_print_stats - display UDP socket-specific stats
2434 * @xprt: rpc_xprt struct containing statistics
2435 * @seq: output file
2436 *
2437 */
2438static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2439{
2440 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2441
2442 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2443 "%lu %llu %llu\n",
2444 transport->srcport,
2445 xprt->stat.bind_count,
2446 xprt->stat.sends,
2447 xprt->stat.recvs,
2448 xprt->stat.bad_xids,
2449 xprt->stat.req_u,
2450 xprt->stat.bklog_u,
2451 xprt->stat.max_slots,
2452 xprt->stat.sending_u,
2453 xprt->stat.pending_u);
2454}
2455
2456/**
2457 * xs_tcp_print_stats - display TCP socket-specific stats
2458 * @xprt: rpc_xprt struct containing statistics
2459 * @seq: output file
2460 *
2461 */
2462static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2463{
2464 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2465 long idle_time = 0;
2466
2467 if (xprt_connected(xprt))
2468 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2469
2470 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2471 "%llu %llu %lu %llu %llu\n",
2472 transport->srcport,
2473 xprt->stat.bind_count,
2474 xprt->stat.connect_count,
2475 xprt->stat.connect_time / HZ,
2476 idle_time,
2477 xprt->stat.sends,
2478 xprt->stat.recvs,
2479 xprt->stat.bad_xids,
2480 xprt->stat.req_u,
2481 xprt->stat.bklog_u,
2482 xprt->stat.max_slots,
2483 xprt->stat.sending_u,
2484 xprt->stat.pending_u);
2485}
2486
2487/*
2488 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2489 * we allocate pages instead of doing a kmalloc like rpc_malloc is that we want
2490 * to use the server-side send routines.
2491 */
2492static int bc_malloc(struct rpc_task *task)
2493{
2494 struct rpc_rqst *rqst = task->tk_rqstp;
2495 size_t size = rqst->rq_callsize;
2496 struct page *page;
2497 struct rpc_buffer *buf;
2498
2499 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2500 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2501 size);
2502 return -EINVAL;
2503 }
2504
2505 page = alloc_page(GFP_KERNEL);
2506 if (!page)
2507 return -ENOMEM;
2508
2509 buf = page_address(page);
2510 buf->len = PAGE_SIZE;
2511
2512 rqst->rq_buffer = buf->data;
2513 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2514 return 0;
2515}
2516
2517/*
2518 * Free the space allocated in the bc_alloc routine
2519 */
2520static void bc_free(struct rpc_task *task)
2521{
2522 void *buffer = task->tk_rqstp->rq_buffer;
2523 struct rpc_buffer *buf;
2524
2525 buf = container_of(buffer, struct rpc_buffer, data);
2526 free_page((unsigned long)buf);
2527}
2528
2529/*
2530 * Use the svc_sock to send the callback. Must be called with the bc_xprt's
2531 * xpt_mutex held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2532 */
2533static int bc_sendto(struct rpc_rqst *req)
2534{
2535 int len;
2536 struct xdr_buf *xbufp = &req->rq_snd_buf;
2537 struct rpc_xprt *xprt = req->rq_xprt;
2538 struct sock_xprt *transport =
2539 container_of(xprt, struct sock_xprt, xprt);
2540 struct socket *sock = transport->sock;
2541 unsigned long headoff;
2542 unsigned long tailoff;
2543
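	/* Prepend the 4-byte record marker required on stream transports */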
2544 xs_encode_stream_record_marker(xbufp);
2545
2546 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2547 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2548 len = svc_send_common(sock, xbufp,
2549 virt_to_page(xbufp->head[0].iov_base), headoff,
2550 xbufp->tail[0].iov_base, tailoff);
2551
2552 if (len != xbufp->len) {
2553 printk(KERN_NOTICE "Error sending entire callback!\n");
2554 len = -EAGAIN;
2555 }
2556
2557 return len;
2558}
2559
2560/*
2561 * The send routine. Borrows from svc_send
2562 */
2563static int bc_send_request(struct rpc_rqst *req)
2564{
2565 struct svc_xprt *xprt;
2566 int len;
2567
2568 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2569 /*
2570 * Get the server socket associated with this callback xprt
2571 */
2572 xprt = req->rq_xprt->bc_xprt;
2573
2574 /*
2575 * Grab the mutex to serialize data as the connection is shared
2576 * with the fore channel
2577 */
2578 mutex_lock(&xprt->xpt_mutex);
2579 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2580 len = -ENOTCONN;
2581 else
2582 len = bc_sendto(req);
2583 mutex_unlock(&xprt->xpt_mutex);
2584
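	/* bc_sendto() returns a positive length only when the entire reply was sent */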
2585 if (len > 0)
2586 len = 0;
2587
2588 return len;
2589}
2590
2591/*
2592 * The close routine. Since the connection is client-initiated, we do nothing.
2593 */
2594
2595static void bc_close(struct rpc_xprt *xprt)
2596{
2597}
2598
2599/*
2600 * The xprt destroy routine. Again, because this connection is client-initiated,
2601 * there is no connection to tear down; we just free the xprt.
2602 */
2603
2604static void bc_destroy(struct rpc_xprt *xprt)
2605{
2606 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2607
2608 xs_xprt_free(xprt);
2609 module_put(THIS_MODULE);
2610}
2611
2612static const struct rpc_xprt_ops xs_local_ops = {
2613 .reserve_xprt = xprt_reserve_xprt,
2614 .release_xprt = xprt_release_xprt,
2615 .alloc_slot = xprt_alloc_slot,
2616 .free_slot = xprt_free_slot,
2617 .rpcbind = xs_local_rpcbind,
2618 .set_port = xs_local_set_port,
2619 .connect = xs_local_connect,
2620 .buf_alloc = rpc_malloc,
2621 .buf_free = rpc_free,
2622 .prepare_request = xs_stream_prepare_request,
2623 .send_request = xs_local_send_request,
2624 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2625 .close = xs_close,
2626 .destroy = xs_destroy,
2627 .print_stats = xs_local_print_stats,
2628 .enable_swap = xs_enable_swap,
2629 .disable_swap = xs_disable_swap,
2630};
2631
2632static const struct rpc_xprt_ops xs_udp_ops = {
2633 .set_buffer_size = xs_udp_set_buffer_size,
2634 .reserve_xprt = xprt_reserve_xprt_cong,
2635 .release_xprt = xprt_release_xprt_cong,
2636 .alloc_slot = xprt_alloc_slot,
2637 .free_slot = xprt_free_slot,
2638 .rpcbind = rpcb_getport_async,
2639 .set_port = xs_set_port,
2640 .connect = xs_connect,
2641 .buf_alloc = rpc_malloc,
2642 .buf_free = rpc_free,
2643 .send_request = xs_udp_send_request,
2644 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2645 .timer = xs_udp_timer,
2646 .release_request = xprt_release_rqst_cong,
2647 .close = xs_close,
2648 .destroy = xs_destroy,
2649 .print_stats = xs_udp_print_stats,
2650 .enable_swap = xs_enable_swap,
2651 .disable_swap = xs_disable_swap,
2652 .inject_disconnect = xs_inject_disconnect,
2653};
2654
2655static const struct rpc_xprt_ops xs_tcp_ops = {
2656 .reserve_xprt = xprt_reserve_xprt,
2657 .release_xprt = xprt_release_xprt,
2658 .alloc_slot = xprt_alloc_slot,
2659 .free_slot = xprt_free_slot,
2660 .rpcbind = rpcb_getport_async,
2661 .set_port = xs_set_port,
2662 .connect = xs_connect,
2663 .buf_alloc = rpc_malloc,
2664 .buf_free = rpc_free,
2665 .prepare_request = xs_stream_prepare_request,
2666 .send_request = xs_tcp_send_request,
2667 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2668 .close = xs_tcp_shutdown,
2669 .destroy = xs_destroy,
2670 .set_connect_timeout = xs_tcp_set_connect_timeout,
2671 .print_stats = xs_tcp_print_stats,
2672 .enable_swap = xs_enable_swap,
2673 .disable_swap = xs_disable_swap,
2674 .inject_disconnect = xs_inject_disconnect,
2675#ifdef CONFIG_SUNRPC_BACKCHANNEL
2676 .bc_setup = xprt_setup_bc,
2677 .bc_maxpayload = xs_tcp_bc_maxpayload,
2678 .bc_free_rqst = xprt_free_bc_rqst,
2679 .bc_destroy = xprt_destroy_bc,
2680#endif
2681};
2682
2683/*
2684 * The rpc_xprt_ops for the server backchannel
2685 */
2686
2687static const struct rpc_xprt_ops bc_tcp_ops = {
2688 .reserve_xprt = xprt_reserve_xprt,
2689 .release_xprt = xprt_release_xprt,
2690 .alloc_slot = xprt_alloc_slot,
2691 .free_slot = xprt_free_slot,
2692 .buf_alloc = bc_malloc,
2693 .buf_free = bc_free,
2694 .send_request = bc_send_request,
2695 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2696 .close = bc_close,
2697 .destroy = bc_destroy,
2698 .print_stats = xs_tcp_print_stats,
2699 .enable_swap = xs_enable_swap,
2700 .disable_swap = xs_disable_swap,
2701 .inject_disconnect = xs_inject_disconnect,
2702};
2703
2704static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2705{
2706 static const struct sockaddr_in sin = {
2707 .sin_family = AF_INET,
2708 .sin_addr.s_addr = htonl(INADDR_ANY),
2709 };
2710 static const struct sockaddr_in6 sin6 = {
2711 .sin6_family = AF_INET6,
2712 .sin6_addr = IN6ADDR_ANY_INIT,
2713 };
2714
2715 switch (family) {
2716 case AF_LOCAL:
2717 break;
2718 case AF_INET:
2719 memcpy(sap, &sin, sizeof(sin));
2720 break;
2721 case AF_INET6:
2722 memcpy(sap, &sin6, sizeof(sin6));
2723 break;
2724 default:
2725 dprintk("RPC: %s: Bad address family\n", __func__);
2726 return -EAFNOSUPPORT;
2727 }
2728 return 0;
2729}
2730
2731static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2732 unsigned int slot_table_size,
2733 unsigned int max_slot_table_size)
2734{
2735 struct rpc_xprt *xprt;
2736 struct sock_xprt *new;
2737
2738 if (args->addrlen > sizeof(xprt->addr)) {
2739 dprintk("RPC: xs_setup_xprt: address too large\n");
2740 return ERR_PTR(-EBADF);
2741 }
2742
2743 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2744 max_slot_table_size);
2745 if (xprt == NULL) {
2746 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2747 "rpc_xprt\n");
2748 return ERR_PTR(-ENOMEM);
2749 }
2750
2751 new = container_of(xprt, struct sock_xprt, xprt);
2752 mutex_init(&new->recv_mutex);
2753 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2754 xprt->addrlen = args->addrlen;
2755 if (args->srcaddr)
2756 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2757 else {
2758 int err;
2759 err = xs_init_anyaddr(args->dstaddr->sa_family,
2760 (struct sockaddr *)&new->srcaddr);
2761 if (err != 0) {
2762 xprt_free(xprt);
2763 return ERR_PTR(err);
2764 }
2765 }
2766
2767 return xprt;
2768}
2769
2770static const struct rpc_timeout xs_local_default_timeout = {
2771 .to_initval = 10 * HZ,
2772 .to_maxval = 10 * HZ,
2773 .to_retries = 2,
2774};
2775
2776/**
2777 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2778 * @args: rpc transport creation arguments
2779 *
2780 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2781 */
2782static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2783{
2784 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2785 struct sock_xprt *transport;
2786 struct rpc_xprt *xprt;
2787 struct rpc_xprt *ret;
2788
2789 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2790 xprt_max_tcp_slot_table_entries);
2791 if (IS_ERR(xprt))
2792 return xprt;
2793 transport = container_of(xprt, struct sock_xprt, xprt);
2794
2795 xprt->prot = 0;
2796 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2797 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2798
2799 xprt->bind_timeout = XS_BIND_TO;
2800 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2801 xprt->idle_timeout = XS_IDLE_DISC_TO;
2802
2803 xprt->ops = &xs_local_ops;
2804 xprt->timeout = &xs_local_default_timeout;
2805
2806 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2807 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
2808
2809 switch (sun->sun_family) {
2810 case AF_LOCAL:
2811 if (sun->sun_path[0] != '/') {
2812 dprintk("RPC: bad AF_LOCAL address: %s\n",
2813 sun->sun_path);
2814 ret = ERR_PTR(-EINVAL);
2815 goto out_err;
2816 }
2817 xprt_set_bound(xprt);
2818 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2819 ret = ERR_PTR(xs_local_setup_socket(transport));
2820 if (ret)
2821 goto out_err;
2822 break;
2823 default:
2824 ret = ERR_PTR(-EAFNOSUPPORT);
2825 goto out_err;
2826 }
2827
2828 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2829 xprt->address_strings[RPC_DISPLAY_ADDR]);
2830
2831 if (try_module_get(THIS_MODULE))
2832 return xprt;
2833 ret = ERR_PTR(-EINVAL);
2834out_err:
2835 xs_xprt_free(xprt);
2836 return ret;
2837}
2838
2839static const struct rpc_timeout xs_udp_default_timeout = {
2840 .to_initval = 5 * HZ,
2841 .to_maxval = 30 * HZ,
2842 .to_increment = 5 * HZ,
2843 .to_retries = 5,
2844};
2845
2846/**
2847 * xs_setup_udp - Set up transport to use a UDP socket
2848 * @args: rpc transport creation arguments
2849 *
2850 */
2851static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2852{
2853 struct sockaddr *addr = args->dstaddr;
2854 struct rpc_xprt *xprt;
2855 struct sock_xprt *transport;
2856 struct rpc_xprt *ret;
2857
2858 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2859 xprt_udp_slot_table_entries);
2860 if (IS_ERR(xprt))
2861 return xprt;
2862 transport = container_of(xprt, struct sock_xprt, xprt);
2863
2864 xprt->prot = IPPROTO_UDP;
2865 xprt->tsh_size = 0;
2866 /* XXX: header size can vary due to auth type, IPv6, etc. */
2867 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2868
2869 xprt->bind_timeout = XS_BIND_TO;
2870 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2871 xprt->idle_timeout = XS_IDLE_DISC_TO;
2872
2873 xprt->ops = &xs_udp_ops;
2874
2875 xprt->timeout = &xs_udp_default_timeout;
2876
2877 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2878 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2879
2880 switch (addr->sa_family) {
2881 case AF_INET:
2882 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2883 xprt_set_bound(xprt);
2884
2885 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2886 break;
2887 case AF_INET6:
2888 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2889 xprt_set_bound(xprt);
2890
2891 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2892 break;
2893 default:
2894 ret = ERR_PTR(-EAFNOSUPPORT);
2895 goto out_err;
2896 }
2897
2898 if (xprt_bound(xprt))
2899 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2900 xprt->address_strings[RPC_DISPLAY_ADDR],
2901 xprt->address_strings[RPC_DISPLAY_PORT],
2902 xprt->address_strings[RPC_DISPLAY_PROTO]);
2903 else
2904 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2905 xprt->address_strings[RPC_DISPLAY_ADDR],
2906 xprt->address_strings[RPC_DISPLAY_PROTO]);
2907
2908 if (try_module_get(THIS_MODULE))
2909 return xprt;
2910 ret = ERR_PTR(-EINVAL);
2911out_err:
2912 xs_xprt_free(xprt);
2913 return ret;
2914}
2915
2916static const struct rpc_timeout xs_tcp_default_timeout = {
2917 .to_initval = 60 * HZ,
2918 .to_maxval = 60 * HZ,
2919 .to_retries = 2,
2920};
2921
2922/**
2923 * xs_setup_tcp - Set up transport to use a TCP socket
2924 * @args: rpc transport creation arguments
2925 *
2926 */
2927static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2928{
2929 struct sockaddr *addr = args->dstaddr;
2930 struct rpc_xprt *xprt;
2931 struct sock_xprt *transport;
2932 struct rpc_xprt *ret;
2933 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2934
2935 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2936 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2937
2938 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2939 max_slot_table_size);
2940 if (IS_ERR(xprt))
2941 return xprt;
2942 transport = container_of(xprt, struct sock_xprt, xprt);
2943
2944 xprt->prot = IPPROTO_TCP;
2945 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2946 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2947
2948 xprt->bind_timeout = XS_BIND_TO;
2949 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2950 xprt->idle_timeout = XS_IDLE_DISC_TO;
2951
2952 xprt->ops = &xs_tcp_ops;
2953 xprt->timeout = &xs_tcp_default_timeout;
2954
2955 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
2956 xprt->connect_timeout = xprt->timeout->to_initval *
2957 (xprt->timeout->to_retries + 1);
2958
2959 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2960 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2961
2962 switch (addr->sa_family) {
2963 case AF_INET:
2964 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2965 xprt_set_bound(xprt);
2966
2967 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2968 break;
2969 case AF_INET6:
2970 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2971 xprt_set_bound(xprt);
2972
2973 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2974 break;
2975 default:
2976 ret = ERR_PTR(-EAFNOSUPPORT);
2977 goto out_err;
2978 }
2979
2980 if (xprt_bound(xprt))
2981 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2982 xprt->address_strings[RPC_DISPLAY_ADDR],
2983 xprt->address_strings[RPC_DISPLAY_PORT],
2984 xprt->address_strings[RPC_DISPLAY_PROTO]);
2985 else
2986 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2987 xprt->address_strings[RPC_DISPLAY_ADDR],
2988 xprt->address_strings[RPC_DISPLAY_PROTO]);
2989
2990 if (try_module_get(THIS_MODULE))
2991 return xprt;
2992 ret = ERR_PTR(-EINVAL);
2993out_err:
2994 xs_xprt_free(xprt);
2995 return ret;
2996}
2997
2998/**
2999 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3000 * @args: rpc transport creation arguments
3001 *
3002 */
3003static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3004{
3005 struct sockaddr *addr = args->dstaddr;
3006 struct rpc_xprt *xprt;
3007 struct sock_xprt *transport;
3008 struct svc_sock *bc_sock;
3009 struct rpc_xprt *ret;
3010
3011 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3012 xprt_tcp_slot_table_entries);
3013 if (IS_ERR(xprt))
3014 return xprt;
3015 transport = container_of(xprt, struct sock_xprt, xprt);
3016
3017 xprt->prot = IPPROTO_TCP;
3018 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
3019 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3020 xprt->timeout = &xs_tcp_default_timeout;
3021
3022 /* backchannel */
3023 xprt_set_bound(xprt);
3024 xprt->bind_timeout = 0;
3025 xprt->reestablish_timeout = 0;
3026 xprt->idle_timeout = 0;
3027
3028 xprt->ops = &bc_tcp_ops;
3029
3030 switch (addr->sa_family) {
3031 case AF_INET:
3032 xs_format_peer_addresses(xprt, "tcp",
3033 RPCBIND_NETID_TCP);
3034 break;
3035 case AF_INET6:
3036 xs_format_peer_addresses(xprt, "tcp",
3037 RPCBIND_NETID_TCP6);
3038 break;
3039 default:
3040 ret = ERR_PTR(-EAFNOSUPPORT);
3041 goto out_err;
3042 }
3043
3044 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3045 xprt->address_strings[RPC_DISPLAY_ADDR],
3046 xprt->address_strings[RPC_DISPLAY_PORT],
3047 xprt->address_strings[RPC_DISPLAY_PROTO]);
3048
3049 /*
3050 * Once we've associated a backchannel xprt with a connection,
3051 * we want to keep it around as long as the connection lasts,
3052 * in case we need to start using it for a backchannel again;
3053 * this reference won't be dropped until bc_xprt is destroyed.
3054 */
3055 xprt_get(xprt);
3056 args->bc_xprt->xpt_bc_xprt = xprt;
3057 xprt->bc_xprt = args->bc_xprt;
3058 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3059 transport->sock = bc_sock->sk_sock;
3060 transport->inet = bc_sock->sk_sk;
3061
3062 /*
3063 * Since we don't want connections for the backchannel, we set
3064 * the xprt status to connected
3065 */
3066 xprt_set_connected(xprt);
3067
3068 if (try_module_get(THIS_MODULE))
3069 return xprt;
3070
3071 args->bc_xprt->xpt_bc_xprt = NULL;
3072 args->bc_xprt->xpt_bc_xps = NULL;
3073 xprt_put(xprt);
3074 ret = ERR_PTR(-EINVAL);
3075out_err:
3076 xs_xprt_free(xprt);
3077 return ret;
3078}
3079
3080static struct xprt_class xs_local_transport = {
3081 .list = LIST_HEAD_INIT(xs_local_transport.list),
3082 .name = "named UNIX socket",
3083 .owner = THIS_MODULE,
3084 .ident = XPRT_TRANSPORT_LOCAL,
3085 .setup = xs_setup_local,
3086};
3087
3088static struct xprt_class xs_udp_transport = {
3089 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3090 .name = "udp",
3091 .owner = THIS_MODULE,
3092 .ident = XPRT_TRANSPORT_UDP,
3093 .setup = xs_setup_udp,
3094};
3095
3096static struct xprt_class xs_tcp_transport = {
3097 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3098 .name = "tcp",
3099 .owner = THIS_MODULE,
3100 .ident = XPRT_TRANSPORT_TCP,
3101 .setup = xs_setup_tcp,
3102};
3103
3104static struct xprt_class xs_bc_tcp_transport = {
3105 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3106 .name = "tcp NFSv4.1 backchannel",
3107 .owner = THIS_MODULE,
3108 .ident = XPRT_TRANSPORT_BC_TCP,
3109 .setup = xs_setup_bc_tcp,
3110};
3111
3112/**
3113 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3114 *
3115 */
3116int init_socket_xprt(void)
3117{
3118 if (!sunrpc_table_header)
3119 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3120
3121 xprt_register_transport(&xs_local_transport);
3122 xprt_register_transport(&xs_udp_transport);
3123 xprt_register_transport(&xs_tcp_transport);
3124 xprt_register_transport(&xs_bc_tcp_transport);
3125
3126 return 0;
3127}
3128
3129/**
3130 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3131 *
3132 */
3133void cleanup_socket_xprt(void)
3134{
3135 if (sunrpc_table_header) {
3136 unregister_sysctl_table(sunrpc_table_header);
3137 sunrpc_table_header = NULL;
3138 }
3139
3140 xprt_unregister_transport(&xs_local_transport);
3141 xprt_unregister_transport(&xs_udp_transport);
3142 xprt_unregister_transport(&xs_tcp_transport);
3143 xprt_unregister_transport(&xs_bc_tcp_transport);
3144}
3145
3146static int param_set_uint_minmax(const char *val,
3147 const struct kernel_param *kp,
3148 unsigned int min, unsigned int max)
3149{
3150 unsigned int num;
3151 int ret;
3152
3153 if (!val)
3154 return -EINVAL;
3155 ret = kstrtouint(val, 0, &num);
3156 if (ret)
3157 return ret;
3158 if (num < min || num > max)
3159 return -EINVAL;
3160 *((unsigned int *)kp->arg) = num;
3161 return 0;
3162}
3163
3164static int param_set_portnr(const char *val, const struct kernel_param *kp)
3165{
3166 return param_set_uint_minmax(val, kp,
3167 RPC_MIN_RESVPORT,
3168 RPC_MAX_RESVPORT);
3169}
3170
3171static const struct kernel_param_ops param_ops_portnr = {
3172 .set = param_set_portnr,
3173 .get = param_get_uint,
3174};
3175
3176#define param_check_portnr(name, p) \
3177 __param_check(name, p, unsigned int);
3178
3179module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3180module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3181
3182static int param_set_slot_table_size(const char *val,
3183 const struct kernel_param *kp)
3184{
3185 return param_set_uint_minmax(val, kp,
3186 RPC_MIN_SLOT_TABLE,
3187 RPC_MAX_SLOT_TABLE);
3188}
3189
3190static const struct kernel_param_ops param_ops_slot_table_size = {
3191 .set = param_set_slot_table_size,
3192 .get = param_get_uint,
3193};
3194
3195#define param_check_slot_table_size(name, p) \
3196 __param_check(name, p, unsigned int);
3197
3198static int param_set_max_slot_table_size(const char *val,
3199 const struct kernel_param *kp)
3200{
3201 return param_set_uint_minmax(val, kp,
3202 RPC_MIN_SLOT_TABLE,
3203 RPC_MAX_SLOT_TABLE_LIMIT);
3204}
3205
3206static const struct kernel_param_ops param_ops_max_slot_table_size = {
3207 .set = param_set_max_slot_table_size,
3208 .get = param_get_uint,
3209};
3210
3211#define param_check_max_slot_table_size(name, p) \
3212 __param_check(name, p, unsigned int);
3213
3214module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3215 slot_table_size, 0644);
3216module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3217 max_slot_table_size, 0644);
3218module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3219 slot_table_size, 0644);