sunrpc: if we're closing down a socket, clear memalloc on it first
1 /*
2 * linux/net/sunrpc/xprtsock.c
3 *
4 * Client-side transport implementation for sockets.
5 *
6 * TCP callback races fixes (C) 1998 Red Hat
7 * TCP send fixes (C) 1998 Red Hat
8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10 *
11 * Rewrite of large parts of the code in order to stabilize TCP stuff.
12 * Fix behaviour when socket buffer is full.
13 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
14 *
15 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
16 *
17 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
18 * <gilles.quillard@bull.net>
19 */
20
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/capability.h>
26 #include <linux/pagemap.h>
27 #include <linux/errno.h>
28 #include <linux/socket.h>
29 #include <linux/in.h>
30 #include <linux/net.h>
31 #include <linux/mm.h>
32 #include <linux/un.h>
33 #include <linux/udp.h>
34 #include <linux/tcp.h>
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/addr.h>
37 #include <linux/sunrpc/sched.h>
38 #include <linux/sunrpc/svcsock.h>
39 #include <linux/sunrpc/xprtsock.h>
40 #include <linux/file.h>
41 #ifdef CONFIG_SUNRPC_BACKCHANNEL
42 #include <linux/sunrpc/bc_xprt.h>
43 #endif
44
45 #include <net/sock.h>
46 #include <net/checksum.h>
47 #include <net/udp.h>
48 #include <net/tcp.h>
49
50 #include <trace/events/sunrpc.h>
51
52 #include "sunrpc.h"
53
54 static void xs_close(struct rpc_xprt *xprt);
55
56 /*
57 * xprtsock tunables
58 */
59 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
60 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
61 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
62
63 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
64 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
65
66 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
67
68 #define XS_TCP_LINGER_TO (15U * HZ)
69 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
70
71 /*
72 * We can register our own files under /proc/sys/sunrpc by
73 * calling register_sysctl_table() again. The files in that
74 * directory become the union of all files registered there.
75 *
76 * We simply need to make sure that we don't collide with
77 * someone else's file names!
78 */
79
80 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
81 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
82 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
83 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
84 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
85
86 static struct ctl_table_header *sunrpc_table_header;
87
88 /*
89 * FIXME: changing the UDP slot table size should also resize the UDP
90 * socket buffers for existing UDP transports
91 */
92 static struct ctl_table xs_tunables_table[] = {
93 {
94 .procname = "udp_slot_table_entries",
95 .data = &xprt_udp_slot_table_entries,
96 .maxlen = sizeof(unsigned int),
97 .mode = 0644,
98 .proc_handler = proc_dointvec_minmax,
99 .extra1 = &min_slot_table_size,
100 .extra2 = &max_slot_table_size
101 },
102 {
103 .procname = "tcp_slot_table_entries",
104 .data = &xprt_tcp_slot_table_entries,
105 .maxlen = sizeof(unsigned int),
106 .mode = 0644,
107 .proc_handler = proc_dointvec_minmax,
108 .extra1 = &min_slot_table_size,
109 .extra2 = &max_slot_table_size
110 },
111 {
112 .procname = "tcp_max_slot_table_entries",
113 .data = &xprt_max_tcp_slot_table_entries,
114 .maxlen = sizeof(unsigned int),
115 .mode = 0644,
116 .proc_handler = proc_dointvec_minmax,
117 .extra1 = &min_slot_table_size,
118 .extra2 = &max_tcp_slot_table_limit
119 },
120 {
121 .procname = "min_resvport",
122 .data = &xprt_min_resvport,
123 .maxlen = sizeof(unsigned int),
124 .mode = 0644,
125 .proc_handler = proc_dointvec_minmax,
126 .extra1 = &xprt_min_resvport_limit,
127 .extra2 = &xprt_max_resvport_limit
128 },
129 {
130 .procname = "max_resvport",
131 .data = &xprt_max_resvport,
132 .maxlen = sizeof(unsigned int),
133 .mode = 0644,
134 .proc_handler = proc_dointvec_minmax,
135 .extra1 = &xprt_min_resvport_limit,
136 .extra2 = &xprt_max_resvport_limit
137 },
138 {
139 .procname = "tcp_fin_timeout",
140 .data = &xs_tcp_fin_timeout,
141 .maxlen = sizeof(xs_tcp_fin_timeout),
142 .mode = 0644,
143 .proc_handler = proc_dointvec_jiffies,
144 },
145 { },
146 };
147
148 static struct ctl_table sunrpc_table[] = {
149 {
150 .procname = "sunrpc",
151 .mode = 0555,
152 .child = xs_tunables_table
153 },
154 { },
155 };
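/*
 * Once registered, these tunables appear under /proc/sys/sunrpc, e.g.
 * /proc/sys/sunrpc/tcp_slot_table_entries, and can be adjusted at run
 * time through the usual sysctl interface.
 */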
156
157 #endif
158
159 /*
160 * Wait duration for a reply from the RPC portmapper.
161 */
162 #define XS_BIND_TO (60U * HZ)
163
164 /*
165 * Delay if a UDP socket connect error occurs. This is most likely some
166 * kind of resource problem on the local host.
167 */
168 #define XS_UDP_REEST_TO (2U * HZ)
169
170 /*
171 * The reestablish timeout allows clients to delay for a bit before attempting
172 * to reconnect to a server that just dropped our connection.
173 *
174 * We implement an exponential backoff when trying to reestablish a TCP
175 * transport connection with the server. Some servers like to drop a TCP
176 * connection when they are overworked, so we start with a short timeout and
177 * increase over time if the server is down or not responding.
178 */
179 #define XS_TCP_INIT_REEST_TO (3U * HZ)
180 #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
181
182 /*
183 * TCP idle timeout; client drops the transport socket if it is idle
184 * for this long. Note that we also time out UDP sockets to prevent
185 * holding port numbers when there is no RPC traffic.
186 */
187 #define XS_IDLE_DISC_TO (5U * 60 * HZ)
188
189 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
190 # undef RPC_DEBUG_DATA
191 # define RPCDBG_FACILITY RPCDBG_TRANS
192 #endif
193
194 #ifdef RPC_DEBUG_DATA
195 static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
196 {
197 u8 *buf = (u8 *) packet;
198 int j;
199
200 dprintk("RPC: %s\n", msg);
201 for (j = 0; j < count && j < 128; j += 4) {
202 if (!(j & 31)) {
203 if (j)
204 dprintk("\n");
205 dprintk("0x%04x ", j);
206 }
207 dprintk("%02x%02x%02x%02x ",
208 buf[j], buf[j+1], buf[j+2], buf[j+3]);
209 }
210 dprintk("\n");
211 }
212 #else
213 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
214 {
215 /* NOP */
216 }
217 #endif
218
219 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
220 {
221 return (struct rpc_xprt *) sk->sk_user_data;
222 }
223
224 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
225 {
226 return (struct sockaddr *) &xprt->addr;
227 }
228
229 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
230 {
231 return (struct sockaddr_un *) &xprt->addr;
232 }
233
234 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
235 {
236 return (struct sockaddr_in *) &xprt->addr;
237 }
238
239 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
240 {
241 return (struct sockaddr_in6 *) &xprt->addr;
242 }
243
244 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
245 {
246 struct sockaddr *sap = xs_addr(xprt);
247 struct sockaddr_in6 *sin6;
248 struct sockaddr_in *sin;
249 struct sockaddr_un *sun;
250 char buf[128];
251
252 switch (sap->sa_family) {
253 case AF_LOCAL:
254 sun = xs_addr_un(xprt);
255 strlcpy(buf, sun->sun_path, sizeof(buf));
256 xprt->address_strings[RPC_DISPLAY_ADDR] =
257 kstrdup(buf, GFP_KERNEL);
258 break;
259 case AF_INET:
260 (void)rpc_ntop(sap, buf, sizeof(buf));
261 xprt->address_strings[RPC_DISPLAY_ADDR] =
262 kstrdup(buf, GFP_KERNEL);
263 sin = xs_addr_in(xprt);
264 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
265 break;
266 case AF_INET6:
267 (void)rpc_ntop(sap, buf, sizeof(buf));
268 xprt->address_strings[RPC_DISPLAY_ADDR] =
269 kstrdup(buf, GFP_KERNEL);
270 sin6 = xs_addr_in6(xprt);
271 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
272 break;
273 default:
274 BUG();
275 }
276
277 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
278 }
279
280 static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
281 {
282 struct sockaddr *sap = xs_addr(xprt);
283 char buf[128];
284
285 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
286 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
287
288 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
289 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
290 }
291
292 static void xs_format_peer_addresses(struct rpc_xprt *xprt,
293 const char *protocol,
294 const char *netid)
295 {
296 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
297 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
298 xs_format_common_peer_addresses(xprt);
299 xs_format_common_peer_ports(xprt);
300 }
301
302 static void xs_update_peer_port(struct rpc_xprt *xprt)
303 {
304 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
305 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
306
307 xs_format_common_peer_ports(xprt);
308 }
309
310 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
311 {
312 unsigned int i;
313
314 for (i = 0; i < RPC_DISPLAY_MAX; i++)
315 switch (i) {
316 case RPC_DISPLAY_PROTO:
317 case RPC_DISPLAY_NETID:
318 continue;
319 default:
320 kfree(xprt->address_strings[i]);
321 }
322 }
323
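/*
 * Every send below is non-blocking and must not raise SIGPIPE on a broken
 * connection, hence MSG_DONTWAIT | MSG_NOSIGNAL on each sendmsg/sendpage.
 */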
324 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
325
326 static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
327 {
328 struct msghdr msg = {
329 .msg_name = addr,
330 .msg_namelen = addrlen,
331 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
332 };
333 struct kvec iov = {
334 .iov_base = vec->iov_base + base,
335 .iov_len = vec->iov_len - base,
336 };
337
338 if (iov.iov_len != 0)
339 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
340 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
341 }
342
343 static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
344 {
345 ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
346 int offset, size_t size, int flags);
347 struct page **ppage;
348 unsigned int remainder;
349 int err;
350
351 remainder = xdr->page_len - base;
352 base += xdr->page_base;
353 ppage = xdr->pages + (base >> PAGE_SHIFT);
354 base &= ~PAGE_MASK;
355 do_sendpage = sock->ops->sendpage;
356 if (!zerocopy)
357 do_sendpage = sock_no_sendpage;
358 for(;;) {
359 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
360 int flags = XS_SENDMSG_FLAGS;
361
362 remainder -= len;
363 if (remainder != 0 || more)
364 flags |= MSG_MORE;
365 err = do_sendpage(sock, *ppage, base, len, flags);
366 if (remainder == 0 || err != len)
367 break;
368 *sent_p += err;
369 ppage++;
370 base = 0;
371 }
372 if (err > 0) {
373 *sent_p += err;
374 err = 0;
375 }
376 return err;
377 }
378
379 /**
380 * xs_sendpages - write pages directly to a socket
381 * @sock: socket to send on
382 * @addr: UDP only -- address of destination
383 * @addrlen: UDP only -- length of destination address
384 * @xdr: buffer containing this request
385 * @base: starting position in the buffer
386 * @zerocopy: true if it is safe to use sendpage()
387 * @sent_p: return the total number of bytes successfully queued for sending
388 *
389 */
390 static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
391 {
392 unsigned int remainder = xdr->len - base;
393 int err = 0;
394 int sent = 0;
395
396 if (unlikely(!sock))
397 return -ENOTSOCK;
398
399 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
400 if (base != 0) {
401 addr = NULL;
402 addrlen = 0;
403 }
404
405 if (base < xdr->head[0].iov_len || addr != NULL) {
406 unsigned int len = xdr->head[0].iov_len - base;
407 remainder -= len;
408 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
409 if (remainder == 0 || err != len)
410 goto out;
411 *sent_p += err;
412 base = 0;
413 } else
414 base -= xdr->head[0].iov_len;
415
416 if (base < xdr->page_len) {
417 unsigned int len = xdr->page_len - base;
418 remainder -= len;
419 err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
420 *sent_p += sent;
421 if (remainder == 0 || sent != len)
422 goto out;
423 base = 0;
424 } else
425 base -= xdr->page_len;
426
427 if (base >= xdr->tail[0].iov_len)
428 return 0;
429 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
430 out:
431 if (err > 0) {
432 *sent_p += err;
433 err = 0;
434 }
435 return err;
436 }
437
438 static void xs_nospace_callback(struct rpc_task *task)
439 {
440 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
441
442 transport->inet->sk_write_pending--;
443 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
444 }
445
446 /**
447 * xs_nospace - place task on wait queue if transmit was incomplete
448 * @task: task to put to sleep
449 *
450 */
451 static int xs_nospace(struct rpc_task *task)
452 {
453 struct rpc_rqst *req = task->tk_rqstp;
454 struct rpc_xprt *xprt = req->rq_xprt;
455 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
456 struct sock *sk = transport->inet;
457 int ret = -EAGAIN;
458
459 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
460 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
461 req->rq_slen);
462
463 /* Protect against races with write_space */
464 spin_lock_bh(&xprt->transport_lock);
465
466 /* Don't race with disconnect */
467 if (xprt_connected(xprt)) {
468 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
469 /*
470 * Notify TCP that we're limited by the application
471 * window size
472 */
473 set_bit(SOCK_NOSPACE, &transport->sock->flags);
474 sk->sk_write_pending++;
475 /* ...and wait for more buffer space */
476 xprt_wait_for_buffer_space(task, xs_nospace_callback);
477 }
478 } else {
479 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
480 ret = -ENOTCONN;
481 }
482
483 spin_unlock_bh(&xprt->transport_lock);
484
485 /* Race breaker in case memory is freed before above code is called */
486 sk->sk_write_space(sk);
487 return ret;
488 }
489
490 /*
491 * Construct a stream transport record marker in @buf.
492 */
493 static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
494 {
495 u32 reclen = buf->len - sizeof(rpc_fraghdr);
496 rpc_fraghdr *base = buf->head[0].iov_base;
497 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
498 }
499
500 /**
501 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
502 * @task: RPC task that manages the state of an RPC request
503 *
504 * Return values:
505 * 0: The request has been sent
506 * EAGAIN: The socket was blocked, please call again later to
507 * complete the request
508 * ENOTCONN: Caller needs to invoke connect logic then call again
509 * other: Some other error occurred, the request was not sent
510 */
511 static int xs_local_send_request(struct rpc_task *task)
512 {
513 struct rpc_rqst *req = task->tk_rqstp;
514 struct rpc_xprt *xprt = req->rq_xprt;
515 struct sock_xprt *transport =
516 container_of(xprt, struct sock_xprt, xprt);
517 struct xdr_buf *xdr = &req->rq_snd_buf;
518 int status;
519 int sent = 0;
520
521 xs_encode_stream_record_marker(&req->rq_snd_buf);
522
523 xs_pktdump("packet data:",
524 req->rq_svec->iov_base, req->rq_svec->iov_len);
525
526 status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
527 true, &sent);
528 dprintk("RPC: %s(%u) = %d\n",
529 __func__, xdr->len - req->rq_bytes_sent, status);
530 if (likely(sent > 0) || status == 0) {
531 req->rq_bytes_sent += sent;
532 req->rq_xmit_bytes_sent += sent;
533 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
534 req->rq_bytes_sent = 0;
535 return 0;
536 }
537 status = -EAGAIN;
538 }
539
540 switch (status) {
541 case -ENOBUFS:
542 case -EAGAIN:
543 status = xs_nospace(task);
544 break;
545 default:
546 dprintk("RPC: sendmsg returned unrecognized error %d\n",
547 -status);
548 case -EPIPE:
549 xs_close(xprt);
550 status = -ENOTCONN;
551 }
552
553 return status;
554 }
555
556 /**
557 * xs_udp_send_request - write an RPC request to a UDP socket
558 * @task: address of RPC task that manages the state of an RPC request
559 *
560 * Return values:
561 * 0: The request has been sent
562 * EAGAIN: The socket was blocked, please call again later to
563 * complete the request
564 * ENOTCONN: Caller needs to invoke connect logic then call again
565 * other: Some other error occurred, the request was not sent
566 */
567 static int xs_udp_send_request(struct rpc_task *task)
568 {
569 struct rpc_rqst *req = task->tk_rqstp;
570 struct rpc_xprt *xprt = req->rq_xprt;
571 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
572 struct xdr_buf *xdr = &req->rq_snd_buf;
573 int sent = 0;
574 int status;
575
576 xs_pktdump("packet data:",
577 req->rq_svec->iov_base,
578 req->rq_svec->iov_len);
579
580 if (!xprt_bound(xprt))
581 return -ENOTCONN;
582 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
583 xdr, req->rq_bytes_sent, true, &sent);
584
585 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
586 xdr->len - req->rq_bytes_sent, status);
587
588 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
589 if (status == -EPERM)
590 goto process_status;
591
592 if (sent > 0 || status == 0) {
593 req->rq_xmit_bytes_sent += sent;
594 if (sent >= req->rq_slen)
595 return 0;
596 /* Still some bytes left; set up for a retry later. */
597 status = -EAGAIN;
598 }
599
600 process_status:
601 switch (status) {
602 case -ENOTSOCK:
603 status = -ENOTCONN;
604 /* Should we call xs_close() here? */
605 break;
606 case -EAGAIN:
607 status = xs_nospace(task);
608 break;
609 default:
610 dprintk("RPC: sendmsg returned unrecognized error %d\n",
611 -status);
612 case -ENETUNREACH:
613 case -ENOBUFS:
614 case -EPIPE:
615 case -ECONNREFUSED:
616 case -EPERM:
617 /* When the server has died, an ICMP port unreachable message
618 * prompts ECONNREFUSED. */
619 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
620 }
621
622 return status;
623 }
624
625 /**
626 * xs_tcp_shutdown - gracefully shut down a TCP socket
627 * @xprt: transport
628 *
629 * Initiates a graceful shutdown of the TCP socket by calling the
630 * equivalent of shutdown(SHUT_RDWR);
631 */
632 static void xs_tcp_shutdown(struct rpc_xprt *xprt)
633 {
634 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
635 struct socket *sock = transport->sock;
636
637 if (sock != NULL) {
638 kernel_sock_shutdown(sock, SHUT_RDWR);
639 trace_rpc_socket_shutdown(xprt, sock);
640 }
641 }
642
643 /**
644 * xs_tcp_send_request - write an RPC request to a TCP socket
645 * @task: address of RPC task that manages the state of an RPC request
646 *
647 * Return values:
648 * 0: The request has been sent
649 * EAGAIN: The socket was blocked, please call again later to
650 * complete the request
651 * ENOTCONN: Caller needs to invoke connect logic then call again
652 * other: Some other error occurred, the request was not sent
653 *
654 * XXX: In the case of soft timeouts, should we eventually give up
655 * if sendmsg is not able to make progress?
656 */
657 static int xs_tcp_send_request(struct rpc_task *task)
658 {
659 struct rpc_rqst *req = task->tk_rqstp;
660 struct rpc_xprt *xprt = req->rq_xprt;
661 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
662 struct xdr_buf *xdr = &req->rq_snd_buf;
663 bool zerocopy = true;
664 int status;
665 int sent;
666
667 xs_encode_stream_record_marker(&req->rq_snd_buf);
668
669 xs_pktdump("packet data:",
670 req->rq_svec->iov_base,
671 req->rq_svec->iov_len);
672 /* Don't use zero copy if this is a resend. If the RPC call
673 * completes while the socket holds a reference to the pages,
674 * then we may end up resending corrupted data.
675 */
676 if (task->tk_flags & RPC_TASK_SENT)
677 zerocopy = false;
678
679 /* Continue transmitting the packet/record. We must be careful
680 * to cope with writespace callbacks arriving _after_ we have
681 * called sendmsg(). */
682 while (1) {
683 sent = 0;
684 status = xs_sendpages(transport->sock, NULL, 0, xdr,
685 req->rq_bytes_sent, zerocopy, &sent);
686
687 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
688 xdr->len - req->rq_bytes_sent, status);
689
690 if (unlikely(sent == 0 && status < 0))
691 break;
692
693 /* If we've sent the entire packet, immediately
694 * reset the count of bytes sent. */
695 req->rq_bytes_sent += sent;
696 req->rq_xmit_bytes_sent += sent;
697 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
698 req->rq_bytes_sent = 0;
699 return 0;
700 }
701
702 if (sent != 0)
703 continue;
704 status = -EAGAIN;
705 break;
706 }
707
708 switch (status) {
709 case -ENOTSOCK:
710 status = -ENOTCONN;
711 /* Should we call xs_close() here? */
712 break;
713 case -ENOBUFS:
714 case -EAGAIN:
715 status = xs_nospace(task);
716 break;
717 default:
718 dprintk("RPC: sendmsg returned unrecognized error %d\n",
719 -status);
720 case -ECONNRESET:
721 case -ECONNREFUSED:
722 case -ENOTCONN:
723 case -EADDRINUSE:
724 case -EPIPE:
725 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
726 }
727
728 return status;
729 }
730
731 /**
732 * xs_tcp_release_xprt - clean up after a tcp transmission
733 * @xprt: transport
734 * @task: rpc task
735 *
736 * This cleans up if an error causes us to abort the transmission of a request.
737 * In this case, the socket may need to be reset in order to avoid confusing
738 * the server.
739 */
740 static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
741 {
742 struct rpc_rqst *req;
743
744 if (task != xprt->snd_task)
745 return;
746 if (task == NULL)
747 goto out_release;
748 req = task->tk_rqstp;
749 if (req == NULL)
750 goto out_release;
751 if (req->rq_bytes_sent == 0)
752 goto out_release;
753 if (req->rq_bytes_sent == req->rq_snd_buf.len)
754 goto out_release;
755 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
756 out_release:
757 xprt_release_xprt(xprt, task);
758 }
759
760 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
761 {
762 transport->old_data_ready = sk->sk_data_ready;
763 transport->old_state_change = sk->sk_state_change;
764 transport->old_write_space = sk->sk_write_space;
765 transport->old_error_report = sk->sk_error_report;
766 }
767
768 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
769 {
770 sk->sk_data_ready = transport->old_data_ready;
771 sk->sk_state_change = transport->old_state_change;
772 sk->sk_write_space = transport->old_write_space;
773 sk->sk_error_report = transport->old_error_report;
774 }
775
776 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
777 {
778 smp_mb__before_atomic();
779 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
780 clear_bit(XPRT_CLOSING, &xprt->state);
781 smp_mb__after_atomic();
782 }
783
784 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
785 {
786 xs_sock_reset_connection_flags(xprt);
787 /* Mark transport as closed and wake up all pending tasks */
788 xprt_disconnect_done(xprt);
789 }
790
791 /**
792 * xs_error_report - callback to handle TCP socket state errors
793 * @sk: socket
794 *
795 * Note: we don't call sock_error() since there may be an rpc_task
796 * using the socket, and so we don't want to clear sk->sk_err.
797 */
798 static void xs_error_report(struct sock *sk)
799 {
800 struct rpc_xprt *xprt;
801 int err;
802
803 read_lock_bh(&sk->sk_callback_lock);
804 if (!(xprt = xprt_from_sock(sk)))
805 goto out;
806
807 err = -sk->sk_err;
808 if (err == 0)
809 goto out;
810 /* Is this a reset event? */
811 if (sk->sk_state == TCP_CLOSE)
812 xs_sock_mark_closed(xprt);
813 dprintk("RPC: xs_error_report client %p, error=%d...\n",
814 xprt, -err);
815 trace_rpc_socket_error(xprt, sk->sk_socket, err);
816 xprt_wake_pending_tasks(xprt, err);
817 out:
818 read_unlock_bh(&sk->sk_callback_lock);
819 }
820
821 static void xs_reset_transport(struct sock_xprt *transport)
822 {
823 struct socket *sock = transport->sock;
824 struct sock *sk = transport->inet;
825 struct rpc_xprt *xprt = &transport->xprt;
826
827 if (sk == NULL)
828 return;
829
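/*
 * If this socket was tagged as a memalloc (swap-backed) socket, clear
 * that tag before tearing it down so the socket no longer draws on the
 * emergency memory reserves.
 */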
830 if (atomic_read(&transport->xprt.swapper))
831 sk_clear_memalloc(sk);
832
833 write_lock_bh(&sk->sk_callback_lock);
834 transport->inet = NULL;
835 transport->sock = NULL;
836
837 sk->sk_user_data = NULL;
838
839 xs_restore_old_callbacks(transport, sk);
840 write_unlock_bh(&sk->sk_callback_lock);
841 xs_sock_reset_connection_flags(xprt);
842
843 trace_rpc_socket_close(xprt, sock);
844 sock_release(sock);
845 }
846
847 /**
848 * xs_close - close a socket
849 * @xprt: transport
850 *
851 * This is used when all requests are complete; i.e., no DRC state that
852 * we want to preserve remains on the server.
853 *
854 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
855 * xs_reset_transport() zeroing the socket from underneath a writer.
856 */
857 static void xs_close(struct rpc_xprt *xprt)
858 {
859 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
860
861 dprintk("RPC: xs_close xprt %p\n", xprt);
862
863 xs_reset_transport(transport);
864 xprt->reestablish_timeout = 0;
865
866 xprt_disconnect_done(xprt);
867 }
868
869 static void xs_xprt_free(struct rpc_xprt *xprt)
870 {
871 xs_free_peer_addresses(xprt);
872 xprt_free(xprt);
873 }
874
875 /**
876 * xs_destroy - prepare to shutdown a transport
877 * @xprt: doomed transport
878 *
879 */
880 static void xs_destroy(struct rpc_xprt *xprt)
881 {
882 dprintk("RPC: xs_destroy xprt %p\n", xprt);
883
884 xs_close(xprt);
885 xs_xprt_free(xprt);
886 module_put(THIS_MODULE);
887 }
888
889 static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
890 {
891 struct xdr_skb_reader desc = {
892 .skb = skb,
893 .offset = sizeof(rpc_fraghdr),
894 .count = skb->len - sizeof(rpc_fraghdr),
895 };
896
897 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
898 return -1;
899 if (desc.count)
900 return -1;
901 return 0;
902 }
903
904 /**
905 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
906 * @sk: socket with data to read
907 *
908 * Currently this assumes we can read the whole reply in a single gulp.
909 */
910 static void xs_local_data_ready(struct sock *sk)
911 {
912 struct rpc_task *task;
913 struct rpc_xprt *xprt;
914 struct rpc_rqst *rovr;
915 struct sk_buff *skb;
916 int err, repsize, copied;
917 u32 _xid;
918 __be32 *xp;
919
920 read_lock_bh(&sk->sk_callback_lock);
921 dprintk("RPC: %s...\n", __func__);
922 xprt = xprt_from_sock(sk);
923 if (xprt == NULL)
924 goto out;
925
926 skb = skb_recv_datagram(sk, 0, 1, &err);
927 if (skb == NULL)
928 goto out;
929
930 repsize = skb->len - sizeof(rpc_fraghdr);
931 if (repsize < 4) {
932 dprintk("RPC: impossible RPC reply size %d\n", repsize);
933 goto dropit;
934 }
935
936 /* Copy the XID from the skb... */
937 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
938 if (xp == NULL)
939 goto dropit;
940
941 /* Look up and lock the request corresponding to the given XID */
942 spin_lock(&xprt->transport_lock);
943 rovr = xprt_lookup_rqst(xprt, *xp);
944 if (!rovr)
945 goto out_unlock;
946 task = rovr->rq_task;
947
948 copied = rovr->rq_private_buf.buflen;
949 if (copied > repsize)
950 copied = repsize;
951
952 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
953 dprintk("RPC: sk_buff copy failed\n");
954 goto out_unlock;
955 }
956
957 xprt_complete_rqst(task, copied);
958
959 out_unlock:
960 spin_unlock(&xprt->transport_lock);
961 dropit:
962 skb_free_datagram(sk, skb);
963 out:
964 read_unlock_bh(&sk->sk_callback_lock);
965 }
966
967 /**
968 * xs_udp_data_ready - "data ready" callback for UDP sockets
969 * @sk: socket with data to read
970 *
971 */
972 static void xs_udp_data_ready(struct sock *sk)
973 {
974 struct rpc_task *task;
975 struct rpc_xprt *xprt;
976 struct rpc_rqst *rovr;
977 struct sk_buff *skb;
978 int err, repsize, copied;
979 u32 _xid;
980 __be32 *xp;
981
982 read_lock_bh(&sk->sk_callback_lock);
983 dprintk("RPC: xs_udp_data_ready...\n");
984 if (!(xprt = xprt_from_sock(sk)))
985 goto out;
986
987 if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
988 goto out;
989
990 repsize = skb->len - sizeof(struct udphdr);
991 if (repsize < 4) {
992 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
993 goto dropit;
994 }
995
996 /* Copy the XID from the skb... */
997 xp = skb_header_pointer(skb, sizeof(struct udphdr),
998 sizeof(_xid), &_xid);
999 if (xp == NULL)
1000 goto dropit;
1001
1002 /* Look up and lock the request corresponding to the given XID */
1003 spin_lock(&xprt->transport_lock);
1004 rovr = xprt_lookup_rqst(xprt, *xp);
1005 if (!rovr)
1006 goto out_unlock;
1007 task = rovr->rq_task;
1008
1009 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1010 copied = repsize;
1011
1012 /* Suck it into the iovec, verify checksum if not done by hw. */
1013 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1014 UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
1015 goto out_unlock;
1016 }
1017
1018 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1019
1020 xprt_adjust_cwnd(xprt, task, copied);
1021 xprt_complete_rqst(task, copied);
1022
1023 out_unlock:
1024 spin_unlock(&xprt->transport_lock);
1025 dropit:
1026 skb_free_datagram(sk, skb);
1027 out:
1028 read_unlock_bh(&sk->sk_callback_lock);
1029 }
1030
1031 /*
1032 * Helper function to force a TCP close if the server is sending
1033 * junk and/or it has put us in CLOSE_WAIT
1034 */
1035 static void xs_tcp_force_close(struct rpc_xprt *xprt)
1036 {
1037 xprt_force_disconnect(xprt);
1038 }
1039
1040 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1041 {
1042 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1043 size_t len, used;
1044 char *p;
1045
1046 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1047 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1048 used = xdr_skb_read_bits(desc, p, len);
1049 transport->tcp_offset += used;
1050 if (used != len)
1051 return;
1052
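/*
 * RPC record marking: the most significant bit of the 32-bit marker
 * flags the last fragment of a record; the remaining 31 bits carry
 * the fragment length.
 */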
1053 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1054 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1055 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1056 else
1057 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1058 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1059
1060 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1061 transport->tcp_offset = 0;
1062
1063 /* Sanity check of the record length */
1064 if (unlikely(transport->tcp_reclen < 8)) {
1065 dprintk("RPC: invalid TCP record fragment length\n");
1066 xs_tcp_force_close(xprt);
1067 return;
1068 }
1069 dprintk("RPC: reading TCP record fragment of length %d\n",
1070 transport->tcp_reclen);
1071 }
1072
1073 static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1074 {
1075 if (transport->tcp_offset == transport->tcp_reclen) {
1076 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1077 transport->tcp_offset = 0;
1078 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1079 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1080 transport->tcp_flags |= TCP_RCV_COPY_XID;
1081 transport->tcp_copied = 0;
1082 }
1083 }
1084 }
1085
1086 static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1087 {
1088 size_t len, used;
1089 char *p;
1090
1091 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1092 dprintk("RPC: reading XID (%Zu bytes)\n", len);
1093 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1094 used = xdr_skb_read_bits(desc, p, len);
1095 transport->tcp_offset += used;
1096 if (used != len)
1097 return;
1098 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1099 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1100 transport->tcp_copied = 4;
1101 dprintk("RPC: reading %s XID %08x\n",
1102 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1103 : "request with",
1104 ntohl(transport->tcp_xid));
1105 xs_tcp_check_fraghdr(transport);
1106 }
1107
1108 static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1109 struct xdr_skb_reader *desc)
1110 {
1111 size_t len, used;
1112 u32 offset;
1113 char *p;
1114
1115 /*
1116 * We want transport->tcp_offset to be 8 at the end of this routine
1117 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1118 * When this function is called for the first time,
1119 * transport->tcp_offset is 4 (after having already read the xid).
1120 */
1121 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1122 len = sizeof(transport->tcp_calldir) - offset;
1123 dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
1124 p = ((char *) &transport->tcp_calldir) + offset;
1125 used = xdr_skb_read_bits(desc, p, len);
1126 transport->tcp_offset += used;
1127 if (used != len)
1128 return;
1129 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1130 /*
1131 * We don't yet have the XDR buffer, so we will write the calldir
1132 * out after we get the buffer from the 'struct rpc_rqst'
1133 */
1134 switch (ntohl(transport->tcp_calldir)) {
1135 case RPC_REPLY:
1136 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1137 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1138 transport->tcp_flags |= TCP_RPC_REPLY;
1139 break;
1140 case RPC_CALL:
1141 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1142 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1143 transport->tcp_flags &= ~TCP_RPC_REPLY;
1144 break;
1145 default:
1146 dprintk("RPC: invalid request message type\n");
1147 xs_tcp_force_close(&transport->xprt);
1148 }
1149 xs_tcp_check_fraghdr(transport);
1150 }
1151
1152 static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1153 struct xdr_skb_reader *desc,
1154 struct rpc_rqst *req)
1155 {
1156 struct sock_xprt *transport =
1157 container_of(xprt, struct sock_xprt, xprt);
1158 struct xdr_buf *rcvbuf;
1159 size_t len;
1160 ssize_t r;
1161
1162 rcvbuf = &req->rq_private_buf;
1163
1164 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1165 /*
1166 * Save the RPC direction in the XDR buffer
1167 */
1168 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1169 &transport->tcp_calldir,
1170 sizeof(transport->tcp_calldir));
1171 transport->tcp_copied += sizeof(transport->tcp_calldir);
1172 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1173 }
1174
1175 len = desc->count;
1176 if (len > transport->tcp_reclen - transport->tcp_offset) {
1177 struct xdr_skb_reader my_desc;
1178
1179 len = transport->tcp_reclen - transport->tcp_offset;
1180 memcpy(&my_desc, desc, sizeof(my_desc));
1181 my_desc.count = len;
1182 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1183 &my_desc, xdr_skb_read_bits);
1184 desc->count -= r;
1185 desc->offset += r;
1186 } else
1187 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1188 desc, xdr_skb_read_bits);
1189
1190 if (r > 0) {
1191 transport->tcp_copied += r;
1192 transport->tcp_offset += r;
1193 }
1194 if (r != len) {
1195 /* Error when copying to the receive buffer,
1196 * usually because we weren't able to allocate
1197 * additional buffer pages. All we can do now
1198 * is turn off TCP_RCV_COPY_DATA, so the request
1199 * will not receive any additional updates,
1200 * and time out.
1201 * Any remaining data from this record will
1202 * be discarded.
1203 */
1204 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1205 dprintk("RPC: XID %08x truncated request\n",
1206 ntohl(transport->tcp_xid));
1207 dprintk("RPC: xprt = %p, tcp_copied = %lu, "
1208 "tcp_offset = %u, tcp_reclen = %u\n",
1209 xprt, transport->tcp_copied,
1210 transport->tcp_offset, transport->tcp_reclen);
1211 return;
1212 }
1213
1214 dprintk("RPC: XID %08x read %Zd bytes\n",
1215 ntohl(transport->tcp_xid), r);
1216 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1217 "tcp_reclen = %u\n", xprt, transport->tcp_copied,
1218 transport->tcp_offset, transport->tcp_reclen);
1219
1220 if (transport->tcp_copied == req->rq_private_buf.buflen)
1221 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1222 else if (transport->tcp_offset == transport->tcp_reclen) {
1223 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1224 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1225 }
1226 }
1227
1228 /*
1229 * Finds the request corresponding to the RPC xid and invokes the common
1230 * tcp read code to read the data.
1231 */
1232 static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1233 struct xdr_skb_reader *desc)
1234 {
1235 struct sock_xprt *transport =
1236 container_of(xprt, struct sock_xprt, xprt);
1237 struct rpc_rqst *req;
1238
1239 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1240
1241 /* Find and lock the request corresponding to this xid */
1242 spin_lock(&xprt->transport_lock);
1243 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1244 if (!req) {
1245 dprintk("RPC: XID %08x request not found!\n",
1246 ntohl(transport->tcp_xid));
1247 spin_unlock(&xprt->transport_lock);
1248 return -1;
1249 }
1250
1251 xs_tcp_read_common(xprt, desc, req);
1252
1253 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1254 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1255
1256 spin_unlock(&xprt->transport_lock);
1257 return 0;
1258 }
1259
1260 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1261 /*
1262 * Obtains an rpc_rqst previously allocated and invokes the common
1263 * tcp read code to read the data. The result is placed in the callback
1264 * queue.
1265 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1266 * connection and return -1.
1267 */
1268 static int xs_tcp_read_callback(struct rpc_xprt *xprt,
1269 struct xdr_skb_reader *desc)
1270 {
1271 struct sock_xprt *transport =
1272 container_of(xprt, struct sock_xprt, xprt);
1273 struct rpc_rqst *req;
1274
1275 /* Look up and lock the request corresponding to the given XID */
1276 spin_lock(&xprt->transport_lock);
1277 req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
1278 if (req == NULL) {
1279 spin_unlock(&xprt->transport_lock);
1280 printk(KERN_WARNING "Callback slot table overflowed\n");
1281 xprt_force_disconnect(xprt);
1282 return -1;
1283 }
1284
1285 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1286 xs_tcp_read_common(xprt, desc, req);
1287
1288 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1289 xprt_complete_bc_request(req, transport->tcp_copied);
1290 spin_unlock(&xprt->transport_lock);
1291
1292 return 0;
1293 }
1294
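/*
 * The call direction parsed from the record decides whether this data is
 * a reply to one of our own calls or a new backchannel request from the
 * server (e.g. an NFSv4.1 callback).
 */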
1295 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1296 struct xdr_skb_reader *desc)
1297 {
1298 struct sock_xprt *transport =
1299 container_of(xprt, struct sock_xprt, xprt);
1300
1301 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1302 xs_tcp_read_reply(xprt, desc) :
1303 xs_tcp_read_callback(xprt, desc);
1304 }
1305 #else
1306 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1307 struct xdr_skb_reader *desc)
1308 {
1309 return xs_tcp_read_reply(xprt, desc);
1310 }
1311 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1312
1313 /*
1314 * Read data off the transport. This can be either an RPC_CALL or an
1315 * RPC_REPLY. Relay the processing to helper functions.
1316 */
1317 static void xs_tcp_read_data(struct rpc_xprt *xprt,
1318 struct xdr_skb_reader *desc)
1319 {
1320 struct sock_xprt *transport =
1321 container_of(xprt, struct sock_xprt, xprt);
1322
1323 if (_xs_tcp_read_data(xprt, desc) == 0)
1324 xs_tcp_check_fraghdr(transport);
1325 else {
1326 /*
1327 * The transport_lock protects the request handling.
1328 * There's no need to hold it to update the tcp_flags.
1329 */
1330 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1331 }
1332 }
1333
1334 static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1335 {
1336 size_t len;
1337
1338 len = transport->tcp_reclen - transport->tcp_offset;
1339 if (len > desc->count)
1340 len = desc->count;
1341 desc->count -= len;
1342 desc->offset += len;
1343 transport->tcp_offset += len;
1344 dprintk("RPC: discarded %Zu bytes\n", len);
1345 xs_tcp_check_fraghdr(transport);
1346 }
1347
1348 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1349 {
1350 struct rpc_xprt *xprt = rd_desc->arg.data;
1351 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1352 struct xdr_skb_reader desc = {
1353 .skb = skb,
1354 .offset = offset,
1355 .count = len,
1356 };
1357
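/*
 * Walk the skb as a simple state machine: record marker, then XID, then
 * call direction, then payload, finally discarding anything left over
 * from a truncated record.
 */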
1358 dprintk("RPC: xs_tcp_data_recv started\n");
1359 do {
1360 trace_xs_tcp_data_recv(transport);
1361 /* Read in a new fragment marker if necessary */
1362 /* Can we ever really expect to get completely empty fragments? */
1363 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1364 xs_tcp_read_fraghdr(xprt, &desc);
1365 continue;
1366 }
1367 /* Read in the xid if necessary */
1368 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1369 xs_tcp_read_xid(transport, &desc);
1370 continue;
1371 }
1372 /* Read in the call/reply flag */
1373 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1374 xs_tcp_read_calldir(transport, &desc);
1375 continue;
1376 }
1377 /* Read in the request data */
1378 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1379 xs_tcp_read_data(xprt, &desc);
1380 continue;
1381 }
1382 /* Skip over any trailing bytes on short reads */
1383 xs_tcp_read_discard(transport, &desc);
1384 } while (desc.count);
1385 trace_xs_tcp_data_recv(transport);
1386 dprintk("RPC: xs_tcp_data_recv done\n");
1387 return len - desc.count;
1388 }
1389
1390 /**
1391 * xs_tcp_data_ready - "data ready" callback for TCP sockets
1392 * @sk: socket with data to read
1393 *
1394 */
1395 static void xs_tcp_data_ready(struct sock *sk)
1396 {
1397 struct rpc_xprt *xprt;
1398 read_descriptor_t rd_desc;
1399 int read;
1400 unsigned long total = 0;
1401
1402 dprintk("RPC: xs_tcp_data_ready...\n");
1403
1404 read_lock_bh(&sk->sk_callback_lock);
1405 if (!(xprt = xprt_from_sock(sk))) {
1406 read = 0;
1407 goto out;
1408 }
1409 /* Any data means we had a useful conversation, so
1410 * we don't need to delay the next reconnect
1411 */
1412 if (xprt->reestablish_timeout)
1413 xprt->reestablish_timeout = 0;
1414
1415 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1416 rd_desc.arg.data = xprt;
1417 do {
1418 rd_desc.count = 65536;
1419 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1420 if (read > 0)
1421 total += read;
1422 } while (read > 0);
1423 out:
1424 trace_xs_tcp_data_ready(xprt, read, total);
1425 read_unlock_bh(&sk->sk_callback_lock);
1426 }
1427
1428 /**
1429 * xs_tcp_state_change - callback to handle TCP socket state changes
1430 * @sk: socket whose state has changed
1431 *
1432 */
1433 static void xs_tcp_state_change(struct sock *sk)
1434 {
1435 struct rpc_xprt *xprt;
1436
1437 read_lock_bh(&sk->sk_callback_lock);
1438 if (!(xprt = xprt_from_sock(sk)))
1439 goto out;
1440 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1441 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1442 sk->sk_state, xprt_connected(xprt),
1443 sock_flag(sk, SOCK_DEAD),
1444 sock_flag(sk, SOCK_ZAPPED),
1445 sk->sk_shutdown);
1446
1447 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1448 switch (sk->sk_state) {
1449 case TCP_ESTABLISHED:
1450 spin_lock(&xprt->transport_lock);
1451 if (!xprt_test_and_set_connected(xprt)) {
1452 struct sock_xprt *transport = container_of(xprt,
1453 struct sock_xprt, xprt);
1454
1455 /* Reset TCP record info */
1456 transport->tcp_offset = 0;
1457 transport->tcp_reclen = 0;
1458 transport->tcp_copied = 0;
1459 transport->tcp_flags =
1460 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1461 xprt->connect_cookie++;
1462
1463 xprt_wake_pending_tasks(xprt, -EAGAIN);
1464 }
1465 spin_unlock(&xprt->transport_lock);
1466 break;
1467 case TCP_FIN_WAIT1:
1468 /* The client initiated a shutdown of the socket */
1469 xprt->connect_cookie++;
1470 xprt->reestablish_timeout = 0;
1471 set_bit(XPRT_CLOSING, &xprt->state);
1472 smp_mb__before_atomic();
1473 clear_bit(XPRT_CONNECTED, &xprt->state);
1474 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1475 smp_mb__after_atomic();
1476 break;
1477 case TCP_CLOSE_WAIT:
1478 /* The server initiated a shutdown of the socket */
1479 xprt->connect_cookie++;
1480 clear_bit(XPRT_CONNECTED, &xprt->state);
1481 xs_tcp_force_close(xprt);
1482 case TCP_CLOSING:
1483 /*
1484 * If the server closed down the connection, make sure that
1485 * we back off before reconnecting
1486 */
1487 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1488 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1489 break;
1490 case TCP_LAST_ACK:
1491 set_bit(XPRT_CLOSING, &xprt->state);
1492 smp_mb__before_atomic();
1493 clear_bit(XPRT_CONNECTED, &xprt->state);
1494 smp_mb__after_atomic();
1495 break;
1496 case TCP_CLOSE:
1497 xs_sock_mark_closed(xprt);
1498 }
1499 out:
1500 read_unlock_bh(&sk->sk_callback_lock);
1501 }
1502
1503 static void xs_write_space(struct sock *sk)
1504 {
1505 struct socket *sock;
1506 struct rpc_xprt *xprt;
1507
1508 if (unlikely(!(sock = sk->sk_socket)))
1509 return;
1510 clear_bit(SOCK_NOSPACE, &sock->flags);
1511
1512 if (unlikely(!(xprt = xprt_from_sock(sk))))
1513 return;
1514 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1515 return;
1516
1517 xprt_write_space(xprt);
1518 }
1519
1520 /**
1521 * xs_udp_write_space - callback invoked when socket buffer space
1522 * becomes available
1523 * @sk: socket whose state has changed
1524 *
1525 * Called when more output buffer space is available for this socket.
1526 * We try not to wake our writers until they can make "significant"
1527 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1528 * with a bunch of small requests.
1529 */
1530 static void xs_udp_write_space(struct sock *sk)
1531 {
1532 read_lock_bh(&sk->sk_callback_lock);
1533
1534 /* from net/core/sock.c:sock_def_write_space */
1535 if (sock_writeable(sk))
1536 xs_write_space(sk);
1537
1538 read_unlock_bh(&sk->sk_callback_lock);
1539 }
1540
1541 /**
1542 * xs_tcp_write_space - callback invoked when socket buffer space
1543 * becomes available
1544 * @sk: socket whose state has changed
1545 *
1546 * Called when more output buffer space is available for this socket.
1547 * We try not to wake our writers until they can make "significant"
1548 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1549 * with a bunch of small requests.
1550 */
1551 static void xs_tcp_write_space(struct sock *sk)
1552 {
1553 read_lock_bh(&sk->sk_callback_lock);
1554
1555 /* from net/core/stream.c:sk_stream_write_space */
1556 if (sk_stream_is_writeable(sk))
1557 xs_write_space(sk);
1558
1559 read_unlock_bh(&sk->sk_callback_lock);
1560 }
1561
1562 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1563 {
1564 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1565 struct sock *sk = transport->inet;
1566
1567 if (transport->rcvsize) {
1568 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1569 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1570 }
1571 if (transport->sndsize) {
1572 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1573 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1574 sk->sk_write_space(sk);
1575 }
1576 }
1577
1578 /**
1579 * xs_udp_set_buffer_size - set send and receive limits
1580 * @xprt: generic transport
1581 * @sndsize: requested size of send buffer, in bytes
1582 * @rcvsize: requested size of receive buffer, in bytes
1583 *
1584 * Set socket send and receive buffer size limits.
1585 */
1586 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1587 {
1588 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1589
1590 transport->sndsize = 0;
1591 if (sndsize)
1592 transport->sndsize = sndsize + 1024;
1593 transport->rcvsize = 0;
1594 if (rcvsize)
1595 transport->rcvsize = rcvsize + 1024;
1596
1597 xs_udp_do_set_buffer_size(xprt);
1598 }
1599
1600 /**
1601 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1602 * @task: task that timed out
1603 *
1604 * Adjust the congestion window after a retransmit timeout has occurred.
1605 */
1606 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1607 {
1608 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1609 }
1610
1611 static unsigned short xs_get_random_port(void)
1612 {
1613 unsigned short range = xprt_max_resvport - xprt_min_resvport;
1614 unsigned short rand = (unsigned short) prandom_u32() % range;
1615 return rand + xprt_min_resvport;
1616 }
1617
1618 /**
1619 * xs_sock_set_reuseport - set the socket's port and address reuse options
1620 * @sock: socket
1621 *
1622 * Note that this function has to be called on all sockets that share the
1623 * same port, and it must be called before binding.
1624 */
1625 static void xs_sock_set_reuseport(struct socket *sock)
1626 {
1627 int opt = 1;
1628
1629 kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1630 (char *)&opt, sizeof(opt));
1631 }
1632
1633 static unsigned short xs_sock_getport(struct socket *sock)
1634 {
1635 struct sockaddr_storage buf;
1636 int buflen;
1637 unsigned short port = 0;
1638
1639 if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
1640 goto out;
1641 switch (buf.ss_family) {
1642 case AF_INET6:
1643 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1644 break;
1645 case AF_INET:
1646 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1647 }
1648 out:
1649 return port;
1650 }
1651
1652 /**
1653 * xs_set_port - reset the port number in the remote endpoint address
1654 * @xprt: generic transport
1655 * @port: new port number
1656 *
1657 */
1658 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1659 {
1660 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1661
1662 rpc_set_port(xs_addr(xprt), port);
1663 xs_update_peer_port(xprt);
1664 }
1665
1666 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1667 {
1668 if (transport->srcport == 0)
1669 transport->srcport = xs_sock_getport(sock);
1670 }
1671
1672 static unsigned short xs_get_srcport(struct sock_xprt *transport)
1673 {
1674 unsigned short port = transport->srcport;
1675
1676 if (port == 0 && transport->xprt.resvport)
1677 port = xs_get_random_port();
1678 return port;
1679 }
1680
1681 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1682 {
1683 if (transport->srcport != 0)
1684 transport->srcport = 0;
1685 if (!transport->xprt.resvport)
1686 return 0;
1687 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1688 return xprt_max_resvport;
1689 return --port;
1690 }
1691 static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1692 {
1693 struct sockaddr_storage myaddr;
1694 int err, nloop = 0;
1695 unsigned short port = xs_get_srcport(transport);
1696 unsigned short last;
1697
1698 /*
1699 * If we are asking for any ephemeral port (i.e. port == 0 &&
1700 * transport->xprt.resvport == 0), don't bind. Let the local
1701 * port selection happen implicitly when the socket is used
1702 * (for example at connect time).
1703 *
1704 * This ensures that we can continue to establish TCP
1705 * connections even when all local ephemeral ports are already
1706 * a part of some TCP connection. This makes no difference
1707 * for UDP sockets, but also doesn't harm them.
1708 *
1709 * If we're asking for any reserved port (i.e. port == 0 &&
1710 * transport->xprt.resvport == 1) xs_get_srcport above will
1711 * ensure that port is non-zero and we will bind as needed.
1712 */
1713 if (port == 0)
1714 return 0;
1715
1716 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1717 do {
1718 rpc_set_port((struct sockaddr *)&myaddr, port);
1719 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1720 transport->xprt.addrlen);
1721 if (err == 0) {
1722 transport->srcport = port;
1723 break;
1724 }
1725 last = port;
1726 port = xs_next_srcport(transport, port);
1727 if (port > last)
1728 nloop++;
1729 } while (err == -EADDRINUSE && nloop != 2);
1730
1731 if (myaddr.ss_family == AF_INET)
1732 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1733 &((struct sockaddr_in *)&myaddr)->sin_addr,
1734 port, err ? "failed" : "ok", err);
1735 else
1736 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1737 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1738 port, err ? "failed" : "ok", err);
1739 return err;
1740 }
1741
1742 /*
1743 * We don't support autobind on AF_LOCAL sockets
1744 */
1745 static void xs_local_rpcbind(struct rpc_task *task)
1746 {
1747 rcu_read_lock();
1748 xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt));
1749 rcu_read_unlock();
1750 }
1751
1752 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1753 {
1754 }
1755
1756 #ifdef CONFIG_DEBUG_LOCK_ALLOC
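/*
 * Give RPC client sockets their own lockdep classes so that lock
 * dependencies on these sockets are not conflated with those of
 * ordinary user-space sockets.
 */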
1757 static struct lock_class_key xs_key[2];
1758 static struct lock_class_key xs_slock_key[2];
1759
1760 static inline void xs_reclassify_socketu(struct socket *sock)
1761 {
1762 struct sock *sk = sock->sk;
1763
1764 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1765 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1766 }
1767
1768 static inline void xs_reclassify_socket4(struct socket *sock)
1769 {
1770 struct sock *sk = sock->sk;
1771
1772 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1773 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1774 }
1775
1776 static inline void xs_reclassify_socket6(struct socket *sock)
1777 {
1778 struct sock *sk = sock->sk;
1779
1780 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1781 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1782 }
1783
1784 static inline void xs_reclassify_socket(int family, struct socket *sock)
1785 {
1786 WARN_ON_ONCE(sock_owned_by_user(sock->sk));
1787 if (sock_owned_by_user(sock->sk))
1788 return;
1789
1790 switch (family) {
1791 case AF_LOCAL:
1792 xs_reclassify_socketu(sock);
1793 break;
1794 case AF_INET:
1795 xs_reclassify_socket4(sock);
1796 break;
1797 case AF_INET6:
1798 xs_reclassify_socket6(sock);
1799 break;
1800 }
1801 }
1802 #else
1803 static inline void xs_reclassify_socketu(struct socket *sock)
1804 {
1805 }
1806
1807 static inline void xs_reclassify_socket4(struct socket *sock)
1808 {
1809 }
1810
1811 static inline void xs_reclassify_socket6(struct socket *sock)
1812 {
1813 }
1814
1815 static inline void xs_reclassify_socket(int family, struct socket *sock)
1816 {
1817 }
1818 #endif
1819
1820 static void xs_dummy_setup_socket(struct work_struct *work)
1821 {
1822 }
1823
1824 static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1825 struct sock_xprt *transport, int family, int type,
1826 int protocol, bool reuseport)
1827 {
1828 struct socket *sock;
1829 int err;
1830
1831 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1832 if (err < 0) {
1833 dprintk("RPC: can't create %d transport socket (%d).\n",
1834 protocol, -err);
1835 goto out;
1836 }
1837 xs_reclassify_socket(family, sock);
1838
1839 if (reuseport)
1840 xs_sock_set_reuseport(sock);
1841
1842 err = xs_bind(transport, sock);
1843 if (err) {
1844 sock_release(sock);
1845 goto out;
1846 }
1847
1848 return sock;
1849 out:
1850 return ERR_PTR(err);
1851 }
1852
1853 static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1854 struct socket *sock)
1855 {
1856 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1857 xprt);
1858
1859 if (!transport->inet) {
1860 struct sock *sk = sock->sk;
1861
1862 write_lock_bh(&sk->sk_callback_lock);
1863
1864 xs_save_old_callbacks(transport, sk);
1865
1866 sk->sk_user_data = xprt;
1867 sk->sk_data_ready = xs_local_data_ready;
1868 sk->sk_write_space = xs_udp_write_space;
1869 sk->sk_error_report = xs_error_report;
1870 sk->sk_allocation = GFP_ATOMIC;
1871
1872 xprt_clear_connected(xprt);
1873
1874 /* Reset to new socket */
1875 transport->sock = sock;
1876 transport->inet = sk;
1877
1878 write_unlock_bh(&sk->sk_callback_lock);
1879 }
1880
1881 /* Tell the socket layer to start connecting... */
1882 xprt->stat.connect_count++;
1883 xprt->stat.connect_start = jiffies;
1884 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1885 }
1886
1887 /**
1888 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1889 * @transport: socket transport to connect
1890 */
1891 static int xs_local_setup_socket(struct sock_xprt *transport)
1892 {
1893 struct rpc_xprt *xprt = &transport->xprt;
1894 struct socket *sock;
1895 int status = -EIO;
1896
1897 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1898 SOCK_STREAM, 0, &sock, 1);
1899 if (status < 0) {
1900 dprintk("RPC: can't create AF_LOCAL "
1901 "transport socket (%d).\n", -status);
1902 goto out;
1903 }
1904 xs_reclassify_socketu(sock);
1905
1906 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1907 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1908
1909 status = xs_local_finish_connecting(xprt, sock);
1910 trace_rpc_socket_connect(xprt, sock, status);
1911 switch (status) {
1912 case 0:
1913 dprintk("RPC: xprt %p connected to %s\n",
1914 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1915 xprt_set_connected(xprt);
1916 case -ENOBUFS:
1917 break;
1918 case -ENOENT:
1919 dprintk("RPC: xprt %p: socket %s does not exist\n",
1920 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1921 break;
1922 case -ECONNREFUSED:
1923 dprintk("RPC: xprt %p: connection refused for %s\n",
1924 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1925 break;
1926 default:
1927 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1928 __func__, -status,
1929 xprt->address_strings[RPC_DISPLAY_ADDR]);
1930 }
1931
1932 out:
1933 xprt_clear_connecting(xprt);
1934 xprt_wake_pending_tasks(xprt, status);
1935 return status;
1936 }
1937
1938 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1939 {
1940 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1941 int ret;
1942
1943 if (RPC_IS_ASYNC(task)) {
1944 /*
1945 * We want the AF_LOCAL connect to be resolved in the
1946 * filesystem namespace of the process making the rpc
1947 * call. Thus we connect synchronously.
1948 *
1949 * If we want to support asynchronous AF_LOCAL calls,
1950 * we'll need to figure out how to pass a namespace to
1951 * connect.
1952 */
1953 rpc_exit(task, -ENOTCONN);
1954 return;
1955 }
1956 ret = xs_local_setup_socket(transport);
1957 if (ret && !RPC_IS_SOFTCONN(task))
1958 msleep_interruptible(15000);
1959 }
1960
1961 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
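/*
 * If this transport is backing swap (xprt->swapper is non-zero), tag the
 * underlying socket so it may dip into the memory reserves; otherwise leave
 * it untouched.
 */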
1962 static void xs_set_memalloc(struct rpc_xprt *xprt)
1963 {
1964 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1965 xprt);
1966
1967 if (atomic_read(&xprt->swapper))
1968 sk_set_memalloc(transport->inet);
1969 }
1970
1971 /**
1972 * xs_swapper_enable - Tag this transport as being used for swap.
1973 * @xprt: transport to tag
1974 *
1975 * Take a reference to this transport on behalf of the rpc_clnt, and
1976 * optionally mark it for swapping if it wasn't already.
1977 */
1978 int
1979 xs_swapper_enable(struct rpc_xprt *xprt)
1980 {
1981 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1982 xprt);
1983
1984 if (atomic_inc_return(&xprt->swapper) == 1)
1985 sk_set_memalloc(transport->inet);
1986 return 0;
1987 }
1988
1989 /**
1990 * xs_swapper_disable - Untag this transport as being used for swap.
1991 * @xprt: transport to tag
1992 *
1993 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
1994 * swapper refcount goes to 0, untag the socket as a memalloc socket.
1995 */
1996 void
1997 xs_swapper_disable(struct rpc_xprt *xprt)
1998 {
1999 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2000 xprt);
2001
2002 if (atomic_dec_and_test(&xprt->swapper))
2003 sk_clear_memalloc(transport->inet);
2004 }
2005 #else
2006 static void xs_set_memalloc(struct rpc_xprt *xprt)
2007 {
2008 }
2009 #endif
2010
2011 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2012 {
2013 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2014
2015 if (!transport->inet) {
2016 struct sock *sk = sock->sk;
2017
2018 write_lock_bh(&sk->sk_callback_lock);
2019
2020 xs_save_old_callbacks(transport, sk);
2021
2022 sk->sk_user_data = xprt;
2023 sk->sk_data_ready = xs_udp_data_ready;
2024 sk->sk_write_space = xs_udp_write_space;
2025 sk->sk_allocation = GFP_ATOMIC;
2026
2027 xprt_set_connected(xprt);
2028
2029 /* Reset to new socket */
2030 transport->sock = sock;
2031 transport->inet = sk;
2032
2033 xs_set_memalloc(xprt);
2034
2035 write_unlock_bh(&sk->sk_callback_lock);
2036 }
2037 xs_udp_do_set_buffer_size(xprt);
2038 }
2039
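/*
 * Delayed-work handler: (re)create the UDP socket for this transport,
 * install the socket callbacks, and wake any tasks waiting on the connect.
 */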
2040 static void xs_udp_setup_socket(struct work_struct *work)
2041 {
2042 struct sock_xprt *transport =
2043 container_of(work, struct sock_xprt, connect_worker.work);
2044 struct rpc_xprt *xprt = &transport->xprt;
2045 struct socket *sock = transport->sock;
2046 int status = -EIO;
2047
2048 sock = xs_create_sock(xprt, transport,
2049 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2050 IPPROTO_UDP, false);
2051 if (IS_ERR(sock))
2052 goto out;
2053
2054 dprintk("RPC: worker connecting xprt %p via %s to "
2055 "%s (port %s)\n", xprt,
2056 xprt->address_strings[RPC_DISPLAY_PROTO],
2057 xprt->address_strings[RPC_DISPLAY_ADDR],
2058 xprt->address_strings[RPC_DISPLAY_PORT]);
2059
2060 xs_udp_finish_connecting(xprt, sock);
2061 trace_rpc_socket_connect(xprt, sock, 0);
2062 status = 0;
2063 out:
2064 xprt_unlock_connect(xprt, transport);
2065 xprt_clear_connecting(xprt);
2066 xprt_wake_pending_tasks(xprt, status);
2067 }
2068
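/*
 * Configure TCP keepalive and the socket callbacks on first use, then kick
 * off a non-blocking connect(); -EINPROGRESS is the normal "in flight"
 * result.
 */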
2069 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2070 {
2071 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2072 int ret = -ENOTCONN;
2073
2074 if (!transport->inet) {
2075 struct sock *sk = sock->sk;
2076 unsigned int keepidle = xprt->timeout->to_initval / HZ;
2077 unsigned int keepcnt = xprt->timeout->to_retries + 1;
2078 unsigned int opt_on = 1;
2079
2080 /* TCP Keepalive options */
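		/* keepidle doubles as the probe interval, and keepcnt tracks
		 * the RPC retry count, so a dead peer is detected on roughly
		 * the same schedule as RPC retransmissions.
		 */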
2081 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2082 (char *)&opt_on, sizeof(opt_on));
2083 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2084 (char *)&keepidle, sizeof(keepidle));
2085 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2086 (char *)&keepidle, sizeof(keepidle));
2087 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2088 (char *)&keepcnt, sizeof(keepcnt));
2089
2090 write_lock_bh(&sk->sk_callback_lock);
2091
2092 xs_save_old_callbacks(transport, sk);
2093
2094 sk->sk_user_data = xprt;
2095 sk->sk_data_ready = xs_tcp_data_ready;
2096 sk->sk_state_change = xs_tcp_state_change;
2097 sk->sk_write_space = xs_tcp_write_space;
2098 sk->sk_error_report = xs_error_report;
2099 sk->sk_allocation = GFP_ATOMIC;
2100
2101 /* socket options */
2102 sock_reset_flag(sk, SOCK_LINGER);
2103 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2104
2105 xprt_clear_connected(xprt);
2106
2107 /* Reset to new socket */
2108 transport->sock = sock;
2109 transport->inet = sk;
2110
2111 write_unlock_bh(&sk->sk_callback_lock);
2112 }
2113
2114 if (!xprt_bound(xprt))
2115 goto out;
2116
2117 xs_set_memalloc(xprt);
2118
2119 /* Tell the socket layer to start connecting... */
2120 xprt->stat.connect_count++;
2121 xprt->stat.connect_start = jiffies;
2122 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2123 switch (ret) {
2124 case 0:
2125 xs_set_srcport(transport, sock);
2126 case -EINPROGRESS:
2127 /* SYN_SENT! */
2128 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2129 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2130 }
2131 out:
2132 return ret;
2133 }
2134
2135 /**
2136 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2137 *
2138 * Invoked from a work queue.
2139 */
2140 static void xs_tcp_setup_socket(struct work_struct *work)
2141 {
2142 struct sock_xprt *transport =
2143 container_of(work, struct sock_xprt, connect_worker.work);
2144 struct socket *sock = transport->sock;
2145 struct rpc_xprt *xprt = &transport->xprt;
2146 int status = -EIO;
2147
2148 if (!sock) {
2149 sock = xs_create_sock(xprt, transport,
2150 xs_addr(xprt)->sa_family, SOCK_STREAM,
2151 IPPROTO_TCP, true);
2152 if (IS_ERR(sock)) {
2153 status = PTR_ERR(sock);
2154 goto out;
2155 }
2156 }
2157
2158 dprintk("RPC: worker connecting xprt %p via %s to "
2159 "%s (port %s)\n", xprt,
2160 xprt->address_strings[RPC_DISPLAY_PROTO],
2161 xprt->address_strings[RPC_DISPLAY_ADDR],
2162 xprt->address_strings[RPC_DISPLAY_PORT]);
2163
2164 status = xs_tcp_finish_connecting(xprt, sock);
2165 trace_rpc_socket_connect(xprt, sock, status);
2166 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2167 xprt, -status, xprt_connected(xprt),
2168 sock->sk->sk_state);
2169 switch (status) {
2170 default:
2171 printk("%s: connect returned unhandled error %d\n",
2172 __func__, status);
2173 case -EADDRNOTAVAIL:
2174 /* We're probably in TIME_WAIT. Get rid of existing socket,
2175 * and retry
2176 */
2177 xs_tcp_force_close(xprt);
2178 break;
2179 case 0:
2180 case -EINPROGRESS:
2181 case -EALREADY:
2182 xprt_unlock_connect(xprt, transport);
2183 xprt_clear_connecting(xprt);
2184 return;
2185 case -EINVAL:
2186 /* Happens, for instance, if the user specified a link
2187 * local IPv6 address without a scope-id.
2188 */
2189 case -ECONNREFUSED:
2190 case -ECONNRESET:
2191 case -ENETUNREACH:
2192 case -EADDRINUSE:
2193 case -ENOBUFS:
2194 /* retry with existing socket, after a delay */
2195 xs_tcp_force_close(xprt);
2196 goto out;
2197 }
2198 status = -EAGAIN;
2199 out:
2200 xprt_unlock_connect(xprt, transport);
2201 xprt_clear_connecting(xprt);
2202 xprt_wake_pending_tasks(xprt, status);
2203 }
2204
2205 /**
2206 * xs_connect - connect a socket to a remote endpoint
2207 * @xprt: pointer to transport structure
2208 * @task: address of RPC task that manages state of connect request
2209 *
2210 * TCP: If the remote end dropped the connection, delay reconnecting.
2211 *
2212 * UDP socket connects are synchronous, but we use a work queue anyway
2213 * to guarantee that even unprivileged user processes can set up a
2214 * socket on a privileged port.
2215 *
2216 * If a UDP socket connect fails, the delay behavior here prevents
2217 * retry floods (hard mounts).
2218 */
2219 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2220 {
2221 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2222
2223 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2224
2225 /* Start by resetting any existing state */
2226 xs_reset_transport(transport);
2227
2228 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
2229 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2230 "seconds\n",
2231 xprt, xprt->reestablish_timeout / HZ);
2232 queue_delayed_work(rpciod_workqueue,
2233 &transport->connect_worker,
2234 xprt->reestablish_timeout);
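		/* Exponential backoff, clamped to the range
		 * [XS_TCP_INIT_REEST_TO, XS_TCP_MAX_REEST_TO].
		 */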
2235 xprt->reestablish_timeout <<= 1;
2236 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2237 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2238 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2239 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
2240 } else {
2241 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2242 queue_delayed_work(rpciod_workqueue,
2243 &transport->connect_worker, 0);
2244 }
2245 }
2246
2247 /**
2248 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2249 * @xprt: rpc_xprt struct containing statistics
2250 * @seq: output file
2251 *
2252 */
2253 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2254 {
2255 long idle_time = 0;
2256
2257 if (xprt_connected(xprt))
2258 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2259
2260 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2261 "%llu %llu %lu %llu %llu\n",
2262 xprt->stat.bind_count,
2263 xprt->stat.connect_count,
2264 xprt->stat.connect_time,
2265 idle_time,
2266 xprt->stat.sends,
2267 xprt->stat.recvs,
2268 xprt->stat.bad_xids,
2269 xprt->stat.req_u,
2270 xprt->stat.bklog_u,
2271 xprt->stat.max_slots,
2272 xprt->stat.sending_u,
2273 xprt->stat.pending_u);
2274 }
2275
2276 /**
2277 * xs_udp_print_stats - display UDP socket-specific stats
2278 * @xprt: rpc_xprt struct containing statistics
2279 * @seq: output file
2280 *
2281 */
2282 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2283 {
2284 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2285
2286 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2287 "%lu %llu %llu\n",
2288 transport->srcport,
2289 xprt->stat.bind_count,
2290 xprt->stat.sends,
2291 xprt->stat.recvs,
2292 xprt->stat.bad_xids,
2293 xprt->stat.req_u,
2294 xprt->stat.bklog_u,
2295 xprt->stat.max_slots,
2296 xprt->stat.sending_u,
2297 xprt->stat.pending_u);
2298 }
2299
2300 /**
2301 * xs_tcp_print_stats - display TCP socket-specific stats
2302 * @xprt: rpc_xprt struct containing statistics
2303 * @seq: output file
2304 *
2305 */
2306 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2307 {
2308 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2309 long idle_time = 0;
2310
2311 if (xprt_connected(xprt))
2312 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2313
2314 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2315 "%llu %llu %lu %llu %llu\n",
2316 transport->srcport,
2317 xprt->stat.bind_count,
2318 xprt->stat.connect_count,
2319 xprt->stat.connect_time,
2320 idle_time,
2321 xprt->stat.sends,
2322 xprt->stat.recvs,
2323 xprt->stat.bad_xids,
2324 xprt->stat.req_u,
2325 xprt->stat.bklog_u,
2326 xprt->stat.max_slots,
2327 xprt->stat.sending_u,
2328 xprt->stat.pending_u);
2329 }
2330
2331 /*
2332 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2333 * we allocate pages instead of doing a kmalloc like rpc_malloc is that we want
2334 * to use the server-side send routines.
2335 */
2336 static void *bc_malloc(struct rpc_task *task, size_t size)
2337 {
2338 struct page *page;
2339 struct rpc_buffer *buf;
2340
2341 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2342 if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
2343 return NULL;
2344
2345 page = alloc_page(GFP_KERNEL);
2346 if (!page)
2347 return NULL;
2348
2349 buf = page_address(page);
2350 buf->len = PAGE_SIZE;
2351
2352 return buf->data;
2353 }
2354
2355 /*
2356 * Free the space allocated by the bc_malloc routine
2357 */
2358 static void bc_free(void *buffer)
2359 {
2360 struct rpc_buffer *buf;
2361
2362 if (!buffer)
2363 return;
2364
2365 buf = container_of(buffer, struct rpc_buffer, data);
2366 free_page((unsigned long)buf);
2367 }
2368
2369 /*
2370 * Use the svc_sock to send the callback. Must be called with the xpt_mutex
2371 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2372 */
2373 static int bc_sendto(struct rpc_rqst *req)
2374 {
2375 int len;
2376 struct xdr_buf *xbufp = &req->rq_snd_buf;
2377 struct rpc_xprt *xprt = req->rq_xprt;
2378 struct sock_xprt *transport =
2379 container_of(xprt, struct sock_xprt, xprt);
2380 struct socket *sock = transport->sock;
2381 unsigned long headoff;
2382 unsigned long tailoff;
2383
2384 xs_encode_stream_record_marker(xbufp);
2385
2386 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2387 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2388 len = svc_send_common(sock, xbufp,
2389 virt_to_page(xbufp->head[0].iov_base), headoff,
2390 xbufp->tail[0].iov_base, tailoff);
2391
2392 if (len != xbufp->len) {
2393 printk(KERN_NOTICE "Error sending entire callback!\n");
2394 len = -EAGAIN;
2395 }
2396
2397 return len;
2398 }
2399
2400 /*
2401 * The send routine. Borrows from svc_send
2402 */
2403 static int bc_send_request(struct rpc_task *task)
2404 {
2405 struct rpc_rqst *req = task->tk_rqstp;
2406 struct svc_xprt *xprt;
2407 u32 len;
2408
2409 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2410 /*
2411 * Get the server socket associated with this callback xprt
2412 */
2413 xprt = req->rq_xprt->bc_xprt;
2414
2415 /*
2416 * Grab the mutex to serialize data as the connection is shared
2417 * with the fore channel
2418 */
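	/* If the mutex is contended, queue the task on xpt_bc_pending and try
	 * once more; if it is still unavailable return -EAGAIN (the holder
	 * will wake us), otherwise dequeue ourselves since we now own it.
	 */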
2419 if (!mutex_trylock(&xprt->xpt_mutex)) {
2420 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2421 if (!mutex_trylock(&xprt->xpt_mutex))
2422 return -EAGAIN;
2423 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2424 }
2425 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2426 len = -ENOTCONN;
2427 else
2428 len = bc_sendto(req);
2429 mutex_unlock(&xprt->xpt_mutex);
2430
2431 if (len > 0)
2432 len = 0;
2433
2434 return len;
2435 }
2436
2437 /*
2438 * The close routine. Since this is client-initiated, we do nothing.
2439 */
2440
2441 static void bc_close(struct rpc_xprt *xprt)
2442 {
2443 }
2444
2445 /*
2446 * The xprt destroy routine. The connection is client-initiated, so there is
2447 * no socket to tear down; just free the xprt and drop the module reference.
2448 */
2449
2450 static void bc_destroy(struct rpc_xprt *xprt)
2451 {
2452 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2453
2454 xs_xprt_free(xprt);
2455 module_put(THIS_MODULE);
2456 }
2457
2458 static struct rpc_xprt_ops xs_local_ops = {
2459 .reserve_xprt = xprt_reserve_xprt,
2460 .release_xprt = xs_tcp_release_xprt,
2461 .alloc_slot = xprt_alloc_slot,
2462 .rpcbind = xs_local_rpcbind,
2463 .set_port = xs_local_set_port,
2464 .connect = xs_local_connect,
2465 .buf_alloc = rpc_malloc,
2466 .buf_free = rpc_free,
2467 .send_request = xs_local_send_request,
2468 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2469 .close = xs_close,
2470 .destroy = xs_destroy,
2471 .print_stats = xs_local_print_stats,
2472 };
2473
2474 static struct rpc_xprt_ops xs_udp_ops = {
2475 .set_buffer_size = xs_udp_set_buffer_size,
2476 .reserve_xprt = xprt_reserve_xprt_cong,
2477 .release_xprt = xprt_release_xprt_cong,
2478 .alloc_slot = xprt_alloc_slot,
2479 .rpcbind = rpcb_getport_async,
2480 .set_port = xs_set_port,
2481 .connect = xs_connect,
2482 .buf_alloc = rpc_malloc,
2483 .buf_free = rpc_free,
2484 .send_request = xs_udp_send_request,
2485 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2486 .timer = xs_udp_timer,
2487 .release_request = xprt_release_rqst_cong,
2488 .close = xs_close,
2489 .destroy = xs_destroy,
2490 .print_stats = xs_udp_print_stats,
2491 };
2492
2493 static struct rpc_xprt_ops xs_tcp_ops = {
2494 .reserve_xprt = xprt_reserve_xprt,
2495 .release_xprt = xs_tcp_release_xprt,
2496 .alloc_slot = xprt_lock_and_alloc_slot,
2497 .rpcbind = rpcb_getport_async,
2498 .set_port = xs_set_port,
2499 .connect = xs_connect,
2500 .buf_alloc = rpc_malloc,
2501 .buf_free = rpc_free,
2502 .send_request = xs_tcp_send_request,
2503 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2504 .close = xs_tcp_shutdown,
2505 .destroy = xs_destroy,
2506 .print_stats = xs_tcp_print_stats,
2507 };
2508
2509 /*
2510 * The rpc_xprt_ops for the server backchannel
2511 */
2512
2513 static struct rpc_xprt_ops bc_tcp_ops = {
2514 .reserve_xprt = xprt_reserve_xprt,
2515 .release_xprt = xprt_release_xprt,
2516 .alloc_slot = xprt_alloc_slot,
2517 .buf_alloc = bc_malloc,
2518 .buf_free = bc_free,
2519 .send_request = bc_send_request,
2520 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2521 .close = bc_close,
2522 .destroy = bc_destroy,
2523 .print_stats = xs_tcp_print_stats,
2524 };
2525
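/*
 * Fill in a wildcard (ANY) source address of the given family; AF_LOCAL
 * transports need no source address at all.
 */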
2526 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2527 {
2528 static const struct sockaddr_in sin = {
2529 .sin_family = AF_INET,
2530 .sin_addr.s_addr = htonl(INADDR_ANY),
2531 };
2532 static const struct sockaddr_in6 sin6 = {
2533 .sin6_family = AF_INET6,
2534 .sin6_addr = IN6ADDR_ANY_INIT,
2535 };
2536
2537 switch (family) {
2538 case AF_LOCAL:
2539 break;
2540 case AF_INET:
2541 memcpy(sap, &sin, sizeof(sin));
2542 break;
2543 case AF_INET6:
2544 memcpy(sap, &sin6, sizeof(sin6));
2545 break;
2546 default:
2547 dprintk("RPC: %s: Bad address family\n", __func__);
2548 return -EAFNOSUPPORT;
2549 }
2550 return 0;
2551 }
2552
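/*
 * Common transport allocation: validate the destination address length,
 * allocate the rpc_xprt/sock_xprt pair, copy in the destination address,
 * and record either the caller's source address or a wildcard one.
 */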
2553 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2554 unsigned int slot_table_size,
2555 unsigned int max_slot_table_size)
2556 {
2557 struct rpc_xprt *xprt;
2558 struct sock_xprt *new;
2559
2560 if (args->addrlen > sizeof(xprt->addr)) {
2561 dprintk("RPC: xs_setup_xprt: address too large\n");
2562 return ERR_PTR(-EBADF);
2563 }
2564
2565 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2566 max_slot_table_size);
2567 if (xprt == NULL) {
2568 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2569 "rpc_xprt\n");
2570 return ERR_PTR(-ENOMEM);
2571 }
2572
2573 new = container_of(xprt, struct sock_xprt, xprt);
2574 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2575 xprt->addrlen = args->addrlen;
2576 if (args->srcaddr)
2577 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2578 else {
2579 int err;
2580 err = xs_init_anyaddr(args->dstaddr->sa_family,
2581 (struct sockaddr *)&new->srcaddr);
2582 if (err != 0) {
2583 xprt_free(xprt);
2584 return ERR_PTR(err);
2585 }
2586 }
2587
2588 return xprt;
2589 }
2590
2591 static const struct rpc_timeout xs_local_default_timeout = {
2592 .to_initval = 10 * HZ,
2593 .to_maxval = 10 * HZ,
2594 .to_retries = 2,
2595 };
2596
2597 /**
2598 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2599 * @args: rpc transport creation arguments
2600 *
2601 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2602 */
2603 static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2604 {
2605 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2606 struct sock_xprt *transport;
2607 struct rpc_xprt *xprt;
2608 struct rpc_xprt *ret;
2609
2610 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2611 xprt_max_tcp_slot_table_entries);
2612 if (IS_ERR(xprt))
2613 return xprt;
2614 transport = container_of(xprt, struct sock_xprt, xprt);
2615
2616 xprt->prot = 0;
2617 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2618 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2619
2620 xprt->bind_timeout = XS_BIND_TO;
2621 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2622 xprt->idle_timeout = XS_IDLE_DISC_TO;
2623
2624 xprt->ops = &xs_local_ops;
2625 xprt->timeout = &xs_local_default_timeout;
2626
2627 INIT_DELAYED_WORK(&transport->connect_worker,
2628 xs_dummy_setup_socket);
2629
2630 switch (sun->sun_family) {
2631 case AF_LOCAL:
2632 if (sun->sun_path[0] != '/') {
2633 dprintk("RPC: bad AF_LOCAL address: %s\n",
2634 sun->sun_path);
2635 ret = ERR_PTR(-EINVAL);
2636 goto out_err;
2637 }
2638 xprt_set_bound(xprt);
2639 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2640 ret = ERR_PTR(xs_local_setup_socket(transport));
2641 if (ret)
2642 goto out_err;
2643 break;
2644 default:
2645 ret = ERR_PTR(-EAFNOSUPPORT);
2646 goto out_err;
2647 }
2648
2649 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2650 xprt->address_strings[RPC_DISPLAY_ADDR]);
2651
2652 if (try_module_get(THIS_MODULE))
2653 return xprt;
2654 ret = ERR_PTR(-EINVAL);
2655 out_err:
2656 xs_xprt_free(xprt);
2657 return ret;
2658 }
2659
2660 static const struct rpc_timeout xs_udp_default_timeout = {
2661 .to_initval = 5 * HZ,
2662 .to_maxval = 30 * HZ,
2663 .to_increment = 5 * HZ,
2664 .to_retries = 5,
2665 };
2666
2667 /**
2668 * xs_setup_udp - Set up transport to use a UDP socket
2669 * @args: rpc transport creation arguments
2670 *
2671 */
2672 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2673 {
2674 struct sockaddr *addr = args->dstaddr;
2675 struct rpc_xprt *xprt;
2676 struct sock_xprt *transport;
2677 struct rpc_xprt *ret;
2678
2679 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2680 xprt_udp_slot_table_entries);
2681 if (IS_ERR(xprt))
2682 return xprt;
2683 transport = container_of(xprt, struct sock_xprt, xprt);
2684
2685 xprt->prot = IPPROTO_UDP;
2686 xprt->tsh_size = 0;
2687 /* XXX: header size can vary due to auth type, IPv6, etc. */
2688 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2689
2690 xprt->bind_timeout = XS_BIND_TO;
2691 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2692 xprt->idle_timeout = XS_IDLE_DISC_TO;
2693
2694 xprt->ops = &xs_udp_ops;
2695
2696 xprt->timeout = &xs_udp_default_timeout;
2697
2698 switch (addr->sa_family) {
2699 case AF_INET:
2700 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2701 xprt_set_bound(xprt);
2702
2703 INIT_DELAYED_WORK(&transport->connect_worker,
2704 xs_udp_setup_socket);
2705 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2706 break;
2707 case AF_INET6:
2708 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2709 xprt_set_bound(xprt);
2710
2711 INIT_DELAYED_WORK(&transport->connect_worker,
2712 xs_udp_setup_socket);
2713 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2714 break;
2715 default:
2716 ret = ERR_PTR(-EAFNOSUPPORT);
2717 goto out_err;
2718 }
2719
2720 if (xprt_bound(xprt))
2721 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2722 xprt->address_strings[RPC_DISPLAY_ADDR],
2723 xprt->address_strings[RPC_DISPLAY_PORT],
2724 xprt->address_strings[RPC_DISPLAY_PROTO]);
2725 else
2726 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2727 xprt->address_strings[RPC_DISPLAY_ADDR],
2728 xprt->address_strings[RPC_DISPLAY_PROTO]);
2729
2730 if (try_module_get(THIS_MODULE))
2731 return xprt;
2732 ret = ERR_PTR(-EINVAL);
2733 out_err:
2734 xs_xprt_free(xprt);
2735 return ret;
2736 }
2737
2738 static const struct rpc_timeout xs_tcp_default_timeout = {
2739 .to_initval = 60 * HZ,
2740 .to_maxval = 60 * HZ,
2741 .to_retries = 2,
2742 };
2743
2744 /**
2745 * xs_setup_tcp - Set up transport to use a TCP socket
2746 * @args: rpc transport creation arguments
2747 *
2748 */
2749 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2750 {
2751 struct sockaddr *addr = args->dstaddr;
2752 struct rpc_xprt *xprt;
2753 struct sock_xprt *transport;
2754 struct rpc_xprt *ret;
2755 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2756
2757 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2758 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2759
2760 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2761 max_slot_table_size);
2762 if (IS_ERR(xprt))
2763 return xprt;
2764 transport = container_of(xprt, struct sock_xprt, xprt);
2765
2766 xprt->prot = IPPROTO_TCP;
2767 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2768 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2769
2770 xprt->bind_timeout = XS_BIND_TO;
2771 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2772 xprt->idle_timeout = XS_IDLE_DISC_TO;
2773
2774 xprt->ops = &xs_tcp_ops;
2775 xprt->timeout = &xs_tcp_default_timeout;
2776
2777 switch (addr->sa_family) {
2778 case AF_INET:
2779 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2780 xprt_set_bound(xprt);
2781
2782 INIT_DELAYED_WORK(&transport->connect_worker,
2783 xs_tcp_setup_socket);
2784 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2785 break;
2786 case AF_INET6:
2787 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2788 xprt_set_bound(xprt);
2789
2790 INIT_DELAYED_WORK(&transport->connect_worker,
2791 xs_tcp_setup_socket);
2792 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2793 break;
2794 default:
2795 ret = ERR_PTR(-EAFNOSUPPORT);
2796 goto out_err;
2797 }
2798
2799 if (xprt_bound(xprt))
2800 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2801 xprt->address_strings[RPC_DISPLAY_ADDR],
2802 xprt->address_strings[RPC_DISPLAY_PORT],
2803 xprt->address_strings[RPC_DISPLAY_PROTO]);
2804 else
2805 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2806 xprt->address_strings[RPC_DISPLAY_ADDR],
2807 xprt->address_strings[RPC_DISPLAY_PROTO]);
2808
2809 if (try_module_get(THIS_MODULE))
2810 return xprt;
2811 ret = ERR_PTR(-EINVAL);
2812 out_err:
2813 xs_xprt_free(xprt);
2814 return ret;
2815 }
2816
2817 /**
2818 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2819 * @args: rpc transport creation arguments
2820 *
2821 */
2822 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2823 {
2824 struct sockaddr *addr = args->dstaddr;
2825 struct rpc_xprt *xprt;
2826 struct sock_xprt *transport;
2827 struct svc_sock *bc_sock;
2828 struct rpc_xprt *ret;
2829
2830 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2831 xprt_tcp_slot_table_entries);
2832 if (IS_ERR(xprt))
2833 return xprt;
2834 transport = container_of(xprt, struct sock_xprt, xprt);
2835
2836 xprt->prot = IPPROTO_TCP;
2837 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2838 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2839 xprt->timeout = &xs_tcp_default_timeout;
2840
2841 /* backchannel */
2842 xprt_set_bound(xprt);
2843 xprt->bind_timeout = 0;
2844 xprt->reestablish_timeout = 0;
2845 xprt->idle_timeout = 0;
2846
2847 xprt->ops = &bc_tcp_ops;
2848
2849 switch (addr->sa_family) {
2850 case AF_INET:
2851 xs_format_peer_addresses(xprt, "tcp",
2852 RPCBIND_NETID_TCP);
2853 break;
2854 case AF_INET6:
2855 xs_format_peer_addresses(xprt, "tcp",
2856 RPCBIND_NETID_TCP6);
2857 break;
2858 default:
2859 ret = ERR_PTR(-EAFNOSUPPORT);
2860 goto out_err;
2861 }
2862
2863 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2864 xprt->address_strings[RPC_DISPLAY_ADDR],
2865 xprt->address_strings[RPC_DISPLAY_PORT],
2866 xprt->address_strings[RPC_DISPLAY_PROTO]);
2867
2868 /*
2869 * Once we've associated a backchannel xprt with a connection,
2870 * we want to keep it around as long as the connection lasts,
2871 * in case we need to start using it for a backchannel again;
2872 * this reference won't be dropped until bc_xprt is destroyed.
2873 */
2874 xprt_get(xprt);
2875 args->bc_xprt->xpt_bc_xprt = xprt;
2876 xprt->bc_xprt = args->bc_xprt;
2877 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
2878 transport->sock = bc_sock->sk_sock;
2879 transport->inet = bc_sock->sk_sk;
2880
2881 /*
2882 * Since we don't want connections for the backchannel, we set
2883 * the xprt status to connected
2884 */
2885 xprt_set_connected(xprt);
2886
2887 if (try_module_get(THIS_MODULE))
2888 return xprt;
2889
2890 args->bc_xprt->xpt_bc_xprt = NULL;
2891 xprt_put(xprt);
2892 ret = ERR_PTR(-EINVAL);
2893 out_err:
2894 xs_xprt_free(xprt);
2895 return ret;
2896 }
2897
2898 static struct xprt_class xs_local_transport = {
2899 .list = LIST_HEAD_INIT(xs_local_transport.list),
2900 .name = "named UNIX socket",
2901 .owner = THIS_MODULE,
2902 .ident = XPRT_TRANSPORT_LOCAL,
2903 .setup = xs_setup_local,
2904 };
2905
2906 static struct xprt_class xs_udp_transport = {
2907 .list = LIST_HEAD_INIT(xs_udp_transport.list),
2908 .name = "udp",
2909 .owner = THIS_MODULE,
2910 .ident = XPRT_TRANSPORT_UDP,
2911 .setup = xs_setup_udp,
2912 };
2913
2914 static struct xprt_class xs_tcp_transport = {
2915 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
2916 .name = "tcp",
2917 .owner = THIS_MODULE,
2918 .ident = XPRT_TRANSPORT_TCP,
2919 .setup = xs_setup_tcp,
2920 };
2921
2922 static struct xprt_class xs_bc_tcp_transport = {
2923 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
2924 .name = "tcp NFSv4.1 backchannel",
2925 .owner = THIS_MODULE,
2926 .ident = XPRT_TRANSPORT_BC_TCP,
2927 .setup = xs_setup_bc_tcp,
2928 };
2929
2930 /**
2931 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
2932 *
2933 */
2934 int init_socket_xprt(void)
2935 {
2936 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
2937 if (!sunrpc_table_header)
2938 sunrpc_table_header = register_sysctl_table(sunrpc_table);
2939 #endif
2940
2941 xprt_register_transport(&xs_local_transport);
2942 xprt_register_transport(&xs_udp_transport);
2943 xprt_register_transport(&xs_tcp_transport);
2944 xprt_register_transport(&xs_bc_tcp_transport);
2945
2946 return 0;
2947 }
2948
2949 /**
2950 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
2951 *
2952 */
2953 void cleanup_socket_xprt(void)
2954 {
2955 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
2956 if (sunrpc_table_header) {
2957 unregister_sysctl_table(sunrpc_table_header);
2958 sunrpc_table_header = NULL;
2959 }
2960 #endif
2961
2962 xprt_unregister_transport(&xs_local_transport);
2963 xprt_unregister_transport(&xs_udp_transport);
2964 xprt_unregister_transport(&xs_tcp_transport);
2965 xprt_unregister_transport(&xs_bc_tcp_transport);
2966 }
2967
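/*
 * Shared setter for the module parameters below: parse an unsigned int and
 * reject values outside [min, max].
 */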
2968 static int param_set_uint_minmax(const char *val,
2969 const struct kernel_param *kp,
2970 unsigned int min, unsigned int max)
2971 {
2972 unsigned int num;
2973 int ret;
2974
2975 if (!val)
2976 return -EINVAL;
2977 ret = kstrtouint(val, 0, &num);
2978 if (ret == -EINVAL || num < min || num > max)
2979 return -EINVAL;
2980 *((unsigned int *)kp->arg) = num;
2981 return 0;
2982 }
2983
2984 static int param_set_portnr(const char *val, const struct kernel_param *kp)
2985 {
2986 return param_set_uint_minmax(val, kp,
2987 RPC_MIN_RESVPORT,
2988 RPC_MAX_RESVPORT);
2989 }
2990
2991 static struct kernel_param_ops param_ops_portnr = {
2992 .set = param_set_portnr,
2993 .get = param_get_uint,
2994 };
2995
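/*
 * The param_ops_portnr/param_check_portnr pair lets module_param_named()
 * treat "portnr" as a parameter type, so the reserved-port limits below are
 * range-checked whenever they are set.
 */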
2996 #define param_check_portnr(name, p) \
2997 __param_check(name, p, unsigned int);
2998
2999 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3000 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3001
3002 static int param_set_slot_table_size(const char *val,
3003 const struct kernel_param *kp)
3004 {
3005 return param_set_uint_minmax(val, kp,
3006 RPC_MIN_SLOT_TABLE,
3007 RPC_MAX_SLOT_TABLE);
3008 }
3009
3010 static struct kernel_param_ops param_ops_slot_table_size = {
3011 .set = param_set_slot_table_size,
3012 .get = param_get_uint,
3013 };
3014
3015 #define param_check_slot_table_size(name, p) \
3016 __param_check(name, p, unsigned int);
3017
3018 static int param_set_max_slot_table_size(const char *val,
3019 const struct kernel_param *kp)
3020 {
3021 return param_set_uint_minmax(val, kp,
3022 RPC_MIN_SLOT_TABLE,
3023 RPC_MAX_SLOT_TABLE_LIMIT);
3024 }
3025
3026 static struct kernel_param_ops param_ops_max_slot_table_size = {
3027 .set = param_set_max_slot_table_size,
3028 .get = param_get_uint,
3029 };
3030
3031 #define param_check_max_slot_table_size(name, p) \
3032 __param_check(name, p, unsigned int);
3033
3034 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3035 slot_table_size, 0644);
3036 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3037 max_slot_table_size, 0644);
3038 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3039 slot_table_size, 0644);
3040