1 /*
2 * linux/net/sunrpc/xprtsock.c
3 *
4 * Client-side transport implementation for sockets.
5 *
6 * TCP callback races fixes (C) 1998 Red Hat
7 * TCP send fixes (C) 1998 Red Hat
8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10 *
11 * Rewrite of large parts of the code in order to stabilize TCP stuff.
12 * Fix behaviour when socket buffer is full.
13 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
14 *
15 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
16 *
17 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
18 * <gilles.quillard@bull.net>
19 */
20
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/capability.h>
26 #include <linux/pagemap.h>
27 #include <linux/errno.h>
28 #include <linux/socket.h>
29 #include <linux/in.h>
30 #include <linux/net.h>
31 #include <linux/mm.h>
32 #include <linux/un.h>
33 #include <linux/udp.h>
34 #include <linux/tcp.h>
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/addr.h>
37 #include <linux/sunrpc/sched.h>
38 #include <linux/sunrpc/svcsock.h>
39 #include <linux/sunrpc/xprtsock.h>
40 #include <linux/file.h>
41 #ifdef CONFIG_SUNRPC_BACKCHANNEL
42 #include <linux/sunrpc/bc_xprt.h>
43 #endif
44
45 #include <net/sock.h>
46 #include <net/checksum.h>
47 #include <net/udp.h>
48 #include <net/tcp.h>
49
50 #include <trace/events/sunrpc.h>
51
52 #include "sunrpc.h"
53
54 static void xs_close(struct rpc_xprt *xprt);
55
56 /*
57 * xprtsock tunables
58 */
59 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
60 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
61 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
62
63 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
64 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
65
66 #define XS_TCP_LINGER_TO (15U * HZ)
67 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
68
69 /*
70 * We can register our own files under /proc/sys/sunrpc by
71 * calling register_sysctl_table() again. The files in that
72 * directory become the union of all files registered there.
73 *
74 * We simply need to make sure that we don't collide with
75 * someone else's file names!
76 */
77
78 #ifdef RPC_DEBUG
79
80 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
81 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
82 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
83 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
84 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
85
86 static struct ctl_table_header *sunrpc_table_header;
87
88 /*
89 * FIXME: changing the UDP slot table size should also resize the UDP
90 * socket buffers for existing UDP transports
91 */
92 static struct ctl_table xs_tunables_table[] = {
93 {
94 .procname = "udp_slot_table_entries",
95 .data = &xprt_udp_slot_table_entries,
96 .maxlen = sizeof(unsigned int),
97 .mode = 0644,
98 .proc_handler = proc_dointvec_minmax,
99 .extra1 = &min_slot_table_size,
100 .extra2 = &max_slot_table_size
101 },
102 {
103 .procname = "tcp_slot_table_entries",
104 .data = &xprt_tcp_slot_table_entries,
105 .maxlen = sizeof(unsigned int),
106 .mode = 0644,
107 .proc_handler = proc_dointvec_minmax,
108 .extra1 = &min_slot_table_size,
109 .extra2 = &max_slot_table_size
110 },
111 {
112 .procname = "tcp_max_slot_table_entries",
113 .data = &xprt_max_tcp_slot_table_entries,
114 .maxlen = sizeof(unsigned int),
115 .mode = 0644,
116 .proc_handler = proc_dointvec_minmax,
117 .extra1 = &min_slot_table_size,
118 .extra2 = &max_tcp_slot_table_limit
119 },
120 {
121 .procname = "min_resvport",
122 .data = &xprt_min_resvport,
123 .maxlen = sizeof(unsigned int),
124 .mode = 0644,
125 .proc_handler = proc_dointvec_minmax,
126 .extra1 = &xprt_min_resvport_limit,
127 .extra2 = &xprt_max_resvport_limit
128 },
129 {
130 .procname = "max_resvport",
131 .data = &xprt_max_resvport,
132 .maxlen = sizeof(unsigned int),
133 .mode = 0644,
134 .proc_handler = proc_dointvec_minmax,
135 .extra1 = &xprt_min_resvport_limit,
136 .extra2 = &xprt_max_resvport_limit
137 },
138 {
139 .procname = "tcp_fin_timeout",
140 .data = &xs_tcp_fin_timeout,
141 .maxlen = sizeof(xs_tcp_fin_timeout),
142 .mode = 0644,
143 .proc_handler = proc_dointvec_jiffies,
144 },
145 { },
146 };
147
148 static struct ctl_table sunrpc_table[] = {
149 {
150 .procname = "sunrpc",
151 .mode = 0555,
152 .child = xs_tunables_table
153 },
154 { },
155 };
156
157 #endif
158
159 /*
160 * Wait duration for a reply from the RPC portmapper.
161 */
162 #define XS_BIND_TO (60U * HZ)
163
164 /*
165 * Delay if a UDP socket connect error occurs. This is most likely some
166 * kind of resource problem on the local host.
167 */
168 #define XS_UDP_REEST_TO (2U * HZ)
169
170 /*
171 * The reestablish timeout allows clients to delay for a bit before attempting
172 * to reconnect to a server that just dropped our connection.
173 *
174 * We implement an exponential backoff when trying to reestablish a TCP
175 * transport connection with the server. Some servers like to drop a TCP
176 * connection when they are overworked, so we start with a short timeout and
177 * increase over time if the server is down or not responding.
178 */
179 #define XS_TCP_INIT_REEST_TO (3U * HZ)
180 #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
181
182 /*
183 * TCP idle timeout; client drops the transport socket if it is idle
184 * for this long. Note that we also time out UDP sockets to prevent
185 * holding port numbers when there is no RPC traffic.
186 */
187 #define XS_IDLE_DISC_TO (5U * 60 * HZ)
188
189 #ifdef RPC_DEBUG
190 # undef RPC_DEBUG_DATA
191 # define RPCDBG_FACILITY RPCDBG_TRANS
192 #endif
193
194 #ifdef RPC_DEBUG_DATA
195 static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
196 {
197 u8 *buf = (u8 *) packet;
198 int j;
199
200 dprintk("RPC: %s\n", msg);
201 for (j = 0; j < count && j < 128; j += 4) {
202 if (!(j & 31)) {
203 if (j)
204 dprintk("\n");
205 dprintk("0x%04x ", j);
206 }
207 dprintk("%02x%02x%02x%02x ",
208 buf[j], buf[j+1], buf[j+2], buf[j+3]);
209 }
210 dprintk("\n");
211 }
212 #else
213 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
214 {
215 /* NOP */
216 }
217 #endif
218
219 struct sock_xprt {
220 struct rpc_xprt xprt;
221
222 /*
223 * Network layer
224 */
225 struct socket * sock;
226 struct sock * inet;
227
228 /*
229 * State of TCP reply receive
230 */
231 __be32 tcp_fraghdr,
232 tcp_xid,
233 tcp_calldir;
234
235 u32 tcp_offset,
236 tcp_reclen;
237
238 unsigned long tcp_copied,
239 tcp_flags;
240
241 /*
242 * Connection of transports
243 */
244 struct delayed_work connect_worker;
245 struct sockaddr_storage srcaddr;
246 unsigned short srcport;
247
248 /*
249 * UDP socket buffer size parameters
250 */
251 size_t rcvsize,
252 sndsize;
253
254 /*
255 * Saved socket callback addresses
256 */
257 void (*old_data_ready)(struct sock *, int);
258 void (*old_state_change)(struct sock *);
259 void (*old_write_space)(struct sock *);
260 };
261
262 /*
263 * TCP receive state flags
264 */
265 #define TCP_RCV_LAST_FRAG (1UL << 0)
266 #define TCP_RCV_COPY_FRAGHDR (1UL << 1)
267 #define TCP_RCV_COPY_XID (1UL << 2)
268 #define TCP_RCV_COPY_DATA (1UL << 3)
269 #define TCP_RCV_READ_CALLDIR (1UL << 4)
270 #define TCP_RCV_COPY_CALLDIR (1UL << 5)
271
272 /*
273 * TCP RPC flags
274 */
275 #define TCP_RPC_REPLY (1UL << 6)
276
277 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
278 {
279 return (struct sockaddr *) &xprt->addr;
280 }
281
282 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
283 {
284 return (struct sockaddr_un *) &xprt->addr;
285 }
286
287 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
288 {
289 return (struct sockaddr_in *) &xprt->addr;
290 }
291
292 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
293 {
294 return (struct sockaddr_in6 *) &xprt->addr;
295 }
296
297 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
298 {
299 struct sockaddr *sap = xs_addr(xprt);
300 struct sockaddr_in6 *sin6;
301 struct sockaddr_in *sin;
302 struct sockaddr_un *sun;
303 char buf[128];
304
305 switch (sap->sa_family) {
306 case AF_LOCAL:
307 sun = xs_addr_un(xprt);
308 strlcpy(buf, sun->sun_path, sizeof(buf));
309 xprt->address_strings[RPC_DISPLAY_ADDR] =
310 kstrdup(buf, GFP_KERNEL);
311 break;
312 case AF_INET:
313 (void)rpc_ntop(sap, buf, sizeof(buf));
314 xprt->address_strings[RPC_DISPLAY_ADDR] =
315 kstrdup(buf, GFP_KERNEL);
316 sin = xs_addr_in(xprt);
317 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
318 break;
319 case AF_INET6:
320 (void)rpc_ntop(sap, buf, sizeof(buf));
321 xprt->address_strings[RPC_DISPLAY_ADDR] =
322 kstrdup(buf, GFP_KERNEL);
323 sin6 = xs_addr_in6(xprt);
324 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
325 break;
326 default:
327 BUG();
328 }
329
330 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
331 }
332
333 static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
334 {
335 struct sockaddr *sap = xs_addr(xprt);
336 char buf[128];
337
338 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
339 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
340
341 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
342 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
343 }
344
345 static void xs_format_peer_addresses(struct rpc_xprt *xprt,
346 const char *protocol,
347 const char *netid)
348 {
349 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
350 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
351 xs_format_common_peer_addresses(xprt);
352 xs_format_common_peer_ports(xprt);
353 }
354
355 static void xs_update_peer_port(struct rpc_xprt *xprt)
356 {
357 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
358 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
359
360 xs_format_common_peer_ports(xprt);
361 }
362
363 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
364 {
365 unsigned int i;
366
367 for (i = 0; i < RPC_DISPLAY_MAX; i++)
368 switch (i) {
369 case RPC_DISPLAY_PROTO:
370 case RPC_DISPLAY_NETID:
371 continue;
372 default:
373 kfree(xprt->address_strings[i]);
374 }
375 }
376
377 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
378
379 static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
380 {
381 struct msghdr msg = {
382 .msg_name = addr,
383 .msg_namelen = addrlen,
384 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
385 };
386 struct kvec iov = {
387 .iov_base = vec->iov_base + base,
388 .iov_len = vec->iov_len - base,
389 };
390
391 if (iov.iov_len != 0)
392 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
393 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
394 }
395
396 static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
397 {
398 struct page **ppage;
399 unsigned int remainder;
400 int err, sent = 0;
401
402 remainder = xdr->page_len - base;
403 base += xdr->page_base;
404 ppage = xdr->pages + (base >> PAGE_SHIFT);
405 base &= ~PAGE_MASK;
406 for(;;) {
407 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
408 int flags = XS_SENDMSG_FLAGS;
409
410 remainder -= len;
411 if (remainder != 0 || more)
412 flags |= MSG_MORE;
413 err = sock->ops->sendpage(sock, *ppage, base, len, flags);
414 if (remainder == 0 || err != len)
415 break;
416 sent += err;
417 ppage++;
418 base = 0;
419 }
420 if (sent == 0)
421 return err;
422 if (err > 0)
423 sent += err;
424 return sent;
425 }
426
427 /**
428 * xs_sendpages - write pages directly to a socket
429 * @sock: socket to send on
430 * @addr: UDP only -- address of destination
431 * @addrlen: UDP only -- length of destination address
432 * @xdr: buffer containing this request
433 * @base: starting position in the buffer
434 *
435 */
436 static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
437 {
438 unsigned int remainder = xdr->len - base;
439 int err, sent = 0;
440
441 if (unlikely(!sock))
442 return -ENOTSOCK;
443
444 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
445 if (base != 0) {
446 addr = NULL;
447 addrlen = 0;
448 }
449
450 if (base < xdr->head[0].iov_len || addr != NULL) {
451 unsigned int len = xdr->head[0].iov_len - base;
452 remainder -= len;
453 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
454 if (remainder == 0 || err != len)
455 goto out;
456 sent += err;
457 base = 0;
458 } else
459 base -= xdr->head[0].iov_len;
460
461 if (base < xdr->page_len) {
462 unsigned int len = xdr->page_len - base;
463 remainder -= len;
464 err = xs_send_pagedata(sock, xdr, base, remainder != 0);
465 if (remainder == 0 || err != len)
466 goto out;
467 sent += err;
468 base = 0;
469 } else
470 base -= xdr->page_len;
471
472 if (base >= xdr->tail[0].iov_len)
473 return sent;
474 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
475 out:
476 if (sent == 0)
477 return err;
478 if (err > 0)
479 sent += err;
480 return sent;
481 }
482
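/*
 * Undo the write-space bookkeeping taken in xs_nospace(): drop this
 * socket's pending-writer count and clear SOCK_ASYNC_NOSPACE.
 */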
483 static void xs_nospace_callback(struct rpc_task *task)
484 {
485 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
486
487 transport->inet->sk_write_pending--;
488 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
489 }
490
491 /**
492 * xs_nospace - place task on wait queue if transmit was incomplete
493 * @task: task to put to sleep
494 *
495 */
496 static int xs_nospace(struct rpc_task *task)
497 {
498 struct rpc_rqst *req = task->tk_rqstp;
499 struct rpc_xprt *xprt = req->rq_xprt;
500 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
501 int ret = -EAGAIN;
502
503 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
504 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
505 req->rq_slen);
506
507 /* Protect against races with write_space */
508 spin_lock_bh(&xprt->transport_lock);
509
510 /* Don't race with disconnect */
511 if (xprt_connected(xprt)) {
512 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
513 /*
514 * Notify TCP that we're limited by the application
515 * window size
516 */
517 set_bit(SOCK_NOSPACE, &transport->sock->flags);
518 transport->inet->sk_write_pending++;
519 /* ...and wait for more buffer space */
520 xprt_wait_for_buffer_space(task, xs_nospace_callback);
521 }
522 } else {
523 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
524 ret = -ENOTCONN;
525 }
526
527 spin_unlock_bh(&xprt->transport_lock);
528 return ret;
529 }
530
531 /*
532 * Construct a stream transport record marker in @buf.
533 */
534 static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
535 {
536 u32 reclen = buf->len - sizeof(rpc_fraghdr);
537 rpc_fraghdr *base = buf->head[0].iov_base;
538 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
539 }
540
541 /**
542 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
543 * @task: RPC task that manages the state of an RPC request
544 *
545 * Return values:
546 * 0: The request has been sent
547 * EAGAIN: The socket was blocked, please call again later to
548 * complete the request
549 * ENOTCONN: Caller needs to invoke connect logic then call again
551 * other: Some other error occurred, the request was not sent
551 */
552 static int xs_local_send_request(struct rpc_task *task)
553 {
554 struct rpc_rqst *req = task->tk_rqstp;
555 struct rpc_xprt *xprt = req->rq_xprt;
556 struct sock_xprt *transport =
557 container_of(xprt, struct sock_xprt, xprt);
558 struct xdr_buf *xdr = &req->rq_snd_buf;
559 int status;
560
561 xs_encode_stream_record_marker(&req->rq_snd_buf);
562
563 xs_pktdump("packet data:",
564 req->rq_svec->iov_base, req->rq_svec->iov_len);
565
566 status = xs_sendpages(transport->sock, NULL, 0,
567 xdr, req->rq_bytes_sent);
568 dprintk("RPC: %s(%u) = %d\n",
569 __func__, xdr->len - req->rq_bytes_sent, status);
570 if (likely(status >= 0)) {
571 req->rq_bytes_sent += status;
572 req->rq_xmit_bytes_sent += status;
573 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
574 req->rq_bytes_sent = 0;
575 return 0;
576 }
577 status = -EAGAIN;
578 }
579
580 switch (status) {
581 case -EAGAIN:
582 status = xs_nospace(task);
583 break;
584 default:
585 dprintk("RPC: sendmsg returned unrecognized error %d\n",
586 -status);
587 case -EPIPE:
588 xs_close(xprt);
589 status = -ENOTCONN;
590 }
591
592 return status;
593 }
594
595 /**
596 * xs_udp_send_request - write an RPC request to a UDP socket
597 * @task: address of RPC task that manages the state of an RPC request
598 *
599 * Return values:
600 * 0: The request has been sent
601 * EAGAIN: The socket was blocked, please call again later to
602 * complete the request
603 * ENOTCONN: Caller needs to invoke connect logic then call again
604 * other: Some other error occurred, the request was not sent
605 */
606 static int xs_udp_send_request(struct rpc_task *task)
607 {
608 struct rpc_rqst *req = task->tk_rqstp;
609 struct rpc_xprt *xprt = req->rq_xprt;
610 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
611 struct xdr_buf *xdr = &req->rq_snd_buf;
612 int status;
613
614 xs_pktdump("packet data:",
615 req->rq_svec->iov_base,
616 req->rq_svec->iov_len);
617
618 if (!xprt_bound(xprt))
619 return -ENOTCONN;
620 status = xs_sendpages(transport->sock,
621 xs_addr(xprt),
622 xprt->addrlen, xdr,
623 req->rq_bytes_sent);
624
625 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
626 xdr->len - req->rq_bytes_sent, status);
627
628 if (status >= 0) {
629 req->rq_xmit_bytes_sent += status;
630 if (status >= req->rq_slen)
631 return 0;
632 /* Still some bytes left; set up for a retry later. */
633 status = -EAGAIN;
634 }
635
636 switch (status) {
637 case -ENOTSOCK:
638 status = -ENOTCONN;
639 /* Should we call xs_close() here? */
640 break;
641 case -EAGAIN:
642 status = xs_nospace(task);
643 break;
644 default:
645 dprintk("RPC: sendmsg returned unrecognized error %d\n",
646 -status);
647 case -ENETUNREACH:
648 case -EPIPE:
649 case -ECONNREFUSED:
650 /* When the server has died, an ICMP port unreachable message
651 * prompts ECONNREFUSED. */
652 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
653 }
654
655 return status;
656 }
657
658 /**
659 * xs_tcp_shutdown - gracefully shut down a TCP socket
660 * @xprt: transport
661 *
662 * Initiates a graceful shutdown of the TCP socket by calling the
663 * equivalent of shutdown(SHUT_WR);
664 */
665 static void xs_tcp_shutdown(struct rpc_xprt *xprt)
666 {
667 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
668 struct socket *sock = transport->sock;
669
670 if (sock != NULL) {
671 kernel_sock_shutdown(sock, SHUT_WR);
672 trace_rpc_socket_shutdown(xprt, sock);
673 }
674 }
675
676 /**
677 * xs_tcp_send_request - write an RPC request to a TCP socket
678 * @task: address of RPC task that manages the state of an RPC request
679 *
680 * Return values:
681 * 0: The request has been sent
682 * EAGAIN: The socket was blocked, please call again later to
683 * complete the request
684 * ENOTCONN: Caller needs to invoke connect logic then call again
685 * other: Some other error occurred, the request was not sent
686 *
687 * XXX: In the case of soft timeouts, should we eventually give up
688 * if sendmsg is not able to make progress?
689 */
690 static int xs_tcp_send_request(struct rpc_task *task)
691 {
692 struct rpc_rqst *req = task->tk_rqstp;
693 struct rpc_xprt *xprt = req->rq_xprt;
694 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
695 struct xdr_buf *xdr = &req->rq_snd_buf;
696 int status;
697
698 xs_encode_stream_record_marker(&req->rq_snd_buf);
699
700 xs_pktdump("packet data:",
701 req->rq_svec->iov_base,
702 req->rq_svec->iov_len);
703
704 /* Continue transmitting the packet/record. We must be careful
705 * to cope with writespace callbacks arriving _after_ we have
706 * called sendmsg(). */
707 while (1) {
708 status = xs_sendpages(transport->sock,
709 NULL, 0, xdr, req->rq_bytes_sent);
710
711 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
712 xdr->len - req->rq_bytes_sent, status);
713
714 if (unlikely(status < 0))
715 break;
716
717 /* If we've sent the entire packet, immediately
718 * reset the count of bytes sent. */
719 req->rq_bytes_sent += status;
720 req->rq_xmit_bytes_sent += status;
721 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
722 req->rq_bytes_sent = 0;
723 return 0;
724 }
725
726 if (status != 0)
727 continue;
728 status = -EAGAIN;
729 break;
730 }
731
732 switch (status) {
733 case -ENOTSOCK:
734 status = -ENOTCONN;
735 /* Should we call xs_close() here? */
736 break;
737 case -EAGAIN:
738 status = xs_nospace(task);
739 break;
740 default:
741 dprintk("RPC: sendmsg returned unrecognized error %d\n",
742 -status);
743 case -ECONNRESET:
744 xs_tcp_shutdown(xprt);
745 case -ECONNREFUSED:
746 case -ENOTCONN:
747 case -EPIPE:
748 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
749 }
750
751 return status;
752 }
753
754 /**
755 * xs_tcp_release_xprt - clean up after a tcp transmission
756 * @xprt: transport
757 * @task: rpc task
758 *
759 * This cleans up if an error causes us to abort the transmission of a request.
760 * In this case, the socket may need to be reset in order to avoid confusing
761 * the server.
762 */
763 static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
764 {
765 struct rpc_rqst *req;
766
767 if (task != xprt->snd_task)
768 return;
769 if (task == NULL)
770 goto out_release;
771 req = task->tk_rqstp;
772 if (req == NULL)
773 goto out_release;
774 if (req->rq_bytes_sent == 0)
775 goto out_release;
776 if (req->rq_bytes_sent == req->rq_snd_buf.len)
777 goto out_release;
778 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
779 out_release:
780 xprt_release_xprt(xprt, task);
781 }
782
783 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
784 {
785 transport->old_data_ready = sk->sk_data_ready;
786 transport->old_state_change = sk->sk_state_change;
787 transport->old_write_space = sk->sk_write_space;
788 }
789
790 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
791 {
792 sk->sk_data_ready = transport->old_data_ready;
793 sk->sk_state_change = transport->old_state_change;
794 sk->sk_write_space = transport->old_write_space;
795 }
796
797 static void xs_reset_transport(struct sock_xprt *transport)
798 {
799 struct socket *sock = transport->sock;
800 struct sock *sk = transport->inet;
801
802 if (sk == NULL)
803 return;
804
805 transport->srcport = 0;
806
807 write_lock_bh(&sk->sk_callback_lock);
808 transport->inet = NULL;
809 transport->sock = NULL;
810
811 sk->sk_user_data = NULL;
812
813 xs_restore_old_callbacks(transport, sk);
814 write_unlock_bh(&sk->sk_callback_lock);
815
816 sk->sk_no_check = 0;
817
818 trace_rpc_socket_close(&transport->xprt, sock);
819 sock_release(sock);
820 }
821
822 /**
823 * xs_close - close a socket
824 * @xprt: transport
825 *
826 * This is used when all requests are complete; i.e., no DRC state remains
827 * on the server we want to save.
828 *
829 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
830 * xs_reset_transport() zeroing the socket from underneath a writer.
831 */
832 static void xs_close(struct rpc_xprt *xprt)
833 {
834 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
835
836 dprintk("RPC: xs_close xprt %p\n", xprt);
837
838 xs_reset_transport(transport);
839 xprt->reestablish_timeout = 0;
840
841 smp_mb__before_clear_bit();
842 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
843 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
844 clear_bit(XPRT_CLOSING, &xprt->state);
845 smp_mb__after_clear_bit();
846 xprt_disconnect_done(xprt);
847 }
848
849 static void xs_tcp_close(struct rpc_xprt *xprt)
850 {
851 if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
852 xs_close(xprt);
853 else
854 xs_tcp_shutdown(xprt);
855 }
856
857 static void xs_local_destroy(struct rpc_xprt *xprt)
858 {
859 xs_close(xprt);
860 xs_free_peer_addresses(xprt);
861 xprt_free(xprt);
862 module_put(THIS_MODULE);
863 }
864
865 /**
866 * xs_destroy - prepare to shutdown a transport
867 * @xprt: doomed transport
868 *
869 */
870 static void xs_destroy(struct rpc_xprt *xprt)
871 {
872 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
873
874 dprintk("RPC: xs_destroy xprt %p\n", xprt);
875
876 cancel_delayed_work_sync(&transport->connect_worker);
877
878 xs_local_destroy(xprt);
879 }
880
881 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
882 {
883 return (struct rpc_xprt *) sk->sk_user_data;
884 }
885
886 static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
887 {
888 struct xdr_skb_reader desc = {
889 .skb = skb,
890 .offset = sizeof(rpc_fraghdr),
891 .count = skb->len - sizeof(rpc_fraghdr),
892 };
893
894 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
895 return -1;
896 if (desc.count)
897 return -1;
898 return 0;
899 }
900
901 /**
902 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
903 * @sk: socket with data to read
904 * @len: how much data to read
905 *
906 * Currently this assumes we can read the whole reply in a single gulp.
907 */
908 static void xs_local_data_ready(struct sock *sk, int len)
909 {
910 struct rpc_task *task;
911 struct rpc_xprt *xprt;
912 struct rpc_rqst *rovr;
913 struct sk_buff *skb;
914 int err, repsize, copied;
915 u32 _xid;
916 __be32 *xp;
917
918 read_lock_bh(&sk->sk_callback_lock);
919 dprintk("RPC: %s...\n", __func__);
920 xprt = xprt_from_sock(sk);
921 if (xprt == NULL)
922 goto out;
923
924 skb = skb_recv_datagram(sk, 0, 1, &err);
925 if (skb == NULL)
926 goto out;
927
928 repsize = skb->len - sizeof(rpc_fraghdr);
929 if (repsize < 4) {
930 dprintk("RPC: impossible RPC reply size %d\n", repsize);
931 goto dropit;
932 }
933
934 /* Copy the XID from the skb... */
935 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
936 if (xp == NULL)
937 goto dropit;
938
939 /* Look up and lock the request corresponding to the given XID */
940 spin_lock(&xprt->transport_lock);
941 rovr = xprt_lookup_rqst(xprt, *xp);
942 if (!rovr)
943 goto out_unlock;
944 task = rovr->rq_task;
945
946 copied = rovr->rq_private_buf.buflen;
947 if (copied > repsize)
948 copied = repsize;
949
950 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
951 dprintk("RPC: sk_buff copy failed\n");
952 goto out_unlock;
953 }
954
955 xprt_complete_rqst(task, copied);
956
957 out_unlock:
958 spin_unlock(&xprt->transport_lock);
959 dropit:
960 skb_free_datagram(sk, skb);
961 out:
962 read_unlock_bh(&sk->sk_callback_lock);
963 }
964
965 /**
966 * xs_udp_data_ready - "data ready" callback for UDP sockets
967 * @sk: socket with data to read
968 * @len: how much data to read
969 *
970 */
971 static void xs_udp_data_ready(struct sock *sk, int len)
972 {
973 struct rpc_task *task;
974 struct rpc_xprt *xprt;
975 struct rpc_rqst *rovr;
976 struct sk_buff *skb;
977 int err, repsize, copied;
978 u32 _xid;
979 __be32 *xp;
980
981 read_lock_bh(&sk->sk_callback_lock);
982 dprintk("RPC: xs_udp_data_ready...\n");
983 if (!(xprt = xprt_from_sock(sk)))
984 goto out;
985
986 if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
987 goto out;
988
989 repsize = skb->len - sizeof(struct udphdr);
990 if (repsize < 4) {
991 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
992 goto dropit;
993 }
994
995 /* Copy the XID from the skb... */
996 xp = skb_header_pointer(skb, sizeof(struct udphdr),
997 sizeof(_xid), &_xid);
998 if (xp == NULL)
999 goto dropit;
1000
1001 /* Look up and lock the request corresponding to the given XID */
1002 spin_lock(&xprt->transport_lock);
1003 rovr = xprt_lookup_rqst(xprt, *xp);
1004 if (!rovr)
1005 goto out_unlock;
1006 task = rovr->rq_task;
1007
1008 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1009 copied = repsize;
1010
1011 /* Suck it into the iovec, verify checksum if not done by hw. */
1012 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1013 UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
1014 goto out_unlock;
1015 }
1016
1017 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1018
1019 xprt_adjust_cwnd(xprt, task, copied);
1020 xprt_complete_rqst(task, copied);
1021
1022 out_unlock:
1023 spin_unlock(&xprt->transport_lock);
1024 dropit:
1025 skb_free_datagram(sk, skb);
1026 out:
1027 read_unlock_bh(&sk->sk_callback_lock);
1028 }
1029
1030 /*
1031 * Helper function to force a TCP close if the server is sending
1032 * junk and/or it has put us in CLOSE_WAIT
1033 */
1034 static void xs_tcp_force_close(struct rpc_xprt *xprt)
1035 {
1036 set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1037 xprt_force_disconnect(xprt);
1038 }
1039
1040 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1041 {
1042 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1043 size_t len, used;
1044 char *p;
1045
1046 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1047 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1048 used = xdr_skb_read_bits(desc, p, len);
1049 transport->tcp_offset += used;
1050 if (used != len)
1051 return;
1052
1053 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1054 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1055 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1056 else
1057 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1058 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1059
1060 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1061 transport->tcp_offset = 0;
1062
1063 /* Sanity check of the record length */
1064 if (unlikely(transport->tcp_reclen < 8)) {
1065 dprintk("RPC: invalid TCP record fragment length\n");
1066 xs_tcp_force_close(xprt);
1067 return;
1068 }
1069 dprintk("RPC: reading TCP record fragment of length %d\n",
1070 transport->tcp_reclen);
1071 }
1072
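/*
 * Once the current record fragment has been fully consumed, re-arm the
 * receive state machine to read the next fragment header; if this was
 * the last fragment of the record, prepare to copy a fresh XID.
 */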
1073 static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1074 {
1075 if (transport->tcp_offset == transport->tcp_reclen) {
1076 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1077 transport->tcp_offset = 0;
1078 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1079 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1080 transport->tcp_flags |= TCP_RCV_COPY_XID;
1081 transport->tcp_copied = 0;
1082 }
1083 }
1084 }
1085
1086 static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1087 {
1088 size_t len, used;
1089 char *p;
1090
1091 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1092 dprintk("RPC: reading XID (%Zu bytes)\n", len);
1093 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1094 used = xdr_skb_read_bits(desc, p, len);
1095 transport->tcp_offset += used;
1096 if (used != len)
1097 return;
1098 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1099 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1100 transport->tcp_copied = 4;
1101 dprintk("RPC: reading %s XID %08x\n",
1102 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1103 : "request with",
1104 ntohl(transport->tcp_xid));
1105 xs_tcp_check_fraghdr(transport);
1106 }
1107
1108 static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1109 struct xdr_skb_reader *desc)
1110 {
1111 size_t len, used;
1112 u32 offset;
1113 char *p;
1114
1115 /*
1116 * We want transport->tcp_offset to be 8 at the end of this routine
1117 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1118 * When this function is called for the first time,
1119 * transport->tcp_offset is 4 (after having already read the xid).
1120 */
1121 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1122 len = sizeof(transport->tcp_calldir) - offset;
1123 dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
1124 p = ((char *) &transport->tcp_calldir) + offset;
1125 used = xdr_skb_read_bits(desc, p, len);
1126 transport->tcp_offset += used;
1127 if (used != len)
1128 return;
1129 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1130 /*
1131 * We don't yet have the XDR buffer, so we will write the calldir
1132 * out after we get the buffer from the 'struct rpc_rqst'
1133 */
1134 switch (ntohl(transport->tcp_calldir)) {
1135 case RPC_REPLY:
1136 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1137 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1138 transport->tcp_flags |= TCP_RPC_REPLY;
1139 break;
1140 case RPC_CALL:
1141 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1142 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1143 transport->tcp_flags &= ~TCP_RPC_REPLY;
1144 break;
1145 default:
1146 dprintk("RPC: invalid request message type\n");
1147 xs_tcp_force_close(&transport->xprt);
1148 }
1149 xs_tcp_check_fraghdr(transport);
1150 }
1151
1152 static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1153 struct xdr_skb_reader *desc,
1154 struct rpc_rqst *req)
1155 {
1156 struct sock_xprt *transport =
1157 container_of(xprt, struct sock_xprt, xprt);
1158 struct xdr_buf *rcvbuf;
1159 size_t len;
1160 ssize_t r;
1161
1162 rcvbuf = &req->rq_private_buf;
1163
1164 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1165 /*
1166 * Save the RPC direction in the XDR buffer
1167 */
1168 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1169 &transport->tcp_calldir,
1170 sizeof(transport->tcp_calldir));
1171 transport->tcp_copied += sizeof(transport->tcp_calldir);
1172 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1173 }
1174
1175 len = desc->count;
1176 if (len > transport->tcp_reclen - transport->tcp_offset) {
1177 struct xdr_skb_reader my_desc;
1178
1179 len = transport->tcp_reclen - transport->tcp_offset;
1180 memcpy(&my_desc, desc, sizeof(my_desc));
1181 my_desc.count = len;
1182 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1183 &my_desc, xdr_skb_read_bits);
1184 desc->count -= r;
1185 desc->offset += r;
1186 } else
1187 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1188 desc, xdr_skb_read_bits);
1189
1190 if (r > 0) {
1191 transport->tcp_copied += r;
1192 transport->tcp_offset += r;
1193 }
1194 if (r != len) {
1195 /* Error when copying to the receive buffer,
1196 * usually because we weren't able to allocate
1197 * additional buffer pages. All we can do now
1198 * is turn off TCP_RCV_COPY_DATA, so the request
1199 * will not receive any additional updates,
1200 * and time out.
1201 * Any remaining data from this record will
1202 * be discarded.
1203 */
1204 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1205 dprintk("RPC: XID %08x truncated request\n",
1206 ntohl(transport->tcp_xid));
1207 dprintk("RPC: xprt = %p, tcp_copied = %lu, "
1208 "tcp_offset = %u, tcp_reclen = %u\n",
1209 xprt, transport->tcp_copied,
1210 transport->tcp_offset, transport->tcp_reclen);
1211 return;
1212 }
1213
1214 dprintk("RPC: XID %08x read %Zd bytes\n",
1215 ntohl(transport->tcp_xid), r);
1216 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1217 "tcp_reclen = %u\n", xprt, transport->tcp_copied,
1218 transport->tcp_offset, transport->tcp_reclen);
1219
1220 if (transport->tcp_copied == req->rq_private_buf.buflen)
1221 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1222 else if (transport->tcp_offset == transport->tcp_reclen) {
1223 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1224 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1225 }
1226 }
1227
1228 /*
1229 * Finds the request corresponding to the RPC xid and invokes the common
1230 * tcp read code to read the data.
1231 */
1232 static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1233 struct xdr_skb_reader *desc)
1234 {
1235 struct sock_xprt *transport =
1236 container_of(xprt, struct sock_xprt, xprt);
1237 struct rpc_rqst *req;
1238
1239 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1240
1241 /* Find and lock the request corresponding to this xid */
1242 spin_lock(&xprt->transport_lock);
1243 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1244 if (!req) {
1245 dprintk("RPC: XID %08x request not found!\n",
1246 ntohl(transport->tcp_xid));
1247 spin_unlock(&xprt->transport_lock);
1248 return -1;
1249 }
1250
1251 xs_tcp_read_common(xprt, desc, req);
1252
1253 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1254 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1255
1256 spin_unlock(&xprt->transport_lock);
1257 return 0;
1258 }
1259
1260 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1261 /*
1262 * Obtains an rpc_rqst previously allocated and invokes the common
1263 * tcp read code to read the data. The result is placed in the callback
1264 * queue.
1265 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1266 * connection and return -1.
1267 */
1268 static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
1269 struct xdr_skb_reader *desc)
1270 {
1271 struct sock_xprt *transport =
1272 container_of(xprt, struct sock_xprt, xprt);
1273 struct rpc_rqst *req;
1274
1275 req = xprt_alloc_bc_request(xprt);
1276 if (req == NULL) {
1277 printk(KERN_WARNING "Callback slot table overflowed\n");
1278 xprt_force_disconnect(xprt);
1279 return -1;
1280 }
1281
1282 req->rq_xid = transport->tcp_xid;
1283 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1284 xs_tcp_read_common(xprt, desc, req);
1285
1286 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
1287 struct svc_serv *bc_serv = xprt->bc_serv;
1288
1289 /*
1290 * Add callback request to callback list. The callback
1291 * service sleeps on the sv_cb_waitq waiting for new
1292 * requests. Wake it up after enqueuing the
1293 * request.
1294 */
1295 dprintk("RPC: add callback request to list\n");
1296 spin_lock(&bc_serv->sv_cb_lock);
1297 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
1298 spin_unlock(&bc_serv->sv_cb_lock);
1299 wake_up(&bc_serv->sv_cb_waitq);
1300 }
1301
1302 req->rq_private_buf.len = transport->tcp_copied;
1303
1304 return 0;
1305 }
1306
1307 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1308 struct xdr_skb_reader *desc)
1309 {
1310 struct sock_xprt *transport =
1311 container_of(xprt, struct sock_xprt, xprt);
1312
1313 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1314 xs_tcp_read_reply(xprt, desc) :
1315 xs_tcp_read_callback(xprt, desc);
1316 }
1317 #else
1318 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1319 struct xdr_skb_reader *desc)
1320 {
1321 return xs_tcp_read_reply(xprt, desc);
1322 }
1323 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1324
1325 /*
1326 * Read data off the transport. This can be either an RPC_CALL or an
1327 * RPC_REPLY. Relay the processing to helper functions.
1328 */
1329 static void xs_tcp_read_data(struct rpc_xprt *xprt,
1330 struct xdr_skb_reader *desc)
1331 {
1332 struct sock_xprt *transport =
1333 container_of(xprt, struct sock_xprt, xprt);
1334
1335 if (_xs_tcp_read_data(xprt, desc) == 0)
1336 xs_tcp_check_fraghdr(transport);
1337 else {
1338 /*
1339 * The transport_lock protects the request handling.
1340 * There's no need to hold it to update the tcp_flags.
1341 */
1342 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1343 }
1344 }
1345
1346 static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1347 {
1348 size_t len;
1349
1350 len = transport->tcp_reclen - transport->tcp_offset;
1351 if (len > desc->count)
1352 len = desc->count;
1353 desc->count -= len;
1354 desc->offset += len;
1355 transport->tcp_offset += len;
1356 dprintk("RPC: discarded %Zu bytes\n", len);
1357 xs_tcp_check_fraghdr(transport);
1358 }
1359
1360 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1361 {
1362 struct rpc_xprt *xprt = rd_desc->arg.data;
1363 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1364 struct xdr_skb_reader desc = {
1365 .skb = skb,
1366 .offset = offset,
1367 .count = len,
1368 };
1369
1370 dprintk("RPC: xs_tcp_data_recv started\n");
1371 do {
1372 /* Read in a new fragment marker if necessary */
1373 /* Can we ever really expect to get completely empty fragments? */
1374 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1375 xs_tcp_read_fraghdr(xprt, &desc);
1376 continue;
1377 }
1378 /* Read in the xid if necessary */
1379 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1380 xs_tcp_read_xid(transport, &desc);
1381 continue;
1382 }
1383 /* Read in the call/reply flag */
1384 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1385 xs_tcp_read_calldir(transport, &desc);
1386 continue;
1387 }
1388 /* Read in the request data */
1389 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1390 xs_tcp_read_data(xprt, &desc);
1391 continue;
1392 }
1393 /* Skip over any trailing bytes on short reads */
1394 xs_tcp_read_discard(transport, &desc);
1395 } while (desc.count);
1396 dprintk("RPC: xs_tcp_data_recv done\n");
1397 return len - desc.count;
1398 }
1399
1400 /**
1401 * xs_tcp_data_ready - "data ready" callback for TCP sockets
1402 * @sk: socket with data to read
1403 * @bytes: how much data to read
1404 *
1405 */
1406 static void xs_tcp_data_ready(struct sock *sk, int bytes)
1407 {
1408 struct rpc_xprt *xprt;
1409 read_descriptor_t rd_desc;
1410 int read;
1411
1412 dprintk("RPC: xs_tcp_data_ready...\n");
1413
1414 read_lock_bh(&sk->sk_callback_lock);
1415 if (!(xprt = xprt_from_sock(sk)))
1416 goto out;
1417 /* Any data means we had a useful conversation, so
1418 * we don't need to delay the next reconnect
1419 */
1420 if (xprt->reestablish_timeout)
1421 xprt->reestablish_timeout = 0;
1422
1423 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1424 rd_desc.arg.data = xprt;
1425 do {
1426 rd_desc.count = 65536;
1427 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1428 } while (read > 0);
1429 out:
1430 read_unlock_bh(&sk->sk_callback_lock);
1431 }
1432
1433 /*
1434 * Do the equivalent of linger/linger2 handling for dealing with
1435 * broken servers that don't close the socket in a timely
1436 * fashion
1437 */
1438 static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
1439 unsigned long timeout)
1440 {
1441 struct sock_xprt *transport;
1442
1443 if (xprt_test_and_set_connecting(xprt))
1444 return;
1445 set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1446 transport = container_of(xprt, struct sock_xprt, xprt);
1447 queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
1448 timeout);
1449 }
1450
1451 static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
1452 {
1453 struct sock_xprt *transport;
1454
1455 transport = container_of(xprt, struct sock_xprt, xprt);
1456
1457 if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
1458 !cancel_delayed_work(&transport->connect_worker))
1459 return;
1460 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1461 xprt_clear_connecting(xprt);
1462 }
1463
1464 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1465 {
1466 smp_mb__before_clear_bit();
1467 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1468 clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1469 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1470 clear_bit(XPRT_CLOSING, &xprt->state);
1471 smp_mb__after_clear_bit();
1472 }
1473
1474 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
1475 {
1476 xs_sock_reset_connection_flags(xprt);
1477 /* Mark transport as closed and wake up all pending tasks */
1478 xprt_disconnect_done(xprt);
1479 }
1480
1481 /**
1482 * xs_tcp_state_change - callback to handle TCP socket state changes
1483 * @sk: socket whose state has changed
1484 *
1485 */
1486 static void xs_tcp_state_change(struct sock *sk)
1487 {
1488 struct rpc_xprt *xprt;
1489
1490 read_lock_bh(&sk->sk_callback_lock);
1491 if (!(xprt = xprt_from_sock(sk)))
1492 goto out;
1493 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1494 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1495 sk->sk_state, xprt_connected(xprt),
1496 sock_flag(sk, SOCK_DEAD),
1497 sock_flag(sk, SOCK_ZAPPED),
1498 sk->sk_shutdown);
1499
1500 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1501 switch (sk->sk_state) {
1502 case TCP_ESTABLISHED:
1503 spin_lock(&xprt->transport_lock);
1504 if (!xprt_test_and_set_connected(xprt)) {
1505 struct sock_xprt *transport = container_of(xprt,
1506 struct sock_xprt, xprt);
1507
1508 /* Reset TCP record info */
1509 transport->tcp_offset = 0;
1510 transport->tcp_reclen = 0;
1511 transport->tcp_copied = 0;
1512 transport->tcp_flags =
1513 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1514 xprt->connect_cookie++;
1515
1516 xprt_wake_pending_tasks(xprt, -EAGAIN);
1517 }
1518 spin_unlock(&xprt->transport_lock);
1519 break;
1520 case TCP_FIN_WAIT1:
1521 /* The client initiated a shutdown of the socket */
1522 xprt->connect_cookie++;
1523 xprt->reestablish_timeout = 0;
1524 set_bit(XPRT_CLOSING, &xprt->state);
1525 smp_mb__before_clear_bit();
1526 clear_bit(XPRT_CONNECTED, &xprt->state);
1527 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1528 smp_mb__after_clear_bit();
1529 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1530 break;
1531 case TCP_CLOSE_WAIT:
1532 /* The server initiated a shutdown of the socket */
1533 xprt->connect_cookie++;
1534 clear_bit(XPRT_CONNECTED, &xprt->state);
1535 xs_tcp_force_close(xprt);
1536 case TCP_CLOSING:
1537 /*
1538 * If the server closed down the connection, make sure that
1539 * we back off before reconnecting
1540 */
1541 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1542 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1543 break;
1544 case TCP_LAST_ACK:
1545 set_bit(XPRT_CLOSING, &xprt->state);
1546 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1547 smp_mb__before_clear_bit();
1548 clear_bit(XPRT_CONNECTED, &xprt->state);
1549 smp_mb__after_clear_bit();
1550 break;
1551 case TCP_CLOSE:
1552 xs_tcp_cancel_linger_timeout(xprt);
1553 xs_sock_mark_closed(xprt);
1554 }
1555 out:
1556 read_unlock_bh(&sk->sk_callback_lock);
1557 }
1558
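/*
 * Common helper for the UDP and TCP write_space callbacks: clear
 * SOCK_NOSPACE and, if SOCK_ASYNC_NOSPACE was set, tell the RPC layer
 * that more send buffer space is available.
 */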
1559 static void xs_write_space(struct sock *sk)
1560 {
1561 struct socket *sock;
1562 struct rpc_xprt *xprt;
1563
1564 if (unlikely(!(sock = sk->sk_socket)))
1565 return;
1566 clear_bit(SOCK_NOSPACE, &sock->flags);
1567
1568 if (unlikely(!(xprt = xprt_from_sock(sk))))
1569 return;
1570 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1571 return;
1572
1573 xprt_write_space(xprt);
1574 }
1575
1576 /**
1577 * xs_udp_write_space - callback invoked when socket buffer space
1578 * becomes available
1579 * @sk: socket whose state has changed
1580 *
1581 * Called when more output buffer space is available for this socket.
1582 * We try not to wake our writers until they can make "significant"
1583 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1584 * with a bunch of small requests.
1585 */
1586 static void xs_udp_write_space(struct sock *sk)
1587 {
1588 read_lock_bh(&sk->sk_callback_lock);
1589
1590 /* from net/core/sock.c:sock_def_write_space */
1591 if (sock_writeable(sk))
1592 xs_write_space(sk);
1593
1594 read_unlock_bh(&sk->sk_callback_lock);
1595 }
1596
1597 /**
1598 * xs_tcp_write_space - callback invoked when socket buffer space
1599 * becomes available
1600 * @sk: socket whose state has changed
1601 *
1602 * Called when more output buffer space is available for this socket.
1603 * We try not to wake our writers until they can make "significant"
1604 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1605 * with a bunch of small requests.
1606 */
1607 static void xs_tcp_write_space(struct sock *sk)
1608 {
1609 read_lock_bh(&sk->sk_callback_lock);
1610
1611 /* from net/core/stream.c:sk_stream_write_space */
1612 if (sk_stream_is_writeable(sk))
1613 xs_write_space(sk);
1614
1615 read_unlock_bh(&sk->sk_callback_lock);
1616 }
1617
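/*
 * Apply any requested send/receive buffer sizes to the underlying UDP
 * socket, scaled by twice the slot count, and lock the values so the
 * network layer does not auto-tune them.
 */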
1618 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1619 {
1620 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1621 struct sock *sk = transport->inet;
1622
1623 if (transport->rcvsize) {
1624 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1625 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1626 }
1627 if (transport->sndsize) {
1628 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1629 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1630 sk->sk_write_space(sk);
1631 }
1632 }
1633
1634 /**
1635 * xs_udp_set_buffer_size - set send and receive limits
1636 * @xprt: generic transport
1637 * @sndsize: requested size of send buffer, in bytes
1638 * @rcvsize: requested size of receive buffer, in bytes
1639 *
1640 * Set socket send and receive buffer size limits.
1641 */
1642 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1643 {
1644 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1645
1646 transport->sndsize = 0;
1647 if (sndsize)
1648 transport->sndsize = sndsize + 1024;
1649 transport->rcvsize = 0;
1650 if (rcvsize)
1651 transport->rcvsize = rcvsize + 1024;
1652
1653 xs_udp_do_set_buffer_size(xprt);
1654 }
1655
1656 /**
1657 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1658 * @task: task that timed out
1659 *
1660 * Adjust the congestion window after a retransmit timeout has occurred.
1661 */
1662 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1663 {
1664 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1665 }
1666
1667 static unsigned short xs_get_random_port(void)
1668 {
1669 unsigned short range = xprt_max_resvport - xprt_min_resvport;
1670 unsigned short rand = (unsigned short) net_random() % range;
1671 return rand + xprt_min_resvport;
1672 }
1673
1674 /**
1675 * xs_set_port - reset the port number in the remote endpoint address
1676 * @xprt: generic transport
1677 * @port: new port number
1678 *
1679 */
1680 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1681 {
1682 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1683
1684 rpc_set_port(xs_addr(xprt), port);
1685 xs_update_peer_port(xprt);
1686 }
1687
1688 static unsigned short xs_get_srcport(struct sock_xprt *transport)
1689 {
1690 unsigned short port = transport->srcport;
1691
1692 if (port == 0 && transport->xprt.resvport)
1693 port = xs_get_random_port();
1694 return port;
1695 }
1696
1697 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1698 {
1699 if (transport->srcport != 0)
1700 transport->srcport = 0;
1701 if (!transport->xprt.resvport)
1702 return 0;
1703 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1704 return xprt_max_resvport;
1705 return --port;
1706 }
1707 static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1708 {
1709 struct sockaddr_storage myaddr;
1710 int err, nloop = 0;
1711 unsigned short port = xs_get_srcport(transport);
1712 unsigned short last;
1713
1714 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1715 do {
1716 rpc_set_port((struct sockaddr *)&myaddr, port);
1717 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1718 transport->xprt.addrlen);
1719 if (port == 0)
1720 break;
1721 if (err == 0) {
1722 transport->srcport = port;
1723 break;
1724 }
1725 last = port;
1726 port = xs_next_srcport(transport, port);
1727 if (port > last)
1728 nloop++;
1729 } while (err == -EADDRINUSE && nloop != 2);
1730
1731 if (myaddr.ss_family == AF_INET)
1732 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1733 &((struct sockaddr_in *)&myaddr)->sin_addr,
1734 port, err ? "failed" : "ok", err);
1735 else
1736 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1737 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1738 port, err ? "failed" : "ok", err);
1739 return err;
1740 }
1741
1742 /*
1743 * We don't support autobind on AF_LOCAL sockets
1744 */
1745 static void xs_local_rpcbind(struct rpc_task *task)
1746 {
1747 rcu_read_lock();
1748 xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt));
1749 rcu_read_unlock();
1750 }
1751
1752 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1753 {
1754 }
1755
1756 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1757 static struct lock_class_key xs_key[2];
1758 static struct lock_class_key xs_slock_key[2];
1759
1760 static inline void xs_reclassify_socketu(struct socket *sock)
1761 {
1762 struct sock *sk = sock->sk;
1763
1764 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1765 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1766 }
1767
1768 static inline void xs_reclassify_socket4(struct socket *sock)
1769 {
1770 struct sock *sk = sock->sk;
1771
1772 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1773 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1774 }
1775
1776 static inline void xs_reclassify_socket6(struct socket *sock)
1777 {
1778 struct sock *sk = sock->sk;
1779
1780 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1781 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1782 }
1783
1784 static inline void xs_reclassify_socket(int family, struct socket *sock)
1785 {
1786 WARN_ON_ONCE(sock_owned_by_user(sock->sk));
1787 if (sock_owned_by_user(sock->sk))
1788 return;
1789
1790 switch (family) {
1791 case AF_LOCAL:
1792 xs_reclassify_socketu(sock);
1793 break;
1794 case AF_INET:
1795 xs_reclassify_socket4(sock);
1796 break;
1797 case AF_INET6:
1798 xs_reclassify_socket6(sock);
1799 break;
1800 }
1801 }
1802 #else
1803 static inline void xs_reclassify_socketu(struct socket *sock)
1804 {
1805 }
1806
1807 static inline void xs_reclassify_socket4(struct socket *sock)
1808 {
1809 }
1810
1811 static inline void xs_reclassify_socket6(struct socket *sock)
1812 {
1813 }
1814
1815 static inline void xs_reclassify_socket(int family, struct socket *sock)
1816 {
1817 }
1818 #endif
1819
1820 static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1821 struct sock_xprt *transport, int family, int type, int protocol)
1822 {
1823 struct socket *sock;
1824 int err;
1825
1826 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1827 if (err < 0) {
1828 dprintk("RPC: can't create %d transport socket (%d).\n",
1829 protocol, -err);
1830 goto out;
1831 }
1832 xs_reclassify_socket(family, sock);
1833
1834 err = xs_bind(transport, sock);
1835 if (err) {
1836 sock_release(sock);
1837 goto out;
1838 }
1839
1840 return sock;
1841 out:
1842 return ERR_PTR(err);
1843 }
1844
1845 static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1846 struct socket *sock)
1847 {
1848 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1849 xprt);
1850
1851 if (!transport->inet) {
1852 struct sock *sk = sock->sk;
1853
1854 write_lock_bh(&sk->sk_callback_lock);
1855
1856 xs_save_old_callbacks(transport, sk);
1857
1858 sk->sk_user_data = xprt;
1859 sk->sk_data_ready = xs_local_data_ready;
1860 sk->sk_write_space = xs_udp_write_space;
1861 sk->sk_allocation = GFP_ATOMIC;
1862
1863 xprt_clear_connected(xprt);
1864
1865 /* Reset to new socket */
1866 transport->sock = sock;
1867 transport->inet = sk;
1868
1869 write_unlock_bh(&sk->sk_callback_lock);
1870 }
1871
1872 /* Tell the socket layer to start connecting... */
1873 xprt->stat.connect_count++;
1874 xprt->stat.connect_start = jiffies;
1875 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1876 }
1877
1878 /**
1879 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1880 * @transport: socket transport to connect
1883 */
1884 static int xs_local_setup_socket(struct sock_xprt *transport)
1885 {
1886 struct rpc_xprt *xprt = &transport->xprt;
1887 struct socket *sock;
1888 int status = -EIO;
1889
1890 current->flags |= PF_FSTRANS;
1891
1892 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1893 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1894 SOCK_STREAM, 0, &sock, 1);
1895 if (status < 0) {
1896 dprintk("RPC: can't create AF_LOCAL "
1897 "transport socket (%d).\n", -status);
1898 goto out;
1899 }
1900 xs_reclassify_socketu(sock);
1901
1902 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1903 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1904
1905 status = xs_local_finish_connecting(xprt, sock);
1906 trace_rpc_socket_connect(xprt, sock, status);
1907 switch (status) {
1908 case 0:
1909 dprintk("RPC: xprt %p connected to %s\n",
1910 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1911 xprt_set_connected(xprt);
1912 break;
1913 case -ENOENT:
1914 dprintk("RPC: xprt %p: socket %s does not exist\n",
1915 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1916 break;
1917 case -ECONNREFUSED:
1918 dprintk("RPC: xprt %p: connection refused for %s\n",
1919 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1920 break;
1921 default:
1922 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1923 __func__, -status,
1924 xprt->address_strings[RPC_DISPLAY_ADDR]);
1925 }
1926
1927 out:
1928 xprt_clear_connecting(xprt);
1929 xprt_wake_pending_tasks(xprt, status);
1930 current->flags &= ~PF_FSTRANS;
1931 return status;
1932 }
1933
1934 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1935 {
1936 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1937 int ret;
1938
1939 if (RPC_IS_ASYNC(task)) {
1940 /*
1941 * We want the AF_LOCAL connect to be resolved in the
1942 * filesystem namespace of the process making the rpc
1943 * call. Thus we connect synchronously.
1944 *
1945 * If we want to support asynchronous AF_LOCAL calls,
1946 * we'll need to figure out how to pass a namespace to
1947 * connect.
1948 */
1949 rpc_exit(task, -ENOTCONN);
1950 return;
1951 }
1952 ret = xs_local_setup_socket(transport);
1953 if (ret && !RPC_IS_SOFTCONN(task))
1954 msleep_interruptible(15000);
1955 }
1956
1957 #ifdef CONFIG_SUNRPC_SWAP
1958 static void xs_set_memalloc(struct rpc_xprt *xprt)
1959 {
1960 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1961 xprt);
1962
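/* A socket tagged with sk_set_memalloc() may dip into the emergency
 * memory reserves, so writeout over a swap-backed transport can still
 * make progress under memory pressure. */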
1963 if (xprt->swapper)
1964 sk_set_memalloc(transport->inet);
1965 }
1966
1967 /**
1968 * xs_swapper - Tag this transport as being used for swap.
1969 * @xprt: transport to tag
1970 * @enable: enable/disable
1971 *
1972 */
1973 int xs_swapper(struct rpc_xprt *xprt, int enable)
1974 {
1975 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1976 xprt);
1977 int err = 0;
1978
1979 if (enable) {
1980 xprt->swapper++;
1981 xs_set_memalloc(xprt);
1982 } else if (xprt->swapper) {
1983 xprt->swapper--;
1984 sk_clear_memalloc(transport->inet);
1985 }
1986
1987 return err;
1988 }
1989 EXPORT_SYMBOL_GPL(xs_swapper);
1990 #else
1991 static void xs_set_memalloc(struct rpc_xprt *xprt)
1992 {
1993 }
1994 #endif
1995
1996 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1997 {
1998 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1999
2000 if (!transport->inet) {
2001 struct sock *sk = sock->sk;
2002
2003 write_lock_bh(&sk->sk_callback_lock);
2004
2005 xs_save_old_callbacks(transport, sk);
2006
2007 sk->sk_user_data = xprt;
2008 sk->sk_data_ready = xs_udp_data_ready;
2009 sk->sk_write_space = xs_udp_write_space;
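/* UDP_CSUM_NORCV: don't insist on checksums for received datagrams */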
2010 sk->sk_no_check = UDP_CSUM_NORCV;
2011 sk->sk_allocation = GFP_ATOMIC;
2012
2013 xprt_set_connected(xprt);
2014
2015 /* Reset to new socket */
2016 transport->sock = sock;
2017 transport->inet = sk;
2018
2019 xs_set_memalloc(xprt);
2020
2021 write_unlock_bh(&sk->sk_callback_lock);
2022 }
2023 xs_udp_do_set_buffer_size(xprt);
2024 }
2025
2026 static void xs_udp_setup_socket(struct work_struct *work)
2027 {
2028 struct sock_xprt *transport =
2029 container_of(work, struct sock_xprt, connect_worker.work);
2030 struct rpc_xprt *xprt = &transport->xprt;
2031 struct socket *sock = transport->sock;
2032 int status = -EIO;
2033
2034 current->flags |= PF_FSTRANS;
2035
2036 /* Start by resetting any existing state */
2037 xs_reset_transport(transport);
2038 sock = xs_create_sock(xprt, transport,
2039 xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
2040 if (IS_ERR(sock))
2041 goto out;
2042
2043 dprintk("RPC: worker connecting xprt %p via %s to "
2044 "%s (port %s)\n", xprt,
2045 xprt->address_strings[RPC_DISPLAY_PROTO],
2046 xprt->address_strings[RPC_DISPLAY_ADDR],
2047 xprt->address_strings[RPC_DISPLAY_PORT]);
2048
2049 xs_udp_finish_connecting(xprt, sock);
2050 trace_rpc_socket_connect(xprt, sock, 0);
2051 status = 0;
2052 out:
2053 xprt_clear_connecting(xprt);
2054 xprt_wake_pending_tasks(xprt, status);
2055 current->flags &= ~PF_FSTRANS;
2056 }
2057
2058 /*
2059 * We need to preserve the port number so the reply cache on the server can
2060 * find our cached RPC replies when we get around to reconnecting.
2061 */
2062 static void xs_abort_connection(struct sock_xprt *transport)
2063 {
2064 int result;
2065 struct sockaddr any;
2066
2067 dprintk("RPC: disconnecting xprt %p to reuse port\n", transport);
2068
2069 /*
2070 * Disconnect the transport socket by doing a connect operation
2071 * with AF_UNSPEC. This should return immediately...
2072 */
2073 memset(&any, 0, sizeof(any));
2074 any.sa_family = AF_UNSPEC;
2075 result = kernel_connect(transport->sock, &any, sizeof(any), 0);
2076 trace_rpc_socket_reset_connection(&transport->xprt,
2077 transport->sock, result);
2078 if (!result)
2079 xs_sock_reset_connection_flags(&transport->xprt);
2080 dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
2081 }
2082
2083 static void xs_tcp_reuse_connection(struct sock_xprt *transport)
2084 {
2085 unsigned int state = transport->inet->sk_state;
2086
2087 if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
2088 /* we don't need to abort the connection if the socket
2089 * hasn't undergone a shutdown
2090 */
2091 if (transport->inet->sk_shutdown == 0)
2092 return;
2093 dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
2094 __func__, transport->inet->sk_shutdown);
2095 }
2096 if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
2097 /* we don't need to abort the connection if the socket
2098 * hasn't undergone a shutdown
2099 */
2100 if (transport->inet->sk_shutdown == 0)
2101 return;
2102 dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
2103 "sk_shutdown set to %d\n",
2104 __func__, transport->inet->sk_shutdown);
2105 }
2106 xs_abort_connection(transport);
2107 }
2108
2109 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2110 {
2111 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2112 int ret = -ENOTCONN;
2113
2114 if (!transport->inet) {
2115 struct sock *sk = sock->sk;
2116 unsigned int keepidle = xprt->timeout->to_initval / HZ;
2117 unsigned int keepcnt = xprt->timeout->to_retries + 1;
2118 unsigned int opt_on = 1;
2119
2120 /* TCP Keepalive options */
2121 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2122 (char *)&opt_on, sizeof(opt_on));
2123 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2124 (char *)&keepidle, sizeof(keepidle));
2125 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2126 (char *)&keepidle, sizeof(keepidle));
2127 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2128 (char *)&keepcnt, sizeof(keepcnt));
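/* Note: keepidle (to_initval in seconds) is used for both TCP_KEEPIDLE
 * and TCP_KEEPINTVL above, and keepcnt is to_retries + 1, so keepalive
 * dead-peer detection roughly tracks the transport's RPC timeout. */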
2129
2130 write_lock_bh(&sk->sk_callback_lock);
2131
2132 xs_save_old_callbacks(transport, sk);
2133
2134 sk->sk_user_data = xprt;
2135 sk->sk_data_ready = xs_tcp_data_ready;
2136 sk->sk_state_change = xs_tcp_state_change;
2137 sk->sk_write_space = xs_tcp_write_space;
2138 sk->sk_allocation = GFP_ATOMIC;
2139
2140 /* socket options */
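/* lock the bound source port across reconnects, don't linger on close,
 * and disable Nagle so small RPC requests are sent immediately */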
2141 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
2142 sock_reset_flag(sk, SOCK_LINGER);
2143 tcp_sk(sk)->linger2 = 0;
2144 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2145
2146 xprt_clear_connected(xprt);
2147
2148 /* Reset to new socket */
2149 transport->sock = sock;
2150 transport->inet = sk;
2151
2152 write_unlock_bh(&sk->sk_callback_lock);
2153 }
2154
2155 if (!xprt_bound(xprt))
2156 goto out;
2157
2158 xs_set_memalloc(xprt);
2159
2160 /* Tell the socket layer to start connecting... */
2161 xprt->stat.connect_count++;
2162 xprt->stat.connect_start = jiffies;
2163 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2164 switch (ret) {
2165 case 0:
2166 case -EINPROGRESS:
2167 /* SYN_SENT! */
2168 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2169 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2170 }
2171 out:
2172 return ret;
2173 }
2174
2175 /**
2176 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2177 * @work: work item containing the sock_xprt transport to connect
2180 *
2181 * Invoked by a work queue tasklet.
2182 */
2183 static void xs_tcp_setup_socket(struct work_struct *work)
2184 {
2185 struct sock_xprt *transport =
2186 container_of(work, struct sock_xprt, connect_worker.work);
2187 struct socket *sock = transport->sock;
2188 struct rpc_xprt *xprt = &transport->xprt;
2189 int status = -EIO;
2190
2191 current->flags |= PF_FSTRANS;
2192
2193 if (!sock) {
2194 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
2195 sock = xs_create_sock(xprt, transport,
2196 xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
2197 if (IS_ERR(sock)) {
2198 status = PTR_ERR(sock);
2199 goto out;
2200 }
2201 } else {
2202 int abort_and_exit;
2203
2204 abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
2205 &xprt->state);
2206 /* "close" the socket, preserving the local port */
2207 xs_tcp_reuse_connection(transport);
2208
2209 if (abort_and_exit)
2210 goto out_eagain;
2211 }
2212
2213 dprintk("RPC: worker connecting xprt %p via %s to "
2214 "%s (port %s)\n", xprt,
2215 xprt->address_strings[RPC_DISPLAY_PROTO],
2216 xprt->address_strings[RPC_DISPLAY_ADDR],
2217 xprt->address_strings[RPC_DISPLAY_PORT]);
2218
2219 status = xs_tcp_finish_connecting(xprt, sock);
2220 trace_rpc_socket_connect(xprt, sock, status);
2221 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2222 xprt, -status, xprt_connected(xprt),
2223 sock->sk->sk_state);
2224 switch (status) {
2225 default:
2226 printk("%s: connect returned unhandled error %d\n",
2227 __func__, status);
2228 case -EADDRNOTAVAIL:
2229 /* We're probably in TIME_WAIT. Get rid of existing socket,
2230 * and retry
2231 */
2232 xs_tcp_force_close(xprt);
2233 break;
2234 case 0:
2235 case -EINPROGRESS:
2236 case -EALREADY:
2237 xprt_clear_connecting(xprt);
2238 current->flags &= ~PF_FSTRANS;
2239 return;
2240 case -EINVAL:
2241 /* Happens, for instance, if the user specified a link
2242 * local IPv6 address without a scope-id.
2243 */
2244 case -ECONNREFUSED:
2245 case -ECONNRESET:
2246 case -ENETUNREACH:
2247 /* retry with existing socket, after a delay */
2248 goto out;
2249 }
2250 out_eagain:
2251 status = -EAGAIN;
2252 out:
2253 xprt_clear_connecting(xprt);
2254 xprt_wake_pending_tasks(xprt, status);
2255 current->flags &= ~PF_FSTRANS;
2256 }
2257
2258 /**
2259 * xs_connect - connect a socket to a remote endpoint
2260 * @xprt: pointer to transport structure
2261 * @task: address of RPC task that manages state of connect request
2262 *
2263 * TCP: If the remote end dropped the connection, delay reconnecting.
2264 *
2265 * UDP socket connects are synchronous, but we use a work queue anyway
2266 * to guarantee that even unprivileged user processes can set up a
2267 * socket on a privileged port.
2268 *
2269 * If a UDP socket connect fails, the delay behavior here prevents
2270 * retry floods (hard mounts).
2271 */
2272 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2273 {
2274 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2275
2276 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
2277 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2278 "seconds\n",
2279 xprt, xprt->reestablish_timeout / HZ);
2280 queue_delayed_work(rpciod_workqueue,
2281 &transport->connect_worker,
2282 xprt->reestablish_timeout);
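/* Exponential backoff: the reconnect delay doubles on each attempt and
 * is clamped to [XS_TCP_INIT_REEST_TO, XS_TCP_MAX_REEST_TO]. */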
2283 xprt->reestablish_timeout <<= 1;
2284 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2285 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2286 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2287 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
2288 } else {
2289 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2290 queue_delayed_work(rpciod_workqueue,
2291 &transport->connect_worker, 0);
2292 }
2293 }
2294
2295 /**
2296 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2297 * @xprt: rpc_xprt struct containing statistics
2298 * @seq: output file
2299 *
2300 */
2301 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2302 {
2303 long idle_time = 0;
2304
2305 if (xprt_connected(xprt))
2306 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2307
2308 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2309 "%llu %llu %lu %llu %llu\n",
2310 xprt->stat.bind_count,
2311 xprt->stat.connect_count,
2312 xprt->stat.connect_time,
2313 idle_time,
2314 xprt->stat.sends,
2315 xprt->stat.recvs,
2316 xprt->stat.bad_xids,
2317 xprt->stat.req_u,
2318 xprt->stat.bklog_u,
2319 xprt->stat.max_slots,
2320 xprt->stat.sending_u,
2321 xprt->stat.pending_u);
2322 }
2323
2324 /**
2325 * xs_udp_print_stats - display UDP socket-specific stats
2326 * @xprt: rpc_xprt struct containing statistics
2327 * @seq: output file
2328 *
2329 */
2330 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2331 {
2332 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2333
2334 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2335 "%lu %llu %llu\n",
2336 transport->srcport,
2337 xprt->stat.bind_count,
2338 xprt->stat.sends,
2339 xprt->stat.recvs,
2340 xprt->stat.bad_xids,
2341 xprt->stat.req_u,
2342 xprt->stat.bklog_u,
2343 xprt->stat.max_slots,
2344 xprt->stat.sending_u,
2345 xprt->stat.pending_u);
2346 }
2347
2348 /**
2349 * xs_tcp_print_stats - display TCP socket-specific stats
2350 * @xprt: rpc_xprt struct containing statistics
2351 * @seq: output file
2352 *
2353 */
2354 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2355 {
2356 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2357 long idle_time = 0;
2358
2359 if (xprt_connected(xprt))
2360 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2361
2362 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2363 "%llu %llu %lu %llu %llu\n",
2364 transport->srcport,
2365 xprt->stat.bind_count,
2366 xprt->stat.connect_count,
2367 xprt->stat.connect_time,
2368 idle_time,
2369 xprt->stat.sends,
2370 xprt->stat.recvs,
2371 xprt->stat.bad_xids,
2372 xprt->stat.req_u,
2373 xprt->stat.bklog_u,
2374 xprt->stat.max_slots,
2375 xprt->stat.sending_u,
2376 xprt->stat.pending_u);
2377 }
2378
2379 /*
2380 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2381 * we allocate pages instead of doing a kmalloc like rpc_malloc does is that we
2382 * want to use the server-side send routines.
2383 */
2384 static void *bc_malloc(struct rpc_task *task, size_t size)
2385 {
2386 struct page *page;
2387 struct rpc_buffer *buf;
2388
2389 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2390 if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
2391 return NULL;
2392
2393 page = alloc_page(GFP_KERNEL);
2394 if (!page)
2395 return NULL;
2396
2397 buf = page_address(page);
2398 buf->len = PAGE_SIZE;
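/* The struct rpc_buffer header sits at the start of the page; callers
 * get the data area that follows, which is why requests larger than
 * PAGE_SIZE - sizeof(struct rpc_buffer) are rejected above. */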
2399
2400 return buf->data;
2401 }
2402
2403 /*
2404 * Free the space allocated in the bc_malloc routine
2405 */
2406 static void bc_free(void *buffer)
2407 {
2408 struct rpc_buffer *buf;
2409
2410 if (!buffer)
2411 return;
2412
2413 buf = container_of(buffer, struct rpc_buffer, data);
2414 free_page((unsigned long)buf);
2415 }
2416
2417 /*
2418 * Use the svc_sock to send the callback. Must be called with the xpt_mutex
2419 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2420 */
2421 static int bc_sendto(struct rpc_rqst *req)
2422 {
2423 int len;
2424 struct xdr_buf *xbufp = &req->rq_snd_buf;
2425 struct rpc_xprt *xprt = req->rq_xprt;
2426 struct sock_xprt *transport =
2427 container_of(xprt, struct sock_xprt, xprt);
2428 struct socket *sock = transport->sock;
2429 unsigned long headoff;
2430 unsigned long tailoff;
2431
2432 xs_encode_stream_record_marker(xbufp);
2433
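/* compute the offsets of the head and tail buffers within their pages
 * for svc_send_common() below */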
2434 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2435 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2436 len = svc_send_common(sock, xbufp,
2437 virt_to_page(xbufp->head[0].iov_base), headoff,
2438 xbufp->tail[0].iov_base, tailoff);
2439
2440 if (len != xbufp->len) {
2441 printk(KERN_NOTICE "Error sending entire callback!\n");
2442 len = -EAGAIN;
2443 }
2444
2445 return len;
2446 }
2447
2448 /*
2449 * The send routine. Borrows from svc_send
2450 */
2451 static int bc_send_request(struct rpc_task *task)
2452 {
2453 struct rpc_rqst *req = task->tk_rqstp;
2454 struct svc_xprt *xprt;
2455 u32 len;
2456
2457 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2458 /*
2459 * Get the server socket associated with this callback xprt
2460 */
2461 xprt = req->rq_xprt->bc_xprt;
2462
2463 /*
2464 * Grab the mutex to serialize data as the connection is shared
2465 * with the fore channel
2466 */
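/* If the first trylock fails, put the task to sleep on xpt_bc_pending
 * and try once more; a successful second attempt must dequeue the task
 * again, otherwise leave it queued and return -EAGAIN. */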
2467 if (!mutex_trylock(&xprt->xpt_mutex)) {
2468 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2469 if (!mutex_trylock(&xprt->xpt_mutex))
2470 return -EAGAIN;
2471 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2472 }
2473 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2474 len = -ENOTCONN;
2475 else
2476 len = bc_sendto(req);
2477 mutex_unlock(&xprt->xpt_mutex);
2478
2479 if (len > 0)
2480 len = 0;
2481
2482 return len;
2483 }
2484
2485 /*
2486 * The close routine. Since this is client initiated, we do nothing
2487 */
2488
2489 static void bc_close(struct rpc_xprt *xprt)
2490 {
2491 }
2492
2493 /*
2494 * The xprt destroy routine. Again, because this connection is client
2495 * initiated, we do nothing
2496 */
2497
2498 static void bc_destroy(struct rpc_xprt *xprt)
2499 {
2500 }
2501
2502 static struct rpc_xprt_ops xs_local_ops = {
2503 .reserve_xprt = xprt_reserve_xprt,
2504 .release_xprt = xs_tcp_release_xprt,
2505 .alloc_slot = xprt_alloc_slot,
2506 .rpcbind = xs_local_rpcbind,
2507 .set_port = xs_local_set_port,
2508 .connect = xs_local_connect,
2509 .buf_alloc = rpc_malloc,
2510 .buf_free = rpc_free,
2511 .send_request = xs_local_send_request,
2512 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2513 .close = xs_close,
2514 .destroy = xs_local_destroy,
2515 .print_stats = xs_local_print_stats,
2516 };
2517
2518 static struct rpc_xprt_ops xs_udp_ops = {
2519 .set_buffer_size = xs_udp_set_buffer_size,
2520 .reserve_xprt = xprt_reserve_xprt_cong,
2521 .release_xprt = xprt_release_xprt_cong,
2522 .alloc_slot = xprt_alloc_slot,
2523 .rpcbind = rpcb_getport_async,
2524 .set_port = xs_set_port,
2525 .connect = xs_connect,
2526 .buf_alloc = rpc_malloc,
2527 .buf_free = rpc_free,
2528 .send_request = xs_udp_send_request,
2529 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2530 .timer = xs_udp_timer,
2531 .release_request = xprt_release_rqst_cong,
2532 .close = xs_close,
2533 .destroy = xs_destroy,
2534 .print_stats = xs_udp_print_stats,
2535 };
2536
2537 static struct rpc_xprt_ops xs_tcp_ops = {
2538 .reserve_xprt = xprt_reserve_xprt,
2539 .release_xprt = xs_tcp_release_xprt,
2540 .alloc_slot = xprt_lock_and_alloc_slot,
2541 .rpcbind = rpcb_getport_async,
2542 .set_port = xs_set_port,
2543 .connect = xs_connect,
2544 .buf_alloc = rpc_malloc,
2545 .buf_free = rpc_free,
2546 .send_request = xs_tcp_send_request,
2547 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2548 .close = xs_tcp_close,
2549 .destroy = xs_destroy,
2550 .print_stats = xs_tcp_print_stats,
2551 };
2552
2553 /*
2554 * The rpc_xprt_ops for the server backchannel
2555 */
2556
2557 static struct rpc_xprt_ops bc_tcp_ops = {
2558 .reserve_xprt = xprt_reserve_xprt,
2559 .release_xprt = xprt_release_xprt,
2560 .alloc_slot = xprt_alloc_slot,
2561 .buf_alloc = bc_malloc,
2562 .buf_free = bc_free,
2563 .send_request = bc_send_request,
2564 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2565 .close = bc_close,
2566 .destroy = bc_destroy,
2567 .print_stats = xs_tcp_print_stats,
2568 };
2569
2570 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2571 {
2572 static const struct sockaddr_in sin = {
2573 .sin_family = AF_INET,
2574 .sin_addr.s_addr = htonl(INADDR_ANY),
2575 };
2576 static const struct sockaddr_in6 sin6 = {
2577 .sin6_family = AF_INET6,
2578 .sin6_addr = IN6ADDR_ANY_INIT,
2579 };
2580
2581 switch (family) {
2582 case AF_LOCAL:
2583 break;
2584 case AF_INET:
2585 memcpy(sap, &sin, sizeof(sin));
2586 break;
2587 case AF_INET6:
2588 memcpy(sap, &sin6, sizeof(sin6));
2589 break;
2590 default:
2591 dprintk("RPC: %s: Bad address family\n", __func__);
2592 return -EAFNOSUPPORT;
2593 }
2594 return 0;
2595 }
2596
2597 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2598 unsigned int slot_table_size,
2599 unsigned int max_slot_table_size)
2600 {
2601 struct rpc_xprt *xprt;
2602 struct sock_xprt *new;
2603
2604 if (args->addrlen > sizeof(xprt->addr)) {
2605 dprintk("RPC: xs_setup_xprt: address too large\n");
2606 return ERR_PTR(-EBADF);
2607 }
2608
2609 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2610 max_slot_table_size);
2611 if (xprt == NULL) {
2612 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2613 "rpc_xprt\n");
2614 return ERR_PTR(-ENOMEM);
2615 }
2616
2617 new = container_of(xprt, struct sock_xprt, xprt);
2618 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2619 xprt->addrlen = args->addrlen;
2620 if (args->srcaddr)
2621 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2622 else {
2623 int err;
2624 err = xs_init_anyaddr(args->dstaddr->sa_family,
2625 (struct sockaddr *)&new->srcaddr);
2626 if (err != 0) {
2627 xprt_free(xprt);
2628 return ERR_PTR(err);
2629 }
2630 }
2631
2632 return xprt;
2633 }
2634
2635 static const struct rpc_timeout xs_local_default_timeout = {
2636 .to_initval = 10 * HZ,
2637 .to_maxval = 10 * HZ,
2638 .to_retries = 2,
2639 };
2640
2641 /**
2642 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2643 * @args: rpc transport creation arguments
2644 *
2645 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2646 */
2647 static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2648 {
2649 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2650 struct sock_xprt *transport;
2651 struct rpc_xprt *xprt;
2652 struct rpc_xprt *ret;
2653
2654 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2655 xprt_max_tcp_slot_table_entries);
2656 if (IS_ERR(xprt))
2657 return xprt;
2658 transport = container_of(xprt, struct sock_xprt, xprt);
2659
2660 xprt->prot = 0;
2661 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2662 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2663
2664 xprt->bind_timeout = XS_BIND_TO;
2665 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2666 xprt->idle_timeout = XS_IDLE_DISC_TO;
2667
2668 xprt->ops = &xs_local_ops;
2669 xprt->timeout = &xs_local_default_timeout;
2670
2671 switch (sun->sun_family) {
2672 case AF_LOCAL:
2673 if (sun->sun_path[0] != '/') {
2674 dprintk("RPC: bad AF_LOCAL address: %s\n",
2675 sun->sun_path);
2676 ret = ERR_PTR(-EINVAL);
2677 goto out_err;
2678 }
2679 xprt_set_bound(xprt);
2680 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2681 ret = ERR_PTR(xs_local_setup_socket(transport));
2682 if (ret)
2683 goto out_err;
2684 break;
2685 default:
2686 ret = ERR_PTR(-EAFNOSUPPORT);
2687 goto out_err;
2688 }
2689
2690 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2691 xprt->address_strings[RPC_DISPLAY_ADDR]);
2692
2693 if (try_module_get(THIS_MODULE))
2694 return xprt;
2695 ret = ERR_PTR(-EINVAL);
2696 out_err:
2697 xprt_free(xprt);
2698 return ret;
2699 }
2700
2701 static const struct rpc_timeout xs_udp_default_timeout = {
2702 .to_initval = 5 * HZ,
2703 .to_maxval = 30 * HZ,
2704 .to_increment = 5 * HZ,
2705 .to_retries = 5,
2706 };
2707
2708 /**
2709 * xs_setup_udp - Set up transport to use a UDP socket
2710 * @args: rpc transport creation arguments
2711 *
2712 */
2713 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2714 {
2715 struct sockaddr *addr = args->dstaddr;
2716 struct rpc_xprt *xprt;
2717 struct sock_xprt *transport;
2718 struct rpc_xprt *ret;
2719
2720 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2721 xprt_udp_slot_table_entries);
2722 if (IS_ERR(xprt))
2723 return xprt;
2724 transport = container_of(xprt, struct sock_xprt, xprt);
2725
2726 xprt->prot = IPPROTO_UDP;
2727 xprt->tsh_size = 0;
2728 /* XXX: header size can vary due to auth type, IPv6, etc. */
2729 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2730
2731 xprt->bind_timeout = XS_BIND_TO;
2732 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2733 xprt->idle_timeout = XS_IDLE_DISC_TO;
2734
2735 xprt->ops = &xs_udp_ops;
2736
2737 xprt->timeout = &xs_udp_default_timeout;
2738
2739 switch (addr->sa_family) {
2740 case AF_INET:
2741 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2742 xprt_set_bound(xprt);
2743
2744 INIT_DELAYED_WORK(&transport->connect_worker,
2745 xs_udp_setup_socket);
2746 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2747 break;
2748 case AF_INET6:
2749 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2750 xprt_set_bound(xprt);
2751
2752 INIT_DELAYED_WORK(&transport->connect_worker,
2753 xs_udp_setup_socket);
2754 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2755 break;
2756 default:
2757 ret = ERR_PTR(-EAFNOSUPPORT);
2758 goto out_err;
2759 }
2760
2761 if (xprt_bound(xprt))
2762 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2763 xprt->address_strings[RPC_DISPLAY_ADDR],
2764 xprt->address_strings[RPC_DISPLAY_PORT],
2765 xprt->address_strings[RPC_DISPLAY_PROTO]);
2766 else
2767 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2768 xprt->address_strings[RPC_DISPLAY_ADDR],
2769 xprt->address_strings[RPC_DISPLAY_PROTO]);
2770
2771 if (try_module_get(THIS_MODULE))
2772 return xprt;
2773 ret = ERR_PTR(-EINVAL);
2774 out_err:
2775 xprt_free(xprt);
2776 return ret;
2777 }
2778
2779 static const struct rpc_timeout xs_tcp_default_timeout = {
2780 .to_initval = 60 * HZ,
2781 .to_maxval = 60 * HZ,
2782 .to_retries = 2,
2783 };
2784
2785 /**
2786 * xs_setup_tcp - Set up transport to use a TCP socket
2787 * @args: rpc transport creation arguments
2788 *
2789 */
2790 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2791 {
2792 struct sockaddr *addr = args->dstaddr;
2793 struct rpc_xprt *xprt;
2794 struct sock_xprt *transport;
2795 struct rpc_xprt *ret;
2796 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2797
2798 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2799 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2800
2801 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2802 max_slot_table_size);
2803 if (IS_ERR(xprt))
2804 return xprt;
2805 transport = container_of(xprt, struct sock_xprt, xprt);
2806
2807 xprt->prot = IPPROTO_TCP;
2808 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2809 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2810
2811 xprt->bind_timeout = XS_BIND_TO;
2812 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2813 xprt->idle_timeout = XS_IDLE_DISC_TO;
2814
2815 xprt->ops = &xs_tcp_ops;
2816 xprt->timeout = &xs_tcp_default_timeout;
2817
2818 switch (addr->sa_family) {
2819 case AF_INET:
2820 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2821 xprt_set_bound(xprt);
2822
2823 INIT_DELAYED_WORK(&transport->connect_worker,
2824 xs_tcp_setup_socket);
2825 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2826 break;
2827 case AF_INET6:
2828 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2829 xprt_set_bound(xprt);
2830
2831 INIT_DELAYED_WORK(&transport->connect_worker,
2832 xs_tcp_setup_socket);
2833 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2834 break;
2835 default:
2836 ret = ERR_PTR(-EAFNOSUPPORT);
2837 goto out_err;
2838 }
2839
2840 if (xprt_bound(xprt))
2841 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2842 xprt->address_strings[RPC_DISPLAY_ADDR],
2843 xprt->address_strings[RPC_DISPLAY_PORT],
2844 xprt->address_strings[RPC_DISPLAY_PROTO]);
2845 else
2846 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2847 xprt->address_strings[RPC_DISPLAY_ADDR],
2848 xprt->address_strings[RPC_DISPLAY_PROTO]);
2849
2850
2851 if (try_module_get(THIS_MODULE))
2852 return xprt;
2853 ret = ERR_PTR(-EINVAL);
2854 out_err:
2855 xprt_free(xprt);
2856 return ret;
2857 }
2858
2859 /**
2860 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2861 * @args: rpc transport creation arguments
2862 *
2863 */
2864 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2865 {
2866 struct sockaddr *addr = args->dstaddr;
2867 struct rpc_xprt *xprt;
2868 struct sock_xprt *transport;
2869 struct svc_sock *bc_sock;
2870 struct rpc_xprt *ret;
2871
2872 if (args->bc_xprt->xpt_bc_xprt) {
2873 /*
2874 * This server connection already has a backchannel
2875 * transport; we can't create a new one, as we wouldn't
2876 * be able to match replies based on xid any more. So,
2877 * reuse the already-existing one:
2878 */
2879 return args->bc_xprt->xpt_bc_xprt;
2880 }
2881 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2882 xprt_tcp_slot_table_entries);
2883 if (IS_ERR(xprt))
2884 return xprt;
2885 transport = container_of(xprt, struct sock_xprt, xprt);
2886
2887 xprt->prot = IPPROTO_TCP;
2888 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2889 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2890 xprt->timeout = &xs_tcp_default_timeout;
2891
2892 /* backchannel */
2893 xprt_set_bound(xprt);
2894 xprt->bind_timeout = 0;
2895 xprt->reestablish_timeout = 0;
2896 xprt->idle_timeout = 0;
2897
2898 xprt->ops = &bc_tcp_ops;
2899
2900 switch (addr->sa_family) {
2901 case AF_INET:
2902 xs_format_peer_addresses(xprt, "tcp",
2903 RPCBIND_NETID_TCP);
2904 break;
2905 case AF_INET6:
2906 xs_format_peer_addresses(xprt, "tcp",
2907 RPCBIND_NETID_TCP6);
2908 break;
2909 default:
2910 ret = ERR_PTR(-EAFNOSUPPORT);
2911 goto out_err;
2912 }
2913
2914 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2915 xprt->address_strings[RPC_DISPLAY_ADDR],
2916 xprt->address_strings[RPC_DISPLAY_PORT],
2917 xprt->address_strings[RPC_DISPLAY_PROTO]);
2918
2919 /*
2920 * Once we've associated a backchannel xprt with a connection,
2921 * we want to keep it around as long as the connection
2922 * lasts, in case we need to start using it for a backchannel
2923 * again; this reference won't be dropped until bc_xprt is
2924 * destroyed.
2925 */
2926 xprt_get(xprt);
2927 args->bc_xprt->xpt_bc_xprt = xprt;
2928 xprt->bc_xprt = args->bc_xprt;
2929 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
2930 transport->sock = bc_sock->sk_sock;
2931 transport->inet = bc_sock->sk_sk;
2932
2933 /*
2934 * Since we don't want connections for the backchannel, we set
2935 * the xprt status to connected
2936 */
2937 xprt_set_connected(xprt);
2938
2939
2940 if (try_module_get(THIS_MODULE))
2941 return xprt;
2942 xprt_put(xprt);
2943 ret = ERR_PTR(-EINVAL);
2944 out_err:
2945 xprt_free(xprt);
2946 return ret;
2947 }
2948
2949 static struct xprt_class xs_local_transport = {
2950 .list = LIST_HEAD_INIT(xs_local_transport.list),
2951 .name = "named UNIX socket",
2952 .owner = THIS_MODULE,
2953 .ident = XPRT_TRANSPORT_LOCAL,
2954 .setup = xs_setup_local,
2955 };
2956
2957 static struct xprt_class xs_udp_transport = {
2958 .list = LIST_HEAD_INIT(xs_udp_transport.list),
2959 .name = "udp",
2960 .owner = THIS_MODULE,
2961 .ident = XPRT_TRANSPORT_UDP,
2962 .setup = xs_setup_udp,
2963 };
2964
2965 static struct xprt_class xs_tcp_transport = {
2966 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
2967 .name = "tcp",
2968 .owner = THIS_MODULE,
2969 .ident = XPRT_TRANSPORT_TCP,
2970 .setup = xs_setup_tcp,
2971 };
2972
2973 static struct xprt_class xs_bc_tcp_transport = {
2974 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
2975 .name = "tcp NFSv4.1 backchannel",
2976 .owner = THIS_MODULE,
2977 .ident = XPRT_TRANSPORT_BC_TCP,
2978 .setup = xs_setup_bc_tcp,
2979 };
2980
2981 /**
2982 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
2983 *
2984 */
2985 int init_socket_xprt(void)
2986 {
2987 #ifdef RPC_DEBUG
2988 if (!sunrpc_table_header)
2989 sunrpc_table_header = register_sysctl_table(sunrpc_table);
2990 #endif
2991
2992 xprt_register_transport(&xs_local_transport);
2993 xprt_register_transport(&xs_udp_transport);
2994 xprt_register_transport(&xs_tcp_transport);
2995 xprt_register_transport(&xs_bc_tcp_transport);
2996
2997 return 0;
2998 }
2999
3000 /**
3001 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3002 *
3003 */
3004 void cleanup_socket_xprt(void)
3005 {
3006 #ifdef RPC_DEBUG
3007 if (sunrpc_table_header) {
3008 unregister_sysctl_table(sunrpc_table_header);
3009 sunrpc_table_header = NULL;
3010 }
3011 #endif
3012
3013 xprt_unregister_transport(&xs_local_transport);
3014 xprt_unregister_transport(&xs_udp_transport);
3015 xprt_unregister_transport(&xs_tcp_transport);
3016 xprt_unregister_transport(&xs_bc_tcp_transport);
3017 }
3018
3019 static int param_set_uint_minmax(const char *val,
3020 const struct kernel_param *kp,
3021 unsigned int min, unsigned int max)
3022 {
3023 unsigned long num;
3024 int ret;
3025
3026 if (!val)
3027 return -EINVAL;
3028 ret = strict_strtoul(val, 0, &num);
3029 if (ret == -EINVAL || num < min || num > max)
3030 return -EINVAL;
3031 *((unsigned int *)kp->arg) = num;
3032 return 0;
3033 }
3034
3035 static int param_set_portnr(const char *val, const struct kernel_param *kp)
3036 {
3037 return param_set_uint_minmax(val, kp,
3038 RPC_MIN_RESVPORT,
3039 RPC_MAX_RESVPORT);
3040 }
3041
3042 static struct kernel_param_ops param_ops_portnr = {
3043 .set = param_set_portnr,
3044 .get = param_get_uint,
3045 };
3046
3047 #define param_check_portnr(name, p) \
3048 __param_check(name, p, unsigned int);
3049
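/* module_param_named() with the "portnr" type resolves to the
 * param_ops_portnr / param_check_portnr definitions above, so writes to
 * these parameters are range-checked against RPC_MIN/MAX_RESVPORT. */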
3050 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3051 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3052
3053 static int param_set_slot_table_size(const char *val,
3054 const struct kernel_param *kp)
3055 {
3056 return param_set_uint_minmax(val, kp,
3057 RPC_MIN_SLOT_TABLE,
3058 RPC_MAX_SLOT_TABLE);
3059 }
3060
3061 static struct kernel_param_ops param_ops_slot_table_size = {
3062 .set = param_set_slot_table_size,
3063 .get = param_get_uint,
3064 };
3065
3066 #define param_check_slot_table_size(name, p) \
3067 __param_check(name, p, unsigned int);
3068
3069 static int param_set_max_slot_table_size(const char *val,
3070 const struct kernel_param *kp)
3071 {
3072 return param_set_uint_minmax(val, kp,
3073 RPC_MIN_SLOT_TABLE,
3074 RPC_MAX_SLOT_TABLE_LIMIT);
3075 }
3076
3077 static struct kernel_param_ops param_ops_max_slot_table_size = {
3078 .set = param_set_max_slot_table_size,
3079 .get = param_get_uint,
3080 };
3081
3082 #define param_check_max_slot_table_size(name, p) \
3083 __param_check(name, p, unsigned int);
3084
3085 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3086 slot_table_size, 0644);
3087 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3088 max_slot_table_size, 0644);
3089 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3090 slot_table_size, 0644);
3091