/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>

#include <net/checksum.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *	svc_sock->sk_xprt.xpt_flags.XPT_BUSY prevents a svc_sock being
 *	enqueued more than once.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA can be set or cleared at any time.
 *		After a set, svc_sock_enqueue must be called.
 *		After a clear, the socket must be read/accepted;
 *		if this succeeds, the flag must be set again.
 *	XPT_CLOSE can be set at any time. It is never cleared.
 *	xpt_ref contains a bias of '1' until XPT_DEAD is set,
 *	so when xpt_ref hits zero we know the transport is dead
 *	and no-one is using it.
 *	XPT_DEAD can only be set while XPT_BUSY is held, which ensures
 *	no other thread will be using the socket or will try to set
 *	XPT_DEAD.
 */

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
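/*
 * Illustrative sketch of the rule above (not a new code path): a producer
 * that notices incoming data does
 *
 *	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 *	svc_sock_enqueue(svsk);
 *
 * while the worker thread clears the bit, attempts the read, and re-sets
 * the bit if the read made progress, so no wakeup is ever lost.
 */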
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_socket(struct svc_sock *svsk);
static void		svc_sock_detach(struct svc_xprt *);
static void		svc_sock_free(struct svc_xprt *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
					  struct sockaddr *, int, int);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
					      &svc_slock_key[0],
					      "sk_lock-AF_INET-NFSD",
					      &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
					      &svc_slock_key[1],
					      "sk_lock-AF_INET6-NFSD",
					      &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			ntohs(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}
/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static void svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_xprt_ctxt = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_xprt.xpt_flags &
	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
		return;
	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_xprt.xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update XPT_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_xprt.xpt_pool != NULL);
	svsk->sk_xprt.xpt_pool = pool;

	/* Handle pending connection */
	if (test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!svsk->sk_xprt.xpt_ops->xpo_has_wspace(&svsk->sk_xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, socket %p not enqueued\n", svsk);
		svsk->sk_xprt.xpt_pool = NULL;
		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
		goto out_unlock;
	}

 process:
	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		svc_xprt_get(&svsk->sk_xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_xprt.xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_xprt.xpt_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk,
		atomic_read(&svsk->sk_xprt.xpt_ref.refcount));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: XPT_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_xprt.xpt_pool = NULL;
	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_xprt_put(&svsk->sk_xprt);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
				       &rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					 ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch (svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol == IPPROTO_UDP ?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	struct sockaddr *sin;
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
			     msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	/* Destination address in request is needed for binding the
	 * source address in RPC callbacks later.
	 */
	sin = (struct sockaddr *)&svsk->sk_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count,
			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
	}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
	}
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
		/* udp sockets need large rcvbuf as all pending
		 * requests are still in that buffer.  sndbuf must
		 * also be large enough that there is enough space
		 * for one reply per thread.  We count all threads
		 * rather than threads in a particular pool, which
		 * provides an upper bound on the number of threads
		 * which will access the socket.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		}
		svc_sock_received(svsk);
		return -EAGAIN;
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len  = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_xprt_ctxt = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}

static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
static int svc_udp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = xprt->xpt_server;
	unsigned long required;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	if (required*2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}

static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
{
	BUG();
	return NULL;
}

static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_UDP, sa, salen, flags);
}
static struct svc_xprt_ops svc_udp_ops = {
	.xpo_create = svc_udp_create,
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_udp_prep_reply_hdr,
	.xpo_has_wspace = svc_udp_has_wspace,
	.xpo_accept = svc_udp_accept,
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};
static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	int one = 1;
	mm_segment_t oldfs;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);

	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* might have come in before data_ready set up */
	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}
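/*
 * Worked example (illustrative numbers only): with sv_max_mesg of 1 MB and
 * 8 nfsd threads, svc_udp_recvfrom() above resizes the buffers to
 * (8 + 3) * 1 MB = 11 MB for both rcvbuf and sndbuf, so every thread can
 * have a reply in flight while pending requests still fit in rcvbuf.
 */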
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets become ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}
/*
 * Accept a TCP connection
 */
static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return NULL;

	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return NULL;
	}

	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	memcpy(&newsvsk->sk_local, sin, slen);

	svc_sock_received(newsvsk);

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return &newsvsk->sk_xprt;

failed:
	sock_release(newsock);
	return NULL;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_xprt_ctxt   = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_xprt.xpt_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(XPT_DEAD, &rqstp->rq_sock->sk_xprt.xpt_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_xprt.xpt_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(XPT_CLOSE, &rqstp->rq_sock->sk_xprt.xpt_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
/*
 * Setup response header. TCP has a 4B record length field.
 */
static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];

	/* tcp needs a space for the record length... */
	svc_putnl(resv, 0);
}
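/*
 * Worked example (numbers are illustrative): if the reply buffer ends up
 * 200 bytes long including the 4-byte placeholder written above, then
 * svc_tcp_sendto() overwrites that placeholder with
 * htonl(0x80000000 | (200 - 4)), i.e. 0x800000c4 on the wire: the
 * "last fragment" bit plus a 196-byte fragment carrying everything that
 * follows the record marker itself.
 */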
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	int required;
	int wspace;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	wspace = sk_stream_wspace(svsk->sk_sk);

	if (wspace < sk_stream_min_wspace(svsk->sk_sk))
		return 0;
	if (required * 2 > wspace)
		return 0;

	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_TCP, sa, salen, flags);
}

static struct svc_xprt_ops svc_tcp_ops = {
	.xpo_create = svc_tcp_create,
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
	.xpo_has_wspace = svc_tcp_has_wspace,
	.xpo_accept = svc_tcp_accept,
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}
static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);

		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Make sure that we don't have too many active connections.  If we
 * have, something must be dropped.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS clients do 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
			svc_xprt_get(&svsk->sk_xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_xprt_put(&svsk->sk_xprt);
		}
	}
}
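/*
 * Worked example (illustrative): with 8 nfsd threads the limit above is
 * (8 + 3) * 20 = 220 temporary (TCP) sockets; the 221st connection causes
 * the oldest temporary socket to be marked XPT_CLOSE and queued for a
 * thread to tear down.
 */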
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			 rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			 rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		svc_xprt_get(&svsk->sk_xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_socket(svsk);
	} else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
		struct svc_xprt *newxpt;
		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(svsk->sk_xprt.xpt_server);
		}
		svc_sock_received(svsk);
	} else {
		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
			rqstp, pool->sp_id, svsk,
			atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
		len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
			continue;
		if (atomic_read(&svsk->sk_xprt.xpt_ref.refcount) > 1
		    || test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
			continue;
		svc_xprt_get(&svsk->sk_xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
		set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_xprt_put(&svsk->sk_xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
						struct socket *sock,
						int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk, serv);
	else
		svc_tcp_init(svsk, serv);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
					(unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
					jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
				svsk, svsk->sk_sk);
	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err =  -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err =  -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_sock_received(svsk);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
					  int protocol,
					  struct sockaddr *sin, int len,
					  int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return ERR_PTR(-EINVAL);
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return ERR_PTR(error);

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return (struct svc_xprt *)svsk;
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return ERR_PTR(error);
}
/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;

	dprintk("svc: svc_sock_detach(%p)\n", svsk);

	/* put back the old socket callbacks */
	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;
}
/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	dprintk("svc: svc_sock_free(%p)\n", svsk);

	if (svsk->sk_info_authunix != NULL)
		svcauth_unix_info_release(svsk->sk_info_authunix);
	if (svsk->sk_sock->file)
		sockfd_put(svsk->sk_sock);
	else
		sock_release(svsk->sk_sock);
	kfree(svsk);
}
/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_xprt.xpt_server;

	svsk->sk_xprt.xpt_ops->xpo_detach(&svsk->sk_xprt);

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
		BUG_ON(atomic_read(&svsk->sk_xprt.xpt_ref.refcount) < 2);
		if (test_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags))
			serv->sv_tmpcnt--;
		svc_xprt_put(&svsk->sk_xprt);
	}

	spin_unlock_bh(&serv->sv_lock);
}
static void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
		/* someone else will have to effect the close */
		return;

	svc_xprt_get(&svsk->sk_xprt);
	svc_delete_socket(svsk);
	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
	svc_xprt_put(&svsk->sk_xprt);
}

void svc_force_close_socket(struct svc_sock *svsk)
{
	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	if (test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
		/* Waiting to be processed, but no threads left,
		 * So just remove it from the waiting list
		 */
		list_del_init(&svsk->sk_ready);
		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
	}
	svc_close_socket(svsk);
}
/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_xprt_put(&dr->svsk->sk_xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock(&svsk->sk_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock(&svsk->sk_lock);
	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
	svc_sock_enqueue(svsk);
	svc_xprt_put(&svsk->sk_xprt);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
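/*
 * Illustrative sizing for the copy above: a 96-byte request whose head
 * iovec holds all 96 bytes gives skip = 0 and argslen = 96 >> 2 = 24
 * words, so 24 << 2 = 96 bytes are saved after the svc_deferred_req
 * header and replayed verbatim by svc_deferred_recv() below.
 */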
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen     = dr->addrlen;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
		return NULL;
	spin_lock(&svsk->sk_lock);
	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
	}
	spin_unlock(&svsk->sk_lock);
	return dr;
}