/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -   When a process places a call, it allocates a request slot if
 *      one is available. Otherwise, it sleeps on the backlog queue
 *      (xprt_reserve).
 *  -   Next, the caller puts together the RPC message, stuffs it into
 *      the request struct, and calls xprt_transmit().
 *  -   xprt_transmit sends the message and installs the caller on the
 *      transport's wait list. At the same time, if a reply is expected,
 *      it installs a timer that is run after the packet's timeout has
 *      expired.
 *  -   When a packet arrives, the data_ready handler walks the list of
 *      pending requests for that transport. If a matching XID is found, the
 *      caller is woken up, and the timer removed.
 *  -   When no reply arrives within the timeout interval, the timer is
 *      fired by the kernel and runs xprt_timer(). It either adjusts the
 *      timeout values (minor timeout) or wakes up the caller with a status
 *      of -ETIMEDOUT.
 *  -   When the caller receives a notification from RPC that a reply arrived,
 *      it should release the RPC slot, and process the reply.
 *      If the call timed out, it may choose to retry the operation by
 *      adjusting the initial timeout value, and simply calling rpc_call
 *      again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
static void     xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void     xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void     xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:           transport successfully registered
 * -EEXIST:     transport already registered
 * -EINVAL:     transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = -EEXIST;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                /* don't register the same transport class twice */
                if (t->ident == transport->ident)
                        goto out;
        }

        list_add_tail(&transport->list, &xprt_list);
        printk(KERN_INFO "RPC: Registered %s transport module.\n",
               transport->name);
        result = 0;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:           transport successfully unregistered
 * -ENOENT:     transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (t == transport) {
                        printk(KERN_INFO
                                "RPC: Unregistered %s transport module.\n",
                                transport->name);
                        list_del_init(&transport->list);
                        goto out;
                }
        }
        result = -ENOENT;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:           transport successfully loaded
 * -ENOENT:     transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
        struct xprt_class *t;
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (strcmp(t->name, transport_name) == 0) {
                        spin_unlock(&xprt_list_lock);
                        goto out;
                }
        }
        spin_unlock(&xprt_list_lock);
        result = request_module("xprt%s", transport_name);
out:
        return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

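/*
 * Illustrative sketch (not part of this file): a transport module would
 * typically pair xprt_register_transport()/xprt_unregister_transport() in
 * its module init/exit hooks. The xprt_class instance, its ident value,
 * and the setup callback below are hypothetical placeholders.
 *
 *      static struct xprt_class example_transport = {
 *              .list   = LIST_HEAD_INIT(example_transport.list),
 *              .name   = "example",
 *              .owner  = THIS_MODULE,
 *              .ident  = 42,
 *              .setup  = example_setup,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return xprt_register_transport(&example_transport);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              xprt_unregister_transport(&example_transport);
 *      }
 */
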
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        int priority;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        return 1;
                goto out_sleep;
        }
        xprt->snd_task = task;
        if (req != NULL)
                req->rq_ntrans++;

        return 1;

out_sleep:
        dprintk("RPC: %5u failed to lock transport %p\n",
                        task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        if (req == NULL)
                priority = RPC_PRIORITY_LOW;
        else if (!req->rq_ntrans)
                priority = RPC_PRIORITY_NORMAL;
        else
                priority = RPC_PRIORITY_HIGH;
        rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

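/*
 * Illustrative sketch: xprt_reserve_xprt() and xprt_release_xprt() (below)
 * are normally wired up as a pair in a transport's rpc_xprt_ops, so the
 * generic code always serializes writers through the same lock/unlock
 * discipline. The field names match struct rpc_xprt_ops; the ops instance
 * itself is hypothetical.
 *
 *      static struct rpc_xprt_ops example_ops = {
 *              .reserve_xprt   = xprt_reserve_xprt,
 *              .release_xprt   = xprt_release_xprt,
 *              ...
 *      };
 */
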
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
        xprt->snd_task = NULL;
        if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
                smp_mb__before_atomic();
                clear_bit(XPRT_LOCKED, &xprt->state);
                smp_mb__after_atomic();
        } else
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        int priority;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        return 1;
                goto out_sleep;
        }
        if (req == NULL) {
                xprt->snd_task = task;
                return 1;
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
                req->rq_ntrans++;
                return 1;
        }
        xprt_clear_locked(xprt);
out_sleep:
        dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        if (req == NULL)
                priority = RPC_PRIORITY_LOW;
        else if (!req->rq_ntrans)
                priority = RPC_PRIORITY_NORMAL;
        else
                priority = RPC_PRIORITY_HIGH;
        rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        int retval;

        spin_lock_bh(&xprt->transport_lock);
        retval = xprt->ops->reserve_xprt(xprt, task);
        spin_unlock_bh(&xprt->transport_lock);
        return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
        struct rpc_xprt *xprt = data;
        struct rpc_rqst *req;

        req = task->tk_rqstp;
        xprt->snd_task = task;
        if (req)
                req->rq_ntrans++;
        return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;

        if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
                return;
        xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
        struct rpc_xprt *xprt = data;
        struct rpc_rqst *req;

        req = task->tk_rqstp;
        if (req == NULL) {
                xprt->snd_task = task;
                return true;
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
                req->rq_ntrans++;
                return true;
        }
        return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        if (RPCXPRT_CONGESTED(xprt))
                goto out_unlock;
        if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
                return;
out_unlock:
        xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                if (task != NULL) {
                        struct rpc_rqst *req = task->tk_rqstp;
                        if (req != NULL)
                                req->rq_bytes_sent = 0;
                }
                xprt_clear_locked(xprt);
                __xprt_lock_write_next(xprt);
        }
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                if (task != NULL) {
                        struct rpc_rqst *req = task->tk_rqstp;
                        if (req != NULL)
                                req->rq_bytes_sent = 0;
                }
                xprt_clear_locked(xprt);
                __xprt_lock_write_next_cong(xprt);
        }
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        spin_lock_bh(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_cong)
                return 1;
        dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
                        task->tk_pid, xprt->cong, xprt->cwnd);
        if (RPCXPRT_CONGESTED(xprt))
                return 0;
        req->rq_cong = 1;
        xprt->cong += RPC_CWNDSCALE;
        return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (!req->rq_cong)
                return;
        req->rq_cong = 0;
        xprt->cong -= RPC_CWNDSCALE;
        __xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        __xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *      - a reply is received and
 *      - a full number of requests are outstanding and
 *      - the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long cwnd = xprt->cwnd;

        if (result >= 0 && cwnd <= xprt->cong) {
                /* The (cwnd >> 1) term makes sure
                 * the result gets rounded properly. */
                cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
                if (cwnd > RPC_MAXCWND(xprt))
                        cwnd = RPC_MAXCWND(xprt);
                __xprt_lock_write_next_cong(xprt);
        } else if (result == -ETIMEDOUT) {
                cwnd >>= 1;
                if (cwnd < RPC_CWNDSCALE)
                        cwnd = RPC_CWNDSCALE;
        }
        dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
                        xprt->cong, xprt->cwnd, cwnd);
        xprt->cwnd = cwnd;
        __xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

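/*
 * Worked example (assuming RPC_CWNDSCALE is 256, i.e. 1 << RPC_CWNDSHIFT,
 * as defined in include/linux/sunrpc/xprt.h): with cwnd = 512, i.e. two
 * slots' worth, a successful reply grows the window by roughly 1/cwnd of
 * a slot:
 *
 *      cwnd += (256 * 256 + (512 >> 1)) / 512;  => cwnd += 128, cwnd = 640
 *
 * while a timeout halves it, clamped to one slot's worth:
 *
 *      cwnd >>= 1;                              => cwnd = 256
 */
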
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
        if (status < 0)
                rpc_wake_up_status(&xprt->pending, status);
        else
                rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
        rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

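/*
 * Illustrative sketch: a socket transport's send path typically calls
 * xprt_wait_for_buffer_space() when the socket send buffer fills up, and
 * its write-space callback later calls xprt_write_space() (below) to wake
 * the blocked sender. The function names here are hypothetical.
 *
 *      static int example_send_request(struct rpc_task *task)
 *      {
 *              ...
 *              if (status == -EAGAIN)
 *                      xprt_wait_for_buffer_space(task, example_write_space_cb);
 *              return status;
 *      }
 */
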
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
        spin_lock_bh(&xprt->transport_lock);
        if (xprt->snd_task) {
                dprintk("RPC: write space: waking waiting task on "
                                "xprt %p\n", xprt);
                rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
        }
        spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
        task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
        int timer = task->tk_msg.rpc_proc->p_timer;
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rtt *rtt = clnt->cl_rtt;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long max_timeout = clnt->cl_timeout->to_maxval;

        task->tk_timeout = rpc_calc_rto(rtt, timer);
        task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
        if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
                task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

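/*
 * Worked example (illustrative numbers): if the estimator returns an RTO
 * of 2 jiffies, one minor timeout has been recorded for this procedure
 * class, and the request has already been retried twice, the timeout
 * doubles once per shift before being capped:
 *
 *      tk_timeout = 2 << (1 + 2);       => 16 jiffies, capped at to_maxval
 */
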
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

        req->rq_majortimeo = req->rq_timeout;
        if (to->to_exponential)
                req->rq_majortimeo <<= to->to_retries;
        else
                req->rq_majortimeo += to->to_increment * to->to_retries;
        if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
                req->rq_majortimeo = to->to_maxval;
        req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
        int status = 0;

        if (time_before(jiffies, req->rq_majortimeo)) {
                if (to->to_exponential)
                        req->rq_timeout <<= 1;
                else
                        req->rq_timeout += to->to_increment;
                if (to->to_maxval && req->rq_timeout >= to->to_maxval)
                        req->rq_timeout = to->to_maxval;
                req->rq_retries++;
        } else {
                req->rq_timeout = to->to_initval;
                req->rq_retries = 0;
                xprt_reset_majortimeo(req);
                /* Reset the RTT counters == "slow start" */
                spin_lock_bh(&xprt->transport_lock);
                rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
                spin_unlock_bh(&xprt->transport_lock);
                status = -ETIMEDOUT;
        }

        if (req->rq_timeout == 0) {
                printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
                req->rq_timeout = 5 * HZ;
        }
        return status;
}

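/*
 * Worked example (illustrative numbers): with to_initval = 5*HZ,
 * to_retries = 3 and to_exponential set, xprt_reset_majortimeo() allows
 * minor timeouts until 5*HZ << 3 = 40*HZ past "now". Each minor timeout
 * doubles rq_timeout (5s, 10s, 20s, ...); once the major timeout passes,
 * xprt_adjust_timeout() resets rq_timeout to to_initval and returns
 * -ETIMEDOUT so the caller can decide whether to retry or fail.
 */
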
static void xprt_autoclose(struct work_struct *work)
{
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);

        xprt->ops->close(xprt);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
        dprintk("RPC: disconnected transport %p\n", xprt);
        spin_lock_bh(&xprt->transport_lock);
        xprt_clear_connected(xprt);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
        spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock_bh(&xprt->transport_lock);
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
        spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock_bh(&xprt->transport_lock);
        if (cookie != xprt->connect_cookie)
                goto out;
        if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
                goto out;
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
        spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *)data;

        spin_lock(&xprt->transport_lock);
        if (!list_empty(&xprt->recv))
                goto out_abort;
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                goto out_abort;
        spin_unlock(&xprt->transport_lock);
        set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
        queue_work(rpciod_workqueue, &xprt->task_cleanup);
        return;
out_abort:
        spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

        dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
                        xprt, (xprt_connected(xprt) ? "is" : "is not"));

        if (!xprt_bound(xprt)) {
                task->tk_status = -EAGAIN;
                return;
        }
        if (!xprt_lock_write(xprt, task))
                return;

        if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
                xprt->ops->close(xprt);

        if (xprt_connected(xprt))
                xprt_release_write(xprt, task);
        else {
                task->tk_rqstp->rq_bytes_sent = 0;
                task->tk_timeout = task->tk_rqstp->rq_timeout;
                rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

                if (test_bit(XPRT_CLOSING, &xprt->state))
                        return;
                if (xprt_test_and_set_connecting(xprt))
                        return;
                xprt->stat.connect_start = jiffies;
                xprt->ops->connect(xprt, task);
        }
}

static void xprt_connect_status(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

        if (task->tk_status == 0) {
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
                dprintk("RPC: %5u xprt_connect_status: connection established\n",
                                task->tk_pid);
                return;
        }

        switch (task->tk_status) {
        case -ECONNREFUSED:
        case -ECONNRESET:
        case -ECONNABORTED:
        case -ENETUNREACH:
        case -EHOSTUNREACH:
        case -EPIPE:
        case -EAGAIN:
                dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
                break;
        case -ETIMEDOUT:
                dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
                                "out\n", task->tk_pid);
                break;
        default:
                dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
                                "server %s\n", task->tk_pid, -task->tk_status,
                                xprt->servername);
                xprt_release_write(xprt, task);
                task->tk_status = -EIO;
        }
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *entry;

        list_for_each_entry(entry, &xprt->recv, rq_list)
                if (entry->rq_xid == xid)
                        return entry;

        dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
                        ntohl(xid));
        xprt->stat.bad_xids++;
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_rtt *rtt = task->tk_client->cl_rtt;
        unsigned int timer = task->tk_msg.rpc_proc->p_timer;
        long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

        if (timer) {
                if (req->rq_ntrans == 1)
                        rpc_update_rtt(rtt, timer, m);
                rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
        }
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
                        task->tk_pid, ntohl(req->rq_xid), copied);

        xprt->stat.recvs++;
        req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
        if (xprt->ops->timer != NULL)
                xprt_update_rtt(task);

        list_del_init(&req->rq_list);
        req->rq_private_buf.len = copied;
        /* Ensure all writes are done before we update */
        /* req->rq_reply_bytes_recvd */
        smp_wmb();
        req->rq_reply_bytes_recvd = copied;
        rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

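/*
 * Illustrative sketch: a transport's receive path matches an incoming reply
 * to its request and completes it while holding the transport lock, along
 * these lines (the surrounding receive handler and its local variables are
 * hypothetical):
 *
 *      spin_lock(&xprt->transport_lock);
 *      rovr = xprt_lookup_rqst(xprt, xid);
 *      if (rovr != NULL)
 *              xprt_complete_rqst(rovr->rq_task, copied);
 *      spin_unlock(&xprt->transport_lock);
 */
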
static void xprt_timer(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (task->tk_status != -ETIMEDOUT)
                return;
        dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

        spin_lock_bh(&xprt->transport_lock);
        if (!req->rq_reply_bytes_recvd) {
                if (xprt->ops->timer)
                        xprt->ops->timer(xprt, task);
        } else
                task->tk_status = 0;
        spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
        return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        bool ret = false;

        dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

        spin_lock_bh(&xprt->transport_lock);
        if (!req->rq_bytes_sent) {
                if (req->rq_reply_bytes_recvd) {
                        task->tk_status = req->rq_reply_bytes_recvd;
                        goto out_unlock;
                }
                if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
                    && xprt_connected(xprt)
                    && req->rq_connect_cookie == xprt->connect_cookie) {
                        xprt->ops->set_retrans_timeout(task);
                        rpc_sleep_on(&xprt->pending, task, xprt_timer);
                        goto out_unlock;
                }
        }
        if (!xprt->ops->reserve_xprt(xprt, task)) {
                task->tk_status = -EAGAIN;
                goto out_unlock;
        }
        ret = true;
out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
        return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
        xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        int status, numreqs;

        dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

        if (!req->rq_reply_bytes_recvd) {
                if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
                        /*
                         * Add to the list only if we're expecting a reply
                         */
                        spin_lock_bh(&xprt->transport_lock);
                        /* Update the softirq receive buffer */
                        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                                        sizeof(req->rq_private_buf));
                        /* Add request to the receive list */
                        list_add_tail(&req->rq_list, &xprt->recv);
                        spin_unlock_bh(&xprt->transport_lock);
                        xprt_reset_majortimeo(req);
                        /* Turn off autodisconnect */
                        del_singleshot_timer_sync(&xprt->timer);
                }
        } else if (!req->rq_bytes_sent)
                return;

        req->rq_xtime = ktime_get();
        status = xprt->ops->send_request(task);
        if (status != 0) {
                task->tk_status = status;
                return;
        }

        dprintk("RPC: %5u xmit complete\n", task->tk_pid);
        task->tk_flags |= RPC_TASK_SENT;
        spin_lock_bh(&xprt->transport_lock);

        xprt->ops->set_retrans_timeout(task);

        numreqs = atomic_read(&xprt->num_reqs);
        if (numreqs > xprt->stat.max_slots)
                xprt->stat.max_slots = numreqs;
        xprt->stat.sends++;
        xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
        xprt->stat.bklog_u += xprt->backlog.qlen;
        xprt->stat.sending_u += xprt->sending.qlen;
        xprt->stat.pending_u += xprt->pending.qlen;

        /* Don't race with disconnect */
        if (!xprt_connected(xprt))
                task->tk_status = -ENOTCONN;
        else {
                /*
                 * Sleep on the pending queue since
                 * we're expecting a reply.
                 */
                if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
                        rpc_sleep_on(&xprt->pending, task, xprt_timer);
                req->rq_connect_cookie = xprt->connect_cookie;
        }
        spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
        set_bit(XPRT_CONGESTED, &xprt->state);
        rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
        if (rpc_wake_up_next(&xprt->backlog) == NULL)
                clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
        bool ret = false;

        if (!test_bit(XPRT_CONGESTED, &xprt->state))
                goto out;
        spin_lock(&xprt->reserve_lock);
        /* Re-test under the lock to close the race with the waker */
        if (test_bit(XPRT_CONGESTED, &xprt->state)) {
                rpc_sleep_on(&xprt->backlog, task, NULL);
                ret = true;
        }
        spin_unlock(&xprt->reserve_lock);
out:
        return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
        struct rpc_rqst *req = ERR_PTR(-EAGAIN);

        if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
                goto out;
        req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
        if (req != NULL)
                goto out;
        atomic_dec(&xprt->num_reqs);
        req = ERR_PTR(-ENOMEM);
out:
        return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
                kfree(req);
                return true;
        }
        return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req;

        spin_lock(&xprt->reserve_lock);
        if (!list_empty(&xprt->free)) {
                req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
                goto out_init_req;
        }
        req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
        if (!IS_ERR(req))
                goto out_init_req;
        switch (PTR_ERR(req)) {
        case -ENOMEM:
                dprintk("RPC: dynamic allocation of request slot "
                                "failed! Retrying\n");
                task->tk_status = -ENOMEM;
                break;
        case -EAGAIN:
                xprt_add_backlog(xprt, task);
                dprintk("RPC: waiting for request slot\n");
                /* fall through */
        default:
                task->tk_status = -EAGAIN;
        }
        spin_unlock(&xprt->reserve_lock);
        return;
out_init_req:
        task->tk_status = 0;
        task->tk_rqstp = req;
        xprt_request_init(task, xprt);
        spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
        /* Note: grabbing the xprt_lock_write() ensures that we throttle
         * new slot allocation if the transport is congested (i.e. when
         * reconnecting a stream transport or when out of socket write
         * buffer space).
         */
        if (xprt_lock_write(xprt, task)) {
                xprt_alloc_slot(xprt, task);
                xprt_release_write(xprt, task);
        }
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

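/*
 * Illustrative note: a datagram transport can point its ->alloc_slot at
 * xprt_alloc_slot() directly, while a stream transport that wants the
 * write-throttling described above would use xprt_lock_and_alloc_slot().
 * The ops instance below is a hypothetical sketch.
 *
 *      static struct rpc_xprt_ops example_stream_ops = {
 *              .alloc_slot     = xprt_lock_and_alloc_slot,
 *              ...
 *      };
 */
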
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        spin_lock(&xprt->reserve_lock);
        if (!xprt_dynamic_free_slot(xprt, req)) {
                memset(req, 0, sizeof(*req));   /* mark unused */
                list_add(&req->rq_list, &xprt->free);
        }
        xprt_wake_up_backlog(xprt);
        spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
        struct rpc_rqst *req;
        while (!list_empty(&xprt->free)) {
                req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
                kfree(req);
        }
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
                unsigned int num_prealloc,
                unsigned int max_alloc)
{
        struct rpc_xprt *xprt;
        struct rpc_rqst *req;
        int i;

        xprt = kzalloc(size, GFP_KERNEL);
        if (xprt == NULL)
                goto out;

        xprt_init(xprt, net);

        for (i = 0; i < num_prealloc; i++) {
                req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
                if (!req)
                        goto out_free;
                list_add(&req->rq_list, &xprt->free);
        }
        if (max_alloc > num_prealloc)
                xprt->max_reqs = max_alloc;
        else
                xprt->max_reqs = num_prealloc;
        xprt->min_reqs = num_prealloc;
        atomic_set(&xprt->num_reqs, num_prealloc);

        return xprt;

out_free:
        xprt_free(xprt);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

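/*
 * Illustrative sketch: a transport's setup routine embeds struct rpc_xprt
 * in its own private structure and lets xprt_alloc() size the allocation
 * and prefill the slot table. The structure name and slot counts below are
 * hypothetical.
 *
 *      struct example_xprt {
 *              struct rpc_xprt xprt;
 *              ...
 *      };
 *
 *      xprt = xprt_alloc(args->net, sizeof(struct example_xprt), 16, 128);
 *      if (xprt == NULL)
 *              return ERR_PTR(-ENOMEM);
 */
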
void xprt_free(struct rpc_xprt *xprt)
{
        put_net(xprt->xprt_net);
        xprt_free_all_slots(xprt);
        kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt;

        task->tk_status = 0;
        if (task->tk_rqstp != NULL)
                return;

        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        rcu_read_lock();
        xprt = rcu_dereference(task->tk_client->cl_xprt);
        if (!xprt_throttle_congested(xprt, task))
                xprt->ops->alloc_slot(xprt, task);
        rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt;

        task->tk_status = 0;
        if (task->tk_rqstp != NULL)
                return;

        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        rcu_read_lock();
        xprt = rcu_dereference(task->tk_client->cl_xprt);
        xprt->ops->alloc_slot(xprt, task);
        rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
        return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
        xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
        struct rpc_rqst *req = task->tk_rqstp;

        INIT_LIST_HEAD(&req->rq_list);
        req->rq_timeout = task->tk_client->cl_timeout->to_initval;
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
        req->rq_xid     = xprt_alloc_xid(xprt);
        req->rq_connect_cookie = xprt->connect_cookie - 1;
        req->rq_bytes_sent = 0;
        req->rq_snd_buf.len = 0;
        req->rq_snd_buf.buflen = 0;
        req->rq_rcv_buf.len = 0;
        req->rq_rcv_buf.buflen = 0;
        req->rq_release_snd_buf = NULL;
        xprt_reset_majortimeo(req);
        dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
                        req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
        struct rpc_xprt *xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        if (req == NULL) {
                if (task->tk_client) {
                        rcu_read_lock();
                        xprt = rcu_dereference(task->tk_client->cl_xprt);
                        if (xprt->snd_task == task)
                                xprt_release_write(xprt, task);
                        rcu_read_unlock();
                }
                return;
        }

        xprt = req->rq_xprt;
        if (task->tk_ops->rpc_count_stats != NULL)
                task->tk_ops->rpc_count_stats(task, task->tk_calldata);
        else if (task->tk_client)
                rpc_count_iostats(task, task->tk_client->cl_metrics);
        spin_lock_bh(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        if (xprt->ops->release_request)
                xprt->ops->release_request(task);
        if (!list_empty(&req->rq_list))
                list_del(&req->rq_list);
        xprt->last_used = jiffies;
        if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
                mod_timer(&xprt->timer,
                                xprt->last_used + xprt->idle_timeout);
        spin_unlock_bh(&xprt->transport_lock);
        if (req->rq_buffer)
                xprt->ops->buf_free(req->rq_buffer);
        if (req->rq_cred != NULL)
                put_rpccred(req->rq_cred);
        task->tk_rqstp = NULL;
        if (req->rq_release_snd_buf)
                req->rq_release_snd_buf(req);

        dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
        if (likely(!bc_prealloc(req)))
                xprt_free_slot(xprt, req);
        else
                xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
        atomic_set(&xprt->count, 1);

        spin_lock_init(&xprt->transport_lock);
        spin_lock_init(&xprt->reserve_lock);

        INIT_LIST_HEAD(&xprt->free);
        INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        spin_lock_init(&xprt->bc_pa_lock);
        INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

        xprt->last_used = jiffies;
        xprt->cwnd = RPC_INITCWND;
        xprt->bind_index = 0;

        rpc_init_wait_queue(&xprt->binding, "xprt_binding");
        rpc_init_wait_queue(&xprt->pending, "xprt_pending");
        rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
        rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

        xprt_init_xid(xprt);

        xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
        struct rpc_xprt *xprt;
        struct xprt_class *t;

        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (t->ident == args->ident) {
                        spin_unlock(&xprt_list_lock);
                        goto found;
                }
        }
        spin_unlock(&xprt_list_lock);
        dprintk("RPC: transport (%d) not supported\n", args->ident);
        return ERR_PTR(-EIO);

found:
        xprt = t->setup(args);
        if (IS_ERR(xprt)) {
                dprintk("RPC: xprt_create_transport: failed, %ld\n",
                                -PTR_ERR(xprt));
                goto out;
        }
        if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
                xprt->idle_timeout = 0;
        INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
        if (xprt_has_timer(xprt))
                setup_timer(&xprt->timer, xprt_init_autodisconnect,
                            (unsigned long)xprt);
        else
                init_timer(&xprt->timer);

        if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
                xprt_destroy(xprt);
                return ERR_PTR(-EINVAL);
        }
        xprt->servername = kstrdup(args->servername, GFP_KERNEL);
        if (xprt->servername == NULL) {
                xprt_destroy(xprt);
                return ERR_PTR(-ENOMEM);
        }

        dprintk("RPC: created transport %p with %u slots\n", xprt,
                        xprt->max_reqs);
out:
        return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
        dprintk("RPC: destroying transport %p\n", xprt);
        del_timer_sync(&xprt->timer);

        rpc_destroy_wait_queue(&xprt->binding);
        rpc_destroy_wait_queue(&xprt->pending);
        rpc_destroy_wait_queue(&xprt->sending);
        rpc_destroy_wait_queue(&xprt->backlog);
        cancel_work_sync(&xprt->task_cleanup);
        kfree(xprt->servername);
        /*
         * Tear down transport state and free the rpc_xprt
         */
        xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
        if (atomic_dec_and_test(&xprt->count))
                xprt_destroy(xprt);
}