/*
 * linux/net/sunrpc/xprt.c
 *
 * This is a generic RPC call interface supporting congestion avoidance,
 * and asynchronous calls.
 *
 * The interface works like this:
 *
 * - When a process places a call, it allocates a request slot if
 *   one is available. Otherwise, it sleeps on the backlog queue
 *   (xprt_reserve).
 * - Next, the caller puts together the RPC message, stuffs it into
 *   the request struct, and calls xprt_transmit().
 * - xprt_transmit sends the message and installs the caller on the
 *   transport's wait list. At the same time, it installs a timer that
 *   is run after the packet's timeout has expired.
 * - When a packet arrives, the data_ready handler walks the list of
 *   pending requests for that transport. If a matching XID is found, the
 *   caller is woken up, and the timer removed.
 * - When no reply arrives within the timeout interval, the timer is
 *   fired by the kernel and runs xprt_timer(). It either adjusts the
 *   timeout values (minor timeout) or wakes up the caller with a status
 *   of -ETIMEDOUT.
 * - When the caller receives a notification from RPC that a reply arrived,
 *   it should release the RPC slot, and process the reply.
 *   If the call timed out, it may choose to retry the operation by
 *   adjusting the initial timeout value, and simply calling rpc_call
 *   again.
 *
 * Support for async RPC is done through a set of RPC-specific scheduling
 * primitives that `transparently' work for processes as well as async
 * tasks that rely on callbacks.
 *
 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

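/*
 * Illustrative life cycle of a request, following the description above.
 * This is a sketch, not a literal call chain (the RPC scheduler in
 * sched.c drives these entry points through task states):
 *
 *      xprt_reserve(task);                 grab a slot, or sleep on ->backlog
 *      xprt_prepare_transmit(task);        serialize access to the transport
 *      xprt_transmit(task);                send; queue on ->recv; arm timeouts
 *       ... reply matched by XID via xprt_lookup_rqst() ...
 *      xprt_complete_rqst(task, copied);   wake the waiting caller
 *      xprt_release(task);                 return the slot, wake ->backlog
 */
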
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void do_xprt_reserve(struct rpc_task *);
static void xprt_connect_status(struct rpc_task *task);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 * - a reply is received and
 * - a full number of requests are outstanding and
 * - the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT           (8U)
#define RPC_CWNDSCALE           (1U << RPC_CWNDSHIFT)
#define RPC_INITCWND            RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)       ((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)

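/*
 * Worked example of the fixed-point arithmetic above (illustrative
 * numbers only): with RPC_CWNDSHIFT = 8, one request's worth of window
 * is RPC_CWNDSCALE = 256. A transport created with max_reqs = 16 has
 * RPC_MAXCWND = 16 << 8 = 4096, i.e. at most 16 requests in flight.
 * Each request holding congestion state adds RPC_CWNDSCALE to
 * xprt->cong, so RPCXPRT_CONGESTED() trips once cong catches up with
 * cwnd.
 */
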
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:           transport successfully registered
 * -EEXIST:     transport already registered
 * -EINVAL:     transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = -EEXIST;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                /* don't register the same transport class twice */
                if (t->ident == transport->ident)
                        goto out;
        }

        list_add_tail(&transport->list, &xprt_list);
        printk(KERN_INFO "RPC: Registered %s transport module.\n",
               transport->name);
        result = 0;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

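/*
 * Usage sketch for a transport module (hypothetical names; see
 * net/sunrpc/xprtsock.c for the real in-tree registrations):
 *
 *      static struct xprt_class example_transport = {
 *              .list   = LIST_HEAD_INIT(example_transport.list),
 *              .name   = "example",
 *              .owner  = THIS_MODULE,
 *              .ident  = 0,                    // unique transport ident
 *              .setup  = xs_setup_example,     // hypothetical setup routine
 *      };
 *
 * The module calls xprt_register_transport(&example_transport) from its
 * init routine and xprt_unregister_transport() on exit.
 */
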
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:           transport successfully unregistered
 * -ENOENT:     transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (t == transport) {
                        printk(KERN_INFO
                               "RPC: Unregistered %s transport module.\n",
                               transport->name);
                        list_del_init(&transport->list);
                        goto out;
                }
        }
        result = -ENOENT;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:           transport successfully loaded
 * -ENOENT:     transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
        struct xprt_class *t;
        char module_name[sizeof t->name + 5];
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (strcmp(t->name, transport_name) == 0) {
                        spin_unlock(&xprt_list_lock);
                        goto out;
                }
        }
        spin_unlock(&xprt_list_lock);
        strcpy(module_name, "xprt");
        strncat(module_name, transport_name, sizeof t->name);
        result = request_module(module_name);
out:
        return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

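/*
 * Note that the requested module name is simply "xprt" with the
 * transport name appended: a transport_name of "rdma", for example,
 * requests the module "xprtrdma".
 */
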
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        return 1;
                if (task == NULL)
                        return 0;
                goto out_sleep;
        }
        xprt->snd_task = task;
        if (req) {
                req->rq_bytes_sent = 0;
                req->rq_ntrans++;
        }
        return 1;

out_sleep:
        dprintk("RPC: %5u failed to lock transport %p\n",
                        task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        if (req && req->rq_ntrans)
                rpc_sleep_on(&xprt->resend, task, NULL);
        else
                rpc_sleep_on(&xprt->sending, task, NULL);
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

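/*
 * The XPRT_LOCKED bit acts as the write lock here: the task that sets it
 * owns xprt->snd_task until the lock is released. A task that loses the
 * race has its status set to -EAGAIN and sleeps on ->resend if it has
 * transmitted at least once (rq_ntrans != 0), otherwise on ->sending.
 */
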
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
        xprt->snd_task = NULL;
        if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
                smp_mb__before_clear_bit();
                clear_bit(XPRT_LOCKED, &xprt->state);
                smp_mb__after_clear_bit();
        } else
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        return 1;
                goto out_sleep;
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
                if (req) {
                        req->rq_bytes_sent = 0;
                        req->rq_ntrans++;
                }
                return 1;
        }
        xprt_clear_locked(xprt);
out_sleep:
        dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        if (req && req->rq_ntrans)
                rpc_sleep_on(&xprt->resend, task, NULL);
        else
                rpc_sleep_on(&xprt->sending, task, NULL);
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        int retval;

        spin_lock_bh(&xprt->transport_lock);
        retval = xprt->ops->reserve_xprt(task);
        spin_unlock_bh(&xprt->transport_lock);
        return retval;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
        struct rpc_task *task;
        struct rpc_rqst *req;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;

        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
                task = rpc_wake_up_next(&xprt->sending);
                if (!task)
                        goto out_unlock;
        }

        req = task->tk_rqstp;
        xprt->snd_task = task;
        if (req) {
                req->rq_bytes_sent = 0;
                req->rq_ntrans++;
        }
        return;

out_unlock:
        xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
        struct rpc_task *task;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        if (RPCXPRT_CONGESTED(xprt))
                goto out_unlock;
        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
                task = rpc_wake_up_next(&xprt->sending);
                if (!task)
                        goto out_unlock;
        }
        if (__xprt_get_cong(xprt, task)) {
                struct rpc_rqst *req = task->tk_rqstp;
                xprt->snd_task = task;
                if (req) {
                        req->rq_bytes_sent = 0;
                        req->rq_ntrans++;
                }
                return;
        }
out_unlock:
        xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next(xprt);
        }
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next_cong(xprt);
        }
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        spin_lock_bh(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_cong)
                return 1;
        dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
                        task->tk_pid, xprt->cong, xprt->cwnd);
        if (RPCXPRT_CONGESTED(xprt))
                return 0;
        req->rq_cong = 1;
        xprt->cong += RPC_CWNDSCALE;
        return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (!req->rq_cong)
                return;
        req->rq_cong = 0;
        xprt->cong -= RPC_CWNDSCALE;
        __xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
        __xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = task->tk_xprt;
        unsigned long cwnd = xprt->cwnd;

        if (result >= 0 && cwnd <= xprt->cong) {
                /* The (cwnd >> 1) term makes sure
                 * the result gets rounded properly. */
                cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
                if (cwnd > RPC_MAXCWND(xprt))
                        cwnd = RPC_MAXCWND(xprt);
                __xprt_lock_write_next_cong(xprt);
        } else if (result == -ETIMEDOUT) {
                cwnd >>= 1;
                if (cwnd < RPC_CWNDSCALE)
                        cwnd = RPC_CWNDSCALE;
        }
        dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
                        xprt->cong, xprt->cwnd, cwnd);
        xprt->cwnd = cwnd;
        __xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

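/*
 * Worked example of the additive increase above (illustrative numbers):
 * with RPC_CWNDSCALE = 256 and cwnd = 512 (two slots), a timely reply
 * adds (256 * 256 + 256) / 512 = 128, giving cwnd = 640, while a
 * timeout halves cwnd to 256, the one-slot floor.
 */
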
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
        if (status < 0)
                rpc_wake_up_status(&xprt->pending, status);
        else
                rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        task->tk_timeout = req->rq_timeout;
        rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
        if (unlikely(xprt->shutdown))
                return;

        spin_lock_bh(&xprt->transport_lock);
        if (xprt->snd_task) {
                dprintk("RPC: write space: waking waiting task on "
                                "xprt %p\n", xprt);
                rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
        }
        spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
        task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
        int timer = task->tk_msg.rpc_proc->p_timer;
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rtt *rtt = clnt->cl_rtt;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long max_timeout = clnt->cl_timeout->to_maxval;

        task->tk_timeout = rpc_calc_rto(rtt, timer);
        task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
        if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
                task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

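/*
 * Example of the backoff above (illustrative numbers): if the estimator
 * returns rpc_calc_rto() = HZ/2 and rpc_ntimeo() plus rq_retries sums
 * to 2, the request times out after (HZ/2) << 2 = 2*HZ, subject to the
 * to_maxval cap.
 */
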
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

        req->rq_majortimeo = req->rq_timeout;
        if (to->to_exponential)
                req->rq_majortimeo <<= to->to_retries;
        else
                req->rq_majortimeo += to->to_increment * to->to_retries;
        if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
                req->rq_majortimeo = to->to_maxval;
        req->rq_majortimeo += jiffies;
}

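/*
 * Example (illustrative values, assuming rq_timeout is still at
 * to_initval): with to_initval = 5 * HZ and to_retries = 3, an
 * exponential policy yields a major timeout of 5s << 3 = 40s from now,
 * while a linear policy with to_increment = 5 * HZ yields
 * 5s + 3 * 5s = 20s; both are clamped to to_maxval before jiffies is
 * added.
 */
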
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
        int status = 0;

        if (time_before(jiffies, req->rq_majortimeo)) {
                if (to->to_exponential)
                        req->rq_timeout <<= 1;
                else
                        req->rq_timeout += to->to_increment;
                if (to->to_maxval && req->rq_timeout >= to->to_maxval)
                        req->rq_timeout = to->to_maxval;
                req->rq_retries++;
        } else {
                req->rq_timeout = to->to_initval;
                req->rq_retries = 0;
                xprt_reset_majortimeo(req);
                /* Reset the RTT counters == "slow start" */
                spin_lock_bh(&xprt->transport_lock);
                rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
                spin_unlock_bh(&xprt->transport_lock);
                status = -ETIMEDOUT;
        }

        if (req->rq_timeout == 0) {
                printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
                req->rq_timeout = 5 * HZ;
        }
        return status;
}

static void xprt_autoclose(struct work_struct *work)
{
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);

        xprt->ops->close(xprt);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
        dprintk("RPC: disconnected transport %p\n", xprt);
        spin_lock_bh(&xprt->transport_lock);
        xprt_clear_connected(xprt);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
        spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock_bh(&xprt->transport_lock);
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
        spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock_bh(&xprt->transport_lock);
        if (cookie != xprt->connect_cookie)
                goto out;
        if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
                goto out;
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
        spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *)data;

        spin_lock(&xprt->transport_lock);
        if (!list_empty(&xprt->recv) || xprt->shutdown)
                goto out_abort;
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                goto out_abort;
        spin_unlock(&xprt->transport_lock);
        if (xprt_connecting(xprt))
                xprt_release_write(xprt, NULL);
        else
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
        return;
out_abort:
        spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
                        xprt, (xprt_connected(xprt) ? "is" : "is not"));

        if (!xprt_bound(xprt)) {
                task->tk_status = -EAGAIN;
                return;
        }
        if (!xprt_lock_write(xprt, task))
                return;
        if (xprt_connected(xprt))
                xprt_release_write(xprt, task);
        else {
                if (task->tk_rqstp)
                        task->tk_rqstp->rq_bytes_sent = 0;

                task->tk_timeout = xprt->connect_timeout;
                rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
                xprt->stat.connect_start = jiffies;
                xprt->ops->connect(task);
        }
        return;
}

static void xprt_connect_status(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        if (task->tk_status == 0) {
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
                dprintk("RPC: %5u xprt_connect_status: connection established\n",
                                task->tk_pid);
                return;
        }

        switch (task->tk_status) {
        case -EAGAIN:
                dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
                break;
        case -ETIMEDOUT:
                dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
                                "out\n", task->tk_pid);
                break;
        default:
                dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
                                "server %s\n", task->tk_pid, -task->tk_status,
                                task->tk_client->cl_server);
                xprt_release_write(xprt, task);
                task->tk_status = -EIO;
        }
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
        struct list_head *pos;

        list_for_each(pos, &xprt->recv) {
                struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
                if (entry->rq_xid == xid)
                        return entry;
        }

        dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
                        ntohl(xid));
        xprt->stat.bad_xids++;
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
 */
void xprt_update_rtt(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_rtt *rtt = task->tk_client->cl_rtt;
        unsigned timer = task->tk_msg.rpc_proc->p_timer;

        if (timer) {
                if (req->rq_ntrans == 1)
                        rpc_update_rtt(rtt, timer,
                                        (long)jiffies - req->rq_xtime);
                rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
        }
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
                        task->tk_pid, ntohl(req->rq_xid), copied);

        xprt->stat.recvs++;
        task->tk_rtt = (long)jiffies - req->rq_xtime;

        list_del_init(&req->rq_list);
        req->rq_private_buf.len = copied;
        /* Ensure all writes are done before we update req->rq_received */
        smp_wmb();
        req->rq_received = copied;
        rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (task->tk_status != -ETIMEDOUT)
                return;
        dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

        spin_lock_bh(&xprt->transport_lock);
        if (!req->rq_received) {
                if (xprt->ops->timer)
                        xprt->ops->timer(task);
        } else
                task->tk_status = 0;
        spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        int err = 0;

        dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

        spin_lock_bh(&xprt->transport_lock);
        if (req->rq_received && !req->rq_bytes_sent) {
                err = req->rq_received;
                goto out_unlock;
        }
        if (!xprt->ops->reserve_xprt(task))
                err = -EAGAIN;
out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
        return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
        xprt_release_write(task->tk_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        int status;

        dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

        if (!req->rq_received) {
                if (list_empty(&req->rq_list)) {
                        spin_lock_bh(&xprt->transport_lock);
                        /* Update the softirq receive buffer */
                        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                                        sizeof(req->rq_private_buf));
                        /* Add request to the receive list */
                        list_add_tail(&req->rq_list, &xprt->recv);
                        spin_unlock_bh(&xprt->transport_lock);
                        xprt_reset_majortimeo(req);
                        /* Turn off autodisconnect */
                        del_singleshot_timer_sync(&xprt->timer);
                }
        } else if (!req->rq_bytes_sent)
                return;

        req->rq_connect_cookie = xprt->connect_cookie;
        req->rq_xtime = jiffies;
        status = xprt->ops->send_request(task);
        if (status != 0) {
                task->tk_status = status;
                return;
        }

        dprintk("RPC: %5u xmit complete\n", task->tk_pid);
        spin_lock_bh(&xprt->transport_lock);

        xprt->ops->set_retrans_timeout(task);

        xprt->stat.sends++;
        xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
        xprt->stat.bklog_u += xprt->backlog.qlen;

        /* Don't race with disconnect */
        if (!xprt_connected(xprt))
                task->tk_status = -ENOTCONN;
        else if (!req->rq_received)
                rpc_sleep_on(&xprt->pending, task, xprt_timer);
        spin_unlock_bh(&xprt->transport_lock);
}

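/*
 * Note on the retransmission checks in xprt_transmit(): a request that is
 * already on xprt->recv is a retransmission, so it is not queued a second
 * time; and if a reply has already arrived (rq_received != 0) while none
 * of the current send is outstanding (rq_bytes_sent == 0), the send is
 * skipped entirely.
 */
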
static inline void do_xprt_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = 0;
        if (task->tk_rqstp)
                return;
        if (!list_empty(&xprt->free)) {
                struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del_init(&req->rq_list);
                task->tk_rqstp = req;
                xprt_request_init(task, xprt);
                return;
        }
        dprintk("RPC: waiting for request slot\n");
        task->tk_status = -EAGAIN;
        task->tk_timeout = 0;
        rpc_sleep_on(&xprt->backlog, task, NULL);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = -EIO;
        spin_lock(&xprt->reserve_lock);
        do_xprt_reserve(task);
        spin_unlock(&xprt->reserve_lock);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
        return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
        xprt->xid = net_random();
}

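/*
 * XIDs start at a random value (net_random() at transport creation time)
 * and simply increment for each slot handed out; incoming replies are
 * matched back to their request by XID in xprt_lookup_rqst().
 */
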
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
        struct rpc_rqst *req = task->tk_rqstp;

        req->rq_timeout = task->tk_client->cl_timeout->to_initval;
        req->rq_task = task;
        req->rq_xprt = xprt;
        req->rq_buffer = NULL;
        req->rq_xid = xprt_alloc_xid(xprt);
        req->rq_release_snd_buf = NULL;
        xprt_reset_majortimeo(req);
        dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
                        req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req;

        if (!(req = task->tk_rqstp))
                return;
        rpc_count_iostats(task);
        spin_lock_bh(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        if (xprt->ops->release_request)
                xprt->ops->release_request(task);
        if (!list_empty(&req->rq_list))
                list_del(&req->rq_list);
        xprt->last_used = jiffies;
        if (list_empty(&xprt->recv))
                mod_timer(&xprt->timer,
                                xprt->last_used + xprt->idle_timeout);
        spin_unlock_bh(&xprt->transport_lock);
        xprt->ops->buf_free(req->rq_buffer);
        task->tk_rqstp = NULL;
        if (req->rq_release_snd_buf)
                req->rq_release_snd_buf(req);
        memset(req, 0, sizeof(*req));   /* mark unused */

        dprintk("RPC: %5u release request %p\n", task->tk_pid, req);

        spin_lock(&xprt->reserve_lock);
        list_add(&req->rq_list, &xprt->free);
        rpc_wake_up_next(&xprt->backlog);
        spin_unlock(&xprt->reserve_lock);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
        struct rpc_xprt *xprt;
        struct rpc_rqst *req;
        struct xprt_class *t;

        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (t->ident == args->ident) {
                        spin_unlock(&xprt_list_lock);
                        goto found;
                }
        }
        spin_unlock(&xprt_list_lock);
        printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
        return ERR_PTR(-EIO);

found:
        xprt = t->setup(args);
        if (IS_ERR(xprt)) {
                dprintk("RPC: xprt_create_transport: failed, %ld\n",
                                -PTR_ERR(xprt));
                return xprt;
        }

        kref_init(&xprt->kref);
        spin_lock_init(&xprt->transport_lock);
        spin_lock_init(&xprt->reserve_lock);

        INIT_LIST_HEAD(&xprt->free);
        INIT_LIST_HEAD(&xprt->recv);
        INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
        setup_timer(&xprt->timer, xprt_init_autodisconnect,
                        (unsigned long)xprt);
        xprt->last_used = jiffies;
        xprt->cwnd = RPC_INITCWND;
        xprt->bind_index = 0;

        rpc_init_wait_queue(&xprt->binding, "xprt_binding");
        rpc_init_wait_queue(&xprt->pending, "xprt_pending");
        rpc_init_wait_queue(&xprt->sending, "xprt_sending");
        rpc_init_wait_queue(&xprt->resend, "xprt_resend");
        rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

        /* initialize free list */
        for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
                list_add(&req->rq_list, &xprt->free);

        xprt_init_xid(xprt);

        dprintk("RPC: created transport %p with %u slots\n", xprt,
                        xprt->max_reqs);

        return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
        struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

        dprintk("RPC: destroying transport %p\n", xprt);
        xprt->shutdown = 1;
        del_timer_sync(&xprt->timer);

        rpc_destroy_wait_queue(&xprt->binding);
        rpc_destroy_wait_queue(&xprt->pending);
        rpc_destroy_wait_queue(&xprt->sending);
        rpc_destroy_wait_queue(&xprt->resend);
        rpc_destroy_wait_queue(&xprt->backlog);
        /*
         * Tear down transport state and free the rpc_xprt
         */
        xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
        kref_put(&xprt->kref, xprt_destroy);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
        kref_get(&xprt->kref);
        return xprt;
}