SUNRPC: add EXPORT_SYMBOL_GPL for generic transport functions
1 /*
2 * linux/net/sunrpc/xprt.c
3 *
4 * This is a generic RPC call interface supporting congestion avoidance,
5 * and asynchronous calls.
6 *
7 * The interface works like this:
8 *
9 * - When a process places a call, it allocates a request slot if
10 * one is available. Otherwise, it sleeps on the backlog queue
11 * (xprt_reserve).
12 * - Next, the caller puts together the RPC message, stuffs it into
13 * the request struct, and calls xprt_transmit().
14 * - xprt_transmit sends the message and installs the caller on the
15 * transport's wait list. At the same time, it installs a timer that
16 * is run after the packet's timeout has expired.
17 * - When a packet arrives, the data_ready handler walks the list of
18 * pending requests for that transport. If a matching XID is found, the
19 * caller is woken up, and the timer removed.
20 * - When no reply arrives within the timeout interval, the timer is
21 * fired by the kernel and runs xprt_timer(). It either adjusts the
22 * timeout values (minor timeout) or wakes up the caller with a status
23 * of -ETIMEDOUT.
24 * - When the caller receives a notification from RPC that a reply arrived,
25 * it should release the RPC slot, and process the reply.
26 * If the call timed out, it may choose to retry the operation by
27 * adjusting the initial timeout value, and simply calling rpc_call
28 * again.
29 *
30 * Support for async RPC is done through a set of RPC-specific scheduling
31 * primitives that `transparently' work for processes as well as async
32 * tasks that rely on callbacks.
33 *
34 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
35 *
36 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
37 */
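
/*
 * A rough sketch of the synchronous path described above, from the
 * point of view of a single request (simplified; the real sequencing
 * is driven by the RPC client state machine in clnt.c, and error
 * handling is omitted):
 *
 *	xprt_reserve(task);		slot allocated, or sleep on ->backlog
 *	xprt_prepare_transmit(task);	take the transport write lock
 *	xprt_transmit(task);		queue on ->recv, send, sleep on ->pending
 *	...				reply: xprt_complete_rqst() wakes the task;
 *					no reply: xprt_timer() sets -ETIMEDOUT
 *	xprt_release(task);		return the slot and wake the backlog
 */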
38
39 #include <linux/module.h>
40
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/workqueue.h>
44 #include <linux/net.h>
45
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/sunrpc/metrics.h>
48
49 /*
50 * Local variables
51 */
52
53 #ifdef RPC_DEBUG
54 # define RPCDBG_FACILITY RPCDBG_XPRT
55 #endif
56
57 /*
58 * Local functions
59 */
60 static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
61 static inline void do_xprt_reserve(struct rpc_task *);
62 static void xprt_connect_status(struct rpc_task *task);
63 static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
64
65 /*
66 * The transport code maintains an estimate on the maximum number of out-
67 * standing RPC requests, using a smoothed version of the congestion
68 * avoidance implemented in 44BSD. This is basically the Van Jacobson
69 * congestion algorithm: If a retransmit occurs, the congestion window is
70 * halved; otherwise, it is incremented by 1/cwnd when
71 *
72 * - a reply is received and
73 * - a full number of requests are outstanding and
74 * - the congestion window hasn't been updated recently.
75 */
76 #define RPC_CWNDSHIFT (8U)
77 #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)
78 #define RPC_INITCWND RPC_CWNDSCALE
79 #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
80
81 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
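
/*
 * Worked example with the constants above (one request slot is
 * RPC_CWNDSCALE == 256 congestion units): if cwnd == 512 and a reply
 * arrives while the window is full, xprt_adjust_cwnd() below grows
 * cwnd by (256 * 256 + 256) / 512 == 128 units, i.e. roughly 1/cwnd
 * of a slot; an -ETIMEDOUT result instead halves cwnd to 256, the
 * floor being a single slot (RPC_CWNDSCALE).
 */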
82
83 /**
84 * xprt_reserve_xprt - serialize write access to transports
85 * @task: task that is requesting access to the transport
86 *
87 * This prevents mixing the payload of separate requests, and prevents
88 * transport connects from colliding with writes. No congestion control
89 * is provided.
90 */
91 int xprt_reserve_xprt(struct rpc_task *task)
92 {
93 struct rpc_xprt *xprt = task->tk_xprt;
94 struct rpc_rqst *req = task->tk_rqstp;
95
96 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
97 if (task == xprt->snd_task)
98 return 1;
99 if (task == NULL)
100 return 0;
101 goto out_sleep;
102 }
103 xprt->snd_task = task;
104 if (req) {
105 req->rq_bytes_sent = 0;
106 req->rq_ntrans++;
107 }
108 return 1;
109
110 out_sleep:
111 dprintk("RPC: %5u failed to lock transport %p\n",
112 task->tk_pid, xprt);
113 task->tk_timeout = 0;
114 task->tk_status = -EAGAIN;
115 if (req && req->rq_ntrans)
116 rpc_sleep_on(&xprt->resend, task, NULL, NULL);
117 else
118 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
119 return 0;
120 }
121 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
122
123 static void xprt_clear_locked(struct rpc_xprt *xprt)
124 {
125 xprt->snd_task = NULL;
126 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
127 smp_mb__before_clear_bit();
128 clear_bit(XPRT_LOCKED, &xprt->state);
129 smp_mb__after_clear_bit();
130 } else
131 queue_work(rpciod_workqueue, &xprt->task_cleanup);
132 }
133
134 /**
135 * xprt_reserve_xprt_cong - serialize write access to transports
136 * @task: task that is requesting access to the transport
137 *
138 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
139 * integrated into the decision of whether a request is allowed to be
140 * woken up and given access to the transport.
141 */
142 int xprt_reserve_xprt_cong(struct rpc_task *task)
143 {
144 struct rpc_xprt *xprt = task->tk_xprt;
145 struct rpc_rqst *req = task->tk_rqstp;
146
147 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
148 if (task == xprt->snd_task)
149 return 1;
150 goto out_sleep;
151 }
152 if (__xprt_get_cong(xprt, task)) {
153 xprt->snd_task = task;
154 if (req) {
155 req->rq_bytes_sent = 0;
156 req->rq_ntrans++;
157 }
158 return 1;
159 }
160 xprt_clear_locked(xprt);
161 out_sleep:
162 dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
163 task->tk_timeout = 0;
164 task->tk_status = -EAGAIN;
165 if (req && req->rq_ntrans)
166 rpc_sleep_on(&xprt->resend, task, NULL, NULL);
167 else
168 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
169 return 0;
170 }
171 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
172
173 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
174 {
175 int retval;
176
177 spin_lock_bh(&xprt->transport_lock);
178 retval = xprt->ops->reserve_xprt(task);
179 spin_unlock_bh(&xprt->transport_lock);
180 return retval;
181 }
182
183 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
184 {
185 struct rpc_task *task;
186 struct rpc_rqst *req;
187
188 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
189 return;
190
191 task = rpc_wake_up_next(&xprt->resend);
192 if (!task) {
193 task = rpc_wake_up_next(&xprt->sending);
194 if (!task)
195 goto out_unlock;
196 }
197
198 req = task->tk_rqstp;
199 xprt->snd_task = task;
200 if (req) {
201 req->rq_bytes_sent = 0;
202 req->rq_ntrans++;
203 }
204 return;
205
206 out_unlock:
207 xprt_clear_locked(xprt);
208 }
209
210 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
211 {
212 struct rpc_task *task;
213
214 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
215 return;
216 if (RPCXPRT_CONGESTED(xprt))
217 goto out_unlock;
218 task = rpc_wake_up_next(&xprt->resend);
219 if (!task) {
220 task = rpc_wake_up_next(&xprt->sending);
221 if (!task)
222 goto out_unlock;
223 }
224 if (__xprt_get_cong(xprt, task)) {
225 struct rpc_rqst *req = task->tk_rqstp;
226 xprt->snd_task = task;
227 if (req) {
228 req->rq_bytes_sent = 0;
229 req->rq_ntrans++;
230 }
231 return;
232 }
233 out_unlock:
234 xprt_clear_locked(xprt);
235 }
236
237 /**
238 * xprt_release_xprt - allow other requests to use a transport
239 * @xprt: transport with other tasks potentially waiting
240 * @task: task that is releasing access to the transport
241 *
242 * Note that "task" can be NULL. No congestion control is provided.
243 */
244 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
245 {
246 if (xprt->snd_task == task) {
247 xprt_clear_locked(xprt);
248 __xprt_lock_write_next(xprt);
249 }
250 }
251 EXPORT_SYMBOL_GPL(xprt_release_xprt);
252
253 /**
254 * xprt_release_xprt_cong - allow other requests to use a transport
255 * @xprt: transport with other tasks potentially waiting
256 * @task: task that is releasing access to the transport
257 *
258 * Note that "task" can be NULL. Another task is awoken to use the
259 * transport if the transport's congestion window allows it.
260 */
261 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
262 {
263 if (xprt->snd_task == task) {
264 xprt_clear_locked(xprt);
265 __xprt_lock_write_next_cong(xprt);
266 }
267 }
268 EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
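
/*
 * The exported helpers above are intended to be wired directly into a
 * transport's rpc_xprt_ops.  A minimal sketch for a hypothetical
 * datagram-style transport that wants Van Jacobson congestion control
 * (the ops table and its name are illustrative only):
 *
 *	static struct rpc_xprt_ops example_dgram_ops = {
 *		.reserve_xprt		= xprt_reserve_xprt_cong,
 *		.release_xprt		= xprt_release_xprt_cong,
 *		.release_request	= xprt_release_rqst_cong,
 *		.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
 *		...
 *	};
 *
 * A stream-style transport would typically plug in xprt_reserve_xprt,
 * xprt_release_xprt and xprt_set_retrans_timeout_def instead.
 */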
269
270 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
271 {
272 spin_lock_bh(&xprt->transport_lock);
273 xprt->ops->release_xprt(xprt, task);
274 spin_unlock_bh(&xprt->transport_lock);
275 }
276
277 /*
278 * Van Jacobson congestion avoidance. Check if the congestion window
279 * overflowed. Put the task to sleep if this is the case.
280 */
281 static int
282 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
283 {
284 struct rpc_rqst *req = task->tk_rqstp;
285
286 if (req->rq_cong)
287 return 1;
288 dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
289 task->tk_pid, xprt->cong, xprt->cwnd);
290 if (RPCXPRT_CONGESTED(xprt))
291 return 0;
292 req->rq_cong = 1;
293 xprt->cong += RPC_CWNDSCALE;
294 return 1;
295 }
296
297 /*
298 * Adjust the congestion window, and wake up the next task
299 * that has been sleeping due to congestion
300 */
301 static void
302 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
303 {
304 if (!req->rq_cong)
305 return;
306 req->rq_cong = 0;
307 xprt->cong -= RPC_CWNDSCALE;
308 __xprt_lock_write_next_cong(xprt);
309 }
310
311 /**
312 * xprt_release_rqst_cong - housekeeping when request is complete
313 * @task: RPC request that recently completed
314 *
315 * Useful for transports that require congestion control.
316 */
317 void xprt_release_rqst_cong(struct rpc_task *task)
318 {
319 __xprt_put_cong(task->tk_xprt, task->tk_rqstp);
320 }
321 EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
322
323 /**
324 * xprt_adjust_cwnd - adjust transport congestion window
325 * @task: recently completed RPC request used to adjust window
326 * @result: result code of completed RPC request
327 *
328 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
329 */
330 void xprt_adjust_cwnd(struct rpc_task *task, int result)
331 {
332 struct rpc_rqst *req = task->tk_rqstp;
333 struct rpc_xprt *xprt = task->tk_xprt;
334 unsigned long cwnd = xprt->cwnd;
335
336 if (result >= 0 && cwnd <= xprt->cong) {
337 /* The (cwnd >> 1) term makes sure
338 * the result gets rounded properly. */
339 cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
340 if (cwnd > RPC_MAXCWND(xprt))
341 cwnd = RPC_MAXCWND(xprt);
342 __xprt_lock_write_next_cong(xprt);
343 } else if (result == -ETIMEDOUT) {
344 cwnd >>= 1;
345 if (cwnd < RPC_CWNDSCALE)
346 cwnd = RPC_CWNDSCALE;
347 }
348 dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
349 xprt->cong, xprt->cwnd, cwnd);
350 xprt->cwnd = cwnd;
351 __xprt_put_cong(xprt, req);
352 }
353 EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
354
355 /**
356 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
357 * @xprt: transport with waiting tasks
358 * @status: result code to plant in each task before waking it
359 *
360 */
361 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
362 {
363 if (status < 0)
364 rpc_wake_up_status(&xprt->pending, status);
365 else
366 rpc_wake_up(&xprt->pending);
367 }
368 EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
369
370 /**
371 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
372 * @task: task to be put to sleep
373 *
374 */
375 void xprt_wait_for_buffer_space(struct rpc_task *task)
376 {
377 struct rpc_rqst *req = task->tk_rqstp;
378 struct rpc_xprt *xprt = req->rq_xprt;
379
380 task->tk_timeout = req->rq_timeout;
381 rpc_sleep_on(&xprt->pending, task, NULL, NULL);
382 }
383 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
384
385 /**
386 * xprt_write_space - wake the task waiting for transport output buffer space
387 * @xprt: transport with waiting tasks
388 *
389 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
390 */
391 void xprt_write_space(struct rpc_xprt *xprt)
392 {
393 if (unlikely(xprt->shutdown))
394 return;
395
396 spin_lock_bh(&xprt->transport_lock);
397 if (xprt->snd_task) {
398 dprintk("RPC: write space: waking waiting task on "
399 "xprt %p\n", xprt);
400 rpc_wake_up_task(xprt->snd_task);
401 }
402 spin_unlock_bh(&xprt->transport_lock);
403 }
404 EXPORT_SYMBOL_GPL(xprt_write_space);
405
406 /**
407 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
408 * @task: task whose timeout is to be set
409 *
410 * Set a request's retransmit timeout based on the transport's
411 * default timeout parameters. Used by transports that don't adjust
412 * the retransmit timeout based on round-trip time estimation.
413 */
414 void xprt_set_retrans_timeout_def(struct rpc_task *task)
415 {
416 task->tk_timeout = task->tk_rqstp->rq_timeout;
417 }
418 EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
419
420 /**
421 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
422 * @task: task whose timeout is to be set
423 *
424 * Set a request's retransmit timeout using the RTT estimator.
425 */
426 void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
427 {
428 int timer = task->tk_msg.rpc_proc->p_timer;
429 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
430 struct rpc_rqst *req = task->tk_rqstp;
431 unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;
432
433 task->tk_timeout = rpc_calc_rto(rtt, timer);
434 task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
435 if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
436 task->tk_timeout = max_timeout;
437 }
438 EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
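
/*
 * For example, if the RTT estimator's current RTO for this procedure
 * class is HZ / 4, the request has already been retransmitted twice
 * (rq_retries == 2) and no recent timeouts are recorded against the
 * class, the new timeout is (HZ / 4) << 2 == HZ, subject to the
 * to_maxval clamp above.
 */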
439
440 static void xprt_reset_majortimeo(struct rpc_rqst *req)
441 {
442 struct rpc_timeout *to = &req->rq_xprt->timeout;
443
444 req->rq_majortimeo = req->rq_timeout;
445 if (to->to_exponential)
446 req->rq_majortimeo <<= to->to_retries;
447 else
448 req->rq_majortimeo += to->to_increment * to->to_retries;
449 if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
450 req->rq_majortimeo = to->to_maxval;
451 req->rq_majortimeo += jiffies;
452 }
453
454 /**
455 * xprt_adjust_timeout - adjust timeout values for next retransmit
456 * @req: RPC request containing parameters to use for the adjustment
457 *
458 */
459 int xprt_adjust_timeout(struct rpc_rqst *req)
460 {
461 struct rpc_xprt *xprt = req->rq_xprt;
462 struct rpc_timeout *to = &xprt->timeout;
463 int status = 0;
464
465 if (time_before(jiffies, req->rq_majortimeo)) {
466 if (to->to_exponential)
467 req->rq_timeout <<= 1;
468 else
469 req->rq_timeout += to->to_increment;
470 if (to->to_maxval && req->rq_timeout >= to->to_maxval)
471 req->rq_timeout = to->to_maxval;
472 req->rq_retries++;
473 } else {
474 req->rq_timeout = to->to_initval;
475 req->rq_retries = 0;
476 xprt_reset_majortimeo(req);
477 /* Reset the RTT counters == "slow start" */
478 spin_lock_bh(&xprt->transport_lock);
479 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
480 spin_unlock_bh(&xprt->transport_lock);
481 status = -ETIMEDOUT;
482 }
483
484 if (req->rq_timeout == 0) {
485 printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
486 req->rq_timeout = 5 * HZ;
487 }
488 return status;
489 }
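
/*
 * Worked example: assume the transport's timeouts were set up with
 * xprt_set_timeout(&xprt->timeout, 5, 2 * HZ) (illustrative values),
 * giving to_initval = to_increment = 2s, to_retries = 5 and
 * to_maxval = 12s with linear backoff.  A fresh request starts with
 * rq_timeout = 2s and rq_majortimeo twelve seconds in the future;
 * each minor timeout adds 2s to rq_timeout (capped at 12s) and bumps
 * rq_retries, and once rq_majortimeo has passed the values are reset
 * to their initial state and xprt_adjust_timeout() returns -ETIMEDOUT.
 */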
490
491 static void xprt_autoclose(struct work_struct *work)
492 {
493 struct rpc_xprt *xprt =
494 container_of(work, struct rpc_xprt, task_cleanup);
495
496 xprt_disconnect(xprt);
497 xprt->ops->close(xprt);
498 xprt_release_write(xprt, NULL);
499 }
500
501 /**
502 * xprt_disconnect - mark a transport as disconnected
503 * @xprt: transport to flag for disconnect
504 *
505 */
506 void xprt_disconnect(struct rpc_xprt *xprt)
507 {
508 dprintk("RPC: disconnected transport %p\n", xprt);
509 spin_lock_bh(&xprt->transport_lock);
510 xprt_clear_connected(xprt);
511 xprt_wake_pending_tasks(xprt, -ENOTCONN);
512 spin_unlock_bh(&xprt->transport_lock);
513 }
514 EXPORT_SYMBOL_GPL(xprt_disconnect);
515
516 static void
517 xprt_init_autodisconnect(unsigned long data)
518 {
519 struct rpc_xprt *xprt = (struct rpc_xprt *)data;
520
521 spin_lock(&xprt->transport_lock);
522 if (!list_empty(&xprt->recv) || xprt->shutdown)
523 goto out_abort;
524 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
525 goto out_abort;
526 spin_unlock(&xprt->transport_lock);
527 if (xprt_connecting(xprt))
528 xprt_release_write(xprt, NULL);
529 else
530 queue_work(rpciod_workqueue, &xprt->task_cleanup);
531 return;
532 out_abort:
533 spin_unlock(&xprt->transport_lock);
534 }
535
536 /**
537 * xprt_connect - schedule a transport connect operation
538 * @task: RPC task that is requesting the connect
539 *
540 */
541 void xprt_connect(struct rpc_task *task)
542 {
543 struct rpc_xprt *xprt = task->tk_xprt;
544
545 dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
546 xprt, (xprt_connected(xprt) ? "is" : "is not"));
547
548 if (!xprt_bound(xprt)) {
549 task->tk_status = -EIO;
550 return;
551 }
552 if (!xprt_lock_write(xprt, task))
553 return;
554 if (xprt_connected(xprt))
555 xprt_release_write(xprt, task);
556 else {
557 if (task->tk_rqstp)
558 task->tk_rqstp->rq_bytes_sent = 0;
559
560 task->tk_timeout = xprt->connect_timeout;
561 rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
562 xprt->stat.connect_start = jiffies;
563 xprt->ops->connect(task);
564 }
565 return;
566 }
567
568 static void xprt_connect_status(struct rpc_task *task)
569 {
570 struct rpc_xprt *xprt = task->tk_xprt;
571
572 if (task->tk_status >= 0) {
573 xprt->stat.connect_count++;
574 xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
575 dprintk("RPC: %5u xprt_connect_status: connection established\n",
576 task->tk_pid);
577 return;
578 }
579
580 switch (task->tk_status) {
581 case -ECONNREFUSED:
582 case -ECONNRESET:
583 dprintk("RPC: %5u xprt_connect_status: server %s refused "
584 "connection\n", task->tk_pid,
585 task->tk_client->cl_server);
586 break;
587 case -ENOTCONN:
588 dprintk("RPC: %5u xprt_connect_status: connection broken\n",
589 task->tk_pid);
590 break;
591 case -ETIMEDOUT:
592 dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
593 "out\n", task->tk_pid);
594 break;
595 default:
596 dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
597 "server %s\n", task->tk_pid, -task->tk_status,
598 task->tk_client->cl_server);
599 xprt_release_write(xprt, task);
600 task->tk_status = -EIO;
601 }
602 }
603
604 /**
605 * xprt_lookup_rqst - find an RPC request corresponding to an XID
606 * @xprt: transport on which the original request was transmitted
607 * @xid: RPC XID of incoming reply
608 *
609 */
610 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
611 {
612 struct list_head *pos;
613
614 list_for_each(pos, &xprt->recv) {
615 struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
616 if (entry->rq_xid == xid)
617 return entry;
618 }
619
620 dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
621 ntohl(xid));
622 xprt->stat.bad_xids++;
623 return NULL;
624 }
625 EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
626
627 /**
628 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
629 * @task: RPC request that recently completed
630 *
631 */
632 void xprt_update_rtt(struct rpc_task *task)
633 {
634 struct rpc_rqst *req = task->tk_rqstp;
635 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
636 unsigned timer = task->tk_msg.rpc_proc->p_timer;
637
638 if (timer) {
639 if (req->rq_ntrans == 1)
640 rpc_update_rtt(rtt, timer,
641 (long)jiffies - req->rq_xtime);
642 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
643 }
644 }
645 EXPORT_SYMBOL_GPL(xprt_update_rtt);
646
647 /**
648 * xprt_complete_rqst - called when reply processing is complete
649 * @task: RPC request that recently completed
650 * @copied: actual number of bytes received from the transport
651 *
652 * Caller holds transport lock.
653 */
654 void xprt_complete_rqst(struct rpc_task *task, int copied)
655 {
656 struct rpc_rqst *req = task->tk_rqstp;
657
658 dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
659 task->tk_pid, ntohl(req->rq_xid), copied);
660
661 task->tk_xprt->stat.recvs++;
662 task->tk_rtt = (long)jiffies - req->rq_xtime;
663
664 list_del_init(&req->rq_list);
665 /* Ensure all writes are done before we update req->rq_received */
666 smp_wmb();
667 req->rq_received = req->rq_private_buf.len = copied;
668 rpc_wake_up_task(task);
669 }
670 EXPORT_SYMBOL_GPL(xprt_complete_rqst);
671
672 static void xprt_timer(struct rpc_task *task)
673 {
674 struct rpc_rqst *req = task->tk_rqstp;
675 struct rpc_xprt *xprt = req->rq_xprt;
676
677 dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
678
679 spin_lock(&xprt->transport_lock);
680 if (!req->rq_received) {
681 if (xprt->ops->timer)
682 xprt->ops->timer(task);
683 task->tk_status = -ETIMEDOUT;
684 }
685 task->tk_timeout = 0;
686 rpc_wake_up_task(task);
687 spin_unlock(&xprt->transport_lock);
688 }
689
690 /**
691 * xprt_prepare_transmit - reserve the transport before sending a request
692 * @task: RPC task about to send a request
693 *
694 */
695 int xprt_prepare_transmit(struct rpc_task *task)
696 {
697 struct rpc_rqst *req = task->tk_rqstp;
698 struct rpc_xprt *xprt = req->rq_xprt;
699 int err = 0;
700
701 dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
702
703 spin_lock_bh(&xprt->transport_lock);
704 if (req->rq_received && !req->rq_bytes_sent) {
705 err = req->rq_received;
706 goto out_unlock;
707 }
708 if (!xprt->ops->reserve_xprt(task)) {
709 err = -EAGAIN;
710 goto out_unlock;
711 }
712
713 if (!xprt_connected(xprt)) {
714 err = -ENOTCONN;
715 goto out_unlock;
716 }
717 out_unlock:
718 spin_unlock_bh(&xprt->transport_lock);
719 return err;
720 }
721
722 void xprt_end_transmit(struct rpc_task *task)
723 {
724 xprt_release_write(task->tk_xprt, task);
725 }
726
727 /**
728 * xprt_transmit - send an RPC request on a transport
729 * @task: controlling RPC task
730 *
731 * We have to copy the iovec because sendmsg fiddles with its contents.
732 */
733 void xprt_transmit(struct rpc_task *task)
734 {
735 struct rpc_rqst *req = task->tk_rqstp;
736 struct rpc_xprt *xprt = req->rq_xprt;
737 int status;
738
739 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
740
741 if (!req->rq_received) {
742 if (list_empty(&req->rq_list)) {
743 spin_lock_bh(&xprt->transport_lock);
744 /* Update the softirq receive buffer */
745 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
746 sizeof(req->rq_private_buf));
747 /* Add request to the receive list */
748 list_add_tail(&req->rq_list, &xprt->recv);
749 spin_unlock_bh(&xprt->transport_lock);
750 xprt_reset_majortimeo(req);
751 /* Turn off autodisconnect */
752 del_singleshot_timer_sync(&xprt->timer);
753 }
754 } else if (!req->rq_bytes_sent)
755 return;
756
757 status = xprt->ops->send_request(task);
758 if (status == 0) {
759 dprintk("RPC: %5u xmit complete\n", task->tk_pid);
760 spin_lock_bh(&xprt->transport_lock);
761
762 xprt->ops->set_retrans_timeout(task);
763
764 xprt->stat.sends++;
765 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
766 xprt->stat.bklog_u += xprt->backlog.qlen;
767
768 /* Don't race with disconnect */
769 if (!xprt_connected(xprt))
770 task->tk_status = -ENOTCONN;
771 else if (!req->rq_received)
772 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
773 spin_unlock_bh(&xprt->transport_lock);
774 return;
775 }
776
777 /* Note: at this point, task->tk_sleeping has not yet been set,
778 * hence there is no danger of the waking up task being put on
779 * schedq, and being picked up by a parallel run of rpciod().
780 */
781 task->tk_status = status;
782 if (status == -ECONNREFUSED)
783 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
784 }
785
786 static inline void do_xprt_reserve(struct rpc_task *task)
787 {
788 struct rpc_xprt *xprt = task->tk_xprt;
789
790 task->tk_status = 0;
791 if (task->tk_rqstp)
792 return;
793 if (!list_empty(&xprt->free)) {
794 struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
795 list_del_init(&req->rq_list);
796 task->tk_rqstp = req;
797 xprt_request_init(task, xprt);
798 return;
799 }
800 dprintk("RPC: waiting for request slot\n");
801 task->tk_status = -EAGAIN;
802 task->tk_timeout = 0;
803 rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
804 }
805
806 /**
807 * xprt_reserve - allocate an RPC request slot
808 * @task: RPC task requesting a slot allocation
809 *
810 * If no more slots are available, place the task on the transport's
811 * backlog queue.
812 */
813 void xprt_reserve(struct rpc_task *task)
814 {
815 struct rpc_xprt *xprt = task->tk_xprt;
816
817 task->tk_status = -EIO;
818 spin_lock(&xprt->reserve_lock);
819 do_xprt_reserve(task);
820 spin_unlock(&xprt->reserve_lock);
821 }
822
823 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
824 {
825 return xprt->xid++;
826 }
827
828 static inline void xprt_init_xid(struct rpc_xprt *xprt)
829 {
830 xprt->xid = net_random();
831 }
832
833 static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
834 {
835 struct rpc_rqst *req = task->tk_rqstp;
836
837 req->rq_timeout = xprt->timeout.to_initval;
838 req->rq_task = task;
839 req->rq_xprt = xprt;
840 req->rq_buffer = NULL;
841 req->rq_xid = xprt_alloc_xid(xprt);
842 req->rq_release_snd_buf = NULL;
843 xprt_reset_majortimeo(req);
844 dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
845 req, ntohl(req->rq_xid));
846 }
847
848 /**
849 * xprt_release - release an RPC request slot
850 * @task: task which is finished with the slot
851 *
852 */
853 void xprt_release(struct rpc_task *task)
854 {
855 struct rpc_xprt *xprt = task->tk_xprt;
856 struct rpc_rqst *req;
857
858 if (!(req = task->tk_rqstp))
859 return;
860 rpc_count_iostats(task);
861 spin_lock_bh(&xprt->transport_lock);
862 xprt->ops->release_xprt(xprt, task);
863 if (xprt->ops->release_request)
864 xprt->ops->release_request(task);
865 if (!list_empty(&req->rq_list))
866 list_del(&req->rq_list);
867 xprt->last_used = jiffies;
868 if (list_empty(&xprt->recv))
869 mod_timer(&xprt->timer,
870 xprt->last_used + xprt->idle_timeout);
871 spin_unlock_bh(&xprt->transport_lock);
872 xprt->ops->buf_free(req->rq_buffer);
873 task->tk_rqstp = NULL;
874 if (req->rq_release_snd_buf)
875 req->rq_release_snd_buf(req);
876 memset(req, 0, sizeof(*req)); /* mark unused */
877
878 dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
879
880 spin_lock(&xprt->reserve_lock);
881 list_add(&req->rq_list, &xprt->free);
882 rpc_wake_up_next(&xprt->backlog);
883 spin_unlock(&xprt->reserve_lock);
884 }
885
886 /**
887 * xprt_set_timeout - set constant RPC timeout
888 * @to: RPC timeout parameters to set up
889 * @retr: number of retries
890 * @incr: amount of increase after each retry
891 *
892 */
893 void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
894 {
895 to->to_initval =
896 to->to_increment = incr;
897 to->to_maxval = to->to_initval + (incr * retr);
898 to->to_retries = retr;
899 to->to_exponential = 0;
900 }
901
902 /**
903 * xprt_create_transport - create an RPC transport
904 * @args: rpc transport creation arguments
905 *
906 */
907 struct rpc_xprt *xprt_create_transport(struct rpc_xprtsock_create *args)
908 {
909 struct rpc_xprt *xprt;
910 struct rpc_rqst *req;
911
912 switch (args->proto) {
913 case IPPROTO_UDP:
914 xprt = xs_setup_udp(args);
915 break;
916 case IPPROTO_TCP:
917 xprt = xs_setup_tcp(args);
918 break;
919 default:
920 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
921 args->proto);
922 return ERR_PTR(-EIO);
923 }
924 if (IS_ERR(xprt)) {
925 dprintk("RPC: xprt_create_transport: failed, %ld\n",
926 -PTR_ERR(xprt));
927 return xprt;
928 }
929
930 kref_init(&xprt->kref);
931 spin_lock_init(&xprt->transport_lock);
932 spin_lock_init(&xprt->reserve_lock);
933
934 INIT_LIST_HEAD(&xprt->free);
935 INIT_LIST_HEAD(&xprt->recv);
936 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
937 init_timer(&xprt->timer);
938 xprt->timer.function = xprt_init_autodisconnect;
939 xprt->timer.data = (unsigned long) xprt;
940 xprt->last_used = jiffies;
941 xprt->cwnd = RPC_INITCWND;
942 xprt->bind_index = 0;
943
944 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
945 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
946 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
947 rpc_init_wait_queue(&xprt->resend, "xprt_resend");
948 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
949
950 /* initialize free list */
951 for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
952 list_add(&req->rq_list, &xprt->free);
953
954 xprt_init_xid(xprt);
955
956 dprintk("RPC: created transport %p with %u slots\n", xprt,
957 xprt->max_reqs);
958
959 return xprt;
960 }
961
962 /**
963 * xprt_destroy - destroy an RPC transport, killing off all requests.
964 * @kref: kref for the transport to destroy
965 *
966 */
967 static void xprt_destroy(struct kref *kref)
968 {
969 struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);
970
971 dprintk("RPC: destroying transport %p\n", xprt);
972 xprt->shutdown = 1;
973 del_timer_sync(&xprt->timer);
974
975 /*
976 * Tear down transport state and free the rpc_xprt
977 */
978 xprt->ops->destroy(xprt);
979 }
980
981 /**
982 * xprt_put - release a reference to an RPC transport.
983 * @xprt: pointer to the transport
984 *
985 */
986 void xprt_put(struct rpc_xprt *xprt)
987 {
988 kref_put(&xprt->kref, xprt_destroy);
989 }
990
991 /**
992 * xprt_get - return a reference to an RPC transport.
993 * @xprt: pointer to the transport
994 *
995 */
996 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
997 {
998 kref_get(&xprt->kref);
999 return xprt;
1000 }