/*
 *  linux/net/sunrpc/sched.c
 *
 *  Scheduling for synchronous and asynchronous RPC requests.
 *
 *  Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 *  TCP NFS related read + write fixes
 *   (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;
/*
 * Disable the timer for a given RPC task. Should be called with the
 * queue->lock held and bottom halves disabled, in order to avoid races
 * with __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON(RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

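/*
 * Note on the quota set below: the per-level quota grows geometrically
 * with the priority level (1 << (priority * 2), i.e. 1, 4, 16, ...),
 * so higher-priority queues receive a proportionally larger share of
 * wakeups before __rpc_wake_up_next_priority() moves on to a lower
 * level.
 */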
static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

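/*
 * Usage sketch for the queue setup/teardown helpers above
 * (illustrative only; the queue and its name are invented for this
 * example and do not appear in this file):
 *
 *	struct rpc_wait_queue pending;
 *
 *	rpc_init_wait_queue(&pending, "my_pending");
 *	...
 *	rpc_destroy_wait_queue(&pending);
 *
 * rpc_destroy_wait_queue() only stops the queue timer; callers are
 * expected to have drained the queue first.
 */

/*
 * Default wait_on_bit() action used below: give up if a fatal signal
 * is pending, otherwise schedule away until woken.
 */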
static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	struct rpc_clnt *clnt;
	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
		return;
	rpc_task_set_debuginfo(task);
	/* Add to global list of all tasks */
	clnt = task->tk_client;
	if (clnt != NULL) {
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
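
/*
 * Sketch of a typical wait (illustrative, not a caller taken from
 * this file): passing a NULL action selects the killable default
 * above, so only a fatal signal can interrupt the wait.
 *
 *	status = __rpc_wait_for_completion_task(task, NULL);
 *	if (status < 0)
 *		... a fatal signal arrived before the task completed ...
 */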

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(rpciod_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	/* Mark the task as being activated if so needed */
	rpc_set_active(task);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
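
/*
 * Sleep/wake sketch (illustrative; the queue, timeout value and
 * callback are invented for the example): a task that must wait for
 * some condition typically does
 *
 *	task->tk_timeout = 5 * HZ;
 *	rpc_sleep_on(&some_queue, task, some_action);
 *
 * and is later woken either by the queue timer (with tk_status set
 * to -ETIMEDOUT) or explicitly via
 *
 *	rpc_wake_up_queued_task(&some_queue, task);
 */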

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
		__rpc_do_wake_up_task(queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the specified task
 */
static void rpc_wake_up_task(struct rpc_task *task)
{
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}

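/*
 * Fairness notes for the priority scheduler below: up to queue->nr
 * tasks from the current owner, then up to queue->count tasks from
 * the current priority level, are serviced before moving to the next
 * lower level; once every level has been scanned empty, both quotas
 * are reset by rpc_reset_waitqueue_priority().
 */
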
/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
			rpc_wake_up_task_queue_locked(queue, task);
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

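/*
 * Per-queue timer callback: expire every task whose deadline has
 * passed (waking it with tk_status == -ETIMEDOUT), then re-arm the
 * timer for the earliest remaining deadline, if any.
 */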
static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
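
/*
 * Backoff sketch (illustrative; the half-second delay and the next
 * state name are invented for the example): a state-machine action
 * that wants to retry later can do
 *
 *	rpc_delay(task, HZ >> 1);
 *	task->tk_action = call_retry_hypothetical;
 *
 * __rpc_atrun() then clears the -ETIMEDOUT status left by the queue
 * timer, so the task resumes cleanly after the delay.
 */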

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}
EXPORT_SYMBOL_GPL(rpc_exit_task);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {

		/*
		 * Execute any pending callback.
		 */
		if (task->tk_callback) {
			void (*save_callback)(struct rpc_task *);

			/*
			 * We set tk_callback to NULL before calling it,
			 * in case it sets the tk_callback field itself:
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer remains valid after that point.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
			rpc_wake_up_task(task);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that rpc_release_task() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

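/*
 * Each buffer carries its own length so that rpc_free() can tell
 * whether the memory came from the mempool or from kmalloc().
 */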
struct rpc_buffer {
	size_t	len;
	char	data[];
};

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_client = task_setup_data->rpc_client;
	if (task->tk_client != NULL) {
		kref_get(&task->tk_client->cl_kref);
		if (task->tk_client->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
	}

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	if (task_setup_data->rpc_message != NULL) {
		task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
		task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
		task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
		/* Bind the user cred */
		rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
		if (task->tk_action == NULL)
			rpc_call_start(task);
	}

	/* starting timestamp */
	task->tk_start = jiffies;

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL)
			goto out;
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);

	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
out:
	return task;
}
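
/*
 * Setup sketch (illustrative; "clnt", "msg" and "my_ops" are assumed
 * to be supplied by the caller, and most callers go through
 * rpc_run_task() in clnt.c rather than calling rpc_new_task()
 * directly):
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client = clnt,
 *		.rpc_message = &msg,
 *		.callback_ops = &my_ops,
 *		.flags = RPC_TASK_ASYNC,
 *	};
 *
 *	task = rpc_new_task(&task_setup_data);
 */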

static void rpc_free_task(struct rpc_task *task)
{
	const struct rpc_call_ops *tk_ops = task->tk_ops;
	void *calldata = task->tk_calldata;

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
	rpc_release_calldata(tk_ops, calldata);
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

void rpc_put_task(struct rpc_task *task)
{
	if (!atomic_dec_and_test(&task->tk_count))
		return;
	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}
	if (task->tk_workqueue != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(task->tk_workqueue, &task->u.tk_work);
	} else
		rpc_free_task(task);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

static void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	dprintk("RPC: %5u release task\n", task->tk_pid);

	if (!list_empty(&task->tk_task)) {
		struct rpc_clnt *clnt = task->tk_client;
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
	}
	BUG_ON(RPC_IS_QUEUED(task));

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);

	rpc_put_task(task);
}

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;

	if (list_empty(&clnt->cl_tasks))
		return;
	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

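/*
 * rpciod itself is started once from rpc_init_mempool() below;
 * rpciod_up()/rpciod_down() merely pin this module in memory while
 * someone depends on the workqueue.
 */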
int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod workqueue.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = create_workqueue("rpciod");
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}