NFS: Generalise the nfs_client structure
/*
 * fs/nfs/nfs4state.c
 *
 * Client-side support for the NFSv4 state model.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

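/*
 * Initialise the per-server NFSv4 state; the nfs_client is attached
 * later, once the mount is set up.
 */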
void
init_nfsv4_state(struct nfs_server *server)
{
        server->nfs_client = NULL;
        INIT_LIST_HEAD(&server->nfs4_siblings);
}

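/*
 * Tear down the per-server NFSv4 state: free the mount path and drop
 * our reference to the nfs_client.
 */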
void
destroy_nfsv4_state(struct nfs_server *server)
{
        kfree(server->mnt_path);
        server->mnt_path = NULL;
        if (server->nfs_client) {
                nfs_put_client(server->nfs_client);
                server->nfs_client = NULL;
        }
}

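/*
 * Establish our clientid on the server: SETCLIENTID followed by
 * SETCLIENTID_CONFIRM, then schedule lease renewal.
 */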
static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
        int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
                        nfs_callback_tcpport, cred);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp, cred);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}

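/*
 * Hand out a new open/lock owner id. The caller must hold clp->cl_lock.
 */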
u32
nfs4_alloc_lockowner_id(struct nfs_client *clp)
{
        return clp->cl_lockowner_id++;
}

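/*
 * Try to recycle a state owner from the pool of unused ones, binding
 * it to the new credential. The caller must hold clp->cl_lock.
 */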
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp = NULL;

        if (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
                atomic_inc(&sp->so_count);
                sp->so_cred = cred;
                list_move(&sp->so_list, &clp->cl_state_owners);
                clp->cl_nunused--;
        }
        return sp;
}

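/*
 * Find a credential that can be used for lease renewal: any state
 * owner that still has open state will do.
 */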
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rpc_cred *cred = NULL;

        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                if (list_empty(&sp->so_states))
                        continue;
                cred = get_rpccred(sp->so_cred);
                break;
        }
        return cred;
}

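/*
 * Find a credential for re-establishing the clientid: the first
 * state owner on the list, if there is one.
 */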
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;

        if (!list_empty(&clp->cl_state_owners)) {
                sp = list_entry(clp->cl_state_owners.next,
                                struct nfs4_state_owner, so_list);
                return get_rpccred(sp->so_cred);
        }
        return NULL;
}

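/*
 * Look up an existing state owner matching this credential. The
 * caller must hold clp->cl_lock.
 */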
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp, *res = NULL;

        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                if (sp->so_cred != cred)
                        continue;
                atomic_inc(&sp->so_count);
                /* Move to the head of the list */
                list_move(&sp->so_list, &clp->cl_state_owners);
                res = sp;
                break;
        }
        return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

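/*
 * Unhash a state owner: used when the server tells us (via BAD_SEQID)
 * that it no longer recognises it.
 */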
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs_client *clp = sp->so_client;
        spin_lock(&clp->cl_lock);
        list_del_init(&sp->so_list);
        spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *new;

        get_rpccred(cred);
        new = nfs4_alloc_state_owner();
        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(clp, cred);
        if (sp == NULL)
                sp = nfs4_client_grab_unused(clp, cred);
        if (sp == NULL && new != NULL) {
                list_add(&new->so_list, &clp->cl_state_owners);
                new->so_client = clp;
                new->so_id = nfs4_alloc_lockowner_id(clp);
                new->so_cred = cred;
                sp = new;
                new = NULL;
        }
        spin_unlock(&clp->cl_lock);
        kfree(new);
        if (sp != NULL)
                return sp;
        put_rpccred(cred);
        return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
                goto out_free;
        if (list_empty(&sp->so_list))
                goto out_free;
        list_move(&sp->so_list, &clp->cl_unused);
        clp->cl_nunused++;
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        cred = NULL;
        return;
out_free:
        list_del(&sp->so_list);
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        kfree(sp);
}

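/*
 * Allocate a fresh open state: one reference held, no locks attached.
 */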
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        spin_lock_init(&state->state_lock);
        return state;
}

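/*
 * Update the open mode of a state, keeping the owner's so_states list
 * ordered: writeable states at the head, read-only ones at the tail.
 */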
void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
        if (state->state == mode)
                return;
        /* NB! List reordering - see the reclaim code for why. */
        if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
                if (mode & FMODE_WRITE)
                        list_move(&state->open_states, &state->owner->so_states);
                else
                        list_move_tail(&state->open_states, &state->owner->so_states);
        }
        if (mode == 0)
                list_del_init(&state->inode_states);
        state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                /* Is this in the process of being freed? */
                if (state->state == 0)
                        continue;
                if (state->owner == owner) {
                        atomic_inc(&state->count);
                        return state;
                }
        }
        return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

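/*
 * Find the open state for this (inode, owner) pair, or create one if
 * none exists. Allocation is done outside the spinlocks; if we race
 * with another creator, the preallocated state is freed again.
 */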
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
                spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
        spin_lock(&inode->i_lock);
        if (!list_empty(&state->inode_states))
                list_del(&state->inode_states);
        list_del(&state->open_states);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        iput(inode);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Close the current file: drop an open count for the given mode. If
 * the effective share mode changes and no delegation is held, this
 * results in a CLOSE or OPEN_DOWNGRADE call to the server.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
        int oldstate, newstate = 0;

        atomic_inc(&owner->so_count);
        /* Protect against nfs4_find_state() */
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        switch (mode & (FMODE_READ | FMODE_WRITE)) {
                case FMODE_READ:
                        state->n_rdonly--;
                        break;
                case FMODE_WRITE:
                        state->n_wronly--;
                        break;
                case FMODE_READ|FMODE_WRITE:
                        state->n_rdwr--;
        }
        oldstate = newstate = state->state;
        if (state->n_rdwr == 0) {
                if (state->n_rdonly == 0)
                        newstate &= ~FMODE_READ;
                if (state->n_wronly == 0)
                        newstate &= ~FMODE_WRITE;
        }
        if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
                nfs4_state_set_mode_locked(state, newstate);
                oldstate = newstate;
        }
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);

        if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
                return;
        nfs4_put_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Allocate and initialise a new lock_state for the given open state
 * and lock owner. The lock stateid itself is not yet established
 * with the server.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs_client *clp = state->owner->so_client;

        lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        lsp->ls_seqid.sequence = &state->owner->so_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        spin_lock(&clp->cl_lock);
        lsp->ls_id = nfs4_alloc_lockowner_id(clp);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        new->ls_state = state;
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        kfree(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        kfree(lsp);
}

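/*
 * file_lock hooks: keep the nfs4_lock_state refcount in step when the
 * VFS copies or frees a file_lock that references it.
 */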
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

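/*
 * Attach a lock_state to the file_lock, so that its stateid and
 * sequence id can be found again later.
 */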
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        lsp = nfs4_get_lock_state(state, fl->fl_owner);
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;

        memcpy(dst, &state->stateid, sizeof(*dst));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

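/*
 * Allocate a seqid structure and queue it at the tail of the sequence
 * list for its counter.
 */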
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
        struct rpc_sequence *sequence = counter->sequence;
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (new != NULL) {
                new->sequence = counter;
                spin_lock(&sequence->lock);
                list_add_tail(&new->list, &sequence->list);
                spin_unlock(&sequence->lock);
        }
        return new;
}

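/*
 * Remove a seqid from the sequence queue and wake up the next waiter.
 */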
void nfs_free_seqid(struct nfs_seqid *seqid)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;

        spin_lock(&sequence->lock);
        list_del(&seqid->list);
        spin_unlock(&sequence->lock);
        rpc_wake_up(&sequence->wait);
        kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see the comments on nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        switch (status) {
                case 0:
                        break;
                case -NFS4ERR_BAD_SEQID:
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_BADXDR:
                case -NFS4ERR_RESOURCE:
                case -NFS4ERR_NOFILEHANDLE:
                        /* Non-seqid mutating errors */
                        return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        if (status == -NFS4ERR_BAD_SEQID) {
                struct nfs4_state_owner *sp = container_of(seqid->sequence,
                                struct nfs4_state_owner, so_seqid);
                nfs4_drop_state_owner(sp);
        }
        nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see the comments on nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        nfs_increment_seqid(status, seqid);
}

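/*
 * Seqid-mutating operations must be strictly serialised. If we are not
 * at the head of the sequence queue, put the task to sleep on the wait
 * queue and tell the caller to try again later.
 */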
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        if (sequence->list.next == &seqid->list)
                goto out;
        spin_lock(&sequence->lock);
        if (sequence->list.next != &seqid->list) {
                rpc_sleep_on(&sequence->wait, task, NULL, NULL);
                status = -EAGAIN;
        }
        spin_unlock(&sequence->lock);
out:
        return status;
}

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
{
        smp_mb__before_clear_bit();
        clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine: spawn the reclaimer thread. On failure,
 * drop the references and module count the thread would have released.
 */
static void nfs4_recover_state(struct nfs_client *clp)
{
        struct task_struct *task;

        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
                        NIPQUAD(clp->cl_addr.sin_addr));
        if (!IS_ERR(task))
                return;
        nfs4_clear_recover_bit(clp);
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt, unless one is already in progress.
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
        if (!clp)
                return;
        if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
                nfs4_recover_state(clp);
}

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
                        continue;
                status = ops->recover_lock(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
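                                /* Fall through */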
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                                break;
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(ops, state);
                        if (status < 0)
                                goto out_err;
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                        printk(KERN_WARNING "%s: Lock reclaim failed!\n",
                                                        __FUNCTION__);
                        }
                        continue;
                }
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
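                                /* Fall through */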
                        case -ENOENT:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /*
                                 * Open state on this file cannot be recovered
                                 * All we can do is revert to using the zero stateid.
                                 */
                                memset(state->stateid.data, 0,
                                        sizeof(state->stateid.data));
                                /* Mark the file as being 'closed' */
                                state->state = 0;
                                break;
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

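/*
 * Prepare for reclaim: reset all open and lock sequence ids to zero,
 * and mark all lock stateids as needing re-establishment.
 */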
static void nfs4_state_mark_reclaim(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;

        /* Reset all sequence ids to zero */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                sp->so_seqid.counter = 0;
                sp->so_seqid.flags = 0;
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                lock->ls_seqid.counter = 0;
                                lock->ls_seqid.flags = 0;
                                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                        }
                }
                spin_unlock(&sp->so_lock);
        }
}

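/*
 * The state recovery thread. Try to renew the existing lease; if that
 * fails, re-establish the clientid and reclaim all open and lock state
 * while holding clp->cl_sem exclusively.
 */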
static int reclaimer(void *ptr)
{
        struct nfs_client *clp = ptr;
        struct nfs4_state_owner *sp;
        struct nfs4_state_recovery_ops *ops;
        struct rpc_cred *cred;
        int status = 0;

        allow_signal(SIGKILL);

        /* Ensure exclusive access to NFSv4 state */
        lock_kernel();
        down_write(&clp->cl_sem);
        /* Are there any NFS mounts out there? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
restart_loop:
        ops = &nfs4_network_partition_recovery_ops;
        /* Are there any open files on this volume? */
        cred = nfs4_get_renew_cred(clp);
        if (cred != NULL) {
                /* Yes there are: try to renew the old lease */
                status = nfs4_proc_renew(clp, cred);
                switch (status) {
                        case 0:
                        case -NFS4ERR_CB_PATH_DOWN:
                                put_rpccred(cred);
                                goto out;
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_LEASE_MOVED:
                                ops = &nfs4_reboot_recovery_ops;
                }
        } else {
                /* "reboot" to ensure we clear all state on the server */
                clp->cl_boot_time = CURRENT_TIME;
                cred = nfs4_get_setclientid_cred(clp);
        }
        /* We're going to have to re-establish a clientid */
        nfs4_state_mark_reclaim(clp);
        status = -ENOENT;
        if (cred != NULL) {
                status = nfs4_init_client(clp, cred);
                put_rpccred(cred);
        }
        if (status)
                goto out_error;
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        /* Note: list is protected by exclusive lock on cl->cl_sem */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                status = nfs4_reclaim_open_state(ops, sp);
                if (status < 0) {
                        if (status == -NFS4ERR_NO_GRACE) {
                                ops = &nfs4_network_partition_recovery_ops;
                                status = nfs4_reclaim_open_state(ops, sp);
                        }
                        if (status == -NFS4ERR_STALE_CLIENTID)
                                goto restart_loop;
                        if (status == -NFS4ERR_EXPIRED)
                                goto restart_loop;
                }
        }
        nfs_delegation_reap_unclaimed(clp);
out:
        up_write(&clp->cl_sem);
        unlock_kernel();
        if (status == -NFS4ERR_CB_PATH_DOWN)
                nfs_handle_cb_pathdown(clp);
        nfs4_clear_recover_bit(clp);
        nfs_put_client(clp);
        module_put_and_exit(0);
        return 0;
out_error:
        printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
                        NIPQUAD(clp->cl_addr.sin_addr), -status);
        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
        goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */