/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  Implementation of the NFSv4 state model.  For the time being,
 *  this is minimal, but will be made much more complex in a
 *  subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"

#define OPENOWNER_POOL_SIZE 8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

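/*
 * nfs4_init_clientid - establish a client ID with the server (NFSv4.0)
 *
 * Runs SETCLIENTID and then SETCLIENTID_CONFIRM with the given credential,
 * records the confirmed clientid, and kicks off lease renewal.
 */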
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
        struct nfs4_setclientid_res clid;
        unsigned short port;
        int status;

        port = nfs_callback_tcpport;
        if (clp->cl_addr.ss_family == AF_INET6)
                port = nfs_callback_tcpport6;

        status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
        if (status != 0)
                goto out;
        status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
        if (status != 0)
                goto out;
        clp->cl_clientid = clid.clientid;
        nfs4_schedule_state_renewal(clp);
out:
        return status;
}

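/*
 * Return a referenced copy of the client's machine credential, if one has
 * been set up. Caller is expected to hold clp->cl_lock.
 */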
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
        struct rpc_cred *cred = NULL;

        if (clp->cl_machine_cred != NULL)
                cred = get_rpccred(clp->cl_machine_cred);
        return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = clp->cl_machine_cred;
        clp->cl_machine_cred = NULL;
        spin_unlock(&clp->cl_lock);
        if (cred != NULL)
                put_rpccred(cred);
}

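/*
 * Pick a credential for lease renewal: the first state owner that still
 * has open state. Caller is expected to hold clp->cl_lock.
 */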
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred = NULL;

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (list_empty(&sp->so_states))
                        continue;
                cred = get_rpccred(sp->so_cred);
                break;
        }
        return cred;
}

#if defined(CONFIG_NFS_V4_1)

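/*
 * Fetch the lease time from the server, record it on the nfs_client, and
 * schedule the next state renewal.
 */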
static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
        int status;
        struct nfs_fsinfo fsinfo;

        status = nfs4_proc_get_lease_time(clp, &fsinfo);
        if (status == 0) {
                /* Update lease time and schedule renewal */
                spin_lock(&clp->cl_lock);
                clp->cl_lease_time = fsinfo.lease_time * HZ;
                clp->cl_last_renewal = jiffies;
                spin_unlock(&clp->cl_lock);

                nfs4_schedule_state_renewal(clp);
        }

        return status;
}

static void nfs4_end_drain_session(struct nfs_client *clp)
{
        struct nfs4_session *ses = clp->cl_session;
        int max_slots;

        if (ses == NULL)
                return;
        if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
                spin_lock(&ses->fc_slot_table.slot_tbl_lock);
                max_slots = ses->fc_slot_table.max_slots;
                while (max_slots--) {
                        struct rpc_task *task;

                        task = rpc_wake_up_next(&ses->fc_slot_table.
                                                slot_tbl_waitq);
                        if (!task)
                                break;
                        rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
                }
                spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
        }
}

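/*
 * Mark the session as draining and, if any slots are still in use, wait
 * for the outstanding requests to complete. Paired with
 * nfs4_end_drain_session() above, which wakes the queued tasks back up
 * at privileged priority once recovery is done.
 */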
static int nfs4_begin_drain_session(struct nfs_client *clp)
{
        struct nfs4_session *ses = clp->cl_session;
        struct nfs4_slot_table *tbl = &ses->fc_slot_table;

        spin_lock(&tbl->slot_tbl_lock);
        set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
        if (tbl->highest_used_slotid != -1) {
                INIT_COMPLETION(ses->complete);
                spin_unlock(&tbl->slot_tbl_lock);
                return wait_for_completion_interruptible(&ses->complete);
        }
        spin_unlock(&tbl->slot_tbl_lock);
        return 0;
}

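/*
 * nfs41_init_clientid - establish a client ID and session (NFSv4.1)
 *
 * The v4.1 analogue of nfs4_init_clientid(): EXCHANGE_ID followed by
 * CREATE_SESSION, after which lease renewal is set up and the client is
 * marked ready.
 */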
int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
        int status;

        nfs4_begin_drain_session(clp);
        status = nfs4_proc_exchange_id(clp, cred);
        if (status != 0)
                goto out;
        status = nfs4_proc_create_session(clp);
        if (status != 0)
                goto out;
        nfs41_setup_state_renewal(clp);
        nfs_mark_client_ready(clp, NFS_CS_READY);
out:
        return status;
}

struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        return cred;
}

#endif /* CONFIG_NFS_V4_1 */

struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        if (cred != NULL)
                goto out;
        pos = rb_first(&clp->cl_state_owners);
        if (pos != NULL) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                cred = get_rpccred(sp->so_cred);
        }
out:
        spin_unlock(&clp->cl_lock);
        return cred;
}

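/*
 * Allocate a 64-bit id that is unique within the given tree: start from
 * a random value so the distribution stays more or less flat, then probe
 * linearly (wrapping at minval) on collision.
 */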
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
                                __u64 minval, int maxbits)
{
        struct rb_node **p, *parent;
        struct nfs_unique_id *pos;
        __u64 mask = ~0ULL;

        if (maxbits < 64)
                mask = (1ULL << maxbits) - 1ULL;

        /* Ensure distribution is more or less flat */
        get_random_bytes(&new->id, sizeof(new->id));
        new->id &= mask;
        if (new->id < minval)
                new->id += minval;
retry:
        p = &root->rb_node;
        parent = NULL;

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);

                if (new->id < pos->id)
                        p = &(*p)->rb_left;
                else if (new->id > pos->id)
                        p = &(*p)->rb_right;
                else
                        goto id_exists;
        }
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
        return;
id_exists:
        for (;;) {
                new->id++;
                if (new->id < minval || (new->id & mask) != new->id) {
                        new->id = minval;
                        break;
                }
                parent = rb_next(parent);
                if (parent == NULL)
                        break;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);
                if (new->id < pos->id)
                        break;
        }
        goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
        rb_erase(&id->rb_node, root);
}

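/*
 * State owners live in a red-black tree on the nfs_client, keyed by
 * (server, credential). The lookup below returns a referenced owner or
 * NULL; the insert either links the new owner into the tree or returns
 * a referenced pre-existing one.
 */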
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp, *res = NULL;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        res = sp;
                        break;
                }
        }
        return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (new->so_server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (new->so_server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (new->so_cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (new->so_cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        return sp;
                }
        }
        nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
        rb_link_node(&new->so_client_node, parent, p);
        rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
        return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node))
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
        nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_NOFS);
        if (!sp)
                return NULL;
        spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node)) {
                struct nfs_client *clp = sp->so_server->nfs_client;

                spin_lock(&clp->cl_lock);
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
                RB_CLEAR_NODE(&sp->so_client_node);
                spin_unlock(&clp->cl_lock);
        }
}

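/*
 * nfs4_get_state_owner - find or create a state owner for this
 * (server, credential) pair. The allocation happens outside cl_lock,
 * so a racing insert is detected afterwards and the spare owner freed.
 */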
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *new;

        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(server, cred);
        spin_unlock(&clp->cl_lock);
        if (sp != NULL)
                return sp;
        new = nfs4_alloc_state_owner();
        if (new == NULL)
                return NULL;
        new->so_server = server;
        new->so_cred = cred;
        spin_lock(&clp->cl_lock);
        sp = nfs4_insert_state_owner(clp, new);
        spin_unlock(&clp->cl_lock);
        if (sp == new)
                get_rpccred(cred);
        else {
                rpc_destroy_wait_queue(&new->so_sequence.wait);
                kfree(new);
        }
        return sp;
}

void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs_client *clp = sp->so_server->nfs_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        nfs4_remove_state_owner(clp, sp);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&sp->so_sequence.wait);
        put_rpccred(cred);
        kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kzalloc(sizeof(*state), GFP_NOFS);
        if (!state)
                return NULL;
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        spin_lock_init(&state->state_lock);
        seqlock_init(&state->seqlock);
        return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
        if (state->state == fmode)
                return;
        /* NB! List reordering - see the reclaim code for why. */
        if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
                if (fmode & FMODE_WRITE)
                        list_move(&state->open_states, &state->owner->so_states);
                else
                        list_move_tail(&state->open_states, &state->owner->so_states);
        }
        state->state = fmode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner != owner)
                        continue;
                if (atomic_inc_not_zero(&state->count))
                        return state;
        }
        return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

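/*
 * nfs4_get_open_state - find or create the nfs4_state for this
 * (inode, owner) pair. Note the double lookup: the second one, taken
 * under both so_lock and i_lock, catches a racing creator.
 */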
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
                spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
        spin_lock(&inode->i_lock);
        list_del(&state->inode_states);
        list_del(&state->open_states);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        iput(inode);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
static void __nfs4_close(struct path *path, struct nfs4_state *state,
                         fmode_t fmode, gfp_t gfp_mask, int wait)
{
        struct nfs4_state_owner *owner = state->owner;
        int call_close = 0;
        fmode_t newstate;

        atomic_inc(&owner->so_count);
        /* Protect against nfs4_find_state() */
        spin_lock(&owner->so_lock);
        switch (fmode & (FMODE_READ | FMODE_WRITE)) {
        case FMODE_READ:
                state->n_rdonly--;
                break;
        case FMODE_WRITE:
                state->n_wronly--;
                break;
        case FMODE_READ|FMODE_WRITE:
                state->n_rdwr--;
        }
        newstate = FMODE_READ|FMODE_WRITE;
        if (state->n_rdwr == 0) {
                if (state->n_rdonly == 0) {
                        newstate &= ~FMODE_READ;
                        call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (state->n_wronly == 0) {
                        newstate &= ~FMODE_WRITE;
                        call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (newstate == 0)
                        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        }
        nfs4_state_set_mode_locked(state, newstate);
        spin_unlock(&owner->so_lock);

        if (!call_close) {
                nfs4_put_open_state(state);
                nfs4_put_state_owner(owner);
        } else
                nfs4_do_close(path, state, gfp_mask, wait);
}

void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
        __nfs4_close(path, state, fmode, GFP_NOFS, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
        __nfs4_close(path, state, fmode, GFP_KERNEL, 1);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
        struct nfs4_lock_state *pos;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
                        continue;
                switch (pos->ls_owner.lo_type) {
                case NFS4_POSIX_LOCK_TYPE:
                        if (pos->ls_owner.lo_u.posix_owner != fl_owner)
                                continue;
                        break;
                case NFS4_FLOCK_LOCK_TYPE:
                        if (pos->ls_owner.lo_u.flock_owner != fl_pid)
                                continue;
                }
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Allocate and initialize a new lock_state structure for the
 * given lock owner.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
        struct nfs4_lock_state *lsp;
        struct nfs_client *clp = state->owner->so_server->nfs_client;

        lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
        if (lsp == NULL)
                return NULL;
        rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
        spin_lock_init(&lsp->ls_sequence.lock);
        INIT_LIST_HEAD(&lsp->ls_sequence.list);
        lsp->ls_seqid.sequence = &lsp->ls_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_state = state;
        lsp->ls_owner.lo_type = type;
        switch (lsp->ls_owner.lo_type) {
        case NFS4_FLOCK_LOCK_TYPE:
                lsp->ls_owner.lo_u.flock_owner = fl_pid;
                break;
        case NFS4_POSIX_LOCK_TYPE:
                lsp->ls_owner.lo_u.posix_owner = fl_owner;
                break;
        default:
                kfree(lsp);
                return NULL;
        }
        spin_lock(&clp->cl_lock);
        nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;

        spin_lock(&clp->cl_lock);
        nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
        kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner, pid, type);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner, pid, type);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        if (new != NULL)
                nfs4_free_lock_state(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
                nfs4_release_lockowner(lsp);
        nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static const struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        if (fl->fl_flags & FL_POSIX)
                lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
        else if (fl->fl_flags & FL_FLOCK)
                lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
        else
                return -EINVAL;
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
{
        struct nfs4_lock_state *lsp;
        int seq;

        do {
                seq = read_seqbegin(&state->seqlock);
                memcpy(dst, &state->stateid, sizeof(*dst));
        } while (read_seqretry(&state->seqlock, seq));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), gfp_mask);
        if (new != NULL) {
                new->sequence = counter;
                INIT_LIST_HEAD(&new->list);
        }
        return new;
}

void nfs_release_seqid(struct nfs_seqid *seqid)
{
        if (!list_empty(&seqid->list)) {
                struct rpc_sequence *sequence = seqid->sequence->sequence;

                spin_lock(&sequence->lock);
                list_del_init(&seqid->list);
                spin_unlock(&sequence->lock);
                rpc_wake_up(&sequence->wait);
        }
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
        nfs_release_seqid(seqid);
        kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
        switch (status) {
        case 0:
                break;
        case -NFS4ERR_BAD_SEQID:
                if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
                        return;
                printk(KERN_WARNING "NFS: v4 server returned a bad"
                                " sequence-id error on an"
                                " unconfirmed sequence %p!\n",
                                seqid->sequence);
        case -NFS4ERR_STALE_CLIENTID:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
        case -NFS4ERR_BADXDR:
        case -NFS4ERR_RESOURCE:
        case -NFS4ERR_NOFILEHANDLE:
                /* Non-seqid mutating errors */
                return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        struct nfs4_state_owner *sp = container_of(seqid->sequence,
                                        struct nfs4_state_owner, so_seqid);
        struct nfs_server *server = sp->so_server;

        if (status == -NFS4ERR_BAD_SEQID)
                nfs4_drop_state_owner(sp);
        if (!nfs4_has_session(server->nfs_client))
                nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        nfs_increment_seqid(status, seqid);
}

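/*
 * Queue this seqid on its sequence list. Only the request at the head of
 * the list may proceed; all others sleep on the wait queue until
 * nfs_release_seqid() wakes them.
 */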
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        spin_lock(&sequence->lock);
        if (list_empty(&seqid->list))
                list_add_tail(&seqid->list, &sequence->list);
        if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
                goto unlock;
        rpc_sleep_on(&sequence->wait, task, NULL);
        status = -EAGAIN;
unlock:
        spin_unlock(&sequence->lock);
        return status;
}

static int nfs4_run_state_manager(void *);

static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
        smp_mb__before_clear_bit();
        clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
        struct task_struct *task;

        if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                return;
        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
                                rpc_peeraddr2str(clp->cl_rpcclient,
                                                        RPC_DISPLAY_ADDR));
        if (!IS_ERR(task))
                return;
        nfs4_clear_state_manager_bit(clp);
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
        if (!clp)
                return;
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
        nfs4_schedule_state_manager(clp);
}

int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
        set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
        /* Don't recover state that expired before the reboot */
        if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
                clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
                return 0;
        }
        set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
        set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
        return 1;
}

int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
        set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
        clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
        set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
        set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
        return 1;
}

static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
        struct inode *inode = state->inode;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct file_lock *fl;
        int status = 0;

        if (inode->i_flock == NULL)
                return 0;

        /* Guard against delegation returns and new lock/unlock calls */
        down_write(&nfsi->rwsem);
        /* Protect inode->i_flock using the BKL */
        lock_flocks();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (nfs_file_open_context(fl->fl_file)->state != state)
                        continue;
                unlock_flocks();
                status = ops->recover_lock(state, fl);
                switch (status) {
                case 0:
                        break;
                case -ESTALE:
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_BADSESSION:
                case -NFS4ERR_BADSLOT:
                case -NFS4ERR_BAD_HIGH_SLOT:
                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        goto out;
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __func__, status);
                case -ENOMEM:
                case -NFS4ERR_DENIED:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                        status = 0;
                }
                lock_flocks();
        }
        unlock_flocks();
out:
        up_write(&nfsi->rwsem);
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
restart:
        spin_lock(&sp->so_lock);
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
                        continue;
                if (state->state == 0)
                        continue;
                atomic_inc(&state->count);
                spin_unlock(&sp->so_lock);
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk("%s: Lock reclaim failed!\n",
                                                        __func__);
                                }
                                nfs4_put_open_state(state);
                                goto restart;
                        }
                }
                switch (status) {
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __func__, status);
                case -ENOENT:
                case -ENOMEM:
                case -ESTALE:
                        /*
                         * Open state on this file cannot be recovered
                         * All we can do is revert to using the zero stateid.
                         */
                        memset(state->stateid.data, 0,
                                sizeof(state->stateid.data));
                        /* Mark the file as being 'closed' */
                        state->state = 0;
                        break;
                case -EKEYEXPIRED:
                        /*
                         * User RPCSEC_GSS context has expired.
                         * We cannot recover this stateid now, so
                         * skip it and allow recovery thread to
                         * proceed.
                         */
                        break;
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
                        break;
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                        nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_BADSESSION:
                case -NFS4ERR_BADSLOT:
                case -NFS4ERR_BAD_HIGH_SLOT:
                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        goto out_err;
                }
                nfs4_put_open_state(state);
                goto restart;
        }
        spin_unlock(&sp->so_lock);
        return 0;
out_err:
        nfs4_put_open_state(state);
        return status;
}

static void nfs4_clear_open_state(struct nfs4_state *state)
{
        struct nfs4_lock_state *lock;

        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                lock->ls_seqid.flags = 0;
                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
        }
}

static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;

        /* Reset all sequence ids to zero */
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                sp->so_seqid.flags = 0;
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        if (mark_reclaim(clp, state))
                                nfs4_clear_open_state(state);
                }
                spin_unlock(&sp->so_lock);
        }
}

static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}

static void nfs4_reclaim_complete(struct nfs_client *clp,
                                  const struct nfs4_state_recovery_ops *ops)
{
        /* Notify the server we're done reclaiming our state */
        if (ops->reclaim_complete)
                (void)ops->reclaim_complete(clp);
}

static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;

        if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
                return 0;

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
                                continue;
                        nfs4_state_mark_reclaim_nograce(clp, state);
                }
                spin_unlock(&sp->so_lock);
        }

        nfs_delegation_reap_unclaimed(clp);
        return 1;
}

static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
        if (!nfs4_state_clear_reclaim_reboot(clp))
                return;
        nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
}

static void nfs_delegation_clear_all(struct nfs_client *clp)
{
        nfs_delegation_mark_reclaim(clp);
        nfs_delegation_reap_unclaimed(clp);
}

static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
        nfs_delegation_clear_all(clp);
        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}

static void nfs4_warn_keyexpired(const char *s)
{
        printk_ratelimited(KERN_WARNING "Error: state manager"
                        " encountered RPCSEC_GSS session"
                        " expired against NFSv4 server %s.\n",
                        s);
}

static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
        switch (error) {
        case -NFS4ERR_CB_PATH_DOWN:
                nfs_handle_cb_pathdown(clp);
                return 0;
        case -NFS4ERR_NO_GRACE:
                nfs4_state_end_reclaim_reboot(clp);
                return 0;
        case -NFS4ERR_STALE_CLIENTID:
        case -NFS4ERR_LEASE_MOVED:
                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                nfs4_state_clear_reclaim_reboot(clp);
                nfs4_state_start_reclaim_reboot(clp);
                break;
        case -NFS4ERR_EXPIRED:
                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                nfs4_state_start_reclaim_nograce(clp);
                break;
        case -NFS4ERR_BADSESSION:
        case -NFS4ERR_BADSLOT:
        case -NFS4ERR_BAD_HIGH_SLOT:
        case -NFS4ERR_DEADSESSION:
        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
        case -NFS4ERR_SEQ_FALSE_RETRY:
        case -NFS4ERR_SEQ_MISORDERED:
                set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                /* Zero session reset errors */
                return 0;
        case -EKEYEXPIRED:
                /* Nothing we can do */
                nfs4_warn_keyexpired(clp->cl_hostname);
                return 0;
        }
        return error;
}

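/*
 * Walk the state owners and run the given recovery ops (reboot reclaim
 * or no-grace recovery) on each owner that has the matching flag set.
 * cl_lock is dropped around the actual recovery, so the walk restarts
 * from the top after every owner.
 */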
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
        struct rb_node *pos;
        int status = 0;

restart:
        spin_lock(&clp->cl_lock);
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
                        continue;
                atomic_inc(&sp->so_count);
                spin_unlock(&clp->cl_lock);
                status = nfs4_reclaim_open_state(sp, ops);
                if (status < 0) {
                        set_bit(ops->owner_flag_bit, &sp->so_flags);
                        nfs4_put_state_owner(sp);
                        return nfs4_recovery_handle_error(clp, status);
                }
                nfs4_put_state_owner(sp);
                goto restart;
        }
        spin_unlock(&clp->cl_lock);
        return status;
}

static int nfs4_check_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        const struct nfs4_state_maintenance_ops *ops =
                clp->cl_mvops->state_renewal_ops;
        int status = -NFS4ERR_EXPIRED;

        /* Is the client already known to have an expired lease? */
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                return 0;
        spin_lock(&clp->cl_lock);
        cred = ops->get_state_renewal_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
                cred = nfs4_get_setclientid_cred(clp);
                if (cred == NULL)
                        goto out;
        }
        status = ops->renew_lease(clp, cred);
        put_rpccred(cred);
out:
        return nfs4_recovery_handle_error(clp, status);
}

static int nfs4_reclaim_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        const struct nfs4_state_recovery_ops *ops =
                clp->cl_mvops->reboot_recovery_ops;
        int status = -ENOENT;

        cred = ops->get_clid_cred(clp);
        if (cred != NULL) {
                status = ops->establish_clid(clp, cred);
                put_rpccred(cred);
                /* Handle case where the user hasn't set up machine creds */
                if (status == -EACCES && cred == clp->cl_machine_cred) {
                        nfs4_clear_machine_cred(clp);
                        status = -EAGAIN;
                }
                if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
                        status = -EPROTONOSUPPORT;
        }
        return status;
}

#ifdef CONFIG_NFS_V4_1
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
        set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
        nfs4_schedule_state_recovery(clp);
}

static void nfs4_reset_all_state(struct nfs_client *clp)
{
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                clp->cl_boot_time = CURRENT_TIME;
                nfs4_state_start_reclaim_nograce(clp);
                nfs4_schedule_state_recovery(clp);
        }
}

static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                nfs4_state_start_reclaim_reboot(clp);
                nfs4_schedule_state_recovery(clp);
        }
}

static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
        /* Temporary */
        nfs4_reset_all_state(clp);
}

static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
        /* This will need to handle layouts too */
        nfs_expire_all_delegations(clp);
}

static void nfs41_handle_cb_path_down(struct nfs_client *clp)
{
        nfs_expire_all_delegations(clp);
        if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
                nfs4_schedule_state_recovery(clp);
}

void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
        if (!flags)
                return;
        else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
        else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED))
                nfs41_handle_state_revoked(clp);
        else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
        else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
}

static int nfs4_reset_session(struct nfs_client *clp)
{
        int status;

        nfs4_begin_drain_session(clp);
        status = nfs4_proc_destroy_session(clp->cl_session);
        if (status && status != -NFS4ERR_BADSESSION &&
            status != -NFS4ERR_DEADSESSION) {
                status = nfs4_recovery_handle_error(clp, status);
                goto out;
        }

        memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
        status = nfs4_proc_create_session(clp);
        if (status) {
                status = nfs4_recovery_handle_error(clp, status);
                goto out;
        }
        /* create_session negotiated new slot table */
        clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);

        /* Let the state manager reestablish state */
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                nfs41_setup_state_renewal(clp);
out:
        return status;
}

static int nfs4_recall_slot(struct nfs_client *clp)
{
        struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
        struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
        struct nfs4_slot *new, *old;
        int i;

        nfs4_begin_drain_session(clp);
        new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
                      GFP_NOFS);
        if (!new)
                return -ENOMEM;

        spin_lock(&fc_tbl->slot_tbl_lock);
        for (i = 0; i < fc_tbl->target_max_slots; i++)
                new[i].seq_nr = fc_tbl->slots[i].seq_nr;
        old = fc_tbl->slots;
        fc_tbl->slots = new;
        fc_tbl->max_slots = fc_tbl->target_max_slots;
        fc_tbl->target_max_slots = 0;
        fc_attrs->max_reqs = fc_tbl->max_slots;
        spin_unlock(&fc_tbl->slot_tbl_lock);

        kfree(old);
        nfs4_end_drain_session(clp);
        return 0;
}

#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */

/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
 * on EXCHANGE_ID for v4.1
 */
static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
{
        if (nfs4_has_session(clp)) {
                switch (status) {
                case -NFS4ERR_DELAY:
                case -NFS4ERR_CLID_INUSE:
                case -EAGAIN:
                        break;

                case -EKEYEXPIRED:
                        nfs4_warn_keyexpired(clp->cl_hostname);
                case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
                                         * in nfs4_exchange_id */
                default:
                        return;
                }
        }
        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
}

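/*
 * The state manager proper. Runs in its own kthread and loops over the
 * NFS4CLNT_* bits in clp->cl_state, handling lease re-establishment,
 * lease checks, session resets, reboot and no-grace reclaim, delegation
 * returns and slot recalls until no more work is queued.
 */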
static void nfs4_state_manager(struct nfs_client *clp)
{
        int status = 0;

        /* Ensure exclusive access to NFSv4 state */
        for (;;) {
                if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
                        /* We're going to have to re-establish a clientid */
                        status = nfs4_reclaim_lease(clp);
                        if (status) {
                                nfs4_set_lease_expired(clp, status);
                                if (test_bit(NFS4CLNT_LEASE_EXPIRED,
                                                        &clp->cl_state))
                                        continue;
                                if (clp->cl_cons_state ==
                                                        NFS_CS_SESSION_INITING)
                                        nfs_mark_client_ready(clp, status);
                                goto out_error;
                        }
                        clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
                        set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
                        pnfs_destroy_all_layouts(clp);
                }

                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
                        status = nfs4_check_lease(clp);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
                        if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
                                goto out_error;
                }

                /* Initialize or reset the session */
                if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
                   && nfs4_has_session(clp)) {
                        status = nfs4_reset_session(clp);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                /* First recover reboot state... */
                if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
                        status = nfs4_do_reclaim(clp,
                                clp->cl_mvops->reboot_recovery_ops);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
                            test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
                                continue;
                        nfs4_state_end_reclaim_reboot(clp);
                        if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                /* Now recover expired state... */
                if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
                        status = nfs4_do_reclaim(clp,
                                clp->cl_mvops->nograce_recovery_ops);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
                            test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
                            test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                nfs4_end_drain_session(clp);
                if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
                        nfs_client_return_marked_delegations(clp);
                        continue;
                }
                /* Recall session slots */
                if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
                   && nfs4_has_session(clp)) {
                        status = nfs4_recall_slot(clp);
                        if (status < 0)
                                goto out_error;
                        continue;
                }

                nfs4_clear_state_manager_bit(clp);
                /* Did we race with an attempt to give us more work? */
                if (clp->cl_state == 0)
                        break;
                if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                        break;
        }
        return;
out_error:
        printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
                        " with error %d\n", clp->cl_hostname, -status);
        nfs4_end_drain_session(clp);
        nfs4_clear_state_manager_bit(clp);
}

static int nfs4_run_state_manager(void *ptr)
{
        struct nfs_client *clp = ptr;

        allow_signal(SIGKILL);
        nfs4_state_manager(clp);
        nfs_put_client(clp);
        module_put_and_exit(0);
        return 0;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */