nfs41: nfs41_setup_state_renewal
1da177e4
LT
1/*
2 * fs/nfs/nfs4state.c
3 *
4 * Client-side XDR for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Implementation of the NFSv4 state model. For the time being,
37 * this is minimal, but will be made much more complex in a
38 * subsequent patch.
39 */
40
6f43ddcc 41#include <linux/kernel.h>
1da177e4
LT
42#include <linux/slab.h>
43#include <linux/smp_lock.h>
44#include <linux/nfs_fs.h>
45#include <linux/nfs_idmap.h>
5043e900
TM
46#include <linux/kthread.h>
47#include <linux/module.h>
9f958ab8 48#include <linux/random.h>
1da177e4
LT
49#include <linux/workqueue.h>
50#include <linux/bitops.h>
51
4ce79717 52#include "nfs4_fs.h"
1da177e4
LT
53#include "callback.h"
54#include "delegation.h"
24c8dbbb 55#include "internal.h"
1da177e4
LT
56
57#define OPENOWNER_POOL_SIZE 8
58
4ce79717 59const nfs4_stateid zero_stateid;
1da177e4
LT
60
61static LIST_HEAD(nfs4_clientid_list);
62
591d71cb 63int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
1da177e4 64{
f738f517
CL
65 unsigned short port;
66 int status;
67
68 port = nfs_callback_tcpport;
69 if (clp->cl_addr.ss_family == AF_INET6)
70 port = nfs_callback_tcpport6;
71
72 status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred);
1da177e4 73 if (status == 0)
286d7d6a 74 status = nfs4_proc_setclientid_confirm(clp, cred);
1da177e4
LT
75 if (status == 0)
76 nfs4_schedule_state_renewal(clp);
77 return status;
78}
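/*
 * Note: nfs4_init_clientid() performs the v4.0 two-step handshake:
 * SETCLIENTID (advertising the callback channel on the IPv4 or IPv6
 * callback port, chosen from clp->cl_addr.ss_family) followed by
 * SETCLIENTID_CONFIRM. Only once both succeed is lease renewal
 * armed via nfs4_schedule_state_renewal().
 */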
79
a7b72103 80struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
a2b2bb88
TM
81{
82 struct rpc_cred *cred = NULL;
83
a2b2bb88
TM
84 if (clp->cl_machine_cred != NULL)
85 cred = get_rpccred(clp->cl_machine_cred);
a2b2bb88
TM
86 return cred;
87}
88
89static void nfs4_clear_machine_cred(struct nfs_client *clp)
90{
91 struct rpc_cred *cred;
92
93 spin_lock(&clp->cl_lock);
94 cred = clp->cl_machine_cred;
95 clp->cl_machine_cred = NULL;
96 spin_unlock(&clp->cl_lock);
97 if (cred != NULL)
98 put_rpccred(cred);
99}
100
6dc9d57a 101struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
b4454fe1
TM
102{
103 struct nfs4_state_owner *sp;
9f958ab8 104 struct rb_node *pos;
b4454fe1
TM
105 struct rpc_cred *cred = NULL;
106
9f958ab8
TM
107 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
108 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
b4454fe1
TM
109 if (list_empty(&sp->so_states))
110 continue;
111 cred = get_rpccred(sp->so_cred);
112 break;
113 }
114 return cred;
115}
116
b4b82607
AA
117#if defined(CONFIG_NFS_V4_1)
118
9430fb6b
RL
119static int nfs41_setup_state_renewal(struct nfs_client *clp)
120{
121 int status;
122 struct nfs_fsinfo fsinfo;
123
124 status = nfs4_proc_get_lease_time(clp, &fsinfo);
125 if (status == 0) {
126 /* Update lease time and schedule renewal */
127 spin_lock(&clp->cl_lock);
128 clp->cl_lease_time = fsinfo.lease_time * HZ;
129 clp->cl_last_renewal = jiffies;
130 spin_unlock(&clp->cl_lock);
131
132 nfs4_schedule_state_renewal(clp);
133 }
134
135 return status;
136}
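/*
 * Note: nfs41_setup_state_renewal() fetches the server's lease time
 * with GET_LEASE_TIME, converts it from seconds to jiffies
 * (fsinfo.lease_time * HZ), records the current jiffies as the last
 * renewal under cl_lock, and then starts the renewal daemon via
 * nfs4_schedule_state_renewal().
 */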
137
4d643d1d
AA
138int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
139{
140 int status;
141
142 status = nfs4_proc_exchange_id(clp, cred);
9430fb6b
RL
143 if (status != 0)
144 goto out;
145 status = nfs4_proc_create_session(clp);
146 if (status != 0)
147 goto out;
148 nfs41_setup_state_renewal(clp);
149 nfs_mark_client_ready(clp, NFS_CS_READY);
150out:
4d643d1d
AA
151 return status;
152}
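/*
 * Note: this is the v4.1 counterpart of nfs4_init_clientid():
 * EXCHANGE_ID establishes the clientid, CREATE_SESSION builds the
 * session, and only then is state renewal set up and the client
 * marked NFS_CS_READY for waiting mounts.
 */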
153
b4b82607
AA
154struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
155{
156 struct rpc_cred *cred;
157
158 spin_lock(&clp->cl_lock);
159 cred = nfs4_get_machine_cred_locked(clp);
160 spin_unlock(&clp->cl_lock);
161 return cred;
162}
163
164#endif /* CONFIG_NFS_V4_1 */
165
a7b72103 166struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
286d7d6a
TM
167{
168 struct nfs4_state_owner *sp;
9f958ab8 169 struct rb_node *pos;
a2b2bb88 170 struct rpc_cred *cred;
286d7d6a 171
6dc9d57a
TM
172 spin_lock(&clp->cl_lock);
173 cred = nfs4_get_machine_cred_locked(clp);
a2b2bb88
TM
174 if (cred != NULL)
175 goto out;
9f958ab8
TM
176 pos = rb_first(&clp->cl_state_owners);
177 if (pos != NULL) {
178 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
a2b2bb88 179 cred = get_rpccred(sp->so_cred);
286d7d6a 180 }
a2b2bb88 181out:
6dc9d57a 182 spin_unlock(&clp->cl_lock);
a2b2bb88 183 return cred;
286d7d6a
TM
184}
185
9f958ab8
TM
186static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
187 __u64 minval, int maxbits)
188{
189 struct rb_node **p, *parent;
190 struct nfs_unique_id *pos;
191 __u64 mask = ~0ULL;
192
193 if (maxbits < 64)
194 mask = (1ULL << maxbits) - 1ULL;
195
196 /* Ensure distribution is more or less flat */
197 get_random_bytes(&new->id, sizeof(new->id));
198 new->id &= mask;
199 if (new->id < minval)
200 new->id += minval;
201retry:
202 p = &root->rb_node;
203 parent = NULL;
204
205 while (*p != NULL) {
206 parent = *p;
207 pos = rb_entry(parent, struct nfs_unique_id, rb_node);
208
209 if (new->id < pos->id)
210 p = &(*p)->rb_left;
211 else if (new->id > pos->id)
212 p = &(*p)->rb_right;
213 else
214 goto id_exists;
215 }
216 rb_link_node(&new->rb_node, parent, p);
217 rb_insert_color(&new->rb_node, root);
218 return;
219id_exists:
220 for (;;) {
221 new->id++;
222 if (new->id < minval || (new->id & mask) != new->id) {
223 new->id = minval;
224 break;
225 }
226 parent = rb_next(parent);
227 if (parent == NULL)
228 break;
229 pos = rb_entry(parent, struct nfs_unique_id, rb_node);
230 if (new->id < pos->id)
231 break;
232 }
233 goto retry;
234}
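/*
 * Note: nfs_alloc_unique_id() seeds the id from get_random_bytes(),
 * masks it down to @maxbits and bumps it above @minval, then inserts
 * it into the rb-tree ordered by id. On a collision it walks forward
 * (rb_next) incrementing the candidate past every occupied id,
 * wrapping back to @minval if the mask overflows, and retries the
 * insertion from the root.
 */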
235
236static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
237{
238 rb_erase(&id->rb_node, root);
239}
240
1da177e4 241static struct nfs4_state_owner *
6f2e64d3 242nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
1da177e4 243{
6f2e64d3 244 struct nfs_client *clp = server->nfs_client;
9f958ab8
TM
245 struct rb_node **p = &clp->cl_state_owners.rb_node,
246 *parent = NULL;
1da177e4
LT
247 struct nfs4_state_owner *sp, *res = NULL;
248
9f958ab8
TM
249 while (*p != NULL) {
250 parent = *p;
251 sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
252
6f2e64d3
TM
253 if (server < sp->so_server) {
254 p = &parent->rb_left;
255 continue;
256 }
257 if (server > sp->so_server) {
258 p = &parent->rb_right;
259 continue;
260 }
9f958ab8
TM
261 if (cred < sp->so_cred)
262 p = &parent->rb_left;
263 else if (cred > sp->so_cred)
264 p = &parent->rb_right;
265 else {
266 atomic_inc(&sp->so_count);
267 res = sp;
268 break;
269 }
1da177e4
LT
270 }
271 return res;
272}
273
9f958ab8
TM
274static struct nfs4_state_owner *
275nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
276{
277 struct rb_node **p = &clp->cl_state_owners.rb_node,
278 *parent = NULL;
279 struct nfs4_state_owner *sp;
280
281 while (*p != NULL) {
282 parent = *p;
283 sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
284
6f2e64d3
TM
285 if (new->so_server < sp->so_server) {
286 p = &parent->rb_left;
287 continue;
288 }
289 if (new->so_server > sp->so_server) {
290 p = &parent->rb_right;
291 continue;
292 }
9f958ab8
TM
293 if (new->so_cred < sp->so_cred)
294 p = &parent->rb_left;
295 else if (new->so_cred > sp->so_cred)
296 p = &parent->rb_right;
297 else {
298 atomic_inc(&sp->so_count);
299 return sp;
300 }
301 }
302 nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
303 rb_link_node(&new->so_client_node, parent, p);
304 rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
305 return new;
306}
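/*
 * Note: the cl_state_owners rb-tree is keyed by (so_server, so_cred)
 * pointer comparison. nfs4_insert_state_owner() returns an existing
 * owner with an extra reference if one matches; otherwise it assigns
 * the new owner a unique open-owner id from cl_openowner_id and links
 * it into the tree.
 */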
307
308static void
309nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
310{
311 if (!RB_EMPTY_NODE(&sp->so_client_node))
312 rb_erase(&sp->so_client_node, &clp->cl_state_owners);
313 nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
314}
315
1da177e4
LT
316/*
317 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
318 * create a new state_owner.
319 *
320 */
321static struct nfs4_state_owner *
322nfs4_alloc_state_owner(void)
323{
324 struct nfs4_state_owner *sp;
325
cee54fc9 326 sp = kzalloc(sizeof(*sp), GFP_KERNEL);
1da177e4
LT
327 if (!sp)
328 return NULL;
ec073428 329 spin_lock_init(&sp->so_lock);
1da177e4
LT
330 INIT_LIST_HEAD(&sp->so_states);
331 INIT_LIST_HEAD(&sp->so_delegations);
cee54fc9
TM
332 rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
333 sp->so_seqid.sequence = &sp->so_sequence;
334 spin_lock_init(&sp->so_sequence.lock);
335 INIT_LIST_HEAD(&sp->so_sequence.list);
1da177e4
LT
336 atomic_set(&sp->so_count, 1);
337 return sp;
338}
339
1d2e88e7 340static void
1da177e4
LT
341nfs4_drop_state_owner(struct nfs4_state_owner *sp)
342{
9f958ab8
TM
343 if (!RB_EMPTY_NODE(&sp->so_client_node)) {
344 struct nfs_client *clp = sp->so_client;
345
346 spin_lock(&clp->cl_lock);
347 rb_erase(&sp->so_client_node, &clp->cl_state_owners);
348 RB_CLEAR_NODE(&sp->so_client_node);
349 spin_unlock(&clp->cl_lock);
350 }
1da177e4
LT
351}
352
1da177e4
LT
353struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
354{
7539bbab 355 struct nfs_client *clp = server->nfs_client;
1da177e4
LT
356 struct nfs4_state_owner *sp, *new;
357
1da177e4 358 spin_lock(&clp->cl_lock);
6f2e64d3 359 sp = nfs4_find_state_owner(server, cred);
1da177e4 360 spin_unlock(&clp->cl_lock);
1da177e4
LT
361 if (sp != NULL)
362 return sp;
9f958ab8
TM
363 new = nfs4_alloc_state_owner();
364 if (new == NULL)
365 return NULL;
366 new->so_client = clp;
6f2e64d3 367 new->so_server = server;
9f958ab8
TM
368 new->so_cred = cred;
369 spin_lock(&clp->cl_lock);
370 sp = nfs4_insert_state_owner(clp, new);
371 spin_unlock(&clp->cl_lock);
372 if (sp == new)
373 get_rpccred(cred);
f6a1cc89
TM
374 else {
375 rpc_destroy_wait_queue(&new->so_sequence.wait);
9f958ab8 376 kfree(new);
f6a1cc89 377 }
9f958ab8 378 return sp;
1da177e4
LT
379}
380
1da177e4
LT
381void nfs4_put_state_owner(struct nfs4_state_owner *sp)
382{
adfa6f98 383 struct nfs_client *clp = sp->so_client;
1da177e4
LT
384 struct rpc_cred *cred = sp->so_cred;
385
386 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
387 return;
9f958ab8 388 nfs4_remove_state_owner(clp, sp);
1da177e4 389 spin_unlock(&clp->cl_lock);
f6a1cc89 390 rpc_destroy_wait_queue(&sp->so_sequence.wait);
1da177e4
LT
391 put_rpccred(cred);
392 kfree(sp);
393}
394
395static struct nfs4_state *
396nfs4_alloc_open_state(void)
397{
398 struct nfs4_state *state;
399
e7616923 400 state = kzalloc(sizeof(*state), GFP_KERNEL);
1da177e4
LT
401 if (!state)
402 return NULL;
1da177e4
LT
403 atomic_set(&state->count, 1);
404 INIT_LIST_HEAD(&state->lock_states);
8d0a8a9d 405 spin_lock_init(&state->state_lock);
8bda4e4c 406 seqlock_init(&state->seqlock);
1da177e4
LT
407 return state;
408}
409
4cecb76f 410void
dc0b027d 411nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
4cecb76f 412{
dc0b027d 413 if (state->state == fmode)
4cecb76f
TM
414 return;
415 /* NB! List reordering - see the reclaim code for why. */
dc0b027d
TM
416 if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
417 if (fmode & FMODE_WRITE)
4cecb76f
TM
418 list_move(&state->open_states, &state->owner->so_states);
419 else
420 list_move_tail(&state->open_states, &state->owner->so_states);
421 }
dc0b027d 422 state->state = fmode;
4cecb76f
TM
423}
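/*
 * Note: the list reordering above keeps writable states at the head
 * of so_states and read-only states at the tail. The reclaim code
 * (nfs4_reclaim_open_state) relies on this ordering so that
 * open(O_RDWR) and open(O_WRITE) state is re-established first,
 * preventing the server from handing out read delegations that would
 * immediately have to be returned.
 */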
424
1da177e4
LT
425static struct nfs4_state *
426__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
427{
428 struct nfs_inode *nfsi = NFS_I(inode);
429 struct nfs4_state *state;
430
431 list_for_each_entry(state, &nfsi->open_states, inode_states) {
1c816efa 432 if (state->owner != owner)
1da177e4 433 continue;
1c816efa 434 if (atomic_inc_not_zero(&state->count))
1da177e4 435 return state;
1da177e4
LT
436 }
437 return NULL;
438}
439
1da177e4
LT
440static void
441nfs4_free_open_state(struct nfs4_state *state)
442{
443 kfree(state);
444}
445
446struct nfs4_state *
447nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
448{
449 struct nfs4_state *state, *new;
450 struct nfs_inode *nfsi = NFS_I(inode);
451
452 spin_lock(&inode->i_lock);
453 state = __nfs4_find_state_byowner(inode, owner);
454 spin_unlock(&inode->i_lock);
455 if (state)
456 goto out;
457 new = nfs4_alloc_open_state();
ec073428 458 spin_lock(&owner->so_lock);
1da177e4
LT
459 spin_lock(&inode->i_lock);
460 state = __nfs4_find_state_byowner(inode, owner);
461 if (state == NULL && new != NULL) {
462 state = new;
1da177e4
LT
463 state->owner = owner;
464 atomic_inc(&owner->so_count);
465 list_add(&state->inode_states, &nfsi->open_states);
466 state->inode = igrab(inode);
467 spin_unlock(&inode->i_lock);
ec073428
TM
468 /* Note: The reclaim code dictates that we add stateless
469 * and read-only stateids to the end of the list */
470 list_add_tail(&state->open_states, &owner->so_states);
471 spin_unlock(&owner->so_lock);
1da177e4
LT
472 } else {
473 spin_unlock(&inode->i_lock);
ec073428 474 spin_unlock(&owner->so_lock);
1da177e4
LT
475 if (new)
476 nfs4_free_open_state(new);
477 }
478out:
479 return state;
480}
481
1da177e4
LT
482void nfs4_put_open_state(struct nfs4_state *state)
483{
484 struct inode *inode = state->inode;
485 struct nfs4_state_owner *owner = state->owner;
486
ec073428 487 if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
1da177e4 488 return;
ec073428 489 spin_lock(&inode->i_lock);
ba683031 490 list_del(&state->inode_states);
1da177e4 491 list_del(&state->open_states);
ec073428
TM
492 spin_unlock(&inode->i_lock);
493 spin_unlock(&owner->so_lock);
1da177e4 494 iput(inode);
1da177e4
LT
495 nfs4_free_open_state(state);
496 nfs4_put_state_owner(owner);
497}
498
499/*
83c9d41e 500 * Close the current file.
1da177e4 501 */
dc0b027d 502static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
1da177e4 503{
1da177e4 504 struct nfs4_state_owner *owner = state->owner;
003707c7 505 int call_close = 0;
dc0b027d 506 fmode_t newstate;
1da177e4
LT
507
508 atomic_inc(&owner->so_count);
1da177e4 509 /* Protect against nfs4_find_state() */
ec073428 510 spin_lock(&owner->so_lock);
dc0b027d 511 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
e7616923
TM
512 case FMODE_READ:
513 state->n_rdonly--;
514 break;
515 case FMODE_WRITE:
516 state->n_wronly--;
517 break;
518 case FMODE_READ|FMODE_WRITE:
519 state->n_rdwr--;
520 }
003707c7 521 newstate = FMODE_READ|FMODE_WRITE;
e7616923 522 if (state->n_rdwr == 0) {
003707c7 523 if (state->n_rdonly == 0) {
e7616923 524 newstate &= ~FMODE_READ;
003707c7
TM
525 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
526 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
527 }
528 if (state->n_wronly == 0) {
e7616923 529 newstate &= ~FMODE_WRITE;
003707c7
TM
530 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
531 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
532 }
533 if (newstate == 0)
534 clear_bit(NFS_DELEGATED_STATE, &state->flags);
e7616923 535 }
003707c7 536 nfs4_state_set_mode_locked(state, newstate);
ec073428 537 spin_unlock(&owner->so_lock);
4cecb76f 538
003707c7 539 if (!call_close) {
b39e625b
TM
540 nfs4_put_open_state(state);
541 nfs4_put_state_owner(owner);
542 } else
a49c3c77
TM
543 nfs4_do_close(path, state, wait);
544}
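/*
 * Note: __nfs4_close() drops the per-mode open counter for @fmode,
 * recomputes which share modes are still in use, and only sets
 * call_close when an NFS_O_*_STATE bit shows the server still holds
 * state for a mode whose count has reached zero. If nothing needs to
 * go on the wire the references are simply released; otherwise the
 * state is handed to nfs4_do_close(), synchronously when @wait is set.
 */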
545
dc0b027d 546void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
a49c3c77 547{
dc0b027d 548 __nfs4_close(path, state, fmode, 0);
a49c3c77
TM
549}
550
dc0b027d 551void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
a49c3c77 552{
dc0b027d 553 __nfs4_close(path, state, fmode, 1);
1da177e4
LT
554}
555
556/*
557 * Search the state->lock_states for an existing lock_owner
558 * that is compatible with current->files
559 */
560static struct nfs4_lock_state *
561__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
562{
563 struct nfs4_lock_state *pos;
564 list_for_each_entry(pos, &state->lock_states, ls_locks) {
565 if (pos->ls_owner != fl_owner)
566 continue;
567 atomic_inc(&pos->ls_count);
568 return pos;
569 }
570 return NULL;
571}
572
1da177e4
LT
573/*
574 * Return a compatible lock_state. If no initialized lock_state structure
575 * exists, return an uninitialized one.
576 *
1da177e4
LT
577 */
578static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
579{
580 struct nfs4_lock_state *lsp;
adfa6f98 581 struct nfs_client *clp = state->owner->so_client;
1da177e4 582
cee54fc9 583 lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
1da177e4
LT
584 if (lsp == NULL)
585 return NULL;
d0dc3701
TM
586 rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
587 spin_lock_init(&lsp->ls_sequence.lock);
588 INIT_LIST_HEAD(&lsp->ls_sequence.list);
589 lsp->ls_seqid.sequence = &lsp->ls_sequence;
1da177e4 590 atomic_set(&lsp->ls_count, 1);
b64aec8d 591 lsp->ls_state = state;
1da177e4 592 lsp->ls_owner = fl_owner;
1da177e4 593 spin_lock(&clp->cl_lock);
9f958ab8 594 nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
1da177e4 595 spin_unlock(&clp->cl_lock);
8d0a8a9d 596 INIT_LIST_HEAD(&lsp->ls_locks);
1da177e4
LT
597 return lsp;
598}
599
9f958ab8
TM
600static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
601{
602 struct nfs_client *clp = lsp->ls_state->owner->so_client;
603
604 spin_lock(&clp->cl_lock);
605 nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
606 spin_unlock(&clp->cl_lock);
f6a1cc89 607 rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
9f958ab8
TM
608 kfree(lsp);
609}
610
1da177e4
LT
611/*
612 * Return a compatible lock_state. If no initialized lock_state structure
613 * exists, return an uninitialized one.
614 *
1da177e4 615 */
8d0a8a9d 616static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
1da177e4 617{
8d0a8a9d 618 struct nfs4_lock_state *lsp, *new = NULL;
1da177e4 619
8d0a8a9d
TM
620 for(;;) {
621 spin_lock(&state->state_lock);
622 lsp = __nfs4_find_lock_state(state, owner);
623 if (lsp != NULL)
624 break;
625 if (new != NULL) {
8d0a8a9d
TM
626 list_add(&new->ls_locks, &state->lock_states);
627 set_bit(LK_STATE_IN_USE, &state->flags);
628 lsp = new;
629 new = NULL;
630 break;
631 }
632 spin_unlock(&state->state_lock);
633 new = nfs4_alloc_lock_state(state, owner);
634 if (new == NULL)
635 return NULL;
636 }
637 spin_unlock(&state->state_lock);
9f958ab8
TM
638 if (new != NULL)
639 nfs4_free_lock_state(new);
1da177e4
LT
640 return lsp;
641}
642
643/*
8d0a8a9d
TM
644 * Release reference to lock_state, and free it if we see that
645 * it is no longer in use
1da177e4 646 */
faf5f49c 647void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
1da177e4 648{
8d0a8a9d 649 struct nfs4_state *state;
1da177e4 650
8d0a8a9d
TM
651 if (lsp == NULL)
652 return;
653 state = lsp->ls_state;
654 if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
655 return;
656 list_del(&lsp->ls_locks);
657 if (list_empty(&state->lock_states))
658 clear_bit(LK_STATE_IN_USE, &state->flags);
659 spin_unlock(&state->state_lock);
9f958ab8 660 nfs4_free_lock_state(lsp);
1da177e4
LT
661}
662
8d0a8a9d 663static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
1da177e4 664{
8d0a8a9d 665 struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
1da177e4 666
8d0a8a9d
TM
667 dst->fl_u.nfs4_fl.owner = lsp;
668 atomic_inc(&lsp->ls_count);
669}
1da177e4 670
8d0a8a9d 671static void nfs4_fl_release_lock(struct file_lock *fl)
1da177e4 672{
8d0a8a9d 673 nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
1da177e4
LT
674}
675
6aed6285 676static const struct file_lock_operations nfs4_fl_lock_ops = {
8d0a8a9d
TM
677 .fl_copy_lock = nfs4_fl_copy_lock,
678 .fl_release_private = nfs4_fl_release_lock,
679};
680
681int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
1da177e4 682{
8d0a8a9d
TM
683 struct nfs4_lock_state *lsp;
684
685 if (fl->fl_ops != NULL)
686 return 0;
687 lsp = nfs4_get_lock_state(state, fl->fl_owner);
688 if (lsp == NULL)
689 return -ENOMEM;
690 fl->fl_u.nfs4_fl.owner = lsp;
691 fl->fl_ops = &nfs4_fl_lock_ops;
692 return 0;
1da177e4
LT
693}
694
8d0a8a9d
TM
695/*
696 * Byte-range lock aware utility to initialize the stateid of read/write
697 * requests.
1da177e4 698 */
8d0a8a9d 699void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
1da177e4 700{
8d0a8a9d 701 struct nfs4_lock_state *lsp;
8bda4e4c 702 int seq;
1da177e4 703
8bda4e4c
TM
704 do {
705 seq = read_seqbegin(&state->seqlock);
706 memcpy(dst, &state->stateid, sizeof(*dst));
707 } while (read_seqretry(&state->seqlock, seq));
8d0a8a9d
TM
708 if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
709 return;
1da177e4 710
8d0a8a9d
TM
711 spin_lock(&state->state_lock);
712 lsp = __nfs4_find_lock_state(state, fl_owner);
713 if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
714 memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
715 spin_unlock(&state->state_lock);
1da177e4
LT
716 nfs4_put_lock_state(lsp);
717}
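/*
 * Note: the seqlock retry loop guarantees a torn-free copy of the
 * open stateid even while it is being updated. If byte-range locks
 * are in use, an initialized lock stateid for @fl_owner takes
 * precedence over the open stateid for read/write requests.
 */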
718
cee54fc9
TM
719struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
720{
cee54fc9
TM
721 struct nfs_seqid *new;
722
723 new = kmalloc(sizeof(*new), GFP_KERNEL);
724 if (new != NULL) {
725 new->sequence = counter;
2f74c0a0 726 INIT_LIST_HEAD(&new->list);
cee54fc9
TM
727 }
728 return new;
729}
730
731void nfs_free_seqid(struct nfs_seqid *seqid)
1da177e4 732{
2f74c0a0
TM
733 if (!list_empty(&seqid->list)) {
734 struct rpc_sequence *sequence = seqid->sequence->sequence;
cee54fc9 735
2f74c0a0
TM
736 spin_lock(&sequence->lock);
737 list_del(&seqid->list);
738 spin_unlock(&sequence->lock);
739 rpc_wake_up(&sequence->wait);
740 }
cee54fc9 741 kfree(seqid);
1da177e4
LT
742}
743
744/*
cee54fc9
TM
745 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
746 * failed with a seqid incrementing error -
747 * see comments nfs_fs.h:seqid_mutating_error()
748 */
88d90939 749static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
cee54fc9 750{
2f74c0a0 751 BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
cee54fc9
TM
752 switch (status) {
753 case 0:
754 break;
755 case -NFS4ERR_BAD_SEQID:
6f43ddcc
TM
756 if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
757 return;
758 printk(KERN_WARNING "NFS: v4 server returned a bad"
497799e7
DM
759 " sequence-id error on an"
760 " unconfirmed sequence %p!\n",
6f43ddcc 761 seqid->sequence);
cee54fc9
TM
762 case -NFS4ERR_STALE_CLIENTID:
763 case -NFS4ERR_STALE_STATEID:
764 case -NFS4ERR_BAD_STATEID:
765 case -NFS4ERR_BADXDR:
766 case -NFS4ERR_RESOURCE:
767 case -NFS4ERR_NOFILEHANDLE:
768 /* Non-seqid mutating errors */
769 return;
770 };
771 /*
772 * Note: no locking needed as we are guaranteed to be first
773 * on the sequence list
774 */
775 seqid->sequence->counter++;
776}
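/*
 * Note: the counter is bumped only on success and for genuinely
 * seqid-mutating errors (the default case). The errors listed above
 * return without touching it, and a BAD_SEQID on an unconfirmed
 * sequence merely logs a warning before falling through to the
 * non-mutating cases.
 */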
777
778void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
779{
34dc1ad7
BH
780 struct nfs4_state_owner *sp = container_of(seqid->sequence,
781 struct nfs4_state_owner, so_seqid);
782 struct nfs_server *server = sp->so_server;
783
784 if (status == -NFS4ERR_BAD_SEQID)
1da177e4 785 nfs4_drop_state_owner(sp);
34dc1ad7
BH
786 if (!nfs4_has_session(server->nfs_client))
787 nfs_increment_seqid(status, seqid);
cee54fc9
TM
788}
789
790/*
cee54fc9
TM
791 * Increment the seqid if the LOCK/LOCKU succeeded, or
792 * failed with a seqid incrementing error -
793 * see comments nfs_fs.h:seqid_mutating_error()
794 */
795void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
796{
88d90939 797 nfs_increment_seqid(status, seqid);
cee54fc9
TM
798}
799
800int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
801{
802 struct rpc_sequence *sequence = seqid->sequence->sequence;
803 int status = 0;
804
805 spin_lock(&sequence->lock);
2f74c0a0
TM
806 if (list_empty(&seqid->list))
807 list_add_tail(&seqid->list, &sequence->list);
808 if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
809 goto unlock;
5d00837b 810 rpc_sleep_on(&sequence->wait, task, NULL);
2f74c0a0
TM
811 status = -EAGAIN;
812unlock:
cee54fc9
TM
813 spin_unlock(&sequence->lock);
814 return status;
1da177e4
LT
815}
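/*
 * Note: nfs_wait_on_sequence() serializes seqid-bearing operations.
 * The seqid is queued on the owner's rpc_sequence list; only the
 * entry at the head may proceed, while everyone else sleeps on the
 * sequence wait queue and gets -EAGAIN so the RPC is retried later.
 * nfs_free_seqid() removes the entry and wakes the next waiter.
 */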
816
e005e804 817static int nfs4_run_state_manager(void *);
1da177e4 818
e005e804 819static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
433fbe4c
TM
820{
821 smp_mb__before_clear_bit();
e005e804 822 clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
433fbe4c 823 smp_mb__after_clear_bit();
e005e804 824 wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
433fbe4c
TM
825 rpc_wake_up(&clp->cl_rpcwaitq);
826}
827
1da177e4 828/*
e005e804 829 * Schedule the nfs_client asynchronous state management routine
1da177e4 830 */
b0d3ded1 831void nfs4_schedule_state_manager(struct nfs_client *clp)
1da177e4 832{
5043e900 833 struct task_struct *task;
1da177e4 834
e005e804
TM
835 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
836 return;
5043e900
TM
837 __module_get(THIS_MODULE);
838 atomic_inc(&clp->cl_count);
e005e804 839 task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
5d8515ca
CL
840 rpc_peeraddr2str(clp->cl_rpcclient,
841 RPC_DISPLAY_ADDR));
5043e900
TM
842 if (!IS_ERR(task))
843 return;
e005e804 844 nfs4_clear_state_manager_bit(clp);
24c8dbbb 845 nfs_put_client(clp);
5043e900 846 module_put(THIS_MODULE);
1da177e4
LT
847}
848
849/*
850 * Schedule a state recovery attempt
851 */
adfa6f98 852void nfs4_schedule_state_recovery(struct nfs_client *clp)
1da177e4
LT
853{
854 if (!clp)
855 return;
e598d843
TM
856 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
857 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
e005e804 858 nfs4_schedule_state_manager(clp);
1da177e4
LT
859}
860
b79a4a1b
TM
861static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
862{
863
864 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
865 /* Don't recover state that expired before the reboot */
866 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
867 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
868 return 0;
869 }
7eff03ae 870 set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
b79a4a1b
TM
871 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
872 return 1;
873}
874
9e33bed5 875int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
b79a4a1b
TM
876{
877 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
878 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
7eff03ae 879 set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
b79a4a1b
TM
880 set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
881 return 1;
882}
883
02860014 884static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
1da177e4
LT
885{
886 struct inode *inode = state->inode;
19e03c57 887 struct nfs_inode *nfsi = NFS_I(inode);
1da177e4
LT
888 struct file_lock *fl;
889 int status = 0;
890
3f09df70
TM
891 if (inode->i_flock == NULL)
892 return 0;
893
894 /* Guard against delegation returns and new lock/unlock calls */
19e03c57 895 down_write(&nfsi->rwsem);
3f09df70
TM
896 /* Protect inode->i_flock using the BKL */
897 lock_kernel();
90dc7d27 898 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
43b2a33a 899 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
1da177e4 900 continue;
cd3758e3 901 if (nfs_file_open_context(fl->fl_file)->state != state)
1da177e4 902 continue;
3f09df70 903 unlock_kernel();
1da177e4 904 status = ops->recover_lock(state, fl);
1da177e4 905 switch (status) {
965b5d67
TM
906 case 0:
907 break;
908 case -ESTALE:
909 case -NFS4ERR_ADMIN_REVOKED:
910 case -NFS4ERR_STALE_STATEID:
911 case -NFS4ERR_BAD_STATEID:
912 case -NFS4ERR_EXPIRED:
913 case -NFS4ERR_NO_GRACE:
914 case -NFS4ERR_STALE_CLIENTID:
9c4c761a
TM
915 case -NFS4ERR_BADSESSION:
916 case -NFS4ERR_BADSLOT:
917 case -NFS4ERR_BAD_HIGH_SLOT:
918 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
965b5d67 919 goto out;
1da177e4
LT
920 default:
921 printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
3110ff80 922 __func__, status);
965b5d67
TM
923 case -ENOMEM:
924 case -NFS4ERR_DENIED:
1da177e4
LT
925 case -NFS4ERR_RECLAIM_BAD:
926 case -NFS4ERR_RECLAIM_CONFLICT:
43b2a33a 927 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
965b5d67 928 status = 0;
1da177e4 929 }
3f09df70 930 lock_kernel();
1da177e4 931 }
3f09df70 932 unlock_kernel();
965b5d67 933out:
19e03c57 934 up_write(&nfsi->rwsem);
1da177e4
LT
935 return status;
936}
937
02860014 938static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
1da177e4
LT
939{
940 struct nfs4_state *state;
941 struct nfs4_lock_state *lock;
942 int status = 0;
943
944 /* Note: we rely on the sp->so_states list being ordered
945 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
946 * states first.
947 * This is needed to ensure that the server won't give us any
948 * read delegations that we have to return if, say, we are
949 * recovering after a network partition or a reboot from a
950 * server that doesn't support a grace period.
951 */
fe1d8195
TM
952restart:
953 spin_lock(&sp->so_lock);
1da177e4 954 list_for_each_entry(state, &sp->so_states, open_states) {
b79a4a1b
TM
955 if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
956 continue;
1da177e4
LT
957 if (state->state == 0)
958 continue;
fe1d8195
TM
959 atomic_inc(&state->count);
960 spin_unlock(&sp->so_lock);
1da177e4 961 status = ops->recover_open(sp, state);
1da177e4 962 if (status >= 0) {
02860014
TM
963 status = nfs4_reclaim_locks(state, ops);
964 if (status >= 0) {
965 list_for_each_entry(lock, &state->lock_states, ls_locks) {
966 if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
967 printk("%s: Lock reclaim failed!\n",
3110ff80 968 __func__);
02860014 969 }
fe1d8195
TM
970 nfs4_put_open_state(state);
971 goto restart;
1da177e4 972 }
1da177e4
LT
973 }
974 switch (status) {
975 default:
976 printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
3110ff80 977 __func__, status);
1da177e4 978 case -ENOENT:
965b5d67 979 case -ENOMEM:
b79a4a1b 980 case -ESTALE:
1da177e4
LT
981 /*
982 * Open state on this file cannot be recovered
983 * All we can do is revert to using the zero stateid.
984 */
985 memset(state->stateid.data, 0,
986 sizeof(state->stateid.data));
987 /* Mark the file as being 'closed' */
988 state->state = 0;
989 break;
965b5d67
TM
990 case -NFS4ERR_ADMIN_REVOKED:
991 case -NFS4ERR_STALE_STATEID:
992 case -NFS4ERR_BAD_STATEID:
b79a4a1b
TM
993 case -NFS4ERR_RECLAIM_BAD:
994 case -NFS4ERR_RECLAIM_CONFLICT:
995 nfs4_state_mark_reclaim_nograce(sp->so_client, state);
996 break;
1da177e4
LT
997 case -NFS4ERR_EXPIRED:
998 case -NFS4ERR_NO_GRACE:
b79a4a1b 999 nfs4_state_mark_reclaim_nograce(sp->so_client, state);
1da177e4 1000 case -NFS4ERR_STALE_CLIENTID:
9c4c761a
TM
1001 case -NFS4ERR_BADSESSION:
1002 case -NFS4ERR_BADSLOT:
1003 case -NFS4ERR_BAD_HIGH_SLOT:
1004 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1da177e4
LT
1005 goto out_err;
1006 }
fe1d8195
TM
1007 nfs4_put_open_state(state);
1008 goto restart;
1da177e4 1009 }
fe1d8195 1010 spin_unlock(&sp->so_lock);
1da177e4
LT
1011 return 0;
1012out_err:
fe1d8195 1013 nfs4_put_open_state(state);
1da177e4
LT
1014 return status;
1015}
1016
b79a4a1b
TM
1017static void nfs4_clear_open_state(struct nfs4_state *state)
1018{
1019 struct nfs4_lock_state *lock;
1020
1021 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1022 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1023 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1024 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1025 list_for_each_entry(lock, &state->lock_states, ls_locks) {
b79a4a1b
TM
1026 lock->ls_seqid.flags = 0;
1027 lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
1028 }
1029}
1030
1031static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
cee54fc9
TM
1032{
1033 struct nfs4_state_owner *sp;
9f958ab8 1034 struct rb_node *pos;
cee54fc9 1035 struct nfs4_state *state;
cee54fc9
TM
1036
1037 /* Reset all sequence ids to zero */
9f958ab8
TM
1038 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1039 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
cee54fc9 1040 sp->so_seqid.flags = 0;
ec073428 1041 spin_lock(&sp->so_lock);
cee54fc9 1042 list_for_each_entry(state, &sp->so_states, open_states) {
b79a4a1b
TM
1043 if (mark_reclaim(clp, state))
1044 nfs4_clear_open_state(state);
cee54fc9 1045 }
ec073428 1046 spin_unlock(&sp->so_lock);
cee54fc9
TM
1047 }
1048}
1049
b79a4a1b
TM
1050static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
1051{
1052 /* Mark all delegations for reclaim */
1053 nfs_delegation_mark_reclaim(clp);
1054 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
1055}
1056
fce5c838
RL
1057static void nfs4_reclaim_complete(struct nfs_client *clp,
1058 const struct nfs4_state_recovery_ops *ops)
1059{
1060 /* Notify the server we're done reclaiming our state */
1061 if (ops->reclaim_complete)
1062 (void)ops->reclaim_complete(clp);
1063}
1064
b79a4a1b
TM
1065static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
1066{
1067 struct nfs4_state_owner *sp;
1068 struct rb_node *pos;
1069 struct nfs4_state *state;
1070
da6ebfe3
RL
1071 nfs4_reclaim_complete(clp,
1072 nfs4_reboot_recovery_ops[clp->cl_minorversion]);
1073
b79a4a1b
TM
1074 if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1075 return;
1076
1077 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1078 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
1079 spin_lock(&sp->so_lock);
1080 list_for_each_entry(state, &sp->so_states, open_states) {
1081 if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
1082 continue;
1083 nfs4_state_mark_reclaim_nograce(clp, state);
1084 }
1085 spin_unlock(&sp->so_lock);
1086 }
1087
1088 nfs_delegation_reap_unclaimed(clp);
1089}
1090
1091static void nfs_delegation_clear_all(struct nfs_client *clp)
1092{
1093 nfs_delegation_mark_reclaim(clp);
1094 nfs_delegation_reap_unclaimed(clp);
1095}
1096
1097static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
1098{
1099 nfs_delegation_clear_all(clp);
1100 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
1101}
1102
4f7cdf18 1103static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
e598d843
TM
1104{
1105 switch (error) {
1106 case -NFS4ERR_CB_PATH_DOWN:
707fb4b3 1107 nfs_handle_cb_pathdown(clp);
4f7cdf18 1108 return 0;
c8b7ae3d
TM
1109 case -NFS4ERR_NO_GRACE:
1110 nfs4_state_end_reclaim_reboot(clp);
1111 return 0;
e598d843
TM
1112 case -NFS4ERR_STALE_CLIENTID:
1113 case -NFS4ERR_LEASE_MOVED:
1114 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
e345e88a 1115 nfs4_state_end_reclaim_reboot(clp);
e598d843
TM
1116 nfs4_state_start_reclaim_reboot(clp);
1117 break;
1118 case -NFS4ERR_EXPIRED:
1119 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1120 nfs4_state_start_reclaim_nograce(clp);
8ba9bf8e 1121 break;
c3fad1b1
AA
1122 case -NFS4ERR_BADSESSION:
1123 case -NFS4ERR_BADSLOT:
1124 case -NFS4ERR_BAD_HIGH_SLOT:
1125 case -NFS4ERR_DEADSESSION:
1126 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1127 case -NFS4ERR_SEQ_FALSE_RETRY:
1128 case -NFS4ERR_SEQ_MISORDERED:
6df08189 1129 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
0b9e2d41
AA
1130 /* Zero session reset errors */
1131 return 0;
e598d843 1132 }
4f7cdf18 1133 return error;
e598d843
TM
1134}
1135
02860014 1136static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
1da177e4 1137{
9f958ab8 1138 struct rb_node *pos;
1da177e4
LT
1139 int status = 0;
1140
7eff03ae
TM
1141restart:
1142 spin_lock(&clp->cl_lock);
02860014
TM
1143 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1144 struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
7eff03ae
TM
1145 if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
1146 continue;
1147 atomic_inc(&sp->so_count);
1148 spin_unlock(&clp->cl_lock);
02860014 1149 status = nfs4_reclaim_open_state(sp, ops);
7eff03ae
TM
1150 if (status < 0) {
1151 set_bit(ops->owner_flag_bit, &sp->so_flags);
1152 nfs4_put_state_owner(sp);
4f7cdf18 1153 return nfs4_recovery_handle_error(clp, status);
7eff03ae
TM
1154 }
1155 nfs4_put_state_owner(sp);
1156 goto restart;
02860014 1157 }
7eff03ae 1158 spin_unlock(&clp->cl_lock);
02860014
TM
1159 return status;
1160}
1161
1162static int nfs4_check_lease(struct nfs_client *clp)
1163{
1164 struct rpc_cred *cred;
8e69514f
BH
1165 struct nfs4_state_maintenance_ops *ops =
1166 nfs4_state_renewal_ops[clp->cl_minorversion];
02860014 1167 int status = -NFS4ERR_EXPIRED;
1da177e4 1168
0f605b56
TM
1169 /* Is the client already known to have an expired lease? */
1170 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1171 return 0;
a7b72103
AA
1172 spin_lock(&clp->cl_lock);
1173 cred = ops->get_state_renewal_cred_locked(clp);
1174 spin_unlock(&clp->cl_lock);
0f605b56
TM
1175 if (cred == NULL) {
1176 cred = nfs4_get_setclientid_cred(clp);
1177 if (cred == NULL)
1178 goto out;
286d7d6a 1179 }
8e69514f 1180 status = ops->renew_lease(clp, cred);
0f605b56
TM
1181 put_rpccred(cred);
1182out:
4f7cdf18 1183 return nfs4_recovery_handle_error(clp, status);
02860014
TM
1184}
1185
1186static int nfs4_reclaim_lease(struct nfs_client *clp)
1187{
1188 struct rpc_cred *cred;
591d71cb
AA
1189 struct nfs4_state_recovery_ops *ops =
1190 nfs4_reboot_recovery_ops[clp->cl_minorversion];
02860014
TM
1191 int status = -ENOENT;
1192
90a16617 1193 cred = ops->get_clid_cred(clp);
286d7d6a 1194 if (cred != NULL) {
591d71cb 1195 status = ops->establish_clid(clp, cred);
286d7d6a 1196 put_rpccred(cred);
a2b2bb88
TM
1197 /* Handle case where the user hasn't set up machine creds */
1198 if (status == -EACCES && cred == clp->cl_machine_cred) {
1199 nfs4_clear_machine_cred(clp);
02860014 1200 status = -EAGAIN;
a2b2bb88 1201 }
c2e713dd
BH
1202 if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
1203 status = -EPROTONOSUPPORT;
286d7d6a 1204 }
02860014
TM
1205 return status;
1206}
1207
76db6d95 1208#ifdef CONFIG_NFS_V4_1
0629e370
AB
1209void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
1210{
1211 if (!flags)
1212 return;
1213 else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) {
1214 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1215 nfs4_state_start_reclaim_reboot(clp);
1216 nfs4_schedule_state_recovery(clp);
1217 } else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
1218 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
1219 SEQ4_STATUS_ADMIN_STATE_REVOKED |
1220 SEQ4_STATUS_RECALLABLE_STATE_REVOKED |
1221 SEQ4_STATUS_LEASE_MOVED)) {
1222 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1223 nfs4_state_start_reclaim_nograce(clp);
1224 nfs4_schedule_state_recovery(clp);
1225 } else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
1226 SEQ4_STATUS_BACKCHANNEL_FAULT |
1227 SEQ4_STATUS_CB_PATH_DOWN_SESSION))
1228 nfs_expire_all_delegations(clp);
1229}
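/*
 * Note: these SEQ4_STATUS flags arrive piggy-backed on every v4.1
 * SEQUENCE reply. The reclaim-needed flag triggers reboot-style
 * recovery, the expired/revoked/lease-moved flags trigger nograce
 * recovery, and the callback-path flags cause all delegations to be
 * returned.
 */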
1230
c3fad1b1
AA
1231static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
1232{
1233 switch (err) {
1234 case -NFS4ERR_STALE_CLIENTID:
1235 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
c3fad1b1
AA
1236 }
1237}
1238
1239static int nfs4_reset_session(struct nfs_client *clp)
1240{
ea028ac9
AA
1241 struct nfs4_session *ses = clp->cl_session;
1242 struct nfs4_slot_table *tbl = &ses->fc_slot_table;
c3fad1b1
AA
1243 int status;
1244
ea028ac9 1245 spin_lock(&tbl->slot_tbl_lock);
0556d1a6 1246 set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
ea028ac9 1247 if (tbl->highest_used_slotid != -1) {
35dc1d74 1248 INIT_COMPLETION(ses->complete);
ea028ac9
AA
1249 spin_unlock(&tbl->slot_tbl_lock);
1250 status = wait_for_completion_interruptible(&ses->complete);
ea028ac9
AA
1251 if (status) /* -ERESTARTSYS */
1252 goto out;
1253 } else {
1254 spin_unlock(&tbl->slot_tbl_lock);
1255 }
1256
c3fad1b1
AA
1257 status = nfs4_proc_destroy_session(clp->cl_session);
1258 if (status && status != -NFS4ERR_BADSESSION &&
1259 status != -NFS4ERR_DEADSESSION) {
1260 nfs4_session_recovery_handle_error(clp, status);
1261 goto out;
1262 }
1263
1264 memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
f26468fb 1265 status = nfs4_proc_create_session(clp);
c3fad1b1
AA
1266 if (status)
1267 nfs4_session_recovery_handle_error(clp, status);
1268 /* fall through*/
1269out:
1270 /* Wake up the next rpc task even on error */
0b9e2d41 1271 clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
35dc1d74 1272 rpc_wake_up(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
9430fb6b
RL
1273 if (status == 0)
1274 nfs41_setup_state_renewal(clp);
c3fad1b1
AA
1275 return status;
1276}
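/*
 * Note: nfs4_reset_session() first drains the forechannel by setting
 * NFS4CLNT_SESSION_DRAINING and waiting for outstanding slots to
 * complete, then destroys and recreates the session (ignoring
 * BADSESSION/DEADSESSION from destroying an already dead session).
 * On success it re-arms lease renewal through
 * nfs41_setup_state_renewal(); in every case the draining bit is
 * cleared and waiting RPC tasks are woken before returning.
 */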
76db6d95 1277
76db6d95 1278#else /* CONFIG_NFS_V4_1 */
c3fad1b1 1279static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
76db6d95
AA
1280#endif /* CONFIG_NFS_V4_1 */
1281
78722e9c
AA
1282/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
1283 * on EXCHANGE_ID for v4.1
1284 */
1285static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
1286{
1287 if (nfs4_has_session(clp)) {
1288 switch (status) {
1289 case -NFS4ERR_DELAY:
1290 case -NFS4ERR_CLID_INUSE:
1291 case -EAGAIN:
1292 break;
1293
1294 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1295 * in nfs4_exchange_id */
1296 default:
1297 return;
1298 }
1299 }
1300 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1301}
1302
e005e804 1303static void nfs4_state_manager(struct nfs_client *clp)
02860014 1304{
02860014
TM
1305 int status = 0;
1306
02860014 1307 /* Ensure exclusive access to NFSv4 state */
f3c76491 1308 for(;;) {
b79a4a1b
TM
1309 if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
1310 /* We're going to have to re-establish a clientid */
1311 status = nfs4_reclaim_lease(clp);
1312 if (status) {
78722e9c 1313 nfs4_set_lease_expired(clp, status);
b6d408ba
TM
1314 if (test_bit(NFS4CLNT_LEASE_EXPIRED,
1315 &clp->cl_state))
b79a4a1b 1316 continue;
76db6d95
AA
1317 if (clp->cl_cons_state ==
1318 NFS_CS_SESSION_INITING)
1319 nfs_mark_client_ready(clp, status);
b79a4a1b
TM
1320 goto out_error;
1321 }
e598d843
TM
1322 clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1323 }
1324
1325 if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
1326 status = nfs4_check_lease(clp);
b6d408ba 1327 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
e598d843 1328 continue;
b6d408ba
TM
1329 if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
1330 goto out_error;
b79a4a1b 1331 }
b6d408ba 1332
c3fad1b1 1333 /* Initialize or reset the session */
6df08189 1334 if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
7111dc73 1335 && nfs4_has_session(clp)) {
4d643d1d 1336 status = nfs4_reset_session(clp);
b6d408ba
TM
1337 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1338 continue;
1339 if (status < 0)
76db6d95 1340 goto out_error;
76db6d95 1341 }
b6d408ba 1342
b79a4a1b 1343 /* First recover reboot state... */
e345e88a 1344 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
591d71cb
AA
1345 status = nfs4_do_reclaim(clp,
1346 nfs4_reboot_recovery_ops[clp->cl_minorversion]);
b6d408ba 1347 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
6df08189 1348 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
c3fad1b1 1349 continue;
b79a4a1b 1350 nfs4_state_end_reclaim_reboot(clp);
b6d408ba
TM
1351 if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
1352 continue;
1353 if (status < 0)
1354 goto out_error;
02860014
TM
1355 }
1356
b79a4a1b
TM
1357 /* Now recover expired state... */
1358 if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
591d71cb
AA
1359 status = nfs4_do_reclaim(clp,
1360 nfs4_nograce_recovery_ops[clp->cl_minorversion]);
b6d408ba 1361 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
6df08189 1362 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
b6d408ba
TM
1363 test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1364 continue;
1365 if (status < 0)
b79a4a1b 1366 goto out_error;
1da177e4 1367 }
707fb4b3
TM
1368
1369 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
1370 nfs_client_return_marked_delegations(clp);
1371 continue;
1372 }
e005e804
TM
1373
1374 nfs4_clear_state_manager_bit(clp);
f3c76491
TM
1375 /* Did we race with an attempt to give us more work? */
1376 if (clp->cl_state == 0)
1377 break;
1378 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1379 break;
1da177e4 1380 }
e005e804 1381 return;
1da177e4 1382out_error:
e005e804 1383 printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
5d8515ca 1384 " with error %d\n", clp->cl_hostname, -status);
e005e804
TM
1385 nfs4_clear_state_manager_bit(clp);
1386}
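/*
 * Note: the state manager loops over the cl_state bits in a fixed
 * order: re-establish the clientid (LEASE_EXPIRED), check the lease
 * (CHECK_LEASE), reset the v4.1 session (SESSION_RESET), reclaim
 * reboot state (RECLAIM_REBOOT), reclaim expired state
 * (RECLAIM_NOGRACE), then return marked delegations (DELEGRETURN).
 * After any step that sets LEASE_EXPIRED or SESSION_RESET again it
 * restarts from the top, and it exits only once cl_state is clear or
 * another instance has re-taken NFS4CLNT_MANAGER_RUNNING.
 */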
1387
1388static int nfs4_run_state_manager(void *ptr)
1389{
1390 struct nfs_client *clp = ptr;
1391
1392 allow_signal(SIGKILL);
1393 nfs4_state_manager(clp);
1394 nfs_put_client(clp);
1395 module_put_and_exit(0);
1396 return 0;
1da177e4
LT
1397}
1398
1399/*
1400 * Local variables:
1401 * c-basic-offset: 8
1402 * End:
1403 */