NFS: Use the inode->i_version to cache NFSv4 change attribute information
[deliverable/linux.git] / fs/nfs/delegation.c
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

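/*
 * Release the delegation's credential and free the structure once an
 * RCU grace period has elapsed.
 */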
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
        if (delegation->cred) {
                put_rpccred(delegation->cred);
                delegation->cred = NULL;
        }
        kfree_rcu(delegation, rcu);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

/**
 * nfs_have_delegation - check if inode has a delegation
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs_have_delegation(struct inode *inode, fmode_t flags)
{
        struct nfs_delegation *delegation;
        int ret = 0;

        flags &= FMODE_READ|FMODE_WRITE;
        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL && (delegation->type & flags) == flags) {
                nfs_mark_delegation_referenced(delegation);
                ret = 1;
        }
        rcu_read_unlock();
        return ret;
}

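/*
 * Re-establish any POSIX or flock locks taken under the delegated open
 * context by recalling them to the server as part of delegation return.
 */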
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        if (inode->i_flock == NULL)
                goto out;

        /* Protect inode->i_flock using the file locks lock */
        lock_flocks();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (nfs_file_open_context(fl->fl_file) != ctx)
                        continue;
                unlock_flocks();
                status = nfs4_lock_delegation_recall(state, fl);
                if (status < 0)
                        goto out;
                lock_flocks();
        }
        unlock_flocks();
out:
        return status;
}

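/*
 * Claim the open and lock state covered by the delegation being returned:
 * for each open context whose stateid matches, recall the delegated open
 * and then any locks held under it.
 */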
static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        int err;

again:
        spin_lock(&inode->i_lock);
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
                if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
                        continue;
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                err = nfs4_open_delegation_recall(ctx, state, stateid);
                if (err >= 0)
                        err = nfs_delegation_claim_locks(ctx, state);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return err;
                goto again;
        }
        spin_unlock(&inode->i_lock);
        return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @res: new delegation state from server
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
                                  struct nfs_openres *res)
{
        struct nfs_delegation *delegation;
        struct rpc_cred *oldcred = NULL;

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL) {
                        memcpy(delegation->stateid.data, res->delegation.data,
                               sizeof(delegation->stateid.data));
                        delegation->type = res->delegation_type;
                        delegation->maxsize = res->maxsize;
                        oldcred = delegation->cred;
                        delegation->cred = get_rpccred(cred);
                        clear_bit(NFS_DELEGATION_NEED_RECLAIM,
                                  &delegation->flags);
                        NFS_I(inode)->delegation_state = delegation->type;
                        spin_unlock(&delegation->lock);
                        put_rpccred(oldcred);
                        rcu_read_unlock();
                } else {
                        /* We appear to have raced with a delegation return. */
                        spin_unlock(&delegation->lock);
                        rcu_read_unlock();
                        nfs_inode_set_delegation(inode, cred, res);
                }
        } else {
                rcu_read_unlock();
        }
}

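/*
 * Send DELEGRETURN to the server and free the local delegation structure.
 */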
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        int res = 0;

        res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
        nfs_free_delegation(delegation);
        return res;
}

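/*
 * Take a reference to the delegation's inode, or return NULL if the
 * delegation has already been detached from it.
 */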
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
        struct inode *inode = NULL;

        spin_lock(&delegation->lock);
        if (delegation->inode != NULL)
                inode = igrab(delegation->inode);
        spin_unlock(&delegation->lock);
        return inode;
}

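/*
 * Unlink the delegation from the inode and the per-server list.
 * The caller must hold the nfs_client cl_lock.
 */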
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
                             struct nfs_server *server)
{
        struct nfs_delegation *delegation =
                rcu_dereference_protected(nfsi->delegation,
                                lockdep_is_held(&server->nfs_client->cl_lock));

        if (delegation == NULL)
                goto nomatch;

        spin_lock(&delegation->lock);
        list_del_rcu(&delegation->super_list);
        delegation->inode = NULL;
        nfsi->delegation_state = 0;
        rcu_assign_pointer(nfsi->delegation, NULL);
        spin_unlock(&delegation->lock);
        return delegation;
nomatch:
        return NULL;
}

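/*
 * Detach the delegation from its inode under the nfs_client cl_lock and
 * hand it back to the caller to dispose of.
 */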
static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
                                                    struct nfs_server *server)
{
        struct nfs_client *clp = server->nfs_client;
        struct nfs_delegation *delegation;

        spin_lock(&clp->cl_lock);
        delegation = nfs_detach_delegation_locked(nfsi, server);
        spin_unlock(&clp->cl_lock);
        return delegation;
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @res: new delegation state from server
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_client *clp = server->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation, *old_delegation;
        struct nfs_delegation *freeme = NULL;
        int status = 0;

        delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
        if (delegation == NULL)
                return -ENOMEM;
        memcpy(delegation->stateid.data, res->delegation.data,
               sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        delegation->change_attr = inode->i_version;
        delegation->cred = get_rpccred(cred);
        delegation->inode = inode;
        delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
        spin_lock_init(&delegation->lock);

        spin_lock(&clp->cl_lock);
        old_delegation = rcu_dereference_protected(nfsi->delegation,
                                                   lockdep_is_held(&clp->cl_lock));
        if (old_delegation != NULL) {
                if (memcmp(&delegation->stateid, &old_delegation->stateid,
                           sizeof(old_delegation->stateid)) == 0 &&
                    delegation->type == old_delegation->type) {
                        goto out;
                }
                /*
                 * Deal with broken servers that hand out two
                 * delegations for the same file.
                 */
                dfprintk(FILE, "%s: server %s handed out "
                         "a duplicate delegation!\n",
                         __func__, clp->cl_hostname);
                if (delegation->type <= old_delegation->type) {
                        freeme = delegation;
                        delegation = NULL;
                        goto out;
                }
                freeme = nfs_detach_delegation_locked(nfsi, server);
        }
        list_add_rcu(&delegation->super_list, &server->delegations);
        nfsi->delegation_state = delegation->type;
        rcu_assign_pointer(nfsi->delegation, delegation);
        delegation = NULL;

        /* Ensure we revalidate the attributes and page cache! */
        spin_lock(&inode->i_lock);
        nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
        spin_unlock(&inode->i_lock);

out:
        spin_unlock(&clp->cl_lock);
        if (delegation != NULL)
                nfs_free_delegation(delegation);
        if (freeme != NULL)
                nfs_do_return_delegation(inode, freeme, 0);
        return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int err;

        /*
         * Guard against new delegated open/lock/unlock calls and against
         * state recovery
         */
        down_write(&nfsi->rwsem);
        err = nfs_delegation_claim_opens(inode, &delegation->stateid);
        up_write(&nfsi->rwsem);
        if (err)
                goto out;

        err = nfs_do_return_delegation(inode, delegation, issync);
out:
        return err;
}

/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct nfs_server *server;
        struct inode *inode;
        int err = 0;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry_rcu(delegation, &server->delegations,
                                        super_list) {
                        if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
                                                &delegation->flags))
                                continue;
                        inode = nfs_delegation_grab_inode(delegation);
                        if (inode == NULL)
                                continue;
                        delegation = nfs_detach_delegation(NFS_I(inode),
                                                           server);
                        rcu_read_unlock();

                        if (delegation != NULL) {
                                filemap_flush(inode->i_mapping);
                                err = __nfs_inode_return_delegation(inode,
                                                                    delegation, 0);
                        }
                        iput(inode);
                        if (!err)
                                goto restart;
                        set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
                        return err;
                }
        }
        rcu_read_unlock();
        return 0;
}

/**
 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
 * @inode: inode to process
 *
 * Does not protect against delegation reclaims, therefore really only safe
 * to be called from nfs4_clear_inode().
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;

        if (rcu_access_pointer(nfsi->delegation) != NULL) {
                delegation = nfs_detach_delegation(nfsi, server);
                if (delegation != NULL)
                        nfs_do_return_delegation(inode, delegation, 0);
        }
}

/**
 * nfs_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_return_delegation(struct inode *inode)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int err = 0;

        if (rcu_access_pointer(nfsi->delegation) != NULL) {
                delegation = nfs_detach_delegation(nfsi, server);
                if (delegation != NULL) {
                        nfs_wb_all(inode);
                        err = __nfs_inode_return_delegation(inode, delegation, 1);
                }
        }
        return err;
}

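/*
 * Flag a delegation for return and note that the state manager has
 * delegations to process.
 */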
static void nfs_mark_return_delegation(struct nfs_server *server,
                                       struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
        set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

/**
 * nfs_super_return_all_delegations - return delegations for one superblock
 * @sb: sb to process
 *
 */
void nfs_super_return_all_delegations(struct super_block *sb)
{
        struct nfs_server *server = NFS_SB(sb);
        struct nfs_client *clp = server->nfs_client;
        struct nfs_delegation *delegation;

        if (clp == NULL)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
                spin_unlock(&delegation->lock);
        }
        rcu_read_unlock();

        if (nfs_client_return_marked_delegations(clp) != 0)
                nfs4_schedule_state_manager(clp);
}

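/*
 * Mark this server's delegations of the given types for return.
 * Read/write delegations are only marked when FMODE_WRITE is being expired.
 */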
static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
                                                 fmode_t flags)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
                        continue;
                if (delegation->type & flags)
                        nfs_mark_return_delegation(server, delegation);
        }
}

static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
                                                         fmode_t flags)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_mark_return_all_delegation_types(server, flags);
        rcu_read_unlock();
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
        nfs_client_mark_return_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
}

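/*
 * Kick the state manager if there are delegations waiting to be returned.
 */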
static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
        if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
                nfs4_schedule_state_manager(clp);
}

/**
 * nfs_expire_all_delegation_types
 * @clp: client to process
 * @flags: delegation types to expire
 *
 */
void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
        nfs_client_mark_return_all_delegation_types(clp, flags);
        nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_expire_all_delegations
 * @clp: client to process
 *
 */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
        nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
}

/**
 * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
 * @clp: client to process
 *
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
        if (clp == NULL)
                return;
        nfs_client_mark_return_all_delegations(clp);
}

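/*
 * Mark for return any delegation that has not been referenced since the
 * last scan, and clear the REFERENCED flag on the rest.
 */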
static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
                        continue;
                nfs_mark_return_delegation(server, delegation);
        }
}

/**
 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
 * @clp: nfs_client to process
 *
 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_mark_return_unreferenced_delegations(server);
        rcu_read_unlock();

        nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_async_inode_return_delegation - asynchronously return a delegation
 * @inode: inode to process
 * @stateid: state ID information from CB_RECALL arguments
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_async_inode_return_delegation(struct inode *inode,
                                      const nfs4_stateid *stateid)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_client *clp = server->nfs_client;
        struct nfs_delegation *delegation;

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);

        if (!clp->cl_mvops->validate_stateid(delegation, stateid)) {
                rcu_read_unlock();
                return -ENOENT;
        }
        nfs_mark_return_delegation(server, delegation);
        rcu_read_unlock();

        nfs_delegation_run_state_manager(clp);
        return 0;
}

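/*
 * Search one server's delegation list for an inode matching the given
 * filehandle, returning a referenced inode if found.
 */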
static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
                                 const struct nfs_fh *fhandle)
{
        struct nfs_delegation *delegation;
        struct inode *res = NULL;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
                        res = igrab(delegation->inode);
                }
                spin_unlock(&delegation->lock);
                if (res != NULL)
                        break;
        }
        return res;
}

/**
 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
 * @clp: client state handle
 * @fhandle: filehandle from a delegation recall
 *
 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
 * cannot be found.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
                                        const struct nfs_fh *fhandle)
{
        struct nfs_server *server;
        struct inode *res = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                res = nfs_delegation_find_inode_server(server, fhandle);
                if (res != NULL)
                        break;
        }
        rcu_read_unlock();
        return res;
}

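/*
 * Mark every delegation on this server as needing to be reclaimed during
 * server reboot recovery.
 */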
static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list)
                set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
}

/**
 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_delegation_mark_reclaim_server(server);
        rcu_read_unlock();
}

/**
 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct nfs_server *server;
        struct inode *inode;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry_rcu(delegation, &server->delegations,
                                        super_list) {
                        if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
                                     &delegation->flags) == 0)
                                continue;
                        inode = nfs_delegation_grab_inode(delegation);
                        if (inode == NULL)
                                continue;
                        delegation = nfs_detach_delegation(NFS_I(inode),
                                                           server);
                        rcu_read_unlock();

                        if (delegation != NULL)
                                nfs_free_delegation(delegation);
                        iput(inode);
                        goto restart;
                }
        }
        rcu_read_unlock();
}

/**
 * nfs_delegations_present - check for existence of delegations
 * @clp: client state handle
 *
 * Returns one if there are any nfs_delegation structures attached
 * to this nfs_client.
 */
int nfs_delegations_present(struct nfs_client *clp)
{
        struct nfs_server *server;
        int ret = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                if (!list_empty(&server->delegations)) {
                        ret = 1;
                        break;
                }
        rcu_read_unlock();
        return ret;
}

/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @dst: stateid data structure to fill in
 * @inode: inode to check
 *
 * Returns one and fills in "dst->data" if inode had a delegation,
 * otherwise zero is returned.
 */
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int ret = 0;

        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        if (delegation != NULL) {
                memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
                ret = 1;
        }
        rcu_read_unlock();
        return ret;
}