1 /*
2 * linux/fs/nfs/delegation.c
3 *
4 * Copyright (C) 2004 Trond Myklebust
5 *
6 * NFS file delegation management
7 *
8 */
9 #include <linux/completion.h>
10 #include <linux/kthread.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15
16 #include <linux/nfs4.h>
17 #include <linux/nfs_fs.h>
18 #include <linux/nfs_xdr.h>
19
20 #include "nfs4_fs.h"
21 #include "delegation.h"
22 #include "internal.h"
23
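/*
 * Drop the delegation's credential reference (if any) and free the
 * structure itself once the current RCU grace period has expired.
 */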
24 static void nfs_free_delegation(struct nfs_delegation *delegation)
25 {
26 if (delegation->cred) {
27 put_rpccred(delegation->cred);
28 delegation->cred = NULL;
29 }
30 kfree_rcu(delegation, rcu);
31 }
32
33 /**
34 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
35 * @delegation: delegation to process
36 *
37 */
38 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
39 {
40 set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
41 }
42
43 /**
44 * nfs4_have_delegation - check if inode has a delegation
45 * @inode: inode to check
46 * @flags: delegation types to check for
47 *
48 * Returns one if inode has the indicated delegation, otherwise zero.
49 */
50 int nfs4_have_delegation(struct inode *inode, fmode_t flags)
51 {
52 struct nfs_delegation *delegation;
53 int ret = 0;
54
55 flags &= FMODE_READ|FMODE_WRITE;
56 rcu_read_lock();
57 delegation = rcu_dereference(NFS_I(inode)->delegation);
58 if (delegation != NULL && (delegation->type & flags) == flags) {
59 nfs_mark_delegation_referenced(delegation);
60 ret = 1;
61 }
62 rcu_read_unlock();
63 return ret;
64 }
65
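/*
 * Ask the server to re-establish every POSIX or BSD lock taken under
 * @ctx while the delegation was held. The file locks lock is dropped
 * around each nfs4_lock_delegation_recall() call.
 */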
66 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
67 {
68 struct inode *inode = state->inode;
69 struct file_lock *fl;
70 int status = 0;
71
72 if (inode->i_flock == NULL)
73 goto out;
74
75 /* Protect inode->i_flock using the file locks lock */
76 lock_flocks();
77 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
78 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
79 continue;
80 if (nfs_file_open_context(fl->fl_file) != ctx)
81 continue;
82 unlock_flocks();
83 status = nfs4_lock_delegation_recall(state, fl);
84 if (status < 0)
85 goto out;
86 lock_flocks();
87 }
88 unlock_flocks();
89 out:
90 return status;
91 }
92
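/*
 * Convert every open context on the inode that still depends on the
 * delegation identified by @stateid back into ordinary open state, and
 * reclaim any locks taken under it. The scan restarts from the top
 * whenever the inode lock has to be dropped for an RPC.
 */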
93 static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
94 {
95 struct nfs_inode *nfsi = NFS_I(inode);
96 struct nfs_open_context *ctx;
97 struct nfs4_state *state;
98 int err;
99
100 again:
101 spin_lock(&inode->i_lock);
102 list_for_each_entry(ctx, &nfsi->open_files, list) {
103 state = ctx->state;
104 if (state == NULL)
105 continue;
106 if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
107 continue;
108 if (!nfs4_stateid_match(&state->stateid, stateid))
109 continue;
110 get_nfs_open_context(ctx);
111 spin_unlock(&inode->i_lock);
112 err = nfs4_open_delegation_recall(ctx, state, stateid);
113 if (err >= 0)
114 err = nfs_delegation_claim_locks(ctx, state);
115 put_nfs_open_context(ctx);
116 if (err != 0)
117 return err;
118 goto again;
119 }
120 spin_unlock(&inode->i_lock);
121 return 0;
122 }
123
124 /**
125 * nfs_inode_reclaim_delegation - process a delegation reclaim request
126 * @inode: inode to process
127 * @cred: credential to use for request
128 * @res: new delegation state from server
129 *
130 */
131 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
132 struct nfs_openres *res)
133 {
134 struct nfs_delegation *delegation;
135 struct rpc_cred *oldcred = NULL;
136
137 rcu_read_lock();
138 delegation = rcu_dereference(NFS_I(inode)->delegation);
139 if (delegation != NULL) {
140 spin_lock(&delegation->lock);
141 if (delegation->inode != NULL) {
142 nfs4_stateid_copy(&delegation->stateid, &res->delegation);
143 delegation->type = res->delegation_type;
144 delegation->maxsize = res->maxsize;
145 oldcred = delegation->cred;
146 delegation->cred = get_rpccred(cred);
147 clear_bit(NFS_DELEGATION_NEED_RECLAIM,
148 &delegation->flags);
149 NFS_I(inode)->delegation_state = delegation->type;
150 spin_unlock(&delegation->lock);
151 put_rpccred(oldcred);
152 rcu_read_unlock();
153 } else {
154 /* We appear to have raced with a delegation return. */
155 spin_unlock(&delegation->lock);
156 rcu_read_unlock();
157 nfs_inode_set_delegation(inode, cred, res);
158 }
159 } else {
160 rcu_read_unlock();
161 }
162 }
163
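/*
 * Send DELEGRETURN to the server (waiting for the reply when @issync is
 * set) and release the local delegation structure.
 */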
164 static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
165 {
166 int res = 0;
167
168 res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
169 nfs_free_delegation(delegation);
170 return res;
171 }
172
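/*
 * Safely take a reference to the delegation's inode. Returns NULL if
 * the delegation has already been detached or the inode is being freed.
 */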
173 static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
174 {
175 struct inode *inode = NULL;
176
177 spin_lock(&delegation->lock);
178 if (delegation->inode != NULL)
179 inode = igrab(delegation->inode);
180 spin_unlock(&delegation->lock);
181 return inode;
182 }
183
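/*
 * Unlink the delegation from the inode and from the server's delegation
 * list. The caller must hold the nfs_client cl_lock, and is responsible
 * for returning or freeing the delegation that is handed back.
 */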
184 static struct nfs_delegation *
185 nfs_detach_delegation_locked(struct nfs_inode *nfsi,
186 struct nfs_server *server)
187 {
188 struct nfs_delegation *delegation =
189 rcu_dereference_protected(nfsi->delegation,
190 lockdep_is_held(&server->nfs_client->cl_lock));
191
192 if (delegation == NULL)
193 goto nomatch;
194
195 spin_lock(&delegation->lock);
196 list_del_rcu(&delegation->super_list);
197 delegation->inode = NULL;
198 nfsi->delegation_state = 0;
199 rcu_assign_pointer(nfsi->delegation, NULL);
200 spin_unlock(&delegation->lock);
201 return delegation;
202 nomatch:
203 return NULL;
204 }
205
206 static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
207 struct nfs_server *server)
208 {
209 struct nfs_client *clp = server->nfs_client;
210 struct nfs_delegation *delegation;
211
212 spin_lock(&clp->cl_lock);
213 delegation = nfs_detach_delegation_locked(nfsi, server);
214 spin_unlock(&clp->cl_lock);
215 return delegation;
216 }
217
218 /**
219 * nfs_inode_set_delegation - set up a delegation on an inode
220 * @inode: inode to which delegation applies
221 * @cred: cred to use for subsequent delegation processing
222 * @res: new delegation state from server
223 *
224 * Returns zero on success, or a negative errno value.
225 */
226 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
227 {
228 struct nfs_server *server = NFS_SERVER(inode);
229 struct nfs_client *clp = server->nfs_client;
230 struct nfs_inode *nfsi = NFS_I(inode);
231 struct nfs_delegation *delegation, *old_delegation;
232 struct nfs_delegation *freeme = NULL;
233 int status = 0;
234
235 delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
236 if (delegation == NULL)
237 return -ENOMEM;
238 nfs4_stateid_copy(&delegation->stateid, &res->delegation);
239 delegation->type = res->delegation_type;
240 delegation->maxsize = res->maxsize;
241 delegation->change_attr = inode->i_version;
242 delegation->cred = get_rpccred(cred);
243 delegation->inode = inode;
244 delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
245 spin_lock_init(&delegation->lock);
246
247 spin_lock(&clp->cl_lock);
248 old_delegation = rcu_dereference_protected(nfsi->delegation,
249 lockdep_is_held(&clp->cl_lock));
250 if (old_delegation != NULL) {
251 if (nfs4_stateid_match(&delegation->stateid,
252 &old_delegation->stateid) &&
253 delegation->type == old_delegation->type) {
254 goto out;
255 }
256 /*
257 * Deal with broken servers that hand out two
258 * delegations for the same file.
259 * Allow for upgrades to a WRITE delegation, but
260 * nothing else.
261 */
262 dfprintk(FILE, "%s: server %s handed out "
263 "a duplicate delegation!\n",
264 __func__, clp->cl_hostname);
265 if (delegation->type == old_delegation->type ||
266 !(delegation->type & FMODE_WRITE)) {
267 freeme = delegation;
268 delegation = NULL;
269 goto out;
270 }
271 freeme = nfs_detach_delegation_locked(nfsi, server);
272 }
273 list_add_rcu(&delegation->super_list, &server->delegations);
274 nfsi->delegation_state = delegation->type;
275 rcu_assign_pointer(nfsi->delegation, delegation);
276 delegation = NULL;
277
278 /* Ensure we revalidate the attributes and page cache! */
279 spin_lock(&inode->i_lock);
280 nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
281 spin_unlock(&inode->i_lock);
282
283 out:
284 spin_unlock(&clp->cl_lock);
285 if (delegation != NULL)
286 nfs_free_delegation(delegation);
287 if (freeme != NULL)
288 nfs_do_return_delegation(inode, freeme, 0);
289 return status;
290 }
291
292 /*
293 * Basic procedure for returning a delegation to the server
294 */
295 static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
296 {
297 struct nfs_inode *nfsi = NFS_I(inode);
298 int err;
299
300 /*
301 * Guard against new delegated open/lock/unlock calls and against
302 * state recovery
303 */
304 down_write(&nfsi->rwsem);
305 err = nfs_delegation_claim_opens(inode, &delegation->stateid);
306 up_write(&nfsi->rwsem);
307 if (err)
308 goto out;
309
310 err = nfs_do_return_delegation(inode, delegation, issync);
311 out:
312 return err;
313 }
314
315 /**
316 * nfs_client_return_marked_delegations - return previously marked delegations
317 * @clp: nfs_client to process
318 *
319 * Note that this function is designed to be called by the state
320 * manager thread. For this reason, it cannot flush the dirty data,
321 * since that could deadlock in case of a state recovery error.
322 *
323 * Returns zero on success, or a negative errno value.
324 */
325 int nfs_client_return_marked_delegations(struct nfs_client *clp)
326 {
327 struct nfs_delegation *delegation;
328 struct nfs_server *server;
329 struct inode *inode;
330 int err = 0;
331
332 restart:
333 rcu_read_lock();
334 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
335 list_for_each_entry_rcu(delegation, &server->delegations,
336 super_list) {
337 if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
338 &delegation->flags))
339 continue;
340 inode = nfs_delegation_grab_inode(delegation);
341 if (inode == NULL)
342 continue;
343 delegation = nfs_detach_delegation(NFS_I(inode),
344 server);
345 rcu_read_unlock();
346
347 if (delegation != NULL)
348 err = __nfs_inode_return_delegation(inode,
349 delegation, 0);
350 iput(inode);
351 if (!err)
352 goto restart;
353 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
354 return err;
355 }
356 }
357 rcu_read_unlock();
358 return 0;
359 }
360
361 /**
362 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
363 * @inode: inode to process
364 *
365 * Does not protect against delegation reclaims, therefore really only safe
366 * to be called from nfs4_clear_inode().
367 */
368 void nfs_inode_return_delegation_noreclaim(struct inode *inode)
369 {
370 struct nfs_server *server = NFS_SERVER(inode);
371 struct nfs_inode *nfsi = NFS_I(inode);
372 struct nfs_delegation *delegation;
373
374 if (rcu_access_pointer(nfsi->delegation) != NULL) {
375 delegation = nfs_detach_delegation(nfsi, server);
376 if (delegation != NULL)
377 nfs_do_return_delegation(inode, delegation, 0);
378 }
379 }
380
381 /**
382 * nfs4_inode_return_delegation - synchronously return a delegation
383 * @inode: inode to process
384 *
385 * This routine will always flush any dirty data to disk on the
386 * assumption that if we need to return the delegation, then
387 * we should stop caching.
388 *
389 * Returns zero on success, or a negative errno value.
390 */
391 int nfs4_inode_return_delegation(struct inode *inode)
392 {
393 struct nfs_server *server = NFS_SERVER(inode);
394 struct nfs_inode *nfsi = NFS_I(inode);
395 struct nfs_delegation *delegation;
396 int err = 0;
397
398 nfs_wb_all(inode);
399 if (rcu_access_pointer(nfsi->delegation) != NULL) {
400 delegation = nfs_detach_delegation(nfsi, server);
401 if (delegation != NULL) {
402 err = __nfs_inode_return_delegation(inode, delegation, 1);
403 }
404 }
405 return err;
406 }
407
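/*
 * Flag a delegation for return and let the state manager know that it
 * has delegations to process.
 */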
408 static void nfs_mark_return_delegation(struct nfs_server *server,
409 struct nfs_delegation *delegation)
410 {
411 set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
412 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
413 }
414
415 /**
416 * nfs_server_return_all_delegations - return all delegations for an nfs_server
417 * @server: nfs_server to process
418 *
419 */
420 void nfs_server_return_all_delegations(struct nfs_server *server)
421 {
422 struct nfs_client *clp = server->nfs_client;
423 struct nfs_delegation *delegation;
424
425 if (clp == NULL)
426 return;
427
428 rcu_read_lock();
429 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
430 spin_lock(&delegation->lock);
431 set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
432 spin_unlock(&delegation->lock);
433 }
434 rcu_read_unlock();
435
436 if (nfs_client_return_marked_delegations(clp) != 0)
437 nfs4_schedule_state_manager(clp);
438 }
439
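/*
 * Mark for return every delegation on @server whose type matches @flags.
 * Read-write delegations are only marked when a write-type return has
 * been requested.
 */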
440 static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
441 fmode_t flags)
442 {
443 struct nfs_delegation *delegation;
444
445 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
446 if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
447 continue;
448 if (delegation->type & flags)
449 nfs_mark_return_delegation(server, delegation);
450 }
451 }
452
453 static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
454 fmode_t flags)
455 {
456 struct nfs_server *server;
457
458 rcu_read_lock();
459 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
460 nfs_mark_return_all_delegation_types(server, flags);
461 rcu_read_unlock();
462 }
463
464 static void nfs_delegation_run_state_manager(struct nfs_client *clp)
465 {
466 if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
467 nfs4_schedule_state_manager(clp);
468 }
469
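/**
 * nfs_remove_bad_delegation - discard a delegation the server no longer honours
 * @inode: inode holding the delegation
 *
 * Detaches the delegation, schedules recovery of any open or lock state
 * that was relying on its stateid, and frees it without sending a
 * DELEGRETURN to the server.
 */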
470 void nfs_remove_bad_delegation(struct inode *inode)
471 {
472 struct nfs_delegation *delegation;
473
474 delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode));
475 if (delegation) {
476 nfs_inode_find_state_and_recover(inode, &delegation->stateid);
477 nfs_free_delegation(delegation);
478 }
479 }
480 EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
481
482 /**
483 * nfs_expire_all_delegation_types - return all delegations of the given types
484 * @clp: client to process
485 * @flags: delegation types to expire
486 *
487 */
488 void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
489 {
490 nfs_client_mark_return_all_delegation_types(clp, flags);
491 nfs_delegation_run_state_manager(clp);
492 }
493
494 /**
495 * nfs_expire_all_delegations - return every delegation held by a client
496 * @clp: client to process
497 *
498 */
499 void nfs_expire_all_delegations(struct nfs_client *clp)
500 {
501 nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
502 }
503
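/*
 * Mark for return every delegation on @server that has not been used
 * since the last scan, clearing the REFERENCED flag as it goes.
 */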
504 static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
505 {
506 struct nfs_delegation *delegation;
507
508 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
509 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
510 continue;
511 nfs_mark_return_delegation(server, delegation);
512 }
513 }
514
515 /**
516 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
517 * @clp: nfs_client to process
518 *
519 */
520 void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
521 {
522 struct nfs_server *server;
523
524 rcu_read_lock();
525 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
526 nfs_mark_return_unreferenced_delegations(server);
527 rcu_read_unlock();
528
529 nfs_delegation_run_state_manager(clp);
530 }
531
532 /**
533 * nfs_async_inode_return_delegation - asynchronously return a delegation
534 * @inode: inode to process
535 * @stateid: state ID information
536 *
537 * Returns zero on success, or a negative errno value.
538 */
539 int nfs_async_inode_return_delegation(struct inode *inode,
540 const nfs4_stateid *stateid)
541 {
542 struct nfs_server *server = NFS_SERVER(inode);
543 struct nfs_client *clp = server->nfs_client;
544 struct nfs_delegation *delegation;
545
546 filemap_flush(inode->i_mapping);
547
548 rcu_read_lock();
549 delegation = rcu_dereference(NFS_I(inode)->delegation);
550
551 if (delegation == NULL || !clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
552 rcu_read_unlock();
553 return -ENOENT;
554 }
555 nfs_mark_return_delegation(server, delegation);
556 rcu_read_unlock();
557
558 nfs_delegation_run_state_manager(clp);
559 return 0;
560 }
561
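/*
 * Search one server's delegation list for an inode matching @fhandle.
 * Returns a referenced inode on success, or NULL if no match is found.
 */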
562 static struct inode *
563 nfs_delegation_find_inode_server(struct nfs_server *server,
564 const struct nfs_fh *fhandle)
565 {
566 struct nfs_delegation *delegation;
567 struct inode *res = NULL;
568
569 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
570 spin_lock(&delegation->lock);
571 if (delegation->inode != NULL &&
572 nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
573 res = igrab(delegation->inode);
574 }
575 spin_unlock(&delegation->lock);
576 if (res != NULL)
577 break;
578 }
579 return res;
580 }
581
582 /**
583 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
584 * @clp: client state handle
585 * @fhandle: filehandle from a delegation recall
586 *
587 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
588 * cannot be found.
589 */
590 struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
591 const struct nfs_fh *fhandle)
592 {
593 struct nfs_server *server;
594 struct inode *res = NULL;
595
596 rcu_read_lock();
597 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
598 res = nfs_delegation_find_inode_server(server, fhandle);
599 if (res != NULL)
600 break;
601 }
602 rcu_read_unlock();
603 return res;
604 }
605
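/*
 * Flag every delegation held from @server as needing to be reclaimed
 * during reboot recovery.
 */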
606 static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
607 {
608 struct nfs_delegation *delegation;
609
610 list_for_each_entry_rcu(delegation, &server->delegations, super_list)
611 set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
612 }
613
614 /**
615 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
616 * @clp: nfs_client to process
617 *
618 */
619 void nfs_delegation_mark_reclaim(struct nfs_client *clp)
620 {
621 struct nfs_server *server;
622
623 rcu_read_lock();
624 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
625 nfs_delegation_mark_reclaim_server(server);
626 rcu_read_unlock();
627 }
628
629 /**
630 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
631 * @clp: nfs_client to process
632 *
633 */
634 void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
635 {
636 struct nfs_delegation *delegation;
637 struct nfs_server *server;
638 struct inode *inode;
639
640 restart:
641 rcu_read_lock();
642 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
643 list_for_each_entry_rcu(delegation, &server->delegations,
644 super_list) {
645 if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
646 &delegation->flags) == 0)
647 continue;
648 inode = nfs_delegation_grab_inode(delegation);
649 if (inode == NULL)
650 continue;
651 delegation = nfs_detach_delegation(NFS_I(inode),
652 server);
653 rcu_read_unlock();
654
655 if (delegation != NULL)
656 nfs_free_delegation(delegation);
657 iput(inode);
658 goto restart;
659 }
660 }
661 rcu_read_unlock();
662 }
663
664 /**
665 * nfs_delegations_present - check for existence of delegations
666 * @clp: client state handle
667 *
668 * Returns one if there are any nfs_delegation structures attached
669 * to this nfs_client.
670 */
671 int nfs_delegations_present(struct nfs_client *clp)
672 {
673 struct nfs_server *server;
674 int ret = 0;
675
676 rcu_read_lock();
677 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
678 if (!list_empty(&server->delegations)) {
679 ret = 1;
680 break;
681 }
682 rcu_read_unlock();
683 return ret;
684 }
685
686 /**
687 * nfs4_copy_delegation_stateid - Copy inode's state ID information
688 * @dst: stateid data structure to fill in
689 * @inode: inode to check
690 * @flags: delegation type requirement
691 *
692 * Returns "true" and fills in "dst" if the inode holds a delegation,
693 * otherwise "false" is returned.
694 */
695 bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
696 fmode_t flags)
697 {
698 struct nfs_inode *nfsi = NFS_I(inode);
699 struct nfs_delegation *delegation;
700 bool ret;
701
702 flags &= FMODE_READ|FMODE_WRITE;
703 rcu_read_lock();
704 delegation = rcu_dereference(nfsi->delegation);
705 ret = (delegation != NULL && (delegation->type & flags) == flags);
706 if (ret) {
707 nfs4_stateid_copy(dst, &delegation->stateid);
708 nfs_mark_delegation_referenced(delegation);
709 }
710 rcu_read_unlock();
711 return ret;
712 }