/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

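/* Drop the credential reference (if any) and free the delegation via RCU. */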
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred) {
		put_rpccred(delegation->cred);
		delegation->cred = NULL;
	}
	kfree_rcu(delegation, rcu);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

/**
 * nfs4_have_delegation - check if inode has a delegation
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t flags)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL && (delegation->type & flags) == flags &&
	    !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
		nfs_mark_delegation_referenced(delegation);
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}

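/*
 * Reclaim any POSIX or flock locks held under this open context against the
 * recalled delegation stateid, so the server learns about them before the
 * delegation is returned.
 */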
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	if (inode->i_flock == NULL)
		goto out;

	/* Protect inode->i_flock using the i_lock */
	spin_lock(&inode->i_lock);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		spin_unlock(&inode->i_lock);
		status = nfs4_lock_delegation_recall(fl, state, stateid);
		if (status < 0)
			goto out;
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
out:
	return status;
}

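/*
 * Convert each open that relies on the delegation stateid back into a
 * regular open stateid on the server, then reclaim the associated locks.
 * The scan is restarted from the top whenever the inode's i_lock has been
 * dropped.
 */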
static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	unsigned int seq;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (!nfs4_stateid_match(&state->stateid, stateid))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		sp = state->owner;
		/* Block nfs4_proc_unlck */
		mutex_lock(&sp->so_delegreturn_mutex);
		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (!err)
			err = nfs_delegation_claim_locks(ctx, state, stateid);
		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
			err = -EAGAIN;
		mutex_unlock(&sp->so_delegreturn_mutex);
		put_nfs_open_context(ctx);
		if (err != 0)
			return err;
		goto again;
	}
	spin_unlock(&inode->i_lock);
	return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @res: new delegation state from server
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
				  struct nfs_openres *res)
{
	struct nfs_delegation *delegation;
	struct rpc_cred *oldcred = NULL;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL) {
			nfs4_stateid_copy(&delegation->stateid, &res->delegation);
			delegation->type = res->delegation_type;
			delegation->maxsize = res->maxsize;
			oldcred = delegation->cred;
			delegation->cred = get_rpccred(cred);
			clear_bit(NFS_DELEGATION_NEED_RECLAIM,
				  &delegation->flags);
			NFS_I(inode)->delegation_state = delegation->type;
			spin_unlock(&delegation->lock);
			put_rpccred(oldcred);
			rcu_read_unlock();
		} else {
			/* We appear to have raced with a delegation return. */
			spin_unlock(&delegation->lock);
			rcu_read_unlock();
			nfs_inode_set_delegation(inode, cred, res);
		}
	} else {
		rcu_read_unlock();
	}
}

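/* Send DELEGRETURN to the server, then free the delegation structure. */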
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
	nfs_free_delegation(delegation);
	return res;
}

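/*
 * Take a reference to the delegation's inode, or return NULL if the
 * delegation has already been detached from it.
 */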
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	spin_unlock(&delegation->lock);
	return inode;
}

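/*
 * Mark the inode's delegation as being returned. Returns the delegation if
 * this caller won the race to set NFS_DELEGATION_RETURNING, or NULL if there
 * is no delegation or a return is already in progress. The caller must hold
 * the RCU read lock.
 */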
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
	struct nfs_delegation *ret = NULL;
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto out;
	spin_lock(&delegation->lock);
	if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		ret = delegation;
	spin_unlock(&delegation->lock);
out:
	return ret;
}

static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = nfs_start_delegation_return_locked(nfsi);
	rcu_read_unlock();
	return delegation;
}

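/*
 * The return attempt failed: clear the RETURNING flag, re-mark the delegation
 * for return and set NFS4CLNT_DELEGRETURN so the state manager retries later.
 */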
static void
nfs_abort_delegation_return(struct nfs_delegation *delegation,
			    struct nfs_client *clp)
{
	spin_lock(&delegation->lock);
	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	spin_unlock(&delegation->lock);
	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

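/*
 * Unlink the delegation from the per-server list and from the inode.
 * Returns the detached delegation, or NULL if it no longer matches the
 * inode's current delegation. The caller must hold clp->cl_lock.
 */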
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
			     struct nfs_delegation *delegation,
			     struct nfs_client *clp)
{
	struct nfs_delegation *deleg_cur =
		rcu_dereference_protected(nfsi->delegation,
					  lockdep_is_held(&clp->cl_lock));

	if (deleg_cur == NULL || delegation != deleg_cur)
		return NULL;

	spin_lock(&delegation->lock);
	set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	list_del_rcu(&delegation->super_list);
	delegation->inode = NULL;
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	return delegation;
}

static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
						    struct nfs_delegation *delegation,
						    struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;

	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
	spin_unlock(&clp->cl_lock);
	return delegation;
}

static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_delegation *delegation;

	delegation = nfs_start_delegation_return(nfsi);
	if (delegation == NULL)
		return NULL;
	return nfs_detach_delegation(nfsi, delegation, server);
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @res: new delegation state from server
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation, *old_delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
	if (delegation == NULL)
		return -ENOMEM;
	nfs4_stateid_copy(&delegation->stateid, &res->delegation);
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = inode->i_version;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	old_delegation = rcu_dereference_protected(nfsi->delegation,
						   lockdep_is_held(&clp->cl_lock));
	if (old_delegation != NULL) {
		if (nfs4_stateid_match(&delegation->stateid,
				       &old_delegation->stateid) &&
		    delegation->type == old_delegation->type) {
			goto out;
		}
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 * Allow for upgrades to a WRITE delegation, but
		 * nothing else.
		 */
		dfprintk(FILE, "%s: server %s handed out "
			 "a duplicate delegation!\n",
			 __func__, clp->cl_hostname);
		if (delegation->type == old_delegation->type ||
		    !(delegation->type & FMODE_WRITE)) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		freeme = nfs_detach_delegation_locked(nfsi,
						      old_delegation, clp);
		if (freeme == NULL)
			goto out;
	}
	list_add_rcu(&delegation->super_list, &server->delegations);
	nfsi->delegation_state = delegation->type;
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	if (freeme != NULL)
		nfs_do_return_delegation(inode, freeme, 0);
	return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	int err;

	if (delegation == NULL)
		return 0;
	do {
		err = nfs_delegation_claim_opens(inode, &delegation->stateid);
		if (!issync || err != -EAGAIN)
			break;
		/*
		 * Guard against state recovery
		 */
		err = nfs4_wait_clnt_recover(clp);
	} while (err == 0);

	if (err) {
		nfs_abort_delegation_return(delegation, clp);
		goto out;
	}
	if (!nfs_detach_delegation(nfsi, delegation, NFS_SERVER(inode)))
		goto out;

	err = nfs_do_return_delegation(inode, delegation, issync);
out:
	return err;
}

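/*
 * Decide whether this delegation should be returned now: either it was
 * explicitly marked for return, or it was marked "return if closed" and the
 * inode has no remaining open files.
 */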
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
	bool ret = false;

	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
		ret = true;
	if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
		struct inode *inode;

		spin_lock(&delegation->lock);
		inode = delegation->inode;
		if (inode && list_empty(&NFS_I(inode)->open_files))
			ret = true;
		spin_unlock(&delegation->lock);
	}
	return ret;
}

/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Note that this function is designed to be called by the state
 * manager thread. For this reason, it cannot flush the dirty data,
 * since that could deadlock in case of a state recovery error.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct nfs_server *server;
	struct inode *inode;
	int err = 0;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
					super_list) {
			if (!nfs_delegation_need_return(delegation))
				continue;
			inode = nfs_delegation_grab_inode(delegation);
			if (inode == NULL)
				continue;
			delegation = nfs_start_delegation_return_locked(NFS_I(inode));
			rcu_read_unlock();

			err = nfs_end_delegation_return(inode, delegation, 0);
			iput(inode);
			if (!err)
				goto restart;
			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
			return err;
		}
	}
	rcu_read_unlock();
	return 0;
}

/**
 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
 * @inode: inode to process
 *
 * Does not protect against delegation reclaims, therefore really only safe
 * to be called from nfs4_clear_inode().
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = nfs_inode_detach_delegation(inode);
	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation, 0);
}

/**
 * nfs4_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * This routine will always flush any dirty data to disk on the
 * assumption that if we need to return the delegation, then
 * we should stop caching.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_return_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	nfs_wb_all(inode);
	delegation = nfs_start_delegation_return(nfsi);
	if (delegation != NULL)
		err = nfs_end_delegation_return(inode, delegation, 1);
	return err;
}

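/*
 * Helpers that flag a delegation for return (immediately, or once the last
 * open file is closed) and set NFS4CLNT_DELEGRETURN so the state manager
 * will process it.
 */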
static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
						 struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static void nfs_mark_return_delegation(struct nfs_server *server,
				       struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;
	bool ret = false;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		nfs_mark_return_delegation(server, delegation);
		ret = true;
	}
	return ret;
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
		nfs4_schedule_state_manager(clp);
}

/**
 * nfs_expire_all_delegations
 * @clp: client to process
 *
 */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_all_delegations(clp);
	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_server_return_all_delegations - return delegations for one superblock
 * @server: nfs_server to process
 *
 */
void nfs_server_return_all_delegations(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	bool need_wait;

	if (clp == NULL)
		return;

	rcu_read_lock();
	need_wait = nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();

	if (need_wait) {
		nfs4_schedule_state_manager(clp);
		nfs4_wait_clnt_recover(clp);
	}
}

static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
						    fmode_t flags)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
			continue;
		if (delegation->type & flags)
			nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
							    fmode_t flags)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unused_delegation_types(server, flags);
	rcu_read_unlock();
}

void nfs_remove_bad_delegation(struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = nfs_inode_detach_delegation(inode);
	if (delegation) {
		nfs_inode_find_state_and_recover(inode, &delegation->stateid);
		nfs_free_delegation(delegation);
	}
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);

/**
 * nfs_expire_unused_delegation_types
 * @clp: client to process
 * @flags: delegation types to expire
 *
 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
	nfs_client_mark_return_unused_delegation_types(clp, flags);
	nfs_delegation_run_state_manager(clp);
}

static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
			continue;
		nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

/**
 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
 * @clp: nfs_client to process
 *
 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unreferenced_delegations(server);
	rcu_read_unlock();

	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_async_inode_return_delegation - asynchronously return a delegation
 * @inode: inode to process
 * @stateid: state ID information
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_async_inode_return_delegation(struct inode *inode,
				      const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_delegation *delegation;

	filemap_flush(inode->i_mapping);

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);

	/* Guard against the inode holding no delegation at all. */
	if (delegation == NULL ||
	    !clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	nfs_mark_return_delegation(server, delegation);
	rcu_read_unlock();

	nfs_delegation_run_state_manager(clp);
	return 0;
}

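/*
 * Search one server's delegation list for an inode matching the given
 * filehandle, taking a reference to the inode if found.
 */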
static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
				 const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL &&
		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
		}
		spin_unlock(&delegation->lock);
		if (res != NULL)
			break;
	}
	return res;
}

/**
 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
 * @clp: client state handle
 * @fhandle: filehandle from a delegation recall
 *
 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
 * cannot be found.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
					const struct nfs_fh *fhandle)
{
	struct nfs_server *server;
	struct inode *res = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		res = nfs_delegation_find_inode_server(server, fhandle);
		if (res != NULL)
			break;
	}
	rcu_read_unlock();
	return res;
}

static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
}

/**
 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_delegation_mark_reclaim_server(server);
	rcu_read_unlock();
}

/**
 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct nfs_server *server;
	struct inode *inode;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
					super_list) {
			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
				     &delegation->flags) == 0)
				continue;
			inode = nfs_delegation_grab_inode(delegation);
			if (inode == NULL)
				continue;
			delegation = nfs_detach_delegation(NFS_I(inode),
							   delegation, server);
			rcu_read_unlock();

			if (delegation != NULL)
				nfs_free_delegation(delegation);
			iput(inode);
			goto restart;
		}
	}
	rcu_read_unlock();
}

/**
 * nfs_delegations_present - check for existence of delegations
 * @clp: client state handle
 *
 * Returns one if there are any nfs_delegation structures attached
 * to this nfs_client.
 */
int nfs_delegations_present(struct nfs_client *clp)
{
	struct nfs_server *server;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		if (!list_empty(&server->delegations)) {
			ret = 1;
			break;
		}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @dst: stateid data structure to fill in
 * @inode: inode to check
 * @flags: delegation type requirement
 *
 * Returns "true" and fills in "dst->data" if inode had a delegation,
 * otherwise "false" is returned.
 */
bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
				  fmode_t flags)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	ret = (delegation != NULL && (delegation->type & flags) == flags);
	if (ret) {
		nfs4_stateid_copy(dst, &delegation->stateid);
		nfs_mark_delegation_referenced(delegation);
	}
	rcu_read_unlock();
	return ret;
}